├── CHANGELOG.md ├── docs └── .gitignore ├── vendor └── github.com │ ├── rs │ └── zerolog │ │ ├── go.mod │ │ ├── pretty.png │ │ ├── internal │ │ ├── cbor │ │ │ ├── base.go │ │ │ ├── string.go │ │ │ ├── time.go │ │ │ ├── cbor.go │ │ │ └── README.md │ │ └── json │ │ │ ├── base.go │ │ │ ├── bytes.go │ │ │ ├── time.go │ │ │ └── string.go │ │ ├── encoder_json.go │ │ ├── encoder_cbor.go │ │ ├── LICENSE │ │ ├── ctx.go │ │ ├── syslog.go │ │ ├── hook.go │ │ ├── globals.go │ │ ├── encoder.go │ │ ├── writer.go │ │ ├── sampler.go │ │ └── console.go │ ├── prometheus │ ├── procfs │ │ ├── MAINTAINERS.md │ │ ├── NOTICE │ │ ├── README.md │ │ ├── CONTRIBUTING.md │ │ ├── doc.go │ │ ├── internal │ │ │ └── util │ │ │ │ ├── sysreadfile_linux.go │ │ │ │ └── parse.go │ │ ├── proc_io.go │ │ ├── nfs │ │ │ ├── parse_nfs.go │ │ │ └── parse_nfsd.go │ │ ├── proc_ns.go │ │ ├── fs.go │ │ ├── Makefile │ │ ├── buddyinfo.go │ │ └── mdstat.go │ ├── client_golang │ │ ├── prometheus │ │ │ ├── README.md │ │ │ ├── promhttp │ │ │ │ ├── delegator_pre_1_8.go │ │ │ │ └── instrument_client.go │ │ │ ├── fnv.go │ │ │ ├── untyped.go │ │ │ ├── timer.go │ │ │ ├── observer.go │ │ │ ├── labels.go │ │ │ ├── internal │ │ │ │ └── metric.go │ │ │ └── expvar_collector.go │ │ └── NOTICE │ ├── client_model │ │ └── NOTICE │ └── common │ │ ├── NOTICE │ │ ├── model │ │ ├── model.go │ │ ├── fnv.go │ │ ├── fingerprinting.go │ │ ├── silence.go │ │ ├── metric.go │ │ └── alert.go │ │ ├── expfmt │ │ ├── fuzz.go │ │ ├── expfmt.go │ │ └── encode.go │ │ └── internal │ │ └── bitbucket.org │ │ └── ww │ │ └── goautoneg │ │ └── README.txt │ ├── matttproud │ └── golang_protobuf_extensions │ │ ├── NOTICE │ │ └── pbutil │ │ ├── Makefile │ │ ├── doc.go │ │ ├── encode.go │ │ └── decode.go │ ├── go-kit │ └── kit │ │ ├── metrics │ │ ├── debug.test │ │ ├── internal │ │ │ └── lv │ │ │ │ ├── labelvalues.go │ │ │ │ └── space.go │ │ ├── metrics.go │ │ ├── timer.go │ │ ├── discard │ │ │ └── discard.go │ │ ├── README.md │ │ └── doc.go │ │ └── LICENSE │ ├── beorn7 │ └── perks │ │ └── LICENSE │ └── golang │ └── protobuf │ ├── LICENSE │ └── proto │ └── deprecated.go ├── AUTHORS.md ├── .dockerignore ├── dependencies.txt ├── config └── config.go ├── types ├── block.go ├── bloom.go ├── base.go ├── contract.go ├── event.go ├── header.go ├── receipt.go └── transaction.go ├── contrib ├── git │ ├── .gitconfig.tmp │ └── .gitmessage.tmp └── LICENSE-APPLY ├── .gitattributes ├── README.md ├── monitor ├── pprof.go ├── expvar.go ├── promServ.go └── metrics.go ├── scripts ├── unit_test_cov.sh ├── changelog.sh ├── check_spelling.sh ├── install_behave.sh ├── ensure_deps.sh └── check_license.sh ├── .codecov.yml ├── .circleci └── config.yml ├── .gitignore ├── log ├── logger.go ├── nonlogger_adapter.go ├── README.md ├── config.go ├── log.go └── zerolog_adapter.go ├── version └── version.go ├── rlp ├── doc.go ├── encoder_example_test.go ├── decode_tail_test.go └── typecache.go ├── CONTRIBUTING.md └── Makefile /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rs/zerolog 2 | 
-------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/MAINTAINERS.md: -------------------------------------------------------------------------------- 1 | * Tobias Schmidt 2 | -------------------------------------------------------------------------------- /vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2012 Matt T. Proud (matt.proud@gmail.com) 2 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/pretty.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DSiSc/craft/HEAD/vendor/github.com/rs/zerolog/pretty.png -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/debug.test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DSiSc/craft/HEAD/vendor/github.com/go-kit/kit/metrics/debug.test -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | # Credits 2 | 3 | ## Development Lead 4 | 5 | - DSiSc [DSiSc](https://github.com/DSiSc) 6 | 7 | ## Contributors 8 | 9 | None yet. Why not be the first? -------------------------------------------------------------------------------- /vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | 3 | cover: 4 | go test -cover -v -coverprofile=cover.dat ./... 5 | go tool cover -func cover.dat 6 | 7 | .PHONY: cover 8 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright(c) 2018 DSiSc Group. All Rights Reserved. 3 | # 4 | # SPDX-License-Identifier: Apache-2.0 5 | # 6 | .git 7 | .circleci 8 | .github 9 | .codecov.yml 10 | .mailmap 11 | .travis.yml 12 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/README.md: -------------------------------------------------------------------------------- 1 | See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). 2 | -------------------------------------------------------------------------------- /dependencies.txt: -------------------------------------------------------------------------------- 1 | # Imported packages that do not exist under the "vendor" folder. 2 | # The following lines are listed by git repository (one line per git repo), 3 | # along with a compatible version (branch/tag/commit-id). 4 | 5 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_model/NOTICE: -------------------------------------------------------------------------------- 1 | Data model artifacts for Prometheus. 2 | Copyright 2012-2015 The Prometheus Authors 3 | 4 | This product includes software developed at 5 | SoundCloud Ltd. (http://soundcloud.com/).
6 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/NOTICE: -------------------------------------------------------------------------------- 1 | Common libraries shared by Prometheus Go components. 2 | Copyright 2015 The Prometheus Authors 3 | 4 | This product includes software developed at 5 | SoundCloud Ltd. (http://soundcloud.com/). 6 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "sync" 4 | 5 | // common config key names 6 | const ( 7 | HashAlgName string = "HashAlgName" 8 | ) 9 | 10 | // GlobalConfig project's global config 11 | var GlobalConfig *sync.Map = new(sync.Map) 12 | -------------------------------------------------------------------------------- /types/block.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Define types and structures related to blocks 3 | */ 4 | package types 5 | 6 | type Block struct { 7 | Header *Header 8 | Transactions []*Transaction 9 | HeaderHash Hash `json:"headerHash" gencodec:"required"` 10 | } 11 | -------------------------------------------------------------------------------- /types/bloom.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | const ( 4 | // BloomByteLength represents the number of bytes used in a header log bloom. 5 | BloomByteLength = 256 6 | ) 7 | 8 | // Bloom represents a 2048 bit bloom filter. 9 | type Bloom [BloomByteLength]byte 10 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/NOTICE: -------------------------------------------------------------------------------- 1 | procfs provides functions to retrieve system, kernel and process 2 | metrics from the pseudo-filesystem proc. 3 | 4 | Copyright 2014-2015 The Prometheus Authors 5 | 6 | This product includes software developed at 7 | SoundCloud Ltd. (http://soundcloud.com/). 8 | -------------------------------------------------------------------------------- /contrib/git/.gitconfig.tmp: -------------------------------------------------------------------------------- 1 | [commit] 2 | template = ~/.gitmessage 3 | 4 | [alias] 5 | co = checkout 6 | ci = commit 7 | br = branch 8 | st = status 9 | last = log -1 10 | lg = log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit 11 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/cbor/base.go: -------------------------------------------------------------------------------- 1 | package cbor 2 | 3 | type Encoder struct{} 4 | 5 | // AppendKey adds a key (string) to the binary encoded log message 6 | func (e Encoder) AppendKey(dst []byte, key string) []byte { 7 | if len(dst) < 1 { 8 | dst = e.AppendBeginMarker(dst) 9 | } 10 | return e.AppendString(dst, key) 11 | } 12 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/json/base.go: -------------------------------------------------------------------------------- 1 | package json 2 | 3 | type Encoder struct{} 4 |
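(Editor's illustrative aside, not a file in this repository.) config/config.go above exposes a single process-wide sync.Map plus the HashAlgName key. A minimal sketch of how a caller might store a setting and read it back; the "SHA256" value is only an example, not something the package defines.

```go
package main

import (
	"fmt"

	"github.com/DSiSc/craft/config"
)

func main() {
	// Store a process-wide setting under the predefined key...
	config.GlobalConfig.Store(config.HashAlgName, "SHA256")

	// ...and read it back from anywhere else in the program.
	if v, ok := config.GlobalConfig.Load(config.HashAlgName); ok {
		fmt.Println("hash algorithm:", v.(string))
	}
}
```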
5 | // AppendKey appends a new key to the output JSON. 6 | func (e Encoder) AppendKey(dst []byte, key string) []byte { 7 | if len(dst) > 1 && dst[len(dst)-1] != '{' { 8 | dst = append(dst, ',') 9 | } 10 | dst = e.AppendString(dst, key) 11 | return append(dst, ':') 12 | } 13 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | 3 | * text=auto 4 | 5 | *.sh text eol=lf 6 | *.go text eol=lf 7 | *.yaml text eol=lf 8 | *.yml text eol=lf 9 | *.md text eol=lf 10 | *.json text eol=lf 11 | *.proto text eol=lf 12 | *.py text eol=lf 13 | *.js text eol=lf 14 | *.txt text eol=lf 15 | *.sol linguist-language=Solidity 16 | LICENSE text eol=lf 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # craft 2 | 3 | Defines the types used by serialization and deserialization operations. 4 | 5 | [![Build Status](https://circleci.com/gh/DSiSc/craft/tree/master.svg?style=shield)](https://circleci.com/gh/DSiSc/craft/tree/master) 6 | 7 | ## Getting started 8 | 9 | Building it should be as simple as: 10 | 11 | ``` 12 | $ make all 13 | ``` 14 | 15 | ### Testing 16 | 17 | ``` 18 | $ make test 19 | ``` 20 | 21 | -------------------------------------------------------------------------------- /monitor/pprof.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "fmt" 5 | "github.com/DSiSc/craft/log" 6 | "net/http" 7 | _ "net/http/pprof" 8 | ) 9 | 10 | type PprofConfig struct { 11 | PprofEnabled bool 12 | PprofPort string 13 | } 14 | 15 | func StartPprofServer(config PprofConfig) { 16 | if config.PprofEnabled { 17 | go func() { 18 | log.Info(fmt.Sprintf("pprof: %v", http.ListenAndServe(":"+config.PprofPort, nil))) 19 | }() 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /scripts/unit_test_cov.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | set -e 4 | 5 | # Change directory to project root folder 6 | PROJ_FOLDER=$(cd "$(dirname "$0")/..";pwd) 7 | cd $PROJ_FOLDER 8 | 9 | echo "" > coverage.txt 10 | 11 | for pkg in $(go list ./... | grep -v vendor); do 12 | go test -timeout 5m -race -coverprofile=profile.cov -covermode=atomic "$pkg" 13 | if [ -f profile.cov ]; then 14 | cat profile.cov >> coverage.txt 15 | rm profile.cov 16 | fi 17 | done 18 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: yes 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "50...80" 9 | 10 | status: 11 | project: yes 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "header, diff" 25 | behavior: default 26 | require_changes: no 27 | -------------------------------------------------------------------------------- /contrib/git/.gitmessage.tmp: -------------------------------------------------------------------------------- 1 | # head: <type>(<scope>): <subject> 2 | # - type: feat, fix, docs, style, refactor, test, chore 3 | # - scope: can be empty (eg.
if the change is a global or difficult to assign to a single component) 4 | # - subject: start with verb (such as 'change'), 50-character line 5 | # 6 | # body: 72-character wrapped. This should answer: 7 | # * Why was this change necessary? 8 | # * How does it address the problem? 9 | # * Are there any side effects? 10 | # 11 | # footer: 12 | # - Include a link to the ticket, if any. 13 | # - BREAKING CHANGE 14 | # 15 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # Golang CircleCI 2.0 configuration file 2 | 3 | version: 2 4 | 5 | jobs: 6 | build: 7 | 8 | docker: 9 | - image: circleci/golang:1.10.3 10 | working_directory: /go/src/github.com/DSiSc/craft 11 | 12 | steps: 13 | - checkout 14 | 15 | - run: 16 | name: Get dependencies 17 | command: make fetch-deps 18 | 19 | - run: 20 | name: Static checks 21 | command: make static-check 22 | 23 | - run: 24 | name: Correctness check 25 | command: make build && make vet 26 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go: -------------------------------------------------------------------------------- 1 | package lv 2 | 3 | // LabelValues is a type alias that provides validation on its With method. 4 | // Metrics may include it as a member to help them satisfy With semantics and 5 | // save some code duplication. 6 | type LabelValues []string 7 | 8 | // With validates the input, and returns a new aggregate labelValues. 9 | func (lvs LabelValues) With(labelValues ...string) LabelValues { 10 | if len(labelValues)%2 != 0 { 11 | labelValues = append(labelValues, "unknown") 12 | } 13 | return append(lvs, labelValues...) 14 | } 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # See http://help.github.com/ignore-files/ for more about ignoring files. 2 | # 3 | # If you find yourself ignoring temporary files generated by your text editor 4 | # or operating system, you probably want to add a global ignore instead: 5 | # git config --global core.excludesfile ~/.gitignore_global 6 | 7 | # govendor 8 | #vendor/ 9 | 10 | # IDEs 11 | .project 12 | .settings 13 | .idea 14 | .vscode 15 | 16 | # May be used by the Makefile 17 | build/_workspace/ 18 | build/_vendor/pkg 19 | build/bin/ 20 | 21 | # travis, codecov 22 | profile.tmp 23 | profile.cov 24 | coverage.txt 25 | 26 | -------------------------------------------------------------------------------- /scripts/changelog.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Copyright(c) 2018 DSiSc Group. All Rights Reserved. 
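(Editor's illustrative aside, not a file in this repository.) monitor/pprof.go, shown a little earlier, only starts the net/http/pprof endpoint when PprofEnabled is set. A minimal sketch of wiring it into an application's main; port 6060 is just the conventional pprof port, not a package default.

```go
package main

import "github.com/DSiSc/craft/monitor"

func main() {
	// The blank import inside monitor/pprof.go registers the /debug/pprof/
	// handlers; this call starts an HTTP server that exposes them.
	monitor.StartPprofServer(monitor.PprofConfig{
		PprofEnabled: true,
		PprofPort:    "6060",
	})

	// ... run the rest of the application; profiles are then available
	// at http://localhost:6060/debug/pprof/.
	select {}
}
```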
4 | # 5 | # SPDX-License-Identifier: Apache-2.0 6 | # 7 | 8 | set -x 9 | 10 | SCRIPT_DIR=$(readlink -f "$(dirname $0)") 11 | CHANGELOG_TEMP="CHANGELOG.new" 12 | 13 | echo "## $2\n$(date)" >> ${CHANGELOG_TEMP} 14 | echo "" >> ${CHANGELOG_TEMP} 15 | git log $1..HEAD --oneline | grep -v Merge | sed -e "s/\([0-9|a-z]*\)/* \[\1\](https:\/\/github.com\/DSiSc\/craft\/commit\/\1)/" >> ${CHANGELOG_TEMP} 16 | echo "" >> ${CHANGELOG_TEMP} 17 | cat ${SCRIPT_DIR}/../CHANGELOG.md >> ${CHANGELOG_TEMP} 18 | mv -f ${CHANGELOG_TEMP} CHANGELOG.md 19 | -------------------------------------------------------------------------------- /types/base.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Define foundation of other types 3 | */ 4 | 5 | package types 6 | 7 | // Lengths of hashes and addresses in bytes. 8 | const ( 9 | HashLength = 32 10 | AddressLength = 20 11 | ) 12 | 13 | // StorageSize is a wrapper around a float value that supports user friendly 14 | // formatting. 15 | type StorageSize float64 16 | 17 | // Type to mark uniqueness of a node 18 | type NodeAddress string 19 | 20 | // Address represents the 20 byte address of an Ethereum account. 21 | type Address [AddressLength]byte 22 | 23 | // Hash represents the 32 byte Keccak256 hash of arbitrary data. 24 | type Hash [HashLength]byte 25 | -------------------------------------------------------------------------------- /contrib/LICENSE-APPLY: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2018 DSiSc Group. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/encoder_json.go: -------------------------------------------------------------------------------- 1 | // +build !binary_log 2 | 3 | package zerolog 4 | 5 | // encoder_json.go file contains bindings to generate 6 | // JSON encoded byte stream. 7 | 8 | import ( 9 | "github.com/rs/zerolog/internal/json" 10 | ) 11 | 12 | var ( 13 | _ encoder = (*json.Encoder)(nil) 14 | 15 | enc = json.Encoder{} 16 | ) 17 | 18 | func appendJSON(dst []byte, j []byte) []byte { 19 | return append(dst, j...) 20 | } 21 | 22 | func decodeIfBinaryToString(in []byte) string { 23 | return string(in) 24 | } 25 | 26 | func decodeObjectToStr(in []byte) string { 27 | return string(in) 28 | } 29 | 30 | func decodeIfBinaryToBytes(in []byte) []byte { 31 | return in 32 | } 33 | -------------------------------------------------------------------------------- /log/logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | // Logger interface defines all behaviors of a backendLogger. 
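(Editor's illustrative aside, not a file in this repository.) types/base.go above defines Hash and Address as fixed-size byte arrays. A sketch of a hypothetical helper that copies an arbitrary byte slice into a Hash, left-padding with zeroes; sha256 is used here only to produce 32 sample bytes, while the package itself documents Keccak256 as the intended hash.

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/DSiSc/craft/types"
)

// bytesToHash copies a byte slice into the fixed-size Hash array,
// truncating from the left or zero-padding as needed. It is a
// hypothetical helper, not part of the types package.
func bytesToHash(b []byte) (h types.Hash) {
	if len(b) > types.HashLength {
		b = b[len(b)-types.HashLength:]
	}
	copy(h[types.HashLength-len(b):], b)
	return h
}

func main() {
	sum := sha256.Sum256([]byte("craft"))
	fmt.Printf("%x\n", bytesToHash(sum[:]))
}
```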
4 | type Logger interface { 5 | Debug(msg string) 6 | Info(msg string) 7 | Warn(msg string) 8 | Error(msg string) 9 | Fatal(msg string) 10 | Panic(msg string) 11 | 12 | DebugKV(msg string, keyvals map[string]interface{}) 13 | InfoKV(msg string, keyvals map[string]interface{}) 14 | WarnKV(msg string, keyvals map[string]interface{}) 15 | ErrorKV(msg string, keyvals map[string]interface{}) 16 | FatalKV(msg string, keyvals map[string]interface{}) 17 | PanicKV(msg string, keyvals map[string]interface{}) 18 | 19 | SetGlobalLogLevel(level Level) 20 | SetOutputFlags(flags *OutputFlags) 21 | SetTimeFieldFormat(format string) 22 | } 23 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/README.md: -------------------------------------------------------------------------------- 1 | # procfs 2 | 3 | This procfs package provides functions to retrieve system, kernel and process 4 | metrics from the pseudo-filesystem proc. 5 | 6 | *WARNING*: This package is a work in progress. Its API may still break in 7 | backwards-incompatible ways without warnings. Use it at your own risk. 8 | 9 | [![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) 10 | [![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) 11 | [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) 12 | -------------------------------------------------------------------------------- /vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Matt T. Proud 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Package pbutil provides record length-delimited Protocol Buffer streaming. 16 | package pbutil 17 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/model/model.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Package model contains common data structures that are shared across 15 | // Prometheus components and libraries. 
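(Editor's illustrative aside, not a file in this repository.) The Logger interface just shown pairs each level with a *KV variant that takes a map of structured fields. A library-style sketch of how calling code might emit such an entry through any implementation (for instance the zerolog adapter elsewhere in this package); the function and field names are made up for the example.

```go
package example

import "github.com/DSiSc/craft/log"

// ReportBlock emits one structured entry through whatever Logger
// implementation the caller supplies.
func ReportBlock(l log.Logger, height uint64, hash string) {
	l.InfoKV("imported new block", map[string]interface{}{
		"height": height,
		"hash":   hash,
	})
}
```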
16 | package model 17 | -------------------------------------------------------------------------------- /scripts/check_spelling.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright(c) 2018 DSiSc Group. All Rights Reserved. 4 | # 5 | # SPDX-License-Identifier: Apache-2.0 6 | # 7 | 8 | CHECK=$(git diff --name-only HEAD * | grep -v .png$ | grep -v .git | grep -v ^CHANGELOG \ 9 | | grep -v ^vendor/ | grep -v ^build/ | sort -u) 10 | 11 | if [[ -z "$CHECK" ]]; then 12 | CHECK=$(git diff-tree --no-commit-id --name-only -r $(git log -2 \ 13 | --pretty=format:"%h") | grep -v .png$ | grep -v .git | grep -v ^CHANGELOG \ 14 | | grep -v ^vendor/ | grep -v ^build/ | sort -u) 15 | fi 16 | 17 | echo "Checking changed go files for spelling errors ..." 18 | errs=`echo $CHECK | xargs misspell -source=text` 19 | if [ -z "$errs" ]; then 20 | echo "spell checker passed" 21 | exit 0 22 | fi 23 | echo "The following files have spelling errors:" 24 | echo "$errs" 25 | exit 0 26 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | // Counter describes a metric that accumulates values monotonically. 4 | // An example of a counter is the number of received HTTP requests. 5 | type Counter interface { 6 | With(labelValues ...string) Counter 7 | Add(delta float64) 8 | } 9 | 10 | // Gauge describes a metric that takes specific values over time. 11 | // An example of a gauge is the current depth of a job queue. 12 | type Gauge interface { 13 | With(labelValues ...string) Gauge 14 | Set(value float64) 15 | Add(delta float64) 16 | } 17 | 18 | // Histogram describes a metric that takes repeated observations of the same 19 | // kind of thing, and produces a statistical summary of those observations, 20 | // typically expressed as quantiles or buckets. An example of a histogram is 21 | // HTTP request latencies. 22 | type Histogram interface { 23 | With(labelValues ...string) Histogram 24 | Observe(value float64) 25 | } 26 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/encoder_cbor.go: -------------------------------------------------------------------------------- 1 | // +build binary_log 2 | 3 | package zerolog 4 | 5 | // This file contains bindings to do binary encoding. 6 | 7 | import ( 8 | "github.com/rs/zerolog/internal/cbor" 9 | ) 10 | 11 | var ( 12 | _ encoder = (*cbor.Encoder)(nil) 13 | 14 | enc = cbor.Encoder{} 15 | ) 16 | 17 | func appendJSON(dst []byte, j []byte) []byte { 18 | return cbor.AppendEmbeddedJSON(dst, j) 19 | } 20 | 21 | // decodeIfBinaryToString - converts a binary formatted log msg to a 22 | // JSON formatted String Log message. 23 | func decodeIfBinaryToString(in []byte) string { 24 | return cbor.DecodeIfBinaryToString(in) 25 | } 26 | 27 | func decodeObjectToStr(in []byte) string { 28 | return cbor.DecodeObjectToStr(in) 29 | } 30 | 31 | // decodeIfBinaryToBytes - converts a binary formatted log msg to a 32 | // JSON formatted Bytes Log message.
33 | func decodeIfBinaryToBytes(in []byte) []byte { 34 | return cbor.DecodeIfBinaryToBytes(in) 35 | } 36 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/NOTICE: -------------------------------------------------------------------------------- 1 | Prometheus instrumentation library for Go applications 2 | Copyright 2012-2015 The Prometheus Authors 3 | 4 | This product includes software developed at 5 | SoundCloud Ltd. (http://soundcloud.com/). 6 | 7 | 8 | The following components are included in this product: 9 | 10 | perks - a fork of https://github.com/bmizerany/perks 11 | https://github.com/beorn7/perks 12 | Copyright 2013-2015 Blake Mizerany, Björn Rabenstein 13 | See https://github.com/beorn7/perks/blob/master/README.md for license details. 14 | 15 | Go support for Protocol Buffers - Google's data interchange format 16 | http://github.com/golang/protobuf/ 17 | Copyright 2010 The Go Authors 18 | See source code for license details. 19 | 20 | Support for streaming Protocol Buffer messages for the Go language (golang). 21 | https://github.com/matttproud/golang_protobuf_extensions 22 | Copyright 2013 Matt T. Proud 23 | Licensed under the Apache License, Version 2.0 24 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Prometheus uses GitHub to manage reviews of pull requests. 4 | 5 | * If you have a trivial fix or improvement, go ahead and create a pull request, 6 | addressing (with `@...`) the maintainer of this repository (see 7 | [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. 8 | 9 | * If you plan to do something more involved, first discuss your ideas 10 | on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). 11 | This will avoid unnecessary work and surely give you and us a good deal 12 | of inspiration. 13 | 14 | * Relevant coding style guidelines are the [Go Code Review 15 | Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) 16 | and the _Formatting and style_ section of Peter Bourgon's [Go: Best 17 | Practices for Production 18 | Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 19 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/timer.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import "time" 4 | 5 | // Timer acts as a stopwatch, sending observations to a wrapped histogram. 6 | // It's a bit of helpful syntax sugar for h.Observe(time.Since(x)). 7 | type Timer struct { 8 | h Histogram 9 | t time.Time 10 | u time.Duration 11 | } 12 | 13 | // NewTimer wraps the given histogram and records the current time. 14 | func NewTimer(h Histogram) *Timer { 15 | return &Timer{ 16 | h: h, 17 | t: time.Now(), 18 | u: time.Second, 19 | } 20 | } 21 | 22 | // ObserveDuration captures the number of seconds since the timer was 23 | // constructed, and forwards that observation to the histogram. 24 | func (t *Timer) ObserveDuration() { 25 | d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u) 26 | if d < 0 { 27 | d = 0 28 | } 29 | t.h.Observe(d) 30 | } 31 | 32 | // Unit sets the unit of the float64 emitted by the timer. 33 | // By default, the timer emits seconds. 
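(Editor's illustrative aside, not a file in this repository.) go-kit's Timer above is syntax sugar for h.Observe(time.Since(start)). A small usage sketch that relies only on APIs vendored in this repository, using the discard backend so nothing external is needed; Unit is the method defined immediately below.

```go
package main

import (
	"time"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
)

func handleRequest() { time.Sleep(10 * time.Millisecond) }

func main() {
	var h metrics.Histogram = discard.NewHistogram()

	t := metrics.NewTimer(h) // records time.Now()
	t.Unit(time.Millisecond) // report milliseconds instead of seconds
	handleRequest()
	t.ObserveDuration() // forwards the elapsed time to the histogram
}
```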
34 | func (t *Timer) Unit(u time.Duration) { 35 | t.u = u 36 | } 37 | -------------------------------------------------------------------------------- /monitor/expvar.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "expvar" 5 | "fmt" 6 | "net/http" 7 | ) 8 | 9 | type ExpvarConfig struct { 10 | ExpvarEnabled bool 11 | ExpvarPort string 12 | ExpvarPath string 13 | } 14 | 15 | func metricsHandler(w http.ResponseWriter, r *http.Request) { 16 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 17 | 18 | first := true 19 | report := func(key string, value interface{}) { 20 | if !first { 21 | fmt.Fprintf(w, ",\n") 22 | } 23 | first = false 24 | if str, ok := value.(string); ok { 25 | fmt.Fprintf(w, "%q: %q", key, str) 26 | } else { 27 | fmt.Fprintf(w, "%q: %v", key, value) 28 | } 29 | } 30 | 31 | fmt.Fprintf(w, "{\n") 32 | expvar.Do(func(kv expvar.KeyValue) { 33 | report(kv.Key, kv.Value) 34 | }) 35 | fmt.Fprintf(w, "\n}\n") 36 | } 37 | 38 | func StartExpvarServer(config ExpvarConfig) { 39 | if config.ExpvarEnabled { 40 | go func() { 41 | mux := http.NewServeMux() 42 | mux.HandleFunc(config.ExpvarPath, metricsHandler) 43 | http.ListenAndServe(":"+config.ExpvarPort, mux) 44 | }() 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /types/contract.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // define specified type of system contract 4 | const ( 5 | Null = "Null" 6 | JustitiaRightToken = "JustitiaRight" 7 | JustitiaVoting = "Voting" 8 | JustitiaWhiteList = "WhiteList" 9 | JustitiaMetaData = "MetaData" 10 | JustitiaCrossFundsPool = "CrossFundsPool" 11 | ) 12 | 13 | type ContractType int 14 | 15 | const ( 16 | InitialContractType ContractType = iota 17 | JustitiaRightContractType 18 | VoteContractType 19 | WhiteListContractType 20 | MetaDataContractType 21 | CrossFundsPoolContractType 22 | MaximumContractType 23 | ) 24 | 25 | const ( 26 | MinimunNodesForDpos = uint64(4) 27 | MetaDataContractAddress = "8be503bcded90ed42eff31f56199399b2b0154ca" 28 | JustiitaContractDefaultAddress = "bd770416a3345f91e4b34576cb804a576fa48eb1" 29 | VotingContractDefaultAddress = "5a443704dd4b594b382c22a083e2bd3090a6fef3" 30 | WhiteListContractTypeDefaultAddress = "47e9fbef8c83a1714f1951f142132e6e90f5fa5d" 31 | CrossFundsPoolDefaultAddress = "47c5e40890bce4a473a49d7501808b9633f29782" 32 | ) 33 | -------------------------------------------------------------------------------- /log/nonlogger_adapter.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | // Interface assertions 4 | var _ Logger = (*nonLogger)(nil) 5 | 6 | // nonLogger is used when we disable logging. 
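(Editor's illustrative aside, not a file in this repository.) monitor/expvar.go above serves every published expvar variable as one JSON document on its own mux. A sketch of publishing a variable and starting that server; the port and path values are illustrative only.

```go
package main

import (
	"expvar"

	"github.com/DSiSc/craft/monitor"
)

var blockHeight = expvar.NewInt("block_height")

func main() {
	monitor.StartExpvarServer(monitor.ExpvarConfig{
		ExpvarEnabled: true,
		ExpvarPort:    "8081",
		ExpvarPath:    "/debug/metrics",
	})

	blockHeight.Set(42)
	select {} // keep the process alive so the endpoint stays up
}
```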
7 | type nonLogger struct{} 8 | 9 | func (nonLogger) SetTimeFieldFormat(format string) {} 10 | 11 | func (nonLogger) SetOutputFlags(flags *OutputFlags) {} 12 | 13 | func (nonLogger) SetGlobalLogLevel(level Level) {} 14 | 15 | func (nonLogger) Debug(msg string) {} 16 | 17 | func (nonLogger) Info(msg string) {} 18 | 19 | func (nonLogger) Warn(msg string) {} 20 | 21 | func (nonLogger) Error(msg string) {} 22 | 23 | func (nonLogger) Fatal(msg string) {} 24 | 25 | func (nonLogger) Panic(msg string) {} 26 | 27 | func (nonLogger) DebugKV(msg string, keyvals map[string]interface{}) {} 28 | 29 | func (nonLogger) InfoKV(msg string, keyvals map[string]interface{}) {} 30 | 31 | func (nonLogger) WarnKV(msg string, keyvals map[string]interface{}) {} 32 | 33 | func (nonLogger) ErrorKV(msg string, keyvals map[string]interface{}) {} 34 | 35 | func (nonLogger) FatalKV(msg string, keyvals map[string]interface{}) {} 36 | 37 | func (nonLogger) PanicKV(msg string, keyvals map[string]interface{}) {} 38 | -------------------------------------------------------------------------------- /vendor/github.com/beorn7/perks/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 Blake Mizerany 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | // Copyright(c) 2018 DSiSc Group All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package version 16 | 17 | // The git commit that was compiled. This will be filled in by the compiler. 18 | var GitCommit string 19 | 20 | // The main version number that is being run at the moment. 21 | const Version = "0.0.1" 22 | 23 | // A pre-release marker for the version. 
If this is "" (empty string) 24 | // then it means that it is a final release. Otherwise, this is a pre-release 25 | // such as "dev" (in development) 26 | var VersionPrerelease = "dev" 27 | 28 | var BuildDate = "" 29 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Olivier Poitrey 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Peter Bourgon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/expfmt/fuzz.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
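(Editor's illustrative aside, not a file in this repository.) version/version.go above splits version information across Version, VersionPrerelease, GitCommit and BuildDate, the latter typically injected at build time. A sketch of assembling them into one human-readable string; the exact formatting is an editorial choice, not something the package prescribes.

```go
package main

import (
	"fmt"

	"github.com/DSiSc/craft/version"
)

func main() {
	v := version.Version
	if version.VersionPrerelease != "" {
		v += "-" + version.VersionPrerelease // e.g. "0.0.1-dev"
	}
	if version.GitCommit != "" {
		v += " (" + version.GitCommit + ")"
	}
	fmt.Println("craft", v, version.BuildDate)
}
```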
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Build only when actually fuzzing 15 | // +build gofuzz 16 | 17 | package expfmt 18 | 19 | import "bytes" 20 | 21 | // Fuzz text metric parser with with github.com/dvyukov/go-fuzz: 22 | // 23 | // go-fuzz-build github.com/prometheus/common/expfmt 24 | // go-fuzz -bin expfmt-fuzz.zip -workdir fuzz 25 | // 26 | // Further input samples should go in the folder fuzz/corpus. 27 | func Fuzz(in []byte) int { 28 | parser := TextParser{} 29 | _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) 30 | 31 | if err != nil { 32 | return 0 33 | } 34 | 35 | return 1 36 | } 37 | -------------------------------------------------------------------------------- /scripts/install_behave.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright(c) 2018 DSiSc Group. All Rights Reserved. 4 | # 5 | # SPDX-License-Identifier: Apache-2.0 6 | # 7 | 8 | 9 | # 10 | # This script is used on Debian based linux distros. 11 | # (i.e., linux that supports the apt packaging manager.) 12 | # 13 | 14 | # Update system 15 | apt-get update -qq 16 | 17 | # Install Python, pip, behave 18 | # 19 | # install python-dev and libyaml-dev to get compiled speedups 20 | apt-get install --yes python-dev 21 | apt-get install --yes libyaml-dev 22 | 23 | apt-get install --yes python-setuptools 24 | apt-get install --yes python-pip 25 | apt-get install --yes build-essential 26 | # required dependencies for cryptography, which is required by pyOpenSSL 27 | # https://cryptography.io/en/stable/installation/#building-cryptography-on-linux 28 | apt-get install --yes libssl-dev libffi-dev 29 | pip install --upgrade pip 30 | 31 | # Pip packages required for behave tests 32 | pip install -r ../devenv/bddtests-requirements.txt 33 | 34 | # install ruby and apiaryio 35 | #apt-get install --yes ruby ruby-dev gcc 36 | #gem install apiaryio 37 | 38 | # Install Tcl prerequisites for busywork 39 | apt-get install --yes tcl tclx tcllib 40 | 41 | # Install NPM for the SDK 42 | apt-get install --yes npm 43 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/discard/discard.go: -------------------------------------------------------------------------------- 1 | // Package discard provides a no-op metrics backend. 2 | package discard 3 | 4 | import "github.com/go-kit/kit/metrics" 5 | 6 | type counter struct{} 7 | 8 | // NewCounter returns a new no-op counter. 9 | func NewCounter() metrics.Counter { return counter{} } 10 | 11 | // With implements Counter. 12 | func (c counter) With(labelValues ...string) metrics.Counter { return c } 13 | 14 | // Add implements Counter. 15 | func (c counter) Add(delta float64) {} 16 | 17 | type gauge struct{} 18 | 19 | // NewGauge returns a new no-op gauge. 20 | func NewGauge() metrics.Gauge { return gauge{} } 21 | 22 | // With implements Gauge. 23 | func (g gauge) With(labelValues ...string) metrics.Gauge { return g } 24 | 25 | // Set implements Gauge. 26 | func (g gauge) Set(value float64) {} 27 | 28 | // Add implements metrics.Gauge. 
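(Editor's illustrative aside, not a file in this repository.) The discard package above provides no-op implementations of the go-kit metric interfaces. A sketch of the usual pattern of falling back to it when no real backend is configured; TxPool and its label names are hypothetical.

```go
package main

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
)

// TxPool is a hypothetical component that counts accepted transactions
// without caring which metrics backend is wired in.
type TxPool struct {
	accepted metrics.Counter
}

func NewTxPool(c metrics.Counter) *TxPool {
	if c == nil {
		c = discard.NewCounter() // metrics disabled: every call is a no-op
	}
	return &TxPool{accepted: c}
}

func (p *TxPool) Add() {
	p.accepted.With("pool", "pending").Add(1)
}

func main() {
	NewTxPool(nil).Add()
}
```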
29 | func (g gauge) Add(delta float64) {} 30 | 31 | type histogram struct{} 32 | 33 | // NewHistogram returns a new no-op histogram. 34 | func NewHistogram() metrics.Histogram { return histogram{} } 35 | 36 | // With implements Histogram. 37 | func (h histogram) With(labelValues ...string) metrics.Histogram { return h } 38 | 39 | // Observe implements histogram. 40 | func (h histogram) Observe(value float64) {} 41 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/ctx.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | var disabledLogger *Logger 8 | 9 | func init() { 10 | l := Nop() 11 | disabledLogger = &l 12 | } 13 | 14 | type ctxKey struct{} 15 | 16 | // WithContext returns a copy of ctx with l associated. If an instance of Logger 17 | // is already in the context, the context is not updated. 18 | // 19 | // For instance, to add a field to an existing logger in the context, use this 20 | // notation: 21 | // 22 | // ctx := r.Context() 23 | // l := zerolog.Ctx(ctx) 24 | // l.UpdateContext(func(c Context) Context { 25 | // return c.Str("bar", "baz") 26 | // }) 27 | func (l *Logger) WithContext(ctx context.Context) context.Context { 28 | if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok { 29 | if lp == l { 30 | // Do not store same logger. 31 | return ctx 32 | } 33 | } else if l.level == Disabled { 34 | // Do not store disabled logger. 35 | return ctx 36 | } 37 | return context.WithValue(ctx, ctxKey{}, l) 38 | } 39 | 40 | // Ctx returns the Logger associated with the ctx. If no logger 41 | // is associated, a disabled logger is returned. 42 | func Ctx(ctx context.Context) *Logger { 43 | if l, ok := ctx.Value(ctxKey{}).(*Logger); ok { 44 | return l 45 | } 46 | return disabledLogger 47 | } 48 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
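(Editor's illustrative aside, not a file in this repository.) ctx.go above attaches a *Logger to a context.Context, and Ctx retrieves it, handing back a disabled logger when nothing was attached. A minimal end-to-end sketch; zerolog.New, Log and Msg live in parts of the package not shown in this snapshot, so treat those exact calls as assumptions.

```go
package main

import (
	"context"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)

	// Attach the logger to a context...
	ctx := logger.WithContext(context.Background())

	// ...and recover it later. Ctx never returns nil, so this call is
	// safe even on a context that carries no logger.
	zerolog.Ctx(ctx).Log().Msg("hello from the context logger")
}
```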
13 | 14 | // +build !go1.8 15 | 16 | package promhttp 17 | 18 | import ( 19 | "io" 20 | "net/http" 21 | ) 22 | 23 | func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { 24 | d := &responseWriterDelegator{ 25 | ResponseWriter: w, 26 | observeWriteHeader: observeWriteHeaderFunc, 27 | } 28 | 29 | id := 0 30 | if _, ok := w.(http.CloseNotifier); ok { 31 | id += closeNotifier 32 | } 33 | if _, ok := w.(http.Flusher); ok { 34 | id += flusher 35 | } 36 | if _, ok := w.(http.Hijacker); ok { 37 | id += hijacker 38 | } 39 | if _, ok := w.(io.ReaderFrom); ok { 40 | id += readerFrom 41 | } 42 | 43 | return pickDelegator[id](d) 44 | } 45 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/model/fnv.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package model 15 | 16 | // Inline and byte-free variant of hash/fnv's fnv64a. 17 | 18 | const ( 19 | offset64 = 14695981039346656037 20 | prime64 = 1099511628211 21 | ) 22 | 23 | // hashNew initializies a new fnv64a hash value. 24 | func hashNew() uint64 { 25 | return offset64 26 | } 27 | 28 | // hashAdd adds a string to a fnv64a hash value, returning the updated hash. 29 | func hashAdd(h uint64, s string) uint64 { 30 | for i := 0; i < len(s); i++ { 31 | h ^= uint64(s[i]) 32 | h *= prime64 33 | } 34 | return h 35 | } 36 | 37 | // hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 38 | func hashAddByte(h uint64, b byte) uint64 { 39 | h ^= uint64(b) 40 | h *= prime64 41 | return h 42 | } 43 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/fnv.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package prometheus 15 | 16 | // Inline and byte-free variant of hash/fnv's fnv64a. 17 | 18 | const ( 19 | offset64 = 14695981039346656037 20 | prime64 = 1099511628211 21 | ) 22 | 23 | // hashNew initializies a new fnv64a hash value. 24 | func hashNew() uint64 { 25 | return offset64 26 | } 27 | 28 | // hashAdd adds a string to a fnv64a hash value, returning the updated hash. 
29 | func hashAdd(h uint64, s string) uint64 { 30 | for i := 0; i < len(s); i++ { 31 | h ^= uint64(s[i]) 32 | h *= prime64 33 | } 34 | return h 35 | } 36 | 37 | // hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 38 | func hashAddByte(h uint64, b byte) uint64 { 39 | h ^= uint64(b) 40 | h *= prime64 41 | return h 42 | } 43 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/syslog.go: -------------------------------------------------------------------------------- 1 | // +build !windows 2 | // +build !binary_log 3 | 4 | package zerolog 5 | 6 | import ( 7 | "io" 8 | ) 9 | 10 | // SyslogWriter is an interface matching a syslog.Writer struct. 11 | type SyslogWriter interface { 12 | io.Writer 13 | Debug(m string) error 14 | Info(m string) error 15 | Warning(m string) error 16 | Err(m string) error 17 | Emerg(m string) error 18 | Crit(m string) error 19 | } 20 | 21 | type syslogWriter struct { 22 | w SyslogWriter 23 | } 24 | 25 | // SyslogLevelWriter wraps a SyslogWriter and call the right syslog level 26 | // method matching the zerolog level. 27 | func SyslogLevelWriter(w SyslogWriter) LevelWriter { 28 | return syslogWriter{w} 29 | } 30 | 31 | func (sw syslogWriter) Write(p []byte) (n int, err error) { 32 | return sw.w.Write(p) 33 | } 34 | 35 | // WriteLevel implements LevelWriter interface. 36 | func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) { 37 | switch level { 38 | case DebugLevel: 39 | err = sw.w.Debug(string(p)) 40 | case InfoLevel: 41 | err = sw.w.Info(string(p)) 42 | case WarnLevel: 43 | err = sw.w.Warning(string(p)) 44 | case ErrorLevel: 45 | err = sw.w.Err(string(p)) 46 | case FatalLevel: 47 | err = sw.w.Emerg(string(p)) 48 | case PanicLevel: 49 | err = sw.w.Crit(string(p)) 50 | case NoLevel: 51 | err = sw.w.Info(string(p)) 52 | default: 53 | panic("invalid level") 54 | } 55 | n = len(p) 56 | return 57 | } 58 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 Prometheus Team 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Package procfs provides functions to retrieve system, kernel and process 15 | // metrics from the pseudo-filesystem proc. 
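(Editor's illustrative aside, not a file in this repository.) Both fnv.go files above vendor the same inline FNV-1a helpers, which are unexported and therefore not callable from outside. A standalone sketch of the identical folding, checked against the standard library's hash/fnv, to show what hashNew/hashAdd compute.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fnv64a folds a string the same way hashNew/hashAdd do above: start
// from the FNV offset basis, then XOR in each byte and multiply by the
// FNV prime.
func fnv64a(s string) uint64 {
	const (
		offset64 = 14695981039346656037
		prime64  = 1099511628211
	)
	h := uint64(offset64)
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	std := fnv.New64a()
	std.Write([]byte("craft"))
	fmt.Println(fnv64a("craft")) // both lines print the same value
	fmt.Println(std.Sum64())
}
```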
16 | // 17 | // Example: 18 | // 19 | // package main 20 | // 21 | // import ( 22 | // "fmt" 23 | // "log" 24 | // 25 | // "github.com/prometheus/procfs" 26 | // ) 27 | // 28 | // func main() { 29 | // p, err := procfs.Self() 30 | // if err != nil { 31 | // log.Fatalf("could not get process: %s", err) 32 | // } 33 | // 34 | // stat, err := p.NewStat() 35 | // if err != nil { 36 | // log.Fatalf("could not get process stat: %s", err) 37 | // } 38 | // 39 | // fmt.Printf("command: %s\n", stat.Comm) 40 | // fmt.Printf("cpu time: %fs\n", stat.CPUTime()) 41 | // fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) 42 | // fmt.Printf("rss: %dB\n", stat.ResidentMemory()) 43 | // } 44 | // 45 | package procfs 46 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // +build !windows 15 | 16 | package util 17 | 18 | import ( 19 | "bytes" 20 | "os" 21 | "syscall" 22 | ) 23 | 24 | // SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. 25 | // https://github.com/prometheus/node_exporter/pull/728/files 26 | func SysReadFile(file string) (string, error) { 27 | f, err := os.Open(file) 28 | if err != nil { 29 | return "", err 30 | } 31 | defer f.Close() 32 | 33 | // On some machines, hwmon drivers are broken and return EAGAIN. This causes 34 | // Go's ioutil.ReadFile implementation to poll forever. 35 | // 36 | // Since we either want to read data or bail immediately, do the simplest 37 | // possible read using syscall directly. 38 | b := make([]byte, 128) 39 | n, err := syscall.Read(int(f.Fd()), b) 40 | if err != nil { 41 | return "", err 42 | } 43 | 44 | return string(bytes.TrimSpace(b[:n])), nil 45 | } 46 | -------------------------------------------------------------------------------- /vendor/github.com/golang/protobuf/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2010 The Go Authors. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are 5 | met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above 10 | copyright notice, this list of conditions and the following disclaimer 11 | in the documentation and/or other materials provided with the 12 | distribution. 13 | * Neither the name of Google Inc. nor the names of its 14 | contributors may be used to endorse or promote products derived from 15 | this software without specific prior written permission. 
16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | 29 | -------------------------------------------------------------------------------- /rlp/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The go-ethereum Authors 2 | // This file is part of the go-ethereum library. 3 | // 4 | // The go-ethereum library is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU Lesser General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | // 9 | // The go-ethereum library is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU Lesser General Public License for more details. 13 | // 14 | // You should have received a copy of the GNU Lesser General Public License 15 | // along with the go-ethereum library. If not, see . 16 | 17 | /* 18 | Package rlp implements the RLP serialization format. 19 | 20 | The purpose of RLP (Recursive Linear Prefix) is to encode arbitrarily 21 | nested arrays of binary data, and RLP is the main encoding method used 22 | to serialize objects in Ethereum. The only purpose of RLP is to encode 23 | structure; encoding specific atomic data types (eg. strings, ints, 24 | floats) is left up to higher-order protocols; in Ethereum integers 25 | must be represented in big endian binary form with no leading zeroes 26 | (thus making the integer value zero equivalent to the empty byte 27 | array). 28 | 29 | RLP values are distinguished by a type tag. The type tag precedes the 30 | value in the input stream and defines the size and kind of the bytes 31 | that follow. 32 | */ 33 | package rlp 34 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/expfmt/expfmt.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
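(Editor's illustrative aside, not a file in this repository.) rlp/doc.go above explains that RLP prefixes every value with a type tag that fixes its size and kind. A self-contained sketch of the two simplest byte-string rules from the RLP spec; it deliberately avoids this repository's rlp API, which is not shown in this section.

```go
package main

import "fmt"

// encodeString applies the two simplest RLP rules for byte strings:
// a single byte below 0x80 encodes as itself, and a string of up to 55
// bytes is prefixed with 0x80 plus its length. Longer strings and
// lists need the additional rules from the spec.
func encodeString(b []byte) []byte {
	if len(b) == 1 && b[0] < 0x80 {
		return b
	}
	if len(b) <= 55 {
		return append([]byte{0x80 + byte(len(b))}, b...)
	}
	panic("long strings not handled in this sketch")
}

func main() {
	fmt.Printf("% x\n", encodeString([]byte("dog"))) // 83 64 6f 67
	fmt.Printf("% x\n", encodeString([]byte{0x0f}))  // 0f
	fmt.Printf("% x\n", encodeString([]byte{}))      // 80
}
```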
13 | 14 | // Package expfmt contains tools for reading and writing Prometheus metrics. 15 | package expfmt 16 | 17 | // Format specifies the HTTP content type of the different wire protocols. 18 | type Format string 19 | 20 | // Constants to assemble the Content-Type values for the different wire protocols. 21 | const ( 22 | TextVersion = "0.0.4" 23 | ProtoType = `application/vnd.google.protobuf` 24 | ProtoProtocol = `io.prometheus.client.MetricFamily` 25 | ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" 26 | 27 | // The Content-Type values for the different wire protocols. 28 | FmtUnknown Format = `` 29 | FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` 30 | FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` 31 | FmtProtoText Format = ProtoFmt + ` encoding=text` 32 | FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` 33 | ) 34 | 35 | const ( 36 | hdrContentType = "Content-Type" 37 | hdrAccept = "Accept" 38 | ) 39 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/hook.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | // Hook defines an interface to a log hook. 4 | type Hook interface { 5 | // Run runs the hook with the event. 6 | Run(e *Event, level Level, message string) 7 | } 8 | 9 | // HookFunc is an adaptor to allow the use of an ordinary function 10 | // as a Hook. 11 | type HookFunc func(e *Event, level Level, message string) 12 | 13 | // Run implements the Hook interface. 14 | func (h HookFunc) Run(e *Event, level Level, message string) { 15 | h(e, level, message) 16 | } 17 | 18 | // LevelHook applies a different hook for each level. 19 | type LevelHook struct { 20 | NoLevelHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook 21 | } 22 | 23 | // Run implements the Hook interface. 24 | func (h LevelHook) Run(e *Event, level Level, message string) { 25 | switch level { 26 | case DebugLevel: 27 | if h.DebugHook != nil { 28 | h.DebugHook.Run(e, level, message) 29 | } 30 | case InfoLevel: 31 | if h.InfoHook != nil { 32 | h.InfoHook.Run(e, level, message) 33 | } 34 | case WarnLevel: 35 | if h.WarnHook != nil { 36 | h.WarnHook.Run(e, level, message) 37 | } 38 | case ErrorLevel: 39 | if h.ErrorHook != nil { 40 | h.ErrorHook.Run(e, level, message) 41 | } 42 | case FatalLevel: 43 | if h.FatalHook != nil { 44 | h.FatalHook.Run(e, level, message) 45 | } 46 | case PanicLevel: 47 | if h.PanicHook != nil { 48 | h.PanicHook.Run(e, level, message) 49 | } 50 | case NoLevel: 51 | if h.NoLevelHook != nil { 52 | h.NoLevelHook.Run(e, level, message) 53 | } 54 | } 55 | } 56 | 57 | // NewLevelHook returns a new LevelHook. 
58 | func NewLevelHook() LevelHook { 59 | return LevelHook{} 60 | } 61 | -------------------------------------------------------------------------------- /scripts/ensure_deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Change directory to project root folder 4 | PROJ_FOLDER=$(cd "$(dirname "$0")/..";pwd) 5 | cd $PROJ_FOLDER 6 | 7 | # Read "dependencies.txt" under project root 8 | DEPS=$(grep -v "^#" dependencies.txt | grep -v "^$") 9 | 10 | # Go get all the imported packages (except the ones under "vendor" folder) to $GOPATH 11 | for dep in $DEPS; do 12 | dep_repo=$(echo ${dep} | awk -F ':' '{print $1}') 13 | if [ -d "${GOPATH}/src/${dep_repo}" ]; then 14 | cd ${GOPATH}/src/${dep_repo} 15 | git checkout master &> /dev/null 16 | fi 17 | go get -v -u ${dep_repo} 18 | done 19 | 20 | # Check out to desired version 21 | for dep in $DEPS; do 22 | dep_repo=$(echo ${dep} | awk -F ':' '{print $1}') 23 | dep_ver=$(echo ${dep} | awk -F ':' '{print $2}') 24 | if [ -d "${GOPATH}/src/${dep_repo}" ]; then 25 | 26 | echo "[INFO] Ensuring ${dep_repo} on ${dep_ver} ..." 27 | 28 | cd ${GOPATH}/src/${dep_repo} 29 | 30 | git fetch origin > /dev/null 31 | 32 | # Try checkout to ${dep_ver} 33 | git checkout ${dep_ver} > /dev/null && (git pull &> /dev/null | true) 34 | 35 | if [ $? != 0 ]; then 36 | # If failed, checkout to origin/${dep_ver} 37 | git checkout origin/${dep_ver} > /dev/null 38 | if [ $? != 0 ]; then 39 | echo "[ERROR] Got error when checking out ${dep_ver} under ${dep_repo}, please check." 40 | exit 1 41 | else 42 | echo "[INFO] ${dep_repo} is now on ${dep_ver}" 43 | fi 44 | else 45 | echo "[INFO] ${dep_repo} is now on ${dep_ver}" 46 | fi 47 | else 48 | echo "[WARN] ${GOPATH}/src/${dep_repo} not exist, do nothing, please check dependencies.txt." 49 | fi 50 | done 51 | 52 | -------------------------------------------------------------------------------- /monitor/promServ.go: -------------------------------------------------------------------------------- 1 | package monitor 2 | 3 | import ( 4 | "context" 5 | "github.com/DSiSc/craft/log" 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/prometheus/client_golang/prometheus/promhttp" 8 | "net/http" 9 | ) 10 | 11 | var prometheusServ *http.Server 12 | 13 | type PrometheusConfig struct { 14 | PrometheusEnabled bool 15 | PrometheusPort string 16 | PrometheusMaxConn int 17 | } 18 | 19 | // startPrometheusServer starts a Prometheus HTTP server, listening for metrics 20 | // collectors on addr. 
21 | func StartPrometheusServer(config PrometheusConfig) { 22 | 23 | if !config.PrometheusEnabled { 24 | return 25 | } 26 | 27 | createMetrics() 28 | 29 | // create prometheus server 30 | if prometheusServ == nil { 31 | prometheusServ = &http.Server{ 32 | Addr: ":" + config.PrometheusPort, 33 | Handler: promhttp.InstrumentMetricHandler( 34 | prometheus.DefaultRegisterer, promhttp.HandlerFor( 35 | prometheus.DefaultGatherer, 36 | promhttp.HandlerOpts{MaxRequestsInFlight: config.PrometheusMaxConn}, 37 | ), 38 | ), 39 | } 40 | } 41 | 42 | // start prometheus server 43 | go func() { 44 | if err := prometheusServ.ListenAndServe(); err != http.ErrServerClosed { 45 | // Error starting or closing listener: 46 | log.Error("Prometheus HTTP server ListenAndServe", "err", err) 47 | } 48 | }() 49 | } 50 | 51 | // stopPrometheusServer stops a Prometheus HTTP server 52 | func StopPrometheusServer() { 53 | if prometheusServ != nil { 54 | if err := prometheusServ.Shutdown(context.Background()); err != nil { 55 | // Error from closing listeners, or context timeout: 56 | log.ErrorKV("Prometheus HTTP server Shutdown", map[string]interface{}{"err": err}) 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Matt T. Proud 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package pbutil 16 | 17 | import ( 18 | "encoding/binary" 19 | "io" 20 | 21 | "github.com/golang/protobuf/proto" 22 | ) 23 | 24 | // WriteDelimited encodes and dumps a message to the provided writer prefixed 25 | // with a 32-bit varint indicating the length of the encoded message, producing 26 | // a length-delimited record stream, which can be used to chain together 27 | // encoded messages of the same type together in a file. It returns the total 28 | // number of bytes written and any applicable error. This is roughly 29 | // equivalent to the companion Java API's MessageLite#writeDelimitedTo. 30 | func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { 31 | buffer, err := proto.Marshal(m) 32 | if err != nil { 33 | return 0, err 34 | } 35 | 36 | var buf [binary.MaxVarintLen32]byte 37 | encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) 38 | 39 | sync, err := w.Write(buf[:encodedLength]) 40 | if err != nil { 41 | return sync, err 42 | } 43 | 44 | n, err = w.Write(buffer) 45 | return n + sync, err 46 | } 47 | -------------------------------------------------------------------------------- /rlp/encoder_example_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The go-ethereum Authors 2 | // This file is part of the go-ethereum library. 
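// Editor's note: a hedged usage sketch for the monitor package's Prometheus server
// defined above (not part of this file). It relies only on the exported
// PrometheusConfig fields and the Start/Stop functions from monitor/promServ.go;
// the port value is a placeholder.
//
//	cfg := monitor.PrometheusConfig{
//		PrometheusEnabled: true,
//		PrometheusPort:    "9090", // hypothetical port
//		PrometheusMaxConn: 10,
//	}
//	monitor.StartPrometheusServer(cfg)
//	defer monitor.StopPrometheusServer()
//
// Metrics registered with the default Prometheus registry should then be served by
// the handler that StartPrometheusServer installs on the configured port.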
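// Editor's note: a minimal sketch of pbutil.WriteDelimited from encode.go above
// (hypothetical, not part of this file). msgs is an assumed []proto.Message slice;
// each message is written as a varint length prefix followed by its encoding,
// producing a length-delimited record stream.
//
//	var buf bytes.Buffer
//	for _, m := range msgs {
//		if _, err := pbutil.WriteDelimited(&buf, m); err != nil {
//			log.Fatal(err)
//		}
//	}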
3 | // 4 | // The go-ethereum library is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU Lesser General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | // 9 | // The go-ethereum library is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU Lesser General Public License for more details. 13 | // 14 | // You should have received a copy of the GNU Lesser General Public License 15 | // along with the go-ethereum library. If not, see . 16 | 17 | package rlp 18 | 19 | import ( 20 | "fmt" 21 | "io" 22 | ) 23 | 24 | type MyCoolType struct { 25 | Name string 26 | a, b uint 27 | } 28 | 29 | // EncodeRLP writes x as RLP list [a, b] that omits the Name field. 30 | func (x *MyCoolType) EncodeRLP(w io.Writer) (err error) { 31 | // Note: the receiver can be a nil pointer. This allows you to 32 | // control the encoding of nil, but it also means that you have to 33 | // check for a nil receiver. 34 | if x == nil { 35 | err = Encode(w, []uint{0, 0}) 36 | } else { 37 | err = Encode(w, []uint{x.a, x.b}) 38 | } 39 | return err 40 | } 41 | 42 | func ExampleEncoder() { 43 | var t *MyCoolType // t is nil pointer to MyCoolType 44 | bytes, _ := EncodeToBytes(t) 45 | fmt.Printf("%v → %X\n", t, bytes) 46 | 47 | t = &MyCoolType{Name: "foobar", a: 5, b: 6} 48 | bytes, _ = EncodeToBytes(t) 49 | fmt.Printf("%v → %X\n", t, bytes) 50 | 51 | // Output: 52 | // → C28080 53 | // &{foobar 5 6} → C20506 54 | } 55 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/internal/util/parse.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package util 15 | 16 | import ( 17 | "io/ioutil" 18 | "strconv" 19 | "strings" 20 | ) 21 | 22 | // ParseUint32s parses a slice of strings into a slice of uint32s. 23 | func ParseUint32s(ss []string) ([]uint32, error) { 24 | us := make([]uint32, 0, len(ss)) 25 | for _, s := range ss { 26 | u, err := strconv.ParseUint(s, 10, 32) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | us = append(us, uint32(u)) 32 | } 33 | 34 | return us, nil 35 | } 36 | 37 | // ParseUint64s parses a slice of strings into a slice of uint64s. 38 | func ParseUint64s(ss []string) ([]uint64, error) { 39 | us := make([]uint64, 0, len(ss)) 40 | for _, s := range ss { 41 | u, err := strconv.ParseUint(s, 10, 64) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | us = append(us, u) 47 | } 48 | 49 | return us, nil 50 | } 51 | 52 | // ReadUintFromFile reads a file and attempts to parse a uint64 from it. 
53 | func ReadUintFromFile(path string) (uint64, error) {
54 | data, err := ioutil.ReadFile(path)
55 | if err != nil {
56 | return 0, err
57 | }
58 | return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
59 | }
60 | -------------------------------------------------------------------------------- /types/event.go: --------------------------------------------------------------------------------
1 | package types
2 |
3 | type EventType uint8
4 |
5 | const (
6 | EventBlockCommitted EventType = iota // 0. block submitted successfully
7 | EventBlockCommitFailed // 1. block submission failed
8 | EventBlockVerifyFailed // 2. block verification failed
9 | EventBlockExisted // 3. block already exists
10 | EventConsensusFailed // 4. consensus failed
11 | EventBlockWritten // 5. block has been written
12 | EventBlockWriteFailed // 6. block write failed
13 | EventTxVerifySucceeded // 7. tx has been verified successfully
14 | EventTxVerifyFailed // 8. tx verification failed
15 | EventMasterChange // 9. master changed
16 | EventOnline // 10. node online
17 | EventBlockWithoutTxs // 11. block without any txs
18 |
19 | // P2P events
20 | EventRemovePeer
21 | EventAddPeer
22 | EventBroadCastMsg
23 | EventRecvNewMsg
24 |
25 | // txpool events
26 | EventAddTxToTxPool
27 | )
28 |
29 | type EventFunc func(v interface{})
30 |
31 | type Subscriber chan interface{}
32 |
33 | type EventCenter interface {
34 |
35 | // subscribe to the specified eventType with eventFunc
36 | Subscribe(eventType EventType, eventFunc EventFunc) Subscriber
37 |
38 | // unsubscribe the specified eventType and subscriber
39 | UnSubscribe(eventType EventType, subscriber Subscriber) (err error)
40 |
41 | // notify subscribers of eventType
42 | Notify(eventType EventType, value interface{}) (err error)
43 |
44 | // notify the specified eventFunc
45 | NotifySubscriber(eventFunc EventFunc, value interface{})
46 |
47 | // notify subscribers, traversing all events
48 | NotifyAll() (errs []error)
49 |
50 | // unsubscribe all events
51 | UnSubscribeAll()
52 | }
53 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/untyped.go: --------------------------------------------------------------------------------
1 | // Copyright 2014 The Prometheus Authors
2 | // Licensed under the Apache License, Version 2.0 (the "License");
3 | // you may not use this file except in compliance with the License.
4 | // You may obtain a copy of the License at
5 | //
6 | // http://www.apache.org/licenses/LICENSE-2.0
7 | //
8 | // Unless required by applicable law or agreed to in writing, software
9 | // distributed under the License is distributed on an "AS IS" BASIS,
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | // See the License for the specific language governing permissions and
12 | // limitations under the License.
13 |
14 | package prometheus
15 |
16 | // UntypedOpts is an alias for Opts. See there for doc comments.
17 | type UntypedOpts Opts
18 |
19 | // UntypedFunc works like GaugeFunc but the collected metric is of type
20 | // "Untyped". UntypedFunc is useful to mirror an external metric of unknown
21 | // type.
22 | //
23 | // To create UntypedFunc instances, use NewUntypedFunc.
24 | type UntypedFunc interface {
25 | Metric
26 | Collector
27 | }
28 |
29 | // NewUntypedFunc creates a new UntypedFunc based on the provided
30 | // UntypedOpts. The value reported is determined by calling the given function
31 | // from within the Write method.
Take into account that metric collection may
32 | // happen concurrently. If that results in concurrent calls to Write, like in
33 | // the case where an UntypedFunc is directly registered with Prometheus, the
34 | // provided function must be concurrency-safe.
35 | func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
36 | return newValueFunc(NewDesc(
37 | BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
38 | opts.Help,
39 | nil,
40 | opts.ConstLabels,
41 | ), UntypedValue, function)
42 | }
43 | -------------------------------------------------------------------------------- /types/header.go: --------------------------------------------------------------------------------
1 | /*
2 | * Define types and structures related to the block header
3 | */
4 | package types
5 |
6 | import "math/big"
7 |
8 | // Header represents a block header in the Ethereum blockchain.
9 | type Header struct {
10 | ChainID uint64 `json:"chainId" gencodec:"required"` // chain ID
11 | PrevBlockHash Hash `json:"prevHash" gencodec:"required"` // previous block hash
12 | StateRoot Hash `json:"stateRoot" gencodec:"required"` // statedb root
13 | TxRoot Hash `json:"txRoot" gencodec:"required"` // transactions root
14 | ReceiptsRoot Hash `json:"receiptsRoot" gencodec:"required"` // receipts root
15 | Height uint64 `json:"height" gencodec:"required"` // block height
16 | Timestamp uint64 `json:"timestamp" gencodec:"required"` // timestamp
17 | CoinBase Address `json:"coinbase" gencodec:"required"` // coin base
18 |
19 | // not included when computing the header hash
20 | MixDigest Hash `json:"mixDigest" gencodec:"required"` // digest
21 | SigData [][]byte `json:"signData" gencodec:"required"` // signature data
22 |
23 | // fields matching Ethereum's header
24 | ParentHash Hash `json:"parentHash" gencodec:"required"`
25 | Coinbase Address `json:"miner" gencodec:"required"`
26 | TxHash Hash `json:"transactionsRoot" gencodec:"required"`
27 | UncleHash Hash `json:"sha3Uncles" gencodec:"required"`
28 | Bloom Bloom `json:"logsBloom" gencodec:"required"`
29 | Difficulty *big.Int `json:"difficulty" gencodec:"required"`
30 | Number *big.Int `json:"number" gencodec:"required"`
31 | GasLimit uint64 `json:"gasLimit" gencodec:"required"`
32 | GasUsed uint64 `json:"gasUsed" gencodec:"required"`
33 | Extra []byte `json:"extraData" gencodec:"required"`
34 | Nonce uint64 `json:"nonce" gencodec:"required"`
35 | }
36 | -------------------------------------------------------------------------------- /rlp/decode_tail_test.go: --------------------------------------------------------------------------------
1 | // Copyright 2015 The go-ethereum Authors
2 | // This file is part of the go-ethereum library.
3 | //
4 | // The go-ethereum library is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU Lesser General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | //
9 | // The go-ethereum library is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU Lesser General Public License for more details.
13 | //
14 | // You should have received a copy of the GNU Lesser General Public License
15 | // along with the go-ethereum library. If not, see .
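// Editor's note: a hypothetical sketch of NewUntypedFunc from the Prometheus client
// package above (not part of this file). readExternalQueueDepth is an assumed helper;
// as the upstream comment notes, it must be safe to call concurrently.
//
//	queueDepth := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
//		Namespace: "craft",                // hypothetical namespace
//		Name:      "external_queue_depth", // hypothetical metric name
//		Help:      "Queue depth reported by an external system.",
//	}, func() float64 {
//		return readExternalQueueDepth()
//	})
//	prometheus.MustRegister(queueDepth)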
16 | 17 | package rlp 18 | 19 | import ( 20 | "bytes" 21 | "fmt" 22 | ) 23 | 24 | type structWithTail struct { 25 | A, B uint 26 | C []uint `rlp:"tail"` 27 | } 28 | 29 | func ExampleDecode_structTagTail() { 30 | // In this example, the "tail" struct tag is used to decode lists of 31 | // differing length into a struct. 32 | var val structWithTail 33 | 34 | err := Decode(bytes.NewReader([]byte{0xC4, 0x01, 0x02, 0x03, 0x04}), &val) 35 | fmt.Printf("with 4 elements: err=%v val=%v\n", err, val) 36 | 37 | err = Decode(bytes.NewReader([]byte{0xC6, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06}), &val) 38 | fmt.Printf("with 6 elements: err=%v val=%v\n", err, val) 39 | 40 | // Note that at least two list elements must be present to 41 | // fill fields A and B: 42 | err = Decode(bytes.NewReader([]byte{0xC1, 0x01}), &val) 43 | fmt.Printf("with 1 element: err=%q\n", err) 44 | 45 | // Output: 46 | // with 4 elements: err= val={1 2 [3 4]} 47 | // with 6 elements: err= val={1 2 [3 4 5 6]} 48 | // with 1 element: err="rlp: too few elements for rlp.structWithTail" 49 | } 50 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/timer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2016 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package prometheus 15 | 16 | import "time" 17 | 18 | // Timer is a helper type to time functions. Use NewTimer to create new 19 | // instances. 20 | type Timer struct { 21 | begin time.Time 22 | observer Observer 23 | } 24 | 25 | // NewTimer creates a new Timer. The provided Observer is used to observe a 26 | // duration in seconds. Timer is usually used to time a function call in the 27 | // following way: 28 | // func TimeMe() { 29 | // timer := NewTimer(myHistogram) 30 | // defer timer.ObserveDuration() 31 | // // Do actual work. 32 | // } 33 | func NewTimer(o Observer) *Timer { 34 | return &Timer{ 35 | begin: time.Now(), 36 | observer: o, 37 | } 38 | } 39 | 40 | // ObserveDuration records the duration passed since the Timer was created with 41 | // NewTimer. It calls the Observe method of the Observer provided during 42 | // construction with the duration in seconds as an argument. ObserveDuration is 43 | // usually called with a defer statement. 44 | // 45 | // Note that this method is only guaranteed to never observe negative durations 46 | // if used with Go1.9+. 
47 | func (t *Timer) ObserveDuration() { 48 | if t.observer != nil { 49 | t.observer.Observe(time.Since(t.begin).Seconds()) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /types/receipt.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | type Log struct { 4 | // Consensus fields: 5 | // address of the contract that generated the event 6 | Address Address `json:"address" gencodec:"required"` 7 | // list of topics provided by the contract. 8 | Topics []Hash `json:"topics" gencodec:"required"` 9 | // supplied by the contract, usually ABI-encoded 10 | Data []byte `json:"data" gencodec:"required"` 11 | 12 | // Derived fields. These fields are filled in by the node 13 | // but not secured by consensus. 14 | // block in which the transaction was included 15 | BlockNumber uint64 `json:"blockNumber"` 16 | // hash of the transaction 17 | TxHash Hash `json:"transactionHash" gencodec:"required"` 18 | // index of the transaction in the block 19 | TxIndex uint `json:"transactionIndex" gencodec:"required"` 20 | // hash of the block in which the transaction was included 21 | BlockHash Hash `json:"blockHash"` 22 | // index of the log in the receipt 23 | Index uint `json:"logIndex" gencodec:"required"` 24 | 25 | // The Removed field is true if this log was reverted due to a chain reorganisation. 26 | // You must pay attention to this field if you receive logs through a filter query. 27 | Removed bool `json:"removed"` 28 | } 29 | 30 | // Receipt represents the results of a transaction. 31 | type Receipt struct { 32 | // Consensus fields 33 | PostState []byte `json:"root"` 34 | Status uint64 `json:"status"` 35 | CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` 36 | Bloom Bloom `json:"logsBloom" gencodec:"required"` 37 | Logs []*Log `json:"logs" gencodec:"required"` 38 | 39 | // Implementation fields (don't reorder!) 40 | TxHash Hash `json:"transactionHash" gencodec:"required"` 41 | ContractAddress Address `json:"contractAddress"` 42 | GasUsed uint64 `json:"gasUsed" gencodec:"required"` 43 | } 44 | 45 | type Receipts []*Receipt 46 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/proc_io.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package procfs 15 | 16 | import ( 17 | "fmt" 18 | "io/ioutil" 19 | "os" 20 | ) 21 | 22 | // ProcIO models the content of /proc//io. 23 | type ProcIO struct { 24 | // Chars read. 25 | RChar uint64 26 | // Chars written. 27 | WChar uint64 28 | // Read syscalls. 29 | SyscR uint64 30 | // Write syscalls. 31 | SyscW uint64 32 | // Bytes read. 33 | ReadBytes uint64 34 | // Bytes written. 35 | WriteBytes uint64 36 | // Bytes written, but taking into account truncation. 
See 37 | // Documentation/filesystems/proc.txt in the kernel sources for 38 | // detailed explanation. 39 | CancelledWriteBytes int64 40 | } 41 | 42 | // NewIO creates a new ProcIO instance from a given Proc instance. 43 | func (p Proc) NewIO() (ProcIO, error) { 44 | pio := ProcIO{} 45 | 46 | f, err := os.Open(p.path("io")) 47 | if err != nil { 48 | return pio, err 49 | } 50 | defer f.Close() 51 | 52 | data, err := ioutil.ReadAll(f) 53 | if err != nil { 54 | return pio, err 55 | } 56 | 57 | ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + 58 | "read_bytes: %d\nwrite_bytes: %d\n" + 59 | "cancelled_write_bytes: %d\n" 60 | 61 | _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, 62 | &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) 63 | 64 | return pio, err 65 | } 66 | -------------------------------------------------------------------------------- /vendor/github.com/golang/protobuf/proto/deprecated.go: -------------------------------------------------------------------------------- 1 | // Go support for Protocol Buffers - Google's data interchange format 2 | // 3 | // Copyright 2018 The Go Authors. All rights reserved. 4 | // https://github.com/golang/protobuf 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are 8 | // met: 9 | // 10 | // * Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // * Redistributions in binary form must reproduce the above 13 | // copyright notice, this list of conditions and the following disclaimer 14 | // in the documentation and/or other materials provided with the 15 | // distribution. 16 | // * Neither the name of Google Inc. nor the names of its 17 | // contributors may be used to endorse or promote products derived from 18 | // this software without specific prior written permission. 19 | // 20 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 24 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | 32 | package proto 33 | 34 | // Deprecated: do not use. 35 | type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } 36 | 37 | // Deprecated: do not use. 38 | func GetStats() Stats { return Stats{} } 39 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/cbor/string.go: -------------------------------------------------------------------------------- 1 | package cbor 2 | 3 | // AppendStrings encodes and adds an array of strings to the dst byte array. 
4 | func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { 5 | major := majorTypeArray 6 | l := len(vals) 7 | if l <= additionalMax { 8 | lb := byte(l) 9 | dst = append(dst, byte(major|lb)) 10 | } else { 11 | dst = appendCborTypePrefix(dst, major, uint64(l)) 12 | } 13 | for _, v := range vals { 14 | dst = e.AppendString(dst, v) 15 | } 16 | return dst 17 | } 18 | 19 | // AppendString encodes and adds a string to the dst byte array. 20 | func (Encoder) AppendString(dst []byte, s string) []byte { 21 | major := majorTypeUtf8String 22 | 23 | l := len(s) 24 | if l <= additionalMax { 25 | lb := byte(l) 26 | dst = append(dst, byte(major|lb)) 27 | } else { 28 | dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) 29 | } 30 | return append(dst, s...) 31 | } 32 | 33 | // AppendBytes encodes and adds an array of bytes to the dst byte array. 34 | func (Encoder) AppendBytes(dst, s []byte) []byte { 35 | major := majorTypeByteString 36 | 37 | l := len(s) 38 | if l <= additionalMax { 39 | lb := byte(l) 40 | dst = append(dst, byte(major|lb)) 41 | } else { 42 | dst = appendCborTypePrefix(dst, major, uint64(l)) 43 | } 44 | return append(dst, s...) 45 | } 46 | 47 | // AppendEmbeddedJSON adds a tag and embeds input JSON as such. 48 | func AppendEmbeddedJSON(dst, s []byte) []byte { 49 | major := majorTypeTags 50 | minor := additionalTypeEmbeddedJSON 51 | 52 | // Append the TAG to indicate this is Embedded JSON. 53 | dst = append(dst, byte(major|additionalTypeIntUint16)) 54 | dst = append(dst, byte(minor>>8)) 55 | dst = append(dst, byte(minor&0xff)) 56 | 57 | // Append the JSON Object as Byte String. 58 | major = majorTypeByteString 59 | 60 | l := len(s) 61 | if l <= additionalMax { 62 | lb := byte(l) 63 | dst = append(dst, byte(major|lb)) 64 | } else { 65 | dst = appendCborTypePrefix(dst, major, uint64(l)) 66 | } 67 | return append(dst, s...) 68 | } 69 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/observer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package prometheus 15 | 16 | // Observer is the interface that wraps the Observe method, which is used by 17 | // Histogram and Summary to add observations. 18 | type Observer interface { 19 | Observe(float64) 20 | } 21 | 22 | // The ObserverFunc type is an adapter to allow the use of ordinary 23 | // functions as Observers. If f is a function with the appropriate 24 | // signature, ObserverFunc(f) is an Observer that calls f. 25 | // 26 | // This adapter is usually used in connection with the Timer type, and there are 27 | // two general use cases: 28 | // 29 | // The most common one is to use a Gauge as the Observer for a Timer. 30 | // See the "Gauge" Timer example. 
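// For illustration (editor's sketch, not upstream text), the Gauge variant looks
// roughly like this, assuming a self-managed gauge holding the last run's duration:
//
//	g := prometheus.NewGauge(prometheus.GaugeOpts{
//		Name: "last_backup_duration_seconds", // hypothetical metric name
//		Help: "Duration of the last backup run in seconds.",
//	})
//	timer := prometheus.NewTimer(prometheus.ObserverFunc(g.Set))
//	// ... do the work being timed ...
//	timer.ObserveDuration()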
31 | // 32 | // The more advanced use case is to create a function that dynamically decides 33 | // which Observer to use for observing the duration. See the "Complex" Timer 34 | // example. 35 | type ObserverFunc func(float64) 36 | 37 | // Observe calls f(value). It implements Observer. 38 | func (f ObserverFunc) Observe(value float64) { 39 | f(value) 40 | } 41 | 42 | // ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. 43 | type ObserverVec interface { 44 | GetMetricWith(Labels) (Observer, error) 45 | GetMetricWithLabelValues(lvs ...string) (Observer, error) 46 | With(Labels) Observer 47 | WithLabelValues(...string) Observer 48 | CurryWith(Labels) (ObserverVec, error) 49 | MustCurryWith(Labels) ObserverVec 50 | 51 | Collector 52 | } 53 | -------------------------------------------------------------------------------- /scripts/check_license.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright(c) 2018 DSiSc Corp, SecureKey Technologies Inc. All Rights Reserved. 4 | # 5 | # SPDX-License-Identifier: Apache-2.0 6 | # 7 | 8 | 9 | function filterGeneratedFiles { 10 | for f in $@; do 11 | head -n2 $f | grep -qE '// Code generated by' || echo $f 12 | done 13 | } 14 | 15 | function filterExcludedFiles { 16 | CHECK=`echo "$CHECK" | grep -v .png$ | grep -v .rst$ | grep -v ^.git/ \ 17 | | grep -v .pem$ | grep -v .block$ | grep -v .tx$ | grep -v ^LICENSE$ | grep -v _sk$ \ 18 | | grep -v .key$ | grep -v \\.gen.go$ | grep -v ^Gopkg.lock$ \ 19 | | grep -v .md$ | grep -v ^vendor/ | grep -v ^build/ | grep -v .pb.go$ | sort -u` 20 | 21 | CHECK=$(filterGeneratedFiles "$CHECK") 22 | } 23 | 24 | CHECK=$(git diff --name-only --diff-filter=ACMRTUXB HEAD) 25 | filterExcludedFiles 26 | if [[ -z "$CHECK" ]]; then 27 | LAST_COMMITS=($(git log -2 --pretty=format:"%h")) 28 | CHECK=$(git diff-tree --no-commit-id --name-only --diff-filter=ACMRTUXB -r ${LAST_COMMITS[1]} ${LAST_COMMITS[0]}) 29 | filterExcludedFiles 30 | fi 31 | 32 | if [[ -z "$CHECK" ]]; then 33 | echo "All files are excluded from having license headers" 34 | exit 0 35 | fi 36 | 37 | missing=`echo "$CHECK" | xargs ls -d 2>/dev/null | xargs grep -L "SPDX-License-Identifier"` 38 | if [[ -z "$missing" ]]; then 39 | echo "All files have SPDX-License-Identifier headers" 40 | exit 0 41 | fi 42 | echo "The following files are missing SPDX-License-Identifier headers:" 43 | echo "$missing" 44 | echo 45 | echo "Please replace the Apache license header comment text with:" 46 | echo "SPDX-License-Identifier: Apache-2.0" 47 | 48 | echo 49 | echo "Checking committed files for traditional Apache License headers ..." 50 | missing=`echo "$missing" | xargs ls -d 2>/dev/null | xargs grep -L "http://www.apache.org/licenses/LICENSE-2.0"` 51 | if [[ -z "$missing" ]]; then 52 | echo "All remaining files have Apache 2.0 headers" 53 | exit 0 54 | fi 55 | echo "The following files are missing traditional Apache 2.0 headers:" 56 | echo "$missing" 57 | echo "Fatal Error - All files must have a license header" 58 | exit 1 59 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/nfs/parse_nfs.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package nfs 15 | 16 | import ( 17 | "bufio" 18 | "fmt" 19 | "io" 20 | "strings" 21 | 22 | "github.com/prometheus/procfs/internal/util" 23 | ) 24 | 25 | // ParseClientRPCStats returns stats read from /proc/net/rpc/nfs 26 | func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { 27 | stats := &ClientRPCStats{} 28 | 29 | scanner := bufio.NewScanner(r) 30 | for scanner.Scan() { 31 | line := scanner.Text() 32 | parts := strings.Fields(scanner.Text()) 33 | // require at least 34 | if len(parts) < 2 { 35 | return nil, fmt.Errorf("invalid NFS metric line %q", line) 36 | } 37 | 38 | values, err := util.ParseUint64s(parts[1:]) 39 | if err != nil { 40 | return nil, fmt.Errorf("error parsing NFS metric line: %s", err) 41 | } 42 | 43 | switch metricLine := parts[0]; metricLine { 44 | case "net": 45 | stats.Network, err = parseNetwork(values) 46 | case "rpc": 47 | stats.ClientRPC, err = parseClientRPC(values) 48 | case "proc2": 49 | stats.V2Stats, err = parseV2Stats(values) 50 | case "proc3": 51 | stats.V3Stats, err = parseV3Stats(values) 52 | case "proc4": 53 | stats.ClientV4Stats, err = parseClientV4Stats(values) 54 | default: 55 | return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) 56 | } 57 | if err != nil { 58 | return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) 59 | } 60 | } 61 | 62 | if err := scanner.Err(); err != nil { 63 | return nil, fmt.Errorf("error scanning NFS file: %s", err) 64 | } 65 | 66 | return stats, nil 67 | } 68 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/globals.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import "time" 4 | import "sync/atomic" 5 | 6 | var ( 7 | // TimestampFieldName is the field name used for the timestamp field. 8 | TimestampFieldName = "time" 9 | 10 | // LevelFieldName is the field name used for the level field. 11 | LevelFieldName = "level" 12 | 13 | // MessageFieldName is the field name used for the message field. 14 | MessageFieldName = "message" 15 | 16 | // ErrorFieldName is the field name used for error fields. 17 | ErrorFieldName = "error" 18 | 19 | // CallerFieldName is the field name used for caller field. 20 | CallerFieldName = "caller" 21 | 22 | // CallerSkipFrameCount is the number of stack frames to skip to find the caller. 23 | CallerSkipFrameCount = 2 24 | 25 | // TimeFieldFormat defines the time format of the Time field type. 26 | // If set to an empty string, the time is formatted as an UNIX timestamp 27 | // as integer. 28 | TimeFieldFormat = time.RFC3339 29 | 30 | // TimestampFunc defines the function called to generate a timestamp. 31 | TimestampFunc = time.Now 32 | 33 | // DurationFieldUnit defines the unit for time.Duration type fields added 34 | // using the Dur method. 35 | DurationFieldUnit = time.Millisecond 36 | 37 | // DurationFieldInteger renders Dur fields as integer instead of float if 38 | // set to true. 
39 | DurationFieldInteger = false 40 | ) 41 | 42 | var ( 43 | gLevel = new(uint32) 44 | disableSampling = new(uint32) 45 | ) 46 | 47 | // SetGlobalLevel sets the global override for log level. If this 48 | // values is raised, all Loggers will use at least this value. 49 | // 50 | // To globally disable logs, set GlobalLevel to Disabled. 51 | func SetGlobalLevel(l Level) { 52 | atomic.StoreUint32(gLevel, uint32(l)) 53 | } 54 | 55 | // GlobalLevel returns the current global log level 56 | func GlobalLevel() Level { 57 | return Level(atomic.LoadUint32(gLevel)) 58 | } 59 | 60 | // DisableSampling will disable sampling in all Loggers if true. 61 | func DisableSampling(v bool) { 62 | var i uint32 63 | if v { 64 | i = 1 65 | } 66 | atomic.StoreUint32(disableSampling, i) 67 | } 68 | 69 | func samplingDisabled() bool { 70 | return atomic.LoadUint32(disableSampling) == 1 71 | } 72 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/proc_ns.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package procfs 15 | 16 | import ( 17 | "fmt" 18 | "os" 19 | "strconv" 20 | "strings" 21 | ) 22 | 23 | // Namespace represents a single namespace of a process. 24 | type Namespace struct { 25 | Type string // Namespace type. 26 | Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. 27 | } 28 | 29 | // Namespaces contains all of the namespaces that the process is contained in. 30 | type Namespaces map[string]Namespace 31 | 32 | // NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the 33 | // process is a member. 
34 | func (p Proc) NewNamespaces() (Namespaces, error) { 35 | d, err := os.Open(p.path("ns")) 36 | if err != nil { 37 | return nil, err 38 | } 39 | defer d.Close() 40 | 41 | names, err := d.Readdirnames(-1) 42 | if err != nil { 43 | return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) 44 | } 45 | 46 | ns := make(Namespaces, len(names)) 47 | for _, name := range names { 48 | target, err := os.Readlink(p.path("ns", name)) 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | fields := strings.SplitN(target, ":", 2) 54 | if len(fields) != 2 { 55 | return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) 56 | } 57 | 58 | typ := fields[0] 59 | inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) 60 | if err != nil { 61 | return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) 62 | } 63 | 64 | ns[name] = Namespace{typ, uint32(inode)} 65 | } 66 | 67 | return ns, nil 68 | } 69 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/json/bytes.go: -------------------------------------------------------------------------------- 1 | package json 2 | 3 | import "unicode/utf8" 4 | 5 | // AppendBytes is a mirror of appendString with []byte arg 6 | func (Encoder) AppendBytes(dst, s []byte) []byte { 7 | dst = append(dst, '"') 8 | for i := 0; i < len(s); i++ { 9 | if !noEscapeTable[s[i]] { 10 | dst = appendBytesComplex(dst, s, i) 11 | return append(dst, '"') 12 | } 13 | } 14 | dst = append(dst, s...) 15 | return append(dst, '"') 16 | } 17 | 18 | // AppendHex encodes the input bytes to a hex string and appends 19 | // the encoded string to the input byte slice. 20 | // 21 | // The operation loops though each byte and encodes it as hex using 22 | // the hex lookup table. 23 | func (Encoder) AppendHex(dst, s []byte) []byte { 24 | dst = append(dst, '"') 25 | for _, v := range s { 26 | dst = append(dst, hex[v>>4], hex[v&0x0f]) 27 | } 28 | return append(dst, '"') 29 | } 30 | 31 | // appendBytesComplex is a mirror of the appendStringComplex 32 | // with []byte arg 33 | func appendBytesComplex(dst, s []byte, i int) []byte { 34 | start := 0 35 | for i < len(s) { 36 | b := s[i] 37 | if b >= utf8.RuneSelf { 38 | r, size := utf8.DecodeRune(s[i:]) 39 | if r == utf8.RuneError && size == 1 { 40 | if start < i { 41 | dst = append(dst, s[start:i]...) 42 | } 43 | dst = append(dst, `\ufffd`...) 44 | i += size 45 | start = i 46 | continue 47 | } 48 | i += size 49 | continue 50 | } 51 | if noEscapeTable[b] { 52 | i++ 53 | continue 54 | } 55 | // We encountered a character that needs to be encoded. 56 | // Let's append the previous simple characters to the byte slice 57 | // and switch our operation to read and encode the remainder 58 | // characters byte-by-byte. 59 | if start < i { 60 | dst = append(dst, s[start:i]...) 61 | } 62 | switch b { 63 | case '"', '\\': 64 | dst = append(dst, '\\', b) 65 | case '\b': 66 | dst = append(dst, '\\', 'b') 67 | case '\f': 68 | dst = append(dst, '\\', 'f') 69 | case '\n': 70 | dst = append(dst, '\\', 'n') 71 | case '\r': 72 | dst = append(dst, '\\', 'r') 73 | case '\t': 74 | dst = append(dst, '\\', 't') 75 | default: 76 | dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) 77 | } 78 | i++ 79 | start = i 80 | } 81 | if start < len(s) { 82 | dst = append(dst, s[start:]...) 
83 | } 84 | return dst 85 | } 86 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/labels.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package prometheus 15 | 16 | import ( 17 | "errors" 18 | "fmt" 19 | "strings" 20 | "unicode/utf8" 21 | 22 | "github.com/prometheus/common/model" 23 | ) 24 | 25 | // Labels represents a collection of label name -> value mappings. This type is 26 | // commonly used with the With(Labels) and GetMetricWith(Labels) methods of 27 | // metric vector Collectors, e.g.: 28 | // myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) 29 | // 30 | // The other use-case is the specification of constant label pairs in Opts or to 31 | // create a Desc. 32 | type Labels map[string]string 33 | 34 | // reservedLabelPrefix is a prefix which is not legal in user-supplied 35 | // label names. 36 | const reservedLabelPrefix = "__" 37 | 38 | var errInconsistentCardinality = errors.New("inconsistent label cardinality") 39 | 40 | func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { 41 | if len(labels) != expectedNumberOfValues { 42 | return errInconsistentCardinality 43 | } 44 | 45 | for name, val := range labels { 46 | if !utf8.ValidString(val) { 47 | return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) 48 | } 49 | } 50 | 51 | return nil 52 | } 53 | 54 | func validateLabelValues(vals []string, expectedNumberOfValues int) error { 55 | if len(vals) != expectedNumberOfValues { 56 | return errInconsistentCardinality 57 | } 58 | 59 | for _, val := range vals { 60 | if !utf8.ValidString(val) { 61 | return fmt.Errorf("label value %q is not valid UTF-8", val) 62 | } 63 | } 64 | 65 | return nil 66 | } 67 | 68 | func checkLabelName(l string) bool { 69 | return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) 70 | } 71 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/encoder.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | "net" 5 | "time" 6 | ) 7 | 8 | type encoder interface { 9 | AppendArrayDelim(dst []byte) []byte 10 | AppendArrayEnd(dst []byte) []byte 11 | AppendArrayStart(dst []byte) []byte 12 | AppendBeginMarker(dst []byte) []byte 13 | AppendBool(dst []byte, val bool) []byte 14 | AppendBools(dst []byte, vals []bool) []byte 15 | AppendBytes(dst, s []byte) []byte 16 | AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte 17 | AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte 18 | AppendEndMarker(dst []byte) []byte 19 | AppendFloat32(dst []byte, val float32) []byte 20 | AppendFloat64(dst []byte, val float64) []byte 21 | AppendFloats32(dst 
[]byte, vals []float32) []byte 22 | AppendFloats64(dst []byte, vals []float64) []byte 23 | AppendHex(dst, s []byte) []byte 24 | AppendIPAddr(dst []byte, ip net.IP) []byte 25 | AppendIPPrefix(dst []byte, pfx net.IPNet) []byte 26 | AppendInt(dst []byte, val int) []byte 27 | AppendInt16(dst []byte, val int16) []byte 28 | AppendInt32(dst []byte, val int32) []byte 29 | AppendInt64(dst []byte, val int64) []byte 30 | AppendInt8(dst []byte, val int8) []byte 31 | AppendInterface(dst []byte, i interface{}) []byte 32 | AppendInts(dst []byte, vals []int) []byte 33 | AppendInts16(dst []byte, vals []int16) []byte 34 | AppendInts32(dst []byte, vals []int32) []byte 35 | AppendInts64(dst []byte, vals []int64) []byte 36 | AppendInts8(dst []byte, vals []int8) []byte 37 | AppendKey(dst []byte, key string) []byte 38 | AppendLineBreak(dst []byte) []byte 39 | AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte 40 | AppendNil(dst []byte) []byte 41 | AppendObjectData(dst []byte, o []byte) []byte 42 | AppendString(dst []byte, s string) []byte 43 | AppendStrings(dst []byte, vals []string) []byte 44 | AppendTime(dst []byte, t time.Time, format string) []byte 45 | AppendTimes(dst []byte, vals []time.Time, format string) []byte 46 | AppendUint(dst []byte, val uint) []byte 47 | AppendUint16(dst []byte, val uint16) []byte 48 | AppendUint32(dst []byte, val uint32) []byte 49 | AppendUint64(dst []byte, val uint64) []byte 50 | AppendUint8(dst []byte, val uint8) []byte 51 | AppendUints(dst []byte, vals []uint) []byte 52 | AppendUints16(dst []byte, vals []uint16) []byte 53 | AppendUints32(dst []byte, vals []uint32) []byte 54 | AppendUints64(dst []byte, vals []uint64) []byte 55 | AppendUints8(dst []byte, vals []uint8) []byte 56 | } 57 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/json/time.go: -------------------------------------------------------------------------------- 1 | package json 2 | 3 | import ( 4 | "strconv" 5 | "time" 6 | ) 7 | 8 | // AppendTime formats the input time with the given format 9 | // and appends the encoded string to the input byte slice. 10 | func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { 11 | if format == "" { 12 | return e.AppendInt64(dst, t.Unix()) 13 | } 14 | return append(t.AppendFormat(append(dst, '"'), format), '"') 15 | } 16 | 17 | // AppendTimes converts the input times with the given format 18 | // and appends the encoded string list to the input byte slice. 
19 | func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { 20 | if format == "" { 21 | return appendUnixTimes(dst, vals) 22 | } 23 | if len(vals) == 0 { 24 | return append(dst, '[', ']') 25 | } 26 | dst = append(dst, '[') 27 | dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') 28 | if len(vals) > 1 { 29 | for _, t := range vals[1:] { 30 | dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') 31 | } 32 | } 33 | dst = append(dst, ']') 34 | return dst 35 | } 36 | 37 | func appendUnixTimes(dst []byte, vals []time.Time) []byte { 38 | if len(vals) == 0 { 39 | return append(dst, '[', ']') 40 | } 41 | dst = append(dst, '[') 42 | dst = strconv.AppendInt(dst, vals[0].Unix(), 10) 43 | if len(vals) > 1 { 44 | for _, t := range vals[1:] { 45 | dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) 46 | } 47 | } 48 | dst = append(dst, ']') 49 | return dst 50 | } 51 | 52 | // AppendDuration formats the input duration with the given unit & format 53 | // and appends the encoded string to the input byte slice. 54 | func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { 55 | if useInt { 56 | return strconv.AppendInt(dst, int64(d/unit), 10) 57 | } 58 | return e.AppendFloat64(dst, float64(d)/float64(unit)) 59 | } 60 | 61 | // AppendDurations formats the input durations with the given unit & format 62 | // and appends the encoded string list to the input byte slice. 63 | func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { 64 | if len(vals) == 0 { 65 | return append(dst, '[', ']') 66 | } 67 | dst = append(dst, '[') 68 | dst = e.AppendDuration(dst, vals[0], unit, useInt) 69 | if len(vals) > 1 { 70 | for _, d := range vals[1:] { 71 | dst = e.AppendDuration(append(dst, ','), d, unit, useInt) 72 | } 73 | } 74 | dst = append(dst, ']') 75 | return dst 76 | } 77 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt: -------------------------------------------------------------------------------- 1 | PACKAGE 2 | 3 | package goautoneg 4 | import "bitbucket.org/ww/goautoneg" 5 | 6 | HTTP Content-Type Autonegotiation. 7 | 8 | The functions in this package implement the behaviour specified in 9 | http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html 10 | 11 | Copyright (c) 2011, Open Knowledge Foundation Ltd. 12 | All rights reserved. 13 | 14 | Redistribution and use in source and binary forms, with or without 15 | modification, are permitted provided that the following conditions are 16 | met: 17 | 18 | Redistributions of source code must retain the above copyright 19 | notice, this list of conditions and the following disclaimer. 20 | 21 | Redistributions in binary form must reproduce the above copyright 22 | notice, this list of conditions and the following disclaimer in 23 | the documentation and/or other materials provided with the 24 | distribution. 25 | 26 | Neither the name of the Open Knowledge Foundation Ltd. nor the 27 | names of its contributors may be used to endorse or promote 28 | products derived from this software without specific prior written 29 | permission. 30 | 31 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 32 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 34 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 35 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 36 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 37 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 39 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 41 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 | 43 | 44 | FUNCTIONS 45 | 46 | func Negotiate(header string, alternatives []string) (content_type string) 47 | Negotiate the most appropriate content_type given the accept header 48 | and a list of alternatives. 49 | 50 | func ParseAccept(header string) (accept []Accept) 51 | Parse an Accept Header string returning a sorted list 52 | of clauses 53 | 54 | 55 | TYPES 56 | 57 | type Accept struct { 58 | Type, SubType string 59 | Q float32 60 | Params map[string]string 61 | } 62 | Structure to represent a clause in an HTTP Accept Header 63 | 64 | 65 | SUBDIRECTORIES 66 | 67 | .hg 68 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/fs.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package procfs 15 | 16 | import ( 17 | "fmt" 18 | "os" 19 | "path" 20 | 21 | "github.com/prometheus/procfs/nfs" 22 | "github.com/prometheus/procfs/xfs" 23 | ) 24 | 25 | // FS represents the pseudo-filesystem proc, which provides an interface to 26 | // kernel data structures. 27 | type FS string 28 | 29 | // DefaultMountPoint is the common mount point of the proc filesystem. 30 | const DefaultMountPoint = "/proc" 31 | 32 | // NewFS returns a new FS mounted under the given mountPoint. It will error 33 | // if the mount point can't be read. 34 | func NewFS(mountPoint string) (FS, error) { 35 | info, err := os.Stat(mountPoint) 36 | if err != nil { 37 | return "", fmt.Errorf("could not read %s: %s", mountPoint, err) 38 | } 39 | if !info.IsDir() { 40 | return "", fmt.Errorf("mount point %s is not a directory", mountPoint) 41 | } 42 | 43 | return FS(mountPoint), nil 44 | } 45 | 46 | // Path returns the path of the given subsystem relative to the procfs root. 47 | func (fs FS) Path(p ...string) string { 48 | return path.Join(append([]string{string(fs)}, p...)...) 49 | } 50 | 51 | // XFSStats retrieves XFS filesystem runtime statistics. 52 | func (fs FS) XFSStats() (*xfs.Stats, error) { 53 | f, err := os.Open(fs.Path("fs/xfs/stat")) 54 | if err != nil { 55 | return nil, err 56 | } 57 | defer f.Close() 58 | 59 | return xfs.ParseStats(f) 60 | } 61 | 62 | // NFSClientRPCStats retrieves NFS client RPC statistics. 
63 | func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
64 | 	f, err := os.Open(fs.Path("net/rpc/nfs"))
65 | 	if err != nil {
66 | 		return nil, err
67 | 	}
68 | 	defer f.Close()
69 | 
70 | 	return nfs.ParseClientRPCStats(f)
71 | }
72 | 
73 | // NFSdServerRPCStats retrieves NFS daemon RPC statistics.
74 | func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
75 | 	f, err := os.Open(fs.Path("net/rpc/nfsd"))
76 | 	if err != nil {
77 | 		return nil, err
78 | 	}
79 | 	defer f.Close()
80 | 
81 | 	return nfs.ParseServerRPCStats(f)
82 | }
83 | 
--------------------------------------------------------------------------------
/log/README.md:
--------------------------------------------------------------------------------
1 | # HOW TO USE
2 | 
3 | ## For simple usage
4 | 
5 | Import this package:
6 | 
7 | ```
8 | import "github.com/DSiSc/craft/log"
9 | ```
10 | 
11 | And log away:
12 | 
13 | ```
14 | log.DebugKV("This is a debug message.", map[string]interface{}{"hello": "world"})
15 | log.Info("This is an info message.")
16 | ```
17 | 
18 | Logs are printed as follows:
19 | 
20 | ```
21 | 2018-09-03T16:21:57+08:00 |DEBUG| This is a debug message. caller=/home/kang/Workspace/go/src/hello/jtlog/jtlog_test.go:16 hello=world
22 | 2018-09-03T16:21:57+08:00 |INFO| This is an info message. caller=/home/kang/Workspace/go/src/hello/jtlog/jtlog_test.go:17
23 | ```
24 | 
25 | `caller` tells us where this record was logged.
26 | 
27 | ## Add an appender
28 | 
29 | By default, logs are output to the console, in `TEXT` format.
30 | 
31 | If you want to add a new appender which outputs to a specified log file:
32 | 
33 | ```
34 | log.AddFileAppender("/tmp/aaa/aaa.log", log.InfoLevel, log.JsonFmt, true, true)
35 | ```
36 | 
37 | The parameters are: file path, log level, log format, whether to show the caller, and whether to show the timestamp.
38 | 
39 | And do logging:
40 | 
41 | ```
42 | log.DebugKV("This is a debug message on console and file.", map[string]interface{}{"hello": "world"})
43 | log.Info("This is an info message on console and file.")
44 | ```
45 | 
46 | On STDOUT, logs are output as always. And in the log file `/tmp/aaa/aaa.log`:
47 | 
48 | ```
49 | cat /tmp/aaa/aaa.log
50 | {"level":"info","caller":"/home/kang/Workspace/go/src/hello/jtlog/jtlog_test.go:68","time":"2018-09-03T16:26:31+08:00","message":"This is an info message on console and file."}
51 | ```
52 | 
53 | Only the `Info` record is written there, in `JSON` format.
54 | 
55 | You can use `log.AddAppender` to add an appender backed by any other `io.Writer`.
56 | 
57 | ## Change logging manners
58 | 
59 | If you want to change the global log level:
60 | 
61 | ```
62 | log.SetGlobalConfig(config)
63 | ```
64 | 
65 | Or the timestamp format:
66 | 
67 | ```
68 | log.SetTimestampFormat(time.RFC3339Nano)
69 | ```
70 | 
71 | Multiple changes can be made as follows:
72 | 
73 | ```
74 | config := log.GetGlobalConfig() // first, get the default configuration
75 | config.TimeStampFormat = time.RFC3339Nano // then, make changes, such as the timestamp format
76 | config.Appenders[0].Format = log.JsonFmt // or the logging format of the first Appender
77 | log.SetGlobalConfig(config) // finally, refresh the configuration with the modified config
78 | ```
79 | 
80 | For advanced users, composing the whole global `Config` from scratch is also OK (but not recommended):
81 | 
82 | ```
83 | config := &log.Config{
84 | ...
85 | } 86 | log.SetGlobalConfig(config) 87 | ``` 88 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The Prometheus Authors 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | 14 | # Ensure GOBIN is not set during build so that promu is installed to the correct path 15 | unexport GOBIN 16 | 17 | GO ?= go 18 | GOFMT ?= $(GO)fmt 19 | FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) 20 | STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck 21 | pkgs = $(shell $(GO) list ./... | grep -v /vendor/) 22 | 23 | PREFIX ?= $(shell pwd) 24 | BIN_DIR ?= $(shell pwd) 25 | 26 | ifdef DEBUG 27 | bindata_flags = -debug 28 | endif 29 | 30 | STATICCHECK_IGNORE = 31 | 32 | all: format staticcheck build test 33 | 34 | style: 35 | @echo ">> checking code style" 36 | @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' 37 | 38 | check_license: 39 | @echo ">> checking license header" 40 | @./scripts/check_license.sh 41 | 42 | test: fixtures/.unpacked sysfs/fixtures/.unpacked 43 | @echo ">> running all tests" 44 | @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) 45 | 46 | format: 47 | @echo ">> formatting code" 48 | @$(GO) fmt $(pkgs) 49 | 50 | vet: 51 | @echo ">> vetting code" 52 | @$(GO) vet $(pkgs) 53 | 54 | staticcheck: $(STATICCHECK) 55 | @echo ">> running staticcheck" 56 | @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) 57 | 58 | %/.unpacked: %.ttar 59 | ./ttar -C $(dir $*) -x -f $*.ttar 60 | touch $@ 61 | 62 | update_fixtures: fixtures.ttar sysfs/fixtures.ttar 63 | 64 | %fixtures.ttar: %/fixtures 65 | rm -v $(dir $*)fixtures/.unpacked 66 | ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/ 67 | 68 | $(FIRST_GOPATH)/bin/staticcheck: 69 | @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck 70 | 71 | .PHONY: all style check_license format test vet staticcheck 72 | 73 | # Declaring the binaries at their default locations as PHONY targets is a hack 74 | # to ensure the latest version is downloaded on every make execution. 75 | # If this is not desired, copy/symlink these binaries to a different path and 76 | # set the respective environment variables. 77 | .PHONY: $(GOPATH)/bin/staticcheck 78 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. 4 | 5 | You can contribute in many ways: 6 | 7 | ## Types of Contributions 8 | 9 | ### Report Bugs 10 | 11 | Report bugs at https://github.com/DSiSc/craft/issues. 12 | 13 | If you are reporting a bug, please include: 14 | 15 | * Your operating system name and version. 
16 | * Any details about your local setup that might be helpful in troubleshooting.
17 | * Detailed steps to reproduce the bug.
18 | 
19 | ### Fix Bugs
20 | 
21 | Look through the GitHub issues for bugs. Anything tagged with "bug"
22 | is open to whoever wants to implement it.
23 | 
24 | ### Implement Features
25 | 
26 | Look through the GitHub issues for features. Anything tagged with "feature"
27 | is open to whoever wants to implement it.
28 | 
29 | ### Write Documentation
30 | 
31 | craft could always use more documentation, whether as part of the
32 | official craft docs, in doc comments, or even on the web in blog posts,
33 | articles, and such.
34 | 
35 | ### Submit Feedback
36 | 
37 | The best way to send feedback is to file an issue at https://github.com/DSiSc/craft/issues.
38 | 
39 | If you are proposing a feature:
40 | 
41 | * Explain in detail how it would work.
42 | * Keep the scope as narrow as possible, to make it easier to implement.
43 | * Remember that this is a volunteer-driven project, and that contributions
44 |   are welcome :)
45 | 
46 | ## Get Started!
47 | 
48 | Ready to contribute? Here's how to set up `craft` for local development.
49 | 
50 | 1. Fork the `craft` repo on GitHub.
51 | 2. Clone your fork locally:
52 | 
53 |        $ git clone git@github.com:your_name_here/craft.git
54 | 
55 | 3. Create a branch for local development:
56 | 
57 |        $ git checkout -b name-of-your-bugfix-or-feature
58 | 
59 |    Now you can make your changes locally.
60 | 
61 | 4. When you're done making changes, check that your changes pass the tests:
62 | 
63 |        $ make test
64 | 
65 | 5. Commit your changes and push your branch to GitHub. We use the [Angular Commit Guidelines](https://github.com/angular/angular.js/blob/master/DEVELOPERS.md#-git-commit-guidelines) for commit messages (thanks to the Angular team for the good work):
66 | 
67 |        $ git add .
68 |        $ git commit -m "Your detailed description of your changes."
69 |        $ git push origin name-of-your-bugfix-or-feature
70 | 
71 | 6. Submit a pull request through the GitHub website.
72 | 
73 | Pull Request Guidelines
74 | -----------------------
75 | 
76 | Before you submit a pull request, check that it meets these guidelines:
77 | 
78 | 1. The pull request should include tests.
79 | 2. If the pull request adds functionality, the docs should be updated. Put
80 |    your new functionality into a function with a doc comment, and add the
81 |    feature to the list in README.md.
82 | 
--------------------------------------------------------------------------------
/vendor/github.com/prometheus/procfs/buddyinfo.go:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The Prometheus Authors
2 | // Licensed under the Apache License, Version 2.0 (the "License");
3 | // you may not use this file except in compliance with the License.
4 | // You may obtain a copy of the License at
5 | //
6 | // http://www.apache.org/licenses/LICENSE-2.0
7 | //
8 | // Unless required by applicable law or agreed to in writing, software
9 | // distributed under the License is distributed on an "AS IS" BASIS,
10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | // See the License for the specific language governing permissions and
12 | // limitations under the License.
13 | 
14 | package procfs
15 | 
16 | import (
17 | 	"bufio"
18 | 	"fmt"
19 | 	"io"
20 | 	"os"
21 | 	"strconv"
22 | 	"strings"
23 | )
24 | 
25 | // A BuddyInfo is the details parsed from /proc/buddyinfo.
26 | // The data is comprised of an array of free fragments of each size.
27 | // The sizes are 2^n*PAGE_SIZE, where n is the array index. 28 | type BuddyInfo struct { 29 | Node string 30 | Zone string 31 | Sizes []float64 32 | } 33 | 34 | // NewBuddyInfo reads the buddyinfo statistics. 35 | func NewBuddyInfo() ([]BuddyInfo, error) { 36 | fs, err := NewFS(DefaultMountPoint) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | return fs.NewBuddyInfo() 42 | } 43 | 44 | // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 45 | func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { 46 | file, err := os.Open(fs.Path("buddyinfo")) 47 | if err != nil { 48 | return nil, err 49 | } 50 | defer file.Close() 51 | 52 | return parseBuddyInfo(file) 53 | } 54 | 55 | func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { 56 | var ( 57 | buddyInfo = []BuddyInfo{} 58 | scanner = bufio.NewScanner(r) 59 | bucketCount = -1 60 | ) 61 | 62 | for scanner.Scan() { 63 | var err error 64 | line := scanner.Text() 65 | parts := strings.Fields(line) 66 | 67 | if len(parts) < 4 { 68 | return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") 69 | } 70 | 71 | node := strings.TrimRight(parts[1], ",") 72 | zone := strings.TrimRight(parts[3], ",") 73 | arraySize := len(parts[4:]) 74 | 75 | if bucketCount == -1 { 76 | bucketCount = arraySize 77 | } else { 78 | if bucketCount != arraySize { 79 | return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) 80 | } 81 | } 82 | 83 | sizes := make([]float64, arraySize) 84 | for i := 0; i < arraySize; i++ { 85 | sizes[i], err = strconv.ParseFloat(parts[i+4], 64) 86 | if err != nil { 87 | return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) 88 | } 89 | } 90 | 91 | buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) 92 | } 93 | 94 | return buddyInfo, scanner.Err() 95 | } 96 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
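// For orientation, /proc/net/rpc/nfsd is a sequence of whitespace-separated
// metric lines, each beginning with one of the labels handled below ("rc",
// "fh", "io", "th", "ra", "net", "rpc", "proc2", "proc3", "proc4",
// "proc4ops") followed by numeric counters, e.g. (values illustrative only):
//
//	rc 0 6 18622
//	io 157286400 0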
13 | 14 | package nfs 15 | 16 | import ( 17 | "bufio" 18 | "fmt" 19 | "io" 20 | "strings" 21 | 22 | "github.com/prometheus/procfs/internal/util" 23 | ) 24 | 25 | // ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd 26 | func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { 27 | stats := &ServerRPCStats{} 28 | 29 | scanner := bufio.NewScanner(r) 30 | for scanner.Scan() { 31 | line := scanner.Text() 32 | parts := strings.Fields(scanner.Text()) 33 | // require at least 34 | if len(parts) < 2 { 35 | return nil, fmt.Errorf("invalid NFSd metric line %q", line) 36 | } 37 | label := parts[0] 38 | 39 | var values []uint64 40 | var err error 41 | if label == "th" { 42 | if len(parts) < 3 { 43 | return nil, fmt.Errorf("invalid NFSd th metric line %q", line) 44 | } 45 | values, err = util.ParseUint64s(parts[1:3]) 46 | } else { 47 | values, err = util.ParseUint64s(parts[1:]) 48 | } 49 | if err != nil { 50 | return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) 51 | } 52 | 53 | switch metricLine := parts[0]; metricLine { 54 | case "rc": 55 | stats.ReplyCache, err = parseReplyCache(values) 56 | case "fh": 57 | stats.FileHandles, err = parseFileHandles(values) 58 | case "io": 59 | stats.InputOutput, err = parseInputOutput(values) 60 | case "th": 61 | stats.Threads, err = parseThreads(values) 62 | case "ra": 63 | stats.ReadAheadCache, err = parseReadAheadCache(values) 64 | case "net": 65 | stats.Network, err = parseNetwork(values) 66 | case "rpc": 67 | stats.ServerRPC, err = parseServerRPC(values) 68 | case "proc2": 69 | stats.V2Stats, err = parseV2Stats(values) 70 | case "proc3": 71 | stats.V3Stats, err = parseV3Stats(values) 72 | case "proc4": 73 | stats.ServerV4Stats, err = parseServerV4Stats(values) 74 | case "proc4ops": 75 | stats.V4Ops, err = parseV4Ops(values) 76 | default: 77 | return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) 78 | } 79 | if err != nil { 80 | return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) 81 | } 82 | } 83 | 84 | if err := scanner.Err(); err != nil { 85 | return nil, fmt.Errorf("error scanning NFSd file: %s", err) 86 | } 87 | 88 | return stats, nil 89 | } 90 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/expfmt/encode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package expfmt 15 | 16 | import ( 17 | "fmt" 18 | "io" 19 | "net/http" 20 | 21 | "github.com/golang/protobuf/proto" 22 | "github.com/matttproud/golang_protobuf_extensions/pbutil" 23 | "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" 24 | 25 | dto "github.com/prometheus/client_model/go" 26 | ) 27 | 28 | // Encoder types encode metric families into an underlying wire protocol. 
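// A typical negotiation flow in an HTTP exposition handler looks roughly like
// the sketch below (the request, response writer, and metric-family slice are
// assumed to be supplied by the caller; error handling elided):
//
//	format := expfmt.Negotiate(req.Header)
//	enc := expfmt.NewEncoder(w, format)
//	for _, mf := range metricFamilies {
//		if err := enc.Encode(mf); err != nil {
//			// handle error
//		}
//	}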
29 | type Encoder interface { 30 | Encode(*dto.MetricFamily) error 31 | } 32 | 33 | type encoder func(*dto.MetricFamily) error 34 | 35 | func (e encoder) Encode(v *dto.MetricFamily) error { 36 | return e(v) 37 | } 38 | 39 | // Negotiate returns the Content-Type based on the given Accept header. 40 | // If no appropriate accepted type is found, FmtText is returned. 41 | func Negotiate(h http.Header) Format { 42 | for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { 43 | // Check for protocol buffer 44 | if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { 45 | switch ac.Params["encoding"] { 46 | case "delimited": 47 | return FmtProtoDelim 48 | case "text": 49 | return FmtProtoText 50 | case "compact-text": 51 | return FmtProtoCompact 52 | } 53 | } 54 | // Check for text format. 55 | ver := ac.Params["version"] 56 | if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { 57 | return FmtText 58 | } 59 | } 60 | return FmtText 61 | } 62 | 63 | // NewEncoder returns a new encoder based on content type negotiation. 64 | func NewEncoder(w io.Writer, format Format) Encoder { 65 | switch format { 66 | case FmtProtoDelim: 67 | return encoder(func(v *dto.MetricFamily) error { 68 | _, err := pbutil.WriteDelimited(w, v) 69 | return err 70 | }) 71 | case FmtProtoCompact: 72 | return encoder(func(v *dto.MetricFamily) error { 73 | _, err := fmt.Fprintln(w, v.String()) 74 | return err 75 | }) 76 | case FmtProtoText: 77 | return encoder(func(v *dto.MetricFamily) error { 78 | _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) 79 | return err 80 | }) 81 | case FmtText: 82 | return encoder(func(v *dto.MetricFamily) error { 83 | _, err := MetricFamilyToText(w, v) 84 | return err 85 | }) 86 | } 87 | panic("expfmt.NewEncoder: unknown format") 88 | } 89 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/README.md: -------------------------------------------------------------------------------- 1 | # package metrics 2 | 3 | `package metrics` provides a set of uniform interfaces for service instrumentation. 4 | It has 5 | [counters](http://prometheus.io/docs/concepts/metric_types/#counter), 6 | [gauges](http://prometheus.io/docs/concepts/metric_types/#gauge), and 7 | [histograms](http://prometheus.io/docs/concepts/metric_types/#histogram), 8 | and provides adapters to popular metrics packages, like 9 | [expvar](https://golang.org/pkg/expvar), 10 | [StatsD](https://github.com/etsy/statsd), and 11 | [Prometheus](https://prometheus.io). 12 | 13 | ## Rationale 14 | 15 | Code instrumentation is absolutely essential to achieve 16 | [observability](https://speakerdeck.com/mattheath/observability-in-micro-service-architectures) 17 | into a distributed system. 18 | Metrics and instrumentation tools have coalesced around a few well-defined idioms. 19 | `package metrics` provides a common, minimal interface those idioms for service authors. 20 | 21 | ## Usage 22 | 23 | A simple counter, exported via expvar. 24 | 25 | ```go 26 | import ( 27 | "github.com/go-kit/kit/metrics" 28 | "github.com/go-kit/kit/metrics/expvar" 29 | ) 30 | 31 | func main() { 32 | var myCount metrics.Counter 33 | myCount = expvar.NewCounter("my_count") 34 | myCount.Add(1) 35 | } 36 | ``` 37 | 38 | A histogram for request duration, 39 | exported via a Prometheus summary with dynamically-computed quantiles. 
40 | 41 | ```go 42 | import ( 43 | "time" 44 | 45 | stdprometheus "github.com/prometheus/client_golang/prometheus" 46 | 47 | "github.com/go-kit/kit/metrics" 48 | "github.com/go-kit/kit/metrics/prometheus" 49 | ) 50 | 51 | func main() { 52 | var dur metrics.Histogram = prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ 53 | Namespace: "myservice", 54 | Subsystem: "api", 55 | Name: "request_duration_seconds", 56 | Help: "Total time spent serving requests.", 57 | }, []string{}) 58 | // ... 59 | } 60 | 61 | func handleRequest(dur metrics.Histogram) { 62 | defer func(begin time.Time) { dur.Observe(time.Since(begin).Seconds()) }(time.Now()) 63 | // handle request 64 | } 65 | ``` 66 | 67 | A gauge for the number of goroutines currently running, exported via StatsD. 68 | 69 | ```go 70 | import ( 71 | "net" 72 | "os" 73 | "runtime" 74 | "time" 75 | 76 | "github.com/go-kit/kit/metrics" 77 | "github.com/go-kit/kit/metrics/statsd" 78 | ) 79 | 80 | func main() { 81 | statsd := statsd.New("foo_svc.", log.NewNopLogger()) 82 | report := time.NewTicker(5 * time.Second) 83 | defer report.Stop() 84 | go statsd.SendLoop(report.C, "tcp", "statsd.internal:8125") 85 | goroutines := statsd.NewGauge("goroutine_count") 86 | go exportGoroutines(goroutines) 87 | // ... 88 | } 89 | 90 | func exportGoroutines(g metrics.Gauge) { 91 | for range time.Tick(time.Second) { 92 | g.Set(float64(runtime.NumGoroutine())) 93 | } 94 | } 95 | ``` 96 | 97 | For more information, see [the package documentation](https://godoc.org/github.com/go-kit/kit/metrics). 98 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/cbor/time.go: -------------------------------------------------------------------------------- 1 | package cbor 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | func appendIntegerTimestamp(dst []byte, t time.Time) []byte { 8 | major := majorTypeTags 9 | minor := additionalTypeTimestamp 10 | dst = append(dst, byte(major|minor)) 11 | secs := t.Unix() 12 | var val uint64 13 | if secs < 0 { 14 | major = majorTypeNegativeInt 15 | val = uint64(-secs - 1) 16 | } else { 17 | major = majorTypeUnsignedInt 18 | val = uint64(secs) 19 | } 20 | dst = appendCborTypePrefix(dst, major, uint64(val)) 21 | return dst 22 | } 23 | 24 | func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { 25 | major := majorTypeTags 26 | minor := additionalTypeTimestamp 27 | dst = append(dst, byte(major|minor)) 28 | secs := t.Unix() 29 | nanos := t.Nanosecond() 30 | var val float64 31 | val = float64(secs)*1.0 + float64(nanos)*1E-9 32 | return e.AppendFloat64(dst, val) 33 | } 34 | 35 | // AppendTime encodes and adds a timestamp to the dst byte array. 36 | func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { 37 | utc := t.UTC() 38 | if utc.Nanosecond() == 0 { 39 | return appendIntegerTimestamp(dst, utc) 40 | } 41 | return e.appendFloatTimestamp(dst, utc) 42 | } 43 | 44 | // AppendTimes encodes and adds an array of timestamps to the dst byte array. 
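// As elsewhere in this package, the element count is packed directly into the
// initial byte whenever it fits in the 5-bit additional-info field (up to
// additionalMax, i.e. 23); longer arrays fall back to appendCborTypePrefix,
// which emits a multi-byte length prefix.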
45 | func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { 46 | major := majorTypeArray 47 | l := len(vals) 48 | if l == 0 { 49 | return e.AppendArrayEnd(e.AppendArrayStart(dst)) 50 | } 51 | if l <= additionalMax { 52 | lb := byte(l) 53 | dst = append(dst, byte(major|lb)) 54 | } else { 55 | dst = appendCborTypePrefix(dst, major, uint64(l)) 56 | } 57 | 58 | for _, t := range vals { 59 | dst = e.AppendTime(dst, t, unused) 60 | } 61 | return dst 62 | } 63 | 64 | // AppendDuration encodes and adds a duration to the dst byte array. 65 | // useInt field indicates whether to store the duration as seconds (integer) or 66 | // as seconds+nanoseconds (float). 67 | func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { 68 | if useInt { 69 | return e.AppendInt64(dst, int64(d/unit)) 70 | } 71 | return e.AppendFloat64(dst, float64(d)/float64(unit)) 72 | } 73 | 74 | // AppendDurations encodes and adds an array of durations to the dst byte array. 75 | // useInt field indicates whether to store the duration as seconds (integer) or 76 | // as seconds+nanoseconds (float). 77 | func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { 78 | major := majorTypeArray 79 | l := len(vals) 80 | if l == 0 { 81 | return e.AppendArrayEnd(e.AppendArrayStart(dst)) 82 | } 83 | if l <= additionalMax { 84 | lb := byte(l) 85 | dst = append(dst, byte(major|lb)) 86 | } else { 87 | dst = appendCborTypePrefix(dst, major, uint64(l)) 88 | } 89 | for _, d := range vals { 90 | dst = e.AppendDuration(dst, d, unit, useInt) 91 | } 92 | return dst 93 | } 94 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/writer.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | "io" 5 | "sync" 6 | ) 7 | 8 | // LevelWriter defines as interface a writer may implement in order 9 | // to receive level information with payload. 10 | type LevelWriter interface { 11 | io.Writer 12 | WriteLevel(level Level, p []byte) (n int, err error) 13 | } 14 | 15 | type levelWriterAdapter struct { 16 | io.Writer 17 | } 18 | 19 | func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { 20 | return lw.Write(p) 21 | } 22 | 23 | type syncWriter struct { 24 | mu sync.Mutex 25 | lw LevelWriter 26 | } 27 | 28 | // SyncWriter wraps w so that each call to Write is synchronized with a mutex. 29 | // This syncer can be the call to writer's Write method is not thread safe. 30 | // Note that os.File Write operation is using write() syscall which is supposed 31 | // to be thread-safe on POSIX systems. So there is no need to use this with 32 | // os.File on such systems as zerolog guaranties to issue a single Write call 33 | // per log event. 34 | func SyncWriter(w io.Writer) io.Writer { 35 | if lw, ok := w.(LevelWriter); ok { 36 | return &syncWriter{lw: lw} 37 | } 38 | return &syncWriter{lw: levelWriterAdapter{w}} 39 | } 40 | 41 | // Write implements the io.Writer interface. 42 | func (s *syncWriter) Write(p []byte) (n int, err error) { 43 | s.mu.Lock() 44 | defer s.mu.Unlock() 45 | return s.lw.Write(p) 46 | } 47 | 48 | // WriteLevel implements the LevelWriter interface. 
49 | func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) { 50 | s.mu.Lock() 51 | defer s.mu.Unlock() 52 | return s.lw.WriteLevel(l, p) 53 | } 54 | 55 | type multiLevelWriter struct { 56 | writers []LevelWriter 57 | } 58 | 59 | func (t multiLevelWriter) Write(p []byte) (n int, err error) { 60 | for _, w := range t.writers { 61 | n, err = w.Write(p) 62 | if err != nil { 63 | return 64 | } 65 | if n != len(p) { 66 | err = io.ErrShortWrite 67 | return 68 | } 69 | } 70 | return len(p), nil 71 | } 72 | 73 | func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) { 74 | for _, w := range t.writers { 75 | n, err = w.WriteLevel(l, p) 76 | if err != nil { 77 | return 78 | } 79 | if n != len(p) { 80 | err = io.ErrShortWrite 81 | return 82 | } 83 | } 84 | return len(p), nil 85 | } 86 | 87 | // MultiLevelWriter creates a writer that duplicates its writes to all the 88 | // provided writers, similar to the Unix tee(1) command. If some writers 89 | // implement LevelWriter, their WriteLevel method will be used instead of Write. 90 | func MultiLevelWriter(writers ...io.Writer) LevelWriter { 91 | lwriters := make([]LevelWriter, 0, len(writers)) 92 | for _, w := range writers { 93 | if lw, ok := w.(LevelWriter); ok { 94 | lwriters = append(lwriters, lw) 95 | } else { 96 | lwriters = append(lwriters, levelWriterAdapter{w}) 97 | } 98 | } 99 | return multiLevelWriter{lwriters} 100 | } 101 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package internal 15 | 16 | import ( 17 | "sort" 18 | 19 | dto "github.com/prometheus/client_model/go" 20 | ) 21 | 22 | // metricSorter is a sortable slice of *dto.Metric. 23 | type metricSorter []*dto.Metric 24 | 25 | func (s metricSorter) Len() int { 26 | return len(s) 27 | } 28 | 29 | func (s metricSorter) Swap(i, j int) { 30 | s[i], s[j] = s[j], s[i] 31 | } 32 | 33 | func (s metricSorter) Less(i, j int) bool { 34 | if len(s[i].Label) != len(s[j].Label) { 35 | // This should not happen. The metrics are 36 | // inconsistent. However, we have to deal with the fact, as 37 | // people might use custom collectors or metric family injection 38 | // to create inconsistent metrics. So let's simply compare the 39 | // number of labels in this case. That will still yield 40 | // reproducible sorting. 41 | return len(s[i].Label) < len(s[j].Label) 42 | } 43 | for n, lp := range s[i].Label { 44 | vi := lp.GetValue() 45 | vj := s[j].Label[n].GetValue() 46 | if vi != vj { 47 | return vi < vj 48 | } 49 | } 50 | 51 | // We should never arrive here. Multiple metrics with the same 52 | // label set in the same scrape will lead to undefined ingestion 53 | // behavior. 
However, as above, we have to provide stable sorting 54 | // here, even for inconsistent metrics. So sort equal metrics 55 | // by their timestamp, with missing timestamps (implying "now") 56 | // coming last. 57 | if s[i].TimestampMs == nil { 58 | return false 59 | } 60 | if s[j].TimestampMs == nil { 61 | return true 62 | } 63 | return s[i].GetTimestampMs() < s[j].GetTimestampMs() 64 | } 65 | 66 | // NormalizeMetricFamilies returns a MetricFamily slice with empty 67 | // MetricFamilies pruned and the remaining MetricFamilies sorted by name within 68 | // the slice, with the contained Metrics sorted within each MetricFamily. 69 | func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { 70 | for _, mf := range metricFamiliesByName { 71 | sort.Sort(metricSorter(mf.Metric)) 72 | } 73 | names := make([]string, 0, len(metricFamiliesByName)) 74 | for name, mf := range metricFamiliesByName { 75 | if len(mf.Metric) > 0 { 76 | names = append(names, name) 77 | } 78 | } 79 | sort.Strings(names) 80 | result := make([]*dto.MetricFamily, 0, len(names)) 81 | for _, name := range names { 82 | result = append(result, metricFamiliesByName[name]) 83 | } 84 | return result 85 | } 86 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/model/fingerprinting.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package model 15 | 16 | import ( 17 | "fmt" 18 | "strconv" 19 | ) 20 | 21 | // Fingerprint provides a hash-capable representation of a Metric. 22 | // For our purposes, FNV-1A 64-bit is used. 23 | type Fingerprint uint64 24 | 25 | // FingerprintFromString transforms a string representation into a Fingerprint. 26 | func FingerprintFromString(s string) (Fingerprint, error) { 27 | num, err := strconv.ParseUint(s, 16, 64) 28 | return Fingerprint(num), err 29 | } 30 | 31 | // ParseFingerprint parses the input string into a fingerprint. 32 | func ParseFingerprint(s string) (Fingerprint, error) { 33 | num, err := strconv.ParseUint(s, 16, 64) 34 | if err != nil { 35 | return 0, err 36 | } 37 | return Fingerprint(num), nil 38 | } 39 | 40 | func (f Fingerprint) String() string { 41 | return fmt.Sprintf("%016x", uint64(f)) 42 | } 43 | 44 | // Fingerprints represents a collection of Fingerprint subject to a given 45 | // natural sorting scheme. It implements sort.Interface. 46 | type Fingerprints []Fingerprint 47 | 48 | // Len implements sort.Interface. 49 | func (f Fingerprints) Len() int { 50 | return len(f) 51 | } 52 | 53 | // Less implements sort.Interface. 54 | func (f Fingerprints) Less(i, j int) bool { 55 | return f[i] < f[j] 56 | } 57 | 58 | // Swap implements sort.Interface. 59 | func (f Fingerprints) Swap(i, j int) { 60 | f[i], f[j] = f[j], f[i] 61 | } 62 | 63 | // FingerprintSet is a set of Fingerprints. 
64 | type FingerprintSet map[Fingerprint]struct{} 65 | 66 | // Equal returns true if both sets contain the same elements (and not more). 67 | func (s FingerprintSet) Equal(o FingerprintSet) bool { 68 | if len(s) != len(o) { 69 | return false 70 | } 71 | 72 | for k := range s { 73 | if _, ok := o[k]; !ok { 74 | return false 75 | } 76 | } 77 | 78 | return true 79 | } 80 | 81 | // Intersection returns the elements contained in both sets. 82 | func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { 83 | myLength, otherLength := len(s), len(o) 84 | if myLength == 0 || otherLength == 0 { 85 | return FingerprintSet{} 86 | } 87 | 88 | subSet := s 89 | superSet := o 90 | 91 | if otherLength < myLength { 92 | subSet = o 93 | superSet = s 94 | } 95 | 96 | out := FingerprintSet{} 97 | 98 | for k := range subSet { 99 | if _, ok := superSet[k]; ok { 100 | out[k] = struct{}{} 101 | } 102 | } 103 | 104 | return out 105 | } 106 | -------------------------------------------------------------------------------- /vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Matt T. Proud 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package pbutil 16 | 17 | import ( 18 | "encoding/binary" 19 | "errors" 20 | "io" 21 | 22 | "github.com/golang/protobuf/proto" 23 | ) 24 | 25 | var errInvalidVarint = errors.New("invalid varint32 encountered") 26 | 27 | // ReadDelimited decodes a message from the provided length-delimited stream, 28 | // where the length is encoded as 32-bit varint prefix to the message body. 29 | // It returns the total number of bytes read and any applicable error. This is 30 | // roughly equivalent to the companion Java API's 31 | // MessageLite#parseDelimitedFrom. As per the reader contract, this function 32 | // calls r.Read repeatedly as required until exactly one message including its 33 | // prefix is read and decoded (or an error has occurred). The function never 34 | // reads more bytes from the stream than required. The function never returns 35 | // an error if a message has been read and decoded correctly, even if the end 36 | // of the stream has been reached in doing so. In that case, any subsequent 37 | // calls return (0, io.EOF). 38 | func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { 39 | // Per AbstractParser#parsePartialDelimitedFrom with 40 | // CodedInputStream#readRawVarint32. 41 | var headerBuf [binary.MaxVarintLen32]byte 42 | var bytesRead, varIntBytes int 43 | var messageLength uint64 44 | for varIntBytes == 0 { // i.e. no varint has been decoded yet. 45 | if bytesRead >= len(headerBuf) { 46 | return bytesRead, errInvalidVarint 47 | } 48 | // We have to read byte by byte here to avoid reading more bytes 49 | // than required. Each read byte is appended to what we have 50 | // read before. 
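		// (For reference: the length prefix is a standard protobuf varint,
		// i.e. 7 payload bits per byte with the high bit set on every byte
		// except the last, so any message shorter than 128 bytes carries a
		// one-byte prefix.)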
51 | newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) 52 | if newBytesRead == 0 { 53 | if err != nil { 54 | return bytesRead, err 55 | } 56 | // A Reader should not return (0, nil), but if it does, 57 | // it should be treated as no-op (according to the 58 | // Reader contract). So let's go on... 59 | continue 60 | } 61 | bytesRead += newBytesRead 62 | // Now present everything read so far to the varint decoder and 63 | // see if a varint can be decoded already. 64 | messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) 65 | } 66 | 67 | messageBuf := make([]byte, messageLength) 68 | newBytesRead, err := io.ReadFull(r, messageBuf) 69 | bytesRead += newBytesRead 70 | if err != nil { 71 | return bytesRead, err 72 | } 73 | 74 | return bytesRead, proto.Unmarshal(messageBuf, m) 75 | } 76 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/model/silence.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package model 15 | 16 | import ( 17 | "encoding/json" 18 | "fmt" 19 | "regexp" 20 | "time" 21 | ) 22 | 23 | // Matcher describes a matches the value of a given label. 24 | type Matcher struct { 25 | Name LabelName `json:"name"` 26 | Value string `json:"value"` 27 | IsRegex bool `json:"isRegex"` 28 | } 29 | 30 | func (m *Matcher) UnmarshalJSON(b []byte) error { 31 | type plain Matcher 32 | if err := json.Unmarshal(b, (*plain)(m)); err != nil { 33 | return err 34 | } 35 | 36 | if len(m.Name) == 0 { 37 | return fmt.Errorf("label name in matcher must not be empty") 38 | } 39 | if m.IsRegex { 40 | if _, err := regexp.Compile(m.Value); err != nil { 41 | return err 42 | } 43 | } 44 | return nil 45 | } 46 | 47 | // Validate returns true iff all fields of the matcher have valid values. 48 | func (m *Matcher) Validate() error { 49 | if !m.Name.IsValid() { 50 | return fmt.Errorf("invalid name %q", m.Name) 51 | } 52 | if m.IsRegex { 53 | if _, err := regexp.Compile(m.Value); err != nil { 54 | return fmt.Errorf("invalid regular expression %q", m.Value) 55 | } 56 | } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { 57 | return fmt.Errorf("invalid value %q", m.Value) 58 | } 59 | return nil 60 | } 61 | 62 | // Silence defines the representation of a silence definition in the Prometheus 63 | // eco-system. 64 | type Silence struct { 65 | ID uint64 `json:"id,omitempty"` 66 | 67 | Matchers []*Matcher `json:"matchers"` 68 | 69 | StartsAt time.Time `json:"startsAt"` 70 | EndsAt time.Time `json:"endsAt"` 71 | 72 | CreatedAt time.Time `json:"createdAt,omitempty"` 73 | CreatedBy string `json:"createdBy"` 74 | Comment string `json:"comment,omitempty"` 75 | } 76 | 77 | // Validate returns true iff all fields of the silence have valid values. 
78 | func (s *Silence) Validate() error { 79 | if len(s.Matchers) == 0 { 80 | return fmt.Errorf("at least one matcher required") 81 | } 82 | for _, m := range s.Matchers { 83 | if err := m.Validate(); err != nil { 84 | return fmt.Errorf("invalid matcher: %s", err) 85 | } 86 | } 87 | if s.StartsAt.IsZero() { 88 | return fmt.Errorf("start time missing") 89 | } 90 | if s.EndsAt.IsZero() { 91 | return fmt.Errorf("end time missing") 92 | } 93 | if s.EndsAt.Before(s.StartsAt) { 94 | return fmt.Errorf("start time must be before end time") 95 | } 96 | if s.CreatedBy == "" { 97 | return fmt.Errorf("creator information missing") 98 | } 99 | if s.Comment == "" { 100 | return fmt.Errorf("comment missing") 101 | } 102 | if s.CreatedAt.IsZero() { 103 | return fmt.Errorf("creation timestamp missing") 104 | } 105 | return nil 106 | } 107 | -------------------------------------------------------------------------------- /types/transaction.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "github.com/DSiSc/craft/rlp" 5 | "io" 6 | "math/big" 7 | "sync/atomic" 8 | ) 9 | 10 | type Transaction struct { 11 | Data TxData 12 | Hash atomic.Value 13 | Size atomic.Value 14 | From atomic.Value 15 | } 16 | 17 | type TxData struct { 18 | AccountNonce uint64 `json:"nonce" gencodec:"required"` 19 | Price *big.Int `json:"gasPrice" gencodec:"required"` 20 | GasLimit uint64 `json:"gas" gencodec:"required"` 21 | Recipient *Address `json:"to" rlp:"nil"` 22 | From *Address `json:"from" rlp:"-"` 23 | Amount *big.Int `json:"value" gencodec:"required"` 24 | Payload []byte `json:"input" gencodec:"required"` 25 | 26 | // Signature values 27 | V *big.Int `json:"v" gencodec:"required"` 28 | R *big.Int `json:"r" gencodec:"required"` 29 | S *big.Int `json:"s" gencodec:"required"` 30 | 31 | // This is only used when marshaling to JSON. 32 | Hash *Hash `json:"hash" rlp:"-"` 33 | } 34 | 35 | // EncodeRLP implements rlp.Encoder 36 | func (tx *Transaction) EncodeRLP(w io.Writer) error { 37 | return rlp.Encode(w, &tx.Data) 38 | } 39 | 40 | // DecodeRLP implements rlp.Decoder 41 | func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { 42 | _, size, _ := s.Kind() 43 | err := s.Decode(&tx.Data) 44 | if err == nil { 45 | tx.Size.Store(StorageSize(rlp.ListSize(size))) 46 | } 47 | 48 | return err 49 | } 50 | 51 | type ETransaction struct { 52 | data txdata 53 | hash atomic.Value 54 | size atomic.Value 55 | from atomic.Value 56 | } 57 | 58 | type txdata struct { 59 | AccountNonce uint64 `json:"nonce" gencodec:"required"` 60 | Price *big.Int `json:"gasPrice" gencodec:"required"` 61 | GasLimit uint64 `json:"gas" gencodec:"required"` 62 | Recipient *Address `json:"to" rlp:"nil"` // nil means contract creation 63 | Amount *big.Int `json:"value" gencodec:"required"` 64 | Payload []byte `json:"input" gencodec:"required"` 65 | 66 | // Signature values 67 | V *big.Int `json:"v" gencodec:"required"` 68 | R *big.Int `json:"r" gencodec:"required"` 69 | S *big.Int `json:"s" gencodec:"required"` 70 | 71 | // This is only used when marshaling to JSON. 
72 | Hash *Hash `json:"hash" rlp:"-"` 73 | } 74 | 75 | func (tx *ETransaction) DecodeBytes(encodedTx []byte) error { 76 | return rlp.DecodeBytes(encodedTx, &tx.data) 77 | } 78 | 79 | func (tx *ETransaction) GetTxData() TxData { 80 | txData := new(TxData) 81 | txData.AccountNonce = tx.data.AccountNonce 82 | txData.Price = tx.data.Price 83 | txData.GasLimit = tx.data.GasLimit 84 | txData.Recipient = tx.data.Recipient 85 | txData.Amount = tx.data.Amount 86 | txData.Payload = tx.data.Payload 87 | 88 | txData.V = tx.data.V 89 | txData.R = tx.data.R 90 | txData.S = tx.data.S 91 | 92 | return *txData 93 | } 94 | 95 | func (tx *ETransaction) SetTxData(txData *TxData) error { 96 | 97 | //res, _ := json.Marshal(tx.data) 98 | //json.Unmarshal(res, txData) 99 | txData.AccountNonce = tx.data.AccountNonce 100 | txData.Price = tx.data.Price 101 | txData.GasLimit = tx.data.GasLimit 102 | txData.Recipient = tx.data.Recipient 103 | txData.Amount = tx.data.Amount 104 | txData.Payload = tx.data.Payload 105 | 106 | txData.V = tx.data.V 107 | txData.R = tx.data.R 108 | txData.S = tx.data.S 109 | 110 | return nil 111 | } 112 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/cbor/cbor.go: -------------------------------------------------------------------------------- 1 | // Package cbor provides primitives for storing different data 2 | // in the CBOR (binary) format. CBOR is defined in RFC7049. 3 | package cbor 4 | 5 | import "time" 6 | 7 | const ( 8 | majorOffset = 5 9 | additionalMax = 23 10 | 11 | // Non Values. 12 | additionalTypeBoolFalse byte = 20 13 | additionalTypeBoolTrue byte = 21 14 | additionalTypeNull byte = 22 15 | 16 | // Integer (+ve and -ve) Sub-types. 17 | additionalTypeIntUint8 byte = 24 18 | additionalTypeIntUint16 byte = 25 19 | additionalTypeIntUint32 byte = 26 20 | additionalTypeIntUint64 byte = 27 21 | 22 | // Float Sub-types. 23 | additionalTypeFloat16 byte = 25 24 | additionalTypeFloat32 byte = 26 25 | additionalTypeFloat64 byte = 27 26 | additionalTypeBreak byte = 31 27 | 28 | // Tag Sub-types. 29 | additionalTypeTimestamp byte = 01 30 | 31 | // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml 32 | additionalTypeTagNetworkAddr uint16 = 260 33 | additionalTypeTagNetworkPrefix uint16 = 261 34 | additionalTypeEmbeddedJSON uint16 = 262 35 | additionalTypeTagHexString uint16 = 263 36 | 37 | // Unspecified number of elements. 38 | additionalTypeInfiniteCount byte = 31 39 | ) 40 | const ( 41 | majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 42 | majorTypeNegativeInt // Major type 1 43 | majorTypeByteString // Major type 2 44 | majorTypeUtf8String // Major type 3 45 | majorTypeArray // Major type 4 46 | majorTypeMap // Major type 5 47 | majorTypeTags // Major type 6 48 | majorTypeSimpleAndFloat // Major type 7 49 | ) 50 | 51 | const ( 52 | maskOutAdditionalType byte = (7 << majorOffset) 53 | maskOutMajorType byte = 31 54 | ) 55 | 56 | const ( 57 | float32Nan = "\xfa\x7f\xc0\x00\x00" 58 | float32PosInfinity = "\xfa\x7f\x80\x00\x00" 59 | float32NegInfinity = "\xfa\xff\x80\x00\x00" 60 | float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" 61 | float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" 62 | float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" 63 | ) 64 | 65 | // IntegerTimeFieldFormat indicates the format of timestamp decoded 66 | // from an integer (time in seconds). 
67 | var IntegerTimeFieldFormat = time.RFC3339 68 | 69 | // NanoTimeFieldFormat indicates the format of timestamp decoded 70 | // from a float value (time in seconds and nano seconds). 71 | var NanoTimeFieldFormat = time.RFC3339Nano 72 | 73 | func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { 74 | byteCount := 8 75 | var minor byte 76 | switch { 77 | case number < 256: 78 | byteCount = 1 79 | minor = additionalTypeIntUint8 80 | 81 | case number < 65536: 82 | byteCount = 2 83 | minor = additionalTypeIntUint16 84 | 85 | case number < 4294967296: 86 | byteCount = 4 87 | minor = additionalTypeIntUint32 88 | 89 | default: 90 | byteCount = 8 91 | minor = additionalTypeIntUint64 92 | 93 | } 94 | dst = append(dst, byte(major|minor)) 95 | byteCount-- 96 | for ; byteCount >= 0; byteCount-- { 97 | dst = append(dst, byte(number>>(uint(byteCount)*8))) 98 | } 99 | return dst 100 | } 101 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/model/metric.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package model 15 | 16 | import ( 17 | "fmt" 18 | "regexp" 19 | "sort" 20 | "strings" 21 | ) 22 | 23 | var ( 24 | separator = []byte{0} 25 | // MetricNameRE is a regular expression matching valid metric 26 | // names. Note that the IsValidMetricName function performs the same 27 | // check but faster than a match with this regular expression. 28 | MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) 29 | ) 30 | 31 | // A Metric is similar to a LabelSet, but the key difference is that a Metric is 32 | // a singleton and refers to one and only one stream of samples. 33 | type Metric LabelSet 34 | 35 | // Equal compares the metrics. 36 | func (m Metric) Equal(o Metric) bool { 37 | return LabelSet(m).Equal(LabelSet(o)) 38 | } 39 | 40 | // Before compares the metrics' underlying label sets. 41 | func (m Metric) Before(o Metric) bool { 42 | return LabelSet(m).Before(LabelSet(o)) 43 | } 44 | 45 | // Clone returns a copy of the Metric. 
46 | func (m Metric) Clone() Metric { 47 | clone := make(Metric, len(m)) 48 | for k, v := range m { 49 | clone[k] = v 50 | } 51 | return clone 52 | } 53 | 54 | func (m Metric) String() string { 55 | metricName, hasName := m[MetricNameLabel] 56 | numLabels := len(m) - 1 57 | if !hasName { 58 | numLabels = len(m) 59 | } 60 | labelStrings := make([]string, 0, numLabels) 61 | for label, value := range m { 62 | if label != MetricNameLabel { 63 | labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) 64 | } 65 | } 66 | 67 | switch numLabels { 68 | case 0: 69 | if hasName { 70 | return string(metricName) 71 | } 72 | return "{}" 73 | default: 74 | sort.Strings(labelStrings) 75 | return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) 76 | } 77 | } 78 | 79 | // Fingerprint returns a Metric's Fingerprint. 80 | func (m Metric) Fingerprint() Fingerprint { 81 | return LabelSet(m).Fingerprint() 82 | } 83 | 84 | // FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing 85 | // algorithm, which is, however, more susceptible to hash collisions. 86 | func (m Metric) FastFingerprint() Fingerprint { 87 | return LabelSet(m).FastFingerprint() 88 | } 89 | 90 | // IsValidMetricName returns true iff name matches the pattern of MetricNameRE. 91 | // This function, however, does not use MetricNameRE for the check but a much 92 | // faster hardcoded implementation. 93 | func IsValidMetricName(n LabelValue) bool { 94 | if len(n) == 0 { 95 | return false 96 | } 97 | for i, b := range n { 98 | if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { 99 | return false 100 | } 101 | } 102 | return true 103 | } 104 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright(c) 2018 DSiSc Group. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | VERSION=$(shell grep "const Version" version/version.go | sed -E 's/.*"(.+)"$$/\1/') 16 | GIT_COMMIT=$(shell git rev-parse HEAD) 17 | GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true) 18 | BUILD_DATE=$(shell date '+%Y-%m-%d-%H:%M:%S') 19 | 20 | .PHONY: default help all build test unit-test devenv gotools clean coverage 21 | 22 | default: all 23 | 24 | help: 25 | @echo 'Management commands for DSiSc/craft:' 26 | @echo 27 | @echo 'Usage:' 28 | @echo ' make lint Check code style.' 29 | @echo ' make spelling Check code spelling.' 30 | @echo ' make fmt Check code formatting.' 31 | @echo ' make static-check Static code check: style & spelling & formatting.' 32 | @echo ' make build Compile the project.' 33 | @echo ' make vet Examine source code and reports suspicious constructs.' 34 | @echo ' make unit-test Run unit tests with coverage report.' 35 | @echo ' make test Run unit tests with coverage report.' 
36 | @echo ' make devenv Prepare devenv for test or build.' 37 | @echo ' make fetch-deps Run govendor fetch for deps.' 38 | @echo ' make gotools Prepare go tools depended.' 39 | @echo ' make clean Clean the directory tree.' 40 | @echo 41 | 42 | all: static-check build test 43 | 44 | fmt: 45 | gofmt -d -l . 46 | 47 | spelling: 48 | bash scripts/check_spelling.sh 49 | 50 | lint: 51 | @echo "Check code style..." 52 | golint `go list ./...` 53 | 54 | static-check: fmt spelling lint 55 | 56 | build: 57 | @echo "building craft ${VERSION}" 58 | @echo "GOPATH=${GOPATH}" 59 | go build -v -ldflags "-X github.com/DSiSc/craft/version.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X github.com/DSiSc/craft/version.BuildDate=${BUILD_DATE}" ./... 60 | 61 | vet: 62 | @echo "Examine source code and reports suspicious constructs..." 63 | go vet `go list ./...` 64 | 65 | unit-test: 66 | @echo "Run unit tests without coverage report..." 67 | go test -v -count=1 -race ./... 68 | 69 | coverage: 70 | @echo "Run unit tests with coverage report..." 71 | bash scripts/unit_test_cov.sh 72 | 73 | test: vet unit-test 74 | 75 | get-tools: 76 | # official tools 77 | go get -u golang.org/x/lint/golint 78 | @# go get -u golang.org/x/tools/cmd/gotype 79 | @# go get -u golang.org/x/tools/cmd/goimports 80 | @# go get -u golang.org/x/tools/cmd/godoc 81 | @# go get -u golang.org/x/tools/cmd/gorename 82 | @# go get -u golang.org/x/tools/cmd/gomvpkg 83 | 84 | # thirdparty tools 85 | go get -u github.com/stretchr/testify 86 | @# go get -u github.com/kardianos/govendor 87 | @# go get -u github.com/axw/gocov/... 88 | @# go get -u github.com/client9/misspell/cmd/misspell 89 | 90 | fetch-deps: get-tools 91 | @echo "Run go get to fetch dependencies as described in dependencies.txt ..." 92 | @bash scripts/ensure_deps.sh 93 | 94 | ## tools & deps 95 | devenv: get-tools fetch-deps 96 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/sampler.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | "math/rand" 5 | "sync/atomic" 6 | "time" 7 | ) 8 | 9 | var ( 10 | // Often samples log every ~ 10 events. 11 | Often = RandomSampler(10) 12 | // Sometimes samples log every ~ 100 events. 13 | Sometimes = RandomSampler(100) 14 | // Rarely samples log every ~ 1000 events. 15 | Rarely = RandomSampler(1000) 16 | ) 17 | 18 | // Sampler defines an interface to a log sampler. 19 | type Sampler interface { 20 | // Sample returns true if the event should be part of the sample, false if 21 | // the event should be dropped. 22 | Sample(lvl Level) bool 23 | } 24 | 25 | // RandomSampler use a PRNG to randomly sample an event out of N events, 26 | // regardless of their level. 27 | type RandomSampler uint32 28 | 29 | // Sample implements the Sampler interface. 30 | func (s RandomSampler) Sample(lvl Level) bool { 31 | if s <= 0 { 32 | return false 33 | } 34 | if rand.Intn(int(s)) != 0 { 35 | return false 36 | } 37 | return true 38 | } 39 | 40 | // BasicSampler is a sampler that will send every Nth events, regardless of 41 | // there level. 42 | type BasicSampler struct { 43 | N uint32 44 | counter uint32 45 | } 46 | 47 | // Sample implements the Sampler interface. 48 | func (s *BasicSampler) Sample(lvl Level) bool { 49 | c := atomic.AddUint32(&s.counter, 1) 50 | return c%s.N == 0 51 | } 52 | 53 | // BurstSampler lets Burst events pass per Period then pass the decision to 54 | // NextSampler. 
If Sampler is not set, all subsequent events are rejected. 55 | type BurstSampler struct { 56 | // Burst is the maximum number of event per period allowed before calling 57 | // NextSampler. 58 | Burst uint32 59 | // Period defines the burst period. If 0, NextSampler is always called. 60 | Period time.Duration 61 | // NextSampler is the sampler used after the burst is reached. If nil, 62 | // events are always rejected after the burst. 63 | NextSampler Sampler 64 | 65 | counter uint32 66 | resetAt int64 67 | } 68 | 69 | // Sample implements the Sampler interface. 70 | func (s *BurstSampler) Sample(lvl Level) bool { 71 | if s.Burst > 0 && s.Period > 0 { 72 | if s.inc() <= s.Burst { 73 | return true 74 | } 75 | } 76 | if s.NextSampler == nil { 77 | return false 78 | } 79 | return s.NextSampler.Sample(lvl) 80 | } 81 | 82 | func (s *BurstSampler) inc() uint32 { 83 | now := time.Now().UnixNano() 84 | resetAt := atomic.LoadInt64(&s.resetAt) 85 | var c uint32 86 | if now > resetAt { 87 | c = 1 88 | atomic.StoreUint32(&s.counter, c) 89 | newResetAt := now + s.Period.Nanoseconds() 90 | reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt) 91 | if !reset { 92 | // Lost the race with another goroutine trying to reset. 93 | c = atomic.AddUint32(&s.counter, 1) 94 | } 95 | } else { 96 | c = atomic.AddUint32(&s.counter, 1) 97 | } 98 | return c 99 | } 100 | 101 | // LevelSampler applies a different sampler for each level. 102 | type LevelSampler struct { 103 | DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler 104 | } 105 | 106 | func (s LevelSampler) Sample(lvl Level) bool { 107 | switch lvl { 108 | case DebugLevel: 109 | if s.DebugSampler != nil { 110 | return s.DebugSampler.Sample(lvl) 111 | } 112 | case InfoLevel: 113 | if s.InfoSampler != nil { 114 | return s.InfoSampler.Sample(lvl) 115 | } 116 | case WarnLevel: 117 | if s.WarnSampler != nil { 118 | return s.WarnSampler.Sample(lvl) 119 | } 120 | case ErrorLevel: 121 | if s.ErrorSampler != nil { 122 | return s.ErrorSampler.Sample(lvl) 123 | } 124 | } 125 | return true 126 | } 127 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/cbor/README.md: -------------------------------------------------------------------------------- 1 | ## Reference: 2 | CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) 3 | 4 | ## Comparison of JSON vs CBOR 5 | 6 | Two main areas of reduction are: 7 | 8 | 1. CPU usage to write a log msg 9 | 2. Size (in bytes) of log messages. 
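Which encoder a program actually uses is fixed at build time. A minimal sketch
(assuming the `binary_log` build tag that selects `encoder_cbor.go` over
`encoder_json.go` in this vendored zerolog; with a default build the same
program emits the JSON form of the message measured below):

```go
// Build normally for JSON output, or select the CBOR encoder with the
// build tag assumed above:
//
//	go build -tags binary_log .
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout).With().Timestamp().Logger()
	// Mirrors the sample message used for the size comparison below.
	logger.Error().Int("Fault", 41650).Msg("Some Message")
}
```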
10 | 11 | 12 | CPU Usage savings are below: 13 | ``` 14 | name JSON time/op CBOR time/op delta 15 | Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) 16 | ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) 17 | ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) 18 | LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) 19 | LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) 20 | LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) 21 | LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) 22 | LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) 23 | LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) 24 | LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) 25 | LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) 26 | LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) 27 | LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) 28 | LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) 29 | LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) 30 | LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) 31 | LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) 32 | LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) 33 | LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) 34 | LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) 35 | LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) 36 | LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) 37 | LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) 38 | LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) 39 | ``` 40 | 41 | Log message size savings is greatly dependent on the number and type of fields in the log message. 42 | Assuming this log message (with an Integer, timestamp and string, in addition to level). 43 | 44 | `{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}` 45 | 46 | Two measurements were done for the log file sizes - one without any compression, second 47 | using [compress/zlib](https://golang.org/pkg/compress/zlib/). 48 | 49 | Results for 10,000 log messages: 50 | 51 | | Log Format | Plain File Size (in KB) | Compressed File Size (in KB) | 52 | | :--- | :---: | :---: | 53 | | JSON | 920 | 28 | 54 | | CBOR | 550 | 28 | 55 | 56 | The example used to calculate the above data is available in [Examples](examples). 57 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/console.go: -------------------------------------------------------------------------------- 1 | package zerolog 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "sort" 9 | "strconv" 10 | "strings" 11 | "sync" 12 | "time" 13 | ) 14 | 15 | const ( 16 | cReset = 0 17 | cBold = 1 18 | cRed = 31 19 | cGreen = 32 20 | cYellow = 33 21 | cBlue = 34 22 | cMagenta = 35 23 | cCyan = 36 24 | cGray = 37 25 | cDarkGray = 90 26 | ) 27 | 28 | var consoleBufPool = sync.Pool{ 29 | New: func() interface{} { 30 | return bytes.NewBuffer(make([]byte, 0, 100)) 31 | }, 32 | } 33 | 34 | // LevelWidth defines the desired character width of the log level column. 35 | // Default 0 does not trim or pad (variable width based level text, e.g. 
"INFO" or "ERROR") 36 | var LevelWidth = 0 37 | 38 | // ConsoleWriter reads a JSON object per write operation and output an 39 | // optionally colored human readable version on the Out writer. 40 | type ConsoleWriter struct { 41 | Out io.Writer 42 | NoColor bool 43 | } 44 | 45 | func (w ConsoleWriter) Write(p []byte) (n int, err error) { 46 | var event map[string]interface{} 47 | p = decodeIfBinaryToBytes(p) 48 | d := json.NewDecoder(bytes.NewReader(p)) 49 | d.UseNumber() 50 | err = d.Decode(&event) 51 | if err != nil { 52 | return 53 | } 54 | buf := consoleBufPool.Get().(*bytes.Buffer) 55 | defer consoleBufPool.Put(buf) 56 | lvlColor := cReset 57 | level := "????" 58 | if l, ok := event[LevelFieldName].(string); ok { 59 | if !w.NoColor { 60 | lvlColor = levelColor(l) 61 | } 62 | level = strings.ToUpper(l) 63 | if LevelWidth > 0 { 64 | if padding := LevelWidth - len(level); padding > 0 { 65 | level += strings.Repeat(" ", padding) 66 | } else { 67 | level = level[0:LevelWidth] 68 | } 69 | } 70 | } 71 | fmt.Fprintf(buf, "%s |%s| %s", 72 | colorize(formatTime(event[TimestampFieldName]), cGray, !w.NoColor), 73 | colorize(level, lvlColor, !w.NoColor), 74 | colorize(event[MessageFieldName], cReset, !w.NoColor)) 75 | fields := make([]string, 0, len(event)) 76 | for field := range event { 77 | switch field { 78 | case LevelFieldName, TimestampFieldName, MessageFieldName: 79 | continue 80 | } 81 | fields = append(fields, field) 82 | } 83 | sort.Strings(fields) 84 | for _, field := range fields { 85 | fmt.Fprintf(buf, " %s=", colorize(field, cCyan, !w.NoColor)) 86 | switch value := event[field].(type) { 87 | case string: 88 | if needsQuote(value) { 89 | buf.WriteString(strconv.Quote(value)) 90 | } else { 91 | buf.WriteString(value) 92 | } 93 | case json.Number: 94 | fmt.Fprint(buf, value) 95 | default: 96 | b, err := json.Marshal(value) 97 | if err != nil { 98 | fmt.Fprintf(buf, "[error: %v]", err) 99 | } else { 100 | fmt.Fprint(buf, string(b)) 101 | } 102 | } 103 | } 104 | buf.WriteByte('\n') 105 | buf.WriteTo(w.Out) 106 | n = len(p) 107 | return 108 | } 109 | 110 | func formatTime(t interface{}) string { 111 | switch t := t.(type) { 112 | case string: 113 | return t 114 | case json.Number: 115 | u, _ := t.Int64() 116 | return time.Unix(u, 0).Format(time.RFC3339) 117 | } 118 | return "" 119 | } 120 | 121 | func colorize(s interface{}, color int, enabled bool) string { 122 | if !enabled { 123 | return fmt.Sprintf("%v", s) 124 | } 125 | return fmt.Sprintf("\x1b[%dm%v\x1b[0m", color, s) 126 | } 127 | 128 | func levelColor(level string) int { 129 | switch level { 130 | case "debug": 131 | return cMagenta 132 | case "info": 133 | return cGreen 134 | case "warn": 135 | return cYellow 136 | case "error", "fatal", "panic": 137 | return cRed 138 | default: 139 | return cReset 140 | } 141 | } 142 | 143 | func needsQuote(s string) bool { 144 | for i := range s { 145 | if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { 146 | return true 147 | } 148 | } 149 | return false 150 | } 151 | -------------------------------------------------------------------------------- /vendor/github.com/rs/zerolog/internal/json/string.go: -------------------------------------------------------------------------------- 1 | package json 2 | 3 | import "unicode/utf8" 4 | 5 | const hex = "0123456789abcdef" 6 | 7 | var noEscapeTable = [256]bool{} 8 | 9 | func init() { 10 | for i := 0; i <= 0x7e; i++ { 11 | noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' 12 | } 13 | } 14 | 15 | // AppendStrings encodes 
the input strings to json and 16 | // appends the encoded string list to the input byte slice. 17 | func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { 18 | if len(vals) == 0 { 19 | return append(dst, '[', ']') 20 | } 21 | dst = append(dst, '[') 22 | dst = e.AppendString(dst, vals[0]) 23 | if len(vals) > 1 { 24 | for _, val := range vals[1:] { 25 | dst = e.AppendString(append(dst, ','), val) 26 | } 27 | } 28 | dst = append(dst, ']') 29 | return dst 30 | } 31 | 32 | // AppendString encodes the input string to json and appends 33 | // the encoded string to the input byte slice. 34 | // 35 | // The operation loops though each byte in the string looking 36 | // for characters that need json or utf8 encoding. If the string 37 | // does not need encoding, then the string is appended in it's 38 | // entirety to the byte slice. 39 | // If we encounter a byte that does need encoding, switch up 40 | // the operation and perform a byte-by-byte read-encode-append. 41 | func (Encoder) AppendString(dst []byte, s string) []byte { 42 | // Start with a double quote. 43 | dst = append(dst, '"') 44 | // Loop through each character in the string. 45 | for i := 0; i < len(s); i++ { 46 | // Check if the character needs encoding. Control characters, slashes, 47 | // and the double quote need json encoding. Bytes above the ascii 48 | // boundary needs utf8 encoding. 49 | if !noEscapeTable[s[i]] { 50 | // We encountered a character that needs to be encoded. Switch 51 | // to complex version of the algorithm. 52 | dst = appendStringComplex(dst, s, i) 53 | return append(dst, '"') 54 | } 55 | } 56 | // The string has no need for encoding an therefore is directly 57 | // appended to the byte slice. 58 | dst = append(dst, s...) 59 | // End with a double quote 60 | return append(dst, '"') 61 | } 62 | 63 | // appendStringComplex is used by appendString to take over an in 64 | // progress JSON string encoding that encountered a character that needs 65 | // to be encoded. 66 | func appendStringComplex(dst []byte, s string, i int) []byte { 67 | start := 0 68 | for i < len(s) { 69 | b := s[i] 70 | if b >= utf8.RuneSelf { 71 | r, size := utf8.DecodeRuneInString(s[i:]) 72 | if r == utf8.RuneError && size == 1 { 73 | // In case of error, first append previous simple characters to 74 | // the byte slice if any and append a remplacement character code 75 | // in place of the invalid sequence. 76 | if start < i { 77 | dst = append(dst, s[start:i]...) 78 | } 79 | dst = append(dst, `\ufffd`...) 80 | i += size 81 | start = i 82 | continue 83 | } 84 | i += size 85 | continue 86 | } 87 | if noEscapeTable[b] { 88 | i++ 89 | continue 90 | } 91 | // We encountered a character that needs to be encoded. 92 | // Let's append the previous simple characters to the byte slice 93 | // and switch our operation to read and encode the remainder 94 | // characters byte-by-byte. 95 | if start < i { 96 | dst = append(dst, s[start:i]...) 97 | } 98 | switch b { 99 | case '"', '\\': 100 | dst = append(dst, '\\', b) 101 | case '\b': 102 | dst = append(dst, '\\', 'b') 103 | case '\f': 104 | dst = append(dst, '\\', 'f') 105 | case '\n': 106 | dst = append(dst, '\\', 'n') 107 | case '\r': 108 | dst = append(dst, '\\', 'r') 109 | case '\t': 110 | dst = append(dst, '\\', 't') 111 | default: 112 | dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) 113 | } 114 | i++ 115 | start = i 116 | } 117 | if start < len(s) { 118 | dst = append(dst, s[start:]...) 
119 | } 120 | return dst 121 | } 122 | -------------------------------------------------------------------------------- /log/config.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "io" 5 | "os" 6 | ) 7 | 8 | // Appender is responsible for delivering LogEvents to their destination. 9 | type Appender struct { 10 | Enabled bool 11 | LogLevel Level 12 | LogType string 13 | LogPath string 14 | Output io.Writer 15 | Format string 16 | ShowCaller bool 17 | ShowHostname bool 18 | } 19 | 20 | // OutputFlags are printed in log record that can be customized. 21 | type OutputFlags struct { 22 | // TimestampFieldName is the field name used for the timestamp field. 23 | TimestampFieldName string 24 | // LevelFieldName is the field name used for the level field. 25 | LevelFieldName string 26 | // MessageFieldName is the field name used for the message field. 27 | MessageFieldName string 28 | // ErrorFieldName is the field name used for error fields. 29 | ErrorFieldName string 30 | // CallerFieldName is the field name used for caller field. 31 | CallerFieldName string 32 | // HostnameFieldName is the field name used for hostname field. 33 | HostnameFieldName string 34 | } 35 | 36 | // Config includes configurations for our log, such as log-level. 37 | // For more log destinations just add "Appender" into "Config.[]Appenders". 38 | type Config struct { 39 | Enabled bool 40 | Provider Provider 41 | GlobalLogLevel Level 42 | TimeFieldFormat string 43 | Appenders map[string]*Appender 44 | OutputFlags *OutputFlags 45 | } 46 | 47 | // stdoutAppender is a pre-configed console log. 48 | var stdoutAppender = &Appender{ 49 | Enabled: true, 50 | LogLevel: InfoLevel, 51 | LogType: ConsoleLog, 52 | LogPath: ConsoleStdout, 53 | Output: os.Stdout, 54 | Format: TextFmt, 55 | ShowCaller: true, 56 | ShowHostname: true, 57 | } 58 | 59 | // globalOutputFlags contains pre-defined output flags. Usually no need to modify. 60 | var globalOutputFlags = &OutputFlags{ 61 | TimestampFieldName: "time", 62 | LevelFieldName: "level", 63 | MessageFieldName: "message", 64 | ErrorFieldName: "error", 65 | CallerFieldName: "caller", 66 | HostnameFieldName: "host", 67 | } 68 | 69 | // globalConfig is a set of default log configuration with only one "stdoutAppender". 70 | var globalConfig = &Config{ 71 | Enabled: true, 72 | Provider: Zerolog, 73 | GlobalLogLevel: DebugLevel, 74 | TimeFieldFormat: "2006-01-02 15:04:05.000", 75 | Appenders: map[string]*Appender{"stdout": stdoutAppender}, 76 | OutputFlags: globalOutputFlags, 77 | } 78 | 79 | // buildLogger builds a "Logger" with a number of backend logger inside. 80 | // Each logger corresponds to an "Appender". 81 | func (config *Config) buildLogger() Logger { 82 | if !config.Enabled { 83 | return nonLogger{} 84 | } 85 | switch config.Provider { 86 | case Zerolog: 87 | logger := buildZeroLogger(config) 88 | return logger 89 | } 90 | return nil 91 | } 92 | 93 | // Level defines log levels. 94 | type Level uint8 95 | 96 | const ( 97 | // DebugLevel defines debug log level. 98 | DebugLevel Level = iota 99 | // InfoLevel defines info log level. 100 | InfoLevel 101 | // WarnLevel defines warn log level. 102 | WarnLevel 103 | // ErrorLevel defines error log level. 104 | ErrorLevel 105 | // FatalLevel defines fatal log level. 106 | FatalLevel 107 | // PanicLevel defines panic log level. 108 | PanicLevel 109 | // Disabled disables the logger. 110 | Disabled 111 | ) 112 | 113 | // Provider enumerates backend log libs. 
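// Currently Zerolog is the only implementation; adding another backend means
// defining a new Provider value here and handling it in Config.buildLogger above.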
114 | type Provider uint8 115 | 116 | const ( 117 | Zerolog Provider = iota 118 | ) 119 | 120 | const ( 121 | // JsonFmt indicates that log output generated in form of JSON. 122 | JsonFmt string = "JSON" 123 | // TextFmt indicates that log output generated in form of TEXT. 124 | TextFmt string = "TEXT" 125 | // ConsoleLog indicates that log output to console. 126 | ConsoleLog string = "CONSOLE_LOG" 127 | // FileLog indicates that log output to console. 128 | FileLog string = "FILE_LOG" 129 | // ConsoleStdout indicates than console log output to os.Stdout 130 | ConsoleStdout string = "STDOUT" 131 | // ConsoleStderr indicates than console log output to os.Stderr 132 | ConsoleStderr string = "STDERR" 133 | ) 134 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/internal/lv/space.go: -------------------------------------------------------------------------------- 1 | package lv 2 | 3 | import "sync" 4 | 5 | // NewSpace returns an N-dimensional vector space. 6 | func NewSpace() *Space { 7 | return &Space{} 8 | } 9 | 10 | // Space represents an N-dimensional vector space. Each name and unique label 11 | // value pair establishes a new dimension and point within that dimension. Order 12 | // matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1]. 13 | type Space struct { 14 | mtx sync.RWMutex 15 | nodes map[string]*node 16 | } 17 | 18 | // Observe locates the time series identified by the name and label values in 19 | // the vector space, and appends the value to the list of observations. 20 | func (s *Space) Observe(name string, lvs LabelValues, value float64) { 21 | s.nodeFor(name).observe(lvs, value) 22 | } 23 | 24 | // Add locates the time series identified by the name and label values in 25 | // the vector space, and appends the delta to the last value in the list of 26 | // observations. 27 | func (s *Space) Add(name string, lvs LabelValues, delta float64) { 28 | s.nodeFor(name).add(lvs, delta) 29 | } 30 | 31 | // Walk traverses the vector space and invokes fn for each non-empty time series 32 | // which is encountered. Return false to abort the traversal. 33 | func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { 34 | s.mtx.RLock() 35 | defer s.mtx.RUnlock() 36 | for name, node := range s.nodes { 37 | f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) } 38 | if !node.walk(LabelValues{}, f) { 39 | return 40 | } 41 | } 42 | } 43 | 44 | // Reset empties the current space and returns a new Space with the old 45 | // contents. Reset a Space to get an immutable copy suitable for walking. 46 | func (s *Space) Reset() *Space { 47 | s.mtx.Lock() 48 | defer s.mtx.Unlock() 49 | n := NewSpace() 50 | n.nodes, s.nodes = s.nodes, n.nodes 51 | return n 52 | } 53 | 54 | func (s *Space) nodeFor(name string) *node { 55 | s.mtx.Lock() 56 | defer s.mtx.Unlock() 57 | if s.nodes == nil { 58 | s.nodes = map[string]*node{} 59 | } 60 | n, ok := s.nodes[name] 61 | if !ok { 62 | n = &node{} 63 | s.nodes[name] = n 64 | } 65 | return n 66 | } 67 | 68 | // node exists at a specific point in the N-dimensional vector space of all 69 | // possible label values. The node collects observations and has child nodes 70 | // with greater specificity. 
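// For orientation, an illustrative sketch (metric name and label values are
// invented) of driving the Space API defined above from a caller; LabelValues
// is the flat label/value pair slice declared in labelvalues.go:
//
//	s := lv.NewSpace()
//	s.Observe("request_duration_seconds", lv.LabelValues{"method", "GET"}, 0.23)
//	s.Add("requests_total", lv.LabelValues{"code", "200"}, 1)
//	s.Reset().Walk(func(name string, lvs lv.LabelValues, obs []float64) bool {
//		fmt.Println(name, lvs, obs)
//		return true
//	})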
71 | type node struct { 72 | mtx sync.RWMutex 73 | observations []float64 74 | children map[pair]*node 75 | } 76 | 77 | type pair struct{ label, value string } 78 | 79 | func (n *node) observe(lvs LabelValues, value float64) { 80 | n.mtx.Lock() 81 | defer n.mtx.Unlock() 82 | if len(lvs) == 0 { 83 | n.observations = append(n.observations, value) 84 | return 85 | } 86 | if len(lvs) < 2 { 87 | panic("too few LabelValues; programmer error!") 88 | } 89 | head, tail := pair{lvs[0], lvs[1]}, lvs[2:] 90 | if n.children == nil { 91 | n.children = map[pair]*node{} 92 | } 93 | child, ok := n.children[head] 94 | if !ok { 95 | child = &node{} 96 | n.children[head] = child 97 | } 98 | child.observe(tail, value) 99 | } 100 | 101 | func (n *node) add(lvs LabelValues, delta float64) { 102 | n.mtx.Lock() 103 | defer n.mtx.Unlock() 104 | if len(lvs) == 0 { 105 | var value float64 106 | if len(n.observations) > 0 { 107 | value = last(n.observations) + delta 108 | } else { 109 | value = delta 110 | } 111 | n.observations = append(n.observations, value) 112 | return 113 | } 114 | if len(lvs) < 2 { 115 | panic("too few LabelValues; programmer error!") 116 | } 117 | head, tail := pair{lvs[0], lvs[1]}, lvs[2:] 118 | if n.children == nil { 119 | n.children = map[pair]*node{} 120 | } 121 | child, ok := n.children[head] 122 | if !ok { 123 | child = &node{} 124 | n.children[head] = child 125 | } 126 | child.add(tail, delta) 127 | } 128 | 129 | func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { 130 | n.mtx.RLock() 131 | defer n.mtx.RUnlock() 132 | if len(n.observations) > 0 && !fn(lvs, n.observations) { 133 | return false 134 | } 135 | for p, child := range n.children { 136 | if !child.walk(append(lvs, p.label, p.value), fn) { 137 | return false 138 | } 139 | } 140 | return true 141 | } 142 | 143 | func last(a []float64) float64 { 144 | return a[len(a)-1] 145 | } 146 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/common/model/alert.go: -------------------------------------------------------------------------------- 1 | // Copyright 2013 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package model 15 | 16 | import ( 17 | "fmt" 18 | "time" 19 | ) 20 | 21 | type AlertStatus string 22 | 23 | const ( 24 | AlertFiring AlertStatus = "firing" 25 | AlertResolved AlertStatus = "resolved" 26 | ) 27 | 28 | // Alert is a generic representation of an alert in the Prometheus eco-system. 29 | type Alert struct { 30 | // Label value pairs for purpose of aggregation, matching, and disposition 31 | // dispatching. This must minimally include an "alertname" label. 32 | Labels LabelSet `json:"labels"` 33 | 34 | // Extra key/value information which does not define alert identity. 35 | Annotations LabelSet `json:"annotations"` 36 | 37 | // The known time range for this alert. Both ends are optional. 
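// A zero EndsAt means the alert is still considered firing; see ResolvedAt below.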
38 | StartsAt time.Time `json:"startsAt,omitempty"` 39 | EndsAt time.Time `json:"endsAt,omitempty"` 40 | GeneratorURL string `json:"generatorURL"` 41 | } 42 | 43 | // Name returns the name of the alert. It is equivalent to the "alertname" label. 44 | func (a *Alert) Name() string { 45 | return string(a.Labels[AlertNameLabel]) 46 | } 47 | 48 | // Fingerprint returns a unique hash for the alert. It is equivalent to 49 | // the fingerprint of the alert's label set. 50 | func (a *Alert) Fingerprint() Fingerprint { 51 | return a.Labels.Fingerprint() 52 | } 53 | 54 | func (a *Alert) String() string { 55 | s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) 56 | if a.Resolved() { 57 | return s + "[resolved]" 58 | } 59 | return s + "[active]" 60 | } 61 | 62 | // Resolved returns true iff the activity interval ended in the past. 63 | func (a *Alert) Resolved() bool { 64 | return a.ResolvedAt(time.Now()) 65 | } 66 | 67 | // ResolvedAt returns true off the activity interval ended before 68 | // the given timestamp. 69 | func (a *Alert) ResolvedAt(ts time.Time) bool { 70 | if a.EndsAt.IsZero() { 71 | return false 72 | } 73 | return !a.EndsAt.After(ts) 74 | } 75 | 76 | // Status returns the status of the alert. 77 | func (a *Alert) Status() AlertStatus { 78 | if a.Resolved() { 79 | return AlertResolved 80 | } 81 | return AlertFiring 82 | } 83 | 84 | // Validate checks whether the alert data is inconsistent. 85 | func (a *Alert) Validate() error { 86 | if a.StartsAt.IsZero() { 87 | return fmt.Errorf("start time missing") 88 | } 89 | if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { 90 | return fmt.Errorf("start time must be before end time") 91 | } 92 | if err := a.Labels.Validate(); err != nil { 93 | return fmt.Errorf("invalid label set: %s", err) 94 | } 95 | if len(a.Labels) == 0 { 96 | return fmt.Errorf("at least one label pair required") 97 | } 98 | if err := a.Annotations.Validate(); err != nil { 99 | return fmt.Errorf("invalid annotations: %s", err) 100 | } 101 | return nil 102 | } 103 | 104 | // Alert is a list of alerts that can be sorted in chronological order. 105 | type Alerts []*Alert 106 | 107 | func (as Alerts) Len() int { return len(as) } 108 | func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } 109 | 110 | func (as Alerts) Less(i, j int) bool { 111 | if as[i].StartsAt.Before(as[j].StartsAt) { 112 | return true 113 | } 114 | if as[i].EndsAt.Before(as[j].EndsAt) { 115 | return true 116 | } 117 | return as[i].Fingerprint() < as[j].Fingerprint() 118 | } 119 | 120 | // HasFiring returns true iff one of the alerts is not resolved. 121 | func (as Alerts) HasFiring() bool { 122 | for _, a := range as { 123 | if !a.Resolved() { 124 | return true 125 | } 126 | } 127 | return false 128 | } 129 | 130 | // Status returns StatusFiring iff at least one of the alerts is firing. 131 | func (as Alerts) Status() AlertStatus { 132 | if as.HasFiring() { 133 | return AlertFiring 134 | } 135 | return AlertResolved 136 | } 137 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package promhttp 15 | 16 | import ( 17 | "net/http" 18 | "time" 19 | 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | // The RoundTripperFunc type is an adapter to allow the use of ordinary 24 | // functions as RoundTrippers. If f is a function with the appropriate 25 | // signature, RountTripperFunc(f) is a RoundTripper that calls f. 26 | type RoundTripperFunc func(req *http.Request) (*http.Response, error) 27 | 28 | // RoundTrip implements the RoundTripper interface. 29 | func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { 30 | return rt(r) 31 | } 32 | 33 | // InstrumentRoundTripperInFlight is a middleware that wraps the provided 34 | // http.RoundTripper. It sets the provided prometheus.Gauge to the number of 35 | // requests currently handled by the wrapped http.RoundTripper. 36 | // 37 | // See the example for ExampleInstrumentRoundTripperDuration for example usage. 38 | func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { 39 | return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { 40 | gauge.Inc() 41 | defer gauge.Dec() 42 | return next.RoundTrip(r) 43 | }) 44 | } 45 | 46 | // InstrumentRoundTripperCounter is a middleware that wraps the provided 47 | // http.RoundTripper to observe the request result with the provided CounterVec. 48 | // The CounterVec must have zero, one, or two non-const non-curried labels. For 49 | // those, the only allowed label names are "code" and "method". The function 50 | // panics otherwise. Partitioning of the CounterVec happens by HTTP status code 51 | // and/or HTTP method if the respective instance label names are present in the 52 | // CounterVec. For unpartitioned counting, use a CounterVec with zero labels. 53 | // 54 | // If the wrapped RoundTripper panics or returns a non-nil error, the Counter 55 | // is not incremented. 56 | // 57 | // See the example for ExampleInstrumentRoundTripperDuration for example usage. 58 | func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { 59 | code, method := checkLabels(counter) 60 | 61 | return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { 62 | resp, err := next.RoundTrip(r) 63 | if err == nil { 64 | counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() 65 | } 66 | return resp, err 67 | }) 68 | } 69 | 70 | // InstrumentRoundTripperDuration is a middleware that wraps the provided 71 | // http.RoundTripper to observe the request duration with the provided 72 | // ObserverVec. The ObserverVec must have zero, one, or two non-const 73 | // non-curried labels. For those, the only allowed label names are "code" and 74 | // "method". The function panics otherwise. The Observe method of the Observer 75 | // in the ObserverVec is called with the request duration in 76 | // seconds. Partitioning happens by HTTP status code and/or HTTP method if the 77 | // respective instance label names are present in the ObserverVec. 
For 78 | // unpartitioned observations, use an ObserverVec with zero labels. Note that 79 | // partitioning of Histograms is expensive and should be used judiciously. 80 | // 81 | // If the wrapped RoundTripper panics or returns a non-nil error, no values are 82 | // reported. 83 | // 84 | // Note that this method is only guaranteed to never observe negative durations 85 | // if used with Go1.9+. 86 | func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { 87 | code, method := checkLabels(obs) 88 | 89 | return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { 90 | start := time.Now() 91 | resp, err := next.RoundTrip(r) 92 | if err == nil { 93 | obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) 94 | } 95 | return resp, err 96 | }) 97 | } 98 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package prometheus 15 | 16 | import ( 17 | "encoding/json" 18 | "expvar" 19 | ) 20 | 21 | type expvarCollector struct { 22 | exports map[string]*Desc 23 | } 24 | 25 | // NewExpvarCollector returns a newly allocated expvar Collector that still has 26 | // to be registered with a Prometheus registry. 27 | // 28 | // An expvar Collector collects metrics from the expvar interface. It provides a 29 | // quick way to expose numeric values that are already exported via expvar as 30 | // Prometheus metrics. Note that the data models of expvar and Prometheus are 31 | // fundamentally different, and that the expvar Collector is inherently slower 32 | // than native Prometheus metrics. Thus, the expvar Collector is probably great 33 | // for experiments and prototying, but you should seriously consider a more 34 | // direct implementation of Prometheus metrics for monitoring production 35 | // systems. 36 | // 37 | // The exports map has the following meaning: 38 | // 39 | // The keys in the map correspond to expvar keys, i.e. for every expvar key you 40 | // want to export as Prometheus metric, you need an entry in the exports 41 | // map. The descriptor mapped to each key describes how to export the expvar 42 | // value. It defines the name and the help string of the Prometheus metric 43 | // proxying the expvar value. The type will always be Untyped. 44 | // 45 | // For descriptors without variable labels, the expvar value must be a number or 46 | // a bool. The number is then directly exported as the Prometheus sample 47 | // value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values 48 | // that are not numbers or bools are silently ignored. 49 | // 50 | // If the descriptor has one variable label, the expvar value must be an expvar 51 | // map. 
The keys in the expvar map become the various values of the one 52 | // Prometheus label. The values in the expvar map must be numbers or bools again 53 | // as above. 54 | // 55 | // For descriptors with more than one variable label, the expvar must be a 56 | // nested expvar map, i.e. where the values of the topmost map are maps again 57 | // etc. until a depth is reached that corresponds to the number of labels. The 58 | // leaves of that structure must be numbers or bools as above to serve as the 59 | // sample values. 60 | // 61 | // Anything that does not fit into the scheme above is silently ignored. 62 | func NewExpvarCollector(exports map[string]*Desc) Collector { 63 | return &expvarCollector{ 64 | exports: exports, 65 | } 66 | } 67 | 68 | // Describe implements Collector. 69 | func (e *expvarCollector) Describe(ch chan<- *Desc) { 70 | for _, desc := range e.exports { 71 | ch <- desc 72 | } 73 | } 74 | 75 | // Collect implements Collector. 76 | func (e *expvarCollector) Collect(ch chan<- Metric) { 77 | for name, desc := range e.exports { 78 | var m Metric 79 | expVar := expvar.Get(name) 80 | if expVar == nil { 81 | continue 82 | } 83 | var v interface{} 84 | labels := make([]string, len(desc.variableLabels)) 85 | if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { 86 | ch <- NewInvalidMetric(desc, err) 87 | continue 88 | } 89 | var processValue func(v interface{}, i int) 90 | processValue = func(v interface{}, i int) { 91 | if i >= len(labels) { 92 | copiedLabels := append(make([]string, 0, len(labels)), labels...) 93 | switch v := v.(type) { 94 | case float64: 95 | m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) 96 | case bool: 97 | if v { 98 | m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) 99 | } else { 100 | m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) 101 | } 102 | default: 103 | return 104 | } 105 | ch <- m 106 | return 107 | } 108 | vm, ok := v.(map[string]interface{}) 109 | if !ok { 110 | return 111 | } 112 | for lv, val := range vm { 113 | labels[i] = lv 114 | processValue(val, i+1) 115 | } 116 | } 117 | processValue(v, 0) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /log/log.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "strings" 8 | ) 9 | 10 | // backendLogger is the actual logging object that we use, and is pre-built as a global variable. 11 | var backendLogger = globalConfig.buildLogger() 12 | 13 | // Disable to stop logging. 14 | func Disable() { 15 | globalConfig.Enabled = false 16 | backendLogger = &nonLogger{} 17 | } 18 | 19 | // Enable to start logging. 
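// Once enabled, the package-level helpers below forward to every configured
// appender. A caller-side sketch (format string and fields are illustrative):
//
//	log.Info("node started at height %d", height)
//	log.WarnKV("tx dropped", map[string]interface{}{"hash": h, "reason": "pool full"})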
20 | func Enable() { 21 | globalConfig.Enabled = true 22 | backendLogger = globalConfig.buildLogger() 23 | } 24 | 25 | func Debug(fmtmsg string, a ...interface{}) { 26 | backendLogger.Debug(fmt.Sprintf(fmtmsg, a...)) 27 | } 28 | 29 | func Info(fmtmsg string, a ...interface{}) { 30 | backendLogger.Info(fmt.Sprintf(fmtmsg, a...)) 31 | } 32 | 33 | func Warn(fmtmsg string, a ...interface{}) { 34 | backendLogger.Warn(fmt.Sprintf(fmtmsg, a...)) 35 | } 36 | 37 | func Error(fmtmsg string, a ...interface{}) { 38 | backendLogger.Error(fmt.Sprintf(fmtmsg, a...)) 39 | } 40 | 41 | func Fatal(fmtmsg string, a ...interface{}) { 42 | backendLogger.Fatal(fmt.Sprintf(fmtmsg, a...)) 43 | } 44 | 45 | func Panic(fmtmsg string, a ...interface{}) { 46 | backendLogger.Panic(fmt.Sprintf(fmtmsg, a...)) 47 | } 48 | 49 | func DebugKV(msg string, keyvals map[string]interface{}) { 50 | backendLogger.DebugKV(msg, keyvals) 51 | } 52 | 53 | func InfoKV(msg string, keyvals map[string]interface{}) { 54 | backendLogger.InfoKV(msg, keyvals) 55 | } 56 | 57 | func WarnKV(msg string, keyvals map[string]interface{}) { 58 | backendLogger.WarnKV(msg, keyvals) 59 | } 60 | 61 | func ErrorKV(msg string, keyvals map[string]interface{}) { 62 | backendLogger.ErrorKV(msg, keyvals) 63 | } 64 | 65 | func FatalKV(msg string, keyvals map[string]interface{}) { 66 | backendLogger.FatalKV(msg, keyvals) 67 | } 68 | 69 | func PanicKV(msg string, keyvals map[string]interface{}) { 70 | backendLogger.PanicKV(msg, keyvals) 71 | } 72 | 73 | func GetGlobalConfig() *Config { 74 | return globalConfig 75 | } 76 | 77 | // SetGlobalConfig is used to refresh logging manners. 78 | func SetGlobalConfig(config *Config) { 79 | globalConfig = config 80 | backendLogger = globalConfig.buildLogger() 81 | } 82 | 83 | func GetGlobalLogLevel() Level { 84 | return globalConfig.GlobalLogLevel 85 | } 86 | 87 | // SetGlobalLogLevel is used to restraint log-level of all "Appenders". 88 | func SetGlobalLogLevel(level Level) { 89 | globalConfig.GlobalLogLevel = level 90 | backendLogger.SetGlobalLogLevel(level) 91 | } 92 | 93 | func GetOutputFlags() *OutputFlags { 94 | return globalConfig.OutputFlags 95 | } 96 | 97 | // SetOutputFlags is used to reconfig output flags. 98 | func SetOutputFlags(flags *OutputFlags) { 99 | globalConfig.OutputFlags = flags 100 | backendLogger.SetOutputFlags(flags) 101 | } 102 | 103 | func SetTimestampFormat(format string) { 104 | globalConfig.TimeFieldFormat = format 105 | backendLogger.SetTimeFieldFormat(format) 106 | } 107 | 108 | // AddAppender adds/replaces a new logging destination. 109 | func AddAppender(appenderName string, output io.Writer, logLevel Level, format string, showCaller bool, showHostname bool) { 110 | globalConfig.Appenders[appenderName] = &Appender{ 111 | Enabled: true, 112 | LogLevel: logLevel, 113 | Output: output, 114 | Format: format, 115 | ShowCaller: showCaller, 116 | ShowHostname: showHostname, 117 | } 118 | backendLogger = globalConfig.buildLogger() 119 | } 120 | 121 | // RemoveAppender removes a logging appender by name. 122 | func RemoveAppender(appenderNameToRemove string) { 123 | delete(globalConfig.Appenders, appenderNameToRemove) 124 | backendLogger = globalConfig.buildLogger() 125 | } 126 | 127 | // AddFileAppender adds/replaces a new logging destination that append logs to a specified file. 
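// A caller-side sketch with illustrative values (parent directories are created
// when missing and records are appended to the file):
//
//	log.AddFileAppender("file", "/var/log/craft/craft.log", log.InfoLevel, log.JsonFmt, true, false)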
128 | func AddFileAppender(appenderName string, filePath string, logLevel Level, format string, showCaller bool, showHostname bool) { 129 | _, err := os.Stat(filePath) 130 | 131 | if err != nil { 132 | if os.IsNotExist(err) { 133 | parentPath := filePath[0:strings.LastIndex(filePath, "/")] 134 | _, err := os.Stat(filePath) 135 | if err != nil { 136 | if os.IsNotExist(err) { 137 | err := os.MkdirAll(parentPath, 0755) 138 | if err != nil { 139 | panic(err) 140 | } 141 | } 142 | } 143 | } else { 144 | panic(err) 145 | } 146 | } 147 | 148 | file, err := os.OpenFile(filePath, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0644) 149 | if err != nil { 150 | panic(err) 151 | } 152 | AddAppender(appenderName, file, logLevel, format, showCaller, showHostname) 153 | } 154 | 155 | // SetAppenders sets a set of "Appenders". 156 | func SetAppenders(appenders map[string]*Appender) { 157 | globalConfig.Appenders = appenders 158 | backendLogger = globalConfig.buildLogger() 159 | } 160 | -------------------------------------------------------------------------------- /log/zerolog_adapter.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/rs/zerolog" 7 | ) 8 | 9 | // Interface assertions 10 | var _ Logger = (*zeroLogger)(nil) 11 | 12 | // zeroLogger is used as an adapter which contains a set of loggers, 13 | // each logger corresponds to an "Appender". 14 | type zeroLogger struct { 15 | loggers map[string]*zerolog.Logger 16 | } 17 | 18 | func (zl *zeroLogger) SetOutputFlags(flags *OutputFlags) { 19 | zerolog.CallerFieldName = flags.CallerFieldName 20 | zerolog.ErrorFieldName = flags.ErrorFieldName 21 | zerolog.LevelFieldName = flags.LevelFieldName 22 | zerolog.MessageFieldName = flags.MessageFieldName 23 | zerolog.TimestampFieldName = flags.TimestampFieldName 24 | } 25 | 26 | func (zl *zeroLogger) SetGlobalLogLevel(level Level) { 27 | zerolog.SetGlobalLevel(parseLogLevel(level)) 28 | } 29 | 30 | func (zl *zeroLogger) SetTimeFieldFormat(format string) { 31 | zerolog.TimeFieldFormat = format 32 | } 33 | 34 | func (zl *zeroLogger) Debug(msg string) { 35 | for _, l := range zl.loggers { 36 | l.Debug().Msg(msg) 37 | } 38 | } 39 | 40 | func (zl *zeroLogger) Info(msg string) { 41 | for _, l := range zl.loggers { 42 | l.Info().Msg(msg) 43 | } 44 | } 45 | 46 | func (zl *zeroLogger) Warn(msg string) { 47 | for _, l := range zl.loggers { 48 | l.Warn().Msg(msg) 49 | } 50 | } 51 | 52 | func (zl *zeroLogger) Error(msg string) { 53 | for _, l := range zl.loggers { 54 | l.Error().Msg(msg) 55 | } 56 | } 57 | 58 | func (zl *zeroLogger) Fatal(msg string) { 59 | for _, l := range zl.loggers { 60 | l.Fatal().Msg(msg) 61 | } 62 | } 63 | 64 | func (zl *zeroLogger) Panic(msg string) { 65 | for _, l := range zl.loggers { 66 | l.Panic().Msg(msg) 67 | } 68 | } 69 | 70 | func (zl *zeroLogger) DebugKV(msg string, keyvals map[string]interface{}) { 71 | for _, l := range zl.loggers { 72 | l.Debug().Fields(keyvals).Msg(msg) 73 | } 74 | } 75 | 76 | func (zl *zeroLogger) InfoKV(msg string, keyvals map[string]interface{}) { 77 | for _, l := range zl.loggers { 78 | l.Info().Fields(keyvals).Msg(msg) 79 | } 80 | } 81 | 82 | func (zl *zeroLogger) WarnKV(msg string, keyvals map[string]interface{}) { 83 | for _, l := range zl.loggers { 84 | l.Warn().Fields(keyvals).Msg(msg) 85 | } 86 | } 87 | 88 | func (zl *zeroLogger) ErrorKV(msg string, keyvals map[string]interface{}) { 89 | for _, l := range zl.loggers { 90 | l.Error().Fields(keyvals).Msg(msg) 91 | } 92 | } 93 | 
94 | func (zl *zeroLogger) FatalKV(msg string, keyvals map[string]interface{}) { 95 | for _, l := range zl.loggers { 96 | l.Fatal().Fields(keyvals).Msg(msg) 97 | } 98 | } 99 | 100 | func (zl *zeroLogger) PanicKV(msg string, keyvals map[string]interface{}) { 101 | for _, l := range zl.loggers { 102 | l.Panic().Fields(keyvals).Msg(msg) 103 | } 104 | } 105 | 106 | // buildZeroLogger builds a zeroLogger out of the specified "Config". 107 | func buildZeroLogger(config *Config) Logger { 108 | 109 | if !config.Enabled { 110 | return nonLogger{} 111 | } 112 | 113 | zerolog.CallerSkipFrameCount = 4 114 | 115 | zerologger := &zeroLogger{make(map[string]*zerolog.Logger, len(config.Appenders))} 116 | 117 | zerolog.TimeFieldFormat = config.TimeFieldFormat 118 | zerolog.SetGlobalLevel(parseLogLevel(config.GlobalLogLevel)) 119 | zerologger.SetOutputFlags(config.OutputFlags) 120 | 121 | for s, a := range config.Appenders { 122 | if !a.Enabled { 123 | continue 124 | } 125 | var logger zerolog.Logger 126 | context := zerolog.New(a.Output).Level(parseLogLevel(a.LogLevel)).With().Timestamp() 127 | if a.ShowCaller { 128 | context = context.Caller() 129 | } 130 | if a.ShowHostname { 131 | hostname, err := os.Hostname() 132 | if err != nil { 133 | panic(err) 134 | } 135 | context = context.Str(globalOutputFlags.HostnameFieldName, hostname) 136 | } 137 | logger = context.Logger() 138 | if a.Format == TextFmt { 139 | if a.Output == os.Stdout || a.Output == os.Stderr { 140 | logger = logger.Output(zerolog.ConsoleWriter{Out: a.Output}) 141 | } else { 142 | logger = logger.Output(zerolog.ConsoleWriter{Out: a.Output, NoColor: true}).With().Timestamp().Logger() 143 | } 144 | } 145 | zerologger.loggers[s] = &logger 146 | } 147 | return zerologger 148 | } 149 | 150 | // parseLogLevel matches our log-level with zerolog's Level. 151 | func parseLogLevel(level Level) zerolog.Level { 152 | switch level { 153 | case DebugLevel: 154 | return zerolog.DebugLevel 155 | case InfoLevel: 156 | return zerolog.InfoLevel 157 | case WarnLevel: 158 | return zerolog.WarnLevel 159 | case ErrorLevel: 160 | return zerolog.ErrorLevel 161 | case FatalLevel: 162 | return zerolog.FatalLevel 163 | case PanicLevel: 164 | return zerolog.PanicLevel 165 | case Disabled: 166 | return zerolog.Disabled 167 | } 168 | return zerolog.Disabled 169 | } 170 | -------------------------------------------------------------------------------- /rlp/typecache.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The go-ethereum Authors 2 | // This file is part of the go-ethereum library. 3 | // 4 | // The go-ethereum library is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU Lesser General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | // 9 | // The go-ethereum library is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU Lesser General Public License for more details. 13 | // 14 | // You should have received a copy of the GNU Lesser General Public License 15 | // along with the go-ethereum library. If not, see . 
16 | 17 | package rlp 18 | 19 | import ( 20 | "fmt" 21 | "reflect" 22 | "strings" 23 | "sync" 24 | ) 25 | 26 | var ( 27 | typeCacheMutex sync.RWMutex 28 | typeCache = make(map[typekey]*typeinfo) 29 | ) 30 | 31 | type typeinfo struct { 32 | decoder 33 | writer 34 | } 35 | 36 | // represents struct tags 37 | type tags struct { 38 | // rlp:"nil" controls whether empty input results in a nil pointer. 39 | nilOK bool 40 | // rlp:"tail" controls whether this field swallows additional list 41 | // elements. It can only be set for the last field, which must be 42 | // of slice type. 43 | tail bool 44 | // rlp:"-" ignores fields. 45 | ignored bool 46 | } 47 | 48 | type typekey struct { 49 | reflect.Type 50 | // the key must include the struct tags because they 51 | // might generate a different decoder. 52 | tags 53 | } 54 | 55 | type decoder func(*Stream, reflect.Value) error 56 | 57 | type writer func(reflect.Value, *encbuf) error 58 | 59 | func cachedTypeInfo(typ reflect.Type, tags tags) (*typeinfo, error) { 60 | typeCacheMutex.RLock() 61 | info := typeCache[typekey{typ, tags}] 62 | typeCacheMutex.RUnlock() 63 | if info != nil { 64 | return info, nil 65 | } 66 | // not in the cache, need to generate info for this type. 67 | typeCacheMutex.Lock() 68 | defer typeCacheMutex.Unlock() 69 | return cachedTypeInfo1(typ, tags) 70 | } 71 | 72 | func cachedTypeInfo1(typ reflect.Type, tags tags) (*typeinfo, error) { 73 | key := typekey{typ, tags} 74 | info := typeCache[key] 75 | if info != nil { 76 | // another goroutine got the write lock first 77 | return info, nil 78 | } 79 | // put a dummy value into the cache before generating. 80 | // if the generator tries to lookup itself, it will get 81 | // the dummy value and won't call itself recursively. 82 | typeCache[key] = new(typeinfo) 83 | info, err := genTypeInfo(typ, tags) 84 | if err != nil { 85 | // remove the dummy value if the generator fails 86 | delete(typeCache, key) 87 | return nil, err 88 | } 89 | *typeCache[key] = *info 90 | return typeCache[key], err 91 | } 92 | 93 | type field struct { 94 | index int 95 | info *typeinfo 96 | } 97 | 98 | func structFields(typ reflect.Type) (fields []field, err error) { 99 | for i := 0; i < typ.NumField(); i++ { 100 | if f := typ.Field(i); f.PkgPath == "" { // exported 101 | tags, err := parseStructTag(typ, i) 102 | if err != nil { 103 | return nil, err 104 | } 105 | if tags.ignored { 106 | continue 107 | } 108 | info, err := cachedTypeInfo1(f.Type, tags) 109 | if err != nil { 110 | return nil, err 111 | } 112 | fields = append(fields, field{i, info}) 113 | } 114 | } 115 | return fields, nil 116 | } 117 | 118 | func parseStructTag(typ reflect.Type, fi int) (tags, error) { 119 | f := typ.Field(fi) 120 | var ts tags 121 | for _, t := range strings.Split(f.Tag.Get("rlp"), ",") { 122 | switch t = strings.TrimSpace(t); t { 123 | case "": 124 | case "-": 125 | ts.ignored = true 126 | case "nil": 127 | ts.nilOK = true 128 | case "tail": 129 | ts.tail = true 130 | if fi != typ.NumField()-1 { 131 | return ts, fmt.Errorf(`rlp: invalid struct tag "tail" for %v.%s (must be on last field)`, typ, f.Name) 132 | } 133 | if f.Type.Kind() != reflect.Slice { 134 | return ts, fmt.Errorf(`rlp: invalid struct tag "tail" for %v.%s (field type is not slice)`, typ, f.Name) 135 | } 136 | default: 137 | return ts, fmt.Errorf("rlp: unknown struct tag %q on %v.%s", t, typ, f.Name) 138 | } 139 | } 140 | return ts, nil 141 | } 142 | 143 | func genTypeInfo(typ reflect.Type, tags tags) (info *typeinfo, err error) { 144 | info = new(typeinfo) 
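// Build the decoder and writer for this type; if either step fails, the caller
// removes the placeholder entry from the type cache.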
145 | if info.decoder, err = makeDecoder(typ, tags); err != nil { 146 | return nil, err 147 | } 148 | if info.writer, err = makeWriter(typ, tags); err != nil { 149 | return nil, err 150 | } 151 | return info, nil 152 | } 153 | 154 | func isUint(k reflect.Kind) bool { 155 | return k >= reflect.Uint && k <= reflect.Uintptr 156 | } 157 | -------------------------------------------------------------------------------- /monitor/metrics.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Define types and structures related to prometheus metrics. 3 | */ 4 | package monitor 5 | 6 | import ( 7 | "github.com/go-kit/kit/metrics" 8 | "github.com/go-kit/kit/metrics/discard" 9 | kitprometheus "github.com/go-kit/kit/metrics/prometheus" 10 | "github.com/prometheus/client_golang/prometheus" 11 | ) 12 | 13 | var JTMetrics *Metrics 14 | 15 | // PromMetrics contains metrics exposed by Consensus. 16 | type Metrics struct { 17 | ApigatewayReceivedTx metrics.Counter 18 | SwitchTakenTx metrics.Counter 19 | TxpoolIngressTx metrics.Counter 20 | TxpoolPooledTx metrics.Counter 21 | TxpoolDiscardedTx metrics.Counter 22 | TxpoolDuplacatedTx metrics.Counter 23 | TxpoolOutgoingTx metrics.Counter 24 | ConsensusPeerId metrics.Gauge 25 | ConsensusMasterId metrics.Gauge 26 | BlockHeight metrics.Gauge 27 | BlockTxNum metrics.Gauge 28 | CommittedTx metrics.Counter 29 | } 30 | 31 | func init() { 32 | // NopConsensusMetrics returns no-op Metrics. 33 | // Used by default. 34 | JTMetrics = &Metrics{ 35 | ApigatewayReceivedTx: discard.NewCounter(), 36 | SwitchTakenTx: discard.NewCounter(), 37 | TxpoolIngressTx: discard.NewCounter(), 38 | TxpoolPooledTx: discard.NewCounter(), 39 | TxpoolDiscardedTx: discard.NewCounter(), 40 | TxpoolDuplacatedTx: discard.NewCounter(), 41 | TxpoolOutgoingTx: discard.NewCounter(), 42 | ConsensusPeerId: discard.NewGauge(), 43 | ConsensusMasterId: discard.NewGauge(), 44 | BlockHeight: discard.NewGauge(), 45 | BlockTxNum: discard.NewGauge(), 46 | CommittedTx: discard.NewCounter(), 47 | } 48 | } 49 | 50 | // createJTMetrics creates Metrics build using Prometheus client library. 
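// Each metric below is constructed through go-kit's Prometheus adapter, which
// also registers it with the default Prometheus registry; the trailing
// Add(0)/Set(0) calls make every series visible on the scrape endpoint right
// away. Application code records through the shared instance, e.g. (values
// illustrative):
//
//	monitor.JTMetrics.TxpoolPooledTx.Add(1)
//	monitor.JTMetrics.BlockHeight.Set(float64(height))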
51 | func createMetrics() { 52 | JTMetrics = &Metrics{ 53 | ApigatewayReceivedTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 54 | Subsystem: "apigateway", 55 | Name: "received_tx", 56 | Help: "Accumulated num of tx received by apigateway.", 57 | }, []string{}), 58 | SwitchTakenTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 59 | Subsystem: "gossipswitch", 60 | Name: "taken_tx", 61 | Help: "Accumulated num of tx taken by gossipswitch.", 62 | }, []string{}), 63 | TxpoolIngressTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 64 | Subsystem: "txpool", 65 | Name: "ingress_tx", 66 | Help: "Accumulated num of incoming tx to txpool.", 67 | }, []string{}), 68 | TxpoolPooledTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 69 | Subsystem: "txpool", 70 | Name: "pooled_tx", 71 | Help: "Accumulated num of tx pooled into txpool.", 72 | }, []string{}), 73 | TxpoolDiscardedTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 74 | Subsystem: "txpool", 75 | Name: "discarded_tx", 76 | Help: "Accumulated num of discarded tx because txpool is full.", 77 | }, []string{}), 78 | TxpoolDuplacatedTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 79 | Subsystem: "txpool", 80 | Name: "duplicated_tx", 81 | Help: "Accumulated num of duplicated tx.", 82 | }, []string{}), 83 | TxpoolOutgoingTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 84 | Subsystem: "txpool", 85 | Name: "outgoing_tx", 86 | Help: "Accumulated num of tx out from txpool.", 87 | }, []string{}), 88 | ConsensusPeerId: kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ 89 | Subsystem: "consensus", 90 | Name: "peer_id", 91 | Help: "Peer ID.", 92 | }, []string{}), 93 | ConsensusMasterId: kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ 94 | Subsystem: "consensus", 95 | Name: "master_id", 96 | Help: "Peer ID of current master.", 97 | }, []string{}), 98 | BlockHeight: kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ 99 | Subsystem: "store", 100 | Name: "height", 101 | Help: "Height of blocks.", 102 | }, []string{}), 103 | BlockTxNum: kitprometheus.NewGaugeFrom(prometheus.GaugeOpts{ 104 | Subsystem: "store", 105 | Name: "block_tx_num", 106 | Help: "Num of tx contained in latest block.", 107 | }, []string{}), 108 | CommittedTx: kitprometheus.NewCounterFrom(prometheus.CounterOpts{ 109 | Subsystem: "store", 110 | Name: "total_tx_num", 111 | Help: "Accumulated num of committed tx.", 112 | }, []string{}), 113 | } 114 | JTMetrics.ApigatewayReceivedTx.Add(0) 115 | JTMetrics.SwitchTakenTx.Add(0) 116 | JTMetrics.TxpoolIngressTx.Add(0) 117 | JTMetrics.TxpoolPooledTx.Add(0) 118 | JTMetrics.TxpoolDuplacatedTx.Add(0) 119 | JTMetrics.TxpoolDiscardedTx.Add(0) 120 | JTMetrics.TxpoolOutgoingTx.Add(0) 121 | JTMetrics.BlockHeight.Add(0) 122 | JTMetrics.BlockTxNum.Add(0) 123 | JTMetrics.CommittedTx.Add(0) 124 | JTMetrics.ConsensusPeerId.Set(0) 125 | JTMetrics.ConsensusMasterId.Set(0) 126 | } 127 | -------------------------------------------------------------------------------- /vendor/github.com/prometheus/procfs/mdstat.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 The Prometheus Authors 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // 6 | // http://www.apache.org/licenses/LICENSE-2.0 7 | // 8 | // Unless required by applicable law or agreed to in writing, software 9 | // distributed under the License is distributed on an "AS IS" BASIS, 10 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package procfs 15 | 16 | import ( 17 | "fmt" 18 | "io/ioutil" 19 | "regexp" 20 | "strconv" 21 | "strings" 22 | ) 23 | 24 | var ( 25 | statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) 26 | buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) 27 | ) 28 | 29 | // MDStat holds info parsed from /proc/mdstat. 30 | type MDStat struct { 31 | // Name of the device. 32 | Name string 33 | // activity-state of the device. 34 | ActivityState string 35 | // Number of active disks. 36 | DisksActive int64 37 | // Total number of disks the device consists of. 38 | DisksTotal int64 39 | // Number of blocks the device holds. 40 | BlocksTotal int64 41 | // Number of blocks on the device that are in sync. 42 | BlocksSynced int64 43 | } 44 | 45 | // ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 46 | func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { 47 | mdStatusFilePath := fs.Path("mdstat") 48 | content, err := ioutil.ReadFile(mdStatusFilePath) 49 | if err != nil { 50 | return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) 51 | } 52 | 53 | mdStates := []MDStat{} 54 | lines := strings.Split(string(content), "\n") 55 | for i, l := range lines { 56 | if l == "" { 57 | continue 58 | } 59 | if l[0] == ' ' { 60 | continue 61 | } 62 | if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { 63 | continue 64 | } 65 | 66 | mainLine := strings.Split(l, " ") 67 | if len(mainLine) < 3 { 68 | return mdStates, fmt.Errorf("error parsing mdline: %s", l) 69 | } 70 | mdName := mainLine[0] 71 | activityState := mainLine[2] 72 | 73 | if len(lines) <= i+3 { 74 | return mdStates, fmt.Errorf( 75 | "error parsing %s: too few lines for md device %s", 76 | mdStatusFilePath, 77 | mdName, 78 | ) 79 | } 80 | 81 | active, total, size, err := evalStatusline(lines[i+1]) 82 | if err != nil { 83 | return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) 84 | } 85 | 86 | // j is the line number of the syncing-line. 87 | j := i + 2 88 | if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line 89 | j = i + 3 90 | } 91 | 92 | // If device is syncing at the moment, get the number of currently 93 | // synced bytes, otherwise that number equals the size of the device. 
94 | syncedBlocks := size 95 | if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { 96 | syncedBlocks, err = evalBuildline(lines[j]) 97 | if err != nil { 98 | return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) 99 | } 100 | } 101 | 102 | mdStates = append(mdStates, MDStat{ 103 | Name: mdName, 104 | ActivityState: activityState, 105 | DisksActive: active, 106 | DisksTotal: total, 107 | BlocksTotal: size, 108 | BlocksSynced: syncedBlocks, 109 | }) 110 | } 111 | 112 | return mdStates, nil 113 | } 114 | 115 | func evalStatusline(statusline string) (active, total, size int64, err error) { 116 | matches := statuslineRE.FindStringSubmatch(statusline) 117 | if len(matches) != 4 { 118 | return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) 119 | } 120 | 121 | size, err = strconv.ParseInt(matches[1], 10, 64) 122 | if err != nil { 123 | return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) 124 | } 125 | 126 | total, err = strconv.ParseInt(matches[2], 10, 64) 127 | if err != nil { 128 | return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) 129 | } 130 | 131 | active, err = strconv.ParseInt(matches[3], 10, 64) 132 | if err != nil { 133 | return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) 134 | } 135 | 136 | return active, total, size, nil 137 | } 138 | 139 | func evalBuildline(buildline string) (syncedBlocks int64, err error) { 140 | matches := buildlineRE.FindStringSubmatch(buildline) 141 | if len(matches) != 2 { 142 | return 0, fmt.Errorf("unexpected buildline: %s", buildline) 143 | } 144 | 145 | syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) 146 | if err != nil { 147 | return 0, fmt.Errorf("%s in buildline: %s", err, buildline) 148 | } 149 | 150 | return syncedBlocks, nil 151 | } 152 | -------------------------------------------------------------------------------- /vendor/github.com/go-kit/kit/metrics/doc.go: -------------------------------------------------------------------------------- 1 | // Package metrics provides a framework for application instrumentation. It's 2 | // primarily designed to help you get started with good and robust 3 | // instrumentation, and to help you migrate from a less-capable system like 4 | // Graphite to a more-capable system like Prometheus. If your organization has 5 | // already standardized on an instrumentation system like Prometheus, and has no 6 | // plans to change, it may make sense to use that system's instrumentation 7 | // library directly. 8 | // 9 | // This package provides three core metric abstractions (Counter, Gauge, and 10 | // Histogram) and implementations for almost all common instrumentation 11 | // backends. Each metric has an observation method (Add, Set, or Observe, 12 | // respectively) used to record values, and a With method to "scope" the 13 | // observation by various parameters. For example, you might have a Histogram to 14 | // record request durations, parameterized by the method that's being called. 15 | // 16 | // var requestDuration metrics.Histogram 17 | // // ... 18 | // requestDuration.With("method", "MyMethod").Observe(time.Since(begin)) 19 | // 20 | // This allows a single high-level metrics object (requestDuration) to work with 21 | // many code paths somewhat dynamically. The concept of With is fully supported 22 | // in some backends like Prometheus, and not supported in other backends like 23 | // Graphite. 
So, With may be a no-op, depending on the concrete implementation 24 | // you choose. Please check the implementation to know for sure. For 25 | // implementations that don't provide With, it's necessary to fully parameterize 26 | // each metric in the metric name, e.g. 27 | // 28 | // // Statsd 29 | // c := statsd.NewCounter("request_duration_MyMethod_200") 30 | // c.Add(1) 31 | // 32 | // // Prometheus 33 | // c := prometheus.NewCounter(stdprometheus.CounterOpts{ 34 | // Name: "request_duration", 35 | // ... 36 | // }, []string{"method", "status_code"}) 37 | // c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1) 38 | // 39 | // Usage 40 | // 41 | // Metrics are dependencies, and should be passed to the components that need 42 | // them in the same way you'd construct and pass a database handle, or reference 43 | // to another component. Metrics should *not* be created in the global scope. 44 | // Instead, instantiate metrics in your func main, using whichever concrete 45 | // implementation is appropriate for your organization. 46 | // 47 | // latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ 48 | // Namespace: "myteam", 49 | // Subsystem: "foosvc", 50 | // Name: "request_latency_seconds", 51 | // Help: "Incoming request latency in seconds.", 52 | // }, []string{"method", "status_code"}) 53 | // 54 | // Write your components to take the metrics they will use as parameters to 55 | // their constructors. Use the interface types, not the concrete types. That is, 56 | // 57 | // // NewAPI takes metrics.Histogram, not *prometheus.Summary 58 | // func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { 59 | // // ... 60 | // } 61 | // 62 | // func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { 63 | // begin := time.Now() 64 | // // ... 65 | // a.latency.Observe(time.Since(begin).Seconds()) 66 | // } 67 | // 68 | // Finally, pass the metrics as dependencies when building your object graph. 69 | // This should happen in func main, not in the global scope. 70 | // 71 | // api := NewAPI(store, logger, latency) 72 | // http.ListenAndServe("/", api) 73 | // 74 | // Note that metrics are "write-only" interfaces. 75 | // 76 | // Implementation details 77 | // 78 | // All metrics are safe for concurrent use. Considerable design influence has 79 | // been taken from https://github.com/codahale/metrics and 80 | // https://prometheus.io. 81 | // 82 | // Each telemetry system has different semantics for label values, push vs. 83 | // pull, support for histograms, etc. These properties influence the design of 84 | // their respective packages. This table attempts to summarize the key points of 85 | // distinction. 86 | // 87 | // SYSTEM DIM COUNTERS GAUGES HISTOGRAMS 88 | // dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each 89 | // statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each 90 | // graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate 91 | // expvar 1 atomic atomic synthetic, batch, in-place expose 92 | // influx n custom custom custom 93 | // prometheus n native native native 94 | // pcp 1 native native native 95 | // cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate 96 | // 97 | package metrics 98 | --------------------------------------------------------------------------------