├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── NOTICE ├── README-PIPELINE.md ├── README.md ├── bin └── .gitignore ├── bpx.json ├── codec.go ├── codec_gnmi.go ├── codec_gpb.go ├── codec_gpb_test.go ├── codec_json.go ├── codec_json_test.go ├── crypt.go ├── crypt_test.go ├── docker ├── Dockerfile ├── docker-compose.yaml ├── entrypoint.sh └── metrics.json ├── docs ├── deploy_pipeline.png ├── deploy_pipelinex2.png ├── deploy_pipelinex3.png ├── memoryutil.png ├── pipeline.png └── routerinterfaces.png ├── encap.go ├── encap_st.go ├── encap_st_test.go ├── filter_test.json ├── go.mod ├── go.sum ├── id_rsa_FOR_TEST_ONLY ├── id_rsa_FOR_TEST_ONLY_ALT ├── jsonpb.go ├── mdt_msg_samples ├── dump.bin ├── dump.json ├── dump.jsonkv ├── dump.metrics ├── samples.go └── samples_test.go ├── message.go ├── message_router.go ├── message_router_test.go ├── metamonitoring.go ├── metrics.go ├── metrics.json ├── metrics_gpb.json ├── metrics_influx.go ├── metrics_influx_test.go ├── metrics_prometheus.go ├── metrics_test.go ├── pipeline.conf ├── pipeline.go ├── pipeline_test.conf ├── pipeline_test.go ├── pipeline_test_bad.conf ├── pipeline_test_tap.conf ├── replay.go ├── skeleton └── pipeline.mk ├── tap.go ├── tap_test.go ├── tools ├── monitor │ ├── data_graf4ppl │ │ └── grafana.db │ ├── data_prom4ppl │ │ └── prometheus.yml │ └── run.sh └── test │ └── docker-compose.yml ├── topic_template_testA.txt ├── topic_template_testB.txt ├── topic_template_testBAD.txt ├── topic_template_testC.txt ├── xport_gnmi.go ├── xport_grpc.go ├── xport_grpc_out.go ├── xport_grpc_out.pb.go ├── xport_grpc_out.proto ├── xport_grpc_out_test.go ├── xport_grpc_test.go ├── xport_kafka.go ├── xport_kafka_test.go ├── xport_tcp.go ├── xport_tcp_test.go ├── xport_udp.go └── xport_udp_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | ## Standard 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | 9 | # Test binary, built with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # Dependency directories (remove the comment below to include it) 16 | # vendor/ 17 | 18 | ## Custom 19 | .DS_Store 20 | 21 | ## Test-related (should be fixed) 22 | dump.bin 23 | dump.txt 24 | dump1.txt 25 | dumpfiltererd.txt 26 | mdt_msg_samples/hexdump.bin 27 | models.txt 28 | pipeline.log 29 | 30 | # JetBrains IDE 31 | .idea/ 32 | 33 | # Tests 34 | coverage.out 35 | 36 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | services: 4 | - docker 5 | 6 | sudo: false 7 | 8 | install: true 9 | 10 | 11 | cache: 12 | directories: 13 | - $GOPATH/pkg/mod 14 | 15 | go: 16 | - 1.11.x 17 | - 1.12.x 18 | 19 | env: 20 | global: 21 | - GO111MODULE=on 22 | matrix: 23 | - TEST_SUITE=integration_tests 24 | - TEST_SUITE=unit_tests 25 | 26 | script: 27 | - if [ "$TEST_SUITE" = "unit_tests" ]; then make all; fi 28 | - if [ "$TEST_SUITE" = "integration_tests" ]; then make testall; fi -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GOCMD=go 2 | GOBUILD = $(GOCMD) build 3 | GOCLEAN = $(GOCMD) clean 4 | GOTEST = $(GOCMD) test 5 | GOGET = $(GOCMD) get 6 | GOTOOL = $(GOCMD) tool 7 | GOBASE := $(shell pwd) 8 | GOBIN := $(GOBASE)/bin 9 | DOCKER := 
$(GOBASE)/tools/test
10 | # name of executable.
11 | BINARY = pipeline
12 | BINDIR = bin
13 | 
14 | .DEFAULT_GOAL := all
15 | 
16 | include skeleton/pipeline.mk
17 | 
18 | ## Removes containers, images, binaries and cache
19 | clean: clean-containers
20 | 	@echo " > Cleaning binaries and cache"
21 | 	@-rm -f $(GOBIN)/$(PROJECTNAME)/$(BINARY)
22 | 	@$(GOCLEAN)
23 | 
24 | clean-containers:
25 | 	@echo " > Cleaning containers"
26 | 	@cd $(DOCKER) && docker-compose down --rmi all --volumes --remove-orphans 2>/dev/null
27 | 
28 | stop-containers:
29 | 	@echo " > Stopping containers"
30 | 	@cd $(DOCKER) && docker-compose down --volumes 2>/dev/null
31 | 
32 | start-containers: stop-containers
33 | 	@echo " > Starting containers"
34 | 	@cd $(DOCKER) && docker-compose up -d
35 | 
36 | ## Alias for integration-test
37 | testall: build integration-test
38 | 
39 | ## Integration test with Kafka and Zookeeper
40 | .PHONY: integration-test
41 | integration-test:
42 | 	@echo " > Setting up Zookeeper and Kafka. Docker required."
43 | 	@$(MAKE) start-containers
44 | 	@echo " > Starting integration tests"
45 | 	$(GOTEST) -v -coverpkg=./... -tags=integration $(COVER_PROFILE) ./...
46 | 	@$(MAKE) stop-containers
47 | 
48 | ## Default target. Builds and executes unit tests
49 | .PHONY: all
50 | all: build test
51 | 
52 | .DEFAULT:
53 | 	@$(MAKE) help
54 | 
55 | ## This help message
56 | .PHONY: help
57 | help:
58 | 	@printf "\nUsage:\n";
59 | 
60 | 	@awk '{ \
61 | 	if ($$0 ~ /^.PHONY: [a-zA-Z\-\_0-9]+$$/) { \
62 | 	helpCommand = substr($$0, index($$0, ":") + 2); \
63 | 	if (helpMessage) { \
64 | 	printf "\033[36m%-20s\033[0m %s\n", \
65 | 	helpCommand, helpMessage; \
66 | 	helpMessage = ""; \
67 | 	} \
68 | 	} else if ($$0 ~ /^[a-zA-Z\-\_0-9.]+:/) { \
69 | 	helpCommand = substr($$0, 0, index($$0, ":")); \
70 | 	if (helpMessage) { \
71 | 	printf "\033[36m%-20s\033[0m %s\n", \
72 | 	helpCommand, helpMessage; \
73 | 	helpMessage = ""; \
74 | 	} \
75 | 	} else if ($$0 ~ /^##/) { \
76 | 	if (helpMessage) { \
77 | 	helpMessage = helpMessage"\n "substr($$0, 3); \
78 | 	} else { \
79 | 	helpMessage = substr($$0, 3); \
80 | 	} \
81 | 	} else { \
82 | 	if (helpMessage) { \
83 | 	print "\n "helpMessage"\n" \
84 | 	} \
85 | 	helpMessage = ""; \
86 | 	} \
87 | 	}' \
88 | 	$(MAKEFILE_LIST)
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | pipeline-gnmi
2 | Copyright (c) 2020 Cisco Systems, Inc. and/or its affiliates
3 | 
4 | This project includes software developed at Cisco Systems, Inc. and/or its affiliates.
5 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pipeline-gnmi [![Go Report Card](https://goreportcard.com/badge/cisco-ie/pipeline-gnmi)](https://goreportcard.com/report/cisco-ie/pipeline-gnmi) [![Build Status](https://travis-ci.org/cisco-ie/pipeline-gnmi.svg?branch=master)](https://travis-ci.org/cisco-ie/pipeline-gnmi)
2 | 
3 | **NOTE**: For a more recently developed collector with more output flexibility and support, please evaluate usage of the following [Telegraf](https://github.com/influxdata/telegraf) plugins for your use case: [cisco_telemetry_mdt](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cisco_telemetry_mdt) and [cisco_telemetry_gnmi](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cisco_telemetry_gnmi).
4 | 
5 | > A Model-Driven Telemetry collector based on the open-source tool [`pipeline`](https://github.com/cisco/bigmuddy-network-telemetry-pipeline) including enhancements and bug fixes.
6 | 
7 | `pipeline-gnmi` is a Model-Driven Telemetry (MDT) collector based on the open-source tool [`pipeline`](https://github.com/cisco/bigmuddy-network-telemetry-pipeline) which adds gNMI support and fixes for maintainability (e.g. Go modules) and compatibility (e.g. Kafka version support). It supports MDT from IOS XE, IOS XR, and NX-OS, enabling end-to-end Cisco MDT collection for DIY operators.
8 | 
9 | The original pipeline README is included [here](README-PIPELINE.md) for reference.
10 | 
11 | ## Usage
12 | pipeline-gnmi is written in Go and targets Go 1.11+. Windows and macOS/Darwin support is experimental.
13 | 
14 | 1) pipeline-gnmi binaries may be downloaded from [Releases](https://github.com/cisco-ie/pipeline-gnmi/releases)
15 | 2) Built from source:
16 | ```bash
17 | git clone https://github.com/cisco-ie/pipeline-gnmi
18 | cd pipeline-gnmi
19 | make build
20 | ```
21 | 3) Acquired via `go get github.com/cisco-ie/pipeline-gnmi` to be located in `$GOPATH/bin`
22 | 
23 | ## Configuration
24 | pipeline configuration support is maintained and detailed in the [original README](README-PIPELINE.md). Sample configuration is supplied as [pipeline.conf](pipeline.conf).
25 | 
26 | ### gNMI Support
27 | This project introduces support for [gNMI](https://github.com/openconfig/reference/tree/master/rpc/gnmi).
28 | gNMI is a standardized and cross-platform protocol for network management and telemetry. gNMI does not require prior sensor path configuration on the target device; merely enabling gRPC/gNMI is enough. Sensor paths are requested by the collector (e.g. pipeline). Subscription type (interval, on-change, target-defined) can be specified per path.
29 | 
30 | Filtering of retrieved sensor values can be done directly at the input stage through selectors in the configuration file,
31 | by defining all the sensor paths that should be stored in a TSDB or forwarded via Kafka. **Regular metrics filtering through metrics.json files is ignored and not implemented**, due to the lack of user-friendliness of the configuration.
32 | 
33 | ```
34 | [mygnmirouter]
35 | stage = xport_input
36 | type = gnmi
37 | server = 10.49.234.114:57777
38 | 
39 | # Sensor Path to subscribe to. No configuration on the device necessary
40 | # Appending an @ with a parameter specifies subscription type:
41 | # @x where x is a positive number indicates a fixed interval, e.g. @10 -> every 10 seconds
42 | # @change indicates only changes should be reported
43 | # omitting the @ and parameter requests a target-defined subscription (not universally supported)
44 | # 
45 | path1 = Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters@10
46 | #path2 = /interfaces/interface/state@change
47 | 
48 | # Whitelist the actual sensor values we are interested in (1 per line) and drop the rest.
49 | # This replaces metrics-based filtering for gNMI input - which is not implemented.
50 | # Note: Specifying one or more selectors will drop all other sensor values and is applied for all paths.
51 | #select1 = Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters/packets-sent
52 | #select2 = Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters/packets-received
53 | 
54 | # Suppress redundant messages (minimum heartbeat interval)
55 | # If set and 0 or positive, redundant messages should be suppressed by the server
56 | # If greater than 0, the number of seconds after which a measurement should be sent, even if no change has occurred
57 | #heartbeat_interval = 0
58 | 
59 | tls = false
60 | username = cisco
61 | password = ...
62 | ```
63 | 
64 | ### Kafka 2.x Support
65 | This project supports Kafka 2.x by requiring the Kafka version (`kafkaversion`) to be specified in the config file stage. This is a requirement of the underlying Kafka library and ensures that the library is communicating with the Kafka brokers effectively.
66 | 
67 | ```
68 | [kafkaconsumer]
69 | topic=mdt
70 | consumergroup=pipeline-gnmi
71 | type=kafka
72 | stage=xport_input
73 | brokers=kafka-host:9092
74 | encoding=gpb
75 | datachanneldepth=1000
76 | kafkaversion=2.1.0
77 | ```
78 | 
79 | ### Docker Environment Variables
80 | This project has improved Docker support. The Dockerfile uses multi-stage builds and
81 | builds Pipeline from scratch. The configuration file can now be created from environment variables directly,
82 | e.g.
83 | 
84 | ```
85 | PIPELINE_default_id=pipeline
86 | PIPELINE_mygnmirouter_stage=xport_input
87 | PIPELINE_mygnmirouter_type=gnmi
88 | ```
89 | 
90 | is translated into a pipeline.conf with the following contents:
91 | ```
92 | [default]
93 | id = pipeline
94 | 
95 | [mygnmirouter]
96 | stage = xport_input
97 | type = gnmi
98 | ```
99 | 
100 | If the special variable *_password* is used, the value is encrypted using the pipeline RSA key before being written to
101 | the *password* option. Similarly, *_secret* can be used: the value is then read from the file whose name is given as the
102 | value, encrypted using the pipeline RSA key, and written as the *password* option. If the pipeline RSA key is not
103 | given or does not exist, it is created upon creation of the container.
104 | 
105 | Additionally, existing replays of sensor data can be fed in efficiently using xz-compressed files.
106 | 
107 | ## Licensing
108 | pipeline-gnmi is licensed with [Apache License, Version 2.0](LICENSE), per pipeline.
109 | 
110 | ## Help!
111 | For support, please open a [GitHub Issue](https://github.com/cisco-ie/pipeline-gnmi/issues) or email [cisco-ie@cisco.com](mailto:cisco-ie@cisco.com).
112 | 
113 | ## Special Thanks
114 | Chris Cassar for implementing `pipeline`, used by anyone interested in MDT; Steven Barth for gNMI plugin development; and the Cisco teams implementing MDT support in the platforms.
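
## Appendix: gNMI Path Suffix Parsing Sketch
The gNMI Support section above describes the `@` suffix convention for choosing a subscription mode per path. The following is a minimal, illustrative Go sketch of how such a suffix could be split off a configured path. It is a sketch only: the `parseGNMIPath` helper and the `subscription` struct are hypothetical names introduced here for illustration, not the parser pipeline-gnmi actually uses.

```go
// Hypothetical sketch: split a configured path such as
// ".../generic-counters@10" or "/interfaces/interface/state@change"
// into the sensor path and a subscription mode.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

type subscription struct {
	path     string
	onChange bool  // @change: report only on change
	interval int64 // @N: sample every N seconds; 0 means target-defined
}

func parseGNMIPath(raw string) (subscription, error) {
	s := subscription{path: raw}
	at := strings.LastIndex(raw, "@")
	if at < 0 {
		return s, nil // no suffix: target-defined subscription
	}
	s.path = raw[:at]
	suffix := raw[at+1:]
	if suffix == "change" {
		s.onChange = true
		return s, nil
	}
	n, err := strconv.ParseInt(suffix, 10, 64)
	if err != nil || n <= 0 {
		return s, fmt.Errorf("bad subscription suffix %q", suffix)
	}
	s.interval = n
	return s, nil
}

func main() {
	for _, p := range []string{
		"Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters@10",
		"/interfaces/interface/state@change",
	} {
		s, err := parseGNMIPath(p)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", s)
	}
}
```

Running the sketch prints the split path and mode for the two sample paths from the configuration above.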
115 | 
--------------------------------------------------------------------------------
/bin/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
--------------------------------------------------------------------------------
/bpx.json:
--------------------------------------------------------------------------------
1 | vendor/github.com/cisco/bigmuddy-network-telemetry-proto/proto_go/basepathxlation.json
--------------------------------------------------------------------------------
/codec.go:
--------------------------------------------------------------------------------
1 | //
2 | // February 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | // Codec factory
9 | //
10 | package main
11 | 
12 | import (
13 | 	"encoding/json"
14 | 	"fmt"
15 | 	"github.com/prometheus/client_golang/prometheus"
16 | 	log "github.com/sirupsen/logrus"
17 | 	"io/ioutil"
18 | )
19 | 
20 | type encoding int
21 | 
22 | //
23 | // There are dependencies on this set of values starting from 0,
24 | // and not skipping any values (see getNewEncapSTParser):
25 | const (
26 | 	ENCODING_GPB_COMPACT encoding = iota
27 | 	ENCODING_GPB_KV
28 | 	ENCODING_JSON
29 | 	//
30 | 	// ENCODING_JSON_EVENTS:
31 | 	// A format we produce (namely for 3rd parties to consume):
32 | 	// - if K/V content, un K/V it
33 | 	// - separate first level of fields into distinct events
34 | 	//
35 | 	ENCODING_JSON_EVENTS
36 | 	//
37 | 	// ENCODING_GPB:
38 | 	// A format which handles both GPB_KV and GPB_COMPACT
39 | 	ENCODING_GPB
40 | 	//
41 | 	// Template based encoding (only produced, not consumed, currently)
42 | 	ENCODING_TEMPLATE
43 | 	ENCODING_MAX
44 | )
45 | 
46 | //
47 | // Do we support receiving this content in telemetry?
(as opposed to producing it) 48 | var codec_support = []encoding{ 49 | ENCODING_GPB, 50 | ENCODING_GPB_COMPACT, 51 | ENCODING_GPB_KV, 52 | ENCODING_JSON, 53 | } 54 | 55 | // 56 | // Produce encoding for name 57 | func nameToEncoding(encap string) (error, encoding) { 58 | 59 | mapping := map[string]encoding{ 60 | "gpbcompact": ENCODING_GPB_COMPACT, 61 | "gpbkv": ENCODING_GPB_KV, 62 | "json": ENCODING_JSON, 63 | "json_events": ENCODING_JSON_EVENTS, 64 | "gpb": ENCODING_GPB, 65 | "template": ENCODING_TEMPLATE, 66 | } 67 | 68 | encoding, ok := mapping[encap] 69 | if ok { 70 | return nil, encoding 71 | } 72 | 73 | err := fmt.Errorf( 74 | "encoding [%s], expected value from %v", 75 | encap, mapping) 76 | 77 | return err, encoding 78 | } 79 | 80 | // 81 | // Produce name for encoding 82 | func encodingToName(enc encoding) string { 83 | 84 | mapping := map[encoding]string{ 85 | ENCODING_GPB_COMPACT: "gpbcompact", 86 | ENCODING_GPB_KV: "gpbkv", 87 | ENCODING_JSON: "json", 88 | ENCODING_JSON_EVENTS: "json_events", 89 | ENCODING_GPB: "gpb", 90 | ENCODING_TEMPLATE: "template", 91 | } 92 | 93 | return mapping[enc] 94 | } 95 | 96 | type codec interface { 97 | blockToDataMsgs(source msgproducer, nextBlock []byte) (error, []dataMsg) 98 | dataMsgToBlock(dM dataMsg) (error, []byte) 99 | } 100 | 101 | // 102 | // Specific codec 103 | func getCodec(name string, e encoding) (error, codec) { 104 | 105 | switch e { 106 | case ENCODING_GPB_COMPACT, ENCODING_GPB_KV, ENCODING_GPB: 107 | return getNewCodecGPB(name, e) 108 | case ENCODING_JSON: 109 | return getNewCodecJSON(name) 110 | 111 | } 112 | 113 | return fmt.Errorf("CODEC: codec unsupported"), nil 114 | } 115 | 116 | // Loaded once, and never changed. Exposed directly rather than 117 | // through accessors. 118 | var basePathXlation map[string]string 119 | 120 | func codec_init(nc nodeConfig) { 121 | 122 | bpxFilename, err := nc.config.GetString("default", "base_path_xlation") 123 | if err != nil { 124 | return 125 | } 126 | 127 | logctx := logger.WithFields(log.Fields{ 128 | "name": "default", 129 | "base_path_xlation": bpxFilename, 130 | }) 131 | 132 | bpxJSON, err := ioutil.ReadFile(bpxFilename) 133 | if err != nil { 134 | logctx.WithError(err).Error( 135 | "failed to read file containing base path translation map") 136 | return 137 | } 138 | 139 | err = json.Unmarshal(bpxJSON, &basePathXlation) 140 | if err != nil { 141 | logctx.WithError(err).Error( 142 | "failed to parse JSON describing base path translation") 143 | return 144 | } 145 | 146 | logctx.WithFields( 147 | log.Fields{"xlation_entries": len(basePathXlation)}).Info( 148 | "loaded base path translation map, applied on input") 149 | } 150 | 151 | type CodecMetaMonitorType struct { 152 | // 153 | // Number of messages decoded for a given codec. Note that a 154 | // message is that defined by the encap (e.g. one frame in 155 | // the streaming telemetry format, or one grpc message in 156 | // grpc.) 157 | Decoded *prometheus.CounterVec 158 | // 159 | // Number of message bytes decoded for a given codec. 160 | DecodedBytes *prometheus.CounterVec 161 | // 162 | // Counter of messages partitioned by base paths decoded 163 | // sufficiently to extract base path. 164 | BasePathGroups *prometheus.CounterVec 165 | // 166 | // Counter of errors per base path. 167 | BasePathDecodeError *prometheus.CounterVec 168 | } 169 | 170 | var codecMetaMonitor *CodecMetaMonitorType 171 | 172 | func init() { 173 | // 174 | // We track messages decoded by codecs across a number of 175 | // dimensions. 
To that end the common codec sets up the 176 | // metrics. 177 | // 178 | codecMetaMonitor = &CodecMetaMonitorType{ 179 | Decoded: prometheus.NewCounterVec( 180 | prometheus.CounterOpts{ 181 | Name: "codec_decoded_msgs", 182 | Help: "Number of messages decoded (partitioned)", 183 | }, 184 | []string{"section", "source", "codec"}), 185 | DecodedBytes: prometheus.NewCounterVec( 186 | prometheus.CounterOpts{ 187 | Name: "codec_decoded_bytes", 188 | Help: "Number of bytes decoded (partitioned)", 189 | }, 190 | []string{"section", "source", "codec"}), 191 | BasePathGroups: prometheus.NewCounterVec( 192 | prometheus.CounterOpts{ 193 | Name: "codec_base_path_groups", 194 | Help: "Counter tracking groups per-base_path", 195 | }, 196 | []string{"section", "source", "base_path"}), 197 | BasePathDecodeError: prometheus.NewCounterVec( 198 | prometheus.CounterOpts{ 199 | Name: "codec_base_path_decode_error", 200 | Help: "Counter tracking decode errors per-base_path", 201 | }, 202 | []string{"section", "source", "base_path", "errortype"}), 203 | } 204 | 205 | prometheus.MustRegister(codecMetaMonitor.Decoded) 206 | prometheus.MustRegister(codecMetaMonitor.DecodedBytes) 207 | prometheus.MustRegister(codecMetaMonitor.BasePathGroups) 208 | prometheus.MustRegister(codecMetaMonitor.BasePathDecodeError) 209 | } 210 | -------------------------------------------------------------------------------- /codec_gnmi.go: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright (c) 2018 Cisco Systems 3 | // 4 | // Author: Steven Barth 5 | // 6 | package main 7 | 8 | import ( 9 | "fmt" 10 | "strings" 11 | 12 | "github.com/golang/protobuf/proto" 13 | "github.com/openconfig/gnmi/proto/gnmi" 14 | ) 15 | 16 | type dataMsgGNMI struct { 17 | original []byte 18 | source msgproducer 19 | notification *gnmi.Notification 20 | jsonUInt64Compat bool 21 | } 22 | 23 | func decodeGNMIPath(path *gnmi.Path) string { 24 | var builder strings.Builder 25 | 26 | if len(path.Origin) > 0 { 27 | builder.WriteString(path.Origin) 28 | builder.WriteString(":") 29 | } else if len(path.Elem) > 0 { 30 | builder.WriteString("/") 31 | } 32 | 33 | for i, elem := range path.Elem { 34 | builder.WriteString(elem.Name) 35 | if i < len(path.Elem)-1 { 36 | builder.WriteString("/") 37 | } 38 | } 39 | return builder.String() 40 | } 41 | 42 | func (msg *dataMsgGNMI) getDataMsgDescription() string { 43 | _, id := msg.getMetaDataIdentifier() 44 | return fmt.Sprintf("gnmi message [%s msg len %d]", id, len(msg.original)) 45 | } 46 | 47 | func (msg *dataMsgGNMI) produceByteStream(streamSpec *dataMsgStreamSpec) (error, []byte) { 48 | switch streamSpec.streamType { 49 | case dMStreamMsgDefault, dMStreamGPB: 50 | return nil, msg.original 51 | 52 | case dMStreamJSON: 53 | marshaler := Marshaler{} 54 | marshaler.EmitUInt64Unquoted = msg.jsonUInt64Compat 55 | marshaler.OrigName = true 56 | 57 | json, err := marshaler.MarshalToString(msg.notification) 58 | return err, []byte(json) 59 | } 60 | 61 | // 62 | // We only support producing stream in JSON for this message 63 | // for the moment - this is because we have the encoded 64 | // variant at hand. 
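// (In practice the switch above also passes GPB and the default
// stream type through as the original marshaled notification bytes;
// any other requested stream type falls through to this error.)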
65 | return fmt.Errorf("gnmi codec: reformat msg to [%s] is"+ 66 | " not supported", dataMsgStreamTypeString(streamSpec.streamType)), nil 67 | } 68 | 69 | func (msg *dataMsgGNMI) produceMetrics(spec *metricsSpec, handler metricsOutputHandler, context metricsOutputContext) error { 70 | written := false 71 | timestamp := uint64(msg.notification.Timestamp / 1000000) 72 | builder := strings.Builder{} 73 | 74 | if len(msg.notification.Prefix.Origin) > 0 { 75 | builder.WriteString(msg.notification.Prefix.Origin) 76 | builder.WriteString(":") 77 | } else { 78 | builder.WriteString("/") 79 | } 80 | 81 | tags := make([]metricsAtom, 3) 82 | tags[0].key = "EncodingPath" 83 | tags[1].key = "Producer" 84 | tags[1].val = msg.source.String() 85 | tags[2].key = "Target" 86 | tags[2].val = msg.notification.Prefix.Target 87 | 88 | // Parse generic keys from prefix 89 | for _, elem := range msg.notification.Prefix.Elem { 90 | builder.WriteString(elem.Name) 91 | builder.WriteString("/") 92 | for key, val := range elem.Key { 93 | tags = append(tags, metricsAtom{ 94 | key: builder.String() + key, 95 | val: val, 96 | }) 97 | } 98 | } 99 | 100 | path := builder.String() 101 | tags[0].val = path[:len(path)-1] 102 | 103 | for _, update := range msg.notification.Update { 104 | metricTags := tags 105 | builder = strings.Builder{} 106 | builder.WriteString(path) 107 | 108 | for i, elem := range update.Path.Elem { 109 | builder.WriteString(elem.Name) 110 | 111 | if i < len(update.Path.Elem)-1 { 112 | builder.WriteString("/") 113 | } 114 | 115 | for key, val := range elem.Key { 116 | metricTags = append(metricTags, metricsAtom{ 117 | key: builder.String() + key, 118 | val: val, 119 | }) 120 | } 121 | } 122 | 123 | var val interface{} 124 | value := update.Val 125 | 126 | switch value.Value.(type) { 127 | case *gnmi.TypedValue_AsciiVal: 128 | val = value.GetAsciiVal() 129 | case *gnmi.TypedValue_BoolVal: 130 | val = value.GetBoolVal() 131 | case *gnmi.TypedValue_BytesVal: 132 | val = value.GetBytesVal() 133 | case *gnmi.TypedValue_DecimalVal: 134 | val = value.GetDecimalVal() 135 | case *gnmi.TypedValue_FloatVal: 136 | val = value.GetFloatVal() 137 | case *gnmi.TypedValue_IntVal: 138 | val = value.GetIntVal() 139 | case *gnmi.TypedValue_StringVal: 140 | val = value.GetStringVal() 141 | case *gnmi.TypedValue_UintVal: 142 | val = value.GetUintVal() 143 | default: 144 | val = nil 145 | } 146 | 147 | if val != nil { 148 | handler.buildMetric( 149 | metricTags, 150 | metricsAtom{ 151 | key: builder.String(), 152 | val: val, 153 | }, 154 | timestamp, 155 | context) 156 | written = true 157 | } 158 | } 159 | 160 | if written { 161 | handler.flushMetric(tags, timestamp, context) 162 | } 163 | 164 | return nil 165 | } 166 | 167 | func (msg *dataMsgGNMI) getDataMsgStreamType() dataMsgStreamType { 168 | return dMStreamMsgDefault 169 | } 170 | 171 | func (msg *dataMsgGNMI) getMetaDataPath() (error, string) { 172 | return nil, decodeGNMIPath(msg.notification.Prefix) 173 | } 174 | 175 | func (msg *dataMsgGNMI) getMetaDataIdentifier() (error, string) { 176 | return nil, msg.source.String() 177 | } 178 | 179 | func (msg *dataMsgGNMI) getMetaData() *dataMsgMetaData { 180 | _, path := msg.getMetaDataPath() 181 | return &dataMsgMetaData{ 182 | Path: path, 183 | Identifier: msg.source.String(), 184 | } 185 | } 186 | 187 | type codecGNMI struct { 188 | name string 189 | } 190 | 191 | // 192 | // Produce a JSON type codec 193 | func getNewCodecGNMI(name string) (error, codecGNMI) { 194 | codec := codecGNMI{ 195 | name: name, 196 | } 197 | 198 
| return nil, codec 199 | } 200 | 201 | func (codec *codecGNMI) dataMsgToBlock(dM dataMsg) (error, []byte) { 202 | return fmt.Errorf("gnmi: only decoding is supported currently"), 203 | nil 204 | } 205 | 206 | func (codec *codecGNMI) notificationToDataMsgs(source *gnmiClient, msg *gnmi.Notification) ([]dataMsg, error) { 207 | dMs := make([]dataMsg, 1) 208 | block, err := proto.Marshal(msg) 209 | 210 | if err == nil { 211 | dMs[0] = &dataMsgGNMI{ 212 | original: block, 213 | source: source, 214 | notification: msg, 215 | jsonUInt64Compat: source.jsonUInt64Compat, 216 | } 217 | 218 | // 219 | // Count the decoded message against the source, section and 220 | // type 221 | codecMetaMonitor.Decoded.WithLabelValues(codec.name, source.String(), "gnmi").Inc() 222 | codecMetaMonitor.DecodedBytes.WithLabelValues(codec.name, source.String(), "gnmi").Add(float64(len(block))) 223 | } 224 | 225 | return dMs, err 226 | } 227 | -------------------------------------------------------------------------------- /codec_json.go: -------------------------------------------------------------------------------- 1 | // 2 | // February 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | // Provide JSON codec, such as it is. More effort required here to 9 | // exploit common bit (Telemetry message) and provide better 10 | // implementations exporting metadata. Shipping MDT does not support 11 | // JSON yet, though this is in the works. JSON is largely pass 12 | // through. 13 | // 14 | package main 15 | 16 | import ( 17 | "fmt" 18 | ) 19 | 20 | // 21 | // dataMsgJSON dataMsg types are produced when handling JSON streams. 22 | type dataMsgJSON struct { 23 | original []byte 24 | source msgproducer 25 | } 26 | 27 | func (m *dataMsgJSON) getDataMsgDescription() string { 28 | _, id := m.getMetaDataIdentifier() 29 | return fmt.Sprintf("JSON message [%s msg len %d]", id, len(m.original)) 30 | } 31 | 32 | func (m *dataMsgJSON) produceByteStream(streamSpec *dataMsgStreamSpec) ( 33 | error, []byte) { 34 | 35 | switch streamSpec.streamType { 36 | case dMStreamJSON, dMStreamMsgDefault: 37 | return nil, m.original 38 | } 39 | 40 | // 41 | // We only support producing stream in JSON for this message 42 | // for the moment - this is because we have the encoded 43 | // variant at hand. 
44 | return fmt.Errorf("JSON CODEC: reformat msg to [%s] is"+ 45 | " not supported", dataMsgStreamTypeString(streamSpec.streamType)), nil 46 | } 47 | 48 | func (m *dataMsgJSON) produceMetrics( 49 | spec *metricsSpec, 50 | outputHandler metricsOutputHandler, 51 | outputContext metricsOutputContext) error { 52 | return fmt.Errorf("JSON CODEC: metric extraction unsupported") 53 | } 54 | 55 | func (m *dataMsgJSON) getDataMsgStreamType() dataMsgStreamType { 56 | return dMStreamJSON 57 | } 58 | 59 | func (m *dataMsgJSON) getMetaDataPath() (error, string) { 60 | return fmt.Errorf("JSON CODEC: path extraction is not supported"), "" 61 | } 62 | 63 | func (m *dataMsgJSON) getMetaDataIdentifier() (error, string) { 64 | return nil, m.source.String() 65 | } 66 | 67 | func (m *dataMsgJSON) getMetaData() *dataMsgMetaData { 68 | return &dataMsgMetaData{ 69 | Path: "unsupported", 70 | Identifier: m.source.String(), 71 | } 72 | } 73 | 74 | type codecJSON struct { 75 | name string 76 | } 77 | 78 | // 79 | // Produce a JSON type codec 80 | func getNewCodecJSON(name string) (error, codec) { 81 | c := &codecJSON{ 82 | name: name, 83 | } 84 | 85 | return nil, c 86 | } 87 | 88 | func (p *codecJSON) dataMsgToBlock(dM dataMsg) (error, []byte) { 89 | return fmt.Errorf("CODEC JSON: only decoding is supported currently"), 90 | nil 91 | } 92 | 93 | // nextBlock allows JSON to produce dataMsg 94 | func (p *codecJSON) blockToDataMsgs(source msgproducer, nextBlock []byte) ( 95 | error, []dataMsg) { 96 | // 97 | // Count the decoded message against the source, section and 98 | // type 99 | codecMetaMonitor.Decoded.WithLabelValues( 100 | p.name, source.String(), encodingToName(ENCODING_JSON)).Inc() 101 | codecMetaMonitor.DecodedBytes.WithLabelValues( 102 | p.name, source.String(), encodingToName(ENCODING_JSON)).Add( 103 | float64(len(nextBlock))) 104 | 105 | dMs := make([]dataMsg, 1) 106 | dMs[0] = &dataMsgJSON{ 107 | original: nextBlock, 108 | source: source, 109 | } 110 | 111 | return nil, dMs 112 | } 113 | -------------------------------------------------------------------------------- /codec_json_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // February 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 
6 | // 7 | // 8 | package main 9 | 10 | import ( 11 | "bytes" 12 | "testing" 13 | ) 14 | 15 | func TestCodecJSON(t *testing.T) { 16 | 17 | var codecJSONTestSource testSource 18 | 19 | err, p := getNewCodecJSON("JSON CODEC TEST") 20 | if err != nil { 21 | t.Errorf("Failed to get JSON codec [%v]", err) 22 | return 23 | } 24 | 25 | testJSONMsg := []byte(`{"Name":"Alice","Body":"Hello","Test":1294706395881547000}`) 26 | err, dMs := p.blockToDataMsgs(&codecJSONTestSource, testJSONMsg) 27 | 28 | if err != nil { 29 | t.Errorf("Failed to get messages from JSON stream [%v]", err) 30 | return 31 | } 32 | 33 | dM := dMs[0] 34 | 35 | err, b := p.dataMsgToBlock(dM) 36 | if err == nil { 37 | t.Errorf("Unexpected unsupported") 38 | } 39 | 40 | err, _ = dM.getMetaDataIdentifier() 41 | if err != nil { 42 | t.Errorf("Failed to retrieve identifier [%v]", err) 43 | } 44 | 45 | description := dM.getDataMsgDescription() 46 | if description == "" { 47 | t.Errorf("Failed to retrieve description") 48 | } 49 | 50 | err, _ = dM.getMetaDataPath() 51 | if err == nil { 52 | t.Errorf("Unexpected unsupported, no path to fetch") 53 | } 54 | 55 | err, b = dM.produceByteStream(&dataMsgStreamSpec{streamType: dMStreamGPB}) 56 | if err == nil { 57 | t.Errorf("Unexpected unsupported GPB byte stream from JSON") 58 | } 59 | 60 | err, b = dM.produceByteStream(dataMsgStreamSpecDefault) 61 | if err != nil { 62 | t.Errorf("Failed to produce byte stream from dataMsg [%v]", 63 | err) 64 | } 65 | 66 | if bytes.Compare(b, testJSONMsg) != 0 { 67 | t.Errorf("Failed to extract expected data") 68 | } 69 | 70 | } 71 | -------------------------------------------------------------------------------- /crypt.go: -------------------------------------------------------------------------------- 1 | // 2 | // July 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | // Crypto support used to RSA encrypt credentials used to dial in to router. 
9 | // 10 | 11 | package main 12 | 13 | import ( 14 | "bufio" 15 | "crypto/rand" 16 | "crypto/rsa" 17 | "crypto/sha256" 18 | "crypto/x509" 19 | "encoding/base64" 20 | "encoding/pem" 21 | "fmt" 22 | log "github.com/sirupsen/logrus" 23 | "golang.org/x/crypto/ssh/terminal" 24 | "io/ioutil" 25 | "os" 26 | ) 27 | 28 | func collect_pkey(pemFile string) (err error, key *rsa.PrivateKey) { 29 | 30 | // Read the private key 31 | pemData, err := ioutil.ReadFile(pemFile) 32 | if err != nil { 33 | return fmt.Errorf("Read RSA pem: %v; access to RSA pem required", err), nil 34 | } 35 | 36 | // Extract the PEM-encoded data block 37 | block, _ := pem.Decode(pemData) 38 | if block == nil { 39 | return fmt.Errorf("Read key data not in PEM format"), nil 40 | } 41 | 42 | if block.Type != "RSA PRIVATE KEY" { 43 | return fmt.Errorf("Key type RSA required but found [%v]", block.Type), nil 44 | } 45 | 46 | pk, err := x509.ParsePKCS1PrivateKey(block.Bytes) 47 | 48 | return err, pk 49 | } 50 | 51 | func encrypt_password(pemFile string, password []byte) (error, string) { 52 | 53 | err, pkey := collect_pkey(pemFile) 54 | if err != nil { 55 | return fmt.Errorf("Private key parse failure: %s", err), "" 56 | } 57 | 58 | ciphertxt, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, 59 | &pkey.PublicKey, password, []byte("")) 60 | if err != nil { 61 | return fmt.Errorf("Encryption failed: %s", err), "" 62 | } 63 | 64 | return nil, base64.StdEncoding.EncodeToString(ciphertxt) 65 | } 66 | 67 | func decrypt_password(pemFile string, p string) (error, string) { 68 | 69 | err, pkey := collect_pkey(pemFile) 70 | if err != nil { 71 | return fmt.Errorf("\nPrivate key parse failure: %s", err), "" 72 | } 73 | 74 | pb64, err := base64.StdEncoding.DecodeString(p) 75 | if err != nil { 76 | return fmt.Errorf("\nFailed to extract base64 from cipher string: %v", err), "" 77 | } 78 | out, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, pkey, pb64, []byte("")) 79 | if err != nil { 80 | return fmt.Errorf("\nFailed to decrypt: %v", err), "" 81 | } 82 | 83 | return nil, string(out) 84 | } 85 | 86 | type cryptUserPasswordCollector struct { 87 | // Username and password. Password is stored in encrypted form if 88 | // pem file is passed in from cli. pem points at pem file setting. 89 | username string 90 | password string 91 | pem string 92 | } 93 | 94 | // 95 | // Stateless utility to collect username and password 96 | func cryptCollect(name string, authenticator string) ( 97 | error, string, string) { 98 | 99 | b := bufio.NewReader(os.Stdin) 100 | for { 101 | fmt.Printf("\nCRYPT Client [%s],[%v]\n Enter username: ", 102 | name, authenticator) 103 | user, more, err := b.ReadLine() 104 | if more { 105 | fmt.Printf("Username too long") 106 | continue 107 | } 108 | if err != nil { 109 | fmt.Printf("Failed to collect username") 110 | continue 111 | } 112 | if string(user) == "" { 113 | fmt.Println("Empty username, try again") 114 | continue 115 | } 116 | fmt.Printf(" Enter password: ") 117 | pw, err := terminal.ReadPassword(int(os.Stdin.Fd())) 118 | if err != nil { 119 | fmt.Printf("Failed to collect password") 120 | continue 121 | } 122 | 123 | fmt.Printf("\n") 124 | 125 | return nil, string(user), string(pw) 126 | } 127 | } 128 | 129 | func (c *cryptUserPasswordCollector) getUP() (string, string, error) { 130 | 131 | var err error 132 | var pw string 133 | 134 | if len(c.pem) > 0 { 135 | err, pw = decrypt_password(c.pem, c.password) 136 | } else { 137 | // clear! 
138 | 		pw = c.password
139 | 	}
140 | 
141 | 	return c.username, pw, err
142 | }
143 | 
144 | //
145 | // Handler of configuration of username and password. Used by various
146 | // modules consistently (e.g. grpc dialin, influxdb metrics)
147 | func (c *cryptUserPasswordCollector) handleConfig(
148 | 	nc nodeConfig, configSection string, authenticator string) error {
149 | 
150 | 	var user, pw_pem string
151 | 	var err error
152 | 
153 | 	logctx := logger.WithFields(log.Fields{
154 | 		"name": configSection,
155 | 		"authenticator": authenticator,
156 | 	})
157 | 
158 | 	pw, _ := nc.config.GetString(configSection, "password")
159 | 
160 | 	//
161 | 	// A password is setup in config. The password is only ever stored
162 | 	// encrypted, which means we need the private key to decrypt it.
163 | 	// This would have been passed in as a pemfile.
164 | 	if len(pw) > 0 {
165 | 
166 | 		if len(*pemFileName) == 0 {
167 | 			logctx.Error(
168 | 				"Encrypted password included in configuration but '-pem' option not passed in. " +
169 | 					"RSA key pair file used to encrypt password must be passed in as -pem option")
170 | 			return fmt.Errorf("Authentication setup inconsistent")
171 | 		}
172 | 
173 | 		//
174 | 		// Validate decryption works.
175 | 		pw_pem = *pemFileName
176 | 		user, err = nc.config.GetString(configSection, "username")
177 | 		if err == nil {
178 | 			err, _ = decrypt_password(pw_pem, pw)
179 | 		}
180 | 	} else {
181 | 		err, user, pw = cryptCollect(configSection, authenticator)
182 | 		if err == nil && len(*pemFileName) > 0 {
183 | 			var epw string
184 | 			//
185 | 			// Let's encrypt the password, write it to new config, and
186 | 			// advise the user accordingly if they provided keys (pemFileName).
187 | 			err, epw = encrypt_password(*pemFileName, []byte(pw))
188 | 			if err == nil {
189 | 				fmt.Printf("Generating sample config...")
190 | 				nc.config.AddOption(configSection, "username", user)
191 | 				nc.config.AddOption(configSection, "password", epw)
192 | 				// We successfully encrypted the password; may as well
193 | 				// revert to storing the encrypted password for this run too.
194 | 				pw = epw
195 | 				pw_pem = *pemFileName
196 | 				//
197 | 				// Write out a temporary config. We may need to move this
198 | 				// to a common point, if multiple sections force a rewrite, to
199 | 				// avoid writing multiple times.
200 | 				newconfigfile := *configFileName + "_REWRITTEN"
201 | 				err = nc.config.WriteConfigFile(newconfigfile, 0600, "")
202 | 				fmt.Printf("A new configuration file [%s] has been written including "+
203 | 					"user name and encrypted password.\nIn future, you can run pipeline "+
204 | 					"non-interactively.\nDo remember to run pipeline with '-pem %s -config %s' options.\n",
205 | 					newconfigfile, *pemFileName, newconfigfile)
206 | 			} else {
207 | 				fmt.Printf("Failed to encrypt password: [%v]\n", err)
208 | 			}
209 | 		}
210 | 	}
211 | 
212 | 	if err != nil {
213 | 		logctx.WithError(err).Error("failed to setup authentication")
214 | 		return err
215 | 	}
216 | 
217 | 	if len(user) == 0 {
218 | 		err = fmt.Errorf("Authentication username zero length")
219 | 		logctx.WithError(err).Error("failed to setup authentication")
220 | 		return err
221 | 	}
222 | 
223 | 	if len(pw) == 0 {
224 | 		err = fmt.Errorf("Authentication password zero length")
225 | 		logctx.WithError(err).Error("failed to setup authentication")
226 | 		return err
227 | 	}
228 | 
229 | 	//
230 | 	// We have a user, a pw, and, optionally, a pem file. Possibly an
231 | 	// error. We're done.
232 | 	c.username = user
233 | 	c.password = pw
234 | 	c.pem = pw_pem
235 | 	logctx.WithFields(log.Fields{"username": user, "pem": pw_pem}).Info(
236 | 		"setup authentication")
237 | 
238 | 	return nil
239 | }
240 | 
241 | type userPasswordCollector interface {
242 | 	handleConfig(nc nodeConfig, configSection string, authenticator string) error
243 | 	getUP() (string, string, error)
244 | }
245 | 
246 | type userPasswordCollectorFactory func() userPasswordCollector
--------------------------------------------------------------------------------
/crypt_test.go:
--------------------------------------------------------------------------------
1 | //
2 | // July 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | // Test crypto support used to RSA encrypt credentials used to dial in to router.
9 | //
10 | package main
11 | 
12 | import (
13 | 	"github.com/dlintw/goconf"
14 | 	"strings"
15 | 	"testing"
16 | )
17 | 
18 | func TestCrypt(t *testing.T) {
19 | 
20 | 	passwords := []string{
21 | 		"a",
22 | 		"_",
23 | 		"aaaaaaaaaaaaaaaaaaaaaaaaaa",
24 | 		"1_£$0909jjdsoi08",
25 | 		"RANDOMRANDOMRANDOM",
26 | 	}
27 | 
28 | 	pemfile_bad := "id_rsa_FOR_TEST_ONLY_BAD"
29 | 	err, _ := collect_pkey(pemfile_bad)
30 | 	if err == nil {
31 | 		t.Fatalf("\nPEM file is nonexistent but collect_pkey did not error")
32 | 	}
33 | 
34 | 	pemfile := "crypt_test.go"
35 | 	err, _ = collect_pkey(pemfile)
36 | 	if err == nil {
37 | 		t.Fatalf("\nPEM file passed is bad, but collect_pkey did not error")
38 | 	}
39 | 
40 | 	pemfile = "id_rsa_FOR_TEST_ONLY"
41 | 	err, _ = collect_pkey(pemfile)
42 | 	if err != nil {
43 | 		t.Fatalf("\nPrivate key parse failure: %s", err)
44 | 	}
45 | 
46 | 	for _, p := range passwords {
47 | 
48 | 		err, _ := encrypt_password(pemfile_bad, []byte(p))
49 | 		if err == nil {
50 | 			t.Fatalf("\nPEM file is non-existent, but encrypt does not fail")
51 | 		}
52 | 
53 | 		err, encoded_cipher := encrypt_password(pemfile, []byte(p))
54 | 		if err != nil {
55 | 			t.Fatalf("\nFailed to encrypt password: %s", err)
56 | 		}
57 | 
58 | 		err, _ = decrypt_password(pemfile_bad, encoded_cipher)
59 | 		if err == nil {
60 | 			t.Fatalf("\nPEM file is non-existent, but decrypt does not fail")
61 | 		}
62 | 
63 | 		err, _ = decrypt_password("id_rsa_FOR_TEST_ONLY_ALT", encoded_cipher)
64 | 		if err == nil {
65 | 			t.Fatalf("\nDecrypted successfully even if pem mismatched")
66 | 		}
67 | 
68 | 		err, out := decrypt_password(pemfile, encoded_cipher)
69 | 		if err != nil {
70 | 			t.Fatalf("\nFailed to decrypt: %v", err)
71 | 		}
72 | 
73 | 		if strings.Compare(p, string(out)) != 0 {
74 | 			t.Fatalf("Passwords en/decrypt not symmetric: %v in %s out", p, out)
75 | 		} else {
76 | 			// t.Logf("Passwords en/decrypt symmetric: %v in %s out", p, out)
77 | 		}
78 | 	}
79 | }
80 | 
81 | type cryptTestUserPasswordCollector struct {
82 | 	real *cryptUserPasswordCollector
83 | }
84 | 
85 | func (c *cryptTestUserPasswordCollector) handleConfig(
86 | 	nc nodeConfig, name string, server string) error {
87 | 	return c.real.handleConfig(nc, name, server)
88 | }
89 | 
90 | func (c *cryptTestUserPasswordCollector) getUP() (
91 | 	string, string, error) {
92 | 	return c.real.getUP()
93 | }
94 | 
95 | func cryptTestUPCollectorFactory() userPasswordCollector {
96 | 	return &cryptTestUserPasswordCollector{}
97 | }
98 | 
99 | func TestCryptCollect(t *testing.T) {
100 | 
101 | 	var nc nodeConfig
102 | 	var err error
103 | 
104 | 	nc.config, err = goconf.ReadConfigFile("pipeline_test.conf")
105 | 	if err != nil {
106 | 		t.Fatalf("Failed to read config [%v]", err)
107 | 	}
108 | 
109 | 	c := 
&cryptTestUserPasswordCollector{ 110 | real: &cryptUserPasswordCollector{}, 111 | } 112 | 113 | // 114 | // Now that a password is set, let's test the non-interactive 115 | // negative and positive path. 116 | name := "crypttest" 117 | user := "user" 118 | pw := "mysillysillysillypassword" 119 | nc.config.AddOption(name, "password", pw) 120 | err = c.handleConfig(nc, name, "myauthenticatorBAD") 121 | if err == nil { 122 | t.Fatalf("Test passed but should fail, no pemfile") 123 | } 124 | 125 | *pemFileName = "id_rsa_FOR_TEST_ONLY" 126 | err = c.handleConfig(nc, name, "myauthenticatorBAD") 127 | if err == nil { 128 | t.Fatalf("Test passed but should fail, bad password") 129 | } 130 | 131 | // 132 | // Rewrite with correct password. 133 | err, epw := encrypt_password(*pemFileName, []byte(pw)) 134 | if err != nil { 135 | t.Fatalf("Failed to encrypt password, %v", err) 136 | } 137 | nc.config.AddOption(name, "password", epw) 138 | 139 | nc.config.AddOption(name, "username", "") 140 | err = c.handleConfig(nc, name, "myauthenticatorBAD") 141 | if err == nil { 142 | t.Fatalf("Test passed but should fail, empty username") 143 | } 144 | 145 | nc.config.AddOption(name, "username", user) 146 | err = c.handleConfig(nc, name, "myauthenticator") 147 | if err != nil { 148 | t.Fatalf("Test failed to parse valid config, %v", err) 149 | } 150 | 151 | ruser, rpw, err := c.getUP() 152 | if err != nil { 153 | t.Fatalf("Test failed to return UP, %v", err) 154 | } 155 | 156 | if ruser != user { 157 | t.Fatalf("Wrong username returned") 158 | } 159 | if rpw != pw { 160 | t.Fatalf("Wrong password returned") 161 | } 162 | 163 | } 164 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.12-stretch as build 2 | ARG GITHUBORG=cisco-ie 3 | ARG REV=master 4 | ARG GO111MODULE=on 5 | ARG GOPROXY=https://proxy.golang.org 6 | ARG CGO_ENABLED=0 7 | RUN mkdir -p /data/ && cd /data/ && git clone -b "$REV" --single-branch --depth 1 https://github.com/${GITHUBORG}/pipeline-gnmi && \ 8 | cd pipeline-gnmi && make linux/amd64 && strip bin/pipeline_linux_amd64 9 | 10 | FROM alpine 11 | RUN apk add --no-cache openssl 12 | VOLUME /etc/pipeline 13 | COPY --from=build /data/pipeline-gnmi/bin/pipeline_linux_amd64 /bin/pipeline 14 | COPY entrypoint.sh metrics.json /etc/pipeline/ 15 | RUN chmod +x /etc/pipeline/entrypoint.sh 16 | ENTRYPOINT ["/etc/pipeline/entrypoint.sh"] 17 | -------------------------------------------------------------------------------- /docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | pipeline: 4 | build: . 
5 | environment: 6 | - PIPELINE_gnmi_stage=xport_input 7 | - PIPELINE_gnmi_type=gnmi 8 | - PIPELINE_gnmi_server=192.168.0.1:57777 9 | - PIPELINE_gnmi_path1=Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters@10 10 | - PIPELINE_gnmi_tls=false 11 | - PIPELINE_gnmi_username=cisco 12 | - PIPELINE_gnmi__password=cisco 13 | - PIPELINE_metrics_stage=xport_output 14 | - PIPELINE_metrics_type=metrics 15 | - PIPELINE_metrics_file=/etc/pipeline/metrics.json 16 | - PIPELINE_metrics_output=influx 17 | - PIPELINE_metrics_influx=http://influxdb:8086 18 | - PIPELINE_metrics_database=telemetry 19 | - PIPELINE_metrics_username=admin 20 | - PIPELINE_metrics__password=admin 21 | 22 | influxdb: 23 | image: influxdb:alpine 24 | environment: 25 | - INFLUXDB_DB=telemetry 26 | - INFLUXDB_ADMIN_PASSWORD=admin 27 | 28 | chronograf: 29 | image: chronograf:alpine 30 | command: --influxdb-url=http://influxdb:8086 31 | ports: [ "8888:8888" ] 32 | 33 | 34 | -------------------------------------------------------------------------------- /docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | CONFIG=${CONFIG:-/etc/pipeline/pipeline.conf} 3 | 4 | export KEY=${KEY:-/etc/pipeline/pipeline_key} 5 | [ -f "$KEY" ] || openssl genrsa -out $KEY 4096 6 | 7 | if [ -f "$REPLAY" ]; then 8 | export REPLAY_PIPE=/tmp/replay.pipe 9 | rm -f $REPLAY_PIPE 10 | mkfifo $REPLAY_PIPE 11 | ( echo Replaying dataset...; xzcat $REPLAY > $REPLAY_PIPE; rm -f $REPLAY_PIPE; echo Replay completed. ) & 12 | fi 13 | 14 | if [ ! -f "$CONFIG" ]; then 15 | oldsection= 16 | env | grep ^PIPELINE_ | cut -d_ -f2- | sort | while read line; do 17 | section=$(echo "$line" | cut -d_ -f1) 18 | option=$(echo "$line" | cut -d_ -f2-) 19 | key=$(echo "$option" | cut -d= -f1) 20 | value=$(echo "$option" | cut -d= -f2-) 21 | 22 | if [ "$key" == "_password" -o "$key" == "_secret" ]; then 23 | [ "$key" == "_password" ] && password=$value || password=$(cat $value) 24 | key=password 25 | value= 26 | 27 | for pwline in $(echo -n ${password} | openssl pkeyutl -encrypt -inkey $KEY -pkeyopt rsa_padding_mode:oaep \ 28 | -pkeyopt rsa_oaep_md:sha256 -pkeyopt rsa_mgf1_md:sha256 | openssl enc -base64); do 29 | value=$value$pwline 30 | done 31 | fi 32 | 33 | [ "$section" != "$oldsection" ] && echo -e "\n[$section]" >> $CONFIG 34 | echo "$key = $value" >> $CONFIG 35 | 36 | oldsection=$section 37 | done 38 | fi 39 | 40 | exec /bin/pipeline $DEBUG --config=$CONFIG --pem=$KEY --log= -------------------------------------------------------------------------------- /docker/metrics.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "basepath" : "Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters", 4 | "spec" : { 5 | "fields" : [ 6 | {"name" : "interface-name", "tag" : true}, 7 | {"name" : "packets-received"}, 8 | {"name" : "bytes-received"}, 9 | {"name" : "packets-sent", "track": true}, 10 | {"name" : "bytes-sent"}, 11 | {"name" : "output-drops"}, 12 | {"name" : "output-queue-drops"}, 13 | {"name" : "input-drops"}, 14 | {"name" : "input-queue-drops"}, 15 | {"name" : "input-errors"}, 16 | {"name" : "crc-errors"}, 17 | {"name" : "input-ignored-packets"}, 18 | {"name" : "output-errors"}, 19 | {"name" : "output-buffer-failures"}, 20 | {"name" : "carrier-transitions"} 21 | ] 22 | } 23 | }, 24 | { 25 | "basepath" : "Cisco-IOS-XR-mpls-te-oper:mpls-te/tunnels/tunnel-auto-bandwidths/tunnel-auto-bandwidth", 
26 | "spec" : { 27 | "fields" : [ 28 | {"name" : "tunnel-name", "tag" : true}, 29 | {"name" : "highest-bandwidth"}, 30 | {"name" : "last-sample-bandwidth"}, 31 | {"name" : "samples-collected"}, 32 | {"name" : "tunnel-requested-bandwidth"} 33 | ] 34 | } 35 | }, 36 | { 37 | "basepath" : "Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/output/statistics", 38 | "spec" : { 39 | "fields" : [ 40 | {"name" : "interface-name", "tag" : true}, 41 | {"name" : "policy-name", "tag" : true}, 42 | { 43 | "name" : "class-stats", 44 | "fields" : [ 45 | {"name" : "class-name", "tag" : true}, 46 | { 47 | "name" : "general-stats", 48 | "fields" : [ 49 | {"name" : "transmit-packets"}, 50 | {"name" : "transmit-bytes"}, 51 | {"name" : "total-drop-packets"}, 52 | {"name" : "total-drop-bytes"}, 53 | {"name" : "total-drop-rate"}, 54 | {"name" : "total-transmit-rate"} 55 | ] 56 | }, 57 | { 58 | "name" : "queue-stats-array", 59 | "fields" : [ 60 | {"name" : "queue-id", "tag" : true}, 61 | {"name" : "tail-drop-packets"}, 62 | {"name" : "tail-drop-bytes"}, 63 | { 64 | "name" : "queue-average-length", 65 | "fields" : [ 66 | {"name" : "value"}, 67 | {"name" : "unit", "tag" : true} 68 | ] 69 | }, 70 | {"name" : "random-drop-packets"}, 71 | {"name" : "random-drop-bytes"}, 72 | {"name" : "conform-packets"}, 73 | {"name" : "conform-bytes"}, 74 | {"name" : "exceed-packets"}, 75 | {"name" : "exceed-bytes"} 76 | ] 77 | } 78 | ] 79 | } 80 | ] 81 | } 82 | }, 83 | { 84 | "basepath" : "Cisco-IOS-XR-procmem-oper:processes-memory/nodes/node/process-ids/process-id", 85 | "spec" : { 86 | "fields" : [ 87 | {"name":"node-name", "tag": true}, 88 | {"name":"name", "tag": true}, 89 | {"name":"text-seg-size"}, 90 | {"name":"data-seg-size"}, 91 | {"name":"stack-seg-size"}, 92 | {"name":"malloc-size"} 93 | ] 94 | } 95 | }, 96 | { 97 | "basepath" : "Cisco-IOS-XR-nto-misc-oper:memory-summary/nodes/node/summary", 98 | "spec" : { 99 | "fields" : [ 100 | {"name":"node-name", "tag": true}, 101 | {"name":"ram-memory"}, 102 | {"name":"free-physical-memory"}, 103 | {"name":"system-ram-memory"}, 104 | {"name":"free-application-memory"} 105 | ] 106 | } 107 | }, 108 | { 109 | "basepath" : "Cisco-IOS-XR-wdsysmon-fd-oper:system-monitoring/cpu-utilization", 110 | "spec" : { 111 | "fields" : [ 112 | {"name":"node-name", "tag": true}, 113 | {"name":"total-cpu-one-minute"}, 114 | {"name":"total-cpu-five-minute"}, 115 | {"name":"total-cpu-fifteen-minute"}, 116 | { 117 | "name":"process-cpu", 118 | "fields" : [ 119 | {"name":"process-name", "tag": true}, 120 | {"name":"process-cpu-one-minute"}, 121 | {"name":"process-cpu-five-minute"}, 122 | {"name":"process-cpu-fifteen-minute"} 123 | ] 124 | } 125 | ] 126 | } 127 | } 128 | ] 129 | -------------------------------------------------------------------------------- /docs/deploy_pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/docs/deploy_pipeline.png -------------------------------------------------------------------------------- /docs/deploy_pipelinex2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/docs/deploy_pipelinex2.png -------------------------------------------------------------------------------- /docs/deploy_pipelinex3.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/docs/deploy_pipelinex3.png
--------------------------------------------------------------------------------
/docs/memoryutil.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/docs/memoryutil.png
--------------------------------------------------------------------------------
/docs/pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/docs/pipeline.png
--------------------------------------------------------------------------------
/docs/routerinterfaces.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/docs/routerinterfaces.png
--------------------------------------------------------------------------------
/encap.go:
--------------------------------------------------------------------------------
1 | //
2 | // February 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | // Encap factory
9 | 
10 | package main
11 | 
12 | import (
13 | 	"fmt"
14 | )
15 | 
16 | type encapParser interface {
17 | 	//
18 | 	// nextBlockBuffer, nextBlock
19 | 	// Based on the current state, provide the next fixed length
20 | 	// buffer to read. Note this type of encap works for encoding
21 | 	// where the length of the header or payload to collect next
22 | 	// is known a priori (typical of TLV encoding).
23 | 	//
24 | 	nextBlockBuffer() (error, *[]byte)
25 | 	nextBlock(nextBlock []byte, source msgproducer) (error, []dataMsg)
26 | }
27 | 
28 | //
29 | // Fetch an encapParser for the type (encap) corresponding to the
30 | // encap, and indicate which pipeline node (name) and external
31 | // producer (producer) are involved.
32 | func getNewEncapParser(name string, encap string, msgproducer msgproducer) (
33 | 	error, encapParser) {
34 | 
35 | 	switch encap {
36 | 	case "st":
37 | 		err, p := getNewEncapSTParser(name, msgproducer)
38 | 		return err, p
39 | 	}
40 | 
41 | 	return fmt.Errorf("ENCAP: failed to produce parser"), nil
42 | }
--------------------------------------------------------------------------------
/encap_st.go:
--------------------------------------------------------------------------------
1 | //
2 | // February 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | // Handle Streaming Telemetry Header.
9 | 10 | package main 11 | 12 | import ( 13 | "bytes" 14 | "encoding/binary" 15 | "fmt" 16 | ) 17 | 18 | const ( 19 | ENC_ST_MAX_DGRAM uint32 = 64 * 1024 20 | ENC_ST_MAX_PAYLOAD uint32 = 1024 * 1024 21 | ENC_ST_HDR_MSG_FLAGS_NONE uint16 = 0 22 | ENC_ST_HDR_MSG_SIZE uint32 = 12 23 | ENC_ST_HDR_VERSION uint16 = 1 24 | ) 25 | 26 | type encapSTHdrMsgType uint16 27 | 28 | const ( 29 | ENC_ST_HDR_MSG_TYPE_UNSED encapSTHdrMsgType = iota 30 | ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA 31 | ENC_ST_HDR_MSG_TYPE_HEARTBEAT 32 | ) 33 | 34 | type encapSTHdrMsgEncap uint16 35 | 36 | const ( 37 | ENC_ST_HDR_MSG_ENCAP_UNSED encapSTHdrMsgEncap = iota 38 | ENC_ST_HDR_MSG_ENCAP_GPB 39 | ENC_ST_HDR_MSG_ENCAP_JSON 40 | ENC_ST_HDR_MSG_ENCAP_GPB_COMPACT 41 | ENC_ST_HDR_MSG_ENCAP_GPB_KV 42 | ) 43 | 44 | type encapSTHdr struct { 45 | MsgType encapSTHdrMsgType 46 | MsgEncap encapSTHdrMsgEncap 47 | MsgHdrVersion uint16 48 | Msgflag uint16 49 | Msglen uint32 50 | } 51 | 52 | type encapSTParseState int 53 | 54 | const ( 55 | ENC_ST_WAIT_FOR_HDR encapSTParseState = iota 56 | ENC_ST_WAIT_FOR_DATA 57 | ENC_ST_WAIT_FOR_ALL // datagram service 58 | ) 59 | 60 | type encapSTParser struct { 61 | name string 62 | codecs []codec 63 | hdr []byte 64 | cachedMsgEncap encapSTHdrMsgEncap 65 | nextCodec codec 66 | nextBlockSize uint32 67 | state encapSTParseState 68 | // Reference to the source field is passed down channels in 69 | // messages so it is important to remember it is immutable 70 | source msgproducer 71 | } 72 | 73 | func encapSTFromEncoding(enc encoding) (error, encapSTHdrMsgEncap) { 74 | var err error 75 | var encst encapSTHdrMsgEncap 76 | 77 | switch enc { 78 | case ENCODING_GPB: 79 | encst = ENC_ST_HDR_MSG_ENCAP_GPB 80 | case ENCODING_JSON: 81 | encst = ENC_ST_HDR_MSG_ENCAP_JSON 82 | case ENCODING_GPB_KV: 83 | encst = ENC_ST_HDR_MSG_ENCAP_GPB_KV // legacy support 84 | default: 85 | err = fmt.Errorf("Failed to produce encapSTHdrMsgEncap from %d", enc) 86 | } 87 | 88 | return err, encst 89 | } 90 | 91 | func getNewEncapSTParser(name string, source msgproducer) (error, encapParser) { 92 | 93 | var hdr encapSTHdr 94 | 95 | p := &encapSTParser{ 96 | hdr: make([]byte, binary.Size(hdr)), 97 | state: ENC_ST_WAIT_FOR_HDR, 98 | source: source, 99 | name: name, 100 | } 101 | 102 | if source == nil { 103 | // 104 | // This is a datagram service, not a connection oriented 105 | // service. 106 | p.state = ENC_ST_WAIT_FOR_ALL 107 | } 108 | 109 | // 110 | // Setup all possible codecs or fail 111 | p.codecs = make([]codec, ENCODING_MAX) 112 | for _, e := range codec_support { 113 | err, codec := getCodec(name, e) 114 | if err != nil { 115 | return err, nil 116 | } 117 | p.codecs[e] = codec 118 | } 119 | 120 | return nil, p 121 | } 122 | 123 | // 124 | // Buffer for next block 125 | func (p *encapSTParser) nextBlockBuffer() (error, *[]byte) { 126 | 127 | switch p.state { 128 | 129 | case ENC_ST_WAIT_FOR_DATA: 130 | 131 | if p.nextBlockSize == 0 { 132 | return fmt.Errorf("ENCAP ST: req 0 size buffer"), nil 133 | } 134 | 135 | // 136 | // Extra safe. This check is not strictly necessary, because we 137 | // checked when we set it, but... 138 | if p.nextBlockSize > ENC_ST_MAX_PAYLOAD { 139 | return fmt.Errorf("ENCAP ST: req %d size buf, max %d", 140 | p.nextBlockSize, ENC_ST_MAX_PAYLOAD), nil 141 | } 142 | 143 | buffer := make([]byte, p.nextBlockSize) 144 | return nil, &buffer 145 | 146 | case ENC_ST_WAIT_FOR_HDR: 147 | return nil, &p.hdr 148 | 149 | case ENC_ST_WAIT_FOR_ALL: 150 | // 151 | // Length of datagram is unknown. Take worst case. 
We must do 152 | // better somehow here. 153 | buffer := make([]byte, ENC_ST_MAX_DGRAM) 154 | return nil, &buffer 155 | } 156 | 157 | return fmt.Errorf( 158 | "ENCAP ST: parser in unknown state, buffer req"), nil 159 | } 160 | 161 | // 162 | // cacheCodec caches the codec in use for encap parser, or validates 163 | // and uses it. 164 | func (p *encapSTParser) cacheCodec(encap encapSTHdrMsgEncap) error { 165 | 166 | //fmt.Printf("HDR: %+v\n", hdr) 167 | if p.nextCodec == nil || encap != p.cachedMsgEncap { 168 | // 169 | // Setup next codec. 170 | switch encap { 171 | case ENC_ST_HDR_MSG_ENCAP_GPB: 172 | p.nextCodec = p.codecs[ENCODING_GPB] 173 | case ENC_ST_HDR_MSG_ENCAP_GPB_COMPACT: 174 | p.nextCodec = p.codecs[ENCODING_GPB_COMPACT] 175 | case ENC_ST_HDR_MSG_ENCAP_GPB_KV: 176 | p.nextCodec = p.codecs[ENCODING_GPB_KV] 177 | case ENC_ST_HDR_MSG_ENCAP_JSON: 178 | p.nextCodec = p.codecs[ENCODING_JSON] 179 | default: 180 | return fmt.Errorf("ENCAP ST: no codec for msg encap [%+v]", 181 | encap) 182 | } 183 | p.cachedMsgEncap = encap 184 | } else { 185 | // This is the common path in a live session 186 | // since the codec to use would be cached. 187 | // 188 | } 189 | 190 | return nil 191 | } 192 | 193 | // 194 | // Given requested amount of data, return next block size, and 195 | // possibly a dataMsg 196 | func (p *encapSTParser) nextBlock(nextBlock []byte, source msgproducer) ( 197 | error, []dataMsg) { 198 | 199 | if source != nil { 200 | // 201 | // Source overwritten per msg (handling datagrams) 202 | p.source = source 203 | } 204 | switch p.state { 205 | 206 | case ENC_ST_WAIT_FOR_ALL: 207 | // 208 | // Datagram service. Message is completely read into the 209 | // buffer already. All we need to do is validate content, 210 | // and use it for dM. 211 | var hdr encapSTHdr 212 | hdrbuf := bytes.NewReader(nextBlock) 213 | err := binary.Read(hdrbuf, binary.BigEndian, &hdr) 214 | if err != nil { 215 | return err, nil 216 | } 217 | 218 | // 219 | // We could relax this, but at the moment we only choose to 220 | // support one encoding on a given port. Different encodings 221 | // need to use different input sections, with different port 222 | // to listen on. 223 | err = p.cacheCodec(hdr.MsgEncap) 224 | if err != nil { 225 | return err, nil 226 | } 227 | 228 | // 229 | // Make sure msglen is sensible. hdrbuf.Len() is the unread 230 | // bit. 231 | hdrLen := len(nextBlock) - hdrbuf.Len() 232 | if hdr.Msglen > uint32(hdrbuf.Len()) { 233 | return fmt.Errorf( 234 | "ENCAP ST: drop datagram, payload len expect %d, have %d", 235 | hdr.Msglen, hdrbuf.Len()), nil 236 | } 237 | 238 | // 239 | // Unsupported flags? 240 | if hdr.Msgflag != ENC_ST_HDR_MSG_FLAGS_NONE { 241 | return fmt.Errorf( 242 | "ENCAP ST: flag in header unsupported (zlib?)"), nil 243 | } 244 | 245 | // 246 | // We have a codec otherwise we would have returned after 247 | // cacheCodec call. Extract payload and feed it to codec. 
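// Note that blockToDataMsgs returns a slice: a single payload
// block may decode into more than one dataMsg.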
248 | return p.nextCodec.blockToDataMsgs(p.source, nextBlock[hdrLen:]) 249 | 250 | case ENC_ST_WAIT_FOR_DATA: 251 | // 252 | // Track state, and return empty handed (no data yet) 253 | p.nextBlockSize = 0 254 | p.state = ENC_ST_WAIT_FOR_HDR 255 | 256 | if p.nextCodec != nil { 257 | return p.nextCodec.blockToDataMsgs(p.source, nextBlock) 258 | } 259 | 260 | return fmt.Errorf("ENCAP ST: codec not setup for decode"), nil 261 | 262 | case ENC_ST_WAIT_FOR_HDR: 263 | 264 | var hdr encapSTHdr 265 | hdrbuf := bytes.NewReader(nextBlock) 266 | err := binary.Read(hdrbuf, binary.BigEndian, &hdr) 267 | if err != nil { 268 | return err, nil 269 | } 270 | 271 | err = p.cacheCodec(hdr.MsgEncap) 272 | if err != nil { 273 | return err, nil 274 | } 275 | 276 | if hdr.Msglen > ENC_ST_MAX_PAYLOAD { 277 | return fmt.Errorf( 278 | "ENCAP ST: nextBlockBuffer failed; msg too long [%v]", hdr.Msglen), nil 279 | } 280 | 281 | if hdr.Msgflag != ENC_ST_HDR_MSG_FLAGS_NONE { 282 | return fmt.Errorf( 283 | "ENCAP ST: flag in header unsupported (zlib?)"), nil 284 | } 285 | 286 | // 287 | // Track state, and return empty handed (no data yet) 288 | p.nextBlockSize = hdr.Msglen 289 | p.state = ENC_ST_WAIT_FOR_DATA 290 | 291 | return nil, nil 292 | } 293 | 294 | return fmt.Errorf( 295 | "ENCAP ST: parser in unknown state, handling block"), nil 296 | } 297 | -------------------------------------------------------------------------------- /encap_st_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // February 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | "bytes" 13 | "encoding/binary" 14 | samples "github.com/cisco-ie/pipeline-gnmi/mdt_msg_samples" 15 | "testing" 16 | ) 17 | 18 | type testSource struct{} 19 | 20 | func (t *testSource) String() string { 21 | return "TEST source" 22 | } 23 | 24 | var encapSTTestSource testSource 25 | 26 | func TestSTParser(t *testing.T) { 27 | 28 | err, gp := getNewEncapSTParser("ENCAP ST TEST", &encapSTTestSource) 29 | if err != nil { 30 | t.Errorf("Failed to get ST test parser: %v", err) 31 | } 32 | 33 | // Type assert gp to the more specific so we can manipulate ST 34 | // specific state. 35 | p := gp.(*encapSTParser) 36 | 37 | err, _ = p.nextBlockBuffer() 38 | if err != nil { 39 | t.Errorf("Failed to get header buffer: %v", err) 40 | } 41 | 42 | // 43 | // Cheat to force error cases 44 | p.state = ENC_ST_WAIT_FOR_DATA 45 | err, _ = p.nextBlockBuffer() 46 | if err == nil { 47 | t.Errorf("Should have failed with request to get 0 size buffer") 48 | } 49 | 50 | p.nextBlockSize = ENC_ST_MAX_PAYLOAD + 1 51 | err, _ = p.nextBlockBuffer() 52 | if err == nil { 53 | t.Errorf("Should have failed with request to get oversize buffer") 54 | } 55 | 56 | // 57 | // Get a GPB message and encap it in ST header. 
58 | sample := samples.MDTSampleTelemetryTableFetchOne( 59 | samples.SAMPLE_TELEMETRY_DATABASE_BASIC) 60 | if sample == nil { 61 | t.Errorf("Failed to fetch data") 62 | return 63 | } 64 | payload := sample.SampleStreamGPB 65 | encap := encapSTHdr{ 66 | MsgType: ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA, 67 | MsgEncap: ENC_ST_HDR_MSG_ENCAP_GPB, 68 | MsgHdrVersion: ENC_ST_HDR_VERSION, 69 | Msgflag: ENC_ST_HDR_MSG_FLAGS_NONE, 70 | Msglen: uint32(len(payload)), 71 | } 72 | 73 | encodedMsgForLen := make([]byte, 0, len(payload)+256) 74 | hdrbuf := bytes.NewBuffer(encodedMsgForLen) 75 | err = binary.Write(hdrbuf, binary.BigEndian, &encap) 76 | if err != nil { 77 | t.Errorf("Failed to write data header") 78 | return 79 | } 80 | _, err = hdrbuf.Write(payload) 81 | if err != nil { 82 | t.Errorf("Failed to write payload") 83 | return 84 | } 85 | 86 | encodedMsg := hdrbuf.Bytes() 87 | // Start afresh 88 | err, gp = getNewEncapSTParser("ENCAP ST TEST", &encapSTTestSource) 89 | if err != nil { 90 | t.Errorf("Failed to get fresh ST test parser: %v", err) 91 | } 92 | p = gp.(*encapSTParser) 93 | 94 | err, dMs := p.nextBlock(encodedMsg, nil) 95 | if err != nil { 96 | t.Errorf("Failed to parse header: %v", err) 97 | } 98 | if dMs != nil { 99 | t.Errorf("Expected header but got data.") 100 | } 101 | if p.state != ENC_ST_WAIT_FOR_DATA { 102 | t.Errorf("Failed to transition to wait for payload") 103 | } 104 | if p.nextBlockSize != uint32(len(payload)) { 105 | t.Errorf("Failed to parse and track msg len") 106 | } 107 | 108 | err, dataBuf := p.nextBlockBuffer() 109 | if err != nil { 110 | t.Errorf("Failed to get data buffer") 111 | } 112 | 113 | if len(*dataBuf) != len(payload) { 114 | t.Errorf("Databuf returned len %d expected %d", 115 | len(*dataBuf), len(payload)) 116 | } 117 | 118 | err, dMs = p.nextBlock(payload, nil) 119 | if err != nil { 120 | t.Errorf("Failed to get data message") 121 | } 122 | 123 | if dMs == nil || len(dMs) != 1 { 124 | t.Errorf("Failed to extract expected data") 125 | } 126 | 127 | err, _ = dMs[0].produceByteStream(dataMsgStreamSpecDefault) 128 | if err != nil { 129 | t.Errorf("Failed to produce byte stream from dataMsg") 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /filter_test.json: -------------------------------------------------------------------------------- 1 | {"node_id":"{{.Node_id}}", 2 | "subscription":"{{.Subscription}}", 3 | "encoding_path":"{{.Encoding_path}}", 4 | "msg_timestamp":"{{.Msg_timestamp}}", 5 | "data":[{{range $index,$element := .Data}} 6 | {{if $index}}, {"timestamp":{{$element.Timestamp}},"ifname":{{index .Keys "interface-name"}},"pktsout":{{index .Content "packets-sent"}}}{{else}}{"timestamp":{{$element.Timestamp}},"ifname":{{index .Keys "interface-name"}},"pktsout":{{index .Content "packets-sent"}}}{{end}}{{end}}]} 7 | 8 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cisco-ie/pipeline-gnmi 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/Shopify/sarama v0.0.0-20180719133408-22114655baec 7 | github.com/Shopify/toxiproxy v2.1.4+incompatible // indirect 8 | github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect 9 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect 10 | github.com/bsm/sarama-cluster v2.1.15+incompatible // indirect 11 | github.com/cisco/bigmuddy-network-telemetry-proto v0.0.0-20170331103848-4419cd20fb73 12 |
github.com/davecgh/go-spew v1.1.1 // indirect 13 | github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490 14 | github.com/eapache/go-resiliency v1.1.0 // indirect 15 | github.com/eapache/go-xerial-snappy v0.0.0-20180703130627-040cc1a32f57 // indirect 16 | github.com/eapache/queue v0.0.0-20180227141424-093482f3f8ce // indirect 17 | github.com/evalphobia/logrus_fluent v0.5.0 18 | github.com/fluent/fluent-logger-golang v0.0.0-20180420045249-90f0f0270701 // indirect 19 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect 20 | github.com/golang/protobuf v1.2.0 21 | github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec // indirect 22 | github.com/influxdata/influxdb v0.0.0-20180718194353-468497c11f25 23 | github.com/kr/pretty v0.1.0 // indirect 24 | github.com/matttproud/golang_protobuf_extensions v1.0.0 // indirect 25 | github.com/onsi/ginkgo v1.8.0 // indirect 26 | github.com/onsi/gomega v1.5.0 // indirect 27 | github.com/openconfig/gnmi v0.0.0-20180620224109-3e9008957ea7 28 | github.com/philhofer/fwd v1.0.0 // indirect 29 | github.com/pierrec/lz4 v2.0.2+incompatible // indirect 30 | github.com/pierrec/xxHash v0.1.5 // indirect 31 | github.com/prometheus/client_golang v0.0.0-20180713201052-bcbbc08eb2dd 32 | github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 // indirect 33 | github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a // indirect 34 | github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d // indirect 35 | github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 // indirect 36 | github.com/sirupsen/logrus v0.0.0-20180712201618-92052687f8ec 37 | github.com/stretchr/testify v1.3.0 // indirect 38 | github.com/tinylib/msgp v1.0.2 // indirect 39 | golang.org/x/crypto v0.0.0-20180319061731-c3a3ad6d03f7 40 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd 41 | golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect 42 | google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d // indirect 43 | google.golang.org/grpc v1.13.0 44 | gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect 45 | gopkg.in/bsm/sarama-cluster.v2 v2.1.13 46 | gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect 47 | ) 48 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Shopify/sarama v0.0.0-20180719133408-22114655baec h1:oywOukuvAS0kIRdqnaFhrMaKIZHgiV8zck9FdnKFkBo= 2 | github.com/Shopify/sarama v0.0.0-20180719133408-22114655baec/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= 3 | github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= 4 | github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 5 | github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= 6 | github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 7 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= 8 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= 9 | github.com/bsm/sarama-cluster v2.1.15+incompatible h1:RkV6WiNRnqEEbp81druK8zYhmnIgdOjqSVi0+9Cnl2A= 10 | github.com/bsm/sarama-cluster v2.1.15+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= 11 | 
github.com/cisco/bigmuddy-network-telemetry-proto v0.0.0-20170331103848-4419cd20fb73 h1:OY9Y8GC83ufUIYMbVbpXdSorShxgiRBp1gdcsBif/Po= 12 | github.com/cisco/bigmuddy-network-telemetry-proto v0.0.0-20170331103848-4419cd20fb73/go.mod h1:KViqV52FNeJMIyfNFqQYxekPDAB12Kq/DSkoNjtE3kg= 13 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 14 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 15 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 16 | github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490 h1:I8/Qu5NTaiXi1TsEYmTeLDUlf7u9pEdbG+azjDvx8Vg= 17 | github.com/dlintw/goconf v0.0.0-20120228082610-dcc070983490/go.mod h1:jWlUIP63OLr0cV2FGN2IEzSFsMAe58if8rk/SAE0JRE= 18 | github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= 19 | github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 20 | github.com/eapache/go-xerial-snappy v0.0.0-20180703130627-040cc1a32f57 h1:a7ApRW6UdlZEHT8yq+LS1AcNLlg0HExVpQolhesA8Y4= 21 | github.com/eapache/go-xerial-snappy v0.0.0-20180703130627-040cc1a32f57/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 22 | github.com/eapache/queue v0.0.0-20180227141424-093482f3f8ce h1:wgJIjAWDwKCWtv+sEEuBOizA9xBmlOoMsP3Cs9ZDxKk= 23 | github.com/eapache/queue v0.0.0-20180227141424-093482f3f8ce/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 24 | github.com/evalphobia/logrus_fluent v0.5.0 h1:RGw1FPYXP8NDajEvGusTpJP313N9i14E77F9R94pl7g= 25 | github.com/evalphobia/logrus_fluent v0.5.0/go.mod h1:hasyj+CXm3BDP1YhFk/rnTcjlegyqvkokV9A25cQsaA= 26 | github.com/fluent/fluent-logger-golang v0.0.0-20180420045249-90f0f0270701 h1:aSv1cygdIRu6xQ2fTJftBraeF/l1aATEfzUmEQzL94g= 27 | github.com/fluent/fluent-logger-golang v0.0.0-20180420045249-90f0f0270701/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= 28 | github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= 29 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 30 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 31 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 32 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 33 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 34 | github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec h1:ZaSUjYC8aWT/om43c8YVz0SqjT8ABtqw7REbZGsCroE= 35 | github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 36 | github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= 37 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 38 | github.com/influxdata/influxdb v0.0.0-20180718194353-468497c11f25 h1:jjvOIWdcG2Fpw8ynexMZty3RNOTBfz3aginh14YOrtk= 39 | github.com/influxdata/influxdb v0.0.0-20180718194353-468497c11f25/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= 40 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 41 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 42 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 43 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 44 | github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 45 | github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLzM9Y858MNGCOACTvCW9TSAc= 46 | github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 47 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 48 | github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= 49 | github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 50 | github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= 51 | github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 52 | github.com/openconfig/gnmi v0.0.0-20180620224109-3e9008957ea7 h1:XF5uaJfTfqi1R0xGkTULfSTHbnfXQM2yjdvzutCz+ec= 53 | github.com/openconfig/gnmi v0.0.0-20180620224109-3e9008957ea7/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= 54 | github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= 55 | github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= 56 | github.com/pierrec/lz4 v2.0.2+incompatible h1:6spEXYEkGG74KeVRPzvSU0Fa3xO9DGO0bJcA6uIfwo8= 57 | github.com/pierrec/lz4 v2.0.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 58 | github.com/pierrec/xxHash v0.1.5 h1:n/jBpwTHiER4xYvK3/CdPVnLDPchj8eTJFFLUb4QHBo= 59 | github.com/pierrec/xxHash v0.1.5/go.mod h1:w2waW5Zoa/Wc4Yqe0wgrIYAGKqRMf7czn2HNKXmuL+I= 60 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 61 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 62 | github.com/prometheus/client_golang v0.0.0-20180713201052-bcbbc08eb2dd h1:T6Y9DkTZlfcCB1qlrwQFCiAcLhTjmkjxW8EP3ZhV7Ts= 63 | github.com/prometheus/client_golang v0.0.0-20180713201052-bcbbc08eb2dd/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 64 | github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4= 65 | github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 66 | github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a h1:JLXgXKi9RCmLk8DMn8+PCvN++iwpD3KptUbVvHBsKtU= 67 | github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 68 | github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d h1:iF+U2tTdys559fmqt0MNaC8QLIJh1twxIIOylDGhswM= 69 | github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 70 | github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165 h1:nkcn14uNmFEuGCb2mBZbBb24RdNRL08b/wb+xBOYpuk= 71 | github.com/rcrowley/go-metrics v0.0.0-20180503174638-e2704e165165/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 72 | github.com/sirupsen/logrus v0.0.0-20180712201618-92052687f8ec h1:5CSwqOHHmdbXJTu4BsYtAaBckHKQzanNv8QRpuPJ8tw= 73 | github.com/sirupsen/logrus v0.0.0-20180712201618-92052687f8ec/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 74 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 75 | github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= 76 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 77 | github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8= 78 | github.com/tinylib/msgp 
v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= 79 | golang.org/x/crypto v0.0.0-20180319061731-c3a3ad6d03f7 h1:ryKu9k3oWWgQUTahNaa+lDY2fruNO/7fqQVKQfl0Vmc= 80 | golang.org/x/crypto v0.0.0-20180319061731-c3a3ad6d03f7/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 81 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= 82 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 83 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 84 | golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= 85 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 86 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= 87 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 88 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 89 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 90 | google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d h1:6X4PYh39/Pjz7al8CnTTsk+jp3fG2KPpusrnwqzAW+M= 91 | google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 92 | google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= 93 | google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 94 | gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= 95 | gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= 96 | gopkg.in/bsm/sarama-cluster.v2 v2.1.13 h1:UpT7E5dmKM/2phmg5Rs/jykHUrsjbzY5u9vhdhPUJQo= 97 | gopkg.in/bsm/sarama-cluster.v2 v2.1.13/go.mod h1:PH+cn1N1hKueFCL+6Kz/HLj3ARW4Oop7WH3u0Ivp14w= 98 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 99 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 100 | gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= 101 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 102 | gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= 103 | gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= 104 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 105 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 106 | gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= 107 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 108 | -------------------------------------------------------------------------------- /id_rsa_FOR_TEST_ONLY: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAzRsXb+/+I9AlF0tJKoDQA3bjeN2BRzfNfT6m2Y2Uq4y+WnIj 3 | Rieyo9w1JPfODKEj0bU0e4/BtDzS+cWjwSX1Ti/N2ANaIrF5VDuGba62HxHNPGAP 4 | xjMJun4YVoqbx9wkyikzZeaxXUALEhthBf08bzs7BOFHCUlYndDOTkONe8IBuPDe 5 | yN/Huf3leGLwClCh8qgRbyp8PbxtgCwDcih3QR2R5DNWmlMR1XyLaURBpbspihmr 6 | 
qMoeHESK5yaNJW27O5gCAolTqhlTV2yboGjAkhlsB9G2TQRSK2pHieIMtf3MKeFo 7 | SuToCyz4Qh964wsWLBj+mtmU2RoZwXTG/gRfBj+KDd1qvvwYKcl2eMx77p8fOet/ 8 | Lj+hGFwnAtk5g1BNdyA8k/4ZlXxGTbkHV0XIEJzsuqK5Nzshf712W8YVk7hQUJMt 9 | ZPt6ZFCZJuTXEIeOYoe8gnJV3ZMS3AtA+i8wvHelr3YgOaRLL00TLaOUXxSrPEps 10 | WnNW0MRF3HC7Hltgm5Wr2QvE0o2WDRKS1MAeZTtl0200npY2/2V4AMtgt0btqZ+Z 11 | TyDfBJ5eHCvCHdnRV120oMulB6vHDLCczPJyoJXvxfvFzf+GfvGpo8royIo2Lwja 12 | PUqeO7/X/g9Vp+EmpB79FjpHXBxkpFDrY2GVOo094Yeaf6FGoghSnIke/R0CAwEA 13 | AQKCAgAw/OiDJtoXaa6COswV58BHI40/IACxpuVgpacqTQOWWb5ZUeuU5QRo2L6k 14 | vr8rCw3IF7SaUoKcbu6hE5CCIHPPoeFTV7Gema717GW2KsV69ECFzBfYafZH4bgk 15 | a2S7vpvW4TJSg2AXeqVqRLnOuWZG1unIx5m04RzWTLoJvV68kWDh8D310mUcykSV 16 | lWPpwsp7b6MCXy5ffHLW6yxDhuC837gpvg1QPMkEqYOOt+vKDsLkJi922BeK/uAC 17 | R5Xsu6G9oRfRzUT7WqMqPVmKXZqpCoNf9rnI+WA62F6P0zrf+OdblWfYg4XcrXf6 18 | KxSpx8sa6zlceN1xYSeHqkGhCCA9qsfFbCHo1E+Vz5mocnPCMPuZxgPZjZB/baNr 19 | X/Ewbkd7oi5kPhIMFZTPdcjwruH3LdTSvuooMslskUhMMx1bNIAtX/gBPoPWGfUL 20 | chPPzaUOqIXwICcH3UlDJgF31269Dz/zV7ZbSMdau6UE4GXohK0tZKzn4sL7H+bu 21 | HT8W/1++s/F5BucyTOsWVVV5iD13hv+CoV/kcMYYPjwA2OSX7Ep8W23L9Ec1uwfW 22 | UpgdUcm30eRXBA4yUAaOC00iOwjeyMGtVR8mkQPNyNEumTzRi4nhN4/SND3RekoE 23 | 85feN2KLqPxIRDY1Bd0IRZLdOzWQ426IxWDsSwyEKctMfnpVgQKCAQEA/ZCH6kQ7 24 | dcAgZZhyS26TwSa3T8iKtmUqyzIgeVKUYarRjWS/2tFOAYsommQajfZ7jp4liH6B 25 | Di0lYO2RS93T79u8z+y+xUhkl3Nnym8pRhOiTdJbvKBKJTeh1DzVtHBKet7S6Wb4 26 | Db91g4nuSuv8e3OZMLiFdOMWGH5/+ZZtK+HZs2Z4e7icoS+i+1UaoMHaXiiJJxfH 27 | lie7LlYjX9D2I2F+gLZOgCpiQJrMMyzWVEqm5x3hXJpzT6hpEeGg4pQnXB1UaCgO 28 | /EKrfwHPkmnR6xChHvJakWCzAgYgNTEv+sveKVslMyVScj1XC8CtgmKz+h62uHZ7 29 | 2rv2blPozDCqIQKCAQEAzxNozqyOtTF8lutBbaLCyf0cHqOPWPym3NhHmrAfZlpk 30 | pqXwnsFqd9q1eQoy2+tS0py/mKLn0CGmUUSmProGgMhp067LbS2wA4rkyEjllIO5 31 | guVZQru1aLPaw7jMhozjIR3kOb6vnsczCnwSyhCQ32qpoNpOMAqrIs6r5bKdOafW 32 | zn7Mi+wzgEXZHavxiXAjoDP9ih+OyIsFq20tTFVOSCBU05ZTTKwpkQq+cJ6R/HDD 33 | msd4TPzFKJrFJkai4EZ1qqlw5JzzKkf7vFWCTz0OuP8bxF8EXTp/LBdi78Qsz7Dd 34 | F7upz7tb8afEBy4qGnyHcQgakgMIXyaCvhDZ4XyLfQKCAQEAphL/siQ2fYz0qxYJ 35 | 5fXquFtPPOYwU/425RdTxaKcpMtorJ8MOZlU6hXm5hgifFCv5F0YYCnbBRn0nWN8 36 | AqCrqzHUnKqG2Fft6nQfyqq78bXyW94nT4f8iBvD1mpx3dieL5O5uD1Ewlxf72Nr 37 | u46Ms56m+w4f+sqxMiHIWAR6RH+MQsAesYncrIWyzcKTyi6tg3YxPnWIt4QWSOcK 38 | DDukus+kN1EwYSlhaMb6b5oyVsr+PemILUrLs6SuDJQm97/pd5+OJRn3lqlkAa+B 39 | 8Hd2kes2XDqHrygSloTEm5I3xp+4RoX3+2O6eDBMztnHytoy3uM28eJIhJ8PbHtw 40 | wiwtgQKCAQB5DbevWmDyu8NB1rWhCg0lV7ogjy01sD07zZPOcjdo9l7wrQFFTXyY 41 | JFsCL1wcBC2ewdOfv1wU6Bb6W090S/uRPZfgD8cMRDEpYQDttO/BKmBAjSLpSCJ8 42 | 2ueAk3+9CI+exIKu3zJ7d6f28SFqztRiR1ByoCwlGZeB+Z+z3/7ZvaxteuLIyYQx 43 | 8KNEHUhOOCgbd0k4fcpSojLmTCS3AweIUp5EMxTvNZX3HHjIDWr6UFGJIiLJntsD 44 | Kbcm8RQ2V/phkjxU6QueJAadyr8q416/9yKcYY+DkLbXZ7SGxgAMSjB1t1dGARl2 45 | 48RNDke+xB3Jjhs6UFuMhE7aHa/BibVhAoIBAQDYWgAPdxUhU9z0YAf67D63WX5t 46 | IMDPW/j5GQzBgv24L8elwxW1OVAHZQkiq6qOEk4LsCgx/aODe5E/je4SvdoLtia+ 47 | cz0jIwB9bYNFIFDPk7hr7+6wOY7Ymw5QJ3GYTnBCaM7dm+hQPUI6G2nfAcqujfSQ 48 | LVODAjxIGlg5TRk5yWhtWv/x26IibrKKYJmndmee+zmdwgzxtsAii0A6mbD4Z+x7 49 | SOK9I0Dghg52APUn7qbkFqL6QC0QkjPA3uJhz3/CL49aWFeputTuXNm+9sC6pABO 50 | sIBAkPMYUDdlU8MPN3ZQMxKPhYl5WcxFPcWgZboLaP0vpkmO7dPnZaKvS98n 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /id_rsa_FOR_TEST_ONLY_ALT: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJJgIBAAKCAgEApslJoZhKxY+oetgw4XVH437+RSpS5wISIwR3CCRqd7DT4M31 3 | vrgbgsi7Lf8QrzilczvbhfO3dvAm0yl1S3a5ES5yo/KQA6yo2E2zEZYlV6+Dc2Ye 4 | 
/MXlFfH7aBIm+q0sUr4j4Aw8GxCixjEZ4TRK3wpzKY+ObZb78hW8XFNQZpRzC0Tk 5 | YZXPcO9pJDYLKH2mhWjMJcX0ElFwf4cY1LtI1FmtCzHHtwIGH31ktfD40U01WoT+ 6 | Dtx/KSmnv8Zyyw0mc+pP/iFj3mFoRMW8l7pZvO28NmmXphuuZwoY6rulLvMp6IxK 7 | 7nqvpNa9XvURagqAcufshjleW01CYNkc2tB47JpsHpaFMwgfDTEgym4u10HjoD6H 8 | +JaXk6wk7gYT0vfGYG2ysgmahl7pcjy5UPbsO/DKnMl/AkjxsQ+nFB6q8wQNqv47 9 | lQhrloImwMAhRJqaO5fIcenfPvTw4R3+3av8lSvrCZ9aduNDslxpEvP/+1U7QJcz 10 | bqMtp+MtIaqgQT69yitRokkdxbZFOLxPyhyFOlxQktaG9zZGLW3+pXkyco+PK/K1 11 | qtMirL8O9lJW8jWU+tIgFj/RzqGgITI4y6x6TU5CV5L/DkyrF78pAUz1lSYhB96e 12 | qiMTckXmp5yitTwNowE3wMRVNJNDlb3SxCTS3cXkjJs6YU29Xidv0uyvgfkCAwEA 13 | AQKCAgBliDPiw6sBCasu3V0kYxkYTL7NrC47ou8XkihENKb7phHV0IvBRqK4wynv 14 | nk6x3HIb7Fx+9xbAuQFpmttkzSlsZDI1y/wPwJQo2WQhWJ3y+02yeoBysKX87gp2 15 | GFbSU4mf9VAxnEEa2M2+iTWP+x15GgtxSYsS+870WHcKXU2cpNJEEfRV07UssxH2 16 | kU/JnnQ40ss0U4V5gUpS2BMtrbVwUAcfwXYq2niPwu4ENGXERJQuPrfKpEBk9o7A 17 | GDbZlW/LMODxe7IoNnQbKaTdWPw4HyFsnmtLG57X6zWv7/tm0XC5uG4FL3qthmxR 18 | 2i53t1zqlnVOYK4oNqgp/QGr0y1JIDRSnsiYGkODJ5P7IYUIVIvyjP9E/wcu/5+s 19 | J90nGh16Y8JaE76AQmGHU4Qo6B+FtmkpcDdoE0PdOwMyvXDTH2AcRpZQkJR7eQL+ 20 | XfMJzks3FxpPCzjfpQcDsGj6kwCk/clylNJS5G0aOKvvVnjYWkqbC1pAo6bt0net 21 | bCRqX77RQ2VPDbmRwkJxfqGA1n3IpLzw/lBkpKxankLMzGlDIv9b1l091zzO1tKg 22 | ksuvUc2fC5XyLPp/+nGfXMo8GPVLC9CTACK5nr8vkpFw3GU0cRqr8cKMhJcNtkvJ 23 | g24x/XgFQmIbL5pXojn8PYZYT06CQnIgUKh3KjQ2aEIeoNDcAQKCAQEA0zBoAvp1 24 | dDEjNv3uO6pPeun/dFAkOFC/YKUIKVUEgUDKcLIcbTC4MEL84A1yTba7JzE8Exsv 25 | jtSiUhWBg69thhlq3eGedJCXUFmSdQs8g/SVvWhfgkpFRU0KRiXlyPqH/7Yo3H8J 26 | sjkXnMXYkot37EbOcBbj6cD/3WjkYyiqV6YXvmn0oDQwXGgeZ2kf0sUDJANK3Jzb 27 | teaQfbH1NO1ZIOBTuiomtyZGHzxae6qon4rI5vlEfzcpUFCxGl5ivsqLDYbgwWpL 28 | aMwC4nIMEPAYAmtxQFCbGld/8NYQSVFa2tri9u/5UZk2+slaTHxUhBH8Qj0vAhFf 29 | X63siKNQzzCVkQKCAQEAyiz2XSjAiW8jCkKrZS7Kp5U5wfEUShNn4uP5UhXSx5ls 30 | TC3s/iYFAC6VduFh14JP1CE1BNJx5RsNab0D5O5gYGqnQ3+Lc6MRlocQNLIWP3nt 31 | tHtxzEx5XatHKhrppE8nhE4HbSZtMxhEkvAgIkMwfH5yhnSAeJfRfbKU1F2fztsD 32 | rA3n9IXLYW8fWv2QNfHpfCD+s9Xc8d2J4I6HnZpDtCwoNok6+1Ij3hStxT5ecY+1 33 | OMmCbGMfW1yELLxSoRaN1ZN7o7QdYa8/3kys+IfEjiEZfYMzLavdTXU0uAkx2qPs 34 | RjDMG8qS/35MIRY7VY6300uQTEH07OjJKqC6W5zR6QKB/3lEUnVjXA5cKNxq82FP 35 | alQqCYXAfwvXUg0stoMFTpDpo1US8T/BugH8dTgygHAffVgD7XsLIUPi1VJNBxGW 36 | qk1WSwcdht9oJ3r79B1BVn5u84zPYghunVgt5LhjCo9dTLqI3u2+BNNR9T0QXkvH 37 | yhTik6fJfRW8vS9mC77kmuu5c+ji7pUPYrWtE8hNlPhyKFuFiHclmwPYYR/MEAFU 38 | SNmX9nvvuEtyrtFA1rQsSqdSCgqXF7307sQvkYKSsCDeLPqXeHeXkbWbcAyXcFZJ 39 | h3/cMiu8HVQC6nRgdvmivYjuJ2WkZoV0dR2MgU1qFx2/NhrTBysozpsiQ7AyFzTh 40 | sQKCAQEArdTc+xqX+VMaYK8UKUqDKnYPFCcRl+N5lkcHzsE+2ymaSSHPvHG6KP7k 41 | ujNtadv8gVsP+BZtoVySOtEu8P7Y1O2+5TVcFNAnxjJZB8X7hRZ0AJOHM+6pB5XF 42 | +XjsXL9GLebdUccRazY5d9GI/4yfutsJ43t2HroCgL40YyggyL3dOWk3gDItvZfK 43 | 8PRFs2xNodHIzZ1bfIZMD0zdY/AIuHijFskFcqOcfU1wX8CRebjjS3FPd5v+kRJJ 44 | /yDpNuJqjzFgxE+QV7ZUBvIMYyBxKvdgsF9HeMRl4jydWINnXyNEetFoHZkerRjN 45 | okE4QQcWHVHCPx8S1UQN+134/d4nSQKCAQAGJNvaAU4II0yU+OLVuiWo1lbqYwox 46 | Z8XkM5NQfTOESgy2ViysO9DVX+JMiUSxuc6SDu3yxG4GLm8+XbqR5sZPg1+z+xT7 47 | dSP1BGSY9ugLU/M6JjbjZDX7/XJKqx8LE4UpB68KDDBB5x4DVXHRYG6jhXknmJJW 48 | XQtYvJsLKnhIOnNhRF1laZGGxD2J73DjtbrEat7rkbORLQlLcHkEsg0LLOa97sLQ 49 | iStN2Sg5pcxbLu+COsESgvAgeoV8wPQyGjTt96aATsExBkhVKo0Qznj4KnjpOAYz 50 | BGQ+6Td3ZEMwdupsEdOxGdh9/lwmjLS1G05xKW3KCHntEuGWhdpS7JIG 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /mdt_msg_samples/dump.bin: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/mdt_msg_samples/dump.bin -------------------------------------------------------------------------------- /mdt_msg_samples/samples.go: -------------------------------------------------------------------------------- 1 | // 2 | // August 2016, Christian Cassar 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | // 10 | // Packages exporting message samples for test purposes. 11 | package mdt_msg_samples 12 | 13 | import ( 14 | "bufio" 15 | "encoding/json" 16 | telem "github.com/cisco/bigmuddy-network-telemetry-proto/proto_go" 17 | "github.com/golang/protobuf/jsonpb" 18 | "github.com/golang/protobuf/proto" 19 | log "github.com/sirupsen/logrus" 20 | "io/ioutil" 21 | "os" 22 | "strings" 23 | ) 24 | 25 | type sampleTelemetryTable []SampleTelemetryTableEntry 26 | 27 | type SampleTelemetryTableEntry struct { 28 | Sample *telem.Telemetry 29 | SampleStreamGPB []byte 30 | SampleStreamJSON []byte 31 | SampleStreamJSONKV []byte 32 | Leaves int 33 | Events int 34 | } 35 | 36 | type SampleTelemetryDatabaseID int 37 | 38 | const ( 39 | SAMPLE_TELEMETRY_DATABASE_BASIC SampleTelemetryDatabaseID = iota 40 | ) 41 | 42 | var sampleTelemetryDatabase map[SampleTelemetryDatabaseID]sampleTelemetryTable 43 | 44 | func MDTSampleTelemetryTableFetchOne( 45 | dbindex SampleTelemetryDatabaseID) *SampleTelemetryTableEntry { 46 | 47 | if len(sampleTelemetryDatabase) <= int(dbindex) { 48 | return nil 49 | } 50 | 51 | table := sampleTelemetryDatabase[dbindex] 52 | return &table[0] 53 | } 54 | 55 | type MDTContext interface{} 56 | type MDTSampleCallback func(sample *SampleTelemetryTableEntry, context MDTContext) (abort bool) 57 | 58 | // 59 | // MDTSampleTelemetryTableIterate iterates over table of samples 60 | // calling caller with function MDTSampleCallback and opaque context 61 | // MDTContext provided, for every known sample. The number of samples 62 | // iterated over is returned. 63 | func MDTSampleTelemetryTableIterate( 64 | dbindex SampleTelemetryDatabaseID, 65 | fn MDTSampleCallback, 66 | c MDTContext) (applied int) { 67 | 68 | if len(sampleTelemetryDatabase) <= int(dbindex) { 69 | return 0 70 | } 71 | count := 0 72 | table := sampleTelemetryDatabase[dbindex] 73 | for _, entry := range table { 74 | count++ 75 | if fn(&entry, c) { 76 | break 77 | } 78 | } 79 | 80 | return count 81 | } 82 | 83 | func MDTLoadMetrics() string { 84 | b, e := ioutil.ReadFile("mdt_msg_samples/dump.metrics") 85 | if e == nil { 86 | return string(b) 87 | } 88 | return "" 89 | } 90 | 91 | func init() { 92 | 93 | sampleTelemetryDatabase = make(map[SampleTelemetryDatabaseID]sampleTelemetryTable) 94 | 95 | sampleTelemetryDatabase[SAMPLE_TELEMETRY_DATABASE_BASIC] = sampleTelemetryTable{} 96 | 97 | marshaller := &jsonpb.Marshaler{ 98 | EmitDefaults: true, 99 | OrigName: true, 100 | } 101 | 102 | kv, err := os.Open("dump.jsonkv") 103 | if err != nil { 104 | kv, err = os.Open("mdt_msg_samples/dump.jsonkv") 105 | if err != nil { 106 | log.Fatal(err) 107 | } 108 | } 109 | defer kv.Close() 110 | 111 | dump := bufio.NewReader(kv) 112 | decoder := json.NewDecoder(dump) 113 | 114 | _, err = decoder.Token() 115 | if err != nil { 116 | log.Fatal(err) 117 | } 118 | 119 | // Read the messages and build the db. 
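// Each record is decoded into a telem.Telemetry message and then
// re-marshalled as a compact GPB stream and as JSON, so that tests
// can fetch the same sample in every supported stream form.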
120 | for decoder.More() { 121 | var m telem.Telemetry 122 | 123 | err := jsonpb.UnmarshalNext(decoder, &m) 124 | if err != nil { 125 | log.Fatal(err) 126 | } 127 | 128 | gpbstream, err := proto.Marshal(&m) 129 | if err != nil { 130 | log.Fatal(err) 131 | } 132 | 133 | jsonstream, err := marshaller.MarshalToString(&m) 134 | if err != nil { 135 | log.Fatal(err) 136 | } 137 | 138 | entry := SampleTelemetryTableEntry{ 139 | Sample: &m, 140 | SampleStreamGPB: gpbstream, 141 | SampleStreamJSONKV: json.RawMessage(jsonstream), 142 | Leaves: strings.Count(jsonstream, "\"name\""), 143 | Events: strings.Count(jsonstream, "\"content\""), 144 | } 145 | 146 | sampleTelemetryDatabase[SAMPLE_TELEMETRY_DATABASE_BASIC] = 147 | append(sampleTelemetryDatabase[SAMPLE_TELEMETRY_DATABASE_BASIC], entry) 148 | } 149 | 150 | jsondump, err := ioutil.ReadFile("dump.json") 151 | if err != nil { 152 | jsondump, err = ioutil.ReadFile("mdt_msg_samples/dump.json") 153 | if err != nil { 154 | // No validation if we don't have the results 155 | return 156 | } 157 | } 158 | var rows []json.RawMessage 159 | 160 | err = json.Unmarshal(jsondump, &rows) 161 | if err != nil { 162 | log.Fatal("Failed to unmarshal verification data") 163 | } 164 | for i, row := range rows { 165 | sample := &sampleTelemetryDatabase[SAMPLE_TELEMETRY_DATABASE_BASIC][i] 166 | sample.SampleStreamJSON = row 167 | } 168 | 169 | // log.Printf("%v", sampleTelemetryDatabase[SAMPLE_TELEMETRY_DATABASE_BASIC]) 170 | } 171 | -------------------------------------------------------------------------------- /mdt_msg_samples/samples_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // August 2016, Christian Cassar 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | // 10 | // Package exporting message samples for test purposes. 11 | package mdt_msg_samples 12 | 13 | import ( 14 | "fmt" 15 | "testing" 16 | ) 17 | 18 | func sampleDump(sample *SampleTelemetryTableEntry, _ MDTContext) (abort bool) { 19 | if sample.SampleStreamJSON != nil { 20 | fmt.Printf("SampleStreamJSON length is %d \n", len(sample.SampleStreamJSON)) 21 | } 22 | return false 23 | } 24 | 25 | func TestSampleDB(t *testing.T) { 26 | count := MDTSampleTelemetryTableIterate(SAMPLE_TELEMETRY_DATABASE_BASIC, sampleDump, nil) 27 | if count == 0 { 28 | t.Errorf("Sample database empty!") 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /message.go: -------------------------------------------------------------------------------- 1 | // 2 | // February 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | // Control and data message interfaces and common types.
9 | // 10 | package main 11 | 12 | import ( 13 | "fmt" 14 | "io/ioutil" 15 | "text/template" 16 | ) 17 | 18 | type dataMsgStreamType int 19 | 20 | const ( 21 | dMStreamGPB dataMsgStreamType = iota 22 | dMStreamGPBKV 23 | dMStreamJSON 24 | dMStreamJSONEvents 25 | dMStreamTemplate 26 | dMStreamMsgDefault 27 | dMStreamMsgUnknown 28 | ) 29 | 30 | type msgproducer interface { 31 | String() string 32 | } 33 | 34 | func dataMsgStreamTypeString(streamType dataMsgStreamType) string { 35 | 36 | switch streamType { 37 | case dMStreamGPB: 38 | return "GPB(compact)" 39 | case dMStreamGPBKV: 40 | return "GPB(k/v)" 41 | case dMStreamJSON: 42 | return "JSON" 43 | case dMStreamJSONEvents: 44 | return "JSON(events)" 45 | case dMStreamTemplate: 46 | return "template" 47 | } 48 | 49 | return "Unknown" 50 | } 51 | 52 | func dataMsgStreamTypeFromEncoding(enc encoding) (error, dataMsgStreamType) { 53 | 54 | mapping := map[encoding]dataMsgStreamType{ 55 | ENCODING_GPB_COMPACT: dMStreamGPB, 56 | ENCODING_GPB_KV: dMStreamGPBKV, 57 | ENCODING_JSON: dMStreamJSON, 58 | ENCODING_JSON_EVENTS: dMStreamJSONEvents, 59 | ENCODING_GPB: dMStreamGPB, 60 | ENCODING_TEMPLATE: dMStreamTemplate, 61 | } 62 | 63 | s, ok := mapping[enc] 64 | if !ok { 65 | return fmt.Errorf("Unsupported encoding %v", enc), 66 | dMStreamMsgUnknown 67 | } 68 | 69 | return nil, s 70 | } 71 | 72 | func dataMsgStreamTypeToEncoding(dmt dataMsgStreamType) (error, encoding) { 73 | 74 | mapping := map[dataMsgStreamType]encoding{ 75 | dMStreamGPB: ENCODING_GPB, 76 | dMStreamJSON: ENCODING_JSON, 77 | dMStreamGPBKV: ENCODING_GPB_KV, // legacy support 78 | } 79 | 80 | s, ok := mapping[dmt] 81 | if !ok { 82 | return fmt.Errorf("Unsupported dataMsgStreamType %v", dmt), ENCODING_MAX 83 | } 84 | 85 | return nil, s 86 | } 87 | 88 | // 89 | // Specification of a dataMsg stream; type and context. 
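// For template streams, context holds the parsed *template.Template
// (set up in dataMsgStreamSpecFromConfig below); for all other
// stream types it is left nil.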
90 | type dataMsgStreamSpec struct { 91 | streamType dataMsgStreamType 92 | context interface{} 93 | } 94 | 95 | // 96 | // A dataMsgStream defaulting to the native (input) type of a message 97 | var dataMsgStreamSpecDefault = &dataMsgStreamSpec{ 98 | streamType: dMStreamMsgDefault, 99 | context: nil, 100 | } 101 | 102 | func (s *dataMsgStreamSpec) dataMsgStreamSpecTextBased() bool { 103 | switch s.streamType { 104 | case dMStreamJSON, dMStreamJSONEvents: 105 | return true 106 | default: 107 | return false 108 | } 109 | } 110 | 111 | // 112 | // Extract the specification of a dataMsgStream from nodeconfig 113 | func dataMsgStreamSpecFromConfig( 114 | nc nodeConfig, 115 | name string) (error, *dataMsgStreamSpec) { 116 | 117 | encodingString, err := nc.config.GetString(name, "encoding") 118 | if err != nil { 119 | // Default to JSON encoding 120 | encodingString = "json" 121 | } 122 | 123 | err, encoding := nameToEncoding(encodingString) 124 | if err != nil { 125 | return err, nil 126 | } 127 | 128 | err, streamType := dataMsgStreamTypeFromEncoding(encoding) 129 | if err != nil { 130 | return err, nil 131 | } 132 | 133 | spec := &dataMsgStreamSpec{ 134 | streamType: streamType, 135 | context: nil, 136 | } 137 | 138 | switch streamType { 139 | case dMStreamTemplate: 140 | 141 | // 142 | // Read the template name 143 | templateFileName, err := nc.config.GetString(name, "template") 144 | if err != nil { 145 | return fmt.Errorf( 146 | "encoding='template' requires a template option [%v]", err), nil 147 | } 148 | 149 | templateSpec, err := ioutil.ReadFile(templateFileName) 150 | if err != nil { 151 | return fmt.Errorf("read template file [%v]", err), nil 152 | } 153 | 154 | spec.context, err = template.New(name).Parse(string(templateSpec)) 155 | if err != nil { 156 | return fmt.Errorf("parsing template [%v]", err), nil 157 | } 158 | 159 | default: 160 | 161 | } 162 | 163 | return nil, spec 164 | } 165 | 166 | // The heart of the pipeline is the dataMsg. This is what is carried 167 | // around within the pipeline. The dataMsg is produced in some input 168 | // stage, and eventually consumed and probably shipped out in some 169 | // output stage. 170 | // 171 | // As the data message wends its way through the pipeline 172 | // 173 | // - remember that the same msg is pushed onto multiple paths and 174 | // should be immutable. 175 | // 176 | // 177 | type dataMsg interface { 178 | getDataMsgDescription() string 179 | produceByteStream(*dataMsgStreamSpec) (error, []byte) 180 | produceMetrics(*metricsSpec, metricsOutputHandler, metricsOutputContext) error 181 | getDataMsgStreamType() dataMsgStreamType 182 | getMetaDataPath() (error, string) 183 | getMetaDataIdentifier() (error, string) 184 | getMetaData() *dataMsgMetaData 185 | } 186 | 187 | // 188 | // Concrete meta data can be returned from message types. 189 | type dataMsgMetaData struct { 190 | Path string 191 | Identifier string 192 | } 193 | 194 | // 195 | // Control of the pipeline is achieved over control channels from the 196 | // conductor to the nodes. 197 | // 198 | type msgID int 199 | 200 | const ( 201 | // 202 | // Used to request shutdown; expects ACK on respChan 203 | SHUTDOWN msgID = iota 204 | // Request to report back on pipeline node state 205 | REPORT 206 | // Acknowledge a request.
207 | ACK 208 | ) 209 | 210 | type msgStats struct { 211 | MsgsOK uint64 212 | MsgsNOK uint64 213 | } 214 | 215 | // 216 | // Control message channel type 217 | // 218 | type ctrlMsg struct { 219 | id msgID 220 | content []byte 221 | respChan chan *ctrlMsg 222 | } 223 | -------------------------------------------------------------------------------- /message_router.go: -------------------------------------------------------------------------------- 1 | // 2 | // June 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | log "github.com/sirupsen/logrus" 13 | "time" 14 | ) 15 | 16 | // 17 | // dataMsgRouter is a router of dataMsgs (collects from one in 18 | // channel, and routes to one of a number of output channels). 19 | // The routing decision algorithm is parameterised and dictated by the 20 | // owner. Behaviour on congestion is also parameterised 21 | type dataMsgRouter struct { 22 | shutdownChan chan struct{} 23 | dataChanIn chan dataMsg 24 | dataChansOut []chan dataMsg 25 | route func(dataMsg, int) int 26 | handleCongested func(dataMsg, int, int) dataMsgRouterCongestionAction 27 | timeout time.Duration 28 | logctx *log.Entry 29 | } 30 | 31 | type dataMsgRouterCongestionAction int 32 | 33 | const ( 34 | DATAMSG_ROUTER_DROP = iota 35 | DATAMSG_ROUTER_REROUTE 36 | DATAMSG_ROUTER_SEND_AND_BLOCK 37 | ) 38 | 39 | func (r *dataMsgRouter) handleMsg(msg dataMsg, timeout *time.Timer) { 40 | 41 | for i := 0; true; i++ { 42 | 43 | outChanIndex := r.route(msg, i) 44 | if len(r.dataChansOut[outChanIndex]) < cap(r.dataChansOut[outChanIndex]) { 45 | // Easy optimisation. No need to mess with timers. Just hand it on. 46 | r.dataChansOut[outChanIndex] <- msg 47 | return 48 | } 49 | 50 | // 51 | // Channel backed up and we're about to block. Check whether to block 52 | // or drop. 53 | switch r.handleCongested(msg, i, outChanIndex) { 54 | case DATAMSG_ROUTER_REROUTE: 55 | // Do be careful when rerouting to make sure that you do indeed 56 | // reroute. 57 | continue 58 | case DATAMSG_ROUTER_DROP: 59 | r.logctx.Debug("message router drop") 60 | return 61 | case DATAMSG_ROUTER_SEND_AND_BLOCK: 62 | // 63 | // We are going to send and block, or timeout. 64 | timeout.Reset(r.timeout) 65 | select { 66 | case r.dataChansOut[outChanIndex] <- msg: 67 | // 68 | // Message shipped. Clean out timer, and get out. 69 | if !timeout.Stop() { 70 | <-timeout.C 71 | } 72 | return 73 | case <-timeout.C: 74 | // 75 | // Let go round one more time. 76 | } 77 | } 78 | } 79 | } 80 | 81 | func (r *dataMsgRouter) run() { 82 | 83 | r.logctx.Debug("dataMsg router running") 84 | 85 | // 86 | // Setup stopped timer once. 87 | timeout := time.NewTimer(r.timeout) 88 | timeout.Stop() 89 | 90 | for { 91 | select { 92 | case <-r.shutdownChan: 93 | // 94 | // We're done, and queues all drained 95 | r.logctx.Debug("dataMsg router shutting down") 96 | // 97 | // Drain queues. We don't currently close the dataChan 98 | // before we send shutdown on ctrl chan, but we do 99 | // unhook input stages. Service as many as there are in 100 | // queue. 101 | drain := len(r.dataChanIn) 102 | for i := 0; i < drain; i++ { 103 | r.handleMsg(<-r.dataChanIn, timeout) 104 | } 105 | for _, c := range r.dataChansOut { 106 | // 107 | // conventional pattern to serialise consuming last 108 | // batch of messages, then shutting down. 
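// Closing each output channel tells its consumer that no more
// messages will arrive; the consumer can drain what remains and
// then exit.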
109 | close(c) 110 | } 111 | r.logctx.Debug("dataMsg router shut down") 112 | return 113 | 114 | case msg := <-r.dataChanIn: 115 | r.handleMsg(msg, timeout) 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /message_router_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // June 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | telem "github.com/cisco/bigmuddy-network-telemetry-proto/proto_go" 13 | "github.com/dlintw/goconf" 14 | log "github.com/sirupsen/logrus" 15 | "testing" 16 | "time" 17 | ) 18 | 19 | func TestDataMsgRouter(t *testing.T) { 20 | 21 | var nc nodeConfig 22 | var err error 23 | var next, last_attempts, blocked int 24 | var testSrc dataMsgRouterTestSRC 25 | 26 | nc.config, err = goconf.ReadConfigFile("pipeline_test.conf") 27 | if err != nil { 28 | t.Fatalf("Failed to read config [%v]", err) 29 | } 30 | 31 | logctx := logger.WithFields(log.Fields{ 32 | "name": "TEST_MESSAGE_ROUTER", 33 | }) 34 | 35 | workers := 3 36 | shutChan := make(chan struct{}) 37 | outChans := make([]chan dataMsg, workers) 38 | for i := 0; i < workers; i++ { 39 | outChans[i] = make(chan dataMsg, 1) 40 | } 41 | inChan := make(chan dataMsg, 1000) 42 | 43 | next = 0 44 | last_attempts = 0 45 | 46 | msg_router := &dataMsgRouter{ 47 | dataChanIn: inChan, 48 | shutdownChan: shutChan, 49 | dataChansOut: outChans, 50 | logctx: logctx, 51 | route: func(msg dataMsg, attempts int) int { 52 | last_attempts = attempts 53 | // fmt.Println("\t\tRouting (next/attempts): ", next, attempts) 54 | return next 55 | }, 56 | handleCongested: func(msg dataMsg, attempts int, worker int) dataMsgRouterCongestionAction { 57 | // Reroute to another worker. 58 | if attempts < workers { 59 | next = next + 1 60 | if next == workers { 61 | next = 0 62 | } 63 | // fmt.Println("\t\tRerouting: ", next) 64 | return DATAMSG_ROUTER_REROUTE 65 | } 66 | 67 | // fmt.Println("Block (attempts/workers): ", attempts) 68 | blocked++ 69 | return DATAMSG_ROUTER_SEND_AND_BLOCK 70 | }, 71 | // We do not really use the timeout. Behaviour is currently to 72 | // hunt for a worker which can take the message, or drop. 73 | timeout: time.Duration(1) * time.Second, 74 | } 75 | 76 | logctx = logctx.WithFields(log.Fields{"workers": workers}) 77 | 78 | go msg_router.run() 79 | 80 | msg := &dataMsgGPB{ 81 | source: &testSrc, 82 | cachedDecode: &telem.Telemetry{EncodingPath: "a.b.c"}, 83 | } 84 | // 85 | // Produce to the router 86 | inChan <- msg 87 | time.Sleep(1 * time.Second) 88 | rx := <-outChans[0] 89 | if rx != msg { 90 | t.Fatalf("Failed to receive expected message from first worker") 91 | } 92 | 93 | if last_attempts != 0 { 94 | t.Fatalf("Last attempts, expected 0 retries, got %d", last_attempts) 95 | } 96 | 97 | // 98 | // Produce twice causing congestion, and rerouting 99 | inChan <- msg 100 | inChan <- msg 101 | 102 | // 103 | // Wait, before we start draining, let's make sure the first 104 | // catches up with the second - we are testing hunting.
105 | time.Sleep(1 * time.Second) 106 | 107 | rx = <-outChans[0] 108 | if rx != msg { 109 | t.Fatalf("Failed to receive expected message from first worker") 110 | } 111 | 112 | rx = <-outChans[1] 113 | if rx != msg { 114 | t.Fatalf("Failed to receive expected message from second worker") 115 | } 116 | 117 | if next != 1 { 118 | t.Fatalf("Should have moved to next worker, expected 1, got %d", next) 119 | } 120 | 121 | if last_attempts != 1 { 122 | t.Fatalf("Last attempts, expected 1st retry, got %d", last_attempts) 123 | } 124 | 125 | count := 10 * workers 126 | received := 0 127 | ready := make(chan struct{}) 128 | 129 | go func() { 130 | // Give us time to flood and block 131 | time.Sleep(3 * time.Second) 132 | 133 | w_handled := make([]int, workers) 134 | w := 0 135 | for { 136 | _, ok := <-outChans[w] 137 | if ok { 138 | received++ 139 | w_handled[w]++ 140 | if received == count { 141 | break 142 | } 143 | } 144 | w++ 145 | if w == workers { 146 | w = 0 147 | } 148 | } 149 | totalHandled := 0 150 | for w = 0; w < workers; w++ { 151 | totalHandled += w_handled[w] 152 | } 153 | if count != totalHandled { 154 | t.Fatalf("Received %d out of %d messages", totalHandled, count) 155 | } 156 | 157 | close(ready) 158 | }() 159 | 160 | for k := 0; k < count; k++ { 161 | inChan <- msg 162 | } 163 | 164 | time.Sleep(1 * time.Second) 165 | close(shutChan) 166 | <-ready 167 | // fmt.Println("Blocked: ", blocked) 168 | if count != received { 169 | t.Fatalf("Expected %d, got %d when testing with send and block", 170 | count, received) 171 | } 172 | } 173 | 174 | type dataMsgRouterTestSRC struct { 175 | } 176 | 177 | func (m *dataMsgRouterTestSRC) String() string { 178 | return "msg router test src" 179 | } 180 | -------------------------------------------------------------------------------- /metamonitoring.go: -------------------------------------------------------------------------------- 1 | // 2 | // May 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | package main 9 | 10 | // 11 | // Monitoring the pipeline. The intention is to provide services to the rest of 12 | // pipeline bits to keep stats for the various components. We start out with 13 | // the aim of having stats scraped by prometheus.
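// For example, in pipeline.conf (values here are illustrative;
// :8080 is the compiled-in default server address):
//
//   [default]
//   metamonitoring_prometheus_resource = /metrics
//   metamonitoring_prometheus_server = :8080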
14 | // 15 | import ( 16 | "github.com/prometheus/client_golang/prometheus" 17 | log "github.com/sirupsen/logrus" 18 | "net/http" 19 | ) 20 | 21 | var pipelineMetaMonitoringStarted bool 22 | 23 | func metamonitoring_init(nc nodeConfig) { 24 | // 25 | // Set up server to serve metrics to prometheus 26 | if pipelineMetaMonitoringStarted { 27 | return 28 | } 29 | pipelineMetaMonitoringStarted = true 30 | 31 | path, err := nc.config.GetString("default", 32 | "metamonitoring_prometheus_resource") 33 | if err != nil { 34 | logger.Info("Metamonitoring: not enabled") 35 | return 36 | } 37 | 38 | server, err := nc.config.GetString("default", 39 | "metamonitoring_prometheus_server") 40 | if err != nil { 41 | server = ":8080" 42 | } 43 | 44 | logger.WithFields(log.Fields{ 45 | "resource": path, 46 | "name": "default", 47 | "server": server}).Info( 48 | "Metamonitoring: serving pipeline metrics to prometheus") 49 | 50 | http.Handle(path, prometheus.Handler()) 51 | err = http.ListenAndServe(server, nil) 52 | logger.WithError(err).Error("Metamonitoring: stop serving metrics") 53 | } 54 | -------------------------------------------------------------------------------- /metrics.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "basepath" : "Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters", 4 | "spec" : { 5 | "fields" : [ 6 | {"name" : "interface-name", "tag" : true}, 7 | {"name" : "packets-received"}, 8 | {"name" : "bytes-received"}, 9 | {"name" : "packets-sent", "track": true}, 10 | {"name" : "bytes-sent"}, 11 | {"name" : "output-drops"}, 12 | {"name" : "output-queue-drops"}, 13 | {"name" : "input-drops"}, 14 | {"name" : "input-queue-drops"}, 15 | {"name" : "input-errors"}, 16 | {"name" : "crc-errors"}, 17 | {"name" : "input-ignored-packets"}, 18 | {"name" : "output-errors"}, 19 | {"name" : "output-buffer-failures"}, 20 | {"name" : "carrier-transitions"} 21 | ] 22 | } 23 | }, 24 | { 25 | "basepath" : "Cisco-IOS-XR-mpls-te-oper:mpls-te/tunnels/tunnel-auto-bandwidths/tunnel-auto-bandwidth", 26 | "spec" : { 27 | "fields" : [ 28 | {"name" : "tunnel-name", "tag" : true}, 29 | {"name" : "highest-bandwidth"}, 30 | {"name" : "last-sample-bandwidth"}, 31 | {"name" : "samples-collected"}, 32 | {"name" : "tunnel-requested-bandwidth"} 33 | ] 34 | } 35 | }, 36 | { 37 | "basepath" : "Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/output/statistics", 38 | "spec" : { 39 | "fields" : [ 40 | {"name" : "interface-name", "tag" : true}, 41 | {"name" : "policy-name", "tag" : true}, 42 | { 43 | "name" : "class-stats", 44 | "fields" : [ 45 | {"name" : "class-name", "tag" : true}, 46 | { 47 | "name" : "general-stats", 48 | "fields" : [ 49 | {"name" : "transmit-packets"}, 50 | {"name" : "transmit-bytes"}, 51 | {"name" : "total-drop-packets"}, 52 | {"name" : "total-drop-bytes"}, 53 | {"name" : "total-drop-rate"}, 54 | {"name" : "total-transmit-rate"} 55 | ] 56 | }, 57 | { 58 | "name" : "queue-stats-array", 59 | "fields" : [ 60 | {"name" : "queue-id", "tag" : true}, 61 | {"name" : "tail-drop-packets"}, 62 | {"name" : "tail-drop-bytes"}, 63 | { 64 | "name" : "queue-average-length", 65 | "fields" : [ 66 | {"name" : "value"}, 67 | {"name" : "unit", "tag" : true} 68 | ] 69 | }, 70 | {"name" : "random-drop-packets"}, 71 | {"name" : "random-drop-bytes"}, 72 | {"name" : "conform-packets"}, 73 | {"name" : "conform-bytes"}, 74 | {"name" : "exceed-packets"}, 75 | {"name" : "exceed-bytes"} 76 | ] 77 | } 78 | ] 79 | } 80 | ] 81 | } 82 | }, 
83 | { 84 | "basepath" : "Cisco-IOS-XR-procmem-oper:processes-memory/nodes/node/process-ids/process-id", 85 | "spec" : { 86 | "fields" : [ 87 | {"name":"node-name", "tag": true}, 88 | {"name":"name", "tag": true}, 89 | {"name":"text-seg-size"}, 90 | {"name":"data-seg-size"}, 91 | {"name":"stack-seg-size"}, 92 | {"name":"malloc-size"} 93 | ] 94 | } 95 | }, 96 | { 97 | "basepath" : "Cisco-IOS-XR-nto-misc-oper:memory-summary/nodes/node/summary", 98 | "spec" : { 99 | "fields" : [ 100 | {"name":"node-name", "tag": true}, 101 | {"name":"ram-memory"}, 102 | {"name":"free-physical-memory"}, 103 | {"name":"system-ram-memory"}, 104 | {"name":"free-application-memory"} 105 | ] 106 | } 107 | }, 108 | { 109 | "basepath" : "Cisco-IOS-XR-wdsysmon-fd-oper:system-monitoring/cpu-utilization", 110 | "spec" : { 111 | "fields" : [ 112 | {"name":"node-name", "tag": true}, 113 | {"name":"total-cpu-one-minute"}, 114 | {"name":"total-cpu-five-minute"}, 115 | {"name":"total-cpu-fifteen-minute"}, 116 | { 117 | "name":"process-cpu", 118 | "fields" : [ 119 | {"name":"process-name", "tag": true}, 120 | {"name":"process-cpu-one-minute"}, 121 | {"name":"process-cpu-five-minute"}, 122 | {"name":"process-cpu-fifteen-minute"} 123 | ] 124 | } 125 | ] 126 | } 127 | } 128 | ] 129 | -------------------------------------------------------------------------------- /metrics_gpb.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "basepath" : "Cisco-IOS-XR-infra-statsd-oper:infra-statistics/interfaces/interface/latest/generic-counters", 4 | "spec" : { 5 | "fields" : [ 6 | {"name" : "interface_name", "tag" : true}, 7 | {"name" : "packets_received"}, 8 | {"name" : "bytes_received"}, 9 | {"name" : "packets_sent", "track": true}, 10 | {"name" : "bytes_sent"}, 11 | {"name" : "output_drops"}, 12 | {"name" : "output_queue_drops"}, 13 | {"name" : "input_drops"}, 14 | {"name" : "input_queue_drops"}, 15 | {"name" : "input_errors"}, 16 | {"name" : "crc_errors"}, 17 | {"name" : "input_ignored_packets"}, 18 | {"name" : "output_errors"}, 19 | {"name" : "output_buffer_failures"}, 20 | {"name" : "carrier_transitions"} 21 | ] 22 | } 23 | }, 24 | { 25 | "basepath" : "Cisco-IOS-XR-mpls-te-oper:mpls-te/tunnels/tunnel-auto-bandwidths/tunnel-auto-bandwidth", 26 | "spec" : { 27 | "fields" : [ 28 | {"name" : "tunnel_name", "tag" : true}, 29 | {"name" : "highest_bandwidth"}, 30 | {"name" : "last_sample_bandwidth"}, 31 | {"name" : "samples_collected"}, 32 | {"name" : "tunnel_requested_bandwidth"} 33 | ] 34 | } 35 | }, 36 | { 37 | "basepath" : "Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/statistics", 38 | "spec" : { 39 | "fields" : [ 40 | {"name" : "interface_name", "tag" : true}, 41 | {"name" : "policy_name", "tag" : true}, 42 | { 43 | "name" : "class_stats", 44 | "fields" : [ 45 | {"name" : "class_name", "tag" : true}, 46 | { 47 | "name" : "general_stats", 48 | "fields" : [ 49 | {"name" : "transmit_packets"}, 50 | {"name" : "transmit_bytes"}, 51 | {"name" : "total_drop_packets"}, 52 | {"name" : "total_drop_bytes"}, 53 | {"name" : "total_drop_rate"}, 54 | {"name" : "total_transmit_rate"} 55 | ] 56 | }, 57 | { 58 | "name" : "queue_stats_array", 59 | "fields" : [ 60 | {"name" : "queue_id", "tag" : true}, 61 | {"name" : "tail_drop_packets"}, 62 | {"name" : "tail_drop_bytes"}, 63 | { 64 | "name" : "queue_average_length", 65 | "fields" : [ 66 | {"name" : "value"}, 67 | {"name" : "unit", "tag" : true} 68 | ] 69 | }, 70 | {"name" : "random_drop_packets"}, 71 | {"name" : "random_drop_bytes"}, 72 | 
{"name" : "conform_packets"}, 73 | {"name" : "conform_bytes"}, 74 | {"name" : "exceed_packets"}, 75 | {"name" : "exceed_bytes"} 76 | ] 77 | } 78 | ] 79 | } 80 | ] 81 | } 82 | }, 83 | { 84 | "basepath" : "Cisco-IOS-XR-procmem-oper:processes-memory/nodes/node/process-ids/process-id", 85 | "spec" : { 86 | "fields" : [ 87 | {"name":"node_name", "tag": true}, 88 | {"name":"name", "tag": true}, 89 | {"name":"text_seg_size"}, 90 | {"name":"data_seg_size"}, 91 | {"name":"stack_seg_size"}, 92 | {"name":"malloc_size"} 93 | ] 94 | } 95 | }, 96 | { 97 | "basepath" : "Cisco-IOS-XR-nto-misc-oper:memory-summary/nodes/node/summary", 98 | "spec" : { 99 | "fields" : [ 100 | {"name":"node_name", "tag": true}, 101 | {"name":"ram_memory"}, 102 | {"name":"free_physical_memory"}, 103 | {"name":"system_ram_memory"}, 104 | {"name":"free_application_memory"} 105 | ] 106 | } 107 | }, 108 | { 109 | "basepath" : "Cisco-IOS-XR-wdsysmon-fd-oper:system-monitoring/cpu-utilization", 110 | "spec" : { 111 | "fields" : [ 112 | {"name":"node_name", "tag": true}, 113 | {"name":"total_cpu_one_minute"}, 114 | {"name":"total_cpu_five_minute"}, 115 | {"name":"total_cpu_fifteen_minute"}, 116 | { 117 | "name":"process_cpu", 118 | "fields" : [ 119 | {"name":"process_name", "tag": true}, 120 | {"name":"process_cpu_one_minute"}, 121 | {"name":"process_cpu_five_minute"}, 122 | {"name":"process_cpu_fifteen_minute"} 123 | ] 124 | } 125 | ] 126 | } 127 | } 128 | ] 129 | -------------------------------------------------------------------------------- /metrics_influx_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // June 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | "github.com/dlintw/goconf" 13 | "testing" 14 | ) 15 | 16 | func TestMetricsInfluxConfigure(t *testing.T) { 17 | 18 | var nc nodeConfig 19 | var err error 20 | 21 | nc.config, err = goconf.ReadConfigFile("pipeline_test.conf") 22 | if err != nil { 23 | t.Fatalf("Failed to read config [%v]", err) 24 | } 25 | 26 | // 27 | // Now that a password is set, let's test the non-interactive 28 | // negative and positive path. 29 | name := "influxtest" 30 | nc.config.AddOption(name, "influx", "") 31 | _, err = metricsInfluxNew(name, nc) 32 | if err == nil { 33 | t.Fatalf("Test passed but should fail, empty influx") 34 | } 35 | 36 | nc.config.AddOption(name, "influx", "http://localhost:8086") 37 | _, err = metricsInfluxNew(name, nc) 38 | if err == nil { 39 | t.Fatalf("Test passed but should fail, missing database") 40 | } 41 | 42 | nc.config.AddOption(name, "database", "mydatabase") 43 | *pemFileName = "id_rsa_FOR_TEST_ONLY" 44 | pw := "mysillysillysillypassword" 45 | err, epw := encrypt_password(*pemFileName, []byte(pw)) 46 | nc.config.AddOption(name, "password", epw) 47 | nc.config.AddOption(name, "username", "user") 48 | _, err = metricsInfluxNew(name, nc) 49 | if err != nil { 50 | t.Fatalf("Test failed: %v", err) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /metrics_prometheus.go: -------------------------------------------------------------------------------- 1 | // 2 | // June 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 
6 | // 7 | // 8 | // Feed metrics to prometheus 9 | // 10 | package main 11 | 12 | import ( 13 | "bufio" 14 | "bytes" 15 | "fmt" 16 | "hash/fnv" 17 | "net/http" 18 | "net/url" 19 | "os" 20 | "regexp" 21 | "strings" 22 | 23 | log "github.com/sirupsen/logrus" 24 | ) 25 | 26 | // 27 | // Specify defaults in case they are not configured. 28 | const ( 29 | PROMETHEUS_JOBNAME = "telemetry" 30 | ) 31 | 32 | type metricsPrometheusOutputHandler struct { 33 | pushGWAddress string 34 | jobName string 35 | instance string 36 | pushURL string 37 | instanceBuf *bytes.Buffer 38 | // 39 | // metricsfilename allows for a diagnostic dump of metrics as 40 | // exported. 41 | metricsfilename string 42 | dump *bufio.Writer 43 | } 44 | 45 | // 46 | // Prometheus constrains symbols in sensor names 47 | func (p *metricsPrometheusOutputHandler) adaptSensorName(name string) string { 48 | 49 | re := regexp.MustCompile("[^a-zA-Z0-9_:]+") 50 | 51 | return re.ReplaceAllString(name, "_") 52 | } 53 | 54 | // 55 | // Prometheus constrains symbols in tag names 56 | func (p *metricsPrometheusOutputHandler) adaptTagName(name string) string { 57 | re := regexp.MustCompile("[^a-zA-Z0-9_]+") 58 | 59 | return re.ReplaceAllString(name, "_") 60 | } 61 | 62 | func (p *metricsPrometheusOutputHandler) flushMetric( 63 | tags []metricsAtom, 64 | ts uint64, 65 | context metricsOutputContext) { 66 | 67 | var pushURL string 68 | 69 | buf := context.(*bytes.Buffer) 70 | 71 | logCtx := logger.WithFields(log.Fields{ 72 | "pushGWAddress": p.pushGWAddress, 73 | "jobName": p.jobName, 74 | "pushURL": p.pushURL, 75 | "instance": p.instance, 76 | }) 77 | 78 | if buf.Len() == 0 { 79 | logCtx.Debug("metrics export (no metrics in msg): keys ", tags) 80 | return 81 | } 82 | 83 | // 84 | // Add unique instance to disambiguate time series for grouped 85 | // metrics. 86 | // 87 | // As multiple metrics make it into the push gateway they are 88 | // grouped by (job, instance or URL labels). Only one update per 89 | // group of metrics is tracked at any one time - any subsequent 90 | // update to the same group prior to scraping replaces any 91 | // previous updates. 92 | // 93 | // In order to avoid losing updates, we make sure that every 94 | // unique time series, as identified by the tags, results in a 95 | // distinct group URL (modulo hash collisions). 96 | p.instanceBuf.Reset() 97 | for i := 0; i < len(tags); i++ { 98 | p.instanceBuf.WriteString( 99 | fmt.Sprintf("%s=\"%v\" ", tags[i].key, tags[i].val)) 100 | } 101 | if p.instanceBuf.Len() != 0 { 102 | h := fnv.New64a() 103 | h.Write(p.instanceBuf.Bytes()) 104 | pushURL = fmt.Sprintf("%s_%v", p.pushURL, h.Sum64()) 105 | } else { 106 | pushURL = p.pushURL 107 | } 108 | 109 | // 110 | // Dump a copy as a string if necessary, showing the URL too. 111 | if p.dump != nil { 112 | p.dump.WriteString("POST " + pushURL + "\n") 113 | _, err := p.dump.WriteString(buf.String()) 114 | if err != nil { 115 | logCtx.WithError(err).Error("failed to dump metric to file") 116 | } 117 | } 118 | 119 | // POST (not PUT) to make sure that only metrics in the same group 120 | // are replaced.
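// For example (illustrative values only): a metric carrying the tag
// interface_name="Gi0/0/0/0" renders its tags into instanceBuf, the
// 64-bit FNV-1a hash of that string is appended to the group URL,
// and the effective push target becomes something like
//
//	http://localhost:9091/metrics/job/telemetry/instance/pipeline_14695981039346656037
//
// so distinct tag sets land in distinct push gateway groups.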
121 | req, err := http.NewRequest("POST", pushURL, buf) 122 | if err != nil { 123 | logCtx.WithError(err).Error("http new request") 124 | return 125 | } 126 | 127 | req.Header.Set("Content-Type", `text/plain; version=0.0.4`) 128 | resp, err := http.DefaultClient.Do(req) 129 | if err != nil { 130 | logCtx.WithError(err).Error("http post") 131 | return 132 | } 133 | 134 | if resp.StatusCode != 202 { 135 | err = fmt.Errorf( 136 | "unexpected status code %d while pushing to %s", 137 | resp.StatusCode, pushURL) 138 | logCtx.WithError(err).Error("http reply") 139 | } 140 | 141 | resp.Body.Close() 142 | 143 | buf.Reset() 144 | } 145 | 146 | func (p *metricsPrometheusOutputHandler) buildMetric( 147 | tags []metricsAtom, 148 | sensor metricsAtom, 149 | ts uint64, 150 | context metricsOutputContext) { 151 | var delim string 152 | 153 | buf := context.(*bytes.Buffer) 154 | 155 | buf.WriteString(sensor.key) 156 | 157 | if len(tags) > 0 { 158 | delim = "{" 159 | for i := 0; i < len(tags); i++ { 160 | buf.WriteString( 161 | fmt.Sprintf( 162 | "%s%s=\"%v\"", 163 | delim, 164 | tags[i].key, 165 | tags[i].val)) 166 | if i == 0 { 167 | // change delim 168 | delim = "," 169 | } 170 | } 171 | delim = "} " 172 | } else { 173 | delim = " " 174 | } 175 | 176 | buf.WriteString(fmt.Sprintf("%s%v %v\n", delim, sensor.val, ts)) 177 | } 178 | 179 | func (p *metricsPrometheusOutputHandler) worker(m *metricsOutputModule) { 180 | 181 | var metricsfile *os.File 182 | 183 | // 184 | // We don't worry about sizing of buf for this worker. This same 185 | // buf will be used throughout the life of the worker. Underlying 186 | // storage will grow over the first few messages to accommodate the 187 | // largest message built automatically. This knowledge is preserved 188 | // across messages since we only call reset between one message and 189 | // another. Put another way, buf storage grows monotonically over 190 | // time. 191 | buf := new(bytes.Buffer) 192 | p.instanceBuf = new(bytes.Buffer) 193 | 194 | defer m.shutdownSyncPoint.Done() 195 | // 196 | // Start by computing the push URL to use. Using the same approach as 197 | // the Prometheus push client. 198 | if !strings.Contains(p.pushGWAddress, "://") { 199 | p.pushGWAddress = "http://" + p.pushGWAddress 200 | } 201 | if strings.HasSuffix(p.pushGWAddress, "/") { 202 | p.pushGWAddress = p.pushGWAddress[:len(p.pushGWAddress)-1] 203 | } 204 | p.pushURL = fmt.Sprintf( 205 | "%s/metrics/job/%s/instance/%s", 206 | p.pushGWAddress, 207 | url.QueryEscape(p.jobName), 208 | url.QueryEscape(p.instance)) 209 | 210 | logCtx := logger.WithFields(log.Fields{ 211 | "name": m.name, 212 | "output": m.output, 213 | "file": m.inputSpecFile, 214 | "pushURL": p.pushURL, 215 | }) 216 | 217 | if p.metricsfilename != "" { 218 | metricsfile, p.dump = metricsSetupDumpfile( 219 | p.metricsfilename, logCtx) 220 | if metricsfile != nil { 221 | defer metricsfile.Close() 222 | } 223 | } 224 | 225 | for { 226 | 227 | select { 228 | // 229 | // Look for shutdown 230 | case <-m.shutdownChan: 231 | // 232 | // We're being signalled to leave. 233 | logCtx.Info("metrics prometheus worker exiting") 234 | return 235 | 236 | // 237 | // Receive message 238 | case msg, ok := <-m.dataChan: 239 | 240 | if !ok { 241 | // Channel has been closed. Our demise is 242 | // near. SHUTDOWN is likely to be received soon on 243 | // control channel.
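// (Setting dataChan to nil below relies on the Go rule that a
// receive from a nil channel blocks forever, which quietly
// disables this select case until shutdown arrives.)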
244 | // 245 | m.dataChan = nil 246 | continue 247 | } 248 | 249 | // 250 | // Make sure we clear any left-over message (only on 251 | // error) 252 | buf.Reset() 253 | err := msg.produceMetrics(&m.inputSpec, m.outputHandler, buf) 254 | if err != nil { 255 | // 256 | // We should count these and export them from meta 257 | // monitoring 258 | logCtx.WithError(err).Error("message producing metrics") 259 | continue 260 | } 261 | } 262 | } 263 | } 264 | 265 | func (p *metricsPrometheusOutputHandler) setupWorkers(m *metricsOutputModule) { 266 | m.shutdownSyncPoint.Add(1) 267 | go p.worker(m) 268 | } 269 | 270 | func metricsPrometheusNew(name string, nc nodeConfig) (metricsOutputHandler, error) { 271 | 272 | var p metricsPrometheusOutputHandler 273 | var err error 274 | 275 | p.pushGWAddress, err = nc.config.GetString(name, "pushgw") 276 | if err != nil { 277 | logger.WithError(err).WithFields( 278 | log.Fields{"name": name}).Error( 279 | "attribute 'pushgw' required for prometheus metric export") 280 | return nil, err 281 | } 282 | 283 | p.jobName, err = nc.config.GetString(name, "jobname") 284 | if err != nil { 285 | p.jobName = PROMETHEUS_JOBNAME 286 | } 287 | 288 | p.instance, err = nc.config.GetString(name, "instance") 289 | if err != nil { 290 | p.instance = conductor.ID 291 | } 292 | 293 | // If not set, will default to the empty string (i.e. no dump file) 294 | p.metricsfilename, _ = nc.config.GetString(name, "dump") 295 | 296 | return &p, nil 297 | } 298 | -------------------------------------------------------------------------------- /metrics_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // June 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | "github.com/dlintw/goconf" 13 | "testing" 14 | "time" 15 | ) 16 | 17 | func TestMetricsConfigureNegative(t *testing.T) { 18 | var nc nodeConfig 19 | 20 | mod := metricsOutputModuleNew() 21 | 22 | cfg, err := goconf.ReadConfigFile("pipeline_test_bad.conf") 23 | if err != nil { 24 | t.Fatalf("Failed to read config [%v]", err) 25 | } 26 | nc.config = cfg 27 | 28 | badsections := []string{ 29 | "metricsbad_missingfilename", 30 | "metricsbad_missingfile", 31 | "metricsbad_badjson", 32 | "metricsbad_missingoutput", 33 | "metricsbad_unsupportedoutput", 34 | "metricsbad_missingpushgw", 35 | } 36 | 37 | for _, section := range badsections { 38 | err, _, _ = mod.configure(section, nc) 39 | if err == nil { 40 | t.Errorf("metrics section [%v] should fail\n", section) 41 | } 42 | } 43 | } 44 | 45 | func TestMetricsConfigure(t *testing.T) { 46 | var nc nodeConfig 47 | var codecJSONTestSource testSource 48 | 49 | mod := metricsOutputModuleNew() 50 | 51 | cfg, err := goconf.ReadConfigFile("pipeline_test.conf") 52 | if err != nil { 53 | t.Fatalf("Failed to read config [%v]", err) 54 | } 55 | nc.config = cfg 56 | err, dChan, cChan := mod.configure("mymetrics", nc) 57 | if err != nil { 58 | t.Fatalf("Failed to configure metrics module [%v]", err) 59 | } 60 | 61 | err, p := getNewCodecJSON("JSON CODEC TEST") 62 | if err != nil { 63 | t.Errorf("Failed to get JSON codec [%v]", err) 64 | return 65 | } 66 | 67 | testJSONMsg := []byte(`{"Name":"Alice","Body":"Hello","Test":1294706395881547000}`) 68 | err, dMs := p.blockToDataMsgs(&codecJSONTestSource, testJSONMsg) 69 | 70 | if err != nil { 71 | t.Errorf("Failed to get messages from JSON stream [%v]", err) 72 | return 73 | } 74 | 75 | dM := dMs[0] 76 | 77 | dChan <- dM 78 | 79 | time.Sleep(1 * time.Second) 80 | 81 | // 82 | // Send shutdown message 83 | respChan := make(chan *ctrlMsg) 84 | request := &ctrlMsg{ 85 | id: SHUTDOWN, 86 | respChan: respChan, 87 | } 88 | cChan <- request 89 | 90 | // Wait for ACK 91 | ack := <-respChan
92 | 93 | if ack.id != ACK { 94 | t.Error("failed to receive acknowledgement indicating shutdown complete") 95 | } 96 | 97 | } 98 | -------------------------------------------------------------------------------- /pipeline_test.conf: -------------------------------------------------------------------------------- 1 | [default] 2 | 3 | # 4 | # ID is used to identify pipeline to third parties, e.g. fluentd tag 5 | # and prometheus instance 6 | id = pipeline 7 | metamonitoring_prometheus_resource = /metrics 8 | metamonitoring_prometheus_server = :8888 9 | 10 | [myrouters] 11 | type = tcp 12 | encap = st 13 | stage = xport_input 14 | listen = :5556 15 | logdata = on 16 | 17 | [myotherrouters] 18 | type = tcp 19 | encap = st 20 | stage = xport_input 21 | listen = :5557 22 | logdata = on 23 | 24 | # [mymdtrouter] 25 | # stage = xport_input 26 | # type = grpc 27 | # encap = gpb 28 | # server = 192.168.123.1:56789 29 | # tls = false 30 | # tls_pem = ca.pem 31 | # tls_servername = tlsservername 32 | # subscriptions = genericstats,datarates 33 | # logdata = on 34 | 35 | [mykafka] 36 | type = kafka 37 | key = path_and_id 38 | stage = xport_output 39 | brokers = localhost:9092 40 | kafkaversion = 2.1.0 41 | topic = testtopic 42 | datachanneldepth = 1000 43 | logdata = on 44 | 45 | [kafkaconsumer] 46 | type = kafka 47 | key = path_and_id 48 | stage = xport_input 49 | brokers = localhost:9092 50 | kafkaversion = 2.1.0 51 | topic = consumertopic3 52 | consumergroup = mycollectors 53 | encoding = json 54 | logdata = on 55 | 56 | [mykafka2] 57 | type = kafka 58 | key = path_and_id 59 | stage = xport_output 60 | brokers = localhost:9092 61 | kafkaversion = 2.1.0 62 | topic = consumertopic3 63 | datachanneldepth = 1000 64 | encoding = json 65 | logdata = on 66 | 67 | [inspector] 68 | type = tap 69 | stage = xport_output 70 | datachanneldepth = 1000 71 | file = dump.txt 72 | #countonly = true 73 | 74 | [inspector1] 75 | type = tap 76 | stage = xport_output 77 | datachanneldepth = 1000 78 | file = dump1.txt 79 | countonly = true 80 | 81 | [mymetrics] 82 | stage = xport_output 83 | type = metrics 84 | output = prometheus 85 | file = metrics.json 86 | datachanneldepth = 1000 87 | pushgw = localhost:9091 88 | workers = 1 89 | jobname = telemetry 90 | 91 | [mymetricstest] 92 | stage = xport_output 93 | type = metrics 94 | output = test 95 | file = metrics.json 96 | datachanneldepth = 1000 97 | 98 | [mygrpcout] 99 | type = grpc 100 | stage = xport_output 101 | encoding = json 102 | listen = localhost:5959 103 | logdata = on 104 | datachanneldepth = 1000 105 | 106 | [mygrpcoutnolisten] 107 | type = grpc 108 | stage = xport_output 109 | encoding = json 110 | logdata = on 111 | 112 | [mygrpcoutbadencoding] 113 | type = grpc 114 | stage = xport_output 115 | encoding = gibberish 116 | listen = localhost:5959 117 | # logdata = on 118 | 119 | [templatetest] 120 | stage = xport_output 121 | type = tap 122 | file = dumpfiltererd.txt 123 | encoding = template 124 | template = filter_test.json 125 | datachanneldepth = 1000 126 | 127 | [udpinnolisten] 128 | type = udp 129 | stage = xport_input 130 | rxbuf = 25165824 131 | #logdata = on 132 | 133 | [udpinbadlisten] 134 | type = udp 135 | stage = xport_input 136 | listen = localhostuBAD,BAD,BAD:5958 137 | rxbuf = 25165824 138 | #logdata = on 139 | 140 | [udpin] 141 | type = udp 142 | stage = xport_input 143 | listen = localhost:5958 144 | rxbuf = 25165824 145 | logdata = on 146 | 147 | [replay_bin_archive] 148 | stage = xport_input 149 | type = replay 150 | file = 
mdt_msg_samples/dump.bin 151 | delayusec = 100000 152 | loop=true 153 | logdata = on 154 | 155 | [tap_out_bin_hexdump] 156 | stage = xport_output 157 | type = tap 158 | encoding = gpb 159 | file = mdt_msg_samples/hexdump.bin 160 | datachanneldepth = 1000 161 | 162 | [kafkaAout] 163 | type = kafka 164 | key = path_and_id 165 | stage = xport_output 166 | brokers = localhost:9092 167 | kafkaversion = 2.1.0 168 | topic_metadata_template = topic_template_testA.txt 169 | encoding = json 170 | datachanneldepth = 1000 171 | required_acks = none 172 | 173 | [kafkaAin] 174 | type = kafka 175 | key = path_and_id 176 | stage = xport_input 177 | brokers = localhost:9092 178 | kafkaversion = 2.1.0 179 | topic = RootOperKafkaTest 180 | consumergroup = mycollectors 181 | encoding = json 182 | 183 | [kafkaBout] 184 | type = kafka 185 | key = path_and_id 186 | stage = xport_output 187 | brokers = localhost:9092 188 | kafkaversion = 2.1.0 189 | topic_metadata_template = topic_template_testB.txt 190 | datachanneldepth = 1000 191 | encoding = json 192 | required_acks = local 193 | 194 | [kafkaBin] 195 | type = kafka 196 | key = path_and_id 197 | stage = xport_input 198 | brokers = localhost:9092 199 | kafkaversion = 2.1.0 200 | topic = RouterInSpace 201 | consumergroup = mycollectors 202 | encoding = json 203 | 204 | [kafkaCout] 205 | type = kafka 206 | key = path_and_id 207 | stage = xport_output 208 | brokers = localhost:9092 209 | kafkaversion = 2.1.0 210 | topic_metadata_template = topic_template_testC.txt 211 | datachanneldepth = 1000 212 | encoding = json 213 | 214 | [kafkaCin] 215 | type = kafka 216 | key = path_and_id 217 | stage = xport_input 218 | brokers = localhost:9092 219 | kafkaversion = 2.1.0 220 | topic = RootOperKafkaTest_RouterInSpace 221 | consumergroup = mycollectors 222 | encoding = json 223 | 224 | [kafkaDout] 225 | type = kafka 226 | key = path_and_id 227 | stage = xport_output 228 | brokers = localhost:9092 229 | kafkaversion = 2.1.0 230 | topic = fallback_to_topic 231 | topic_metadata_template = topic_template_testBAD.txt 232 | datachanneldepth = 1000 233 | encoding = json 234 | required_acks = commit 235 | 236 | [kafkaDin] 237 | type = kafka 238 | key = path_and_id 239 | stage = xport_input 240 | brokers = localhost:9092 241 | kafkaversion = 2.1.0 242 | topic = fallback_to_topic 243 | consumergroup = mycollectors 244 | encoding = json 245 | -------------------------------------------------------------------------------- /pipeline_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // February 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 
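//
// The conductor test below hand-frames telemetry messages for the
// ST/TCP input. A minimal sketch of that framing as a standalone
// helper (hypothetical name, for illustration only; the test inlines
// the equivalent logic):
//
//	func frameST(payload []byte) ([]byte, error) {
//		hdr := encapSTHdr{
//			MsgType:       ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA,
//			MsgEncap:      ENC_ST_HDR_MSG_ENCAP_GPB,
//			MsgHdrVersion: ENC_ST_HDR_VERSION,
//			Msgflag:       ENC_ST_HDR_MSG_FLAGS_NONE,
//			Msglen:        uint32(len(payload)),
//		}
//		buf := new(bytes.Buffer)
//		// Header is written big-endian, followed directly by the payload.
//		if err := binary.Write(buf, binary.BigEndian, &hdr); err != nil {
//			return nil, err
//		}
//		buf.Write(payload)
//		return buf.Bytes(), nil
//	}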
6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | "bytes" 13 | "encoding/binary" 14 | samples "github.com/cisco-ie/pipeline-gnmi/mdt_msg_samples" 15 | log "github.com/sirupsen/logrus" 16 | "net" 17 | "testing" 18 | "time" 19 | ) 20 | 21 | func TestConductor(t *testing.T) { 22 | 23 | startup() 24 | 25 | logger.Info("Load config, logging", conductor.Logfile) 26 | conductor.Configfile = "pipeline_test.conf" 27 | err := loadConfig() 28 | if err != nil { 29 | t.Errorf("Config load failed: %v\n", err) 30 | } else { 31 | go run() 32 | } 33 | 34 | logger.Info("Connect TCP :5556") 35 | conn1, err := net.Dial("tcp", ":5556") 36 | if err != nil { 37 | t.Errorf("Failed to connect to server 5556") 38 | return 39 | } 40 | 41 | logger.Info("Connect TCP :5557") 42 | conn2, err := net.Dial("tcp", ":5557") 43 | if err != nil { 44 | t.Errorf("Failed to connect to server 5557") 45 | return 46 | } 47 | 48 | // 49 | // Get a GPB message and encap it in ST header. 50 | logger.Info("Loading MDT samples") 51 | sample := samples.MDTSampleTelemetryTableFetchOne( 52 | samples.SAMPLE_TELEMETRY_DATABASE_BASIC) 53 | if sample == nil { 54 | t.Errorf("Failed to fetch data") 55 | return 56 | } 57 | payload := sample.SampleStreamGPB 58 | encap := encapSTHdr{ 59 | MsgType: ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA, 60 | MsgEncap: ENC_ST_HDR_MSG_ENCAP_GPB, 61 | MsgHdrVersion: ENC_ST_HDR_VERSION, 62 | Msgflag: ENC_ST_HDR_MSG_FLAGS_NONE, 63 | Msglen: uint32(len(payload)), 64 | } 65 | 66 | encodedMsg := make([]byte, 0, len(payload)+256) 67 | hdrbuf := bytes.NewBuffer(encodedMsg) 68 | err = binary.Write(hdrbuf, binary.BigEndian, &encap) 69 | if err != nil { 70 | t.Errorf("Failed to write data header") 71 | return 72 | } 73 | _, err = hdrbuf.Write(payload) 74 | if err != nil { 75 | t.Errorf("Failed to write payload") 76 | return 77 | } 78 | 79 | logger.Info("Write to first connection") 80 | written, err := conn1.Write(encodedMsg) 81 | if err != nil { 82 | t.Errorf("Failed to write data 1") 83 | return 84 | } 85 | if written != len(encodedMsg) { 86 | t.Errorf("Wrote %d, expect %d for data 1", 87 | written, len(encodedMsg)) 88 | return 89 | } 90 | 91 | logger.Info("Write to second connection") 92 | written, err = conn2.Write(encodedMsg) 93 | if err != nil { 94 | t.Errorf("Failed to write data 2") 95 | return 96 | } 97 | if written != len(encodedMsg) { 98 | t.Errorf("Wrote %d, expect %d for data 2", 99 | written, len(encodedMsg)) 100 | return 101 | } 102 | 103 | time.Sleep(5 * time.Second) 104 | 105 | // 106 | // Diddle outputs and inputs to shut them down. 107 | logger.Info("Close inputs") 108 | for _, node := range conductor.inputNodes { 109 | respChan := make(chan *ctrlMsg) 110 | request := &ctrlMsg{ 111 | id: SHUTDOWN, 112 | respChan: respChan, 113 | } 114 | 115 | // 116 | // Send shutdown message 117 | logger.Info("Closing input", node) 118 | node.ctrlChan <- request 119 | // Wait for ACK 120 | <-respChan 121 | close(node.ctrlChan) 122 | logger.Info(" Closed input", node) 123 | } 124 | logger.Info("Close outputs") 125 | for _, node := range conductor.outputNodes { 126 | respChan := make(chan *ctrlMsg) 127 | request := &ctrlMsg{ 128 | id: SHUTDOWN, 129 | respChan: respChan, 130 | } 131 | 132 | // 133 | // Send shutdown message 134 | logger.Info("Closing output", node) 135 | node.ctrlChan <- request 136 | // Wait for ACK 137 | <-respChan 138 | close(node.ctrlChan) 139 | logger.Info(" Closed output", node) 140 | } 141 | 142 | } 143 | 144 | // 145 | // Set up logger for tests.
146 | func init() { 147 | startup() 148 | // theLogger.Formatter = new(log.JSONFormatter) 149 | logger = theLogger.WithFields(log.Fields{"tag": "TESTING"}) 150 | } 151 | -------------------------------------------------------------------------------- /pipeline_test_bad.conf: -------------------------------------------------------------------------------- 1 | [myrouters] 2 | type = tcp 3 | encap = st 4 | stage = xport_input 5 | port = 5556 6 | 7 | [mykafka] 8 | type = kafka 9 | key = path_and_id 10 | stage = xport_output 11 | topic = telemetry 12 | datachanneldepth = 1000 13 | 14 | [kafkaconsumernoconsumergroup] 15 | type = kafka 16 | key = path_and_id 17 | stage = xport_input 18 | brokers = localhost:9092 19 | kafkaversion = 2.1.0 20 | topic = consumertopic 21 | logdata = on 22 | 23 | [kafkaconsumerbadkey] 24 | type = kafka 25 | key = BADKEY 26 | stage = xport_input 27 | brokers = localhost:9092 28 | kafkaversion = 2.1.0 29 | topic = consumertopic 30 | consumergroup = mycollectors 31 | logdata = on 32 | 33 | [kafkaconsumerbadencoding] 34 | type = kafka 35 | key = path 36 | encoding = BADENCODING 37 | stage = xport_input 38 | brokers = localhost:9092 39 | kafkaversion = 2.1.0 40 | topic = consumertopic 41 | consumergroup = mycollectors 42 | logdata = on 43 | 44 | [kafkaconsumernobroker] 45 | type = kafka 46 | key = path 47 | stage = xport_input 48 | topic = consumertopic 49 | consumergroup = mycollectors 50 | kafkaversion = 2.1.0 51 | logdata = on 52 | 53 | [kafkaconsumernokafkaversion] 54 | type = kafka 55 | key = path 56 | stage = xport_input 57 | topic = consumertopic 58 | consumergroup = mycollectors 59 | brokers = localhost:9092 60 | logdata = on 61 | 62 | [kafkaconsumerbadkafkaversion] 63 | type = kafka 64 | key = path 65 | stage = xport_input 66 | topic = consumertopic 67 | consumergroup = mycollectors 68 | brokers = localhost:9092 69 | kafkaversion = BAD 70 | logdata = on 71 | 72 | [myotherkafka] 73 | type = kafka 74 | key = BADKEY 75 | stage = xport_output 76 | brokers = BADlocalhost:9092 77 | kafkaversion = 2.1.0 78 | topic = telemetry 79 | datachanneldepth = 1000 80 | 81 | [kafkaBADrequiredAcks] 82 | type = key 83 | kafka = path_and_id 84 | stage = xport_output 85 | brokers = localhost:9092 86 | kafkaversion = 2.1.0 87 | datachanneldepth = 1000 88 | encoding = json 89 | required_acks = commitBAD 90 | 91 | [kafkaBADkafkaversion] 92 | type = key 93 | kafka = path_and_id 94 | stage = xport_output 95 | brokers = localhost:9092 96 | kafkaversion = BAD 97 | datachanneldepth = 1000 98 | encoding = json 99 | required_acks = commit 100 | 101 | [kafkaBADTopicTemplace] 102 | type = key 103 | kafka = path_and_id 104 | stage = xport_output 105 | brokers = localhost:9092 106 | kafkaversion = 2.1.0 107 | datachanneldepth = 1000 108 | encoding = json 109 | topic_metadata_template = MISSING 110 | 111 | [metricsbad_missingfilename] 112 | stage = xport_output 113 | type = metrics 114 | 115 | [metricsbad_missingfile] 116 | stage = xport_output 117 | type = metrics 118 | file= nonexistent.json 119 | 120 | [metricsbad_badjson] 121 | stage = xport_output 122 | type = metrics 123 | file= pipeline_test_bad.conf 124 | 125 | [metricsbad_missingoutput] 126 | stage = xport_output 127 | type = metrics 128 | file= metrics.json 129 | 130 | [metricsbad_unsupportedoutput] 131 | stage = xport_output 132 | type = metrics 133 | file= metrics.json 134 | output = unknown 135 | 136 | [metricsbad_missingpushgw] 137 | stage = xport_output 138 | type = metrics 139 | file= metrics.json 140 | output = prometheus 141 | 
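# All of the sections above are deliberately malformed and are only
# consumed by the negative configuration tests. For contrast, a
# minimal well-formed prometheus metrics section (kept commented out
# here so this file stays all-bad; cf. [mymetrics] in
# pipeline_test.conf) would look like:
#
# [metricsgood_example]
# stage = xport_output
# type = metrics
# file = metrics.json
# output = prometheus
# pushgw = localhost:9091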
-------------------------------------------------------------------------------- /pipeline_test_tap.conf: -------------------------------------------------------------------------------- 1 | 2 | [tap_out_bin] 3 | stage = xport_output 4 | type = tap 5 | raw = true 6 | file = dump.bin 7 | datachanneldepth = 2 8 | 9 | [replay_bin] 10 | stage = xport_input 11 | type = replay 12 | file = dump.bin 13 | delayusec = 100000 14 | -------------------------------------------------------------------------------- /replay.go: -------------------------------------------------------------------------------- 1 | // 2 | // November 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | // Input node used to replay streaming telemetry archives. Archives 9 | // can be recorded using the 'tap' output module with "raw = true" set. 10 | // 11 | // Tests for replay module are in tap_test.go 12 | // 13 | package main 14 | 15 | import ( 16 | "encoding/hex" 17 | "encoding/json" 18 | "fmt" 19 | log "github.com/sirupsen/logrus" 20 | "io" 21 | "os" 22 | "time" 23 | ) 24 | 25 | const ( 26 | REPLAY_DELAY_DEFAULT_USEC = 200000 27 | ) 28 | 29 | // 30 | // Module implementing inputNodeModule interface allowing REPLAY to read 31 | // binary dump for replay. 32 | type replayInputModule struct { 33 | name string 34 | logctx *log.Entry 35 | filename string 36 | logData bool 37 | firstN int 38 | loop bool 39 | delayUsec int 40 | done bool 41 | count int 42 | ctrlChan chan *ctrlMsg 43 | dataChans []chan<- dataMsg 44 | } 45 | 46 | func replayInputModuleNew() inputNodeModule { 47 | return &replayInputModule{} 48 | } 49 | 50 | func (t *replayInputModule) String() string { 51 | return fmt.Sprintf("%s:%s", t.name, t.filename) 52 | } 53 | func (t *replayInputModule) replayInputFeederLoop() error { 54 | 55 | var stats msgStats 56 | var tickIn time.Duration 57 | var tick <-chan time.Time 58 | 59 | err, parser := getNewEncapParser(t.name, "st", t) 60 | if err != nil { 61 | t.logctx.WithError(err).Error( 62 | "Failed to get parser, STOP") 63 | t.done = true 64 | } 65 | 66 | f, err := os.Open(t.filename) 67 | if err != nil { 68 | t.logctx.WithError(err).Error( 69 | "Failed to open file with binary dump of telemetry. " + 70 | "Dump should be produced with 'tap' output, 'raw=true', STOP") 71 | t.done = true 72 | } else { 73 | defer f.Close() 74 | 75 | if t.delayUsec != 0 { 76 | tickIn = time.Duration(t.delayUsec) * time.Microsecond 77 | } else { 78 | // 79 | // We still tick to give the control channel a look-in. 80 | tickIn = time.Nanosecond 81 | } 82 | tick = time.Tick(tickIn) 83 | } 84 | 85 | for { 86 | 87 | select { 88 | 89 | case <-tick: 90 | 91 | if t.done { 92 | // Waiting for exit, we're done here. 93 | continue 94 | } 95 | 96 | // iterate until a message is produced (header, payload) 97 | var i int 98 | for { 99 | i = i + 1 100 | err, buffer := parser.nextBlockBuffer() 101 | if err != nil { 102 | t.logctx.WithError(err).WithFields( 103 | log.Fields{ 104 | "iteration": i, 105 | "nth_msg": t.count, 106 | }).Error("Failed to fetch buffer, STOP") 107 | t.done = true 108 | return err 109 | } 110 | 111 | readn, err := io.ReadFull(f, *buffer) 112 | if err != nil { 113 | if err != io.EOF { 114 | t.logctx.WithError(err).WithFields( 115 | log.Fields{ 116 | "iteration": i, 117 | "nth_msg": t.count, 118 | }).Error("Failed to read next buffer, STOP") 119 | t.done = true 120 | return err 121 | } 122 | 123 | if !t.loop { 124 | // 125 | // We're done.
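// Not looping: one pass through the archive is complete.
// Returning nil (rather than an error) also lets
// replayInputFeederLoopSticky below exit for good instead
// of restarting us.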
126 | return nil 127 | } 128 | 129 | t.logctx.Debug("restarting from start of message archive") 130 | _, err := f.Seek(0, 0) 131 | if err != nil { 132 | t.logctx.WithError(err).WithFields( 133 | log.Fields{ 134 | "iteration": i, 135 | "nth_msg": t.count, 136 | }).Error("Failed to go back to start, STOP") 137 | t.done = true 138 | return err 139 | } 140 | // 141 | // Because we're starting from scratch, we need to get a 142 | // new parser too, to restart the parser state machine 143 | // along with the data. 144 | if err, parser = getNewEncapParser(t.name, "st", t); err != nil { t.done = true; return err } 145 | continue 146 | } 147 | err, msgs := parser.nextBlock(*buffer, nil) 148 | if err != nil { 149 | t.logctx.WithError(err).WithFields( 150 | log.Fields{ 151 | "iteration": i, 152 | "nth_msg": t.count, 153 | "read_in": readn, 154 | "len": len(*buffer), 155 | "msg": hex.Dump(*buffer), 156 | }).Error( 157 | "Failed to decode next block, STOP") 158 | t.done = true 159 | return err 160 | } 161 | 162 | if t.logData { 163 | t.logctx.WithFields(log.Fields{ 164 | "iteration": i, 165 | "nth_msg": t.count, 166 | "dataMsgCount": len(msgs), 167 | "len": len(*buffer), 168 | "msg": hex.Dump(*buffer), 169 | }).Debug("REPLAY input logdata") 170 | } 171 | 172 | if msgs == nil { 173 | // 174 | // We probably just read a header 175 | continue 176 | } 177 | 178 | for _, msg := range msgs { 179 | for _, dataChan := range t.dataChans { 180 | if len(dataChan) == cap(dataChan) { 181 | t.logctx.Error("Input overrun (replace with counter)") 182 | continue 183 | } 184 | dataChan <- msg 185 | } 186 | } 187 | 188 | t.count = t.count + 1 189 | if t.count == t.firstN && t.firstN != 0 { 190 | t.logctx.Debug("dumped all messages expected") 191 | t.done = true 192 | } 193 | 194 | break 195 | } 196 | 197 | case msg := <-t.ctrlChan: 198 | switch msg.id { 199 | case REPORT: 200 | t.logctx.Debug("report request") 201 | content, _ := json.Marshal(stats) 202 | resp := &ctrlMsg{ 203 | id: ACK, 204 | content: content, 205 | respChan: nil, 206 | } 207 | msg.respChan <- resp 208 | 209 | case SHUTDOWN: 210 | t.logctx.Info("REPLAY input loop, rxed SHUTDOWN, shutting down") 211 | 212 | resp := &ctrlMsg{ 213 | id: ACK, 214 | respChan: nil, 215 | } 216 | msg.respChan <- resp 217 | 218 | return nil 219 | 220 | default: 221 | t.logctx.Error("REPLAY input loop, unknown ctrl message") 222 | } 223 | } 224 | } 225 | } 226 | 227 | func (t *replayInputModule) replayInputFeederLoopSticky() { 228 | t.logctx.Debug("Starting REPLAY feeder loop") 229 | for { 230 | err := t.replayInputFeederLoop() 231 | if err == nil { 232 | t.logctx.Debug("REPLAY feeder loop done, exit") 233 | break 234 | } else { 235 | // retry 236 | time.Sleep(time.Second) 237 | if t.done { 238 | t.logctx.WithFields(log.Fields{ 239 | "nth_msg": t.count, 240 | }).Debug("idle, waiting for shutdown") 241 | } else { 242 | t.logctx.Debug("Restarting REPLAY feeder loop") 243 | } 244 | } 245 | } 246 | } 247 | 248 | func (t *replayInputModule) configure( 249 | name string, 250 | nc nodeConfig, 251 | dataChans []chan<- dataMsg) (error, chan<- *ctrlMsg) { 252 | 253 | var err error 254 | 255 | t.filename, err = nc.config.GetString(name, "file") 256 | if err != nil { 257 | return err, nil 258 | } 259 | 260 | t.name = name 261 | t.logData, _ = nc.config.GetBool(name, "logdata") 262 | t.firstN, _ = nc.config.GetInt(name, "firstn") 263 | if t.firstN == 0 { 264 | t.loop, _ = nc.config.GetBool(name, "loop") 265 | } 266 | 267 | t.delayUsec, err = nc.config.GetInt(name, "delayusec") 268 | if err != nil { 269 | //
270 | // Default to a sensible-ish value for replay to avoid 271 | // overwhelming output stages. Note that we can still overwhelm 272 | // output stages if we want to do so explicitly, i.e. by setting 273 | // the delay to zero. 274 | t.delayUsec = REPLAY_DELAY_DEFAULT_USEC 275 | } 276 | 277 | t.ctrlChan = make(chan *ctrlMsg) 278 | t.dataChans = dataChans 279 | 280 | t.logctx = logger.WithFields(log.Fields{ 281 | "name": t.name, 282 | "file": t.filename, 283 | "logdata": t.logData, 284 | "firstN": t.firstN, 285 | "delayUsec": t.delayUsec, 286 | "loop": t.loop, 287 | }) 288 | 289 | go t.replayInputFeederLoopSticky() 290 | 291 | return nil, t.ctrlChan 292 | } 293 | -------------------------------------------------------------------------------- /skeleton/pipeline.mk: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # March 2017 4 | # Copyright (c) 2017-2019 by cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | # Rudimentary build and test support 8 | # 9 | # 10 | 11 | VERSION = $(shell git describe --always --long --dirty) 12 | COVER_PROFILE = -coverprofile=coverage.out 13 | 14 | PKG = $(shell go list) 15 | 16 | # If the infra-utils package is not vendored in your workspace (e.g. you 17 | # are making changes to it), you can simply comment out the VENDOR 18 | # line, and the variable update on packages will assume they are under 19 | # source. 20 | VENDOR = $(PKG)/vendor/ 21 | 22 | LDFLAGS = -ldflags "-X main.appVersion=v${VERSION}(bigmuddy)" 23 | 24 | SOURCEDIR = . 25 | SOURCES := $(shell find $(SOURCEDIR) -name '*.go' -o -name "*.proto" ) 26 | 27 | # Derived from https://vic.demuzere.be/articles/golang-makefile-crosscompile/ 28 | PLATFORMS := linux/amd64 windows/amd64 darwin/amd64 29 | GOPLATFORMTEMP = $(subst /, ,$@) 30 | GOOS = $(word 1, $(GOPLATFORMTEMP)) 31 | GOARCH = $(word 2, $(GOPLATFORMTEMP)) 32 | 33 | .PHONY: $(PLATFORMS) 34 | $(PLATFORMS): 35 | @echo " > Building for ${GOOS}/${GOARCH}" 36 | GOOS=$(GOOS) GOARCH=$(GOARCH) $(GOBUILD) $(LDFLAGS) -o $(BINDIR)/$(BINARY)_$(GOOS)_$(GOARCH) 37 | 38 | ## Build binaries 39 | .PHONY: build 40 | build: hygiene $(PLATFORMS) 41 | 42 | ## Run Go hygiene tooling like vet and fmt 43 | hygiene: 44 | @echo " > Running Go hygiene tooling" 45 | go vet -composites=false ./... 46 | go fmt ./... 47 | 48 | .PHONY: generated-source 49 | generated-source: 50 | go generate -x 51 | 52 | ## Run unit tests 53 | .PHONY: test 54 | test: 55 | $(GOTEST) -v $(COVER_PROFILE) ./... 56 | 57 | ## Displays unit test coverage 58 | .PHONY: coverage 59 | coverage: test 60 | $(GOTOOL) cover -html=coverage.out 61 | 62 | ## Displays integration test coverage 63 | .PHONY: integration-coverage 64 | integration-coverage: integration-test 65 | $(GOTOOL) cover -html=coverage.out 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /tap.go: -------------------------------------------------------------------------------- 1 | // 2 | // February 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | // Output node used to tap pipeline for troubleshooting 9 | // 10 | package main 11 | 12 | import ( 13 | "bufio" 14 | "bytes" 15 | "encoding/binary" 16 | "encoding/hex" 17 | "encoding/json" 18 | "fmt" 19 | log "github.com/sirupsen/logrus" 20 | "os" 21 | "time" 22 | ) 23 | 24 | // 25 | // Module implementing outputNodeModule interface.
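// A typical tap section (copied from [inspector] in
// pipeline_test.conf, with the optional knobs noted):
//
//	[inspector]
//	type = tap
//	stage = xport_output
//	datachanneldepth = 1000
//	file = dump.txt
//	#countonly = true   count messages only, do not dump content
//	#raw = true         write a replayable binary archive instead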
26 | type tapOutputModule struct { 27 | name string 28 | filename string 29 | countOnly bool 30 | rawDump bool 31 | streamSpec *dataMsgStreamSpec 32 | dataChannelDepth int 33 | ctrlChan chan *ctrlMsg 34 | dataChan chan dataMsg 35 | } 36 | 37 | func tapOutputModuleNew() outputNodeModule { 38 | return &tapOutputModule{} 39 | } 40 | 41 | func (t *tapOutputModule) tapOutputFeederLoop() { 42 | var stats msgStats 43 | var hexOnly bool 44 | // 45 | // Period, in seconds, to dump stats if only counting. 46 | const TIMEOUT = 10 47 | timeout := make(chan bool, 1) 48 | 49 | if !t.streamSpec.dataMsgStreamSpecTextBased() { 50 | hexOnly = true 51 | } 52 | 53 | logctx := logger.WithFields( 54 | log.Fields{ 55 | "name": t.name, 56 | "filename": t.filename, 57 | "countonly": t.countOnly, 58 | "streamSpec": t.streamSpec, 59 | }) 60 | 61 | // Prepare dump file for writing 62 | f, err := os.OpenFile(t.filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 63 | 0660) 64 | if err != nil { 65 | logctx.WithError(err).Error("Tap failed to open dump file") 66 | return 67 | } 68 | defer f.Close() 69 | 70 | logctx.Info("Starting up tap") 71 | 72 | if t.countOnly { 73 | go func() { 74 | time.Sleep(TIMEOUT * time.Second) 75 | timeout <- true 76 | }() 77 | } 78 | 79 | w := bufio.NewWriter(f) 80 | 81 | for { 82 | select { 83 | 84 | case <-timeout: 85 | 86 | go func() { 87 | time.Sleep(TIMEOUT * time.Second) 88 | timeout <- true 89 | }() 90 | w.WriteString(fmt.Sprintf( 91 | "%s:%s: rxed msgs: %v\n", 92 | t.name, time.Now().Local(), stats.MsgsOK)) 93 | w.Flush() 94 | 95 | case msg, ok := <-t.dataChan: 96 | 97 | if !ok { 98 | // Channel has been closed. Our demise 99 | // is near. SHUTDOWN is likely to be received soon. 100 | t.dataChan = nil 101 | continue 102 | } 103 | 104 | if t.countOnly { 105 | stats.MsgsOK++ 106 | continue 107 | } 108 | 109 | dM := msg 110 | description := dM.getDataMsgDescription() 111 | if t.rawDump { 112 | err, b := dM.produceByteStream(dataMsgStreamSpecDefault) 113 | if err != nil { 114 | logctx.WithError(err).WithFields( 115 | log.Fields{ 116 | "msg": description, 117 | }).Error("Tap failed to produce raw message") 118 | continue 119 | } 120 | err, enc := dataMsgStreamTypeToEncoding( 121 | dM.getDataMsgStreamType()) 122 | if err != nil { 123 | logctx.WithError(err).WithFields( 124 | log.Fields{ 125 | "msg": description, 126 | }).Error("Tap failed to identify encoding") 127 | continue 128 | } 129 | err, encst := encapSTFromEncoding(enc) 130 | if err != nil { 131 | logctx.WithError(err).WithFields( 132 | log.Fields{ 133 | "msg": description, 134 | }).Error("Tap failed to identify encap st") 135 | continue 136 | } 137 | 138 | // 139 | // We should really push this into the encode side of the codec. 140 | hdr := encapSTHdr{ 141 | MsgType: ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA, 142 | MsgEncap: encst, 143 | MsgHdrVersion: ENC_ST_HDR_VERSION, 144 | Msgflag: ENC_ST_HDR_MSG_FLAGS_NONE, 145 | Msglen: uint32(len(b)), 146 | } 147 | err = binary.Write(w, binary.BigEndian, hdr) 148 | if err != nil { 149 | logctx.WithError(err).WithFields( 150 | log.Fields{ 151 | "msg": description, 152 | }).Errorf("Tap failed to write binary hdr %+v", hdr) 153 | continue 154 | } 155 | 156 | _, err = w.Write(b) 157 | if err != nil { 158 | logctx.WithError(err).WithFields( 159 | log.Fields{ 160 | "msg": description, 161 | }).Error("Tap failed to write binary message") 162 | continue 163 | } 164 | 165 | continue 166 | } 167 | 168 | // OK. We're ready to dump something largely human readable.
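//
// Each entry written below looks roughly like this (illustrative):
//
//	------- 2016-02-01 10:00:00 +0000 UTC -------
//	Summary: <message description>
//	{
//	  "pretty-printed": "JSON body"
//	}
//
// with a hex dump in place of the JSON for non text-based streams.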
169 | errStreamType, b := dM.produceByteStream(t.streamSpec) 170 | if errStreamType != nil { 171 | err, b = dM.produceByteStream(dataMsgStreamSpecDefault) 172 | if err != nil { 173 | logctx.WithError(err).WithFields( 174 | log.Fields{ 175 | "msg": description, 176 | }).Error("Tap failed to dump message") 177 | stats.MsgsNOK++ 178 | continue 179 | } 180 | } else if b == nil { 181 | continue 182 | } 183 | stats.MsgsOK++ 184 | 185 | w.WriteString(fmt.Sprintf( 186 | "\n------- %v -------\n", time.Now())) 187 | w.WriteString(fmt.Sprintf("Summary: %s\n", description)) 188 | if hexOnly || errStreamType != nil { 189 | if errStreamType != nil { 190 | w.WriteString(fmt.Sprintf( 191 | "Requested stream type failed: [%v]\n", errStreamType)) 192 | } 193 | w.WriteString(hex.Dump(b)) 194 | } else { 195 | var out bytes.Buffer 196 | json.Indent(&out, b, "", " ") 197 | w.WriteString(out.String()) 198 | } 199 | w.Flush() 200 | 201 | case msg := <-t.ctrlChan: 202 | switch msg.id { 203 | case REPORT: 204 | content, _ := json.Marshal(stats) 205 | resp := &ctrlMsg{ 206 | id: ACK, 207 | content: content, 208 | respChan: nil, 209 | } 210 | msg.respChan <- resp 211 | 212 | case SHUTDOWN: 213 | 214 | w.Flush() 215 | logctx.Info("tap feeder loop, rxed SHUTDOWN") 216 | 217 | // 218 | // Dump detailed stats here 219 | 220 | resp := &ctrlMsg{ 221 | id: ACK, 222 | respChan: nil, 223 | } 224 | msg.respChan <- resp 225 | return 226 | 227 | default: 228 | logctx.Error("tap feeder loop, unknown ctrl message") 229 | } 230 | } 231 | 232 | } 233 | } 234 | 235 | // 236 | // Setup a tap output module so we can see what is going on. 237 | func (t *tapOutputModule) configure(name string, nc nodeConfig) ( 238 | error, chan<- dataMsg, chan<- *ctrlMsg) { 239 | 240 | var err error 241 | 242 | t.name = name 243 | 244 | t.filename, err = nc.config.GetString(name, "file") 245 | if err != nil { 246 | return err, nil, nil 247 | } 248 | 249 | t.dataChannelDepth, err = nc.config.GetInt(name, "datachanneldepth") 250 | if err != nil { 251 | t.dataChannelDepth = DATACHANNELDEPTH 252 | } 253 | 254 | // If not set, will default to false, but let's be clear. 255 | t.countOnly, _ = nc.config.GetBool(name, "countonly") 256 | 257 | // Looking for a raw dump? 
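// ('raw' makes the tap write the ST-framed binary archive that
// the 'replay' input can re-ingest, so it cannot be combined
// with 'countonly' or with a re-encoding 'encoding' option.)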
258 | t.rawDump, _ = nc.config.GetBool(name, "raw") 259 | if t.rawDump { 260 | if t.countOnly { 261 | err = fmt.Errorf("'countonly' is incompatible with 'raw'") 262 | logger.WithError(err).WithFields( 263 | log.Fields{"name": name}).Error( 264 | "tap config: 'countonly' is incompatible with 'raw'") 265 | return err, nil, nil 266 | } 267 | if _, errEnc := nc.config.GetString(name, "encoding"); errEnc == nil { 268 | err = fmt.Errorf("'encoding' is incompatible with 'raw'") 269 | logger.WithError(err).WithFields( 270 | log.Fields{"name": name}).Error( 271 | "tap config: 'encoding' is incompatible with 'raw'") 272 | return err, nil, nil 273 | } 274 | } 275 | 276 | // Pick output stream type 277 | err, t.streamSpec = dataMsgStreamSpecFromConfig(nc, name) 278 | if err != nil { 279 | logger.WithError(err).WithFields( 280 | log.Fields{ 281 | "name": name, 282 | }).Error("'encoding' option for tap output") 283 | return err, nil, nil 284 | } 285 | 286 | // 287 | // Setup control and data channels 288 | t.ctrlChan = make(chan *ctrlMsg) 289 | t.dataChan = make(chan dataMsg, t.dataChannelDepth) 290 | 291 | go t.tapOutputFeederLoop() 292 | 293 | return nil, t.dataChan, t.ctrlChan 294 | 295 | } 296 | -------------------------------------------------------------------------------- /tap_test.go: -------------------------------------------------------------------------------- 1 | // 2 | // November 2016, cisco 3 | // 4 | // Copyright (c) 2016 by cisco Systems, Inc. 5 | // All rights reserved. 6 | // 7 | // 8 | 9 | package main 10 | 11 | import ( 12 | samples "github.com/cisco-ie/pipeline-gnmi/mdt_msg_samples" 13 | "github.com/dlintw/goconf" 14 | "testing" 15 | "time" 16 | ) 17 | 18 | type tapTestCtrl struct { 19 | } 20 | 21 | func (t *tapTestCtrl) String() string { 22 | return "TapTest" 23 | } 24 | 25 | // 26 | // TestTAPBinaryWriteRead reads a sample message, generates the binary 27 | // dump file, and re-reads that file via the 'replay' input 28 | func TestTAPBinaryWriteRead(t *testing.T) { 29 | var nc nodeConfig 30 | var tapTestCtrlSrc tapTestCtrl 31 | const numDataMsg = 10 32 | 33 | logger.Info("Start TestTAPBinaryWriteRead") 34 | sample := samples.MDTSampleTelemetryTableFetchOne( 35 | samples.SAMPLE_TELEMETRY_DATABASE_BASIC) 36 | if sample == nil { 37 | t.Errorf("Failed to fetch data") 38 | return 39 | } 40 | 41 | err, codec := getNewCodecGPB("test", ENCODING_GPB) 42 | if err != nil { 43 | t.Error("Failed to get a GPB codec to test") 44 | } 45 | 46 | err, msgs := codec.blockToDataMsgs(&tapTestCtrlSrc, sample.SampleStreamGPB) 47 | if err != nil { 48 | t.Error("Failed to convert the sample block to dataMsgs") 49 | } 50 | 51 | logger.Info("Read config") 52 | cfg, err := goconf.ReadConfigFile("pipeline_test_tap.conf") 53 | if err != nil { 54 | t.Fatal("read config failed") 55 | } 56 | 57 | section := "tap_out_bin" 58 | nc.config = cfg 59 | 60 | out := tapOutputModuleNew() 61 | err, inject, ocmc := out.configure(section, nc) 62 | if err != nil { 63 | t.Errorf("tap section [%v] failed\n", section) 64 | } 65 | 66 | logger.Info("Injecting content") 67 | for i := 0; i < numDataMsg; i++ { 68 | inject <- msgs[0] 69 | // 70 | // Now that messages are injected, we should be able to pick 71 | // them up again.
72 | } 73 | 74 | in := replayInputModuleNew() 75 | section = "replay_bin" 76 | dataChan := make(chan dataMsg, 1000) 77 | dataChans := []chan<- dataMsg{dataChan} 78 | err, icmc := in.configure(section, nc, dataChans) 79 | if err != nil { 80 | t.Errorf("tap section [%v] failed\n", section) 81 | } 82 | 83 | logger.Info("Now let's read it") 84 | 85 | var i int 86 | for i = 0; i < numDataMsg; i++ { 87 | <-dataChan 88 | } 89 | if i != numDataMsg { 90 | t.Fatalf("Received %d out of %d messages", i, numDataMsg) 91 | } 92 | 93 | respChan := make(chan *ctrlMsg) 94 | request := &ctrlMsg{ 95 | id: SHUTDOWN, 96 | respChan: respChan, 97 | } 98 | 99 | logger.Info("Shutdown reader") 100 | icmc <- request 101 | // Wait for ACK 102 | ack := <-respChan 103 | if ack.id != ACK { 104 | t.Error("failed to receive ack for tap in shutdown complete") 105 | } 106 | 107 | logger.Info("Shutdown writer") 108 | ocmc <- request 109 | // Wait for ACK 110 | ack = <-respChan 111 | if ack.id != ACK { 112 | t.Error("failed to receive ack for tap out shutdown complete") 113 | } 114 | } 115 | 116 | // 117 | // TestTAPBinaryReadWrite taps in from a binary dump, and produces a 118 | // hex dump 119 | func TestTAPBinaryReadWrite(t *testing.T) { 120 | var nc nodeConfig 121 | 122 | cfg, err := goconf.ReadConfigFile("pipeline_test.conf") 123 | if err != nil { 124 | t.Error("read config failed") 125 | } 126 | 127 | nc.config = cfg 128 | out := tapOutputModuleNew() 129 | 130 | section := "tap_out_bin_hexdump" 131 | err, inject, ocmc := out.configure(section, nc) 132 | if err != nil { 133 | t.Errorf("tap section [%v] failed\n", section) 134 | } 135 | 136 | in := replayInputModuleNew() 137 | section = "replay_bin_archive" 138 | dataChan := make(chan dataMsg, 1000) 139 | dataChans := []chan<- dataMsg{dataChan} 140 | err, icmc := in.configure(section, nc, dataChans) 141 | if err != nil { 142 | t.Errorf("tap section [%v] failed\n", section) 143 | } 144 | // 145 | // 7 messages in replay_bin_archive 146 | for i := 0; i < 7; i++ { 147 | dM := <-dataChan 148 | inject <- dM 149 | } 150 | 151 | // 152 | // Check handling of closing the data chan before shutdown, giving 153 | // the handler an opportunity to exercise before actually shutting down. 154 | close(inject) 155 | time.Sleep(100 * time.Millisecond) 156 | 157 | respChan := make(chan *ctrlMsg) 158 | request := &ctrlMsg{ 159 | id: SHUTDOWN, 160 | respChan: respChan, 161 | } 162 | 163 | icmc <- request 164 | // Wait for ACK 165 | ack := <-respChan 166 | if ack.id != ACK { 167 | t.Error("failed to receive ack for tap in shutdown complete") 168 | } 169 | 170 | ocmc <- request 171 | // Wait for ACK 172 | ack = <-respChan 173 | if ack.id != ACK { 174 | t.Error("failed to receive ack for tap out shutdown complete") 175 | } 176 | 177 | } 178 | -------------------------------------------------------------------------------- /tools/monitor/data_graf4ppl/grafana.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cisco-ie/pipeline-gnmi/d931496b9d70a8d9a14c047a9f94b6081469ed21/tools/monitor/data_graf4ppl/grafana.db -------------------------------------------------------------------------------- /tools/monitor/data_prom4ppl/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 5s # By default, scrape targets every 5 seconds.
3 | 4 | scrape_configs: 5 | - job_name: 'pipeline' 6 | static_configs: 7 | - targets: ['localhost:8989'] 8 | 9 | - job_name: 'prometheus' 10 | static_configs: 11 | - targets: ['localhost:9090'] 12 | -------------------------------------------------------------------------------- /tools/monitor/run.sh: -------------------------------------------------------------------------------- 1 | docker rm -vf prom4ppl 2>/dev/null || echo "No prometheus instance to kill" 2 | docker rm -fv graf4ppl 2>/dev/null || echo "No grafana instance to kill" 3 | docker run -d --net=host --name prom4ppl -v ${PWD}/data_prom4ppl:/etc/prometheus prom/prometheus:v1.5.2 -config.file=/etc/prometheus/prometheus.yml 4 | docker run -d -i --net=host -v ${PWD}/data_graf4ppl:/var/lib/grafana --name graf4ppl grafana/grafana:4.2.0 5 | -------------------------------------------------------------------------------- /tools/test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Derived from https://github.com/confluentinc/cp-docker-images/blob/5.2.1-post/examples/kafka-single-node/docker-compose.yml 3 | version: '2' 4 | services: 5 | zookeeper: 6 | image: confluentinc/cp-zookeeper:5.1.3 7 | environment: 8 | ZOOKEEPER_CLIENT_PORT: 2181 9 | ZOOKEEPER_TICK_TIME: 2000 10 | 11 | kafka: 12 | image: confluentinc/cp-kafka:5.1.3 13 | depends_on: 14 | - zookeeper 15 | ports: 16 | - 9092:9092 17 | environment: 18 | KAFKA_BROKER_ID: 1 19 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 20 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 21 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 22 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT 23 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 -------------------------------------------------------------------------------- /topic_template_testA.txt: -------------------------------------------------------------------------------- 1 | {{- .Path -}} 2 | -------------------------------------------------------------------------------- /topic_template_testB.txt: -------------------------------------------------------------------------------- 1 | {{- .Identifier -}} 2 | -------------------------------------------------------------------------------- /topic_template_testBAD.txt: -------------------------------------------------------------------------------- 1 | {{.DOESNOTEXIST}} 2 | -------------------------------------------------------------------------------- /topic_template_testC.txt: -------------------------------------------------------------------------------- 1 | {{- .Path -}}_{{- .Identifier -}} -------------------------------------------------------------------------------- /xport_grpc_out.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: xport_grpc_out.proto 3 | 4 | /* 5 | Package main is a generated protocol buffer package. 6 | 7 | It is generated from these files: 8 | xport_grpc_out.proto 9 | 10 | It has these top-level messages: 11 | SubJSONReqMsg 12 | SubJSONRepMsg 13 | */ 14 | package main 15 | 16 | import proto "github.com/golang/protobuf/proto" 17 | import fmt "fmt" 18 | import math "math" 19 | 20 | import ( 21 | context "golang.org/x/net/context" 22 | grpc "google.golang.org/grpc" 23 | ) 24 | 25 | // Reference imports to suppress errors if they are not otherwise used. 
26 | var _ = proto.Marshal 27 | var _ = fmt.Errorf 28 | var _ = math.Inf 29 | 30 | // This is a compile-time assertion to ensure that this generated file 31 | // is compatible with the proto package it is being compiled against. 32 | // A compilation error at this line likely means your copy of the 33 | // proto package needs to be updated. 34 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 35 | 36 | type SubJSONReqMsg struct { 37 | ReqId int64 `protobuf:"varint,1,opt,name=ReqId,json=reqId" json:"ReqId,omitempty"` 38 | } 39 | 40 | func (m *SubJSONReqMsg) Reset() { *m = SubJSONReqMsg{} } 41 | func (m *SubJSONReqMsg) String() string { return proto.CompactTextString(m) } 42 | func (*SubJSONReqMsg) ProtoMessage() {} 43 | func (*SubJSONReqMsg) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 44 | 45 | func (m *SubJSONReqMsg) GetReqId() int64 { 46 | if m != nil { 47 | return m.ReqId 48 | } 49 | return 0 50 | } 51 | 52 | type SubJSONRepMsg struct { 53 | Data []byte `protobuf:"bytes,1,opt,name=Data,json=data,proto3" json:"Data,omitempty"` 54 | ReqId int64 `protobuf:"varint,2,opt,name=ReqId,json=reqId" json:"ReqId,omitempty"` 55 | } 56 | 57 | func (m *SubJSONRepMsg) Reset() { *m = SubJSONRepMsg{} } 58 | func (m *SubJSONRepMsg) String() string { return proto.CompactTextString(m) } 59 | func (*SubJSONRepMsg) ProtoMessage() {} 60 | func (*SubJSONRepMsg) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 61 | 62 | func (m *SubJSONRepMsg) GetData() []byte { 63 | if m != nil { 64 | return m.Data 65 | } 66 | return nil 67 | } 68 | 69 | func (m *SubJSONRepMsg) GetReqId() int64 { 70 | if m != nil { 71 | return m.ReqId 72 | } 73 | return 0 74 | } 75 | 76 | func init() { 77 | proto.RegisterType((*SubJSONReqMsg)(nil), "main.SubJSONReqMsg") 78 | proto.RegisterType((*SubJSONRepMsg)(nil), "main.SubJSONRepMsg") 79 | } 80 | 81 | // Reference imports to suppress errors if they are not otherwise used. 82 | var _ context.Context 83 | var _ grpc.ClientConn 84 | 85 | // This is a compile-time assertion to ensure that this generated file 86 | // is compatible with the grpc package it is being compiled against. 87 | const _ = grpc.SupportPackageIsVersion4 88 | 89 | // Client API for GRPCOut service 90 | 91 | type GRPCOutClient interface { 92 | // Server side telemetry streaming 93 | Pull(ctx context.Context, in *SubJSONReqMsg, opts ...grpc.CallOption) (GRPCOut_PullClient, error) 94 | } 95 | 96 | type gRPCOutClient struct { 97 | cc *grpc.ClientConn 98 | } 99 | 100 | func NewGRPCOutClient(cc *grpc.ClientConn) GRPCOutClient { 101 | return &gRPCOutClient{cc} 102 | } 103 | 104 | func (c *gRPCOutClient) Pull(ctx context.Context, in *SubJSONReqMsg, opts ...grpc.CallOption) (GRPCOut_PullClient, error) { 105 | stream, err := grpc.NewClientStream(ctx, &_GRPCOut_serviceDesc.Streams[0], c.cc, "/main.gRPCOut/Pull", opts...) 
106 | if err != nil { 107 | return nil, err 108 | } 109 | x := &gRPCOutPullClient{stream} 110 | if err := x.ClientStream.SendMsg(in); err != nil { 111 | return nil, err 112 | } 113 | if err := x.ClientStream.CloseSend(); err != nil { 114 | return nil, err 115 | } 116 | return x, nil 117 | } 118 | 119 | type GRPCOut_PullClient interface { 120 | Recv() (*SubJSONRepMsg, error) 121 | grpc.ClientStream 122 | } 123 | 124 | type gRPCOutPullClient struct { 125 | grpc.ClientStream 126 | } 127 | 128 | func (x *gRPCOutPullClient) Recv() (*SubJSONRepMsg, error) { 129 | m := new(SubJSONRepMsg) 130 | if err := x.ClientStream.RecvMsg(m); err != nil { 131 | return nil, err 132 | } 133 | return m, nil 134 | } 135 | 136 | // Server API for GRPCOut service 137 | 138 | type GRPCOutServer interface { 139 | // Server side telemetry streaming 140 | Pull(*SubJSONReqMsg, GRPCOut_PullServer) error 141 | } 142 | 143 | func RegisterGRPCOutServer(s *grpc.Server, srv GRPCOutServer) { 144 | s.RegisterService(&_GRPCOut_serviceDesc, srv) 145 | } 146 | 147 | func _GRPCOut_Pull_Handler(srv interface{}, stream grpc.ServerStream) error { 148 | m := new(SubJSONReqMsg) 149 | if err := stream.RecvMsg(m); err != nil { 150 | return err 151 | } 152 | return srv.(GRPCOutServer).Pull(m, &gRPCOutPullServer{stream}) 153 | } 154 | 155 | type GRPCOut_PullServer interface { 156 | Send(*SubJSONRepMsg) error 157 | grpc.ServerStream 158 | } 159 | 160 | type gRPCOutPullServer struct { 161 | grpc.ServerStream 162 | } 163 | 164 | func (x *gRPCOutPullServer) Send(m *SubJSONRepMsg) error { 165 | return x.ServerStream.SendMsg(m) 166 | } 167 | 168 | var _GRPCOut_serviceDesc = grpc.ServiceDesc{ 169 | ServiceName: "main.gRPCOut", 170 | HandlerType: (*GRPCOutServer)(nil), 171 | Methods: []grpc.MethodDesc{}, 172 | Streams: []grpc.StreamDesc{ 173 | { 174 | StreamName: "Pull", 175 | Handler: _GRPCOut_Pull_Handler, 176 | ServerStreams: true, 177 | }, 178 | }, 179 | Metadata: "xport_grpc_out.proto", 180 | } 181 | 182 | func init() { proto.RegisterFile("xport_grpc_out.proto", fileDescriptor0) } 183 | 184 | var fileDescriptor0 = []byte{ 185 | // 159 bytes of a gzipped FileDescriptorProto 186 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xa9, 0x28, 0xc8, 0x2f, 187 | 0x2a, 0x89, 0x4f, 0x2f, 0x2a, 0x48, 0x8e, 0xcf, 0x2f, 0x2d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 188 | 0x17, 0x62, 0xc9, 0x4d, 0xcc, 0xcc, 0x53, 0x52, 0xe5, 0xe2, 0x0d, 0x2e, 0x4d, 0xf2, 0x0a, 0xf6, 189 | 0xf7, 0x0b, 0x4a, 0x2d, 0xf4, 0x2d, 0x4e, 0x17, 0x12, 0xe1, 0x62, 0x0d, 0x4a, 0x2d, 0xf4, 0x4c, 190 | 0x91, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x62, 0x2d, 0x02, 0x71, 0x94, 0x2c, 0x91, 0x94, 0x15, 191 | 0x80, 0x94, 0x09, 0x71, 0xb1, 0xb8, 0x24, 0x96, 0x24, 0x82, 0x55, 0xf1, 0x04, 0xb1, 0xa4, 0x24, 192 | 0x96, 0x24, 0x22, 0xb4, 0x32, 0x21, 0x69, 0x35, 0xb2, 0xe7, 0x62, 0x4f, 0x0f, 0x0a, 0x70, 0xf6, 193 | 0x2f, 0x2d, 0x11, 0x32, 0xe1, 0x62, 0x09, 0x28, 0xcd, 0xc9, 0x11, 0x12, 0xd6, 0x03, 0xd9, 0xad, 194 | 0x87, 0x62, 0xb1, 0x14, 0xba, 0x20, 0xc8, 0x1a, 0x25, 0x06, 0x03, 0xc6, 0x24, 0x36, 0xb0, 0x7b, 195 | 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x49, 0x74, 0xa5, 0x74, 0xc7, 0x00, 0x00, 0x00, 196 | } 197 | -------------------------------------------------------------------------------- /xport_grpc_out.proto: -------------------------------------------------------------------------------- 1 | // 2 | // gRPC service definition used to connect to pipeline and pull 3 | // telemetry in desired form. 
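//
// Once pipeline is running with a grpc output section such as
// [mygrpcout] in pipeline_test.conf (listen = localhost:5959,
// encoding = json), a subscriber could pull the JSON stream with a
// generic gRPC client. Illustrative invocation only, assuming the
// grpcurl tool is installed and run from this directory:
//
//   grpcurl -plaintext -proto xport_grpc_out.proto \
//       -d '{"ReqId": 1}' localhost:5959 main.gRPCOut/Pull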
4 | // 5 | // Build go binding as follows: 6 | // protoc --plugin=/protoc-gen-go --go_out=plugins=grpc:. xport_grpc_out.proto 7 | // 8 | 9 | syntax = "proto3"; 10 | 11 | package main; 12 | 13 | message SubJSONReqMsg { 14 | int64 ReqId = 1; 15 | } 16 | 17 | message SubJSONRepMsg { 18 | bytes Data = 1; 19 | int64 ReqId = 2; 20 | } 21 | 22 | service gRPCOut { 23 | // Server side telemetry streaming 24 | rpc Pull(SubJSONReqMsg) returns(stream SubJSONRepMsg) {}; 25 | } -------------------------------------------------------------------------------- /xport_grpc_out_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | _ "fmt" 5 | samples "github.com/cisco-ie/pipeline-gnmi/mdt_msg_samples" 6 | "github.com/dlintw/goconf" 7 | log "github.com/sirupsen/logrus" 8 | "golang.org/x/net/context" 9 | "google.golang.org/grpc" 10 | "io" 11 | "math/rand" 12 | "sync" 13 | "testing" 14 | "time" 15 | ) 16 | 17 | type grpcOutTestClient struct { 18 | t *testing.T 19 | } 20 | 21 | func (c *grpcOutTestClient) GetRequestMetadata( 22 | ctx context.Context, uri ...string) ( 23 | map[string]string, error) { 24 | 25 | mapping := map[string]string{} 26 | 27 | return mapping, nil 28 | } 29 | 30 | func (c *grpcOutTestClient) RequireTransportSecurity() bool { 31 | return false 32 | } 33 | 34 | func (c *grpcOutTestClient) grpcOutTestClientRun( 35 | server string, 36 | timeout int, 37 | target int, 38 | result chan bool) { 39 | 40 | var opts []grpc.DialOption 41 | var ctx context.Context 42 | 43 | opts = append(opts, grpc.WithInsecure()) 44 | opts = append(opts, grpc.WithPerRPCCredentials(c)) 45 | opts = append(opts, 46 | grpc.WithTimeout(time.Second*time.Duration(timeout))) 47 | 48 | conn, err := grpc.Dial(server, opts...) 
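	// Note: grpc.WithTimeout and grpc.WithInsecure work here but are
	// deprecated in newer grpc-go releases. A minimal sketch of the modern
	// equivalent (assuming a grpc-go version that ships the
	// credentials/insecure package):
	//
	//   ctx, cancel := context.WithTimeout(context.Background(),
	//       time.Second*time.Duration(timeout))
	//   defer cancel()
	//   conn, err := grpc.DialContext(ctx, server,
	//       grpc.WithTransportCredentials(insecure.NewCredentials()),
	//       grpc.WithPerRPCCredentials(c))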
49 | if err != nil { 50 | c.t.Log("client dial failed", server, err, timeout) 51 | result <- false 52 | return 53 | } 54 | defer conn.Close() 55 | 56 | req := &SubJSONReqMsg{ 57 | ReqId: rand.Int63(), 58 | } 59 | 60 | ctx, _ = context.WithCancel(context.Background()) 61 | client := NewGRPCOutClient(conn) 62 | stream, err := client.Pull(ctx, req) 63 | if err != nil { 64 | c.t.Log("client pull failed", server, err) 65 | result <- false 66 | return 67 | } 68 | 69 | i := 0 70 | for { 71 | reply, err := stream.Recv() 72 | if err == nil { 73 | i++ 74 | if req.ReqId != reply.ReqId { 75 | c.t.Log("reqId mismatch", server, req.ReqId, reply.ReqId) 76 | } 77 | if i == target { 78 | result <- true 79 | stream.CloseSend() 80 | if _, err := stream.Recv(); err != nil { 81 | break 82 | } 83 | return 84 | } 85 | } else if err == io.EOF { 86 | // c.t.Log("client recv EOF", server, err) 87 | break 88 | } else { 89 | // c.t.Error("client recv failed", server, err) 90 | result <- false 91 | return 92 | } 93 | } 94 | } 95 | 96 | type grpcOutProducer struct { 97 | t *testing.T 98 | dMch chan<- dataMsg 99 | codec codec 100 | } 101 | 102 | func (g *grpcOutProducer) String() string { 103 | return "grpc_out_test" 104 | } 105 | 106 | func grpcOutProduceOneIteration( 107 | sample *samples.SampleTelemetryTableEntry, 108 | context samples.MDTContext) (abort bool) { 109 | 110 | c := context.(*grpcOutProducer) 111 | 112 | err, msgs := c.codec.blockToDataMsgs(c, sample.SampleStreamGPB) 113 | if err != nil { 114 | c.t.Fatal("producing one iteration blockToDataMsgs failed", err) 115 | } 116 | 117 | for _, msg := range msgs { 118 | // fmt.Print("Dispatching msg\n") 119 | c.dMch <- msg 120 | } 121 | 122 | return false 123 | } 124 | 125 | func grpcOutProduceContent( 126 | t *testing.T, 127 | ctrl chan struct{}, 128 | dMch chan<- dataMsg, 129 | iterations int, 130 | periodSeconds int) { 131 | 132 | err, codec := getNewCodecGPB("test", ENCODING_GPB) 133 | if err != nil { 134 | t.Fatal("Failed to get codec", err) 135 | } 136 | 137 | prod := &grpcOutProducer{ 138 | t: t, 139 | dMch: dMch, 140 | codec: codec, 141 | } 142 | 143 | for { 144 | // fmt.Print("Iteration\n") 145 | for i := 0; i < iterations; i++ { 146 | samples.MDTSampleTelemetryTableIterate( 147 | samples.SAMPLE_TELEMETRY_DATABASE_BASIC, 148 | grpcOutProduceOneIteration, prod) 149 | select { 150 | case <-ctrl: 151 | return 152 | default: 153 | } 154 | } 155 | timeout := make(chan bool, 1) 156 | go func() { 157 | // fmt.Printf("Wait %vs \n", periodSeconds) 158 | time.Sleep(time.Duration(periodSeconds) * time.Second) 159 | timeout <- true 160 | }() 161 | select { 162 | case <-ctrl: 163 | // We're done here 164 | return 165 | case <-timeout: 166 | // Wait for timeout specified 167 | // fmt.Print("Period\n") 168 | } 169 | } 170 | } 171 | 172 | func TestGrpcOutConfigNegative(t *testing.T) { 173 | 174 | var nc nodeConfig 175 | var err error 176 | 177 | startup() 178 | logger = theLogger.WithFields(log.Fields{"tag": "test"}) 179 | 180 | // 181 | // Read test config 182 | nc.config, err = goconf.ReadConfigFile("pipeline_test.conf") 183 | if err != nil { 184 | t.Error("Config failed to open config pipeline_test.conf") 185 | } 186 | g := grpcOutputModuleNew() 187 | 188 | err, _, _ = g.configure("mygrpcoutnolisten", nc) 189 | if err == nil { 190 | t.Error("Config expected to fail without 'listen'") 191 | } 192 | err, _, _ = g.configure("mygrpcoutbadencoding", nc) 193 | if err == nil { 194 | t.Error("Config expected to fail unsupported 'encoding'") 195 | } 196 | } 197 | 198 | func 
coreTestGrpcOutClient(
199 | 	t *testing.T,
200 | 	clients int,
201 | 	collect int,
202 | 	produce_iterations int,
203 | 	period int,
204 | 	earlyTerminationAfter int,
205 | 	produceOnly bool) {
206 | 
207 | 	var nc nodeConfig
208 | 	var err error
209 | 
210 | 	startup()
211 | 
212 | 	//
213 | 	// Read test config
214 | 	nc.config, err = goconf.ReadConfigFile("pipeline_test.conf")
215 | 	if err != nil {
216 | 		log.Fatalf("Pipeline_test.conf could not be opened [%v]", err)
217 | 	}
218 | 	go metamonitoring_init(nc)
219 | 
220 | 	o := grpcOutputModuleNew()
221 | 
222 | 	err, dM, ctrl := o.configure("mygrpcout", nc)
223 | 	if err != nil {
224 | 		t.Fatal("Failed to configure module", err)
225 | 	}
226 | 	// Wait for configure to complete before we start test
227 | 	time.Sleep(time.Second)
228 | 	//
229 | 	// Kick off client
230 | 	c := &grpcOutTestClient{t: t}
231 | 	server, err := nc.config.GetString("mygrpcout", "listen")
232 | 	if err != nil {
233 | 		t.Fatal("Failed to pick up 'listen'", err)
234 | 	}
235 | 
236 | 	produceCtrl := make(chan struct{})
237 | 	results := make(chan bool, clients)
238 | 	if !produceOnly {
239 | 		for i := 0; i < clients; i++ {
240 | 			//
241 | 			// Given this uses t we should synchronise and make sure
242 | 			// it does not outlast the test.
243 | 			go c.grpcOutTestClientRun(server, 3, collect, results)
244 | 		}
245 | 	}
246 | 
247 | 	go grpcOutProduceContent(t, produceCtrl, dM, produce_iterations, period)
248 | 
249 | 	if produceOnly {
250 | 		//
251 | 		// block forever
252 | 		var wg sync.WaitGroup
253 | 		wg.Add(1)
254 | 		wg.Wait()
255 | 	}
256 | 
257 | 	success := 0
258 | 	if earlyTerminationAfter == 0 {
259 | 		for result := range results {
260 | 			if result {
261 | 				success++
262 | 				if success == clients {
263 | 					break
264 | 				}
265 | 			} else {
266 | 				t.Fatal("Client returned failure")
267 | 			}
268 | 		}
269 | 	} else {
270 | 		time.Sleep(time.Second * time.Duration(period*earlyTerminationAfter))
271 | 	}
272 | 
273 | 	//
274 | 	// Stop producing
275 | 	close(produceCtrl)
276 | 
277 | 	//
278 | 	// Close module from top
279 | 	respChan := make(chan *ctrlMsg)
280 | 	request := &ctrlMsg{
281 | 		id:       SHUTDOWN,
282 | 		respChan: respChan,
283 | 	}
284 | 
285 | 	ctrl <- request
286 | 	// Wait for ACK
287 | 	<-respChan
288 | 	close(ctrl)
289 | }
290 | 
291 | func TestGrpcOutClient(t *testing.T) {
292 | 	clients := 10
293 | 	// collect so many messages before returning ok at client
294 | 	collect := 14
295 | 	// number of iterations to produce - 2xmsgs per iteration
296 | 	produce_iterations := 5 // 5x2 = 10, i.e. at least two iterations
297 | 	// period between iterations
298 | 	period := 2
299 | 	coreTestGrpcOutClient(t, clients, collect, produce_iterations, period, 0, false)
300 | }
301 | 
302 | func TestGrpcOutClientCloseServerSide(t *testing.T) {
303 | 	// Wait for port to be available again
304 | 	time.Sleep(time.Second * 3)
305 | 	clients := 10
306 | 	// collect so many messages before returning ok at client
307 | 	collect := 9999999
308 | 	// number of iterations to produce - 2xmsgs per iteration
309 | 	produce_iterations := 5 // 5x2 = 10, i.e. 999999/10 - very many iterations
310 | 	// period between iterations
311 | 	period := 1
312 | 	coreTestGrpcOutClient(t, clients, collect, produce_iterations, period, 3, false)
313 | }
314 | 
--------------------------------------------------------------------------------
/xport_grpc_test.go:
--------------------------------------------------------------------------------
1 | //
2 | // February 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | 
9 | package main
10 | 
11 | import (
12 | 	"fmt"
13 | 	"math/rand"
14 | 	"net"
15 | 	"testing"
16 | 	"time"
17 | 
18 | 	telem "github.com/cisco/bigmuddy-network-telemetry-proto/proto_go"
19 | 	dialin "github.com/cisco/bigmuddy-network-telemetry-proto/proto_go/mdt_grpc_dialin"
20 | 	"github.com/golang/protobuf/proto"
21 | 	log "github.com/sirupsen/logrus"
22 | 	"golang.org/x/net/context"
23 | 	"google.golang.org/grpc"
24 | 	"google.golang.org/grpc/metadata"
25 | )
26 | 
27 | type grpcTestUserPasswordCollector struct {
28 | }
29 | 
30 | const (
31 | 	grpcServerPort = ":56789"
32 | 	GRPCTESTMSGS   = 10000
33 | 	GRPCCLIENTS    = 1
34 | )
35 | 
36 | var dummyUsername = "root"
37 | var dummyPassword = "lab"
38 | 
39 | func (c *grpcTestUserPasswordCollector) handleConfig(
40 | 	nc nodeConfig, name string, server string) error {
41 | 	return nil
42 | }
43 | func (c *grpcTestUserPasswordCollector) getUP() (
44 | 	string, string, error) {
45 | 	return dummyUsername, dummyPassword, nil
46 | }
47 | 
48 | func grpcTestUPCollectorFactory() userPasswordCollector {
49 | 	return &grpcTestUserPasswordCollector{}
50 | }
51 | 
52 | func TestGRPCRun(t *testing.T) {
53 | 
54 | 	//
55 | 	// Setup username and password for test
56 | 	grpcUPCollector := grpcTestUPCollectorFactory()
57 | 
58 | 	name := "xrv9k-grpc-server"
59 | 	server := grpcServerPort
60 | 	_, _, err := grpcUPCollector.getUP()
61 | 	if err != nil {
62 | 		t.Error("failed to collect username/password")
63 | 	}
64 | 
65 | 	cleanSubs := extractSubscriptions(" counters, datarates, 132, nemo, sub1, sub2, sub3, sub4")
66 | 	ctrlChan := make(chan *ctrlMsg)
67 | 	dataChan := make(chan dataMsg, 10)
68 | 
69 | 	var dataChans = make([]chan<- dataMsg, 0)
70 | 	dataChans = append(dataChans, dataChan)
71 | 
72 | 	tls := false
73 | 	tls_pem := ""
74 | 	tls_key := ""
75 | 	tls_servername := ""
76 | 
77 | 	block := &grpcBlock{
78 | 		name: name,
79 | 		server: &grpcRemoteServer{
80 | 			server:        server,
81 | 			subscriptions: cleanSubs,
82 | 			auth:          grpcUPCollector,
83 | 			reqID:         rand.Int63(),
84 | 		},
85 | 		encap:           ENCODING_GPB,
86 | 		encodingRequest: ENCODING_GPB_KV,
87 | 		logData:         true,
88 | 		ctrlChan:        ctrlChan,
89 | 		dataChans:       dataChans,
90 | 		childrenDone:    make(chan struct{}),
91 | 		tls:             tls,
92 | 		tls_pem:         tls_pem,
93 | 		tls_key:         tls_key,
94 | 		tls_servername:  tls_servername,
95 | 	}
96 | 
97 | 	//
98 | 	// We will fail to connect. Test retry
99 | 	log.Debug("GRPC TEST RETRY: Transport errors expected here:")
100 | 	log.Debug("GRPC TEST RETRY: ==============================================")
101 | 
102 | 	block.server.linkServerBlock(block)
103 | 	go block.run()
104 | 	time.Sleep(1 * time.Second)
105 | 
106 | 	//
107 | 	// Now run local server and consume all
108 | 	go runTestGRPCServer(t)
109 | 
110 | 	var i int
111 | 	for i = 0; i < GRPCTESTMSGS*GRPCCLIENTS*len(cleanSubs); i++ {
112 | 		<-dataChan
113 | 	}
114 | 	log.Debug(" END ==============================================")
115 | 
116 | 	log.Debugf("GRPCTEST: exited datachannel after %d events", i)
117 | 	respChan := make(chan *ctrlMsg)
118 | 	request := &ctrlMsg{
119 | 		id:       SHUTDOWN,
120 | 		respChan: respChan,
121 | 	}
122 | 
123 | 	//
124 | 	// and shutdown in the face of retry.
125 | 	ctrlChan <- request
126 | 
127 | 	// Wait for ACK
128 | 	ack := <-respChan
129 | 
130 | 	if ack.id != ACK {
131 | 		t.Error("failed to receive acknowledgement indicating shutdown complete")
132 | 	}
133 | 
134 | 	//
135 | 	// Now run local server and consume half then cancel
136 | 	ctrlChan2 := make(chan *ctrlMsg)
137 | 	dataChan2 := make(chan dataMsg, 10)
138 | 	var dataChans2 = make([]chan<- dataMsg, 0)
139 | 	dataChans2 = append(dataChans2, dataChan2)
140 | 	block2 := &grpcBlock{
141 | 		name: "client2TestForCancel",
142 | 		server: &grpcRemoteServer{
143 | 			server:        server,
144 | 			subscriptions: cleanSubs,
145 | 			auth:          grpcUPCollector,
146 | 			reqID:         rand.Int63(),
147 | 		},
148 | 		encap:           ENCODING_GPB,
149 | 		encodingRequest: ENCODING_GPB_KV,
150 | 		logData:         true,
151 | 		ctrlChan:        ctrlChan2,
152 | 		dataChans:       dataChans2,
153 | 		childrenDone:    make(chan struct{}),
154 | 		tls:             tls,
155 | 		tls_pem:         tls_pem,
156 | 		tls_servername:  tls_servername,
157 | 	}
158 | 	block2.server.linkServerBlock(block2)
159 | 	go block2.run()
160 | 
161 | 	for i = 0; i < (GRPCTESTMSGS/2)*GRPCCLIENTS*len(cleanSubs); i++ {
162 | 		<-dataChan2
163 | 	}
164 | 
165 | 	log.Debugf("GRPCTEST: exited datachannel after %d events"+
166 | 		" (partial, as expected), test cancel", i)
167 | 
168 | 	respChan = make(chan *ctrlMsg)
169 | 	request = &ctrlMsg{
170 | 		id:       SHUTDOWN,
171 | 		respChan: respChan,
172 | 	}
173 | 
174 | 	//
175 | 	// and shutdown in the face of retry.
176 | 	ctrlChan2 <- request
177 | 
178 | 	// Wait for ACK
179 | 	ack = <-respChan
180 | 
181 | 	if ack.id != ACK {
182 | 		t.Error("failed to receive acknowledgement indicating shutdown complete")
183 | 	}
184 | 
185 | }
186 | 
187 | type gRPCConfigOperServer struct{}
188 | 
189 | func (s *gRPCConfigOperServer) GetConfig(*dialin.ConfigGetArgs,
190 | 	dialin.GRPCConfigOper_GetConfigServer) error {
191 | 	return nil
192 | }
193 | func (s *gRPCConfigOperServer) MergeConfig(context.Context, *dialin.ConfigArgs) (*dialin.ConfigReply, error) {
194 | 	return nil, nil
195 | }
196 | func (s *gRPCConfigOperServer) DeleteConfig(context.Context, *dialin.ConfigArgs) (*dialin.ConfigReply, error) {
197 | 	return nil, nil
198 | }
199 | func (s *gRPCConfigOperServer) ReplaceConfig(context.Context, *dialin.ConfigArgs) (*dialin.ConfigReply, error) {
200 | 	return nil, nil
201 | }
202 | func (s *gRPCConfigOperServer) CliConfig(context.Context, *dialin.CliConfigArgs) (*dialin.CliConfigReply, error) {
203 | 	return nil, nil
204 | 
205 | }
206 | func (s *gRPCConfigOperServer) CommitReplace(context.Context, *dialin.CommitReplaceArgs) (*dialin.CommitReplaceReply, error) {
207 | 	return nil, nil
208 | }
209 | func (s *gRPCConfigOperServer) CommitConfig(context.Context, *dialin.CommitArgs) (*dialin.CommitReply, error) {
210 | 	return nil, nil
211 | }
212 | func (s *gRPCConfigOperServer) ConfigDiscardChanges(context.Context, *dialin.DiscardChangesArgs) (*dialin.DiscardChangesReply, error) {
213 | 	return nil, nil
214 | }
215 | func (s *gRPCConfigOperServer) GetOper(*dialin.GetOperArgs, dialin.GRPCConfigOper_GetOperServer) error {
216 | 	return nil
217 | }
218 | 
219 | //
220 | // Create subs handler.
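// It authenticates the dial-in client from per-RPC metadata (supplied on
// the client side via grpc.WithPerRPCCredentials, as grpcOutTestClient does
// above). The direct md["username"][0] indexing below panics if a client
// omits the metadata; a more defensive lookup would be, for example:
//
//   if v := md.Get("username"); len(v) == 0 || v[0] != dummyUsername {
//       return fmt.Errorf("bad or missing username")
//   }
//
// The stricter form is fine for this test, which always sends both keys.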
221 | func (s *gRPCConfigOperServer) CreateSubs(
222 | 	reqArgs *dialin.CreateSubsArgs,
223 | 	stream dialin.GRPCConfigOper_CreateSubsServer) error {
224 | 
225 | 	ctx := stream.Context()
226 | 	md, _ := metadata.FromIncomingContext(ctx)
227 | 
228 | 	if md["username"][0] != dummyUsername {
229 | 		return fmt.Errorf("Bad username")
230 | 	}
231 | 
232 | 	if md["password"][0] != dummyPassword {
233 | 		return fmt.Errorf("Bad password")
234 | 	}
235 | 
236 | 	var collectionId uint64
237 | 
238 | 	collectionId = 100
239 | 	basePath := "RootOper.ABC.DEF"
240 | 
241 | 	var msg telem.Telemetry
242 | 
243 | 	if reqArgs.Encode == GRPC_ENCODE_GPBKV {
244 | 		msg = telem.Telemetry{
245 | 			CollectionId:        collectionId,
246 | 			EncodingPath:        basePath,
247 | 			CollectionStartTime: uint64(time.Now().Unix()),
248 | 			CollectionEndTime:   uint64(time.Now().Unix()),
249 | 		}
250 | 	} else {
251 | 		return fmt.Errorf("Requesting unexpected encoding")
252 | 	}
253 | 
254 | 	msgstream, _ := proto.Marshal(&msg)
255 | 
256 | 	for i := 0; i < GRPCTESTMSGS; i++ {
257 | 
258 | 		reply := dialin.CreateSubsReply{
259 | 			ResReqId: reqArgs.ReqId,
260 | 			Data:     msgstream,
261 | 			Errors:   "",
262 | 		}
263 | 
264 | 		if err := stream.Send(&reply); err != nil {
265 | 			return err
266 | 		}
267 | 
268 | 	}
269 | 
270 | 	return nil
271 | }
272 | 
273 | func runTestGRPCServer(t *testing.T) {
274 | 
275 | 	lis, err := net.Listen("tcp", grpcServerPort)
276 | 	if err != nil {
277 | 		t.Logf("failed to listen: %v", err)
278 | 	}
279 | 	s := grpc.NewServer()
280 | 
281 | 	dialin.RegisterGRPCConfigOperServer(s, new(gRPCConfigOperServer))
282 | 	go s.Serve(lis)
283 | 
284 | }
--------------------------------------------------------------------------------
/xport_tcp.go:
--------------------------------------------------------------------------------
1 | //
2 | // January 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | // Handle TCP transports
9 | 
10 | package main
11 | 
12 | import (
13 | 	"encoding/hex"
14 | 	"encoding/json"
15 | 	log "github.com/sirupsen/logrus"
16 | 	"io"
17 | 	"net"
18 | 	"sync"
19 | 	"time"
20 | )
21 | 
22 | const (
23 | 	//
24 | 	// Time to wait before attempting to accept connection.
25 | 	XPORT_TCP_WAIT_TO_REBIND = 1
26 | )
27 | 
28 | type serverTCP struct {
29 | 	name      string
30 | 	bindpoint string
31 | 	encap     string
32 | 	//
33 | 	// Log data into debug log
34 | 	logData bool
35 | 	//
36 | 	// TCP keepalive period in seconds. 0 stops pipeline from enabling
37 | 	// it.
38 | 	keepaliveSeconds time.Duration
39 | 	// Listener over which connections are accepted.
40 | 	listener net.Listener
41 | 	// Control channel used to control the server
42 | 	ctrlChan <-chan *ctrlMsg
43 | 	// Data channels fed by the server
44 | 	dataChans []chan<- dataMsg
45 | 	//
46 | 	// Wait group used to synchronise shutdown of all open
47 | 	// connections
48 | 	connectionGroup *sync.WaitGroup
49 | 
50 | 	//
51 | 	// Channel used to signal up to conductor that we are done
52 | 	doneCancel chan int
53 | 	//
54 | 	// Channel used within the server to request connections come
55 | 	// down
56 | 	cancelConn chan struct{}
57 | }
58 | 
59 | func (s *serverTCP) cancelled() bool {
60 | 	select {
61 | 	case <-s.cancelConn:
62 | 		return true
63 | 	default:
64 | 		// Nothing to do, go
65 | 		// back around the
66 | 		// loop.
67 | 	}
68 | 
69 | 	return false
70 | }
71 | 
72 | func (s *serverTCP) handleConnection(conn net.Conn) {
73 | 
74 | 	logctx := logger.WithFields(
75 | 		log.Fields{
76 | 			"name":      s.name,
77 | 			"local":     conn.LocalAddr().String(),
78 | 			"remote":    conn.RemoteAddr().String(),
79 | 			"encap":     s.encap,
80 | 			"keepalive": s.keepaliveSeconds,
81 | 		})
82 | 	logctx.Info("TCP server accepted connection")
83 | 
84 | 	defer func() {
85 | 		// Log, close the connection, and indicate to the wait
86 | 		// group that we are done.
87 | 		logctx.Info("TCP server closing connection")
88 | 		conn.Close()
89 | 		s.connectionGroup.Done()
90 | 	}()
91 | 
92 | 	//
93 | 	// Fetch a parser of the appropriate type.
94 | 	err, parser := getNewEncapParser(s.name, s.encap, conn.RemoteAddr())
95 | 	if err != nil {
96 | 		logctx.WithError(err).Error("TCP server failed to fetch parser")
97 | 		return
98 | 	}
99 | 
100 | 	// Make buffered channels which will allow us to handle
101 | 	// cancellation while waiting to read from socket.
102 | 	//
103 | 	// Anonymous function reading from socket will signal back over
104 | 	// error or result channels, unless operation is cancelled
105 | 	// first.
106 | 	//
107 | 	// A buffer length of one is sufficient; it is only used to
108 | 	// decouple reading from handler. A vain early optimisation is to
109 | 	// reuse the channel across the iterations reading from socket
110 | 	// rather than adding a channel every time; hence the use of the
111 | 	// buffered channel as opposed to using the pattern of simply
112 | 	// closing channel as done signal.
113 | 	readDone := make(chan int, 1)
114 | 	readErr := make(chan error, 1)
115 | 
116 | 	//
117 | 	// Setup TCP Keepalive. On linux, confirm using:
118 | 	//   ss -n -i -t -o '( sport = :<port> )'
119 | 	// State Recv-Q Send-Q ... Address:Port
120 | 	// ESTAB 0      0      ...:62436 timer:(keepalive,8min58sec,0)
121 | 	//
122 | 	if s.keepaliveSeconds != 0 {
123 | 		err = conn.(*net.TCPConn).SetKeepAlive(true)
124 | 		if err != nil {
125 | 			logctx.WithError(err).Error("TCP keepalive setup failed")
126 | 		} else {
127 | 			err = conn.(*net.TCPConn).SetKeepAlivePeriod(s.keepaliveSeconds)
128 | 			if err != nil {
129 | 				logctx.WithError(err).Error("TCP keepalive period setup failed")
130 | 			}
131 | 		}
132 | 	}
133 | 
134 | 	for {
135 | 
136 | 		// Get the next buffer expected by the parser
137 | 		err, buffer := parser.nextBlockBuffer()
138 | 		if err != nil || len(*buffer) == 0 {
139 | 			logger.WithError(err).Error("TCP server failed to fetch buffer")
140 | 			return
141 | 		}
142 | 
143 | 		//
144 | 		// What are we waiting for:
145 | 		if s.logData {
146 | 			logger.WithFields(log.Fields{
147 | 				"len": len(*buffer),
148 | 			}).Debug("waiting to read 'len' from socket")
149 | 		}
150 | 
151 | 		go func() {
152 | 			n, err := io.ReadFull(conn, *buffer)
153 | 			if err != nil {
154 | 				readErr <- err
155 | 			} else {
156 | 				readDone <- n
157 | 			}
158 | 		}()
159 | 
160 | 		select {
161 | 		case <-s.cancelConn:
162 | 			//
163 | 			// Shutting down?
164 | 			logger.Info("Cancelled connection")
165 | 			return
166 | 		case err = <-readErr:
167 | 			//
168 | 			// Read failed
169 | 			logger.WithError(err).Error("TCP server failed on read full")
170 | 			return
171 | 		case <-readDone:
172 | 			//
173 | 			// We're done reading what we expected
174 | 		}
175 | 
176 | 		//
177 | 		// We got the data we were expecting
178 | 		err, msgs := parser.nextBlock(*buffer, nil)
179 | 		if err != nil {
180 | 			logger.WithError(err).WithFields(
181 | 				log.Fields{
182 | 					"len": len(*buffer),
183 | 					"msg": hex.Dump(*buffer),
184 | 				}).Error("Failed to extract next buffer")
185 | 			return
186 | 		}
187 | 
188 | 		if s.logData {
189 | 			logger.WithFields(log.Fields{
190 | 				"dataMsgCount": len(msgs),
191 | 				"len":          len(*buffer),
192 | 				"msg":          hex.Dump(*buffer),
193 | 			}).Debug("TCP server logdata")
194 | 		}
195 | 
196 | 		//
197 | 		// It is perfectly valid for there not
198 | 		// to be a message to send on; e.g. we
199 | 		// have just read an on-the-wire
200 | 		// header which simply updates parser
201 | 		// state.
202 | 		if msgs == nil {
203 | 			continue
204 | 		}
205 | 
206 | 		//
207 | 		// Spray the generated messages across each
208 | 		// available downstream channel
209 | 		//
210 | 		for _, msg := range msgs {
211 | 			for _, dataChan := range s.dataChans {
212 | 				dataChan <- msg
213 | 			}
214 | 		}
215 | 	}
216 | }
217 | 
218 | func (s *serverTCP) acceptTCPConnections() {
219 | 
220 | 	defer func() {
221 | 
222 | 		// Wait for exit of cancelled connections if necessary.
223 | 		logger.WithFields(log.Fields{
224 | 			"name":      s.name,
225 | 			"bindpoint": s.bindpoint,
226 | 		}).Debug("TCP server waiting for connections to cancel")
227 | 		s.connectionGroup.Wait()
228 | 
229 | 		logger.WithFields(log.Fields{
230 | 			"name":      s.name,
231 | 			"bindpoint": s.bindpoint,
232 | 		}).Debug("TCP server destroying binding")
233 | 
234 | 		//
235 | 		// Tell top half we're done cleaning up
236 | 		s.doneCancel <- 1
237 | 
238 | 		// Canceller will ensure server is removed from
239 | 		// serversTCP with something like this:
240 | 		// delete(serversTCP, s.bindpoint)
241 | 	}()
242 | 
243 | 	for {
244 | 		conn, err := s.listener.Accept()
245 | 		if s.cancelled() {
246 | 			//
247 | 			// We may be here because the northbound
248 | 			// cancelled on us (and called Close)
249 | 			logger.WithFields(log.Fields{
250 | 				"name":      s.name,
251 | 				"bindpoint": s.bindpoint,
252 | 			}).Debug("TCP server cancel binding")
253 | 			return
254 | 		}
255 | 
256 | 		if err != nil {
257 | 			logger.WithError(err).WithFields(
258 | 				log.Fields{
259 | 					"name":      s.name,
260 | 					"bindpoint": s.bindpoint,
261 | 				}).Error("TCP connection accept failed")
262 | 			// We keep trying, but use a retry
263 | 			// timeout. Note that when we're in this
264 | 			// sleep, we will also not be handling
265 | 			// deletes. Do not fall through with a
266 | 			// nil conn; retry the accept instead.
267 | 			time.Sleep(XPORT_TCP_WAIT_TO_REBIND * time.Second)
268 | 			continue
269 | 		}
270 | 
271 | 		// Handle the connection; it looks for cancellation from controller.
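		// Each accepted connection joins connectionGroup before its
		// handler goroutine starts, so the defer at the top of this
		// function can Wait() for every handler to drain on shutdown.
		// A minimal sketch of the idiom (Add before go, Done in a defer;
		// handle is a stand-in for the real handler):
		//
		//   var wg sync.WaitGroup
		//   wg.Add(1)
		//   go func() { defer wg.Done(); handle(conn) }()
		//   ...
		//   wg.Wait() // blocks until all handlers have returned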
272 | 		s.connectionGroup.Add(1)
273 | 		go s.handleConnection(conn)
274 | 	}
275 | 
276 | }
277 | 
278 | func (s *serverTCP) startServer() {
279 | 
280 | 	var stats msgStats
281 | 
282 | 	logger.WithFields(log.Fields{
283 | 		"name":   s.name,
284 | 		"listen": s.bindpoint}).Info("TCP server starting")
285 | 
286 | 	//
287 | 	// Start accepting connections
288 | 	go s.acceptTCPConnections()
289 | 
290 | 	for {
291 | 		select {
292 | 
293 | 		case msg := <-s.ctrlChan:
294 | 			switch msg.id {
295 | 			case REPORT:
296 | 				content, _ := json.Marshal(stats)
297 | 				resp := &ctrlMsg{
298 | 					id:       ACK,
299 | 					content:  content,
300 | 					respChan: nil,
301 | 				}
302 | 				msg.respChan <- resp
303 | 
304 | 			case SHUTDOWN:
305 | 				logger.WithFields(
306 | 					log.Fields{"name": s.name}).Info(
307 | 					"TCP server loop, rxed SHUTDOWN, closing connections")
308 | 
309 | 				//
310 | 				// Flag cancellation of binding and
311 | 				// its connections and wait for
312 | 				// cancellation to complete
313 | 				// synchronously.
314 | 				close(s.cancelConn)
315 | 				s.listener.Close()
316 | 
317 | 				logger.WithFields(
318 | 					log.Fields{
319 | 						"name":      s.name,
320 | 						"bindpoint": s.bindpoint,
321 | 					}).Info("TCP server notify conductor binding is closed")
322 | 
323 | 				resp := &ctrlMsg{
324 | 					id:       ACK,
325 | 					respChan: nil,
326 | 				}
327 | 				msg.respChan <- resp
328 | 				return
329 | 
330 | 			default:
331 | 				logger.WithFields(
332 | 					log.Fields{"name": s.name}).Error(
333 | 					"TCP server loop, unknown ctrl message")
334 | 			}
335 | 		}
336 | 	}
337 | 
338 | }
339 | 
340 | //
341 | // addTCPServer adds the new service to serversTCP if necessary.
342 | // Runs in the context of the conductor handler
343 | func addTCPServer(
344 | 	name string,
345 | 	bindpoint string,
346 | 	encap string,
347 | 	dataChans []chan<- dataMsg,
348 | 	ctrlChan <-chan *ctrlMsg,
349 | 	keepalive int,
350 | 	logData bool) error {
351 | 
352 | 	listener, err := net.Listen("tcp", bindpoint)
353 | 	if err != nil {
354 | 		logger.WithError(err).WithFields(log.Fields{
355 | 			"name":      name,
356 | 			"bindpoint": bindpoint,
357 | 		}).Error("TCP server failed to bind")
358 | 		return err
359 | 	}
360 | 
361 | 	s := new(serverTCP)
362 | 	s.name = name
363 | 	s.listener = listener
364 | 	s.bindpoint = bindpoint
365 | 	s.encap = encap
366 | 	s.logData = logData
367 | 	s.keepaliveSeconds = time.Duration(keepalive) * time.Second
368 | 	s.dataChans = dataChans
369 | 	s.ctrlChan = ctrlChan
370 | 	s.cancelConn = make(chan struct{})
371 | 	s.doneCancel = make(chan int)
372 | 	s.connectionGroup = new(sync.WaitGroup)
373 | 
374 | 	go s.startServer()
375 | 
376 | 	return nil
377 | }
378 | 
379 | // Module implements the inputNodeModule interface
380 | type tcpInputModule struct {
381 | }
382 | 
383 | func tcpInputModuleNew() inputNodeModule {
384 | 	return &tcpInputModule{}
385 | }
386 | 
387 | func (m *tcpInputModule) configure(
388 | 	name string,
389 | 	nc nodeConfig,
390 | 	dataChans []chan<- dataMsg) (error, chan<- *ctrlMsg) {
391 | 
392 | 	listen, err := nc.config.GetString(name, "listen")
393 | 	if err != nil {
394 | 		logger.WithError(err).WithFields(
395 | 			log.Fields{"name": name}).Error(
396 | 			"attribute 'listen' must be specified in this section")
397 | 		return err, nil
398 | 	}
399 | 
400 | 	//
401 | 	// If not set, will default to false, but let's be clear.
402 | 	logData, err := nc.config.GetBool(name, "logdata")
403 | 	if err != nil {
404 | 		logData = false
405 | 	}
406 | 
407 | 	encap, err := nc.config.GetString(name, "encap")
408 | 	if err != nil {
409 | 		encap = "st"
410 | 	}
411 | 
412 | 	keepalive, _ := nc.config.GetInt(name, "keepalive_seconds")
413 | 
414 | 	//
415 | 	// Create a control channel which will be used to control us,
416 | 	// and kick off the server which will accept connections and
417 | 	// listen for control requests.
418 | 	ctrlChan := make(chan *ctrlMsg)
419 | 	err = addTCPServer(
420 | 		name, listen, encap, dataChans, ctrlChan, keepalive, logData)
421 | 
422 | 	return err, ctrlChan
423 | }
--------------------------------------------------------------------------------
/xport_tcp_test.go:
--------------------------------------------------------------------------------
1 | //
2 | // February 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | 
9 | package main
10 | 
11 | import (
12 | 	"bytes"
13 | 	"encoding/binary"
14 | 	samples "github.com/cisco-ie/pipeline-gnmi/mdt_msg_samples"
15 | 	"net"
16 | 	"testing"
17 | 	"time"
18 | )
19 | 
20 | func TestTCPServerStart(t *testing.T) {
21 | 	var dataChans = make([]chan<- dataMsg, 0)
22 | 
23 | 	ctrlChan := make(chan *ctrlMsg)
24 | 	ctrlChan1 := make(chan *ctrlMsg)
25 | 	ctrlChan2 := make(chan *ctrlMsg)
26 | 	dataChan := make(chan dataMsg, DATACHANNELDEPTH)
27 | 	dataChans = append(dataChans, dataChan)
28 | 
29 | 	err := addTCPServer("TestTCP", ":5556", "st", dataChans, ctrlChan, 0, true)
30 | 	if err != nil {
31 | 		t.Errorf("setup function failed to start up TCP server")
32 | 		return
33 | 	}
34 | 
35 | 	err = addTCPServer("TestTCP", ":5556", "st", dataChans, ctrlChan1, 1000, true)
36 | 	//
37 | 	// NEGATIVE: This should fail because we are already bound to the same port
38 | 	if err == nil {
39 | 		t.Errorf("setup function succeeded in starting up TCP server, but expected failure")
40 | 		return
41 | 	}
42 | 
43 | 	err = addTCPServer("TestTCP2", ":5559", "st", dataChans, ctrlChan2, 1000, true)
44 | 	if err != nil {
45 | 		t.Errorf("setup function failed to start up second TCP server (port REALLY in use?)")
46 | 		return
47 | 	}
48 | 
49 | 	time.Sleep(1 * time.Second)
50 | 
51 | 	//
52 | 	// Bring up connection and test sending content
53 | 	conn, err := net.Dial("tcp", ":5556")
54 | 	if err != nil {
55 | 		t.Errorf("Failed to connect to server")
56 | 		return
57 | 	}
58 | 
59 | 	sample := samples.MDTSampleTelemetryTableFetchOne(
60 | 		samples.SAMPLE_TELEMETRY_DATABASE_BASIC)
61 | 	if sample == nil {
62 | 		t.Errorf("Failed to fetch data")
63 | 		return
64 | 	}
65 | 	fullmsg := sample.SampleStreamGPB
66 | 	hdr := encapSTHdr{
67 | 		MsgType:       ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA,
68 | 		MsgEncap:      ENC_ST_HDR_MSG_ENCAP_GPB,
69 | 		MsgHdrVersion: ENC_ST_HDR_VERSION,
70 | 		Msgflag:       ENC_ST_HDR_MSG_FLAGS_NONE,
71 | 		Msglen:        uint32(len(fullmsg)),
72 | 	}
73 | 
74 | 	err = binary.Write(conn, binary.BigEndian, &hdr)
75 | 	if err != nil {
76 | 		t.Errorf("Failed to write data header")
77 | 		return
78 | 	}
79 | 
80 | 	wrote, err := conn.Write(fullmsg)
81 | 	if err != nil {
82 | 		t.Errorf("Failed write data 1")
83 | 		return
84 | 	}
85 | 	if wrote != len(fullmsg) {
86 | 		t.Errorf("Wrote %d, expect %d for data 1",
87 | 			wrote, len(fullmsg))
88 | 		return
89 | 	}
90 | 
91 | 	data := <-dataChan
92 | 
93 | 	err, b := data.produceByteStream(dataMsgStreamSpecDefault)
94 | 	if err != nil {
95 | 		t.Errorf("Data failed to produce byte stream as expected")
96 | 	}
97 | 
98 | 	if !bytes.Contains(b, fullmsg) {
99 | 		t.Errorf("Failed to receive expected data")
100 | 	}
101 | 
102 | 	//
103 | 	// Test shutdown
104 | 	respChan := make(chan *ctrlMsg)
105 | 	request := &ctrlMsg{
106 | 		id:       SHUTDOWN,
107 | 		respChan: respChan,
108 | 	}
109 | 
110 | 	//
111 | 	// Send shutdown message
112 | 	ctrlChan <- request
113 | 
114 | 	// Wait for ACK
115 | 	ack := <-respChan
116 | 
117 | 	if ack.id != ACK {
118 | 		t.Error("failed to receive acknowledgement indicating shutdown complete")
119 | 	}
120 | 
121 | 	//
122 | 	// Send shutdown message
123 | 	ctrlChan2 <- request
124 | 
125 | 	// Wait for ACK
126 | 	ack = <-respChan
127 | 
128 | 	if ack.id != ACK {
129 | 		t.Error("failed to receive acknowledgement indicating shutdown complete")
130 | 	}
131 | 
132 | }
--------------------------------------------------------------------------------
/xport_udp.go:
--------------------------------------------------------------------------------
1 | //
2 | // October 2016, cisco
3 | //
4 | // Copyright (c) 2016 by cisco Systems, Inc.
5 | // All rights reserved.
6 | //
7 | //
8 | // Handle UDP transports
9 | 
10 | package main
11 | 
12 | import (
13 | 	"encoding/hex"
14 | 	"encoding/json"
15 | 	_ "fmt"
16 | 	"github.com/prometheus/client_golang/prometheus"
17 | 	log "github.com/sirupsen/logrus"
18 | 	"net"
19 | 	"time"
20 | )
21 | 
22 | const (
23 | 	XPORT_UDPPOLLWAIT = 3
24 | 	XPORT_UDPRETRY    = 10
25 | )
26 | 
27 | type serverUDP struct {
28 | 	name      string
29 | 	bindpoint *net.UDPAddr
30 | 	encap     string
31 | 	// OS receive buffer, can be tuned in config.
32 | 	rxBuf int
33 | 	//
34 | 	// Log data into debug log
35 | 	logData bool
36 | 	// Listener over which connections are accepted.
37 | 	listener *net.UDPConn
38 | 	// Control channel used to control the server
39 | 	ctrlChan <-chan *ctrlMsg
40 | 	// Data channels fed by the server
41 | 	dataChans []chan<- dataMsg
42 | 	//
43 | 	// Channel used by listen handler to signal it has closed.
44 | 	closedListener chan struct{}
45 | 	//
46 | 	// Cancelled by conductor?
47 | 	cancelled chan struct{}
48 | }
49 | 
50 | //
51 | // runServer is a UDP handler.
52 | func (s *serverUDP) runServer() {
53 | 
54 | 	conn := s.listener
55 | 	defer close(s.closedListener)
56 | 
57 | 	logctx := logger.WithFields(log.Fields{
58 | 		"name":  s.name,
59 | 		"local": conn.LocalAddr().String()})
60 | 	logctx.Info("UDP server run starting")
61 | 
62 | 	if s.rxBuf != 0 {
63 | 		err := conn.SetReadBuffer(s.rxBuf)
64 | 		if err != nil {
65 | 			logctx.WithError(err).WithFields(
66 | 				log.Fields{
67 | 					"rxBuf": s.rxBuf,
68 | 				}).Error(
69 | 				"RxBuf size (check OS max, e.g. sysctl -w net.core.rmem_max)")
70 | 		}
71 | 	}
72 | 
73 | 	//
74 | 	// Fetch a parser
75 | 	err, parser := getNewEncapParser(s.name, s.encap, nil)
76 | 	if err != nil {
77 | 		logctx.WithError(err).Error("UDP parser setup")
78 | 		return
79 | 	}
80 | 
81 | 	for {
82 | 		err, buffer := parser.nextBlockBuffer()
83 | 		if err != nil {
84 | 			logctx.WithError(err).Error("UDP failed to retrieve buffer")
85 | 			goto out
86 | 		}
87 | 
88 | 		length, remoteAddr, err := conn.ReadFromUDP(*buffer)
89 | 		if err != nil {
90 | 			//
91 | 			// This may be normal operation; i.e. parent closed
92 | 			// binding. We have no way of distinguishing short of
93 | 			// some horrid match of the error string.
94 | 			// https://github.com/golang/go/issues/4373
95 | 			//
96 | 			// But we can check for cancelled...
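			// (From Go 1.16 onwards the closed-binding case can be
			// recognised directly rather than via string matching,
			// e.g.:
			//
			//   if errors.Is(err, net.ErrClosed) {
			//       return // parent closed the socket under us
			//   }
			//
			// noted as a sketch only, since this code predates it.)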
97 | 			// remoteAddr may be nil on error (e.g. closed socket).
98 | 			remote := "closed"
99 | 			if remoteAddr != nil {
100 | 				remote = remoteAddr.String()
101 | 			}
102 | 			select {
103 | 			case <-s.cancelled:
104 | 				logctx.WithFields(log.Fields{"remote": remote}).
105 | 					Debug("Reading from UDP port, cancelled")
106 | 			default:
107 | 				xportUDPMetaMonitor.CountersErrors.WithLabelValues(
108 | 					s.name, remote).Inc()
109 | 				logctx.WithError(err).WithFields(
110 | 					log.Fields{"remote": remote}).Error("Reading from UDP port")
111 | 			}
112 | 			goto out
113 | 		}
114 | 		trimBuf := (*buffer)[:length]
115 | 		// fmt.Printf("length: %d/%d\n", length, len(trimBuf))
116 | 		err, msgs := parser.nextBlock(trimBuf, remoteAddr)
117 | 		if err != nil {
118 | 			xportUDPMetaMonitor.CountersErrors.WithLabelValues(
119 | 				s.name, remoteAddr.String()).Inc()
120 | 			logctx.WithError(err).WithFields(
121 | 				log.Fields{
122 | 					"remote": remoteAddr.String(),
123 | 				}).Error("Failed to extract next buffer")
124 | 			goto out
125 | 		}
126 | 
127 | 		if s.logData {
128 | 			logctx.WithFields(log.Fields{
129 | 				"remote":       remoteAddr.String(),
130 | 				"dataMsgCount": len(msgs),
131 | 				"msglen":       len(trimBuf),
132 | 				"msg":          hex.EncodeToString(trimBuf),
133 | 			}).Debug("UDP server logdata")
134 | 		}
135 | 		xportUDPMetaMonitor.CountersMsgs.WithLabelValues(
136 | 			s.name, remoteAddr.String()).Inc()
137 | 		xportUDPMetaMonitor.CountersBytes.WithLabelValues(
138 | 			s.name, remoteAddr.String()).Add(float64(length))
139 | 
140 | 		if msgs == nil {
141 | 			continue
142 | 		}
143 | 
144 | 		//
145 | 		// Now we have content. What to do with it?
146 | 		//
147 | 		// Spray the generated messages across each available
148 | 		// downstream channel
149 | 		//
150 | 		// Given this is UDP, rather than block on channel if channel
151 | 		// is full, we drop and count. This ensures that the drop
152 | 		// damage is limited to the slow consumer rather than all
153 | 		// consumers. (There is still a window of opportunity between
154 | 		// capacity test and send, if other producers feed the channel
155 | 		// but we can live with that.)
156 | 		//
157 | 		for _, msg := range msgs {
158 | 			for _, dataChan := range s.dataChans {
159 | 				if cap(dataChan) == len(dataChan) {
160 | 					// Count drops and continue. We need to add metadata
161 | 					// to channel to do a better job of identifying
162 | 					// laggards.
163 | 					xportUDPMetaMonitor.CountersDrops.WithLabelValues(
164 | 						s.name, remoteAddr.String()).Inc()
165 | 					continue
166 | 				}
167 | 				select {
168 | 				case dataChan <- msg:
169 | 					// job done for this msg on this channel
170 | 				case <-s.cancelled:
171 | 					goto out
172 | 				}
173 | 			}
174 | 		}
175 | 	}
176 | 
177 | out:
178 | 	logctx.Info("UDP server run stopping")
179 | }
180 | 
181 | func (s *serverUDP) startStickyServer() {
182 | 	//
183 | 	// Prime loop by closing listener channel
184 | 	close(s.closedListener)
185 | 
186 | 	for {
187 | 		select {
188 | 		case <-s.closedListener:
189 | 			//
190 | 			// Listener is closed. Recreate listener, set up new
191 | 			// closedListener and kick off.
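			// The priming works because a receive from a closed channel
			// never blocks, so the first pass through this select always
			// takes this case and performs the initial bind. The idiom in
			// minimal form:
			//
			//   ready := make(chan struct{})
			//   close(ready)
			//   select {
			//   case <-ready: // fires immediately
			//   }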
192 | 			var err error
193 | 			s.listener, err = net.ListenUDP("udp", s.bindpoint)
194 | 			if err != nil {
195 | 				logger.WithError(err).WithFields(log.Fields{
196 | 					"name":      s.name,
197 | 					"bindpoint": s.bindpoint,
198 | 				}).Error("UDP server failed to bind, retrying")
199 | 				time.Sleep(time.Second * XPORT_UDPRETRY)
200 | 				continue
201 | 			} else {
202 | 				s.closedListener = make(chan struct{})
203 | 				go s.runServer()
204 | 			}
205 | 
206 | 		case msg := <-s.ctrlChan:
207 | 			switch msg.id {
208 | 			case REPORT:
209 | 				stats := msgStats{}
210 | 				content, _ := json.Marshal(stats)
211 | 				resp := &ctrlMsg{
212 | 					id:       ACK,
213 | 					content:  content,
214 | 					respChan: nil,
215 | 				}
216 | 				msg.respChan <- resp
217 | 
218 | 			case SHUTDOWN:
219 | 
220 | 				logger.WithFields(
221 | 					log.Fields{"name": s.name}).Info(
222 | 					"UDP server loop, rxed SHUTDOWN, closing binding")
223 | 
224 | 				close(s.cancelled)
225 | 				if s.listener != nil {
226 | 					s.listener.Close()
227 | 					//
228 | 					// Closing the listen port will cause reading from
229 | 					// it to fail, and running server to return.
230 | 					// Wait for signal we're done
231 | 					<-s.closedListener
232 | 				}
233 | 
234 | 				logger.WithFields(
235 | 					log.Fields{
236 | 						"name":      s.name,
237 | 						"bindpoint": s.bindpoint,
238 | 					}).Debug("UDP server notify conductor binding is closed")
239 | 				resp := &ctrlMsg{
240 | 					id:       ACK,
241 | 					respChan: nil,
242 | 				}
243 | 				msg.respChan <- resp
244 | 				return
245 | 
246 | 			default:
247 | 				logger.WithFields(
248 | 					log.Fields{"name": s.name}).Error(
249 | 					"UDP server loop, unknown ctrl message")
250 | 			}
251 | 		}
252 | 	}
253 | }
254 | 
255 | func addUDPServer(
256 | 	name string,
257 | 	bindpoint *net.UDPAddr,
258 | 	encap string,
259 | 	dataChans []chan<- dataMsg,
260 | 	ctrlChan <-chan *ctrlMsg,
261 | 	rxBuf int,
262 | 	logData bool) error {
263 | 
264 | 	s := new(serverUDP)
265 | 	s.name = name
266 | 	s.bindpoint = bindpoint
267 | 	s.encap = encap
268 | 	s.logData = logData
269 | 	s.dataChans = dataChans
270 | 	s.ctrlChan = ctrlChan
271 | 	s.rxBuf = rxBuf
272 | 	s.closedListener = make(chan struct{})
273 | 	s.cancelled = make(chan struct{})
274 | 
275 | 	go s.startStickyServer()
276 | 
277 | 	return nil
278 | }
279 | 
280 | // Module implements the inputNodeModule interface
281 | type udpInputModule struct {
282 | }
283 | 
284 | func udpInputModuleNew() inputNodeModule {
285 | 	return &udpInputModule{}
286 | }
287 | 
288 | func (m *udpInputModule) configure(
289 | 	name string,
290 | 	nc nodeConfig,
291 | 	dataChans []chan<- dataMsg) (error, chan<- *ctrlMsg) {
292 | 
293 | 	listen, err := nc.config.GetString(name, "listen")
294 | 	if err != nil {
295 | 		logger.WithError(err).WithFields(
296 | 			log.Fields{"name": name}).Error(
297 | 			"attribute 'listen' must be specified in this section")
298 | 		return err, nil
299 | 	}
300 | 
301 | 	bindpoint, err := net.ResolveUDPAddr("udp", listen)
302 | 	if err != nil {
303 | 		logger.WithError(err).WithFields(
304 | 			log.Fields{"name": name}).Error(
305 | 			"attribute 'listen' unparseable as local UDP address")
306 | 		return err, nil
307 | 	}
308 | 
309 | 	encap, err := nc.config.GetString(name, "encap")
310 | 	if err != nil {
311 | 		encap = "st"
312 | 	}
313 | 	//
314 | 	// If not set, will default to false, but let's be clear.
315 | 	logData, _ := nc.config.GetBool(name, "logdata")
316 | 	rxBuf, _ := nc.config.GetInt(name, "rxbuf")
317 | 
318 | 	//
319 | 	// Create a control channel which will be used to control us,
320 | 	// and kick off the server which will accept connections and
321 | 	// listen for control requests.
322 | 	ctrlChan := make(chan *ctrlMsg)
323 | 	err = addUDPServer(
324 | 		name, bindpoint, encap, dataChans, ctrlChan, rxBuf, logData)
325 | 
326 | 	return err, ctrlChan
327 | }
328 | 
329 | type xportUDPMetaMonitorType struct {
330 | 	CountersMsgs   *prometheus.CounterVec
331 | 	CountersBytes  *prometheus.CounterVec
332 | 	CountersErrors *prometheus.CounterVec
333 | 	CountersDrops  *prometheus.CounterVec
334 | }
335 | 
336 | var xportUDPMetaMonitor *xportUDPMetaMonitorType
337 | 
338 | func init() {
339 | 	xportUDPMetaMonitor = &xportUDPMetaMonitorType{
340 | 		CountersMsgs: prometheus.NewCounterVec(
341 | 			prometheus.CounterOpts{
342 | 				Name: "xportUDP_messages",
343 | 				Help: "Messages",
344 | 			},
345 | 			[]string{"section", "peer"}),
346 | 		CountersBytes: prometheus.NewCounterVec(
347 | 			prometheus.CounterOpts{
348 | 				Name: "xportUDP_bytes",
349 | 				Help: "Bytes",
350 | 			},
351 | 			[]string{"section", "peer"}),
352 | 		CountersErrors: prometheus.NewCounterVec(
353 | 			prometheus.CounterOpts{
354 | 				Name: "xportUDP_errors",
355 | 				Help: "Errors",
356 | 			},
357 | 			[]string{"section", "peer"}),
358 | 		CountersDrops: prometheus.NewCounterVec(
359 | 			prometheus.CounterOpts{
360 | 				Name: "xportUDP_drops",
361 | 				Help: "Drops",
362 | 			},
363 | 			[]string{"section", "peer"}),
364 | 	}
365 | 
366 | 	// Register the counters with the default Prometheus registry
367 | 	prometheus.MustRegister(xportUDPMetaMonitor.CountersMsgs)
368 | 	prometheus.MustRegister(xportUDPMetaMonitor.CountersBytes)
369 | 	prometheus.MustRegister(xportUDPMetaMonitor.CountersErrors)
370 | 	prometheus.MustRegister(xportUDPMetaMonitor.CountersDrops)
371 | }
--------------------------------------------------------------------------------
/xport_udp_test.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"bytes"
5 | 	"encoding/binary"
6 | 	"fmt"
7 | 	samples "github.com/cisco-ie/pipeline-gnmi/mdt_msg_samples"
8 | 	"github.com/dlintw/goconf"
9 | 	log "github.com/sirupsen/logrus"
10 | 	"net"
11 | 	"sync"
12 | 	"testing"
13 | 	"time"
14 | )
15 | 
16 | // Workaround for uninit lock assignment go vet error:
17 | // https://github.com/golang/go/issues/13675
18 | 
19 | type udpTestContextLock struct {
20 | 	sync.Mutex
21 | 	sync.WaitGroup
22 | }
23 | 
24 | type udpTestContext struct {
25 | 	name     string
26 | 	maxlag   int
27 | 	max      int
28 | 	encap    encapSTHdrMsgEncap
29 | 	send     int
30 | 	handled  int
31 | 	dataChan chan dataMsg
32 | 	tDone    chan struct{}
33 | 	lock     *udpTestContextLock
34 | 	conn     *net.UDPConn
35 | }
36 | 
37 | func udpTestSendOneMessage(
38 | 	sample *samples.SampleTelemetryTableEntry,
39 | 	context samples.MDTContext) (abort bool) {
40 | 
41 | 	var err error
42 | 
43 | 	c := context.(*udpTestContext)
44 | 
45 | 	hdr := encapSTHdr{
46 | 		MsgType:       ENC_ST_HDR_MSG_TYPE_TELEMETRY_DATA,
47 | 		MsgEncap:      ENC_ST_HDR_MSG_ENCAP_GPB,
48 | 		MsgHdrVersion: ENC_ST_HDR_VERSION,
49 | 		Msgflag:       ENC_ST_HDR_MSG_FLAGS_NONE,
50 | 	}
51 | 
52 | 	hdr.MsgEncap = c.encap
53 | 
54 | 	if c.encap == ENC_ST_HDR_MSG_ENCAP_GPB {
55 | 		hdr.Msglen = uint32(len(sample.SampleStreamGPB))
56 | 	} else if c.encap == ENC_ST_HDR_MSG_ENCAP_JSON {
57 | 		hdr.Msglen = uint32(len(sample.SampleStreamJSON))
58 | 	} else {
59 | 		return true
60 | 	}
61 | 
62 | 	hdrBuf := new(bytes.Buffer)
63 | 	err = binary.Write(hdrBuf, binary.BigEndian, hdr)
64 | 	if err != nil {
65 | 		panic(err)
66 | 	}
67 | 	if c.encap == ENC_ST_HDR_MSG_ENCAP_GPB {
68 | 		_, err = c.conn.Write(append(hdrBuf.Bytes(), sample.SampleStreamGPB...))
69 | 	} else if c.encap == ENC_ST_HDR_MSG_ENCAP_JSON {
70 | 		_, err = c.conn.Write(append(hdrBuf.Bytes(), sample.SampleStreamJSON...))
71 | 	}
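	// Each datagram built above is one ST-framed message: the fixed-size
	// encapSTHdr serialised big-endian, immediately followed by Msglen
	// payload bytes. Assuming the field widths declared in encap_st.go
	// (four 16-bit fields and a 32-bit length), the frame is:
	//
	//   offset: 0      2       4        6       8        12
	//           +------+-------+--------+-------+--------+-------------+
	//           | type | encap | hdrver | flags | msglen | payload ... |
	//           +------+-------+--------+-------+--------+-------------+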
72 | 	if err == nil {
73 | 		c.lock.Lock()
74 | 		c.send++
75 | 		c.lock.Unlock()
76 | 	}
77 | 
78 | 	return false
79 | }
80 | 
81 | func udpTestSendMessages(c *udpTestContext) {
82 | 	defer c.lock.Done()
83 | 	for {
84 | 		select {
85 | 		case <-c.tDone:
86 | 			return
87 | 		default:
88 | 			takeabreak := false
89 | 			done := false
90 | 			c.lock.Lock()
91 | 			if c.handled >= c.max {
92 | 				done = true
93 | 			}
94 | 			if c.send-c.handled >= c.maxlag {
95 | 				takeabreak = true
96 | 			}
97 | 			c.lock.Unlock()
98 | 
99 | 			if done {
100 | 				return
101 | 			}
102 | 
103 | 			if takeabreak {
104 | 				// fmt.Printf("send: waiting 1s at t %d r %d\n", c.send, c.handled)
105 | 				time.Sleep(time.Millisecond * 2)
106 | 				continue
107 | 			}
108 | 
109 | 			samples.MDTSampleTelemetryTableIterate(
110 | 				samples.SAMPLE_TELEMETRY_DATABASE_BASIC,
111 | 				udpTestSendOneMessage, c)
112 | 		}
113 | 	}
114 | }
115 | 
116 | func udpTestHandleMessages(c *udpTestContext) {
117 | 	for {
118 | 		select {
119 | 		case <-c.dataChan:
120 | 			c.lock.Lock()
121 | 			c.handled++
122 | 			c.lock.Unlock()
123 | 		case <-c.tDone:
124 | 			// fmt.Printf("receive: stop at t %d r %d\n", c.send, c.handled)
125 | 			c.lock.Done()
126 | 			return
127 | 		}
128 | 	}
129 | }
130 | 
131 | func TestUDPServerNegative(tb *testing.T) {
132 | 	var dataChans = make([]chan<- dataMsg, 0)
133 | 	var nc nodeConfig
134 | 	var err error
135 | 
136 | 	logfile := startup()
137 | 	logger = theLogger.WithFields(log.Fields{"tag": "test"})
138 | 	if logfile != nil {
139 | 		defer logfile.Close()
140 | 	}
141 | 
142 | 	nc.config, err = goconf.ReadConfigFile("pipeline_test.conf")
143 | 	dataChan := make(chan dataMsg, DATACHANNELDEPTH)
144 | 	dataChans = append(dataChans, dataChan)
145 | 
146 | 	i := udpInputModuleNew()
147 | 	err, _ = i.configure("udpinnolisten", nc, dataChans)
148 | 	if err == nil {
149 | 		tb.Fatal("Configured input module without listen")
150 | 	}
151 | 
152 | 	err, _ = i.configure("udpinbadlisten", nc, dataChans)
153 | 	if err == nil {
154 | 		tb.Fatal("Configured input module with bad listen")
155 | 	}
156 | 
157 | }
158 | 
159 | func TestUDPServer(tb *testing.T) {
160 | 	var dataChans = make([]chan<- dataMsg, 0)
161 | 	var nc nodeConfig
162 | 	var err error
163 | 
164 | 	logfile := startup()
165 | 	logger = theLogger.WithFields(log.Fields{"tag": "test"})
166 | 	if logfile != nil {
167 | 		defer logfile.Close()
168 | 	}
169 | 
170 | 	nc.config, err = goconf.ReadConfigFile("pipeline_test.conf")
171 | 	go metamonitoring_init(nc)
172 | 
173 | 	server, err := nc.config.GetString("udpin", "listen")
174 | 	if err != nil {
175 | 		tb.Fatal("Failed to pick up 'listen'", err)
176 | 	}
177 | 
178 | 	dataChan := make(chan dataMsg, DATACHANNELDEPTH)
179 | 	dataChans = append(dataChans, dataChan)
180 | 
181 | 	i := udpInputModuleNew()
182 | 	err, ctrlChan := i.configure("udpin", nc, dataChans)
183 | 	if err != nil {
184 | 		tb.Fatal("Failed to config input module", err)
185 | 	}
186 | 
187 | 	udpAddr, err := net.ResolveUDPAddr("udp", server)
188 | 	if err != nil {
189 | 		tb.Fatal("Failed to get server address", err)
190 | 	}
191 | 	outConn, err := net.DialUDP("udp", nil, udpAddr)
192 | 	if err != nil {
193 | 		tb.Fatal("Failed to dial UDP", err)
194 | 	}
195 | 
196 | 	//
197 | 	// The read buffer, not the write buffer, is the limiting factor for bursts.
198 | 	// No need to setup outConn.SetWriteBuffer(46388608)
199 | 	//
200 | 	subtests := []udpTestContext{
201 | 		{name: "BurstSparseGPB", maxlag: 5, max: 10000, encap: ENC_ST_HDR_MSG_ENCAP_GPB},
202 | 		{name: "BurstMediumGPB", maxlag: 50, max: 10000, encap: ENC_ST_HDR_MSG_ENCAP_GPB},
203 | 		{name: "BurstDenseGPB", maxlag: 500, max: 10000, encap: ENC_ST_HDR_MSG_ENCAP_GPB},
204 | 	}
205 | 
206 | 	//
207 | 	// Structure test as subtest in case I need to add more.
208 | 	for _, subt := range subtests {
209 | 		subt.lock = &udpTestContextLock{}
210 | 		tb.Run(
211 | 			fmt.Sprint(subt.name),
212 | 			func(tb *testing.T) {
213 | 				subt.tDone = make(chan struct{})
214 | 				subt.conn = outConn
215 | 				subt.dataChan = dataChan
216 | 
217 | 				subt.lock.Add(2)
218 | 
219 | 				go udpTestHandleMessages(&subt)
220 | 				time.Sleep(time.Millisecond * 500)
221 | 				go udpTestSendMessages(&subt)
222 | 
223 | 				ticker := time.NewTicker(time.Second * 2)
224 | 				old_handled := 0
225 | 				for range ticker.C {
226 | 					subt.lock.Lock()
227 | 					if subt.handled >= subt.max {
228 | 						subt.lock.Unlock()
229 | 						ticker.Stop()
230 | 						break
231 | 					}
232 | 					if subt.handled == old_handled {
233 | 						//
234 | 						// stopped making progress
235 | 						ticker.Stop()
236 | 						tb.Fatalf("Progress stalled: handled %d, sent %d, max %d",
237 | 							subt.handled, subt.send, subt.max)
238 | 					}
239 | 					old_handled = subt.handled
240 | 					subt.lock.Unlock()
241 | 				}
242 | 
243 | 				close(subt.tDone)
244 | 				subt.lock.Wait()
245 | 			})
246 | 	}
247 | 
248 | 	//
249 | 	// Test shutdown
250 | 	respChan := make(chan *ctrlMsg)
251 | 	request := &ctrlMsg{
252 | 		id:       SHUTDOWN,
253 | 		respChan: respChan,
254 | 	}
255 | 
256 | 	//
257 | 	// Send shutdown message
258 | 	ctrlChan <- request
259 | 
260 | 	// Wait for ACK
261 | 	ack := <-respChan
262 | 
263 | 	if ack.id != ACK {
264 | 		tb.Error("failed to receive acknowledgement indicating shutdown complete")
265 | 	}
266 | 
267 | }
--------------------------------------------------------------------------------