├── .github └── workflows │ └── semgrep.yml ├── .gitignore ├── .travis.yml ├── Dockerfile ├── Dockerfile.prod ├── LICENSE.txt ├── Makefile ├── README.md ├── cmd ├── cnetflow │ └── cnetflow.go ├── cnflegacy │ └── cnflegacy.go ├── csflow │ └── csflow.go └── goflow │ └── goflow.go ├── decoders ├── decoder.go ├── netflow │ ├── ipfix.go │ ├── netflow.go │ ├── nfv9.go │ └── packet.go ├── netflowlegacy │ ├── netflow.go │ ├── netflow_test.go │ └── packet.go ├── sflow │ ├── datastructure.go │ ├── packet.go │ ├── sflow.go │ └── sflow_test.go └── utils │ └── utils.go ├── docker-compose-pkg.yml ├── go.mod ├── go.sum ├── package ├── Dockerfile ├── goflow.env └── goflow.service ├── pb ├── flow.pb.go └── flow.proto ├── producer ├── producer_nf.go ├── producer_nflegacy.go ├── producer_sf.go └── producer_test.go ├── transport ├── kafka.go └── transport_test.go └── utils ├── metrics.go ├── netflow.go ├── nflegacy.go ├── sflow.go ├── sflow_test.go └── utils.go /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: {} 3 | workflow_dispatch: {} 4 | push: 5 | branches: 6 | - main 7 | - master 8 | schedule: 9 | - cron: '0 0 * * *' 10 | name: Semgrep config 11 | jobs: 12 | semgrep: 13 | name: semgrep/ci 14 | runs-on: ubuntu-latest 15 | env: 16 | SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} 17 | SEMGREP_URL: https://cloudflare.semgrep.dev 18 | SEMGREP_APP_URL: https://cloudflare.semgrep.dev 19 | SEMGREP_VERSION_CHECK_URL: https://cloudflare.semgrep.dev/api/check-version 20 | container: 21 | image: semgrep/semgrep 22 | steps: 23 | - uses: actions/checkout@v4 24 | - run: semgrep ci 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | go.work.sum 23 | 24 | # env file 25 | .env 26 | 27 | # Dist 28 | /dist -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | jobs: 2 | include: 3 | # Test 4 | - stage: test 5 | os: linux 6 | language: go 7 | env: 8 | GO111MODULE=on 9 | script: 10 | - make test-race vet test 11 | # Compile 12 | - stage: compile 13 | os: linux 14 | language: go 15 | env: 16 | GO111MODULE=on 17 | BUILDINFOSDET=-travis 18 | before_install: 19 | - sudo apt-get update 20 | - sudo apt-get install -y rpm ruby ruby-dev 21 | - sudo gem install fpm 22 | script: 23 | - GOOS=linux make build-goflow-light 24 | - GOOS=linux make build-goflow 25 | - GOOS=darwin make build-goflow 26 | - GOOS=windows EXTENSION=.exe make build-goflow 27 | - make package-deb-goflow package-rpm-goflow 28 | deploy: 29 | provider: releases 30 | api_key: 31 | secure: 
eg1OSNzXVSVsCx/n7xSJAtAw7NlgtnK57EyJmrwGgvcs5OUm5cvsnK3isuWwsAFanW6b69UoyyZDayIj72poiTVGo5705lL1sN39LxypmlkpmOFJaMggIdbPAN4fB6anRHp+MBGMvxGjeJP/97JKnPXcyK+QevqxRl2sMFRjLthTBManET7ahAhD5HqsdT/MeFORCymlJ+sIRXkLHrtBdiW/KXLLzsKn3C4/OPP3Z08ggqDix7I3zLaHW7nAvug3h5V5I84FiedEgO+w7McMjX8ri2Fz/sXNz3AaQIgBUxkmnIEvv4b9nFkd3HjIHRyS6iPpcdrqGXcMqW2SVHOJ668t140MLKrZyoCj4yi0UzqjY5F6iBCy5GSz8TBbz1Mo7TF6ieVeAaC0WZImO1aRHQeBNY/5NjvmwCXLDq7sUyxcHbfSa39/Pn6sD5yZkNsSEpTJ9AHxo2/os4NxQJ6l4nV/vseNDUnhcLf3irCBpsv1k1q6EgAO4kCdELSDMaYasZm2p4U9PDiGP1tyxWoglQKzma0sR1FGnOpUQB1Wl6ZWeW4IotHLb6QQRLfERPueWgENi2etDs88lLY1EuCamFoY19nWXROCiUEYFthK6csapgQw7y4hIcup2/gB0eNVoWbGB16MYQD2W47gj6LUGDSQMAjXffymugde71R46JQ= 32 | file_glob: true 33 | file: dist/* 34 | skip_cleanup: true 35 | on: 36 | tags: true 37 | repo: cloudflare/goflow 38 | - dist: trusty 39 | services: 40 | - docker 41 | script: 42 | - make docker-goflow 43 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine as builder 2 | ARG LDFLAGS="" 3 | 4 | RUN apk --update --no-cache add git build-base gcc 5 | 6 | COPY . /build 7 | WORKDIR /build 8 | 9 | RUN go build -ldflags "${LDFLAGS}" -o goflow cmd/goflow/goflow.go 10 | 11 | FROM alpine:latest 12 | ARG src_dir 13 | 14 | RUN apk update --no-cache && \ 15 | adduser -S -D -H -h / flow 16 | USER flow 17 | COPY --from=builder /build/goflow / 18 | 19 | ENTRYPOINT ["./goflow"] 20 | -------------------------------------------------------------------------------- /Dockerfile.prod: -------------------------------------------------------------------------------- 1 | ARG src_uri=github.com/cloudflare/goflow 2 | 3 | FROM golang:alpine as builder 4 | ARG src_uri 5 | 6 | RUN apk --update --no-cache add git && \ 7 | go get -u $src_uri 8 | 9 | FROM alpine:latest 10 | ARG src_uri 11 | 12 | RUN apk update --no-cache && \ 13 | adduser -S -D -H -h / flow 14 | USER flow 15 | COPY --from=builder /go/bin/goflow / 16 | 17 | ENTRYPOINT ["./goflow"] 18 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018, Cloudflare. All rights reserved. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 4 | 5 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 6 | 7 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | 9 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 12 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | EXTENSION ?= 2 | DIST_DIR ?= dist/ 3 | GOOS ?= linux 4 | ARCH ?= $(shell uname -m) 5 | BUILDINFOSDET ?= 6 | 7 | DOCKER_REPO := cloudflare/ 8 | GOFLOW_NAME := goflow 9 | GOFLOW_VERSION := $(shell git describe --tags $(git rev-list --tags --max-count=1)) 10 | VERSION_PKG := $(shell echo $(GOFLOW_VERSION) | sed 's/^v//g') 11 | ARCH := x86_64 12 | LICENSE := BSD-3 13 | URL := https://github.com/cloudflare/goflow 14 | DESCRIPTION := GoFlow: an sFlow/IPFIX/NetFlow v9/v5 collector to Kafka 15 | BUILDINFOS := ($(shell date +%FT%T%z)$(BUILDINFOSDET)) 16 | LDFLAGS := '-X main.version=$(GOFLOW_VERSION) -X main.buildinfos=$(BUILDINFOS)' 17 | 18 | OUTPUT_GOFLOW := $(DIST_DIR)goflow-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) 19 | 20 | OUTPUT_GOFLOW_LIGHT_SFLOW := $(DIST_DIR)goflow-sflow-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) 21 | OUTPUT_GOFLOW_LIGHT_NF := $(DIST_DIR)goflow-netflow-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) 22 | OUTPUT_GOFLOW_LIGHT_NFV5 := $(DIST_DIR)goflow-nflegacy-$(GOFLOW_VERSION)-$(GOOS)-$(ARCH)$(EXTENSION) 23 | 24 | .PHONY: all 25 | all: test-race vet test 26 | 27 | .PHONY: proto 28 | proto: 29 | @echo generating protobuf 30 | protoc --go_out=. --plugin=$(PROTOCPATH)protoc-gen-go pb/*.proto 31 | 32 | .PHONY: test 33 | test: 34 | @echo testing code 35 | go test ./... 36 | 37 | .PHONY: vet 38 | vet: 39 | @echo checking code is vetted 40 | go vet $(shell go list ./...) 41 | 42 | .PHONY: test-race 43 | test-race: 44 | @echo testing code for races 45 | go test -race ./... 46 | 47 | .PHONY: prepare 48 | prepare: 49 | mkdir -p $(DIST_DIR) 50 | 51 | .PHONY: clean 52 | clean: 53 | rm -rf $(DIST_DIR) 54 | 55 | .PHONY: build-goflow 56 | build-goflow: prepare 57 | go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW) cmd/goflow/goflow.go 58 | 59 | .PHONY: build-goflow-light 60 | build-goflow-light: prepare 61 | go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW_LIGHT_SFLOW) cmd/csflow/csflow.go 62 | go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW_LIGHT_NF) cmd/cnetflow/cnetflow.go 63 | go build -ldflags $(LDFLAGS) -o $(OUTPUT_GOFLOW_LIGHT_NFV5) cmd/cnflegacy/cnflegacy.go 64 | 65 | .PHONY: docker-goflow 66 | docker-goflow: 67 | docker build -t $(DOCKER_REPO)$(GOFLOW_NAME):$(GOFLOW_VERSION) --build-arg LDFLAGS=$(LDFLAGS) -f Dockerfile . 
68 | 69 | .PHONY: package-deb-goflow 70 | package-deb-goflow: prepare 71 | fpm -s dir -t deb -n $(GOFLOW_NAME) -v $(VERSION_PKG) \ 72 | --description "$(DESCRIPTION)" \ 73 | --url "$(URL)" \ 74 | --architecture $(ARCH) \ 75 | --license "$(LICENSE)" \ 76 | --deb-no-default-config-files \ 77 | --package $(DIST_DIR) \ 78 | $(OUTPUT_GOFLOW)=/usr/bin/goflow \ 79 | package/goflow.service=/lib/systemd/system/goflow.service \ 80 | package/goflow.env=/etc/default/goflow 81 | 82 | .PHONY: package-rpm-goflow 83 | package-rpm-goflow: prepare 84 | fpm -s dir -t rpm -n $(GOFLOW_NAME) -v $(VERSION_PKG) \ 85 | --description "$(DESCRIPTION)" \ 86 | --url "$(URL)" \ 87 | --architecture $(ARCH) \ 88 | --license "$(LICENSE)" \ 89 | --package $(DIST_DIR) \ 90 | $(OUTPUT_GOFLOW)=/usr/bin/goflow \ 91 | package/goflow.service=/lib/systemd/system/goflow.service \ 92 | package/goflow.env=/etc/default/goflow 93 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GoFlow 2 | 3 | > [!WARNING] 4 | > This software is no longer maintained. We advise replacing your production use of this software with the fork [goflow2](https://github.com/netsampler/goflow2). 5 | 6 | This application is a NetFlow/IPFIX/sFlow collector in Go. 7 | 8 | It gathers network information (IP, interfaces, routers) from different flow protocols, 9 | serializes it in a protobuf format and sends the messages to Kafka using the Sarama library. 10 | 11 | ## Why 12 | 13 | The diversity of devices and the amount of network samples at Cloudflare required a dedicated pipeline. 14 | We focused on building tools that could be easily monitored and maintained. 15 | The main goal is to have full visibility of the network while allowing other teams to develop on it. 16 | 17 | ### Modularity 18 | 19 | In order to enable load-balancing and optimizations, the GoFlow library has a `decoder` which converts 20 | the payload of a flow packet into a Go structure. 21 | 22 | The `producer` functions (one per protocol) then convert those structures into a protobuf (`pb/flow.pb`) 23 | which contains the fields a network engineer is interested in. 24 | A flow packet usually contains multiple samples; 25 | the protobuf message acts as an abstraction of a single sample. 26 | 27 | The `transport` provides different ways of processing the protobuf: either sending it via Kafka or 28 | printing it on the console. 29 | 30 | Finally, `utils` provides functions that are directly used by the CLI tools. 31 | GoFlow is a wrapper around all of these functions and chains them to produce bytes into Kafka. 32 | There is also one CLI tool per protocol. 33 | 34 | You can build your own collector using this base and replace parts: 35 | * Use a different transport (eg: RabbitMQ instead of Kafka) 36 | * Convert to another format (eg: Cap'n Proto, Avro, instead of protobuf) 37 | * Decode different samples (eg: not only IP networks, add MPLS) 38 | * Use a different metrics system (eg: [expvar](https://golang.org/pkg/expvar/) instead of Prometheus) 39 | 40 | ### Protocol difference 41 | 42 | The sampling protocols can be very different: 43 | 44 | **sFlow** is a stateless protocol which sends the full header of a packet with router information 45 | (interfaces, destination AS) while **NetFlow/IPFIX** relies on templates that contain fields (eg: source IPv6). 46 | 47 | The sampling rate in NetFlow/IPFIX is provided by **Option Data Sets**. 
This is why it can take a few minutes 48 | before packets can be decoded: the collector must first receive all the templates (**Option Template** and **Data Template**). 49 | 50 | Both of these protocols bundle multiple samples (**Data Set** in NetFlow/IPFIX and **Flow Sample** in sFlow) 51 | in one packet. 52 | 53 | The advantage of using an abstract network flow format, such as protobuf, is that it enables summing across 54 | protocols (eg: per ASN or per port, rather than per (ASN, router) and (port, router)). 55 | 56 | ## Features 57 | 58 | Collection: 59 | * NetFlow v5 60 | * IPFIX/NetFlow v9 61 | * Handles sampling rate provided by the Option Data Set 62 | * sFlow v5: RAW, IPv4, IPv6, Ethernet samples, Gateway data, router data, switch data 63 | 64 | Production: 65 | * Convert to protobuf 66 | * Send to a Kafka producer 67 | * Print to the console 68 | 69 | Monitoring: 70 | * Prometheus metrics 71 | * Time to decode 72 | * Sample rates 73 | * Payload information 74 | * NetFlow Templates 75 | 76 | ## Run 77 | 78 | Download the latest release and just run the following command: 79 | 80 | ``` 81 | ./goflow -h 82 | ``` 83 | 84 | Enable or disable a protocol using `-nf=false` or `-sflow=false`. 85 | Define the addresses and ports of the protocols using `-nf.addr`, `-nf.port` for NetFlow and `-sflow.addr`, `-sflow.port` for sFlow. 86 | 87 | Set the brokers or the Kafka brokers SRV record using `-kafka.brokers 127.0.0.1:9092,[::1]:9092` or `-kafka.srv`. 88 | Disable Kafka sending with `-kafka=false`. 89 | You can hash the protobuf by key when you send it to Kafka. 90 | 91 | You can collect NetFlow/IPFIX, NetFlow v5 and sFlow using the same collector 92 | or use the single-protocol collectors. 93 | 94 | You can define the number of workers per protocol using `-workers`. 95 | 96 | ## Docker 97 | 98 | We also provide an all-in-one Docker container. To run it in debug mode without sending into Kafka: 99 | 100 | ``` 101 | $ sudo docker run --net=host -ti cloudflare/goflow:latest -kafka=false 102 | ``` 103 | 104 | ## Environment 105 | 106 | For an example of a full pipeline, check out [flow-pipeline](https://github.com/cloudflare/flow-pipeline). 107 | 108 | ### How is it used at Cloudflare 109 | 110 | The samples flowing into Kafka are **processed** and special fields are inserted using other databases: 111 | * User plan 112 | * Country 113 | * ASN and BGP information 114 | 115 | The extended protobuf has the same base as the one in this repo. The **compatibility** with other software 116 | is preserved when adding new fields (though the extra fields will be lost if the message is re-serialized by software that does not know them). 117 | 118 | Once the updated flows are back into Kafka, they are **consumed** by **database inserters** (Clickhouse, Amazon Redshift, Google BigTable...) 119 | to allow for static analysis. Other teams access the network data just like any other log (SQL query). 120 | 121 | ### Output format 122 | 123 | If you want to develop applications, build `pb/flow.proto` into the language you want: 124 | 125 | Example in Go: 126 | ``` 127 | PROTOCPATH=$HOME/go/bin/ make proto 128 | ``` 129 | 130 | Example in Java: 131 | 132 | ``` 133 | export SRC_DIR="path/to/goflow-pb" 134 | export DST_DIR="path/to/java/app/src/main/java" 135 | protoc -I=$SRC_DIR --java_out=$DST_DIR $SRC_DIR/flow.proto 136 | ``` 137 | 138 | The fields are listed in the following table. 
139 | 140 | You can find information on how they are populated from the original source: 141 | * For [sFlow](https://sflow.org/developers/specifications.php) 142 | * For [NetFlow v5](https://www.cisco.com/c/en/us/td/docs/net_mgmt/netflow_collection_engine/3-6/user/guide/format.html) 143 | * For [NetFlow v9](https://www.cisco.com/en/US/technologies/tk648/tk362/technologies_white_paper09186a00800a3db9.html) 144 | * For [IPFIX](https://www.iana.org/assignments/ipfix/ipfix.xhtml) 145 | 146 | | Field | Description | NetFlow v5 | sFlow | NetFlow v9 | IPFIX | 147 | | - | - | - | - | - | - | 148 | |Type|Type of flow message|NETFLOW_V5|SFLOW_5|NETFLOW_V9|IPFIX| 149 | |TimeReceived|Timestamp of when the message was received|Included|Included|Included|Included| 150 | |SequenceNum|Sequence number of the flow packet|Included|Included|Included|Included| 151 | |SamplingRate|Sampling rate of the flow|Included|Included|Included|Included| 152 | |FlowDirection|Direction of the flow| | |DIRECTION (61)|flowDirection (61)| 153 | |SamplerAddress|Address of the device that generated the packet|IP source of packet|Agent IP|IP source of packet|IP source of packet| 154 | |TimeFlowStart|Time the flow started|System uptime and first|=TimeReceived|System uptime and FIRST_SWITCHED (22)|flowStartXXX (150, 152, 154, 156)| 155 | |TimeFlowEnd|Time the flow ended|System uptime and last|=TimeReceived|System uptime and LAST_SWITCHED (23)|flowEndXXX (151, 153, 155, 157)| 156 | |Bytes|Number of bytes in flow|dOctets|Length of sample|IN_BYTES (1) OUT_BYTES (23)|octetDeltaCount (1) postOctetDeltaCount (23)| 157 | |Packets|Number of packets in flow|dPkts|=1|IN_PKTS (2) OUT_PKTS (24)|packetDeltaCount (1) postPacketDeltaCount (24)| 158 | |SrcAddr|Source address (IP)|srcaddr (IPv4 only)|Included|Included|IPV4_SRC_ADDR (8) IPV6_SRC_ADDR (27)|sourceIPv4Address/sourceIPv6Address (8/27)| 159 | |DstAddr|Destination address (IP)|dstaddr (IPv4 only)|Included|Included|IPV4_DST_ADDR (12) IPV6_DST_ADDR (28)|destinationIPv4Address (12)destinationIPv6Address (28)| 160 | |Etype|Ethernet type (0x86dd for IPv6...)|IPv4|Included|Included|Included| 161 | |Proto|Protocol (UDP, TCP, ICMP...)|prot|Included|PROTOCOL (4)|protocolIdentifier (4)| 162 | |SrcPort|Source port (when UDP/TCP/SCTP)|srcport|Included|L4_SRC_PORT (7)|sourceTransportPort (7)| 163 | |DstPort|Destination port (when UDP/TCP/SCTP)|dstport|Included|L4_DST_PORT (11)|destinationTransportPort (11)| 164 | |InIf|Input interface|input|Included|INPUT_SNMP (10)|ingressInterface (10)| 165 | |OutIf|Output interface|output|Included|OUTPUT_SNMP (14)|egressInterface (14)| 166 | |SrcMac|Source mac address| |Included|IN_SRC_MAC (56)|sourceMacAddress (56)| 167 | |DstMac|Destination mac address| |Included|OUT_DST_MAC (57)|postDestinationMacAddress (57)| 168 | |SrcVlan|Source VLAN ID| |From ExtendedSwitch|SRC_VLAN (59)|vlanId (58)| 169 | |DstVlan|Destination VLAN ID| |From ExtendedSwitch|DST_VLAN (59)|postVlanId (59)| 170 | |VlanId|802.11q VLAN ID| |Included|SRC_VLAN (59)|postVlanId (59)| 171 | |IngressVrfID|VRF ID| | | |ingressVRFID (234)| 172 | |EgressVrfID|VRF ID| | | |egressVRFID (235)| 173 | |IPTos|IP Type of Service|tos|Included|SRC_TOS (5)|ipClassOfService (5)| 174 | |ForwardingStatus|Forwarding status| | |FORWARDING_STATUS (89)|forwardingStatus (89)| 175 | |IPTTL|IP Time to Live| |Included|IPTTL (52)|minimumTTL (52| 176 | |TCPFlags|TCP flags|tcp_flags|Included|TCP_FLAGS (6)|tcpControlBits (6)| 177 | |IcmpType|ICMP Type| |Included|ICMP_TYPE (32)|icmpTypeXXX (176, 178) icmpTypeCodeXXX (32, 139)| 178 | 
|IcmpCode|ICMP Code| |Included|ICMP_TYPE (32)|icmpCodeXXX (177, 179) icmpTypeCodeXXX (32, 139)| 179 | |IPv6FlowLabel|IPv6 Flow Label| |Included|IPV6_FLOW_LABEL (31)|flowLabelIPv6 (31)| 180 | |FragmentId|IP Fragment ID| |Included|IPV4_IDENT (54)|fragmentIdentification (54)| 181 | |FragmentOffset|IP Fragment Offset| |Included|FRAGMENT_OFFSET (88)|fragmentOffset (88) and fragmentFlags (197)| 182 | |BiFlowDirection|BiFlow Identification| | | |biflowDirection (239)| 183 | |SrcAS|Source AS number|src_as|From ExtendedGateway|SRC_AS (16)|bgpSourceAsNumber (16)| 184 | |DstAS|Destination AS number|dst_as|From ExtendedGateway|DST_AS (17)|bgpDestinationAsNumber (17)| 185 | |NextHop|Nexthop address|nexthop|From ExtendedGateway|IPV4_NEXT_HOP (15) BGP_IPV4_NEXT_HOP (18) IPV6_NEXT_HOP (62) BGP_IPV6_NEXT_HOP (63)|ipNextHopIPv4Address (15) bgpNextHopIPv4Address (18) ipNextHopIPv6Address (62) bgpNextHopIPv6Address (63)| 186 | |NextHopAS|Nexthop AS number| |From ExtendedGateway| | | 187 | |SrcNet|Source address mask|src_mask|From ExtendedRouter|SRC_MASK (9) IPV6_SRC_MASK (29)|sourceIPv4PrefixLength (9) sourceIPv6PrefixLength (29)| 188 | |DstNet|Destination address mask|dst_mask|From ExtendedRouter|DST_MASK (13) IPV6_DST_MASK (30)|destinationIPv4PrefixLength (13) destinationIPv6PrefixLength (30)| 189 | |HasEncap|Indicates if has GRE encapsulation||Included||| 190 | |xxxEncap fields|Same as field but inside GRE||Included||| 191 | |HasMPLS|Indicates the presence of MPLS header||Included||| 192 | |MPLSCount|Count of MPLS layers||Included||| 193 | |MPLSxTTL|TTL of the MPLS label||Included||| 194 | |MPLSxLabel|MPLS label||Included||| 195 | 196 | If you are implementing flow processors to add more data to the protobuf, 197 | we suggest you use field IDs ≥ 1000. 198 | 199 | ### Implementation notes 200 | 201 | The pipeline at Cloudflare is connecting collectors with flow processors 202 | that will add more information: with IP address, add country, ASN, etc. 203 | 204 | For aggregation, we are using Materialized tables in Clickhouse. 205 | Dictionaries help correlating flows with country and ASNs. 206 | A few collectors can treat hundred of thousands of samples. 207 | 208 | We also experimented successfully flow aggregation with Flink using a 209 | [Keyed Session Window](https://ci.apache.org/projects/flink/flink-docs-release-1.4/dev/stream/operators/windows.html#session-windows): 210 | this sums the `Bytes x SamplingRate` and `Packets x SamplingRate` received during a 5 minutes **window** while allowing 2 more minutes 211 | in the case where some flows were delayed before closing the **session**. 212 | 213 | The BGP information provided by routers can be unreliable (if the router does not have a BGP full-table or it is a static route). 214 | You can use Maxmind [prefix to ASN](https://dev.maxmind.com/geoip/geoip2/geolite2/) in order to solve this issue. 215 | 216 | ## License 217 | 218 | Licensed under the BSD 3 License. 
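
As a complement to the **Output format** section above, below is a minimal sketch of a consumer for the protobuf messages GoFlow writes to Kafka. It is illustrative rather than part of GoFlow itself: the broker address and the `flows` topic name are placeholders for your own deployment, the type and field names assume the Go bindings generated from `pb/flow.proto` (import path `github.com/cloudflare/goflow/v3/pb`, message `FlowMessage`), and it assumes the default mode in which each Kafka message carries exactly one serialized `FlowMessage` (i.e. `-proto.fixedlen` left disabled).

```
package main

// Hedged example: a bare-bones Kafka consumer that decodes GoFlow's
// FlowMessage protobuf and scales the sampled counters back up.
// Broker address and topic name ("flows") are placeholders.
import (
	"log"
	"net"

	"github.com/Shopify/sarama"
	"github.com/golang/protobuf/proto"

	flowmessage "github.com/cloudflare/goflow/v3/pb"
)

func main() {
	consumer, err := sarama.NewConsumer([]string{"127.0.0.1:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Single-partition consumer, enough for a demonstration.
	partition, err := consumer.ConsumePartition("flows", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer partition.Close()

	for msg := range partition.Messages() {
		var flow flowmessage.FlowMessage
		if err := proto.Unmarshal(msg.Value, &flow); err != nil {
			log.Printf("decode error: %v", err)
			continue
		}

		// Bytes x SamplingRate and Packets x SamplingRate is the same scaling
		// used by the aggregation described in the implementation notes.
		rate := flow.SamplingRate
		if rate == 0 {
			rate = 1 // unsampled or rate not (yet) known
		}
		estBytes := flow.Bytes * rate
		estPackets := flow.Packets * rate

		log.Printf("%s -> %s: ~%d bytes, ~%d packets (sampling 1:%d)",
			net.IP(flow.SrcAddr), net.IP(flow.DstAddr),
			estBytes, estPackets, rate)
	}
}
```

The same decoding step is the natural place to plug in the enrichment described above (country, ASN, BGP data) before re-serializing the message for downstream inserters.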
219 | -------------------------------------------------------------------------------- /cmd/cnetflow/cnetflow.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "runtime" 9 | 10 | "github.com/cloudflare/goflow/v3/transport" 11 | "github.com/cloudflare/goflow/v3/utils" 12 | "github.com/prometheus/client_golang/prometheus/promhttp" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | var ( 17 | version = "" 18 | buildinfos = "" 19 | AppVersion = "GoFlow NetFlow " + version + " " + buildinfos 20 | 21 | Addr = flag.String("addr", "", "NetFlow/IPFIX listening address") 22 | Port = flag.Int("port", 2055, "NetFlow/IPFIX listening port") 23 | Reuse = flag.Bool("reuse", false, "Enable so_reuseport for NetFlow/IPFIX listening port") 24 | 25 | Workers = flag.Int("workers", 1, "Number of NetFlow workers") 26 | LogLevel = flag.String("loglevel", "info", "Log level") 27 | LogFmt = flag.String("logfmt", "normal", "Log formatter") 28 | 29 | EnableKafka = flag.Bool("kafka", true, "Enable Kafka") 30 | FixedLength = flag.Bool("proto.fixedlen", false, "Enable fixed length protobuf") 31 | MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") 32 | MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") 33 | TemplatePath = flag.String("templates.path", "/templates", "NetFlow/IPFIX templates list") 34 | 35 | Version = flag.Bool("v", false, "Print version") 36 | ) 37 | 38 | func init() { 39 | transport.RegisterFlags() 40 | } 41 | 42 | func httpServer(state *utils.StateNetFlow) { 43 | http.Handle(*MetricsPath, promhttp.Handler()) 44 | http.HandleFunc(*TemplatePath, state.ServeHTTPTemplates) 45 | log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) 46 | } 47 | 48 | func main() { 49 | flag.Parse() 50 | 51 | if *Version { 52 | fmt.Println(AppVersion) 53 | os.Exit(0) 54 | } 55 | 56 | lvl, _ := log.ParseLevel(*LogLevel) 57 | log.SetLevel(lvl) 58 | 59 | var defaultTransport utils.Transport 60 | defaultTransport = &utils.DefaultLogTransport{} 61 | 62 | switch *LogFmt { 63 | case "json": 64 | log.SetFormatter(&log.JSONFormatter{}) 65 | defaultTransport = &utils.DefaultJSONTransport{} 66 | } 67 | 68 | runtime.GOMAXPROCS(runtime.NumCPU()) 69 | 70 | log.Info("Starting GoFlow") 71 | 72 | s := &utils.StateNetFlow{ 73 | Transport: defaultTransport, 74 | Logger: log.StandardLogger(), 75 | } 76 | 77 | go httpServer(s) 78 | 79 | if *EnableKafka { 80 | kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) 81 | if err != nil { 82 | log.Fatal(err) 83 | } 84 | kafkaState.FixedLengthProto = *FixedLength 85 | s.Transport = kafkaState 86 | } 87 | log.WithFields(log.Fields{ 88 | "Type": "NetFlow"}). 
89 | Infof("Listening on UDP %v:%v", *Addr, *Port) 90 | 91 | err := s.FlowRoutine(*Workers, *Addr, *Port, *Reuse) 92 | if err != nil { 93 | log.Fatalf("Fatal error: could not listen to UDP (%v)", err) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /cmd/cnflegacy/cnflegacy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "runtime" 9 | 10 | "github.com/cloudflare/goflow/v3/transport" 11 | "github.com/cloudflare/goflow/v3/utils" 12 | "github.com/prometheus/client_golang/prometheus/promhttp" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | var ( 17 | version = "" 18 | buildinfos = "" 19 | AppVersion = "GoFlow NetFlowV5 " + version + " " + buildinfos 20 | 21 | Addr = flag.String("addr", "", "NetFlow v5 listening address") 22 | Port = flag.Int("port", 2055, "NetFlow v5 listening port") 23 | Reuse = flag.Bool("reuse", false, "Enable so_reuseport for NetFlow v5 listening port") 24 | 25 | Workers = flag.Int("workers", 1, "Number of NetFlow v5 workers") 26 | LogLevel = flag.String("loglevel", "info", "Log level") 27 | LogFmt = flag.String("logfmt", "normal", "Log formatter") 28 | 29 | EnableKafka = flag.Bool("kafka", true, "Enable Kafka") 30 | FixedLength = flag.Bool("proto.fixedlen", false, "Enable fixed length protobuf") 31 | MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") 32 | MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") 33 | 34 | Version = flag.Bool("v", false, "Print version") 35 | ) 36 | 37 | func init() { 38 | transport.RegisterFlags() 39 | } 40 | 41 | func httpServer() { 42 | http.Handle(*MetricsPath, promhttp.Handler()) 43 | log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) 44 | } 45 | 46 | func main() { 47 | flag.Parse() 48 | 49 | if *Version { 50 | fmt.Println(AppVersion) 51 | os.Exit(0) 52 | } 53 | 54 | lvl, _ := log.ParseLevel(*LogLevel) 55 | log.SetLevel(lvl) 56 | 57 | var defaultTransport utils.Transport 58 | defaultTransport = &utils.DefaultLogTransport{} 59 | 60 | switch *LogFmt { 61 | case "json": 62 | log.SetFormatter(&log.JSONFormatter{}) 63 | defaultTransport = &utils.DefaultJSONTransport{} 64 | } 65 | 66 | runtime.GOMAXPROCS(runtime.NumCPU()) 67 | 68 | log.Info("Starting GoFlow") 69 | 70 | s := &utils.StateNFLegacy{ 71 | Transport: defaultTransport, 72 | Logger: log.StandardLogger(), 73 | } 74 | 75 | go httpServer() 76 | 77 | if *EnableKafka { 78 | kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) 79 | if err != nil { 80 | log.Fatal(err) 81 | } 82 | kafkaState.FixedLengthProto = *FixedLength 83 | s.Transport = kafkaState 84 | } 85 | log.WithFields(log.Fields{ 86 | "Type": "NetFlowLegacy"}). 
87 | Infof("Listening on UDP %v:%v", *Addr, *Port) 88 | 89 | err := s.FlowRoutine(*Workers, *Addr, *Port, *Reuse) 90 | if err != nil { 91 | log.Fatalf("Fatal error: could not listen to UDP (%v)", err) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /cmd/csflow/csflow.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "runtime" 9 | 10 | "github.com/cloudflare/goflow/v3/transport" 11 | "github.com/cloudflare/goflow/v3/utils" 12 | "github.com/prometheus/client_golang/prometheus/promhttp" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | var ( 17 | version = "" 18 | buildinfos = "" 19 | AppVersion = "GoFlow sFlow " + version + " " + buildinfos 20 | 21 | Addr = flag.String("addr", "", "sFlow listening address") 22 | Port = flag.Int("port", 6343, "sFlow listening port") 23 | Reuse = flag.Bool("reuse", false, "Enable so_reuseport for sFlow listening port") 24 | 25 | Workers = flag.Int("workers", 1, "Number of sFlow workers") 26 | LogLevel = flag.String("loglevel", "info", "Log level") 27 | LogFmt = flag.String("logfmt", "normal", "Log formatter") 28 | 29 | EnableKafka = flag.Bool("kafka", true, "Enable Kafka") 30 | FixedLength = flag.Bool("proto.fixedlen", false, "Enable fixed length protobuf") 31 | MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") 32 | MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") 33 | 34 | Version = flag.Bool("v", false, "Print version") 35 | ) 36 | 37 | func init() { 38 | transport.RegisterFlags() 39 | } 40 | 41 | func httpServer() { 42 | http.Handle(*MetricsPath, promhttp.Handler()) 43 | log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) 44 | } 45 | 46 | func main() { 47 | flag.Parse() 48 | 49 | if *Version { 50 | fmt.Println(AppVersion) 51 | os.Exit(0) 52 | } 53 | 54 | lvl, _ := log.ParseLevel(*LogLevel) 55 | log.SetLevel(lvl) 56 | 57 | var defaultTransport utils.Transport 58 | defaultTransport = &utils.DefaultLogTransport{} 59 | 60 | switch *LogFmt { 61 | case "json": 62 | log.SetFormatter(&log.JSONFormatter{}) 63 | defaultTransport = &utils.DefaultJSONTransport{} 64 | } 65 | 66 | runtime.GOMAXPROCS(runtime.NumCPU()) 67 | 68 | log.Info("Starting GoFlow") 69 | 70 | s := &utils.StateSFlow{ 71 | Transport: defaultTransport, 72 | Logger: log.StandardLogger(), 73 | } 74 | 75 | go httpServer() 76 | 77 | if *EnableKafka { 78 | kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) 79 | if err != nil { 80 | log.Fatal(err) 81 | } 82 | kafkaState.FixedLengthProto = *FixedLength 83 | s.Transport = kafkaState 84 | } 85 | log.WithFields(log.Fields{ 86 | "Type": "sFlow"}). 
87 | Infof("Listening on UDP %v:%v", *Addr, *Port) 88 | 89 | err := s.FlowRoutine(*Workers, *Addr, *Port, *Reuse) 90 | if err != nil { 91 | log.Fatalf("Fatal error: could not listen to UDP (%v)", err) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /cmd/goflow/goflow.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "runtime" 9 | "sync" 10 | 11 | "github.com/cloudflare/goflow/v3/transport" 12 | "github.com/cloudflare/goflow/v3/utils" 13 | "github.com/prometheus/client_golang/prometheus/promhttp" 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | var ( 18 | version = "" 19 | buildinfos = "" 20 | AppVersion = "GoFlow " + version + " " + buildinfos 21 | 22 | SFlowEnable = flag.Bool("sflow", true, "Enable sFlow") 23 | SFlowAddr = flag.String("sflow.addr", "", "sFlow listening address") 24 | SFlowPort = flag.Int("sflow.port", 6343, "sFlow listening port") 25 | SFlowReuse = flag.Bool("sflow.reuserport", false, "Enable so_reuseport for sFlow") 26 | 27 | NFLEnable = flag.Bool("nfl", true, "Enable NetFlow v5") 28 | NFLAddr = flag.String("nfl.addr", "", "NetFlow v5 listening address") 29 | NFLPort = flag.Int("nfl.port", 2056, "NetFlow v5 listening port") 30 | NFLReuse = flag.Bool("nfl.reuserport", false, "Enable so_reuseport for NetFlow v5") 31 | 32 | NFEnable = flag.Bool("nf", true, "Enable NetFlow/IPFIX") 33 | NFAddr = flag.String("nf.addr", "", "NetFlow/IPFIX listening address") 34 | NFPort = flag.Int("nf.port", 2055, "NetFlow/IPFIX listening port") 35 | NFReuse = flag.Bool("nf.reuserport", false, "Enable so_reuseport for NetFlow/IPFIX") 36 | 37 | Workers = flag.Int("workers", 1, "Number of workers per collector") 38 | LogLevel = flag.String("loglevel", "info", "Log level") 39 | LogFmt = flag.String("logfmt", "normal", "Log formatter") 40 | 41 | EnableKafka = flag.Bool("kafka", true, "Enable Kafka") 42 | FixedLength = flag.Bool("proto.fixedlen", false, "Enable fixed length protobuf") 43 | MetricsAddr = flag.String("metrics.addr", ":8080", "Metrics address") 44 | MetricsPath = flag.String("metrics.path", "/metrics", "Metrics path") 45 | 46 | TemplatePath = flag.String("templates.path", "/templates", "NetFlow/IPFIX templates list") 47 | 48 | Version = flag.Bool("v", false, "Print version") 49 | ) 50 | 51 | func init() { 52 | transport.RegisterFlags() 53 | } 54 | 55 | func httpServer(state *utils.StateNetFlow) { 56 | http.Handle(*MetricsPath, promhttp.Handler()) 57 | http.HandleFunc(*TemplatePath, state.ServeHTTPTemplates) 58 | log.Fatal(http.ListenAndServe(*MetricsAddr, nil)) 59 | } 60 | 61 | func main() { 62 | flag.Parse() 63 | 64 | if *Version { 65 | fmt.Println(AppVersion) 66 | os.Exit(0) 67 | } 68 | 69 | lvl, _ := log.ParseLevel(*LogLevel) 70 | log.SetLevel(lvl) 71 | 72 | var defaultTransport utils.Transport 73 | defaultTransport = &utils.DefaultLogTransport{} 74 | 75 | switch *LogFmt { 76 | case "json": 77 | log.SetFormatter(&log.JSONFormatter{}) 78 | defaultTransport = &utils.DefaultJSONTransport{} 79 | } 80 | 81 | runtime.GOMAXPROCS(runtime.NumCPU()) 82 | 83 | log.Info("Starting GoFlow") 84 | 85 | sSFlow := &utils.StateSFlow{ 86 | Transport: defaultTransport, 87 | Logger: log.StandardLogger(), 88 | } 89 | sNF := &utils.StateNetFlow{ 90 | Transport: defaultTransport, 91 | Logger: log.StandardLogger(), 92 | } 93 | sNFL := &utils.StateNFLegacy{ 94 | Transport: defaultTransport, 95 | Logger: log.StandardLogger(), 96 | } 97 | 
98 | go httpServer(sNF) 99 | 100 | if *EnableKafka { 101 | kafkaState, err := transport.StartKafkaProducerFromArgs(log.StandardLogger()) 102 | if err != nil { 103 | log.Fatal(err) 104 | } 105 | kafkaState.FixedLengthProto = *FixedLength 106 | 107 | sSFlow.Transport = kafkaState 108 | sNFL.Transport = kafkaState 109 | sNF.Transport = kafkaState 110 | } 111 | 112 | wg := &sync.WaitGroup{} 113 | if *SFlowEnable { 114 | wg.Add(1) 115 | go func() { 116 | log.WithFields(log.Fields{ 117 | "Type": "sFlow"}). 118 | Infof("Listening on UDP %v:%v", *SFlowAddr, *SFlowPort) 119 | 120 | err := sSFlow.FlowRoutine(*Workers, *SFlowAddr, *SFlowPort, *SFlowReuse) 121 | if err != nil { 122 | log.Fatalf("Fatal error: could not listen to UDP (%v)", err) 123 | } 124 | wg.Done() 125 | }() 126 | } 127 | if *NFEnable { 128 | wg.Add(1) 129 | go func() { 130 | log.WithFields(log.Fields{ 131 | "Type": "NetFlow"}). 132 | Infof("Listening on UDP %v:%v", *NFAddr, *NFPort) 133 | 134 | err := sNF.FlowRoutine(*Workers, *NFAddr, *NFPort, *NFReuse) 135 | if err != nil { 136 | log.Fatalf("Fatal error: could not listen to UDP (%v)", err) 137 | } 138 | wg.Done() 139 | }() 140 | } 141 | if *NFLEnable { 142 | wg.Add(1) 143 | go func() { 144 | log.WithFields(log.Fields{ 145 | "Type": "NetFlowLegacy"}). 146 | Infof("Listening on UDP %v:%v", *NFLAddr, *NFLPort) 147 | 148 | err := sNFL.FlowRoutine(*Workers, *NFLAddr, *NFLPort, *NFLReuse) 149 | if err != nil { 150 | log.Fatalf("Fatal error: could not listen to UDP (%v)", err) 151 | } 152 | wg.Done() 153 | }() 154 | } 155 | wg.Wait() 156 | } 157 | -------------------------------------------------------------------------------- /decoders/decoder.go: -------------------------------------------------------------------------------- 1 | package decoder 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type Message interface{} 8 | type MessageDecoded interface{} 9 | 10 | type DecoderFunc func(Message interface{}) error 11 | type DoneCallback func(string, int, time.Time, time.Time) 12 | type ErrorCallback func(string, int, time.Time, time.Time, error) 13 | 14 | // Worker structure 15 | type Worker struct { 16 | Id int 17 | DecoderParams DecoderParams 18 | WorkerPool chan chan Message 19 | Name string 20 | InMsg chan Message 21 | Quit chan bool 22 | } 23 | 24 | // Create a worker and add it to the pool. 25 | func CreateWorker(workerPool chan chan Message, decoderParams DecoderParams, id int, name string) Worker { 26 | return Worker{ 27 | Id: id, 28 | DecoderParams: decoderParams, 29 | WorkerPool: workerPool, 30 | Name: name, 31 | InMsg: make(chan Message), 32 | Quit: make(chan bool), 33 | } 34 | } 35 | 36 | // Start the worker. Launches a goroutine to process NFv9 messages. 37 | // The worker will add its input channel of NFv9 messages to decode to the pool. 38 | func (w Worker) Start() { 39 | go func() { 40 | //log.Debugf("Worker %v started", w.Id) 41 | for { 42 | select { 43 | case <-w.Quit: 44 | break 45 | case w.WorkerPool <- w.InMsg: 46 | msg := <-w.InMsg 47 | timeTrackStart := time.Now() 48 | err := w.DecoderParams.DecoderFunc(msg) 49 | timeTrackStop := time.Now() 50 | 51 | if err != nil && w.DecoderParams.ErrorCallback != nil { 52 | w.DecoderParams.ErrorCallback(w.Name, w.Id, timeTrackStart, timeTrackStop, err) 53 | } else if err == nil && w.DecoderParams.DoneCallback != nil { 54 | w.DecoderParams.DoneCallback(w.Name, w.Id, timeTrackStart, timeTrackStop) 55 | } 56 | } 57 | } 58 | //log.Debugf("Worker %v done", w.Id) 59 | }() 60 | } 61 | 62 | // Stop the worker. 
63 | func (w Worker) Stop() { 64 | //log.Debugf("Stopping worker %v", w.Id) 65 | w.Quit <- true 66 | } 67 | 68 | // Processor structure 69 | type Processor struct { 70 | workerpool chan chan Message 71 | workerlist []Worker 72 | DecoderParams DecoderParams 73 | Name string 74 | } 75 | 76 | // Decoder structure. Define the function to call and the config specific to the type of packets. 77 | type DecoderParams struct { 78 | DecoderFunc DecoderFunc 79 | DoneCallback DoneCallback 80 | ErrorCallback ErrorCallback 81 | } 82 | 83 | // Create a message processor which is going to create all the workers and set-up the pool. 84 | func CreateProcessor(numWorkers int, decoderParams DecoderParams, name string) Processor { 85 | processor := Processor{ 86 | workerpool: make(chan chan Message), 87 | workerlist: make([]Worker, numWorkers), 88 | DecoderParams: decoderParams, 89 | Name: name, 90 | } 91 | for i := 0; i < numWorkers; i++ { 92 | worker := CreateWorker(processor.workerpool, decoderParams, i, name) 93 | processor.workerlist[i] = worker 94 | } 95 | return processor 96 | } 97 | 98 | // Start message processor 99 | func (p Processor) Start() { 100 | for _, worker := range p.workerlist { 101 | worker.Start() 102 | } 103 | } 104 | 105 | func (p Processor) Stop() { 106 | for _, worker := range p.workerlist { 107 | worker.Stop() 108 | } 109 | } 110 | 111 | // Send a message to be decoded to the pool. 112 | func (p Processor) ProcessMessage(msg Message) { 113 | sendChannel := <-p.workerpool 114 | sendChannel <- msg 115 | } 116 | -------------------------------------------------------------------------------- /decoders/netflow/netflow.go: -------------------------------------------------------------------------------- 1 | package netflow 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "sync" 8 | 9 | "github.com/cloudflare/goflow/v3/decoders/utils" 10 | ) 11 | 12 | type FlowBaseTemplateSet map[uint16]map[uint32]map[uint16]interface{} 13 | 14 | type NetFlowTemplateSystem interface { 15 | GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) 16 | AddTemplate(version uint16, obsDomainId uint32, template interface{}) 17 | } 18 | 19 | func DecodeNFv9OptionsTemplateSet(payload *bytes.Buffer) ([]NFv9OptionsTemplateRecord, error) { 20 | records := make([]NFv9OptionsTemplateRecord, 0) 21 | var err error 22 | for payload.Len() >= 4 { 23 | optsTemplateRecord := NFv9OptionsTemplateRecord{} 24 | err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.ScopeLength, &optsTemplateRecord.OptionLength) 25 | if err != nil { 26 | break 27 | } 28 | 29 | sizeScope := int(optsTemplateRecord.ScopeLength) / 4 30 | sizeOptions := int(optsTemplateRecord.OptionLength) / 4 31 | if sizeScope < 0 || sizeOptions < 0 { 32 | return records, NewErrorDecodingNetFlow("Error decoding OptionsTemplateSet: negative length.") 33 | } 34 | 35 | fields := make([]Field, sizeScope) 36 | for i := 0; i < sizeScope; i++ { 37 | field := Field{} 38 | err = utils.BinaryDecoder(payload, &field) 39 | fields[i] = field 40 | } 41 | optsTemplateRecord.Scopes = fields 42 | 43 | fields = make([]Field, sizeOptions) 44 | for i := 0; i < sizeOptions; i++ { 45 | field := Field{} 46 | err = utils.BinaryDecoder(payload, &field) 47 | fields[i] = field 48 | } 49 | optsTemplateRecord.Options = fields 50 | 51 | records = append(records, optsTemplateRecord) 52 | } 53 | 54 | return records, nil 55 | } 56 | 57 | func DecodeIPFIXOptionsTemplateSet(payload *bytes.Buffer) 
([]IPFIXOptionsTemplateRecord, error) { 58 | records := make([]IPFIXOptionsTemplateRecord, 0) 59 | var err error 60 | for payload.Len() >= 4 { 61 | optsTemplateRecord := IPFIXOptionsTemplateRecord{} 62 | err = utils.BinaryDecoder(payload, &optsTemplateRecord.TemplateId, &optsTemplateRecord.FieldCount, &optsTemplateRecord.ScopeFieldCount) 63 | if err != nil { 64 | break 65 | } 66 | 67 | fields := make([]Field, int(optsTemplateRecord.ScopeFieldCount)) 68 | for i := 0; i < int(optsTemplateRecord.ScopeFieldCount); i++ { 69 | field := Field{} 70 | err = utils.BinaryDecoder(payload, &field) 71 | fields[i] = field 72 | } 73 | optsTemplateRecord.Scopes = fields 74 | 75 | optionsSize := int(optsTemplateRecord.FieldCount) - int(optsTemplateRecord.ScopeFieldCount) 76 | if optionsSize < 0 { 77 | return records, NewErrorDecodingNetFlow("Error decoding OptionsTemplateSet: negative length.") 78 | } 79 | fields = make([]Field, optionsSize) 80 | for i := 0; i < optionsSize; i++ { 81 | field := Field{} 82 | err = utils.BinaryDecoder(payload, &field) 83 | fields[i] = field 84 | } 85 | optsTemplateRecord.Options = fields 86 | 87 | records = append(records, optsTemplateRecord) 88 | } 89 | 90 | return records, nil 91 | } 92 | 93 | func DecodeTemplateSet(payload *bytes.Buffer) ([]TemplateRecord, error) { 94 | records := make([]TemplateRecord, 0) 95 | var err error 96 | for payload.Len() >= 4 { 97 | templateRecord := TemplateRecord{} 98 | err = utils.BinaryDecoder(payload, &templateRecord.TemplateId, &templateRecord.FieldCount) 99 | if err != nil { 100 | break 101 | } 102 | 103 | if templateRecord.FieldCount == 0 { 104 | return records, NewErrorDecodingNetFlow("Error decoding TemplateSet: zero count.") 105 | } 106 | 107 | fields := make([]Field, int(templateRecord.FieldCount)) 108 | for i := 0; i < int(templateRecord.FieldCount); i++ { 109 | field := Field{} 110 | err = utils.BinaryDecoder(payload, &field) 111 | fields[i] = field 112 | } 113 | templateRecord.Fields = fields 114 | records = append(records, templateRecord) 115 | } 116 | 117 | return records, nil 118 | } 119 | 120 | func GetTemplateSize(template []Field) int { 121 | sum := 0 122 | for _, templateField := range template { 123 | sum += int(templateField.Length) 124 | } 125 | return sum 126 | } 127 | 128 | func DecodeDataSetUsingFields(payload *bytes.Buffer, listFields []Field) []DataField { 129 | for payload.Len() >= GetTemplateSize(listFields) { 130 | 131 | dataFields := make([]DataField, len(listFields)) 132 | 133 | for i, templateField := range listFields { 134 | value := payload.Next(int(templateField.Length)) 135 | nfvalue := DataField{ 136 | Type: templateField.Type, 137 | Value: value, 138 | } 139 | dataFields[i] = nfvalue 140 | } 141 | return dataFields 142 | } 143 | return []DataField{} 144 | } 145 | 146 | type ErrorTemplateNotFound struct { 147 | version uint16 148 | obsDomainId uint32 149 | templateId uint16 150 | typeTemplate string 151 | } 152 | 153 | func NewErrorTemplateNotFound(version uint16, obsDomainId uint32, templateId uint16, typeTemplate string) *ErrorTemplateNotFound { 154 | return &ErrorTemplateNotFound{ 155 | version: version, 156 | obsDomainId: obsDomainId, 157 | templateId: templateId, 158 | typeTemplate: typeTemplate, 159 | } 160 | } 161 | 162 | func (e *ErrorTemplateNotFound) Error() string { 163 | return fmt.Sprintf("No %v template %v found for and domain id %v", e.typeTemplate, e.templateId, e.obsDomainId) 164 | } 165 | 166 | type ErrorVersion struct { 167 | version uint16 168 | } 169 | 170 | func 
NewErrorVersion(version uint16) *ErrorVersion { 171 | return &ErrorVersion{ 172 | version: version, 173 | } 174 | } 175 | 176 | func (e *ErrorVersion) Error() string { 177 | return fmt.Sprintf("Unknown NetFlow version %v (only decodes v9 and v10/IPFIX)", e.version) 178 | } 179 | 180 | type ErrorFlowId struct { 181 | id uint16 182 | } 183 | 184 | func NewErrorFlowId(id uint16) *ErrorFlowId { 185 | return &ErrorFlowId{ 186 | id: id, 187 | } 188 | } 189 | 190 | func (e *ErrorFlowId) Error() string { 191 | return fmt.Sprintf("Unknown flow id %v (templates < 256, data >= 256)", e.id) 192 | } 193 | 194 | type ErrorDecodingNetFlow struct { 195 | msg string 196 | } 197 | 198 | func NewErrorDecodingNetFlow(msg string) *ErrorDecodingNetFlow { 199 | return &ErrorDecodingNetFlow{ 200 | msg: msg, 201 | } 202 | } 203 | 204 | func (e *ErrorDecodingNetFlow) Error() string { 205 | return fmt.Sprintf("Error decoding NetFlow: %v", e.msg) 206 | } 207 | 208 | func DecodeOptionsDataSet(payload *bytes.Buffer, listFieldsScopes, listFieldsOption []Field) ([]OptionsDataRecord, error) { 209 | records := make([]OptionsDataRecord, 0) 210 | 211 | listFieldsScopesSize := GetTemplateSize(listFieldsScopes) 212 | listFieldsOptionSize := GetTemplateSize(listFieldsOption) 213 | 214 | for payload.Len() >= listFieldsScopesSize+listFieldsOptionSize { 215 | payloadLim := bytes.NewBuffer(payload.Next(listFieldsScopesSize)) 216 | scopeValues := DecodeDataSetUsingFields(payloadLim, listFieldsScopes) 217 | payloadLim = bytes.NewBuffer(payload.Next(listFieldsOptionSize)) 218 | optionValues := DecodeDataSetUsingFields(payloadLim, listFieldsOption) 219 | 220 | record := OptionsDataRecord{ 221 | ScopesValues: scopeValues, 222 | OptionsValues: optionValues, 223 | } 224 | 225 | records = append(records, record) 226 | } 227 | return records, nil 228 | } 229 | 230 | func DecodeDataSet(payload *bytes.Buffer, listFields []Field) ([]DataRecord, error) { 231 | records := make([]DataRecord, 0) 232 | 233 | listFieldsSize := GetTemplateSize(listFields) 234 | for payload.Len() >= listFieldsSize { 235 | payloadLim := bytes.NewBuffer(payload.Next(listFieldsSize)) 236 | values := DecodeDataSetUsingFields(payloadLim, listFields) 237 | 238 | record := DataRecord{ 239 | Values: values, 240 | } 241 | 242 | records = append(records, record) 243 | } 244 | return records, nil 245 | } 246 | 247 | func (ts *BasicTemplateSystem) GetTemplates() map[uint16]map[uint32]map[uint16]interface{} { 248 | ts.templateslock.RLock() 249 | tmp := ts.templates 250 | ts.templateslock.RUnlock() 251 | return tmp 252 | } 253 | 254 | func (ts *BasicTemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { 255 | ts.templateslock.Lock() 256 | _, exists := ts.templates[version] 257 | if !exists { 258 | ts.templates[version] = make(map[uint32]map[uint16]interface{}) 259 | } 260 | _, exists = ts.templates[version][obsDomainId] 261 | if !exists { 262 | ts.templates[version][obsDomainId] = make(map[uint16]interface{}) 263 | } 264 | var templateId uint16 265 | switch templateIdConv := template.(type) { 266 | case IPFIXOptionsTemplateRecord: 267 | templateId = templateIdConv.TemplateId 268 | case NFv9OptionsTemplateRecord: 269 | templateId = templateIdConv.TemplateId 270 | case TemplateRecord: 271 | templateId = templateIdConv.TemplateId 272 | } 273 | ts.templates[version][obsDomainId][templateId] = template 274 | ts.templateslock.Unlock() 275 | } 276 | 277 | func (ts *BasicTemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) 
(interface{}, error) { 278 | ts.templateslock.RLock() 279 | templatesVersion, okver := ts.templates[version] 280 | if okver { 281 | templatesObsDom, okobs := templatesVersion[obsDomainId] 282 | if okobs { 283 | template, okid := templatesObsDom[templateId] 284 | if okid { 285 | ts.templateslock.RUnlock() 286 | return template, nil 287 | } 288 | ts.templateslock.RUnlock() 289 | return nil, NewErrorTemplateNotFound(version, obsDomainId, templateId, "info") 290 | } 291 | ts.templateslock.RUnlock() 292 | return nil, NewErrorTemplateNotFound(version, obsDomainId, templateId, "info") 293 | } 294 | ts.templateslock.RUnlock() 295 | return nil, NewErrorTemplateNotFound(version, obsDomainId, templateId, "info") 296 | } 297 | 298 | type BasicTemplateSystem struct { 299 | templates FlowBaseTemplateSet 300 | templateslock *sync.RWMutex 301 | } 302 | 303 | func CreateTemplateSystem() *BasicTemplateSystem { 304 | ts := &BasicTemplateSystem{ 305 | templates: make(FlowBaseTemplateSet), 306 | templateslock: &sync.RWMutex{}, 307 | } 308 | return ts 309 | } 310 | 311 | func DecodeMessage(payload *bytes.Buffer, templates NetFlowTemplateSystem) (interface{}, error) { 312 | var size uint16 313 | packetNFv9 := NFv9Packet{} 314 | packetIPFIX := IPFIXPacket{} 315 | var returnItem interface{} 316 | 317 | var version uint16 318 | var obsDomainId uint32 319 | binary.Read(payload, binary.BigEndian, &version) 320 | 321 | if version == 9 { 322 | utils.BinaryDecoder(payload, &packetNFv9.Count, &packetNFv9.SystemUptime, &packetNFv9.UnixSeconds, &packetNFv9.SequenceNumber, &packetNFv9.SourceId) 323 | size = packetNFv9.Count 324 | packetNFv9.Version = version 325 | returnItem = packetNFv9 326 | obsDomainId = packetNFv9.SourceId 327 | } else if version == 10 { 328 | utils.BinaryDecoder(payload, &packetIPFIX.Length, &packetIPFIX.ExportTime, &packetIPFIX.SequenceNumber, &packetIPFIX.ObservationDomainId) 329 | size = packetIPFIX.Length 330 | packetIPFIX.Version = version 331 | returnItem = packetIPFIX 332 | obsDomainId = packetIPFIX.ObservationDomainId 333 | } else { 334 | return nil, NewErrorVersion(version) 335 | } 336 | 337 | for i := 0; ((i < int(size) && version == 9) || version == 10) && payload.Len() > 0; i++ { 338 | fsheader := FlowSetHeader{} 339 | utils.BinaryDecoder(payload, &fsheader) 340 | 341 | nextrelpos := int(fsheader.Length) - binary.Size(fsheader) 342 | if nextrelpos < 0 { 343 | return returnItem, NewErrorDecodingNetFlow("Error decoding packet: non-terminated stream.") 344 | } 345 | 346 | var flowSet interface{} 347 | 348 | if fsheader.Id == 0 && version == 9 { 349 | templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) 350 | records, err := DecodeTemplateSet(templateReader) 351 | if err != nil { 352 | return returnItem, err 353 | } 354 | templatefs := TemplateFlowSet{ 355 | FlowSetHeader: fsheader, 356 | Records: records, 357 | } 358 | 359 | flowSet = templatefs 360 | 361 | if templates != nil { 362 | for _, record := range records { 363 | templates.AddTemplate(version, obsDomainId, record) 364 | } 365 | } 366 | 367 | } else if fsheader.Id == 1 && version == 9 { 368 | templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) 369 | records, err := DecodeNFv9OptionsTemplateSet(templateReader) 370 | if err != nil { 371 | return returnItem, err 372 | } 373 | optsTemplatefs := NFv9OptionsTemplateFlowSet{ 374 | FlowSetHeader: fsheader, 375 | Records: records, 376 | } 377 | flowSet = optsTemplatefs 378 | 379 | if templates != nil { 380 | for _, record := range records { 381 | templates.AddTemplate(version, 
obsDomainId, record) 382 | } 383 | } 384 | 385 | } else if fsheader.Id == 2 && version == 10 { 386 | templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) 387 | records, err := DecodeTemplateSet(templateReader) 388 | if err != nil { 389 | return returnItem, err 390 | } 391 | templatefs := TemplateFlowSet{ 392 | FlowSetHeader: fsheader, 393 | Records: records, 394 | } 395 | flowSet = templatefs 396 | 397 | if templates != nil { 398 | for _, record := range records { 399 | templates.AddTemplate(version, obsDomainId, record) 400 | } 401 | } 402 | 403 | } else if fsheader.Id == 3 && version == 10 { 404 | templateReader := bytes.NewBuffer(payload.Next(nextrelpos)) 405 | records, err := DecodeIPFIXOptionsTemplateSet(templateReader) 406 | if err != nil { 407 | return returnItem, err 408 | } 409 | optsTemplatefs := IPFIXOptionsTemplateFlowSet{ 410 | FlowSetHeader: fsheader, 411 | Records: records, 412 | } 413 | flowSet = optsTemplatefs 414 | 415 | if templates != nil { 416 | for _, record := range records { 417 | templates.AddTemplate(version, obsDomainId, record) 418 | } 419 | } 420 | 421 | } else if fsheader.Id >= 256 { 422 | dataReader := bytes.NewBuffer(payload.Next(nextrelpos)) 423 | 424 | if templates == nil { 425 | continue 426 | } 427 | 428 | template, err := templates.GetTemplate(version, obsDomainId, fsheader.Id) 429 | 430 | if err == nil { 431 | switch templatec := template.(type) { 432 | case TemplateRecord: 433 | records, err := DecodeDataSet(dataReader, templatec.Fields) 434 | if err != nil { 435 | return returnItem, err 436 | } 437 | datafs := DataFlowSet{ 438 | FlowSetHeader: fsheader, 439 | Records: records, 440 | } 441 | flowSet = datafs 442 | case IPFIXOptionsTemplateRecord: 443 | records, err := DecodeOptionsDataSet(dataReader, templatec.Scopes, templatec.Options) 444 | if err != nil { 445 | return returnItem, err 446 | } 447 | 448 | datafs := OptionsDataFlowSet{ 449 | FlowSetHeader: fsheader, 450 | Records: records, 451 | } 452 | flowSet = datafs 453 | case NFv9OptionsTemplateRecord: 454 | records, err := DecodeOptionsDataSet(dataReader, templatec.Scopes, templatec.Options) 455 | if err != nil { 456 | return returnItem, err 457 | } 458 | 459 | datafs := OptionsDataFlowSet{ 460 | FlowSetHeader: fsheader, 461 | Records: records, 462 | } 463 | flowSet = datafs 464 | } 465 | } else { 466 | return returnItem, err 467 | } 468 | } else { 469 | return returnItem, NewErrorFlowId(fsheader.Id) 470 | } 471 | 472 | if version == 9 && flowSet != nil { 473 | packetNFv9.FlowSets = append(packetNFv9.FlowSets, flowSet) 474 | } else if version == 10 && flowSet != nil { 475 | packetIPFIX.FlowSets = append(packetIPFIX.FlowSets, flowSet) 476 | } 477 | } 478 | 479 | if version == 9 { 480 | return packetNFv9, nil 481 | } else if version == 10 { 482 | return packetIPFIX, nil 483 | } else { 484 | return returnItem, NewErrorVersion(version) 485 | } 486 | } 487 | -------------------------------------------------------------------------------- /decoders/netflow/nfv9.go: -------------------------------------------------------------------------------- 1 | package netflow 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | const ( 9 | NFV9_FIELD_IN_BYTES = 1 10 | NFV9_FIELD_IN_PKTS = 2 11 | NFV9_FIELD_FLOWS = 3 12 | NFV9_FIELD_PROTOCOL = 4 13 | NFV9_FIELD_SRC_TOS = 5 14 | NFV9_FIELD_TCP_FLAGS = 6 15 | NFV9_FIELD_L4_SRC_PORT = 7 16 | NFV9_FIELD_IPV4_SRC_ADDR = 8 17 | NFV9_FIELD_SRC_MASK = 9 18 | NFV9_FIELD_INPUT_SNMP = 10 19 | NFV9_FIELD_L4_DST_PORT = 11 20 | NFV9_FIELD_IPV4_DST_ADDR = 12 21 | 
NFV9_FIELD_DST_MASK = 13 22 | NFV9_FIELD_OUTPUT_SNMP = 14 23 | NFV9_FIELD_IPV4_NEXT_HOP = 15 24 | NFV9_FIELD_SRC_AS = 16 25 | NFV9_FIELD_DST_AS = 17 26 | NFV9_FIELD_BGP_IPV4_NEXT_HOP = 18 27 | NFV9_FIELD_MUL_DST_PKTS = 19 28 | NFV9_FIELD_MUL_DST_BYTES = 20 29 | NFV9_FIELD_LAST_SWITCHED = 21 30 | NFV9_FIELD_FIRST_SWITCHED = 22 31 | NFV9_FIELD_OUT_BYTES = 23 32 | NFV9_FIELD_OUT_PKTS = 24 33 | NFV9_FIELD_MIN_PKT_LNGTH = 25 34 | NFV9_FIELD_MAX_PKT_LNGTH = 26 35 | NFV9_FIELD_IPV6_SRC_ADDR = 27 36 | NFV9_FIELD_IPV6_DST_ADDR = 28 37 | NFV9_FIELD_IPV6_SRC_MASK = 29 38 | NFV9_FIELD_IPV6_DST_MASK = 30 39 | NFV9_FIELD_IPV6_FLOW_LABEL = 31 40 | NFV9_FIELD_ICMP_TYPE = 32 41 | NFV9_FIELD_MUL_IGMP_TYPE = 33 42 | NFV9_FIELD_SAMPLING_INTERVAL = 34 43 | NFV9_FIELD_SAMPLING_ALGORITHM = 35 44 | NFV9_FIELD_FLOW_ACTIVE_TIMEOUT = 36 45 | NFV9_FIELD_FLOW_INACTIVE_TIMEOUT = 37 46 | NFV9_FIELD_ENGINE_TYPE = 38 47 | NFV9_FIELD_ENGINE_ID = 39 48 | NFV9_FIELD_TOTAL_BYTES_EXP = 40 49 | NFV9_FIELD_TOTAL_PKTS_EXP = 41 50 | NFV9_FIELD_TOTAL_FLOWS_EXP = 42 51 | NFV9_FIELD_IPV4_SRC_PREFIX = 44 52 | NFV9_FIELD_IPV4_DST_PREFIX = 45 53 | NFV9_FIELD_MPLS_TOP_LABEL_TYPE = 46 54 | NFV9_FIELD_MPLS_TOP_LABEL_IP_ADDR = 47 55 | NFV9_FIELD_FLOW_SAMPLER_ID = 48 56 | NFV9_FIELD_FLOW_SAMPLER_MODE = 49 57 | NFV9_FIELD_FLOW_SAMPLER_RANDOM_INTERVAL = 50 58 | NFV9_FIELD_MIN_TTL = 52 59 | NFV9_FIELD_MAX_TTL = 53 60 | NFV9_FIELD_IPV4_IDENT = 54 61 | NFV9_FIELD_DST_TOS = 55 62 | NFV9_FIELD_IN_SRC_MAC = 56 63 | NFV9_FIELD_OUT_DST_MAC = 57 64 | NFV9_FIELD_SRC_VLAN = 58 65 | NFV9_FIELD_DST_VLAN = 59 66 | NFV9_FIELD_IP_PROTOCOL_VERSION = 60 67 | NFV9_FIELD_DIRECTION = 61 68 | NFV9_FIELD_IPV6_NEXT_HOP = 62 69 | NFV9_FIELD_BGP_IPV6_NEXT_HOP = 63 70 | NFV9_FIELD_IPV6_OPTION_HEADERS = 64 71 | NFV9_FIELD_MPLS_LABEL_1 = 70 72 | NFV9_FIELD_MPLS_LABEL_2 = 71 73 | NFV9_FIELD_MPLS_LABEL_3 = 72 74 | NFV9_FIELD_MPLS_LABEL_4 = 73 75 | NFV9_FIELD_MPLS_LABEL_5 = 74 76 | NFV9_FIELD_MPLS_LABEL_6 = 75 77 | NFV9_FIELD_MPLS_LABEL_7 = 76 78 | NFV9_FIELD_MPLS_LABEL_8 = 77 79 | NFV9_FIELD_MPLS_LABEL_9 = 78 80 | NFV9_FIELD_MPLS_LABEL_10 = 79 81 | NFV9_FIELD_IN_DST_MAC = 80 82 | NFV9_FIELD_OUT_SRC_MAC = 81 83 | NFV9_FIELD_IF_NAME = 82 84 | NFV9_FIELD_IF_DESC = 83 85 | NFV9_FIELD_SAMPLER_NAME = 84 86 | NFV9_FIELD_IN_PERMANENT_BYTES = 85 87 | NFV9_FIELD_IN_PERMANENT_PKTS = 86 88 | NFV9_FIELD_FRAGMENT_OFFSET = 88 89 | NFV9_FIELD_FORWARDING_STATUS = 89 90 | NFV9_FIELD_MPLS_PAL_RD = 90 91 | NFV9_FIELD_MPLS_PREFIX_LEN = 91 92 | NFV9_FIELD_SRC_TRAFFIC_INDEX = 92 93 | NFV9_FIELD_DST_TRAFFIC_INDEX = 93 94 | NFV9_FIELD_APPLICATION_DESCRIPTION = 94 95 | NFV9_FIELD_APPLICATION_TAG = 95 96 | NFV9_FIELD_APPLICATION_NAME = 96 97 | NFV9_FIELD_postipDiffServCodePoint = 98 98 | NFV9_FIELD_replication_factor = 99 99 | NFV9_FIELD_layer2packetSectionOffset = 102 100 | NFV9_FIELD_layer2packetSectionSize = 103 101 | NFV9_FIELD_layer2packetSectionData = 104 102 | ) 103 | 104 | type NFv9Packet struct { 105 | Version uint16 106 | Count uint16 107 | SystemUptime uint32 108 | UnixSeconds uint32 109 | SequenceNumber uint32 110 | SourceId uint32 111 | FlowSets []interface{} 112 | } 113 | 114 | type NFv9OptionsTemplateFlowSet struct { 115 | FlowSetHeader 116 | Records []NFv9OptionsTemplateRecord 117 | } 118 | 119 | type NFv9OptionsTemplateRecord struct { 120 | TemplateId uint16 121 | ScopeLength uint16 122 | OptionLength uint16 123 | Scopes []Field 124 | Options []Field 125 | } 126 | 127 | func NFv9TypeToString(typeId uint16) string { 128 | 129 | nameList := map[uint16]string{ 130 | 1: "IN_BYTES", 131 | 
2: "IN_PKTS", 132 | 3: "FLOWS", 133 | 4: "PROTOCOL", 134 | 5: "SRC_TOS", 135 | 6: "TCP_FLAGS", 136 | 7: "L4_SRC_PORT", 137 | 8: "IPV4_SRC_ADDR", 138 | 9: "SRC_MASK", 139 | 10: "INPUT_SNMP", 140 | 11: "L4_DST_PORT", 141 | 12: "IPV4_DST_ADDR", 142 | 13: "DST_MASK", 143 | 14: "OUTPUT_SNMP", 144 | 15: "IPV4_NEXT_HOP", 145 | 16: "SRC_AS", 146 | 17: "DST_AS", 147 | 18: "BGP_IPV4_NEXT_HOP", 148 | 19: "MUL_DST_PKTS", 149 | 20: "MUL_DST_BYTES", 150 | 21: "LAST_SWITCHED", 151 | 22: "FIRST_SWITCHED", 152 | 23: "OUT_BYTES", 153 | 24: "OUT_PKTS", 154 | 25: "MIN_PKT_LNGTH", 155 | 26: "MAX_PKT_LNGTH", 156 | 27: "IPV6_SRC_ADDR", 157 | 28: "IPV6_DST_ADDR", 158 | 29: "IPV6_SRC_MASK", 159 | 30: "IPV6_DST_MASK", 160 | 31: "IPV6_FLOW_LABEL", 161 | 32: "ICMP_TYPE", 162 | 33: "MUL_IGMP_TYPE", 163 | 34: "SAMPLING_INTERVAL", 164 | 35: "SAMPLING_ALGORITHM", 165 | 36: "FLOW_ACTIVE_TIMEOUT", 166 | 37: "FLOW_INACTIVE_TIMEOUT", 167 | 38: "ENGINE_TYPE", 168 | 39: "ENGINE_ID", 169 | 40: "TOTAL_BYTES_EXP", 170 | 41: "TOTAL_PKTS_EXP", 171 | 42: "TOTAL_FLOWS_EXP", 172 | 43: "*Vendor Proprietary*", 173 | 44: "IPV4_SRC_PREFIX", 174 | 45: "IPV4_DST_PREFIX", 175 | 46: "MPLS_TOP_LABEL_TYPE", 176 | 47: "MPLS_TOP_LABEL_IP_ADDR", 177 | 48: "FLOW_SAMPLER_ID", 178 | 49: "FLOW_SAMPLER_MODE", 179 | 50: "FLOW_SAMPLER_RANDOM_INTERVAL", 180 | 51: "*Vendor Proprietary*", 181 | 52: "MIN_TTL", 182 | 53: "MAX_TTL", 183 | 54: "IPV4_IDENT", 184 | 55: "DST_TOS", 185 | 56: "IN_SRC_MAC", 186 | 57: "OUT_DST_MAC", 187 | 58: "SRC_VLAN", 188 | 59: "DST_VLAN", 189 | 60: "IP_PROTOCOL_VERSION", 190 | 61: "DIRECTION", 191 | 62: "IPV6_NEXT_HOP", 192 | 63: "BPG_IPV6_NEXT_HOP", 193 | 64: "IPV6_OPTION_HEADERS", 194 | 65: "*Vendor Proprietary*", 195 | 66: "*Vendor Proprietary*", 196 | 67: "*Vendor Proprietary*", 197 | 68: "*Vendor Proprietary*", 198 | 69: "*Vendor Proprietary*", 199 | 70: "MPLS_LABEL_1", 200 | 71: "MPLS_LABEL_2", 201 | 72: "MPLS_LABEL_3", 202 | 73: "MPLS_LABEL_4", 203 | 74: "MPLS_LABEL_5", 204 | 75: "MPLS_LABEL_6", 205 | 76: "MPLS_LABEL_7", 206 | 77: "MPLS_LABEL_8", 207 | 78: "MPLS_LABEL_9", 208 | 79: "MPLS_LABEL_10", 209 | 80: "IN_DST_MAC", 210 | 81: "OUT_SRC_MAC", 211 | 82: "IF_NAME", 212 | 83: "IF_DESC", 213 | 84: "SAMPLER_NAME", 214 | 85: "IN_ PERMANENT _BYTES", 215 | 86: "IN_ PERMANENT _PKTS", 216 | 87: "*Vendor Proprietary*", 217 | 88: "FRAGMENT_OFFSET", 218 | 89: "FORWARDING STATUS", 219 | 90: "MPLS PAL RD", 220 | 91: "MPLS PREFIX LEN", 221 | 92: "SRC TRAFFIC INDEX", 222 | 93: "DST TRAFFIC INDEX", 223 | 94: "APPLICATION DESCRIPTION", 224 | 95: "APPLICATION TAG", 225 | 96: "APPLICATION NAME", 226 | 98: "postipDiffServCodePoint", 227 | 99: "replication factor", 228 | 100: "DEPRECATED", 229 | 102: "layer2packetSectionOffset", 230 | 103: "layer2packetSectionSize", 231 | 104: "layer2packetSectionData", 232 | 234: "ingressVRFID", 233 | 235: "egressVRFID", 234 | } 235 | 236 | if typeId > 104 || typeId == 0 { 237 | return "Unassigned" 238 | } else { 239 | return nameList[typeId] 240 | } 241 | } 242 | 243 | func NFv9ScopeToString(scopeId uint16) string { 244 | nameList := map[uint16]string{ 245 | 1: "System", 246 | 2: "Interface", 247 | 3: "Line Card", 248 | 4: "NetFlow Cache", 249 | 5: "Template", 250 | } 251 | 252 | if scopeId >= 1 && scopeId <= 5 { 253 | return nameList[scopeId] 254 | } else { 255 | return "Unassigned" 256 | } 257 | } 258 | 259 | func (flowSet NFv9OptionsTemplateFlowSet) String(TypeToString func(uint16) string) string { 260 | str := fmt.Sprintf(" Id %v\n", flowSet.Id) 261 | str += fmt.Sprintf(" Length: %v\n", flowSet.Length) 
262 | str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) 263 | 264 | for j, record := range flowSet.Records { 265 | str += fmt.Sprintf(" - Record %v:\n", j) 266 | str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) 267 | str += fmt.Sprintf(" ScopeLength: %v\n", record.ScopeLength) 268 | str += fmt.Sprintf(" OptionLength: %v\n", record.OptionLength) 269 | str += fmt.Sprintf(" Scopes (%v):\n", len(record.Scopes)) 270 | 271 | for k, field := range record.Scopes { 272 | str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, NFv9ScopeToString(field.Type), field.Type, field.Length) 273 | } 274 | 275 | str += fmt.Sprintf(" Options (%v):\n", len(record.Options)) 276 | 277 | for k, field := range record.Options { 278 | str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) 279 | } 280 | } 281 | 282 | return str 283 | } 284 | 285 | func (p NFv9Packet) String() string { 286 | str := "Flow Packet\n" 287 | str += "------------\n" 288 | str += fmt.Sprintf(" Version: %v\n", p.Version) 289 | str += fmt.Sprintf(" Count: %v\n", p.Count) 290 | 291 | unixSeconds := time.Unix(int64(p.UnixSeconds), 0) 292 | str += fmt.Sprintf(" SystemUptime: %v\n", p.SystemUptime) 293 | str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String()) 294 | str += fmt.Sprintf(" SequenceNumber: %v\n", p.SequenceNumber) 295 | str += fmt.Sprintf(" SourceId: %v\n", p.SourceId) 296 | str += fmt.Sprintf(" FlowSets (%v):\n", len(p.FlowSets)) 297 | 298 | for i, flowSet := range p.FlowSets { 299 | switch flowSet := flowSet.(type) { 300 | case TemplateFlowSet: 301 | str += fmt.Sprintf(" - TemplateFlowSet %v:\n", i) 302 | str += flowSet.String(NFv9TypeToString) 303 | case NFv9OptionsTemplateFlowSet: 304 | str += fmt.Sprintf(" - OptionsTemplateFlowSet %v:\n", i) 305 | str += flowSet.String(NFv9TypeToString) 306 | case DataFlowSet: 307 | str += fmt.Sprintf(" - DataFlowSet %v:\n", i) 308 | str += flowSet.String(NFv9TypeToString) 309 | case OptionsDataFlowSet: 310 | str += fmt.Sprintf(" - OptionsDataFlowSet %v:\n", i) 311 | str += flowSet.String(NFv9TypeToString, NFv9ScopeToString) 312 | default: 313 | str += fmt.Sprintf(" - (unknown type) %v: %v\n", i, flowSet) 314 | } 315 | } 316 | return str 317 | } 318 | -------------------------------------------------------------------------------- /decoders/netflow/packet.go: -------------------------------------------------------------------------------- 1 | package netflow 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // FlowSetHeader contains fields shared by all Flow Sets (DataFlowSet, 8 | // TemplateFlowSet, OptionsTemplateFlowSet). 9 | type FlowSetHeader struct { 10 | // FlowSet ID: 11 | // 0 for TemplateFlowSet 12 | // 1 for OptionsTemplateFlowSet 13 | // 256-65535 for DataFlowSet (used as TemplateId) 14 | Id uint16 15 | 16 | // The total length of this FlowSet in bytes (including padding). 17 | Length uint16 18 | } 19 | 20 | // TemplateFlowSet is a collection of templates that describe structure of Data 21 | // Records (actual NetFlow data). 22 | type TemplateFlowSet struct { 23 | FlowSetHeader 24 | 25 | // List of Template Records 26 | Records []TemplateRecord 27 | } 28 | 29 | // DataFlowSet is a collection of Data Records (actual NetFlow data) and Options 30 | // Data Records (meta data). 
31 | type DataFlowSet struct { 32 | FlowSetHeader 33 | 34 | Records []DataRecord 35 | } 36 | 37 | type OptionsDataFlowSet struct { 38 | FlowSetHeader 39 | 40 | Records []OptionsDataRecord 41 | } 42 | 43 | // TemplateRecord is a single template that describes the structure of a Flow Record 44 | // (actual NetFlow data). 45 | type TemplateRecord struct { 46 | // Each of the newly generated Template Records is given a unique 47 | // Template ID. This uniqueness is local to the Observation Domain that 48 | // generated the Template ID. Template IDs of Data FlowSets are numbered 49 | // from 256 to 65535. 50 | TemplateId uint16 51 | 52 | // Number of fields in this Template Record. Because a Template FlowSet 53 | // usually contains multiple Template Records, this field allows the 54 | // Collector to determine the end of the current Template Record and 55 | // the start of the next. 56 | FieldCount uint16 57 | 58 | // List of fields in this Template Record. 59 | Fields []Field 60 | } 61 | 62 | type DataRecord struct { 63 | Values []DataField 64 | } 65 | 66 | // OptionsDataRecord is metadata sent alongside actual NetFlow data. Combined 67 | // with OptionsTemplateRecord it can be decoded to a single data row. 68 | type OptionsDataRecord struct { 69 | // List of Scope values stored in raw format as []byte 70 | ScopesValues []DataField 71 | 72 | // List of Options values stored in raw format as []byte 73 | OptionsValues []DataField 74 | } 75 | 76 | // Field describes the type and length of a single value in a Flow Data Record. 77 | // Field does not contain the record value itself; it is just a description of 78 | // what the record value will look like. 79 | type Field struct { 80 | // A numeric value that represents the type of field. 81 | Type uint16 82 | 83 | // The length (in bytes) of the field. 84 | Length uint16 85 | } 86 | 87 | type DataField struct { 88 | // A numeric value that represents the type of field. 89 | Type uint16 90 | 91 | // The value (in bytes) of the field. 92 | Value interface{} 93 | //Value []byte 94 | } 95 | 96 | func (flowSet OptionsDataFlowSet) String(TypeToString func(uint16) string, ScopeToString func(uint16) string) string { 97 | str := fmt.Sprintf(" Id %v\n", flowSet.Id) 98 | str += fmt.Sprintf(" Length: %v\n", flowSet.Length) 99 | str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) 100 | 101 | for j, record := range flowSet.Records { 102 | str += fmt.Sprintf(" - Record %v:\n", j) 103 | str += fmt.Sprintf(" Scopes (%v):\n", len(record.ScopesValues)) 104 | 105 | for k, value := range record.ScopesValues { 106 | str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, ScopeToString(value.Type), value.Type, value.Value) 107 | } 108 | 109 | str += fmt.Sprintf(" Options (%v):\n", len(record.OptionsValues)) 110 | 111 | for k, value := range record.OptionsValues { 112 | str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value) 113 | } 114 | } 115 | 116 | return str 117 | } 118 | 119 | func (flowSet DataFlowSet) String(TypeToString func(uint16) string) string { 120 | str := fmt.Sprintf(" Id %v\n", flowSet.Id) 121 | str += fmt.Sprintf(" Length: %v\n", flowSet.Length) 122 | str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) 123 | 124 | for j, record := range flowSet.Records { 125 | str += fmt.Sprintf(" - Record %v:\n", j) 126 | str += fmt.Sprintf(" Values (%v):\n", len(record.Values)) 127 | 128 | for k, value := range record.Values { 129 | str += fmt.Sprintf(" - %v. 
%v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value) 130 | } 131 | } 132 | 133 | return str 134 | } 135 | 136 | func (flowSet TemplateFlowSet) String(TypeToString func(uint16) string) string { 137 | str := fmt.Sprintf(" Id %v\n", flowSet.Id) 138 | str += fmt.Sprintf(" Length: %v\n", flowSet.Length) 139 | str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records)) 140 | 141 | for j, record := range flowSet.Records { 142 | str += fmt.Sprintf(" - %v. Record:\n", j) 143 | str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId) 144 | str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount) 145 | str += fmt.Sprintf(" Fields (%v):\n", len(record.Fields)) 146 | 147 | for k, field := range record.Fields { 148 | str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(field.Type), field.Type, field.Length) 149 | } 150 | } 151 | 152 | return str 153 | } 154 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/netflow.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | 7 | "github.com/cloudflare/goflow/v3/decoders/utils" 8 | ) 9 | 10 | const MAX_UDP_PKT_SIZE = 65535 11 | const FLOW_SIZE = 48 12 | const MAX_FLOWS_PER_PACKET = MAX_UDP_PKT_SIZE / FLOW_SIZE 13 | 14 | type ErrorVersion struct { 15 | version uint16 16 | } 17 | 18 | func NewErrorVersion(version uint16) *ErrorVersion { 19 | return &ErrorVersion{ 20 | version: version, 21 | } 22 | } 23 | 24 | func (e *ErrorVersion) Error() string { 25 | return fmt.Sprintf("Unknown NetFlow version %v (only decodes v5)", e.version) 26 | } 27 | 28 | func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { 29 | var version uint16 30 | utils.BinaryDecoder(payload, &version) 31 | packet := PacketNetFlowV5{} 32 | if version == 5 { 33 | packet.Version = version 34 | 35 | utils.BinaryDecoder(payload, 36 | &(packet.Count), 37 | &(packet.SysUptime), 38 | &(packet.UnixSecs), 39 | &(packet.UnixNSecs), 40 | &(packet.FlowSequence), 41 | &(packet.EngineType), 42 | &(packet.EngineId), 43 | &(packet.SamplingInterval), 44 | ) 45 | 46 | if packet.Count > MAX_FLOWS_PER_PACKET { 47 | return nil, fmt.Errorf("invalid amount of flows: %d", packet.Count) 48 | } 49 | 50 | packet.Records = make([]RecordsNetFlowV5, int(packet.Count)) 51 | for i := 0; i < int(packet.Count) && payload.Len() >= FLOW_SIZE; i++ { 52 | record := RecordsNetFlowV5{} 53 | utils.BinaryDecoder(payload, &record) 54 | packet.Records[i] = record 55 | } 56 | 57 | return packet, nil 58 | } else { 59 | return nil, NewErrorVersion(version) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/netflow_test.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDecodeNetFlowV5(t *testing.T) { 11 | data := []byte{ 12 | 0x00, 0x05, 0x00, 0x06, 0x00, 0x82, 0xc3, 0x48, 0x5b, 0xcd, 0xba, 0x1b, 0x05, 0x97, 0x6d, 0xc7, 13 | 0x00, 0x00, 0x64, 0x3d, 0x08, 0x08, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x79, 0x0a, 0x80, 0x02, 0x01, 14 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x02, 0x4e, 15 | 0x00, 0x82, 0x9b, 0x8c, 0x00, 0x82, 0x9b, 0x90, 0x1f, 0x90, 0xb9, 0x18, 0x00, 0x1b, 0x06, 0x00, 16 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x77, 0x0a, 0x81, 
0x02, 0x01, 17 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x94, 18 | 0x00, 0x82, 0x95, 0xa9, 0x00, 0x82, 0x9a, 0xfb, 0x1f, 0x90, 0xc1, 0x2c, 0x00, 0x12, 0x06, 0x00, 19 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x81, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x77, 20 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xc2, 21 | 0x00, 0x82, 0x95, 0xa9, 0x00, 0x82, 0x9a, 0xfc, 0xc1, 0x2c, 0x1f, 0x90, 0x00, 0x16, 0x06, 0x00, 22 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x79, 23 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x01, 0xf1, 24 | 0x00, 0x82, 0x9b, 0x8c, 0x00, 0x82, 0x9b, 0x8f, 0xb9, 0x18, 0x1f, 0x90, 0x00, 0x1b, 0x06, 0x00, 25 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x79, 26 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x02, 0x2e, 27 | 0x00, 0x82, 0x9b, 0x90, 0x00, 0x82, 0x9b, 0x9d, 0xb9, 0x1a, 0x1f, 0x90, 0x00, 0x1b, 0x06, 0x00, 28 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x79, 0x0a, 0x80, 0x02, 0x01, 29 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x0b, 0xac, 30 | 0x00, 0x82, 0x9b, 0x90, 0x00, 0x82, 0x9b, 0x9d, 0x1f, 0x90, 0xb9, 0x1a, 0x00, 0x1b, 0x06, 0x00, 31 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 32 | } 33 | buf := bytes.NewBuffer(data) 34 | 35 | dec, err := DecodeMessage(buf) 36 | assert.Nil(t, err) 37 | assert.NotNil(t, dec) 38 | decNfv5 := dec.(PacketNetFlowV5) 39 | assert.Equal(t, uint16(5), decNfv5.Version) 40 | assert.Equal(t, uint16(9), decNfv5.Records[0].Input) 41 | } 42 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/packet.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "net" 7 | "time" 8 | ) 9 | 10 | type PacketNetFlowV5 struct { 11 | Version uint16 12 | Count uint16 13 | SysUptime uint32 14 | UnixSecs uint32 15 | UnixNSecs uint32 16 | FlowSequence uint32 17 | EngineType uint8 18 | EngineId uint8 19 | SamplingInterval uint16 20 | Records []RecordsNetFlowV5 21 | } 22 | 23 | type RecordsNetFlowV5 struct { 24 | SrcAddr uint32 25 | DstAddr uint32 26 | NextHop uint32 27 | Input uint16 28 | Output uint16 29 | DPkts uint32 30 | DOctets uint32 31 | First uint32 32 | Last uint32 33 | SrcPort uint16 34 | DstPort uint16 35 | Pad1 byte 36 | TCPFlags uint8 37 | Proto uint8 38 | Tos uint8 39 | SrcAS uint16 40 | DstAS uint16 41 | SrcMask uint8 42 | DstMask uint8 43 | Pad2 uint16 44 | } 45 | 46 | func (p PacketNetFlowV5) String() string { 47 | str := "NetFlow v5 Packet\n" 48 | str += "-----------------\n" 49 | str += fmt.Sprintf(" Version: %v\n", p.Version) 50 | str += fmt.Sprintf(" Count: %v\n", p.Count) 51 | 52 | unixSeconds := time.Unix(int64(p.UnixSecs), int64(p.UnixNSecs)) 53 | str += fmt.Sprintf(" SystemUptime: %v\n", time.Duration(p.SysUptime)*time.Millisecond) 54 | str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String()) 55 | str += fmt.Sprintf(" FlowSequence: %v\n", p.FlowSequence) 56 | str += fmt.Sprintf(" EngineType: %v\n", p.EngineType) 57 | str += fmt.Sprintf(" EngineId: %v\n", p.EngineId) 58 | str += fmt.Sprintf(" SamplingInterval: %v\n", p.SamplingInterval) 59 | str += fmt.Sprintf(" Records (%v):\n", len(p.Records)) 60 | 61 | for i, record := 
range p.Records { 62 | str += fmt.Sprintf(" Record %v:\n", i) 63 | str += record.String() 64 | } 65 | return str 66 | } 67 | 68 | func (r RecordsNetFlowV5) String() string { 69 | srcaddr := make(net.IP, 4) 70 | binary.BigEndian.PutUint32(srcaddr, r.SrcAddr) 71 | dstaddr := make(net.IP, 4) 72 | binary.BigEndian.PutUint32(dstaddr, r.DstAddr) 73 | nexthop := make(net.IP, 4) 74 | binary.BigEndian.PutUint32(nexthop, r.NextHop) 75 | 76 | str := fmt.Sprintf(" SrcAddr: %v\n", srcaddr.String()) 77 | str += fmt.Sprintf(" DstAddr: %v\n", dstaddr.String()) 78 | str += fmt.Sprintf(" NextHop: %v\n", nexthop.String()) 79 | str += fmt.Sprintf(" Input: %v\n", r.Input) 80 | str += fmt.Sprintf(" Output: %v\n", r.Output) 81 | str += fmt.Sprintf(" DPkts: %v\n", r.DPkts) 82 | str += fmt.Sprintf(" DOctets: %v\n", r.DOctets) 83 | str += fmt.Sprintf(" First: %v\n", time.Duration(r.First)*time.Millisecond) 84 | str += fmt.Sprintf(" Last: %v\n", time.Duration(r.Last)*time.Millisecond) 85 | str += fmt.Sprintf(" SrcPort: %v\n", r.SrcPort) 86 | str += fmt.Sprintf(" DstPort: %v\n", r.DstPort) 87 | str += fmt.Sprintf(" TCPFlags: %v\n", r.TCPFlags) 88 | str += fmt.Sprintf(" Proto: %v\n", r.Proto) 89 | str += fmt.Sprintf(" Tos: %v\n", r.Tos) 90 | str += fmt.Sprintf(" SrcAS: %v\n", r.SrcAS) 91 | str += fmt.Sprintf(" DstAS: %v\n", r.DstAS) 92 | str += fmt.Sprintf(" SrcMask: %v\n", r.SrcMask) 93 | str += fmt.Sprintf(" DstMask: %v\n", r.DstMask) 94 | 95 | return str 96 | } 97 | -------------------------------------------------------------------------------- /decoders/sflow/datastructure.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | type SampledHeader struct { 4 | Protocol uint32 5 | FrameLength uint32 6 | Stripped uint32 7 | OriginalLength uint32 8 | HeaderData []byte 9 | } 10 | 11 | type SampledEthernet struct { 12 | Length uint32 13 | SrcMac []byte 14 | DstMac []byte 15 | EthType uint32 16 | } 17 | 18 | type SampledIP_Base struct { 19 | Length uint32 20 | Protocol uint32 21 | SrcIP []byte 22 | DstIP []byte 23 | SrcPort uint32 24 | DstPort uint32 25 | TcpFlags uint32 26 | } 27 | 28 | type SampledIPv4 struct { 29 | Base SampledIP_Base 30 | Tos uint32 31 | } 32 | 33 | type SampledIPv6 struct { 34 | Base SampledIP_Base 35 | Priority uint32 36 | } 37 | 38 | type ExtendedSwitch struct { 39 | SrcVlan uint32 40 | SrcPriority uint32 41 | DstVlan uint32 42 | DstPriority uint32 43 | } 44 | 45 | type ExtendedRouter struct { 46 | NextHopIPVersion uint32 47 | NextHop []byte 48 | SrcMaskLen uint32 49 | DstMaskLen uint32 50 | } 51 | 52 | type ExtendedGateway struct { 53 | NextHopIPVersion uint32 54 | NextHop []byte 55 | AS uint32 56 | SrcAS uint32 57 | SrcPeerAS uint32 58 | ASDestinations uint32 59 | ASPathType uint32 60 | ASPathLength uint32 61 | ASPath []uint32 62 | CommunitiesLength uint32 63 | Communities []uint32 64 | LocalPref uint32 65 | } 66 | 67 | type IfCounters struct { 68 | IfIndex uint32 69 | IfType uint32 70 | IfSpeed uint64 71 | IfDirection uint32 72 | IfStatus uint32 73 | IfInOctets uint64 74 | IfInUcastPkts uint32 75 | IfInMulticastPkts uint32 76 | IfInBroadcastPkts uint32 77 | IfInDiscards uint32 78 | IfInErrors uint32 79 | IfInUnknownProtos uint32 80 | IfOutOctets uint64 81 | IfOutUcastPkts uint32 82 | IfOutMulticastPkts uint32 83 | IfOutBroadcastPkts uint32 84 | IfOutDiscards uint32 85 | IfOutErrors uint32 86 | IfPromiscuousMode uint32 87 | } 88 | 89 | type EthernetCounters struct { 90 | Dot3StatsAlignmentErrors uint32 91 | Dot3StatsFCSErrors uint32 92 | 
Dot3StatsSingleCollisionFrames uint32 93 | Dot3StatsMultipleCollisionFrames uint32 94 | Dot3StatsSQETestErrors uint32 95 | Dot3StatsDeferredTransmissions uint32 96 | Dot3StatsLateCollisions uint32 97 | Dot3StatsExcessiveCollisions uint32 98 | Dot3StatsInternalMacTransmitErrors uint32 99 | Dot3StatsCarrierSenseErrors uint32 100 | Dot3StatsFrameTooLongs uint32 101 | Dot3StatsInternalMacReceiveErrors uint32 102 | Dot3StatsSymbolErrors uint32 103 | } 104 | -------------------------------------------------------------------------------- /decoders/sflow/packet.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | type Packet struct { 4 | Version uint32 5 | IPVersion uint32 6 | AgentIP []byte 7 | SubAgentId uint32 8 | SequenceNumber uint32 9 | Uptime uint32 10 | SamplesCount uint32 11 | Samples []interface{} 12 | } 13 | 14 | type SampleHeader struct { 15 | Format uint32 16 | Length uint32 17 | 18 | SampleSequenceNumber uint32 19 | SourceIdType uint32 20 | SourceIdValue uint32 21 | } 22 | 23 | type FlowSample struct { 24 | Header SampleHeader 25 | 26 | SamplingRate uint32 27 | SamplePool uint32 28 | Drops uint32 29 | Input uint32 30 | Output uint32 31 | FlowRecordsCount uint32 32 | Records []FlowRecord 33 | } 34 | 35 | type CounterSample struct { 36 | Header SampleHeader 37 | 38 | CounterRecordsCount uint32 39 | Records []CounterRecord 40 | } 41 | 42 | type ExpandedFlowSample struct { 43 | Header SampleHeader 44 | 45 | SamplingRate uint32 46 | SamplePool uint32 47 | Drops uint32 48 | InputIfFormat uint32 49 | InputIfValue uint32 50 | OutputIfFormat uint32 51 | OutputIfValue uint32 52 | FlowRecordsCount uint32 53 | Records []FlowRecord 54 | } 55 | 56 | type RecordHeader struct { 57 | DataFormat uint32 58 | Length uint32 59 | } 60 | 61 | type FlowRecord struct { 62 | Header RecordHeader 63 | Data interface{} 64 | } 65 | 66 | type CounterRecord struct { 67 | Header RecordHeader 68 | Data interface{} 69 | } 70 | -------------------------------------------------------------------------------- /decoders/sflow/sflow.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | 7 | "github.com/cloudflare/goflow/v3/decoders/utils" 8 | ) 9 | 10 | const ( 11 | FORMAT_EXT_SWITCH = 1001 12 | FORMAT_EXT_ROUTER = 1002 13 | FORMAT_EXT_GATEWAY = 1003 14 | FORMAT_RAW_PKT = 1 15 | FORMAT_ETH = 2 16 | FORMAT_IPV4 = 3 17 | FORMAT_IPV6 = 4 18 | 19 | // The following max constants control what goflow considers reasonable amounts of data objects reported in a single packet. 20 | // This is to prevent an attacker from making us allocate arbitrary amounts of memory and let goflow be killed by the OOM killer. 21 | // sflow samples are reported in UDP packets which have a maximum PDU size of 64Kib. The following numbers are derived from that fact. 
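// (Editor's note, added for clarity; illustrative arithmetic only, not part of the original file.)
// The derived limits below are integer divisions of the 65535-byte UDP payload bound by the
// per-object sizes declared next: MAX_SAMPLES_PER_PACKET = 65535 / 128 = 511,
// MAX_FLOW_RECORDS = 65535 / 8 = 8191, MAX_AS_PATH_LENGTH = 65535 / 4 = 16383,
// MAX_COMMUNITIES_LENGTH = 65535 / 4 = 16383, so a single datagram can never force more
// than a few thousand element allocations per slice.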
22 | MAX_UDP_PKT_SIZE = 65535 23 | USUAL_SAMPLED_HEADER_SIZE = 128 24 | FLOW_RECORD_HEADER_SIZE = 8 25 | ASN_SIZE = 4 26 | COMMUNITY_SIZE = 4 27 | MAX_SAMPLES_PER_PACKET = MAX_UDP_PKT_SIZE / USUAL_SAMPLED_HEADER_SIZE 28 | MAX_FLOW_RECORDS = MAX_UDP_PKT_SIZE / FLOW_RECORD_HEADER_SIZE 29 | MAX_AS_PATH_LENGTH = MAX_UDP_PKT_SIZE / ASN_SIZE 30 | MAX_COMMUNITIES_LENGTH = MAX_UDP_PKT_SIZE / COMMUNITY_SIZE 31 | ) 32 | 33 | type ErrorDecodingSFlow struct { 34 | msg string 35 | } 36 | 37 | func NewErrorDecodingSFlow(msg string) *ErrorDecodingSFlow { 38 | return &ErrorDecodingSFlow{ 39 | msg: msg, 40 | } 41 | } 42 | 43 | func (e *ErrorDecodingSFlow) Error() string { 44 | return fmt.Sprintf("Error decoding sFlow: %v", e.msg) 45 | } 46 | 47 | type ErrorDataFormat struct { 48 | dataformat uint32 49 | } 50 | 51 | func NewErrorDataFormat(dataformat uint32) *ErrorDataFormat { 52 | return &ErrorDataFormat{ 53 | dataformat: dataformat, 54 | } 55 | } 56 | 57 | func (e *ErrorDataFormat) Error() string { 58 | return fmt.Sprintf("Unknown data format %v", e.dataformat) 59 | } 60 | 61 | type ErrorIPVersion struct { 62 | version uint32 63 | } 64 | 65 | func NewErrorIPVersion(version uint32) *ErrorIPVersion { 66 | return &ErrorIPVersion{ 67 | version: version, 68 | } 69 | } 70 | 71 | func (e *ErrorIPVersion) Error() string { 72 | return fmt.Sprintf("Unknown IP version: %v", e.version) 73 | } 74 | 75 | type ErrorVersion struct { 76 | version uint32 77 | } 78 | 79 | func NewErrorVersion(version uint32) *ErrorVersion { 80 | return &ErrorVersion{ 81 | version: version, 82 | } 83 | } 84 | 85 | func (e *ErrorVersion) Error() string { 86 | return fmt.Sprintf("Unknown sFlow version %v (supported v5)", e.version) 87 | } 88 | 89 | func DecodeCounterRecord(header *RecordHeader, payload *bytes.Buffer) (CounterRecord, error) { 90 | counterRecord := CounterRecord{ 91 | Header: *header, 92 | } 93 | switch (*header).DataFormat { 94 | case 1: 95 | ifCounters := IfCounters{} 96 | utils.BinaryDecoder(payload, &ifCounters) 97 | counterRecord.Data = ifCounters 98 | case 2: 99 | ethernetCounters := EthernetCounters{} 100 | utils.BinaryDecoder(payload, ðernetCounters) 101 | counterRecord.Data = ethernetCounters 102 | default: 103 | return counterRecord, NewErrorDataFormat((*header).DataFormat) 104 | } 105 | 106 | return counterRecord, nil 107 | } 108 | 109 | func DecodeIP(payload *bytes.Buffer) (uint32, []byte, error) { 110 | var ipVersion uint32 111 | utils.BinaryDecoder(payload, &ipVersion) 112 | var ip []byte 113 | if ipVersion == 1 { 114 | ip = make([]byte, 4) 115 | } else if ipVersion == 2 { 116 | ip = make([]byte, 16) 117 | } else { 118 | return ipVersion, ip, NewErrorIPVersion(ipVersion) 119 | } 120 | if payload.Len() >= len(ip) { 121 | utils.BinaryDecoder(payload, &ip) 122 | } else { 123 | return ipVersion, ip, NewErrorDecodingSFlow(fmt.Sprintf("Not enough data: %v, needs %v.", payload.Len(), len(ip))) 124 | } 125 | return ipVersion, ip, nil 126 | } 127 | 128 | func DecodeFlowRecord(header *RecordHeader, payload *bytes.Buffer) (FlowRecord, error) { 129 | flowRecord := FlowRecord{ 130 | Header: *header, 131 | } 132 | switch (*header).DataFormat { 133 | case FORMAT_EXT_SWITCH: 134 | extendedSwitch := ExtendedSwitch{} 135 | err := utils.BinaryDecoder(payload, &extendedSwitch) 136 | if err != nil { 137 | return flowRecord, err 138 | } 139 | flowRecord.Data = extendedSwitch 140 | case FORMAT_RAW_PKT: 141 | sampledHeader := SampledHeader{} 142 | err := utils.BinaryDecoder(payload, &(sampledHeader.Protocol), &(sampledHeader.FrameLength), 
&(sampledHeader.Stripped), &(sampledHeader.OriginalLength)) 143 | if err != nil { 144 | return flowRecord, err 145 | } 146 | sampledHeader.HeaderData = payload.Bytes() 147 | flowRecord.Data = sampledHeader 148 | case FORMAT_IPV4: 149 | sampledIPBase := SampledIP_Base{ 150 | SrcIP: make([]byte, 4), 151 | DstIP: make([]byte, 4), 152 | } 153 | err := utils.BinaryDecoder(payload, &sampledIPBase) 154 | if err != nil { 155 | return flowRecord, err 156 | } 157 | sampledIPv4 := SampledIPv4{ 158 | Base: sampledIPBase, 159 | } 160 | err = utils.BinaryDecoder(payload, &(sampledIPv4.Tos)) 161 | if err != nil { 162 | return flowRecord, err 163 | } 164 | flowRecord.Data = sampledIPv4 165 | case FORMAT_IPV6: 166 | sampledIPBase := SampledIP_Base{ 167 | SrcIP: make([]byte, 16), 168 | DstIP: make([]byte, 16), 169 | } 170 | err := utils.BinaryDecoder(payload, &sampledIPBase) 171 | if err != nil { 172 | return flowRecord, err 173 | } 174 | sampledIPv6 := SampledIPv6{ 175 | Base: sampledIPBase, 176 | } 177 | err = utils.BinaryDecoder(payload, &(sampledIPv6.Priority)) 178 | if err != nil { 179 | return flowRecord, err 180 | } 181 | flowRecord.Data = sampledIPv6 182 | case FORMAT_EXT_ROUTER: 183 | extendedRouter := ExtendedRouter{} 184 | 185 | ipVersion, ip, err := DecodeIP(payload) 186 | if err != nil { 187 | return flowRecord, err 188 | } 189 | extendedRouter.NextHopIPVersion = ipVersion 190 | extendedRouter.NextHop = ip 191 | err = utils.BinaryDecoder(payload, &(extendedRouter.SrcMaskLen), &(extendedRouter.DstMaskLen)) 192 | if err != nil { 193 | return flowRecord, err 194 | } 195 | flowRecord.Data = extendedRouter 196 | case FORMAT_EXT_GATEWAY: 197 | extendedGateway := ExtendedGateway{} 198 | ipVersion, ip, err := DecodeIP(payload) 199 | if err != nil { 200 | return flowRecord, err 201 | } 202 | extendedGateway.NextHopIPVersion = ipVersion 203 | extendedGateway.NextHop = ip 204 | err = utils.BinaryDecoder(payload, &(extendedGateway.AS), &(extendedGateway.SrcAS), &(extendedGateway.SrcPeerAS), 205 | &(extendedGateway.ASDestinations)) 206 | if err != nil { 207 | return flowRecord, err 208 | } 209 | asPath := make([]uint32, 0) 210 | if extendedGateway.ASDestinations != 0 { 211 | err := utils.BinaryDecoder(payload, &(extendedGateway.ASPathType), &(extendedGateway.ASPathLength)) 212 | if err != nil { 213 | return flowRecord, err 214 | } 215 | if int(extendedGateway.ASPathLength) > payload.Len()-4 { 216 | return flowRecord, fmt.Errorf("invalid AS path length: %v", extendedGateway.ASPathLength) 217 | } 218 | if extendedGateway.ASPathLength > MAX_AS_PATH_LENGTH { 219 | return flowRecord, fmt.Errorf("invalid AS path length: %d", extendedGateway.ASPathLength) 220 | } 221 | asPath = make([]uint32, extendedGateway.ASPathLength) 222 | if len(asPath) > 0 { 223 | err = utils.BinaryDecoder(payload, asPath) 224 | if err != nil { 225 | return flowRecord, err 226 | } 227 | } 228 | } 229 | extendedGateway.ASPath = asPath 230 | 231 | err = utils.BinaryDecoder(payload, &(extendedGateway.CommunitiesLength)) 232 | if err != nil { 233 | return flowRecord, err 234 | } 235 | if int(extendedGateway.CommunitiesLength) > payload.Len()-4 { 236 | return flowRecord, fmt.Errorf("invalid Communities length: %v", extendedGateway.CommunitiesLength) 237 | } 238 | 239 | if extendedGateway.CommunitiesLength > MAX_COMMUNITIES_LENGTH { 240 | return flowRecord, fmt.Errorf("invalid communities length: %d", extendedGateway.CommunitiesLength) 241 | } 242 | communities := make([]uint32, extendedGateway.CommunitiesLength) 243 | if len(communities) > 0 { 
244 | err = utils.BinaryDecoder(payload, communities) 245 | if err != nil { 246 | return flowRecord, err 247 | } 248 | } 249 | err = utils.BinaryDecoder(payload, &(extendedGateway.LocalPref)) 250 | if err != nil { 251 | return flowRecord, err 252 | } 253 | extendedGateway.Communities = communities 254 | 255 | flowRecord.Data = extendedGateway 256 | default: 257 | return flowRecord, fmt.Errorf("unknown data format %v", (*header).DataFormat) 258 | } 259 | return flowRecord, nil 260 | } 261 | 262 | func DecodeSample(header *SampleHeader, payload *bytes.Buffer) (interface{}, error) { 263 | format := (*header).Format 264 | var sample interface{} 265 | 266 | err := utils.BinaryDecoder(payload, &((*header).SampleSequenceNumber)) 267 | if err != nil { 268 | return sample, err 269 | } 270 | if format == FORMAT_RAW_PKT || format == FORMAT_ETH { 271 | var sourceId uint32 272 | err = utils.BinaryDecoder(payload, &sourceId) 273 | if err != nil { 274 | return sample, err 275 | } 276 | 277 | (*header).SourceIdType = sourceId >> 24 278 | (*header).SourceIdValue = sourceId & 0x00ffffff 279 | } else if format == FORMAT_IPV4 || format == FORMAT_IPV6 { 280 | err = utils.BinaryDecoder(payload, &((*header).SourceIdType), &((*header).SourceIdValue)) 281 | if err != nil { 282 | return sample, err 283 | } 284 | } else { 285 | return nil, NewErrorDataFormat(format) 286 | } 287 | 288 | var recordsCount uint32 289 | var flowSample FlowSample 290 | var counterSample CounterSample 291 | var expandedFlowSample ExpandedFlowSample 292 | if format == FORMAT_RAW_PKT { 293 | flowSample = FlowSample{ 294 | Header: *header, 295 | } 296 | err = utils.BinaryDecoder(payload, &(flowSample.SamplingRate), &(flowSample.SamplePool), 297 | &(flowSample.Drops), &(flowSample.Input), &(flowSample.Output), &(flowSample.FlowRecordsCount)) 298 | if err != nil { 299 | return sample, err 300 | } 301 | recordsCount = flowSample.FlowRecordsCount 302 | if recordsCount > MAX_FLOW_RECORDS { 303 | return flowSample, fmt.Errorf("invalid number of flows records: %d", recordsCount) 304 | } 305 | flowSample.Records = make([]FlowRecord, recordsCount) 306 | sample = flowSample 307 | } else if format == FORMAT_ETH || format == FORMAT_IPV6 { 308 | err = utils.BinaryDecoder(payload, &recordsCount) 309 | if err != nil { 310 | return sample, err 311 | } 312 | counterSample = CounterSample{ 313 | Header: *header, 314 | CounterRecordsCount: recordsCount, 315 | } 316 | 317 | if recordsCount > MAX_SAMPLES_PER_PACKET { 318 | return flowSample, fmt.Errorf("invalid number of samples: %d", recordsCount) 319 | } 320 | counterSample.Records = make([]CounterRecord, recordsCount) 321 | sample = counterSample 322 | } else if format == FORMAT_IPV4 { 323 | expandedFlowSample = ExpandedFlowSample{ 324 | Header: *header, 325 | } 326 | err = utils.BinaryDecoder(payload, &(expandedFlowSample.SamplingRate), &(expandedFlowSample.SamplePool), 327 | &(expandedFlowSample.Drops), &(expandedFlowSample.InputIfFormat), &(expandedFlowSample.InputIfValue), 328 | &(expandedFlowSample.OutputIfFormat), &(expandedFlowSample.OutputIfValue), &(expandedFlowSample.FlowRecordsCount)) 329 | if err != nil { 330 | return sample, err 331 | } 332 | recordsCount = expandedFlowSample.FlowRecordsCount 333 | expandedFlowSample.Records = make([]FlowRecord, recordsCount) 334 | sample = expandedFlowSample 335 | } 336 | for i := 0; i < int(recordsCount) && payload.Len() >= 8; i++ { 337 | recordHeader := RecordHeader{} 338 | err = utils.BinaryDecoder(payload, &(recordHeader.DataFormat), &(recordHeader.Length)) 
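// (Editor's note, descriptive comment added for clarity; not part of the original file.)
// The (DataFormat, Length) pair read just above describes the next record in this sample:
// Length bounds the bytes handed to the record decoder via payload.Next below, and
// DataFormat tells DecodeFlowRecord or DecodeCounterRecord which structure to parse
// from those bytes.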
339 | if err != nil { 340 | return sample, err 341 | } 342 | if int(recordHeader.Length) > payload.Len() { 343 | break 344 | } 345 | recordReader := bytes.NewBuffer(payload.Next(int(recordHeader.Length))) 346 | if format == FORMAT_RAW_PKT || format == FORMAT_IPV4 { 347 | record, err := DecodeFlowRecord(&recordHeader, recordReader) 348 | if err != nil { 349 | continue 350 | } 351 | if format == FORMAT_RAW_PKT { 352 | flowSample.Records[i] = record 353 | } else if format == FORMAT_IPV4 { 354 | expandedFlowSample.Records[i] = record 355 | } 356 | } else if format == FORMAT_ETH || format == FORMAT_IPV6 { 357 | record, err := DecodeCounterRecord(&recordHeader, recordReader) 358 | if err != nil { 359 | continue 360 | } 361 | counterSample.Records[i] = record 362 | } 363 | } 364 | return sample, nil 365 | } 366 | 367 | func DecodeMessage(payload *bytes.Buffer) (interface{}, error) { 368 | var version uint32 369 | err := utils.BinaryDecoder(payload, &version) 370 | if err != nil { 371 | return nil, err 372 | } 373 | packetV5 := Packet{} 374 | if version == 5 { 375 | packetV5.Version = version 376 | err = utils.BinaryDecoder(payload, &(packetV5.IPVersion)) 377 | if err != nil { 378 | return packetV5, err 379 | } 380 | var ip []byte 381 | if packetV5.IPVersion == 1 { 382 | ip = make([]byte, 4) 383 | utils.BinaryDecoder(payload, ip) 384 | } else if packetV5.IPVersion == 2 { 385 | ip = make([]byte, 16) 386 | err = utils.BinaryDecoder(payload, ip) 387 | if err != nil { 388 | return packetV5, err 389 | } 390 | } else { 391 | return nil, NewErrorIPVersion(packetV5.IPVersion) 392 | } 393 | 394 | packetV5.AgentIP = ip 395 | err = utils.BinaryDecoder(payload, &(packetV5.SubAgentId), &(packetV5.SequenceNumber), &(packetV5.Uptime), &(packetV5.SamplesCount)) 396 | if err != nil { 397 | return packetV5, err 398 | } 399 | packetV5.Samples = make([]interface{}, int(packetV5.SamplesCount)) 400 | for i := 0; i < int(packetV5.SamplesCount) && payload.Len() >= 8; i++ { 401 | header := SampleHeader{} 402 | err = utils.BinaryDecoder(payload, &(header.Format), &(header.Length)) 403 | if err != nil { 404 | return packetV5, err 405 | } 406 | if int(header.Length) > payload.Len() { 407 | break 408 | } 409 | sampleReader := bytes.NewBuffer(payload.Next(int(header.Length))) 410 | 411 | sample, err := DecodeSample(&header, sampleReader) 412 | if err != nil { 413 | continue 414 | } else { 415 | packetV5.Samples[i] = sample 416 | } 417 | } 418 | 419 | return packetV5, nil 420 | } else { 421 | return nil, NewErrorVersion(version) 422 | } 423 | } 424 | -------------------------------------------------------------------------------- /decoders/sflow/sflow_test.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestSFlowDecode(t *testing.T) { 11 | data := []byte{ 12 | 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0xac, 0x10, 0x00, 0x11, 0x00, 0x00, 0x00, 0x01, 13 | 0x00, 0x00, 0x01, 0xaa, 0x67, 0xee, 0xaa, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 14 | 0x00, 0x00, 0x00, 0x88, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x04, 0x13, 0x00, 0x00, 0x08, 0x00, 15 | 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xaa, 0x00, 0x00, 0x04, 0x13, 16 | 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, 0x01, 17 | 0x00, 0x00, 0x00, 0x52, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x4e, 0x00, 0xff, 0x12, 0x34, 18 | 0x35, 0x1b, 
0xff, 0xab, 0xcd, 0xef, 0xab, 0x64, 0x81, 0x00, 0x00, 0x20, 0x08, 0x00, 0x45, 0x00, 19 | 0x00, 0x3c, 0x5c, 0x07, 0x00, 0x00, 0x7c, 0x01, 0x48, 0xa0, 0xac, 0x10, 0x20, 0xfe, 0xac, 0x10, 20 | 0x20, 0xf1, 0x08, 0x00, 0x97, 0x61, 0xa9, 0x48, 0x0c, 0xb2, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 21 | 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 22 | 0x77, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x00, 0x00, 23 | } 24 | buf := bytes.NewBuffer(data) 25 | _, err := DecodeMessage(buf) 26 | assert.Nil(t, err) 27 | } 28 | 29 | func TestExpandedSFlowDecode(t *testing.T) { 30 | data := getExpandedSFlowDecode() 31 | 32 | buf := bytes.NewBuffer(data) 33 | _, err := DecodeMessage(buf) 34 | assert.Nil(t, err) 35 | } 36 | 37 | func getExpandedSFlowDecode() []byte { 38 | return []byte{ 39 | 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x03, 0x04, 0x00, 0x00, 0x00, 0x00, 40 | 0x0f, 0xa7, 0x72, 0xc2, 0x0f, 0x76, 0x73, 0x48, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x03, 41 | 0x00, 0x00, 0x00, 0xdc, 0x20, 0x90, 0x93, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa4, 42 | 0x00, 0x00, 0x3f, 0xff, 0x04, 0x38, 0xec, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 43 | 0x00, 0x0f, 0x42, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x52, 0x00, 0x00, 0x00, 0x02, 44 | 0x00, 0x00, 0x03, 0xe9, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 45 | 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x90, 46 | 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x05, 0xea, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 47 | 0x08, 0xec, 0xf5, 0x2a, 0x8f, 0xbe, 0x74, 0x83, 0xef, 0x30, 0x65, 0xb7, 0x81, 0x00, 0x00, 0x1e, 48 | 0x08, 0x00, 0x45, 0x00, 0x05, 0xd4, 0x3b, 0xba, 0x40, 0x00, 0x3f, 0x06, 0xbd, 0x99, 0xb9, 0x3b, 49 | 0xdc, 0x93, 0x58, 0xee, 0x4e, 0x13, 0x01, 0xbb, 0xcf, 0xd6, 0x45, 0xb7, 0x1b, 0xc0, 0xd5, 0xb8, 50 | 0xff, 0x24, 0x80, 0x10, 0x00, 0x04, 0x01, 0x55, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0xc8, 0xc8, 51 | 0x56, 0x95, 0x00, 0x34, 0xf6, 0x0f, 0xe8, 0x1d, 0xbd, 0x41, 0x45, 0x92, 0x4c, 0xc2, 0x71, 0xe0, 52 | 0xeb, 0x2e, 0x35, 0x17, 0x7c, 0x2f, 0xb9, 0xa8, 0x05, 0x92, 0x0e, 0x03, 0x1b, 0x50, 0x53, 0x0c, 53 | 0xe5, 0x7d, 0x86, 0x75, 0x32, 0x8a, 0xcc, 0xe2, 0x26, 0xa8, 0x90, 0x21, 0x78, 0xbf, 0xce, 0x7a, 54 | 0xf8, 0xb5, 0x8d, 0x48, 0xe4, 0xaa, 0xfe, 0x26, 0x34, 0xe0, 0xad, 0xb9, 0xec, 0x79, 0x74, 0xd8, 55 | 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xdc, 0x20, 0x90, 0x93, 0x27, 0x00, 0x00, 0x00, 0x00, 56 | 0x00, 0x0f, 0x42, 0xa4, 0x00, 0x00, 0x3f, 0xff, 0x04, 0x39, 0x2c, 0xd9, 0x00, 0x00, 0x00, 0x00, 57 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x4b, 58 | 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x03, 0xe9, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x17, 59 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 60 | 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x05, 0xca, 0x00, 0x00, 0x00, 0x04, 61 | 0x00, 0x00, 0x00, 0x80, 0xda, 0xb1, 0x22, 0xfb, 0xd9, 0xcf, 0x74, 0x83, 0xef, 0x30, 0x65, 0xb7, 62 | 0x81, 0x00, 0x00, 0x17, 0x08, 0x00, 0x45, 0x00, 0x05, 0xb4, 0xe2, 0x28, 0x40, 0x00, 0x3f, 0x06, 63 | 0x15, 0x0f, 0xc3, 0xb5, 0xaf, 0x26, 0x05, 0x92, 0xc6, 0x9e, 0x00, 0x50, 0x0f, 0xb3, 0x35, 0x8e, 64 | 0x36, 0x02, 0xa1, 0x01, 0xed, 0xb0, 0x80, 0x10, 0x00, 0x3b, 0xf7, 0xd4, 0x00, 0x00, 0x01, 0x01, 65 | 0x08, 0x0a, 0xd2, 0xe8, 0xac, 0xbe, 0x00, 0x36, 0xbc, 0x3c, 0x37, 0x36, 0xc4, 0x80, 
0x3f, 0x66, 66 | 0x33, 0xc5, 0x50, 0xa6, 0x63, 0xb2, 0x92, 0xc3, 0x6a, 0x7a, 0x80, 0x65, 0x0b, 0x22, 0x62, 0xfe, 67 | 0x16, 0x9c, 0xab, 0x55, 0x03, 0x47, 0xa6, 0x54, 0x63, 0xa5, 0xbc, 0x17, 0x8e, 0x5a, 0xf6, 0xbc, 68 | 0x24, 0x52, 0xe9, 0xd2, 0x7b, 0x08, 0xe8, 0xc2, 0x6b, 0x05, 0x1c, 0xc0, 0x61, 0xb4, 0xe0, 0x43, 69 | 0x59, 0x62, 0xbf, 0x0a, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xdc, 0x04, 0x12, 0xa0, 0x65, 70 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa8, 0x00, 0x00, 0x3f, 0xff, 0xa4, 0x06, 0x9f, 0x9b, 71 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa8, 0x00, 0x00, 0x00, 0x00, 72 | 0x00, 0x0f, 0x42, 0xa4, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x03, 0xe9, 0x00, 0x00, 0x00, 0x10, 73 | 0x00, 0x00, 0x05, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x39, 0x00, 0x00, 0x00, 0x00, 74 | 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x05, 0xf2, 75 | 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 0x74, 0x83, 0xef, 0x30, 0x65, 0xb7, 0x28, 0x99, 76 | 0x3a, 0x4e, 0x89, 0x27, 0x81, 0x00, 0x05, 0x39, 0x08, 0x00, 0x45, 0x18, 0x05, 0xdc, 0x8e, 0x5c, 77 | 0x40, 0x00, 0x3a, 0x06, 0x53, 0x77, 0x89, 0x4a, 0xcc, 0xd5, 0x59, 0xbb, 0xa9, 0x55, 0x07, 0x8f, 78 | 0xad, 0xdc, 0xf2, 0x9b, 0x09, 0xb4, 0xce, 0x1d, 0xbc, 0xee, 0x80, 0x10, 0x75, 0x40, 0x58, 0x02, 79 | 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0xb0, 0x18, 0x5b, 0x6f, 0xd7, 0xd6, 0x8b, 0x47, 0xee, 0x6a, 80 | 0x03, 0x0b, 0x9b, 0x52, 0xb1, 0xca, 0x61, 0x4b, 0x84, 0x57, 0x75, 0xc4, 0xb2, 0x18, 0x11, 0x39, 81 | 0xce, 0x5d, 0x2a, 0x38, 0x91, 0x29, 0x76, 0x11, 0x7d, 0xc1, 0xcc, 0x5c, 0x4b, 0x0a, 0xde, 0xbb, 82 | 0xa8, 0xad, 0x9d, 0x88, 0x36, 0x8b, 0xc0, 0x02, 0x87, 0xa7, 0xa5, 0x1c, 0xd9, 0x85, 0x71, 0x85, 83 | 0x68, 0x2b, 0x59, 0xc6, 0x2c, 0x3c, 0x84, 0x0c, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xdc, 84 | 0x20, 0x90, 0x93, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa4, 0x00, 0x00, 0x3f, 0xff, 85 | 0x04, 0x39, 0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa4, 86 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0x4b, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x03, 0xe9, 87 | 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 88 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x90, 0x00, 0x00, 0x00, 0x01, 89 | 0x00, 0x00, 0x05, 0xf2, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 0xda, 0xb1, 0x22, 0xfb, 90 | 0xd9, 0xcf, 0x74, 0x83, 0xef, 0x30, 0x65, 0xb7, 0x81, 0x00, 0x00, 0x17, 0x08, 0x00, 0x45, 0x00, 91 | 0x05, 0xdc, 0x7e, 0x42, 0x40, 0x00, 0x3f, 0x06, 0x12, 0x4d, 0xb9, 0x66, 0xdb, 0x43, 0x67, 0xc2, 92 | 0xa9, 0x20, 0x63, 0x75, 0x57, 0xae, 0x6d, 0xbf, 0x59, 0x7c, 0x93, 0x71, 0x09, 0x67, 0x80, 0x10, 93 | 0x00, 0xeb, 0xfc, 0x16, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x40, 0x96, 0x88, 0x38, 0x36, 0xe1, 94 | 0x64, 0xc7, 0x1b, 0x43, 0xbc, 0x0e, 0x1f, 0x81, 0x6d, 0x39, 0xf6, 0x12, 0x0c, 0xea, 0xc0, 0xea, 95 | 0x7b, 0xc1, 0x77, 0xe2, 0x92, 0x6a, 0xbf, 0xbe, 0x84, 0xd9, 0x00, 0x18, 0x57, 0x49, 0x92, 0x72, 96 | 0x8f, 0xa3, 0x78, 0x45, 0x6f, 0xc6, 0x98, 0x8f, 0x71, 0xb0, 0xc5, 0x52, 0x7d, 0x8a, 0x82, 0xef, 97 | 0x52, 0xdb, 0xe9, 0xdc, 0x0a, 0x52, 0xdb, 0x06, 0x51, 0x80, 0x80, 0xa9, 0x00, 0x00, 0x00, 0x03, 98 | 0x00, 0x00, 0x00, 0xdc, 0x20, 0x90, 0x93, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa4, 99 | 0x00, 0x00, 0x3f, 0xff, 0x04, 0x39, 0xac, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 100 | 0x00, 0x0f, 0x42, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x42, 0xa5, 0x00, 0x00, 0x00, 0x02, 101 | 
0x00, 0x00, 0x03, 0xe9, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x03, 0xbd, 0x00, 0x00, 0x00, 0x00, 102 | 0x00, 0x00, 0x03, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x90, 103 | 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x05, 0xf2, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x80, 104 | 0x90, 0xe2, 0xba, 0x89, 0x21, 0xad, 0x74, 0x83, 0xef, 0x30, 0x65, 0xb7, 0x81, 0x00, 0x03, 0xbd, 105 | 0x08, 0x00, 0x45, 0x00, 0x05, 0xdc, 0x76, 0xa2, 0x40, 0x00, 0x38, 0x06, 0xac, 0x75, 0x33, 0x5b, 106 | 0x74, 0x6c, 0xc3, 0xb5, 0xae, 0x87, 0x1f, 0x40, 0x80, 0x68, 0xab, 0xbb, 0x2f, 0x90, 0x01, 0xee, 107 | 0x3a, 0xaf, 0x80, 0x10, 0x00, 0xeb, 0x8e, 0xf4, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x34, 0xc0, 108 | 0xff, 0x26, 0xac, 0x90, 0xd5, 0xc4, 0xcc, 0xd7, 0xa4, 0xa5, 0x5b, 0xa3, 0x79, 0x33, 0xc1, 0x25, 109 | 0xcd, 0x84, 0xdc, 0xaa, 0x37, 0xc9, 0xe3, 0xab, 0xc6, 0xb4, 0xeb, 0xe3, 0x8d, 0x72, 0x06, 0xd1, 110 | 0x5a, 0x1f, 0x9a, 0x8b, 0xe9, 0x9a, 0xf7, 0x33, 0x35, 0xe5, 0xca, 0x67, 0xba, 0x04, 0xf9, 0x3c, 111 | 0x27, 0xff, 0xa3, 0xca, 0x5e, 0x90, 0xf9, 0xc7, 0xd1, 0xe4, 0xf8, 0xf5, 0x7a, 0x14, 0xdc, 0x1c, 112 | 0xb1, 0xde, 0x63, 0x75, 0xb2, 0x65, 0x27, 0xf0, 0x0d, 0x29, 0xc5, 0x56, 0x60, 0x4a, 0x50, 0x10, 113 | 0x00, 0x77, 0xc0, 0xef, 0x00, 0x00, 0x74, 0xcf, 0x8a, 0x79, 0x87, 0x77, 0x75, 0x64, 0x75, 0xeb, 114 | 0xa4, 0x56, 0xb4, 0xd8, 0x70, 0xca, 0xe6, 0x11, 0xbb, 0x9f, 0xa1, 0x63, 0x95, 0xa1, 0xb4, 0x81, 115 | 0x8d, 0x50, 0xe0, 0xd5, 0xa9, 0x2c, 0xd7, 0x8f, 0xfe, 0x78, 0xce, 0xff, 0x5a, 0xa6, 0xb6, 0xb9, 116 | 0xf1, 0xe9, 0x5f, 0xda, 0xcb, 0xf3, 0x62, 0x61, 0x5f, 0x2b, 0x32, 0x95, 0x5d, 0x96, 0x2e, 0xef, 117 | 0x32, 0x04, 0xff, 0xcc, 0x76, 0xba, 0x49, 0xab, 0x92, 0xa7, 0xf1, 0xcc, 0x52, 0x68, 0xde, 0x94, 118 | 0x90, 0xdb, 0x1b, 0xa0, 0x28, 0x8a, 0xf8, 0x64, 0x55, 0x9c, 0x9b, 0xf6, 0x9c, 0x44, 0xd9, 0x68, 119 | 0xc0, 0xe5, 0x2c, 0xe1, 0x3d, 0x29, 0x19, 0xef, 0x8b, 0x0c, 0x9d, 0x0a, 0x7e, 0xcd, 0xc2, 0xe9, 120 | 0x85, 0x6b, 0x85, 0xb3, 0x97, 0xbe, 0xc6, 0x26, 0xd2, 0xe5, 0x2e, 0x90, 0xa9, 0xac, 0xe3, 0xd8, 121 | 0xef, 0xbd, 0x7b, 0x40, 0xf8, 0xb7, 0xe3, 0xc3, 0x8d, 0xa7, 0x38, 0x0f, 0x87, 0x7a, 0x50, 0x62, 122 | 0xc8, 0xb8, 0xa4, 0x52, 0x6e, 0xdc, 0x92, 0x7f, 0xe6, 0x8d, 0x45, 0x39, 0xfd, 0x06, 0x6e, 0xd9, 123 | 0xb5, 0x65, 0xac, 0xae, 0x2b, 0x8d, 0xea, 0xcf, 0xa2, 0x98, 0x0b, 0xc6, 0x43, 0x2e, 0xa7, 0x71, 124 | 0x99, 0x2b, 0xea, 0xc3, 0x9c, 0x27, 0x74, 0x9e, 0xd5, 0x11, 0x60, 0x7a, 0x00, 0x00, 0x00, 0x00, 125 | 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd6, 0x2a, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 126 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 127 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 128 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 129 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 130 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 131 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 132 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /decoders/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/binary" 5 | "io" 6 | ) 7 | 8 | func BinaryDecoder(payload 
io.Reader, dests ...interface{}) error { 9 | for _, dest := range dests { 10 | err := binary.Read(payload, binary.BigEndian, dest) 11 | if err != nil { 12 | return err 13 | } 14 | } 15 | return nil 16 | } 17 | -------------------------------------------------------------------------------- /docker-compose-pkg.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | packager: 4 | build: package 5 | entrypoint: make 6 | command: 7 | - build-goflow 8 | - package-deb-goflow 9 | - package-rpm-goflow 10 | volumes: 11 | - ./:/work/ 12 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cloudflare/goflow/v3 2 | 3 | go 1.21 4 | 5 | toolchain go1.23.4 6 | 7 | require ( 8 | github.com/Shopify/sarama v1.38.1 9 | github.com/golang/protobuf v1.5.4 10 | github.com/libp2p/go-reuseport v0.4.0 11 | github.com/prometheus/client_golang v1.20.5 12 | github.com/sirupsen/logrus v1.9.3 13 | github.com/stretchr/testify v1.10.0 14 | ) 15 | 16 | require ( 17 | github.com/beorn7/perks v1.0.1 // indirect 18 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 19 | github.com/davecgh/go-spew v1.1.1 // indirect 20 | github.com/eapache/go-resiliency v1.7.0 // indirect 21 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect 22 | github.com/eapache/queue v1.1.0 // indirect 23 | github.com/golang/snappy v0.0.4 // indirect 24 | github.com/hashicorp/errwrap v1.1.0 // indirect 25 | github.com/hashicorp/go-multierror v1.1.1 // indirect 26 | github.com/hashicorp/go-uuid v1.0.3 // indirect 27 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 28 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 29 | github.com/jcmturner/gofork v1.7.6 // indirect 30 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 31 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 32 | github.com/klauspost/compress v1.17.11 // indirect 33 | github.com/kr/text v0.2.0 // indirect 34 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 35 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 36 | github.com/pmezard/go-difflib v1.0.0 // indirect 37 | github.com/prometheus/client_model v0.6.1 // indirect 38 | github.com/prometheus/common v0.62.0 // indirect 39 | github.com/prometheus/procfs v0.15.1 // indirect 40 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 41 | golang.org/x/crypto v0.32.0 // indirect 42 | golang.org/x/net v0.34.0 // indirect 43 | golang.org/x/sync v0.10.0 // indirect 44 | golang.org/x/sys v0.29.0 // indirect 45 | google.golang.org/protobuf v1.36.3 // indirect 46 | gopkg.in/yaml.v3 v3.0.1 // indirect 47 | ) 48 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A= 2 | github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= 3 | github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= 4 | github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= 5 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 6 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 7 | github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 8 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 9 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 10 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 12 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 | github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= 14 | github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= 15 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= 16 | github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= 17 | github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= 18 | github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 19 | github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= 20 | github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 21 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 22 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 23 | github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 24 | github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 25 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 26 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 27 | github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= 28 | github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= 29 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 30 | github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 31 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 32 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 33 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 34 | github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 35 | github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= 36 | github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 37 | github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= 38 | github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= 39 | github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= 40 | github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= 41 | github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= 42 | github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= 43 | github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= 44 | github.com/jcmturner/goidentity/v6 v6.0.1/go.mod 
h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= 45 | github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= 46 | github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= 47 | github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= 48 | github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= 49 | github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= 50 | github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= 51 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 52 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 53 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 54 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 55 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 56 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 57 | github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= 58 | github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= 59 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 60 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 61 | github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= 62 | github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 63 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 64 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 65 | github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= 66 | github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= 67 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 68 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 69 | github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= 70 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 71 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 72 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 73 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= 74 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 75 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 76 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 77 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 78 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 79 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 80 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 81 | github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 82 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 83 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 84 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 85 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 86 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 87 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 88 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 89 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 90 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 91 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 92 | golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= 93 | golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= 94 | golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= 95 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 96 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 97 | golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 98 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 99 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 100 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 101 | golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 102 | golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 103 | golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= 104 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 105 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 106 | golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= 107 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 108 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 109 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 110 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 111 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 112 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 113 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 114 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 115 | golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= 116 | golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 117 | golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 118 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 119 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 120 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 121 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 122 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 123 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 124 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 125 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 126 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 127 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 128 | google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= 129 | google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 130 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 131 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 132 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 133 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 134 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 135 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 136 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 137 | -------------------------------------------------------------------------------- /package/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ruby 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y git make rpm golang && \ 5 | gem install fpm 6 | 7 | WORKDIR /work 8 | 9 | ENTRYPOINT [ "/bin/bash" ] 10 | -------------------------------------------------------------------------------- /package/goflow.env: -------------------------------------------------------------------------------- 1 | GOFLOW_ARGS= 2 | -------------------------------------------------------------------------------- /package/goflow.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=GoFlow 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | EnvironmentFile=/etc/default/goflow 8 | ExecStart=/usr/bin/goflow $GOFLOW_ARGS 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /pb/flow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package flowprotob; 3 | 4 | option java_package = "com.cloudflare.net.flowagg"; 5 | option java_outer_classname = "FlowMessagePb"; 6 | 7 | message FlowMessage { 8 | 9 | enum FlowType { 10 | FLOWUNKNOWN = 0; 11 | SFLOW_5 = 1; 12 | NETFLOW_V5 = 2; 13 | NETFLOW_V9 = 3; 14 | IPFIX = 4; 15 | } 16 | FlowType Type = 1; 17 | 18 | uint64 TimeReceived = 2; 19 | uint32 SequenceNum = 4; 20 | uint64 SamplingRate = 3; 21 
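  // Note: field numbers are stable wire identifiers and are not sequential,
  // most likely because fields were added to the message incrementally over time.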
| 22 | uint32 FlowDirection = 42; 23 | 24 | // Sampler information 25 | bytes SamplerAddress = 11; 26 | 27 | // Found inside packet 28 | uint64 TimeFlowStart = 38; 29 | uint64 TimeFlowEnd = 5; 30 | 31 | // Size of the sampled packet 32 | uint64 Bytes = 9; 33 | uint64 Packets = 10; 34 | 35 | // Source/destination addresses 36 | bytes SrcAddr = 6; 37 | bytes DstAddr = 7; 38 | 39 | // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...) 40 | uint32 Etype = 30; 41 | 42 | // Layer 4 protocol 43 | uint32 Proto = 20; 44 | 45 | // Ports for UDP and TCP 46 | uint32 SrcPort = 21; 47 | uint32 DstPort = 22; 48 | 49 | // Interfaces 50 | uint32 InIf = 18; 51 | uint32 OutIf = 19; 52 | 53 | // Ethernet information 54 | uint64 SrcMac = 27; 55 | uint64 DstMac = 28; 56 | 57 | // Vlan 58 | uint32 SrcVlan = 33; 59 | uint32 DstVlan = 34; 60 | // 802.1q VLAN in sampled packet 61 | uint32 VlanId = 29; 62 | 63 | // VRF 64 | uint32 IngressVrfID = 39; 65 | uint32 EgressVrfID = 40; 66 | 67 | // IP and TCP special flags 68 | uint32 IPTos = 23; 69 | uint32 ForwardingStatus = 24; 70 | uint32 IPTTL = 25; 71 | uint32 TCPFlags = 26; 72 | uint32 IcmpType = 31; 73 | uint32 IcmpCode = 32; 74 | uint32 IPv6FlowLabel = 37; 75 | // Fragments (IPv4/IPv6) 76 | uint32 FragmentId = 35; 77 | uint32 FragmentOffset = 36; 78 | uint32 BiFlowDirection = 41; 79 | 80 | // Autonomous system information 81 | uint32 SrcAS = 14; 82 | uint32 DstAS = 15; 83 | 84 | bytes NextHop = 12; 85 | uint32 NextHopAS = 13; 86 | 87 | // Prefix size 88 | uint32 SrcNet = 16; 89 | uint32 DstNet = 17; 90 | 91 | // IP encapsulation information 92 | bool HasEncap = 43; 93 | bytes SrcAddrEncap = 44; 94 | bytes DstAddrEncap = 45; 95 | uint32 ProtoEncap = 46; 96 | uint32 EtypeEncap = 47; 97 | 98 | uint32 IPTosEncap = 48; 99 | uint32 IPTTLEncap = 49; 100 | uint32 IPv6FlowLabelEncap = 50; 101 | uint32 FragmentIdEncap = 51; 102 | uint32 FragmentOffsetEncap = 52; 103 | 104 | // MPLS information 105 | bool HasMPLS = 53; 106 | uint32 MPLSCount = 54; 107 | uint32 MPLS1TTL = 55; // First TTL 108 | uint32 MPLS1Label = 56; // First Label 109 | uint32 MPLS2TTL = 57; // Second TTL 110 | uint32 MPLS2Label = 58; // Second Label 111 | uint32 MPLS3TTL = 59; // Third TTL 112 | uint32 MPLS3Label = 60; // Third Label 113 | uint32 MPLSLastTTL = 61; // Last TTL 114 | uint32 MPLSLastLabel = 62; // Last Label 115 | 116 | // PPP information 117 | bool HasPPP = 63; 118 | uint32 PPPAddressControl = 64; 119 | 120 | // Custom fields: start after ID 1000: 121 | // uint32 MyCustomField = 1000; 122 | 123 | } 124 | -------------------------------------------------------------------------------- /producer/producer_nf.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "errors" 7 | "fmt" 8 | "net" 9 | "sync" 10 | "time" 11 | 12 | "github.com/cloudflare/goflow/v3/decoders/netflow" 13 | flowmessage "github.com/cloudflare/goflow/v3/pb" 14 | ) 15 | 16 | type SamplingRateSystem interface { 17 | GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) 18 | AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) 19 | } 20 | 21 | type basicSamplingRateSystem struct { 22 | sampling map[uint16]map[uint32]uint32 23 | samplinglock *sync.RWMutex 24 | } 25 | 26 | func CreateSamplingSystem() SamplingRateSystem { 27 | ts := &basicSamplingRateSystem{ 28 | sampling: make(map[uint16]map[uint32]uint32), 29 | samplinglock: &sync.RWMutex{}, 30 | } 31 | return ts 32 | } 33 | 34 | func (s 
*basicSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) { 35 | s.samplinglock.Lock() 36 | _, exists := s.sampling[version] 37 | if !exists { 38 | s.sampling[version] = make(map[uint32]uint32) 39 | } 40 | s.sampling[version][obsDomainId] = samplingRate 41 | s.samplinglock.Unlock() 42 | } 43 | 44 | func (s *basicSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) { 45 | s.samplinglock.RLock() 46 | samplingVersion, okver := s.sampling[version] 47 | if okver { 48 | samplingRate, okid := samplingVersion[obsDomainId] 49 | if okid { 50 | s.samplinglock.RUnlock() 51 | return samplingRate, nil 52 | } 53 | s.samplinglock.RUnlock() 54 | return 0, errors.New("") // TBC 55 | } 56 | s.samplinglock.RUnlock() 57 | return 0, errors.New("") // TBC 58 | } 59 | 60 | type SingleSamplingRateSystem struct { 61 | Sampling uint32 62 | } 63 | 64 | func (s *SingleSamplingRateSystem) AddSamplingRate(version uint16, obsDomainId uint32, samplingRate uint32) { 65 | } 66 | 67 | func (s *SingleSamplingRateSystem) GetSamplingRate(version uint16, obsDomainId uint32) (uint32, error) { 68 | return s.Sampling, nil 69 | } 70 | 71 | func NetFlowLookFor(dataFields []netflow.DataField, typeId uint16) (bool, interface{}) { 72 | for _, dataField := range dataFields { 73 | if dataField.Type == typeId { 74 | return true, dataField.Value 75 | } 76 | } 77 | return false, nil 78 | } 79 | 80 | func NetFlowPopulate(dataFields []netflow.DataField, typeId uint16, addr interface{}) bool { 81 | exists, value := NetFlowLookFor(dataFields, typeId) 82 | if exists && value != nil { 83 | valueBytes, ok := value.([]byte) 84 | valueReader := bytes.NewReader(valueBytes) 85 | if ok { 86 | switch addrt := addr.(type) { 87 | case *(net.IP): 88 | *addrt = valueBytes 89 | case *(time.Time): 90 | t := uint64(0) 91 | binary.Read(valueReader, binary.BigEndian, &t) 92 | t64 := int64(t / 1000) 93 | *addrt = time.Unix(t64, 0) 94 | default: 95 | binary.Read(valueReader, binary.BigEndian, addr) 96 | } 97 | } 98 | } 99 | return exists 100 | } 101 | 102 | func DecodeUNumber(b []byte, out interface{}) error { 103 | var o uint64 104 | l := len(b) 105 | switch l { 106 | case 1: 107 | o = uint64(b[0]) 108 | case 2: 109 | o = uint64(binary.BigEndian.Uint16(b)) 110 | case 4: 111 | o = uint64(binary.BigEndian.Uint32(b)) 112 | case 8: 113 | o = binary.BigEndian.Uint64(b) 114 | default: 115 | if l < 8 { 116 | var iter uint 117 | for i := range b { 118 | o |= uint64(b[i]) << uint(8*(uint(l)-iter-1)) 119 | iter++ 120 | } 121 | } else { 122 | return fmt.Errorf("non-regular number of bytes for a number: %v", l) 123 | } 124 | } 125 | switch t := out.(type) { 126 | case *byte: 127 | *t = byte(o) 128 | case *uint16: 129 | *t = uint16(o) 130 | case *uint32: 131 | *t = uint32(o) 132 | case *uint64: 133 | *t = o 134 | default: 135 | return errors.New("the parameter is not a pointer to a byte/uint16/uint32/uint64 structure") 136 | } 137 | return nil 138 | } 139 | 140 | func ConvertNetFlowDataSet(version uint16, baseTime uint32, uptime uint32, record []netflow.DataField) *flowmessage.FlowMessage { 141 | flowMessage := &flowmessage.FlowMessage{} 142 | var time uint64 143 | 144 | if version == 9 { 145 | flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V9 146 | } else if version == 10 { 147 | flowMessage.Type = flowmessage.FlowMessage_IPFIX 148 | } 149 | 150 | for i := range record { 151 | df := record[i] 152 | 153 | v, ok := df.Value.([]byte) 154 | if !ok { 155 | continue 156 | } 157 | 158 | switch df.Type { 
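// The switch below maps NetFlow v9 / IPFIX information element identifiers onto
// FlowMessage protobuf fields. Version-specific elements (the flow start/end
// timestamps) are handled in the default branch at the bottom of the switch.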
159 | 160 | // Statistics 161 | case netflow.NFV9_FIELD_IN_BYTES: 162 | DecodeUNumber(v, &(flowMessage.Bytes)) 163 | case netflow.NFV9_FIELD_IN_PKTS: 164 | DecodeUNumber(v, &(flowMessage.Packets)) 165 | case netflow.NFV9_FIELD_OUT_BYTES: 166 | DecodeUNumber(v, &(flowMessage.Bytes)) 167 | case netflow.NFV9_FIELD_OUT_PKTS: 168 | DecodeUNumber(v, &(flowMessage.Packets)) 169 | 170 | // L4 171 | case netflow.NFV9_FIELD_L4_SRC_PORT: 172 | DecodeUNumber(v, &(flowMessage.SrcPort)) 173 | case netflow.NFV9_FIELD_L4_DST_PORT: 174 | DecodeUNumber(v, &(flowMessage.DstPort)) 175 | case netflow.NFV9_FIELD_PROTOCOL: 176 | DecodeUNumber(v, &(flowMessage.Proto)) 177 | 178 | // Network 179 | case netflow.NFV9_FIELD_SRC_AS: 180 | DecodeUNumber(v, &(flowMessage.SrcAS)) 181 | case netflow.NFV9_FIELD_DST_AS: 182 | DecodeUNumber(v, &(flowMessage.DstAS)) 183 | 184 | // Interfaces 185 | case netflow.NFV9_FIELD_INPUT_SNMP: 186 | DecodeUNumber(v, &(flowMessage.InIf)) 187 | case netflow.NFV9_FIELD_OUTPUT_SNMP: 188 | DecodeUNumber(v, &(flowMessage.OutIf)) 189 | 190 | case netflow.NFV9_FIELD_FORWARDING_STATUS: 191 | DecodeUNumber(v, &(flowMessage.ForwardingStatus)) 192 | case netflow.NFV9_FIELD_SRC_TOS: 193 | DecodeUNumber(v, &(flowMessage.IPTos)) 194 | case netflow.NFV9_FIELD_TCP_FLAGS: 195 | DecodeUNumber(v, &(flowMessage.TCPFlags)) 196 | case netflow.NFV9_FIELD_MIN_TTL: 197 | DecodeUNumber(v, &(flowMessage.IPTTL)) 198 | 199 | // IP 200 | case netflow.NFV9_FIELD_IPV4_SRC_ADDR: 201 | flowMessage.SrcAddr = v 202 | flowMessage.Etype = 0x800 203 | case netflow.NFV9_FIELD_IPV4_DST_ADDR: 204 | flowMessage.DstAddr = v 205 | flowMessage.Etype = 0x800 206 | 207 | case netflow.NFV9_FIELD_SRC_MASK: 208 | DecodeUNumber(v, &(flowMessage.SrcNet)) 209 | case netflow.NFV9_FIELD_DST_MASK: 210 | DecodeUNumber(v, &(flowMessage.DstNet)) 211 | 212 | case netflow.NFV9_FIELD_IPV6_SRC_ADDR: 213 | flowMessage.SrcAddr = v 214 | flowMessage.Etype = 0x86dd 215 | case netflow.NFV9_FIELD_IPV6_DST_ADDR: 216 | flowMessage.DstAddr = v 217 | flowMessage.Etype = 0x86dd 218 | 219 | case netflow.NFV9_FIELD_IPV6_SRC_MASK: 220 | DecodeUNumber(v, &(flowMessage.SrcNet)) 221 | case netflow.NFV9_FIELD_IPV6_DST_MASK: 222 | DecodeUNumber(v, &(flowMessage.DstNet)) 223 | 224 | case netflow.NFV9_FIELD_IPV4_NEXT_HOP: 225 | flowMessage.NextHop = v 226 | case netflow.NFV9_FIELD_BGP_IPV4_NEXT_HOP: 227 | flowMessage.NextHop = v 228 | 229 | case netflow.NFV9_FIELD_IPV6_NEXT_HOP: 230 | flowMessage.NextHop = v 231 | case netflow.NFV9_FIELD_BGP_IPV6_NEXT_HOP: 232 | flowMessage.NextHop = v 233 | 234 | // ICMP 235 | case netflow.NFV9_FIELD_ICMP_TYPE: 236 | var icmpTypeCode uint16 237 | DecodeUNumber(v, &icmpTypeCode) 238 | flowMessage.IcmpType = uint32(icmpTypeCode >> 8) 239 | flowMessage.IcmpCode = uint32(icmpTypeCode & 0xff) 240 | case netflow.IPFIX_FIELD_icmpTypeCodeIPv6: 241 | var icmpTypeCode uint16 242 | DecodeUNumber(v, &icmpTypeCode) 243 | flowMessage.IcmpType = uint32(icmpTypeCode >> 8) 244 | flowMessage.IcmpCode = uint32(icmpTypeCode & 0xff) 245 | case netflow.IPFIX_FIELD_icmpTypeIPv4: 246 | DecodeUNumber(v, &(flowMessage.IcmpType)) 247 | case netflow.IPFIX_FIELD_icmpTypeIPv6: 248 | DecodeUNumber(v, &(flowMessage.IcmpType)) 249 | case netflow.IPFIX_FIELD_icmpCodeIPv4: 250 | DecodeUNumber(v, &(flowMessage.IcmpCode)) 251 | case netflow.IPFIX_FIELD_icmpCodeIPv6: 252 | DecodeUNumber(v, &(flowMessage.IcmpCode)) 253 | 254 | // Mac 255 | case netflow.NFV9_FIELD_IN_SRC_MAC: 256 | DecodeUNumber(v, &(flowMessage.SrcMac)) 257 | case netflow.NFV9_FIELD_OUT_DST_MAC: 258 | 
DecodeUNumber(v, &(flowMessage.DstMac)) 259 | 260 | case netflow.NFV9_FIELD_SRC_VLAN: 261 | DecodeUNumber(v, &(flowMessage.VlanId)) 262 | DecodeUNumber(v, &(flowMessage.SrcVlan)) 263 | case netflow.NFV9_FIELD_DST_VLAN: 264 | DecodeUNumber(v, &(flowMessage.DstVlan)) 265 | 266 | case netflow.IPFIX_FIELD_ingressVRFID: 267 | DecodeUNumber(v, &(flowMessage.IngressVrfID)) 268 | case netflow.IPFIX_FIELD_egressVRFID: 269 | DecodeUNumber(v, &(flowMessage.EgressVrfID)) 270 | 271 | case netflow.NFV9_FIELD_IPV4_IDENT: 272 | DecodeUNumber(v, &(flowMessage.FragmentId)) 273 | case netflow.NFV9_FIELD_FRAGMENT_OFFSET: 274 | var fragOffset uint32 275 | DecodeUNumber(v, &fragOffset) 276 | flowMessage.FragmentOffset |= fragOffset 277 | case netflow.IPFIX_FIELD_fragmentFlags: 278 | var ipFlags uint32 279 | DecodeUNumber(v, &ipFlags) 280 | flowMessage.FragmentOffset |= ipFlags 281 | case netflow.NFV9_FIELD_IPV6_FLOW_LABEL: 282 | DecodeUNumber(v, &(flowMessage.IPv6FlowLabel)) 283 | 284 | case netflow.IPFIX_FIELD_biflowDirection: 285 | DecodeUNumber(v, &(flowMessage.BiFlowDirection)) 286 | 287 | case netflow.NFV9_FIELD_DIRECTION: 288 | DecodeUNumber(v, &(flowMessage.FlowDirection)) 289 | 290 | default: 291 | if version == 9 { 292 | // NetFlow v9 time works with a differential based on router's uptime 293 | switch df.Type { 294 | case netflow.NFV9_FIELD_FIRST_SWITCHED: 295 | var timeFirstSwitched uint32 296 | DecodeUNumber(v, &timeFirstSwitched) 297 | timeDiff := (uptime - timeFirstSwitched) / 1000 298 | flowMessage.TimeFlowStart = uint64(baseTime - timeDiff) 299 | case netflow.NFV9_FIELD_LAST_SWITCHED: 300 | var timeLastSwitched uint32 301 | DecodeUNumber(v, &timeLastSwitched) 302 | timeDiff := (uptime - timeLastSwitched) / 1000 303 | flowMessage.TimeFlowEnd = uint64(baseTime - timeDiff) 304 | } 305 | } else if version == 10 { 306 | switch df.Type { 307 | case netflow.IPFIX_FIELD_flowStartSeconds: 308 | DecodeUNumber(v, &time) 309 | flowMessage.TimeFlowStart = time 310 | case netflow.IPFIX_FIELD_flowStartMilliseconds: 311 | DecodeUNumber(v, &time) 312 | flowMessage.TimeFlowStart = time / 1000 313 | case netflow.IPFIX_FIELD_flowStartMicroseconds: 314 | DecodeUNumber(v, &time) 315 | flowMessage.TimeFlowStart = time / 1000000 316 | case netflow.IPFIX_FIELD_flowStartNanoseconds: 317 | DecodeUNumber(v, &time) 318 | flowMessage.TimeFlowStart = time / 1000000000 319 | case netflow.IPFIX_FIELD_flowEndSeconds: 320 | DecodeUNumber(v, &time) 321 | flowMessage.TimeFlowEnd = time 322 | case netflow.IPFIX_FIELD_flowEndMilliseconds: 323 | DecodeUNumber(v, &time) 324 | flowMessage.TimeFlowEnd = time / 1000 325 | case netflow.IPFIX_FIELD_flowEndMicroseconds: 326 | DecodeUNumber(v, &time) 327 | flowMessage.TimeFlowEnd = time / 1000000 328 | case netflow.IPFIX_FIELD_flowEndNanoseconds: 329 | DecodeUNumber(v, &time) 330 | flowMessage.TimeFlowEnd = time / 1000000000 331 | } 332 | } 333 | } 334 | 335 | } 336 | 337 | return flowMessage 338 | } 339 | 340 | func SearchNetFlowDataSetsRecords(version uint16, baseTime uint32, uptime uint32, dataRecords []netflow.DataRecord) []*flowmessage.FlowMessage { 341 | flowMessageSet := make([]*flowmessage.FlowMessage, 0) 342 | for _, record := range dataRecords { 343 | fmsg := ConvertNetFlowDataSet(version, baseTime, uptime, record.Values) 344 | if fmsg != nil { 345 | flowMessageSet = append(flowMessageSet, fmsg) 346 | } 347 | } 348 | return flowMessageSet 349 | } 350 | 351 | func SearchNetFlowDataSets(version uint16, baseTime uint32, uptime uint32, dataFlowSet []netflow.DataFlowSet) 
[]*flowmessage.FlowMessage { 352 | flowMessageSet := make([]*flowmessage.FlowMessage, 0) 353 | for _, dataFlowSetItem := range dataFlowSet { 354 | fmsg := SearchNetFlowDataSetsRecords(version, baseTime, uptime, dataFlowSetItem.Records) 355 | if fmsg != nil { 356 | flowMessageSet = append(flowMessageSet, fmsg...) 357 | } 358 | } 359 | return flowMessageSet 360 | } 361 | 362 | func SearchNetFlowOptionDataSets(dataFlowSet []netflow.OptionsDataFlowSet) (uint32, bool) { 363 | var samplingRate uint32 364 | var found bool 365 | for _, dataFlowSetItem := range dataFlowSet { 366 | for _, record := range dataFlowSetItem.Records { 367 | b := NetFlowPopulate(record.OptionsValues, 305, &samplingRate) 368 | if b { 369 | return samplingRate, b 370 | } 371 | b = NetFlowPopulate(record.OptionsValues, 50, &samplingRate) 372 | if b { 373 | return samplingRate, b 374 | } 375 | b = NetFlowPopulate(record.OptionsValues, 34, &samplingRate) 376 | if b { 377 | return samplingRate, b 378 | } 379 | } 380 | } 381 | return samplingRate, found 382 | } 383 | 384 | func SplitNetFlowSets(packetNFv9 netflow.NFv9Packet) ([]netflow.DataFlowSet, []netflow.TemplateFlowSet, []netflow.NFv9OptionsTemplateFlowSet, []netflow.OptionsDataFlowSet) { 385 | dataFlowSet := make([]netflow.DataFlowSet, 0) 386 | templatesFlowSet := make([]netflow.TemplateFlowSet, 0) 387 | optionsTemplatesFlowSet := make([]netflow.NFv9OptionsTemplateFlowSet, 0) 388 | optionsDataFlowSet := make([]netflow.OptionsDataFlowSet, 0) 389 | for _, flowSet := range packetNFv9.FlowSets { 390 | switch flowSet := flowSet.(type) { 391 | case netflow.TemplateFlowSet: 392 | templatesFlowSet = append(templatesFlowSet, flowSet) 393 | case netflow.NFv9OptionsTemplateFlowSet: 394 | optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, flowSet) 395 | case netflow.DataFlowSet: 396 | dataFlowSet = append(dataFlowSet, flowSet) 397 | case netflow.OptionsDataFlowSet: 398 | optionsDataFlowSet = append(optionsDataFlowSet, flowSet) 399 | } 400 | } 401 | return dataFlowSet, templatesFlowSet, optionsTemplatesFlowSet, optionsDataFlowSet 402 | } 403 | 404 | func SplitIPFIXSets(packetIPFIX netflow.IPFIXPacket) ([]netflow.DataFlowSet, []netflow.TemplateFlowSet, []netflow.IPFIXOptionsTemplateFlowSet, []netflow.OptionsDataFlowSet) { 405 | dataFlowSet := make([]netflow.DataFlowSet, 0) 406 | templatesFlowSet := make([]netflow.TemplateFlowSet, 0) 407 | optionsTemplatesFlowSet := make([]netflow.IPFIXOptionsTemplateFlowSet, 0) 408 | optionsDataFlowSet := make([]netflow.OptionsDataFlowSet, 0) 409 | for _, flowSet := range packetIPFIX.FlowSets { 410 | switch flowSet := flowSet.(type) { 411 | case netflow.TemplateFlowSet: 412 | templatesFlowSet = append(templatesFlowSet, flowSet) 413 | case netflow.IPFIXOptionsTemplateFlowSet: 414 | optionsTemplatesFlowSet = append(optionsTemplatesFlowSet, flowSet) 415 | case netflow.DataFlowSet: 416 | dataFlowSet = append(dataFlowSet, flowSet) 417 | case netflow.OptionsDataFlowSet: 418 | optionsDataFlowSet = append(optionsDataFlowSet, flowSet) 419 | } 420 | } 421 | return dataFlowSet, templatesFlowSet, optionsTemplatesFlowSet, optionsDataFlowSet 422 | } 423 | 424 | // Convert a NetFlow datastructure to a FlowMessage protobuf 425 | // Does not put sampling rate 426 | func ProcessMessageNetFlow(msgDec interface{}, samplingRateSys SamplingRateSystem) ([]*flowmessage.FlowMessage, error) { 427 | seqnum := uint32(0) 428 | var baseTime uint32 429 | var uptime uint32 430 | 431 | flowMessageSet := make([]*flowmessage.FlowMessage, 0) 432 | 433 | switch msgDecConv := 
msgDec.(type) { 434 | case netflow.NFv9Packet: 435 | dataFlowSet, _, _, optionDataFlowSet := SplitNetFlowSets(msgDecConv) 436 | 437 | seqnum = msgDecConv.SequenceNumber 438 | baseTime = msgDecConv.UnixSeconds 439 | uptime = msgDecConv.SystemUptime 440 | obsDomainId := msgDecConv.SourceId 441 | 442 | flowMessageSet = SearchNetFlowDataSets(9, baseTime, uptime, dataFlowSet) 443 | samplingRate, found := SearchNetFlowOptionDataSets(optionDataFlowSet) 444 | if samplingRateSys != nil { 445 | if found { 446 | samplingRateSys.AddSamplingRate(9, obsDomainId, samplingRate) 447 | } else { 448 | samplingRate, _ = samplingRateSys.GetSamplingRate(9, obsDomainId) 449 | } 450 | } 451 | for _, fmsg := range flowMessageSet { 452 | fmsg.SequenceNum = seqnum 453 | fmsg.SamplingRate = uint64(samplingRate) 454 | } 455 | case netflow.IPFIXPacket: 456 | dataFlowSet, _, _, optionDataFlowSet := SplitIPFIXSets(msgDecConv) 457 | 458 | seqnum = msgDecConv.SequenceNumber 459 | baseTime = msgDecConv.ExportTime 460 | obsDomainId := msgDecConv.ObservationDomainId 461 | 462 | flowMessageSet = SearchNetFlowDataSets(10, baseTime, uptime, dataFlowSet) 463 | 464 | samplingRate, found := SearchNetFlowOptionDataSets(optionDataFlowSet) 465 | if samplingRateSys != nil { 466 | if found { 467 | samplingRateSys.AddSamplingRate(10, obsDomainId, samplingRate) 468 | } else { 469 | samplingRate, _ = samplingRateSys.GetSamplingRate(10, obsDomainId) 470 | } 471 | } 472 | for _, fmsg := range flowMessageSet { 473 | fmsg.SequenceNum = seqnum 474 | fmsg.SamplingRate = uint64(samplingRate) 475 | } 476 | default: 477 | return flowMessageSet, errors.New("bad NetFlow/IPFIX version") 478 | } 479 | 480 | return flowMessageSet, nil 481 | } 482 | -------------------------------------------------------------------------------- /producer/producer_nflegacy.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "net" 7 | 8 | "github.com/cloudflare/goflow/v3/decoders/netflowlegacy" 9 | flowmessage "github.com/cloudflare/goflow/v3/pb" 10 | ) 11 | 12 | func ConvertNetFlowLegacyRecord(baseTime uint32, uptime uint32, record netflowlegacy.RecordsNetFlowV5) *flowmessage.FlowMessage { 13 | flowMessage := &flowmessage.FlowMessage{} 14 | 15 | flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V5 16 | 17 | timeDiffFirst := (uptime - record.First) / 1000 18 | timeDiffLast := (uptime - record.Last) / 1000 19 | flowMessage.TimeFlowStart = uint64(baseTime - timeDiffFirst) 20 | flowMessage.TimeFlowEnd = uint64(baseTime - timeDiffLast) 21 | 22 | v := make(net.IP, 4) 23 | binary.BigEndian.PutUint32(v, record.NextHop) 24 | flowMessage.NextHop = v 25 | v = make(net.IP, 4) 26 | binary.BigEndian.PutUint32(v, record.SrcAddr) 27 | flowMessage.SrcAddr = v 28 | v = make(net.IP, 4) 29 | binary.BigEndian.PutUint32(v, record.DstAddr) 30 | flowMessage.DstAddr = v 31 | 32 | flowMessage.Etype = 0x800 33 | flowMessage.SrcAS = uint32(record.SrcAS) 34 | flowMessage.DstAS = uint32(record.DstAS) 35 | flowMessage.SrcNet = uint32(record.SrcMask) 36 | flowMessage.DstNet = uint32(record.DstMask) 37 | flowMessage.Proto = uint32(record.Proto) 38 | flowMessage.TCPFlags = uint32(record.TCPFlags) 39 | flowMessage.IPTos = uint32(record.Tos) 40 | flowMessage.InIf = uint32(record.Input) 41 | flowMessage.OutIf = uint32(record.Output) 42 | flowMessage.SrcPort = uint32(record.SrcPort) 43 | flowMessage.DstPort = uint32(record.DstPort) 44 | flowMessage.Packets = uint64(record.DPkts) 45 | 
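// DPkts and DOctets are the per-flow packet and byte counters of the fixed-layout NetFlow v5 record.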
flowMessage.Bytes = uint64(record.DOctets) 46 | 47 | return flowMessage 48 | } 49 | 50 | func SearchNetFlowLegacyRecords(baseTime uint32, uptime uint32, dataRecords []netflowlegacy.RecordsNetFlowV5) []*flowmessage.FlowMessage { 51 | flowMessageSet := make([]*flowmessage.FlowMessage, 0) 52 | for _, record := range dataRecords { 53 | fmsg := ConvertNetFlowLegacyRecord(baseTime, uptime, record) 54 | if fmsg != nil { 55 | flowMessageSet = append(flowMessageSet, fmsg) 56 | } 57 | } 58 | return flowMessageSet 59 | } 60 | 61 | func ProcessMessageNetFlowLegacy(msgDec interface{}) ([]*flowmessage.FlowMessage, error) { 62 | switch packet := msgDec.(type) { 63 | case netflowlegacy.PacketNetFlowV5: 64 | seqnum := packet.FlowSequence 65 | samplingRate := packet.SamplingInterval 66 | baseTime := packet.UnixSecs 67 | uptime := packet.SysUptime 68 | 69 | flowMessageSet := SearchNetFlowLegacyRecords(baseTime, uptime, packet.Records) 70 | for _, fmsg := range flowMessageSet { 71 | fmsg.SequenceNum = seqnum 72 | fmsg.SamplingRate = uint64(samplingRate) 73 | } 74 | 75 | return flowMessageSet, nil 76 | default: 77 | return []*flowmessage.FlowMessage{}, errors.New("bad NetFlow v5 version") 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /producer/producer_sf.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "fmt" 7 | "net" 8 | 9 | "github.com/cloudflare/goflow/v3/decoders/sflow" 10 | flowmessage "github.com/cloudflare/goflow/v3/pb" 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | const ethernetHeaderSize = 14 15 | 16 | func GetSFlowFlowSamples(packet *sflow.Packet) []interface{} { 17 | flowSamples := make([]interface{}, 0) 18 | for _, sample := range packet.Samples { 19 | switch sample.(type) { 20 | case sflow.FlowSample: 21 | flowSamples = append(flowSamples, sample) 22 | case sflow.ExpandedFlowSample: 23 | flowSamples = append(flowSamples, sample) 24 | } 25 | } 26 | return flowSamples 27 | } 28 | 29 | type SFlowProducerConfig struct { 30 | DecodeGRE bool 31 | } 32 | 33 | func ParseSampledHeader(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader) error { 34 | return ParseSampledHeaderConfig(flowMessage, sampledHeader, nil) 35 | } 36 | 37 | func ParseSampledHeaderConfig(flowMessage *flowmessage.FlowMessage, sampledHeader *sflow.SampledHeader, config *SFlowProducerConfig) error { 38 | var decodeGRE bool 39 | if config != nil { 40 | decodeGRE = config.DecodeGRE 41 | } 42 | 43 | data := (*sampledHeader).HeaderData 44 | switch (*sampledHeader).Protocol { 45 | case 1: // Ethernet 46 | if len(data) < ethernetHeaderSize { 47 | return fmt.Errorf("data shorter than ethernet header (%d<%d bytes)", len(data), ethernetHeaderSize) 48 | } 49 | var hasPPP bool 50 | var pppAddressControl uint16 51 | var hasMPLS bool 52 | var countMpls uint32 53 | var firstLabelMpls uint32 54 | var firstTtlMpls uint8 55 | var secondLabelMpls uint32 56 | var secondTtlMpls uint8 57 | var thirdLabelMpls uint32 58 | var thirdTtlMpls uint8 59 | var lastLabelMpls uint32 60 | var lastTtlMpls uint8 61 | 62 | var hasEncap bool 63 | var nextHeader byte 64 | var nextHeaderEncap byte 65 | var tcpflags byte 66 | srcIP := net.IP{} 67 | dstIP := net.IP{} 68 | srcIPEncap := net.IP{} 69 | dstIPEncap := net.IP{} 70 | offset := ethernetHeaderSize 71 | 72 | var srcMac uint64 73 | var dstMac uint64 74 | 75 | var tos byte 76 | var ttl byte 77 | var identification uint16 78 | 
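// Working variables for the layered parse below: Ethernet header, optional
// 802.1Q VLAN tag, optional MPLS label stack, then IPv4/IPv6 and TCP/UDP/ICMP,
// with a single optional GRE (and PPP) encapsulation pass.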
var fragOffset uint16 79 | var flowLabel uint32 80 | 81 | var tosEncap byte 82 | var ttlEncap byte 83 | var identificationEncap uint16 84 | var fragOffsetEncap uint16 85 | var flowLabelEncap uint32 86 | 87 | var srcPort uint16 88 | var dstPort uint16 89 | 90 | etherType := data[12:14] 91 | etherTypeEncap := []byte{0, 0} 92 | 93 | dstMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[0:6]...)) 94 | srcMac = binary.BigEndian.Uint64(append([]byte{0, 0}, data[6:12]...)) 95 | (*flowMessage).SrcMac = srcMac 96 | (*flowMessage).DstMac = dstMac 97 | 98 | encap := true 99 | iterations := 0 100 | for encap && iterations <= 1 { 101 | encap = false 102 | 103 | if etherType[0] == 0x81 && etherType[1] == 0x0 { // VLAN 802.1Q 104 | (*flowMessage).VlanId = uint32(binary.BigEndian.Uint16(data[14:16])) 105 | offset += 4 106 | etherType = data[16:18] 107 | } 108 | 109 | if etherType[0] == 0x88 && etherType[1] == 0x47 { // MPLS 110 | iterateMpls := true 111 | hasMPLS = true 112 | for iterateMpls { 113 | if len(data) < offset+5 { 114 | iterateMpls = false 115 | break 116 | } 117 | label := binary.BigEndian.Uint32(append([]byte{0}, data[offset:offset+3]...)) >> 4 118 | //exp := data[offset+2] > 1 119 | bottom := data[offset+2] & 1 120 | mplsTtl := data[offset+3] 121 | offset += 4 122 | 123 | if bottom == 1 || label <= 15 || offset > len(data) { 124 | if data[offset]&0xf0>>4 == 4 { 125 | etherType = []byte{0x8, 0x0} 126 | } else if data[offset]&0xf0>>4 == 6 { 127 | etherType = []byte{0x86, 0xdd} 128 | } 129 | iterateMpls = false 130 | } 131 | 132 | if countMpls == 0 { 133 | firstLabelMpls = label 134 | firstTtlMpls = mplsTtl 135 | } else if countMpls == 1 { 136 | secondLabelMpls = label 137 | secondTtlMpls = mplsTtl 138 | } else if countMpls == 2 { 139 | thirdLabelMpls = label 140 | thirdTtlMpls = mplsTtl 141 | } else { 142 | lastLabelMpls = label 143 | lastTtlMpls = mplsTtl 144 | } 145 | countMpls++ 146 | } 147 | } 148 | 149 | if etherType[0] == 0x8 && etherType[1] == 0x0 { // IPv4 150 | if len(data) >= offset+20 { 151 | nextHeader = data[offset+9] 152 | srcIP = data[offset+12 : offset+16] 153 | dstIP = data[offset+16 : offset+20] 154 | tos = data[offset+1] 155 | ttl = data[offset+8] 156 | 157 | identification = binary.BigEndian.Uint16(data[offset+4 : offset+6]) 158 | fragOffset = binary.BigEndian.Uint16(data[offset+6 : offset+8]) 159 | 160 | offset += 20 161 | } 162 | } else if etherType[0] == 0x86 && etherType[1] == 0xdd { // IPv6 163 | if len(data) >= offset+40 { 164 | nextHeader = data[offset+6] 165 | srcIP = data[offset+8 : offset+24] 166 | dstIP = data[offset+24 : offset+40] 167 | 168 | tostmp := uint32(binary.BigEndian.Uint16(data[offset : offset+2])) 169 | tos = uint8(tostmp & 0x0ff0 >> 4) 170 | ttl = data[offset+7] 171 | 172 | flowLabel = binary.BigEndian.Uint32(data[offset : offset+4]) 173 | 174 | offset += 40 175 | 176 | } 177 | } else if etherType[0] == 0x8 && etherType[1] == 0x6 { // ARP 178 | } /*else { 179 | return errors.New(fmt.Sprintf("Unknown EtherType: %v\n", etherType)) 180 | } */ 181 | 182 | if len(data) >= offset+4 && (nextHeader == 17 || nextHeader == 6) { 183 | srcPort = binary.BigEndian.Uint16(data[offset+0 : offset+2]) 184 | dstPort = binary.BigEndian.Uint16(data[offset+2 : offset+4]) 185 | } 186 | 187 | if len(data) > offset+13 && nextHeader == 6 { 188 | tcpflags = data[offset+13] 189 | } 190 | 191 | // ICMP and ICMPv6 192 | if len(data) >= offset+2 && (nextHeader == 1 || nextHeader == 58) { 193 | (*flowMessage).IcmpType = uint32(data[offset+0]) 194 | (*flowMessage).IcmpCode = 
uint32(data[offset+1]) 195 | } 196 | 197 | // GRE 198 | if len(data) >= offset+4 && nextHeader == 47 { 199 | etherTypeEncap = data[offset+2 : offset+4] 200 | offset += 4 201 | if (etherTypeEncap[0] == 0x8 && etherTypeEncap[1] == 0x0) || 202 | (etherTypeEncap[0] == 0x86 && etherTypeEncap[1] == 0xdd) { 203 | encap = true 204 | hasEncap = true 205 | } 206 | if etherTypeEncap[0] == 0x88 && etherTypeEncap[1] == 0x0b && len(data) >= offset+12 { 207 | offset += 8 208 | encap = true 209 | hasPPP = true 210 | pppAddressControl = binary.BigEndian.Uint16(data[offset : offset+2]) 211 | pppEtherType := data[offset+2 : offset+4] 212 | if pppEtherType[0] == 0x0 && pppEtherType[1] == 0x21 { 213 | etherTypeEncap = []byte{0x8, 0x00} 214 | hasEncap = true 215 | } else if pppEtherType[0] == 0x0 && pppEtherType[1] == 0x57 { 216 | etherTypeEncap = []byte{0x86, 0xdd} 217 | hasEncap = true 218 | } 219 | offset += 4 220 | 221 | } 222 | 223 | if hasEncap { 224 | srcIPEncap = srcIP 225 | dstIPEncap = dstIP 226 | 227 | nextHeaderEncap = nextHeader 228 | tosEncap = tos 229 | ttlEncap = ttl 230 | identificationEncap = identification 231 | fragOffsetEncap = fragOffset 232 | flowLabelEncap = flowLabel 233 | 234 | etherTypeEncapTmp := etherTypeEncap 235 | etherTypeEncap = etherType 236 | etherType = etherTypeEncapTmp 237 | } 238 | 239 | } 240 | iterations++ 241 | } 242 | 243 | if !decodeGRE && hasEncap { 244 | //fmt.Printf("DEOCDE %v -> %v || %v -> %v\n", net.IP(srcIPEncap), net.IP(dstIPEncap), net.IP(srcIP), net.IP(dstIP)) 245 | tmpSrc := srcIPEncap 246 | tmpDst := dstIPEncap 247 | srcIPEncap = srcIP 248 | dstIPEncap = dstIP 249 | srcIP = tmpSrc 250 | dstIP = tmpDst 251 | 252 | tmpNextHeader := nextHeaderEncap 253 | nextHeaderEncap = nextHeader 254 | nextHeader = tmpNextHeader 255 | 256 | tosTmp := tosEncap 257 | tosEncap = tos 258 | tos = tosTmp 259 | 260 | ttlTmp := ttlEncap 261 | ttlEncap = ttl 262 | ttl = ttlTmp 263 | 264 | identificationTmp := identificationEncap 265 | identificationEncap = identification 266 | identification = identificationTmp 267 | 268 | fragOffsetTmp := fragOffsetEncap 269 | fragOffsetEncap = fragOffset 270 | fragOffset = fragOffsetTmp 271 | 272 | flowLabelTmp := flowLabelEncap 273 | flowLabelEncap = flowLabel 274 | flowLabel = flowLabelTmp 275 | } 276 | 277 | (*flowMessage).HasPPP = hasPPP 278 | (*flowMessage).PPPAddressControl = uint32(pppAddressControl) 279 | 280 | (*flowMessage).HasMPLS = hasMPLS 281 | (*flowMessage).MPLSCount = countMpls 282 | (*flowMessage).MPLS1Label = firstLabelMpls 283 | (*flowMessage).MPLS1TTL = uint32(firstTtlMpls) 284 | (*flowMessage).MPLS2Label = secondLabelMpls 285 | (*flowMessage).MPLS2TTL = uint32(secondTtlMpls) 286 | (*flowMessage).MPLS3Label = thirdLabelMpls 287 | (*flowMessage).MPLS3TTL = uint32(thirdTtlMpls) 288 | (*flowMessage).MPLSLastLabel = lastLabelMpls 289 | (*flowMessage).MPLSLastTTL = uint32(lastTtlMpls) 290 | 291 | (*flowMessage).HasEncap = hasEncap 292 | (*flowMessage).ProtoEncap = uint32(nextHeaderEncap) 293 | (*flowMessage).SrcAddrEncap = srcIPEncap 294 | (*flowMessage).DstAddrEncap = dstIPEncap 295 | (*flowMessage).EtypeEncap = uint32(binary.BigEndian.Uint16(etherTypeEncap[0:2])) 296 | 297 | (*flowMessage).IPTosEncap = uint32(tosEncap) 298 | (*flowMessage).IPTTLEncap = uint32(ttlEncap) 299 | (*flowMessage).FragmentIdEncap = uint32(identificationEncap) 300 | (*flowMessage).FragmentOffsetEncap = uint32(fragOffsetEncap) 301 | (*flowMessage).IPv6FlowLabelEncap = flowLabelEncap & 0xFFFFF 302 | 303 | (*flowMessage).Etype = 
uint32(binary.BigEndian.Uint16(etherType[0:2])) 304 | (*flowMessage).IPv6FlowLabel = flowLabel & 0xFFFFF 305 | 306 | (*flowMessage).SrcPort = uint32(srcPort) 307 | (*flowMessage).DstPort = uint32(dstPort) 308 | 309 | (*flowMessage).SrcAddr = srcIP 310 | (*flowMessage).DstAddr = dstIP 311 | (*flowMessage).Proto = uint32(nextHeader) 312 | (*flowMessage).IPTos = uint32(tos) 313 | (*flowMessage).IPTTL = uint32(ttl) 314 | (*flowMessage).TCPFlags = uint32(tcpflags) 315 | 316 | (*flowMessage).FragmentId = uint32(identification) 317 | (*flowMessage).FragmentOffset = uint32(fragOffset) 318 | } 319 | return nil 320 | } 321 | 322 | func SearchSFlowSamplesConfig(samples []interface{}, config *SFlowProducerConfig, agent net.IP) []*flowmessage.FlowMessage { 323 | flowMessageSet := make([]*flowmessage.FlowMessage, 0) 324 | 325 | for _, flowSample := range samples { 326 | var records []sflow.FlowRecord 327 | 328 | flowMessage := &flowmessage.FlowMessage{} 329 | flowMessage.Type = flowmessage.FlowMessage_SFLOW_5 330 | 331 | switch flowSample := flowSample.(type) { 332 | case sflow.FlowSample: 333 | records = flowSample.Records 334 | flowMessage.SamplingRate = uint64(flowSample.SamplingRate) 335 | flowMessage.InIf = flowSample.Input 336 | flowMessage.OutIf = flowSample.Output 337 | case sflow.ExpandedFlowSample: 338 | records = flowSample.Records 339 | flowMessage.SamplingRate = uint64(flowSample.SamplingRate) 340 | flowMessage.InIf = flowSample.InputIfValue 341 | flowMessage.OutIf = flowSample.OutputIfValue 342 | } 343 | 344 | var ipNh net.IP 345 | var ipSrc net.IP 346 | var ipDst net.IP 347 | 348 | flowMessage.Packets = 1 349 | for _, record := range records { 350 | switch recordData := record.Data.(type) { 351 | case sflow.SampledHeader: 352 | flowMessage.Bytes = uint64(recordData.FrameLength) 353 | err := ParseSampledHeaderConfig(flowMessage, &recordData, config) 354 | if err != nil { 355 | log.Errorf("ParseSampledHeaderConfig failed for %s: %v", agent, err) 356 | } 357 | case sflow.SampledIPv4: 358 | ipSrc = recordData.Base.SrcIP 359 | ipDst = recordData.Base.DstIP 360 | flowMessage.SrcAddr = ipSrc 361 | flowMessage.DstAddr = ipDst 362 | flowMessage.Bytes = uint64(recordData.Base.Length) 363 | flowMessage.Proto = recordData.Base.Protocol 364 | flowMessage.SrcPort = recordData.Base.SrcPort 365 | flowMessage.DstPort = recordData.Base.DstPort 366 | flowMessage.IPTos = recordData.Tos 367 | flowMessage.Etype = 0x800 368 | case sflow.SampledIPv6: 369 | ipSrc = recordData.Base.SrcIP 370 | ipDst = recordData.Base.DstIP 371 | flowMessage.SrcAddr = ipSrc 372 | flowMessage.DstAddr = ipDst 373 | flowMessage.Bytes = uint64(recordData.Base.Length) 374 | flowMessage.Proto = recordData.Base.Protocol 375 | flowMessage.SrcPort = recordData.Base.SrcPort 376 | flowMessage.DstPort = recordData.Base.DstPort 377 | flowMessage.IPTos = recordData.Priority 378 | flowMessage.Etype = 0x86dd 379 | case sflow.ExtendedRouter: 380 | ipNh = recordData.NextHop 381 | flowMessage.NextHop = ipNh 382 | flowMessage.SrcNet = recordData.SrcMaskLen 383 | flowMessage.DstNet = recordData.DstMaskLen 384 | case sflow.ExtendedGateway: 385 | ipNh = recordData.NextHop 386 | flowMessage.NextHop = ipNh 387 | flowMessage.SrcAS = recordData.SrcAS 388 | if len(recordData.ASPath) > 0 { 389 | flowMessage.DstAS = recordData.ASPath[len(recordData.ASPath)-1] 390 | flowMessage.NextHopAS = recordData.ASPath[0] 391 | flowMessage.SrcAS = recordData.AS 392 | } else { 393 | flowMessage.DstAS = recordData.AS 394 | } 395 | case sflow.ExtendedSwitch: 396 | 
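// The sFlow extended switch record carries the 802.1Q VLAN identifiers observed on the sampled frame.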
flowMessage.SrcVlan = recordData.SrcVlan 397 | flowMessage.DstVlan = recordData.DstVlan 398 | } 399 | } 400 | flowMessageSet = append(flowMessageSet, flowMessage) 401 | } 402 | return flowMessageSet 403 | } 404 | 405 | func ProcessMessageSFlow(msgDec interface{}) ([]*flowmessage.FlowMessage, error) { 406 | return ProcessMessageSFlowConfig(msgDec, nil) 407 | } 408 | 409 | func ProcessMessageSFlowConfig(msgDec interface{}, config *SFlowProducerConfig) ([]*flowmessage.FlowMessage, error) { 410 | switch packet := msgDec.(type) { 411 | case sflow.Packet: 412 | seqnum := packet.SequenceNumber 413 | var agent net.IP 414 | agent = packet.AgentIP 415 | 416 | flowSamples := GetSFlowFlowSamples(&packet) 417 | flowMessageSet := SearchSFlowSamplesConfig(flowSamples, config, agent) 418 | for _, fmsg := range flowMessageSet { 419 | fmsg.SamplerAddress = agent 420 | fmsg.SequenceNum = seqnum 421 | } 422 | 423 | return flowMessageSet, nil 424 | default: 425 | return []*flowmessage.FlowMessage{}, errors.New("bad sFlow version") 426 | } 427 | } 428 | -------------------------------------------------------------------------------- /producer/producer_test.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/cloudflare/goflow/v3/decoders/netflow" 7 | "github.com/cloudflare/goflow/v3/decoders/sflow" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestProcessMessageNetFlow(t *testing.T) { 12 | records := []netflow.DataRecord{ 13 | { 14 | Values: []netflow.DataField{ 15 | { 16 | Type: netflow.NFV9_FIELD_IPV4_SRC_ADDR, 17 | Value: []byte{10, 0, 0, 1}, 18 | }, 19 | }, 20 | }, 21 | } 22 | dfs := []interface{}{ 23 | netflow.DataFlowSet{ 24 | Records: records, 25 | }, 26 | } 27 | 28 | pktnf9 := netflow.NFv9Packet{ 29 | FlowSets: dfs, 30 | } 31 | testsr := &SingleSamplingRateSystem{1} 32 | _, err := ProcessMessageNetFlow(pktnf9, testsr) 33 | assert.Nil(t, err) 34 | 35 | pktipfix := netflow.IPFIXPacket{ 36 | FlowSets: dfs, 37 | } 38 | _, err = ProcessMessageNetFlow(pktipfix, testsr) 39 | assert.Nil(t, err) 40 | } 41 | 42 | func TestProcessMessageSFlow(t *testing.T) { 43 | sh := sflow.SampledHeader{ 44 | FrameLength: 10, 45 | Protocol: 1, 46 | HeaderData: []byte{ 47 | 0xff, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xff, 0xab, 0xcd, 0xef, 0xab, 0xbc, 0x86, 0xdd, 0x60, 0x2e, 48 | 0xc4, 0xec, 0x01, 0xcc, 0x06, 0x40, 0xfd, 0x01, 0x00, 0x00, 0xff, 0x01, 0x82, 0x10, 0xcd, 0xff, 49 | 0xff, 0x1c, 0x00, 0x00, 0x01, 0x50, 0xfd, 0x01, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x02, 0xff, 50 | 0xff, 0x93, 0x00, 0x00, 0x02, 0x46, 0xcf, 0xca, 0x00, 0x50, 0x05, 0x15, 0x21, 0x6f, 0xa4, 0x9c, 51 | 0xf4, 0x59, 0x80, 0x18, 0x08, 0x09, 0x8c, 0x86, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x2a, 0x85, 52 | 0xee, 0x9e, 0x64, 0x5c, 0x27, 0x28, 53 | }, 54 | } 55 | pkt := sflow.Packet{ 56 | Version: 5, 57 | Samples: []interface{}{ 58 | sflow.FlowSample{ 59 | SamplingRate: 1, 60 | Records: []sflow.FlowRecord{ 61 | { 62 | Data: sh, 63 | }, 64 | }, 65 | }, 66 | sflow.ExpandedFlowSample{ 67 | SamplingRate: 1, 68 | Records: []sflow.FlowRecord{ 69 | { 70 | Data: sh, 71 | }, 72 | }, 73 | }, 74 | }, 75 | } 76 | _, err := ProcessMessageSFlow(pkt) 77 | assert.Nil(t, err) 78 | } 79 | -------------------------------------------------------------------------------- /transport/kafka.go: -------------------------------------------------------------------------------- 1 | package transport 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "errors" 7 | "flag" 8 | 
"fmt" 9 | "os" 10 | "reflect" 11 | "strings" 12 | 13 | sarama "github.com/Shopify/sarama" 14 | flowmessage "github.com/cloudflare/goflow/v3/pb" 15 | "github.com/cloudflare/goflow/v3/utils" 16 | proto "github.com/golang/protobuf/proto" 17 | ) 18 | 19 | var ( 20 | KafkaTLS *bool 21 | KafkaSASL *bool 22 | KafkaTopic *string 23 | KafkaSrv *string 24 | KafkaBrk *string 25 | 26 | KafkaLogErrors *bool 27 | 28 | KafkaHashing *bool 29 | KafkaKeying *string 30 | KafkaVersion *string 31 | 32 | kafkaConfigVersion sarama.KafkaVersion = sarama.V0_11_0_0 33 | ) 34 | 35 | type KafkaState struct { 36 | FixedLengthProto bool 37 | producer sarama.AsyncProducer 38 | topic string 39 | hashing bool 40 | keying []string 41 | } 42 | 43 | // SetKafkaVersion sets the KafkaVersion that is used to set the log message format version 44 | func SetKafkaVersion(version sarama.KafkaVersion) { 45 | kafkaConfigVersion = version 46 | } 47 | 48 | // ParseKafkaVersion is a pass through to sarama.ParseKafkaVersion to get a KafkaVersion struct by a string version that can be passed into SetKafkaVersion 49 | // This function is here so that calling code need not import sarama to set KafkaVersion 50 | func ParseKafkaVersion(versionString string) (sarama.KafkaVersion, error) { 51 | return sarama.ParseKafkaVersion(versionString) 52 | } 53 | 54 | func RegisterFlags() { 55 | KafkaTLS = flag.Bool("kafka.tls", false, "Use TLS to connect to Kafka") 56 | KafkaSASL = flag.Bool("kafka.sasl", false, "Use SASL/PLAIN data to connect to Kafka (TLS is recommended and the environment variables KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set)") 57 | KafkaTopic = flag.String("kafka.topic", "flow-messages", "Kafka topic to produce to") 58 | KafkaSrv = flag.String("kafka.srv", "", "SRV record containing a list of Kafka brokers (or use kafka.out.brokers)") 59 | KafkaBrk = flag.String("kafka.brokers", "127.0.0.1:9092,[::1]:9092", "Kafka brokers list separated by commas") 60 | 61 | KafkaLogErrors = flag.Bool("kafka.log.err", false, "Log Kafka errors") 62 | 63 | KafkaHashing = flag.Bool("kafka.hashing", false, "Enable partitioning by hash instead of random") 64 | KafkaKeying = flag.String("kafka.key", "SamplerAddress,DstAS", "Kafka list of fields to do hashing on (partition) separated by commas") 65 | KafkaVersion = flag.String("kafka.version", "0.11.0.0", "Log message version (must be a version that parses per sarama.ParseKafkaVersion)") 66 | } 67 | 68 | func StartKafkaProducerFromArgs(log utils.Logger) (*KafkaState, error) { 69 | kVersion, err := ParseKafkaVersion(*KafkaVersion) 70 | if err != nil { 71 | return nil, err 72 | } 73 | SetKafkaVersion(kVersion) 74 | addrs := make([]string, 0) 75 | if *KafkaSrv != "" { 76 | addrs, _ = utils.GetServiceAddresses(*KafkaSrv) 77 | } else { 78 | addrs = strings.Split(*KafkaBrk, ",") 79 | } 80 | return StartKafkaProducer(addrs, *KafkaTopic, *KafkaHashing, *KafkaKeying, *KafkaTLS, *KafkaSASL, *KafkaLogErrors, log) 81 | } 82 | 83 | func StartKafkaProducer(addrs []string, topic string, hashing bool, keying string, useTls bool, useSasl bool, logErrors bool, log utils.Logger) (*KafkaState, error) { 84 | kafkaConfig := sarama.NewConfig() 85 | kafkaConfig.Version = kafkaConfigVersion 86 | kafkaConfig.Producer.Return.Successes = false 87 | kafkaConfig.Producer.Return.Errors = logErrors 88 | if useTls { 89 | rootCAs, err := x509.SystemCertPool() 90 | if err != nil { 91 | return nil, errors.New(fmt.Sprintf("Error initializing TLS: %v", err)) 92 | } 93 | kafkaConfig.Net.TLS.Enable = true 94 | kafkaConfig.Net.TLS.Config = 
&tls.Config{RootCAs: rootCAs} 95 | } 96 | 97 | var keyingSplit []string 98 | if hashing { 99 | kafkaConfig.Producer.Partitioner = sarama.NewHashPartitioner 100 | keyingSplit = strings.Split(keying, ",") 101 | } 102 | 103 | if useSasl { 104 | if !useTls && log != nil { 105 | log.Warn("Using SASL without TLS will transmit the authentication in plaintext!") 106 | } 107 | kafkaConfig.Net.SASL.Enable = true 108 | kafkaConfig.Net.SASL.User = os.Getenv("KAFKA_SASL_USER") 109 | kafkaConfig.Net.SASL.Password = os.Getenv("KAFKA_SASL_PASS") 110 | if kafkaConfig.Net.SASL.User == "" && kafkaConfig.Net.SASL.Password == "" { 111 | return nil, errors.New("Kafka SASL config from environment was unsuccessful. KAFKA_SASL_USER and KAFKA_SASL_PASS need to be set.") 112 | } else if log != nil { 113 | log.Infof("Authenticating as user '%s'...", kafkaConfig.Net.SASL.User) 114 | } 115 | } 116 | 117 | kafkaProducer, err := sarama.NewAsyncProducer(addrs, kafkaConfig) 118 | if err != nil { 119 | return nil, err 120 | } 121 | state := KafkaState{ 122 | producer: kafkaProducer, 123 | topic: topic, 124 | hashing: hashing, 125 | keying: keyingSplit, 126 | } 127 | 128 | if logErrors { 129 | go func() { 130 | for { 131 | select { 132 | case msg := <-kafkaProducer.Errors(): 133 | if log != nil { 134 | log.Error(msg) 135 | } 136 | } 137 | } 138 | }() 139 | } 140 | 141 | return &state, nil 142 | } 143 | 144 | func HashProto(fields []string, flowMessage *flowmessage.FlowMessage) string { 145 | var keyStr string 146 | 147 | if flowMessage != nil { 148 | vfm := reflect.ValueOf(flowMessage) 149 | vfm = reflect.Indirect(vfm) 150 | 151 | for _, kf := range fields { 152 | fieldValue := vfm.FieldByName(kf) 153 | if fieldValue.IsValid() { 154 | keyStr += fmt.Sprintf("%v-", fieldValue) 155 | } 156 | } 157 | } 158 | 159 | return keyStr 160 | } 161 | 162 | func (s KafkaState) SendKafkaFlowMessage(flowMessage *flowmessage.FlowMessage) { 163 | var key sarama.Encoder 164 | if s.hashing { 165 | keyStr := HashProto(s.keying, flowMessage) 166 | key = sarama.StringEncoder(keyStr) 167 | } 168 | var b []byte 169 | if !s.FixedLengthProto { 170 | b, _ = proto.Marshal(flowMessage) 171 | } else { 172 | buf := proto.NewBuffer([]byte{}) 173 | buf.EncodeMessage(flowMessage) 174 | b = buf.Bytes() 175 | } 176 | s.producer.Input() <- &sarama.ProducerMessage{ 177 | Topic: s.topic, 178 | Key: key, 179 | Value: sarama.ByteEncoder(b), 180 | } 181 | } 182 | 183 | func (s KafkaState) Publish(msgs []*flowmessage.FlowMessage) { 184 | for _, msg := range msgs { 185 | s.SendKafkaFlowMessage(msg) 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /transport/transport_test.go: -------------------------------------------------------------------------------- 1 | package transport 2 | 3 | import ( 4 | "testing" 5 | 6 | flowmessage "github.com/cloudflare/goflow/v3/pb" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestHash(t *testing.T) { 11 | msg := &flowmessage.FlowMessage{ 12 | SamplerAddress: []byte{10, 0, 0, 1}, 13 | } 14 | key := HashProto([]string{"SamplerAddress", "InvalidField"}, msg) 15 | assert.Equal(t, "[10 0 0 1]-", key, "The two keys should be the same.") 16 | } 17 | -------------------------------------------------------------------------------- /utils/metrics.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "strconv" 5 | "time" 6 | 7 | "github.com/prometheus/client_golang/prometheus" 8 | ) 9 | 10 | var ( 11 | 
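// Prometheus collectors exposed by goflow: raw UDP traffic counters and packet-size
// summaries, decoder timing/error statistics, and per-router NetFlow/sFlow processing
// metrics. All of them are registered with the default registry in init() below.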
MetricTrafficBytes = prometheus.NewCounterVec( 12 | prometheus.CounterOpts{ 13 | Name: "flow_traffic_bytes", 14 | Help: "Bytes received by the application.", 15 | }, 16 | []string{"remote_ip", "remote_port", "local_ip", "local_port", "type"}, 17 | ) 18 | MetricTrafficPackets = prometheus.NewCounterVec( 19 | prometheus.CounterOpts{ 20 | Name: "flow_traffic_packets", 21 | Help: "Packets received by the application.", 22 | }, 23 | []string{"remote_ip", "remote_port", "local_ip", "local_port", "type"}, 24 | ) 25 | MetricPacketSizeSum = prometheus.NewSummaryVec( 26 | prometheus.SummaryOpts{ 27 | Name: "flow_traffic_summary_size_bytes", 28 | Help: "Summary of packet size.", 29 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 30 | }, 31 | []string{"remote_ip", "remote_port", "local_ip", "local_port", "type"}, 32 | ) 33 | DecoderStats = prometheus.NewCounterVec( 34 | prometheus.CounterOpts{ 35 | Name: "flow_decoder_count", 36 | Help: "Decoder processed count.", 37 | }, 38 | []string{"worker", "name"}, 39 | ) 40 | DecoderErrors = prometheus.NewCounterVec( 41 | prometheus.CounterOpts{ 42 | Name: "flow_decoder_error_count", 43 | Help: "Decoder processed error count.", 44 | }, 45 | []string{"worker", "name"}, 46 | ) 47 | DecoderTime = prometheus.NewSummaryVec( 48 | prometheus.SummaryOpts{ 49 | Name: "flow_summary_decoding_time_us", 50 | Help: "Decoding time summary.", 51 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 52 | }, 53 | []string{"name"}, 54 | ) 55 | DecoderProcessTime = prometheus.NewSummaryVec( 56 | prometheus.SummaryOpts{ 57 | Name: "flow_summary_processing_time_us", 58 | Help: "Processing time summary.", 59 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 60 | }, 61 | []string{"name"}, 62 | ) 63 | NetFlowStats = prometheus.NewCounterVec( 64 | prometheus.CounterOpts{ 65 | Name: "flow_process_nf_count", 66 | Help: "NetFlows processed.", 67 | }, 68 | []string{"router", "version"}, 69 | ) 70 | NetFlowErrors = prometheus.NewCounterVec( 71 | prometheus.CounterOpts{ 72 | Name: "flow_process_nf_errors_count", 73 | Help: "NetFlows processed errors.", 74 | }, 75 | []string{"router", "error"}, 76 | ) 77 | NetFlowSetRecordsStatsSum = prometheus.NewCounterVec( 78 | prometheus.CounterOpts{ 79 | Name: "flow_process_nf_flowset_records_sum", 80 | Help: "NetFlows FlowSets sum of records.", 81 | }, 82 | []string{"router", "version", "type"}, // data-template, data, opts... 83 | ) 84 | NetFlowSetStatsSum = prometheus.NewCounterVec( 85 | prometheus.CounterOpts{ 86 | Name: "flow_process_nf_flowset_sum", 87 | Help: "NetFlows FlowSets sum.", 88 | }, 89 | []string{"router", "version", "type"}, // data-template, data, opts... 
90 | ) 91 | NetFlowTimeStatsSum = prometheus.NewSummaryVec( 92 | prometheus.SummaryOpts{ 93 | Name: "flow_process_nf_delay_summary_seconds", 94 | Help: "NetFlows time difference between time of flow and processing.", 95 | Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 96 | }, 97 | []string{"router", "version"}, 98 | ) 99 | NetFlowTemplatesStats = prometheus.NewCounterVec( 100 | prometheus.CounterOpts{ 101 | Name: "flow_process_nf_templates_count", 102 | Help: "NetFlows Template count.", 103 | }, 104 | []string{"router", "version", "obs_domain_id", "template_id", "type"}, // options/template 105 | ) 106 | SFlowStats = prometheus.NewCounterVec( 107 | prometheus.CounterOpts{ 108 | Name: "flow_process_sf_count", 109 | Help: "sFlows processed.", 110 | }, 111 | []string{"router", "agent", "version"}, 112 | ) 113 | SFlowErrors = prometheus.NewCounterVec( 114 | prometheus.CounterOpts{ 115 | Name: "flow_process_sf_errors_count", 116 | Help: "sFlows processed errors.", 117 | }, 118 | []string{"router", "error"}, 119 | ) 120 | SFlowSampleStatsSum = prometheus.NewCounterVec( 121 | prometheus.CounterOpts{ 122 | Name: "flow_process_sf_samples_sum", 123 | Help: "SFlows samples sum.", 124 | }, 125 | []string{"router", "agent", "version", "type"}, // counter, flow, expanded... 126 | ) 127 | SFlowSampleRecordsStatsSum = prometheus.NewCounterVec( 128 | prometheus.CounterOpts{ 129 | Name: "flow_process_sf_samples_records_sum", 130 | Help: "SFlows samples sum of records.", 131 | }, 132 | []string{"router", "agent", "version", "type"}, // data-template, data, opts... 133 | ) 134 | ) 135 | 136 | func init() { 137 | prometheus.MustRegister(MetricTrafficBytes) 138 | prometheus.MustRegister(MetricTrafficPackets) 139 | prometheus.MustRegister(MetricPacketSizeSum) 140 | 141 | prometheus.MustRegister(DecoderStats) 142 | prometheus.MustRegister(DecoderErrors) 143 | prometheus.MustRegister(DecoderTime) 144 | prometheus.MustRegister(DecoderProcessTime) 145 | 146 | prometheus.MustRegister(NetFlowStats) 147 | prometheus.MustRegister(NetFlowErrors) 148 | prometheus.MustRegister(NetFlowSetRecordsStatsSum) 149 | prometheus.MustRegister(NetFlowSetStatsSum) 150 | prometheus.MustRegister(NetFlowTimeStatsSum) 151 | prometheus.MustRegister(NetFlowTemplatesStats) 152 | 153 | prometheus.MustRegister(SFlowStats) 154 | prometheus.MustRegister(SFlowErrors) 155 | prometheus.MustRegister(SFlowSampleStatsSum) 156 | prometheus.MustRegister(SFlowSampleRecordsStatsSum) 157 | } 158 | 159 | func DefaultAccountCallback(name string, id int, start, end time.Time) { 160 | DecoderProcessTime.With( 161 | prometheus.Labels{ 162 | "name": name, 163 | }). 164 | Observe(float64((end.Sub(start)).Nanoseconds()) / 1000) 165 | DecoderStats.With( 166 | prometheus.Labels{ 167 | "worker": strconv.Itoa(id), 168 | "name": name, 169 | }). 
170 | Inc() 171 | } 172 | -------------------------------------------------------------------------------- /utils/netflow.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | "strconv" 8 | "sync" 9 | "time" 10 | 11 | "github.com/cloudflare/goflow/v3/decoders/netflow" 12 | flowmessage "github.com/cloudflare/goflow/v3/pb" 13 | "github.com/cloudflare/goflow/v3/producer" 14 | "github.com/prometheus/client_golang/prometheus" 15 | ) 16 | 17 | type TemplateSystem struct { 18 | key string 19 | templates *netflow.BasicTemplateSystem 20 | } 21 | 22 | func (s *TemplateSystem) AddTemplate(version uint16, obsDomainId uint32, template interface{}) { 23 | s.templates.AddTemplate(version, obsDomainId, template) 24 | 25 | typeStr := "options_template" 26 | var templateId uint16 27 | switch templateIdConv := template.(type) { 28 | case netflow.IPFIXOptionsTemplateRecord: 29 | templateId = templateIdConv.TemplateId 30 | case netflow.NFv9OptionsTemplateRecord: 31 | templateId = templateIdConv.TemplateId 32 | case netflow.TemplateRecord: 33 | templateId = templateIdConv.TemplateId 34 | typeStr = "template" 35 | } 36 | NetFlowTemplatesStats.With( 37 | prometheus.Labels{ 38 | "router": s.key, 39 | "version": strconv.Itoa(int(version)), 40 | "obs_domain_id": strconv.Itoa(int(obsDomainId)), 41 | "template_id": strconv.Itoa(int(templateId)), 42 | "type": typeStr, 43 | }). 44 | Inc() 45 | } 46 | 47 | func (s *TemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { 48 | return s.templates.GetTemplate(version, obsDomainId, templateId) 49 | } 50 | 51 | type StateNetFlow struct { 52 | Transport Transport 53 | Logger Logger 54 | templateslock *sync.RWMutex 55 | templates map[string]*TemplateSystem 56 | 57 | samplinglock *sync.RWMutex 58 | sampling map[string]producer.SamplingRateSystem 59 | } 60 | 61 | func (s *StateNetFlow) DecodeFlow(msg interface{}) error { 62 | pkt := msg.(BaseMessage) 63 | buf := bytes.NewBuffer(pkt.Payload) 64 | 65 | key := pkt.Src.String() 66 | samplerAddress := pkt.Src 67 | if samplerAddress.To4() != nil { 68 | samplerAddress = samplerAddress.To4() 69 | } 70 | 71 | s.templateslock.RLock() 72 | templates, ok := s.templates[key] 73 | s.templateslock.RUnlock() 74 | if !ok { 75 | templates = &TemplateSystem{ 76 | templates: netflow.CreateTemplateSystem(), 77 | key: key, 78 | } 79 | s.templateslock.Lock() 80 | s.templates[key] = templates 81 | s.templateslock.Unlock() 82 | } 83 | s.samplinglock.RLock() 84 | sampling, ok := s.sampling[key] 85 | s.samplinglock.RUnlock() 86 | if !ok { 87 | sampling = producer.CreateSamplingSystem() 88 | s.samplinglock.Lock() 89 | s.sampling[key] = sampling 90 | s.samplinglock.Unlock() 91 | } 92 | 93 | ts := uint64(time.Now().UTC().Unix()) 94 | if pkt.SetTime { 95 | ts = uint64(pkt.RecvTime.UTC().Unix()) 96 | } 97 | 98 | timeTrackStart := time.Now() 99 | msgDec, err := netflow.DecodeMessage(buf, templates) 100 | if err != nil { 101 | switch err.(type) { 102 | case *netflow.ErrorVersion: 103 | NetFlowErrors.With( 104 | prometheus.Labels{ 105 | "router": key, 106 | "error": "error_version", 107 | }). 108 | Inc() 109 | case *netflow.ErrorFlowId: 110 | NetFlowErrors.With( 111 | prometheus.Labels{ 112 | "router": key, 113 | "error": "error_flow_id", 114 | }). 
115 | Inc() 116 | case *netflow.ErrorTemplateNotFound: 117 | NetFlowErrors.With( 118 | prometheus.Labels{ 119 | "router": key, 120 | "error": "template_not_found", 121 | }). 122 | Inc() 123 | default: 124 | NetFlowErrors.With( 125 | prometheus.Labels{ 126 | "router": key, 127 | "error": "error_decoding", 128 | }). 129 | Inc() 130 | } 131 | return err 132 | } 133 | 134 | flowMessageSet := make([]*flowmessage.FlowMessage, 0) 135 | 136 | switch msgDecConv := msgDec.(type) { 137 | case netflow.NFv9Packet: 138 | NetFlowStats.With( 139 | prometheus.Labels{ 140 | "router": key, 141 | "version": "9", 142 | }). 143 | Inc() 144 | 145 | for _, fs := range msgDecConv.FlowSets { 146 | switch fsConv := fs.(type) { 147 | case netflow.TemplateFlowSet: 148 | NetFlowSetStatsSum.With( 149 | prometheus.Labels{ 150 | "router": key, 151 | "version": "9", 152 | "type": "TemplateFlowSet", 153 | }). 154 | Inc() 155 | 156 | NetFlowSetRecordsStatsSum.With( 157 | prometheus.Labels{ 158 | "router": key, 159 | "version": "9", 160 | "type": "OptionsTemplateFlowSet", 161 | }). 162 | Add(float64(len(fsConv.Records))) 163 | 164 | case netflow.NFv9OptionsTemplateFlowSet: 165 | NetFlowSetStatsSum.With( 166 | prometheus.Labels{ 167 | "router": key, 168 | "version": "9", 169 | "type": "OptionsTemplateFlowSet", 170 | }). 171 | Inc() 172 | 173 | NetFlowSetRecordsStatsSum.With( 174 | prometheus.Labels{ 175 | "router": key, 176 | "version": "9", 177 | "type": "OptionsTemplateFlowSet", 178 | }). 179 | Add(float64(len(fsConv.Records))) 180 | 181 | case netflow.OptionsDataFlowSet: 182 | NetFlowSetStatsSum.With( 183 | prometheus.Labels{ 184 | "router": key, 185 | "version": "9", 186 | "type": "OptionsDataFlowSet", 187 | }). 188 | Inc() 189 | 190 | NetFlowSetRecordsStatsSum.With( 191 | prometheus.Labels{ 192 | "router": key, 193 | "version": "9", 194 | "type": "OptionsDataFlowSet", 195 | }). 196 | Add(float64(len(fsConv.Records))) 197 | case netflow.DataFlowSet: 198 | NetFlowSetStatsSum.With( 199 | prometheus.Labels{ 200 | "router": key, 201 | "version": "9", 202 | "type": "DataFlowSet", 203 | }). 204 | Inc() 205 | 206 | NetFlowSetRecordsStatsSum.With( 207 | prometheus.Labels{ 208 | "router": key, 209 | "version": "9", 210 | "type": "DataFlowSet", 211 | }). 212 | Add(float64(len(fsConv.Records))) 213 | } 214 | } 215 | flowMessageSet, err = producer.ProcessMessageNetFlow(msgDecConv, sampling) 216 | 217 | for _, fmsg := range flowMessageSet { 218 | fmsg.TimeReceived = ts 219 | fmsg.SamplerAddress = samplerAddress 220 | timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd 221 | NetFlowTimeStatsSum.With( 222 | prometheus.Labels{ 223 | "router": key, 224 | "version": "9", 225 | }). 226 | Observe(float64(timeDiff)) 227 | } 228 | case netflow.IPFIXPacket: 229 | NetFlowStats.With( 230 | prometheus.Labels{ 231 | "router": key, 232 | "version": "10", 233 | }). 234 | Inc() 235 | 236 | for _, fs := range msgDecConv.FlowSets { 237 | switch fsConv := fs.(type) { 238 | case netflow.TemplateFlowSet: 239 | NetFlowSetStatsSum.With( 240 | prometheus.Labels{ 241 | "router": key, 242 | "version": "10", 243 | "type": "TemplateFlowSet", 244 | }). 245 | Inc() 246 | 247 | NetFlowSetRecordsStatsSum.With( 248 | prometheus.Labels{ 249 | "router": key, 250 | "version": "10", 251 | "type": "TemplateFlowSet", 252 | }). 253 | Add(float64(len(fsConv.Records))) 254 | 255 | case netflow.IPFIXOptionsTemplateFlowSet: 256 | NetFlowSetStatsSum.With( 257 | prometheus.Labels{ 258 | "router": key, 259 | "version": "10", 260 | "type": "OptionsTemplateFlowSet", 261 | }). 
262 | Inc() 263 | 264 | NetFlowSetRecordsStatsSum.With( 265 | prometheus.Labels{ 266 | "router": key, 267 | "version": "10", 268 | "type": "OptionsTemplateFlowSet", 269 | }). 270 | Add(float64(len(fsConv.Records))) 271 | 272 | case netflow.OptionsDataFlowSet: 273 | 274 | NetFlowSetStatsSum.With( 275 | prometheus.Labels{ 276 | "router": key, 277 | "version": "10", 278 | "type": "OptionsDataFlowSet", 279 | }). 280 | Inc() 281 | 282 | NetFlowSetRecordsStatsSum.With( 283 | prometheus.Labels{ 284 | "router": key, 285 | "version": "10", 286 | "type": "OptionsDataFlowSet", 287 | }). 288 | Add(float64(len(fsConv.Records))) 289 | 290 | case netflow.DataFlowSet: 291 | NetFlowSetStatsSum.With( 292 | prometheus.Labels{ 293 | "router": key, 294 | "version": "10", 295 | "type": "DataFlowSet", 296 | }). 297 | Inc() 298 | 299 | NetFlowSetRecordsStatsSum.With( 300 | prometheus.Labels{ 301 | "router": key, 302 | "version": "10", 303 | "type": "DataFlowSet", 304 | }). 305 | Add(float64(len(fsConv.Records))) 306 | } 307 | } 308 | flowMessageSet, err = producer.ProcessMessageNetFlow(msgDecConv, sampling) 309 | 310 | for _, fmsg := range flowMessageSet { 311 | fmsg.TimeReceived = ts 312 | fmsg.SamplerAddress = samplerAddress 313 | timeDiff := fmsg.TimeReceived - fmsg.TimeFlowEnd 314 | NetFlowTimeStatsSum.With( 315 | prometheus.Labels{ 316 | "router": key, 317 | "version": "10", 318 | }). 319 | Observe(float64(timeDiff)) 320 | } 321 | } 322 | 323 | timeTrackStop := time.Now() 324 | DecoderTime.With( 325 | prometheus.Labels{ 326 | "name": "NetFlow", 327 | }). 328 | Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) 329 | 330 | if s.Transport != nil { 331 | s.Transport.Publish(flowMessageSet) 332 | } 333 | 334 | return nil 335 | } 336 | 337 | func (s *StateNetFlow) ServeHTTPTemplates(w http.ResponseWriter, r *http.Request) { 338 | tmp := make(map[string]map[uint16]map[uint32]map[uint16]interface{}) 339 | s.templateslock.RLock() 340 | for key, templatesrouterstr := range s.templates { 341 | templatesrouter := templatesrouterstr.templates.GetTemplates() 342 | tmp[key] = templatesrouter 343 | } 344 | s.templateslock.RUnlock() 345 | enc := json.NewEncoder(w) 346 | enc.Encode(tmp) 347 | } 348 | 349 | func (s *StateNetFlow) InitTemplates() { 350 | s.templates = make(map[string]*TemplateSystem) 351 | s.templateslock = &sync.RWMutex{} 352 | s.sampling = make(map[string]producer.SamplingRateSystem) 353 | s.samplinglock = &sync.RWMutex{} 354 | } 355 | 356 | func (s *StateNetFlow) FlowRoutine(workers int, addr string, port int, reuseport bool) error { 357 | s.InitTemplates() 358 | return UDPRoutine("NetFlow", s.DecodeFlow, workers, addr, port, reuseport, s.Logger) 359 | } 360 | -------------------------------------------------------------------------------- /utils/nflegacy.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "time" 6 | 7 | "github.com/cloudflare/goflow/v3/decoders/netflowlegacy" 8 | flowmessage "github.com/cloudflare/goflow/v3/pb" 9 | "github.com/cloudflare/goflow/v3/producer" 10 | "github.com/prometheus/client_golang/prometheus" 11 | ) 12 | 13 | type StateNFLegacy struct { 14 | Transport Transport 15 | Logger Logger 16 | } 17 | 18 | func (s *StateNFLegacy) DecodeFlow(msg interface{}) error { 19 | pkt := msg.(BaseMessage) 20 | buf := bytes.NewBuffer(pkt.Payload) 21 | key := pkt.Src.String() 22 | samplerAddress := pkt.Src 23 | if samplerAddress.To4() != nil { 24 | samplerAddress = samplerAddress.To4() 25 
| } 26 | 27 | ts := uint64(time.Now().UTC().Unix()) 28 | if pkt.SetTime { 29 | ts = uint64(pkt.RecvTime.UTC().Unix()) 30 | } 31 | 32 | timeTrackStart := time.Now() 33 | msgDec, err := netflowlegacy.DecodeMessage(buf) 34 | 35 | if err != nil { 36 | switch err.(type) { 37 | case *netflowlegacy.ErrorVersion: 38 | NetFlowErrors.With( 39 | prometheus.Labels{ 40 | "router": key, 41 | "error": "error_version", 42 | }). 43 | Inc() 44 | } 45 | return err 46 | } 47 | 48 | switch msgDecConv := msgDec.(type) { 49 | case netflowlegacy.PacketNetFlowV5: 50 | NetFlowStats.With( 51 | prometheus.Labels{ 52 | "router": key, 53 | "version": "5", 54 | }). 55 | Inc() 56 | NetFlowSetStatsSum.With( 57 | prometheus.Labels{ 58 | "router": key, 59 | "version": "5", 60 | "type": "DataFlowSet", 61 | }). 62 | Add(float64(msgDecConv.Count)) 63 | } 64 | 65 | var flowMessageSet []*flowmessage.FlowMessage 66 | flowMessageSet, err = producer.ProcessMessageNetFlowLegacy(msgDec) 67 | 68 | timeTrackStop := time.Now() 69 | DecoderTime.With( 70 | prometheus.Labels{ 71 | "name": "NetFlowV5", 72 | }). 73 | Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) 74 | 75 | for _, fmsg := range flowMessageSet { 76 | fmsg.TimeReceived = ts 77 | fmsg.SamplerAddress = samplerAddress 78 | } 79 | 80 | if s.Transport != nil { 81 | s.Transport.Publish(flowMessageSet) 82 | } 83 | 84 | return nil 85 | } 86 | 87 | func (s *StateNFLegacy) FlowRoutine(workers int, addr string, port int, reuseport bool) error { 88 | return UDPRoutine("NetFlowV5", s.DecodeFlow, workers, addr, port, reuseport, s.Logger) 89 | } 90 | -------------------------------------------------------------------------------- /utils/sflow.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "net" 6 | "time" 7 | 8 | "github.com/cloudflare/goflow/v3/decoders/sflow" 9 | flowmessage "github.com/cloudflare/goflow/v3/pb" 10 | "github.com/cloudflare/goflow/v3/producer" 11 | "github.com/prometheus/client_golang/prometheus" 12 | ) 13 | 14 | type StateSFlow struct { 15 | Transport Transport 16 | Logger Logger 17 | 18 | Config *producer.SFlowProducerConfig 19 | } 20 | 21 | func (s *StateSFlow) DecodeFlow(msg interface{}) error { 22 | pkt := msg.(BaseMessage) 23 | buf := bytes.NewBuffer(pkt.Payload) 24 | key := pkt.Src.String() 25 | 26 | ts := uint64(time.Now().UTC().Unix()) 27 | if pkt.SetTime { 28 | ts = uint64(pkt.RecvTime.UTC().Unix()) 29 | } 30 | 31 | timeTrackStart := time.Now() 32 | msgDec, err := sflow.DecodeMessage(buf) 33 | 34 | if err != nil { 35 | switch err.(type) { 36 | case *sflow.ErrorVersion: 37 | SFlowErrors.With( 38 | prometheus.Labels{ 39 | "router": key, 40 | "error": "error_version", 41 | }). 42 | Inc() 43 | case *sflow.ErrorIPVersion: 44 | SFlowErrors.With( 45 | prometheus.Labels{ 46 | "router": key, 47 | "error": "error_ip_version", 48 | }). 49 | Inc() 50 | case *sflow.ErrorDataFormat: 51 | SFlowErrors.With( 52 | prometheus.Labels{ 53 | "router": key, 54 | "error": "error_data_format", 55 | }). 56 | Inc() 57 | default: 58 | SFlowErrors.With( 59 | prometheus.Labels{ 60 | "router": key, 61 | "error": "error_decoding", 62 | }). 63 | Inc() 64 | } 65 | return err 66 | } 67 | 68 | switch msgDecConv := msgDec.(type) { 69 | case sflow.Packet: 70 | agentStr := net.IP(msgDecConv.AgentIP).String() 71 | SFlowStats.With( 72 | prometheus.Labels{ 73 | "router": key, 74 | "agent": agentStr, 75 | "version": "5", 76 | }). 
77 | Inc() 78 | 79 | for _, samples := range msgDecConv.Samples { 80 | typeStr := "unknown" 81 | countRec := 0 82 | switch samplesConv := samples.(type) { 83 | case sflow.FlowSample: 84 | typeStr = "FlowSample" 85 | countRec = len(samplesConv.Records) 86 | case sflow.CounterSample: 87 | typeStr = "CounterSample" 88 | if samplesConv.Header.Format == 4 { 89 | typeStr = "Expanded" + typeStr 90 | } 91 | countRec = len(samplesConv.Records) 92 | case sflow.ExpandedFlowSample: 93 | typeStr = "ExpandedFlowSample" 94 | countRec = len(samplesConv.Records) 95 | } 96 | SFlowSampleStatsSum.With( 97 | prometheus.Labels{ 98 | "router": key, 99 | "agent": agentStr, 100 | "version": "5", 101 | "type": typeStr, 102 | }). 103 | Inc() 104 | 105 | SFlowSampleRecordsStatsSum.With( 106 | prometheus.Labels{ 107 | "router": key, 108 | "agent": agentStr, 109 | "version": "5", 110 | "type": typeStr, 111 | }). 112 | Add(float64(countRec)) 113 | } 114 | 115 | } 116 | 117 | var flowMessageSet []*flowmessage.FlowMessage 118 | flowMessageSet, err = producer.ProcessMessageSFlowConfig(msgDec, s.Config) 119 | 120 | timeTrackStop := time.Now() 121 | DecoderTime.With( 122 | prometheus.Labels{ 123 | "name": "sFlow", 124 | }). 125 | Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1000) 126 | 127 | for _, fmsg := range flowMessageSet { 128 | fmsg.TimeReceived = ts 129 | fmsg.TimeFlowStart = ts 130 | fmsg.TimeFlowEnd = ts 131 | } 132 | 133 | if s.Transport != nil { 134 | s.Transport.Publish(flowMessageSet) 135 | } 136 | 137 | return nil 138 | } 139 | 140 | func (s *StateSFlow) FlowRoutine(workers int, addr string, port int, reuseport bool) error { 141 | return UDPRoutine("sFlow", s.DecodeFlow, workers, addr, port, reuseport, s.Logger) 142 | } 143 | -------------------------------------------------------------------------------- /utils/sflow_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestDecodeFlowExpandedSFlow(t *testing.T) { 9 | msg := BaseMessage{ 10 | Src: []byte{}, 11 | Port: 1, 12 | Payload: getExpandedSFlowDecode(), 13 | } 14 | 15 | s := &StateSFlow{ 16 | Transport: &DefaultLogTransport{}, 17 | } 18 | 19 | assert.Nil(t, s.DecodeFlow(msg)) 20 | } 21 | 22 | func getExpandedSFlowDecode() []byte { 23 | return []byte{ 24 | 0, 0, 0, 5, 0, 0, 0, 1, 1, 2, 3, 4, 0, 0, 0, 0, 5, 167, 139, 219, 5, 118, 25 | 138, 184, 0, 0, 0, 6, 0, 0, 0, 3, 0, 0, 0, 220, 2, 144, 194, 214, 0, 0, 0, 0, 26 | 0, 5, 6, 164, 0, 0, 3, 255, 6, 6, 189, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 27 | 6, 164, 0, 0, 0, 0, 0, 5, 6, 171, 0, 0, 0, 2, 0, 0, 3, 233, 0, 0, 0, 6, 28 | 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 29 | 0, 144, 0, 0, 0, 1, 0, 0, 5, 234, 0, 0, 0, 4, 0, 0, 0, 128, 8, 6, 168, 250, 30 | 146, 253, 116, 131, 239, 8, 101, 183, 129, 0, 5, 7, 8, 0, 9, 0, 5, 212, 0, 2, 4, 0, 31 | 3, 6, 252, 8, 9, 187, 169, 1, 4, 7, 186, 201, 1, 187, 249, 6, 160, 7, 5, 240, 6, 4, 32 | 4, 0, 0, 6, 0, 123, 119, 210, 0, 0, 165, 105, 7, 171, 145, 234, 102, 0, 252, 187, 162, 227, 33 | 104, 188, 126, 232, 156, 164, 2, 115, 6, 100, 0, 185, 6, 4, 119, 5, 213, 1, 215, 208, 8, 4, 34 | 118, 183, 241, 225, 130, 186, 2, 250, 220, 153, 189, 3, 4, 4, 1, 8, 210, 119, 172, 9, 164, 233, 35 | 1, 8, 171, 226, 196, 195, 3, 152, 9, 5, 6, 181, 4, 7, 0, 0, 0, 3, 0, 0, 0, 220, 36 | 9, 107, 215, 156, 0, 0, 0, 0, 0, 5, 6, 165, 0, 0, 3, 255, 226, 123, 0, 100, 0, 0, 37 | 0, 0, 0, 0, 0, 0, 0, 5, 6, 165, 0, 0, 0, 
0, 0, 5, 6, 164, 0, 0, 0, 2, 38 | 0, 0, 3, 233, 0, 0, 0, 6, 0, 0, 3, 184, 0, 0, 0, 0, 0, 0, 3, 184, 0, 0, 39 | 0, 0, 0, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 5, 190, 0, 0, 0, 4, 40 | 0, 0, 0, 128, 116, 131, 239, 8, 101, 183, 144, 226, 186, 134, 8, 1, 129, 0, 3, 184, 8, 0, 41 | 9, 0, 5, 168, 7, 127, 4, 0, 4, 6, 163, 211, 185, 9, 220, 7, 0, 254, 3, 8, 0, 9, 42 | 130, 136, 179, 1, 2, 2, 7, 5, 250, 4, 128, 6, 0, 1, 7, 1, 0, 0, 1, 1, 8, 0, 43 | 6, 9, 250, 9, 4, 113, 121, 4, 160, 125, 0, 4, 9, 209, 241, 194, 190, 148, 161, 186, 6, 192, 44 | 246, 190, 170, 2, 238, 190, 128, 221, 223, 1, 218, 225, 3, 9, 7, 226, 220, 231, 127, 3, 3, 252, 45 | 7, 9, 161, 247, 218, 8, 8, 174, 133, 4, 213, 245, 149, 218, 5, 4, 200, 128, 139, 5, 0, 115, 46 | 0, 0, 0, 3, 0, 0, 0, 220, 2, 144, 194, 215, 0, 0, 0, 0, 0, 5, 6, 164, 0, 0, 47 | 3, 255, 6, 6, 253, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 164, 0, 0, 0, 0, 48 | 0, 5, 6, 171, 0, 0, 0, 2, 0, 0, 3, 233, 0, 0, 0, 6, 0, 0, 0, 104, 0, 0, 49 | 0, 0, 0, 0, 0, 104, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 50 | 0, 0, 5, 242, 0, 0, 0, 4, 0, 0, 0, 128, 116, 131, 239, 7, 9, 1, 116, 131, 239, 8, 51 | 101, 183, 129, 0, 0, 104, 8, 0, 9, 0, 5, 220, 152, 143, 4, 0, 1, 6, 5, 179, 9, 187, 52 | 191, 101, 190, 2, 144, 182, 0, 0, 130, 4, 252, 4, 160, 192, 138, 8, 219, 124, 128, 6, 0, 235, 53 | 180, 213, 0, 0, 1, 1, 8, 0, 9, 124, 6, 1, 9, 1, 252, 3, 194, 8, 195, 209, 115, 1, 54 | 5, 152, 204, 2, 6, 4, 1, 119, 254, 9, 1, 170, 0, 192, 2, 7, 190, 9, 149, 5, 101, 2, 55 | 128, 122, 0, 190, 1, 109, 188, 175, 4, 8, 152, 1, 142, 108, 2, 100, 2, 124, 125, 195, 5, 8, 56 | 233, 126, 7, 4, 243, 4, 3, 153, 0, 0, 0, 3, 0, 0, 0, 220, 5, 1, 150, 6, 0, 0, 57 | 0, 0, 0, 5, 6, 167, 0, 0, 3, 255, 6, 5, 105, 220, 0, 0, 0, 0, 0, 0, 0, 0, 58 | 0, 5, 6, 167, 0, 0, 0, 0, 0, 5, 6, 164, 0, 0, 0, 2, 0, 0, 3, 233, 0, 0, 59 | 0, 6, 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 5, 7, 0, 0, 0, 0, 0, 0, 0, 1, 60 | 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 2, 2, 0, 0, 0, 4, 0, 0, 0, 128, 116, 131, 61 | 239, 8, 101, 183, 152, 3, 130, 1, 196, 153, 129, 0, 5, 7, 8, 0, 9, 0, 2, 0, 0, 0, 62 | 4, 0, 126, 7, 119, 188, 185, 9, 221, 8, 2, 116, 144, 0, 9, 139, 3, 112, 2, 0, 8, 124, 63 | 255, 251, 0, 0, 131, 2, 0, 0, 0, 246, 3, 3, 107, 5, 0, 0, 0, 0, 9, 173, 2, 217, 64 | 6, 248, 0, 0, 9, 173, 2, 217, 8, 248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 65 | 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 6, 9, 153, 66 | 215, 157, 0, 255, 0, 8, 1, 0, 9, 8, 9, 6, 164, 103, 9, 5, 0, 0, 0, 3, 0, 0, 67 | 0, 152, 5, 201, 2, 175, 0, 0, 0, 0, 0, 5, 6, 5, 0, 0, 3, 255, 1, 8, 9, 1, 68 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 5, 0, 0, 0, 0, 0, 5, 6, 164, 0, 0, 69 | 0, 2, 0, 0, 3, 233, 0, 0, 0, 6, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 70 | 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 4, 0, 0, 71 | 0, 4, 0, 0, 0, 0, 116, 131, 239, 8, 101, 183, 218, 177, 4, 251, 217, 207, 8, 0, 9, 0, 72 | 0, 8, 0, 0, 0, 0, 9, 7, 8, 161, 106, 3, 109, 6, 185, 9, 220, 215, 0, 123, 9, 184, 73 | 0, 8, 116, 122, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 3, 130, 6, 74 | 0, 0, 0, 3, 0, 0, 0, 220, 2, 144, 194, 216, 0, 0, 0, 0, 0, 5, 6, 164, 0, 0, 75 | 3, 255, 6, 7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 6, 164, 0, 0, 0, 0, 76 | 0, 5, 6, 165, 0, 0, 0, 2, 0, 0, 3, 233, 0, 0, 0, 6, 0, 0, 3, 202, 0, 0, 77 | 0, 0, 0, 0, 3, 202, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 78 | 0, 0, 5, 242, 0, 0, 0, 4, 0, 0, 0, 128, 144, 226, 186, 135, 4, 241, 116, 131, 239, 8, 79 | 101, 183, 129, 0, 3, 202, 8, 0, 9, 0, 5, 220, 147, 0, 4, 
0, 7, 6, 225, 131, 1, 159, 80 | 7, 185, 195, 181, 170, 8, 9, 117, 7, 175, 8, 3, 191, 135, 190, 150, 196, 102, 0, 6, 0, 119, 81 | 116, 113, 0, 0, 201, 244, 240, 206, 2, 117, 4, 139, 8, 4, 240, 223, 247, 123, 6, 0, 239, 0, 82 | 9, 116, 152, 153, 191, 0, 124, 2, 7, 8, 3, 178, 166, 150, 3, 218, 163, 175, 121, 8, 4, 210, 83 | 4, 5, 166, 5, 178, 1, 6, 222, 172, 186, 6, 241, 232, 8, 188, 192, 2, 220, 128, 1, 8, 7, 84 | 194, 130, 220, 5, 2, 0, 158, 195, 0, 4, 3, 2, 160, 158, 157, 2, 102, 3, 7, 3, 0, 0, 85 | 1, 3, 3, 4, 1, 1, 4, 2, 187, 255, 188, 3, 4, 138, 9, 180, 104, 233, 212, 239, 123, 237, 86 | 112, 8, 133, 129, 152, 138, 7, 195, 8, 171, 237, 3, 4, 223, 116, 214, 151, 9, 151, 102, 0, 0, 87 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 88 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 89 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 90 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 91 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 92 | 0, 0, 0, 0, 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/binary" 5 | "flag" 6 | "fmt" 7 | "net" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | decoder "github.com/cloudflare/goflow/v3/decoders" 13 | "github.com/cloudflare/goflow/v3/decoders/netflow" 14 | flowmessage "github.com/cloudflare/goflow/v3/pb" 15 | reuseport "github.com/libp2p/go-reuseport" 16 | "github.com/prometheus/client_golang/prometheus" 17 | ) 18 | 19 | const defaultFields = "Type,TimeReceived,SequenceNum,SamplingRate,SamplerAddress,TimeFlowStart,TimeFlowEnd,Bytes,Packets,SrcAddr,DstAddr,Etype,Proto,SrcPort,DstPort,InIf,OutIf,SrcMac,DstMac,SrcVlan,DstVlan,VlanId,IngressVrfID,EgressVrfID,IPTos,ForwardingStatus,IPTTL,TCPFlags,IcmpType,IcmpCode,IPv6FlowLabel,FragmentId,FragmentOffset,BiFlowDirection,SrcAS,DstAS,NextHop,NextHopAS,SrcNet,DstNet,HasEncap,SrcAddrEncap,DstAddrEncap,ProtoEncap,EtypeEncap,IPTosEncap,IPTTLEncap,IPv6FlowLabelEncap,FragmentIdEncap,FragmentOffsetEncap,HasMPLS,MPLSCount,MPLS1TTL,MPLS1Label,MPLS2TTL,MPLS2Label,MPLS3TTL,MPLS3Label,MPLSLastTTL,MPLSLastLabel,HasPPP,PPPAddressControl" 20 | 21 | var ( 22 | MessageFields = flag.String("message.fields", defaultFields, "The list of fields to include in flow messages") 23 | ) 24 | 25 | func GetServiceAddresses(srv string) (addrs []string, err error) { 26 | _, srvs, err := net.LookupSRV("", "", srv) 27 | if err != nil { 28 | return nil, fmt.Errorf("service discovery: %v", err) 29 | } 30 | for _, srv := range srvs { 31 | addrs = append(addrs, net.JoinHostPort(srv.Target, strconv.Itoa(int(srv.Port)))) 32 | } 33 | return addrs, nil 34 | } 35 | 36 | type Logger interface { 37 | Printf(string, ...interface{}) 38 | Errorf(string, ...interface{}) 39 | Warnf(string, ...interface{}) 40 | Warn(...interface{}) 41 | Error(...interface{}) 42 | Debug(...interface{}) 43 | Debugf(string, ...interface{}) 44 | Infof(string, ...interface{}) 45 | Fatalf(string, ...interface{}) 46 | } 47 | 48 | type BaseMessage struct { 49 | Src net.IP 50 | Port int 51 | Payload []byte 52 | 53 | SetTime bool 54 | RecvTime time.Time 55 | } 56 | 57 | type Transport interface { 58 | Publish([]*flowmessage.FlowMessage) 59 | } 60 | 61 | type DefaultLogTransport struct { 62 | } 63 | 64 | func (s *DefaultLogTransport) Publish(msgs []*flowmessage.FlowMessage) { 65 | for _, msg 
:= range msgs { 66 | fmt.Printf("%v\n", FlowMessageToString(msg)) 67 | } 68 | } 69 | 70 | type DefaultJSONTransport struct { 71 | } 72 | 73 | func (s *DefaultJSONTransport) Publish(msgs []*flowmessage.FlowMessage) { 74 | for _, msg := range msgs { 75 | fmt.Printf("%v\n", FlowMessageToJSON(msg)) 76 | } 77 | } 78 | 79 | type DefaultErrorCallback struct { 80 | Logger Logger 81 | } 82 | 83 | func (cb *DefaultErrorCallback) Callback(name string, id int, start, end time.Time, err error) { 84 | if _, ok := err.(*netflow.ErrorTemplateNotFound); ok { 85 | return 86 | } 87 | if cb.Logger != nil { 88 | cb.Logger.Errorf("Error from: %v (%v) duration: %v. %v", name, id, end.Sub(start), err) 89 | } 90 | } 91 | 92 | type flowMessageItem struct { 93 | Name, Value string 94 | } 95 | 96 | func flowMessageFiltered(fmsg *flowmessage.FlowMessage) []flowMessageItem { 97 | srcmac := make([]byte, 8) 98 | dstmac := make([]byte, 8) 99 | binary.BigEndian.PutUint64(srcmac, fmsg.SrcMac) 100 | binary.BigEndian.PutUint64(dstmac, fmsg.DstMac) 101 | srcmac = srcmac[2:8] 102 | dstmac = dstmac[2:8] 103 | var message []flowMessageItem 104 | 105 | for _, field := range strings.Split(*MessageFields, ",") { 106 | switch field { 107 | case "Type": 108 | message = append(message, flowMessageItem{"Type", fmsg.Type.String()}) 109 | case "TimeReceived": 110 | message = append(message, flowMessageItem{"TimeReceived", fmt.Sprintf("%v", fmsg.TimeReceived)}) 111 | case "SequenceNum": 112 | message = append(message, flowMessageItem{"SequenceNum", fmt.Sprintf("%v", fmsg.SequenceNum)}) 113 | case "SamplingRate": 114 | message = append(message, flowMessageItem{"SamplingRate", fmt.Sprintf("%v", fmsg.SamplingRate)}) 115 | case "SamplerAddress": 116 | message = append(message, flowMessageItem{"SamplerAddress", net.IP(fmsg.SamplerAddress).String()}) 117 | case "TimeFlowStart": 118 | message = append(message, flowMessageItem{"TimeFlowStart", fmt.Sprintf("%v", fmsg.TimeFlowStart)}) 119 | case "TimeFlowEnd": 120 | message = append(message, flowMessageItem{"TimeFlowEnd", fmt.Sprintf("%v", fmsg.TimeFlowEnd)}) 121 | case "Bytes": 122 | message = append(message, flowMessageItem{"Bytes", fmt.Sprintf("%v", fmsg.Bytes)}) 123 | case "Packets": 124 | message = append(message, flowMessageItem{"Packets", fmt.Sprintf("%v", fmsg.Packets)}) 125 | case "SrcAddr": 126 | message = append(message, flowMessageItem{"SrcAddr", net.IP(fmsg.SrcAddr).String()}) 127 | case "DstAddr": 128 | message = append(message, flowMessageItem{"DstAddr", net.IP(fmsg.DstAddr).String()}) 129 | case "Etype": 130 | message = append(message, flowMessageItem{"Etype", fmt.Sprintf("%v", fmsg.Etype)}) 131 | case "Proto": 132 | message = append(message, flowMessageItem{"Proto", fmt.Sprintf("%v", fmsg.Proto)}) 133 | case "SrcPort": 134 | message = append(message, flowMessageItem{"SrcPort", fmt.Sprintf("%v", fmsg.SrcPort)}) 135 | case "DstPort": 136 | message = append(message, flowMessageItem{"DstPort", fmt.Sprintf("%v", fmsg.DstPort)}) 137 | case "InIf": 138 | message = append(message, flowMessageItem{"InIf", fmt.Sprintf("%v", fmsg.InIf)}) 139 | case "OutIf": 140 | message = append(message, flowMessageItem{"OutIf", fmt.Sprintf("%v", fmsg.OutIf)}) 141 | case "SrcMac": 142 | message = append(message, flowMessageItem{"SrcMac", net.HardwareAddr(srcmac).String()}) 143 | case "DstMac": 144 | message = append(message, flowMessageItem{"DstMac", net.HardwareAddr(dstmac).String()}) 145 | case "SrcVlan": 146 | message = append(message, flowMessageItem{"SrcVlan", fmt.Sprintf("%v", fmsg.SrcVlan)}) 147 | case 
"DstVlan": 148 | message = append(message, flowMessageItem{"DstVlan", fmt.Sprintf("%v", fmsg.DstVlan)}) 149 | case "VlanId": 150 | message = append(message, flowMessageItem{"VlanId", fmt.Sprintf("%v", fmsg.VlanId)}) 151 | case "IngressVrfID": 152 | message = append(message, flowMessageItem{"IngressVrfID", fmt.Sprintf("%v", fmsg.IngressVrfID)}) 153 | case "EgressVrfID": 154 | message = append(message, flowMessageItem{"EgressVrfID", fmt.Sprintf("%v", fmsg.EgressVrfID)}) 155 | case "IPTos": 156 | message = append(message, flowMessageItem{"IPTos", fmt.Sprintf("%v", fmsg.IPTos)}) 157 | case "ForwardingStatus": 158 | message = append(message, flowMessageItem{"ForwardingStatus", fmt.Sprintf("%v", fmsg.ForwardingStatus)}) 159 | case "IPTTL": 160 | message = append(message, flowMessageItem{"IPTTL", fmt.Sprintf("%v", fmsg.IPTTL)}) 161 | case "TCPFlags": 162 | message = append(message, flowMessageItem{"TCPFlags", fmt.Sprintf("%v", fmsg.TCPFlags)}) 163 | case "IcmpType": 164 | message = append(message, flowMessageItem{"IcmpType", fmt.Sprintf("%v", fmsg.IcmpType)}) 165 | case "IcmpCode": 166 | message = append(message, flowMessageItem{"IcmpCode", fmt.Sprintf("%v", fmsg.IcmpCode)}) 167 | case "IPv6FlowLabel": 168 | message = append(message, flowMessageItem{"IPv6FlowLabel", fmt.Sprintf("%v", fmsg.IPv6FlowLabel)}) 169 | case "FragmentId": 170 | message = append(message, flowMessageItem{"FragmentId", fmt.Sprintf("%v", fmsg.FragmentId)}) 171 | case "FragmentOffset": 172 | message = append(message, flowMessageItem{"FragmentOffset", fmt.Sprintf("%v", fmsg.FragmentOffset)}) 173 | case "BiFlowDirection": 174 | message = append(message, flowMessageItem{"BiFlowDirection", fmt.Sprintf("%v", fmsg.BiFlowDirection)}) 175 | case "SrcAS": 176 | message = append(message, flowMessageItem{"SrcAS", fmt.Sprintf("%v", fmsg.SrcAS)}) 177 | case "DstAS": 178 | message = append(message, flowMessageItem{"DstAS", fmt.Sprintf("%v", fmsg.DstAS)}) 179 | case "NextHop": 180 | message = append(message, flowMessageItem{"NextHop", net.IP(fmsg.NextHop).String()}) 181 | case "NextHopAS": 182 | message = append(message, flowMessageItem{"NextHopAS", fmt.Sprintf("%v", fmsg.NextHopAS)}) 183 | case "SrcNet": 184 | message = append(message, flowMessageItem{"SrcNet", fmt.Sprintf("%v", fmsg.SrcNet)}) 185 | case "DstNet": 186 | message = append(message, flowMessageItem{"DstNet", fmt.Sprintf("%v", fmsg.DstNet)}) 187 | case "HasEncap": 188 | message = append(message, flowMessageItem{"HasEncap", fmt.Sprintf("%v", fmsg.HasEncap)}) 189 | case "SrcAddrEncap": 190 | message = append(message, flowMessageItem{"SrcAddrEncap", net.IP(fmsg.SrcAddrEncap).String()}) 191 | case "DstAddrEncap": 192 | message = append(message, flowMessageItem{"DstAddrEncap", net.IP(fmsg.DstAddrEncap).String()}) 193 | case "ProtoEncap": 194 | message = append(message, flowMessageItem{"ProtoEncap", fmt.Sprintf("%v", fmsg.ProtoEncap)}) 195 | case "EtypeEncap": 196 | message = append(message, flowMessageItem{"EtypeEncap", fmt.Sprintf("%v", fmsg.EtypeEncap)}) 197 | case "IPTosEncap": 198 | message = append(message, flowMessageItem{"IPTosEncap", fmt.Sprintf("%v", fmsg.IPTosEncap)}) 199 | case "IPTTLEncap": 200 | message = append(message, flowMessageItem{"IPTTLEncap", fmt.Sprintf("%v", fmsg.IPTTLEncap)}) 201 | case "IPv6FlowLabelEncap": 202 | message = append(message, flowMessageItem{"IPv6FlowLabelEncap", fmt.Sprintf("%v", fmsg.IPv6FlowLabelEncap)}) 203 | case "FragmentIdEncap": 204 | message = append(message, flowMessageItem{"FragmentIdEncap", fmt.Sprintf("%v", fmsg.FragmentIdEncap)}) 
205 | case "FragmentOffsetEncap": 206 | message = append(message, flowMessageItem{"FragmentOffsetEncap", fmt.Sprintf("%v", fmsg.FragmentOffsetEncap)}) 207 | case "HasMPLS": 208 | message = append(message, flowMessageItem{"HasMPLS", fmt.Sprintf("%v", fmsg.HasMPLS)}) 209 | case "MPLSCount": 210 | message = append(message, flowMessageItem{"MPLSCount", fmt.Sprintf("%v", fmsg.MPLSCount)}) 211 | case "MPLS1TTL": 212 | message = append(message, flowMessageItem{"MPLS1TTL", fmt.Sprintf("%v", fmsg.MPLS1TTL)}) 213 | case "MPLS1Label": 214 | message = append(message, flowMessageItem{"MPLS1Label", fmt.Sprintf("%v", fmsg.MPLS1Label)}) 215 | case "MPLS2TTL": 216 | message = append(message, flowMessageItem{"MPLS2TTL", fmt.Sprintf("%v", fmsg.MPLS2TTL)}) 217 | case "MPLS2Label": 218 | message = append(message, flowMessageItem{"MPLS2Label", fmt.Sprintf("%v", fmsg.MPLS2Label)}) 219 | case "MPLS3TTL": 220 | message = append(message, flowMessageItem{"MPLS3TTL", fmt.Sprintf("%v", fmsg.MPLS3TTL)}) 221 | case "MPLS3Label": 222 | message = append(message, flowMessageItem{"MPLS3Label", fmt.Sprintf("%v", fmsg.MPLS3Label)}) 223 | case "MPLSLastTTL": 224 | message = append(message, flowMessageItem{"MPLSLastTTL", fmt.Sprintf("%v", fmsg.MPLSLastTTL)}) 225 | case "MPLSLastLabel": 226 | message = append(message, flowMessageItem{"MPLSLastLabel", fmt.Sprintf("%v", fmsg.MPLSLastLabel)}) 227 | case "HasPPP": 228 | message = append(message, flowMessageItem{"HasPPP", fmt.Sprintf("%v", fmsg.HasPPP)}) 229 | case "PPPAddressControl": 230 | message = append(message, flowMessageItem{"PPPAddressControl", fmt.Sprintf("%v", fmsg.PPPAddressControl)}) 231 | } 232 | } 233 | 234 | return message 235 | } 236 | 237 | func FlowMessageToString(fmsg *flowmessage.FlowMessage) string { 238 | filteredMessage := flowMessageFiltered(fmsg) 239 | message := make([]string, len(filteredMessage)) 240 | for i, m := range filteredMessage { 241 | message[i] = m.Name + ":" + m.Value 242 | } 243 | return strings.Join(message, " ") 244 | } 245 | 246 | func FlowMessageToJSON(fmsg *flowmessage.FlowMessage) string { 247 | filteredMessage := flowMessageFiltered(fmsg) 248 | message := make([]string, len(filteredMessage)) 249 | for i, m := range filteredMessage { 250 | message[i] = fmt.Sprintf("\"%s\":\"%s\"", m.Name, m.Value) 251 | } 252 | return "{" + strings.Join(message, ",") + "}" 253 | } 254 | 255 | func UDPRoutine(name string, decodeFunc decoder.DecoderFunc, workers int, addr string, port int, sockReuse bool, logger Logger) error { 256 | ecb := DefaultErrorCallback{ 257 | Logger: logger, 258 | } 259 | 260 | decoderParams := decoder.DecoderParams{ 261 | DecoderFunc: decodeFunc, 262 | DoneCallback: DefaultAccountCallback, 263 | ErrorCallback: ecb.Callback, 264 | } 265 | 266 | processor := decoder.CreateProcessor(workers, decoderParams, name) 267 | processor.Start() 268 | 269 | addrUDP := net.UDPAddr{ 270 | IP: net.ParseIP(addr), 271 | Port: port, 272 | } 273 | 274 | var udpconn *net.UDPConn 275 | var err error 276 | 277 | if sockReuse { 278 | pconn, err := reuseport.ListenPacket("udp", addrUDP.String()) 279 | if err != nil { 280 | return err 281 | } 282 | defer pconn.Close() 283 | var ok bool 284 | udpconn, ok = pconn.(*net.UDPConn) 285 | if !ok { 286 | return err 287 | } 288 | } else { 289 | udpconn, err = net.ListenUDP("udp", &addrUDP) 290 | if err != nil { 291 | return err 292 | } 293 | defer udpconn.Close() 294 | } 295 | 296 | payload := make([]byte, 9000) 297 | 298 | localIP := addrUDP.IP.String() 299 | if addrUDP.IP == nil { 300 | localIP = "" 301 | } 302 
| 303 | for { 304 | size, pktAddr, _ := udpconn.ReadFromUDP(payload) 305 | payloadCut := make([]byte, size) 306 | copy(payloadCut, payload[0:size]) 307 | 308 | baseMessage := BaseMessage{ 309 | Src: pktAddr.IP, 310 | Port: pktAddr.Port, 311 | Payload: payloadCut, 312 | } 313 | processor.ProcessMessage(baseMessage) 314 | 315 | MetricTrafficBytes.With( 316 | prometheus.Labels{ 317 | "remote_ip": pktAddr.IP.String(), 318 | "remote_port": strconv.Itoa(pktAddr.Port), 319 | "local_ip": localIP, 320 | "local_port": strconv.Itoa(addrUDP.Port), 321 | "type": name, 322 | }). 323 | Add(float64(size)) 324 | MetricTrafficPackets.With( 325 | prometheus.Labels{ 326 | "remote_ip": pktAddr.IP.String(), 327 | "remote_port": strconv.Itoa(pktAddr.Port), 328 | "local_ip": localIP, 329 | "local_port": strconv.Itoa(addrUDP.Port), 330 | "type": name, 331 | }). 332 | Inc() 333 | MetricPacketSizeSum.With( 334 | prometheus.Labels{ 335 | "remote_ip": pktAddr.IP.String(), 336 | "remote_port": strconv.Itoa(pktAddr.Port), 337 | "local_ip": localIP, 338 | "local_port": strconv.Itoa(addrUDP.Port), 339 | "type": name, 340 | }). 341 | Observe(float64(size)) 342 | } 343 | } 344 | --------------------------------------------------------------------------------
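The files above fit together as a pipeline: UDPRoutine in utils/utils.go reads UDP packets and feeds a decoder worker pool, the protocol-specific StateNetFlow / StateNFLegacy / StateSFlow decoders turn payloads into FlowMessage structs, a Transport publishes them, and the counters and summaries in utils/metrics.go are registered on the Prometheus default registry. The sketch below shows one hypothetical way to wire these exported types into a standalone collector; the file name (main.go), the listen ports (2055, 6343, 8080) and the use of promhttp for the metrics endpoint are illustrative assumptions, not something this repository prescribes.

package main

import (
	"flag"
	"log"
	"net/http"

	"github.com/cloudflare/goflow/v3/utils"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Parse -message.fields (declared in utils/utils.go) so the default
	// transports print the selected FlowMessage fields.
	flag.Parse()

	// The metrics in utils/metrics.go are registered on the Prometheus
	// default registry, so the stock promhttp handler exposes them.
	go func() {
		http.Handle("/metrics", promhttp.Handler())
		log.Fatal(http.ListenAndServe(":8080", nil))
	}()

	// sFlow collector printing every decoded FlowMessage to stdout;
	// UDP 6343 is only a conventional choice for this sketch.
	sfState := &utils.StateSFlow{Transport: &utils.DefaultLogTransport{}}
	go func() {
		log.Fatal(sfState.FlowRoutine(1, "", 6343, false))
	}()

	// NetFlow v9 / IPFIX collector; FlowRoutine blocks while serving,
	// so it runs on the main goroutine. UDP 2055 is illustrative.
	nfState := &utils.StateNetFlow{Transport: &utils.DefaultLogTransport{}}
	log.Fatal(nfState.FlowRoutine(1, "", 2055, false))
}

Leaving Logger nil is tolerated here because DefaultErrorCallback nil-checks it before logging; a production setup would normally pass a structured logger satisfying the Logger interface and a real transport rather than the stdout one.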
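The Transport interface in utils/utils.go (a single Publish method) is the extension point for where decoded flows go; DefaultLogTransport and DefaultJSONTransport above are the built-in stdout variants. Below is a minimal, hypothetical wrapper transport, assumed to sit alongside the main.go sketch above, that counts published messages before delegating to another Transport. The atomic counter is used because UDPRoutine can run several decoder workers, so Publish may be called concurrently.

package main

import (
	"sync/atomic"

	flowmessage "github.com/cloudflare/goflow/v3/pb"
	"github.com/cloudflare/goflow/v3/utils"
)

// CountingTransport is an illustrative Transport that counts every published
// FlowMessage and then hands the batch to an optional downstream Transport.
type CountingTransport struct {
	Next  utils.Transport
	total uint64
}

func (t *CountingTransport) Publish(msgs []*flowmessage.FlowMessage) {
	atomic.AddUint64(&t.total, uint64(len(msgs)))
	if t.Next != nil {
		t.Next.Publish(msgs)
	}
}

// Total reports how many FlowMessages have been published so far.
func (t *CountingTransport) Total() uint64 {
	return atomic.LoadUint64(&t.total)
}

It plugs in wherever a Transport is expected, for example &utils.StateNetFlow{Transport: &CountingTransport{Next: &utils.DefaultLogTransport{}}}.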