├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── build.yml │ ├── docker-release.yml │ ├── docker-release_registry_github.yml │ ├── docker.yml │ ├── docker_registry_github.yml │ └── release.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── cmd ├── enricher │ ├── main.go │ └── pb │ │ ├── flowext.pb.go │ │ └── flowext.proto └── goflow2 │ ├── main.go │ └── mapping.yaml ├── compose ├── elk │ ├── README.md │ ├── docker-compose.yml │ └── logstash.conf └── kcg │ ├── README.md │ ├── clickhouse │ ├── create.sh │ ├── flow.proto │ └── protocols.csv │ ├── docker-compose.yml │ ├── grafana │ ├── Dockerfile │ ├── dashboards.yml │ ├── dashboards │ │ ├── perfs.json │ │ └── viz-ch.json │ └── datasources-ch.yml │ └── prometheus │ └── prometheus.yml ├── decoders ├── netflow │ ├── format.go │ ├── ipfix.go │ ├── netflow.go │ ├── netflow_test.go │ ├── nfv9.go │ ├── packet.go │ ├── templates.go │ └── templates_test.go ├── netflowlegacy │ ├── format.go │ ├── netflow.go │ ├── netflow_test.go │ └── packet.go ├── sflow │ ├── datastructure.go │ ├── format.go │ ├── packet.go │ ├── sflow.go │ └── sflow_test.go └── utils │ ├── types.go │ ├── utils.go │ └── utils_test.go ├── docs ├── agents.md ├── contributors.md ├── logs.md ├── mapping.md ├── performance.md ├── protobuf.md └── protocols.md ├── format ├── binary │ └── binary.go ├── format.go ├── json │ └── json.go └── text │ └── text.go ├── go.mod ├── go.sum ├── graphics └── diagram.png ├── metrics ├── decoder.go ├── metrics.go ├── producer.go ├── receiver.go └── templates.go ├── package ├── goflow2.env └── goflow2.service ├── pb ├── flow.pb.go └── flow.proto ├── producer ├── producer.go ├── proto │ ├── config.go │ ├── config_impl.go │ ├── messages.go │ ├── messages_test.go │ ├── producer_nf.go │ ├── producer_nflegacy.go │ ├── producer_packet.go │ ├── producer_packet_test.go │ ├── producer_sf.go │ ├── producer_test.go │ ├── proto.go │ ├── reflect.go │ ├── 
reflect_test.go │ └── render.go └── raw │ └── raw.go ├── transport ├── file │ └── transport.go ├── kafka │ ├── kafka.go │ └── scram_client.go └── transport.go └── utils ├── debug ├── debug.go ├── decoder.go └── producer.go ├── mute.go ├── mute_test.go ├── pipe.go ├── templates └── templates.go ├── udp.go └── udp_test.go /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Run GoFlow2 with arguments '...' 16 | 2. Receive samples '....' 17 | 3. See error 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Captures** 23 | If applicable, add output (JSON, protobuf), packet captures and device configuration. 24 | 25 | **Sampler device:** 26 | - Brand: [e.g. Cisco, Juniper, Arista, Mikrotik] 27 | - Model: [e.g. MX, QFX] 28 | - Version: [e.g. 22] 29 | - Estimated flow traffic: [e.g. 5000 samples per second] 30 | 31 | **GoFlow2:** 32 | - Version: [e.g. v2.0.0] 33 | - Environment: [e.g. Kubernetes, Docker, Debian package] 34 | - OS: [e.g. Linux Ubuntu Server 23.04] 35 | 36 | **Additional context** 37 | Add any other context about the problem here. 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. 
I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or packet captures about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | build: 11 | name: Build 12 | runs-on: ubuntu-latest 13 | steps: 14 | 15 | - name: Set up Go 1.x 16 | uses: actions/setup-go@v2 17 | with: 18 | go-version: ^1.21 19 | 20 | - name: Check out code into the Go module directory 21 | uses: actions/checkout@v1 22 | # note: @v2 seem to be https://github.com/actions/checkout/issues/290 23 | # which only works IF the tags are pushed before/same time as the commit 24 | # otherwise, display previous tag 25 | with: 26 | fetch-depth: 0 27 | 28 | - name: Tidy Go Modules 29 | run: go mod tidy 30 | 31 | - name: Test & Vet 32 | run: make test vet 33 | 34 | - name: Build 35 | run: | 36 | GOOS=linux make build 37 | GOOS=darwin make build 38 | GOOS=windows EXTENSION=.exe make build 39 | ARCH=arm64 GOARCH=arm64 GOOS=linux make build 40 | 41 | - name: Install fpm 42 | run: | 43 | sudo apt-get update 44 | sudo apt-get install -y rpm ruby ruby-dev 45 | sudo gem install fpm 46 | 47 | - name: Package 48 | run: | 49 | make 
package-deb package-rpm 50 | ARCH=arm64 make package-deb package-rpm 51 | 52 | - name: Upload Artifact 53 | uses: actions/upload-artifact@v4 54 | with: 55 | name: dist 56 | path: dist/* 57 | retention-days: 14 58 | -------------------------------------------------------------------------------- /.github/workflows/docker-release.yml: -------------------------------------------------------------------------------- 1 | name: DockerRelease 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | build: 10 | name: DockerRelease 11 | runs-on: ubuntu-latest 12 | steps: 13 | 14 | - name: Set up Go 1.x 15 | uses: actions/setup-go@v2 16 | with: 17 | go-version: ^1.21 18 | 19 | - name: Check out code into the Go module directory 20 | uses: actions/checkout@v1 21 | with: 22 | fetch-depth: 0 23 | 24 | - name: Set up QEMU 25 | uses: docker/setup-qemu-action@v1 26 | 27 | - name: Set up Docker Buildx 28 | uses: docker/setup-buildx-action@v1 29 | 30 | - name: Login to DockerHub 31 | uses: docker/login-action@v1 32 | with: 33 | username: ${{ secrets.DOCKERHUB_USERNAME }} 34 | password: ${{ secrets.DOCKERHUB_TOKEN }} 35 | 36 | - name: Build 37 | run: | 38 | export VERSION=$(git describe --tags --abbrev=0 HEAD) 39 | DOCKER_SUFFIX=-amd64 DOCKER_CMD='buildx build --push --platform linux/amd64' make docker 40 | DOCKER_SUFFIX=-arm64 DOCKER_CMD='buildx build --push --platform linux/arm64/v8' make docker 41 | make docker-manifest-release-buildx 42 | VERSION=latest make docker-manifest-release-buildx 43 | -------------------------------------------------------------------------------- /.github/workflows/docker-release_registry_github.yml: -------------------------------------------------------------------------------- 1 | name: DockerRelease 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | env: 8 | DOCKER_REPO: ghcr.io/${{ github.repository_owner }}/ 9 | 10 | jobs: 11 | build: 12 | name: DockerRelease 13 | runs-on: ubuntu-latest 14 | steps: 15 | 16 | - name: Set up Go 1.x 17 | uses: 
actions/setup-go@v2 18 | with: 19 | go-version: ^1.21 20 | 21 | - name: Check out code into the Go module directory 22 | uses: actions/checkout@v1 23 | with: 24 | fetch-depth: 0 25 | 26 | - name: Set up QEMU 27 | uses: docker/setup-qemu-action@v1 28 | 29 | - name: Set up Docker Buildx 30 | uses: docker/setup-buildx-action@v1 31 | 32 | - name: Login to DockerHub 33 | uses: docker/login-action@v1 34 | with: 35 | username: ${{ secrets.DOCKERHUB_USERNAME }} 36 | password: ${{ secrets.DOCKERHUB_TOKEN }} 37 | 38 | - name: Log in to registry 39 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin 40 | 41 | - name: Build 42 | run: | 43 | export VERSION=$(git describe --tags --abbrev=0 HEAD) 44 | DOCKER_SUFFIX=-amd64 DOCKER_CMD='buildx build --push --platform linux/amd64' make docker 45 | DOCKER_SUFFIX=-arm64 DOCKER_CMD='buildx build --push --platform linux/arm64/v8' make docker 46 | make docker-manifest-release-buildx 47 | VERSION=latest make docker-manifest-release-buildx 48 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | 7 | jobs: 8 | build: 9 | name: Docker 10 | runs-on: ubuntu-latest 11 | steps: 12 | 13 | - name: Set up Go 1.x 14 | uses: actions/setup-go@v2 15 | with: 16 | go-version: ^1.21 17 | 18 | - name: Check out code into the Go module directory 19 | uses: actions/checkout@v1 20 | with: 21 | fetch-depth: 0 22 | 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v1 25 | 26 | - name: Set up Docker Buildx 27 | uses: docker/setup-buildx-action@v1 28 | 29 | - name: Login to DockerHub 30 | uses: docker/login-action@v1 31 | with: 32 | username: ${{ secrets.DOCKERHUB_USERNAME }} 33 | password: ${{ secrets.DOCKERHUB_TOKEN }} 34 | 35 | - name: Build 36 | run: | 37 | DOCKER_SUFFIX=-amd64 DOCKER_CMD='buildx build --push 
--platform linux/amd64' make docker 38 | DOCKER_SUFFIX=-arm64 DOCKER_CMD='buildx build --push --platform linux/arm64/v8' make docker 39 | make docker-manifest-buildx 40 | VERSION=latest make docker-manifest-release-buildx 41 | -------------------------------------------------------------------------------- /.github/workflows/docker_registry_github.yml: -------------------------------------------------------------------------------- 1 | name: Docker Registry GitHub 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | 7 | env: 8 | DOCKER_REPO: ghcr.io/${{ github.repository_owner }}/ 9 | 10 | jobs: 11 | build: 12 | name: Docker Registry GitHub 13 | runs-on: ubuntu-latest 14 | steps: 15 | 16 | - name: Set up Go 1.x 17 | uses: actions/setup-go@v2 18 | with: 19 | go-version: ^1.21 20 | 21 | - name: Check out code into the Go module directory 22 | uses: actions/checkout@v1 23 | with: 24 | fetch-depth: 0 25 | 26 | - name: Set up QEMU 27 | uses: docker/setup-qemu-action@v1 28 | 29 | - name: Set up Docker Buildx 30 | uses: docker/setup-buildx-action@v1 31 | 32 | - name: Log in to registry 33 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin 34 | 35 | - name: Build 36 | run: | 37 | DOCKER_SUFFIX=-amd64 DOCKER_CMD='buildx build --push --platform linux/amd64' make docker 38 | DOCKER_SUFFIX=-arm64 DOCKER_CMD='buildx build --push --platform linux/arm64/v8' make docker 39 | make docker-manifest-buildx 40 | VERSION=latest make docker-manifest-release-buildx 41 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | build: 10 | name: Release 11 | runs-on: ubuntu-latest 12 | steps: 13 | 14 | - name: Set up Go 1.x 15 | uses: actions/setup-go@v2 16 | with: 17 | go-version: ^1.21 18 | 19 | - name: Check out code into the Go module 
directory 20 | uses: actions/checkout@v1 21 | with: 22 | fetch-depth: 0 23 | 24 | - name: Install fpm 25 | run: | 26 | sudo apt-get update 27 | sudo apt-get install -y rpm ruby ruby-dev 28 | sudo gem install fpm 29 | 30 | - name: Build 31 | run: | 32 | export VERSION=$(git describe --tags --abbrev=0 HEAD) 33 | GOOS=linux make build 34 | GOOS=darwin make build 35 | GOOS=windows EXTENSION=.exe make build 36 | make package-deb package-rpm 37 | ARCH=arm64 GOARCH=arm64 GOOS=linux make build 38 | ARCH=arm64 make package-deb package-rpm 39 | 40 | - name: Create Release 41 | id: create_release 42 | uses: actions/create-release@v1 43 | env: 44 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 45 | with: 46 | tag_name: ${{ github.ref }} 47 | release_name: Release ${{ github.ref }} 48 | draft: false 49 | prerelease: false 50 | 51 | - name: Upload Release Asset 52 | uses: actions/github-script@v2 53 | with: 54 | github-token: ${{secrets.GITHUB_TOKEN}} 55 | script: | 56 | const fs = require('fs').promises; 57 | const upload_url = '${{ steps.create_release.outputs.upload_url }}'; 58 | for (let file of await fs.readdir('./dist')) { 59 | console.log('uploading', file); 60 | await github.repos.uploadReleaseAsset({ 61 | url: upload_url, 62 | name: file, 63 | data: await fs.readFile(`./dist/${file}`) 64 | }); 65 | } 66 | 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dist/* 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine as builder 2 | ARG LDFLAGS="" 3 | 4 | RUN apk --update --no-cache add git build-base gcc 5 | 6 | COPY . 
/build 7 | WORKDIR /build 8 | 9 | RUN go build -ldflags "${LDFLAGS}" -o goflow2 cmd/goflow2/main.go 10 | 11 | FROM alpine:latest 12 | ARG src_dir 13 | ARG VERSION="" 14 | ARG CREATED="" 15 | ARG DESCRIPTION="" 16 | ARG NAME="" 17 | ARG MAINTAINER="" 18 | ARG URL="" 19 | ARG LICENSE="" 20 | ARG REV="" 21 | 22 | LABEL org.opencontainers.image.created="${CREATED}" 23 | LABEL org.opencontainers.image.authors="${MAINTAINER}" 24 | LABEL org.opencontainers.image.url="${URL}" 25 | LABEL org.opencontainers.image.title="${NAME}" 26 | LABEL org.opencontainers.image.version="${VERSION}" 27 | LABEL org.opencontainers.image.description="${DESCRIPTION}" 28 | LABEL org.opencontainers.image.licenses="${LICENSE}" 29 | LABEL org.opencontainers.image.revision="${REV}" 30 | 31 | RUN apk update --no-cache && \ 32 | adduser -S -D -H -h / flow 33 | USER flow 34 | COPY --from=builder /build/goflow2 / 35 | 36 | ENTRYPOINT ["./goflow2"] 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2021, NetSampler 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 
19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | EXTENSION ?= 2 | DIST_DIR ?= dist/ 3 | GOOS ?= linux 4 | ARCH ?= $(shell uname -m) 5 | BUILDINFOSDET ?= 6 | 7 | DOCKER_REPO ?= netsampler/ 8 | NAME := goflow2 9 | VERSION ?= $(shell git describe --abbrev --long HEAD) 10 | ABBREV ?= $(shell git rev-parse --short HEAD) 11 | COMMIT ?= $(shell git rev-parse HEAD) 12 | TAG ?= $(shell git describe --tags --abbrev=0 HEAD) 13 | VERSION_PKG ?= $(shell echo $(VERSION) | sed 's/^v//g') 14 | LICENSE := BSD-3-Clause 15 | URL := https://github.com/netsampler/goflow2 16 | DESCRIPTION := GoFlow2: Open-Source and Scalable Network Sample Collector 17 | DATE := $(shell date +%FT%T%z) 18 | BUILDINFOS ?= ($(DATE)$(BUILDINFOSDET)) 19 | LDFLAGS ?= '-X main.version=$(VERSION) -X main.buildinfos=$(BUILDINFOS)' 20 | MAINTAINER := lspgn@users.noreply.github.com 21 | DOCKER_BIN ?= docker 22 | DOCKER_CMD ?= build 23 | DOCKER_SUFFIX ?= 24 | 25 | OUTPUT := $(DIST_DIR)goflow2-$(VERSION_PKG)-$(GOOS)-$(ARCH)$(EXTENSION) 26 | 27 | .PHONY: proto 28 | proto: 29 | @echo 
generating protobuf 30 | protoc --go_opt=paths=source_relative --go_out=. pb/*.proto 31 | protoc --go_opt=paths=source_relative --go_out=. cmd/enricher/pb/*.proto 32 | 33 | .PHONY: vet 34 | vet: 35 | go vet cmd/goflow2/main.go 36 | 37 | .PHONY: test 38 | test: 39 | go test -v ./... 40 | 41 | .PHONY: prepare 42 | prepare: 43 | mkdir -p $(DIST_DIR) 44 | 45 | PHONY: clean 46 | clean: 47 | rm -rf $(DIST_DIR) 48 | 49 | .PHONY: build 50 | build: prepare 51 | CGO_ENABLED=0 go build -ldflags $(LDFLAGS) -o $(OUTPUT) cmd/goflow2/main.go 52 | 53 | .PHONY: docker 54 | docker: 55 | $(DOCKER_BIN) $(DOCKER_CMD) \ 56 | --build-arg LDFLAGS=$(LDFLAGS) \ 57 | --build-arg CREATED="$(DATE)" \ 58 | --build-arg MAINTAINER="$(MAINTAINER)" \ 59 | --build-arg URL="$(URL)" \ 60 | --build-arg NAME="$(NAME)" \ 61 | --build-arg DESCRIPTION="$(DESCRIPTION)" \ 62 | --build-arg LICENSE="$(LICENSE)" \ 63 | --build-arg VERSION="$(VERSION)" \ 64 | --build-arg REV="$(COMMIT)" \ 65 | -t $(DOCKER_REPO)$(NAME):$(ABBREV)$(DOCKER_SUFFIX) . 
66 | 67 | .PHONY: push-docker 68 | push-docker: 69 | $(DOCKER_BIN) push $(DOCKER_REPO)$(NAME):$(ABBREV)$(DOCKER_SUFFIX) 70 | 71 | .PHONY: docker-manifest 72 | docker-manifest: 73 | $(DOCKER_BIN) manifest create $(DOCKER_REPO)$(NAME):$(ABBREV) \ 74 | --amend $(DOCKER_REPO)$(NAME):$(ABBREV)-amd64 \ 75 | --amend $(DOCKER_REPO)$(NAME):$(ABBREV)-arm64 76 | $(DOCKER_BIN) manifest push $(DOCKER_REPO)$(NAME):$(ABBREV) 77 | 78 | $(DOCKER_BIN) manifest create $(DOCKER_REPO)$(NAME):latest \ 79 | --amend $(DOCKER_REPO)$(NAME):$(ABBREV)-amd64 \ 80 | --amend $(DOCKER_REPO)$(NAME):$(ABBREV)-arm64 81 | $(DOCKER_BIN) manifest push $(DOCKER_REPO)$(NAME):latest 82 | 83 | .PHONY: docker-manifest-buildx 84 | docker-manifest-buildx: 85 | $(DOCKER_BIN) buildx imagetools create \ 86 | -t $(DOCKER_REPO)$(NAME):$(ABBREV) \ 87 | $(DOCKER_REPO)$(NAME):$(ABBREV)-amd64 \ 88 | $(DOCKER_REPO)$(NAME):$(ABBREV)-arm64 89 | 90 | .PHONY: docker-manifest-release 91 | docker-manifest-release: 92 | $(DOCKER_BIN) manifest create $(DOCKER_REPO)$(NAME):$(VERSION) \ 93 | --amend $(DOCKER_REPO)$(NAME):$(ABBREV)-amd64 \ 94 | --amend $(DOCKER_REPO)$(NAME):$(ABBREV)-arm64 95 | $(DOCKER_BIN) manifest push $(DOCKER_REPO)$(NAME):$(VERSION) 96 | 97 | .PHONY: docker-manifest-release-buildx 98 | docker-manifest-release-buildx: 99 | $(DOCKER_BIN) buildx imagetools create \ 100 | -t $(DOCKER_REPO)$(NAME):$(VERSION) \ 101 | $(DOCKER_REPO)$(NAME):$(ABBREV)-amd64 \ 102 | $(DOCKER_REPO)$(NAME):$(ABBREV)-arm64 103 | 104 | .PHONY: package-deb 105 | package-deb: prepare 106 | fpm -s dir -t deb -n $(NAME) -v $(VERSION_PKG) \ 107 | --maintainer "$(MAINTAINER)" \ 108 | --description "$(DESCRIPTION)" \ 109 | --url "$(URL)" \ 110 | --architecture $(ARCH) \ 111 | --license "$(LICENSE)" \ 112 | --package $(DIST_DIR) \ 113 | $(OUTPUT)=/usr/bin/goflow2 \ 114 | package/goflow2.service=/lib/systemd/system/goflow2.service \ 115 | package/goflow2.env=/etc/default/goflow2 116 | 117 | .PHONY: package-rpm 118 | package-rpm: prepare 119 | fpm 
-s dir -t rpm -n $(NAME) -v $(VERSION_PKG) \ 120 | --maintainer "$(MAINTAINER)" \ 121 | --description "$(DESCRIPTION)" \ 122 | --url "$(URL)" \ 123 | --architecture $(ARCH) \ 124 | --license "$(LICENSE) "\ 125 | --package $(DIST_DIR) \ 126 | $(OUTPUT)=/usr/bin/goflow2 \ 127 | package/goflow2.service=/lib/systemd/system/goflow2.service \ 128 | package/goflow2.env=/etc/default/goflow2 129 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GoFlow2 2 | 3 | [![Build Status](https://github.com/netsampler/goflow2/workflows/Build/badge.svg)](https://github.com/netsampler/goflow2/actions?query=workflow%3ABuild) 4 | [![Go Reference](https://pkg.go.dev/badge/github.com/netsampler/goflow2.svg)](https://pkg.go.dev/github.com/netsampler/goflow2) 5 | 6 | This application is a NetFlow/IPFIX/sFlow collector in Go. 7 | 8 | It gathers network information (IP, interfaces, routers) from different flow protocols, 9 | serializes it in a common format. 10 | 11 | You will want to use GoFlow if: 12 | * You receive a decent amount of network samples and need horizontal scalability 13 | * Have protocol diversity and need a consistent format 14 | * Require raw samples and build aggregation and custom enrichment 15 | 16 | This software is the entry point of a pipeline. The storage, transport, enrichment, graphing, alerting are 17 | not provided. 18 | 19 | ![GoFlow2 System diagram](/graphics/diagram.png) 20 | 21 | ## Origins 22 | 23 | This work is a fork of a previous [open-source GoFlow code](https://github.com/cloudflare/goflow) built and used at Cloudflare. 24 | It lives in its own GitHub organization to be maintained more easily. 25 | 26 | Among the differences with the original code: 27 | The serializer and transport options have been revamped to make this program more user-friendly 28 | and target new use-cases like logging providers. 
29 | Minimal changes in the decoding libraries. 30 | 31 | ## Modularity 32 | 33 | In order to enable load-balancing and optimizations, the GoFlow2 library has a `decoder` which converts 34 | the payload of a flow packet into a structure. 35 | 36 | The `producer` converts the samples into another format. 37 | Out of the box, this repository provides a protobuf producer (`pb/flow.pb`) 38 | and a raw producer. 39 | In the case of the protobuf producer, the records in a single flow packet 40 | are extracted and made in their own protobuf. Custom mapping allows 41 | to add new fields without rebuilding the proto. 42 | 43 | The `format` directory offers various utilities to format a message. It calls specific 44 | functions to marshal as JSON or text for instance. 45 | 46 | The `transport` provides different way of processing the message. Either sending it via Kafka or 47 | send it to a file (or stdout). 48 | 49 | GoFlow2 is a wrapper of all the functions and chains them. 50 | 51 | You can build your own collector using this base and replace parts: 52 | * Use different transport (e.g: RabbitMQ instead of Kafka) 53 | * Convert to another format (e.g: Cap'n Proto, Avro, instead of protobuf) 54 | * Decode different samples (e.g: not only IP networks, add MPLS) 55 | * Different metrics system (e.g: [OpenTelemetry](https://opentelemetry.io/)) 56 | 57 | ### Protocol difference 58 | 59 | The sampling protocols have distinct features: 60 | 61 | **sFlow** is a stateless protocol which sends the full header of a packet with router information 62 | (interfaces, destination AS) while **NetFlow/IPFIX** rely on templates that contain fields (e.g: source IPv6). 63 | 64 | The sampling rate in NetFlow/IPFIX is provided by **Option Data Sets**. This is why it can take a few minutes 65 | for the packets to be decoded until all the templates are received (**Option Template** and **Data Template**). 
66 | 67 | Both of these protocols bundle multiple samples (**Data Set** in NetFlow/IPFIX and **Flow Sample** in sFlow) 68 | in one packet. 69 | 70 | The advantages of using an abstract network flow format, such as protobuf, is it enables summing over the 71 | protocols (e.g: per ASN or per port, rather than per (ASN, router) and (port, router)). 72 | 73 | To read more about the protocols and how they are mapped inside, check out [page](/docs/protocols.md) 74 | 75 | ### Features of GoFlow2 76 | 77 | Collection: 78 | * NetFlow v5 79 | * IPFIX/NetFlow v9 (sampling rate provided by the Option Data Set) 80 | * sFlow v5 81 | 82 | (adding NetFlow v1,7,8 is being evaluated) 83 | 84 | Production: 85 | * Convert to protobuf or json 86 | * Prints to the console/file 87 | * Sends to Kafka and partition 88 | 89 | Monitoring via Prometheus metrics 90 | 91 | ## Get started 92 | 93 | To read about agents that samples network traffic, check this [page](/docs/agents.md). 94 | 95 | To set up the collector, download the latest release corresponding to your OS 96 | and run the following command (the binaries have a suffix with the version): 97 | 98 | ```bash 99 | $ ./goflow2 100 | ``` 101 | 102 | By default, this command will launch an sFlow collector on port `:6343` and 103 | a NetFlowV9/IPFIX collector on port `:2055`. 104 | 105 | By default, the samples received will be printed in JSON format on the stdout. 
106 | 107 | ```json 108 | { 109 | "type": "SFLOW_5", 110 | "time_received_ns": 1681583295157626000, 111 | "sequence_num": 2999, 112 | "sampling_rate": 100, 113 | "sampler_address": "192.168.0.1", 114 | "time_flow_start_ns": 1681583295157626000, 115 | "time_flow_end_ns": 1681583295157626000, 116 | "bytes": 1500, 117 | "packets": 1, 118 | "src_addr": "fd01::1", 119 | "dst_addr": "fd01::2", 120 | "etype": "IPv6", 121 | "proto": "TCP", 122 | "src_port": 443, 123 | "dst_port": 50001 124 | } 125 | ``` 126 | 127 | If you are using a log integration (e.g: Loki with Promtail, Splunk, Fluentd, Google Cloud Logs, etc.), 128 | just send the output into a file. 129 | 130 | ```bash 131 | $ ./goflow2 -transport.file /var/logs/goflow2.log 132 | ``` 133 | 134 | To enable Kafka and send protobuf, use the following arguments: 135 | 136 | ```bash 137 | $ ./goflow2 -transport=kafka \ 138 | -transport.kafka.brokers=localhost:9092 \ 139 | -transport.kafka.topic=flows \ 140 | -format=bin 141 | ``` 142 | 143 | By default, the distribution will be randomized. 144 | In order to partition the field, you need to configure the `key` 145 | in the formatter. 146 | 147 | By default, compression is disabled when sending data to Kafka. 148 | To change the kafka compression type of the producer side configure the following option: 149 | 150 | ``` 151 | -transport.kafka.compression.type=gzip 152 | ``` 153 | The list of codecs is available in the [Sarama documentation](https://pkg.go.dev/github.com/Shopify/sarama#CompressionCodec). 154 | 155 | 156 | By default, the collector will listen for IPFIX/NetFlow V9/NetFlow V5 on port 2055 157 | and sFlow on port 6343. 158 | To change the sockets binding, you can set the `-listen` argument and a URI 159 | for each protocol (`netflow`, `sflow` or `flow` for both as scheme) separated by a comma. 
160 | For instance, to create 4 parallel sockets of sFlow and one of NetFlow, you can use: 161 | 162 | ```bash 163 | $ ./goflow2 -listen 'sflow://:6343?count=4,netflow://:2055' 164 | ``` 165 | 166 | More information about workers and resource usage is avaialble on the [Performance page](/docs/performance.md). 167 | 168 | ### Docker 169 | 170 | You can also run directly with a container: 171 | ``` 172 | $ sudo docker run -p 6343:6343/udp -p 2055:2055/udp -ti netsampler/goflow2:latest 173 | ``` 174 | 175 | ### Mapping extra fields 176 | 177 | In the case of exotic template fields or extra payload not supported by GoFlow2 178 | of out the box, it is possible to pass a mapping file using `-mapping mapping.yaml`. 179 | A [sample file](cmd/goflow2/mapping.yaml) is available in the `cmd/goflow2` directory. 180 | 181 | For instance, certain devices producing IPFIX use `ingressPhysicalInterface` (id: 252) 182 | and do not use `ingressInterface` (id: 10). Using the following you can have the interface mapped 183 | in the InIf protobuf field without changing the code. 184 | 185 | ```yaml 186 | ipfix: 187 | mapping: 188 | - field: 252 189 | destination: in_if 190 | - field: 253 191 | destination: out_if 192 | ``` 193 | 194 | ### Output format considerations 195 | 196 | The JSON format is advised only when consuming a small amount of data directly. 197 | For bigger workloads, the protobuf output format provides a binary representation 198 | and is preferred. 199 | It can also be extended with enrichment as long as the user keep the same IDs. 200 | 201 | If you want to develop applications, build `pb/flow.proto` into the language you want: 202 | When adding custom fields, picking a field ID ≥ 1000 is suggested. 203 | 204 | Check the docs for more information about [compiling protobuf](/docs/protobuf.md). 205 | 206 | ## Flow Pipeline 207 | 208 | A basic enrichment tool is available in the `cmd/enricher` directory. 
209 | You need to load the Maxmind GeoIP ASN and Country databases using `-db.asn` and `-db.country`. 210 | 211 | Running a flow enrichment system is as simple as a pipe. 212 | Once you plug the stdin of the enricher to the stdout of GoFlow in protobuf, 213 | the source and destination IP addresses will automatically be mapped 214 | with a database for Autonomous System Number and Country. 215 | Similar output options as GoFlow are provided. 216 | 217 | ```bash 218 | $ ./goflow2 -transport.file.sep= -format=bin | \ 219 | ./enricher -db.asn path-to/GeoLite2-ASN.mmdb -db.country path-to/GeoLite2-Country.mmdb 220 | ``` 221 | 222 | For a more scalable production setting, Kafka and protobuf are recommended. 223 | Stream operations (aggregation and filtering) can be done with stream-processor tools. 224 | For instance Flink, or the more recent Kafka Streams and kSQLdb. 225 | Direct storage can be done with data-warehouses like Clickhouse. 226 | 227 | Each protobuf message is prefixed by its varint length. 228 | 229 | This repository contains [examples of pipelines](./compose) with docker-compose. 230 | The available pipelines are: 231 | * [Kafka+Clickhouse+Grafana](./compose/kcg) 232 | * [Logstash+Elastic+Kibana](./compose/elk) 233 | 234 | ## Security notes and assumptions 235 | 236 | By default, the buffer for UDP is 9000 bytes. 237 | Protections were added to avoid DOS on sFlow since the various length fields are 32 bits. 238 | There are assumptions on how many records and list items a sample can have (eg: AS-Path). 239 | 240 | ## User stories 241 | 242 | Are you using GoFlow2 in production at scale? Add yourself here! 243 | 244 | ### Contributions 245 | 246 | This project welcomes pull-requests, whether it's documentation, 247 | instrumentation (e.g: docker-compose, metrics), internals (protocol libraries), 248 | integration (new CLI feature) or else! 249 | Just make sure to check for the use-cases via an issue. 
250 | 251 | This software would not exist without the testing and commits from 252 | its users and [contributors](docs/contributors.md). 253 | 254 | ## License 255 | 256 | Licensed under the BSD-3 License. 257 | -------------------------------------------------------------------------------- /cmd/enricher/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "flag" 8 | "fmt" 9 | "io" 10 | "log" 11 | "log/slog" 12 | "net" 13 | "os" 14 | "strings" 15 | 16 | flowmessage "github.com/netsampler/goflow2/v2/cmd/enricher/pb" 17 | 18 | // import various formatters 19 | "github.com/netsampler/goflow2/v2/format" 20 | _ "github.com/netsampler/goflow2/v2/format/binary" 21 | _ "github.com/netsampler/goflow2/v2/format/json" 22 | _ "github.com/netsampler/goflow2/v2/format/text" 23 | 24 | // import various transports 25 | "github.com/netsampler/goflow2/v2/transport" 26 | _ "github.com/netsampler/goflow2/v2/transport/file" 27 | _ "github.com/netsampler/goflow2/v2/transport/kafka" 28 | 29 | "github.com/oschwald/geoip2-golang" 30 | "google.golang.org/protobuf/encoding/protodelim" 31 | ) 32 | 33 | var ( 34 | version = "" 35 | buildinfos = "" 36 | AppVersion = "Enricher " + version + " " + buildinfos 37 | 38 | DbAsn = flag.String("db.asn", "", "IP->ASN database") 39 | DbCountry = flag.String("db.country", "", "IP->Country database") 40 | 41 | LogLevel = flag.String("loglevel", "info", "Log level") 42 | LogFmt = flag.String("logfmt", "normal", "Log formatter") 43 | 44 | SamplingRate = flag.Int("samplingrate", 0, "Set sampling rate (values > 0)") 45 | 46 | Format = flag.String("format", "json", fmt.Sprintf("Choose the format (available: %s)", strings.Join(format.GetFormats(), ", "))) 47 | Transport = flag.String("transport", "file", fmt.Sprintf("Choose the transport (available: %s)", strings.Join(transport.GetTransports(), ", "))) 48 | 49 | Version = flag.Bool("v", false, "Print 
version") 50 | ) 51 | 52 | func MapAsn(db *geoip2.Reader, addr []byte, dest *uint32) { 53 | entry, err := db.ASN(net.IP(addr)) 54 | if err != nil { 55 | return 56 | } 57 | *dest = uint32(entry.AutonomousSystemNumber) 58 | } 59 | func MapCountry(db *geoip2.Reader, addr []byte, dest *string) { 60 | entry, err := db.Country(net.IP(addr)) 61 | if err != nil { 62 | return 63 | } 64 | *dest = entry.Country.IsoCode 65 | } 66 | 67 | func MapFlow(dbAsn, dbCountry *geoip2.Reader, msg *ProtoProducerMessage) { 68 | if dbAsn != nil { 69 | MapAsn(dbAsn, msg.SrcAddr, &(msg.FlowMessageExt.SrcAs)) 70 | MapAsn(dbAsn, msg.DstAddr, &(msg.FlowMessageExt.DstAs)) 71 | } 72 | if dbCountry != nil { 73 | MapCountry(dbCountry, msg.SrcAddr, &(msg.FlowMessageExt.SrcCountry)) 74 | MapCountry(dbCountry, msg.DstAddr, &(msg.FlowMessageExt.DstCountry)) 75 | } 76 | } 77 | 78 | type ProtoProducerMessage struct { 79 | flowmessage.FlowMessageExt 80 | } 81 | 82 | func (m *ProtoProducerMessage) MarshalBinary() ([]byte, error) { 83 | buf := bytes.NewBuffer([]byte{}) 84 | _, err := protodelim.MarshalTo(buf, m) 85 | return buf.Bytes(), err 86 | } 87 | 88 | func main() { 89 | flag.Parse() 90 | 91 | if *Version { 92 | fmt.Println(AppVersion) 93 | os.Exit(0) 94 | } 95 | 96 | var loglevel slog.Level 97 | if err := loglevel.UnmarshalText([]byte(*LogLevel)); err != nil { 98 | log.Fatal("error parsing log level") 99 | } 100 | 101 | lo := slog.HandlerOptions{ 102 | Level: loglevel, 103 | } 104 | logger := slog.New(slog.NewTextHandler(os.Stderr, &lo)) 105 | 106 | switch *LogFmt { 107 | case "json": 108 | logger = slog.New(slog.NewJSONHandler(os.Stderr, &lo)) 109 | } 110 | 111 | slog.SetDefault(logger) 112 | 113 | var dbAsn, dbCountry *geoip2.Reader 114 | var err error 115 | if *DbAsn != "" { 116 | dbAsn, err = geoip2.Open(*DbAsn) 117 | if err != nil { 118 | slog.Error("error opening asn db", slog.String("error", err.Error())) 119 | os.Exit(1) 120 | } 121 | defer dbAsn.Close() 122 | } 123 | 124 | if *DbCountry != "" 
{ 125 | dbCountry, err = geoip2.Open(*DbCountry) 126 | if err != nil { 127 | slog.Error("error opening country db", slog.String("error", err.Error())) 128 | os.Exit(1) 129 | } 130 | defer dbCountry.Close() 131 | } 132 | 133 | formatter, err := format.FindFormat(*Format) 134 | if err != nil { 135 | log.Fatal(err) 136 | } 137 | 138 | transporter, err := transport.FindTransport(*Transport) 139 | if err != nil { 140 | slog.Error("error transporter", slog.String("error", err.Error())) 141 | os.Exit(1) 142 | } 143 | defer transporter.Close() 144 | 145 | logger.Info("starting enricher") 146 | 147 | rdr := bufio.NewReader(os.Stdin) 148 | 149 | var msg ProtoProducerMessage 150 | for { 151 | if err := protodelim.UnmarshalFrom(rdr, &msg); err != nil && errors.Is(err, io.EOF) { 152 | return 153 | } else if err != nil { 154 | slog.Error("error unmarshalling message", slog.String("error", err.Error())) 155 | continue 156 | } 157 | 158 | MapFlow(dbAsn, dbCountry, &msg) 159 | 160 | if *SamplingRate > 0 { 161 | msg.SamplingRate = uint64(*SamplingRate) 162 | } 163 | 164 | key, data, err := formatter.Format(&msg) 165 | if err != nil { 166 | slog.Error("error formatting message", slog.String("error", err.Error())) 167 | continue 168 | } 169 | 170 | err = transporter.Send(key, data) 171 | if err != nil { 172 | slog.Error("error sending message", slog.String("error", err.Error())) 173 | continue 174 | } 175 | 176 | msg.Reset() 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /cmd/enricher/pb/flowext.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package flowpb; 3 | option go_package = "github.com/netsampler/goflow2/cmd/enricher/pb;flowpb"; 4 | 5 | message FlowMessageExt { 6 | 7 | enum FlowType { 8 | FLOWUNKNOWN = 0; 9 | SFLOW_5 = 1; 10 | NETFLOW_V5 = 2; 11 | NETFLOW_V9 = 3; 12 | IPFIX = 4; 13 | } 14 | FlowType type = 1; 15 | 16 | uint64 time_received = 2; 17 | uint32 
sequence_num = 4; 18 | uint64 sampling_rate = 3; 19 | 20 | uint32 flow_direction = 42; 21 | 22 | // Sampler information 23 | bytes sampler_address = 11; 24 | 25 | // Found inside packet 26 | uint64 time_flow_start = 38; 27 | uint64 time_flow_end = 5; 28 | uint64 time_flow_start_ms = 63; 29 | uint64 time_flow_end_ms = 64; 30 | 31 | // Size of the sampled packet 32 | uint64 bytes = 9; 33 | uint64 packets = 10; 34 | 35 | // Source/destination addresses 36 | bytes src_addr = 6; 37 | bytes dst_addr = 7; 38 | 39 | // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...) 40 | uint32 etype = 30; 41 | 42 | // Layer 4 protocol 43 | uint32 proto = 20; 44 | 45 | // Ports for UDP and TCP 46 | uint32 src_port = 21; 47 | uint32 dst_port = 22; 48 | 49 | // Interfaces 50 | uint32 in_if = 18; 51 | uint32 out_if = 19; 52 | 53 | // Ethernet information 54 | uint64 src_mac = 27; 55 | uint64 dst_mac = 28; 56 | 57 | // Vlan 58 | uint32 src_vlan = 33; 59 | uint32 dst_vlan = 34; 60 | // 802.1q VLAN in sampled packet 61 | uint32 vlan_id = 29; 62 | 63 | // VRF 64 | uint32 ingress_vrf_id = 39; 65 | uint32 egress_vrf_id = 40; 66 | 67 | // IP and TCP special flags 68 | uint32 ip_tos = 23; 69 | uint32 forwarding_status = 24; 70 | uint32 ip_ttl = 25; 71 | uint32 tcp_flags = 26; 72 | uint32 icmp_type = 31; 73 | uint32 icmp_code = 32; 74 | uint32 ipv6_flow_label = 37; 75 | // Fragments (IPv4/IPv6) 76 | uint32 fragment_id = 35; 77 | uint32 fragment_offset = 36; 78 | uint32 bi_flow_direction = 41; 79 | 80 | // Autonomous system information 81 | uint32 src_as = 14; 82 | uint32 dst_as = 15; 83 | 84 | bytes next_hop = 12; 85 | uint32 next_hop_as = 13; 86 | 87 | // Prefix size 88 | uint32 src_net = 16; 89 | uint32 dst_net = 17; 90 | 91 | // BGP information 92 | bytes bgp_next_hop = 100; 93 | repeated uint32 bgp_communities = 101; 94 | repeated uint32 as_path = 102; 95 | 96 | // MPLS information 97 | bool has_mpls = 53; 98 | uint32 mpls_count = 54; 99 | uint32 mpls_1_ttl = 55; // First TTL 100 | uint32 mpls_1_label = 
56; // First Label 101 | uint32 mpls_2_ttl = 57; // Second TTL 102 | uint32 mpls_2_label = 58; // Second Label 103 | uint32 mpls_3_ttl = 59; // Third TTL 104 | uint32 mpls_3_label = 60; // Third Label 105 | uint32 mpls_last_ttl = 61; // Last TTL 106 | uint32 mpls_last_label = 62; // Last Label 107 | bytes mpls_label_ip = 65; // MPLS TOP Label IP 108 | 109 | uint32 observation_domain_id = 70; 110 | uint32 observation_point_id = 71; 111 | 112 | string src_country = 1000; 113 | string dst_country = 1001; 114 | 115 | } 116 | -------------------------------------------------------------------------------- /cmd/goflow2/mapping.yaml: -------------------------------------------------------------------------------- 1 | formatter: 2 | fields: # list of fields to format in JSON 3 | - type 4 | - time_received_ns 5 | - sequence_num 6 | - sampling_rate 7 | - flow_direction 8 | - sampler_address 9 | - time_flow_start_ns 10 | - time_flow_end_ns 11 | - bytes 12 | - packets 13 | - src_addr 14 | - src_net 15 | - dst_addr 16 | - dst_net 17 | - etype 18 | - proto 19 | - src_port 20 | - dst_port 21 | - in_if 22 | - out_if 23 | - src_mac 24 | - dst_mac 25 | # additional fields 26 | - icmp_name # virtual column 27 | - csum # udp checksum 28 | key: 29 | - sampler_address 30 | protobuf: # manual protobuf fields addition 31 | - name: flow_direction 32 | index: 42 33 | type: varint 34 | - name: bi_flow_direction 35 | index: 41 36 | type: varint 37 | - name: ingress_vrf_id 38 | index: 39 39 | type: varint 40 | - name: egress_vrf_id 41 | index: 40 42 | type: varint 43 | - name: csum 44 | index: 999 45 | type: varint 46 | render: 47 | time_received_ns: datetimenano 48 | # Decoder mappings 49 | ipfix: 50 | mapping: 51 | - field: 61 52 | destination: flow_direction 53 | - field: 239 54 | destination: bi_flow_direction 55 | - field: 234 56 | destination: ingress_vrf_id 57 | - field: 235 58 | destination: egress_vrf_id 59 | netflowv9: 60 | mapping: 61 | - field: 34 # samplingInterval provided within 
the template 62 | destination: sampling_rate 63 | endian: little 64 | - field: 61 65 | destination: flow_direction 66 | sflow: 67 | ports: 68 | - proto: "udp" 69 | dir: "dst" 70 | port: 3544 71 | parser: "teredo-dst" 72 | - proto: "udp" 73 | dir: "both" 74 | port: 4754 75 | parser: "gre" 76 | - proto: "udp" 77 | dir: "both" 78 | port: 6081 79 | parser: "geneve" 80 | mapping: 81 | - layer: "udp" 82 | offset: 48 83 | length: 16 84 | destination: csum 85 | - layer: "tcp" 86 | offset: 128 87 | length: 16 88 | destination: csum 89 | -------------------------------------------------------------------------------- /compose/elk/README.md: -------------------------------------------------------------------------------- 1 | # Flows + Logstash + Elastic + Kibana 2 | 3 | Clickhouse is a powerful data warehouse. 4 | 5 | A sample [docker-compose](./docker-compose.yml) is provided. 6 | It's composed of: 7 | * GoFlow2 8 | * Logstash 9 | * Elastic 10 | * Kibana 11 | 12 | To start the containers, use: 13 | ```bash 14 | $ docker-compose up 15 | ``` 16 | 17 | This command will automatically build the GoFlow2 container. 18 | 19 | GoFlow2 collects NetFlow v9/IPFIX and sFlow packets and logs them into a file (`/var/log/goflow/goflow.log`). 20 | Logstash collects the log messages, parse the JSON and sends to Elastic. 21 | Kibana can be used to visualize the data. You can access the dashboard at http://localhost:5601. 22 | 23 | This stack requires to create an [index pattern](http://localhost:5601/app/management/kibana/indexPatterns/create). 24 | Define the index pattern to be `logstash-*`. Select `@timestamp` to be the time filter. 25 | You can then visualize flows in the [Discover](http://localhost:5601/app/discover) section. 
-------------------------------------------------------------------------------- /compose/elk/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | goflow2: 4 | build: 5 | context: ../../ 6 | dockerfile: Dockerfile 7 | args: 8 | VERSION: compose 9 | LDFLAGS: -X main.version=compose 10 | image: netsampler/goflow2 11 | user: root # because docker-compose mount as root 12 | ports: 13 | - '8080:8080' 14 | - '6343:6343/udp' 15 | - '2055:2055/udp' 16 | command: 17 | - -transport=file 18 | - -transport.file=/var/log/goflow/goflow2.log 19 | - -format=json 20 | restart: always 21 | logging: 22 | driver: gelf 23 | options: 24 | gelf-address: "udp://localhost:12201" 25 | tag: "flows" 26 | volumes: 27 | - logs:/var/log/goflow 28 | elasticsearch: 29 | image: docker.elastic.co/elasticsearch/elasticsearch:7.13.0 30 | environment: 31 | - discovery.type=single-node 32 | ports: 33 | - 9200:9200 34 | kibana: 35 | image: docker.elastic.co/kibana/kibana:7.13.0 36 | ports: 37 | - 5601:5601 38 | depends_on: 39 | - elasticsearch 40 | - logstash 41 | logstash: 42 | image: docker.elastic.co/logstash/logstash:7.13.0 43 | user: root # because docker-compose mount as root 44 | links: 45 | - elasticsearch 46 | volumes: 47 | - ./logstash.conf:/etc/logstash/logstash.conf 48 | - logs:/var/log/goflow 49 | command: logstash -f /etc/logstash/logstash.conf 50 | ports: 51 | - 12201:12201/udp 52 | depends_on: 53 | - elasticsearch 54 | volumes: 55 | logs: 56 | -------------------------------------------------------------------------------- /compose/elk/logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | gelf { 3 | port => 12201 4 | } 5 | file { 6 | path => "/var/log/goflow/*.log" 7 | type => "log" 8 | } 9 | } 10 | filter { 11 | json { 12 | source => "message" 13 | target => "flow" 14 | remove_field => ["message"] 15 | } 16 | } 17 | output { 18 | elasticsearch { 19 
# Flows + Kafka + Clickhouse + Grafana + Prometheus
30 | -------------------------------------------------------------------------------- /compose/kcg/clickhouse/create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | clickhouse client -n <<-EOSQL 5 | 6 | CREATE DATABASE IF NOT EXISTS dictionaries; 7 | 8 | CREATE DICTIONARY IF NOT EXISTS dictionaries.protocols ( 9 | proto UInt8, 10 | name String, 11 | description String 12 | ) 13 | PRIMARY KEY proto 14 | LAYOUT(FLAT()) 15 | SOURCE (FILE(path '/var/lib/clickhouse/user_files/protocols.csv' format 'CSVWithNames')) 16 | LIFETIME(3600); 17 | 18 | CREATE TABLE IF NOT EXISTS flows 19 | ( 20 | time_received_ns UInt64, 21 | time_flow_start_ns UInt64, 22 | 23 | sequence_num UInt32, 24 | sampling_rate UInt64, 25 | sampler_address FixedString(16), 26 | 27 | src_addr FixedString(16), 28 | dst_addr FixedString(16), 29 | 30 | src_as UInt32, 31 | dst_as UInt32, 32 | 33 | etype UInt32, 34 | proto UInt32, 35 | 36 | src_port UInt32, 37 | dst_port UInt32, 38 | 39 | bytes UInt64, 40 | packets UInt64 41 | ) ENGINE = Kafka() 42 | SETTINGS 43 | kafka_broker_list = 'kafka:9092', 44 | kafka_num_consumers = 1, 45 | kafka_topic_list = 'flows', 46 | kafka_group_name = 'clickhouse', 47 | kafka_format = 'Protobuf', 48 | kafka_schema = 'flow.proto:FlowMessage'; 49 | 50 | CREATE TABLE IF NOT EXISTS flows_raw 51 | ( 52 | date Date, 53 | time_inserted_ns DateTime64(9), 54 | time_received_ns DateTime64(9), 55 | time_flow_start_ns DateTime64(9), 56 | 57 | sequence_num UInt32, 58 | sampling_rate UInt64, 59 | sampler_address FixedString(16), 60 | 61 | src_addr FixedString(16), 62 | dst_addr FixedString(16), 63 | 64 | src_as UInt32, 65 | dst_as UInt32, 66 | 67 | etype UInt32, 68 | proto UInt32, 69 | 70 | src_port UInt32, 71 | dst_port UInt32, 72 | 73 | bytes UInt64, 74 | packets UInt64 75 | ) ENGINE = MergeTree() 76 | PARTITION BY date 77 | ORDER BY time_received_ns; 78 | 79 | CREATE MATERIALIZED VIEW IF NOT EXISTS flows_raw_view TO 
flows_raw 80 | AS SELECT 81 | toDate(time_received_ns) AS date, 82 | now() AS time_inserted_ns, 83 | toDateTime64(time_received_ns/1000000000, 9) AS time_received_ns, 84 | toDateTime64(time_flow_start_ns/1000000000, 9) AS time_flow_start_ns, 85 | sequence_num, 86 | sampling_rate, 87 | sampler_address, 88 | 89 | src_addr, 90 | dst_addr, 91 | 92 | src_as, 93 | dst_as, 94 | 95 | etype, 96 | proto, 97 | 98 | src_port, 99 | dst_port, 100 | 101 | bytes, 102 | packets 103 | FROM flows; 104 | 105 | CREATE TABLE IF NOT EXISTS flows_5m 106 | ( 107 | date Date, 108 | timeslot DateTime, 109 | 110 | src_as UInt32, 111 | dst_as UInt32, 112 | 113 | etypeMap Nested ( 114 | etype UInt32, 115 | bytes UInt64, 116 | packets UInt64, 117 | count UInt64 118 | ), 119 | 120 | bytes UInt64, 121 | packets UInt64, 122 | count UInt64 123 | ) ENGINE = SummingMergeTree() 124 | PARTITION BY date 125 | ORDER BY (date, timeslot, src_as, dst_as, \`etypeMap.etype\`); 126 | 127 | CREATE MATERIALIZED VIEW IF NOT EXISTS flows_5m_view TO flows_5m 128 | AS 129 | SELECT 130 | date, 131 | toStartOfFiveMinute(time_received_ns) AS timeslot, 132 | src_as, 133 | dst_as, 134 | 135 | [etype] AS \`etypeMap.etype\`, 136 | [bytes] AS \`etypeMap.bytes\`, 137 | [packets] AS \`etypeMap.packets\`, 138 | [count] AS \`etypeMap.count\`, 139 | 140 | sum(bytes) AS bytes, 141 | sum(packets) AS packets, 142 | count() AS count 143 | 144 | FROM flows_raw 145 | GROUP BY date, timeslot, src_as, dst_as, \`etypeMap.etype\`; 146 | 147 | EOSQL 148 | -------------------------------------------------------------------------------- /compose/kcg/clickhouse/flow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package flowpb; 3 | option go_package = "github.com/netsampler/goflow2/pb;flowpb"; 4 | 5 | message FlowMessage { 6 | 7 | enum FlowType { 8 | FLOWUNKNOWN = 0; 9 | SFLOW_5 = 1; 10 | NETFLOW_V5 = 2; 11 | NETFLOW_V9 = 3; 12 | IPFIX = 4; 13 | } 14 | FlowType type = 1; 15 | 
16 | uint64 time_received_ns = 110; 17 | uint32 sequence_num = 4; 18 | uint64 sampling_rate = 3; 19 | 20 | //uint32 flow_direction = 42; 21 | 22 | // Sampler information 23 | bytes sampler_address = 11; 24 | 25 | // Found inside packet 26 | uint64 time_flow_start_ns = 111; 27 | uint64 time_flow_end_ns = 112; 28 | 29 | // Size of the sampled packet 30 | uint64 bytes = 9; 31 | uint64 packets = 10; 32 | 33 | // Source/destination addresses 34 | bytes src_addr = 6; 35 | bytes dst_addr = 7; 36 | 37 | // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...) 38 | uint32 etype = 30; 39 | 40 | // Layer 4 protocol 41 | uint32 proto = 20; 42 | 43 | // Ports for UDP and TCP 44 | uint32 src_port = 21; 45 | uint32 dst_port = 22; 46 | 47 | // Interfaces 48 | uint32 in_if = 18; 49 | uint32 out_if = 19; 50 | 51 | // Ethernet information 52 | uint64 src_mac = 27; 53 | uint64 dst_mac = 28; 54 | 55 | // Vlan 56 | uint32 src_vlan = 33; 57 | uint32 dst_vlan = 34; 58 | // 802.1q VLAN in sampled packet 59 | uint32 vlan_id = 29; 60 | 61 | // IP and TCP special flags 62 | uint32 ip_tos = 23; 63 | uint32 forwarding_status = 24; 64 | uint32 ip_ttl = 25; 65 | uint32 ip_flags = 38; 66 | uint32 tcp_flags = 26; 67 | uint32 icmp_type = 31; 68 | uint32 icmp_code = 32; 69 | uint32 ipv6_flow_label = 37; 70 | // Fragments (IPv4/IPv6) 71 | uint32 fragment_id = 35; 72 | uint32 fragment_offset = 36; 73 | 74 | // Autonomous system information 75 | uint32 src_as = 14; 76 | uint32 dst_as = 15; 77 | 78 | bytes next_hop = 12; 79 | uint32 next_hop_as = 13; 80 | 81 | // Prefix size 82 | uint32 src_net = 16; 83 | uint32 dst_net = 17; 84 | 85 | // BGP information 86 | bytes bgp_next_hop = 100; 87 | repeated uint32 bgp_communities = 101; 88 | repeated uint32 as_path = 102; 89 | 90 | // MPLS information 91 | repeated uint32 mpls_ttl = 80; 92 | repeated uint32 mpls_label = 81; 93 | repeated bytes mpls_ip = 82; 94 | 95 | uint32 observation_domain_id = 70; 96 | uint32 observation_point_id = 71; 97 | 98 | // Encapsulation 99 | enum 
LayerStack { 100 | Ethernet = 0; 101 | IPv4 = 1; 102 | IPv6 = 2; 103 | TCP = 3; 104 | UDP = 4; 105 | MPLS = 5; 106 | Dot1Q = 6; 107 | ICMP = 7; 108 | ICMPv6 = 8; 109 | GRE = 9; 110 | IPv6HeaderRouting = 10; 111 | IPv6HeaderFragment = 11; 112 | Geneve = 12; 113 | Teredo = 13; 114 | Custom = 99; 115 | // todo: add nsh 116 | } 117 | repeated LayerStack layer_stack = 103; 118 | repeated uint32 layer_size = 104; 119 | 120 | repeated bytes ipv6_routing_header_addresses = 105; // SRv6 121 | uint32 ipv6_routing_header_seg_left = 106; // SRv6 122 | 123 | } 124 | -------------------------------------------------------------------------------- /compose/kcg/clickhouse/protocols.csv: -------------------------------------------------------------------------------- 1 | proto,name,description 2 | 0,HOPOPT,IPv6 Hop-by-Hop Option 3 | 1,ICMP,Internet Control Message 4 | 2,IGMP,Internet Group Management 5 | 4,IPv4,IPv4 encapsulation 6 | 6,TCP,Transmission Control Protocol 7 | 8,EGP,Exterior Gateway Protocol 8 | 9,IGP,Interior Gateway Protocol 9 | 16,CHAOS,Chaos 10 | 17,UDP,User Datagram Protocol 11 | 27,RDP,Reliable Data Protocol 12 | 41,IPv6,IPv6 encapsulation 13 | 43,IPv6-Route,Routing Header for IPv6 14 | 44,IPv6-Frag,Fragment Header for IPv6 15 | 45,IDRP,Inter-Domain Routing Protocol 16 | 46,RSVP,Reservation Protocol 17 | 47,GRE,Generic Routing Encapsulation 18 | 50,ESP,Encap Security Payload 19 | 51,AH,Authentication Header 20 | 55,MOBILE,IP Mobility 21 | 58,IPv6-ICMP,ICMP for IPv6 22 | 59,IPv6-NoNxt,No Next Header for IPv6 23 | 60,IPv6-Opts,Destination Options for IPv6 24 | 88,EIGRP,EIGRP 25 | 89,OSPFIGP,OSPFIGP 26 | 92,MTP,Multicast Transport Protocol 27 | 94,IPIP,IP-within-IP Encapsulation Protocol 28 | 97,ETHERIP,Ethernet-within-IP Encapsulation 29 | 98,ENCAP,Encapsulation Header 30 | 112,VRRP,Virtual Router Redundancy Protocol 31 | -------------------------------------------------------------------------------- /compose/kcg/docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | kafka: 4 | image: bitnami/kafka:3.9.0 5 | ports: 6 | - 9092:9092 7 | environment: 8 | - ALLOW_PLAINTEXT_LISTENER=yes 9 | - KAFKA_DELETE_TOPIC_ENABLE=true 10 | - KAFKA_ENABLE_KRAFT=true 11 | - KAFKA_KRAFT_CLUSTER_ID=AAAAAAAAAAAAAAAAAAAAAA # uuid encoded in base64 12 | # dd if=/dev/zero of=/dev/stdout count=16 bs=1 | openssl enc -A -a 13 | # head -c 20 /dev/zero | base64 14 | - KAFKA_CFG_BROKER_ID=1 15 | - KAFKA_CFG_NODE_ID=1 16 | - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@kafka:9093 17 | - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER 18 | - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=IB 19 | - KAFKA_CFG_PROCESS_ROLES=broker,controller 20 | - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,BROKER:PLAINTEXT,IB:PLAINTEXT 21 | - KAFKA_CFG_LISTENERS=CONTROLLER://:9093,BROKER://:9092,IB://:9094 22 | - KAFKA_ADVERTISED_LISTENERS=BROKER://kafka:9092,IB://:9094 23 | - BITNAMI_DEBUG=yes 24 | restart: always 25 | grafana: 26 | image: grafana/grafana:9.4.3 27 | environment: 28 | - GF_INSTALL_PLUGINS=vertamedia-clickhouse-datasource 29 | # - GF_INSTALL_PLUGINS=grafana-clickhouse-datasource 30 | # - GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=vertamedia-clickhouse-datasource 31 | ports: 32 | - 3000:3000 33 | restart: always 34 | volumes: 35 | - ./grafana/datasources-ch.yml:/etc/grafana/provisioning/datasources/datasources-ch.yml 36 | - ./grafana/dashboards.yml:/etc/grafana/provisioning/dashboards/dashboards.yml 37 | - ./grafana/dashboards:/var/lib/grafana/dashboards 38 | prometheus: 39 | image: prom/prometheus:v3.0.1 40 | ports: 41 | - 9090:9090 42 | restart: always 43 | volumes: 44 | - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml 45 | goflow2: 46 | build: 47 | context: ../../ 48 | dockerfile: Dockerfile 49 | args: 50 | VERSION: compose 51 | LDFLAGS: -X main.version=compose 52 | image: netsampler/goflow2 53 | depends_on: 54 | - kafka 55 | ports: 56 
| - 8080:8080 57 | - 6343:6343/udp 58 | - 2055:2055/udp 59 | restart: always 60 | command: 61 | - -transport.kafka.brokers=kafka:9092 62 | - -transport=kafka 63 | - -transport.kafka.topic=flows 64 | - -format=bin 65 | db: 66 | image: clickhouse/clickhouse-server:24.11.1.2557-alpine 67 | ports: 68 | - 8123:8123 69 | volumes: 70 | - ./clickhouse:/docker-entrypoint-initdb.d/ 71 | #- ../../pb/flow.proto:/var/lib/clickhouse/format_schemas/flow.proto 72 | # the following files have 101:101 as owner 73 | - ./clickhouse/flow.proto:/var/lib/clickhouse/format_schemas/flow.proto 74 | - ./clickhouse/protocols.csv:/var/lib/clickhouse/user_files/protocols.csv 75 | depends_on: 76 | - kafka 77 | -------------------------------------------------------------------------------- /compose/kcg/grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu AS builder 2 | 3 | RUN apt-get update && apt-get install -y git 4 | RUN git clone https://github.com/Vertamedia/clickhouse-grafana.git 5 | 6 | FROM grafana/grafana:9.1.7 7 | 8 | COPY --from=builder /clickhouse-grafana /var/lib/grafana/plugins -------------------------------------------------------------------------------- /compose/kcg/grafana/dashboards.yml: -------------------------------------------------------------------------------- 1 | - name: 'default' 2 | org_id: 1 3 | folder: '' 4 | type: file 5 | options: 6 | folder: /var/lib/grafana/dashboards -------------------------------------------------------------------------------- /compose/kcg/grafana/datasources-ch.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | access: proxy 7 | orgId: 1 8 | url: http://prometheus:9090 9 | version: 1 10 | editable: true 11 | - name: ClickHouse 12 | type: vertamedia-clickhouse-datasource 13 | typeLogoUrl: '' 14 | access: proxy 15 | url: http://db:8123 16 | password: '' 17 
| user: '' 18 | database: '' 19 | basicAuth: false 20 | basicAuthUser: '' 21 | basicAuthPassword: '' 22 | withCredentials: false 23 | isDefault: true 24 | secureJsonFields: {} 25 | version: 3 26 | readOnly: false 27 | -------------------------------------------------------------------------------- /compose/kcg/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. 3 | evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. 4 | alerting: 5 | alertmanagers: 6 | - static_configs: 7 | - targets: 8 | 9 | rule_files: 10 | 11 | scrape_configs: 12 | - job_name: 'prometheus' 13 | static_configs: 14 | - targets: ['localhost:9090', 'goflow2:8080', 'host.docker.internal:8080'] 15 | -------------------------------------------------------------------------------- /decoders/netflow/format.go: -------------------------------------------------------------------------------- 1 | package netflow 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | ) 7 | 8 | func (p *IPFIXPacket) MarshalJSON() ([]byte, error) { 9 | return json.Marshal(*p) // this is a trick to avoid having the JSON marshaller defaults to MarshalText 10 | } 11 | 12 | func (p *NFv9Packet) MarshalJSON() ([]byte, error) { 13 | return json.Marshal(*p) // this is a trick to avoid having the JSON marshaller defaults to MarshalText 14 | } 15 | 16 | func (p *IPFIXPacket) MarshalText() ([]byte, error) { 17 | return []byte(fmt.Sprintf("IPFIX count:%d seq:%d", len(p.FlowSets), p.SequenceNumber)), nil 18 | } 19 | 20 | func (p *NFv9Packet) MarshalText() ([]byte, error) { 21 | return []byte(fmt.Sprintf("NetFlowV%d count:%d seq:%d", p.Version, p.Count, p.SequenceNumber)), nil 22 | } 23 | -------------------------------------------------------------------------------- /decoders/netflow/packet.go: 
// FlowSetHeader contains fields shared by all Flow Sets (DataFlowSet,
// TemplateFlowSet, OptionsTemplateFlowSet).
type FlowSetHeader struct {
	// FlowSet ID:
	//    0 for TemplateFlowSet
	//    1 for OptionsTemplateFlowSet
	//    256-65535 for DataFlowSet (used as TemplateId)
	Id uint16 `json:"id"`

	// The total length of this FlowSet in bytes (including padding).
	Length uint16 `json:"length"`
}

// TemplateFlowSet is a collection of templates that describe structure of Data
// Records (actual NetFlow data).
type TemplateFlowSet struct {
	FlowSetHeader

	// List of Template Records
	Records []TemplateRecord `json:"records"`
}

// DataFlowSet is a collection of Data Records (actual NetFlow data) and Options
// Data Records (meta data).
type DataFlowSet struct {
	FlowSetHeader

	Records []DataRecord `json:"records"`
}

// RawFlowSet is a set that could not be decoded due to the absence of a template;
// its payload is kept as raw bytes.
type RawFlowSet struct {
	FlowSetHeader

	Records []byte `json:"records"`
}

// OptionsDataFlowSet is a collection of Options Data Records (meta data).
type OptionsDataFlowSet struct {
	FlowSetHeader

	Records []OptionsDataRecord `json:"records"`
}

// TemplateRecord is a single template that describes structure of a Flow Record
// (actual Netflow data).
type TemplateRecord struct {
	// Each of the newly generated Template Records is given a unique
	// Template ID. This uniqueness is local to the Observation Domain that
	// generated the Template ID. Template IDs of Data FlowSets are numbered
	// from 256 to 65535.
	TemplateId uint16 `json:"template-id"`

	// Number of fields in this Template Record. Because a Template FlowSet
	// usually contains multiple Template Records, this field allows the
	// Collector to determine the end of the current Template Record and
	// the start of the next.
	FieldCount uint16 `json:"field-count"`

	// List of fields in this Template Record.
	Fields []Field `json:"fields"`
}

// DataRecord is a single decoded Flow Record: one value per template field.
type DataRecord struct {
	Values []DataField `json:"values"`
}

// OptionsDataRecord is meta data sent alongside actual NetFlow data. Combined
// with OptionsTemplateRecord it can be decoded to a single data row.
type OptionsDataRecord struct {
	// List of Scope values stored in raw format as []byte
	ScopesValues []DataField `json:"scope-values"`

	// List of Options values stored in raw format as []byte
	OptionsValues []DataField `json:"option-values"`
}

// Field describes type and length of a single value in a Flow Data Record.
// Field does not contain the record value itself, it is just a description of
// what the record value will look like.
type Field struct {
	// A numeric value that represents the type of field.
	PenProvided bool   `json:"pen-provided"`
	Type        uint16 `json:"type"`

	// The length (in bytes) of the field.
	Length uint16 `json:"length"`

	// IPFIX Private Enterprise Number, set when PenProvided is true.
	Pen uint32 `json:"pen"`
}

// DataField is a single decoded value together with the type information
// describing it.
type DataField struct {
	// A numeric value that represents the type of field.
	PenProvided bool   `json:"pen-provided"`
	Type        uint16 `json:"type"`
	Pen         uint32 `json:"pen"`

	// The value (in bytes) of the field.
	Value interface{} `json:"value"`
	//Value []byte
}

// String renders the undecoded flow set for debugging.
// Note: the "Length" line prints the raw payload size (len(Records)),
// not the header's Length field.
func (flowSet RawFlowSet) String() string {
	str := fmt.Sprintf(" Id %v\n", flowSet.Id)
	str += fmt.Sprintf(" Length: %v\n", len(flowSet.Records))
	str += fmt.Sprintf(" Records: %v\n", flowSet.Records)

	return str
}

// String renders the options data flow set for debugging. TypeToString and
// ScopeToString translate numeric field/scope types into readable names.
func (flowSet OptionsDataFlowSet) String(TypeToString func(uint16) string, ScopeToString func(uint16) string) string {
	str := fmt.Sprintf(" Id %v\n", flowSet.Id)
	str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
	str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))

	for j, record := range flowSet.Records {
		str += fmt.Sprintf(" - Record %v:\n", j)
		str += fmt.Sprintf(" Scopes (%v):\n", len(record.ScopesValues))

		for k, value := range record.ScopesValues {
			str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, ScopeToString(value.Type), value.Type, value.Value)
		}

		str += fmt.Sprintf(" Options (%v):\n", len(record.OptionsValues))

		for k, value := range record.OptionsValues {
			str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value)
		}
	}

	return str
}

// String renders the data flow set for debugging. TypeToString translates
// numeric field types into readable names.
func (flowSet DataFlowSet) String(TypeToString func(uint16) string) string {
	str := fmt.Sprintf(" Id %v\n", flowSet.Id)
	str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
	str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))

	for j, record := range flowSet.Records {
		str += fmt.Sprintf(" - Record %v:\n", j)
		str += fmt.Sprintf(" Values (%v):\n", len(record.Values))

		for k, value := range record.Values {
			str += fmt.Sprintf(" - %v. %v (%v): %v\n", k, TypeToString(value.Type), value.Type, value.Value)
		}
	}

	return str
}

// String renders the template flow set for debugging. TypeToString translates
// numeric field types into readable names.
func (flowSet TemplateFlowSet) String(TypeToString func(uint16) string) string {
	str := fmt.Sprintf(" Id %v\n", flowSet.Id)
	str += fmt.Sprintf(" Length: %v\n", flowSet.Length)
	str += fmt.Sprintf(" Records (%v records):\n", len(flowSet.Records))

	for j, record := range flowSet.Records {
		str += fmt.Sprintf(" - %v. Record:\n", j)
		str += fmt.Sprintf(" TemplateId: %v\n", record.TemplateId)
		str += fmt.Sprintf(" FieldCount: %v\n", record.FieldCount)
		str += fmt.Sprintf(" Fields (%v):\n", len(record.Fields))

		for k, field := range record.Fields {
			str += fmt.Sprintf(" - %v. %v (%v/%v): %v\n", k, TypeToString(field.Type), field.Type, field.PenProvided, field.Length)
		}
	}

	return str
}
FlowBaseTemplateSet { 26 | ts.templateslock.RLock() 27 | tmp := ts.templates 28 | ts.templateslock.RUnlock() 29 | return tmp 30 | } 31 | 32 | func (ts *BasicTemplateSystem) AddTemplate(version uint16, obsDomainId uint32, templateId uint16, template interface{}) error { 33 | ts.templateslock.Lock() 34 | defer ts.templateslock.Unlock() 35 | 36 | /*var templateId uint16 37 | switch templateIdConv := template.(type) { 38 | case IPFIXOptionsTemplateRecord: 39 | templateId = templateIdConv.TemplateId 40 | case NFv9OptionsTemplateRecord: 41 | templateId = templateIdConv.TemplateId 42 | case TemplateRecord: 43 | templateId = templateIdConv.TemplateId 44 | }*/ 45 | key := templateKey(version, obsDomainId, templateId) 46 | ts.templates[key] = template 47 | return nil 48 | } 49 | 50 | func (ts *BasicTemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { 51 | ts.templateslock.RLock() 52 | defer ts.templateslock.RUnlock() 53 | key := templateKey(version, obsDomainId, templateId) 54 | if template, ok := ts.templates[key]; ok { 55 | return template, nil 56 | } 57 | return nil, ErrorTemplateNotFound 58 | } 59 | 60 | func (ts *BasicTemplateSystem) RemoveTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { 61 | ts.templateslock.Lock() 62 | defer ts.templateslock.Unlock() 63 | 64 | key := templateKey(version, obsDomainId, templateId) 65 | if template, ok := ts.templates[key]; ok { 66 | delete(ts.templates, key) 67 | return template, nil 68 | } 69 | return nil, ErrorTemplateNotFound 70 | } 71 | 72 | type BasicTemplateSystem struct { 73 | templates FlowBaseTemplateSet 74 | templateslock *sync.RWMutex 75 | } 76 | 77 | // Creates a basic store for NetFlow and IPFIX templates. 78 | // Everyting is stored in memory. 
79 | func CreateTemplateSystem() NetFlowTemplateSystem { 80 | ts := &BasicTemplateSystem{ 81 | templates: make(FlowBaseTemplateSet), 82 | templateslock: &sync.RWMutex{}, 83 | } 84 | return ts 85 | } 86 | -------------------------------------------------------------------------------- /decoders/netflow/templates_test.go: -------------------------------------------------------------------------------- 1 | package netflow 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func benchTemplatesAdd(ts NetFlowTemplateSystem, obs uint32, N int, b *testing.B) { 8 | for n := 0; n <= N; n++ { 9 | ts.AddTemplate(10, obs, uint16(n), n) 10 | } 11 | } 12 | 13 | func BenchmarkTemplatesAdd(b *testing.B) { 14 | ts := CreateTemplateSystem() 15 | b.Log("Creating", b.N, "templates") 16 | benchTemplatesAdd(ts, uint32(b.N)%0xffff+1, b.N, b) 17 | } 18 | 19 | func BenchmarkTemplatesAddGet(b *testing.B) { 20 | ts := CreateTemplateSystem() 21 | templates := 1000 22 | b.Log("Adding", templates, "templates") 23 | benchTemplatesAdd(ts, 1, templates, b) 24 | b.Log("Getting", b.N, "templates") 25 | 26 | for n := 0; n <= b.N; n++ { 27 | data, err := ts.GetTemplate(10, 1, uint16(n%templates)) 28 | if err != nil { 29 | b.Fatal(err) 30 | } 31 | dataC, ok := data.(int) 32 | if !ok { 33 | b.Fatal("template not an integer") 34 | } 35 | if dataC != n%templates { 36 | b.Fatal("different values", dataC, "!=", n%templates) 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/format.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | func (p *PacketNetFlowV5) MarshalJSON() ([]byte, error) { 10 | return json.Marshal(*p) // this is a trick to avoid having the JSON marshaller defaults to MarshalText 11 | } 12 | 13 | func (p *PacketNetFlowV5) MarshalText() ([]byte, error) { 14 | return 
[]byte(fmt.Sprintf("NetFlowV%d seq:%d count:%d", p.Version, p.FlowSequence, p.Count)), nil 15 | } 16 | 17 | func (p PacketNetFlowV5) String() string { 18 | str := "NetFlow v5 Packet\n" 19 | str += "-----------------\n" 20 | str += fmt.Sprintf(" Version: %v\n", p.Version) 21 | str += fmt.Sprintf(" Count: %v\n", p.Count) 22 | 23 | unixSeconds := time.Unix(int64(p.UnixSecs), int64(p.UnixNSecs)) 24 | str += fmt.Sprintf(" SystemUptime: %v\n", time.Duration(p.SysUptime)*time.Millisecond) 25 | str += fmt.Sprintf(" UnixSeconds: %v\n", unixSeconds.String()) 26 | str += fmt.Sprintf(" FlowSequence: %v\n", p.FlowSequence) 27 | str += fmt.Sprintf(" EngineType: %v\n", p.EngineType) 28 | str += fmt.Sprintf(" EngineId: %v\n", p.EngineId) 29 | str += fmt.Sprintf(" SamplingInterval: %v\n", p.SamplingInterval) 30 | str += fmt.Sprintf(" Records (%v):\n", len(p.Records)) 31 | 32 | for i, record := range p.Records { 33 | str += fmt.Sprintf(" Record %v:\n", i) 34 | str += record.String() 35 | } 36 | return str 37 | } 38 | 39 | func (r RecordsNetFlowV5) String() string { 40 | str := fmt.Sprintf(" SrcAddr: %v\n", r.SrcAddr) 41 | str += fmt.Sprintf(" DstAddr: %v\n", r.DstAddr) 42 | str += fmt.Sprintf(" NextHop: %v\n", r.NextHop) 43 | str += fmt.Sprintf(" Input: %v\n", r.Input) 44 | str += fmt.Sprintf(" Output: %v\n", r.Output) 45 | str += fmt.Sprintf(" DPkts: %v\n", r.DPkts) 46 | str += fmt.Sprintf(" DOctets: %v\n", r.DOctets) 47 | str += fmt.Sprintf(" First: %v\n", time.Duration(r.First)*time.Millisecond) 48 | str += fmt.Sprintf(" Last: %v\n", time.Duration(r.Last)*time.Millisecond) 49 | str += fmt.Sprintf(" SrcPort: %v\n", r.SrcPort) 50 | str += fmt.Sprintf(" DstPort: %v\n", r.DstPort) 51 | str += fmt.Sprintf(" TCPFlags: %v\n", r.TCPFlags) 52 | str += fmt.Sprintf(" Proto: %v\n", r.Proto) 53 | str += fmt.Sprintf(" Tos: %v\n", r.Tos) 54 | str += fmt.Sprintf(" SrcAS: %v\n", r.SrcAS) 55 | str += fmt.Sprintf(" DstAS: %v\n", r.DstAS) 56 | str += fmt.Sprintf(" SrcMask: %v\n", r.SrcMask) 57 | str 
+= fmt.Sprintf(" DstMask: %v\n", r.DstMask) 58 | 59 | return str 60 | } 61 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/netflow.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | 7 | "github.com/netsampler/goflow2/v2/decoders/utils" 8 | ) 9 | 10 | type DecoderError struct { 11 | Err error 12 | } 13 | 14 | func (e *DecoderError) Error() string { 15 | return fmt.Sprintf("NetFlowLegacy %s", e.Err.Error()) 16 | } 17 | 18 | func (e *DecoderError) Unwrap() error { 19 | return e.Err 20 | } 21 | 22 | func DecodeMessageVersion(payload *bytes.Buffer, packet *PacketNetFlowV5) error { 23 | var version uint16 24 | if err := utils.BinaryDecoder(payload, &version); err != nil { 25 | return err 26 | } 27 | packet.Version = version 28 | if packet.Version != 5 { 29 | return &DecoderError{fmt.Errorf("unknown version %d", version)} 30 | } 31 | return DecodeMessage(payload, packet) 32 | } 33 | 34 | func DecodeMessage(payload *bytes.Buffer, packet *PacketNetFlowV5) error { 35 | if err := utils.BinaryDecoder(payload, 36 | &packet.Count, 37 | &packet.SysUptime, 38 | &packet.UnixSecs, 39 | &packet.UnixNSecs, 40 | &packet.FlowSequence, 41 | &packet.EngineType, 42 | &packet.EngineId, 43 | &packet.SamplingInterval, 44 | ); err != nil { 45 | return &DecoderError{err} 46 | } 47 | 48 | packet.Records = make([]RecordsNetFlowV5, int(packet.Count)) // maximum is 65535 which would be 3MB 49 | for i := 0; i < int(packet.Count) && payload.Len() >= 48; i++ { 50 | record := RecordsNetFlowV5{} 51 | var srcAddr, dstAddr, nextHop uint32 52 | 53 | if err := utils.BinaryDecoder(payload, 54 | &srcAddr, 55 | &dstAddr, 56 | &nextHop, 57 | &record.Input, 58 | &record.Output, 59 | &record.DPkts, 60 | &record.DOctets, 61 | &record.First, 62 | &record.Last, 63 | &record.SrcPort, 64 | &record.DstPort, 65 | &record.Pad1, 66 | &record.TCPFlags, 67 | 
&record.Proto, 68 | &record.Tos, 69 | &record.SrcAS, 70 | &record.DstAS, 71 | &record.SrcMask, 72 | &record.DstMask, 73 | &record.Pad2, 74 | ); err != nil { 75 | return &DecoderError{err} 76 | } 77 | record.SrcAddr = IPAddress(srcAddr) 78 | record.DstAddr = IPAddress(dstAddr) 79 | record.NextHop = IPAddress(nextHop) 80 | packet.Records[i] = record 81 | } 82 | 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/netflow_test.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "bytes" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDecodeNetFlowV5(t *testing.T) { 11 | data := []byte{ 12 | 0x00, 0x05, 0x00, 0x06, 0x00, 0x82, 0xc3, 0x48, 0x5b, 0xcd, 0xba, 0x1b, 0x05, 0x97, 0x6d, 0xc7, 13 | 0x00, 0x00, 0x64, 0x3d, 0x08, 0x08, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x79, 0x0a, 0x80, 0x02, 0x01, 14 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x02, 0x4e, 15 | 0x00, 0x82, 0x9b, 0x8c, 0x00, 0x82, 0x9b, 0x90, 0x1f, 0x90, 0xb9, 0x18, 0x00, 0x1b, 0x06, 0x00, 16 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x77, 0x0a, 0x81, 0x02, 0x01, 17 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x94, 18 | 0x00, 0x82, 0x95, 0xa9, 0x00, 0x82, 0x9a, 0xfb, 0x1f, 0x90, 0xc1, 0x2c, 0x00, 0x12, 0x06, 0x00, 19 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x81, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x77, 20 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xc2, 21 | 0x00, 0x82, 0x95, 0xa9, 0x00, 0x82, 0x9a, 0xfc, 0xc1, 0x2c, 0x1f, 0x90, 0x00, 0x16, 0x06, 0x00, 22 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x79, 23 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x01, 0xf1, 24 
| 0x00, 0x82, 0x9b, 0x8c, 0x00, 0x82, 0x9b, 0x8f, 0xb9, 0x18, 0x1f, 0x90, 0x00, 0x1b, 0x06, 0x00, 25 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x01, 0x0a, 0x80, 0x02, 0x79, 26 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x02, 0x2e, 27 | 0x00, 0x82, 0x9b, 0x90, 0x00, 0x82, 0x9b, 0x9d, 0xb9, 0x1a, 0x1f, 0x90, 0x00, 0x1b, 0x06, 0x00, 28 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x80, 0x02, 0x79, 0x0a, 0x80, 0x02, 0x01, 29 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x00, 0x02, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x0b, 0xac, 30 | 0x00, 0x82, 0x9b, 0x90, 0x00, 0x82, 0x9b, 0x9d, 0x1f, 0x90, 0xb9, 0x1a, 0x00, 0x1b, 0x06, 0x00, 31 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 32 | } 33 | buf := bytes.NewBuffer(data) 34 | 35 | var decNfv5 PacketNetFlowV5 36 | assert.Nil(t, DecodeMessageVersion(buf, &decNfv5)) 37 | assert.Equal(t, uint16(5), decNfv5.Version) 38 | assert.Equal(t, uint16(9), decNfv5.Records[0].Input) 39 | } 40 | -------------------------------------------------------------------------------- /decoders/netflowlegacy/packet.go: -------------------------------------------------------------------------------- 1 | package netflowlegacy 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type PacketNetFlowV5 struct { 8 | Version uint16 `json:"version"` 9 | Count uint16 `json:"count"` 10 | SysUptime uint32 `json:"sys-uptime"` 11 | UnixSecs uint32 `json:"unix-secs"` 12 | UnixNSecs uint32 `json:"unix-nsecs"` 13 | FlowSequence uint32 `json:"flow-sequence"` 14 | EngineType uint8 `json:"engine-type"` 15 | EngineId uint8 `json:"engine-id"` 16 | SamplingInterval uint16 `json:"sampling-interval"` 17 | Records []RecordsNetFlowV5 `json:"records"` 18 | } 19 | 20 | type RecordsNetFlowV5 struct { 21 | SrcAddr IPAddress `json:"src-addr"` 22 | DstAddr IPAddress `json:"dst-addr"` 23 | NextHop IPAddress `json:"next-hop"` 24 | Input uint16 `json:"input"` 25 | Output uint16 `json:"output"` 26 | DPkts uint32 
`json:"dpkts"` 27 | DOctets uint32 `json:"doctets"` 28 | First uint32 `json:"first"` 29 | Last uint32 `json:"last"` 30 | SrcPort uint16 `json:"src-port"` 31 | DstPort uint16 `json:"dst-port"` 32 | Pad1 byte `json:"pad1"` 33 | TCPFlags uint8 `json:"tcp-flags"` 34 | Proto uint8 `json:"proto"` 35 | Tos uint8 `json:"tos"` 36 | SrcAS uint16 `json:"src-as"` 37 | DstAS uint16 `json:"dst-as"` 38 | SrcMask uint8 `json:"src-mask"` 39 | DstMask uint8 `json:"dst-mask"` 40 | Pad2 uint16 `json:"pad2"` 41 | } 42 | 43 | type IPAddress uint32 // purely for the formatting purpose 44 | 45 | func (s *IPAddress) MarshalJSON() ([]byte, error) { 46 | return []byte(fmt.Sprintf("\"%d.%d.%d.%d\"", *s>>24, (*s>>16)&0xFF, (*s>>8)&0xFF, *s&0xFF)), nil 47 | } 48 | -------------------------------------------------------------------------------- /decoders/sflow/datastructure.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | import "github.com/netsampler/goflow2/v2/decoders/utils" 4 | 5 | type SampledHeader struct { 6 | Protocol uint32 `json:"protocol"` 7 | FrameLength uint32 `json:"frame-length"` 8 | Stripped uint32 `json:"stripped"` 9 | OriginalLength uint32 `json:"original-length"` 10 | HeaderData []byte `json:"header-data"` 11 | } 12 | 13 | type SampledEthernet struct { 14 | Length uint32 `json:"length"` 15 | SrcMac utils.MacAddress `json:"src-mac"` 16 | DstMac utils.MacAddress `json:"dst-mac"` 17 | EthType uint32 `json:"eth-type"` 18 | } 19 | 20 | type SampledIPBase struct { 21 | Length uint32 `json:"length"` 22 | Protocol uint32 `json:"protocol"` 23 | SrcIP utils.IPAddress `json:"src-ip"` 24 | DstIP utils.IPAddress `json:"dst-ip"` 25 | SrcPort uint32 `json:"src-port"` 26 | DstPort uint32 `json:"dst-port"` 27 | TcpFlags uint32 `json:"tcp-flags"` 28 | } 29 | 30 | type SampledIPv4 struct { 31 | SampledIPBase 32 | Tos uint32 `json:"tos"` 33 | } 34 | 35 | type SampledIPv6 struct { 36 | SampledIPBase 37 | Priority uint32 
`json:"priority"` 38 | } 39 | 40 | type ExtendedSwitch struct { 41 | SrcVlan uint32 `json:"src-vlan"` 42 | SrcPriority uint32 `json:"src-priority"` 43 | DstVlan uint32 `json:"dst-vlan"` 44 | DstPriority uint32 `json:"dst-priority"` 45 | } 46 | 47 | type ExtendedRouter struct { 48 | NextHopIPVersion uint32 `json:"next-hop-ip-version"` 49 | NextHop utils.IPAddress `json:"next-hop"` 50 | SrcMaskLen uint32 `json:"src-mask-len"` 51 | DstMaskLen uint32 `json:"dst-mask-len"` 52 | } 53 | 54 | type ExtendedGateway struct { 55 | NextHopIPVersion uint32 `json:"next-hop-ip-version"` 56 | NextHop utils.IPAddress `json:"next-hop"` 57 | AS uint32 `json:"as"` 58 | SrcAS uint32 `json:"src-as"` 59 | SrcPeerAS uint32 `json:"src-peer-as"` 60 | ASDestinations uint32 `json:"as-destinations"` 61 | ASPathType uint32 `json:"as-path-type"` 62 | ASPathLength uint32 `json:"as-path-length"` 63 | ASPath []uint32 `json:"as-path"` 64 | CommunitiesLength uint32 `json:"communities-length"` 65 | Communities []uint32 `json:"communities"` 66 | LocalPref uint32 `json:"local-pref"` 67 | } 68 | 69 | type EgressQueue struct { 70 | Queue uint32 `json:"queue"` 71 | } 72 | 73 | type ExtendedACL struct { 74 | Number uint32 `json:"number"` 75 | Name string `json:"name"` 76 | Direction uint32 `json:"direction"` // 0:unknown, 1:ingress, 2:egress 77 | } 78 | 79 | type ExtendedFunction struct { 80 | Symbol string `json:"symbol"` 81 | } 82 | 83 | type IfCounters struct { 84 | IfIndex uint32 `json:"if-index"` 85 | IfType uint32 `json:"if-type"` 86 | IfSpeed uint64 `json:"if-speed"` 87 | IfDirection uint32 `json:"if-direction"` 88 | IfStatus uint32 `json:"if-status"` 89 | IfInOctets uint64 `json:"if-in-octets"` 90 | IfInUcastPkts uint32 `json:"if-in-ucast-pkts"` 91 | IfInMulticastPkts uint32 `json:"if-in-multicast-pkts"` 92 | IfInBroadcastPkts uint32 `json:"if-in-broadcast-pkts"` 93 | IfInDiscards uint32 `json:"if-in-discards"` 94 | IfInErrors uint32 `json:"if-in-errors"` 95 | IfInUnknownProtos uint32 
`json:"if-in-unknown-protos"` 96 | IfOutOctets uint64 `json:"if-out-octets"` 97 | IfOutUcastPkts uint32 `json:"if-out-ucast-pkts"` 98 | IfOutMulticastPkts uint32 `json:"if-out-multicast-pkts"` 99 | IfOutBroadcastPkts uint32 `json:"if-out-broadcast-pkts"` 100 | IfOutDiscards uint32 `json:"if-out-discards"` 101 | IfOutErrors uint32 `json:"if-out-errors"` 102 | IfPromiscuousMode uint32 `json:"if-promiscuous-mode"` 103 | } 104 | 105 | type EthernetCounters struct { 106 | Dot3StatsAlignmentErrors uint32 `json:"dot3-stats-aligment-errors"` 107 | Dot3StatsFCSErrors uint32 `json:"dot3-stats-fcse-errors"` 108 | Dot3StatsSingleCollisionFrames uint32 `json:"dot3-stats-single-collision-frames"` 109 | Dot3StatsMultipleCollisionFrames uint32 `json:"dot3-stats-multiple-collision-frames"` 110 | Dot3StatsSQETestErrors uint32 `json:"dot3-stats-seq-test-errors"` 111 | Dot3StatsDeferredTransmissions uint32 `json:"dot3-stats-deferred-transmissions"` 112 | Dot3StatsLateCollisions uint32 `json:"dot3-stats-late-collisions"` 113 | Dot3StatsExcessiveCollisions uint32 `json:"dot3-stats-excessive-collisions"` 114 | Dot3StatsInternalMacTransmitErrors uint32 `json:"dot3-stats-internal-mac-transmit-errors"` 115 | Dot3StatsCarrierSenseErrors uint32 `json:"dot3-stats-carrier-sense-errors"` 116 | Dot3StatsFrameTooLongs uint32 `json:"dot3-stats-frame-too-longs"` 117 | Dot3StatsInternalMacReceiveErrors uint32 `json:"dot3-stats-internal-mac-receive-errors"` 118 | Dot3StatsSymbolErrors uint32 `json:"dot3-stats-symbol-errors"` 119 | } 120 | 121 | type RawRecord struct { 122 | Data []byte `json:"data"` 123 | } 124 | -------------------------------------------------------------------------------- /decoders/sflow/format.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/netip" 7 | ) 8 | 9 | func (p *Packet) MarshalJSON() ([]byte, error) { 10 | return json.Marshal(*p) // this is a trick to avoid 
having the JSON marshaller defaults to MarshalText 11 | } 12 | 13 | func (p *Packet) MarshalText() ([]byte, error) { 14 | agentIP, _ := netip.AddrFromSlice(p.AgentIP) 15 | return []byte(fmt.Sprintf("sFlow%d agent:%s seq:%d count:%d", p.Version, agentIP.String(), p.SequenceNumber, p.SamplesCount)), nil 16 | } 17 | -------------------------------------------------------------------------------- /decoders/sflow/packet.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | import "github.com/netsampler/goflow2/v2/decoders/utils" 4 | 5 | type Packet struct { 6 | Version uint32 `json:"version"` 7 | IPVersion uint32 `json:"ip-version"` 8 | AgentIP utils.IPAddress `json:"agent-ip"` 9 | SubAgentId uint32 `json:"sub-agent-id"` 10 | SequenceNumber uint32 `json:"sequence-number"` 11 | Uptime uint32 `json:"uptime"` 12 | SamplesCount uint32 `json:"samples-count"` 13 | Samples []interface{} `json:"samples"` 14 | } 15 | 16 | type SampleHeader struct { 17 | Format uint32 `json:"format"` 18 | Length uint32 `json:"length"` 19 | 20 | SampleSequenceNumber uint32 `json:"sample-sequence-number"` 21 | SourceIdType uint32 `json:"source-id-type"` 22 | SourceIdValue uint32 `json:"source-id-value"` 23 | } 24 | 25 | type FlowSample struct { 26 | Header SampleHeader `json:"header"` 27 | 28 | SamplingRate uint32 `json:"sampling-rate"` 29 | SamplePool uint32 `json:"sample-pool"` 30 | Drops uint32 `json:"drops"` 31 | Input uint32 `json:"input"` 32 | Output uint32 `json:"output"` 33 | FlowRecordsCount uint32 `json:"flow-records-count"` 34 | Records []FlowRecord `json:"records"` 35 | } 36 | 37 | type CounterSample struct { 38 | Header SampleHeader `json:"header"` 39 | 40 | CounterRecordsCount uint32 `json:"counter-records-count"` 41 | Records []CounterRecord `json:"records"` 42 | } 43 | 44 | type ExpandedFlowSample struct { 45 | Header SampleHeader `json:"header"` 46 | 47 | SamplingRate uint32 `json:"sampling-rate"` 48 | SamplePool uint32 
`json:"sample-pool"` 49 | Drops uint32 `json:"drops"` 50 | InputIfFormat uint32 `json:"input-if-format"` 51 | InputIfValue uint32 `json:"input-if-value"` 52 | OutputIfFormat uint32 `json:"output-if-format"` 53 | OutputIfValue uint32 `json:"output-if-value"` 54 | FlowRecordsCount uint32 `json:"flow-records-count"` 55 | Records []FlowRecord `json:"records"` 56 | } 57 | 58 | // DropSample data structure according to https://sflow.org/sflow_drops.txt 59 | type DropSample struct { 60 | Header SampleHeader `json:"header"` 61 | 62 | Drops uint32 `json:"drops"` 63 | Input uint32 `json:"input"` 64 | Output uint32 `json:"output"` 65 | Reason uint32 `json:"reason"` 66 | FlowRecordsCount uint32 `json:"flow-records-count"` 67 | Records []FlowRecord `json:"records"` 68 | } 69 | 70 | type RecordHeader struct { 71 | DataFormat uint32 `json:"data-format"` 72 | Length uint32 `json:"length"` 73 | } 74 | 75 | type FlowRecord struct { 76 | Header RecordHeader `json:"header"` 77 | Data interface{} `json:"data"` 78 | } 79 | 80 | type CounterRecord struct { 81 | Header RecordHeader `json:"header"` 82 | Data interface{} `json:"data"` 83 | } 84 | -------------------------------------------------------------------------------- /decoders/utils/types.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "net/netip" 7 | ) 8 | 9 | type MacAddress []byte // purely for the formatting purpose 10 | 11 | func (s *MacAddress) MarshalJSON() ([]byte, error) { 12 | return []byte(fmt.Sprintf("\"%s\"", net.HardwareAddr([]byte(*s)).String())), nil 13 | } 14 | 15 | type IPAddress []byte // purely for the formatting purpose 16 | 17 | func (s IPAddress) MarshalJSON() ([]byte, error) { 18 | ip, _ := netip.AddrFromSlice([]byte(s)) 19 | return []byte(fmt.Sprintf("\"%s\"", ip.String())), nil 20 | } 21 | -------------------------------------------------------------------------------- /decoders/utils/utils.go: 
-------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "errors" 7 | "io" 8 | "reflect" 9 | ) 10 | 11 | type BytesBuffer interface { 12 | io.Reader 13 | Next(int) []byte 14 | } 15 | 16 | func BinaryDecoder(payload *bytes.Buffer, dests ...interface{}) error { 17 | for _, dest := range dests { 18 | err := BinaryRead(payload, binary.BigEndian, dest) 19 | if err != nil { 20 | return err 21 | } 22 | } 23 | return nil 24 | } 25 | func BinaryRead(payload BytesBuffer, order binary.ByteOrder, data any) error { 26 | // Fast path for basic types and slices. 27 | if n := intDataSize(data); n != 0 { 28 | bs := payload.Next(n) 29 | if len(bs) < n { 30 | return io.ErrUnexpectedEOF 31 | } 32 | switch data := data.(type) { 33 | case *bool: 34 | *data = bs[0] != 0 35 | case *int8: 36 | *data = int8(bs[0]) 37 | case *uint8: 38 | *data = bs[0] 39 | case *int16: 40 | *data = int16(order.Uint16(bs)) 41 | case *uint16: 42 | *data = order.Uint16(bs) 43 | case *int32: 44 | *data = int32(order.Uint32(bs)) 45 | case *uint32: 46 | *data = order.Uint32(bs) 47 | case *int64: 48 | *data = int64(order.Uint64(bs)) 49 | case *uint64: 50 | *data = order.Uint64(bs) 51 | case *string: 52 | strlen := int(order.Uint32(bs)) 53 | buf := payload.Next(strlen) 54 | if len(buf) < strlen { 55 | return io.ErrUnexpectedEOF 56 | } 57 | *data = string(buf) 58 | case []bool: 59 | for i, x := range bs { // Easier to loop over the input for 8-bit values. 
60 | data[i] = x != 0 61 | } 62 | case []int8: 63 | for i, x := range bs { 64 | data[i] = int8(x) 65 | } 66 | case []uint8: 67 | copy(data, bs) 68 | case IPAddress: 69 | copy(data, bs) 70 | case MacAddress: 71 | copy(data, bs) 72 | case []int16: 73 | for i := range data { 74 | data[i] = int16(order.Uint16(bs[2*i:])) 75 | } 76 | case []uint16: 77 | for i := range data { 78 | data[i] = order.Uint16(bs[2*i:]) 79 | } 80 | case []int32: 81 | for i := range data { 82 | data[i] = int32(order.Uint32(bs[4*i:])) 83 | } 84 | case []uint32: 85 | for i := range data { 86 | data[i] = order.Uint32(bs[4*i:]) 87 | } 88 | case []int64: 89 | for i := range data { 90 | data[i] = int64(order.Uint64(bs[8*i:])) 91 | } 92 | case []uint64: 93 | for i := range data { 94 | data[i] = order.Uint64(bs[8*i:]) 95 | } 96 | default: 97 | n = 0 // fast path doesn't apply 98 | } 99 | if n != 0 { 100 | return nil 101 | } 102 | } 103 | 104 | return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) 105 | } 106 | 107 | // intDataSize returns the size of the data required to represent the data when encoded. 108 | // It returns zero if the type cannot be implemented by the fast path in Read or Write. 
109 | func intDataSize(data any) int { 110 | switch data := data.(type) { 111 | case bool, int8, uint8, *bool, *int8, *uint8: 112 | return 1 113 | case []bool: 114 | return len(data) 115 | case []int8: 116 | return len(data) 117 | case []uint8: 118 | return len(data) 119 | case IPAddress: 120 | return len(data) 121 | case MacAddress: 122 | return len(data) 123 | case int16, uint16, *int16, *uint16: 124 | return 2 125 | case []int16: 126 | return 2 * len(data) 127 | case []uint16: 128 | return 2 * len(data) 129 | case int32, uint32, *int32, *uint32: 130 | return 4 131 | case *string: // return the length field 132 | return 4 133 | case []int32: 134 | return 4 * len(data) 135 | case []uint32: 136 | return 4 * len(data) 137 | case int64, uint64, *int64, *uint64: 138 | return 8 139 | case []int64: 140 | return 8 * len(data) 141 | case []uint64: 142 | return 8 * len(data) 143 | } 144 | return 0 145 | } 146 | -------------------------------------------------------------------------------- /decoders/utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/binary" 5 | "io" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | ) 11 | 12 | func testBinaryRead(buf BytesBuffer, data any) error { 13 | order := binary.BigEndian 14 | return BinaryRead(buf, order, data) 15 | } 16 | 17 | func testBinaryReadComparison(buf BytesBuffer, data any) error { 18 | order := binary.BigEndian 19 | return binary.Read(buf, order, data) 20 | } 21 | 22 | type benchFct func(buf BytesBuffer, data any) error 23 | 24 | func TestBinaryReadInteger(t *testing.T) { 25 | buf := newTestBuf([]byte{1, 2, 3, 4}) 26 | var dest uint32 27 | err := testBinaryRead(buf, &dest) 28 | require.NoError(t, err) 29 | assert.Equal(t, uint32(0x1020304), dest) 30 | } 31 | 32 | func TestBinaryReadBytes(t *testing.T) { 33 | buf := newTestBuf([]byte{1, 2, 3, 4}) 34 | dest := 
make([]byte, 4) 35 | err := testBinaryRead(buf, dest) 36 | require.NoError(t, err) 37 | } 38 | 39 | func TestBinaryReadUints(t *testing.T) { 40 | buf := newTestBuf([]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}) 41 | dest := make([]uint32, 4) 42 | err := testBinaryRead(buf, dest) 43 | require.NoError(t, err) 44 | assert.Equal(t, uint32(0x1020304), dest[0]) 45 | } 46 | 47 | type testBuf struct { 48 | buf []byte 49 | off int 50 | } 51 | 52 | func newTestBuf(data []byte) *testBuf { 53 | return &testBuf{ 54 | buf: data, 55 | } 56 | } 57 | 58 | func (b *testBuf) Next(n int) []byte { 59 | if n > len(b.buf) { 60 | return b.buf 61 | } 62 | return b.buf[0:n] 63 | } 64 | 65 | func (b *testBuf) Reset() { 66 | b.off = 0 67 | } 68 | 69 | func (b *testBuf) Read(p []byte) (int, error) { 70 | if len(b.buf) == 0 || b.off >= len(b.buf) { 71 | return 0, io.EOF 72 | } 73 | 74 | n := copy(p, b.buf[b.off:]) 75 | b.off += n 76 | return n, nil 77 | } 78 | 79 | func benchBinaryRead(b *testing.B, buf *testBuf, dest any, cmp bool) { 80 | var fct benchFct 81 | if cmp { 82 | fct = testBinaryReadComparison 83 | } else { 84 | fct = testBinaryRead 85 | } 86 | for n := 0; n < b.N; n++ { 87 | fct(buf, dest) 88 | buf.Reset() 89 | } 90 | } 91 | 92 | func BenchmarkBinaryReadIntegerBase(b *testing.B) { 93 | buf := newTestBuf([]byte{1, 2, 3, 4}) 94 | var dest uint32 95 | benchBinaryRead(b, buf, &dest, false) 96 | } 97 | 98 | func BenchmarkBinaryReadIntegerComparison(b *testing.B) { 99 | buf := newTestBuf([]byte{1, 2, 3, 4}) 100 | var dest uint32 101 | benchBinaryRead(b, buf, &dest, true) 102 | } 103 | 104 | func BenchmarkBinaryReadByteBase(b *testing.B) { 105 | buf := newTestBuf([]byte{1, 2, 3, 4}) 106 | var dest byte 107 | benchBinaryRead(b, buf, &dest, false) 108 | } 109 | 110 | func BBenchmarkBinaryReadByteComparison(b *testing.B) { 111 | buf := newTestBuf([]byte{1, 2, 3, 4}) 112 | var dest byte 113 | benchBinaryRead(b, buf, &dest, true) 114 | } 115 | 116 | func BenchmarkBinaryReadBytesBase(b 
*testing.B) { 117 | buf := newTestBuf([]byte{1, 2, 3, 4}) 118 | dest := make([]byte, 4) 119 | benchBinaryRead(b, buf, dest, false) 120 | } 121 | 122 | func BenchmarkBinaryReadBytesComparison(b *testing.B) { 123 | buf := newTestBuf([]byte{1, 2, 3, 4}) 124 | dest := make([]byte, 4) 125 | benchBinaryRead(b, buf, dest, true) 126 | } 127 | 128 | func BenchmarkBinaryReadUintsBase(b *testing.B) { 129 | buf := newTestBuf([]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}) 130 | dest := make([]uint32, 4) 131 | benchBinaryRead(b, buf, dest, false) 132 | } 133 | 134 | func BenchmarkBinaryReadUintsComparison(b *testing.B) { 135 | buf := newTestBuf([]byte{1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4}) 136 | dest := make([]uint32, 4) 137 | benchBinaryRead(b, buf, dest, true) 138 | } 139 | -------------------------------------------------------------------------------- /docs/agents.md: -------------------------------------------------------------------------------- 1 | # Agents 2 | 3 | There are various agents that can send samples to a flow collector. 4 | 5 | ## Hardware 6 | 7 | ### Juniper 8 | 9 | In the latest versions, Juniper supports sFlow and IPFIX protocols. 10 | 11 | [Documentation](https://www.juniper.net/documentation/us/en/software/junos/network-mgmt/topics/topic-map/sflow-monitoring-technology.html). 12 | 13 | Sample configuration: 14 | ``` 15 | set protocols sflow collector 10.0.0.1 16 | set protocols sflow collector udp-port 6343 17 | set protocols sflow interface ge-0/0/0 18 | set protocols sflow sample-rate 2048 19 | ``` 20 | 21 | ## Software 22 | 23 | ### hsflowd 24 | 25 | [Documentation](https://sflow.net/host-sflow-linux-config.php). 26 | 27 | Sample packets using pcap, iptables nflog and many more. Uses sFlow. 
28 | 29 | Sample configuration: 30 | ``` 31 | sflow { 32 | collector { ip = 10.0.0.1 udpport = 6343 } 33 | pcap { dev = eth0 } 34 | } 35 | ``` 36 | 37 | Run with 38 | ```bash 39 | $ hsflowd -d -f hsflowd.conf 40 | ``` 41 | 42 | ### nProbe 43 | 44 | [Documentation](https://www.ntop.org/guides/nprobe/) 45 | 46 | Sample packets using pcap, iptables nflog and many more. Uses NetFlow v9 or IPFIX. 47 | 48 | Run with 49 | ```bash 50 | $ nprobe -i eth0 -n 10.0.0.1:2055 -V 10 51 | ``` 52 | 53 | ### softflowd 54 | 55 | [Documentation](https://github.com/irino/softflowd/blob/master/softflowd.md) 56 | 57 | Sample packets using pcap. Uses NetFlow or IPFIX. 58 | 59 | Run with 60 | ```bash 61 | $ softflowd -i eth0 -n '10.0.0.1:2055' -P 'udp' -v 10 62 | ``` 63 | -------------------------------------------------------------------------------- /docs/contributors.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | A special thank you to all the contributors of GoFlow. 4 | * [debugloop](https://github.com/debugloop) 5 | * [simPod](https://github.com/simPod) 6 | * [mmlb](https://github.com/mmlb) 7 | * [kanocz](https://github.com/kanocz) 8 | * [morrowc](https://github.com/morrowc) 9 | * [SuperQ](https://github.com/SuperQ) 10 | * [shyam334](https://github.com/shyam334) 11 | * [leoluk](https://github.com/leoluk) 12 | 13 | and many more! -------------------------------------------------------------------------------- /docs/logs.md: -------------------------------------------------------------------------------- 1 | # Logs 2 | 3 | -------------------------------------------------------------------------------- /docs/mapping.md: -------------------------------------------------------------------------------- 1 | # Mapping and Configuration 2 | 3 | GoFlow2 allows users to collect and represent non-standard fields 4 | without having to rely on `-produce=raw` setting. 
5 | 6 | By default, commonly used types are collected into the protobuf. 7 | For instance source and destination IP addresses, TCP/UDP ports, etc. 8 | When suggesting a new field to collect, preference should be given to fields 9 | that are both widely adopted and supported by multiple protocols (sFlow, IPFIX). 10 | 11 | Some scenarios require more flexibility. 12 | In fact, IPFIX allows Private Enterprise Numbers ([PEN](https://www.iana.org/assignments/enterprise-numbers/)) 13 | and entire datagrams (IPFIX, sFlow) can contain bytes of interest. 14 | 15 | A mapping configuration file empowers GoFlow2 users to collect 16 | extra data without changing the code and recompiling. 17 | The feature is available for both protobuf binary and JSON formatting. 18 | 19 | A configuration file can be invoked the following way: 20 | 21 | ```bash 22 | goflow2 -mapping=config.yaml -format=json -produce=sample 23 | ``` 24 | 25 | An example configuration file that collects NetFlow/IPFIX flow direction information: 26 | 27 | ```yaml 28 | formatter: 29 | fields: # list of fields to format in JSON 30 | - flow_direction 31 | protobuf: # manual protobuf fields addition 32 | - name: flow_direction 33 | index: 42 34 | type: varint 35 | # Decoder mappings 36 | ipfix: 37 | mapping: 38 | - field: 61 39 | destination: flow_direction 40 | netflowv9: 41 | mapping: 42 | - field: 61 43 | destination: flow_direction 44 | ``` 45 | 46 | In JSON, the field `flow_direction` will now be added. 47 | In binary protobuf, when consumed by another tool, 48 | the latter can access the new field at index 42. 49 | A custom proto file can be compiled with the following: 50 | 51 | ```proto 52 | message FlowMessage { 53 | 54 | ... 55 | uint32 flow_direction = 42; 56 | 57 | ``` 58 | 59 | ## Formatting and rendering 60 | 61 | This section of the configuration is used for textual representations. 
62 | Both fields from [`flow.proto`](../pb/flow.proto) and custom ones inside `formatter.protobuf` 63 | can be available in the textual output (JSON for instance). 64 | 65 | The items inside `formatter.fields` are the fields present in the output. 66 | 67 | The render section picks the representation. 68 | For instance a 4/16 bytes field can be represented as an IP address, time can be represented as RFC3339 or epoch. 69 | 70 | ```yaml 71 | formatter: 72 | fields: 73 | - time_received_ns 74 | - my_new_field 75 | - my_other_field 76 | protobuf: 77 | - name: my_new_field 78 | index: 1000 79 | type: varint 80 | - name: my_other_field 81 | index: 2000 82 | type: string 83 | render: 84 | time_received_ns: datetimenano 85 | my_other_field: ip 86 | ``` 87 | 88 | ## Encapsulation 89 | 90 | Custom mapping can be used with encapsulation. 91 | 92 | By default, GoFlow2 will expect a packet with the following layers: 93 | 94 | * Ethernet 95 | * 802.1q and/or MPLS 96 | * IP 97 | * TCP or UDP 98 | 99 | A more complex packet could be in the form: 100 | 101 | * **Ethernet** 102 | * **MPLS** 103 | * **IP** 104 | * *GRE* 105 | * *Ethernet* 106 | * *IP* 107 | * *UDP* 108 | 109 | Only the layers in **bold** will have the information collected. 110 | The perimeter that is considered encapsulation here is the GRE protocol (note: it could be started if a second Ethernet layer was above 802.1q). 111 | Rather than having duplicates of the existing fields with encapsulation, a configuration file can be used to collect 112 | the encapsulated fields. 113 | 114 | An additional consideration is that protobuf fields can be array (or `repeated`). 115 | Due to the way the mapping works, the arrays are not [packed](https://protobuf.dev/programming-guides/encoding/#packed) 116 | (equivalent to a `repeated myfield = 123 [packed=false]` in the definition). 
117 | Each item is encoded in the order they are parsed alongside other fields 118 | whereas packed would require a second pass to combine all the items together. 119 | 120 | ### Inner UDP/TCP ports 121 | 122 | ```yaml 123 | formatter: 124 | fields: 125 | - src_port_encap 126 | - dst_port_encap 127 | protobuf: 128 | - name: src_port_encap 129 | index: 1021 130 | type: string 131 | array: true 132 | - name: dst_port_encap 133 | index: 1022 134 | type: string 135 | array: true 136 | sflow: 137 | mapping: 138 | - layer: "udp" 139 | encap: true 140 | offset: 0 141 | length: 16 142 | destination: src_port_encap 143 | - layer: "udp" 144 | encap: true 145 | offset: 16 146 | length: 16 147 | destination: dst_port_encap 148 | - layer: "tcp" 149 | encap: true 150 | offset: 0 151 | length: 16 152 | destination: src_port_encap 153 | - layer: "tcp" 154 | encap: true 155 | offset: 16 156 | length: 16 157 | destination: dst_port_encap 158 | ``` 159 | 160 | ### Inner IP addresses 161 | 162 | ```yaml 163 | formatter: 164 | fields: 165 | - src_ip_encap 166 | - dst_ip_encap 167 | protobuf: 168 | - name: src_ip_encap 169 | index: 1006 170 | type: string 171 | array: true 172 | - name: dst_ip_encap 173 | index: 1007 174 | type: string 175 | array: true 176 | render: 177 | src_ip_encap: ip 178 | dst_ip_encap: ip 179 | sflow: 180 | mapping: 181 | - layer: "ipv6" 182 | encap: true 183 | offset: 64 184 | length: 128 185 | destination: src_ip_encap 186 | - layer: "ipv6" 187 | encap: true 188 | offset: 192 189 | length: 128 190 | destination: dst_ip_encap 191 | - layer: "ipv4" 192 | encap: true 193 | offset: 96 194 | length: 32 195 | destination: src_ip_encap 196 | - layer: "ipv4" 197 | encap: true 198 | offset: 128 199 | length: 32 200 | destination: dst_ip_encap 201 | ``` 202 | -------------------------------------------------------------------------------- /docs/performance.md: -------------------------------------------------------------------------------- 1 | # Performance 2 | 3 | When 
setting up GoFlow2 for the first time, it is difficult to estimate the settings and resources required. 4 | This software has been tested with hundreds of thousands of flows per second on common hardware but the default settings may not be optimal everywhere. 5 | 6 | It is important to understand the pattern of your flows. 7 | Some environments have predictable trends, for instance a regional ISP will likely have a peak of traffic at 20:00 local time, 8 | whereas a hosting provider may have large bursts of traffic due to a DDoS attack. 9 | 10 | We need to consider the following: 11 | 12 | * R: The rate of packets (controlled by sampling and traffic) 13 | * C: The decoding capacity of a worker (dependent on CPU) 14 | * L: The allowed latency (dependent on buffer size) 15 | 16 | In a typical environment, capacity matches or exceeds the rate (C >= R). 17 | When the rate goes above the capacity (eg: bursts), packets waiting to be processed pile up. 18 | Latency increases as long as the rate exceeds the capacity. It remains stable if the rate equals the capacity. 19 | It can only lower when there is extra capacity (C-R). 20 | 21 | A buffer too large can cause "buffer bloat" where latency is too high for normal operations (eg: DDoS detection being delayed), 22 | whereas a short buffer (or no buffer for real-time) may drop information during a temporary increase. 23 | 24 | The listen URI can be customized to meet an environment's requirements. 25 | GoFlow2 will work better in an environment with guaranteed resources. 26 | 27 | ## Life of a packet 28 | 29 | When a packet is received by the collector's machine, the kernel will send the packet towards a socket. 30 | The socket is buffered. On Linux, the buffer size is a global configuration setting: `rmem_max`. 31 | 32 | If the buffer is full, new packets will be discarded, increasing the count of 33 | UDP errors. 
34 | 35 | A first level of load-balancing can be done by having multiple sockets listening 36 | on the same port. 37 | On Linux, this is done with `SO_REUSEPORT` and `SO_REUSEADDR` options. 38 | In GoFlow2 you can set the `count` option to define the number of sockets. 39 | Each socket will put the packet in a queue to be decoded. 40 | 41 | The number of `workers` should ideally match the number of CPUs available. 42 | By default, the number is set to twice the number of open sockets. 43 | 44 | `Blocking` mode forces GoFlow2 to operate in real-time instead of buffered. A packet is only decoded if 45 | a worker is available and storage depends on the kernel UDP buffer. 46 | 47 | In buffered mode, the size of the queue is set by `queue_size`, much larger than the UDP buffer. 48 | 49 | The URI below summarizes the options: 50 | 51 | ``` 52 | $ goflow2 -listen flow://:6343/?count=4&workers=16&blocking=false&queue_size=1000000 53 | ^ ^ ^ ^ 54 | ┃ ┃ ┃ ┗ In buffered mode, the amount of packets stored in memory 55 | ┃ ┃ ┗ Real-time mode 56 | ┃ ┗ Decoding workers 57 | ┗ Open sockets listening 58 | ``` 59 | 60 | ## Note on resource guarantees 61 | 62 | GoFlow2 works better on guaranteed fixed resources. 63 | It requires the operator to scope for a worst case scenario in terms of latency. 64 | 65 | RAM usage is dependent on the `queue_size` (unless using blocking mode). 66 | By default, this may exceed the host memory if rate is above capacity and result in an `OoM` crash. 67 | As UDP packets can be a maximum of 9000 bytes, a 2GB RAM machine can only buffer 222222 packets if there is no overhead. 68 | 69 | Kubernetes is an example of allowing flexible resources for processes. 70 | 71 | In a Pod `resources`, the `request` and `limits` can be set for CPU. Extra CPU can be used by other applications if colocated. 
72 | Make sure they are the same for RAM since if GoFlow2 is killed, data could be lost, unless you are confident other applications 73 | will not require extra RAM during peaks. 74 | 75 | Furthermore, `HorizontalPodScalers` can be used to create additional GoFlow2 instances and route the packets when a metric crosses a threshold. 76 | This is not recommended with NetFlow/IPFIX without having a shared template system due to cold-starts. 77 | 78 | Another item to take into account: make sure the MTU of the machines where the collector is hosted matches 79 | the MTU of the sampling routers. This can cause some issues when tunnelling or on certain cloud providers. 80 | -------------------------------------------------------------------------------- /docs/protobuf.md: -------------------------------------------------------------------------------- 1 | # Protobuf 2 | 3 | The `.proto` files contain a list of fields that are populated by GoFlow2. 4 | 5 | If the fields are changed, the schema needs to be recompiled 6 | in order to use it. 7 | 8 | The compilation is dependent on the language. 9 | Keep in mind the protobuf source code and libraries change often and this page may be outdated. 10 | 11 | For other languages, refer to the [official guide](https://developers.google.com/protocol-buffers). 12 | 13 | ## Compile for Golang 14 | 15 | The following two tools are required: 16 | * [protoc](https://github.com/protocolbuffers/protobuf), a protobuf compiler, written in C 17 | * [protoc-gen-go](https://github.com/protocolbuffers/protobuf-go), a Go plugin for protoc that can compile protobuf for Golang 18 | 19 | The release page in the respective GitHub repositories should provide binary distributions. Unzip/Untar if necessary. 20 | Make sure that the two binaries are in your ``$PATH``. On Mac OS you can add the files to `/usr/local/bin` for instance. 
21 | 22 | From the root of the repository, run the following command: 23 | 24 | ```bash 25 | $ protoc --go_opt=paths=source_relative --go_out=. pb/*.proto 26 | ``` 27 | 28 | This will compile the main protobuf schema into the `pb` directory. 29 | 30 | You can also run the command which will also compile the protobuf for the sample enricher. 31 | 32 | ```bash 33 | $ make proto 34 | ``` -------------------------------------------------------------------------------- /format/binary/binary.go: -------------------------------------------------------------------------------- 1 | package binary 2 | 3 | import ( 4 | "encoding" 5 | 6 | "github.com/netsampler/goflow2/v2/format" 7 | ) 8 | 9 | type BinaryDriver struct { 10 | } 11 | 12 | func (d *BinaryDriver) Prepare() error { 13 | return nil 14 | } 15 | 16 | func (d *BinaryDriver) Init() error { 17 | return nil 18 | } 19 | 20 | func (d *BinaryDriver) Format(data interface{}) ([]byte, []byte, error) { 21 | var key []byte 22 | if dataIf, ok := data.(interface{ Key() []byte }); ok { 23 | key = dataIf.Key() 24 | } 25 | if dataIf, ok := data.(encoding.BinaryMarshaler); ok { 26 | text, err := dataIf.MarshalBinary() 27 | return key, text, err 28 | } 29 | return key, nil, format.ErrNoSerializer 30 | } 31 | 32 | func init() { 33 | d := &BinaryDriver{} 34 | format.RegisterFormatDriver("bin", d) 35 | } 36 | -------------------------------------------------------------------------------- /format/format.go: -------------------------------------------------------------------------------- 1 | package format 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | var ( 9 | formatDrivers = make(map[string]FormatDriver) 10 | lock = &sync.RWMutex{} 11 | 12 | ErrFormat = fmt.Errorf("format error") 13 | ErrNoSerializer = fmt.Errorf("message is not serializable") 14 | ) 15 | 16 | type DriverFormatError struct { 17 | Driver string 18 | Err error 19 | } 20 | 21 | func (e *DriverFormatError) Error() string { 22 | return fmt.Sprintf("%s for %s format", 
e.Err.Error(), e.Driver) 23 | } 24 | 25 | func (e *DriverFormatError) Unwrap() []error { 26 | return []error{ErrFormat, e.Err} 27 | } 28 | 29 | type FormatDriver interface { 30 | Prepare() error // Prepare driver (eg: flag registration) 31 | Init() error // Initialize driver (eg: parse keying) 32 | Format(data interface{}) ([]byte, []byte, error) // Send a message 33 | } 34 | 35 | type FormatInterface interface { 36 | Format(data interface{}) ([]byte, []byte, error) 37 | } 38 | 39 | type Format struct { 40 | FormatDriver 41 | name string 42 | } 43 | 44 | func (t *Format) Format(data interface{}) ([]byte, []byte, error) { 45 | key, text, err := t.FormatDriver.Format(data) 46 | if err != nil { 47 | err = &DriverFormatError{ 48 | t.name, 49 | err, 50 | } 51 | } 52 | return key, text, err 53 | } 54 | 55 | func RegisterFormatDriver(name string, t FormatDriver) { 56 | lock.Lock() 57 | formatDrivers[name] = t 58 | lock.Unlock() 59 | 60 | if err := t.Prepare(); err != nil { 61 | panic(err) 62 | } 63 | } 64 | 65 | func FindFormat(name string) (*Format, error) { 66 | lock.RLock() 67 | t, ok := formatDrivers[name] 68 | lock.RUnlock() 69 | if !ok { 70 | return nil, fmt.Errorf("%w %s not found", ErrFormat, name) 71 | } 72 | 73 | err := t.Init() 74 | if err != nil { 75 | err = &DriverFormatError{name, err} 76 | } 77 | return &Format{t, name}, err 78 | } 79 | 80 | func GetFormats() []string { 81 | lock.RLock() 82 | defer lock.RUnlock() 83 | t := make([]string, len(formatDrivers)) 84 | var i int 85 | for k := range formatDrivers { 86 | t[i] = k 87 | i++ 88 | } 89 | return t 90 | } 91 | -------------------------------------------------------------------------------- /format/json/json.go: -------------------------------------------------------------------------------- 1 | package json 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/netsampler/goflow2/v2/format" 7 | ) 8 | 9 | type JsonDriver struct { 10 | } 11 | 12 | func (d *JsonDriver) Prepare() error { 13 | return nil 14 
| } 15 | 16 | func (d *JsonDriver) Init() error { 17 | return nil 18 | } 19 | 20 | func (d *JsonDriver) Format(data interface{}) ([]byte, []byte, error) { 21 | var key []byte 22 | if dataIf, ok := data.(interface{ Key() []byte }); ok { 23 | key = dataIf.Key() 24 | } 25 | output, err := json.Marshal(data) 26 | return key, output, err 27 | } 28 | 29 | func init() { 30 | d := &JsonDriver{} 31 | format.RegisterFormatDriver("json", d) 32 | } 33 | -------------------------------------------------------------------------------- /format/text/text.go: -------------------------------------------------------------------------------- 1 | package text 2 | 3 | import ( 4 | "encoding" 5 | 6 | "github.com/netsampler/goflow2/v2/format" 7 | ) 8 | 9 | type TextDriver struct { 10 | } 11 | 12 | func (d *TextDriver) Prepare() error { 13 | return nil 14 | } 15 | 16 | func (d *TextDriver) Init() error { 17 | return nil 18 | } 19 | 20 | func (d *TextDriver) Format(data interface{}) ([]byte, []byte, error) { 21 | var key []byte 22 | if dataIf, ok := data.(interface{ Key() []byte }); ok { 23 | key = dataIf.Key() 24 | } 25 | if dataIf, ok := data.(encoding.TextMarshaler); ok { 26 | text, err := dataIf.MarshalText() 27 | return key, text, err 28 | } 29 | if dataIf, ok := data.(interface{ String() string }); ok { 30 | return key, []byte(dataIf.String()), nil 31 | } 32 | return key, nil, format.ErrNoSerializer 33 | } 34 | 35 | func init() { 36 | d := &TextDriver{} 37 | format.RegisterFormatDriver("text", d) 38 | } 39 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/netsampler/goflow2/v2 2 | 3 | go 1.22 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/Shopify/sarama v1.38.1 9 | github.com/libp2p/go-reuseport v0.4.0 10 | github.com/oschwald/geoip2-golang v1.11.0 11 | github.com/prometheus/client_golang v1.22.0 12 | github.com/stretchr/testify 
v1.10.0 13 | github.com/xdg-go/scram v1.1.2 14 | google.golang.org/protobuf v1.36.6 15 | gopkg.in/yaml.v3 v3.0.1 16 | ) 17 | 18 | require ( 19 | github.com/beorn7/perks v1.0.1 // indirect 20 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 21 | github.com/davecgh/go-spew v1.1.1 // indirect 22 | github.com/eapache/go-resiliency v1.3.0 // indirect 23 | github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect 24 | github.com/eapache/queue v1.1.0 // indirect 25 | github.com/golang/snappy v0.0.4 // indirect 26 | github.com/hashicorp/errwrap v1.0.0 // indirect 27 | github.com/hashicorp/go-multierror v1.1.1 // indirect 28 | github.com/hashicorp/go-uuid v1.0.3 // indirect 29 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 30 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 31 | github.com/jcmturner/gofork v1.7.6 // indirect 32 | github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect 33 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 34 | github.com/klauspost/compress v1.18.0 // indirect 35 | github.com/kr/text v0.2.0 // indirect 36 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 37 | github.com/oschwald/maxminddb-golang v1.13.0 // indirect 38 | github.com/pierrec/lz4/v4 v4.1.17 // indirect 39 | github.com/pmezard/go-difflib v1.0.0 // indirect 40 | github.com/prometheus/client_model v0.6.1 // indirect 41 | github.com/prometheus/common v0.62.0 // indirect 42 | github.com/prometheus/procfs v0.15.1 // indirect 43 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 44 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 45 | github.com/xdg-go/stringprep v1.0.4 // indirect 46 | golang.org/x/crypto v0.31.0 // indirect 47 | golang.org/x/net v0.33.0 // indirect 48 | golang.org/x/sys v0.30.0 // indirect 49 | golang.org/x/text v0.21.0 // indirect 50 | ) 51 | -------------------------------------------------------------------------------- /graphics/diagram.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/netsampler/goflow2/fb53b559f1abe986081384d0470c02937ef087e5/graphics/diagram.png -------------------------------------------------------------------------------- /metrics/decoder.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/netsampler/goflow2/v2/decoders/netflow" 9 | "github.com/netsampler/goflow2/v2/utils" 10 | 11 | "github.com/prometheus/client_golang/prometheus" 12 | ) 13 | 14 | func PromDecoderWrapper(wrapped utils.DecoderFunc, name string) utils.DecoderFunc { 15 | return func(msg interface{}) error { 16 | pkt, ok := msg.(*utils.Message) 17 | if !ok { 18 | return fmt.Errorf("flow is not *Message") 19 | } 20 | remote := pkt.Src.Addr().Unmap().String() 21 | localIP := pkt.Dst.Addr().Unmap().String() 22 | 23 | port := fmt.Sprintf("%d", pkt.Dst.Port()) 24 | size := len(pkt.Payload) 25 | 26 | MetricTrafficBytes.With( 27 | prometheus.Labels{ 28 | "remote_ip": remote, 29 | "local_ip": localIP, 30 | "local_port": port, 31 | "type": name, 32 | }). 33 | Add(float64(size)) 34 | MetricTrafficPackets.With( 35 | prometheus.Labels{ 36 | "remote_ip": remote, 37 | "local_ip": localIP, 38 | "local_port": port, 39 | "type": name, 40 | }). 41 | Inc() 42 | MetricPacketSizeSum.With( 43 | prometheus.Labels{ 44 | "remote_ip": remote, 45 | "local_ip": localIP, 46 | "local_port": port, 47 | "type": name, 48 | }). 49 | Observe(float64(size)) 50 | 51 | timeTrackStart := time.Now().UTC() 52 | 53 | err := wrapped(msg) 54 | 55 | timeTrackStop := time.Now().UTC() 56 | 57 | DecoderTime.With( 58 | prometheus.Labels{ 59 | "name": name, 60 | }). 
61 | Observe(float64((timeTrackStop.Sub(timeTrackStart)).Nanoseconds()) / 1e9) 62 | 63 | if err != nil { 64 | if errors.Is(err, netflow.ErrorTemplateNotFound) { 65 | NetFlowErrors.With( 66 | prometheus.Labels{ 67 | "router": remote, 68 | "error": "template_not_found", 69 | }). 70 | Inc() 71 | } 72 | 73 | switch err.(type) { 74 | default: 75 | DecoderErrors.With( 76 | prometheus.Labels{ 77 | "router": remote, 78 | "name": name, 79 | }). 80 | Inc() 81 | } 82 | } 83 | return err 84 | } 85 | } 86 | 87 | func recordCommonNetFlowMetrics(version uint16, key string, flowSets []interface{}) { 88 | versionStr := fmt.Sprintf("%d", version) 89 | 90 | for _, fs := range flowSets { 91 | switch fsConv := fs.(type) { 92 | case netflow.TemplateFlowSet: 93 | NetFlowSetStatsSum.With( 94 | prometheus.Labels{ 95 | "router": key, 96 | "version": versionStr, 97 | "type": "TemplateFlowSet", 98 | }). 99 | Inc() 100 | 101 | NetFlowSetRecordsStatsSum.With( 102 | prometheus.Labels{ 103 | "router": key, 104 | "version": versionStr, 105 | "type": "TemplateFlowSet", 106 | }). 107 | Add(float64(len(fsConv.Records))) 108 | case netflow.NFv9OptionsTemplateFlowSet: 109 | NetFlowSetStatsSum.With( 110 | prometheus.Labels{ 111 | "router": key, 112 | "version": versionStr, 113 | "type": "OptionsTemplateFlowSet", 114 | }). 115 | Inc() 116 | 117 | NetFlowSetRecordsStatsSum.With( 118 | prometheus.Labels{ 119 | "router": key, 120 | "version": versionStr, 121 | "type": "OptionsTemplateFlowSet", 122 | }). 123 | Add(float64(len(fsConv.Records))) 124 | case netflow.IPFIXOptionsTemplateFlowSet: 125 | NetFlowSetStatsSum.With( 126 | prometheus.Labels{ 127 | "router": key, 128 | "version": versionStr, 129 | "type": "OptionsTemplateFlowSet", 130 | }). 131 | Inc() 132 | 133 | NetFlowSetRecordsStatsSum.With( 134 | prometheus.Labels{ 135 | "router": key, 136 | "version": versionStr, 137 | "type": "OptionsTemplateFlowSet", 138 | }). 
139 | Add(float64(len(fsConv.Records))) 140 | case netflow.OptionsDataFlowSet: 141 | NetFlowSetStatsSum.With( 142 | prometheus.Labels{ 143 | "router": key, 144 | "version": versionStr, 145 | "type": "OptionsDataFlowSet", 146 | }). 147 | Inc() 148 | 149 | NetFlowSetRecordsStatsSum.With( 150 | prometheus.Labels{ 151 | "router": key, 152 | "version": versionStr, 153 | "type": "OptionsDataFlowSet", 154 | }). 155 | Add(float64(len(fsConv.Records))) 156 | case netflow.DataFlowSet: 157 | NetFlowSetStatsSum.With( 158 | prometheus.Labels{ 159 | "router": key, 160 | "version": versionStr, 161 | "type": "DataFlowSet", 162 | }). 163 | Inc() 164 | 165 | NetFlowSetRecordsStatsSum.With( 166 | prometheus.Labels{ 167 | "router": key, 168 | "version": versionStr, 169 | "type": "DataFlowSet", 170 | }). 171 | Add(float64(len(fsConv.Records))) 172 | } 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | ) 6 | 7 | const ( 8 | NAMESPACE = "goflow2" 9 | ) 10 | 11 | var ( 12 | MetricReceivedDroppedPackets = prometheus.NewCounterVec( 13 | prometheus.CounterOpts{ 14 | Name: "flow_dropped_packets_total", 15 | Help: "Packets dropped before processing.", 16 | Namespace: NAMESPACE, 17 | }, 18 | []string{"remote_ip", "local_ip", "local_port"}, 19 | ) 20 | MetricReceivedDroppedBytes = prometheus.NewCounterVec( 21 | prometheus.CounterOpts{ 22 | Name: "flow_dropped_bytes_total", 23 | Help: "Bytes dropped before processing.", 24 | Namespace: NAMESPACE, 25 | }, 26 | []string{"remote_ip", "local_ip", "local_port"}, 27 | ) 28 | MetricTrafficBytes = prometheus.NewCounterVec( 29 | prometheus.CounterOpts{ 30 | Name: "flow_traffic_bytes_total", 31 | Help: "Bytes received by the application.", 32 | Namespace: NAMESPACE, 33 | }, 34 | []string{"remote_ip", "local_ip", 
"local_port", "type"}, 35 | ) 36 | MetricTrafficPackets = prometheus.NewCounterVec( 37 | prometheus.CounterOpts{ 38 | Name: "flow_traffic_packets_total", 39 | Help: "Packets received by the application.", 40 | Namespace: NAMESPACE}, 41 | []string{"remote_ip", "local_ip", "local_port", "type"}, 42 | ) 43 | MetricPacketSizeSum = prometheus.NewSummaryVec( 44 | prometheus.SummaryOpts{ 45 | Name: "flow_traffic_size_bytes", 46 | Help: "Summary of packet size.", 47 | Namespace: NAMESPACE, Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 48 | }, 49 | []string{"remote_ip", "local_ip", "local_port", "type"}, 50 | ) 51 | DecoderErrors = prometheus.NewCounterVec( 52 | prometheus.CounterOpts{ 53 | Name: "flow_decoder_error_total", 54 | Help: "NetFlow/sFlow processed errors.", 55 | Namespace: NAMESPACE}, 56 | []string{"router", "name"}, 57 | ) 58 | DecoderTime = prometheus.NewSummaryVec( 59 | prometheus.SummaryOpts{ 60 | Name: "flow_decoding_time_seconds", 61 | Help: "Decoding time summary.", 62 | Namespace: NAMESPACE, Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 63 | }, 64 | []string{"name"}, 65 | ) 66 | NetFlowStats = prometheus.NewCounterVec( 67 | prometheus.CounterOpts{ 68 | Name: "flow_process_nf_total", 69 | Help: "NetFlows processed.", 70 | Namespace: NAMESPACE}, 71 | []string{"router", "version"}, 72 | ) 73 | NetFlowErrors = prometheus.NewCounterVec( 74 | prometheus.CounterOpts{ 75 | Name: "flow_process_nf_errors_total", 76 | Help: "NetFlows processed errors.", 77 | Namespace: NAMESPACE}, 78 | []string{"router", "error"}, 79 | ) 80 | NetFlowSetRecordsStatsSum = prometheus.NewCounterVec( 81 | prometheus.CounterOpts{ 82 | Name: "flow_process_nf_flowset_records_total", 83 | Help: "NetFlows FlowSets sum of records.", 84 | Namespace: NAMESPACE}, 85 | []string{"router", "version", "type"}, // data-template, data, opts... 
86 | ) 87 | NetFlowSetStatsSum = prometheus.NewCounterVec( 88 | prometheus.CounterOpts{ 89 | Name: "flow_process_nf_flowset_total", 90 | Help: "NetFlows FlowSets sum.", 91 | Namespace: NAMESPACE}, 92 | []string{"router", "version", "type"}, // data-template, data, opts... 93 | ) 94 | NetFlowTimeStatsSum = prometheus.NewSummaryVec( 95 | prometheus.SummaryOpts{ 96 | Name: "flow_process_nf_delay_seconds", 97 | Help: "NetFlows time difference between time of flow and processing.", 98 | Namespace: NAMESPACE, Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, 99 | }, 100 | []string{"router", "version"}, 101 | ) 102 | NetFlowTemplatesStats = prometheus.NewCounterVec( 103 | prometheus.CounterOpts{ 104 | Name: "flow_process_nf_templates_total", 105 | Help: "NetFlows Template count.", 106 | Namespace: NAMESPACE}, 107 | []string{"router", "version", "obs_domain_id", "template_id", "type"}, // options/template 108 | ) 109 | SFlowStats = prometheus.NewCounterVec( 110 | prometheus.CounterOpts{ 111 | Name: "flow_process_sf_total", 112 | Help: "sFlows processed.", 113 | Namespace: NAMESPACE}, 114 | []string{"router", "agent", "version"}, 115 | ) 116 | SFlowSampleStatsSum = prometheus.NewCounterVec( 117 | prometheus.CounterOpts{ 118 | Name: "flow_process_sf_samples_total", 119 | Help: "SFlows samples sum.", 120 | Namespace: NAMESPACE}, 121 | []string{"router", "agent", "version", "type"}, // counter, flow, expanded... 122 | ) 123 | SFlowSampleRecordsStatsSum = prometheus.NewCounterVec( 124 | prometheus.CounterOpts{ 125 | Name: "flow_process_sf_samples_records_total", 126 | Help: "SFlows samples sum of records.", 127 | Namespace: NAMESPACE}, 128 | []string{"router", "agent", "version", "type"}, // data-template, data, opts... 
129 | ) 130 | ) 131 | 132 | func init() { 133 | prometheus.MustRegister(MetricReceivedDroppedPackets) 134 | prometheus.MustRegister(MetricReceivedDroppedBytes) 135 | 136 | prometheus.MustRegister(MetricTrafficBytes) 137 | prometheus.MustRegister(MetricTrafficPackets) 138 | prometheus.MustRegister(MetricPacketSizeSum) 139 | 140 | prometheus.MustRegister(DecoderErrors) 141 | prometheus.MustRegister(DecoderTime) 142 | 143 | prometheus.MustRegister(NetFlowStats) 144 | prometheus.MustRegister(NetFlowErrors) 145 | prometheus.MustRegister(NetFlowSetRecordsStatsSum) 146 | prometheus.MustRegister(NetFlowSetStatsSum) 147 | prometheus.MustRegister(NetFlowTimeStatsSum) 148 | prometheus.MustRegister(NetFlowTemplatesStats) 149 | 150 | prometheus.MustRegister(SFlowStats) 151 | prometheus.MustRegister(SFlowSampleStatsSum) 152 | prometheus.MustRegister(SFlowSampleRecordsStatsSum) 153 | } 154 | -------------------------------------------------------------------------------- /metrics/producer.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "net/netip" 5 | 6 | "github.com/netsampler/goflow2/v2/decoders/netflow" 7 | "github.com/netsampler/goflow2/v2/decoders/netflowlegacy" 8 | "github.com/netsampler/goflow2/v2/decoders/sflow" 9 | "github.com/netsampler/goflow2/v2/producer" 10 | "github.com/netsampler/goflow2/v2/producer/proto" 11 | 12 | "github.com/prometheus/client_golang/prometheus" 13 | ) 14 | 15 | type PromProducerWrapper struct { 16 | wrapped producer.ProducerInterface 17 | } 18 | 19 | func (p *PromProducerWrapper) Produce(msg interface{}, args *producer.ProduceArgs) ([]producer.ProducerMessage, error) { 20 | flowMessageSet, err := p.wrapped.Produce(msg, args) 21 | if err != nil { 22 | return flowMessageSet, err 23 | } 24 | key := args.Src.Addr().Unmap().String() 25 | var nfvariant bool 26 | var versionStr string 27 | switch packet := msg.(type) { 28 | case *sflow.Packet: 29 | agentStr := "unk" 30 | 
agentIp, ok := netip.AddrFromSlice(packet.AgentIP) 31 | if ok { 32 | agentStr = agentIp.String() 33 | } 34 | 35 | SFlowStats.With( 36 | prometheus.Labels{ 37 | "router": key, 38 | "agent": agentStr, 39 | "version": "5", 40 | }). 41 | Inc() 42 | 43 | for _, samples := range packet.Samples { 44 | typeStr := "unknown" 45 | countRec := 0 46 | switch samplesConv := samples.(type) { 47 | case sflow.FlowSample: 48 | typeStr = "FlowSample" 49 | countRec = len(samplesConv.Records) 50 | case sflow.CounterSample: 51 | typeStr = "CounterSample" 52 | if samplesConv.Header.Format == 4 { 53 | typeStr = "Expanded" + typeStr 54 | } 55 | countRec = len(samplesConv.Records) 56 | case sflow.ExpandedFlowSample: 57 | typeStr = "ExpandedFlowSample" 58 | countRec = len(samplesConv.Records) 59 | } 60 | SFlowSampleStatsSum.With( 61 | prometheus.Labels{ 62 | "router": key, 63 | "agent": agentStr, 64 | "version": "5", 65 | "type": typeStr, 66 | }). 67 | Inc() 68 | 69 | SFlowSampleRecordsStatsSum.With( 70 | prometheus.Labels{ 71 | "router": key, 72 | "agent": agentStr, 73 | "version": "5", 74 | "type": typeStr, 75 | }). 76 | Add(float64(countRec)) 77 | } 78 | 79 | case *netflowlegacy.PacketNetFlowV5: 80 | NetFlowStats.With( 81 | prometheus.Labels{ 82 | "router": key, 83 | "version": "5", 84 | }). 85 | Inc() 86 | NetFlowSetStatsSum.With( 87 | prometheus.Labels{ 88 | "router": key, 89 | "version": "5", 90 | "type": "DataFlowSet", 91 | }). 92 | Add(float64(packet.Count)) 93 | 94 | case *netflow.NFv9Packet: 95 | NetFlowStats.With( 96 | prometheus.Labels{ 97 | "router": key, 98 | "version": "9", 99 | }). 100 | Inc() 101 | recordCommonNetFlowMetrics(9, key, packet.FlowSets) 102 | nfvariant = true 103 | versionStr = "9" 104 | 105 | case *netflow.IPFIXPacket: 106 | NetFlowStats.With( 107 | prometheus.Labels{ 108 | "router": key, 109 | "version": "10", 110 | }). 
111 | Inc() 112 | recordCommonNetFlowMetrics(10, key, packet.FlowSets) 113 | nfvariant = true 114 | versionStr = "10" 115 | } 116 | 117 | if nfvariant { 118 | for _, msg := range flowMessageSet { 119 | fmsg, ok := msg.(*protoproducer.ProtoProducerMessage) 120 | if !ok { 121 | continue 122 | } 123 | timeDiff := fmsg.TimeReceivedNs - fmsg.TimeFlowEndNs 124 | 125 | NetFlowTimeStatsSum.With( 126 | prometheus.Labels{ 127 | "router": key, 128 | "version": versionStr, 129 | }). 130 | Observe(float64(timeDiff) / 1e9) 131 | } 132 | } 133 | 134 | return flowMessageSet, err 135 | } 136 | 137 | func (p *PromProducerWrapper) Close() { 138 | p.wrapped.Close() 139 | } 140 | 141 | func (p *PromProducerWrapper) Commit(flowMessageSet []producer.ProducerMessage) { 142 | p.wrapped.Commit(flowMessageSet) 143 | } 144 | 145 | // Wraps a producer with metrics 146 | func WrapPromProducer(wrapped producer.ProducerInterface) producer.ProducerInterface { 147 | return &PromProducerWrapper{ 148 | wrapped: wrapped, 149 | } 150 | } 151 | 152 | // metrics template system 153 | -------------------------------------------------------------------------------- /metrics/receiver.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/netsampler/goflow2/v2/utils" 7 | 8 | "github.com/prometheus/client_golang/prometheus" 9 | ) 10 | 11 | type ReceiverMetric struct { 12 | } 13 | 14 | func NewReceiverMetric() *ReceiverMetric { 15 | return &ReceiverMetric{} 16 | } 17 | 18 | func (r *ReceiverMetric) Dropped(pkt utils.Message) { 19 | remote := pkt.Src.Addr().Unmap().String() 20 | localIP := pkt.Dst.Addr().Unmap().String() 21 | 22 | port := fmt.Sprintf("%d", pkt.Dst.Port()) 23 | size := len(pkt.Payload) 24 | 25 | labels := prometheus.Labels{ 26 | "remote_ip": remote, 27 | "local_ip": localIP, 28 | "local_port": port, 29 | } 30 | MetricReceivedDroppedPackets.With(labels).Inc() 31 | 
MetricReceivedDroppedBytes.With(labels).Add(float64(size)) 32 | } 33 | -------------------------------------------------------------------------------- /metrics/templates.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "strconv" 5 | 6 | "github.com/netsampler/goflow2/v2/decoders/netflow" 7 | 8 | "github.com/prometheus/client_golang/prometheus" 9 | ) 10 | 11 | type PromTemplateSystem struct { 12 | key string 13 | wrapped netflow.NetFlowTemplateSystem 14 | } 15 | 16 | // A default Prometheus template generator function to be used by a pipe 17 | func NewDefaultPromTemplateSystem(key string) netflow.NetFlowTemplateSystem { 18 | return NewPromTemplateSystem(key, netflow.CreateTemplateSystem()) 19 | } 20 | 21 | // Creates a Prometheus template system that wraps another template system. 22 | // The key argument is providing the router information for metrics. 23 | func NewPromTemplateSystem(key string, wrapped netflow.NetFlowTemplateSystem) netflow.NetFlowTemplateSystem { 24 | return &PromTemplateSystem{ 25 | key: key, 26 | wrapped: wrapped, 27 | } 28 | } 29 | 30 | func (s *PromTemplateSystem) getLabels(version uint16, obsDomainId uint32, templateId uint16, template interface{}) prometheus.Labels { 31 | 32 | typeStr := "options_template" 33 | switch template.(type) { 34 | case netflow.TemplateRecord: 35 | typeStr = "template" 36 | } 37 | 38 | return prometheus.Labels{ 39 | "router": s.key, 40 | "version": strconv.Itoa(int(version)), 41 | "obs_domain_id": strconv.Itoa(int(obsDomainId)), 42 | "template_id": strconv.Itoa(int(templateId)), 43 | "type": typeStr, 44 | } 45 | } 46 | 47 | func (s *PromTemplateSystem) AddTemplate(version uint16, obsDomainId uint32, templateId uint16, template interface{}) error { 48 | err := s.wrapped.AddTemplate(version, obsDomainId, templateId, template) 49 | 50 | labels := s.getLabels(version, obsDomainId, templateId, template) 51 | NetFlowTemplatesStats.With( 52 | 
labels). 53 | Inc() 54 | return err 55 | } 56 | 57 | func (s *PromTemplateSystem) GetTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { 58 | return s.wrapped.GetTemplate(version, obsDomainId, templateId) 59 | } 60 | 61 | func (s *PromTemplateSystem) RemoveTemplate(version uint16, obsDomainId uint32, templateId uint16) (interface{}, error) { 62 | 63 | template, err := s.wrapped.RemoveTemplate(version, obsDomainId, templateId) 64 | 65 | if err == nil { 66 | labels := s.getLabels(version, obsDomainId, templateId, template) 67 | 68 | NetFlowTemplatesStats.Delete(labels) 69 | } 70 | 71 | return template, err 72 | } 73 | -------------------------------------------------------------------------------- /package/goflow2.env: -------------------------------------------------------------------------------- 1 | GOFLOW2_ARGS= -------------------------------------------------------------------------------- /package/goflow2.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=GoFlow2 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | EnvironmentFile=/etc/default/goflow2 8 | WorkingDirectory=/usr/share/goflow2 9 | ExecStart=/usr/bin/goflow2 $GOFLOW2_ARGS 10 | ExecReload=/bin/kill -HUP $MAINPID 11 | 12 | [Install] 13 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /pb/flow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package flowpb; 3 | option go_package = "github.com/netsampler/goflow2/pb;flowpb"; 4 | 5 | message FlowMessage { 6 | 7 | enum FlowType { 8 | FLOWUNKNOWN = 0; 9 | SFLOW_5 = 1; 10 | NETFLOW_V5 = 2; 11 | NETFLOW_V9 = 3; 12 | IPFIX = 4; 13 | } 14 | FlowType type = 1; 15 | 16 | uint64 time_received_ns = 110; 17 | uint32 sequence_num = 4; 18 | uint64 sampling_rate = 3; 19 | 20 | //uint32 flow_direction = 42; 21 | 22 | // Sampler 
information 23 | bytes sampler_address = 11; 24 | 25 | // Found inside packet 26 | uint64 time_flow_start_ns = 111; 27 | uint64 time_flow_end_ns = 112; 28 | 29 | // Size of the sampled packet 30 | uint64 bytes = 9; 31 | uint64 packets = 10; 32 | 33 | // Source/destination addresses 34 | bytes src_addr = 6; 35 | bytes dst_addr = 7; 36 | 37 | // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...) 38 | uint32 etype = 30; 39 | 40 | // Layer 4 protocol 41 | uint32 proto = 20; 42 | 43 | // Ports for UDP and TCP 44 | uint32 src_port = 21; 45 | uint32 dst_port = 22; 46 | 47 | // Interfaces 48 | uint32 in_if = 18; 49 | uint32 out_if = 19; 50 | 51 | // Ethernet information 52 | uint64 src_mac = 27; 53 | uint64 dst_mac = 28; 54 | 55 | // Vlan 56 | uint32 src_vlan = 33; 57 | uint32 dst_vlan = 34; 58 | // 802.1q VLAN in sampled packet 59 | uint32 vlan_id = 29; 60 | 61 | // IP and TCP special flags 62 | uint32 ip_tos = 23; 63 | uint32 forwarding_status = 24; 64 | uint32 ip_ttl = 25; 65 | uint32 ip_flags = 38; 66 | uint32 tcp_flags = 26; 67 | uint32 icmp_type = 31; 68 | uint32 icmp_code = 32; 69 | uint32 ipv6_flow_label = 37; 70 | // Fragments (IPv4/IPv6) 71 | uint32 fragment_id = 35; 72 | uint32 fragment_offset = 36; 73 | 74 | // Autonomous system information 75 | uint32 src_as = 14; 76 | uint32 dst_as = 15; 77 | 78 | bytes next_hop = 12; 79 | uint32 next_hop_as = 13; 80 | 81 | // Prefix size 82 | uint32 src_net = 16; 83 | uint32 dst_net = 17; 84 | 85 | // BGP information 86 | bytes bgp_next_hop = 100; 87 | repeated uint32 bgp_communities = 101; 88 | repeated uint32 as_path = 102; 89 | 90 | // MPLS information 91 | repeated uint32 mpls_ttl = 80; 92 | repeated uint32 mpls_label = 81; 93 | repeated bytes mpls_ip = 82; 94 | 95 | uint32 observation_domain_id = 70; 96 | uint32 observation_point_id = 71; 97 | 98 | // Encapsulation 99 | enum LayerStack { 100 | Ethernet = 0; 101 | IPv4 = 1; 102 | IPv6 = 2; 103 | TCP = 3; 104 | UDP = 4; 105 | MPLS = 5; 106 | Dot1Q = 6; 107 | ICMP = 7; 108 | ICMPv6 
= 8; 109 | GRE = 9; 110 | IPv6HeaderRouting = 10; 111 | IPv6HeaderFragment = 11; 112 | Geneve = 12; 113 | Teredo = 13; 114 | Custom = 99; 115 | // todo: add nsh 116 | } 117 | repeated LayerStack layer_stack = 103; 118 | repeated uint32 layer_size = 104; 119 | 120 | repeated bytes ipv6_routing_header_addresses = 105; // SRv6 121 | uint32 ipv6_routing_header_seg_left = 106; // SRv6 122 | 123 | } 124 | -------------------------------------------------------------------------------- /producer/producer.go: -------------------------------------------------------------------------------- 1 | package producer 2 | 3 | import ( 4 | "net/netip" 5 | "time" 6 | ) 7 | 8 | // Interface of the messages 9 | type ProducerMessage interface{} 10 | 11 | type ProducerInterface interface { 12 | // Converts a message into a list of flow samples 13 | Produce(msg interface{}, args *ProduceArgs) ([]ProducerMessage, error) 14 | // Indicates to the producer the messages returned were processed 15 | Commit([]ProducerMessage) 16 | Close() 17 | } 18 | 19 | type ProduceArgs struct { 20 | Src netip.AddrPort 21 | Dst netip.AddrPort 22 | SamplerAddress netip.Addr 23 | TimeReceived time.Time 24 | } 25 | -------------------------------------------------------------------------------- /producer/proto/config.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "github.com/netsampler/goflow2/v2/decoders/netflow" 5 | ) 6 | 7 | type EndianType string 8 | type ProtoType string 9 | 10 | var ( 11 | BigEndian EndianType = "big" 12 | LittleEndian EndianType = "little" 13 | 14 | ProtoString ProtoType = "string" 15 | ProtoVarint ProtoType = "varint" 16 | 17 | ProtoTypeMap = map[string]ProtoType{ 18 | string(ProtoString): ProtoString, 19 | string(ProtoVarint): ProtoVarint, 20 | "bytes": ProtoString, 21 | } 22 | ) 23 | 24 | // MappableField is the interface that allows a flow's field to be mapped to a specific protobuf field. 
25 | // Provided by Template Mapper's function. 26 | type MappableField interface { 27 | GetEndianness() EndianType 28 | GetDestination() string 29 | GetProtoIndex() int32 30 | GetProtoType() ProtoType 31 | IsArray() bool 32 | } 33 | 34 | // MappableByteField is the interface, similar to MappableField, but for direct packet parsing. 35 | // Provided by PacketMapper. 36 | type MappableByteField interface { 37 | MappableField 38 | GetOffset() int 39 | GetLength() int 40 | IsEncapsulated() bool 41 | } 42 | 43 | // TemplateMapper is the interface to returns the mapping information for a specific type of template field 44 | type TemplateMapper interface { 45 | Map(field netflow.DataField) (MappableField, bool) 46 | } 47 | 48 | // MapLayerIterator is the interface to obtain subsequent mapping information 49 | type MapLayerIterator interface { 50 | Next() MappableByteField // returns the next MappableByteField. Function is called by the packet parser until returns nil. 51 | } 52 | 53 | // PacketLayerMapper is the interface to obtain the mapping information for a layer of a packet 54 | type PacketLayerMapper interface { 55 | Map(layer string) MapLayerIterator // returns an iterator to avoid handling arrays 56 | } 57 | 58 | // PacketMapper is the interface to parse a packet into a flow message 59 | type PacketMapper interface { 60 | ParsePacket(flowMessage ProtoProducerMessageIf, data []byte) (err error) 61 | } 62 | 63 | // FormatterMapper returns the configuration statements for the textual formatting of the protobuf messages 64 | type FormatterMapper interface { 65 | Keys() []string 66 | Fields() []string 67 | Rename(name string) (string, bool) 68 | Remap(name string) (string, bool) 69 | Render(name string) (RenderFunc, bool) 70 | NumToProtobuf(num int32) (ProtobufFormatterConfig, bool) 71 | IsArray(name string) bool 72 | } 73 | 74 | // ProtoProducerConfig is the top level configuration for a general flow to protobuf producer 75 | type ProtoProducerConfig interface { 76 | 
GetFormatter() FormatterMapper 77 | GetIPFIXMapper() TemplateMapper 78 | GetNetFlowMapper() TemplateMapper 79 | GetPacketMapper() PacketMapper 80 | } 81 | -------------------------------------------------------------------------------- /producer/proto/messages.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "hash" 7 | "hash/fnv" 8 | "reflect" 9 | "strings" 10 | "sync" 11 | 12 | "google.golang.org/protobuf/encoding/protodelim" 13 | "google.golang.org/protobuf/encoding/protowire" 14 | "google.golang.org/protobuf/proto" 15 | 16 | flowmessage "github.com/netsampler/goflow2/v2/pb" 17 | ) 18 | 19 | // ProtoProducerMessageIf interface to a flow message, used by parsers and tests 20 | type ProtoProducerMessageIf interface { 21 | GetFlowMessage() *ProtoProducerMessage // access the underlying structure 22 | MapCustom(key string, v []byte, cfg MappableField) error // inject custom field 23 | } 24 | 25 | type ProtoProducerMessage struct { 26 | flowmessage.FlowMessage 27 | 28 | formatter FormatterMapper 29 | 30 | skipDelimiter bool // for binary marshalling, skips the varint prefix 31 | } 32 | 33 | var protoMessagePool = sync.Pool{ 34 | New: func() any { 35 | return &ProtoProducerMessage{} 36 | }, 37 | } 38 | 39 | func (m *ProtoProducerMessage) GetFlowMessage() *ProtoProducerMessage { 40 | return m 41 | } 42 | 43 | func (m *ProtoProducerMessage) MapCustom(key string, v []byte, cfg MappableField) error { 44 | return MapCustom(m, v, cfg) 45 | } 46 | 47 | func (m *ProtoProducerMessage) AddLayer(name string) (ok bool) { 48 | value, ok := flowmessage.FlowMessage_LayerStack_value[name] 49 | m.LayerStack = append(m.LayerStack, flowmessage.FlowMessage_LayerStack(value)) 50 | return ok 51 | } 52 | 53 | func (m *ProtoProducerMessage) MarshalBinary() ([]byte, error) { 54 | buf := bytes.NewBuffer([]byte{}) 55 | if m.skipDelimiter { 56 | b, err := proto.Marshal(m) 57 | return b, err 58 
| } else { 59 | _, err := protodelim.MarshalTo(buf, m) 60 | return buf.Bytes(), err 61 | } 62 | } 63 | 64 | func (m *ProtoProducerMessage) MarshalText() ([]byte, error) { 65 | return []byte(m.FormatMessageReflectText("")), nil 66 | } 67 | 68 | func (m *ProtoProducerMessage) baseKey(h hash.Hash) { 69 | vfm := reflect.ValueOf(m) 70 | vfm = reflect.Indirect(vfm) 71 | 72 | unkMap := m.mapUnknown() // todo: should be able to reuse if set in structure 73 | 74 | for _, s := range m.formatter.Keys() { 75 | fieldName := s 76 | 77 | // get original name from structure 78 | if fieldNameMap, ok := m.formatter.Remap(fieldName); ok && fieldNameMap != "" { 79 | fieldName = fieldNameMap 80 | } 81 | 82 | fieldValue := vfm.FieldByName(fieldName) 83 | // if does not exist from structure, 84 | // fetch from unknown (only numbered) fields 85 | // that were parsed above 86 | 87 | if !fieldValue.IsValid() { 88 | if unkField, ok := unkMap[s]; ok { 89 | fieldValue = reflect.ValueOf(unkField) 90 | } else { 91 | continue 92 | } 93 | } 94 | h.Write([]byte(fmt.Sprintf("%v", fieldValue.Interface()))) 95 | } 96 | } 97 | 98 | func (m *ProtoProducerMessage) Key() []byte { 99 | if len(m.formatter.Keys()) == 0 { 100 | return nil 101 | } 102 | h := fnv.New32() 103 | m.baseKey(h) 104 | return h.Sum(nil) 105 | } 106 | 107 | func (m *ProtoProducerMessage) MarshalJSON() ([]byte, error) { 108 | return []byte(m.FormatMessageReflectJSON("")), nil 109 | } 110 | 111 | func (m *ProtoProducerMessage) FormatMessageReflectText(ext string) string { 112 | return m.FormatMessageReflectCustom(ext, "", " ", "=", false) 113 | } 114 | 115 | func (m *ProtoProducerMessage) FormatMessageReflectJSON(ext string) string { 116 | return fmt.Sprintf("{%s}", m.FormatMessageReflectCustom(ext, "\"", ",", ":", true)) 117 | } 118 | 119 | func ExtractTag(name, original string, tag reflect.StructTag) string { 120 | lookup, ok := tag.Lookup(name) 121 | if !ok { 122 | return original 123 | } 124 | before, _, _ := strings.Cut(lookup, ",") 
125 | return before 126 | } 127 | 128 | func (m *ProtoProducerMessage) mapUnknown() map[string]interface{} { 129 | unkMap := make(map[string]interface{}) 130 | 131 | fmr := m.ProtoReflect() 132 | unk := fmr.GetUnknown() 133 | var offset int 134 | for offset < len(unk) { 135 | num, dataType, length := protowire.ConsumeTag(unk[offset:]) 136 | offset += length 137 | length = protowire.ConsumeFieldValue(num, dataType, unk[offset:]) 138 | data := unk[offset : offset+length] 139 | offset += length 140 | 141 | // we check if the index is listed in the config 142 | if pbField, ok := m.formatter.NumToProtobuf(int32(num)); ok { 143 | 144 | var dest interface{} 145 | var value interface{} 146 | if dataType == protowire.VarintType { 147 | v, _ := protowire.ConsumeVarint(data) 148 | value = v 149 | } else if dataType == protowire.BytesType { 150 | v, _ := protowire.ConsumeString(data) 151 | //value = hex.EncodeToString([]byte(v)) // removed, this conversion is left to the renderer 152 | value = []byte(v) 153 | } else { 154 | continue 155 | } 156 | if pbField.Array { 157 | var destSlice []interface{} 158 | if dest, ok := unkMap[pbField.Name]; !ok { 159 | destSlice = make([]interface{}, 0) 160 | } else { 161 | destSlice = dest.([]interface{}) 162 | } 163 | destSlice = append(destSlice, value) 164 | dest = destSlice 165 | } else { 166 | dest = value 167 | } 168 | 169 | unkMap[pbField.Name] = dest 170 | 171 | } 172 | } 173 | return unkMap 174 | } 175 | 176 | func (m *ProtoProducerMessage) FormatMessageReflectCustom(ext, quotes, sep, sign string, null bool) string { 177 | vfm := reflect.ValueOf(m) 178 | vfm = reflect.Indirect(vfm) 179 | 180 | var i int 181 | fields := m.formatter.Fields() 182 | fstr := make([]string, len(fields)) // todo: reuse with pool 183 | 184 | unkMap := m.mapUnknown() 185 | 186 | // iterate through the fields requested by the user 187 | for _, s := range fields { 188 | fieldName := s 189 | 190 | fieldFinalName := s 191 | if fieldRename, ok := 
m.formatter.Rename(s); ok && fieldRename != "" { 192 | fieldFinalName = fieldRename 193 | } 194 | 195 | // get original name from structure 196 | if fieldNameMap, ok := m.formatter.Remap(fieldName); ok && fieldNameMap != "" { 197 | fieldName = fieldNameMap 198 | } 199 | 200 | // get renderer 201 | renderer, okRenderer := m.formatter.Render(fieldName) 202 | if !okRenderer { // todo: change to renderer check 203 | renderer = NilRenderer 204 | } 205 | 206 | fieldValue := vfm.FieldByName(fieldName) 207 | // if does not exist from structure, 208 | // fetch from unknown (only numbered) fields 209 | // that were parsed above 210 | 211 | if !fieldValue.IsValid() { 212 | if unkField, ok := unkMap[s]; ok { 213 | fieldValue = reflect.ValueOf(unkField) 214 | } else if !okRenderer { // not a virtual field 215 | continue 216 | } 217 | } 218 | 219 | isSlice := m.formatter.IsArray(fieldName) 220 | 221 | // render each item of the array independently 222 | // note: isSlice is necessary to consider certain byte arrays in their entirety 223 | // eg: IP addresses 224 | if isSlice { 225 | v := "[" 226 | 227 | if fieldValue.IsValid() { 228 | 229 | c := fieldValue.Len() 230 | for i := 0; i < c; i++ { 231 | fieldValueI := fieldValue.Index(i) 232 | var val interface{} 233 | if fieldValueI.IsValid() { 234 | val = fieldValueI.Interface() 235 | } 236 | 237 | rendered := renderer(m, fieldName, val) 238 | if rendered == nil { 239 | continue 240 | } 241 | renderedType := reflect.TypeOf(rendered) 242 | if renderedType.Kind() == reflect.String { 243 | v += fmt.Sprintf("%s%v%s", quotes, rendered, quotes) 244 | } else { 245 | v += fmt.Sprintf("%v", rendered) 246 | } 247 | 248 | if i < c-1 { 249 | v += "," 250 | } 251 | } 252 | } 253 | v += "]" 254 | fstr[i] = fmt.Sprintf("%s%s%s%s%s", quotes, fieldFinalName, quotes, sign, v) 255 | } else { 256 | var val interface{} 257 | if fieldValue.IsValid() { 258 | val = fieldValue.Interface() 259 | } 260 | 261 | rendered := renderer(m, fieldName, val) 262 | if 
rendered == nil { 263 | continue 264 | } 265 | renderedType := reflect.TypeOf(rendered) 266 | if renderedType.Kind() == reflect.String { 267 | fstr[i] = fmt.Sprintf("%s%s%s%s%s%v%s", quotes, fieldFinalName, quotes, sign, quotes, rendered, quotes) 268 | } else { 269 | fstr[i] = fmt.Sprintf("%s%s%s%s%v", quotes, fieldFinalName, quotes, sign, rendered) 270 | } 271 | } 272 | i++ 273 | 274 | } 275 | fstr = fstr[0:i] 276 | 277 | return strings.Join(fstr, sep) 278 | } 279 | -------------------------------------------------------------------------------- /producer/proto/messages_test.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "testing" 5 | 6 | "google.golang.org/protobuf/encoding/protowire" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestMarshalJSON(t *testing.T) { 12 | var m ProtoProducerMessage 13 | 14 | m.formatter = &FormatterConfigMapper{ 15 | fields: []string{"Etype", "test1", "test2", "test3"}, 16 | rename: map[string]string{ 17 | "Etype": "etype", 18 | }, 19 | numToPb: map[int32]ProtobufFormatterConfig{ 20 | 100: ProtobufFormatterConfig{ 21 | Name: "test1", 22 | Index: 100, 23 | Type: "varint", 24 | Array: false, 25 | }, 26 | 101: ProtobufFormatterConfig{ 27 | Name: "test2", 28 | Index: 101, 29 | Type: "string", 30 | Array: false, 31 | }, 32 | 102: ProtobufFormatterConfig{ 33 | Name: "test3", 34 | Index: 102, 35 | Type: "bytes", 36 | Array: false, 37 | }, 38 | }, 39 | render: map[string]RenderFunc{ 40 | "Etype": EtypeRenderer, 41 | "test1": EtypeRenderer, 42 | "test2": NilRenderer, 43 | "test3": StringRenderer, 44 | }, 45 | } 46 | 47 | m.FlowMessage.Etype = 0x86dd 48 | 49 | fmr := m.FlowMessage.ProtoReflect() 50 | unk := fmr.GetUnknown() 51 | 52 | unk = protowire.AppendTag(unk, protowire.Number(100), protowire.VarintType) 53 | unk = protowire.AppendVarint(unk, 0x86dd) 54 | 55 | unk = protowire.AppendTag(unk, protowire.Number(101), protowire.BytesType) 56 | 
unk = protowire.AppendString(unk, string("testing")) 57 | 58 | unk = protowire.AppendTag(unk, protowire.Number(102), protowire.BytesType) 59 | unk = protowire.AppendString(unk, string([]byte{0x74, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67})) 60 | 61 | fmr.SetUnknown(unk) 62 | 63 | out, err := m.MarshalJSON() 64 | assert.Nil(t, err) 65 | t.Log(string(out)) 66 | assert.Equal(t, "{\"etype\":\"IPv6\",\"test1\":\"IPv6\",\"test2\":\"74657374696e67\",\"test3\":\"testing\"}", string(out)) 67 | } 68 | -------------------------------------------------------------------------------- /producer/proto/producer_nflegacy.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "encoding/binary" 5 | 6 | "github.com/netsampler/goflow2/v2/decoders/netflowlegacy" 7 | flowmessage "github.com/netsampler/goflow2/v2/pb" 8 | "github.com/netsampler/goflow2/v2/producer" 9 | ) 10 | 11 | func ConvertNetFlowLegacyRecord(flowMessage *ProtoProducerMessage, baseTime uint64, uptime uint32, record netflowlegacy.RecordsNetFlowV5) { 12 | flowMessage.Type = flowmessage.FlowMessage_NETFLOW_V5 13 | 14 | timeDiffFirst := (uptime - record.First) 15 | timeDiffLast := (uptime - record.Last) 16 | flowMessage.TimeFlowStartNs = baseTime - uint64(timeDiffFirst)*1000000 17 | flowMessage.TimeFlowEndNs = baseTime - uint64(timeDiffLast)*1000000 18 | 19 | v := make([]byte, 4) 20 | binary.BigEndian.PutUint32(v, uint32(record.NextHop)) 21 | flowMessage.NextHop = v 22 | v = make([]byte, 4) 23 | binary.BigEndian.PutUint32(v, uint32(record.SrcAddr)) 24 | flowMessage.SrcAddr = v 25 | v = make([]byte, 4) 26 | binary.BigEndian.PutUint32(v, uint32(record.DstAddr)) 27 | flowMessage.DstAddr = v 28 | 29 | flowMessage.Etype = 0x800 30 | flowMessage.SrcAs = uint32(record.SrcAS) 31 | flowMessage.DstAs = uint32(record.DstAS) 32 | flowMessage.SrcNet = uint32(record.SrcMask) 33 | flowMessage.DstNet = uint32(record.DstMask) 34 | flowMessage.Proto = uint32(record.Proto) 
35 | flowMessage.TcpFlags = uint32(record.TCPFlags) 36 | flowMessage.IpTos = uint32(record.Tos) 37 | flowMessage.InIf = uint32(record.Input) 38 | flowMessage.OutIf = uint32(record.Output) 39 | flowMessage.SrcPort = uint32(record.SrcPort) 40 | flowMessage.DstPort = uint32(record.DstPort) 41 | flowMessage.Packets = uint64(record.DPkts) 42 | flowMessage.Bytes = uint64(record.DOctets) 43 | } 44 | 45 | func SearchNetFlowLegacyRecords(baseTime uint64, uptime uint32, dataRecords []netflowlegacy.RecordsNetFlowV5) (flowMessageSet []producer.ProducerMessage) { 46 | for _, record := range dataRecords { 47 | fmsg := protoMessagePool.Get().(*ProtoProducerMessage) 48 | fmsg.Reset() 49 | ConvertNetFlowLegacyRecord(fmsg, baseTime, uptime, record) 50 | flowMessageSet = append(flowMessageSet, fmsg) 51 | } 52 | return flowMessageSet 53 | } 54 | 55 | func ProcessMessageNetFlowLegacy(packet *netflowlegacy.PacketNetFlowV5) ([]producer.ProducerMessage, error) { 56 | seqnum := packet.FlowSequence 57 | samplingRate := packet.SamplingInterval & 0x3FFF 58 | baseTime := uint64(packet.UnixSecs)*1000000000 + uint64(packet.UnixNSecs) 59 | uptime := packet.SysUptime 60 | 61 | flowMessageSet := SearchNetFlowLegacyRecords(baseTime, uptime, packet.Records) 62 | for _, msg := range flowMessageSet { 63 | fmsg, ok := msg.(*ProtoProducerMessage) 64 | if !ok { 65 | continue 66 | } 67 | fmsg.SequenceNum = seqnum 68 | fmsg.SamplingRate = uint64(samplingRate) 69 | } 70 | 71 | return flowMessageSet, nil 72 | } 73 | -------------------------------------------------------------------------------- /producer/proto/producer_sf.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "github.com/netsampler/goflow2/v2/decoders/sflow" 5 | flowmessage "github.com/netsampler/goflow2/v2/pb" 6 | "github.com/netsampler/goflow2/v2/producer" 7 | ) 8 | 9 | func GetSFlowFlowSamples(packet *sflow.Packet) []interface{} { 10 | var flowSamples 
[]interface{} 11 | for _, sample := range packet.Samples { 12 | switch sample.(type) { 13 | case sflow.FlowSample: 14 | flowSamples = append(flowSamples, sample) 15 | case sflow.ExpandedFlowSample: 16 | flowSamples = append(flowSamples, sample) 17 | } 18 | } 19 | return flowSamples 20 | } 21 | 22 | func ParseSampledHeader(flowMessage *ProtoProducerMessage, sampledHeader *sflow.SampledHeader) error { 23 | return ParseSampledHeaderConfig(flowMessage, sampledHeader, nil) 24 | } 25 | 26 | func ParseSampledHeaderConfig(flowMessage *ProtoProducerMessage, sampledHeader *sflow.SampledHeader, config PacketMapper) error { 27 | data := (*sampledHeader).HeaderData 28 | switch (*sampledHeader).Protocol { 29 | case 1: // Ethernet 30 | if config == nil { 31 | config = DefaultEnvironment 32 | } 33 | 34 | if err := config.ParsePacket(flowMessage, data); err != nil { 35 | return err 36 | } 37 | } 38 | return nil 39 | } 40 | 41 | func SearchSFlowSampleConfig(flowMessage *ProtoProducerMessage, flowSample interface{}, config PacketMapper) error { 42 | var records []sflow.FlowRecord 43 | flowMessage.Type = flowmessage.FlowMessage_SFLOW_5 44 | 45 | switch flowSample := flowSample.(type) { 46 | case sflow.FlowSample: 47 | records = flowSample.Records 48 | flowMessage.SamplingRate = uint64(flowSample.SamplingRate) 49 | flowMessage.InIf = flowSample.Input 50 | flowMessage.OutIf = flowSample.Output 51 | case sflow.ExpandedFlowSample: 52 | records = flowSample.Records 53 | flowMessage.SamplingRate = uint64(flowSample.SamplingRate) 54 | flowMessage.InIf = flowSample.InputIfValue 55 | flowMessage.OutIf = flowSample.OutputIfValue 56 | } 57 | 58 | var ipNh, ipSrc, ipDst []byte 59 | flowMessage.Packets = 1 60 | for _, record := range records { 61 | switch recordData := record.Data.(type) { 62 | case sflow.SampledHeader: 63 | flowMessage.Bytes = uint64(recordData.FrameLength) 64 | if err := ParseSampledHeaderConfig(flowMessage, &recordData, config); err != nil { // todo: make function configurable 
65 | return err 66 | } 67 | case sflow.SampledIPv4: 68 | ipSrc = recordData.SrcIP 69 | ipDst = recordData.DstIP 70 | flowMessage.SrcAddr = ipSrc 71 | flowMessage.DstAddr = ipDst 72 | flowMessage.Bytes = uint64(recordData.Length) 73 | flowMessage.Proto = recordData.Protocol 74 | flowMessage.SrcPort = recordData.SrcPort 75 | flowMessage.DstPort = recordData.DstPort 76 | flowMessage.IpTos = recordData.Tos 77 | flowMessage.Etype = 0x800 78 | case sflow.SampledIPv6: 79 | ipSrc = recordData.SrcIP 80 | ipDst = recordData.DstIP 81 | flowMessage.SrcAddr = ipSrc 82 | flowMessage.DstAddr = ipDst 83 | flowMessage.Bytes = uint64(recordData.Length) 84 | flowMessage.Proto = recordData.Protocol 85 | flowMessage.SrcPort = recordData.SrcPort 86 | flowMessage.DstPort = recordData.DstPort 87 | flowMessage.IpTos = recordData.Priority 88 | flowMessage.Etype = 0x86dd 89 | case sflow.ExtendedRouter: 90 | ipNh = recordData.NextHop 91 | flowMessage.NextHop = ipNh 92 | flowMessage.SrcNet = recordData.SrcMaskLen 93 | flowMessage.DstNet = recordData.DstMaskLen 94 | case sflow.ExtendedGateway: 95 | ipNh = recordData.NextHop 96 | flowMessage.BgpNextHop = ipNh 97 | flowMessage.BgpCommunities = recordData.Communities 98 | flowMessage.AsPath = recordData.ASPath 99 | if len(recordData.ASPath) > 0 { 100 | flowMessage.DstAs = recordData.ASPath[len(recordData.ASPath)-1] 101 | flowMessage.NextHopAs = recordData.ASPath[0] 102 | } else { 103 | flowMessage.DstAs = recordData.AS 104 | } 105 | if recordData.SrcAS > 0 { 106 | flowMessage.SrcAs = recordData.SrcAS 107 | } else { 108 | flowMessage.SrcAs = recordData.AS 109 | } 110 | case sflow.ExtendedSwitch: 111 | flowMessage.SrcVlan = recordData.SrcVlan 112 | flowMessage.DstVlan = recordData.DstVlan 113 | } 114 | } 115 | return nil 116 | 117 | } 118 | 119 | func SearchSFlowSamplesConfig(samples []interface{}, config PacketMapper) (flowMessageSet []producer.ProducerMessage, err error) { 120 | for _, flowSample := range samples { 121 | fmsg := 
protoMessagePool.Get().(*ProtoProducerMessage) 122 | fmsg.Reset() 123 | if err := SearchSFlowSampleConfig(fmsg, flowSample, config); err != nil { 124 | return nil, err 125 | } 126 | flowMessageSet = append(flowMessageSet, fmsg) 127 | } 128 | return flowMessageSet, nil 129 | } 130 | 131 | // Converts an sFlow message 132 | func ProcessMessageSFlowConfig(packet *sflow.Packet, config ProtoProducerConfig) (flowMessageSet []producer.ProducerMessage, err error) { 133 | seqnum := packet.SequenceNumber 134 | agent := packet.AgentIP 135 | 136 | var cfgSFlow PacketMapper 137 | if config != nil { 138 | cfgSFlow = config.GetPacketMapper() 139 | } 140 | 141 | flowSamples := GetSFlowFlowSamples(packet) 142 | flowMessageSet, err = SearchSFlowSamplesConfig(flowSamples, cfgSFlow) 143 | if err != nil { 144 | return flowMessageSet, err 145 | } 146 | for _, msg := range flowMessageSet { 147 | fmsg, ok := msg.(*ProtoProducerMessage) 148 | if !ok { 149 | continue 150 | } 151 | fmsg.SamplerAddress = agent 152 | fmsg.SequenceNum = seqnum 153 | } 154 | 155 | return flowMessageSet, nil 156 | } 157 | -------------------------------------------------------------------------------- /producer/proto/producer_test.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/netsampler/goflow2/v2/decoders/netflow" 7 | "github.com/netsampler/goflow2/v2/decoders/sflow" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestProcessMessageNetFlow(t *testing.T) { 12 | records := []netflow.DataRecord{ 13 | netflow.DataRecord{ 14 | Values: []netflow.DataField{ 15 | netflow.DataField{ 16 | Type: netflow.NFV9_FIELD_IPV4_SRC_ADDR, 17 | Value: []byte{10, 0, 0, 1}, 18 | }, 19 | netflow.DataField{ 20 | Type: netflow.NFV9_FIELD_FIRST_SWITCHED, 21 | // 218432176 22 | Value: []byte{0x0d, 0x05, 0x02, 0xb0}, 23 | }, 24 | netflow.DataField{ 25 | Type: netflow.NFV9_FIELD_LAST_SWITCHED, 26 | // 218432192 27 
| Value: []byte{0x0d, 0x05, 0x02, 0xc0}, 28 | }, 29 | netflow.DataField{ 30 | Type: netflow.NFV9_FIELD_MPLS_LABEL_1, 31 | // 24041 32 | Value: []byte{0x05, 0xde, 0x94}, 33 | }, 34 | netflow.DataField{ 35 | Type: netflow.NFV9_FIELD_MPLS_LABEL_2, 36 | // 211992 37 | Value: []byte{0x33, 0xc1, 0x85}, 38 | }, 39 | netflow.DataField{ 40 | Type: netflow.NFV9_FIELD_MPLS_LABEL_3, 41 | // 48675 42 | Value: []byte{0x0b, 0xe2, 0x35}, 43 | }, 44 | }, 45 | }, 46 | } 47 | dfs := []interface{}{ 48 | netflow.DataFlowSet{ 49 | Records: records, 50 | }, 51 | } 52 | 53 | pktnf9 := netflow.NFv9Packet{ 54 | SystemUptime: 218432000, 55 | UnixSeconds: 1705732882, 56 | FlowSets: dfs, 57 | } 58 | testsr := &SingleSamplingRateSystem{1} 59 | msgs, err := ProcessMessageNetFlowV9Config(&pktnf9, testsr, nil) 60 | if assert.Nil(t, err) && assert.Len(t, msgs, 1) { 61 | msg, ok := msgs[0].(*ProtoProducerMessage) 62 | if assert.True(t, ok) { 63 | assert.Equal(t, uint64(1705732882176*1e6), msg.TimeFlowStartNs) 64 | assert.Equal(t, uint64(1705732882192*1e6), msg.TimeFlowEndNs) 65 | assert.Equal(t, []uint32{24041, 211992, 48675}, msg.MplsLabel) 66 | } 67 | } 68 | 69 | pktipfix := netflow.IPFIXPacket{ 70 | FlowSets: dfs, 71 | } 72 | _, err = ProcessMessageIPFIXConfig(&pktipfix, testsr, nil) 73 | assert.Nil(t, err) 74 | } 75 | 76 | func TestProcessMessageSFlow(t *testing.T) { 77 | sh := sflow.SampledHeader{ 78 | FrameLength: 10, 79 | Protocol: 1, 80 | HeaderData: []byte{ 81 | 0xff, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xff, 0xab, 0xcd, 0xef, 0xab, 0xbc, 0x86, 0xdd, 0x60, 0x2e, 82 | 0xc4, 0xec, 0x01, 0xcc, 0x06, 0x40, 0xfd, 0x01, 0x00, 0x00, 0xff, 0x01, 0x82, 0x10, 0xcd, 0xff, 83 | 0xff, 0x1c, 0x00, 0x00, 0x01, 0x50, 0xfd, 0x01, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x02, 0xff, 84 | 0xff, 0x93, 0x00, 0x00, 0x02, 0x46, 0xcf, 0xca, 0x00, 0x50, 0x05, 0x15, 0x21, 0x6f, 0xa4, 0x9c, 85 | 0xf4, 0x59, 0x80, 0x18, 0x08, 0x09, 0x8c, 0x86, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x2a, 0x85, 86 | 0xee, 0x9e, 0x64, 0x5c, 0x27, 
0x28, 87 | }, 88 | } 89 | pkt := sflow.Packet{ 90 | Version: 5, 91 | Samples: []interface{}{ 92 | sflow.FlowSample{ 93 | SamplingRate: 1, 94 | Records: []sflow.FlowRecord{ 95 | sflow.FlowRecord{ 96 | Data: sh, 97 | }, 98 | }, 99 | }, 100 | sflow.ExpandedFlowSample{ 101 | SamplingRate: 1, 102 | Records: []sflow.FlowRecord{ 103 | sflow.FlowRecord{ 104 | Data: sh, 105 | }, 106 | }, 107 | }, 108 | }, 109 | } 110 | _, err := ProcessMessageSFlowConfig(&pkt, nil) 111 | assert.Nil(t, err) 112 | } 113 | 114 | func TestExpandedSFlowDecode(t *testing.T) { 115 | flowMessages, err := ProcessMessageSFlowConfig(getSflowPacket(), nil) 116 | flowMessageIf := flowMessages[0] 117 | flowMessage := flowMessageIf.(*ProtoProducerMessage) 118 | 119 | assert.Nil(t, err) 120 | 121 | assert.Equal(t, []byte{0x05, 0x05, 0x05, 0x05}, flowMessage.BgpNextHop) 122 | assert.Equal(t, []uint32{3936619448, 3936619708, 3936623548}, flowMessage.BgpCommunities) 123 | assert.Equal(t, []uint32{456}, flowMessage.AsPath) 124 | assert.Equal(t, []byte{0x09, 0x09, 0x09, 0x09}, flowMessage.NextHop) 125 | } 126 | 127 | func getSflowPacket() *sflow.Packet { 128 | pkt := sflow.Packet{ 129 | Version: 5, 130 | IPVersion: 1, 131 | AgentIP: []uint8{1, 2, 3, 4}, 132 | SubAgentId: 0, 133 | SequenceNumber: 3178205882, 134 | Uptime: 3011091704, 135 | SamplesCount: 1, 136 | Samples: []interface{}{ 137 | sflow.FlowSample{ 138 | Header: sflow.SampleHeader{ 139 | Format: 1, 140 | Length: 662, 141 | SampleSequenceNumber: 2757962272, 142 | SourceIdType: 0, 143 | SourceIdValue: 1000100, 144 | }, 145 | SamplingRate: 16383, 146 | SamplePool: 639948256, 147 | Drops: 0, 148 | Input: 1000100, 149 | Output: 1000005, 150 | FlowRecordsCount: 4, 151 | Records: []sflow.FlowRecord{ 152 | sflow.FlowRecord{ 153 | Header: sflow.RecordHeader{ 154 | DataFormat: 1001, 155 | Length: 16, 156 | }, 157 | Data: sflow.ExtendedSwitch{ 158 | SrcVlan: 952, 159 | SrcPriority: 0, 160 | DstVlan: 952, 161 | DstPriority: 0, 162 | }, 163 | }, 164 | 
sflow.FlowRecord{ 165 | Header: sflow.RecordHeader{ 166 | DataFormat: 1, 167 | Length: 144, 168 | }, 169 | Data: sflow.SampledHeader{ 170 | Protocol: 1, 171 | FrameLength: 1522, 172 | Stripped: 4, 173 | OriginalLength: 128, 174 | HeaderData: []byte{ 175 | 0x74, 0x83, 0xef, 0x2e, 0xc3, 0xc5, 0xac, 0x1f, 0x6b, 0x2c, 0x43, 0x36, 0x81, 0x00, 0x03, 0xb8, 176 | 0x08, 0x00, 0x45, 0x00, 0x05, 0xdc, 0x59, 0xa5, 0x40, 0x00, 0x40, 0x06, 0x0a, 0xb8, 0xb9, 0x3b, 177 | 0xdf, 0xb6, 0x32, 0x44, 0x05, 0x89, 0x23, 0x78, 0xc9, 0x06, 0x24, 0x6c, 0x0b, 0xf4, 0xd9, 0xce, 178 | 0x9c, 0x66, 0x50, 0x10, 0x00, 0x1e, 0x29, 0x8a, 0x00, 0x00, 0xb4, 0x7e, 0xb7, 0xfd, 0x16, 0x3e, 179 | 0x19, 0x97, 0xa8, 0xb4, 0x2a, 0xf7, 0x49, 0x96, 0xf4, 0x0e, 0xef, 0xa7, 0x55, 0x93, 0x27, 0x6f, 180 | 0x1e, 0x20, 0xe1, 0x04, 0x2f, 0x36, 0x18, 0xfe, 0x7b, 0x88, 0x1f, 0xc9, 0x57, 0xbc, 0x71, 0x43, 181 | 0x3d, 0x1c, 0x6c, 0xb0, 0x3d, 0xf7, 0x51, 0x48, 0x68, 0x94, 0x47, 0x00, 0xd3, 0x1a, 0x9d, 0xdb, 182 | 0x2f, 0x1e, 0x39, 0xcf, 0xfd, 0x96, 0x79, 0xdf, 0xb0, 0x2d, 0x02, 0x6e, 0x72, 0xf5, 0x29, 0x73, 183 | }, 184 | }, 185 | }, 186 | sflow.FlowRecord{ 187 | Header: sflow.RecordHeader{ 188 | DataFormat: 1003, 189 | Length: 56, 190 | }, 191 | Data: sflow.ExtendedGateway{ 192 | NextHopIPVersion: 1, 193 | NextHop: []uint8{5, 5, 5, 5}, 194 | AS: 123, 195 | SrcAS: 0, 196 | SrcPeerAS: 0, 197 | ASDestinations: 1, 198 | ASPathType: 2, 199 | ASPathLength: 1, 200 | ASPath: []uint32{456}, 201 | CommunitiesLength: 3, 202 | Communities: []uint32{ 203 | 3936619448, 204 | 3936619708, 205 | 3936623548, 206 | }, 207 | LocalPref: 170, 208 | }, 209 | }, 210 | sflow.FlowRecord{ 211 | Header: sflow.RecordHeader{ 212 | DataFormat: 1002, 213 | Length: 16, 214 | }, 215 | Data: sflow.ExtendedRouter{ 216 | NextHopIPVersion: 1, 217 | NextHop: []uint8{9, 9, 9, 9}, 218 | SrcMaskLen: 26, 219 | DstMaskLen: 22, 220 | }, 221 | }, 222 | }, 223 | }, 224 | }, 225 | } 226 | return &pkt 227 | } 228 | 229 | func TestNetFlowV9Time(t *testing.T) { 230 | // 
This test ensures the NetFlow v9 timestamps are properly calculated. 231 | // It passes a baseTime = 2024-01-01 00:00:00 (in seconds) and an uptime of 2 seconds (in milliseconds). 232 | // The flow record was logged at 1 second of uptime (in milliseconds). 233 | // The calculation is the following: baseTime - uptime + flowUptime. 234 | var flowMessage ProtoProducerMessage 235 | err := ConvertNetFlowDataSet(&flowMessage, 9, 1704067200, 2000, []netflow.DataField{ 236 | netflow.DataField{ 237 | Type: netflow.NFV9_FIELD_FIRST_SWITCHED, 238 | Value: []byte{0x0, 0x0, 0x03, 0xe8}, // 1000 239 | }, 240 | }, nil, nil) 241 | assert.Nil(t, err) 242 | assert.Equal(t, uint64(1704067199)*1e9, flowMessage.TimeFlowStartNs) 243 | } 244 | -------------------------------------------------------------------------------- /producer/proto/proto.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/netsampler/goflow2/v2/decoders/netflow" 8 | "github.com/netsampler/goflow2/v2/decoders/netflowlegacy" 9 | "github.com/netsampler/goflow2/v2/decoders/sflow" 10 | "github.com/netsampler/goflow2/v2/producer" 11 | ) 12 | 13 | type ProtoProducer struct { 14 | cfg ProtoProducerConfig 15 | samplinglock *sync.RWMutex 16 | sampling map[string]SamplingRateSystem 17 | samplingRateSystem func() SamplingRateSystem 18 | } 19 | 20 | func (p *ProtoProducer) enrich(flowMessageSet []producer.ProducerMessage, cb func(msg *ProtoProducerMessage)) { 21 | for _, msg := range flowMessageSet { 22 | fmsg, ok := msg.(*ProtoProducerMessage) 23 | if !ok { 24 | continue 25 | } 26 | cb(fmsg) 27 | } 28 | } 29 | 30 | func (p *ProtoProducer) getSamplingRateSystem(args *producer.ProduceArgs) SamplingRateSystem { 31 | key := args.Src.Addr().String() 32 | p.samplinglock.RLock() 33 | sampling, ok := p.sampling[key] 34 | p.samplinglock.RUnlock() 35 | if !ok { 36 | sampling = p.samplingRateSystem() 37 | 
p.samplinglock.Lock() 38 | p.sampling[key] = sampling 39 | p.samplinglock.Unlock() 40 | } 41 | 42 | return sampling 43 | } 44 | 45 | func (p *ProtoProducer) Produce(msg interface{}, args *producer.ProduceArgs) (flowMessageSet []producer.ProducerMessage, err error) { 46 | tr := uint64(args.TimeReceived.UnixNano()) 47 | sa, _ := args.SamplerAddress.Unmap().MarshalBinary() 48 | switch msgConv := msg.(type) { 49 | case *netflowlegacy.PacketNetFlowV5: 50 | flowMessageSet, err = ProcessMessageNetFlowLegacy(msgConv) 51 | 52 | p.enrich(flowMessageSet, func(fmsg *ProtoProducerMessage) { 53 | fmsg.TimeReceivedNs = tr 54 | fmsg.SamplerAddress = sa 55 | }) 56 | case *netflow.NFv9Packet: 57 | samplingRateSystem := p.getSamplingRateSystem(args) 58 | flowMessageSet, err = ProcessMessageNetFlowV9Config(msgConv, samplingRateSystem, p.cfg) 59 | 60 | p.enrich(flowMessageSet, func(fmsg *ProtoProducerMessage) { 61 | fmsg.TimeReceivedNs = tr 62 | fmsg.SamplerAddress = sa 63 | }) 64 | case *netflow.IPFIXPacket: 65 | samplingRateSystem := p.getSamplingRateSystem(args) 66 | flowMessageSet, err = ProcessMessageIPFIXConfig(msgConv, samplingRateSystem, p.cfg) 67 | 68 | p.enrich(flowMessageSet, func(fmsg *ProtoProducerMessage) { 69 | fmsg.TimeReceivedNs = tr 70 | fmsg.SamplerAddress = sa 71 | }) 72 | case *sflow.Packet: 73 | flowMessageSet, err = ProcessMessageSFlowConfig(msgConv, p.cfg) 74 | 75 | p.enrich(flowMessageSet, func(fmsg *ProtoProducerMessage) { 76 | fmsg.TimeReceivedNs = tr 77 | fmsg.TimeFlowStartNs = tr 78 | fmsg.TimeFlowEndNs = tr 79 | }) 80 | default: 81 | return flowMessageSet, fmt.Errorf("flow not recognized") 82 | } 83 | 84 | p.enrich(flowMessageSet, func(fmsg *ProtoProducerMessage) { 85 | fmsg.formatter = p.cfg.GetFormatter() 86 | }) 87 | return flowMessageSet, err 88 | } 89 | 90 | func (p *ProtoProducer) Commit(flowMessageSet []producer.ProducerMessage) { 91 | for _, fmsg := range flowMessageSet { 92 | protoMessagePool.Put(fmsg) 93 | } 94 | } 95 | 96 | func (p 
// GetBytes extracts the bit field [offset, offset+length) from d and returns
// it as a byte slice.
//
// Example with an offset of 4 and a length of 6 on data 0xAA 0x55
// (1010 1010.0101 0101):
//
//	shift=true  -> 0x29 (0010 1001)  value right-aligned in the last byte
//	shift=false -> 0xA4 (1010 0100)  value left-aligned, tail masked off
//
// Bits requested beyond the end of d read as zero. A zero length, or an
// offset past the end of d, returns nil. In the fully byte-aligned case the
// returned slice may alias d.
func GetBytes(d []byte, offset, length int, shift bool) []byte {
	if length == 0 || len(d)*8 < offset {
		return nil
	}

	bitShift := offset % 8 // left shift induced by a non-byte-aligned offset
	tailBits := length % 8 // bits occupied in the final output byte

	firstByte := offset / 8
	lastByte := (offset + length + 7) / 8 // ceil((offset+length)/8)

	outLen := length / 8
	if tailBits > 0 {
		outLen++
	}

	// Clamp the source window to the available data; the shortfall is
	// implicitly zero-filled in the output.
	padded := lastByte > len(d)
	if padded {
		lastByte = len(d)
	}
	window := d[firstByte:lastByte]

	// Fast path: byte-aligned offset and length need no bit manipulation.
	if bitShift == 0 && tailBits == 0 {
		if !padded {
			return window
		}
		out := make([]byte, outLen)
		copy(out, window)
		return out
	}

	out := make([]byte, outLen)
	for i := range out {
		if i >= len(window) {
			break
		}
		out[i] = window[i] << bitShift
		if i+1 < len(window) {
			// Pull the spill-over bits from the next source byte. When
			// bitShift is 0 this shifts by 8, which yields 0 (a no-op).
			out[i] |= window[i+1] >> (8 - bitShift)
		}
	}

	// Finalize the last byte: right-align the value (shift) or keep it
	// left-aligned and mask the unused tail bits (bitwise AND).
	if shift {
		out[outLen-1] >>= (8 - tailBits) % 8
	} else {
		out[outLen-1] &= 0xFF << ((8 - tailBits) % 8)
	}
	return out
}
| return err 147 | } 148 | } 149 | } else if IsInt(typeDest.Elem().Kind()) { 150 | if cfg.GetEndianness() == LittleEndian { 151 | if err := DecodeNumberLE(v, item.Interface()); err != nil { 152 | return err 153 | } 154 | } else { 155 | if err := DecodeNumber(v, item.Interface()); err != nil { 156 | return err 157 | } 158 | } 159 | } 160 | 161 | itemi := reflect.Indirect(item) 162 | tmpFieldValue := reflect.Append(fieldValue, itemi) 163 | fieldValue.Set(tmpFieldValue) 164 | } 165 | 166 | } else if fieldValueAddr.IsValid() { 167 | if IsUInt(typeDest.Kind()) { 168 | if cfg.GetEndianness() == LittleEndian { 169 | if err := DecodeUNumberLE(v, fieldValueAddr.Interface()); err != nil { 170 | return err 171 | } 172 | } else { 173 | if err := DecodeUNumber(v, fieldValueAddr.Interface()); err != nil { 174 | return err 175 | } 176 | } 177 | } else if IsInt(typeDest.Kind()) { 178 | if cfg.GetEndianness() == LittleEndian { 179 | if err := DecodeNumberLE(v, fieldValueAddr.Interface()); err != nil { 180 | return err 181 | } 182 | } else { 183 | if err := DecodeNumber(v, fieldValueAddr.Interface()); err != nil { 184 | return err 185 | } 186 | } 187 | } 188 | 189 | } 190 | } else if cfg.GetProtoIndex() > 0 { 191 | 192 | fmr := flowMessage.ProtoReflect() 193 | unk := fmr.GetUnknown() 194 | 195 | if !cfg.IsArray() { 196 | var offset int 197 | for offset < len(unk) { 198 | num, _, length := protowire.ConsumeField(unk[offset:]) 199 | offset += length 200 | if int32(num) == cfg.GetProtoIndex() { 201 | // only one allowed 202 | break 203 | } 204 | } 205 | } 206 | 207 | var dstVar uint64 208 | if cfg.GetProtoType() == ProtoVarint { 209 | if cfg.GetEndianness() == LittleEndian { 210 | if err := DecodeUNumberLE(v, &dstVar); err != nil { 211 | return err 212 | } 213 | } else { 214 | if err := DecodeUNumber(v, &dstVar); err != nil { 215 | return err 216 | } 217 | } 218 | // support signed int? 
219 | unk = protowire.AppendTag(unk, protowire.Number(cfg.GetProtoIndex()), protowire.VarintType) 220 | unk = protowire.AppendVarint(unk, dstVar) 221 | } else if cfg.GetProtoType() == ProtoString { 222 | unk = protowire.AppendTag(unk, protowire.Number(cfg.GetProtoIndex()), protowire.BytesType) 223 | unk = protowire.AppendString(unk, string(v)) 224 | } else { 225 | return fmt.Errorf("could not insert into protobuf unknown") 226 | } 227 | fmr.SetUnknown(unk) 228 | } 229 | return nil 230 | } 231 | -------------------------------------------------------------------------------- /producer/proto/reflect_test.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGetBytes(t *testing.T) { 10 | d := []byte{0xAA, 0x55, 0xAB, 0x56} 11 | 12 | // Simple case 13 | r := GetBytes(d, 16, 16, true) 14 | assert.Equal(t, []byte{0xAB, 0x56}, r) 15 | 16 | r = GetBytes(d, 24, 8, true) 17 | assert.Equal(t, []byte{0x56}, r) 18 | 19 | r = GetBytes(d, 24, 32, true) 20 | assert.Equal(t, []byte{0x56, 0x00, 0x00, 0x00}, r) 21 | 22 | // Trying to break 23 | r = GetBytes(d, 32, 0, true) 24 | assert.Nil(t, r) 25 | 26 | r = GetBytes(d, 32, 16, true) 27 | assert.Equal(t, []byte{0x00, 0x00}, r) 28 | 29 | // Offset to shift 30 | r = GetBytes(d, 4, 16, true) 31 | assert.Equal(t, []byte{0xA5, 0x5A}, r) 32 | 33 | r = GetBytes(d, 4, 16, false) 34 | assert.Equal(t, []byte{0xA5, 0x5A}, r) 35 | 36 | r = GetBytes(d, 4, 4, true) 37 | assert.Equal(t, []byte{0x0A}, r) 38 | 39 | r = GetBytes(d, 4, 4, false) 40 | assert.Equal(t, []byte{0xA0}, r) 41 | 42 | r = GetBytes(d, 4, 6, true) 43 | assert.Equal(t, []byte{0x29}, r) 44 | 45 | r = GetBytes(d, 4, 6, false) 46 | assert.Equal(t, []byte{0xA4}, r) 47 | 48 | r = GetBytes(d, 20, 6, true) 49 | assert.Equal(t, []byte{0x2D}, r) 50 | 51 | r = GetBytes(d, 20, 6, false) 52 | assert.Equal(t, []byte{0xB4}, r) 53 | 54 | r = 
GetBytes(d, 5, 10, true) 55 | assert.Equal(t, []byte{0x4A, 0x02}, r) 56 | 57 | // Trying to break 58 | r = GetBytes(d, 30, 10, true) 59 | assert.Equal(t, []byte{0x80, 0x00}, r) 60 | 61 | r = GetBytes(d, 30, 10, false) 62 | assert.Equal(t, []byte{0x80, 0x00}, r) 63 | 64 | r = GetBytes(d, 30, 2, true) 65 | assert.Equal(t, []byte{0x02}, r) 66 | 67 | r = GetBytes(d, 30, 2, false) 68 | assert.Equal(t, []byte{0x80}, r) 69 | 70 | r = GetBytes(d, 32, 1, true) 71 | assert.Equal(t, []byte{0}, r) 72 | 73 | } 74 | 75 | func BenchmarkGetBytes(b *testing.B) { 76 | d := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} 77 | for i := 0; i < b.N; i++ { 78 | GetBytes(d, 2, 10, false) 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /producer/proto/render.go: -------------------------------------------------------------------------------- 1 | package protoproducer 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/hex" 6 | "net" 7 | "net/netip" 8 | "time" 9 | ) 10 | 11 | type RenderFunc func(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} 12 | 13 | type RendererID string 14 | 15 | const ( 16 | RendererNone RendererID = "none" 17 | RendererIP RendererID = "ip" 18 | RendererMac RendererID = "mac" 19 | RendererEtype RendererID = "etype" 20 | RendererProto RendererID = "proto" 21 | RendererType RendererID = "type" 22 | RendererNetwork RendererID = "network" 23 | RendererDateTime RendererID = "datetime" 24 | RendererDateTimeNano RendererID = "datetimenano" 25 | RendererString RendererID = "string" 26 | ) 27 | 28 | var ( 29 | renderers = map[RendererID]RenderFunc{ 30 | RendererNone: NilRenderer, 31 | RendererIP: IPRenderer, 32 | RendererMac: MacRenderer, 33 | RendererEtype: EtypeRenderer, 34 | RendererProto: ProtoRenderer, 35 | RendererDateTime: DateTimeRenderer, 36 | RendererDateTimeNano: DateTimeNanoRenderer, 37 | RendererString: StringRenderer, 38 | } 39 | 40 | defaultRenderers = map[string]RenderFunc{ 41 | "SrcMac": 
MacRenderer, 42 | "DstMac": MacRenderer, 43 | "SrcAddr": IPRenderer, 44 | "DstAddr": IPRenderer, 45 | "SamplerAddress": IPRenderer, 46 | "NextHop": IPRenderer, 47 | "BgpNextHop": IPRenderer, 48 | "MplsLabelIp": IPRenderer, 49 | "MplsIp": IPRenderer, 50 | "Etype": EtypeRenderer, 51 | "Proto": ProtoRenderer, 52 | "SrcNet": NetworkRenderer, 53 | "DstNet": NetworkRenderer, 54 | 55 | "icmp_name": ICMPRenderer, 56 | 57 | "Ipv6RoutingHeaderAddresses": IPRenderer, 58 | } 59 | 60 | etypeName = map[uint32]string{ 61 | 0x806: "ARP", 62 | 0x800: "IPv4", 63 | 0x86dd: "IPv6", 64 | } 65 | protoName = map[uint32]string{ 66 | 0: "HOPOPT", 67 | 1: "ICMP", 68 | 2: "IGMP", 69 | 3: "GGP", 70 | 4: "IPv4", 71 | 5: "ST", 72 | 6: "TCP", 73 | 7: "CBT", 74 | 8: "EGP", 75 | 9: "IGP", 76 | 10: "BBN-RCC-MON", 77 | 11: "NVP-II", 78 | 12: "PUP", 79 | 13: "ARGUS", 80 | 14: "EMCON", 81 | 15: "XNET", 82 | 16: "CHAOS", 83 | 17: "UDP", 84 | 18: "MUX", 85 | 19: "DCN-MEAS", 86 | 20: "HMP", 87 | 21: "PRM", 88 | 22: "XNS-IDP", 89 | 23: "TRUNK-1", 90 | 24: "TRUNK-2", 91 | 25: "LEAF-1", 92 | 26: "LEAF-2", 93 | 27: "RDP", 94 | 28: "IRTP", 95 | 29: "ISO-TP4", 96 | 30: "NETBLT", 97 | 31: "MFE-NSP", 98 | 32: "MERIT-INP", 99 | 33: "DCCP", 100 | 34: "3PC", 101 | 35: "IDPR", 102 | 36: "XTP", 103 | 37: "DDP", 104 | 38: "IDPR-CMTP", 105 | 39: "TP++", 106 | 40: "IL", 107 | 41: "IPv6", 108 | 42: "SDRP", 109 | 43: "IPv6-Route", 110 | 44: "IPv6-Frag", 111 | 45: "IDRP", 112 | 46: "RSVP", 113 | 47: "GRE", 114 | 48: "DSR", 115 | 49: "BNA", 116 | 50: "ESP", 117 | 51: "AH", 118 | 52: "I-NLSP", 119 | 53: "SWIPE", 120 | 54: "NARP", 121 | 55: "Min-IPv4", 122 | 56: "TLSP", 123 | 57: "SKIP", 124 | 58: "IPv6-ICMP", 125 | 59: "IPv6-NoNxt", 126 | 60: "IPv6-Opts", 127 | 61: "any-host-internal-protocol", 128 | 62: "CFTP", 129 | 63: "any-local-network", 130 | 64: "SAT-EXPAK", 131 | 65: "KRYPTOLAN", 132 | 66: "RVD", 133 | 67: "IPPC", 134 | 68: "any-distributed-file-system", 135 | 69: "SAT-MON", 136 | 70: "VISA", 137 | 71: "IPCV", 138 | 
72: "CPNX", 139 | 73: "CPHB", 140 | 74: "WSN", 141 | 75: "PVP", 142 | 76: "BR-SAT-MON", 143 | 77: "SUN-ND", 144 | 78: "WB-MON", 145 | 79: "WB-EXPAK", 146 | 80: "ISO-IP", 147 | 81: "VMTP", 148 | 82: "SECURE-VMTP", 149 | 83: "VINES", 150 | 84: "IPTM", 151 | 85: "NSFNET-IGP", 152 | 86: "DGP", 153 | 87: "TCF", 154 | 88: "EIGRP", 155 | 89: "OSPFIGP", 156 | 90: "Sprite-RPC", 157 | 91: "LARP", 158 | 92: "MTP", 159 | 93: "AX.25", 160 | 94: "IPIP", 161 | 95: "MICP", 162 | 96: "SCC-SP", 163 | 97: "ETHERIP", 164 | 98: "ENCAP", 165 | 99: "any-private-encryption-scheme", 166 | 100: "GMTP", 167 | 101: "IFMP", 168 | 102: "PNNI", 169 | 103: "PIM", 170 | 104: "ARIS", 171 | 105: "SCPS", 172 | 106: "QNX", 173 | 107: "A/N", 174 | 108: "IPComp", 175 | 109: "SNP", 176 | 110: "Compaq-Peer", 177 | 111: "IPX-in-IP", 178 | 112: "VRRP", 179 | 113: "PGM", 180 | 114: "any-0-hop-protocol", 181 | 115: "L2TP", 182 | 116: "DDX", 183 | 117: "IATP", 184 | 118: "STP", 185 | 119: "SRP", 186 | 120: "UTI", 187 | 121: "SMP", 188 | 122: "SM", 189 | 123: "PTP", 190 | 124: "ISIS over IPv4", 191 | 125: "FIRE", 192 | 126: "CRTP", 193 | 127: "CRUDP", 194 | 128: "SSCOPMCE", 195 | 129: "IPLT", 196 | 130: "SPS", 197 | 131: "PIPE", 198 | 132: "SCTP", 199 | 133: "FC", 200 | 134: "RSVP-E2E-IGNORE", 201 | 135: "Mobility Header", 202 | 136: "UDPLite", 203 | 137: "MPLS-in-IP", 204 | 138: "manet", 205 | 139: "HIP", 206 | 140: "Shim6", 207 | 141: "WESP", 208 | 142: "ROHC", 209 | 143: "Ethernet", 210 | 144: "AGGFRAG", 211 | 145: "NSH", 212 | } 213 | icmpTypeName = map[uint32]string{ 214 | 0: "EchoReply", 215 | 3: "DestinationUnreachable", 216 | 8: "Echo", 217 | 9: "RouterAdvertisement", 218 | 10: "RouterSolicitation", 219 | 11: "TimeExceeded", 220 | } 221 | icmp6TypeName = map[uint32]string{ 222 | 1: "DestinationUnreachable", 223 | 2: "PacketTooBig", 224 | 3: "TimeExceeded", 225 | 128: "EchoRequest", 226 | 129: "EchoReply", 227 | 133: "RouterSolicitation", 228 | 134: "RouterAdvertisement", 229 | } 230 | ) 231 | 232 | func 
NilRenderer(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} { 233 | if dataIf, ok := data.(interface { 234 | String() string 235 | }); ok { 236 | return dataIf.String() 237 | } 238 | if dataC, ok := data.([]byte); ok { 239 | return hex.EncodeToString(dataC) 240 | } 241 | return data 242 | } 243 | 244 | func StringRenderer(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} { 245 | if dataC, ok := data.([]byte); ok { 246 | return string(dataC) 247 | } else if dataC, ok := data.(string); ok { 248 | return string(dataC) 249 | } // maybe should support uint64? 250 | return NilRenderer(msg, fieldName, data) 251 | } 252 | 253 | func DateTimeRenderer(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} { 254 | if dataC, ok := data.(uint64); ok { 255 | ts := time.Unix(int64(dataC), 0).UTC() 256 | return ts.Format(time.RFC3339Nano) 257 | } else if dataC, ok := data.(int64); ok { 258 | ts := time.Unix(dataC, 0).UTC() 259 | return ts.Format(time.RFC3339Nano) 260 | } else if dataC, ok := data.(uint32); ok { 261 | ts := time.Unix(int64(dataC), 0).UTC() 262 | return ts.Format(time.RFC3339Nano) 263 | } else if dataC, ok := data.(int32); ok { 264 | ts := time.Unix(int64(dataC), 0).UTC() 265 | return ts.Format(time.RFC3339Nano) 266 | } 267 | return NilRenderer(msg, fieldName, data) 268 | } 269 | 270 | func DateTimeNanoRenderer(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} { 271 | if dataC, ok := data.(uint64); ok { 272 | ts := time.Unix(int64(dataC)/1e9, int64(dataC)%1e9).UTC() 273 | return ts.Format(time.RFC3339Nano) 274 | } else if dataC, ok := data.(int64); ok { 275 | ts := time.Unix(dataC/1e9, dataC%1e9).UTC() 276 | return ts.Format(time.RFC3339Nano) 277 | } 278 | return NilRenderer(msg, fieldName, data) 279 | } 280 | 281 | func MacRenderer(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} { 282 | if dataC, ok := data.(uint64); ok { 283 | var mac [8]byte 
// RenderIP formats a 4-byte (IPv4) or 16-byte (IPv6) address as its textual
// representation. Any other length, including a nil slice, yields "".
func RenderIP(addr []byte) string {
	switch len(addr) {
	case 4, 16:
		if ip, ok := netip.AddrFromSlice(addr); ok {
			return ip.String()
		}
	}
	return ""
}
dataC, ok := data.(uint32); ok { 346 | prefix, _ := addr.Prefix(int(dataC)) 347 | return prefix.String() 348 | } 349 | return "unknown" 350 | } 351 | 352 | func IcmpCodeType(proto, icmpCode, icmpType uint32) string { 353 | if proto == 1 { 354 | return icmpTypeName[icmpType] 355 | } else if proto == 58 { 356 | return icmp6TypeName[icmpType] 357 | } 358 | return "unknown" 359 | } 360 | 361 | func ICMPRenderer(msg *ProtoProducerMessage, fieldName string, data interface{}) interface{} { 362 | return IcmpCodeType(uint32(msg.Proto), uint32(msg.IcmpCode), uint32(msg.IcmpType)) 363 | } 364 | -------------------------------------------------------------------------------- /producer/raw/raw.go: -------------------------------------------------------------------------------- 1 | package rawproducer 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/netip" 7 | "time" 8 | 9 | "github.com/netsampler/goflow2/v2/decoders/netflow" 10 | "github.com/netsampler/goflow2/v2/decoders/netflowlegacy" 11 | "github.com/netsampler/goflow2/v2/decoders/sflow" 12 | "github.com/netsampler/goflow2/v2/producer" 13 | ) 14 | 15 | // Producer that keeps the same format 16 | // as the original flow samples. 
17 | // This can be used for debugging (eg: getting NetFlow Option Templates) 18 | type RawProducer struct { 19 | } 20 | 21 | // Raw message 22 | type RawMessage struct { 23 | Message interface{} `json:"message"` 24 | Src netip.AddrPort `json:"src"` 25 | TimeReceived time.Time `json:"time_received"` 26 | } 27 | 28 | func (m RawMessage) MarshalJSON() ([]byte, error) { 29 | typeStr := "unknown" 30 | switch m.Message.(type) { 31 | case *netflowlegacy.PacketNetFlowV5: 32 | typeStr = "netflowv5" 33 | case *netflow.NFv9Packet: 34 | typeStr = "netflowv9" 35 | case *netflow.IPFIXPacket: 36 | typeStr = "ipfix" 37 | case *sflow.Packet: 38 | typeStr = "sflow" 39 | } 40 | 41 | tmpStruct := struct { 42 | Type string `json:"type"` 43 | Message interface{} `json:"message"` 44 | Src *netip.AddrPort `json:"src"` 45 | TimeReceived *time.Time `json:"time_received"` 46 | }{ 47 | Type: typeStr, 48 | Message: m.Message, 49 | Src: &m.Src, 50 | TimeReceived: &m.TimeReceived, 51 | } 52 | return json.Marshal(tmpStruct) 53 | } 54 | 55 | func (m RawMessage) MarshalText() ([]byte, error) { 56 | var msgContents []byte 57 | var err error 58 | if msg, ok := m.Message.(interface { 59 | MarshalText() ([]byte, error) 60 | }); ok { 61 | msgContents, err = msg.MarshalText() 62 | } 63 | return []byte(fmt.Sprintf("%s %s: %s", m.TimeReceived.String(), m.Src.String(), string(msgContents))), err 64 | } 65 | 66 | func (p *RawProducer) Produce(msg interface{}, args *producer.ProduceArgs) ([]producer.ProducerMessage, error) { 67 | // should return msg wrapped 68 | // []*interface{msg,} 69 | return []producer.ProducerMessage{RawMessage{msg, args.Src, args.TimeReceived}}, nil 70 | } 71 | 72 | func (p *RawProducer) Commit(flowMessageSet []producer.ProducerMessage) {} 73 | 74 | func (p *RawProducer) Close() {} 75 | -------------------------------------------------------------------------------- /transport/file/transport.go: -------------------------------------------------------------------------------- 1 | 
// FileDriver writes formatted flow messages to a file or to stdout.
// A SIGHUP triggers a reopen of the destination (log-rotation friendly).
type FileDriver struct {
	fileDestination string        // output path; empty means stdout
	lineSeparator   string        // appended after every record
	w               io.Writer     // current writer (stdout or file)
	file            *os.File      // open file when fileDestination is set
	lock            *sync.RWMutex // guards w/file swaps during reopen
	q               chan bool     // closed on Close to stop the SIGHUP goroutine
}

// Prepare registers the command-line flags for this driver.
func (d *FileDriver) Prepare() error {
	flag.StringVar(&d.fileDestination, "transport.file", "", "File/console output (empty for stdout)")
	flag.StringVar(&d.lineSeparator, "transport.file.sep", "\n", "Line separator")
	// idea: add terminal coloring based on key partitioning (if any)
	return nil
}

// openDestination opens the configured file in append mode.
func (d *FileDriver) openDestination() (*os.File, error) {
	return os.OpenFile(d.fileDestination, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
}

// openFile opens the destination and installs it as the current writer.
// Caller must hold d.lock.
func (d *FileDriver) openFile() error {
	file, err := d.openDestination()
	if err != nil {
		return err
	}
	d.file = file
	d.w = d.file
	return nil
}

// Init opens the output and, for file destinations, starts a goroutine that
// reopens the file on SIGHUP.
func (d *FileDriver) Init() error {
	d.q = make(chan bool, 1)

	if d.fileDestination == "" {
		d.w = os.Stdout
		return nil
	}

	d.lock.Lock()
	err := d.openFile()
	d.lock.Unlock()
	if err != nil {
		return err
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for {
			select {
			case <-c:
				// Open the replacement before closing the current file so a
				// failed reopen keeps the old file usable instead of leaving
				// a closed writer behind (and keep listening for SIGHUP).
				newFile, err := d.openDestination()
				if err != nil {
					continue // keep using the old file
				}
				d.lock.Lock()
				d.file.Close()
				d.file = newFile
				d.w = newFile
				d.lock.Unlock()
			case <-d.q: // driver closed
				return
			}
		}
	}()
	return nil
}

// Send writes one record followed by the configured separator.
func (d *FileDriver) Send(key, data []byte) error {
	d.lock.RLock()
	w := d.w
	d.lock.RUnlock()
	_, err := fmt.Fprint(w, string(data)+d.lineSeparator)
	return err
}
return nil 95 | } 96 | 97 | func init() { 98 | d := &FileDriver{ 99 | lock: &sync.RWMutex{}, 100 | } 101 | transport.RegisterTransportDriver("file", d) 102 | } 103 | -------------------------------------------------------------------------------- /transport/kafka/scram_client.go: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | // From https://github.com/Shopify/sarama/blob/main/examples/sasl_scram_client/scram_client.go 4 | 5 | import ( 6 | "crypto/sha256" 7 | "crypto/sha512" 8 | 9 | "github.com/xdg-go/scram" 10 | ) 11 | 12 | var ( 13 | SHA256 scram.HashGeneratorFcn = sha256.New 14 | SHA512 scram.HashGeneratorFcn = sha512.New 15 | ) 16 | 17 | type XDGSCRAMClient struct { 18 | *scram.Client 19 | *scram.ClientConversation 20 | scram.HashGeneratorFcn 21 | } 22 | 23 | func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { 24 | x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) 25 | if err != nil { 26 | return err 27 | } 28 | x.ClientConversation = x.Client.NewConversation() 29 | return nil 30 | } 31 | 32 | func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { 33 | response, err = x.ClientConversation.Step(challenge) 34 | return 35 | } 36 | 37 | func (x *XDGSCRAMClient) Done() bool { 38 | return x.ClientConversation.Done() 39 | } 40 | -------------------------------------------------------------------------------- /transport/transport.go: -------------------------------------------------------------------------------- 1 | package transport 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | var ( 9 | transportDrivers = make(map[string]TransportDriver) 10 | lock = &sync.RWMutex{} 11 | 12 | ErrTransport = fmt.Errorf("transport error") 13 | ) 14 | 15 | type DriverTransportError struct { 16 | Driver string 17 | Err error 18 | } 19 | 20 | func (e *DriverTransportError) Error() string { 21 | return fmt.Sprintf("%s for %s transport", 
e.Err.Error(), e.Driver) 22 | } 23 | 24 | func (e *DriverTransportError) Unwrap() []error { 25 | return []error{ErrTransport, e.Err} 26 | } 27 | 28 | type TransportDriver interface { 29 | Prepare() error // Prepare driver (eg: flag registration) 30 | Init() error // Initialize driver (eg: start connections, open files...) 31 | Close() error // Close driver (eg: close connections and files...) 32 | Send(key, data []byte) error // Send a formatted message 33 | } 34 | 35 | type TransportInterface interface { 36 | Send(key, data []byte) error 37 | } 38 | 39 | type Transport struct { 40 | TransportDriver 41 | name string 42 | } 43 | 44 | func (t *Transport) Close() error { 45 | if err := t.TransportDriver.Close(); err != nil { 46 | return &DriverTransportError{t.name, err} 47 | } 48 | return nil 49 | } 50 | 51 | func (t *Transport) Send(key, data []byte) error { 52 | if err := t.TransportDriver.Send(key, data); err != nil { 53 | return &DriverTransportError{t.name, err} 54 | } 55 | return nil 56 | } 57 | 58 | func RegisterTransportDriver(name string, t TransportDriver) { 59 | lock.Lock() 60 | transportDrivers[name] = t 61 | lock.Unlock() 62 | 63 | if err := t.Prepare(); err != nil { 64 | panic(err) 65 | } 66 | } 67 | 68 | func FindTransport(name string) (*Transport, error) { 69 | lock.RLock() 70 | t, ok := transportDrivers[name] 71 | lock.RUnlock() 72 | if !ok { 73 | return nil, fmt.Errorf("%w %s not found", ErrTransport, name) 74 | } 75 | 76 | err := t.Init() 77 | if err != nil { 78 | err = &DriverTransportError{name, err} 79 | } 80 | return &Transport{t, name}, err 81 | } 82 | 83 | func GetTransports() []string { 84 | lock.RLock() 85 | defer lock.RUnlock() 86 | t := make([]string, len(transportDrivers)) 87 | var i int 88 | for k := range transportDrivers { 89 | t[i] = k 90 | i++ 91 | } 92 | return t 93 | } 94 | -------------------------------------------------------------------------------- /utils/debug/debug.go: 
-------------------------------------------------------------------------------- 1 | package debug 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | var ( 8 | PanicError = fmt.Errorf("panic") 9 | ) 10 | 11 | type PanicErrorMessage struct { 12 | Msg interface{} 13 | Inner string 14 | Stacktrace []byte 15 | } 16 | 17 | func (e *PanicErrorMessage) Error() string { 18 | return fmt.Sprintf("%s", e.Inner) 19 | } 20 | 21 | func (e *PanicErrorMessage) Unwrap() []error { 22 | return []error{PanicError} 23 | } 24 | -------------------------------------------------------------------------------- /utils/debug/decoder.go: -------------------------------------------------------------------------------- 1 | package debug 2 | 3 | import ( 4 | "runtime/debug" 5 | 6 | "github.com/netsampler/goflow2/v2/utils" 7 | ) 8 | 9 | func PanicDecoderWrapper(wrapped utils.DecoderFunc) utils.DecoderFunc { 10 | return func(msg interface{}) (err error) { 11 | defer func() { 12 | if pErr := recover(); pErr != nil { 13 | pErrC, _ := pErr.(string) 14 | err = &PanicErrorMessage{Msg: msg, Inner: pErrC, Stacktrace: debug.Stack()} 15 | } 16 | }() 17 | err = wrapped(msg) 18 | return err 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /utils/debug/producer.go: -------------------------------------------------------------------------------- 1 | package debug 2 | 3 | import ( 4 | "runtime/debug" 5 | 6 | "github.com/netsampler/goflow2/v2/producer" 7 | ) 8 | 9 | type PanicProducerWrapper struct { 10 | wrapped producer.ProducerInterface 11 | } 12 | 13 | func (p *PanicProducerWrapper) Produce(msg interface{}, args *producer.ProduceArgs) (flowMessageSet []producer.ProducerMessage, err error) { 14 | 15 | defer func() { 16 | if pErr := recover(); pErr != nil { 17 | pErrC, _ := pErr.(string) 18 | err = &PanicErrorMessage{Msg: msg, Inner: pErrC, Stacktrace: debug.Stack()} 19 | } 20 | }() 21 | 22 | flowMessageSet, err = p.wrapped.Produce(msg, args) 23 | return flowMessageSet, 
err 24 | } 25 | 26 | func (p *PanicProducerWrapper) Close() { 27 | p.wrapped.Close() 28 | } 29 | 30 | func (p *PanicProducerWrapper) Commit(flowMessageSet []producer.ProducerMessage) { 31 | p.wrapped.Commit(flowMessageSet) 32 | } 33 | 34 | func WrapPanicProducer(wrapped producer.ProducerInterface) producer.ProducerInterface { 35 | return &PanicProducerWrapper{ 36 | wrapped: wrapped, 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /utils/mute.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | type BatchMute struct { 8 | batchTime time.Time 9 | resetInterval time.Duration 10 | ctr int 11 | max int 12 | } 13 | 14 | func (b *BatchMute) increment(val int, t time.Time) (muted bool, skipped int) { 15 | 16 | if b.max == 0 || b.resetInterval == 0 { 17 | return muted, skipped 18 | } 19 | 20 | if b.ctr >= b.max { 21 | skipped = b.ctr - b.max 22 | } 23 | 24 | if t.Sub(b.batchTime) > b.resetInterval { 25 | b.ctr = 0 26 | b.batchTime = t 27 | } 28 | b.ctr += val 29 | 30 | return b.max > 0 && b.ctr > b.max, skipped 31 | } 32 | 33 | func (b *BatchMute) Increment() (muting bool, skipped int) { 34 | return b.increment(1, time.Now().UTC()) 35 | } 36 | 37 | func NewBatchMute(resetInterval time.Duration, max int) *BatchMute { 38 | return &BatchMute{ 39 | batchTime: time.Now().UTC(), 40 | resetInterval: resetInterval, 41 | max: max, 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /utils/mute_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestBatchMute(t *testing.T) { 9 | tm := time.Date(2023, time.November, 10, 23, 0, 0, 0, time.UTC) 10 | bm := BatchMute{ 11 | batchTime: tm, 12 | resetInterval: time.Second * 10, 13 | max: 5, 14 | } 15 | 16 | for i := 0; i < 20; i++ { 17 | 
tm = tm.Add(time.Second) 18 | t.Log(bm.increment(1, tm)) 19 | } 20 | 21 | } 22 | 23 | func TestBatchMuteZero(t *testing.T) { 24 | tm := time.Date(2023, time.November, 10, 23, 0, 0, 0, time.UTC) 25 | bm := BatchMute{ 26 | batchTime: tm, 27 | resetInterval: time.Second * 10, 28 | max: 0, 29 | } 30 | 31 | for i := 0; i < 20; i++ { 32 | tm = tm.Add(time.Second) 33 | t.Log(bm.increment(1, tm)) 34 | } 35 | 36 | } 37 | 38 | func TestBatchMuteInterval(t *testing.T) { 39 | tm := time.Date(2023, time.November, 10, 23, 0, 0, 0, time.UTC) 40 | bm := BatchMute{ 41 | batchTime: tm, 42 | resetInterval: 0, 43 | max: 5, 44 | } 45 | 46 | for i := 0; i < 20; i++ { 47 | tm = tm.Add(time.Second) 48 | t.Log(bm.increment(1, tm)) 49 | } 50 | 51 | } 52 | -------------------------------------------------------------------------------- /utils/pipe.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "sync" 7 | 8 | "github.com/netsampler/goflow2/v2/decoders/netflow" 9 | "github.com/netsampler/goflow2/v2/decoders/netflowlegacy" 10 | "github.com/netsampler/goflow2/v2/decoders/sflow" 11 | "github.com/netsampler/goflow2/v2/decoders/utils" 12 | "github.com/netsampler/goflow2/v2/format" 13 | "github.com/netsampler/goflow2/v2/producer" 14 | "github.com/netsampler/goflow2/v2/transport" 15 | "github.com/netsampler/goflow2/v2/utils/templates" 16 | ) 17 | 18 | type FlowPipe interface { 19 | DecodeFlow(msg interface{}) error 20 | Close() 21 | } 22 | 23 | type flowpipe struct { 24 | format format.FormatInterface 25 | transport transport.TransportInterface 26 | producer producer.ProducerInterface 27 | 28 | netFlowTemplater templates.TemplateSystemGenerator 29 | } 30 | 31 | type PipeConfig struct { 32 | Format format.FormatInterface 33 | Transport transport.TransportInterface 34 | Producer producer.ProducerInterface 35 | 36 | NetFlowTemplater templates.TemplateSystemGenerator 37 | } 38 | 39 | func (p *flowpipe) 
formatSend(flowMessageSet []producer.ProducerMessage) error { 40 | for _, msg := range flowMessageSet { 41 | // todo: pass normal 42 | if p.format != nil { 43 | key, data, err := p.format.Format(msg) 44 | if err != nil { 45 | return err 46 | } 47 | if p.transport != nil { 48 | if err = p.transport.Send(key, data); err != nil { 49 | return err 50 | } 51 | } 52 | // send to pool for reuse 53 | } 54 | } 55 | return nil 56 | 57 | } 58 | 59 | func (p *flowpipe) parseConfig(cfg *PipeConfig) { 60 | p.format = cfg.Format 61 | p.transport = cfg.Transport 62 | p.producer = cfg.Producer 63 | if cfg.NetFlowTemplater != nil { 64 | p.netFlowTemplater = cfg.NetFlowTemplater 65 | } else { 66 | p.netFlowTemplater = templates.DefaultTemplateGenerator 67 | } 68 | 69 | } 70 | 71 | type SFlowPipe struct { 72 | flowpipe 73 | } 74 | 75 | type NetFlowPipe struct { 76 | flowpipe 77 | 78 | templateslock *sync.RWMutex 79 | templates map[string]netflow.NetFlowTemplateSystem 80 | } 81 | 82 | type PipeMessageError struct { 83 | Message *Message 84 | Err error 85 | } 86 | 87 | func (e *PipeMessageError) Error() string { 88 | return fmt.Sprintf("message from %s %s", e.Message.Src.String(), e.Err.Error()) 89 | } 90 | 91 | func (e *PipeMessageError) Unwrap() error { 92 | return e.Err 93 | } 94 | 95 | func NewSFlowPipe(cfg *PipeConfig) *SFlowPipe { 96 | p := &SFlowPipe{} 97 | p.parseConfig(cfg) 98 | return p 99 | } 100 | 101 | func (p *SFlowPipe) Close() { 102 | } 103 | 104 | func (p *SFlowPipe) DecodeFlow(msg interface{}) error { 105 | pkt, ok := msg.(*Message) 106 | if !ok { 107 | return fmt.Errorf("flow is not *Message") 108 | } 109 | buf := bytes.NewBuffer(pkt.Payload) 110 | //key := pkt.Src.String() 111 | 112 | var packet sflow.Packet 113 | if err := sflow.DecodeMessageVersion(buf, &packet); err != nil { 114 | return &PipeMessageError{pkt, err} 115 | } 116 | 117 | args := producer.ProduceArgs{ 118 | Src: pkt.Src, 119 | Dst: pkt.Dst, 120 | 121 | TimeReceived: pkt.Received, 122 | SamplerAddress: 
pkt.Src.Addr(), 123 | } 124 | if p.producer == nil { 125 | return nil 126 | } 127 | flowMessageSet, err := p.producer.Produce(&packet, &args) 128 | defer p.producer.Commit(flowMessageSet) 129 | if err != nil { 130 | return &PipeMessageError{pkt, err} 131 | } 132 | return p.formatSend(flowMessageSet) 133 | } 134 | 135 | func NewNetFlowPipe(cfg *PipeConfig) *NetFlowPipe { 136 | p := &NetFlowPipe{ 137 | templateslock: &sync.RWMutex{}, 138 | templates: make(map[string]netflow.NetFlowTemplateSystem), 139 | } 140 | p.parseConfig(cfg) 141 | return p 142 | } 143 | 144 | func (p *NetFlowPipe) DecodeFlow(msg interface{}) error { 145 | pkt, ok := msg.(*Message) 146 | if !ok { 147 | return fmt.Errorf("flow is not *Message") 148 | } 149 | buf := bytes.NewBuffer(pkt.Payload) 150 | 151 | key := pkt.Src.String() 152 | 153 | p.templateslock.RLock() 154 | templates, ok := p.templates[key] 155 | p.templateslock.RUnlock() 156 | if !ok { 157 | templates = p.netFlowTemplater(key) 158 | p.templateslock.Lock() 159 | p.templates[key] = templates 160 | p.templateslock.Unlock() 161 | } 162 | 163 | var packetV5 netflowlegacy.PacketNetFlowV5 164 | var packetNFv9 netflow.NFv9Packet 165 | var packetIPFIX netflow.IPFIXPacket 166 | 167 | // decode the version 168 | var version uint16 169 | if err := utils.BinaryDecoder(buf, &version); err != nil { 170 | return &PipeMessageError{pkt, err} 171 | } 172 | switch version { 173 | case 5: 174 | packetV5.Version = 5 175 | if err := netflowlegacy.DecodeMessage(buf, &packetV5); err != nil { 176 | return &PipeMessageError{pkt, err} 177 | } 178 | case 9: 179 | packetNFv9.Version = 9 180 | if err := netflow.DecodeMessageNetFlow(buf, templates, &packetNFv9); err != nil { 181 | return &PipeMessageError{pkt, err} 182 | } 183 | case 10: 184 | packetIPFIX.Version = 10 185 | if err := netflow.DecodeMessageIPFIX(buf, templates, &packetIPFIX); err != nil { 186 | return &PipeMessageError{pkt, err} 187 | } 188 | default: 189 | return &PipeMessageError{pkt, 
fmt.Errorf("not a NetFlow packet")} 190 | } 191 | 192 | var flowMessageSet []producer.ProducerMessage 193 | var err error 194 | 195 | args := producer.ProduceArgs{ 196 | Src: pkt.Src, 197 | Dst: pkt.Dst, 198 | 199 | TimeReceived: pkt.Received, 200 | SamplerAddress: pkt.Src.Addr(), 201 | } 202 | 203 | if p.producer == nil { 204 | return nil 205 | } 206 | 207 | switch version { 208 | case 5: 209 | flowMessageSet, err = p.producer.Produce(&packetV5, &args) 210 | case 9: 211 | flowMessageSet, err = p.producer.Produce(&packetNFv9, &args) 212 | case 10: 213 | flowMessageSet, err = p.producer.Produce(&packetIPFIX, &args) 214 | } 215 | defer p.producer.Commit(flowMessageSet) 216 | if err != nil { 217 | return &PipeMessageError{pkt, err} 218 | } 219 | 220 | return p.formatSend(flowMessageSet) 221 | } 222 | 223 | func (p *NetFlowPipe) Close() { 224 | } 225 | 226 | type AutoFlowPipe struct { 227 | *SFlowPipe 228 | *NetFlowPipe 229 | } 230 | 231 | func NewFlowPipe(cfg *PipeConfig) *AutoFlowPipe { 232 | p := &AutoFlowPipe{ 233 | SFlowPipe: NewSFlowPipe(cfg), 234 | NetFlowPipe: NewNetFlowPipe(cfg), 235 | } 236 | return p 237 | } 238 | 239 | func (p *AutoFlowPipe) Close() { 240 | p.SFlowPipe.Close() 241 | p.NetFlowPipe.Close() 242 | } 243 | 244 | func (p *AutoFlowPipe) DecodeFlow(msg interface{}) error { 245 | pkt, ok := msg.(*Message) 246 | if !ok { 247 | return fmt.Errorf("flow is not *Message") 248 | } 249 | buf := bytes.NewBuffer(pkt.Payload) 250 | 251 | var proto uint32 252 | if err := utils.BinaryDecoder(buf, &proto); err != nil { 253 | return &PipeMessageError{pkt, err} 254 | } 255 | 256 | protoNetFlow := (proto & 0xFFFF0000) >> 16 257 | if proto == 5 { 258 | return p.SFlowPipe.DecodeFlow(msg) 259 | } else if protoNetFlow == 5 || protoNetFlow == 9 || protoNetFlow == 10 { 260 | return p.NetFlowPipe.DecodeFlow(msg) 261 | } 262 | return fmt.Errorf("could not identify protocol %d", proto) 263 | } 264 | 
-------------------------------------------------------------------------------- /utils/templates/templates.go: -------------------------------------------------------------------------------- 1 | package templates 2 | 3 | import ( 4 | "github.com/netsampler/goflow2/v2/decoders/netflow" 5 | ) 6 | 7 | // Function that Create Template Systems. 8 | // This is meant to be used by a pipe 9 | type TemplateSystemGenerator func(key string) netflow.NetFlowTemplateSystem 10 | 11 | // Default template generator 12 | func DefaultTemplateGenerator(key string) netflow.NetFlowTemplateSystem { 13 | return netflow.CreateTemplateSystem() 14 | } 15 | -------------------------------------------------------------------------------- /utils/udp.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "net/netip" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | reuseport "github.com/libp2p/go-reuseport" 12 | ) 13 | 14 | type ReceiverCallback interface { 15 | Dropped(msg Message) 16 | } 17 | 18 | // Callback used to decode a UDP message 19 | type DecoderFunc func(msg interface{}) error 20 | 21 | type udpPacket struct { 22 | src *net.UDPAddr 23 | dst *net.UDPAddr 24 | size int 25 | payload []byte 26 | received time.Time 27 | } 28 | 29 | type Message struct { 30 | Src netip.AddrPort 31 | Dst netip.AddrPort 32 | Payload []byte 33 | Received time.Time 34 | } 35 | 36 | var packetPool = sync.Pool{ 37 | New: func() any { 38 | return &udpPacket{ 39 | payload: make([]byte, 9000), 40 | } 41 | }, 42 | } 43 | 44 | type UDPReceiver struct { 45 | ready chan bool 46 | q chan bool 47 | wg *sync.WaitGroup 48 | dispatch chan *udpPacket 49 | errCh chan error // linked to receiver, never closed 50 | 51 | decodersCnt int 52 | blocking bool 53 | 54 | workers int 55 | sockets int 56 | 57 | cb ReceiverCallback 58 | } 59 | 60 | type UDPReceiverConfig struct { 61 | Workers int 62 | Sockets int 63 | Blocking bool 64 | QueueSize int 
65 | 66 | ReceiverCallback ReceiverCallback 67 | } 68 | 69 | func NewUDPReceiver(cfg *UDPReceiverConfig) (*UDPReceiver, error) { 70 | r := &UDPReceiver{ 71 | wg: &sync.WaitGroup{}, 72 | sockets: 2, 73 | workers: 2, 74 | ready: make(chan bool), 75 | errCh: make(chan error), 76 | } 77 | 78 | dispatchSize := 1000000 79 | if cfg != nil { 80 | if cfg.Sockets <= 0 { 81 | cfg.Sockets = 1 82 | } 83 | 84 | if cfg.Workers <= 0 { 85 | cfg.Workers = cfg.Sockets 86 | } 87 | 88 | r.sockets = cfg.Sockets 89 | r.workers = cfg.Workers 90 | dispatchSize = cfg.QueueSize 91 | r.blocking = cfg.Blocking 92 | r.cb = cfg.ReceiverCallback 93 | } 94 | 95 | if dispatchSize == 0 { 96 | r.dispatch = make(chan *udpPacket) // synchronous mode 97 | } else { 98 | r.dispatch = make(chan *udpPacket, dispatchSize) 99 | } 100 | 101 | err := r.init() 102 | 103 | return r, err 104 | } 105 | 106 | // Initialize channels that are related to a session 107 | // Once the user calls Stop, they can restart the capture 108 | func (r *UDPReceiver) init() error { 109 | 110 | r.q = make(chan bool) 111 | r.decodersCnt = 0 112 | select { 113 | case <-r.ready: 114 | return fmt.Errorf("receiver is already stopped") 115 | default: 116 | close(r.ready) 117 | } 118 | return nil 119 | } 120 | 121 | func (r *UDPReceiver) logError(err error) { 122 | select { 123 | case r.errCh <- err: 124 | default: 125 | } 126 | } 127 | 128 | func (r *UDPReceiver) Errors() <-chan error { 129 | return r.errCh 130 | } 131 | 132 | func (r *UDPReceiver) receive(addr string, port int, started chan bool) error { 133 | if strings.IndexRune(addr, ':') >= 0 && strings.IndexRune(addr, '[') == -1 { 134 | addr = "[" + addr + "]" 135 | } 136 | 137 | pconn, err := reuseport.ListenPacket("udp", fmt.Sprintf("%s:%d", addr, port)) 138 | if err != nil { 139 | return err 140 | } 141 | close(started) // indicates receiver is setup 142 | 143 | q := make(chan bool) 144 | // function to quit 145 | go func() { 146 | select { 147 | case <-q: // if routine has 
// receiveRoutine reads UDP datagrams from udpconn in a loop and hands them to
// the dispatch channel until a read error occurs or the receiver is stopped.
// Packets are taken from packetPool to avoid a per-datagram allocation.
func (r *UDPReceiver) receiveRoutine(udpconn *net.UDPConn) (err error) {
	localAddr, _ := udpconn.LocalAddr().(*net.UDPAddr) // nil dst if the local address is not UDP

	for {
		pkt := packetPool.Get().(*udpPacket)
		pkt.size, pkt.src, err = udpconn.ReadFromUDP(pkt.payload)
		if err != nil {
			packetPool.Put(pkt)
			return err
		}
		pkt.dst = localAddr
		pkt.received = time.Now().UTC()
		if pkt.size == 0 {
			// error
			// NOTE(review): empty datagram is skipped without returning pkt
			// to the pool — confirm whether this leak is intentional.
			continue
		}

		if r.blocking {
			// does not drop
			// if combined with synchronous mode
			select {
			case r.dispatch <- pkt: // block until a decoder takes it
			case <-r.q: // receiver stopped
				return nil
			}
		} else {
			select {
			case r.dispatch <- pkt:
			case <-r.q: // receiver stopped
				return nil
			default:
				// queue full: report the drop, then recycle the packet
				if r.cb != nil {
					// NOTE(review): Payload aliases the pooled buffer, which
					// is recycled right after this call — the callback must
					// not retain it. Verify against Dropped implementations.
					r.cb.Dropped(Message{
						Src:      pkt.src.AddrPort(),
						Dst:      pkt.dst.AddrPort(),
						Payload:  pkt.payload[0:pkt.size],
						Received: pkt.received,
					})
				}
				packetPool.Put(pkt)
				// increase counter
			}
		}

	}

}
pkt.src.AddrPort(), 237 | Dst: pkt.dst.AddrPort(), 238 | Payload: pkt.payload[0:pkt.size], 239 | Received: pkt.received, 240 | } 241 | 242 | if err := decodeFunc(&msg); err != nil { 243 | r.logError(&ReceiverError{err}) 244 | } 245 | } 246 | packetPool.Put(pkt) 247 | 248 | } 249 | }() 250 | } 251 | 252 | return nil 253 | } 254 | 255 | // Starts the UDP receiving workers 256 | func (r *UDPReceiver) receivers(sockets int, addr string, port int) (rErr error) { 257 | for i := 0; i < sockets; i++ { 258 | if rErr != nil { // do not instanciate the rest of the receivers 259 | break 260 | } 261 | 262 | r.wg.Add(1) 263 | started := make(chan bool) // indicates receiver setup is complete 264 | go func() { 265 | defer r.wg.Done() 266 | if err := r.receive(addr, port, started); err != nil { 267 | err = &ReceiverError{err} 268 | 269 | select { 270 | case <-started: 271 | default: // in case the receiver is not started yet 272 | rErr = err 273 | close(started) 274 | return 275 | } 276 | 277 | r.logError(err) 278 | } 279 | }() 280 | <-started 281 | } 282 | 283 | return rErr 284 | } 285 | 286 | // Start UDP receivers and the processing routines 287 | func (r *UDPReceiver) Start(addr string, port int, decodeFunc DecoderFunc) error { 288 | select { 289 | case <-r.ready: 290 | r.ready = make(chan bool) 291 | default: 292 | return fmt.Errorf("receiver is already started") 293 | } 294 | 295 | if err := r.decoders(r.workers, decodeFunc); err != nil { 296 | r.Stop() 297 | return err 298 | } 299 | if err := r.receivers(r.sockets, addr, port); err != nil { 300 | r.Stop() 301 | return err 302 | } 303 | return nil 304 | } 305 | 306 | // Stops the routines 307 | func (r *UDPReceiver) Stop() error { 308 | select { 309 | case <-r.q: 310 | default: 311 | close(r.q) 312 | } 313 | 314 | for i := 0; i < r.decodersCnt; i++ { 315 | r.dispatch <- nil 316 | } 317 | 318 | r.wg.Wait() 319 | 320 | return r.init() // recreates the closed channels 321 | } 322 | 
// getFreeUDPPort asks the OS for an unused UDP port on loopback by binding
// to port 0 and reading back the assigned port. The socket is closed before
// returning, so the port is free (though not reserved) for the caller.
func getFreeUDPPort() (int, error) {
	laddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		return 0, err
	}
	conn, err := net.ListenUDP("udp", laddr)
	if err != nil {
		return 0, err
	}
	defer conn.Close()
	return conn.LocalAddr().(*net.UDPAddr).Port, nil
}