├── .github
│   └── workflows
│       └── test.yml
├── .gitignore
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── Makefile
├── NOTICE
├── README.md
├── consumers
│   ├── cli
│   │   ├── README.md
│   │   └── main.go
│   ├── clickhouse
│   │   ├── README.md
│   │   ├── main.go
│   │   └── schema.sql
│   ├── memsql
│   │   ├── README.md
│   │   ├── schema.sql
│   │   └── transform.py
│   └── spark
│       ├── .gitignore
│       ├── README.md
│       ├── build.sbt
│       ├── project
│       │   └── plugins.sbt
│       └── src
│           └── main
│               └── scala
│                   └── com
│                       └── oath
│                           └── vdms
│                               └── vflow
│                                   └── consumer
│                                       └── spark
│                                           ├── driver
│                                           │   └── IngestStream.scala
│                                           ├── model
│                                           │   └── IPFix.scala
│                                           └── util
│                                               ├── FieldMappings.scala
│                                               └── ParseDataUtil.scala
├── disc
│   └── disc.go
├── docker-compose.yml
├── docs
│   ├── config.md
│   ├── design.md
│   ├── imgs
│   │   ├── architecture.gif
│   │   ├── clickhouse.jpeg
│   │   ├── clickhouse_s1.png
│   │   ├── clickhouse_s2.png
│   │   ├── grafana.gif
│   │   ├── stress.gif
│   │   ├── vflow.gif
│   │   ├── vflow_logo.png
│   │   └── vflow_memsql.jpeg
│   ├── junos_integration.md
│   ├── quick_start_kafka.md
│   └── quick_start_nsq.md
├── go.mod
├── go.sum
├── ipfix
│   ├── decoder.go
│   ├── decoder_test.go
│   ├── doc.go
│   ├── interpret.go
│   ├── marshal.go
│   ├── marshal_test.go
│   ├── memcache.go
│   ├── memcache_rpc.go
│   ├── memcache_test.go
│   └── rfc5102_model.go
├── kubernetes
│   └── deploy.yaml
├── mirror
│   ├── doc.go
│   ├── ipv4.go
│   ├── ipv6.go
│   ├── mirror.go
│   ├── mirror_test.go
│   └── udp.go
├── monitor
│   ├── README.md
│   ├── monitor.go
│   └── store
│       ├── doc.go
│       ├── influxdb.go
│       ├── store.go
│       └── tsdb.go
├── netflow
│   ├── v5
│   │   ├── decoder.go
│   │   ├── decoder_test.go
│   │   ├── doc.go
│   │   └── marshal.go
│   └── v9
│       ├── decoder.go
│       ├── decoder_test.go
│       ├── doc.go
│       ├── marshal.go
│       └── memcache.go
├── packet
│   ├── doc.go
│   ├── ethernet.go
│   ├── ethernet_test.go
│   ├── icmp.go
│   ├── network.go
│   ├── network_test.go
│   ├── packet.go
│   ├── packet_test.go
│   ├── transport.go
│   └── transport_test.go
├── producer
│   ├── doc.go
│   ├── nats.go
│   ├── nsq.go
│   ├── producer.go
│   ├── producer_test.go
│   ├── rawSocket.go
│   ├── sarama.go
│   └── segmentio.go
├── reader
│   ├── reader.go
│   └── reader_test.go
├── scripts
│   ├── dockerStart.sh
│   ├── dpkg
│   │   └── DEBIAN
│   │       ├── control
│   │       └── copyright
│   ├── ipfix.elements
│   ├── kafka.conf
│   ├── prometheus.yml
│   ├── rpmbuild
│   │   └── SPECS
│   │       └── vflow.spec
│   ├── vflow.conf
│   ├── vflow.logrotate
│   ├── vflow.monit
│   ├── vflow.service
│   └── vflow.supervisor
├── sflow
│   ├── decoder.go
│   ├── decoder_test.go
│   ├── doc.go
│   ├── flow_counter.go
│   └── flow_sample.go
├── stress
│   ├── README.md
│   ├── hammer
│   │   ├── doc.go
│   │   ├── hammer.go
│   │   ├── hammer_test.go
│   │   ├── ipfix_samples.go
│   │   └── sflow_samples.go
│   └── stress.go
└── vflow
    ├── ipfix.go
    ├── ipfix_test.go
    ├── ipfix_unix.go
    ├── ipfix_windows.go
    ├── netflow_v5.go
    ├── netflow_v9.go
    ├── options.go
    ├── sflow.go
    ├── sflow_unix.go
    ├── stats.go
    └── vflow.go
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: "vflow"
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | paths-ignore:
8 | - "docs/**"
9 | - "**.md"
10 | pull_request:
11 | branches:
12 | - master
13 | paths-ignore:
14 | - "docs/**"
15 | - "**.md"
16 |
17 | jobs:
18 | test:
19 | name: Test
20 | runs-on: ubuntu-latest
21 | steps:
22 | - name: Set up Golang
23 | uses: actions/setup-go@v2
24 | with:
25 | go-version: ^1.22
26 |
27 | - name: Check out code
28 | uses: actions/checkout@v2
29 |
30 | - name: Test
31 | run: go test -v ./... -timeout 1m
32 | - name: Build
33 | run: make build
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | stress/
2 | vflow/vflow
3 | .DS_Store
4 |
5 | # VSCode
6 | .vs/
7 |
8 | # Intellij
9 | .idea/
10 | *.iml
11 | *.iws
12 | *~
13 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to vFlow
2 |
3 | vFlow is an open source project. We appreciate your help!
4 |
5 | - Fork the project on github.com.
6 | - Create a new branch.
7 | - Commit changes to the new branch.
8 | - Send a pull request.
9 |
10 | ## Filing issues
11 |
12 | Please make sure to answer the questions below:
13 |
14 | - What device or software are you using with vFlow?
15 | - What operating system are you using?
16 | - Did you compile the code? If so, which Go version?
17 | - What release version are you using?
18 | - Explain the details of the issue.
19 |
20 |
21 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # build vFlow in the first stage
2 | FROM golang:1.15.3 as builder
3 | WORKDIR /go/src/
4 |
5 | RUN mkdir -p github.com/EdgeCast/vflow
6 | ADD . github.com/EdgeCast/vflow
7 | WORKDIR /go/src/github.com/EdgeCast/vflow
8 | RUN make build
9 |
10 | # run vFlow within alpine in the second stage
11 | FROM alpine:latest
12 | COPY --from=builder /go/src/github.com/EdgeCast/vflow/vflow/vflow /usr/bin/
13 | COPY scripts/dockerStart.sh /dockerStart.sh
14 |
15 | EXPOSE 4739 6343 9996 4729 8081
16 |
17 | VOLUME /etc/vflow
18 |
19 | CMD sh /dockerStart.sh
20 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | VERSION= 0.9.0
2 | PACKAGES= $(shell find . -name '*.go' -print0 | xargs -0 -n1 dirname | sort --unique)
3 | LDFLAGS= -ldflags "-X main.version=${VERSION}"
4 | DEBPATH= scripts/dpkg
5 | RPMPATH= scripts/rpmbuild
6 | ARCH=`uname -m`
7 |
8 | default: test
9 |
10 | test:
11 | go test -v ./... -timeout 1m
12 |
13 | bench:
14 | go test -v ./... -bench=. -timeout 2m
15 |
16 | run: build
17 | cd vflow; ./vflow -sflow-workers 100 -ipfix-workers 100
18 |
19 | debug: build
20 | cd vflow; ./vflow -sflow-workers 100 -ipfix-workers 100 -verbose=true
21 |
22 | gctrace: build
23 | cd vflow; env GODEBUG=gctrace=1 ./vflow -sflow-workers 100 -ipfix-workers 100
24 |
25 | lint:
26 | golint ./...
27 |
28 | cyclo:
29 | gocyclo -over 15 $(PACKAGES)
30 |
31 | errcheck:
32 | errcheck ./...
33 |
34 | tools:
35 | go get github.com/golang/lint/golint
36 | go get github.com/kisielk/errcheck
37 | go get github.com/alecthomas/gocyclo
38 |
39 | build:
40 | cd vflow; CGO_ENABLED=0 go build $(LDFLAGS)
41 | cd stress; CGO_ENABLED=0 go build
42 |
43 | dpkg: build
44 | mkdir -p ${DEBPATH}/etc/init.d ${DEBPATH}/etc/logrotate.d
45 | mkdir -p ${DEBPATH}/etc/vflow ${DEBPATH}/usr/share/doc/vflow
46 | mkdir -p ${DEBPATH}/usr/bin ${DEBPATH}/usr/local/vflow
47 | sed -i 's/%VERSION%/${VERSION}/' ${DEBPATH}/DEBIAN/control
48 | cp vflow/vflow ${DEBPATH}/usr/bin/
49 | cp stress/stress ${DEBPATH}/usr/bin/vflow_stress
50 | cp scripts/vflow.service ${DEBPATH}/etc/init.d/vflow
51 | cp scripts/vflow.logrotate ${DEBPATH}/etc/logrotate.d/vflow
52 | cp scripts/vflow.conf ${DEBPATH}/etc/vflow/vflow.conf
53 | cp scripts/kafka.conf ${DEBPATH}/etc/vflow/mq.conf
54 | cp scripts/ipfix.elements ${DEBPATH}/etc/vflow/
55 | cp ${DEBPATH}/DEBIAN/copyright ${DEBPATH}/usr/share/doc/vflow/
56 | cp LICENSE ${DEBPATH}/usr/share/doc/vflow/license
57 | dpkg-deb -b ${DEBPATH}
58 | mv ${DEBPATH}.deb scripts/vflow-${VERSION}-${ARCH}.deb
59 | sed -i 's/${VERSION}/%VERSION%/' ${DEBPATH}/DEBIAN/control
60 |
61 | rpm: build
62 | sed -i 's/%VERSION%/${VERSION}/' ${RPMPATH}/SPECS/vflow.spec
63 | rm -rf ${RPMPATH}/SOURCES/
64 | mkdir ${RPMPATH}/SOURCES/
65 | cp vflow/vflow ${RPMPATH}/SOURCES/
66 | cp stress/stress ${RPMPATH}/SOURCES/vflow_stress
67 | cp scripts/vflow.conf ${RPMPATH}/SOURCES/
68 | cp scripts/vflow.service ${RPMPATH}/SOURCES/
69 | cp scripts/vflow.logrotate ${RPMPATH}/SOURCES/
70 | cp scripts/kafka.conf ${RPMPATH}/SOURCES/mq.conf
71 | cp scripts/ipfix.elements ${RPMPATH}/SOURCES/
72 | cp LICENSE ${RPMPATH}/SOURCES/license
73 | cp NOTICE ${RPMPATH}/SOURCES/notice
74 | apt-get install rpm
75 | rpmbuild -ba ${RPMPATH}/SPECS/vflow.spec --define "_topdir `pwd`/scripts/rpmbuild"
76 | sed -i 's/${VERSION}/%VERSION%/' ${RPMPATH}/SPECS/vflow.spec
77 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | ==============================================================
2 | vFlow
3 | Copyright (C) 2017 Verizon.
4 | ==============================================================
5 |
6 | This product includes software developed by
7 | Verizon (www.verizon.com; www.github.com/Verizon).
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | ##
3 | [](https://github.com/EdgeCast/vflow/actions?query=workflow%3Avflow) [](https://goreportcard.com/report/github.com/EdgeCast/vflow) [](https://pkg.go.dev/github.com/EdgeCast/vflow)
4 |
5 | High-performance, scalable and reliable IPFIX, sFlow and Netflow collector (written in pure Golang).
6 |
7 | ## Features
8 | - IPFIX RFC7011 collector
9 | - sFlow v5 raw header / counters collector
10 | - Netflow v5 collector
11 | - Netflow v9 collector
12 | - Decoding sFlow raw header L2/L3/L4
13 | - Produce to Apache Kafka, NSQ, NATS
14 | - Replicate IPFIX and sFlow to third-party collectors
15 | - Supports IPv4 and IPv6
16 | - Monitoring via Prometheus and RESTful API
17 |
18 | 
19 |
20 | ## Documentation
21 | - [Architecture](/docs/design.md).
22 | - [Configuration](/docs/config.md).
23 | - [Quick Start](/docs/quick_start_nsq.md).
24 | - [JUNOS Integration](/docs/junos_integration.md).
25 | - [Monitoring](/monitor/README.md).
26 | - [Stress / Load Generator](/stress/README.md).
27 | - [Kafka consumer examples](https://github.com/EdgeCast/vflow/tree/master/consumers).
28 |
29 | ## Decoded IPFIX data
30 | The IPFIX data is decoded to JSON, and the IDs are [IANA IPFIX element IDs](http://www.iana.org/assignments/ipfix/ipfix.xhtml):
31 | ```json
32 | {"AgentID":"192.168.21.15","Header":{"Version":10,"Length":420,"ExportTime":1483484642,"SequenceNo":1434533677,"DomainID":32771},"DataSets":[[{"I":8,"V":"192.16.28.217"},{"I":12,"V":"180.10.210.240"},{"I":5,"V":2},{"I":4,"V":6},{"I":7,"V":443},{"I":11,"V":64381},{"I":32,"V":0},{"I":10,"V":811},{"I":58,"V":0},{"I":9,"V":24},{"I":13,"V":20},{"I":16,"V":4200000000},{"I":17,"V":27747},{"I":15,"V":"180.105.10.210"},{"I":6,"V":"0x10"},{"I":14,"V":1113},{"I":1,"V":22500},{"I":2,"V":15},{"I":52,"V":63},{"I":53,"V":63},{"I":152,"V":1483484581770},{"I":153,"V":1483484622384},{"I":136,"V":2},{"I":243,"V":0},{"I":245,"V":0}]]}
33 | ```
34 |
35 | ## Decoded sFlow data
36 | ```json
37 | {"Version":5,"IPVersion":1,"AgentSubID":5,"SequenceNo":37591,"SysUpTime":3287084017,"SamplesNo":1,"Samples":[{"SequenceNo":1530345639,"SourceID":0,"SamplingRate":4096,"SamplePool":1938456576,"Drops":0,"Input":536,"Output":728,"RecordsNo":3,"Records":{"ExtRouter":{"NextHop":"115.131.251.90","SrcMask":24,"DstMask":14},"ExtSwitch":{"SrcVlan":0,"SrcPriority":0,"DstVlan":0,"DstPriority":0},"RawHeader":{"L2":{"SrcMAC":"58:00:bb:e7:57:6f","DstMAC":"f4:a7:39:44:a8:27","Vlan":0,"EtherType":2048},"L3":{"Version":4,"TOS":0,"TotalLen":1452,"ID":13515,"Flags":0,"FragOff":0,"TTL":62,"Protocol":6,"Checksum":8564,"Src":"10.1.8.5","Dst":"161.140.24.181"},"L4":{"SrcPort":443,"DstPort":56521,"DataOffset":5,"Reserved":0,"Flags":16}}}}],"IPAddress":"192.168.10.0","ColTime": 1646157296}
38 | ```
39 | ## Decoded Netflow v5 data
40 | ``` json
41 | {"AgentID":"114.23.3.231","Header":{"Version":5,"Count":3,"SysUpTimeMSecs":51469784,"UNIXSecs":1544476581,"UNIXNSecs":0,"SeqNum":873873830,"EngType":0,"EngID":0,"SmpInt":1000},"Flows":[{"SrcAddr":"125.238.46.48","DstAddr":"114.23.236.96","NextHop":"114.23.3.231","Input":791,"Output":817,"PktCount":4,"L3Octets":1708,"StartTime":51402145,"EndTime":51433264,"SrcPort":49233,"DstPort":443,"Padding1":0,"TCPFlags":16,"ProtType":6,"Tos":0,"SrcAsNum":4771,"DstAsNum":56030,"SrcMask":20,"DstMask":22,"Padding2":0},{"SrcAddr":"125.238.46.48","DstAddr":"114.23.236.96","NextHop":"114.23.3.231","Input":791,"Output":817,"PktCount":1,"L3Octets":441,"StartTime":51425137,"EndTime":51425137,"SrcPort":49233,"DstPort":443,"Padding1":0,"TCPFlags":24,"ProtType":6,"Tos":0,"SrcAsNum":4771,"DstAsNum":56030,"SrcMask":20,"DstMask":22,"Padding2":0},{"SrcAddr":"210.5.53.48","DstAddr":"103.22.200.210","NextHop":"122.56.118.157","Input":564,"Output":802,"PktCount":1,"L3Octets":1500,"StartTime":51420072,"EndTime":51420072,"SrcPort":80,"DstPort":56108,"Padding1":0,"TCPFlags":16,"ProtType":6,"Tos":0,"SrcAsNum":56030,"DstAsNum":13335,"SrcMask":24,"DstMask":23,"Padding2":0}]}
42 | ```
43 | ## Decoded Netflow v9 data
44 | ```json
45 | {"AgentID":"10.81.70.56","Header":{"Version":9,"Count":1,"SysUpTime":357280,"UNIXSecs":1493918653,"SeqNum":14,"SrcID":87},"DataSets":[[{"I":1,"V":"0x00000050"},{"I":2,"V":"0x00000002"},{"I":4,"V":2},{"I":5,"V":192},{"I":6,"V":"0x00"},{"I":7,"V":0},{"I":8,"V":"10.81.70.56"},{"I":9,"V":0},{"I":10,"V":0},{"I":11,"V":0},{"I":12,"V":"224.0.0.22"},{"I":13,"V":0},{"I":14,"V":0},{"I":15,"V":"0.0.0.0"},{"I":16,"V":0},{"I":17,"V":0},{"I":21,"V":300044},{"I":22,"V":299144}]]}
46 | ```
47 |
48 | ## Supported platform
49 | - Linux
50 | - Windows
51 |
52 | ## Build
53 | Given that the Go compiler is installed, you can build vFlow with:
54 | ```
55 | go get github.com/EdgeCast/vflow/vflow
56 | cd $GOPATH/src/github.com/EdgeCast/vflow
57 |
58 | make build
59 | or
60 | cd vflow; go build
61 | ```
62 | ## Installation
63 | You can download and install the pre-built Debian package as shown below ([RPM and Linux binary are also available](https://github.com/EdgeCast/vflow/releases)).
64 |
65 | dpkg -i [vflow-0.9.0-x86_64.deb](https://github.com/EdgeCast/vflow/releases/download/v0.9.0/vflow-0.9.0-x86_64.deb)
66 |
67 | Once installed, configure the files below; for more information, check the [configuration guide](/docs/config.md):
68 | ```
69 | /etc/vflow/vflow.conf
70 | /etc/vflow/mq.conf
71 | ```
72 | You can then start the service:
73 | ```
74 | service vflow start
75 | ```
76 |
77 | ## Kubernetes
78 | ```
79 | kubectl apply -f https://raw.githubusercontent.com/EdgeCast/vflow/master/kubernetes/deploy.yaml
80 | ```
81 |
82 | ## Docker
83 | ```
84 | docker run -d -p 2181:2181 -p 9092:9092 spotify/kafka
85 | docker run -d -p 4739:4739 -p 4729:4729 -p 6343:6343 -p 8081:8081 -e VFLOW_KAFKA_BROKERS="172.17.0.1:9092" mehrdadrad/vflow
86 | ```
87 |
88 | ## License
89 | Licensed under the Apache License, Version 2.0 (the "License")
90 |
91 | ## Contribute
92 | vFlow welcomes any kind of contribution; please follow these steps:
93 |
94 | - Fork the project on github.com.
95 | - Create a new branch.
96 | - Commit changes to the new branch.
97 | - Send a pull request.
98 |
--------------------------------------------------------------------------------
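The decoded IPFIX JSON shown in the README maps naturally onto a pair of Go structs, mirroring the ones used by the Kafka consumer examples under `consumers/`. A minimal sketch (not part of the repository) that unmarshals one message and filters on element ID 12 (destinationIPv4Address):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// dataField is one {I, V} pair: I is the IANA IPFIX element ID, V the decoded value.
type dataField struct {
	I int
	V interface{}
}

// ipfix mirrors the top level of a decoded message (header fields omitted here).
type ipfix struct {
	AgentID  string
	DataSets [][]dataField
}

func main() {
	msg := []byte(`{"AgentID":"192.168.21.15","DataSets":[[{"I":8,"V":"192.16.28.217"},{"I":12,"V":"180.10.210.240"}]]}`)

	var flow ipfix
	if err := json.Unmarshal(msg, &flow); err != nil {
		log.Fatal(err)
	}

	for _, set := range flow.DataSets {
		for _, f := range set {
			if f.I == 12 { // destinationIPv4Address
				fmt.Println(flow.AgentID, "->", f.V)
			}
		}
	}
}
```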
/consumers/cli/README.md:
--------------------------------------------------------------------------------
1 | ## Command line Kafka consumer without any back-end.
2 | This is an example of a vFlow IPFIX CLI consumer. It filters only on the destination IP address by default, but you can change the element ID through the CLI to any IANA IPFIX element ID.
3 |
4 | ### Build
5 |
6 | ```
7 | go get -d ./...
8 | go build main.go
9 | ```
10 |
--------------------------------------------------------------------------------
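The consumer's flags are defined in `main.go` below (`-broker`, `-topic`, `-id`, `-value`, `-debug`, `-workers`). An illustrative invocation, assuming a local Kafka broker and filtering on destination IP (element ID 12):

```
./main -broker 127.0.0.1:9092 -topic vflow.ipfix -id 12 -value 8.8.8.8 -workers 16 -debug
```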
/consumers/cli/main.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: main.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 05/25/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | // Package main is the command line vflow IPFIX consumer with simple filter feature
24 | package main
25 |
26 | import (
27 | "encoding/json"
28 | "flag"
29 | "log"
30 | "sync"
31 | "time"
32 |
33 | cluster "github.com/bsm/sarama-cluster"
34 | )
35 |
36 | type options struct {
37 | Broker string
38 | Topic string
39 | Id int
40 | Value string
41 | Debug bool
42 | Workers int
43 | }
44 |
45 | type dataField struct {
46 | I int
47 | V interface{}
48 | }
49 |
50 | type ipfix struct {
51 | AgentID string
52 | DataSets [][]dataField
53 | }
54 |
55 | var opts options
56 |
57 | func init() {
58 | flag.StringVar(&opts.Broker, "broker", "127.0.0.1:9092", "broker ipaddress:port")
59 | flag.StringVar(&opts.Topic, "topic", "vflow.ipfix", "kafka topic")
60 | flag.StringVar(&opts.Value, "value", "8.8.8.8", "element value - string")
61 | flag.BoolVar(&opts.Debug, "debug", false, "enabled/disabled debug")
62 | flag.IntVar(&opts.Id, "id", 12, "IPFIX element ID")
63 | flag.IntVar(&opts.Workers, "workers", 16, "workers number / partition number")
64 |
65 | flag.Parse()
66 | }
67 |
68 | func main() {
69 | var wg sync.WaitGroup
70 |
71 | config := cluster.NewConfig()
72 | config.Consumer.Return.Errors = true
73 | config.Group.Return.Notifications = true
74 |
75 | wg.Add(opts.Workers)
76 |
77 | for i := 0; i < opts.Workers; i++ {
78 | go func(ti int) {
79 | var objmap ipfix
80 |
81 | brokers := []string{opts.Broker}
82 | topics := []string{opts.Topic}
83 | consumer, err := cluster.NewConsumer(brokers, "mygroup", topics, config)
84 |
85 | if err != nil {
86 | panic(err)
87 | }
88 | defer consumer.Close()
89 |
90 | pCount := 0
91 | count := 0
92 | tik := time.Tick(10 * time.Second)
93 |
94 | for {
95 | select {
96 | case <-tik:
97 | if opts.Debug {
98 | log.Printf("partition GroupId#%d, rate=%d\n", ti, (count-pCount)/10)
99 | }
100 | pCount = count
101 | case msg, more := <-consumer.Messages():
102 | if more {
103 | if err := json.Unmarshal(msg.Value, &objmap); err != nil {
104 | log.Println(err)
105 | } else {
106 | for _, data := range objmap.DataSets {
107 | for _, dd := range data {
108 | if dd.I == opts.Id && dd.V == opts.Value {
109 | log.Printf("%#v\n", data)
110 | }
111 | }
112 | }
113 | }
114 |
115 | consumer.MarkOffset(msg, "")
116 | count++
117 | }
118 | }
119 | }
120 | }(i)
121 | }
122 |
123 | wg.Wait()
124 | }
125 |
--------------------------------------------------------------------------------
/consumers/clickhouse/README.md:
--------------------------------------------------------------------------------
1 | ## ClickHouse, Apache Kafka
2 | ClickHouse is an open-source column-oriented database management system capable of generating analytical data reports in real time using SQL queries. ClickHouse's performance exceeds that of comparable column-oriented DBMSs currently available on the market. It processes hundreds of millions to more than a billion rows, and tens of gigabytes of data, per single server per second. ClickHouse uses all available hardware to its full potential to process each query as fast as possible. The peak processing performance for a single query (after decompression, only used columns) stands at more than 2 terabytes per second. (https://clickhouse.yandex/)
3 | 
4 | The ClickHouse setup below needs a ZooKeeper server; the replica server is optional.
5 |
6 | ### Configuration (/etc/clickhouse-server/config.xml)
7 | Configure at least one ZooKeeper host (the replica server is optional):
8 |
9 | ```
10 | <zookeeper>
11 |     <node>
12 |         <host>zk001</host>
13 |         <port>2181</port>
14 |     </node>
15 |     <session_timeout_ms>1000</session_timeout_ms>
16 | </zookeeper>
17 |
18 | <remote_servers>
19 |     <vflow> <!-- cluster name -->
20 |         <shard>
21 |             <weight>1</weight>
22 |             <internal_replication>false</internal_replication>
23 |             <replica>
24 |                 <host>CLICKHOUSE_SRV1</host>
25 |                 <port>9000</port>
26 |             </replica>
27 |         </shard>
28 |     </vflow>
29 | </remote_servers>
30 | ```
31 |
32 | ### Create Database
33 | ```
34 | CREATE DATABASE vflow
35 | ```
36 |
37 | ### Create Table
38 | ```
39 | CREATE TABLE vflow.samples
40 | (
41 | date Date,
42 | time DateTime,
43 | device String,
44 | src String,
45 | dst String,
46 | srcASN UInt64,
47 | dstASN UInt64,
48 | proto UInt8
49 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/', '1', date, (src, time), 8192);
50 | ```
51 | ### Build Kafka Consumer
52 | ```
53 | go get -d ./...
54 | go build main.go
55 | ```
56 |
57 | ### Benchmark Details
58 | I tried it with two ClickHouse servers, one for ingest and one for queries. They had the hardware listed below, and the results shown are based on the database configuration above.
59 |
60 | Hardware
61 | - CPU Intel Core Processor (Haswell, no TSX) cores = 8, 2.6GHz, x86_64
62 | - Memory 16G
63 | - Drive SSD in software RAID
64 |
65 | 
66 | 
67 |
--------------------------------------------------------------------------------
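Once the consumer below is writing into `vflow.samples`, the data can be read back with the same `database/sql` + clickhouse-go driver that `main.go` uses. A minimal sketch (not part of the repository; the DSN, the 5-minute window and the LIMIT are arbitrary choices):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/ClickHouse/clickhouse-go" // registers the "clickhouse" driver
)

func main() {
	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?debug=false")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Top src/dst pairs over the last five minutes.
	rows, err := db.Query(`
		SELECT src, dst, count() AS flows
		FROM vflow.samples
		WHERE time > now() - 300
		GROUP BY src, dst
		ORDER BY flows DESC
		LIMIT 10`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var src, dst string
		var flows uint64
		if err := rows.Scan(&src, &dst, &flows); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-16s -> %-16s %d\n", src, dst, flows)
	}
}
```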
/consumers/clickhouse/main.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: main.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 06/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | // Package main is the vflow IPFIX consumer for the ClickHouse database (https://clickhouse.yandex)
24 | package main
25 |
26 | import (
27 | "database/sql"
28 | "encoding/json"
29 | "flag"
30 | "log"
31 | "sync"
32 | "time"
33 |
34 | "github.com/ClickHouse/clickhouse-go"
35 | cluster "github.com/bsm/sarama-cluster"
36 | )
37 |
38 | type options struct {
39 | Broker string
40 | Topic string
41 | Debug bool
42 | Workers int
43 | }
44 |
45 | type dataField struct {
46 | I int
47 | V interface{}
48 | }
49 |
50 | type ipfix struct {
51 | AgentID string
52 | DataSets [][]dataField
53 | }
54 |
55 | type dIPFIXSample struct {
56 | device string
57 | src string
58 | dst string
59 | srcASN uint64
60 | dstASN uint64
61 | proto uint8
62 | }
63 |
64 | var opts options
65 |
66 | func init() {
67 | flag.StringVar(&opts.Broker, "broker", "127.0.0.1:9092", "broker ipaddress:port")
68 | flag.StringVar(&opts.Topic, "topic", "vflow.ipfix", "kafka topic")
69 | flag.BoolVar(&opts.Debug, "debug", false, "enabled/disabled debug")
70 | flag.IntVar(&opts.Workers, "workers", 16, "workers number / partition number")
71 |
72 | flag.Parse()
73 | }
74 |
75 | func main() {
76 | var (
77 | wg sync.WaitGroup
78 | ch = make(chan ipfix, 10000)
79 | )
80 |
81 | config := cluster.NewConfig()
82 | config.Consumer.Return.Errors = true
83 | config.Group.Return.Notifications = true
84 |
85 | for i := 0; i < 5; i++ {
86 | go ingestClickHouse(ch)
87 | }
88 |
89 | wg.Add(opts.Workers)
90 |
91 | for i := 0; i < opts.Workers; i++ {
92 | go func(ti int) {
93 | brokers := []string{opts.Broker}
94 | topics := []string{opts.Topic}
95 | consumer, err := cluster.NewConsumer(brokers, "mygroup", topics, config)
96 |
97 | if err != nil {
98 | panic(err)
99 | }
100 | defer consumer.Close()
101 |
102 | pCount := 0
103 | count := 0
104 | tik := time.Tick(10 * time.Second)
105 |
106 | for {
107 | select {
108 | case <-tik:
109 | if opts.Debug {
110 | log.Printf("partition GroupId#%d, rate=%d\n", ti, (count-pCount)/10)
111 | }
112 | pCount = count
113 | case msg, more := <-consumer.Messages():
114 | objmap := ipfix{}
115 | if more {
116 | if err := json.Unmarshal(msg.Value, &objmap); err != nil {
117 | log.Println(err)
118 | } else {
119 | ch <- objmap
120 | }
121 | consumer.MarkOffset(msg, "")
122 | count++
123 | }
124 | }
125 | }
126 | }(i)
127 | }
128 |
129 | wg.Wait()
130 | }
131 |
132 | func ingestClickHouse(ch chan ipfix) {
133 | var sample ipfix
134 |
135 | connect, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?debug=false")
136 | if err != nil {
137 | log.Fatal(err)
138 | }
139 | if err := connect.Ping(); err != nil {
140 | if exception, ok := err.(*clickhouse.Exception); ok {
141 | log.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
142 | } else {
143 | log.Println(err)
144 | }
145 | return
146 | }
147 |
148 | for {
149 | tx, err := connect.Begin()
150 | if err != nil {
151 | log.Fatal(err)
152 | }
153 | stmt, err := tx.Prepare("INSERT INTO vflow.samples (date,time,device,src,dst,srcASN,dstASN, proto) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")
154 | if err != nil {
155 | log.Fatal(err)
156 | }
157 |
158 | for i := 0; i < 10000; i++ {
159 |
160 | sample = <-ch
161 | for _, data := range sample.DataSets {
162 | s := dIPFIXSample{}
163 | for _, dd := range data {
164 | switch dd.I {
165 | case 8, 27:
166 | s.src = dd.V.(string)
167 | case 12, 28:
168 | s.dst = dd.V.(string)
169 | case 16:
170 | s.srcASN = uint64(dd.V.(float64))
171 | case 17:
172 | s.dstASN = uint64(dd.V.(float64))
173 | case 4:
174 | s.proto = uint8(dd.V.(float64))
175 | }
176 | }
177 | if _, err := stmt.Exec(
178 | time.Now(),
179 | time.Now(),
180 | sample.AgentID,
181 | s.src,
182 | s.dst,
183 | s.srcASN,
184 | s.dstASN,
185 | s.proto,
186 | ); err != nil {
187 | log.Fatal(err)
188 | }
189 |
190 | }
191 | }
192 |
193 | go func(tx *sql.Tx) {
194 | if err := tx.Commit(); err != nil {
195 | log.Fatal(err)
196 | }
197 | }(tx)
198 | }
199 | }
200 |
--------------------------------------------------------------------------------
/consumers/clickhouse/schema.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE samples
2 | (
3 | date Date,
4 | time DateTime,
5 | device String,
6 | src String,
7 | dst String,
8 | srcASN UInt64,
9 | dstASN UInt64,
10 | proto UInt8
11 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/1/', '1', date, (src, time), 8192);
12 |
--------------------------------------------------------------------------------
/consumers/memsql/README.md:
--------------------------------------------------------------------------------
1 | ## MemSQL Pipeline, Apache Kafka
2 | MemSQL v5.8 has a built-in Kafka consumer, so you don't need to spend time developing one; you can create the pipeline very quickly from the schema and transform script below. I tried it while vFlow was producing 75k-100k messages per second and MemSQL was installed on a Dell G6 with 32 cores; it was able to consume without lag. I use a shell script to clean up the database so that it keeps only the last 5-10 minutes of flows.
3 |
4 | 
5 |
6 | ```
7 | memsql> select * from samples order by bytes desc limit 20;
8 | +----------------+-----------------+-----------------+--------+--------+-------+---------+---------+----------+--------+---------------------+
9 | | device | src | dst | srcASN | dstASN | proto | srcPort | dstPort | tcpFlags | bytes | datetime |
10 | +----------------+-----------------+-----------------+--------+--------+-------+---------+---------+----------+--------+---------------------+
11 | | 192.129.230.0 | 87.11.81.121 | 61.231.215.18 | 131780 | 21773 | 6 | 80 | 64670 | 0x10 | 342000 | 2017-04-27 22:05:55 |
12 | | 52.20.79.116 | 87.11.81.100 | 216.38.140.154 | 41171 | 7994 | 6 | 443 | 26798 | 0x18 | 283364 | 2017-04-27 22:06:00 |
13 | | 52.20.79.116 | 192.229.211.70 | 50.240.197.150 | 41171 | 33651 | 6 | 80 | 23397 | 0x10 | 216000 | 2017-04-27 22:05:55 |
14 | | 108.161.249.16 | 152.125.33.113 | 74.121.78.10 | 13768 | 9551 | 6 | 80 | 49217 | 0x18 | 196500 | 2017-04-27 22:05:59 |
15 | | 192.229.130.0 | 87.21.81.254 | 94.56.54.135 | 132780 | 21773 | 6 | 80 | 52853 | 0x18 | 165000 | 2017-04-27 22:05:55 |
16 | | 108.161.229.96 | 93.184.215.169 | 152.157.32.200 | 12768 | 11430 | 6 | 443 | 50488 | 0x18 | 86400 | 2017-04-27 22:06:01 |
17 | | 52.22.49.106 | 122.229.210.189 | 99.31.208.183 | 22171 | 8018 | 6 | 443 | 33059 | 0x18 | 73500 | 2017-04-27 22:05:55 |
18 | | 52.22.49.126 | 81.21.81.131 | 66.215.169.120 | 22171 | 20115 | 6 | 80 | 57468 | 0x10 | 66000 | 2017-04-27 22:05:59 |
19 | | 108.160.149.96 | 94.184.215.151 | 123.90.233.120 | 16768 | 14476 | 6 | 80 | 63905 | 0x18 | 65540 | 2017-04-27 22:05:57 |
20 | | 52.22.79.116 | 162.129.210.181 | 60.180.253.156 | 21271 | 31651 | 6 | 443 | 59652 | 0x18 | 64805 | 2017-04-27 22:06:00 |
21 | | 108.161.149.90 | 93.184.215.169 | 80.96.58.146 | 13868 | 22394 | 6 | 443 | 1151 | 0x18 | 59976 | 2017-04-27 22:05:54 |
22 | | 102.232.179.20 | 111.18.232.131 | 121.62.44.149 | 24658 | 4771 | 6 | 80 | 61076 | 0x10 | 59532 | 2017-04-27 22:05:54 |
23 | | 102.232.179.20 | 192.129.145.6 | 110.49.221.232 | 24658 | 4804 | 6 | 443 | 50002 | 0x10 | 58500 | 2017-04-27 22:05:55 |
24 | | 102.232.179.20 | 192.129.232.112 | 124.132.217.101 | 24658 | 43124 | 6 | 443 | 37686 | 0x10 | 57000 | 2017-04-27 22:06:00 |
25 | | 192.229.230.0 | 87.11.81.253 | 219.147.144.22 | 132380 | 2900 | 6 | 80 | 25202 | 0x18 | 56120 | 2017-04-27 22:05:58 |
26 | | 192.129.130.0 | 87.21.11.200 | 180.239.187.151 | 132380 | 8151 | 6 | 443 | 55062 | 0x18 | 52220 | 2017-04-27 22:05:59 |
27 | | 52.12.79.126 | 87.21.11.254 | 64.30.125.221 | 21071 | 14051 | 6 | 80 | 57072 | 0x10 | 51000 | 2017-04-27 22:05:54 |
28 | | 192.229.110.1 | 150.195.33.40 | 98.171.170.51 | 132980 | 28773 | 6 | 80 | 53270 | 0x18 | 51000 | 2017-04-27 22:05:57 |
29 | | 192.229.110.1 | 87.21.81.254 | 68.96.162.21 | 132980 | 28773 | 6 | 80 | 46727 | 0x18 | 49500 | 2017-04-27 22:06:01 |
30 | | 52.22.59.110 | 192.129.210.181 | 151.203.130.228 | 21271 | 12452 | 6 | 80 | 43720 | 0x18 | 49500 | 2017-04-27 22:05:55 |
31 | +----------------+-----------------+-----------------+--------+--------+-------+---------+---------+----------+--------+---------------------+
32 | 20 rows in set (0.06 sec)
33 | ```
34 |
--------------------------------------------------------------------------------
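MemSQL speaks the MySQL wire protocol, so the `samples` table filled by the pipeline can also be queried from Go with the standard `database/sql` package and a MySQL driver. A minimal sketch (not part of the repository; the DSN and credentials are assumptions to adjust for your setup), running the same top-20-by-bytes query shown above:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver; MemSQL is wire-compatible
)

func main() {
	// Assumed DSN: user "root", no password, local MemSQL on the default port.
	db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/vflow")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT device, src, dst, bytes FROM samples ORDER BY bytes DESC LIMIT 20")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var device, src, dst string
		var bytes int64
		if err := rows.Scan(&device, &src, &dst, &bytes); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-16s %-16s -> %-16s %d bytes\n", device, src, dst, bytes)
	}
}
```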
/consumers/memsql/schema.sql:
--------------------------------------------------------------------------------
1 | DROP DATABASE IF EXISTS vflow;
2 | CREATE DATABASE vflow;
3 | USE vflow;
4 |
5 | CREATE TABLE `samples` (
6 | `device` varchar(20) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
7 | `src` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
8 | `dst` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
9 | `nextHop` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
10 | `srcASN` int(11) UNSIGNED DEFAULT NULL,
11 | `dstASN` int(11) UNSIGNED DEFAULT NULL,
12 | `proto` int(11) DEFAULT NULL,
13 | `srcPort` int(11) DEFAULT NULL,
14 | `dstPort` int(11) DEFAULT NULL,
15 | `tcpFlags` varchar(10) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL,
16 | `ingressIf` int(11) DEFAULT NULL,
17 | `egressIf` int(11) DEFAULT NULL,
18 | `bytes` int(11) DEFAULT NULL,
19 | `datetime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
20 | /*!90618 , SHARD KEY () */
21 | )
22 |
--------------------------------------------------------------------------------
/consumers/memsql/transform.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #: ----------------------------------------------------------------------------
3 | #: Copyright (C) 2017 Verizon. All Rights Reserved.
4 | #: All Rights Reserved
5 | #:
6 | #: file: transform.py
7 | #: details: memsql pipline transform python script
8 | #: author: Mehrdad Arshad Rad
9 | #: date: 04/27/2017
10 | #:
11 | #: Licensed under the Apache License, Version 2.0 (the "License");
12 | #: you may not use this file except in compliance with the License.
13 | #: You may obtain a copy of the License at
14 | #:
15 | #: http://www.apache.org/licenses/LICENSE-2.0
16 | #:
17 | #: Unless required by applicable law or agreed to in writing, software
18 | #: distributed under the License is distributed on an "AS IS" BASIS,
19 | #: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
20 | #: See the License for the specific language governing permissions and
21 | #: limitations under the License.
22 | #: ----------------------------------------------------------------------------
23 |
24 | import json
25 | import struct
26 | import sys
27 | import time
28 |
29 |
30 | def transform_records():
31 | while True:
32 | byte_len = sys.stdin.read(8)
33 | if len(byte_len) == 8:
34 | byte_len = struct.unpack("L", byte_len)[0]
35 | result = sys.stdin.read(byte_len)
36 | yield result
37 | else:
38 | assert len(byte_len) == 0, byte_len
39 | return
40 |
41 | for records in transform_records():
42 | flows = json.loads(records)
43 | exported_time = time.strftime('%Y-%m-%d %H:%M:%S',
44 | time.localtime(flows["Header"]["ExportTime"]))
45 |
46 | try:
47 | for flow in flows["DataSets"]:
48 | sourceIPAddress = "unknown"
49 | destinationIPAddress = "unknown"
50 | bgpSourceAsNumber = "unknown"
51 | bgpDestinationAsNumber = "unknown"
52 | protocolIdentifier = 0
53 | sourceTransportPort = 0
54 | destinationTransportPort = 0
55 | tcpControlBits = "unknown"
56 | ipNextHopIPAddress = "unknown"
57 | octetDeltaCount = 0
58 | ingressInterface = 0
59 | egressInterface = 0
60 |
61 | for field in flow:
62 | if field["I"] in [214]:
63 | raise
64 | elif field["I"] in [8, 27]:
65 | sourceIPAddress = field["V"]
66 | elif field["I"] in [12, 28]:
67 | destinationIPAddress = field["V"]
68 | elif field["I"] in [15, 62]:
69 | ipNextHopIPAddress = field["V"]
70 | elif field["I"] == 16:
71 | bgpSourceAsNumber = field["V"]
72 | elif field["I"] == 17:
73 | bgpDestinationAsNumber = field["V"]
74 | elif field["I"] == 14:
75 | ingressInterface = field["V"]
76 | elif field["I"] == 10:
77 | egressInterface = field["V"]
78 | elif field["I"] == 7:
79 | sourceTransportPort = field["V"]
80 | elif field["I"] == 11:
81 | destinationTransportPort = field["V"]
82 | elif field["I"] == 4:
83 | protocolIdentifier = field["V"]
84 | elif field["I"] == 6:
85 | tcpControlBits = field["V"]
86 | elif field["I"] == 1:
87 | octetDeltaCount = field["V"]
88 |
89 | out = b"%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" \
90 | % (
91 | flows["AgentID"],
92 | sourceIPAddress,
93 | destinationIPAddress,
94 | ipNextHopIPAddress,
95 | bgpSourceAsNumber,
96 | bgpDestinationAsNumber,
97 | protocolIdentifier,
98 | sourceTransportPort,
99 | destinationTransportPort,
100 | tcpControlBits,
101 | ingressInterface,
102 | egressInterface,
103 | octetDeltaCount,
104 | exported_time,
105 | )
106 |
107 | sys.stdout.write(out)
108 | except:
109 | continue
110 |
--------------------------------------------------------------------------------
/consumers/spark/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | .idea
--------------------------------------------------------------------------------
/consumers/spark/README.md:
--------------------------------------------------------------------------------
1 | The Spark consumer for vFlow data has the following components. This document assumes users have basic knowledge of Spark and Hadoop infrastructure.
2 |
3 | 1. Spark Component (Yarn or Standalone cluster. Works with Pseudo mode also)
4 | 2. HDFS & Hive Component
5 | 3. Presto (Optional)
6 |
7 | vFlow data from Kafka is processed by the Spark component and stored in the HDFS component as Hive tables. The Spark consumer is highly scalable and reliable; it has been tested against Spark 2.1 and uses Scala 2.11.8. This consumer serves as a skeleton for processing vFlow data with Spark, and complex processing and analysis can be built on top of this code. With Presto (https://prestodb.io), billions of entries can be queried/joined in a few minutes or seconds depending on the cluster configuration. Superset can be used for visualization (https://superset.incubator.apache.org). The consumer can also be easily modified to use other storage frameworks.
8 |
9 | # Build
10 | `sbt assembly`
11 |
12 | # Spark Submit
13 | `spark-submit --master <master-url> --class com.oath.vdms.vflow.consumer.spark.driver.IngestStream vflow_spark_consumer-assembly-1.0.jar <topic> <bootstrap_server> <group_id> <format> <tablename>`
14 |
15 | # Example
16 | `spark-submit --master spark://master:7077 --driver-memory 8G --executor-memory 4G --executor-cores 2 --conf "spark.driver.extraJavaOptions=-Dspark.hadoop.dfs.replication=1" --class com.oath.vdms.vflow.consumer.spark.driver.IngestStream vflow_spark_consumer-assembly-1.0.jar vflow.ipfix localhost:9092 consumer-group ORC ipfix-table`
17 |
--------------------------------------------------------------------------------
/consumers/spark/build.sbt:
--------------------------------------------------------------------------------
1 | import AssemblyKeys._
2 |
3 | name := "vflow_spark_consumer"
4 |
5 | version := "1.0"
6 |
7 | scalaVersion := "2.11.8"
8 |
9 |
10 |
11 | libraryDependencies ++= Seq(
12 | "org.apache.spark" %% "spark-streaming-kafka-0-10" % "2.2.0",
13 | "org.apache.spark" %% "spark-core" % "2.1.0",
14 | "org.apache.spark" %% "spark-streaming" % "2.2.0",
15 | "org.apache.spark" %% "spark-sql" % "2.1.1"
16 | )
17 |
18 | assemblySettings
19 |
20 | mergeStrategy in assembly := {
21 | case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
22 | case m if m.toLowerCase.matches("meta-inf.*\\.sf$") => MergeStrategy.discard
23 | case "log4j.properties" => MergeStrategy.discard
24 | case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines
25 | case "reference.conf" => MergeStrategy.concat
26 | case _ => MergeStrategy.first
27 | }
--------------------------------------------------------------------------------
/consumers/spark/project/plugins.sbt:
--------------------------------------------------------------------------------
1 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.11.2")
--------------------------------------------------------------------------------
/consumers/spark/src/main/scala/com/oath/vdms/vflow/consumer/spark/driver/IngestStream.scala:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: IngestStream.scala
6 | //: details: vflow spark consumer
7 | //: author: Satheesh Ravi
8 | //: date: 09/01/2017
9 |
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 |
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package com.oath.vdms.vflow.consumer.spark.driver
24 |
25 |
26 | import com.oath.vdms.vflow.consumer.spark.model.IPFix
27 | import com.oath.vdms.vflow.consumer.spark.util.ParseDataUtil
28 | import org.apache.log4j.Logger
29 | import org.apache.spark.{SparkConf, SparkContext}
30 | import org.apache.spark.streaming.{Seconds, StreamingContext}
31 | import org.apache.kafka.common.serialization.StringDeserializer
32 | import org.apache.spark.streaming.kafka010._
33 | import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
34 | import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
35 | import org.apache.spark.sql.{SparkSession}
36 |
37 | /**
38 | * Created by sravi on 9/1/17.
39 | */
40 | object IngestStream {
41 | val appName = "Vflow Ingestion"
42 | val sparkConf = new SparkConf().setAppName(appName)
43 | val logger = Logger.getLogger(getClass.getName)
44 |
45 | def generateSparkSession(sparkConf: SparkConf) = {
46 | SparkSession
47 | .builder()
48 | .config(sparkConf)
49 | .enableHiveSupport()
50 | .getOrCreate()
51 | }
52 |
53 |
54 | def main(args: Array[String]): Unit = {
55 |
56 | //Initilize context and stream
57 | val sc = new SparkContext(sparkConf)
58 | val streamingContext = new StreamingContext(sc, Seconds(20))
59 | System.setProperty("spark.hadoop.dfs.replication", "1")
60 |
61 | val argsLen = args.length
62 |
63 | //Get valid number of arguments
64 | argsLen match {
65 | case 5 => {
66 | val topics = Array(args(0))
67 | val bootstrap = args(1)
68 | val groupID = args(2)
69 | val writeFormat: String = args(3)
70 | val tablename: String = args(4)
71 | val sparkSession = generateSparkSession(sparkConf)
72 | val kafkaParams = Map[String, Object](
73 | "bootstrap.servers" -> bootstrap,
74 | "key.deserializer" -> classOf[StringDeserializer],
75 | "value.deserializer" -> classOf[StringDeserializer],
76 | "group.id" -> groupID,
77 | "auto.offset.reset" -> "latest",
78 | "enable.auto.commit" -> (true: java.lang.Boolean)
79 | )
80 | val stream = KafkaUtils.createDirectStream[String, String](
81 | streamingContext,
82 | PreferConsistent,
83 | Subscribe[String, String](topics, kafkaParams)
84 | )
85 | import sparkSession.implicits._
86 | val tf = stream.flatMap(ParseDataUtil.parseRec)
87 | tf.foreachRDD(record => {
88 | record.toDS().write.format(writeFormat).mode(org.apache.spark.sql.SaveMode.Append).saveAsTable(tablename)
89 | })
90 | streamingContext.start()
91 | streamingContext.awaitTermination()
92 | }
93 | case _ => logger.error("Invalid Argument Count. Please give arguments in this order: topic bootstrap_server group_id format tablename")
94 | }
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/consumers/spark/src/main/scala/com/oath/vdms/vflow/consumer/spark/model/IPFix.scala:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: IPFix.scala
6 | //: details: vflow spark consumer
7 | //: author: Satheesh Ravi
8 | //: date: 09/01/2017
9 |
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 |
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 |
24 | package com.oath.vdms.vflow.consumer.spark.model
25 |
26 | import java.sql.Timestamp
27 |
28 | @SerialVersionUID(19900528L)
29 | case class IPFix(
30 | var exportTime: Timestamp,
31 | var sourceIPAddress: String,
32 | var destinationIPAddress: String,
33 | var ipNextHopIPAddress: String,
34 | var bgpSourceAsNumber: Long,
35 | var bgpDestinationAsNumber: Long,
36 | var ingressInterface: Long,
37 | var egressInterface: Long,
38 | var sourceTransportPort: Int,
39 | var destinationTransportPort: Int,
40 | var protocolIdentifier: Int,
41 | var tcpControlBits: String,
42 | var octetDeltaCount: Long) {
43 |
44 | import java.sql.Timestamp
45 |
46 | }
47 |
--------------------------------------------------------------------------------
/consumers/spark/src/main/scala/com/oath/vdms/vflow/consumer/spark/util/FieldMappings.scala:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: FieldMappings.scala
6 | //: details: vflow spark consumer
7 | //: author: Satheesh Ravi
8 | //: date: 09/01/2017
9 |
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 |
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package com.oath.vdms.vflow.consumer.spark.util
24 |
25 | object FieldMappings {
26 | val indexMap = Map(
27 | 8 -> "sourceIPAddress",
28 | 27 -> "sourceIPAddress",
29 | 12 -> "destinationIPAddress",
30 | 28 -> "destinationIPAddress",
31 | 15 -> "ipNextHopIPAddress",
32 | 62 -> "ipNextHopIPAddress",
33 | 16 -> "bgpSourceAsNumber",
34 | 17 -> "bgpDestinationAsNumber",
35 | 14 -> "ingressInterface",
36 | 10 -> "egressInterface",
37 | 7 -> "sourceTransportPort",
38 | 11 -> "destinationTransportPort",
39 | 4 -> "protocolIdentifier",
40 | 6 -> "tcpControlBits",
41 | 1 -> "octetDeltaCount"
42 | )
43 | }
--------------------------------------------------------------------------------
/consumers/spark/src/main/scala/com/oath/vdms/vflow/consumer/spark/util/ParseDataUtil.scala:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ParseDataUtil.scala
6 | //: details: vflow spark consumer
7 | //: author: Satheesh Ravi
8 | //: date: 09/01/2017
9 |
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 |
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package com.oath.vdms.vflow.consumer.spark.util
24 |
25 | import org.apache.kafka.clients.consumer.ConsumerRecord
26 | import java.lang.reflect.Field
27 | import java.sql.Timestamp
28 |
29 | import com.oath.vdms.vflow.consumer.spark.model.IPFix
30 | import org.apache.log4j.Logger
31 |
32 | import scala.collection.immutable.{List, Map}
33 | import scala.util.Try
34 | import scala.util.parsing.json._
35 | import java.text.SimpleDateFormat
36 | import java.util.{Date, TimeZone}
37 |
38 | object ParseDataUtil {
39 |
40 | val logger = Logger.getLogger(getClass.getName)
41 |
42 |
43 | //Parse data from stream and convert it to json object
44 | def parseRec(record: ConsumerRecord[String, String]): Seq[IPFix] = {
45 | val resultMap = JSON.parseFull(record.value()).getOrElse(Map).asInstanceOf[Map[String, String]]
46 | println(resultMap.get("Header").toString)
47 | val epochTime:Long = Try(resultMap.get("Header").getOrElse(Map()).asInstanceOf[Map[String, _]].get("ExportTime").getOrElse(0).asInstanceOf[Double].toLong).getOrElse(0)
48 | println(epochTime)
49 | val time:Timestamp = generateTimeObj(epochTime)
50 | val dataSetList = resultMap.get("DataSets").getOrElse(List.empty).asInstanceOf[List[List[Map[String, _]]]]
51 | for (dataSet <- dataSetList) yield setFields(dataSet, time)
52 | }
53 |
54 | def generateTimeObj(epochTime: Long):Timestamp = {
55 | val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
56 | sdf.setTimeZone(TimeZone.getTimeZone("UTC"))
57 | Timestamp.valueOf(sdf.format(new Date((epochTime * 1000))))
58 | }
59 |
60 | //Iterate through the json to extract data and set it in the right object field
61 | def setFields(dateSet: List[Map[String, _]], time: Timestamp): IPFix = {
62 | val ipFix = new IPFix(time, "", "", "", 0, 0, 0, 0, 0, 0, 0, "", 0)
63 | val c: Class[_] = ipFix.getClass
64 | for (entry <- dateSet) {
65 | try {
66 | val index = entry.get("I").getOrElse(-1.00).asInstanceOf[Double].toInt
67 | val fieldName = FieldMappings.indexMap.getOrElse(index, "")
68 | fieldName match {
69 | case "" =>
70 | case _ => {
71 | val field: Field = c.getDeclaredField(FieldMappings.indexMap(index))
72 | field.setAccessible(true)
73 | field.getType.getName match {
74 | case "java.lang.String" => field.set(ipFix, entry.get("V").getOrElse("").toString)
75 | case "long" => field.set(ipFix, Try(entry.get("V").getOrElse(0L).asInstanceOf[Double].toLong).getOrElse(0))
76 | case "int" => field.set(ipFix, Try(entry.get("V").getOrElse(0).asInstanceOf[Double].toInt).getOrElse(0))
77 | case _ =>
78 | }
79 | }
80 | }
81 | }
82 | catch {
83 | case nse: NoSuchFieldException => logger.error("Unknown field " + entry.get("I").toString + "->" + entry.get("V").toString + " " + nse.toString)
84 | case nfe: NumberFormatException => logger.error("Unknown field " + entry.get("I").toString + "->" + entry.get("V").toString + " " + nfe.toString)
85 | case iae: IllegalArgumentException => logger.error("Unknown field " + entry.get("I").toString + "->" + entry.get("V").toString + " " + iae.toString)
86 | case cce: ClassCastException => logger.error("Unknown field " + entry.get("I").toString + "->" + entry.get("V").toString + " " + cce.toString)
87 | }
88 | }
89 | ipFix
90 | }
91 |
92 |
93 | }
94 |
--------------------------------------------------------------------------------
/disc/disc.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: disc.go
6 | //: details: discovery vFlow nodes by multicasting
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 04/17/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | // Package discovery handles finding vFlow nodes through multicasting
24 | package discovery
25 |
26 | import (
27 | "errors"
28 | "log"
29 | "net"
30 | "strconv"
31 | "sync"
32 | "time"
33 |
34 | "golang.org/x/net/ipv4"
35 | "golang.org/x/net/ipv6"
36 | )
37 |
38 | type vFlowServer struct {
39 | timestamp int64
40 | }
41 |
42 | // Discovery represents vflow discovery
43 | type Discovery struct {
44 | vFlowServers map[string]vFlowServer
45 | mu sync.RWMutex
46 | }
47 |
48 | var errMCInterfaceNotAvail = errors.New("multicast interface not available")
49 |
50 | // Run starts sending multicast hello packet
51 | func Run(ip, port string) error {
52 | tick := time.NewTicker(1 * time.Second)
53 |
54 | p, err := strconv.Atoi(port)
55 | if err != nil {
56 | return err
57 | }
58 |
59 | c, err := net.DialUDP("udp", nil, &net.UDPAddr{
60 | IP: net.ParseIP(ip),
61 | Port: p,
62 | })
63 |
64 | b := []byte("hello vflow")
65 |
66 | if err != nil {
67 | return err
68 | }
69 |
70 | for {
71 | <-tick.C
72 | c.Write(b)
73 | }
74 | }
75 |
76 | // Listen receives discovery hello packet
77 | func Listen(ip, port string) (*Discovery, error) {
78 | var (
79 | conn interface{}
80 | buff = make([]byte, 1500)
81 | disc = &Discovery{
82 | vFlowServers: make(map[string]vFlowServer, 10),
83 | }
84 | )
85 |
86 | c, err := net.ListenPacket("udp", net.JoinHostPort(
87 | ip,
88 | port,
89 | ))
90 |
91 | if err != nil {
92 | return nil, err
93 | }
94 |
95 | ifs, err := getMulticastIfs()
96 | if err != nil {
97 | return nil, err
98 | }
99 |
100 | if net.ParseIP(ip).To4() != nil {
101 | conn = ipv4.NewPacketConn(c)
102 | for _, i := range ifs {
103 | err = conn.(*ipv4.PacketConn).JoinGroup(
104 | &i,
105 | &net.UDPAddr{IP: net.ParseIP(ip)},
106 | )
107 | if err != nil {
108 | return nil, err
109 | }
110 | }
111 | } else {
112 | conn = ipv6.NewPacketConn(c)
113 | for _, i := range ifs {
114 | err = conn.(*ipv6.PacketConn).JoinGroup(
115 | &i,
116 | &net.UDPAddr{IP: net.ParseIP(ip)},
117 | )
118 | if err != nil {
119 | return nil, err
120 | }
121 | }
122 | }
123 |
124 | laddrs, err := getLocalIPs()
125 | if err != nil {
126 | log.Fatal(err)
127 | }
128 |
129 | go func() {
130 |
131 | var (
132 | addr net.Addr
133 | err error
134 | )
135 |
136 | for {
137 |
138 | if net.ParseIP(ip).To4() != nil {
139 | _, _, addr, err = conn.(*ipv4.PacketConn).ReadFrom(buff)
140 | } else {
141 | _, _, addr, err = conn.(*ipv6.PacketConn).ReadFrom(buff)
142 | }
143 |
144 | if err != nil {
145 | continue
146 | }
147 |
148 | host, _, err := net.SplitHostPort(addr.String())
149 | if err != nil {
150 | continue
151 | }
152 |
153 | if _, ok := laddrs[host]; ok {
154 | continue
155 | }
156 |
157 | disc.mu.Lock()
158 | disc.vFlowServers[host] = vFlowServer{time.Now().Unix()}
159 | disc.mu.Unlock()
160 |
161 | }
162 |
163 | }()
164 |
165 | return disc, nil
166 | }
167 |
168 | // Nodes returns a slice of available vFlow nodes
169 | func (d *Discovery) Nodes() []string {
170 | var servers []string
171 |
172 | now := time.Now().Unix()
173 |
174 | d.mu.Lock()
175 |
176 | for ip, server := range d.vFlowServers {
177 | if now-server.timestamp < 300 {
178 | servers = append(servers, ip)
179 | } else {
180 | delete(d.vFlowServers, ip)
181 | }
182 | }
183 |
184 | d.mu.Unlock()
185 |
186 | return servers
187 | }
188 |
189 | func getMulticastIfs() ([]net.Interface, error) {
190 | var out []net.Interface
191 |
192 | ifs, err := net.Interfaces()
193 | if err != nil {
194 | return nil, err
195 | }
196 |
197 | for _, i := range ifs {
198 | if i.Flags == 19 { // 19 = up|broadcast|multicast
199 | out = append(out, i)
200 | }
201 | }
202 |
203 | if len(out) < 1 {
204 | return nil, errMCInterfaceNotAvail
205 | }
206 |
207 | return out, nil
208 | }
209 |
210 | func getLocalIPs() (map[string]struct{}, error) {
211 | ips := make(map[string]struct{})
212 |
213 | ifs, err := net.Interfaces()
214 | if err != nil {
215 | return nil, err
216 | }
217 |
218 | for _, i := range ifs {
219 | addrs, err := i.Addrs()
220 | if err != nil || i.Flags != 19 {
221 | continue
222 | }
223 | for _, addr := range addrs {
224 | ip, _, _ := net.ParseCIDR(addr.String())
225 | ips[ip.String()] = struct{}{}
226 | }
227 | }
228 |
229 | return ips, nil
230 | }
231 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | kafka:
4 | image: spotify/kafka
5 | hostname: kafka
6 | ports:
7 | - "2181:2181"
8 | - "9092:9092"
9 |
10 | vflow:
11 | image: mehrdadrad/vflow
12 | hostname: vflow
13 | entrypoint: /bin/sh -c "sleep 5 && vflow"
14 | depends_on:
15 | - kafka
16 | environment:
17 | - VFLOW_KAFKA_BROKERS=kafka:9092
18 | ports:
19 | - "4739:4739/udp"
20 | - "4729:4729/udp"
21 | - "6343:6343/udp"
22 | - "9996:9996/udp"
23 | - "8081:8081"
24 | prometheus:
25 | image: prom/prometheus:latest
26 | hostname: prom
27 | ports:
28 | - "9090:9090"
29 | container_name: prometheus
30 | volumes:
31 | - ./scripts:/etc/prometheus
32 | command:
33 | - "--config.file=/etc/prometheus/prometheus.yml"
34 |
--------------------------------------------------------------------------------
/docs/design.md:
--------------------------------------------------------------------------------
1 | # Overview
2 |
3 | The vFlow project is an enterprise IPFIX and sFlow collector. It produces the decoded samples to a message bus such as Kafka
4 | or NSQ. vFlow is high performance and scalable: it can grow horizontally, since each node can talk to the others over RPC
5 | to fetch any IPFIX template it has missed. There is also a feature to clone the raw IPFIX UDP packets, with source spoofing, in case you need
6 | the IPFIX raw data somewhere else.
7 |
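A minimal consumer sketch for the decoded JSON samples, assuming a local Kafka broker and the default `vflow.ipfix` topic (the repository also ships ready-made consumers under `consumers/`):

```
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Connect to the broker vFlow produces to.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// vFlow publishes decoded IPFIX messages as JSON to this topic by default.
	pc, err := consumer.ConsumePartition("vflow.ipfix", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		fmt.Println(string(msg.Value))
	}
}
```
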
8 | # Architecture
9 |
10 | 
11 |
12 | # Dynamic pool
13 |
14 | The number of worker processes is adjusted automatically at runtime based on the incoming load. The minimum number of workers is configurable, and vFlow gradually scales the pool up and down from that floor.
15 |
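As a rough illustration of the idea (not the actual vFlow scheduler), a worker pool can watch its queue backlog and grow or shrink around the configured minimum:

```
package pool

// scale is an illustrative helper: grow the pool when the queue backs up,
// shrink back toward the configured minimum when it drains.
func scale(queue chan []byte, workers, minWorkers int) int {
	switch backlog := len(queue); {
	case backlog > cap(queue)/2:
		return workers + 1 // under pressure: add a worker
	case backlog == 0 && workers > minWorkers:
		return workers - 1 // idle: retire a worker, never below the minimum
	}
	return workers
}
```
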
16 | # Discovery
17 |
18 | Each vFlow instance multicasts on all multicast-capable interfaces to discover the other nodes it can query for any IPFIX template it has not seen yet. The multicast group address is 224.0.0.55 and each node sends a hello packet every second. The group is link-local scope, so you do not need to enable multicast routing across routers.
19 |
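A minimal sketch of the underlying mechanism, using the same `golang.org/x/net/ipv4` package as `disc/disc.go` (the listen port here is illustrative; vFlow's discovery port is configurable):

```
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
	if err != nil {
		log.Fatal(err)
	}
	p := ipv4.NewPacketConn(c)

	// Join the 224.0.0.55 group on every multicast-capable interface,
	// much like disc/disc.go does.
	ifs, _ := net.Interfaces()
	group := &net.UDPAddr{IP: net.ParseIP("224.0.0.55")}
	for _, ifi := range ifs {
		if ifi.Flags&net.FlagMulticast == 0 {
			continue
		}
		if err := p.JoinGroup(&ifi, group); err != nil {
			log.Println("join failed on", ifi.Name, err)
		}
	}
}
```
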
20 | # Pluggable architecture
21 |
22 | vFlow has a pluggable message queue layer. For the time being it ships with Kafka and NSQ plugins, but you can quickly write one for another message queue such as RabbitMQ; see the sketch below.
23 |
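The real plugin contract lives in the `producer` package; as a rough, hypothetical sketch of what a new back-end has to provide (identifier names here are illustrative, not the package's real ones):

```
package producer

import "log"

// backend is a hypothetical shape of a message queue plugin; see
// producer/producer.go for the actual interface the Kafka and NSQ
// producers implement.
type backend interface {
	setup(configFile string, logger *log.Logger) error
	inputMsg(topic string, ch chan []byte, errCount *uint64)
}
```
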
24 | # Hardware requirements
25 |
26 | |Load|IPFIX PPS|CPU|RAM|
27 | |----|---------|---|---|
28 | |low| < 1K |2-4|64M|
29 | |moderate| < 10K| 8+| 256M|
30 | |high| < 50K| 12+| 512M|
31 | |x-high| < 100K | 24+ | 1G|
32 |
--------------------------------------------------------------------------------
/docs/imgs/architecture.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/architecture.gif
--------------------------------------------------------------------------------
/docs/imgs/clickhouse.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/clickhouse.jpeg
--------------------------------------------------------------------------------
/docs/imgs/clickhouse_s1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/clickhouse_s1.png
--------------------------------------------------------------------------------
/docs/imgs/clickhouse_s2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/clickhouse_s2.png
--------------------------------------------------------------------------------
/docs/imgs/grafana.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/grafana.gif
--------------------------------------------------------------------------------
/docs/imgs/stress.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/stress.gif
--------------------------------------------------------------------------------
/docs/imgs/vflow.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/vflow.gif
--------------------------------------------------------------------------------
/docs/imgs/vflow_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/vflow_logo.png
--------------------------------------------------------------------------------
/docs/imgs/vflow_memsql.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Edgio/vflow/811977722a7ce0671f798f734fc0fb36714ac2ec/docs/imgs/vflow_memsql.jpeg
--------------------------------------------------------------------------------
/docs/junos_integration.md:
--------------------------------------------------------------------------------
1 | ### Juniper MX Series routers (MX5, MX10, MX40, MX80, MX104, MX120, MX240, MX480, MX960)
2 |
3 |
4 | Set sampling on the interfaces as below:
5 | ```
6 | set interfaces xe-1/0/0.0 family inet sampling input
7 | set interfaces xe-1/0/0.0 family inet sampling output
8 | ```
9 |
10 | Create vflow template
11 | ```
12 | set services flow-monitoring version-ipfix template vflow flow-active-timeout 10
13 | set services flow-monitoring version-ipfix template vflow flow-inactive-timeout 10
14 | set services flow-monitoring version-ipfix template vflow template-refresh-rate packets 1000
15 | set services flow-monitoring version-ipfix template vflow template-refresh-rate seconds 10
16 | set services flow-monitoring version-ipfix template vflow option-refresh-rate packets 1000
17 | set services flow-monitoring version-ipfix template vflow option-refresh-rate seconds 10
18 | set services flow-monitoring version-ipfix template vflow ipv4-template
19 | ```
20 |
21 | ```
22 | set chassis fpc 0 sampling-instance vflow
23 | set chassis fpc 1 sampling-instance vflow
24 |
25 | set forwarding-options sampling instance ipfix input rate 100
26 | set forwarding-options sampling instance ipfix family inet output flow-server 192.168.0.10 port 4739
27 | set forwarding-options sampling instance ipfix family inet output flow-server 192.168.0.10 version-ipfix template vflow
28 | set forwarding-options sampling instance ipfix family inet output inline-jflow source-address 192.168.0.1
29 | ```
30 |
--------------------------------------------------------------------------------
/docs/quick_start_kafka.md:
--------------------------------------------------------------------------------
1 | # Install vFlow with Kafka - Linux
2 |
3 | ## Download and install vFlow
4 | ### Debian Package
5 | ```
6 | wget https://github.com/EdgeCast/vflow/releases/download/v0.4.1/vflow-0.4.1-amd64.deb
7 | dpkg -i vflow-0.4.1-amd64.deb
8 | ```
9 | ### RPM Package
10 | ```
11 | wget https://github.com/EdgeCast/vflow/releases/download/v0.4.1/vflow-0.4.1.amd64.rpm
12 | rpm -ivh vflow-0.4.1.amd64.rpm
13 | or
14 | yum localinstall vflow-0.4.1.amd64.rpm
15 | ```
16 | ## Download Kafka
17 | ```
18 | wget https://www.apache.org/dyn/closer.cgi?path=/kafka/0.11.0.0/kafka_2.11-0.11.0.0.tgz
19 | tar -xzf kafka_2.11-0.11.0.0.tgz
20 | cd kafka_2.11-0.11.0.0
21 | ```
22 | Kafka uses ZooKeeper, so you first need to start a ZooKeeper server if you don't already have one
23 | ```
24 | bin/zookeeper-server-start.sh config/zookeeper.properties
25 | ```
26 | Start the Kafka server
27 | ```
28 | bin/kafka-server-start.sh config/server.properties
29 | ```
30 | ## vFlow - start service
31 | ```
32 | service vflow start
33 | ```
34 |
35 | ## vFlow - load generator
36 | ```
37 | vflow_stress -sflow-rate-limit 1 -ipfix-rate-limit 1 &
38 | ```
39 |
40 | ## Consume IPFIX topic from Kafka
41 | ```
42 | bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic vflow.ipfix
43 | ```
44 |
--------------------------------------------------------------------------------
/docs/quick_start_nsq.md:
--------------------------------------------------------------------------------
1 | # Install vFlow with NSQ - Linux
2 |
3 | ## Download and install vFlow
4 | ### Debian Package
5 | ```
6 | wget https://github.com/EdgeCast/vflow/releases/download/v0.4.1/vflow-0.4.1-amd64.deb
7 | dpkg -i vflow-0.4.1-amd64.deb
8 | ```
9 | ### RPM Package
10 | ```
11 | wget https://github.com/EdgeCast/vflow/releases/download/v0.4.1/vflow-0.4.1.amd64.rpm
12 | rpm -ivh vflow-0.4.1.amd64.rpm
13 | or
14 | yum localinstall vflow-0.4.1.amd64.rpm
15 | ```
16 |
17 | ## Download NSQ
18 | ```
19 | wget https://s3.amazonaws.com/bitly-downloads/nsq/nsq-1.0.0-compat.linux-amd64.go1.8.tar.gz
20 | tar -xvf nsq-1.0.0-compat.linux-amd64.go1.8.tar.gz
21 | cp nsq-1.0.0-compat.linux-amd64.go1.8/bin/* /usr/bin
22 | ```
23 | ## NSQ - start service
24 |
25 | ```
26 | nsqd &
27 | ```
28 |
29 | ## vFlow - NSQ config
30 | ```
31 | echo "mq-name: nsq" >> /etc/vflow/vflow.conf
32 | ```
33 |
34 | ## vFlow - start service
35 | ```
36 | service vflow start
37 | ```
38 |
39 | ## vFlow - Load generator
40 | ```
41 | vflow_stress -sflow-rate-limit 1 -ipfix-rate-limit 1 &
42 | ```
43 | ## Consume IPFIX topic from NSQ
44 | ```
45 | nsq_tail --topic vflow.ipfix -nsqd-tcp-address localhost:4150
46 | ```
47 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/EdgeCast/vflow
2 |
3 | go 1.22
4 |
5 | require (
6 | github.com/ClickHouse/clickhouse-go v1.5.4
7 | github.com/Shopify/sarama v1.33.0
8 | github.com/bsm/sarama-cluster v2.1.15+incompatible
9 | github.com/nats-io/nats.go v1.15.0
10 | github.com/nsqio/go-nsq v1.1.0
11 | github.com/prometheus/client_golang v1.12.2
12 | github.com/segmentio/kafka-go v0.4.31
13 | golang.org/x/net v0.0.0-20220513224357-95641704303c
14 | gopkg.in/yaml.v2 v2.4.0
15 | )
16 |
17 | require (
18 | github.com/beorn7/perks v1.0.1 // indirect
19 | github.com/cespare/xxhash/v2 v2.1.2 // indirect
20 | github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
21 | github.com/davecgh/go-spew v1.1.1 // indirect
22 | github.com/eapache/go-resiliency v1.2.0 // indirect
23 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
24 | github.com/eapache/queue v1.1.0 // indirect
25 | github.com/golang/protobuf v1.5.2 // indirect
26 | github.com/golang/snappy v0.0.4 // indirect
27 | github.com/hashicorp/errwrap v1.0.0 // indirect
28 | github.com/hashicorp/go-multierror v1.1.1 // indirect
29 | github.com/hashicorp/go-uuid v1.0.2 // indirect
30 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect
31 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
32 | github.com/jcmturner/gofork v1.0.0 // indirect
33 | github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect
34 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect
35 | github.com/klauspost/compress v1.15.0 // indirect
36 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
37 | github.com/nats-io/nats-server/v2 v2.8.2 // indirect
38 | github.com/nats-io/nkeys v0.3.0 // indirect
39 | github.com/nats-io/nuid v1.0.1 // indirect
40 | github.com/onsi/ginkgo v1.16.5 // indirect
41 | github.com/onsi/gomega v1.19.0 // indirect
42 | github.com/pierrec/lz4 v2.6.1+incompatible // indirect
43 | github.com/pierrec/lz4/v4 v4.1.14 // indirect
44 | github.com/prometheus/client_model v0.2.0 // indirect
45 | github.com/prometheus/common v0.32.1 // indirect
46 | github.com/prometheus/procfs v0.7.3 // indirect
47 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
48 | golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
49 | golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
50 | google.golang.org/protobuf v1.26.0 // indirect
51 | )
52 |
--------------------------------------------------------------------------------
/ipfix/doc.go:
--------------------------------------------------------------------------------
1 | // Package ipfix decodes IPFIX packets
2 | package ipfix
3 |
--------------------------------------------------------------------------------
/ipfix/interpret.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file:    interpret.go
6 | //: details: Read IPFIX and Netflow v9 data fields based on the type
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package ipfix
24 |
25 | import (
26 | "encoding/binary"
27 | "math"
28 | "net"
29 | )
30 |
31 | // Interpret reads data fields based on the type - big endian
32 | func Interpret(b *[]byte, t FieldType) interface{} {
33 | if len(*b) < t.minLen() {
34 | return *b
35 | }
36 |
37 | switch t {
38 | case Boolean:
39 | return (*b)[0] == 1
40 | case Uint8:
41 | return (*b)[0]
42 | case Uint16:
43 | return binary.BigEndian.Uint16(*b)
44 | case Uint32:
45 | return binary.BigEndian.Uint32(*b)
46 | case Uint64:
47 | return binary.BigEndian.Uint64(*b)
48 | case Int8:
49 | return int8((*b)[0])
50 | case Int16:
51 | return int16(binary.BigEndian.Uint16(*b))
52 | case Int32:
53 | return int32(binary.BigEndian.Uint32(*b))
54 | case Int64:
55 | return int64(binary.BigEndian.Uint64(*b))
56 | case Float32:
57 | return math.Float32frombits(binary.BigEndian.Uint32(*b))
58 | case Float64:
59 | return math.Float64frombits(binary.BigEndian.Uint64(*b))
60 | case MacAddress:
61 | return net.HardwareAddr(*b)
62 | case String:
63 | return string(*b)
64 | case Ipv4Address, Ipv6Address:
65 | return net.IP(*b)
66 | case DateTimeSeconds:
67 | return binary.BigEndian.Uint32(*b)
68 | case DateTimeMilliseconds, DateTimeMicroseconds, DateTimeNanoseconds:
69 | return binary.BigEndian.Uint64(*b)
70 | case Unknown, OctetArray:
71 | return *b
72 | }
73 | return *b
74 | }
75 |
76 | func (t FieldType) minLen() int {
77 | switch t {
78 | case Boolean:
79 | return 1
80 | case Uint8, Int8:
81 | return 1
82 | case Uint16, Int16:
83 | return 2
84 | case Uint32, Int32, Float32:
85 | return 4
86 | case DateTimeSeconds:
87 | return 4
88 | case Uint64, Int64, Float64:
89 | return 8
90 | case DateTimeMilliseconds, DateTimeMicroseconds, DateTimeNanoseconds:
91 | return 8
92 | case MacAddress:
93 | return 6
94 | case Ipv4Address:
95 | return 4
96 | case Ipv6Address:
97 | return 16
98 | default:
99 | return 0
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
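A minimal usage sketch of the `Interpret` helper above (the field type constants come from `rfc5102_model.go`; the byte values here are made up):

```
package main

import (
	"fmt"

	"github.com/EdgeCast/vflow/ipfix"
)

func main() {
	// A big-endian uint16 field value (e.g. destinationTransportPort = 443).
	b := []byte{0x01, 0xbb}
	fmt.Println(ipfix.Interpret(&b, ipfix.Uint16)) // 443

	// A 4-byte IPv4 address field.
	addr := []byte{0xc0, 0x00, 0x02, 0x01}
	fmt.Println(ipfix.Interpret(&addr, ipfix.Ipv4Address)) // 192.0.2.1
}
```
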
/ipfix/marshal.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: marshal.go
6 | //: details: encoding of the decoded IPFIX data sets
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package ipfix
24 |
25 | import (
26 | "bytes"
27 | "encoding/hex"
28 | "errors"
29 | "net"
30 | "strconv"
31 | )
32 |
33 | var errUknownMarshalDataType = errors.New("unknown data type to marshal")
34 |
35 | // JSONMarshal encodes IPFIX message
36 | func (m *Message) JSONMarshal(b *bytes.Buffer) ([]byte, error) {
37 | b.WriteString("{")
38 |
39 | // encode agent id
40 | m.encodeAgent(b)
41 |
42 | // encode header
43 | m.encodeHeader(b)
44 |
45 | // encode data sets
46 | if err := m.encodeDataSet(b); err != nil {
47 | return nil, err
48 | }
49 |
50 | b.WriteString("}")
51 |
52 | return b.Bytes(), nil
53 | }
54 |
55 | func (m *Message) encodeDataSet(b *bytes.Buffer) error {
56 | var (
57 | length int
58 | dsLength int
59 | err error
60 | )
61 |
62 | b.WriteString("\"DataSets\":")
63 | dsLength = len(m.DataSets)
64 |
65 | b.WriteByte('[')
66 |
67 | for i := range m.DataSets {
68 | length = len(m.DataSets[i])
69 |
70 | b.WriteByte('[')
71 | for j := range m.DataSets[i] {
72 | b.WriteString("{\"I\":")
73 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].ID), 10))
74 | b.WriteString(",\"V\":")
75 | err = m.writeValue(b, i, j)
76 |
77 | if m.DataSets[i][j].EnterpriseNo != 0 {
78 | b.WriteString(",\"E\":")
79 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].EnterpriseNo), 10))
80 | }
81 |
82 | if j < length-1 {
83 | b.WriteString("},")
84 | } else {
85 | b.WriteByte('}')
86 | }
87 | }
88 |
89 | if i < dsLength-1 {
90 | b.WriteString("],")
91 | } else {
92 | b.WriteByte(']')
93 | }
94 | }
95 |
96 | b.WriteByte(']')
97 |
98 | return err
99 | }
100 |
101 | func (m *Message) encodeDataSetFlat(b *bytes.Buffer) error {
102 | var (
103 | length int
104 | dsLength int
105 | err error
106 | )
107 |
108 | b.WriteString("\"DataSets\":")
109 | dsLength = len(m.DataSets)
110 |
111 | b.WriteByte('[')
112 |
113 | for i := range m.DataSets {
114 | length = len(m.DataSets[i])
115 |
116 | b.WriteByte('{')
117 | for j := range m.DataSets[i] {
118 | b.WriteByte('"')
119 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].ID), 10))
120 | b.WriteString("\":")
121 | err = m.writeValue(b, i, j)
122 |
123 | if j < length-1 {
124 | b.WriteByte(',')
125 | } else {
126 | b.WriteByte('}')
127 | }
128 | }
129 |
130 | if i < dsLength-1 {
131 | b.WriteString(",")
132 | }
133 | }
134 |
135 | b.WriteByte(']')
136 |
137 | return err
138 | }
139 |
140 | func (m *Message) encodeHeader(b *bytes.Buffer) {
141 | b.WriteString("\"Header\":{\"Version\":")
142 | b.WriteString(strconv.FormatInt(int64(m.Header.Version), 10))
143 | b.WriteString(",\"Length\":")
144 | b.WriteString(strconv.FormatInt(int64(m.Header.Length), 10))
145 | b.WriteString(",\"ExportTime\":")
146 | b.WriteString(strconv.FormatInt(int64(m.Header.ExportTime), 10))
147 | b.WriteString(",\"SequenceNo\":")
148 | b.WriteString(strconv.FormatInt(int64(m.Header.SequenceNo), 10))
149 | b.WriteString(",\"DomainID\":")
150 | b.WriteString(strconv.FormatInt(int64(m.Header.DomainID), 10))
151 | b.WriteString("},")
152 | }
153 |
154 | func (m *Message) encodeAgent(b *bytes.Buffer) {
155 | b.WriteString("\"AgentID\":\"")
156 | b.WriteString(m.AgentID)
157 | b.WriteString("\",")
158 | }
159 |
160 | func (m *Message) writeValue(b *bytes.Buffer, i, j int) error {
161 | switch m.DataSets[i][j].Value.(type) {
162 | case uint:
163 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint)), 10))
164 | case uint8:
165 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint8)), 10))
166 | case uint16:
167 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint16)), 10))
168 | case uint32:
169 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint32)), 10))
170 | case uint64:
171 | b.WriteString(strconv.FormatUint(m.DataSets[i][j].Value.(uint64), 10))
172 | case int:
173 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int)), 10))
174 | case int8:
175 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int8)), 10))
176 | case int16:
177 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int16)), 10))
178 | case int32:
179 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int32)), 10))
180 | case int64:
181 | b.WriteString(strconv.FormatInt(m.DataSets[i][j].Value.(int64), 10))
182 | case float32:
183 | b.WriteString(strconv.FormatFloat(float64(m.DataSets[i][j].Value.(float32)), 'E', -1, 32))
184 | case float64:
185 | b.WriteString(strconv.FormatFloat(m.DataSets[i][j].Value.(float64), 'E', -1, 64))
186 | case string:
187 | b.WriteByte('"')
188 | b.WriteString(m.DataSets[i][j].Value.(string))
189 | b.WriteByte('"')
190 | case net.IP:
191 | b.WriteByte('"')
192 | b.WriteString(m.DataSets[i][j].Value.(net.IP).String())
193 | b.WriteByte('"')
194 | case net.HardwareAddr:
195 | b.WriteByte('"')
196 | b.WriteString(m.DataSets[i][j].Value.(net.HardwareAddr).String())
197 | b.WriteByte('"')
198 | case []uint8:
199 | b.WriteByte('"')
200 | b.WriteString("0x" + hex.EncodeToString(m.DataSets[i][j].Value.([]uint8)))
201 | b.WriteByte('"')
202 | default:
203 | return errUknownMarshalDataType
204 | }
205 |
206 | return nil
207 | }
208 |
--------------------------------------------------------------------------------
/ipfix/marshal_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: marshal_test.go
6 | //: details: provides support for automated testing of marshal methods
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package ipfix
24 |
25 | import (
26 | "bytes"
27 | "encoding/json"
28 | "net"
29 | "testing"
30 | )
31 |
32 | type TestMessage struct {
33 | AgentID string
34 | Header MessageHeader
35 | DataSets [][]TestDecodedField
36 | }
37 |
38 | type TestDecodedField struct {
39 | I uint16
40 | V interface{}
41 | }
42 |
43 | var mockDecodedMsg = Message{
44 | AgentID: "10.10.10.10",
45 | Header: MessageHeader{
46 | Version: 10,
47 | Length: 420,
48 | ExportTime: 1483484756,
49 | SequenceNo: 2563920489,
50 | DomainID: 34560,
51 | },
52 | DataSets: [][]DecodedField{
53 | {
54 | {ID: 0x8, Value: net.IP{0x5b, 0x7d, 0x82, 0x79}},
55 | {ID: 0xc, Value: net.IP{0xc0, 0xe5, 0xdc, 0x85}},
56 | {ID: 0x5, Value: 0x0},
57 | {ID: 0x4, Value: 0x6},
58 | {ID: 0x7, Value: 0xecba},
59 | {ID: 0xb, Value: 0x1bb},
60 | {ID: 0x20, Value: 0x0},
61 | {ID: 0xa, Value: 0x503},
62 | {ID: 0x3a, Value: 0x0},
63 | {ID: 0x9, Value: 0x10},
64 | {ID: 0xd, Value: 0x18},
65 | {ID: 0x10, Value: 0x1ad7},
66 | {ID: 0x11, Value: 0x3b1d},
67 | {ID: 0xf, Value: net.IP{0xc0, 0x10, 0x1c, 0x58}},
68 | {ID: 0x6, Value: []uint8{0x10}},
69 | {ID: 0xe, Value: 0x4f6},
70 | {ID: 0x1, Value: 0x28},
71 | {ID: 0x2, Value: 0x1},
72 | {ID: 0x34, Value: 0x3a},
73 | {ID: 0x35, Value: 0x3a},
74 | {ID: 0x98, Value: 1483484685331},
75 | {ID: 0x99, Value: 1483484685331},
76 | {ID: 0x88, Value: 0x1},
77 | {ID: 0xf3, Value: 0x0},
78 | {ID: 0xf5, Value: 0x0},
79 | },
80 | },
81 | }
82 |
83 | func TestJSONMarshal(t *testing.T) {
84 | buf := new(bytes.Buffer)
85 | msg := TestMessage{}
86 |
87 | b, err := mockDecodedMsg.JSONMarshal(buf)
88 | if err != nil {
89 | t.Error("unexpected error", err)
90 | }
91 |
92 | err = json.Unmarshal(b, &msg)
93 | if err != nil {
94 | t.Error("unexpected error", err)
95 | }
96 | if msg.AgentID != "10.10.10.10" {
97 | t.Error("expect AgentID 10.10.10.10, got", msg.AgentID)
98 | }
99 | if msg.Header.Version != 10 {
100 | t.Error("expect Version 10, got", msg.Header.Version)
101 | }
102 | }
103 |
104 | func TestJSONMarshalDataSets(t *testing.T) {
105 | buf := new(bytes.Buffer)
106 | msg := TestMessage{}
107 |
108 | b, _ := mockDecodedMsg.JSONMarshal(buf)
109 | json.Unmarshal(b, &msg)
110 |
111 | for _, ds := range msg.DataSets {
112 | for _, f := range ds {
113 | switch f.I {
114 | case 1:
115 | chkFloat64(t, f, 40)
116 | case 2:
117 | chkFloat64(t, f, 1)
118 | case 4:
119 | chkFloat64(t, f, 6)
120 | case 5:
121 | chkFloat64(t, f, 0)
122 | case 6:
123 | chkString(t, f, "0x10")
124 | case 8:
125 | chkString(t, f, "91.125.130.121")
126 | case 12:
127 | chkString(t, f, "192.229.220.133")
128 | case 13:
129 | chkFloat64(t, f, 24)
130 | case 14:
131 | chkFloat64(t, f, 1270)
132 | case 152:
133 | chkFloat64(t, f, 1483484685331)
134 | }
135 | }
136 | }
137 | }
138 |
139 | func BenchmarkJSONMarshal(b *testing.B) {
140 | buf := new(bytes.Buffer)
141 |
142 | for i := 0; i < b.N; i++ {
143 | mockDecodedMsg.JSONMarshal(buf)
144 | }
145 |
146 | }
147 |
148 | func chkFloat64(t *testing.T, f TestDecodedField, expect float64) {
149 | if f.V.(float64) != expect {
150 | t.Errorf("expect ID %d value %f, got %f", f.I, expect, f.V)
151 | }
152 | }
153 |
154 | func chkString(t *testing.T, f TestDecodedField, expect string) {
155 | if f.V.(string) != expect {
156 | t.Errorf("expect ID %d value %s, got %s", f.I, expect, f.V.(string))
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/ipfix/memcache.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: memcache.go
6 | //: details: handles template caching in memory with sharding feature
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package ipfix
24 |
25 | import (
26 | "encoding/binary"
27 | "encoding/json"
28 | "hash/fnv"
29 | "io/ioutil"
30 | "net"
31 | "sort"
32 | "sync"
33 | "time"
34 | )
35 |
36 | var shardNo = 32
37 |
38 | // MemCache represents templates shards
39 | type MemCache []*TemplatesShard
40 |
41 | // Data represents template records and
42 | // updated timestamp
43 | type Data struct {
44 | Template TemplateRecord
45 | Timestamp int64
46 | }
47 |
48 | // TemplatesShard represents a shard
49 | type TemplatesShard struct {
50 | Templates map[uint32]Data
51 | sync.RWMutex
52 | }
53 | type memCacheDisk struct {
54 | Cache MemCache
55 | ShardNo int
56 | }
57 |
58 | // GetCache tries to load saved templates
59 | // otherwise it constructs new empty shards
60 | func GetCache(cacheFile string) MemCache {
61 | var (
62 | mem memCacheDisk
63 | err error
64 | )
65 |
66 | b, err := ioutil.ReadFile(cacheFile)
67 | if err == nil {
68 | err = json.Unmarshal(b, &mem)
69 | if err == nil && mem.ShardNo == shardNo {
70 | return mem.Cache
71 | }
72 | }
73 |
74 | m := make(MemCache, shardNo)
75 | for i := 0; i < shardNo; i++ {
76 | m[i] = &TemplatesShard{Templates: make(map[uint32]Data)}
77 | }
78 |
79 | return m
80 | }
81 |
82 | func (m MemCache) getShard(id uint16, addr net.IP) (*TemplatesShard, uint32) {
83 | b := make([]byte, 2)
84 | binary.BigEndian.PutUint16(b, id)
85 | key := append(addr, b...)
86 |
87 | hash := fnv.New32()
88 | hash.Write(key)
89 | hSum32 := hash.Sum32()
90 |
91 | return m[uint(hSum32)%uint(shardNo)], hSum32
92 | }
93 |
94 | func (m MemCache) insert(id uint16, addr net.IP, tr TemplateRecord) {
95 | shard, key := m.getShard(id, addr)
96 | shard.Lock()
97 | defer shard.Unlock()
98 | shard.Templates[key] = Data{tr, time.Now().Unix()}
99 | }
100 |
101 | func (m MemCache) retrieve(id uint16, addr net.IP) (TemplateRecord, bool) {
102 | shard, key := m.getShard(id, addr)
103 | shard.RLock()
104 | defer shard.RUnlock()
105 | v, ok := shard.Templates[key]
106 |
107 | return v.Template, ok
108 | }
109 |
110 | // Fill a slice with all known set ids. This is inefficient and is only used for error reporting or debugging.
111 | func (m MemCache) allSetIds() []int {
112 | num := 0
113 | for _, shard := range m {
114 | num += len(shard.Templates)
115 | }
116 | result := make([]int, 0, num)
117 | for _, shard := range m {
118 | shard.RLock()
119 | for _, set := range shard.Templates {
120 | result = append(result, int(set.Template.TemplateID))
121 | }
122 | shard.RUnlock()
123 | }
124 | sort.Ints(result)
125 | return result
126 | }
127 |
128 | // Dump saves the current templates to hard disk
129 | func (m MemCache) Dump(cacheFile string) error {
130 | b, err := json.Marshal(
131 | memCacheDisk{
132 | m,
133 | shardNo,
134 | },
135 | )
136 | if err != nil {
137 | return err
138 | }
139 |
140 | err = ioutil.WriteFile(cacheFile, b, 0644)
141 | if err != nil {
142 | return err
143 | }
144 |
145 | return nil
146 | }
147 |
--------------------------------------------------------------------------------
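A small persistence sketch for the cache above (the file path is illustrative): load any previously dumped templates at start-up and write them back out on shutdown.

```
package main

import (
	"log"

	"github.com/EdgeCast/vflow/ipfix"
)

func main() {
	// Load previously dumped templates, or construct fresh empty shards.
	cache := ipfix.GetCache("/tmp/vflow.templates")

	// ... hand the cache to the IPFIX decoder; templates are inserted
	// as template sets arrive from the exporters ...

	// Persist the learned templates so a restart does not lose them.
	if err := cache.Dump("/tmp/vflow.templates"); err != nil {
		log.Println("template dump failed:", err)
	}
}
```
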
/ipfix/memcache_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: memcache_test.go
6 | //: details: memory template cache testing
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package ipfix
24 |
25 | import (
26 | "net"
27 | "reflect"
28 | "testing"
29 | )
30 |
31 | func TestMemCacheRetrieve(t *testing.T) {
32 | ip := net.ParseIP("127.0.0.1")
33 | mCache := GetCache("cache.file")
34 | d := NewDecoder(ip, tpl)
35 | d.Decode(mCache)
36 | v, ok := mCache.retrieve(256, ip)
37 | if !ok {
38 | t.Error("expected mCache retrieve status true, got", ok)
39 | }
40 | if v.TemplateID != 256 {
41 | t.Error("expected template id#:256, got", v.TemplateID)
42 | }
43 | }
44 |
45 | func TestMemCacheInsert(t *testing.T) {
46 | var tpl TemplateRecord
47 | ip := net.ParseIP("127.0.0.1")
48 | mCache := GetCache("cache.file")
49 |
50 | tpl.TemplateID = 310
51 | mCache.insert(310, ip, tpl)
52 |
53 | v, ok := mCache.retrieve(310, ip)
54 | if !ok {
55 | t.Error("expected mCache retrieve status true, got", ok)
56 | }
57 | if v.TemplateID != 310 {
58 | t.Error("expected template id#:310, got", v.TemplateID)
59 | }
60 | }
61 |
62 | func TestMemCacheAllSetIds(t *testing.T) {
63 | var tpl TemplateRecord
64 | ip := net.ParseIP("127.0.0.1")
65 | mCache := GetCache("cache.file")
66 |
67 | tpl.TemplateID = 310
68 | mCache.insert(tpl.TemplateID, ip, tpl)
69 | tpl.TemplateID = 410
70 | mCache.insert(tpl.TemplateID, ip, tpl)
71 | tpl.TemplateID = 210
72 | mCache.insert(tpl.TemplateID, ip, tpl)
73 |
74 | expected := []int{210, 310, 410}
75 | actual := mCache.allSetIds()
76 | if !reflect.DeepEqual(expected, actual) {
77 | t.Errorf("Expected set IDs %v, got %v", expected, actual)
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/kubernetes/deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: vflow
5 | labels:
6 | app: vflow
7 | spec:
8 | ports:
9 | - name: ipfix
10 | protocol: UDP
11 | port: 4739
12 | - name: sflow
13 | protocol: UDP
14 | port: 6343
15 | - name: netflowv5
16 | protocol: UDP
17 | port: 9996
18 | - name: netflowv9
19 | protocol: UDP
20 | port: 4729
21 | - name: stats
22 | protocol: TCP
23 | port: 8081
24 | selector:
25 | app: vflow
26 | ---
27 | apiVersion: apps/v1
28 | kind: Deployment
29 | metadata:
30 | name: vflow-deployment
31 | spec:
32 | selector:
33 | matchLabels:
34 | app: vflow
35 | replicas: 3
36 | template:
37 | metadata:
38 | labels:
39 | app: vflow
40 | annotations:
41 | prometheus.io/scrape: "true"
42 | prometheus.io/port: "8081"
43 | spec:
44 | hostname: vflow
45 | affinity:
46 | podAntiAffinity:
47 | requiredDuringSchedulingIgnoredDuringExecution:
48 | - labelSelector:
49 | matchExpressions:
50 | - key: app
51 | operator: In
52 | values:
53 | - vflow
54 | topologyKey: "kubernetes.io/hostname"
55 | containers:
56 | - name: vflow
57 | image: mehrdadrad/vflow:latest
58 | imagePullPolicy: Always
59 | ports:
60 | - containerPort: 4739
61 | protocol: UDP
62 | - containerPort: 6343
63 | protocol: UDP
64 | - containerPort: 9996
65 | protocol: UDP
66 | - containerPort: 4729
67 | protocol: UDP
68 | - containerPort: 8081
69 | volumeMounts:
70 | - name: vflow-config-volume
71 | mountPath: /etc/vflow
72 | volumes:
73 | - name: vflow-config-volume
74 | configMap:
75 | defaultMode: 420
76 | name: vflow-config
77 | ---
78 | apiVersion: v1
79 | kind: ConfigMap
80 | metadata:
81 | name: vflow-config
82 | data:
83 | vflow.conf: |-
84 | mq-name: nsq
85 |
--------------------------------------------------------------------------------
/mirror/doc.go:
--------------------------------------------------------------------------------
1 | // Package mirror replicates IPFIX packets, with source spoofing, to a 3rd party collector
2 | package mirror
3 |
--------------------------------------------------------------------------------
/mirror/ipv4.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ipv4.go
6 | //: details: mirror ipv4 handler
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package mirror
24 |
25 | import (
26 | "encoding/binary"
27 | "net"
28 | )
29 |
30 | // IPv4 represents the minimum IPv4 header fields
31 | // that need to be set up.
32 | type IPv4 struct {
33 | Version uint8
34 | IHL uint8
35 | TOS uint8
36 | Length uint16
37 | TTL uint8
38 | Protocol uint8
39 | }
40 |
41 | // NewIPv4HeaderTpl constructs IPv4 header template
42 | func NewIPv4HeaderTpl(proto int) IPv4 {
43 | return IPv4{
44 | Version: 4,
45 | IHL: 5,
46 | TOS: 0,
47 | TTL: 64,
48 | Protocol: uint8(proto),
49 | }
50 | }
51 |
52 | // Marshal encodes the IPv4 packet
53 | func (ip IPv4) Marshal() []byte {
54 | b := make([]byte, IPv4HLen)
55 | b[0] = byte((ip.Version << 4) | ip.IHL)
56 | b[1] = byte(ip.TOS)
57 | binary.BigEndian.PutUint16(b[2:], ip.Length)
58 | b[6] = byte(0)
59 | b[7] = byte(0)
60 | b[8] = byte(ip.TTL)
61 | b[9] = byte(ip.Protocol)
62 |
63 | return b
64 | }
65 |
66 | // SetLen sets the IPv4 header length
67 | func (ip IPv4) SetLen(b []byte, n int) {
68 | binary.BigEndian.PutUint16(b[2:], IPv4HLen+uint16(n))
69 | }
70 |
71 | // SetAddrs sets the source and destination address
72 | func (ip IPv4) SetAddrs(b []byte, src, dst net.IP) {
73 | copy(b[12:16], src[12:16])
74 | copy(b[16:20], dst[12:16])
75 | }
76 |
--------------------------------------------------------------------------------
/mirror/ipv6.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ipv6.go
6 | //: details: mirror ipv6 handler
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package mirror
24 |
25 | import (
26 | "encoding/binary"
27 | "net"
28 | )
29 |
30 | // IPv6 represents IP version 6 header
31 | type IPv6 struct {
32 | Version uint8
33 | TrafficClass uint8
34 | FlowLabel uint32
35 | PayloadLength uint16
36 | NextHeader uint8
37 | HopLimit uint8
38 | }
39 |
40 | // NewIPv6HeaderTpl returns a new IPv6 as template
41 | func NewIPv6HeaderTpl(proto int) IPv6 {
42 | return IPv6{
43 | Version: 6,
44 | TrafficClass: 0,
45 | FlowLabel: 0,
46 | NextHeader: uint8(proto),
47 | HopLimit: 64,
48 | }
49 | }
50 |
51 | // Marshal returns encoded IPv6
52 | func (ip IPv6) Marshal() []byte {
53 | b := make([]byte, IPv6HLen)
54 | b[0] = byte((ip.Version << 4) | (ip.TrafficClass >> 4))
55 | b[1] = byte((ip.TrafficClass << 4) | uint8(ip.FlowLabel>>16))
56 | binary.BigEndian.PutUint16(b[2:], uint16(ip.FlowLabel))
57 | b[6] = byte(ip.NextHeader)
58 | b[7] = byte(ip.HopLimit)
59 |
60 | return b
61 | }
62 |
63 | // SetLen sets IPv6 length
64 | func (ip IPv6) SetLen(b []byte, n int) {
65 | binary.BigEndian.PutUint16(b[4:], IPv6HLen+uint16(n))
66 | }
67 |
68 | // SetAddrs sets IPv6 src and dst addresses
69 | func (ip IPv6) SetAddrs(b []byte, src, dst net.IP) {
70 | copy(b[8:], src)
71 | copy(b[24:], dst)
72 | }
73 |
--------------------------------------------------------------------------------
/mirror/mirror.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: mirror.go
6 | //: details: mirror replicates IPFIX UDP packets to 3rd party collector
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package mirror
24 |
25 | import (
26 | "net"
27 | "syscall"
28 | )
29 |
30 | const (
31 | // IPv4HLen is IP version 4 header length
32 | IPv4HLen = 20
33 |
34 | // IPv6HLen is IP version 6 header length
35 | IPv6HLen = 40
36 |
37 | // UDPHLen is UDP header length
38 | UDPHLen = 8
39 |
40 | // UDPProto is UDP protocol IANA number
41 | UDPProto = 17
42 | )
43 |
44 | // Conn represents socket connection properties
45 | type Conn struct {
46 | family int
47 | sotype int
48 | proto int
49 | fd int
50 | raddr syscall.Sockaddr
51 | }
52 |
53 | // IP is network layer corresponding to IPv4/IPv6
54 | type IP interface {
55 | Marshal() []byte
56 | SetLen([]byte, int)
57 | SetAddrs([]byte, net.IP, net.IP)
58 | }
59 |
60 | // NewRawConn constructs new raw socket
61 | func NewRawConn(raddr net.IP) (Conn, error) {
62 | var err error
63 | conn := Conn{
64 | sotype: syscall.SOCK_RAW,
65 | proto: syscall.IPPROTO_RAW,
66 | }
67 |
68 | if ipv4 := raddr.To4(); ipv4 != nil {
69 | ip := [4]byte{}
70 | copy(ip[:], ipv4)
71 |
72 | conn.family = syscall.AF_INET
73 | conn.raddr = &syscall.SockaddrInet4{
74 | Port: 0,
75 | Addr: ip,
76 | }
77 | } else if ipv6 := raddr.To16(); ipv6 != nil {
78 | ip := [16]byte{}
79 | copy(ip[:], ipv6)
80 |
81 | conn.family = syscall.AF_INET6
82 | conn.raddr = &syscall.SockaddrInet6{
83 | Addr: ip,
84 | }
85 |
86 | }
87 |
88 | conn.fd, err = syscall.Socket(conn.family, conn.sotype, conn.proto)
89 |
90 | return conn, err
91 | }
92 |
93 | // Send tries to put the bytes to wire
94 | func (c *Conn) Send(b []byte) error {
95 | return syscall.Sendto(c.fd, b, 0, c.raddr)
96 | }
97 |
98 | // Close releases file descriptor
99 | func (c *Conn) Close(b []byte) error {
100 | return syscall.Close(c.fd)
101 | }
102 |
--------------------------------------------------------------------------------
/mirror/mirror_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: mirror_test.go
6 | //: details: provides support for automated testing of mirror methods
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package mirror
24 |
25 | import (
26 | "net"
27 | "strings"
28 | "syscall"
29 | "testing"
30 |
31 | "golang.org/x/net/ipv4"
32 | "golang.org/x/net/ipv6"
33 | )
34 |
35 | func TestNewRawConn(t *testing.T) {
36 | ip := net.ParseIP("127.0.0.1")
37 | c, err := NewRawConn(ip)
38 | if err != nil {
39 | if strings.Contains(err.Error(), "not permitted") {
40 | t.Log(err)
41 | return
42 | }
43 | t.Error("unexpected error", err)
44 | }
45 | if c.family != syscall.AF_INET {
46 | t.Error("expected family# 2, got", c.family)
47 | }
48 | }
49 |
50 | func TestNewRawConnIPv6(t *testing.T) {
51 | ip := net.ParseIP("2001:0db8:0000:0000:0000:ff00:0042:8329")
52 | c, err := NewRawConn(ip)
53 | if err != nil {
54 | if strings.Contains(err.Error(), "not permitted") {
55 | t.Log(err)
56 | return
57 | }
58 | t.Error("unexpected error", err)
59 | }
60 | if c.family != syscall.AF_INET6 {
61 | t.Error("expected family# 10, got", c.family)
62 | }
63 | }
64 |
65 | func TestIPv4Header(t *testing.T) {
66 | ipv4RawHeader := NewIPv4HeaderTpl(17)
67 | b := ipv4RawHeader.Marshal()
68 | h, err := ipv4.ParseHeader(b)
69 | if err != nil {
70 | t.Error("unexpected error", err)
71 | }
72 | if h.Version != 4 {
73 | t.Error("expect version: 4, got", h.Version)
74 | }
75 | if h.Protocol != 17 {
76 | t.Error("expect protocol: 17, got", h.Protocol)
77 | }
78 | if h.TTL != 64 {
79 | t.Error("expect TTL: 64, got", h.TTL)
80 | }
81 | if h.Len != 20 {
82 | t.Error("expect Len: 20, got", h.Len)
83 | }
84 | if h.Checksum != 0 {
85 | t.Error("expect Checksum: 0, got", h.Checksum)
86 | }
87 | }
88 |
89 | func TestIPv6Header(t *testing.T) {
90 | ipv6RawHeader := NewIPv6HeaderTpl(17)
91 | b := ipv6RawHeader.Marshal()
92 | h, err := ipv6.ParseHeader(b)
93 | if err != nil {
94 | t.Error("unexpected error", err)
95 | }
96 | if h.Version != 6 {
97 | t.Error("expect version: 4, got", h.Version)
98 | }
99 | if h.NextHeader != 17 {
100 | t.Error("expect protocol: 17, got", h.NextHeader)
101 | }
102 | if h.HopLimit != 64 {
103 | t.Error("expect TTL: 64, got", h.HopLimit)
104 | }
105 | }
106 |
107 | func TestSetAddrs(t *testing.T) {
108 | src := net.ParseIP("10.11.12.13")
109 | dst := net.ParseIP("192.17.11.1")
110 | ipv4RawHeader := NewIPv4HeaderTpl(17)
111 | b := ipv4RawHeader.Marshal()
112 | ipv4RawHeader.SetAddrs(b, src, dst)
113 | h, err := ipv4.ParseHeader(b)
114 | if err != nil {
115 | t.Error("unexpected error", err)
116 | }
117 |
118 | if h.Src.String() != "10.11.12.13" {
119 | t.Error("expect src 10.11.12.13, got", h.Src.String())
120 | }
121 | if h.Dst.String() != "192.17.11.1" {
122 | t.Error("expect dst 192.17.11.1, got", h.Src.String())
123 | }
124 | }
125 |
126 | func TestSetLen(t *testing.T) {
127 | ipv4RawHeader := NewIPv4HeaderTpl(17)
128 | b := ipv4RawHeader.Marshal()
129 | ipv4RawHeader.SetLen(b, 15)
130 | h, err := ipv4.ParseHeader(b)
131 | if err != nil {
132 | t.Error("unexpected error", err)
133 | }
134 |
135 | if h.TotalLen != 35 {
136 | t.Error("expect total len 35, got", h.TotalLen)
137 | }
138 | }
139 |
--------------------------------------------------------------------------------
/mirror/udp.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: udp.go
6 | //: details: mirror udp handler
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package mirror
24 |
25 | import "encoding/binary"
26 |
27 | // UDP represents UDP header
28 | type UDP struct {
29 | SrcPort int
30 | DstPort int
31 | Length int
32 | Checksum int
33 | }
34 |
35 | // Marshal returns the encoded UDP header
36 | func (u *UDP) Marshal() []byte {
37 | b := make([]byte, UDPHLen)
38 |
39 | binary.BigEndian.PutUint16(b[0:], uint16(u.SrcPort))
40 | binary.BigEndian.PutUint16(b[2:], uint16(u.DstPort))
41 | binary.BigEndian.PutUint16(b[4:], uint16(UDPHLen+u.Length))
42 | binary.BigEndian.PutUint16(b[6:], uint16(u.Checksum))
43 |
44 | return b
45 | }
46 |
47 | // SetLen sets the payload length
48 | func (u *UDP) SetLen(b []byte, n int) {
49 | binary.BigEndian.PutUint16(b[4:], uint16(UDPHLen+n))
50 | }
51 |
52 | // SetChecksum calculates and sets the UDP checksum
53 | func (u *UDP) SetChecksum() {
54 | // TODO
55 | }
56 |
--------------------------------------------------------------------------------
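Putting the mirror pieces above together, a minimal sketch of replicating a UDP payload with a spoofed source address might look like this (raw sockets require root; addresses and ports here are illustrative):

```
package main

import (
	"log"
	"net"

	"github.com/EdgeCast/vflow/mirror"
)

func main() {
	src := net.ParseIP("192.0.2.1")  // spoofed source (the original exporter)
	dst := net.ParseIP("192.0.2.10") // 3rd party collector
	payload := []byte("raw IPFIX message bytes")

	// Build the IPv4 header template and fill in addresses and total length.
	ipHdr := mirror.NewIPv4HeaderTpl(mirror.UDPProto)
	ipb := ipHdr.Marshal()
	ipHdr.SetAddrs(ipb, src, dst)
	ipHdr.SetLen(ipb, mirror.UDPHLen+len(payload))

	// Build the UDP header and set the payload length.
	udp := mirror.UDP{SrcPort: 4739, DstPort: 4739}
	udpb := udp.Marshal()
	udp.SetLen(udpb, len(payload))

	// Concatenate headers and payload, then send over a raw socket.
	pkt := append(ipb, append(udpb, payload...)...)

	conn, err := mirror.NewRawConn(dst)
	if err != nil {
		log.Fatal(err)
	}
	if err := conn.Send(pkt); err != nil {
		log.Fatal(err)
	}
}
```
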
/monitor/monitor.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: monitor.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package main
24 |
25 | import (
26 | "flag"
27 | "log"
28 | "os"
29 |
30 | "github.com/EdgeCast/vflow/monitor/store"
31 | )
32 |
33 | type options struct {
34 | DBType string
35 | VFlowHost string
36 | InfluxDBAPI string
37 | InfluxDBName string
38 | TSDBAPI string
39 | Hostname string
40 | }
41 |
42 | var opts = options{
43 | DBType: "influxdb",
44 | VFlowHost: "http://localhost:8081",
45 | InfluxDBAPI: "http://localhost:8086",
46 | TSDBAPI: "http://localhost:4242",
47 | InfluxDBName: "vflow",
48 | Hostname: "na",
49 | }
50 |
51 | func init() {
52 |
53 | flag.StringVar(&opts.DBType, "db-type", opts.DBType, "database type name to ingest")
54 | flag.StringVar(&opts.VFlowHost, "vflow-host", opts.VFlowHost, "vflow host address and port")
55 | flag.StringVar(&opts.InfluxDBAPI, "influxdb-api-addr", opts.InfluxDBAPI, "influxdb api address")
56 | flag.StringVar(&opts.InfluxDBName, "influxdb-db-name", opts.InfluxDBName, "influxdb database name")
57 | flag.StringVar(&opts.TSDBAPI, "tsdb-api-addr", opts.TSDBAPI, "tsdb api address")
58 | flag.StringVar(&opts.Hostname, "hostname", opts.Hostname, "overwrite hostname")
59 |
60 | flag.Parse()
61 | }
62 |
63 | func main() {
64 | var m = make(map[string]store.Monitor)
65 | var err error
66 |
67 | m["influxdb"] = store.InfluxDB{
68 | API: opts.InfluxDBAPI,
69 | DB: opts.InfluxDBName,
70 | VHost: opts.VFlowHost,
71 | }
72 |
73 | m["tsdb"] = store.TSDB{
74 | API: opts.TSDBAPI,
75 | VHost: opts.VFlowHost,
76 | }
77 |
78 | if _, ok := m[opts.DBType]; !ok {
79 | log.Fatalf("the storage: %s is not available", opts.DBType)
80 | }
81 |
82 | if opts.Hostname == "na" {
83 | if opts.Hostname, err = os.Hostname(); err != nil {
84 | log.Println("unknown hostname")
85 | }
86 | }
87 |
88 | if err := m[opts.DBType].Netflow(opts.Hostname); err != nil {
89 | log.Println(err)
90 | }
91 | if err := m[opts.DBType].System(opts.Hostname); err != nil {
92 | log.Println(err)
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/monitor/store/doc.go:
--------------------------------------------------------------------------------
1 | // Package store ingests monitoring time series data points into different back-ends
2 | package store
3 |
--------------------------------------------------------------------------------
/monitor/store/influxdb.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: influxdb.go
6 | //: details: influx ingest handler
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package store
24 |
25 | import (
26 | "errors"
27 | "fmt"
28 | "os"
29 | )
30 |
31 | // InfluxDB represents InfluxDB backend
32 | type InfluxDB struct {
33 | API string
34 | DB string
35 | VHost string
36 | }
37 |
38 | // Netflow ingests flow's stats to InfluxDB
39 | func (i InfluxDB) Netflow(hostname string) error {
40 | flow, lastFlow, err := getFlow(i.VHost, hostname)
41 | if err != nil {
42 | return err
43 | }
44 |
45 | delta := flow.Timestamp - lastFlow.Timestamp
46 |
47 | value := abs((flow.IPFIX.UDPCount - lastFlow.IPFIX.UDPCount) / delta)
48 | query := fmt.Sprintf("udp.rate,type=ipfix,host=%s value=%d\n", hostname, value)
49 | value = abs((flow.SFlow.UDPCount - lastFlow.SFlow.UDPCount) / delta)
50 | query += fmt.Sprintf("udp.rate,type=sflow,host=%s value=%d\n", hostname, value)
51 | value = abs((flow.NetflowV5.UDPCount - lastFlow.NetflowV5.UDPCount) / delta)
52 | query += fmt.Sprintf("udp.rate,type=netflowv5,host=%s value=%d\n", hostname, value)
53 | value = abs((flow.NetflowV9.UDPCount - lastFlow.NetflowV9.UDPCount) / delta)
54 | query += fmt.Sprintf("udp.rate,type=netflowv9,host=%s value=%d\n", hostname, value)
55 |
56 | value = abs((flow.IPFIX.DecodedCount - lastFlow.IPFIX.DecodedCount) / delta)
57 | query += fmt.Sprintf("decode.rate,type=ipfix,host=%s value=%d\n", hostname, value)
58 | value = abs((flow.SFlow.DecodedCount - lastFlow.SFlow.DecodedCount) / delta)
59 | query += fmt.Sprintf("decode.rate,type=sflow,host=%s value=%d\n", hostname, value)
60 | value = abs((flow.NetflowV5.DecodedCount - lastFlow.NetflowV5.DecodedCount) / delta)
61 | query += fmt.Sprintf("decode.rate,type=netflowv5,host=%s value=%d\n", hostname, value)
62 | value = abs((flow.NetflowV9.DecodedCount - lastFlow.NetflowV9.DecodedCount) / delta)
63 | query += fmt.Sprintf("decode.rate,type=netflowv9,host=%s value=%d\n", hostname, value)
64 |
65 | value = abs((flow.IPFIX.MQErrorCount - lastFlow.IPFIX.MQErrorCount) / delta)
66 | query += fmt.Sprintf("mq.error.rate,type=ipfix,host=%s value=%d\n", hostname, value)
67 | value = abs((flow.SFlow.MQErrorCount - lastFlow.SFlow.MQErrorCount) / delta)
68 | query += fmt.Sprintf("mq.error.rate,type=sflow,host=%s value=%d\n", hostname, value)
69 | value = abs((flow.NetflowV5.MQErrorCount - lastFlow.NetflowV5.MQErrorCount) / delta)
70 | query += fmt.Sprintf("mq.error.rate,type=netflowv5,host=%s value=%d\n", hostname, value)
71 | value = abs((flow.NetflowV9.MQErrorCount - lastFlow.NetflowV9.MQErrorCount) / delta)
72 | query += fmt.Sprintf("mq.error.rate,type=netflowv9,host=%s value=%d\n", hostname, value)
73 |
74 | query += fmt.Sprintf("workers,type=ipfix,host=%s value=%d\n", hostname, flow.IPFIX.Workers)
75 | query += fmt.Sprintf("workers,type=sflow,host=%s value=%d\n", hostname, flow.SFlow.Workers)
76 | query += fmt.Sprintf("workers,type=netflowv5,host=%s value=%d\n", hostname, flow.NetflowV5.Workers)
77 | query += fmt.Sprintf("workers,type=netflowv9,host=%s value=%d\n", hostname, flow.NetflowV9.Workers)
78 |
79 | query += fmt.Sprintf("udp.queue,type=ipfix,host=%s value=%d\n", hostname, flow.IPFIX.UDPQueue)
80 | query += fmt.Sprintf("udp.queue,type=sflow,host=%s value=%d\n", hostname, flow.SFlow.UDPQueue)
81 | query += fmt.Sprintf("udp.queue,type=netflowv5,host=%s value=%d\n", hostname, flow.NetflowV5.UDPQueue)
82 | query += fmt.Sprintf("udp.queue,type=netflowv9,host=%s value=%d\n", hostname, flow.NetflowV9.UDPQueue)
83 |
84 | query += fmt.Sprintf("mq.queue,type=ipfix,host=%s value=%d\n", hostname, flow.IPFIX.MessageQueue)
85 | query += fmt.Sprintf("mq.queue,type=sflow,host=%s value=%d\n", hostname, flow.SFlow.MessageQueue)
86 | query += fmt.Sprintf("mq.queue,type=netflowv5,host=%s value=%d\n", hostname, flow.NetflowV5.MessageQueue)
87 | query += fmt.Sprintf("mq.queue,type=netflowv9,host=%s value=%d\n", hostname, flow.NetflowV9.MessageQueue)
88 |
89 | query += fmt.Sprintf("udp.mirror.queue,type=ipfix,host=%s value=%d\n", hostname, flow.IPFIX.UDPMirrorQueue)
90 |
91 | api := fmt.Sprintf("%s/write?db=%s", i.API, i.DB)
92 | client := NewHTTP()
93 | b, err := client.Post(api, "text/plain", query)
94 | if err != nil {
95 | return err
96 | }
97 |
98 | if len(b) > 0 {
99 | return errors.New("influxdb error: " + string(b))
100 | }
101 |
102 | return nil
103 | }
104 |
105 | // System ingests the system stats into InfluxDB
106 | func (i InfluxDB) System(hostname string) error {
107 | sys := new(Sys)
108 | client := NewHTTP()
109 | err := client.Get(i.VHost+"/sys", sys)
110 | if err != nil {
111 | return err
112 | }
113 |
114 | query := fmt.Sprintf("mem.heap.alloc,host=%s value=%d\n", hostname, sys.MemHeapAlloc)
115 | query += fmt.Sprintf("mem.alloc,host=%s value=%d\n", hostname, sys.MemAlloc)
116 | query += fmt.Sprintf("mcache.inuse,host=%s value=%d\n", hostname, sys.MCacheInuse)
117 | query += fmt.Sprintf("mem.total.alloc,host=%s value=%d\n", hostname, sys.MemTotalAlloc)
118 | query += fmt.Sprintf("mem.heap.sys,host=%s value=%d\n", hostname, sys.MemHeapSys)
119 | query += fmt.Sprintf("num.goroutine,host=%s value=%d\n", hostname, sys.NumGoroutine)
120 |
121 | api := fmt.Sprintf("%s/write?db=%s", i.API, i.DB)
122 | b, err := client.Post(api, "text/plain", query)
123 | if err != nil {
124 | return err
125 | }
126 |
127 | if len(b) > 0 {
128 | return errors.New("influxdb error: " + string(b))
129 | }
130 |
131 | return nil
132 | }
133 |
134 | func abs(a int64) int64 {
135 | if a < 0 {
136 | os.Exit(1)
137 | }
138 |
139 | return a
140 | }
141 |
--------------------------------------------------------------------------------
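The Sprintf chains in Netflow and System above build InfluxDB line protocol, one point per line, and POST it to the /write endpoint; a non-empty response body is treated as an error. A minimal, self-contained sketch of that round trip (the host, port and database name are illustrative assumptions, not values taken from this repository):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Build two line-protocol points the same way InfluxDB.Netflow does:
	// measurement,tag=value,... value=<n>
	body := fmt.Sprintf("udp.rate,type=ipfix,host=%s value=%d\n", "collector1", 4200)
	body += fmt.Sprintf("decode.rate,type=ipfix,host=%s value=%d\n", "collector1", 4100)

	// POST to the write endpoint; assumes a local InfluxDB and a database named "vflow".
	resp, err := http.Post("http://localhost:8086/write?db=vflow", "text/plain",
		bytes.NewBufferString(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Mirror the check above: any response body indicates an InfluxDB error.
	if b, _ := ioutil.ReadAll(resp.Body); len(b) > 0 {
		fmt.Println("influxdb error:", string(b))
	}
}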
/monitor/store/store.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: store.go
6 | //: details: interface to other store back-end
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package store
24 |
25 | import (
26 | "bytes"
27 | "encoding/json"
28 | "io/ioutil"
29 | "net/http"
30 | "os"
31 | "time"
32 | )
33 |
34 | // Monitor is an interface to store system
35 | // and netflow statistics
36 | type Monitor interface {
37 | System(string) error
38 | Netflow(string) error
39 | }
40 |
41 | // IPFIX represents IPFIX metrics
42 | type IPFIX struct {
43 | UDPQueue int64
44 | UDPMirrorQueue int64
45 | MessageQueue int64
46 | UDPCount int64
47 | DecodedCount int64
48 | MQErrorCount int64
49 | Workers int64
50 | }
51 |
52 | // SFlow represents SFlow metrics
53 | type SFlow struct {
54 | UDPQueue int64
55 | MessageQueue int64
56 | UDPCount int64
57 | DecodedCount int64
58 | MQErrorCount int64
59 | Workers int64
60 | }
61 |
62 | // NetflowV5 represents Netflow v5 metrics
63 | type NetflowV5 struct {
64 | UDPQueue int64
65 | MessageQueue int64
66 | UDPCount int64
67 | DecodedCount int64
68 | MQErrorCount int64
69 | Workers int64
70 | }
71 |
72 | // NetflowV9 represents Netflow v9 metrics
73 | type NetflowV9 struct {
74 | UDPQueue int64
75 | MessageQueue int64
76 | UDPCount int64
77 | DecodedCount int64
78 | MQErrorCount int64
79 | Workers int64
80 | }
81 |
82 | // Flow represents flow (IPFIX+sFlow) metrics
83 | type Flow struct {
84 | StartTime int64
85 | Timestamp int64
86 | IPFIX IPFIX
87 | SFlow SFlow
88 | NetflowV5 NetflowV5
89 | NetflowV9 NetflowV9
90 | }
91 |
92 | // Sys represents system/go-runtime statistics
93 | type Sys struct {
94 | MemHeapAlloc int64
95 | MemAlloc int64
96 | MCacheInuse int64
97 | GCNext int64
98 | MemTotalAlloc int64
99 | GCSys int64
100 | MemHeapSys int64
101 | NumGoroutine int64
102 | NumLogicalCPU int64
103 | MemHeapReleased int64
104 | }
105 |
106 | // Client represents HTTP client
107 | type Client struct {
108 | client *http.Client
109 | }
110 |
111 | // NewHTTP constructs HTTP client
112 | func NewHTTP() *Client {
113 | return &Client{
114 | client: new(http.Client),
115 | }
116 | }
117 |
118 | // Get retrieves metrics over HTTP with the GET method and decodes the JSON response into s
119 | func (c *Client) Get(url string, s interface{}) error {
120 |
121 | resp, err := c.client.Get(url)
122 | if err != nil {
123 | return err
124 | }
125 | defer resp.Body.Close()
126 |
127 | body, err := ioutil.ReadAll(resp.Body)
128 | if err != nil {
129 | return err
130 | }
131 |
132 | err = json.Unmarshal(body, &s)
133 | if err != nil {
134 | return err
135 | }
136 |
137 | return nil
138 | }
139 |
140 | // Post sends metrics over HTTP with the POST method and returns the response body
141 | func (c *Client) Post(url string, cType, query string) ([]byte, error) {
142 | resp, err := c.client.Post(url, cType, bytes.NewBufferString(query))
143 | if err != nil {
144 | return nil, err
145 | }
146 |
147 | defer resp.Body.Close()
148 |
149 | body, err := ioutil.ReadAll(resp.Body)
150 | if err != nil {
151 | return nil, err
152 | }
153 |
154 | return body, nil
155 | }
156 |
157 | func getFlow(vhost, host string) (*Flow, *Flow, error) {
158 | lastFlowFile := "/tmp/vflow.mon.lastflow." + host
159 |
160 | flow := new(Flow)
161 | lastFlow := new(Flow)
162 |
163 | client := NewHTTP()
164 | err := client.Get(vhost+"/flow", flow)
165 | if err != nil {
166 | return nil, nil, err
167 | }
168 |
169 | flow.Timestamp = time.Now().Unix()
170 |
171 | b, err := ioutil.ReadFile(lastFlowFile)
172 | if err != nil {
173 | b, _ = json.Marshal(flow)
174 | ioutil.WriteFile(lastFlowFile, b, 0644)
175 | return nil, nil, err
176 | }
177 |
178 | err = json.Unmarshal(b, &lastFlow)
179 | if err != nil {
180 | return nil, nil, err
181 | }
182 |
183 | b, err = json.Marshal(flow)
184 | if err != nil {
185 | return nil, nil, err
186 | }
187 |
188 | ioutil.WriteFile(lastFlowFile, b, 0644)
189 |
190 | 	// exit once vFlow has restarted, since the saved baseline is no longer valid
191 | if flow.StartTime != lastFlow.StartTime {
192 | os.Exit(1)
193 | }
194 |
195 | return flow, lastFlow, err
196 | }
197 |
--------------------------------------------------------------------------------
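Both the InfluxDB and TSDB types in this package satisfy the Monitor interface above, so the monitor only has to pick a backend and call Netflow and System on a schedule. A minimal sketch of that loop, assuming the vFlow stats API listens on http://localhost:8081 (hypothetical) and using the InfluxDB backend; the import path below is an assumption, so take the real one from go.mod:

package main

import (
	"log"
	"os"
	"time"

	// Assumed import path; replace it with the module path declared in go.mod.
	"github.com/VerizonDigital/vflow/monitor/store"
)

func main() {
	hostname, err := os.Hostname()
	if err != nil {
		log.Fatal(err)
	}

	// Any store.Monitor implementation (InfluxDB, TSDB, ...) fits here.
	var m store.Monitor = store.InfluxDB{
		API:   "http://localhost:8086",
		DB:    "vflow",
		VHost: "http://localhost:8081",
	}

	for range time.Tick(time.Minute) {
		if err := m.Netflow(hostname); err != nil {
			log.Println("netflow stats:", err)
		}
		if err := m.System(hostname); err != nil {
			log.Println("system stats:", err)
		}
	}
}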
/monitor/store/tsdb.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: tsdb.go
6 | //: details: TSDB ingest handler
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package store
24 |
25 | import (
26 | "encoding/json"
27 | "errors"
28 | "fmt"
29 | "time"
30 | )
31 |
32 | // TSDB represents TSDB ingestion
33 | type TSDB struct {
34 | API string
35 | VHost string
36 | }
37 |
38 | // TSDBDataPoint represents single TSDB data point
39 | type TSDBDataPoint struct {
40 | Metric string `json:"metric"`
41 | Timestamp int64 `json:"timestamp"`
42 | Value int64 `json:"value"`
43 | Tags struct {
44 | Host string `json:"host"`
45 | Type string `json:"type"`
46 | }
47 | }
48 |
49 | // TSDBResp represents a TSDB response
50 | type TSDBResp struct {
51 | Failed int `json:"failed"`
52 | Success int `json:"success"`
53 | }
54 |
55 | // Netflow ingests the flow stats into TSDB
56 | func (t TSDB) Netflow(hostname string) error {
57 | var (
58 | dps []TSDBDataPoint
59 | values []int64
60 | )
61 |
62 | flow, lastFlow, err := getFlow(t.VHost, hostname)
63 | if err != nil {
64 | return err
65 | }
66 |
67 | delta := flow.Timestamp - lastFlow.Timestamp
68 |
69 | metrics := [][]string{
70 | {"ipfix", "udp.rate"},
71 | {"sflow", "udp.rate"},
72 | {"ipfix", "decode.rate"},
73 | {"sflow", "decode.rate"},
74 | {"ipfix", "mq.error.rate"},
75 | {"sflow", "mq.error.rate"},
76 | {"ipfix", "workers"},
77 | {"sflow", "workers"},
78 | }
79 |
80 | values = append(values, abs((flow.IPFIX.UDPCount-lastFlow.IPFIX.UDPCount)/delta))
81 | values = append(values, abs((flow.SFlow.UDPCount-lastFlow.SFlow.UDPCount)/delta))
82 | values = append(values, abs((flow.IPFIX.DecodedCount-lastFlow.IPFIX.DecodedCount)/delta))
83 | values = append(values, abs((flow.SFlow.DecodedCount-lastFlow.SFlow.DecodedCount)/delta))
84 | values = append(values, abs((flow.IPFIX.MQErrorCount-lastFlow.IPFIX.MQErrorCount)/delta))
85 | values = append(values, abs((flow.SFlow.MQErrorCount-lastFlow.SFlow.MQErrorCount)/delta))
86 | values = append(values, flow.IPFIX.Workers)
87 | values = append(values, flow.SFlow.Workers)
88 |
89 | for i, m := range metrics {
90 | dps = append(dps, TSDBDataPoint{
91 | Metric: m[1],
92 | Timestamp: time.Now().Unix(),
93 | Value: values[i],
94 | Tags: struct {
95 | Host string `json:"host"`
96 | Type string `json:"type"`
97 | }{
98 | Host: hostname,
99 | Type: m[0],
100 | },
101 | })
102 |
103 | }
104 |
105 | err = t.put(dps)
106 |
107 | return err
108 | }
109 |
110 | // System ingests the system stats into TSDB
111 | func (t TSDB) System(hostname string) error {
112 | var dps []TSDBDataPoint
113 |
114 | sys := new(Sys)
115 | client := NewHTTP()
116 | err := client.Get(t.VHost+"/sys", sys)
117 | if err != nil {
118 | return err
119 | }
120 |
121 | metrics := []string{
122 | "mem.heap.alloc",
123 | "mem.alloc",
124 | "mcache.inuse",
125 | "mem.total.alloc",
126 | "mem.heap.sys",
127 | "num.goroutine",
128 | }
129 |
130 | values := []int64{
131 | sys.MemHeapAlloc,
132 | sys.MemAlloc,
133 | sys.MCacheInuse,
134 | sys.MemTotalAlloc,
135 | sys.MemHeapSys,
136 | sys.NumGoroutine,
137 | }
138 |
139 | for i, m := range metrics {
140 | dps = append(dps, TSDBDataPoint{
141 | Metric: m,
142 | Timestamp: time.Now().Unix(),
143 | Value: values[i],
144 | Tags: struct {
145 | Host string `json:"host"`
146 | Type string `json:"type"`
147 | }{
148 | Host: hostname,
149 | },
150 | })
151 |
152 | }
153 |
154 | err = t.put(dps)
155 |
156 | return err
157 | }
158 |
159 | func (t TSDB) put(dps []TSDBDataPoint) error {
160 | b, err := json.Marshal(dps)
161 | if err != nil {
162 | return err
163 | }
164 |
165 | api := fmt.Sprintf("%s/api/put", t.API)
166 | client := NewHTTP()
167 | b, err = client.Post(api, "application/json", string(b))
168 | if err != nil {
169 | return err
170 | }
171 |
172 | resp := TSDBResp{}
173 | 	json.Unmarshal(b, &resp) // pass a pointer so the response is actually decoded
174 |
175 | if resp.Failed > 0 {
176 | return errors.New("TSDB error")
177 | }
178 |
179 | return nil
180 | }
181 |
--------------------------------------------------------------------------------
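put above marshals the collected data points into the JSON array that OpenTSDB's /api/put endpoint expects and then inspects the failed counter in the response. A small sketch of what one element of that payload looks like (metric, value and tags are illustrative); note that the Tags field keeps its Go name in the JSON because the struct above gives that field no json tag:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Mirror of the TSDBDataPoint layout defined above.
	var dp struct {
		Metric    string `json:"metric"`
		Timestamp int64  `json:"timestamp"`
		Value     int64  `json:"value"`
		Tags      struct {
			Host string `json:"host"`
			Type string `json:"type"`
		}
	}

	dp.Metric = "udp.rate"
	dp.Timestamp = time.Now().Unix()
	dp.Value = 4200
	dp.Tags.Host = "collector1"
	dp.Tags.Type = "ipfix"

	b, _ := json.Marshal([]interface{}{dp})
	// Prints something like:
	// [{"metric":"udp.rate","timestamp":1700000000,"value":4200,"Tags":{"host":"collector1","type":"ipfix"}}]
	fmt.Println(string(b))
}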
/netflow/v5/doc.go:
--------------------------------------------------------------------------------
1 | // Package netflow5 decodes netflow version v5 packets
2 | package netflow5
3 |
--------------------------------------------------------------------------------
/netflow/v5/marshal.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: marshal.go
6 | //: details: encoding of each decoded netflow v5 flow set
7 | //: author: Christopher Noel
8 | //: date: 12/10/2018
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package netflow5
24 |
25 | import (
26 | "bytes"
27 | "encoding/binary"
28 | "errors"
29 | "net"
30 | "strconv"
31 | )
32 |
33 | var errUknownMarshalDataType = errors.New("unknown data type to marshal")
34 |
35 | // JSONMarshal encodes netflow v5 message
36 | func (m *Message) JSONMarshal(b *bytes.Buffer) ([]byte, error) {
37 | b.WriteString("{")
38 |
39 | // encode agent id
40 | m.encodeAgent(b)
41 |
42 | // encode header
43 | m.encodeHeader(b)
44 |
45 | // encode flows
46 |
47 | if err := m.encodeFlows(b); err != nil {
48 | return nil, err
49 | }
50 |
51 | b.WriteString("}")
52 |
53 | return b.Bytes(), nil
54 | }
55 |
56 | func (m *Message) encodeHeader(b *bytes.Buffer) {
57 | b.WriteString("\"Header\":{\"Version\":")
58 | b.WriteString(strconv.FormatInt(int64(m.Header.Version), 10))
59 | b.WriteString(",\"Count\":")
60 | b.WriteString(strconv.FormatInt(int64(m.Header.Count), 10))
61 | b.WriteString(",\"SysUpTimeMSecs\":")
62 | b.WriteString(strconv.FormatInt(int64(m.Header.SysUpTimeMSecs), 10))
63 | b.WriteString(",\"UNIXSecs\":")
64 | b.WriteString(strconv.FormatInt(int64(m.Header.UNIXSecs), 10))
65 | b.WriteString(",\"UNIXNSecs\":")
66 | b.WriteString(strconv.FormatInt(int64(m.Header.UNIXNSecs), 10))
67 | b.WriteString(",\"SeqNum\":")
68 | b.WriteString(strconv.FormatInt(int64(m.Header.SeqNum), 10))
69 | b.WriteString(",\"EngType\":")
70 | b.WriteString(strconv.FormatInt(int64(m.Header.EngType), 10))
71 | b.WriteString(",\"EngID\":")
72 | b.WriteString(strconv.FormatInt(int64(m.Header.EngID), 10))
73 | b.WriteString(",\"SmpInt\":")
74 | b.WriteString(strconv.FormatInt(int64(m.Header.SmpInt), 10))
75 | b.WriteString("},")
76 | }
77 |
78 | func (m *Message) encodeAgent(b *bytes.Buffer) {
79 | b.WriteString("\"AgentID\":\"")
80 | b.WriteString(m.AgentID)
81 | b.WriteString("\",")
82 | }
83 |
84 | func (m *Message) encodeFlow(r FlowRecord, b *bytes.Buffer) {
85 |
86 | ip := make(net.IP, 4)
87 |
88 | b.WriteString("\"SrcAddr\":\"")
89 | binary.BigEndian.PutUint32(ip, r.SrcAddr)
90 | b.WriteString(ip.String())
91 |
92 | b.WriteString("\",\"DstAddr\":\"")
93 | binary.BigEndian.PutUint32(ip, r.DstAddr)
94 | b.WriteString(ip.String())
95 |
96 | b.WriteString("\",\"NextHop\":\"")
97 | binary.BigEndian.PutUint32(ip, r.NextHop)
98 | b.WriteString(ip.String())
99 |
100 | b.WriteString("\",\"Input\":")
101 | b.WriteString(strconv.FormatInt(int64(r.Input), 10))
102 | b.WriteString(",\"Output\":")
103 | b.WriteString(strconv.FormatInt(int64(r.Output), 10))
104 | b.WriteString(",\"PktCount\":")
105 | b.WriteString(strconv.FormatInt(int64(r.PktCount), 10))
106 | b.WriteString(",\"L3Octets\":")
107 | b.WriteString(strconv.FormatInt(int64(r.L3Octets), 10))
108 |
109 | 	// If these ever need to be translated to actual time,
110 | 	// this will require knowing some information from
111 | 	// the header; the basic formula is believed to be
112 | 	// UnixSecs - SysUpTime + StartTime.
113 |
114 | b.WriteString(",\"StartTime\":")
115 | b.WriteString(strconv.FormatInt(int64(r.StartTime), 10))
116 | b.WriteString(",\"EndTime\":")
117 | b.WriteString(strconv.FormatInt(int64(r.EndTime), 10))
118 |
119 | b.WriteString(",\"SrcPort\":")
120 | b.WriteString(strconv.FormatInt(int64(r.SrcPort), 10))
121 | b.WriteString(",\"DstPort\":")
122 | b.WriteString(strconv.FormatInt(int64(r.DstPort), 10))
123 | b.WriteString(",\"Padding1\":")
124 | b.WriteString(strconv.FormatInt(int64(r.Padding1), 10))
125 | b.WriteString(",\"TCPFlags\":")
126 | b.WriteString(strconv.FormatInt(int64(r.TCPFlags), 10))
127 | b.WriteString(",\"ProtType\":")
128 | b.WriteString(strconv.FormatInt(int64(r.ProtType), 10))
129 | b.WriteString(",\"Tos\":")
130 | b.WriteString(strconv.FormatInt(int64(r.Tos), 10))
131 | b.WriteString(",\"SrcAsNum\":")
132 | b.WriteString(strconv.FormatInt(int64(r.SrcAsNum), 10))
133 | b.WriteString(",\"DstAsNum\":")
134 | b.WriteString(strconv.FormatInt(int64(r.DstAsNum), 10))
135 | b.WriteString(",\"SrcMask\":")
136 | b.WriteString(strconv.FormatInt(int64(r.SrcMask), 10))
137 | b.WriteString(",\"DstMask\":")
138 | b.WriteString(strconv.FormatInt(int64(r.DstMask), 10))
139 | b.WriteString(",\"Padding2\":")
140 | b.WriteString(strconv.FormatInt(int64(r.Padding2), 10))
141 | }
142 |
143 | func (m *Message) encodeFlows(b *bytes.Buffer) error {
144 | var (
145 | fLength int
146 | err error
147 | )
148 |
149 | b.WriteString("\"Flows\":")
150 | fLength = len(m.Flows)
151 |
152 | b.WriteByte('[')
153 |
154 | for i := range m.Flows {
155 | b.WriteString("{")
156 | m.encodeFlow(m.Flows[i], b)
157 | b.WriteString("}")
158 | if i < fLength-1 {
159 | b.WriteString(",")
160 | }
161 | }
162 |
163 | b.WriteByte(']')
164 |
165 | return err
166 | }
167 |
--------------------------------------------------------------------------------
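JSONMarshal writes into a caller-supplied bytes.Buffer, so a caller can reuse one buffer across messages. A minimal usage sketch, written as if it lived inside this package and assuming m is a *Message produced by this package's decoder:

// exampleEncode shows the intended calling pattern for JSONMarshal.
func exampleEncode(m *Message, buf *bytes.Buffer) ([]byte, error) {
	buf.Reset()

	out, err := m.JSONMarshal(buf)
	if err != nil {
		return nil, err
	}

	// out aliases buf's backing array; copy it if buf will be reused
	// before the encoded message has been consumed.
	return append([]byte(nil), out...), nil
}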
/netflow/v9/decoder_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: decoder_test.go
6 | //: details: netflow v9 decoder tests and benchmarks
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 05/05/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package netflow9
24 |
25 | import (
26 | "net"
27 | "testing"
28 | )
29 |
30 | func TestDecodeNoData(t *testing.T) {
31 | ip := net.ParseIP("127.0.0.1")
32 | mCache := GetCache("cache.file")
33 | body := []byte{}
34 | d := NewDecoder(ip, body)
35 | if _, err := d.Decode(mCache); err == nil {
36 | 		t.Error("expected an error but got nothing")
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/netflow/v9/doc.go:
--------------------------------------------------------------------------------
1 | // Package netflow9 decodes netflow version v9 packets
2 | package netflow9
3 |
--------------------------------------------------------------------------------
/netflow/v9/marshal.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: marshal.go
6 | //: details: encoding of each decoded netflow v9 data sets
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 04/27/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package netflow9
24 |
25 | import (
26 | "bytes"
27 | "encoding/hex"
28 | "errors"
29 | "net"
30 | "strconv"
31 | )
32 |
33 | var errUknownMarshalDataType = errors.New("unknown data type to marshal")
34 |
35 | // JSONMarshal encodes netflow v9 message
36 | func (m *Message) JSONMarshal(b *bytes.Buffer) ([]byte, error) {
37 | b.WriteString("{")
38 |
39 | // encode agent id
40 | m.encodeAgent(b)
41 |
42 | // encode header
43 | m.encodeHeader(b)
44 |
45 | // encode data sets
46 | if err := m.encodeDataSet(b); err != nil {
47 | return nil, err
48 | }
49 |
50 | b.WriteString("}")
51 |
52 | return b.Bytes(), nil
53 | }
54 |
55 | func (m *Message) encodeDataSet(b *bytes.Buffer) error {
56 | var (
57 | length int
58 | dsLength int
59 | err error
60 | )
61 |
62 | b.WriteString("\"DataSets\":")
63 | dsLength = len(m.DataSets)
64 |
65 | b.WriteByte('[')
66 |
67 | for i := range m.DataSets {
68 | length = len(m.DataSets[i])
69 |
70 | b.WriteByte('[')
71 | for j := range m.DataSets[i] {
72 | b.WriteString("{\"I\":")
73 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].ID), 10))
74 | b.WriteString(",\"V\":")
75 | err = m.writeValue(b, i, j)
76 |
77 | if j < length-1 {
78 | b.WriteString("},")
79 | } else {
80 | b.WriteByte('}')
81 | }
82 | }
83 |
84 | if i < dsLength-1 {
85 | b.WriteString("],")
86 | } else {
87 | b.WriteByte(']')
88 | }
89 | }
90 |
91 | b.WriteByte(']')
92 |
93 | return err
94 | }
95 |
96 | func (m *Message) encodeDataSetFlat(b *bytes.Buffer) error {
97 | var (
98 | length int
99 | dsLength int
100 | err error
101 | )
102 |
103 | b.WriteString("\"DataSets\":")
104 | dsLength = len(m.DataSets)
105 |
106 | b.WriteByte('[')
107 |
108 | for i := range m.DataSets {
109 | length = len(m.DataSets[i])
110 |
111 | b.WriteByte('{')
112 | for j := range m.DataSets[i] {
113 | b.WriteByte('"')
114 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].ID), 10))
115 | b.WriteString("\":")
116 | err = m.writeValue(b, i, j)
117 |
118 | if j < length-1 {
119 | b.WriteByte(',')
120 | } else {
121 | b.WriteByte('}')
122 | }
123 | }
124 |
125 | if i < dsLength-1 {
126 | b.WriteString(",")
127 | }
128 | }
129 |
130 | b.WriteByte(']')
131 |
132 | return err
133 | }
134 |
135 | func (m *Message) encodeHeader(b *bytes.Buffer) {
136 | b.WriteString("\"Header\":{\"Version\":")
137 | b.WriteString(strconv.FormatInt(int64(m.Header.Version), 10))
138 | b.WriteString(",\"Count\":")
139 | b.WriteString(strconv.FormatInt(int64(m.Header.Count), 10))
140 | b.WriteString(",\"SysUpTime\":")
141 | b.WriteString(strconv.FormatInt(int64(m.Header.SysUpTime), 10))
142 | b.WriteString(",\"UNIXSecs\":")
143 | b.WriteString(strconv.FormatInt(int64(m.Header.UNIXSecs), 10))
144 | b.WriteString(",\"SeqNum\":")
145 | b.WriteString(strconv.FormatInt(int64(m.Header.SeqNum), 10))
146 | b.WriteString(",\"SrcID\":")
147 | b.WriteString(strconv.FormatInt(int64(m.Header.SrcID), 10))
148 | b.WriteString("},")
149 | }
150 |
151 | func (m *Message) encodeAgent(b *bytes.Buffer) {
152 | b.WriteString("\"AgentID\":\"")
153 | b.WriteString(m.AgentID)
154 | b.WriteString("\",")
155 | }
156 |
157 | func (m *Message) writeValue(b *bytes.Buffer, i, j int) error {
158 | switch m.DataSets[i][j].Value.(type) {
159 | case uint:
160 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint)), 10))
161 | case uint8:
162 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint8)), 10))
163 | case uint16:
164 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint16)), 10))
165 | case uint32:
166 | b.WriteString(strconv.FormatUint(uint64(m.DataSets[i][j].Value.(uint32)), 10))
167 | case uint64:
168 | b.WriteString(strconv.FormatUint(m.DataSets[i][j].Value.(uint64), 10))
169 | case int:
170 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int)), 10))
171 | case int8:
172 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int8)), 10))
173 | case int16:
174 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int16)), 10))
175 | case int32:
176 | b.WriteString(strconv.FormatInt(int64(m.DataSets[i][j].Value.(int32)), 10))
177 | case int64:
178 | b.WriteString(strconv.FormatInt(m.DataSets[i][j].Value.(int64), 10))
179 | case float32:
180 | b.WriteString(strconv.FormatFloat(float64(m.DataSets[i][j].Value.(float32)), 'E', -1, 32))
181 | case float64:
182 | b.WriteString(strconv.FormatFloat(m.DataSets[i][j].Value.(float64), 'E', -1, 64))
183 | case string:
184 | b.WriteByte('"')
185 | b.WriteString(m.DataSets[i][j].Value.(string))
186 | b.WriteByte('"')
187 | case net.IP:
188 | b.WriteByte('"')
189 | b.WriteString(m.DataSets[i][j].Value.(net.IP).String())
190 | b.WriteByte('"')
191 | case net.HardwareAddr:
192 | b.WriteByte('"')
193 | b.WriteString(m.DataSets[i][j].Value.(net.HardwareAddr).String())
194 | b.WriteByte('"')
195 | case []uint8:
196 | b.WriteByte('"')
197 | b.WriteString("0x" + hex.EncodeToString(m.DataSets[i][j].Value.([]uint8)))
198 | b.WriteByte('"')
199 | default:
200 | return errUknownMarshalDataType
201 | }
202 |
203 | return nil
204 | }
205 |
--------------------------------------------------------------------------------
/netflow/v9/memcache.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: memcache.go
6 | //: details: handles template caching in memory with sharding feature
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 04/19/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package netflow9
24 |
25 | import (
26 | "encoding/binary"
27 | "encoding/json"
28 | "hash/fnv"
29 | "io/ioutil"
30 | "net"
31 | "sync"
32 | "time"
33 | )
34 |
35 | var shardNo = 32
36 |
37 | // MemCache represents the template shards
38 | type MemCache []*TemplatesShard
39 |
40 | // Data represents template records and
41 | // updated timestamp
42 | type Data struct {
43 | Template TemplateRecord
44 | Timestamp int64
45 | }
46 |
47 | // TemplatesShard represents a shard
48 | type TemplatesShard struct {
49 | Templates map[uint32]Data
50 | sync.RWMutex
51 | }
52 | type memCacheDisk struct {
53 | Cache MemCache
54 | ShardNo int
55 | }
56 |
57 | // GetCache tries to load saved templates
58 | // otherwise it constructs new empty shards
59 | func GetCache(cacheFile string) MemCache {
60 | var (
61 | mem memCacheDisk
62 | err error
63 | )
64 |
65 | b, err := ioutil.ReadFile(cacheFile)
66 | if err == nil {
67 | err = json.Unmarshal(b, &mem)
68 | if err == nil && mem.ShardNo == shardNo {
69 | return mem.Cache
70 | }
71 | }
72 |
73 | m := make(MemCache, shardNo)
74 | for i := 0; i < shardNo; i++ {
75 | m[i] = &TemplatesShard{Templates: make(map[uint32]Data)}
76 | }
77 |
78 | return m
79 | }
80 |
81 | func (m MemCache) getShard(id uint16, addr net.IP) (*TemplatesShard, uint32) {
82 | b := make([]byte, 2)
83 | binary.BigEndian.PutUint16(b, id)
84 | key := append(addr, b...)
85 |
86 | hash := fnv.New32()
87 | hash.Write(key)
88 | hSum32 := hash.Sum32()
89 |
90 | return m[uint(hSum32)%uint(shardNo)], hSum32
91 | }
92 |
93 | func (m *MemCache) insert(id uint16, addr net.IP, tr TemplateRecord) {
94 | shard, key := m.getShard(id, addr)
95 | shard.Lock()
96 | defer shard.Unlock()
97 | shard.Templates[key] = Data{tr, time.Now().Unix()}
98 | }
99 |
100 | func (m *MemCache) retrieve(id uint16, addr net.IP) (TemplateRecord, bool) {
101 | shard, key := m.getShard(id, addr)
102 | shard.RLock()
103 | defer shard.RUnlock()
104 | v, ok := shard.Templates[key]
105 |
106 | return v.Template, ok
107 | }
108 |
109 | // Dump saves the current templates to hard disk
110 | func (m MemCache) Dump(cacheFile string) error {
111 | b, err := json.Marshal(
112 | memCacheDisk{
113 | m,
114 | shardNo,
115 | },
116 | )
117 | if err != nil {
118 | return err
119 | }
120 |
121 | err = ioutil.WriteFile(cacheFile, b, 0644)
122 | if err != nil {
123 | return err
124 | }
125 |
126 | return nil
127 | }
128 |
--------------------------------------------------------------------------------
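GetCache and Dump bracket the template cache's lifetime: load any previously saved shards at startup (falling back to empty shards), and persist them at shutdown so templates outlive a collector restart. A minimal sketch, written as if it lived inside this package; the cache file path would be an illustrative value such as /tmp/netflowv9.templates:

// exampleTemplateCacheLifecycle shows how the cache is typically used around
// a collector run: load at startup, decode with it, persist at shutdown.
func exampleTemplateCacheLifecycle(cacheFile string, run func(MemCache)) error {
	// Load saved templates if cacheFile exists; otherwise start with
	// shardNo empty shards.
	mCache := GetCache(cacheFile)

	// Decoders read and write templates via mCache while the collector runs.
	run(mCache)

	// Persist the templates so they survive a restart.
	return mCache.Dump(cacheFile)
}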
/packet/doc.go:
--------------------------------------------------------------------------------
1 | // Package packet decodes layer two, three and four OSI model layers
2 | package packet
3 |
--------------------------------------------------------------------------------
/packet/ethernet.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ethernet.go
6 | //: details: layer two (ethernet / IEEE 802.1Q) decoder
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import (
26 | "errors"
27 | "fmt"
28 | )
29 |
30 | // Datalink represents the layer two (ethernet) header
31 | type Datalink struct {
32 | // SrcMAC represents source MAC address
33 | SrcMAC string
34 |
35 | // DstMAC represents destination MAC address
36 | DstMAC string
37 |
38 | // Vlan represents VLAN value
39 | Vlan int
40 |
41 | // EtherType represents upper layer type value
42 | EtherType uint16
43 | }
44 |
45 | const (
46 | // EtherTypeARP is Address Resolution Protocol EtherType value
47 | EtherTypeARP = 0x0806
48 |
49 | // EtherTypeIPv4 is Internet Protocol version 4 EtherType value
50 | EtherTypeIPv4 = 0x0800
51 |
52 | // EtherTypeIPv6 is Internet Protocol Version 6 EtherType value
53 | EtherTypeIPv6 = 0x86DD
54 |
55 | // EtherTypeLACP is Link Aggregation Control Protocol EtherType value
56 | EtherTypeLACP = 0x8809
57 |
58 | // EtherTypeIEEE8021Q is VLAN-tagged frame (IEEE 802.1Q) EtherType value
59 | EtherTypeIEEE8021Q = 0x8100
60 | )
61 |
62 | var (
63 | errShortEthernetHeaderLength = errors.New("the ethernet header is too small")
64 | )
65 |
66 | func (p *Packet) decodeEthernet() error {
67 | var (
68 | d Datalink
69 | err error
70 | )
71 |
72 | if len(p.data) < 14 {
73 | return errShortEthernetHeaderLength
74 | }
75 |
76 | d, err = decodeIEEE802(p.data)
77 | if err != nil {
78 | return err
79 | }
80 |
81 | if d.EtherType == EtherTypeIEEE8021Q {
82 | vlan := int(p.data[14])<<8 | int(p.data[15])
83 | p.data[12], p.data[13] = p.data[16], p.data[17]
84 | p.data = append(p.data[:14], p.data[18:]...)
85 |
86 | d, err = decodeIEEE802(p.data)
87 | if err != nil {
88 | return err
89 | }
90 | d.Vlan = vlan
91 | }
92 |
93 | p.L2 = d
94 | p.data = p.data[14:]
95 |
96 | return nil
97 | }
98 |
99 | func decodeIEEE802(b []byte) (Datalink, error) {
100 | var d Datalink
101 |
102 | if len(b) < 14 {
103 | return d, errShortEthernetLength
104 | }
105 |
106 | d.EtherType = uint16(b[13]) | uint16(b[12])<<8
107 |
108 | hwAddrFmt := "%0.2x:%0.2x:%0.2x:%0.2x:%0.2x:%0.2x"
109 |
110 | if d.EtherType != EtherTypeIEEE8021Q {
111 | d.DstMAC = fmt.Sprintf(hwAddrFmt, b[0], b[1], b[2], b[3], b[4], b[5])
112 | d.SrcMAC = fmt.Sprintf(hwAddrFmt, b[6], b[7], b[8], b[9], b[10], b[11])
113 | }
114 |
115 | return d, nil
116 | }
117 |
--------------------------------------------------------------------------------
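The 802.1Q branch in decodeEthernet above rewrites the frame in place: it reads the VLAN ID from bytes 14-15, copies the inner EtherType into bytes 12-13, drops the four tag bytes, and then re-runs decodeIEEE802 on the untagged frame. A minimal sketch of that path, written as if it lived inside this package; the frame bytes are illustrative:

// exampleDecodeTagged decodes a VLAN-tagged ethernet header and returns the
// resulting Datalink (VLAN 7, inner EtherType IPv4 for these bytes).
func exampleDecodeTagged() (Datalink, error) {
	p := NewPacket()
	p.data = []byte{
		// destination MAC, source MAC
		0xde, 0xad, 0x7a, 0x48, 0xcc, 0x37,
		0xd4, 0x04, 0xff, 0x01, 0x18, 0x1e,
		// 802.1Q TPID, TCI (VLAN 7), inner EtherType (IPv4)
		0x81, 0x00, 0x00, 0x07, 0x08, 0x00,
		// start of the IPv4 payload; enough bytes to keep the slice >= 18
		0x45, 0x00, 0x00, 0x00, 0x00, 0x00,
	}

	err := p.decodeEthernet()
	return p.L2, err
}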
/packet/ethernet_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ethernet_test.go
6 | //: details: layer two decoder tests
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "testing"
26 |
27 | func TestDecodeIEEE802(t *testing.T) {
28 | b := []byte{
29 | 0xd4, 0x4, 0xff, 0x1,
30 | 0x1d, 0x9e, 0x30, 0x7c,
31 | 0x5e, 0xe5, 0x59, 0xef,
32 | 0x8, 0x0, 0x45, 0x0, 0x0,
33 | }
34 |
35 | d, err := decodeIEEE802(b)
36 | if err != nil {
37 | t.Error("unexpected error", err)
38 | }
39 |
40 | if d.DstMAC != "d4:04:ff:01:1d:9e" {
41 | 		t.Error("expected d4:04:ff:01:1d:9e, got", d.DstMAC)
42 | }
43 |
44 | if d.SrcMAC != "30:7c:5e:e5:59:ef" {
45 | 		t.Error("expected 30:7c:5e:e5:59:ef, got", d.SrcMAC)
46 | }
47 |
48 | if d.Vlan != 0 {
49 | t.Error("expected 0, got", d.Vlan)
50 | }
51 |
52 | if d.EtherType != 0x800 {
53 | t.Error("expected 0x800, got", d.EtherType)
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/packet/icmp.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: icmp.go
6 | //: details: ICMP header decoder
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "errors"
26 |
27 | // ICMP represents ICMP header
28 | // 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
29 | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
30 | // | Type | Code | Checksum |
31 | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
32 | // | Rest of Header |
33 | // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
34 |
35 | type ICMP struct {
36 | // Type is ICMP type
37 | Type int
38 |
39 | // Code is ICMP subtype
40 | Code int
41 |
42 | // Rest of Header
43 | RestHeader []byte
44 | }
45 |
46 | var errICMPHLenTooShort = errors.New("ICMP header length is too short")
47 |
48 | func decodeICMP(b []byte) (ICMP, error) {
49 | if len(b) < 5 {
50 | 		return ICMP{}, errICMPHLenTooShort
51 | }
52 |
53 | return ICMP{
54 | Type: int(b[0]),
55 | Code: int(b[1]),
56 | RestHeader: b[4:],
57 | }, nil
58 | }
59 |
--------------------------------------------------------------------------------
/packet/network.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: network.go
6 | //: details: IPv4 and IPv6 header decoders
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import (
26 | "errors"
27 | "net"
28 | )
29 |
30 | // IPv4Header represents an IPv4 header
31 | type IPv4Header struct {
32 | Version int // protocol version
33 | TOS int // type-of-service
34 | TotalLen int // packet total length
35 | ID int // identification
36 | Flags int // flags
37 | FragOff int // fragment offset
38 | TTL int // time-to-live
39 | Protocol int // next protocol
40 | Checksum int // checksum
41 | Src string // source address
42 | Dst string // destination address
43 | }
44 |
45 | // IPv6Header represents an IPv6 header
46 | type IPv6Header struct {
47 | Version int // protocol version
48 | TrafficClass int // traffic class
49 | FlowLabel int // flow label
50 | PayloadLen int // payload length
51 | NextHeader int // next header
52 | HopLimit int // hop limit
53 | Src string // source address
54 | Dst string // destination address
55 | }
56 |
57 | const (
58 | // IPv4HLen is IPv4 header length size
59 | IPv4HLen = 20
60 |
61 | // IPv6HLen is IPv6 header length size
62 | IPv6HLen = 40
63 |
64 | // IANAProtoICMP is IANA Internet Control Message number
65 | IANAProtoICMP = 1
66 |
67 | // IANAProtoTCP is IANA Transmission Control number
68 | IANAProtoTCP = 6
69 |
70 | // IANAProtoUDP is IANA User Datagram number
71 | IANAProtoUDP = 17
72 |
73 | // IANAProtoIPv6ICMP is IANA Internet Control Message number for IPv6
74 | IANAProtoIPv6ICMP = 58
75 | )
76 |
77 | var (
78 | errShortIPv4HeaderLength = errors.New("short ipv4 header length")
79 | errShortIPv6HeaderLength = errors.New("short ipv6 header length")
80 | errShortEthernetLength = errors.New("short ethernet header length")
81 | errUnknownTransportLayer = errors.New("unknown transport layer")
82 | errUnknownL3Protocol = errors.New("unknown network layer protocol")
83 | )
84 |
85 | func (p *Packet) decodeNextLayer() error {
86 |
87 | var (
88 | proto int
89 | len int
90 | )
91 |
92 | switch p.L3.(type) {
93 | case IPv4Header:
94 | proto = p.L3.(IPv4Header).Protocol
95 | case IPv6Header:
96 | proto = p.L3.(IPv6Header).NextHeader
97 | default:
98 | return errUnknownL3Protocol
99 | }
100 |
101 | switch proto {
102 | case IANAProtoICMP, IANAProtoIPv6ICMP:
103 | icmp, err := decodeICMP(p.data)
104 | if err != nil {
105 | return err
106 | }
107 |
108 | p.L4 = icmp
109 | len = 4
110 | case IANAProtoTCP:
111 | tcp, err := decodeTCP(p.data)
112 | if err != nil {
113 | return err
114 | }
115 |
116 | p.L4 = tcp
117 | len = 20
118 | case IANAProtoUDP:
119 | udp, err := decodeUDP(p.data)
120 | if err != nil {
121 | return err
122 | }
123 |
124 | p.L4 = udp
125 | len = 8
126 | default:
127 | return errUnknownTransportLayer
128 | }
129 |
130 | p.data = p.data[len:]
131 |
132 | return nil
133 | }
134 |
135 | func (p *Packet) decodeIPv6Header() error {
136 | if len(p.data) < IPv6HLen {
137 | return errShortIPv6HeaderLength
138 | }
139 |
140 | var (
141 | src net.IP = p.data[8:24]
142 | dst net.IP = p.data[24:40]
143 | )
144 |
145 | p.L3 = IPv6Header{
146 | Version: int(p.data[0]) >> 4,
147 | TrafficClass: int(p.data[0]&0x0f)<<4 | int(p.data[1])>>4,
148 | FlowLabel: int(p.data[1]&0x0f)<<16 | int(p.data[2])<<8 | int(p.data[3]),
149 | PayloadLen: int(uint16(p.data[4])<<8 | uint16(p.data[5])),
150 | NextHeader: int(p.data[6]),
151 | HopLimit: int(p.data[7]),
152 | Src: src.String(),
153 | Dst: dst.String(),
154 | }
155 |
156 | p.data = p.data[IPv6HLen:]
157 |
158 | return nil
159 | }
160 |
161 | func (p *Packet) decodeIPv4Header() error {
162 | if len(p.data) < IPv4HLen {
163 | return errShortIPv4HeaderLength
164 | }
165 |
166 | var (
167 | src net.IP = p.data[12:16]
168 | dst net.IP = p.data[16:20]
169 | )
170 |
171 | p.L3 = IPv4Header{
172 | Version: int(p.data[0] & 0xf0 >> 4),
173 | TOS: int(p.data[1]),
174 | TotalLen: int(p.data[2])<<8 | int(p.data[3]),
175 | ID: int(p.data[4])<<8 | int(p.data[5]),
176 | 		Flags:    int(p.data[6]) >> 5, // the flags are the top three bits of byte 6
177 | TTL: int(p.data[8]),
178 | Protocol: int(p.data[9]),
179 | Checksum: int(p.data[10])<<8 | int(p.data[11]),
180 | Src: src.String(),
181 | Dst: dst.String(),
182 | }
183 |
184 | p.data = p.data[IPv4HLen:]
185 |
186 | return nil
187 | }
188 |
--------------------------------------------------------------------------------
/packet/network_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: network_test.go
6 | //: details: network layer decoder tests
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "testing"
26 |
27 | func TestDecodeIPv4Header(t *testing.T) {
28 | p := NewPacket()
29 |
30 | p.data = []byte{
31 | 0x45, 0x0, 0x0, 0x4b, 0x8,
32 | 0xf8, 0x0, 0x0, 0x3e, 0x11,
33 | 0x82, 0x91, 0xc0, 0xe5, 0xd8,
34 | 0x8f, 0xc0, 0xe5, 0x96, 0xbe,
35 | 0x64, 0x9b, 0x0, 0x35, 0x0,
36 | }
37 | err := p.decodeIPv4Header()
38 | if err != nil {
39 | t.Error("unexpected error", err)
40 | }
41 |
42 | ipv4 := p.L3.(IPv4Header)
43 |
44 | if ipv4.Version != 4 {
45 | t.Error("unexpected version, got", ipv4.Version)
46 | }
47 |
48 | if ipv4.TOS != 0 {
49 | t.Error("unexpected TOS, got", ipv4.TOS)
50 | }
51 | if ipv4.TotalLen != 75 {
52 | t.Error("unexpected TotalLen, got", ipv4.TotalLen)
53 | }
54 | if ipv4.Flags != 0 {
55 | t.Error("unexpected Flags, got", ipv4.Flags)
56 | }
57 |
58 | if ipv4.FragOff != 0 {
59 | t.Error("unexpected FragOff", ipv4.FragOff)
60 | }
61 |
62 | if ipv4.TTL != 62 {
63 | t.Error("unexpected TTL, got", ipv4.TTL)
64 | }
65 |
66 | if ipv4.Protocol != 17 {
67 | t.Error("unexpected protocol, got", ipv4.Protocol)
68 | }
69 |
70 | if ipv4.Checksum != 33425 {
71 | t.Error("unexpected checksum, got", ipv4.Checksum)
72 | }
73 | if ipv4.Src != "192.229.216.143" {
74 | t.Error("unexpected src addr, got", ipv4.Src)
75 | }
76 |
77 | if ipv4.Dst != "192.229.150.190" {
78 | t.Error("unexpected dst addr, got", ipv4.Dst)
79 | }
80 | }
81 |
--------------------------------------------------------------------------------
/packet/packet.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: packet.go
6 | //: details: packet decoder entry point for layers two, three and four
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "errors"
26 |
27 | // The header protocol describes the format of the sampled header
28 | const (
29 | headerProtocolEthernet uint32 = 1
30 | headerProtocolIPv4 uint32 = 11
31 | headerProtocolIPv6 uint32 = 12
32 | )
33 |
34 | // Packet represents layer 2,3,4 available info
35 | type Packet struct {
36 | L2 Datalink
37 | L3 interface{}
38 | L4 interface{}
39 | data []byte
40 | }
41 |
42 | var (
43 | errUnknownEtherType = errors.New("unknown ether type")
44 | errUnknownHeaderProtocol = errors.New("unknown header protocol")
45 | )
46 |
47 | // NewPacket constructs a packet object
48 | func NewPacket() Packet {
49 | return Packet{}
50 | }
51 |
52 | // Decoder decodes packet's layers
53 | func (p *Packet) Decoder(data []byte, protocol uint32) (*Packet, error) {
54 | var (
55 | err error
56 | )
57 |
58 | p.data = data
59 |
60 | switch protocol {
61 | case headerProtocolEthernet:
62 | err = p.decodeEthernetHeader()
63 | return p, err
64 | case headerProtocolIPv4:
65 | err = p.decodeIPv4Header()
66 | if err != nil {
67 | return p, err
68 | }
69 | case headerProtocolIPv6:
70 | err = p.decodeIPv6Header()
71 | if err != nil {
72 | return p, err
73 | }
74 | default:
75 | return p, errUnknownHeaderProtocol
76 | }
77 |
78 | err = p.decodeNextLayer()
79 | if err != nil {
80 | return p, err
81 | }
82 |
83 | return p, nil
84 | }
85 |
86 | func (p *Packet) decodeEthernetHeader() error {
87 | var (
88 | err error
89 | )
90 |
91 | err = p.decodeEthernet()
92 | if err != nil {
93 | return err
94 | }
95 |
96 | switch p.L2.EtherType {
97 | case EtherTypeIPv4:
98 |
99 | err = p.decodeIPv4Header()
100 | if err != nil {
101 | return err
102 | }
103 |
104 | err = p.decodeNextLayer()
105 | if err != nil {
106 | return err
107 | }
108 |
109 | case EtherTypeIPv6:
110 |
111 | err = p.decodeIPv6Header()
112 | if err != nil {
113 | return err
114 | }
115 |
116 | err = p.decodeNextLayer()
117 | if err != nil {
118 | return err
119 | }
120 |
121 | default:
122 | return errUnknownEtherType
123 | }
124 |
125 | return nil
126 | }
127 |
--------------------------------------------------------------------------------
/packet/packet_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: packet_test.go
6 | //: details: packet decoder tests and benchmarks
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "testing"
26 |
27 | func TestDecodeEthernetIPv4TCP(t *testing.T) {
28 | b := []byte{
29 | 0xde, 0xad, 0x7a, 0x48, 0xcc, 0x37, 0xd4, 0x4, 0xff, 0x1, 0x18, 0x1e,
30 | 0x81, 0x0, 0x0, 0x7, 0x8, 0x0, 0x45, 0x0, 0x2, 0x6b, 0x95, 0x54, 0x40,
31 | 0x0, 0x3c, 0x6, 0xab, 0x3b, 0x6c, 0xa1, 0xf8, 0x5e, 0xc0, 0xe5, 0xd6,
32 | 0x17, 0x1f, 0xf7, 0xc5, 0xe5, 0xf, 0xf5, 0x1c, 0x14, 0x68, 0xa4, 0x11,
33 | 0x89, 0x80, 0x18, 0x1, 0x7, 0x35, 0xdc, 0x0, 0x0, 0x1, 0x1, 0x8, 0xa,
34 | 0x17, 0x32, 0x75, 0x97, 0xf8, 0x73, 0x54, 0x15, 0x17, 0x3, 0x3, 0x0,
35 | 0x1a, 0xad, 0xf8, 0x9d, 0x51, 0x3e, 0xcc, 0x7e, 0x5b, 0x6f, 0xdd, 0x16,
36 | 0x5a, 0xd3, 0xb4, 0x34, 0x7a, 0x4f, 0x8e, 0xc5, 0xa5, 0x5a, 0x3e, 0x8e,
37 | 0xea, 0x51, 0xb7, 0x17, 0x3, 0x3, 0x0, 0x1c, 0xad, 0xf8, 0x9d, 0x51,
38 | 0x3e, 0xcc, 0x7e, 0x5c, 0xe0, 0x79, 0xdb, 0x6f, 0x11, 0xc9, 0x50,
39 | 0x2f, 0x5e, 0x3e, 0x15, 0xcf, 0xf5, 0x62,
40 | }
41 | p := NewPacket()
42 | _, err := p.Decoder(b, headerProtocolEthernet)
43 | if err != nil {
44 | t.Error("unexpected error", err)
45 | }
46 | }
47 |
48 | func BenchmarkDecodeEthernetIPv4TCP(b *testing.B) {
49 | data := []byte{
50 | 0xde, 0xad, 0x7a, 0x48, 0xcc, 0x37, 0xd4, 0x4, 0xff, 0x1, 0x18, 0x1e,
51 | 0x81, 0x0, 0x0, 0x7, 0x8, 0x0, 0x45, 0x0, 0x2, 0x6b, 0x95, 0x54, 0x40,
52 | 0x0, 0x3c, 0x6, 0xab, 0x3b, 0x6c, 0xa1, 0xf8, 0x5e, 0xc0, 0xe5, 0xd6,
53 | 0x17, 0x1f, 0xf7, 0xc5, 0xe5, 0xf, 0xf5, 0x1c, 0x14, 0x68, 0xa4, 0x11,
54 | 0x89, 0x80, 0x18, 0x1, 0x7, 0x35, 0xdc, 0x0, 0x0, 0x1, 0x1, 0x8, 0xa,
55 | 0x17, 0x32, 0x75, 0x97, 0xf8, 0x73, 0x54, 0x15, 0x17, 0x3, 0x3, 0x0,
56 | 0x1a, 0xad, 0xf8, 0x9d, 0x51, 0x3e, 0xcc, 0x7e, 0x5b, 0x6f, 0xdd, 0x16,
57 | 0x5a, 0xd3, 0xb4, 0x34, 0x7a, 0x4f, 0x8e, 0xc5, 0xa5, 0x5a, 0x3e, 0x8e,
58 | 0xea, 0x51, 0xb7, 0x17, 0x3, 0x3, 0x0, 0x1c, 0xad, 0xf8, 0x9d, 0x51,
59 | 0x3e, 0xcc, 0x7e, 0x5c, 0xe0, 0x79, 0xdb, 0x6f, 0x11, 0xc9, 0x50,
60 | 0x2f, 0x5e, 0x3e, 0x15, 0xcf, 0xf5, 0x62,
61 | }
62 | for i := 0; i < b.N; i++ {
63 | p := NewPacket()
64 | p.Decoder(data, headerProtocolEthernet)
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/packet/transport.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: transport.go
6 | //: details: TCP and UDP header decoders
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "errors"
26 |
27 | // TCPHeader represents TCP header
28 | type TCPHeader struct {
29 | SrcPort int
30 | DstPort int
31 | DataOffset int
32 | Reserved int
33 | Flags int
34 | }
35 |
36 | // UDPHeader represents UDP header
37 | type UDPHeader struct {
38 | SrcPort int
39 | DstPort int
40 | }
41 |
42 | var (
43 | errShortTCPHeaderLength = errors.New("short TCP header length")
44 | errShortUDPHeaderLength = errors.New("short UDP header length")
45 | )
46 |
47 | func decodeTCP(b []byte) (TCPHeader, error) {
48 | if len(b) < 20 {
49 | return TCPHeader{}, errShortTCPHeaderLength
50 | }
51 |
52 | return TCPHeader{
53 | SrcPort: int(b[0])<<8 | int(b[1]),
54 | DstPort: int(b[2])<<8 | int(b[3]),
55 | DataOffset: int(b[12]) >> 4,
56 | Reserved: 0,
57 | Flags: ((int(b[12])<<8 | int(b[13])) & 0x01ff),
58 | }, nil
59 | }
60 |
61 | func decodeUDP(b []byte) (UDPHeader, error) {
62 | if len(b) < 8 {
63 | return UDPHeader{}, errShortUDPHeaderLength
64 | }
65 |
66 | return UDPHeader{
67 | SrcPort: int(b[0])<<8 | int(b[1]),
68 | DstPort: int(b[2])<<8 | int(b[3]),
69 | }, nil
70 | }
71 |
--------------------------------------------------------------------------------
/packet/transport_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: transport_test.go
6 | //: details: transport layer decoder tests
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package packet
24 |
25 | import "testing"
26 |
27 | func TestDecoderUDP(t *testing.T) {
28 | b := []byte{
29 | 0xa3, 0x6c, 0x0, 0x35, 0x0,
30 | 0x3d, 0xc8, 0xdc, 0x81, 0x9f,
31 | }
32 |
33 | udp, err := decodeUDP(b)
34 | if err != nil {
35 | t.Error("unexpected error", err)
36 | }
37 |
38 | if udp.SrcPort != 41836 {
39 | t.Error("expected src port:41836, got", udp.SrcPort)
40 | }
41 |
42 | if udp.DstPort != 53 {
43 | t.Error("expected dst port:53, got", udp.DstPort)
44 | }
45 | }
46 |
47 | func TestDecodeTCP(t *testing.T) {
48 | b := []byte{
49 | 0xa5, 0x8e, 0x20, 0xfb, 0x54,
50 | 0x1, 0x4f, 0x1c, 0x52, 0x7f,
51 | 0x0, 0xf9, 0x50, 0x10, 0x1,
52 | 0x2a, 0xbb, 0xde, 0x0, 0x0,
53 | }
54 |
55 | tcp, err := decodeTCP(b)
56 | if err != nil {
57 | t.Error("unexpected error", err)
58 | }
59 |
60 | if tcp.SrcPort != 42382 {
61 | 		t.Error("expected src port:42382, got", tcp.SrcPort)
62 | }
63 |
64 | if tcp.DstPort != 8443 {
65 | t.Error("expected dst port:8443, got", tcp.DstPort)
66 | }
67 |
68 | if tcp.Flags != 16 {
69 | t.Error("expected flags:16, got", tcp.Flags)
70 | }
71 | }
72 |
73 | func TestDecodeTCP2(t *testing.T) {
74 | b := []byte{
75 | 0xa5, 0x8e, 0x20, 0xfb, 0x54,
76 | 0x1, 0x4f, 0x1c, 0x52, 0x7f,
77 | 0x0, 0xf9, 0x51, 0x10, 0x1,
78 | 0x2a, 0xbb, 0xde, 0x0, 0x0,
79 | }
80 |
81 | tcp, err := decodeTCP(b)
82 | if err != nil {
83 | t.Error("unexpected error", err)
84 | }
85 |
86 | // NS + ACK flags (0x100 | 0x010 = 272)
87 | if tcp.Flags != 272 {
88 | t.Error("expected flags:272, got", tcp.Flags)
89 | }
90 |
91 | // check dataoffset
92 | if tcp.DataOffset != 5 {
93 | t.Error("expected dataoffset:5, got", tcp.DataOffset)
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/producer/doc.go:
--------------------------------------------------------------------------------
1 | // Package producer pushes decoded messages to message queues
2 | package producer
3 |
--------------------------------------------------------------------------------
/producer/nats.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: nats.go
6 | //: details: vflow nats producer plugin
7 | //: author: Jeremy Rossi
8 | //: date: 06/19/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package producer
24 |
25 | import (
26 | "io/ioutil"
27 | "log"
28 |
29 | "github.com/nats-io/nats.go"
30 | "gopkg.in/yaml.v2"
31 | )
32 |
33 | // NATS represents nats producer
34 | type NATS struct {
35 | connection *nats.Conn
36 | config NATSConfig
37 | logger *log.Logger
38 | }
39 |
40 | // NATSConfig is the struct that holds all configuration for NATS connections
41 | type NATSConfig struct {
42 | URL string `yaml:"url"`
43 | }
44 |
45 | func (n *NATS) setup(configFile string, logger *log.Logger) error {
46 | var err error
47 | n.config = NATSConfig{
48 | URL: nats.DefaultURL,
49 | }
50 |
51 | if err = n.load(configFile); err != nil {
52 | logger.Println(err)
53 | return err
54 | }
55 |
56 | n.connection, err = nats.Connect(n.config.URL)
57 | if err != nil {
58 | logger.Println(err)
59 | return err
60 | }
61 |
62 | n.logger = logger
63 |
64 | return nil
65 | }
66 |
67 | func (n *NATS) inputMsg(topic string, mCh chan []byte, ec *uint64) {
68 | var (
69 | msg []byte
70 | err error
71 | ok bool
72 | )
73 |
74 | n.logger.Printf("start producer: NATS, server: %+v, topic: %s\n",
75 | n.config.URL, topic)
76 |
77 | for {
78 | msg, ok = <-mCh
79 | if !ok {
80 | break
81 | }
82 |
83 | err = n.connection.Publish(topic, msg)
84 | if err != nil {
85 | n.logger.Println(err)
86 | *ec++
87 | }
88 | }
89 | }
90 |
91 | func (n *NATS) load(f string) error {
92 | b, err := ioutil.ReadFile(f)
93 | if err != nil {
94 | return err
95 | }
96 |
97 | err = yaml.Unmarshal(b, &n.config)
98 | if err != nil {
99 | return err
100 | }
101 |
102 | return nil
103 | }
104 |
--------------------------------------------------------------------------------
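The only key the NATS producer reads from its YAML config is `url` (see `NATSConfig` above); if the file omits it, `nats.DefaultURL` is used. A minimal sketch, with an illustrative server address:

````
url: nats://127.0.0.1:4222
````
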
/producer/nsq.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: nsq.go
6 | //: details: vflow nsq producer plugin
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package producer
24 |
25 | import (
26 | "io/ioutil"
27 | "log"
28 |
29 | "github.com/nsqio/go-nsq"
30 | "gopkg.in/yaml.v2"
31 | )
32 |
33 | // NSQ represents nsq producer
34 | type NSQ struct {
35 | producer *nsq.Producer
36 | config NSQConfig
37 | logger *log.Logger
38 | }
39 |
40 | // NSQConfig represents NSQ configuration
41 | type NSQConfig struct {
42 | Server string `yaml:"server"`
43 | }
44 |
45 | func (n *NSQ) setup(configFile string, logger *log.Logger) error {
46 | var (
47 | err error
48 | cfg = nsq.NewConfig()
49 | )
50 |
51 | // set default values
52 | n.config = NSQConfig{
53 | Server: "localhost:4150",
54 | }
55 |
56 | // load configuration if available
57 | if err = n.load(configFile); err != nil {
58 | logger.Println(err)
59 | }
60 |
61 | cfg.ClientID = "vflow.nsq"
62 |
63 | n.producer, err = nsq.NewProducer(n.config.Server, cfg)
64 | if err != nil {
65 | logger.Println(err)
66 | return err
67 | }
68 |
69 | n.logger = logger
70 |
71 | return nil
72 | }
73 |
74 | func (n *NSQ) inputMsg(topic string, mCh chan []byte, ec *uint64) {
75 | var (
76 | msg []byte
77 | err error
78 | ok bool
79 | )
80 |
81 | n.logger.Printf("start producer: NSQ, server: %+v, topic: %s\n",
82 | n.config.Server, topic)
83 |
84 | for {
85 | msg, ok = <-mCh
86 | if !ok {
87 | break
88 | }
89 |
90 | err = n.producer.Publish(topic, msg)
91 | if err != nil {
92 | n.logger.Println(err)
93 | *ec++
94 | }
95 | }
96 | }
97 |
98 | func (n *NSQ) load(f string) error {
99 | b, err := ioutil.ReadFile(f)
100 | if err != nil {
101 | return err
102 | }
103 |
104 | err = yaml.Unmarshal(b, &n.config)
105 | if err != nil {
106 | return err
107 | }
108 |
109 | return nil
110 | }
111 |
--------------------------------------------------------------------------------
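Likewise, the NSQ producer reads a single `server` key; the value below just restates the default set in `setup`:

````
server: localhost:4150
````
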
/producer/producer.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: producer.go
6 | //: details: vflow kafka producer engine
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package producer
24 |
25 | import (
26 | "log"
27 | "sync"
28 | )
29 |
30 | // Producer represents messaging queue
31 | type Producer struct {
32 | MQ MQueue
33 | MQConfigFile string
34 | MQErrorCount *uint64
35 |
36 | Topic string
37 | Chan chan []byte
38 |
39 | Logger *log.Logger
40 | }
41 |
42 | // MQueue represents messaging queue methods
43 | type MQueue interface {
44 | setup(string, *log.Logger) error
45 | inputMsg(string, chan []byte, *uint64)
46 | }
47 |
48 | // NewProducer constructs a producer for the named messaging queue
49 | func NewProducer(mqName string) *Producer {
50 | var mqRegistered = map[string]MQueue{
51 | "kafka": new(KafkaSarama),
52 | "kafka.sarama": new(KafkaSarama),
53 | "kafka.segmentio": new(KafkaSegmentio),
54 | "nsq": new(NSQ),
55 | "nats": new(NATS),
56 | "rawSocket": new(RawSocket),
57 | }
58 |
59 | return &Producer{
60 | MQ: mqRegistered[mqName],
61 | }
62 | }
63 |
64 | // Run sets up the messaging queue and produces until the message channel is closed
65 | func (p *Producer) Run() error {
66 | var (
67 | wg sync.WaitGroup
68 | err error
69 | )
70 |
71 | err = p.MQ.setup(p.MQConfigFile, p.Logger)
72 | if err != nil {
73 | return err
74 | }
75 |
76 | wg.Add(1)
77 | go func() {
78 | defer wg.Done()
79 | topic := p.Topic
80 | p.MQ.inputMsg(topic, p.Chan, p.MQErrorCount)
81 | }()
82 |
83 | wg.Wait()
84 |
85 | return nil
86 | }
87 |
88 | // Shutdown stops the producer
89 | func (p *Producer) Shutdown() {
90 | close(p.Chan)
91 | }
92 |
--------------------------------------------------------------------------------
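A minimal sketch of how a caller wires up this producer. The queue name must be one of the keys registered in `NewProducer`; the config path matches the packaging scripts' `/etc/vflow/mq.conf`, while the topic, buffer size, and sample payload are purely illustrative:

````
package main

import (
	"log"
	"os"

	"github.com/EdgeCast/vflow/producer"
)

func main() {
	var errCount uint64

	p := producer.NewProducer("kafka") // or "nsq", "nats", "rawSocket"
	p.MQConfigFile = "/etc/vflow/mq.conf"
	p.MQErrorCount = &errCount
	p.Topic = "vflow.ipfix"
	p.Chan = make(chan []byte, 1000)
	p.Logger = log.New(os.Stderr, "[producer] ", log.Ldate|log.Ltime)

	// Run blocks until the channel is closed, so it gets its own goroutine.
	go func() {
		if err := p.Run(); err != nil {
			p.Logger.Println(err)
		}
	}()

	p.Chan <- []byte(`{"AgentID":"10.0.0.1"}`) // a decoded, JSON-encoded flow
	p.Shutdown()                               // closes the channel; Run returns
}
````
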
/producer/producer_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: producer_test.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package producer
24 |
25 | import (
26 | "log"
27 | "sync"
28 | "testing"
29 | )
30 |
31 | type MQMock struct{}
32 |
33 | func (k *MQMock) setup(configFile string, logger *log.Logger) error {
34 | return nil
35 | }
36 |
37 | func (k *MQMock) inputMsg(topic string, mCh chan []byte, ec *uint64) {
38 | for {
39 | msg, ok := <-mCh
40 | if !ok {
41 | break
42 | }
43 | mCh <- msg
44 | }
45 | }
46 |
47 | func TestProducerChan(t *testing.T) {
48 | var (
49 | ch = make(chan []byte, 1)
50 | wg sync.WaitGroup
51 | )
52 |
53 | p := Producer{MQ: new(MQMock)}
54 | p.Chan = ch
55 |
56 | wg.Add(1)
57 | go func() {
58 | defer wg.Done()
59 | if err := p.Run(); err != nil {
60 | t.Error("unexpected error", err)
61 | }
62 | }()
63 |
64 | ch <- []byte("test")
65 | m := <-ch
66 | if string(m) != "test" {
67 | t.Error("expect to get test, got", string(m))
68 | }
69 |
70 | close(ch)
71 |
72 | wg.Wait()
73 | }
74 |
--------------------------------------------------------------------------------
/producer/rawSocket.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: rawSocket.go
6 | //: details: vflow tcp/udp producer plugin
7 | //: author: Joe Percivall
8 | //: date: 12/18/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package producer
24 |
25 | import (
26 | "io/ioutil"
27 | "log"
28 | "strings"
29 |
30 | "fmt"
31 | "gopkg.in/yaml.v2"
32 | "net"
33 | )
34 |
35 | // RawSocket represents RawSocket producer
36 | type RawSocket struct {
37 | connection net.Conn
38 | config RawSocketConfig
39 | logger *log.Logger
40 | }
41 |
42 | // RawSocketConfig is the struct that holds all configuration for RawSocket connections
43 | type RawSocketConfig struct {
44 | URL string `yaml:"url"`
45 | Protocol string `yaml:"protocol"`
46 | MaxRetry int `yaml:"retry-max"`
47 | }
48 |
49 | func (rs *RawSocket) setup(configFile string, logger *log.Logger) error {
50 | var err error
51 | rs.config = RawSocketConfig{
52 | URL: "localhost:9555",
53 | Protocol: "tcp",
54 | MaxRetry: 2,
55 | }
56 |
57 | if err = rs.load(configFile); err != nil {
58 | logger.Println(err)
59 | return err
60 | }
61 |
62 | rs.connection, err = net.Dial(rs.config.Protocol, rs.config.URL)
63 | if err != nil {
64 | logger.Println(err)
65 | return err
66 | }
67 |
68 | rs.logger = logger
69 |
70 | return nil
71 | }
72 |
73 | func (rs *RawSocket) inputMsg(topic string, mCh chan []byte, ec *uint64) {
74 | var (
75 | msg []byte
76 | err error
77 | ok bool
78 | )
79 |
80 | rs.logger.Printf("start producer: RawSocket, server: %+v, Protocol: %s\n",
81 | rs.config.URL, rs.config.Protocol)
82 |
83 | for {
84 | msg, ok = <-mCh
85 | if !ok {
86 | break
87 | }
88 |
89 | for i := 0; ; i++ {
90 | _, err = fmt.Fprintln(rs.connection, string(msg))
91 | if err == nil {
92 | break
93 | }
94 |
95 | *ec++
96 |
97 | if strings.HasSuffix(err.Error(), "broken pipe") {
98 | var newConnection, err = net.Dial(rs.config.Protocol, rs.config.URL)
99 | if err != nil {
100 | rs.logger.Println("Error when attempting to fix the broken pipe", err)
101 | } else {
102 | rs.logger.Println("Successfully reconnected")
103 | rs.connection = newConnection
104 | }
105 | }
106 |
107 | if i >= (rs.config.MaxRetry) {
108 | rs.logger.Println("message failed after the configured retry limit:", err)
109 | break
110 | } else {
111 | rs.logger.Println("retrying after error:", err)
112 | }
113 | }
114 | }
115 | }
116 |
117 | func (rs *RawSocket) load(f string) error {
118 | b, err := ioutil.ReadFile(f)
119 | if err != nil {
120 | return err
121 | }
122 |
123 | err = yaml.Unmarshal(b, &rs.config)
124 | if err != nil {
125 | return err
126 | }
127 |
128 | return nil
129 | }
130 |
--------------------------------------------------------------------------------
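The raw-socket producer accepts the three keys defined in `RawSocketConfig`; the values below restate the defaults from `setup`:

````
url: localhost:9555
protocol: tcp
retry-max: 2
````
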
/producer/sarama.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: sarama.go
6 | //: details: vflow kafka producer plugin
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package producer
24 |
25 | import (
26 | "crypto/tls"
27 | "crypto/x509"
28 | "io/ioutil"
29 | "log"
30 | "os"
31 | "reflect"
32 | "strconv"
33 | "strings"
34 | "time"
35 |
36 | "github.com/Shopify/sarama"
37 | yaml "gopkg.in/yaml.v2"
38 | )
39 |
40 | // KafkaSarama represents kafka producer
41 | type KafkaSarama struct {
42 | producer sarama.AsyncProducer
43 | config KafkaSaramaConfig
44 | logger *log.Logger
45 | }
46 |
47 | // KafkaSaramaConfig represents kafka configuration
48 | type KafkaSaramaConfig struct {
49 | Brokers []string `yaml:"brokers" env:"BROKERS"`
50 | Compression string `yaml:"compression" env:"COMPRESSION"`
51 | RetryMax int `yaml:"retry-max" env:"RETRY_MAX"`
52 | RequestSizeMax int32 `yaml:"request-size-max" env:"REQUEST_SIZE_MAX"`
53 | RetryBackoff int `yaml:"retry-backoff" env:"RETRY_BACKOFF"`
54 | TLSEnabled bool `yaml:"tls-enabled" env:"TLS_ENABLED"`
55 | TLSCertFile string `yaml:"tls-cert" env:"TLS_CERT"`
56 | TLSKeyFile string `yaml:"tls-key" env:"TLS_KEY"`
57 | CAFile string `yaml:"ca-file" env:"CA_FILE"`
58 | TLSSkipVerify bool `yaml:"tls-skip-verify" env:"TLS_SKIP_VERIFY"`
59 | SASLUsername string `yaml:"sasl-username" env:"SASL_USERNAME"`
60 | SASLPassword string `yaml:"sasl-password" env:"SASL_PASSWORD"`
61 | }
62 |
63 | func (k *KafkaSarama) setup(configFile string, logger *log.Logger) error {
64 | var (
65 | config = sarama.NewConfig()
66 | err error
67 | )
68 |
69 | // set default values
70 | k.config = KafkaSaramaConfig{
71 | Brokers: []string{"localhost:9092"},
72 | RetryMax: 2,
73 | RequestSizeMax: 104857600,
74 | RetryBackoff: 10,
75 | TLSEnabled: false,
76 | TLSSkipVerify: true,
77 | }
78 |
79 | k.logger = logger
80 |
81 | // load configuration if available
82 | if err = k.load(configFile); err != nil {
83 | logger.Println(err)
84 | }
85 |
86 | // init kafka configuration
87 | config.ClientID = "vFlow.Kafka"
88 | config.Producer.Retry.Max = k.config.RetryMax
89 | config.Producer.Retry.Backoff = time.Duration(k.config.RetryBackoff) * time.Millisecond
90 |
91 | sarama.MaxRequestSize = k.config.RequestSizeMax
92 |
93 | switch k.config.Compression {
94 | case "gzip":
95 | config.Producer.Compression = sarama.CompressionGZIP
96 | case "lz4":
97 | config.Producer.Compression = sarama.CompressionLZ4
98 | case "snappy":
99 | config.Producer.Compression = sarama.CompressionSnappy
100 | default:
101 | config.Producer.Compression = sarama.CompressionNone
102 | }
103 |
104 | if tlsConfig := k.tlsConfig(); tlsConfig != nil || k.config.TLSEnabled {
105 | config.Net.TLS.Config = tlsConfig
106 | config.Net.TLS.Enable = true
107 | if k.config.TLSSkipVerify {
108 | k.logger.Printf("kafka client TLS enabled (server certificate verification skipped)")
109 | } else {
110 | k.logger.Printf("kafka client TLS enabled")
111 | }
112 | }
113 |
114 | // Enable SASL Auth Config if username is filled
115 | if k.config.SASLUsername != "" {
116 | config.Net.SASL.Enable = true
117 | config.Net.SASL.User = k.config.SASLUsername
118 | config.Net.SASL.Password = k.config.SASLPassword
119 | }
120 |
121 | // get env config
122 | k.loadEnv("VFLOW_KAFKA")
123 |
124 | if err = config.Validate(); err != nil {
125 | logger.Fatal(err)
126 | }
127 |
128 | k.producer, err = sarama.NewAsyncProducer(k.config.Brokers, config)
129 | if err != nil {
130 | return err
131 | }
132 |
133 | return nil
134 | }
135 |
136 | func (k *KafkaSarama) inputMsg(topic string, mCh chan []byte, ec *uint64) {
137 | var (
138 | msg []byte
139 | ok bool
140 | )
141 |
142 | k.logger.Printf("start producer: Kafka, brokers: %+v, topic: %s\n",
143 | k.config.Brokers, topic)
144 |
145 | for {
146 | msg, ok = <-mCh
147 | if !ok {
148 | break
149 | }
150 |
151 | select {
152 | case k.producer.Input() <- &sarama.ProducerMessage{
153 | Topic: topic,
154 | Value: sarama.ByteEncoder(msg),
155 | }:
156 | case err := <-k.producer.Errors():
157 | k.logger.Println(err)
158 | *ec++
159 | }
160 | }
161 |
162 | k.producer.Close()
163 | }
164 |
165 | func (k *KafkaSarama) load(f string) error {
166 | b, err := ioutil.ReadFile(f)
167 | if err != nil {
168 | return err
169 | }
170 |
171 | err = yaml.Unmarshal(b, &k.config)
172 | if err != nil {
173 | return err
174 | }
175 |
176 | return nil
177 | }
178 |
179 | func (k KafkaSarama) tlsConfig() *tls.Config {
180 | var t *tls.Config
181 |
182 | if k.config.TLSCertFile != "" || k.config.TLSKeyFile != "" || k.config.CAFile != "" {
183 | cert, err := tls.LoadX509KeyPair(k.config.TLSCertFile, k.config.TLSKeyFile)
184 | if err != nil {
185 | k.logger.Fatal("kafka TLS load X509 key pair error: ", err)
186 | }
187 |
188 | caCert, err := ioutil.ReadFile(k.config.CAFile)
189 | if err != nil {
190 | k.logger.Fatal("kafka TLS CA file error: ", err)
191 | }
192 |
193 | caCertPool := x509.NewCertPool()
194 | caCertPool.AppendCertsFromPEM(caCert)
195 |
196 | t = &tls.Config{
197 | Certificates: []tls.Certificate{cert},
198 | RootCAs: caCertPool,
199 | InsecureSkipVerify: k.config.TLSSkipVerify,
200 | }
201 | }
202 |
203 | return t
204 | }
205 |
206 | func (k *KafkaSarama) loadEnv(prefix string) {
207 | v := reflect.ValueOf(&k.config).Elem()
208 | t := v.Type()
209 | for i := 0; i < v.NumField(); i++ {
210 | f := v.Field(i)
211 | env := t.Field(i).Tag.Get("env")
212 | if env == "" {
213 | continue
214 | }
215 |
216 | val, ok := os.LookupEnv(prefix + "_" + env)
217 | if !ok {
218 | continue
219 | }
220 |
221 | switch f.Kind() {
222 | case reflect.Int:
223 | valInt, err := strconv.Atoi(val)
224 | if err != nil {
225 | k.logger.Println(err)
226 | continue
227 | }
228 | f.SetInt(int64(valInt))
229 | case reflect.String:
230 | f.SetString(val)
231 | case reflect.Slice:
232 | // a ";"-separated value fills the whole string slice,
233 | // e.g. VFLOW_KAFKA_BROKERS="b1:9092;b2:9092"
234 | f.Set(reflect.ValueOf(strings.Split(val, ";")))
235 | case reflect.Bool:
236 | valBool, err := strconv.ParseBool(val)
237 | if err != nil {
238 | k.logger.Println(err)
239 | continue
240 | }
241 | f.SetBool(valBool)
242 | }
243 | }
244 | }
245 |
--------------------------------------------------------------------------------
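Besides the YAML file (see scripts/kafka.conf), `loadEnv` maps every `env`-tagged field of `KafkaSaramaConfig` to an environment variable with the `VFLOW_KAFKA_` prefix, splitting slice values on `;`. For example, an illustrative broker list:

````
export VFLOW_KAFKA_BROKERS="10.1.1.1:9092;10.1.1.2:9092"
````
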
/reader/reader.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: reader.go
6 | //: details: decodes a variable from buffer
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | // Package reader decodes a variable from buffer
24 | package reader
25 |
26 | import (
27 | "encoding/binary"
28 | "errors"
29 | )
30 |
31 | // Reader represents the data bytes for reading
32 | type Reader struct {
33 | data []byte
34 | count int
35 | }
36 |
37 | var errReader = errors.New("can not read the data")
38 |
39 | // NewReader constructs a reader
40 | func NewReader(b []byte) *Reader {
41 | return &Reader{
42 | data: b,
43 | }
44 | }
45 |
46 | // Uint8 reads a byte
47 | func (r *Reader) Uint8() (uint8, error) {
48 | if len(r.data) < 1 {
49 | return 0, errReader
50 | }
51 |
52 | d := r.data[0]
53 | r.advance(1)
54 |
55 | return d, nil
56 | }
57 |
58 | // Uint16 reads two bytes as big-endian
59 | func (r *Reader) Uint16() (uint16, error) {
60 | if len(r.data) < 2 {
61 | return 0, errReader
62 | }
63 |
64 | d := binary.BigEndian.Uint16(r.data)
65 | r.advance(2)
66 |
67 | return d, nil
68 | }
69 |
70 | // Uint32 reads four bytes as big-endian
71 | func (r *Reader) Uint32() (uint32, error) {
72 | if len(r.data) < 4 {
73 | return 0, errReader
74 | }
75 |
76 | d := binary.BigEndian.Uint32(r.data)
77 | r.advance(4)
78 |
79 | return d, nil
80 | }
81 |
82 | // Uint64 reads eight bytes as big-endian
83 | func (r *Reader) Uint64() (uint64, error) {
84 | if len(r.data) < 8 {
85 | return 0, errReader
86 | }
87 |
88 | d := binary.BigEndian.Uint64(r.data)
89 | r.advance(8)
90 |
91 | return d, nil
92 | }
93 |
94 | // Read reads n bytes and returns it
95 | func (r *Reader) Read(n int) ([]byte, error) {
96 | if len(r.data) < n {
97 | return []byte{}, errReader
98 | }
99 |
100 | d := r.data[:n]
101 | r.advance(n)
102 |
103 | return d, nil
104 | }
105 |
106 | // PeekUint16 peeks the next two bytes interpreted as big-endian two-byte integer
107 | func (r *Reader) PeekUint16() (res uint16, err error) {
108 | var b []byte
109 | if b, err = r.Peek(2); err == nil {
110 | res = binary.BigEndian.Uint16(b)
111 | }
112 | return
113 | }
114 |
115 | // Peek returns the next n bytes in the reader without advancing in the stream
116 | func (r *Reader) Peek(n int) ([]byte, error) {
117 | if len(r.data) < n {
118 | return []byte{}, errReader
119 | }
120 | return r.data[:n], nil
121 | }
122 |
123 | // Len returns the current length of the reader's data
124 | func (r *Reader) Len() int {
125 | return len(r.data)
126 | }
127 |
128 | func (r *Reader) advance(num int) {
129 | r.data = r.data[num:]
130 | r.count += num
131 | }
132 |
133 | // ReadCount returns the number of bytes that have been read from this Reader in total
134 | func (r *Reader) ReadCount() int {
135 | return r.count
136 | }
137 |
--------------------------------------------------------------------------------
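A small usage sketch of the Reader API above, mainly to show that `Peek`/`PeekUint16` do not consume bytes while the typed reads do; the sample bytes are arbitrary:

````
package main

import (
	"fmt"

	"github.com/EdgeCast/vflow/reader"
)

func main() {
	r := reader.NewReader([]byte{0x00, 0x0a, 0x00, 0x04, 0xde, 0xad, 0xbe, 0xef})

	next, _ := r.PeekUint16() // 10, nothing consumed yet
	id, _ := r.Uint16()       // 10
	length, _ := r.Uint16()   // 4
	body, _ := r.Read(4)      // the remaining four bytes

	// 10 10 4 [222 173 190 239] 8 0
	fmt.Println(next, id, length, body, r.ReadCount(), r.Len())
}
````
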
/reader/reader_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: reader_test.go
6 | //: details: unit testing for reader.go
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 03/22/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package reader
24 |
25 | import (
26 | "reflect"
27 | "testing"
28 | )
29 |
30 | func TestUint8(t *testing.T) {
31 | b := []byte{0x05, 0x11}
32 |
33 | r := NewReader(b)
34 | i, err := r.Uint8()
35 | if err != nil {
36 | t.Error("unexpected error happened, got", err)
37 | }
38 |
39 | if i != 5 {
40 | t.Error("expect read 5, got", i)
41 | }
42 | }
43 |
44 | func TestUint16(t *testing.T) {
45 | b := []byte{0x05, 0x11}
46 |
47 | r := NewReader(b)
48 | i, err := r.Uint16()
49 | if err != nil {
50 | t.Error("unexpected error happened, got", err)
51 | }
52 |
53 | if i != 1297 {
54 | t.Error("expect read 1297, got", i)
55 | }
56 | }
57 |
58 | func TestUint32(t *testing.T) {
59 | b := []byte{0x05, 0x11, 0x01, 0x16}
60 |
61 | r := NewReader(b)
62 | i, err := r.Uint32()
63 | if err != nil {
64 | t.Error("unexpected error happened, got", err)
65 | }
66 |
67 | if i != 85000470 {
68 | t.Error("expect read 85000470, got", i)
69 | }
70 | }
71 |
72 | func TestUint64(t *testing.T) {
73 | b := []byte{0x05, 0x11, 0x01, 0x16, 0x05, 0x01, 0x21, 0x26}
74 |
75 | r := NewReader(b)
76 | i, err := r.Uint64()
77 | if err != nil {
78 | t.Error("unexpected error happened, got", err)
79 | }
80 |
81 | if i != 365074238878589222 {
82 | t.Error("expect read 365074238878589222, got", i)
83 | }
84 | }
85 |
86 | func TestReadN(t *testing.T) {
87 | b := []byte{0x05, 0x11, 0x01, 0x16}
88 |
89 | r := NewReader(b)
90 | i, err := r.Read(2)
91 | if err != nil {
92 | t.Error("unexpected error happened, got", err)
93 | }
94 |
95 | if !reflect.DeepEqual(i, []byte{0x05, 0x11}) {
96 | t.Error("expect read [5 17], got", i)
97 | }
98 | }
99 |
100 | func TestReadCount(t *testing.T) {
101 | b := make([]byte, 18)
102 | for i := range b {
103 | b[i] = byte(i)
104 | }
105 | r := NewReader(b)
106 | check := func(expected int) {
107 | count := r.ReadCount()
108 | if count != expected {
109 | t.Error("Unexpected ReadCount(). Expected", expected, "got", count)
110 | }
111 | }
112 |
113 | check(0)
114 | r.Uint8()
115 | check(1)
116 | r.Uint16()
117 | check(3)
118 | r.Uint32()
119 | check(7)
120 | r.Uint64()
121 | check(15)
122 | r.Read(3)
123 | check(18)
124 | }
125 |
--------------------------------------------------------------------------------
/scripts/dockerStart.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh -e
2 |
3 | # Remove old pid file if it exists
4 | rm -f "/var/run/vflow.pid"
5 | # Touch the log file so tail has something to follow if it doesn't exist
6 | touch "/var/log/vflow.log"
7 |
8 | # Continuously provide logs so that 'docker logs' can produce them
9 | tail -F "/var/log/vflow.log" &
10 | "/usr/bin/vflow" &
11 | vflow_pid="$!"
12 |
13 | trap "echo Received trapped signal, beginning shutdown...;" TERM HUP INT EXIT;
14 |
15 | echo vFlow running with PID ${vflow_pid}.
16 | wait ${vflow_pid}
--------------------------------------------------------------------------------
/scripts/dpkg/DEBIAN/control:
--------------------------------------------------------------------------------
1 | Package: vflow
2 | Version: %VERSION%
3 | Architecture: amd64
4 | Source: vflow
5 | Maintainer: Mehrdad Arshad Rad
6 | Section: net
7 | Priority: optional
8 | Homepage: https://github.com/EdgeCast/vflow
9 | Description: High-performance, scalable and reliable IPFIX, sFlow and Netflow collector
10 | Features:
11 | .
12 | IPFIX RFC7011 collector
13 | sFlow v5 raw header packet collector
14 | Netflow v9 (Beta)
15 | Decoding sFlow raw header L2/L3/L4
16 | Produce to Apache Kafka, NSQ
17 | Replicate IPFIX to 3rd party collector
18 | Supports IPv4 and IPv6
19 |
--------------------------------------------------------------------------------
/scripts/dpkg/DEBIAN/copyright:
--------------------------------------------------------------------------------
1 | Upstream-Name: vflow
2 | Source: https://github.com/EdgeCast/vflow
3 |
4 | Files: *
5 | Copyright: Copyright (C) 2017 Verizon (www.verizon.com; www.github.com/Verizon)
6 | License: Apache-2.0
7 |
--------------------------------------------------------------------------------
/scripts/kafka.conf:
--------------------------------------------------------------------------------
1 | brokers:
2 | - 127.0.0.1:9092
3 | retry-max: 1
4 | retry-backoff: 10
5 |
--------------------------------------------------------------------------------
/scripts/prometheus.yml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 10s
3 |
4 | scrape_configs:
5 | - job_name: "vflow"
6 | static_configs:
7 | - targets: ["vflow:8081"]
8 |
--------------------------------------------------------------------------------
/scripts/rpmbuild/SPECS/vflow.spec:
--------------------------------------------------------------------------------
1 | Name: vflow
2 | Version: %VERSION%
3 | Release: 0
4 | Group: Application
5 | URL: https://github.com/EdgeCast/vflow
6 | License: Apache-2
7 | Summary: IPFIX/sFlow/Netflow collector
8 | Source0: vflow
9 | Source1: vflow_stress
10 | Source2: vflow.conf
11 | Source3: mq.conf
12 | Source4: vflow.service
13 | Source5: license
14 | Source6: notice
15 | Source7: vflow.logrotate
16 |
17 | %description
18 | High-performance, scalable and reliable IPFIX, sFlow and Netflow collector
19 |
20 | %prep
21 |
22 | %install
23 | rm -rf %{buildroot}
24 |
25 | mkdir -p %{buildroot}/usr/bin
26 | mkdir -p %{buildroot}/usr/local/vflow
27 | mkdir -p %{buildroot}/usr/share/doc/vflow
28 | mkdir -p %{buildroot}/etc/vflow
29 | mkdir -p %{buildroot}/etc/init.d
30 | mkdir -p %{buildroot}/etc/logrotate.d
31 | cp -Rf %{SOURCE0} %{buildroot}/usr/bin/
32 | cp -Rf %{SOURCE1} %{buildroot}/usr/bin/
33 | cp -Rf %{SOURCE2} %{buildroot}/etc/vflow/
34 | cp -Rf %{SOURCE3} %{buildroot}/etc/vflow/
35 | cp -Rf %{SOURCE4} %{buildroot}/etc/init.d/vflow
36 | cp -Rf %{SOURCE5} %{buildroot}/usr/share/doc/vflow/
37 | cp -Rf %{SOURCE6} %{buildroot}/usr/share/doc/vflow/
38 | cp -Rf %{SOURCE7} %{buildroot}/etc/logrotate.d/vflow
39 |
40 | %files
41 | /usr/bin/vflow
42 | /usr/bin/vflow_stress
43 | /etc/vflow/vflow.conf
44 | /etc/vflow/mq.conf
45 | /etc/init.d/vflow
46 | /etc/logrotate.d/vflow
47 | /usr/share/doc/vflow/*
48 |
49 | %clean
50 | rm -rf $RPM_BUILD_ROOT
51 |
--------------------------------------------------------------------------------
/scripts/vflow.conf:
--------------------------------------------------------------------------------
1 | ipfix-workers: 100
2 | sflow-workers: 100
3 | netflow5-workers: 50
4 | netflow9-workers: 50
5 | log-file: /var/log/vflow.log
6 | ipfix-tpl-cache-file: /usr/local/vflow/vflow.templates
7 |
--------------------------------------------------------------------------------
/scripts/vflow.logrotate:
--------------------------------------------------------------------------------
1 | /var/log/vflow.log {
2 | size 200M
3 | create 644 root root
4 | compress
5 | rotate 5
6 | copytruncate
7 | }
8 |
--------------------------------------------------------------------------------
/scripts/vflow.monit:
--------------------------------------------------------------------------------
1 | check process vflow with pidfile /var/run/vflow.pid
2 | start program = "/etc/init.d/vflow start"
3 | stop program = "/etc/init.d/vflow stop"
4 |
--------------------------------------------------------------------------------
/scripts/vflow.service:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # description: vFlow enterprise IPFIX, sFlow v5 raw header collector and Kafka/NSQ producer
3 |
4 | USER=root
5 | DAEMON=vflow
6 | PIDFILE=/var/run/vflow.pid
7 |
8 | start() {
9 | if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE); then
10 | echo 'vFlow is already running' >&2
11 | return 1
12 | fi
13 | local CMD="$DAEMON &"
14 | echo 'Starting vFlow…' >&2
15 | su -s /bin/sh -c "$CMD" $USER
16 | echo 'vFlow started' >&2
17 | }
18 |
19 | stop() {
20 | echo 'Stopping vFlow…' >&2
21 | kill -15 $(cat "$PIDFILE") && rm -f "$PIDFILE"
22 | echo 'Service stopped' >&2
23 | }
24 |
25 | status() {
26 | if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE); then
27 | echo 'vFlow is running' >&2
28 | else
29 | echo 'vFlow is not running' >&2
30 | fi
31 | }
32 |
33 | case "$1" in
34 | start)
35 | start
36 | ;;
37 | stop)
38 | stop
39 | ;;
40 | status)
41 | status
42 | ;;
43 | restart)
44 | stop
45 | sleep 2
46 | start
47 | ;;
48 | *)
49 | echo "Usage: $0 {start|stop|status|restart}"
50 | esac
51 |
52 | exit 0
53 |
--------------------------------------------------------------------------------
/scripts/vflow.supervisor:
--------------------------------------------------------------------------------
1 | [program:vflow]
2 | command=/usr/bin/vflow
3 | autostart=true
4 | autorestart=true
5 |
--------------------------------------------------------------------------------
/sflow/doc.go:
--------------------------------------------------------------------------------
1 | // Package sflow decodes sFlow packets
2 | package sflow
3 |
--------------------------------------------------------------------------------
/stress/README.md:
--------------------------------------------------------------------------------
1 | # Stress
2 |
3 | Stress exercises vFlow to verify its correct behavior and robustness under load.
4 |
5 | ## Features
6 | - Generate IPFIX data, template and template option
7 | - Generate sFlow v5 sample header data
8 | - Simulate different IPFIX agents
9 |
10 | 
11 |
12 | ## Usage Manual
13 | ````
14 | -vflow-addr vflow ip address (default 127.0.0.1)
15 | -ipfix-port ipfix port number (default 4739)
16 | -sflow-port sflow port number (default 6343)
17 | -ipfix-interval ipfix template interval (default 10s)
18 | -ipfix-rate-limit ipfix rate limit packets per second (default 25000 PPS)
19 | -sflow-rate-limit sflow rate limit packets per second (default 25000 PPS)
20 | ````
21 |
--------------------------------------------------------------------------------
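An example run against a remote collector, using the flags above (the binary is installed as `vflow_stress` by the DEB/RPM packaging; the address and rates are illustrative). Since the generator crafts packets on raw sockets, it typically has to run as root:

````
vflow_stress -vflow-addr 192.168.10.5 -ipfix-rate-limit 50000 -sflow-rate-limit 50000 -ipfix-interval 10s
````
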
/stress/hammer/doc.go:
--------------------------------------------------------------------------------
1 | // Package hammer generates IPFIX and sFlow test packets
2 | package hammer
3 |
--------------------------------------------------------------------------------
/stress/hammer/hammer_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: hammer_test.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 03/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package hammer
24 |
25 | import (
26 | "net"
27 | "strings"
28 | "testing"
29 | )
30 |
31 | func TestIPFIXGenPackets(t *testing.T) {
32 | ip := net.ParseIP("10.0.0.1")
33 | src := net.ParseIP("1.1.1.1")
34 |
35 | ipfix, err := NewIPFIX(ip)
36 | if err != nil {
37 | if !strings.Contains(err.Error(), "not permitted") {
38 | t.Error("unexpected error", err)
39 | } else {
40 | t.Skip(err)
41 | }
42 | }
43 |
44 | ipfix.srcs = append(ipfix.srcs, src)
45 |
46 | packets := ipfix.genPackets(dataType)
47 | if len(packets) < 1 {
48 | t.Error("expect to have packets, got", len(packets))
49 | }
50 | packets = ipfix.genPackets(templateType)
51 | if len(packets) < 1 {
52 | t.Error("expect to have tpl packets, got", len(packets))
53 | }
54 | packets = ipfix.genPackets(templateOptType)
55 | if len(packets) < 1 {
56 | t.Error("expect to have tpl opt packets, got", len(packets))
57 | }
58 | }
59 |
60 | func TestSFlowGenPackets(t *testing.T) {
61 | ip := net.ParseIP("10.0.0.1")
62 | src := net.ParseIP("1.1.1.1")
63 |
64 | sflow, err := NewSFlow(ip)
65 | if err != nil {
66 | if !strings.Contains(err.Error(), "not permitted") {
67 | t.Error("unexpected error", err)
68 | } else {
69 | t.Skip(err)
70 | }
71 | }
72 |
73 | sflow.srcs = append(sflow.srcs, src)
74 |
75 | packets := sflow.genPackets()
76 | if len(packets) < 1 {
77 | t.Error("expect to have packets, got", len(packets))
78 | }
79 |
80 | }
81 |
--------------------------------------------------------------------------------
/stress/hammer/sflow_samples.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: sflow_samples.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package hammer
24 |
25 | var sFlowDataSamples = [][]byte{
26 | {0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x1, 0xc0, 0xe5, 0xd6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0xa9, 0xb2, 0xa3, 0x8a, 0xc7, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0xa4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x16, 0x0, 0x0, 0x7, 0xd0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x16, 0x0, 0x0, 0x2, 0x1a, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x64, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x56, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x52, 0xd4, 0x4, 0xff, 0x1, 0x18, 0x1e, 0xde, 0xad, 0x7a, 0x48, 0xcc, 0x37, 0x81, 0x0, 0x0, 0x7, 0x8, 0x0, 0x45, 0x0, 0x0, 0x40, 0xe3, 0xa7, 0x0, 0x0, 0x4, 0x1, 0x3e, 0xc3, 0xc0, 0xe5, 0xd6, 0x17, 0xc0, 0x10, 0x3d, 0x45, 0x8, 0x0, 0x63, 0x3a, 0x8f, 0x44, 0x5, 0x81, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0xe9, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0, 0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
27 | {0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x1, 0xc0, 0xe5, 0xd6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x47, 0x7c, 0xb2, 0xa4, 0x3b, 0x1d, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0xa0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x31, 0x0, 0x0, 0x1f, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x31, 0x0, 0x0, 0x2, 0x1a, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x60, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x52, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x4e, 0xd4, 0x4, 0xff, 0x1, 0x1f, 0x5e, 0xf4, 0xcc, 0x55, 0xde, 0x1a, 0x92, 0x8, 0x0, 0x45, 0x0, 0x0, 0x40, 0xe6, 0x47, 0x0, 0x0, 0x5, 0x1, 0x40, 0x5, 0xc0, 0xe5, 0xd8, 0x35, 0xc0, 0x10, 0x36, 0x45, 0x8, 0x0, 0xa6, 0x2, 0x4b, 0x79, 0x6, 0x84, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0xe9, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
28 | {0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x1, 0xc0, 0xe5, 0xd6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x47, 0x79, 0xb2, 0xa4, 0x2a, 0x83, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0xb8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x1a, 0x0, 0x0, 0x7, 0xd0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xfe, 0x0, 0x0, 0x2, 0x1a, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x78, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x6b, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x67, 0x84, 0x18, 0x88, 0xf2, 0xe9, 0x98, 0xd4, 0x4, 0xff, 0x1, 0x1d, 0x91, 0x8, 0x0, 0x45, 0x0, 0x0, 0x59, 0x12, 0x74, 0x0, 0x0, 0x3e, 0x11, 0x1a, 0x4b, 0xc0, 0xe5, 0xd8, 0x8d, 0x41, 0x37, 0x75, 0x2b, 0x6d, 0xa6, 0x0, 0x35, 0x0, 0x45, 0xcc, 0xd9, 0xd0, 0xc4, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0xa, 0x63, 0x6f, 0x6c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4, 0x62, 0x6c, 0x6f, 0x62, 0x4, 0x63, 0x6f, 0x72, 0x65, 0x7, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x3, 0x6e, 0x65, 0x74, 0x0, 0x0, 0x1, 0x0, 0x1, 0x0, 0x0, 0x29, 0x10, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0xe9, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
29 | {0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x1, 0xc0, 0xe5, 0xd6, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5f, 0xa8, 0xb2, 0xa3, 0x7b, 0xf8, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0xa4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x16, 0x0, 0x0, 0x7, 0xd0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x16, 0x0, 0x0, 0x2, 0x28, 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x64, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x56, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x0, 0x52, 0xd4, 0x4, 0xff, 0x1, 0x18, 0x1e, 0xde, 0xad, 0x7a, 0x48, 0xcc, 0x37, 0x81, 0x0, 0x0, 0x7, 0x8, 0x0, 0x45, 0x0, 0x0, 0x40, 0x86, 0x41, 0x0, 0x0, 0xa, 0x1, 0x1c, 0x65, 0xc0, 0xe5, 0xd6, 0x17, 0x2e, 0x16, 0x49, 0x4, 0x8, 0x0, 0x70, 0x3b, 0x38, 0x40, 0x4f, 0x84, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0xe9, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0, 0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
30 | }
31 |
--------------------------------------------------------------------------------
/stress/stress.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: stress.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package main
24 |
25 | import (
26 | "flag"
27 | "log"
28 | "net"
29 | "sync"
30 | "time"
31 |
32 | "github.com/EdgeCast/vflow/stress/hammer"
33 | )
34 |
35 | var opts = struct {
36 | vflowAddr string
37 | ipfixPort int
38 | sflowPort int
39 | ipfixTick string
40 | ipfixRateLimit int
41 | sflowRateLimit int
42 | }{
43 | "127.0.0.1",
44 | 4739,
45 | 6343,
46 | "10s",
47 | 25000,
48 | 25000,
49 | }
50 |
51 | func init() {
52 | flag.IntVar(&opts.ipfixPort, "ipfix-port", opts.ipfixPort, "ipfix port number")
53 | flag.IntVar(&opts.sflowPort, "sflow-port", opts.sflowPort, "sflow port number")
54 | flag.StringVar(&opts.ipfixTick, "ipfix-interval", opts.ipfixTick, "ipfix template interval as a duration, e.g. 10s")
55 | flag.IntVar(&opts.ipfixRateLimit, "ipfix-rate-limit", opts.ipfixRateLimit, "ipfix rate limit packets per second")
56 | flag.IntVar(&opts.sflowRateLimit, "sflow-rate-limit", opts.sflowRateLimit, "sflow rate limit packets per second")
57 | flag.StringVar(&opts.vflowAddr, "vflow-addr", opts.vflowAddr, "vflow ip address")
58 |
59 | flag.Parse()
60 | }
61 |
62 | func main() {
63 | var (
64 | wg sync.WaitGroup
65 | vflow = net.ParseIP(opts.vflowAddr)
66 | )
67 |
68 | wg.Add(1)
69 | go func() {
70 | var err error
71 | defer wg.Done()
72 | ipfix, _ := hammer.NewIPFIX(vflow)
73 | ipfix.Port = opts.ipfixPort
74 | ipfix.Tick, err = time.ParseDuration(opts.ipfixTick)
75 | ipfix.RateLimit = opts.ipfixRateLimit
76 | if err != nil {
77 | log.Fatal(err)
78 | }
79 | ipfix.Run()
80 | }()
81 |
82 | wg.Add(1)
83 | go func() {
84 | defer wg.Done()
85 | sflow, _ := hammer.NewSFlow(vflow)
86 | sflow.Port = opts.sflowPort
87 | sflow.RateLimit = opts.sflowRateLimit
88 | sflow.Run()
89 | }()
90 |
91 | log.Printf("Stress is attacking target %s ...", opts.vflowAddr)
92 |
93 | wg.Wait()
94 | }
95 |
--------------------------------------------------------------------------------
/vflow/ipfix_test.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ipfix_test.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 03/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | package main
24 |
25 | import (
26 | "net"
27 | "strings"
28 | "testing"
29 | "time"
30 | )
31 |
32 | func init() {
33 | opts = &Options{}
34 | }
35 |
36 | func TestMirrorIPFIX(t *testing.T) {
37 | var (
38 | msg = make(chan IPFIXUDPMsg, 1)
39 | fb = make(chan IPFIXUDPMsg)
40 | dst = net.ParseIP("127.0.0.1")
41 | ready = make(chan struct{})
42 | )
43 |
44 | go func() {
45 | err := mirrorIPFIX(dst, 10024, msg)
46 | if err != nil {
47 | if strings.Contains(err.Error(), "not permitted") {
48 | t.Log(err)
49 | ready <- struct{}{}
50 | } else {
51 | t.Fatal("unexpected error", err)
52 | }
53 | }
54 | }()
55 |
56 | time.Sleep(2 * time.Second)
57 |
58 | go func() {
59 | b := make([]byte, 1500)
60 | laddr := &net.UDPAddr{
61 | IP: net.ParseIP("127.0.0.1"),
62 | Port: 10024,
63 | }
64 |
65 | conn, err := net.ListenUDP("udp", laddr)
66 | if err != nil {
67 | t.Fatal("unexpected error", err)
68 | }
69 |
70 | close(ready)
71 |
72 | n, raddr, err := conn.ReadFrom(b)
73 | if err != nil {
74 | t.Error("unexpected error", err)
75 | }
76 |
77 | host, _, err := net.SplitHostPort(raddr.String())
78 | if err != nil {
79 | t.Error("unexpected error", err)
80 | }
81 |
82 | fb <- IPFIXUDPMsg{
83 | body: b[:n],
84 | raddr: &net.UDPAddr{IP: net.ParseIP(host)},
85 | }
86 |
87 | }()
88 |
89 | _, ok := <-ready
90 | if ok {
91 | return
92 | }
93 |
94 | body := []byte("hello")
95 |
96 | msg <- IPFIXUDPMsg{
97 | body: body,
98 | raddr: &net.UDPAddr{
99 | IP: net.ParseIP("192.1.1.1"),
100 | },
101 | }
102 |
103 | feedback := <-fb
104 |
105 | if string(feedback.body) != "hello" {
106 | t.Error("expect body is hello, got", string(feedback.body))
107 | }
108 |
109 | if feedback.raddr.IP.String() != "192.1.1.1" {
110 | t.Error("expect raddr is 192.1.1.1, got", feedback.raddr.IP.String())
111 | }
112 | }
113 |
--------------------------------------------------------------------------------
/vflow/ipfix_unix.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ipfix_unix.go
6 | //: author: Mehrdad Arshad Rad - copied by Jeremy Rossi, but not important
7 | //: date: 02/01/2017
8 | //:
9 | //: Licensed under the Apache License, Version 2.0 (the "License");
10 | //: you may not use this file except in compliance with the License.
11 | //: You may obtain a copy of the License at
12 | //:
13 | //: http://www.apache.org/licenses/LICENSE-2.0
14 | //:
15 | //: Unless required by applicable law or agreed to in writing, software
16 | //: distributed under the License is distributed on an "AS IS" BASIS,
17 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 | //: See the License for the specific language governing permissions and
19 | //: limitations under the License.
20 | //: ----------------------------------------------------------------------------
21 | // +build !windows
22 |
23 | package main
24 |
25 | import (
26 | "github.com/EdgeCast/vflow/mirror"
27 |
28 | "net"
29 | )
30 |
31 | func mirrorIPFIXDispatcher(ch chan IPFIXUDPMsg) {
32 | var (
33 | ch4 = make(chan IPFIXUDPMsg, 1000)
34 | ch6 = make(chan IPFIXUDPMsg, 1000)
35 | msg IPFIXUDPMsg
36 | )
37 |
38 | if opts.IPFIXMirrorAddr == "" {
39 | return
40 | }
41 |
42 | for w := 0; w < opts.IPFIXMirrorWorkers; w++ {
43 | dst := net.ParseIP(opts.IPFIXMirrorAddr)
44 |
45 | if dst.To4() != nil {
46 | go mirrorIPFIX(dst, opts.IPFIXMirrorPort, ch4)
47 | } else {
48 | go mirrorIPFIX(dst, opts.IPFIXMirrorPort, ch6)
49 | }
50 | }
51 |
52 | ipfixMirrorEnabled = true
53 | logger.Printf("ipfix mirror service is running (workers#: %d) ...", opts.IPFIXMirrorWorkers)
54 |
55 | for {
56 | msg = <-ch
57 | if msg.raddr.IP.To4() != nil {
58 | ch4 <- msg
59 | } else {
60 | ch6 <- msg
61 | }
62 | }
63 | }
64 |
65 | func mirrorIPFIX(dst net.IP, port int, ch chan IPFIXUDPMsg) error {
66 | var (
67 | packet = make([]byte, opts.IPFIXUDPSize)
68 | msg IPFIXUDPMsg
69 | pLen int
70 | err error
71 | ipHdr []byte
72 | ipHLen int
73 | ipv4 bool
74 | ip mirror.IP
75 | )
76 |
77 | conn, err := mirror.NewRawConn(dst)
78 | if err != nil {
79 | return err
80 | }
81 |
82 | udp := mirror.UDP{55117, port, 0, 0}
83 | udpHdr := udp.Marshal()
84 |
85 | if dst.To4() != nil {
86 | ipv4 = true
87 | }
88 |
89 | if ipv4 {
90 | ip = mirror.NewIPv4HeaderTpl(mirror.UDPProto)
91 | ipHdr = ip.Marshal()
92 | ipHLen = mirror.IPv4HLen
93 | } else {
94 | ip = mirror.NewIPv6HeaderTpl(mirror.UDPProto)
95 | ipHdr = ip.Marshal()
96 | ipHLen = mirror.IPv6HLen
97 | }
98 |
99 | for {
100 | msg = <-ch
101 | pLen = len(msg.body)
102 |
103 | ip.SetAddrs(ipHdr, msg.raddr.IP, dst)
104 | ip.SetLen(ipHdr, pLen+mirror.UDPHLen)
105 |
106 | udp.SetLen(udpHdr, pLen)
107 | // IPv6 checksum mandatory
108 | if !ipv4 {
109 | udp.SetChecksum()
110 | }
111 |
112 | copy(packet[0:ipHLen], ipHdr)
113 | copy(packet[ipHLen:ipHLen+8], udpHdr)
114 | copy(packet[ipHLen+8:], msg.body)
115 |
116 | ipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])
117 |
118 | if err = conn.Send(packet[0 : ipHLen+8+pLen]); err != nil {
119 | return err
120 | }
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
/vflow/ipfix_windows.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: ipfix_windows.go
6 | //: author: Mehrdad Arshad Rad - copied by Jeremy Rossi, but not important
7 | //: date: 02/01/2017
8 | //:
9 | //: Licensed under the Apache License, Version 2.0 (the "License");
10 | //: you may not use this file except in compliance with the License.
11 | //: You may obtain a copy of the License at
12 | //:
13 | //: http://www.apache.org/licenses/LICENSE-2.0
14 | //:
15 | //: Unless required by applicable law or agreed to in writing, software
16 | //: distributed under the License is distributed on an "AS IS" BASIS,
17 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 | //: See the License for the specific language governing permissions and
19 | //: limitations under the License.
20 | //: ----------------------------------------------------------------------------
21 | // +build windows
22 |
23 | package main
24 |
25 | func mirrorIPFIXDispatcher(ch chan IPFIXUDPMsg) {
26 | return
27 | }
28 |
--------------------------------------------------------------------------------
/vflow/sflow_unix.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2020 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: sflow_unix.go
6 | //: author: Mehrdad Arshad Rad
7 | //: date: 02/01/2020
8 | //:
9 | //: Licensed under the Apache License, Version 2.0 (the "License");
10 | //: you may not use this file except in compliance with the License.
11 | //: You may obtain a copy of the License at
12 | //:
13 | //: http://www.apache.org/licenses/LICENSE-2.0
14 | //:
15 | //: Unless required by applicable law or agreed to in writing, software
16 | //: distributed under the License is distributed on an "AS IS" BASIS,
17 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 | //: See the License for the specific language governing permissions and
19 | //: limitations under the License.
20 | //: ----------------------------------------------------------------------------
21 | // +build !windows
22 |
23 | package main
24 |
25 | import (
26 | "net"
27 |
28 | "github.com/EdgeCast/vflow/mirror"
29 | )
30 |
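// mirrorSFlowDispatcher starts opts.SFlowMirrorWorkers mirror workers for the configured
// destination and then forwards each incoming sFlow datagram to the IPv4 or IPv6 worker
// channel, based on the exporter's source address.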
31 | func mirrorSFlowDispatcher(ch chan SFUDPMsg) {
32 | var (
33 | ch4 = make(chan SFUDPMsg, 1000)
34 | ch6 = make(chan SFUDPMsg, 1000)
35 | msg SFUDPMsg
36 | )
37 |
38 | if opts.SFlowMirrorAddr == "" {
39 | return
40 | }
41 |
42 | for w := 0; w < opts.SFlowMirrorWorkers; w++ {
43 | dst := net.ParseIP(opts.SFlowMirrorAddr)
44 |
45 | if dst.To4() != nil {
46 | go mirrorSFlow(dst, opts.SFlowMirrorPort, ch4)
47 | } else {
48 | go mirrorSFlow(dst, opts.SFlowMirrorPort, ch6)
49 | }
50 | }
51 |
52 | sFlowMirrorEnabled = true
53 | logger.Printf("sflow mirror service is running (workers#: %d) ...", opts.SFlowMirrorWorkers)
54 |
55 | for {
56 | msg = <-ch
57 | if msg.raddr.IP.To4() != nil {
58 | ch4 <- msg
59 | } else {
60 | ch6 <- msg
61 | }
62 | }
63 | }
64 |
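// mirrorSFlow wraps each payload received on ch in a raw IP/UDP header, keeping the original
// exporter address as the source and dst:port as the destination, then replays it over a raw
// socket. It loops until a send error occurs.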
65 | func mirrorSFlow(dst net.IP, port int, ch chan SFUDPMsg) error {
66 | var (
67 | packet = make([]byte, opts.SFlowUDPSize)
68 | msg SFUDPMsg
69 | pLen int
70 | err error
71 | ipHdr []byte
72 | ipHLen int
73 | ipv4 bool
74 | ip mirror.IP
75 | )
76 |
77 | conn, err := mirror.NewRawConn(dst)
78 | if err != nil {
79 | return err
80 | }
81 |
82 | udp := mirror.UDP{SrcPort: 55118, DstPort: port, Length: 0, Checksum: 0}
83 | udpHdr := udp.Marshal()
84 |
85 | if dst.To4() != nil {
86 | ipv4 = true
87 | }
88 |
89 | if ipv4 {
90 | ip = mirror.NewIPv4HeaderTpl(mirror.UDPProto)
91 | ipHdr = ip.Marshal()
92 | ipHLen = mirror.IPv4HLen
93 | } else {
94 | ip = mirror.NewIPv6HeaderTpl(mirror.UDPProto)
95 | ipHdr = ip.Marshal()
96 | ipHLen = mirror.IPv6HLen
97 | }
98 |
99 | for {
100 | msg = <-ch
101 | pLen = len(msg.body)
102 |
103 | ip.SetAddrs(ipHdr, msg.raddr.IP, dst)
104 | ip.SetLen(ipHdr, pLen+mirror.UDPHLen)
105 |
106 | udp.SetLen(udpHdr, pLen)
107 | 		// the UDP checksum is optional over IPv4 but mandatory over IPv6
108 | if !ipv4 {
109 | udp.SetChecksum()
110 | }
111 |
112 | copy(packet[0:ipHLen], ipHdr)
113 | copy(packet[ipHLen:ipHLen+8], udpHdr)
114 | copy(packet[ipHLen+8:], msg.body)
115 |
116 | 		sFlowBuffer.Put(msg.body[:opts.SFlowUDPSize]) // the payload has been copied into packet, so the receive buffer can be reused
117 |
118 | if err = conn.Send(packet[0 : ipHLen+8+pLen]); err != nil {
119 | return err
120 | }
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
/vflow/vflow.go:
--------------------------------------------------------------------------------
1 | //: ----------------------------------------------------------------------------
2 | //: Copyright (C) 2017 Verizon. All Rights Reserved.
3 | //: All Rights Reserved
4 | //:
5 | //: file: vflow.go
6 | //: details: TODO
7 | //: author: Mehrdad Arshad Rad
8 | //: date: 02/01/2017
9 | //:
10 | //: Licensed under the Apache License, Version 2.0 (the "License");
11 | //: you may not use this file except in compliance with the License.
12 | //: You may obtain a copy of the License at
13 | //:
14 | //: http://www.apache.org/licenses/LICENSE-2.0
15 | //:
16 | //: Unless required by applicable law or agreed to in writing, software
17 | //: distributed under the License is distributed on an "AS IS" BASIS,
18 | //: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | //: See the License for the specific language governing permissions and
20 | //: limitations under the License.
21 | //: ----------------------------------------------------------------------------
22 |
23 | // Package main is the vflow binary
24 | package main
25 |
26 | import (
27 | "log"
28 | "os"
29 | "os/signal"
30 | "runtime"
31 | "sync"
32 | "syscall"
33 | )
34 |
35 | var (
36 | opts *Options
37 | logger *log.Logger
38 | )
39 |
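// proto is the common lifecycle implemented by each collector (sFlow, IPFIX, NetFlow v5 and v9).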
40 | type proto interface {
41 | run()
42 | shutdown()
43 | }
44 |
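// main loads the configuration, starts each collector in its own goroutine, starts the stats
// reporter, and shuts the collectors down once SIGINT or SIGTERM is received.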
45 | func main() {
46 | var (
47 | wg sync.WaitGroup
48 | signalCh = make(chan os.Signal, 1)
49 | )
50 |
51 | opts = GetOptions()
52 | runtime.GOMAXPROCS(opts.getCPU())
53 |
54 | signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)
55 | logger = opts.Logger
56 |
57 | if !opts.ProducerEnabled {
58 | logger.Println("producer message queue has been disabled")
59 | }
60 |
61 | protos := []proto{NewSFlow(), NewIPFIX(), NewNetflowV5(), NewNetflowV9()}
62 |
63 | for _, p := range protos {
64 | wg.Add(1)
65 | go func(p proto) {
66 | defer wg.Done()
67 | p.run()
68 | }(p)
69 | }
70 |
71 | go statsExpose(protos)
72 |
73 | <-signalCh
74 |
75 | for _, p := range protos {
76 | wg.Add(1)
77 | go func(p proto) {
78 | defer wg.Done()
79 | p.shutdown()
80 | }(p)
81 | }
82 |
83 | wg.Wait()
84 | }
85 |
--------------------------------------------------------------------------------