├── routers.json.example ├── CONTRIBUTORS ├── AUTHORS ├── annotator ├── annotator_test.go ├── annotator.go └── bird │ └── bird.go ├── netflow ├── netflow.proto └── netflow.pb.go ├── CONTRIBUTING.md ├── nfserver ├── template_cache.go └── nfserver.go ├── ifserver ├── template_cache.go └── ifserver.go ├── tflow2.css ├── stats └── stats.go ├── tflow2.go ├── convert ├── convert.go └── convert_test.go ├── ipfix ├── templates.go ├── field_db.go ├── decode.go └── packet.go ├── nf9 ├── templates.go ├── field_db.go ├── decode.go ├── decode_test.go └── packet.go ├── README.md ├── avltree ├── avtltree_test.go └── avtltree.go ├── frontend └── frontend.go ├── tflow2.html ├── tflow2.js ├── protocol_numbers.csv ├── LICENSE └── database ├── database.go └── database_query.go /routers.json.example: -------------------------------------------------------------------------------- 1 | { 2 | "router01.pop01": { 3 | "id": "192.0.2.1", 4 | "interfaces": { 5 | "1": "lo", 6 | "2": "eth0", 7 | "3": "eth1", 8 | "4": "eth2", 9 | "5": "eth3" 10 | } 11 | 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # This is the official list of people who can contribute 2 | # (and typically have contributed) code to the tflow2 repository. 3 | # The AUTHORS file lists the copyright holders; this file 4 | # lists people. For example, Google employees are listed here 5 | # but not in AUTHORS, because Google holds the copyright. 6 | 7 | Oliver Herms 8 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of tflow2 authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 
4 | 5 | # Names should be added to this file as one of 6 | # Organization's name 7 | # Individual's name 8 | # Individual's name 9 | # See CONTRIBUTORS for the meaning of multiple email addresses. 10 | 11 | # Please keep the list sorted. 12 | 13 | Google Inc. 14 | -------------------------------------------------------------------------------- /annotator/annotator_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package annotator 13 | 14 | import ( 15 | "testing" 16 | 17 | "github.com/google/tflow2/netflow" 18 | ) 19 | 20 | func TestTimestampAggr(t *testing.T) { 21 | ca := make(chan *netflow.Flow) 22 | cb := make(chan *netflow.Flow) 23 | var aggr int64 = 60 24 | go Init(ca, cb, aggr, false, 1) 25 | 26 | testData := []struct { 27 | ts int64 28 | want int64 29 | }{ 30 | { 31 | ts: 1000, 32 | want: 960, 33 | }, 34 | { 35 | ts: 1234, 36 | want: 1200, 37 | }, 38 | } 39 | 40 | for _, test := range testData { 41 | fl := &netflow.Flow{ 42 | Timestamp: test.ts, 43 | } 44 | 45 | ca <- fl 46 | fl = <-cb 47 | if fl.Timestamp != test.want { 48 | t.Errorf("Input: %d, Got: %d, Expected: %d, ", test.ts, fl.Timestamp, test.want) 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /netflow/netflow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package netflow; 4 | 5 | // Pfx defines an IP prefix 6 | message pfx { 7 | // IPv4 or IPv6 address 8 | bytes IP = 1; 9 | // Netmask 10 | bytes mask = 2; 11 | } 12 | 13 | // Flow defines a network flow 14 | message Flow { 15 | // Router flow was received from 16 | bytes router = 1; 17 | 18 | // Address family 19 | uint32 family = 2; 20 | 21 | // SRC IP address 22 | bytes src_addr =3; 23 | 24 | // DST IP address 25 | bytes dst_addr = 4; 26 | 27 | // Protocol 28 | uint32 protocol = 5; 29 | 30 | // Number of packets 31 | uint32 packets = 6; 32 | 33 | // Size of flow 34 | uint64 size = 7; 35 | 36 | // SNMP interface id flow was received on 37 | uint32 int_in = 8; 38 | 39 | // SNMP interface if flow was transmitted on 40 | uint32 int_out = 9; 41 | 42 | // Next Hop IP address 43 | bytes next_hop = 10; 44 | 45 | // SRC ASN 46 | uint32 src_as = 11; 47 | 48 | // DST ASN 49 | uint32 dst_as = 12; 50 | 51 | // NEXT HOP ASN 52 | uint32 next_hop_as = 13; 53 | 54 | // Unix timestamp 55 | int64 timestamp = 14; 56 | 57 | // SRC prefix 
58 | pfx src_pfx = 15; 59 | 60 | // DST prefix 61 | pfx dst_pfx = 16; 62 | 63 | // SRC port 64 | uint32 src_port = 17; 65 | 66 | // DST port 67 | uint32 dst_port = 18; 68 | } 69 | 70 | // Flows defines a group of flows 71 | message Flows { 72 | // Group of flows 73 | repeated Flow flows = 1; 74 | } -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Want to contribute? Great! First, read this page (including the small print at the end). 2 | 3 | ### Before you contribute 4 | Before we can use your code, you must sign the 5 | [Google Individual Contributor License Agreement] 6 | (https://cla.developers.google.com/about/google-individual) 7 | (CLA), which you can do online. The CLA is necessary mainly because you own the 8 | copyright to your changes, even after your contribution becomes part of our 9 | codebase, so we need your permission to use and distribute your code. We also 10 | need to be sure of various other things—for instance that you'll tell us if you 11 | know that your code infringes on other people's patents. You don't have to sign 12 | the CLA until after you've submitted your code for review and a member has 13 | approved it, but you must do it before we can put your code into our codebase. 14 | Before you start working on a larger contribution, you should get in touch with 15 | us first through the issue tracker with your idea so that we can help out and 16 | possibly guide you. Coordinating up front makes it much easier to avoid 17 | frustration later on. 18 | 19 | ### Code reviews 20 | All submissions, including submissions by project members, require review. We 21 | use GitHub pull requests for this purpose.
22 | 23 | ### The small print 24 | Contributions made by corporations are covered by a different agreement than 25 | the one above, the 26 | [Software Grant and Corporate Contributor License Agreement] 27 | (https://cla.developers.google.com/about/google-corporate). 28 | -------------------------------------------------------------------------------- /nfserver/template_cache.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package nfserver 13 | 14 | import ( 15 | "sync" 16 | 17 | "github.com/google/tflow2/nf9" 18 | ) 19 | 20 | type templateCache struct { 21 | cache map[uint32]map[uint32]map[uint16]nf9.TemplateRecords 22 | lock sync.RWMutex 23 | } 24 | 25 | // newTemplateCache creates and initializes a new `templateCache` instance 26 | func newTemplateCache() *templateCache { 27 | return &templateCache{cache: make(map[uint32]map[uint32]map[uint16]nf9.TemplateRecords)} 28 | } 29 | 30 | func (c *templateCache) set(rtr uint32, sourceID uint32, templateID uint16, records nf9.TemplateRecords) { 31 | c.lock.Lock() 32 | defer c.lock.Unlock() 33 | if _, ok := c.cache[rtr]; !ok { 34 | c.cache[rtr] = make(map[uint32]map[uint16]nf9.TemplateRecords) 35 | } 36 | if _, ok := c.cache[rtr][sourceID]; !ok { 37 | c.cache[rtr][sourceID] = make(map[uint16]nf9.TemplateRecords) 38 | } 39 | c.cache[rtr][sourceID][templateID] = records 40 | } 41 | 42 | func (c *templateCache) get(rtr uint32, sourceID uint32, templateID uint16) *nf9.TemplateRecords { 43 | c.lock.RLock() 44 | defer c.lock.RUnlock() 45 | if _, ok := c.cache[rtr]; !ok { 46 | return nil 47 | } 48 | if _, ok := c.cache[rtr][sourceID]; !ok { 49 | return nil 50 | } 51 | if _, ok := c.cache[rtr][sourceID][templateID]; !ok { 52 | return nil 53 | } 54 | ret := c.cache[rtr][sourceID][templateID] 55 | return &ret 56 | } 57 | -------------------------------------------------------------------------------- /ifserver/template_cache.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package ifserver 13 | 14 | import ( 15 | "sync" 16 | 17 | "github.com/google/tflow2/ipfix" 18 | ) 19 | 20 | type templateCache struct { 21 | cache map[uint32]map[uint32]map[uint16]ipfix.TemplateRecords 22 | lock sync.RWMutex 23 | } 24 | 25 | // newTemplateCache creates and initializes a new `templateCache` instance 26 | func newTemplateCache() *templateCache { 27 | return &templateCache{cache: make(map[uint32]map[uint32]map[uint16]ipfix.TemplateRecords)} 28 | } 29 | 30 | func (c *templateCache) set(rtr uint32, domainID uint32, templateID uint16, records ipfix.TemplateRecords) { 31 | c.lock.Lock() 32 | defer c.lock.Unlock() 33 | if _, ok := c.cache[rtr]; !ok { 34 | c.cache[rtr] = make(map[uint32]map[uint16]ipfix.TemplateRecords) 35 | } 36 | if _, ok := c.cache[rtr][domainID]; !ok { 37 | c.cache[rtr][domainID] = make(map[uint16]ipfix.TemplateRecords) 38 | } 39 | c.cache[rtr][domainID][templateID] = records 40 | } 41 | 42 | func (c *templateCache) get(rtr uint32, domainID uint32, templateID uint16) *ipfix.TemplateRecords { 43 | c.lock.RLock() 44 | defer c.lock.RUnlock() 45 | if _, ok := c.cache[rtr]; !ok { 46 | return nil 47 | } 48 | if _, ok := c.cache[rtr][domainID]; !ok { 49 | return nil 50 | } 51 | if _, ok := c.cache[rtr][domainID][templateID]; !ok { 52 | return nil 53 | } 54 | ret := c.cache[rtr][domainID][templateID] 55 | return &ret 56 | } 57 | -------------------------------------------------------------------------------- /tflow2.css: -------------------------------------------------------------------------------- 1 | 
/* Copyright 2017 Google Inc. All Rights Reserved. 2 | Licensed under the Apache License, Version 2.0 (the "License"); 3 | you may not use this file except in compliance with the License. 4 | You may obtain a copy of the License at 5 | http://www.apache.org/licenses/LICENSE-2.0 6 | Unless required by applicable law or agreed to in writing, software 7 | distributed under the License is distributed on an "AS IS" BASIS, 8 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | See the License for the specific language governing permissions and 10 | limitations under the License. */ 11 | 12 | body { 13 | font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; 14 | font-weight: normal; 15 | color: #404040; 16 | margin: 0; 17 | padding: 0; 18 | border: 0; 19 | } 20 | header { 21 | background-color: #333333; 22 | margin: 0; 23 | padding: 0; 24 | border: 0; 25 | border-bottom-width: 5px; 26 | border-bottom-style: solid; 27 | border-bottom-color: rgb(255, 203, 5); 28 | } 29 | form { 30 | padding: 0; 31 | margin: 0; 32 | border: 0; 33 | width: 0; 34 | height: 0; 35 | } 36 | h1 { 37 | color: #dc0067; 38 | padding: 5px; 39 | padding-bottom: 0; 40 | margin: 0; 41 | border: 0; 42 | } 43 | #submit { 44 | background-color: #faa732; 45 | background-image: -webkit-linear-gradient(top, #fbb450, #f89406); 46 | color: #ffffff; 47 | text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); 48 | padding: 4px 10px 4px; 49 | margin-bottom: 0; 50 | font-size: 13px; 51 | line-height: 18px; 52 | } 53 | .in { 54 | width: 300px; 55 | height: 42px; 56 | float: left; 57 | margin-bottom: 10px; 58 | } 59 | .in input { 60 | width: 240px; 61 | } 62 | .in label { 63 | display: block; 64 | } 65 | .bd { 66 | width: 200px; 67 | height: 15px; 68 | float: left; 69 | margin-bottom: 10px; 70 | } 71 | fieldset { 72 | margin-bottom: 15px; 73 | } 74 | #fs_container { 75 | background-color: #cecece; 76 | padding-bottom: 5px; 77 | } 78 | #chart_div { 79 | width: 100%; 80 | height: 100%; 81 | } 
-------------------------------------------------------------------------------- /annotator/annotator.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package annotator annotates flows with metadata from external sources 13 | package annotator 14 | 15 | import ( 16 | "sync/atomic" 17 | 18 | "github.com/google/tflow2/annotator/bird" 19 | "github.com/google/tflow2/netflow" 20 | "github.com/google/tflow2/stats" 21 | ) 22 | 23 | // Annotator represents a flow annotator 24 | type Annotator struct { 25 | inputs []chan *netflow.Flow 26 | output chan *netflow.Flow 27 | aggregation int64 28 | numWorkers int 29 | bgpAugment bool 30 | birdAnnotator *bird.Annotator 31 | debug int 32 | } 33 | 34 | // New creates a new `Annotator` instance 35 | func New(inputs []chan *netflow.Flow, output chan *netflow.Flow, numWorkers int, aggregation int64, bgpAugment bool, birdSock string, birdSock6 string, debug int) *Annotator { 36 | a := &Annotator{ 37 | inputs: inputs, 38 | output: output, 39 | aggregation: aggregation, 40 | numWorkers: numWorkers, 41 | bgpAugment: bgpAugment, 42 | debug: debug, 43 | } 44 | if bgpAugment { 45 | a.birdAnnotator = bird.NewAnnotator(birdSock, birdSock6, debug) 46 | } 47 | a.Init() 48 | return a 49 | } 50 | 51 | // Init gets the annotation layer started, receives flows, annotates
them, and carries them 52 | // further to the database module 53 | func (a *Annotator) Init() { 54 | for _, ch := range a.inputs { 55 | for i := 0; i < a.numWorkers; i++ { 56 | go func(ch chan *netflow.Flow) { 57 | for { 58 | // Read flow from netflow/IPFIX module 59 | fl := <-ch 60 | 61 | // Align timestamp on `aggrTime` raster 62 | fl.Timestamp = fl.Timestamp - (fl.Timestamp % a.aggregation) 63 | 64 | // Update global statistics 65 | atomic.AddUint64(&stats.GlobalStats.FlowBytes, fl.Size) 66 | atomic.AddUint64(&stats.GlobalStats.FlowPackets, uint64(fl.Packets)) 67 | 68 | // Annotate flows with ASN and Prefix information from local BIRD (bird.nic.cz) instance 69 | if a.bgpAugment { 70 | a.birdAnnotator.Augment(fl) 71 | } 72 | 73 | // Send flow over to database module 74 | a.output <- fl 75 | } 76 | }(ch) 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /stats/stats.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License.
11 | 12 | // Package stats provides central statistics about tflow2 13 | package stats 14 | 15 | import ( 16 | "fmt" 17 | "net/http" 18 | "sync/atomic" 19 | "time" 20 | ) 21 | 22 | // Stats represents statistics of this program that are to be exported via /varz 23 | type Stats struct { 24 | StartTime int64 25 | Flows4 uint64 26 | Flows6 uint64 27 | Queries uint64 28 | BirdCacheHits uint64 29 | BirdCacheMiss uint64 30 | FlowPackets uint64 31 | FlowBytes uint64 32 | Netflow9packets uint64 33 | Netflow9bytes uint64 34 | IPFIXpackets uint64 35 | IPFIXbytes uint64 36 | } 37 | 38 | // GlobalStats is an instance of `Stats` to keep stats of this program 39 | var GlobalStats Stats 40 | 41 | // Init initializes this module 42 | func Init() { 43 | GlobalStats.StartTime = time.Now().Unix() 44 | } 45 | 46 | // Varz is used to serve HTTP requests /varz and send the statistics to a client in borgmon/prometheus compatible format 47 | func Varz(w http.ResponseWriter) { 48 | now := time.Now().Unix() 49 | fmt.Fprintf(w, "netflow_collector_uptime %d\n", now-GlobalStats.StartTime) 50 | fmt.Fprintf(w, "netflow_collector_flows4 %d\n", atomic.LoadUint64(&GlobalStats.Flows4)) 51 | fmt.Fprintf(w, "netflow_collector_flows6 %d\n", atomic.LoadUint64(&GlobalStats.Flows6)) 52 | fmt.Fprintf(w, "netflow_collector_queries %d\n", atomic.LoadUint64(&GlobalStats.Queries)) 53 | fmt.Fprintf(w, "netflow_collector_bird_cache_hits %d\n", atomic.LoadUint64(&GlobalStats.BirdCacheHits)) 54 | fmt.Fprintf(w, "netflow_collector_bird_cache_miss %d\n", atomic.LoadUint64(&GlobalStats.BirdCacheMiss)) 55 | fmt.Fprintf(w, "netflow_collector_packets %d\n", atomic.LoadUint64(&GlobalStats.FlowPackets)) 56 | fmt.Fprintf(w, "netflow_collector_bytes %d\n", atomic.LoadUint64(&GlobalStats.FlowBytes)) 57 | fmt.Fprintf(w, "netflow_collector_netflow9_packets %d\n", atomic.LoadUint64(&GlobalStats.Netflow9packets)) 58 | fmt.Fprintf(w, "netflow_collector_netflow9_bytes %d\n", atomic.LoadUint64(&GlobalStats.Netflow9bytes)) 59 |
fmt.Fprintf(w, "netflow_collector_ipfix_packets %d\n", atomic.LoadUint64(&GlobalStats.IPFIXpackets)) 60 | fmt.Fprintf(w, "netflow_collector_ipfix_bytes %d\n", atomic.LoadUint64(&GlobalStats.IPFIXbytes)) 61 | } 62 | -------------------------------------------------------------------------------- /tflow2.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | // Package main is the main package of tflow2 13 | package main 14 | 15 | import ( 16 | "flag" 17 | "runtime" 18 | "sync" 19 | 20 | "github.com/google/tflow2/annotator" 21 | "github.com/google/tflow2/database" 22 | "github.com/google/tflow2/frontend" 23 | "github.com/google/tflow2/ifserver" 24 | "github.com/google/tflow2/netflow" 25 | "github.com/google/tflow2/nfserver" 26 | "github.com/google/tflow2/stats" 27 | ) 28 | 29 | var ( 30 | nfAddr = flag.String("netflow", ":2055", "Address to use to receive netflow packets") 31 | ipfixAddr = flag.String("ipfix", ":4739", "Address to use to receive ipfix packets") 32 | aggregation = flag.Int64("aggregation", 60, "Time to groups flows together into one data point") 33 | maxAge = flag.Int64("maxage", 1800, "Maximum age of saved flows") 34 | web = flag.String("web", ":4444", "Address to use for web service") 35 | birdSock = flag.String("birdsock", "/var/run/bird/bird.ctl", "Unix domain socket to communicate with BIRD") 36 | birdSock6 = flag.String("birdsock6", "/var/run/bird/bird6.ctl", "Unix domain socket to communicate with BIRD6") 37 | bgpAugment = flag.Bool("bgp", true, "Use BIRD to augment BGP flow information") 38 | protoNums = flag.String("protonums", "protocol_numbers.csv", "CSV file to read protocol definitions from") 39 | sockReaders = flag.Int("sockreaders", 24, "Num of go routines reading and parsing netflow packets") 40 | channelBuffer = flag.Int("channelbuffer", 1024, "Size of buffer for channels") 41 | dbAddWorkers = flag.Int("dbaddworkers", 24, "Number of workers adding flows into database") 42 | nAggr = flag.Int("numaggr", 12, "Number of flow aggregator workers") 43 | samplerate = flag.Int("samplerate", 1, "Samplerate of routers") 44 | debugLevel = flag.Int("debug", 0, "Debug level, 0: none, 1: +shows if we are receiving flows we are lacking templates for, 2: -, 3: +dump all packets on screen") 45 | compLevel = flag.Int("comp", 6, "gzip compression level for data storage on disk") 46 | dataDir = 
flag.String("data", "./data", "Path to store long term flow logs") 47 | anonymize = flag.Bool("anonymize", false, "Replace IP addresses with NULL before dumping flows to disk") 48 | ) 49 | 50 | func main() { 51 | flag.Parse() 52 | runtime.GOMAXPROCS(runtime.NumCPU()) 53 | stats.Init() 54 | 55 | nfs := nfserver.New(*nfAddr, *sockReaders, *bgpAugment, *debugLevel) 56 | 57 | ifs := ifserver.New(*ipfixAddr, *sockReaders, *bgpAugment, *debugLevel) 58 | 59 | chans := make([]chan *netflow.Flow, 0) 60 | chans = append(chans, nfs.Output) 61 | chans = append(chans, ifs.Output) 62 | 63 | flowDB := database.New(*aggregation, *maxAge, *dbAddWorkers, *samplerate, *debugLevel, *compLevel, *dataDir, *anonymize) 64 | 65 | annotator.New(chans, flowDB.Input, *nAggr, *aggregation, *bgpAugment, *birdSock, *birdSock6, *debugLevel) 66 | 67 | frontend.New(*web, *protoNums, flowDB) 68 | 69 | var wg sync.WaitGroup 70 | wg.Add(1) 71 | wg.Wait() 72 | } 73 | -------------------------------------------------------------------------------- /convert/convert.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package convert provides helper functions to convert data between 13 | // various types, e.g. []byte to int, etc. 
14 | package convert 15 | 16 | import ( 17 | "bytes" 18 | "encoding/binary" 19 | "net" 20 | "strings" 21 | ) 22 | 23 | // IPByteSlice converts a string that contians an IP address into byte slice 24 | func IPByteSlice(ip string) []byte { 25 | ret := net.ParseIP(ip) 26 | if strings.Contains(ip, ".") { 27 | ipv4 := make([]byte, net.IPv4len) 28 | tmp := []byte(ret) 29 | copy(ipv4, tmp[len(tmp)-net.IPv4len:]) 30 | ret = ipv4 31 | } 32 | return ret 33 | } 34 | 35 | // Uint16b converts a byte slice to uint16 assuming the slice is BigEndian 36 | func Uint16b(data []byte) (ret uint16) { 37 | buf := bytes.NewBuffer(data) 38 | binary.Read(buf, binary.BigEndian, &ret) 39 | return 40 | } 41 | 42 | // Uint32b converts a byte slice to uint32 assuming the slice is BigEndian 43 | func Uint32b(data []byte) (ret uint32) { 44 | buf := bytes.NewBuffer(data) 45 | binary.Read(buf, binary.BigEndian, &ret) 46 | return 47 | } 48 | 49 | // Uint64b converts a byte slice to uint64 assuming the slice is BigEndian 50 | func Uint64b(data []byte) (ret uint64) { 51 | buf := bytes.NewBuffer(data) 52 | binary.Read(buf, binary.BigEndian, &ret) 53 | return 54 | } 55 | 56 | // Uint16 converts a byte slice into uint16 assuming LittleEndian 57 | func Uint16(data []byte) (ret uint16) { 58 | return uint16(UintX(data)) 59 | } 60 | 61 | // Uint32 converts a byte slice into uint32 assuming LittleEndian 62 | func Uint32(data []byte) (ret uint32) { 63 | return uint32(UintX(data)) 64 | } 65 | 66 | // Uint64 converts a byte slice into uint64 assuming LittleEndian 67 | func Uint64(data []byte) uint64 { 68 | return UintX(data) 69 | } 70 | 71 | // UintX converts a byte slice into uint64 assuming LittleEndian 72 | func UintX(data []byte) (ret uint64) { 73 | size := uint8(len(data)) 74 | var i uint8 75 | for i = 0; i < size; i++ { 76 | ret += (uint64(data[i]) << (i * 8)) 77 | } 78 | return ret 79 | } 80 | 81 | // Uint16Byte converts a uint16 to a byte slice in BigEndian 82 | func Uint16Byte(data uint16) (ret []byte) { 
83 | buf := new(bytes.Buffer) 84 | binary.Write(buf, binary.BigEndian, data) 85 | return buf.Bytes() 86 | } 87 | 88 | // Uint32Byte converts a uint16 to a byte slice in BigEndian 89 | func Uint32Byte(data uint32) (ret []byte) { 90 | buf := new(bytes.Buffer) 91 | binary.Write(buf, binary.BigEndian, data) 92 | return buf.Bytes() 93 | } 94 | 95 | // Int64Byte converts a int64 to a byte slice in BigEndian 96 | func Int64Byte(data int64) (ret []byte) { 97 | buf := new(bytes.Buffer) 98 | binary.Write(buf, binary.BigEndian, data) 99 | return buf.Bytes() 100 | } 101 | 102 | // Reverse reverses byte slice without allocating new memory 103 | func Reverse(data []byte) []byte { 104 | n := len(data) 105 | for i := 0; i < n/2; i++ { 106 | data[i], data[n-i-1] = data[n-i-1], data[i] 107 | } 108 | return data 109 | } 110 | -------------------------------------------------------------------------------- /ipfix/templates.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package ipfix 13 | 14 | import "unsafe" 15 | 16 | const ( 17 | // numPreAllocFlowDataRecs is number of elements to pre allocate in DataRecs slice 18 | numPreAllocFlowDataRecs = 20 19 | ) 20 | 21 | // TemplateRecordHeader represents the header of a template record 22 | type TemplateRecordHeader struct { 23 | // Number of fields in this Template Record. Because a Template FlowSet 24 | // usually contains multiple Template Records, this field allows the 25 | // Collector to determine the end of the current Template Record and 26 | // the start of the next. 27 | FieldCount uint16 28 | 29 | // Each of the newly generated Template Records is given a unique 30 | // Template ID. This uniqueness is local to the Observation Domain that 31 | // generated the Template ID. Template IDs of Data FlowSets are numbered 32 | // from 256 to 65535. 33 | TemplateID uint16 34 | } 35 | 36 | var sizeOfTemplateRecordHeader = unsafe.Sizeof(TemplateRecordHeader{}) 37 | 38 | // TemplateRecords is a single template that describes structure of a Flow Record 39 | // (actual Netflow data). 40 | type TemplateRecords struct { 41 | Header *TemplateRecordHeader 42 | 43 | // List of fields in this Template Record. 44 | Records []*TemplateRecord 45 | 46 | Packet *Packet 47 | 48 | Values [][]byte 49 | } 50 | 51 | //TemplateRecord represents a Template Record as described in RFC3954 52 | type TemplateRecord struct { 53 | // The length (in bytes) of the field. 54 | Length uint16 55 | 56 | // A numeric value that represents the type of field. 57 | Type uint16 58 | } 59 | 60 | // FlowDataRecord is actual NetFlow data. This structure does not contain any 61 | // information about the actual data meaning. It must be combined with 62 | // corresponding TemplateRecord to be decoded to a single NetFlow data row. 
63 | type FlowDataRecord struct { 64 | // List of Flow Data Record values stored in raw format as []byte 65 | Values [][]byte 66 | } 67 | 68 | // sizeOfTemplateRecord is the raw size of a TemplateRecord 69 | var sizeOfTemplateRecord = unsafe.Sizeof(TemplateRecord{}) 70 | 71 | // DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to 72 | // a list of Flow Data Records. 73 | func (dtpl *TemplateRecords) DecodeFlowSet(set Set) (list []FlowDataRecord) { 74 | if set.Header.SetID != dtpl.Header.TemplateID { 75 | return nil 76 | } 77 | var record FlowDataRecord 78 | 79 | // Pre-allocate some room for flows 80 | list = make([]FlowDataRecord, 0, numPreAllocFlowDataRecs) 81 | 82 | // Assume total record length must be >= 4, otherwise it is impossible 83 | // to distinguish between padding and new record. Padding MUST be 84 | // supported. 85 | n := len(set.Records) 86 | count := 0 87 | 88 | for n >= 4 { 89 | record.Values, count = parseFieldValues(set.Records[0:n], dtpl.Records) 90 | if record.Values == nil { 91 | return 92 | } 93 | list = append(list, record) 94 | n = n - count 95 | } 96 | 97 | return 98 | } 99 | 100 | // parseFieldValues reads actual fields values from a Data Record utilizing a template 101 | func parseFieldValues(flows []byte, fields []*TemplateRecord) ([][]byte, int) { 102 | count := 0 103 | n := len(flows) 104 | values := make([][]byte, len(fields)) 105 | for i, f := range fields { 106 | if n < int(f.Length) { 107 | return nil, 0 108 | } 109 | values[i] = flows[n-int(f.Length) : n] 110 | count += int(f.Length) 111 | n -= int(f.Length) 112 | } 113 | return values, count 114 | } 115 | -------------------------------------------------------------------------------- /nf9/templates.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 
2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package nf9 13 | 14 | import "unsafe" 15 | 16 | const ( 17 | // numPreAllocFlowDataRecs is number of elements to pre allocate in DataRecs slice 18 | numPreAllocFlowDataRecs = 20 19 | ) 20 | 21 | // TemplateRecordHeader represents the header of a template record 22 | type TemplateRecordHeader struct { 23 | // Number of fields in this Template Record. Because a Template FlowSet 24 | // usually contains multiple Template Records, this field allows the 25 | // Collector to determine the end of the current Template Record and 26 | // the start of the next. 27 | FieldCount uint16 28 | 29 | // Each of the newly generated Template Records is given a unique 30 | // Template ID. This uniqueness is local to the Observation Domain that 31 | // generated the Template ID. Template IDs of Data FlowSets are numbered 32 | // from 256 to 65535. 33 | TemplateID uint16 34 | } 35 | 36 | var sizeOfTemplateRecordHeader = unsafe.Sizeof(TemplateRecordHeader{}) 37 | 38 | // TemplateRecords is a single template that describes structure of a Flow Record 39 | // (actual Netflow data). 40 | type TemplateRecords struct { 41 | Header *TemplateRecordHeader 42 | 43 | // List of fields in this Template Record. 
44 | Records []*TemplateRecord 45 | 46 | Packet *Packet 47 | 48 | Values [][]byte 49 | } 50 | 51 | //TemplateRecord represents a Template Record as described in RFC3954 52 | type TemplateRecord struct { 53 | // The length (in bytes) of the field. 54 | Length uint16 55 | 56 | // A numeric value that represents the type of field. 57 | Type uint16 58 | } 59 | 60 | // FlowDataRecord is actual NetFlow data. This structure does not contain any 61 | // information about the actual data meaning. It must be combined with 62 | // corresponding TemplateRecord to be decoded to a single NetFlow data row. 63 | type FlowDataRecord struct { 64 | // List of Flow Data Record values stored in raw format as []byte 65 | Values [][]byte 66 | } 67 | 68 | // sizeOfTemplateRecord is the raw size of a TemplateRecord 69 | var sizeOfTemplateRecord = unsafe.Sizeof(TemplateRecord{}) 70 | 71 | // DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to 72 | // a list of Flow Data Records. 73 | func (dtpl *TemplateRecords) DecodeFlowSet(set FlowSet) (list []FlowDataRecord) { 74 | if set.Header.FlowSetID != dtpl.Header.TemplateID { 75 | return nil 76 | } 77 | var record FlowDataRecord 78 | 79 | // Pre-allocate some room for flows 80 | list = make([]FlowDataRecord, 0, numPreAllocFlowDataRecs) 81 | 82 | // Assume total record length must be >= 4, otherwise it is impossible 83 | // to distinguish between padding and new record. Padding MUST be 84 | // supported. 
85 | n := len(set.Flows) 86 | count := 0 87 | 88 | for n >= 4 { 89 | record.Values, count = parseFieldValues(set.Flows[0:n], dtpl.Records) 90 | if record.Values == nil { 91 | return 92 | } 93 | list = append(list, record) 94 | n = n - count 95 | } 96 | 97 | return 98 | } 99 | 100 | // parseFieldValues reads actual fields values from a Data Record utilizing a template 101 | func parseFieldValues(flows []byte, fields []*TemplateRecord) ([][]byte, int) { 102 | count := 0 103 | n := len(flows) 104 | values := make([][]byte, len(fields)) 105 | for i, f := range fields { 106 | if n < int(f.Length) { 107 | return nil, 0 108 | } 109 | values[i] = flows[n-int(f.Length) : n] 110 | count += int(f.Length) 111 | n -= int(f.Length) 112 | } 113 | return values, count 114 | } 115 | -------------------------------------------------------------------------------- /nf9/field_db.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 

package nf9

// Field type numbers used in NetFlow v9 template records.
// The numbering follows the field type definitions of the NetFlow version 9
// export format (RFC 3954).
const (
	InBytes                   = 1
	InPkts                    = 2
	Flows                     = 3
	Protocol                  = 4
	SrcTos                    = 5
	TCPFlags                  = 6
	L4SrcPort                 = 7
	IPv4SrcAddr               = 8
	SrcMask                   = 9
	InputSnmp                 = 10
	L4DstPort                 = 11
	IPv4DstAddr               = 12
	DstMask                   = 13
	OutputSnmp                = 14
	IPv4NextHop               = 15
	SrcAs                     = 16
	DstAs                     = 17
	BGPIPv4NextHop            = 18
	MulDstPkts                = 19
	MulDstBytes               = 20
	LastSwitched              = 21
	FirstSwitched             = 22
	OutBytes                  = 23
	OutPkts                   = 24
	MinPktLngth               = 25
	MaxPktLngth               = 26
	IPv6SrcAddr               = 27
	IPv6DstAddr               = 28
	IPv6SrcMask               = 29
	IPv6DstMask               = 30
	IPv6FlowLabel             = 31
	IcmpType                  = 32
	MulIgmpType               = 33
	SamplingInterval          = 34
	SamplingAlgorithm         = 35
	FlowActiveTimeout         = 36
	FlowInactiveTimeout       = 37
	EngineType                = 38
	EngineID                  = 39
	TotalBytesExp             = 40
	TotalPktsExp              = 41
	TotalFlowsExp             = 42
	VendorProprietary43       = 43
	IPv4SrcPrefix             = 44
	IPv4DstPrefix             = 45
	MplsTopLabelType          = 46
	MplsTopLabelIPAddr        = 47
	FlowSamplerID             = 48
	FlowSamplerMode           = 49
	FlowSamplerRandomInterval = 50
	VendorProprietary51       = 51
	MinTTL                    = 52
	MaxTTL                    = 53
	IPv4Ident                 = 54
	DstTos                    = 55
	InSrcMac                  = 56
	OutDstMac                 = 57
	SrcVlan                   = 58
	DstVlan                   = 59
	IPProtocolVersion         = 60
	Direction                 = 61
	IPv6NextHop               = 62
	BgpIPv6NextHop            = 63
	IPv6OptionsHeaders        = 64
	VendorProprietary65       = 65
	VendorProprietary66       = 66
	VendorProprietary67       = 67
	VendorProprietary68       = 68
	VendorProprietary69       = 69
	MplsLabel1                = 70
	MplsLabel2                = 71
	MplsLabel3                = 72
	MplsLabel4                = 73
	MplsLabel5                = 74
	MplsLabel6                = 75
	MplsLabel7                = 76
	MplsLabel8                = 77
	MplsLabel9                = 78
	MplsLabel10               = 79
	InDstMac                  = 80
	OutSrcMac                 = 81
	IfName                    = 82
	IfDesc                    = 83
	SamplerName               = 84
	InPermanentBytes          = 85
	InPermanentPkts           = 86
	VendorProprietary87       = 87
	FragmentOffset            = 88
	ForwardingStatus          = 89
	MplsPalRd                 = 90
	MplsPrefixLen             = 91
	SrcTrafficIndex           = 92
	DstTrafficIndex           = 93
	ApplicationDescription    = 94
	ApplicationTag            = 95
	ApplicationName           = 96
)
--------------------------------------------------------------------------------
/ipfix/field_db.go:
--------------------------------------------------------------------------------
// Copyright 2017 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ipfix

// Information Element numbers used in IPFIX template records.
// The values below are identical to the NetFlow v9 field type numbers;
// for IDs in this range the IPFIX Information Element registry (RFC 7012)
// is compatible with NetFlow v9 (RFC 3954).
const (
	InBytes                   = 1
	InPkts                    = 2
	Flows                     = 3
	Protocol                  = 4
	SrcTos                    = 5
	TCPFlags                  = 6
	L4SrcPort                 = 7
	IPv4SrcAddr               = 8
	SrcMask                   = 9
	InputSnmp                 = 10
	L4DstPort                 = 11
	IPv4DstAddr               = 12
	DstMask                   = 13
	OutputSnmp                = 14
	IPv4NextHop               = 15
	SrcAs                     = 16
	DstAs                     = 17
	BGPIPv4NextHop            = 18
	MulDstPkts                = 19
	MulDstBytes               = 20
	LastSwitched              = 21
	FirstSwitched             = 22
	OutBytes                  = 23
	OutPkts                   = 24
	MinPktLngth               = 25
	MaxPktLngth               = 26
	IPv6SrcAddr               = 27
	IPv6DstAddr               = 28
	IPv6SrcMask               = 29
	IPv6DstMask               = 30
	IPv6FlowLabel             = 31
	IcmpType                  = 32
	MulIgmpType               = 33
	SamplingInterval          = 34
	SamplingAlgorithm         = 35
	FlowActiveTimeout         = 36
	FlowInactiveTimeout       = 37
	EngineType                = 38
	EngineID                  = 39
	TotalBytesExp             = 40
	TotalPktsExp              = 41
	TotalFlowsExp             = 42
	VendorProprietary43       = 43
	IPv4SrcPrefix             = 44
	IPv4DstPrefix             = 45
	MplsTopLabelType          = 46
	MplsTopLabelIPAddr        = 47
	FlowSamplerID             = 48
	FlowSamplerMode           = 49
	FlowSamplerRandomInterval = 50
	VendorProprietary51       = 51
	MinTTL                    = 52
	MaxTTL                    = 53
	IPv4Ident                 = 54
	DstTos                    = 55
	InSrcMac                  = 56
	OutDstMac                 = 57
	SrcVlan                   = 58
	DstVlan                   = 59
	IPProtocolVersion         = 60
	Direction                 = 61
	IPv6NextHop               = 62
	BgpIPv6NextHop            = 63
	IPv6OptionsHeaders        = 64
	VendorProprietary65       = 65
	VendorProprietary66       = 66
	VendorProprietary67       = 67
	VendorProprietary68       = 68
	VendorProprietary69       = 69
	MplsLabel1                = 70
	MplsLabel2                = 71
	MplsLabel3                = 72
	MplsLabel4                = 73
	MplsLabel5                = 74
	MplsLabel6                = 75
	MplsLabel7                = 76
	MplsLabel8                = 77
	MplsLabel9                = 78
	MplsLabel10               = 79
	InDstMac                  = 80
	OutSrcMac                 = 81
	IfName                    = 82
	IfDesc                    = 83
	SamplerName               = 84
	InPermanentBytes          = 85
	InPermanentPkts           = 86
	VendorProprietary87       = 87
	FragmentOffset            = 88
	ForwardingStatus          = 89
	MplsPalRd                 = 90
	MplsPrefixLen             = 91
	SrcTrafficIndex           = 92
	DstTrafficIndex           = 93
	ApplicationDescription    = 94
	ApplicationTag            = 95
	ApplicationName           = 96
)
--------------------------------------------------------------------------------
/convert/convert_test.go:
--------------------------------------------------------------------------------
// Copyright 2017 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package convert

import "testing"

// TestIPByteSlice verifies that IPByteSlice parses both IPv4 and IPv6
// string addresses into their raw byte representation.
func TestIPByteSlice(t *testing.T) {
	tests := []struct {
		address string
		wanted  []byte
	}{
		{
			address: "192.168.0.1",
			wanted:  []byte{192, 168, 0, 1},
		},
		{
			address: "255.255.255.255",
			wanted:  []byte{255, 255, 255, 255},
		},
		{
			address: "ffff::ff",
			wanted:  []byte{255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255},
		},
	}

	for _, test := range tests {
		res := IPByteSlice(test.address)
		if !sliceEq(res, test.wanted) {
			t.Errorf("Expected: %d, got: %d", test.wanted, res)
		}
	}
}

// TestUint16b verifies big-endian decoding of 2-byte slices
// (e.g. {2, 4} -> 2*256 + 4 = 516).
func TestUint16b(t *testing.T) {
	tests := []struct {
		input  []byte
		wanted uint16
	}{
		{
			input:  []byte{2, 4},
			wanted: 516,
		},
		{
			input:  []byte{0, 22},
			wanted: 22,
		},
	}

	for _, test := range tests {
		res := Uint16b(test.input)
		if res != test.wanted {
			t.Errorf("Expected: %d, got: %d", test.wanted, res)
		}
	}
}

// TestUint32b verifies big-endian decoding of 4-byte slices.
func TestUint32b(t *testing.T) {
	tests := []struct {
		input  []byte
		wanted uint32
	}{
		{
			input:  []byte{2, 3, 4, 0},
			wanted: 33752064,
		},
		{
			input:  []byte{0, 1, 0, 0},
			wanted: 65536,
		},
	}

	for _, test := range tests {
		res := Uint32b(test.input)
		if res != test.wanted {
			t.Errorf("Expected: %d, got: %d", test.wanted, res)
		}
	}
}

// TestUint64b verifies big-endian decoding of 8-byte slices.
func TestUint64b(t *testing.T) {
	tests := []struct {
		input  []byte
		wanted uint64
	}{
		{
			input:  []byte{0, 0, 0, 0, 2, 3, 4, 0},
			wanted: 33752064,
		},
		{
			input:  []byte{0, 0, 0, 0, 0, 1, 0, 0},
			wanted: 65536,
		},
		{
			input:  []byte{0, 0, 0, 1, 0, 0, 0, 0},
			wanted: 4294967296,
		},
	}

	for _, test := range tests {
		res := Uint64b(test.input)
		if res != test.wanted {
			t.Errorf("Expected: %d, got: %d", test.wanted, res)
		}
	}
}

// TestUintX verifies variable-length little-endian decoding: the same byte
// slices as in TestUint64b yield different values because UintX treats the
// FIRST byte as least significant.
func TestUintX(t *testing.T) {
	tests := []struct {
		input  []byte
		wanted uint64
	}{
		{
			input:  []byte{0, 0, 0, 0, 2, 3, 4, 0},
			wanted: 1129207031660544,
		},
		{
			input:  []byte{0, 0, 0, 0, 0, 1, 0, 0},
			wanted: 1099511627776,
		},
		{
			input:  []byte{0, 0, 0, 1, 0, 0, 0, 0},
			wanted: 16777216,
		},
	}

	for _, test := range tests {
		res := UintX(test.input)
		if res != test.wanted {
			t.Errorf("Expected: %d, got: %d", test.wanted, res)
		}
	}
}

// TestReverse verifies that Reverse returns the bytes in opposite order.
func TestReverse(t *testing.T) {
	tests := []struct {
		input  []byte
		wanted []byte
	}{
		{
			input:  []byte{1, 2, 3, 4},
			wanted: []byte{4, 3, 2, 1},
		},
	}

	for _, test := range tests {
		res := Reverse(test.input)
		if !sliceEq(res, test.wanted) {
			t.Errorf("Expected: %d, got: %d", test.wanted, res)
		}
	}
}

// sliceEq reports whether two byte slices have identical contents.
// Two nil slices are considered equal; nil never equals non-nil.
func sliceEq(a []byte, b []byte) bool {
	if a == nil && b == nil {
		return true
	}

	if a == nil || b == nil {
		return false
	}

	if len(a) != len(b) {
		return false
	}

	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# tflow2

tflow2 is an in memory netflow version 9 and IPFIX analyzer.
It is designed for fast arbitrary queries.

*This software is currently not maintained in this repo.
Check out
https://github.com/taktv6/tflow2*

## Usage

Quick install with `go get -u github.com/google/tflow2`
and `go build github.com/google/tflow2`
or download a pre-built binary from the
[releases page](https://github.com/google/tflow2/releases).

The release binaries support an additional flag, `tflow2 -version`,
which reports the release version.

Once you start the main binary it will start reading netflow version 9 packets
on port 2055 UDP and IPFIX packets on port 4739 on all interfaces.
For user interaction it starts a webserver on port 4444 TCP on all interfaces.

The web interface allows you to run queries against the collected data.
Start time and router are mandatory criteria. If you don't provide any of
these you will always receive an empty result.

### Command line arguments
-aggregation=int

This is the time window in seconds used for aggregation of flows

-alsologtostderr

Will send logs to stderr on top

-anonymize=bool

If set to true IP addresses will be replaced with NULL before dumping
flows to disk. Default is false.

-bgp=bool

tflow will connect to BIRD and BIRD6 unix domain sockets to augment flows
with prefix and autonomous system information. This is useful in case your
routers' exported netflow data is lacking these. This is the case for example
if you use the ipt-NETFLOW on Linux.

BIRD needs a BGP session to each router that is emitting flow packets.
The protocol needs to be named like this: "nf_x_y_z_a" with x_y_z_a being the
source IP address of flow packets, e.g.
nf_185_66_194_0

-birdSock=path

This is the path to the unix domain socket to talk to BIRD

-birdSock6=path

This is the path to the unix domain socket to talk to BIRD6

-channelBuffer=int

This is the number of elements that any channel within the program can buffer.

-dbaddworkers=int

This is the number of workers that are used to add flows into the in memory
database.

-debug=int

Debug level. 1 will give you some more information. 2 is not in use at
the moment. 3 will dump every single received netflow packet on the screen.

-log_backtrace_at

when logging hits line file:N, emit a stack trace (default :0)

-log_dir

If non-empty, write log files in this directory

-logtostderr

log to standard error instead of files

-maxage=int

Maximum age of flow data to keep in memory. Choose this parameter wisely or you
will run out of memory. Experience shows that 500k flows need about 50G of RAM.

-netflow=addr

Address to use to receive netflow packets (default ":2055") via UDP

-ipfix=addr

Address to use to receive IPFIX packets (default ":4739") via UDP

--protonums=path

CSV file to read protocol definitions from (default "protocol_numbers.csv").
This is needed for suggestions in the web interface.

-samplerate=int

Samplerate of your routers. This is used to derive real packet and volume rates
in case you use sampling.

-sockreaders=int

Num of go routines reading and parsing netflow packets (default 24)

-stderrthreshold

logs at or above this threshold go to stderr

-v value

log level for V logs

-vmodule value

comma-separated list of pattern=N settings for file-filtered logging

-web=addr

Address to use for web service (default ":4444")

## Limitations

This software currently only supports receiving netflow packets over IPv4.
Please be aware this software is not platform independent. It will only work
on little endian machines (such as x86)

## License

(c) Google, 2017. Licensed under [Apache-2](LICENSE) license.

This is not an official Google product.
--------------------------------------------------------------------------------
/ipfix/decode.go:
--------------------------------------------------------------------------------
// Copyright 2017 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
11 | 12 | package ipfix 13 | 14 | import ( 15 | "fmt" 16 | "net" 17 | "unsafe" 18 | 19 | "github.com/google/tflow2/convert" 20 | ) 21 | 22 | const ( 23 | // numPreAllocTmplRecs is the number of elements to pre allocate in TemplateRecords slice 24 | numPreAllocRecs = 20 25 | ) 26 | 27 | // SetIDTemplateMax is the maximum FlowSetID being used for templates according to RFC3954 28 | const SetIDTemplateMax = 255 29 | 30 | // TemplateSetID is the set ID reserved for template sets 31 | const TemplateSetID = 2 32 | 33 | // errorIncompatibleVersion prints an error message in case the detected version is not supported 34 | func errorIncompatibleVersion(version uint16) error { 35 | return fmt.Errorf("IPFIX: Incompatible protocol version v%d, only v10 is supported", version) 36 | } 37 | 38 | // Decode is the main function of this package. It converts raw packet bytes to Packet struct. 39 | func Decode(raw []byte, remote net.IP) (*Packet, error) { 40 | data := convert.Reverse(raw) //TODO: Make it endian aware. 
This assumes a little endian machine 41 | 42 | pSize := len(data) 43 | bufSize := 1500 44 | buffer := [1500]byte{} 45 | 46 | if pSize > bufSize { 47 | panic("Buffer too small\n") 48 | } 49 | 50 | // copy data into array as arrays allow us to cast the shit out of it 51 | for i := 0; i < pSize; i++ { 52 | buffer[bufSize-pSize+i] = data[i] 53 | } 54 | 55 | bufferPtr := unsafe.Pointer(&buffer) 56 | bufferMinPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(pSize)) 57 | headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeader)) 58 | 59 | var packet Packet 60 | packet.Buffer = buffer[:] 61 | packet.Header = (*Header)(headerPtr) 62 | 63 | if packet.Header.Version != 10 { 64 | return nil, errorIncompatibleVersion(packet.Header.Version) 65 | } 66 | 67 | //Pre-allocate some room for templates to avoid later copying 68 | packet.Templates = make([]*TemplateRecords, 0, numPreAllocRecs) 69 | 70 | for uintptr(headerPtr) > uintptr(bufferMinPtr) { 71 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfSetHeader) 72 | 73 | fls := &Set{ 74 | Header: (*SetHeader)(ptr), 75 | } 76 | 77 | if fls.Header.SetID == TemplateSetID { 78 | // Template 79 | decodeTemplate(&packet, ptr, uintptr(fls.Header.Length)-sizeOfSetHeader, remote) 80 | } else if fls.Header.SetID > SetIDTemplateMax { 81 | // Actual data packet 82 | decodeData(&packet, ptr, uintptr(fls.Header.Length)-sizeOfSetHeader) 83 | } 84 | 85 | headerPtr = unsafe.Pointer(uintptr(headerPtr) - uintptr(fls.Header.Length)) 86 | } 87 | 88 | return &packet, nil 89 | } 90 | 91 | // decodeData decodes a flowSet from `packet` 92 | func decodeData(packet *Packet, headerPtr unsafe.Pointer, size uintptr) { 93 | flsh := (*SetHeader)(unsafe.Pointer(headerPtr)) 94 | data := unsafe.Pointer(uintptr(headerPtr) - uintptr(flsh.Length)) 95 | 96 | fls := &Set{ 97 | Header: flsh, 98 | Records: (*(*[1<<31 - 1]byte)(data))[sizeOfSetHeader:flsh.Length], 99 | } 100 | 101 | packet.FlowSets = 
append(packet.FlowSets, fls) 102 | } 103 | 104 | // decodeTemplate decodes a template from `packet` 105 | func decodeTemplate(packet *Packet, end unsafe.Pointer, size uintptr, remote net.IP) { 106 | min := uintptr(end) - size 107 | for uintptr(end) > min { 108 | headerPtr := unsafe.Pointer(uintptr(end) - sizeOfTemplateRecordHeader) 109 | 110 | tmplRecs := &TemplateRecords{} 111 | tmplRecs.Header = (*TemplateRecordHeader)(unsafe.Pointer(headerPtr)) 112 | tmplRecs.Packet = packet 113 | tmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) 114 | 115 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfTemplateRecordHeader) 116 | var i uint16 117 | for i = 0; i < tmplRecs.Header.FieldCount; i++ { 118 | rec := (*TemplateRecord)(unsafe.Pointer(ptr)) 119 | tmplRecs.Records = append(tmplRecs.Records, rec) 120 | ptr = unsafe.Pointer(uintptr(ptr) - sizeOfTemplateRecord) 121 | } 122 | 123 | packet.Templates = append(packet.Templates, tmplRecs) 124 | end = unsafe.Pointer(uintptr(end) - uintptr(tmplRecs.Header.FieldCount)*sizeOfTemplateRecord - sizeOfTemplateRecordHeader) 125 | } 126 | } 127 | 128 | // PrintHeader prints the header of `packet` 129 | func PrintHeader(p *Packet) { 130 | fmt.Printf("Version: %d\n", p.Header.Version) 131 | fmt.Printf("Length: %d\n", p.Header.Length) 132 | fmt.Printf("UnixSecs: %d\n", p.Header.ExportTime) 133 | fmt.Printf("Sequence: %d\n", p.Header.SequenceNumber) 134 | fmt.Printf("DomainId: %d\n", p.Header.DomainID) 135 | } 136 | -------------------------------------------------------------------------------- /nf9/decode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package nf9 13 | 14 | import ( 15 | "fmt" 16 | "net" 17 | "unsafe" 18 | 19 | "github.com/google/tflow2/convert" 20 | ) 21 | 22 | const ( 23 | // numPreAllocTmplRecs is the number of elements to pre allocate in TemplateRecords slice 24 | numPreAllocRecs = 20 25 | ) 26 | 27 | // FlowSetIDTemplateMax is the maximum FlowSetID being used for templates according to RFC3954 28 | const FlowSetIDTemplateMax = 255 29 | 30 | // TemplateFlowSetID is the FlowSetID reserved for template flow sets 31 | const TemplateFlowSetID = 0 32 | 33 | // errorIncompatibleVersion prints an error message in case the detected version is not supported 34 | func errorIncompatibleVersion(version uint16) error { 35 | return fmt.Errorf("NF9: Incompatible protocol version v%d, only v9 is supported", version) 36 | } 37 | 38 | // Decode is the main function of this package. It converts raw packet bytes to Packet struct. 39 | func Decode(raw []byte, remote net.IP) (*Packet, error) { 40 | data := convert.Reverse(raw) //TODO: Make it endian aware. 
This assumes a little endian machine 41 | 42 | pSize := len(data) 43 | bufSize := 1500 44 | buffer := [1500]byte{} 45 | 46 | if pSize > bufSize { 47 | panic("Buffer too small\n") 48 | } 49 | 50 | // copy data into array as arrays allow us to cast the shit out of it 51 | for i := 0; i < pSize; i++ { 52 | buffer[bufSize-pSize+i] = data[i] 53 | } 54 | 55 | bufferPtr := unsafe.Pointer(&buffer) 56 | bufferMinPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(pSize)) 57 | headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeader)) 58 | 59 | var packet Packet 60 | packet.Buffer = buffer[:] 61 | packet.Header = (*Header)(headerPtr) 62 | 63 | if packet.Header.Version != 9 { 64 | return nil, errorIncompatibleVersion(packet.Header.Version) 65 | } 66 | 67 | //Pre-allocate some room for templates to avoid later copying 68 | packet.Templates = make([]*TemplateRecords, 0, numPreAllocRecs) 69 | 70 | for uintptr(headerPtr) > uintptr(bufferMinPtr) { 71 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfFlowSetHeader) 72 | 73 | fls := &FlowSet{ 74 | Header: (*FlowSetHeader)(ptr), 75 | } 76 | 77 | if fls.Header.FlowSetID == TemplateFlowSetID { 78 | // Template 79 | decodeTemplate(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader, remote) 80 | } else if fls.Header.FlowSetID > FlowSetIDTemplateMax { 81 | // Actual data packet 82 | decodeData(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader) 83 | } 84 | 85 | headerPtr = unsafe.Pointer(uintptr(headerPtr) - uintptr(fls.Header.Length)) 86 | } 87 | 88 | return &packet, nil 89 | } 90 | 91 | // decodeData decodes a flowSet from `packet` 92 | func decodeData(packet *Packet, headerPtr unsafe.Pointer, size uintptr) { 93 | flsh := (*FlowSetHeader)(unsafe.Pointer(headerPtr)) 94 | data := unsafe.Pointer(uintptr(headerPtr) - uintptr(flsh.Length)) 95 | 96 | fls := &FlowSet{ 97 | Header: flsh, 98 | Flows: (*(*[1<<31 - 1]byte)(data))[sizeOfFlowSetHeader:flsh.Length], 99 | } 100 | 
101 | packet.FlowSets = append(packet.FlowSets, fls) 102 | } 103 | 104 | // decodeTemplate decodes a template from `packet` 105 | func decodeTemplate(packet *Packet, end unsafe.Pointer, size uintptr, remote net.IP) { 106 | min := uintptr(end) - size 107 | for uintptr(end) > min { 108 | headerPtr := unsafe.Pointer(uintptr(end) - sizeOfTemplateRecordHeader) 109 | 110 | tmplRecs := &TemplateRecords{} 111 | tmplRecs.Header = (*TemplateRecordHeader)(unsafe.Pointer(headerPtr)) 112 | tmplRecs.Packet = packet 113 | tmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) 114 | 115 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfTemplateRecordHeader) 116 | var i uint16 117 | for i = 0; i < tmplRecs.Header.FieldCount; i++ { 118 | rec := (*TemplateRecord)(unsafe.Pointer(ptr)) 119 | tmplRecs.Records = append(tmplRecs.Records, rec) 120 | ptr = unsafe.Pointer(uintptr(ptr) - sizeOfTemplateRecord) 121 | } 122 | 123 | packet.Templates = append(packet.Templates, tmplRecs) 124 | end = unsafe.Pointer(uintptr(end) - uintptr(tmplRecs.Header.FieldCount)*sizeOfTemplateRecord - sizeOfTemplateRecordHeader) 125 | } 126 | } 127 | 128 | // PrintHeader prints the header of `packet` 129 | func PrintHeader(p *Packet) { 130 | fmt.Printf("Version: %d\n", p.Header.Version) 131 | fmt.Printf("Count: %d\n", p.Header.Count) 132 | fmt.Printf("SysUpTime: %d\n", p.Header.SysUpTime) 133 | fmt.Printf("UnixSecs: %d\n", p.Header.UnixSecs) 134 | fmt.Printf("Sequence: %d\n", p.Header.SequenceNumber) 135 | fmt.Printf("SourceId: %d\n", p.Header.SourceID) 136 | } 137 | -------------------------------------------------------------------------------- /avltree/avtltree_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package avltree 13 | 14 | import "testing" 15 | 16 | func testIsSmaller(c1 interface{}, c2 interface{}) bool { 17 | if c1.(int) < c2.(int) { 18 | return true 19 | } 20 | return false 21 | } 22 | 23 | func TestInsert(t *testing.T) { 24 | values := [...]int{100, 50, 150, 160, 170, 180, 90, 80, 70, 50, 60, 54, 32, 12, 5, 1} 25 | var tree *TreeNode 26 | 27 | for val := range values { 28 | tree.insert(val, val, testIsSmaller) 29 | } 30 | } 31 | 32 | func TestIntersection(t *testing.T) { 33 | candidates := make([]*Tree, 0) 34 | valuesA := [...]int{20, 100, 50, 150, 160, 170, 180, 90, 15, 80, 70, 50, 60, 54, 32, 12, 5, 1} 35 | valuesB := [...]int{101, 51, 150, 160, 171, 182, 89, 80, 75, 15, 53, 20, 1} 36 | valuesC := [...]int{20, 100, 50, 150, 15, 160, 170, 180, 90, 80, 70, 50, 60, 54, 32, 12, 5, 1} 37 | valuesD := [...]int{20, 101, 51, 150, 171, 182, 89, 80, 75, 15, 53, 20, 1} 38 | valuesE := [...]int{15, 20, 100, 50, 150, 160, 170, 180, 90, 80, 70, 50, 60, 54, 32, 12, 5, 1} 39 | valuesCommon := [...]int{150, 80, 1, 20, 15} 40 | 41 | treeA := New() 42 | treeB := New() 43 | treeC := New() 44 | treeD := New() 45 | treeE := New() 46 | 47 | for _, val := range valuesA { 48 | treeA.Insert(val, val, testIsSmaller) 49 | } 50 | 51 | for _, val := range valuesB { 52 | treeB.Insert(val, val, testIsSmaller) 53 | } 54 | 55 | for _, val := range valuesC { 56 | treeC.Insert(val, val, testIsSmaller) 57 | } 58 | 59 | for _, val := range valuesD { 60 | treeD.Insert(val, val, testIsSmaller) 61 | } 62 | 63 | for _, val := range valuesE { 64 | 
treeE.Insert(val, val, testIsSmaller) 65 | } 66 | 67 | candidates = append(candidates, treeA) 68 | candidates = append(candidates, treeB) 69 | candidates = append(candidates, treeC) 70 | candidates = append(candidates, treeD) 71 | candidates = append(candidates, treeE) 72 | 73 | res := Intersection(candidates) 74 | for _, val := range valuesCommon { 75 | if !res.Exists(val) { 76 | t.Errorf("Element %d not found in common elements tree\n", val) 77 | } 78 | } 79 | 80 | } 81 | 82 | func TestNodeExists(t *testing.T) { 83 | tests := []struct { 84 | input int 85 | want bool 86 | }{ 87 | { 88 | input: 90, 89 | want: true, 90 | }, 91 | { 92 | input: 50, 93 | want: true, 94 | }, 95 | { 96 | input: 54, 97 | want: true, 98 | }, 99 | { 100 | input: 111, 101 | want: false, 102 | }, 103 | } 104 | 105 | values := [...]int{100, 50, 150, 160, 170, 180, 90, 80, 70, 50, 60, 54, 32, 12, 5, 1} 106 | tree := New() 107 | for _, val := range values { 108 | tree.Insert(val, val, testIsSmaller) 109 | } 110 | 111 | for _, test := range tests { 112 | if ret := tree.Exists(test.input); ret != test.want { 113 | t.Errorf("Test for %d was %t expected to be %t", test.input, ret, test.want) 114 | } 115 | } 116 | 117 | } 118 | 119 | func TestCommon(t *testing.T) { 120 | valuesA := [...]int{20, 100, 50, 150, 160, 170, 180, 90, 80, 70, 50, 60, 54, 32, 12, 5, 1} 121 | valuesB := [...]int{20, 101, 51, 150, 160, 171, 182, 89, 80, 75, 53, 20, 1} 122 | valuesCommon := [...]int{20, 150, 160, 80, 1} 123 | treeA := New() 124 | treeB := New() 125 | 126 | for _, val := range valuesA { 127 | treeA.Insert(val, val, testIsSmaller) 128 | } 129 | 130 | for _, val := range valuesB { 131 | treeB.Insert(val, val, testIsSmaller) 132 | } 133 | 134 | common := treeA.Intersection(treeB) 135 | 136 | for _, val := range valuesCommon { 137 | if !common.Exists(val) { 138 | t.Errorf("Element %d not found in common elements tree\n", val) 139 | } 140 | } 141 | } 142 | 143 | func sliceEq(a []interface{}, b []int) bool { 144 | if a 
== nil && b == nil { 145 | return true 146 | } 147 | 148 | if a == nil || b == nil { 149 | return false 150 | } 151 | 152 | if len(a) != len(b) { 153 | return false 154 | } 155 | 156 | for i := range a { 157 | if a[i] != b[i] { 158 | return false 159 | } 160 | } 161 | return true 162 | } 163 | 164 | func TestTopN(t *testing.T) { 165 | tests := []struct { 166 | values [20]int 167 | topValues [6]int 168 | want bool 169 | }{ 170 | { 171 | values: [...]int{1000, 20, 100, 5555, 50, 150, 2000, 160, 170, 180, 90, 80, 70, 50, 60, 54, 32, 12, 5, 1}, 172 | topValues: [...]int{5555, 2000, 1000, 180, 170, 160}, 173 | want: true, 174 | }, 175 | { 176 | values: [...]int{57489, 2541, 5214, 2254, 2, 588, 98, 2874, 544, 98, 74, 22, 556, 14, 12, 23, 500, 532, 12, 15}, 177 | topValues: [...]int{57489, 5214, 2874, 2541, 2254, 588}, 178 | want: true, 179 | }, 180 | } 181 | 182 | for _, test := range tests { 183 | tree := New() 184 | for _, val := range test.values { 185 | tree.Insert(val, val, testIsSmaller) 186 | } 187 | 188 | res := tree.TopN(6) 189 | if sliceEq(res, 190 | test.topValues[:]) != test.want { 191 | t.Errorf("Tested: %v, got %v, wanted %v\n", test.values, res, test.topValues) 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /frontend/frontend.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package frontend provides services via HTTP 13 | package frontend 14 | 15 | import ( 16 | "bufio" 17 | "encoding/csv" 18 | "encoding/json" 19 | "fmt" 20 | "io" 21 | "io/ioutil" 22 | "net/http" 23 | _ "net/http/pprof" // Needed for profiling only 24 | "net/url" 25 | "os" 26 | "regexp" 27 | "strings" 28 | 29 | "github.com/google/tflow2/database" 30 | "github.com/google/tflow2/stats" 31 | "github.com/golang/glog" 32 | ) 33 | 34 | // Frontend represents the web interface 35 | type Frontend struct { 36 | protocols map[string]string 37 | indexHTML string 38 | flowDB *database.FlowDatabase 39 | } 40 | 41 | // New creates a new `Frontend` 42 | func New(addr string, protoNumsFilename string, fdb *database.FlowDatabase) *Frontend { 43 | fe := &Frontend{ 44 | flowDB: fdb, 45 | } 46 | fe.populateProtocols(protoNumsFilename) 47 | fe.populateIndexHTML() 48 | http.HandleFunc("/", fe.httpHandler) 49 | go http.ListenAndServe(addr, nil) 50 | return fe 51 | } 52 | 53 | // populateIndexHTML copies tflow2.html into indexHTML variable 54 | func (fe *Frontend) populateIndexHTML() { 55 | html, err := ioutil.ReadFile("tflow2.html") 56 | if err != nil { 57 | glog.Errorf("Unable to read tflow2.html: %v", err) 58 | return 59 | } 60 | 61 | fe.indexHTML = string(html) 62 | } 63 | 64 | func (fe *Frontend) populateProtocols(protoNumsFilename string) { 65 | f, err := os.Open(protoNumsFilename) 66 | if err != nil { 67 | glog.Errorf("Couldn't open protoNumsFile: %v\n", err) 68 | return 69 | } 70 | r := csv.NewReader(bufio.NewReader(f)) 71 | fe.protocols = make(map[string]string) 72 | for { 73 | record, err := r.Read() 74 | if err == io.EOF { 75 | break 76 | } 77 | 78 | ok, err := regexp.Match("^[0-9]{1,3}$", []byte(record[0])) 79 | if err != nil { 80 | fmt.Printf("Regex: %v\n", err) 81 | continue 82 | } 83 | if ok { 84 | fe.protocols[record[0]] = record[1] 85 | } 86 | } 87 | 
} 88 | 89 | func (fe *Frontend) httpHandler(w http.ResponseWriter, r *http.Request) { 90 | w.Header().Set("Access-Control-Allow-Origin", "*") 91 | 92 | parts := strings.Split(r.URL.Path, "?") 93 | path := parts[0] 94 | switch path { 95 | case "/": 96 | fe.indexHandler(w, r) 97 | case "/query": 98 | fe.queryHandler(w, r) 99 | case "/varz": 100 | stats.Varz(w) 101 | case "/protocols": 102 | fe.getProtocols(w, r) 103 | case "/routers": 104 | fileHandler(w, r, "routers.json") 105 | case "/tflow2.css": 106 | fileHandler(w, r, "tflow2.css") 107 | case "/tflow2.js": 108 | fileHandler(w, r, "tflow2.js") 109 | } 110 | } 111 | 112 | func (fe *Frontend) getProtocols(w http.ResponseWriter, r *http.Request) { 113 | output, err := json.Marshal(fe.protocols) 114 | if err != nil { 115 | glog.Warningf("Unable to marshal: %v", err) 116 | http.Error(w, "Unable to marshal data", 500) 117 | } 118 | fmt.Fprintf(w, "%s", output) 119 | } 120 | 121 | func fileHandler(w http.ResponseWriter, r *http.Request, filename string) { 122 | content, err := ioutil.ReadFile(filename) 123 | if err != nil { 124 | glog.Warningf("Unable to read file: %v", err) 125 | http.Error(w, "Unable to read file", 404) 126 | } 127 | fmt.Fprintf(w, "%s", string(content)) 128 | } 129 | 130 | func (fe *Frontend) indexHandler(w http.ResponseWriter, r *http.Request) { 131 | query := "{}" 132 | for _, p := range strings.Split(r.URL.RawQuery, "&") { 133 | parts := strings.SplitN(p, "=", 2) 134 | if len(parts) == 0 { 135 | glog.Warningf("query was empty") 136 | http.Error(w, "query was empty", 400) 137 | continue 138 | } 139 | param := parts[0] 140 | value := "" 141 | if len(parts) == 2 { 142 | value = parts[1] 143 | } 144 | 145 | if param == "query" { 146 | var err error 147 | query, err = url.QueryUnescape(value) 148 | if err != nil { 149 | glog.Warningf("unable to decode URL parameter query") 150 | http.Error(w, "unable to decode URL parameter query", 503) 151 | } 152 | } 153 | } 154 | 155 | output := 
strings.Replace(fe.indexHTML, "VAR_QUERY", query, -1) 156 | fmt.Fprintf(w, output) 157 | } 158 | 159 | func (fe *Frontend) queryHandler(w http.ResponseWriter, r *http.Request) { 160 | w.Header().Set("Access-Control-Allow-Origin", "*") 161 | q := "" 162 | for _, p := range strings.Split(r.URL.RawQuery, "&") { 163 | parts := strings.SplitN(p, "=", 2) 164 | param := parts[0] 165 | value := "" 166 | if len(parts) > 1 { 167 | value = parts[1] 168 | } 169 | 170 | if param == "q" { 171 | var err error 172 | q, err = url.QueryUnescape(value) 173 | if err != nil { 174 | glog.Warningf("Unable to unescape query: %v", err) 175 | http.Error(w, "Unable to unescape query", 400) 176 | } 177 | } 178 | } 179 | 180 | result, err := fe.flowDB.RunQuery(q) 181 | if err != nil { 182 | glog.Errorf("Query failed: %v", err) 183 | http.Error(w, "Query failed", 500) 184 | } 185 | 186 | fe.printResult(w, result) 187 | } 188 | 189 | func (fe *Frontend) printResult(w http.ResponseWriter, result [][]string) { 190 | rows := len(result) 191 | if rows == 0 { 192 | return 193 | } 194 | columns := len(result[0]) 195 | 196 | fmt.Fprintf(w, "[\n") 197 | fmt.Fprintf(w, "[ ") 198 | // Send header of table to client 199 | for i, val := range result[0] { 200 | if i < columns-1 { 201 | fmt.Fprintf(w, "\"%s\", ", string(val)) 202 | continue 203 | } 204 | fmt.Fprintf(w, "\"%s\"", string(val)) 205 | } 206 | if rows == 1 { 207 | fmt.Fprintf(w, "]\n") 208 | return 209 | } 210 | fmt.Fprintf(w, "],\n") 211 | 212 | for i, row := range result[1:] { 213 | fmt.Fprintf(w, "[ ") 214 | for j, column := range row { 215 | if j == 0 { 216 | fmt.Fprintf(w, "\"%s\", ", string(column)) 217 | continue 218 | } 219 | if j < columns-1 { 220 | fmt.Fprintf(w, "%s, ", string(column)) 221 | continue 222 | } 223 | fmt.Fprintf(w, "%s", string(column)) 224 | } 225 | if i < rows-2 { 226 | fmt.Fprintf(w, "],\n") 227 | continue 228 | } 229 | fmt.Fprintf(w, "]\n") 230 | } 231 | fmt.Fprintf(w, "]") 232 | } 233 | 
-------------------------------------------------------------------------------- /nf9/decode_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package nf9 13 | 14 | import ( 15 | "net" 16 | "testing" 17 | ) 18 | 19 | func TestDecode(t *testing.T) { 20 | s := []byte{0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 249, 0, 6, 187, 71, 213, 103, 123, 68, 213, 103, 10, 5, 0, 0, 11, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 26, 187, 1, 239, 181, 153, 192, 66, 185, 34, 93, 13, 31, 65, 195, 66, 185, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 249, 0, 6, 183, 71, 213, 103, 7, 39, 213, 103, 224, 156, 0, 0, 153, 2, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 30, 80, 0, 105, 187, 153, 192, 66, 185, 136, 100, 80, 151, 65, 195, 66, 185, 128, 0, 221, 1, 0, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 6, 180, 71, 213, 103, 164, 62, 213, 103, 160, 0, 0, 0, 4, 0, 0, 0, 21, 0, 28, 0, 21, 0, 28, 0, 16, 80, 0, 87, 204, 185, 192, 66, 185, 147, 23, 217, 172, 93, 193, 66, 185, 64, 0, 223, 1, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 137, 0, 6, 191, 71, 213, 103, 248, 44, 213, 103, 125, 17, 0, 0, 57, 0, 0, 0, 21, 0, 72, 0, 21, 0, 72, 0, 24, 187, 1, 145, 226, 185, 192, 66, 185, 88, 160, 125, 74, 84, 193, 66, 185, 0, 8, 237, 240, 149, 1, 185, 
92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 124, 61, 213, 103, 164, 0, 0, 0, 3, 0, 0, 0, 39, 0, 22, 0, 39, 0, 22, 0, 19, 89, 216, 80, 0, 235, 5, 64, 100, 41, 193, 66, 185, 243, 121, 19, 50, 128, 0, 221, 1, 0, 0, 0, 221, 134, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 185, 71, 213, 103, 234, 62, 213, 103, 201, 53, 0, 0, 177, 0, 0, 0, 21, 0, 73, 0, 21, 0, 73, 0, 24, 187, 1, 181, 211, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 35, 0, 14, 64, 80, 20, 0, 42, 179, 79, 172, 109, 9, 172, 109, 133, 55, 19, 15, 48, 96, 34, 3, 42, 104, 0, 222, 1, 0, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 226, 68, 213, 103, 201, 21, 0, 0, 18, 0, 0, 0, 116, 0, 22, 0, 116, 0, 22, 0, 26, 172, 230, 187, 1, 101, 0, 64, 100, 49, 193, 66, 185, 36, 107, 175, 54, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 222, 67, 213, 103, 211, 5, 0, 0, 6, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 27, 80, 0, 243, 165, 153, 192, 66, 185, 138, 98, 227, 172, 65, 195, 66, 185, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 188, 71, 213, 103, 188, 71, 213, 103, 122, 0, 0, 0, 1, 0, 0, 0, 184, 0, 15, 0, 184, 0, 15, 0, 24, 145, 193, 230, 15, 213, 1, 64, 100, 16, 193, 66, 185, 210, 7, 182, 193, 188, 0, 221, 1, 0, 0, 0, 221, 134, 212, 186, 30, 36, 78, 204, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 179, 71, 213, 103, 215, 49, 213, 103, 248, 17, 0, 0, 13, 0, 0, 0, 119, 0, 16, 0, 119, 0, 16, 0, 26, 2, 201, 187, 1, 220, 90, 4, 46, 254, 94, 0, 2, 0, 0, 0, 0, 0, 0, 128, 254, 34, 44, 143, 56, 96, 67, 7, 176, 0, 70, 21, 1, 96, 34, 3, 42, 142, 0, 0, 0, 12, 176, 206, 250, 14, 3, 19, 240, 128, 40, 3, 42, 104, 0, 222, 1, 0, 0, 0, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 183, 71, 213, 103, 47, 68, 213, 103, 54, 23, 0, 0, 10, 0, 0, 0, 73, 0, 22, 0, 73, 0, 22, 0, 26, 79, 154, 187, 1, 
59, 4, 64, 100, 85, 193, 66, 185, 43, 156, 16, 199, 68, 0, 221, 1, 0, 0, 0, 221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 6, 179, 71, 213, 103, 179, 71, 213, 103, 61, 0, 0, 0, 1, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 16, 80, 0, 251, 209, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 16, 32, 0, 0, 0, 0, 0, 0, 33, 8, 1, 64, 80, 20, 0, 42, 159, 9, 125, 55, 155, 45, 217, 165, 2, 0, 20, 1, 96, 34, 3, 42, 100, 0, 220, 1, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 1, 179, 71, 213, 103, 19, 59, 213, 103, 152, 0, 0, 0, 2, 0, 0, 0, 15, 0, 93, 0, 15, 0, 93, 0, 3, 3, 0, 0, 153, 192, 66, 185, 119, 160, 222, 68, 31, 194, 66, 185, 60, 0, 228, 1, 2, 0, 0, 1, 6, 0, 56, 0, 6, 0, 80, 0, 1, 0, 5, 0, 1, 0, 4, 0, 4, 0, 21, 0, 4, 0, 22, 0, 4, 0, 1, 0, 4, 0, 2, 0, 2, 0, 253, 0, 2, 0, 252, 0, 2, 0, 14, 0, 2, 0, 10, 0, 2, 0, 11, 0, 2, 0, 7, 0, 4, 0, 15, 0, 4, 0, 12, 0, 4, 0, 8, 0, 18, 0, 228, 1, 80, 0, 0, 0, 0, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 192, 71, 213, 103, 192, 71, 213, 103, 52, 0, 0, 0, 1, 0, 0, 0, 21, 0, 178, 0, 21, 0, 178, 0, 16, 187, 1, 62, 139, 185, 192, 66, 185, 168, 8, 125, 74, 54, 194, 66, 185, 68, 0, 221, 1, 0, 0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 17, 189, 71, 213, 103, 189, 71, 213, 103, 76, 0, 0, 0, 1, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 0, 123, 0, 234, 170, 153, 192, 66, 185, 221, 186, 9, 5, 65, 195, 66, 185, 64, 0, 223, 1, 0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 188, 71, 213, 103, 103, 71, 213, 103, 247, 0, 0, 0, 3, 0, 0, 0, 26, 0, 21, 0, 26, 0, 21, 0, 26, 46, 155, 80, 0, 81, 4, 64, 100, 102, 193, 66, 185, 46, 208, 58, 216, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 179, 71, 213, 103, 101, 71, 213, 103, 247, 0, 0, 0, 3, 0, 0, 0, 26, 0, 21, 0, 26, 0, 21, 0, 26, 145, 155, 80, 0, 81, 4, 64, 100, 102, 193, 66, 185, 46, 208, 58, 216, 128, 0, 221, 1, 0, 0, 0, 221, 134, 100, 249, 80, 
201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 180, 71, 213, 103, 134, 71, 213, 103, 38, 3, 0, 0, 2, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 24, 187, 1, 218, 156, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 78, 0, 1, 64, 80, 20, 0, 42, 35, 211, 203, 103, 92, 74, 192, 76, 7, 0, 20, 1, 96, 34, 3, 42, 104, 0, 222, 1, 0, 0, 0, 0, 167, 51, 204, 11, 128, 207, 118, 88, 75, 91, 213, 103, 19, 0, 9, 0} 21 | 22 | packet, err := Decode(s, net.IP([]byte{1, 1, 1, 1})) 23 | if err != nil { 24 | t.Errorf("Decoding packet failed: %v\n", err) 25 | } 26 | 27 | flowSet := []byte{0, 0, 0, 221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 180, 71, 213, 103, 134, 71, 213, 103, 38, 3, 0, 0, 2, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 24, 187, 1, 218, 156, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 78, 0, 1, 64, 80, 20, 0, 42, 35, 211, 203, 103, 92, 74, 192, 76, 7, 0, 20, 1, 96, 34, 3, 42} 28 | 29 | if !testEq(packet.FlowSets[0].Flows, flowSet) { 30 | t.Errorf("Decoded FlowSet is not the expected one. Got: %v, Expected: %v\n", packet.FlowSets[0].Flows, flowSet) 31 | } 32 | } 33 | 34 | func testEq(a, b []byte) bool { 35 | 36 | if a == nil && b == nil { 37 | return true 38 | } 39 | 40 | if a == nil || b == nil { 41 | return false 42 | } 43 | 44 | if len(a) != len(b) { 45 | return false 46 | } 47 | 48 | for i := range a { 49 | if a[i] != b[i] { 50 | return false 51 | } 52 | } 53 | 54 | return true 55 | } 56 | -------------------------------------------------------------------------------- /nf9/packet.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package nf9 provides structures and functions to decode and analyze 13 | // NetFlow v9 packets. 14 | // 15 | // This package does only packet decoding in a single packet context. It keeps 16 | // no state when decoding multiple packets. As a result Data FlowSets can not be 17 | // decoded during initial packet decoding. To decode Data FlowSets user must 18 | // keep track of Template Records and Options Template Records manually. 19 | // 20 | // Examples of NetFlow v9 packets: 21 | // 22 | // +--------+--------------------------------------------------------+ 23 | // | | +----------+ +---------+ +-----------+ +---------+ | 24 | // | Packet | | Template | | Data | | Options | | Data | | 25 | // | Header | | FlowSet | | FlowSet | ... | Template | | FlowSet | | 26 | // | | | | | | | FlowSet | | | | 27 | // | | +----------+ +---------+ +-----------+ +---------+ | 28 | // +--------+--------------------------------------------------------+ 29 | // 30 | // +--------+----------------------------------------------+ 31 | // | | +---------+ +---------+ +---------+ | 32 | // | Packet | | Data | ... | Data | ... | Data | | 33 | // | Header | | FlowSet | ... | FlowSet | ... | FlowSet | | 34 | // | | +---------+ +---------+ +---------+ | 35 | // +--------+----------------------------------------------+ 36 | // 37 | // +--------+-------------------------------------------------+ 38 | // | | +----------+ +----------+ +----------+ | 39 | // | Packet | | Template | | Template | | Options | | 40 | // | Header | | FlowSet | ... 
| FlowSet | ... | Template | | 41 | // | | | | | | | FlowSet | | 42 | // | | +----------+ +----------+ +----------+ | 43 | // +--------+-------------------------------------------------+ 44 | // 45 | // Example of struct hierarchy after packet decoding: 46 | // Package 47 | // | 48 | // +--TemplateFlowSet 49 | // | | 50 | // | +--TemplateRecord 51 | // | | | 52 | // | | +--Field 53 | // | | +--... 54 | // | | +--Field 55 | // | | 56 | // | +--... 57 | // | | 58 | // | +--TemplateRecord 59 | // | | 60 | // | +--Field 61 | // | +--... 62 | // | +--Field 63 | // | 64 | // +--DataFlowSet 65 | // | 66 | // +--... 67 | // | 68 | // +--OptionsTemplateFlowSet 69 | // | | 70 | // | +--OptionsTemplateRecord 71 | // | | | 72 | // | | +--Field (scope) 73 | // | | +--... (scope) 74 | // | | +--Field (scope) 75 | // | | | 76 | // | | +--Field (option) 77 | // | | +--... (option) 78 | // | | +--Field (option) 79 | // | | 80 | // | +--... 81 | // | | 82 | // | +--OptionsTemplateRecord 83 | // | | 84 | // | +--Field (scope) 85 | // | +--... (scope) 86 | // | +--Field (scope) 87 | // | | 88 | // | +--Field (option) 89 | // | +--... (option) 90 | // | +--Field (option) 91 | // | 92 | // +--DataFlowSet 93 | // 94 | // When matched with appropriate template Data FlowSet can be decoded to list of 95 | // Flow Data Records or list of Options Data Records. Struct hierarchy example: 96 | // 97 | // []FlowDataRecord 98 | // | 99 | // +--FlowDataRecord 100 | // | | 101 | // | +--[]byte 102 | // | +--... 103 | // | +--[]byte 104 | // | 105 | // +--... 106 | // | 107 | // +--FlowDataRecord 108 | // | 109 | // +--[]byte 110 | // +--... 111 | // +--[]byte 112 | // 113 | // []OptionsDataRecord 114 | // | 115 | // +--OptionsDataRecord 116 | // | | 117 | // | +--[]byte (scope) 118 | // | +--... (scope) 119 | // | +--[]byte (scope) 120 | // | | 121 | // | +--[]byte (option) 122 | // | +--... (option) 123 | // | +--[]byte (option) 124 | // | 125 | // +--... 
126 | // | 127 | // +--OptionsDataRecord 128 | // | 129 | // +--[]byte 130 | // +--... 131 | // +--[]byte 132 | // | 133 | // +--[]byte (option) 134 | // +--... (option) 135 | // +--[]byte (option) 136 | // 137 | // Most of structure names and comments are taken directly from RFC 3954. 138 | // Reading the NetFlow v9 protocol specification is highly recommended before 139 | // using this package. 140 | package nf9 141 | 142 | import "unsafe" 143 | 144 | // Header is the NetFlow version 9 header 145 | type Header struct { 146 | // A 32-bit value that identifies the Exporter Observation Domain. 147 | SourceID uint32 148 | 149 | // Incremental sequence counter of all Export Packets sent from the 150 | // current Observation Domain by the Exporter. 151 | //SequenceNumber uint32 152 | SequenceNumber uint32 153 | 154 | // Time in seconds since 0000 UTC 1970, at which the Export Packet 155 | // leaves the Exporter. 156 | //UnixSecs uint32 157 | UnixSecs uint32 158 | 159 | // Time in milliseconds since this device was first booted. 160 | //SysUpTime uint32 161 | SysUpTime uint32 162 | 163 | // The total number of records in the Export Packet, which is the sum 164 | // of Options FlowSet records, Template FlowSet records, and Data 165 | // FlowSet records. 166 | //Count uint16 167 | Count uint16 168 | 169 | // Version of Flow Record format exported in this packet. The value of 170 | //this field is 9 for the current version. 171 | //Version uint16 172 | Version uint16 173 | } 174 | 175 | // FlowSet represents a FlowSet as described in RFC3954 176 | type FlowSet struct { 177 | Header *FlowSetHeader 178 | Flows []byte 179 | } 180 | 181 | // FlowSetHeader is a decoded representation of the header of a FlowSet 182 | type FlowSetHeader struct { 183 | Length uint16 184 | FlowSetID uint16 185 | } 186 | 187 | var sizeOfFlowSetHeader = unsafe.Sizeof(FlowSetHeader{}) 188 | 189 | // Packet is a decoded representation of a single NetFlow v9 UDP packet. 
190 | type Packet struct { 191 | // A pointer to the packets headers 192 | Header *Header 193 | 194 | // A slice of pointers to FlowSet. Each element is instance of (Data)FlowSet 195 | // found in this packet 196 | FlowSets []*FlowSet 197 | 198 | // A slice of pointers to TemplateRecords. Each element is instance of TemplateRecords 199 | // representing a template found in this packet. 200 | Templates []*TemplateRecords 201 | 202 | // Buffer is a slice pointing to the original byte array that this packet was decoded from. 203 | // This field is only populated if debug level is at least 2 204 | Buffer []byte 205 | } 206 | 207 | var sizeOfHeader = unsafe.Sizeof(Header{}) 208 | 209 | // GetTemplateRecords generate a list of all Template Records in the packet. 210 | // Template Records can be used to decode Data FlowSets to Data Records. 211 | func (p *Packet) GetTemplateRecords() []*TemplateRecords { 212 | return p.Templates 213 | } 214 | 215 | // DataFlowSets generate a list of all Data FlowSets in the packet. If matched 216 | // with appropriate templates Data FlowSets can be decoded to Data Records or 217 | // Options Data Records. 218 | func (p *Packet) DataFlowSets() []*FlowSet { 219 | return p.FlowSets 220 | } 221 | -------------------------------------------------------------------------------- /ipfix/packet.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package ipfix provides structures and functions to decode and analyze 13 | // IPFIX packets. 14 | // 15 | // This package does only packet decoding in a single packet context. It keeps 16 | // no state when decoding multiple packets. As a result Data FlowSets can not be 17 | // decoded during initial packet decoding. To decode Data FlowSets user must 18 | // keep track of Template Records and Options Template Records manually. 19 | // 20 | // Examples of IPFIX packets: 21 | // 22 | // 1. An IPFIX Message consisting of interleaved Template, Data, and 23 | // Options Template Sets, as shown in Figure C. Here, Template and 24 | // Options Template Sets are transmitted "on demand", before the 25 | // first Data Set whose structure they define. 26 | // 27 | // +--------+--------------------------------------------------------+ 28 | // | | +----------+ +---------+ +-----------+ +---------+ | 29 | // |Message | | Template | | Data | | Options | | Data | | 30 | // | Header | | Set | | Set | ... | Template | | Set | | 31 | // | | | | | | | Set | | | | 32 | // | | +----------+ +---------+ +-----------+ +---------+ | 33 | // +--------+--------------------------------------------------------+ 34 | // 35 | // +--------+----------------------------------------------+ 36 | // | | +---------+ +---------+ +---------+ | 37 | // |Message | | Data | | Data | | Data | | 38 | // | Header | | Set | ... | Set | ... | Set | | 39 | // | | +---------+ +---------+ +---------+ | 40 | // +--------+----------------------------------------------+ 41 | // 42 | // Figure D: IPFIX Message: Example 2 43 | // 44 | // 3. An IPFIX Message consisting entirely of Template and Options 45 | // Template Sets, as shown in Figure E. Such a message can be used 46 | // to define or redefine Templates and Options Templates in bulk. 
47 | // 48 | // +--------+-------------------------------------------------+ 49 | // | | +----------+ +----------+ +----------+ | 50 | // |Message | | Template | | Template | | Options | | 51 | // | Header | | Set | ... | Set | ... | Template | | 52 | // | | | | | | | Set | | 53 | // | | +----------+ +----------+ +----------+ | 54 | // +--------+-------------------------------------------------+ 55 | // 56 | // 57 | // Example of struct hierarchy after packet decoding: 58 | // Package 59 | // | 60 | // +--TemplateFlowSet 61 | // | | 62 | // | +--TemplateRecord 63 | // | | | 64 | // | | +--Field 65 | // | | +--... 66 | // | | +--Field 67 | // | | 68 | // | +--... 69 | // | | 70 | // | +--TemplateRecord 71 | // | | 72 | // | +--Field 73 | // | +--... 74 | // | +--Field 75 | // | 76 | // +--DataFlowSet 77 | // | 78 | // +--... 79 | // | 80 | // +--OptionsTemplateFlowSet 81 | // | | 82 | // | +--OptionsTemplateRecord 83 | // | | | 84 | // | | +--Field (scope) 85 | // | | +--... (scope) 86 | // | | +--Field (scope) 87 | // | | | 88 | // | | +--Field (option) 89 | // | | +--... (option) 90 | // | | +--Field (option) 91 | // | | 92 | // | +--... 93 | // | | 94 | // | +--OptionsTemplateRecord 95 | // | | 96 | // | +--Field (scope) 97 | // | +--... (scope) 98 | // | +--Field (scope) 99 | // | | 100 | // | +--Field (option) 101 | // | +--... (option) 102 | // | +--Field (option) 103 | // | 104 | // +--DataFlowSet 105 | // 106 | // When matched with appropriate template Data FlowSet can be decoded to list of 107 | // Flow Data Records or list of Options Data Records. Struct hierarchy example: 108 | // 109 | // []FlowDataRecord 110 | // | 111 | // +--FlowDataRecord 112 | // | | 113 | // | +--[]byte 114 | // | +--... 115 | // | +--[]byte 116 | // | 117 | // +--... 118 | // | 119 | // +--FlowDataRecord 120 | // | 121 | // +--[]byte 122 | // +--... 
123 | // +--[]byte 124 | // 125 | // []OptionsDataRecord 126 | // | 127 | // +--OptionsDataRecord 128 | // | | 129 | // | +--[]byte (scope) 130 | // | +--... (scope) 131 | // | +--[]byte (scope) 132 | // | | 133 | // | +--[]byte (option) 134 | // | +--... (option) 135 | // | +--[]byte (option) 136 | // | 137 | // +--... 138 | // | 139 | // +--OptionsDataRecord 140 | // | 141 | // +--[]byte 142 | // +--... 143 | // +--[]byte 144 | // | 145 | // +--[]byte (option) 146 | // +--... (option) 147 | // +--[]byte (option) 148 | // 149 | // Most of structure names and comments are taken directly from RFC 7011. 150 | // Reading the IPFIX protocol specification is highly recommended before 151 | // using this package. 152 | package ipfix 153 | 154 | import "unsafe" 155 | 156 | // Header is an IPFIX message header 157 | type Header struct { 158 | // A 32-bit value that identifies the Exporter Observation Domain. 159 | DomainID uint32 160 | 161 | // Incremental sequence counter of all Export Packets sent from the 162 | // current Observation Domain by the Exporter. 163 | SequenceNumber uint32 164 | 165 | // Time in seconds since 0000 UTC 1970, at which the Export Packet 166 | // leaves the Exporter. 167 | ExportTime uint32 168 | 169 | // The total number of bytes in this Export Packet 170 | Length uint16 171 | 172 | // Version of Flow Record format exported in this packet. The value of 173 | //this field is 9 for the current version. 174 | Version uint16 175 | } 176 | 177 | // Set represents a Set as described in RFC7011 178 | type Set struct { 179 | Header *SetHeader 180 | Records []byte 181 | } 182 | 183 | // SetHeader is a decoded representation of the header of a Set 184 | type SetHeader struct { 185 | Length uint16 186 | SetID uint16 187 | } 188 | 189 | var sizeOfSetHeader = unsafe.Sizeof(SetHeader{}) 190 | 191 | // Packet is a decoded representation of a single NetFlow v9 UDP packet. 
192 | type Packet struct { 193 | // A pointer to the packets headers 194 | Header *Header 195 | 196 | // A slice of pointers to FlowSet. Each element is instance of (Data)FlowSet 197 | // found in this packet 198 | FlowSets []*Set 199 | 200 | // A slice of pointers to TemplateRecords. Each element is instance of TemplateRecords 201 | // representing a template found in this packet. 202 | Templates []*TemplateRecords 203 | 204 | // Buffer is a slice pointing to the original byte array that this packet was decoded from. 205 | // This field is only populated if debug level is at least 2 206 | Buffer []byte 207 | } 208 | 209 | var sizeOfHeader = unsafe.Sizeof(Header{}) 210 | 211 | // GetTemplateRecords generate a list of all Template Records in the packet. 212 | // Template Records can be used to decode Data FlowSets to Data Records. 213 | func (p *Packet) GetTemplateRecords() []*TemplateRecords { 214 | return p.Templates 215 | } 216 | 217 | // DataFlowSets generate a list of all Data FlowSets in the packet. If matched 218 | // with appropriate templates Data FlowSets can be decoded to Data Records or 219 | // Options Data Records. 220 | func (p *Packet) DataFlowSets() []*Set { 221 | return p.FlowSets 222 | } 223 | -------------------------------------------------------------------------------- /tflow2.html: -------------------------------------------------------------------------------- 1 | 2 | 14 | 15 | tflow Netflow Analyzer 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 |
25 |

tflow Netflow Analyzer

26 |
27 |
28 | 29 | 33 |
34 |
35 |
36 | Netflow Query 37 |
38 | Filter 39 |
40 | 41 | 42 |
43 |
44 | 45 | 46 |
47 |
48 | 49 | 50 |
51 |
52 | 53 | 54 |
55 |
56 | 57 | 58 |
59 |
60 | 61 | 62 |
63 |
64 | 65 | 66 |
67 |
68 | 69 | 70 |
71 |
72 | 73 | 74 |
75 |
76 | 77 | 78 |
79 |
80 | 81 | 82 |
83 |
84 | 85 | 86 |
87 |
88 | 89 | 90 |
91 |
92 | 93 | 94 |
95 |
96 | 97 | 98 |
99 |
100 | 101 | 102 |
103 |
104 |
105 | Breakdown 106 |
107 | 108 | 109 |
110 |
111 | 112 | 113 |
114 |
115 | 116 | 117 |
118 |
119 | 120 | 121 |
122 |
123 | 124 | 125 |
126 |
127 | 128 | 129 |
130 |
131 | 132 | 133 |
134 |
135 | 136 | 137 |
138 |
139 | 140 | 141 |
142 |
143 | 144 | 145 |
146 |
147 | 148 | 149 |
150 |
151 | 152 | 153 |
154 |
155 | 156 | 157 |
158 |
159 |
160 | 161 | 162 |
163 |
164 | 165 |
166 |
167 | 168 | -------------------------------------------------------------------------------- /tflow2.js: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | var query; 13 | var protocols; 14 | var availableProtocols = []; 15 | var rtrs; 16 | var routers = []; 17 | var interfaces = []; 18 | const OpEqual = 0; 19 | const OpUnequal = 1; 20 | const OpSmaller = 2; 21 | const OpGreater = 3; 22 | const FieldTimestamp = 0; 23 | const FieldRouter = 1; 24 | const FieldSrcAddr = 2; 25 | const FieldDstAddr = 3; 26 | const FieldProtocol = 4; 27 | const FieldIntIn = 5; 28 | const FieldIntOut = 6; 29 | const FieldNextHop = 7; 30 | const FieldSrcAs = 8; 31 | const FieldDstAs = 9; 32 | const FieldNextHopAs = 10; 33 | const FieldSrcPfx = 11; 34 | const FieldDstPfx = 12; 35 | const FieldSrcPort = 13; 36 | const FieldDstPort = 14; 37 | const fields = { 38 | "Router": 1, 39 | "SrcAddr": 2, 40 | "DstAddr": 3, 41 | "Protocol": 4, 42 | "IntIn": 5, 43 | "IntOut": 6, 44 | "NextHop": 7, 45 | "SrcAsn": 8, 46 | "DstAsn": 9, 47 | "NextHopAsn": 10, 48 | "SrcPfx": 11, 49 | "DstPfx": 12, 50 | "SrcPort": 13, 51 | "DstPort": 14, 52 | }; 53 | const fieldById = { 54 | "1": "Router", 55 | "2": "SrcAddr", 56 | "3": "DstAddr", 57 | "4": "Protocol", 58 | "5": "IntIn", 59 | "6": "IntOut", 60 | "7": "NextHop", 61 | "8": "SrcAsn", 62 | "9": 
"DstAsn", 63 | "10": "NextHopAsn", 64 | "11": "SrcPfx", 65 | "12": "DstPfx", 66 | "13": "SrcPort", 67 | "14": "DstPort" 68 | }; 69 | 70 | var bdfields = [ 71 | "SrcAddr", "DstAddr", "Protocol", "IntIn", "IntOut", "NextHop", "SrcAsn", "DstAsn", 72 | "NextHopAsn", "SrcPfx", "DstPfx", "SrcPort", "DstPort" ]; 73 | 74 | function drawChart() { 75 | var query = $("#query").val(); 76 | if (query == "" || query == "{}") { 77 | return; 78 | } 79 | 80 | var url = "/query?q=" + encodeURI(query) 81 | console.log(url); 82 | $.get(url, function(rdata) { 83 | console.log(rdata); 84 | d = []; 85 | d = JSON.parse(rdata); 86 | data = google.visualization.arrayToDataTable(d); 87 | 88 | var options = { 89 | isStacked: true, 90 | title: 'NetFlow bps of top flows', 91 | hAxis: { 92 | title: 'Time', 93 | titleTextStyle: { 94 | color: '#333' 95 | } 96 | }, 97 | vAxis: { 98 | minValue: 0 99 | } 100 | }; 101 | 102 | var chart = new google.visualization.AreaChart(document.getElementById('chart_div')); 103 | chart.draw(data, options); 104 | }); 105 | } 106 | 107 | function populateForm() { 108 | var q = $("#query").val(); 109 | if (q == "" || q == "{}") { 110 | return; 111 | } 112 | 113 | q = JSON.parse(q); 114 | $("#topn").val(q.TopN); 115 | for (var c in q['Cond']) { 116 | var fieldNum = q['Cond'][c]['Field']; 117 | var fieldName = fieldById[fieldNum]; 118 | var operand = q['Cond'][c]['Operand']; 119 | if (fieldNum == FieldRouter) { 120 | operand = getRouterById(operand); 121 | if (operand == null) { 122 | return; 123 | } 124 | } else if (fieldNum == FieldIntIn || fieldNum == FieldIntOut) { 125 | operand = getInterfaceById($("#Router").val(), operand); 126 | if (operand == null) { 127 | return; 128 | } 129 | } else if (fieldNum == FieldProtocol) { 130 | operand = protocols[operand]; 131 | if (operand == null) { 132 | return; 133 | } 134 | } 135 | 136 | $("#" + fieldName).val(operand); 137 | } 138 | loadInterfaceOptions(); 139 | 140 | for (var f in q['Breakdown']) { 141 | $("#bd" + f).prop( 
"checked", true ); 142 | } 143 | } 144 | 145 | function loadInterfaceOptions() { 146 | var rtr = $("#Router").val(); 147 | interfaces = []; 148 | if (!rtrs[rtr]) { 149 | return; 150 | } 151 | for (var k in rtrs[rtr]["interfaces"]) { 152 | interfaces.push(rtrs[rtr]["interfaces"][k]); 153 | } 154 | 155 | $("#IntIn").autocomplete({ 156 | source: interfaces 157 | }); 158 | 159 | $("#IntOut").autocomplete({ 160 | source: interfaces 161 | }); 162 | } 163 | 164 | function loadProtocols() { 165 | return $.get("/protocols", function(rdata) { 166 | protocols = JSON.parse(rdata); 167 | for (var k in protocols) { 168 | availableProtocols.push(protocols[k]); 169 | } 170 | 171 | $("#Protocol").autocomplete({ 172 | source: availableProtocols 173 | }); 174 | }); 175 | } 176 | 177 | function loadRouters() { 178 | return $.get("/routers", function(rdata) { 179 | rtrs = JSON.parse(rdata); 180 | for (var k in rtrs) { 181 | routers.push(k); 182 | } 183 | 184 | $("#Router").autocomplete({ 185 | source: routers, 186 | change: function() { 187 | loadInterfaceOptions(); 188 | } 189 | }); 190 | }); 191 | } 192 | 193 | $(document).ready(function() { 194 | var start = new Date(((new Date() / 1000) - 900)* 1000).toISOString().substr(0, 16) 195 | if ($("#TimeStart").val() == "") { 196 | $("#TimeStart").val(start); 197 | } 198 | 199 | var end = new Date().toISOString().substr(0, 16) 200 | if ($("#TimeEnd").val() == "") { 201 | $("#TimeEnd").val(end); 202 | } 203 | 204 | $.when(loadRouters(), loadProtocols()).done(function() { 205 | $("#Router").on('input', function() { 206 | loadInterfaceOptions(); 207 | }) 208 | populateForm(); 209 | }) 210 | 211 | $("#submit").on('click', submitQuery); 212 | 213 | google.charts.load('current', { 214 | 'packages': ['corechart'] 215 | }); 216 | google.charts.setOnLoadCallback(drawChart); 217 | }); 218 | 219 | function getProtocolId(name) { 220 | for (var k in protocols) { 221 | if (protocols[k] == name) { 222 | return k; 223 | } 224 | } 225 | return null; 226 | 
} 227 | 228 | function getIntId(rtr, name) { 229 | if (!rtrs[rtr]) { 230 | return null; 231 | } 232 | for (var k in rtrs[rtr]['interfaces']) { 233 | if (rtrs[rtr]['interfaces'][k] == name) { 234 | return k; 235 | } 236 | } 237 | return null; 238 | } 239 | 240 | function getRouterById(id) { 241 | for (var k in rtrs) { 242 | if (rtrs[k]['id'] == id) { 243 | return k; 244 | } 245 | } 246 | return null; 247 | } 248 | 249 | function getInterfaceById(router, id) { 250 | return rtrs[router]['interfaces'][id]; 251 | } 252 | 253 | function submitQuery() { 254 | var query = { 255 | Cond: [], 256 | Breakdown: {}, 257 | TopN: parseInt($("#topn").val()) 258 | }; 259 | 260 | console.log($("#TimeStart").val()); 261 | var start = new Date($("#TimeStart").val()); 262 | var end = new Date($("#TimeEnd").val()); 263 | start = Math.round(start.getTime() / 1000); 264 | end = Math.round(end.getTime() / 1000); 265 | query['Cond'].push({ 266 | Field: FieldTimestamp, 267 | Operator: OpGreater, 268 | Operand: start + "" 269 | }); 270 | query['Cond'].push({ 271 | Field: FieldTimestamp, 272 | Operator: OpSmaller, 273 | Operand: end + "" 274 | }); 275 | 276 | for (var k in fields) { 277 | tmp = $("#" + k).val(); 278 | if (tmp == "") { 279 | continue; 280 | } 281 | if (k == "Router") { 282 | tmp = rtrs[tmp]['id']; 283 | } else if (k == "IntIn" || k == "IntOut") { 284 | tmp = getIntId($("#Router").val(), tmp) 285 | if (tmp == null) { 286 | return; 287 | } 288 | } else if (k == "Protocol") { 289 | tmp = getProtocolId(tmp); 290 | if (tmp == null) { 291 | return; 292 | } 293 | } 294 | query['Cond'].push({ 295 | Field: fields[k], 296 | Operator: OpEqual, 297 | Operand: tmp + "" 298 | }); 299 | } 300 | 301 | for (var i = 0; i < bdfields.length; i++) { 302 | if (!$("#bd" + bdfields[i]).prop('checked')) { 303 | continue; 304 | } 305 | query['Breakdown'][bdfields[i]] = true; 306 | } 307 | 308 | console.log(query); 309 | $("#query").val(JSON.stringify(query)); 310 | $("#form").submit(); 311 | } 
-------------------------------------------------------------------------------- /annotator/bird/bird.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package bird can lookup IP prefixes and autonomous system numbers and 13 | // add them to flows in case the routers implementation doesn't support this, e.g. ipt-NETFLOW 14 | package bird 15 | 16 | import ( 17 | "fmt" 18 | "net" 19 | "strconv" 20 | "strings" 21 | "sync" 22 | "sync/atomic" 23 | 24 | "github.com/golang/glog" 25 | "github.com/google/tflow2/netflow" 26 | "github.com/google/tflow2/stats" 27 | ) 28 | 29 | // QueryResult carries all useful information we extracted from a BIRD querys result 30 | type QueryResult struct { 31 | // Pfx is the prefix that is being used to forward packets for the IP 32 | // address from the query 33 | Pfx net.IPNet 34 | 35 | // As is the ASN that the subject IP is announced by 36 | AS uint32 37 | 38 | // NhAs is the ASN of the subject IPs associated Next Hop 39 | NHAS uint32 40 | } 41 | 42 | // QueryCache represents a set of QueryResults that have been cached 43 | type QueryCache struct { 44 | cache map[string]QueryResult 45 | lock sync.RWMutex 46 | } 47 | 48 | // birdCon represents a connection to a BIRD instance 49 | type birdCon struct { 50 | sock string 51 | con net.Conn 52 | recon chan bool 53 | lock 
sync.RWMutex 54 | } 55 | 56 | // Annotator represents a BIRD based BGP annotator 57 | type Annotator struct { 58 | queryC chan string 59 | resC chan *QueryResult 60 | 61 | // cache is used to cache query results 62 | cache *QueryCache 63 | 64 | // connection to BIRD 65 | bird4 *birdCon 66 | 67 | // connectio to BIRD6 68 | bird6 *birdCon 69 | 70 | // debug level 71 | debug int 72 | } 73 | 74 | // NewAnnotator creates a new BIRD annotator and get's service started 75 | func NewAnnotator(sock string, sock6 string, debug int) *Annotator { 76 | a := &Annotator{ 77 | cache: newQueryCache(), 78 | queryC: make(chan string), 79 | resC: make(chan *QueryResult), 80 | debug: debug, 81 | } 82 | 83 | var wg sync.WaitGroup 84 | 85 | wg.Add(1) 86 | go func() { 87 | defer wg.Done() 88 | a.bird4 = newBirdCon(sock) 89 | }() 90 | 91 | wg.Add(1) 92 | go func() { 93 | defer wg.Done() 94 | a.bird6 = newBirdCon(sock6) 95 | }() 96 | 97 | wg.Wait() 98 | go a.gateway() 99 | 100 | return a 101 | } 102 | 103 | // getConn gets the net.Conn property of the BIRD connection 104 | func (c *birdCon) getConn() *net.Conn { 105 | return &c.con 106 | } 107 | 108 | // newQueryCache creates and initializes a new `QueryCache` 109 | func newQueryCache() *QueryCache { 110 | return &QueryCache{cache: make(map[string]QueryResult)} 111 | } 112 | 113 | // reconnector receives a signal via channel that triggers a connection attempt to BIRD 114 | func (c *birdCon) reconnector() { 115 | for { 116 | // wait for signal of a closed connection 117 | <-c.recon 118 | 119 | // try to connect up to 5 times 120 | for i := 0; i < 5; i++ { 121 | tmpCon, err := net.Dial("unix", c.sock) 122 | if err != nil { 123 | glog.Warningf("Unable to connect to BIRD on %s: %v", c.sock, err) 124 | continue 125 | } 126 | 127 | // Read welcome message we are not interested in 128 | buf := make([]byte, 1024) 129 | nbytes, err := tmpCon.Read(buf[:]) 130 | if err != nil || nbytes == 0 { 131 | if err == nil { 132 | tmpCon.Close() 133 | } 134 | 
glog.Warning("Reading from BIRD failed: %v", err) 135 | continue 136 | } 137 | 138 | c.lock.Lock() 139 | c.con = tmpCon 140 | c.lock.Unlock() 141 | break 142 | } 143 | } 144 | } 145 | 146 | // Get tries to receive an entry from QueryCache `qc` 147 | func (qc *QueryCache) Get(addr []byte) *QueryResult { 148 | qc.lock.RLock() 149 | defer qc.lock.RUnlock() 150 | 151 | res, ok := qc.cache[net.IP(addr).String()] 152 | if !ok { 153 | atomic.AddUint64(&stats.GlobalStats.BirdCacheMiss, 1) 154 | return nil 155 | } 156 | atomic.AddUint64(&stats.GlobalStats.BirdCacheHits, 1) 157 | return &res 158 | } 159 | 160 | // Set sets data for `addr` in QueryCache `qc` to `qres` 161 | func (qc *QueryCache) Set(addr []byte, qres *QueryResult) { 162 | qc.lock.Lock() 163 | defer qc.lock.Unlock() 164 | qc.cache[net.IP(addr).String()] = *qres 165 | } 166 | 167 | // newBirdCon creates a birdCon to socket `s` 168 | func newBirdCon(s string) *birdCon { 169 | b := &birdCon{ 170 | sock: s, 171 | recon: make(chan bool), 172 | } 173 | go b.reconnector() 174 | b.recon <- true 175 | return b 176 | } 177 | 178 | // Augment function provides the main interface to the external world to consume service of this module 179 | func (a *Annotator) Augment(fl *netflow.Flow) { 180 | srcRes := a.cache.Get(fl.SrcAddr) 181 | if srcRes == nil { 182 | srcRes = a.query(net.IP(fl.Router), fl.SrcAddr) 183 | a.cache.Set(fl.SrcAddr, srcRes) 184 | } 185 | 186 | dstRes := a.cache.Get(fl.DstAddr) 187 | if dstRes == nil { 188 | dstRes = a.query(net.IP(fl.Router), fl.DstAddr) 189 | a.cache.Set(fl.DstAddr, dstRes) 190 | } 191 | 192 | fl.SrcPfx = &netflow.Pfx{} 193 | fl.SrcPfx.IP = srcRes.Pfx.IP 194 | fl.SrcPfx.Mask = srcRes.Pfx.Mask 195 | 196 | fl.DstPfx = &netflow.Pfx{} 197 | fl.DstPfx.IP = dstRes.Pfx.IP 198 | fl.DstPfx.Mask = dstRes.Pfx.Mask 199 | 200 | fl.SrcAs = srcRes.AS 201 | fl.DstAs = dstRes.AS 202 | fl.NextHopAs = dstRes.NHAS 203 | } 204 | 205 | // query forms a query, sends it to the processing engine, reads the 
result and returns it 206 | func (a *Annotator) query(rtr net.IP, addr net.IP) *QueryResult { 207 | query := fmt.Sprintf("show route all for %s protocol nf_%s\n", addr.String(), strings.Replace(rtr.String(), ".", "_", -1)) 208 | a.queryC <- query 209 | return <-a.resC 210 | } 211 | 212 | // gateway starts the main service routine 213 | func (a *Annotator) gateway() { 214 | query := "" 215 | 216 | buf := make([]byte, 1024) 217 | for { 218 | var res QueryResult 219 | query = <-a.queryC 220 | if query == "" { 221 | continue 222 | } 223 | data := []byte(query) 224 | 225 | // Determine if we are being queried for an IPv4 or an IPv6 address 226 | bird := a.bird4 227 | if strings.Contains(query, ":") { 228 | bird = a.bird6 229 | } 230 | 231 | // Skip annotation if we're not connected to bird yet 232 | bird.lock.RLock() 233 | if bird.con == nil { 234 | glog.Warningf("skipped annotating flow: BIRD is not connected yet") 235 | bird.lock.RUnlock() 236 | a.resC <- &res 237 | continue 238 | } 239 | 240 | // Send query to BIRD 241 | _, err := bird.con.Write(data) 242 | if err != nil { 243 | bird.lock.RUnlock() 244 | glog.Errorf("Unable to write to BIRD: %v", err) 245 | bird.recon <- true 246 | continue 247 | } 248 | bird.lock.RUnlock() 249 | 250 | // Read reply from BIRD 251 | n, err := bird.con.Read(buf[:]) 252 | if err != nil { 253 | bird.lock.RUnlock() 254 | glog.Errorf("unable to read from BIRD: %v", err) 255 | bird.recon <- true 256 | continue 257 | } 258 | 259 | // Parse BIRDs output 260 | output := string(buf[:n]) 261 | lines := strings.Split(output, "\n") 262 | for i, line := range lines { 263 | // Take the first line as that should contain the prefix 264 | if i == 0 { 265 | parts := strings.Split(line, " ") 266 | if len(parts) == 0 { 267 | glog.Warningf("unexpected empty output for query '%v'", query) 268 | continue 269 | } 270 | pfx := parts[0] 271 | parts = strings.Split(pfx, "-") 272 | if len(parts) != 2 { 273 | glog.Warningf("unexpected split results for query 
'%v'", query) 274 | continue 275 | } 276 | pfx = parts[1] 277 | 278 | _, tmpNet, err := net.ParseCIDR(pfx) 279 | res.Pfx = *tmpNet 280 | if err != nil { 281 | glog.Warningf("unable to parse CIDR from BIRD: %v (query '%v')", err, query) 282 | continue 283 | } 284 | continue 285 | } 286 | 287 | // Find line that contains the AS Path 288 | if strings.Contains(line, "BGP.as_path: ") { 289 | // Remove curly braces from BIRD AS path (ignores aggregators), e.g. BGP.as_path: 25291 3320 20940 { 16625 } 290 | line = strings.Replace(line, "{ ", "", -1) 291 | line = strings.Replace(line, " }", "", -1) 292 | 293 | parts := strings.Split(line, "BGP.as_path: ") 294 | pathParts := strings.Split(parts[1], " ") 295 | 296 | if len(parts) < 2 || parts[1] == "" { 297 | break 298 | } 299 | 300 | AS, err := strconv.ParseUint(pathParts[len(pathParts)-1], 10, 32) 301 | if err != nil { 302 | glog.Warningf("unable to parse ASN") 303 | } 304 | 305 | NHAS, err := strconv.ParseUint(pathParts[0], 10, 32) 306 | if err != nil { 307 | glog.Warningf("unable to parse next hop ASN") 308 | } 309 | 310 | res.AS = uint32(AS) 311 | res.NHAS = uint32(NHAS) 312 | break 313 | } 314 | } 315 | if res.AS == 0 && a.debug > 2{ 316 | glog.Warningf("unable to find AS path for '%v'", query) 317 | } 318 | a.resC <- &res 319 | } 320 | } 321 | -------------------------------------------------------------------------------- /nfserver/nfserver.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package nfserver provides netflow collection services via UDP and passes flows into annotator layer 13 | package nfserver 14 | 15 | import ( 16 | "fmt" 17 | "net" 18 | "strconv" 19 | "strings" 20 | "sync/atomic" 21 | 22 | "github.com/golang/glog" 23 | "github.com/google/tflow2/convert" 24 | "github.com/google/tflow2/netflow" 25 | "github.com/google/tflow2/nf9" 26 | "github.com/google/tflow2/stats" 27 | ) 28 | 29 | // fieldMap describes what information is at what index in the slice 30 | // that we get from decoding a netflow packet 31 | type fieldMap struct { 32 | srcAddr int 33 | dstAddr int 34 | protocol int 35 | packets int 36 | size int 37 | intIn int 38 | intOut int 39 | nextHop int 40 | family int 41 | vlan int 42 | ts int 43 | srcAsn int 44 | dstAsn int 45 | srcPort int 46 | dstPort int 47 | } 48 | 49 | // NetflowServer represents a Netflow Collector instance 50 | type NetflowServer struct { 51 | // tmplCache is used to save received flow templates 52 | // for later lookup in order to decode netflow packets 53 | tmplCache *templateCache 54 | 55 | // receiver is the channel used to receive flows from the annotator layer 56 | Output chan *netflow.Flow 57 | 58 | // debug defines the debug level 59 | debug int 60 | 61 | // bgpAugment is used to decide if ASN information from netflow packets should be used 62 | bgpAugment bool 63 | } 64 | 65 | // New creates and starts a new `NetflowServer` instance 66 | func New(listenAddr string, numReaders int, bgpAugment bool, debug int) *NetflowServer { 67 | nfs := &NetflowServer{ 
68 | debug: debug, 69 | tmplCache: newTemplateCache(), 70 | Output: make(chan *netflow.Flow), 71 | bgpAugment: bgpAugment, 72 | } 73 | 74 | addr, err := net.ResolveUDPAddr("udp", listenAddr) 75 | if err != nil { 76 | panic(fmt.Sprintf("ResolveUDPAddr: %v", err)) 77 | } 78 | 79 | con, err := net.ListenUDP("udp", addr) 80 | if err != nil { 81 | panic(fmt.Sprintf("Listen: %v", err)) 82 | } 83 | 84 | // Create goroutines that read netflow packet and process it 85 | for i := 0; i < numReaders; i++ { 86 | go func(num int) { 87 | nfs.packetWorker(num, con) 88 | }(i) 89 | } 90 | 91 | return nfs 92 | } 93 | 94 | // packetWorker reads netflow packet from socket and handsoff processing to processFlowSets() 95 | func (nfs *NetflowServer) packetWorker(identity int, conn *net.UDPConn) { 96 | buffer := make([]byte, 8960) 97 | for { 98 | length, remote, err := conn.ReadFromUDP(buffer) 99 | if err != nil { 100 | glog.Errorf("Error reading from socket: %v", err) 101 | continue 102 | } 103 | atomic.AddUint64(&stats.GlobalStats.Netflow9packets, 1) 104 | atomic.AddUint64(&stats.GlobalStats.Netflow9bytes, uint64(length)) 105 | 106 | remote.IP = remote.IP.To4() 107 | if remote.IP == nil { 108 | glog.Errorf("Received IPv6 packet. 
Dropped.") 109 | continue 110 | } 111 | 112 | nfs.processPacket(remote.IP, buffer[:length]) 113 | } 114 | } 115 | 116 | // processPacket takes a raw netflow packet, send it to the decoder, updates template cache 117 | // (if there are templates in the packet) and passes the decoded packet over to processFlowSets() 118 | func (nfs *NetflowServer) processPacket(remote net.IP, buffer []byte) { 119 | length := len(buffer) 120 | packet, err := nf9.Decode(buffer[:length], remote) 121 | if err != nil { 122 | glog.Errorf("nf9packet.Decode: %v", err) 123 | return 124 | } 125 | 126 | nfs.updateTemplateCache(remote, packet) 127 | nfs.processFlowSets(remote, packet.Header.SourceID, packet.DataFlowSets(), int64(packet.Header.UnixSecs), packet) 128 | } 129 | 130 | // processFlowSets iterates over flowSets and calls processFlowSet() for each flow set 131 | func (nfs *NetflowServer) processFlowSets(remote net.IP, sourceID uint32, flowSets []*nf9.FlowSet, ts int64, packet *nf9.Packet) { 132 | addr := remote.String() 133 | keyParts := make([]string, 3, 3) 134 | for _, set := range flowSets { 135 | template := nfs.tmplCache.get(convert.Uint32(remote), sourceID, set.Header.FlowSetID) 136 | 137 | if template == nil { 138 | templateKey := makeTemplateKey(addr, sourceID, set.Header.FlowSetID, keyParts) 139 | if nfs.debug > 0 { 140 | glog.Warningf("Template for given FlowSet not found: %s", templateKey) 141 | } 142 | continue 143 | } 144 | 145 | records := template.DecodeFlowSet(*set) 146 | if records == nil { 147 | glog.Warning("Error decoding FlowSet") 148 | continue 149 | } 150 | nfs.processFlowSet(template, records, remote, ts, packet) 151 | } 152 | } 153 | 154 | // process generates Flow elements from records and pushes them into the `receiver` channel 155 | func (nfs *NetflowServer) processFlowSet(template *nf9.TemplateRecords, records []nf9.FlowDataRecord, agent net.IP, ts int64, packet *nf9.Packet) { 156 | fm := generateFieldMap(template) 157 | 158 | for _, r := range records { 
159 | if fm.family == 4 { 160 | atomic.AddUint64(&stats.GlobalStats.Flows4, 1) 161 | } else if fm.family == 6 { 162 | atomic.AddUint64(&stats.GlobalStats.Flows6, 1) 163 | } else { 164 | glog.Warning("Unknown address family") 165 | continue 166 | } 167 | 168 | var fl netflow.Flow 169 | fl.Router = agent 170 | fl.Timestamp = ts 171 | fl.Family = uint32(fm.family) 172 | fl.Packets = convert.Uint32(r.Values[fm.packets]) 173 | fl.Size = uint64(convert.Uint32(r.Values[fm.size])) 174 | fl.Protocol = convert.Uint32(r.Values[fm.protocol]) 175 | fl.IntIn = convert.Uint32(r.Values[fm.intIn]) 176 | fl.IntOut = convert.Uint32(r.Values[fm.intOut]) 177 | fl.SrcPort = convert.Uint32(r.Values[fm.srcPort]) 178 | fl.DstPort = convert.Uint32(r.Values[fm.dstPort]) 179 | fl.SrcAddr = convert.Reverse(r.Values[fm.srcAddr]) 180 | fl.DstAddr = convert.Reverse(r.Values[fm.dstAddr]) 181 | fl.NextHop = convert.Reverse(r.Values[fm.nextHop]) 182 | 183 | if !nfs.bgpAugment { 184 | fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn]) 185 | fl.DstAs = convert.Uint32(r.Values[fm.dstAsn]) 186 | } 187 | 188 | if nfs.debug > 2 { 189 | Dump(&fl) 190 | } 191 | 192 | nfs.Output <- &fl 193 | } 194 | } 195 | 196 | // Dump dumps a flow on the screen 197 | func Dump(fl *netflow.Flow) { 198 | fmt.Printf("--------------------------------\n") 199 | fmt.Printf("Flow dump:\n") 200 | fmt.Printf("Router: %d\n", fl.Router) 201 | fmt.Printf("Family: %d\n", fl.Family) 202 | fmt.Printf("SrcAddr: %s\n", net.IP(fl.SrcAddr).String()) 203 | fmt.Printf("DstAddr: %s\n", net.IP(fl.DstAddr).String()) 204 | fmt.Printf("Protocol: %d\n", fl.Protocol) 205 | fmt.Printf("NextHop: %s\n", net.IP(fl.NextHop).String()) 206 | fmt.Printf("IntIn: %d\n", fl.IntIn) 207 | fmt.Printf("IntOut: %d\n", fl.IntOut) 208 | fmt.Printf("Packets: %d\n", fl.Packets) 209 | fmt.Printf("Bytes: %d\n", fl.Size) 210 | fmt.Printf("--------------------------------\n") 211 | } 212 | 213 | // DumpTemplate dumps a template on the screen 214 | func DumpTemplate(tmpl 
*nf9.TemplateRecords) { 215 | fmt.Printf("Template %d\n", tmpl.Header.TemplateID) 216 | for rec, i := range tmpl.Records { 217 | fmt.Printf("%d: %v\n", i, rec) 218 | } 219 | } 220 | 221 | // generateFieldMap processes a TemplateRecord and populates a fieldMap accordingly 222 | // the FieldMap can then be used to read fields from a flow 223 | func generateFieldMap(template *nf9.TemplateRecords) *fieldMap { 224 | var fm fieldMap 225 | i := -1 226 | for _, f := range template.Records { 227 | i++ 228 | 229 | switch f.Type { 230 | case nf9.IPv4SrcAddr: 231 | fm.srcAddr = i 232 | fm.family = 4 233 | case nf9.IPv6SrcAddr: 234 | fm.srcAddr = i 235 | fm.family = 6 236 | case nf9.IPv4DstAddr: 237 | fm.dstAddr = i 238 | case nf9.IPv6DstAddr: 239 | fm.dstAddr = i 240 | case nf9.InBytes: 241 | fm.size = i 242 | case nf9.Protocol: 243 | fm.protocol = i 244 | case nf9.InPkts: 245 | fm.packets = i 246 | case nf9.InputSnmp: 247 | fm.intIn = i 248 | case nf9.OutputSnmp: 249 | fm.intOut = i 250 | case nf9.IPv4NextHop: 251 | fm.nextHop = i 252 | case nf9.IPv6NextHop: 253 | fm.nextHop = i 254 | case nf9.L4SrcPort: 255 | fm.srcPort = i 256 | case nf9.L4DstPort: 257 | fm.dstPort = i 258 | case nf9.SrcAs: 259 | fm.srcAsn = i 260 | case nf9.DstAs: 261 | fm.dstAsn = i 262 | } 263 | } 264 | return &fm 265 | } 266 | 267 | // updateTemplateCache updates the template cache 268 | func (nfs *NetflowServer) updateTemplateCache(remote net.IP, p *nf9.Packet) { 269 | templRecs := p.GetTemplateRecords() 270 | for _, tr := range templRecs { 271 | nfs.tmplCache.set(convert.Uint32(remote), tr.Packet.Header.SourceID, tr.Header.TemplateID, *tr) 272 | } 273 | } 274 | 275 | // makeTemplateKey creates a string of the 3 tuple router address, source id and template id 276 | func makeTemplateKey(addr string, sourceID uint32, templateID uint16, keyParts []string) string { 277 | keyParts[0] = addr 278 | keyParts[1] = strconv.Itoa(int(sourceID)) 279 | keyParts[2] = strconv.Itoa(int(templateID)) 280 | return 
strings.Join(keyParts, "|") 281 | } 282 | -------------------------------------------------------------------------------- /ifserver/ifserver.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package ifserver provides IPFIX collection services via UDP and passes flows into annotator layer 13 | package ifserver 14 | 15 | import ( 16 | "fmt" 17 | "net" 18 | "strconv" 19 | "strings" 20 | "sync/atomic" 21 | 22 | "github.com/golang/glog" 23 | "github.com/google/tflow2/convert" 24 | "github.com/google/tflow2/ipfix" 25 | "github.com/google/tflow2/netflow" 26 | "github.com/google/tflow2/stats" 27 | ) 28 | 29 | // fieldMap describes what information is at what index in the slice 30 | // that we get from decoding a netflow packet 31 | type fieldMap struct { 32 | srcAddr int 33 | dstAddr int 34 | protocol int 35 | packets int 36 | size int 37 | intIn int 38 | intOut int 39 | nextHop int 40 | family int 41 | vlan int 42 | ts int 43 | srcAsn int 44 | dstAsn int 45 | srcPort int 46 | dstPort int 47 | } 48 | 49 | // IPFIXServer represents a Netflow Collector instance 50 | type IPFIXServer struct { 51 | // tmplCache is used to save received flow templates 52 | // for later lookup in order to decode netflow packets 53 | tmplCache *templateCache 54 | 55 | // receiver is the channel used to receive flows from 
the annotator layer 56 | Output chan *netflow.Flow 57 | 58 | // debug defines the debug level 59 | debug int 60 | 61 | // bgpAugment is used to decide if ASN information from netflow packets should be used 62 | bgpAugment bool 63 | } 64 | 65 | // New creates and starts a new `NetflowServer` instance 66 | func New(listenAddr string, numReaders int, bgpAugment bool, debug int) *IPFIXServer { 67 | ifs := &IPFIXServer{ 68 | debug: debug, 69 | tmplCache: newTemplateCache(), 70 | Output: make(chan *netflow.Flow), 71 | bgpAugment: bgpAugment, 72 | } 73 | 74 | addr, err := net.ResolveUDPAddr("udp", listenAddr) 75 | if err != nil { 76 | panic(fmt.Sprintf("ResolveUDPAddr: %v", err)) 77 | } 78 | 79 | con, err := net.ListenUDP("udp", addr) 80 | if err != nil { 81 | panic(fmt.Sprintf("Listen: %v", err)) 82 | } 83 | 84 | // Create goroutines that read netflow packet and process it 85 | for i := 0; i < numReaders; i++ { 86 | go func(num int) { 87 | ifs.packetWorker(num, con) 88 | }(i) 89 | } 90 | 91 | return ifs 92 | } 93 | 94 | // packetWorker reads netflow packet from socket and handsoff processing to processFlowSets() 95 | func (ifs *IPFIXServer) packetWorker(identity int, conn *net.UDPConn) { 96 | buffer := make([]byte, 8960) 97 | for { 98 | length, remote, err := conn.ReadFromUDP(buffer) 99 | if err != nil { 100 | glog.Errorf("Error reading from socket: %v", err) 101 | continue 102 | } 103 | atomic.AddUint64(&stats.GlobalStats.IPFIXpackets, 1) 104 | atomic.AddUint64(&stats.GlobalStats.IPFIXbytes, uint64(length)) 105 | 106 | remote.IP = remote.IP.To4() 107 | if remote.IP == nil { 108 | glog.Errorf("Received IPv6 packet. 
Dropped.") 109 | continue 110 | } 111 | 112 | ifs.processPacket(remote.IP, buffer[:length]) 113 | } 114 | } 115 | 116 | 
// processPacket takes a raw IPFIX packet, sends it to the decoder, updates the
// template cache (if the packet carries template records) and hands the decoded
// packet over to processFlowSets().
func (ifs *IPFIXServer) processPacket(remote net.IP, buffer []byte) {
	packet, err := ipfix.Decode(buffer, remote)
	if err != nil {
		glog.Errorf("ipfix.Decode: %v", err)
		return
	}

	ifs.updateTemplateCache(remote, packet)
	ifs.processFlowSets(remote, packet.Header.DomainID, packet.DataFlowSets(), int64(packet.Header.ExportTime), packet)
}

// processFlowSets iterates over flowSets and calls processFlowSet() for each
// flow set whose template is already known. Flow sets without a cached
// template are skipped (and logged when debugging is enabled).
func (ifs *IPFIXServer) processFlowSets(remote net.IP, domainID uint32, flowSets []*ipfix.Set, ts int64, packet *ipfix.Packet) {
	addr := remote.String()
	keyParts := make([]string, 3) // scratch space reused by makeTemplateKey
	for _, set := range flowSets {
		template := ifs.tmplCache.get(convert.Uint32(remote), domainID, set.Header.SetID)
		if template == nil {
			// Only build the (purely diagnostic) template key when it is
			// actually going to be logged.
			if ifs.debug > 0 {
				glog.Warningf("Template for given FlowSet not found: %s", makeTemplateKey(addr, domainID, set.Header.SetID, keyParts))
			}
			continue
		}

		records := template.DecodeFlowSet(*set)
		if records == nil {
			glog.Warning("Error decoding FlowSet")
			continue
		}
		ifs.processFlowSet(template, records, remote, ts, packet)
	}
}

// processFlowSet generates a netflow.Flow element for each record and pushes
// it into the `Output` channel.
func (ifs *IPFIXServer) processFlowSet(template *ipfix.TemplateRecords, records []ipfix.FlowDataRecord, agent net.IP, ts int64, packet *ipfix.Packet) {
	fm := generateFieldMap(template)

	for _, r := range records {
		switch fm.family {
		case 4:
			atomic.AddUint64(&stats.GlobalStats.Flows4, 1)
		case 6:
			atomic.AddUint64(&stats.GlobalStats.Flows6, 1)
		default:
			glog.Warning("Unknown address family")
			continue
		}

		fl := netflow.Flow{
			Router:    agent,
			Timestamp: ts,
			Family:    uint32(fm.family),
			Packets:   convert.Uint32(r.Values[fm.packets]),
			Size:      uint64(convert.Uint32(r.Values[fm.size])),
			Protocol:  convert.Uint32(r.Values[fm.protocol]),
			IntIn:     convert.Uint32(r.Values[fm.intIn]),
			IntOut:    convert.Uint32(r.Values[fm.intOut]),
			SrcPort:   convert.Uint32(r.Values[fm.srcPort]),
			DstPort:   convert.Uint32(r.Values[fm.dstPort]),
			SrcAddr:   convert.Reverse(r.Values[fm.srcAddr]),
			DstAddr:   convert.Reverse(r.Values[fm.dstAddr]),
			NextHop:   convert.Reverse(r.Values[fm.nextHop]),
		}

		// ASNs from the record are only taken when BGP augmentation is
		// disabled; otherwise the annotator fills them in later.
		if !ifs.bgpAugment {
			fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn])
			fl.DstAs = convert.Uint32(r.Values[fm.dstAsn])
		}

		if ifs.debug > 2 {
			Dump(&fl)
		}

		ifs.Output <- &fl
	}
}

// Dump dumps a flow on the screen
func Dump(fl *netflow.Flow) {
	fmt.Printf("--------------------------------\n")
	fmt.Printf("Flow dump:\n")
	// Router is a raw address ([]byte), so render it like the other
	// addresses below instead of printing the byte values with %d.
	fmt.Printf("Router: %s\n", net.IP(fl.Router).String())
	fmt.Printf("Family: %d\n", fl.Family)
	fmt.Printf("SrcAddr: %s\n", net.IP(fl.SrcAddr).String())
	fmt.Printf("DstAddr: %s\n", net.IP(fl.DstAddr).String())
	fmt.Printf("Protocol: %d\n", fl.Protocol)
	fmt.Printf("NextHop: %s\n", net.IP(fl.NextHop).String())
	fmt.Printf("IntIn: %d\n", fl.IntIn)
	fmt.Printf("IntOut: %d\n", fl.IntOut)
	fmt.Printf("Packets: %d\n", fl.Packets)
	fmt.Printf("Bytes: %d\n", fl.Size)
	fmt.Printf("--------------------------------\n")
}

// DumpTemplate dumps a template on the screen
func DumpTemplate(tmpl *ipfix.TemplateRecords) {
	fmt.Printf("Template %d\n", tmpl.Header.TemplateID)
	// range yields (index, element): print the position followed by the
	// record (the original had the two loop variables swapped).
	for i, rec := range tmpl.Records {
		fmt.Printf("%d: %v\n", i, rec)
	}
}

// generateFieldMap processes a TemplateRecord and populates a fieldMap that
// maps well-known IPFIX field types to their position within a data record.
// The fieldMap can then be used to read fields from a flow.
func generateFieldMap(template *ipfix.TemplateRecords) *fieldMap {
	var fm fieldMap
	for i, f := range template.Records {
		switch f.Type {
		case ipfix.IPv4SrcAddr:
			fm.srcAddr = i
			fm.family = 4
		case ipfix.IPv6SrcAddr:
			fm.srcAddr = i
			fm.family = 6
		case ipfix.IPv4DstAddr, ipfix.IPv6DstAddr:
			fm.dstAddr = i
		case ipfix.InBytes:
			fm.size = i
		case ipfix.Protocol:
			fm.protocol = i
		case ipfix.InPkts:
			fm.packets = i
		case ipfix.InputSnmp:
			fm.intIn = i
		case ipfix.OutputSnmp:
			fm.intOut = i
		case ipfix.IPv4NextHop, ipfix.IPv6NextHop:
			fm.nextHop = i
		case ipfix.L4SrcPort:
			fm.srcPort = i
		case ipfix.L4DstPort:
			fm.dstPort = i
		case ipfix.SrcAs:
			fm.srcAsn = i
		case ipfix.DstAs:
			fm.dstAsn = i
		}
	}
	return &fm
}

// updateTemplateCache stores every template record found in p in the cache,
// keyed by router address, observation domain and template ID.
func (ifs *IPFIXServer) updateTemplateCache(remote net.IP, p *ipfix.Packet) {
	for _, tr := range p.GetTemplateRecords() {
		ifs.tmplCache.set(convert.Uint32(remote), tr.Packet.Header.DomainID, tr.Header.TemplateID, *tr)
	}
}
 275 | // makeTemplateKey creates a string of the 3 tuple router address, source id and template id 276 | func makeTemplateKey(addr string, sourceID uint32, templateID uint16, keyParts []string) string { 277 | keyParts[0] = addr 278 | keyParts[1] = strconv.Itoa(int(sourceID)) 279 | keyParts[2] =
strconv.Itoa(int(templateID)) 280 | return strings.Join(keyParts, "|") 281 | } 282 | -------------------------------------------------------------------------------- /protocol_numbers.csv: -------------------------------------------------------------------------------- 1 | Decimal,Keyword,Protocol,IPv6 Extension Header,Reference 2 | 0,HOPOPT,IPv6 Hop-by-Hop Option,Y,[RFC2460] 3 | 1,ICMP,Internet Control Message,,[RFC792] 4 | 2,IGMP,Internet Group Management,,[RFC1112] 5 | 3,GGP,Gateway-to-Gateway,,[RFC823] 6 | 4,IPv4,IPv4 encapsulation,,[RFC2003] 7 | 5,ST,Stream,,[RFC1190][RFC1819] 8 | 6,TCP,Transmission Control,,[RFC793] 9 | 7,CBT,CBT,,[Tony_Ballardie] 10 | 8,EGP,Exterior Gateway Protocol,,[RFC888][David_Mills] 11 | 9,IGP,"any private interior gateway 12 | (used by Cisco for their IGRP)",,[Internet_Assigned_Numbers_Authority] 13 | 10,BBN-RCC-MON,BBN RCC Monitoring,,[Steve_Chipman] 14 | 11,NVP-II,Network Voice Protocol,,[RFC741][Steve_Casner] 15 | 12,PUP,PUP,,"[Boggs, D., J. Shoch, E. Taft, and R. Metcalfe, ""PUP: An 16 | Internetwork Architecture"", XEROX Palo Alto Research Center, 17 | CSL-79-10, July 1979; also in IEEE Transactions on 18 | Communication, Volume COM-28, Number 4, April 1980.][[XEROX]]" 19 | 13,ARGUS (deprecated),ARGUS,,[Robert_W_Scheifler] 20 | 14,EMCON,EMCON,,[] 21 | 15,XNET,Cross Net Debugger,,"[Haverty, J., ""XNET Formats for Internet Protocol Version 4"", 22 | IEN 158, October 1980.][Jack_Haverty]" 23 | 16,CHAOS,Chaos,,[J_Noel_Chiappa] 24 | 17,UDP,User Datagram,,[RFC768][Jon_Postel] 25 | 18,MUX,Multiplexing,,"[Cohen, D. and J. 
Postel, ""Multiplexing Protocol"", IEN 90, 26 | USC/Information Sciences Institute, May 1979.][Jon_Postel]" 27 | 19,DCN-MEAS,DCN Measurement Subsystems,,[David_Mills] 28 | 20,HMP,Host Monitoring,,[RFC869][Bob_Hinden] 29 | 21,PRM,Packet Radio Measurement,,[Zaw_Sing_Su] 30 | 22,XNS-IDP,XEROX NS IDP,,"[""The Ethernet, A Local Area Network: Data Link Layer and 31 | Physical Layer Specification"", AA-K759B-TK, Digital 32 | Equipment Corporation, Maynard, MA. Also as: ""The 33 | Ethernet - A Local Area Network"", Version 1.0, Digital 34 | Equipment Corporation, Intel Corporation, Xerox 35 | Corporation, September 1980. And: ""The Ethernet, A Local 36 | Area Network: Data Link Layer and Physical Layer 37 | Specifications"", Digital, Intel and Xerox, November 1982. 38 | And: XEROX, ""The Ethernet, A Local Area Network: Data Link 39 | Layer and Physical Layer Specification"", X3T51/80-50, 40 | Xerox Corporation, Stamford, CT., October 1980.][[XEROX]]" 41 | 23,TRUNK-1,Trunk-1,,[Barry_Boehm] 42 | 24,TRUNK-2,Trunk-2,,[Barry_Boehm] 43 | 25,LEAF-1,Leaf-1,,[Barry_Boehm] 44 | 26,LEAF-2,Leaf-2,,[Barry_Boehm] 45 | 27,RDP,Reliable Data Protocol,,[RFC908][Bob_Hinden] 46 | 28,IRTP,Internet Reliable Transaction,,[RFC938][Trudy_Miller] 47 | 29,ISO-TP4,ISO Transport Protocol Class 4,,[RFC905][] 48 | 30,NETBLT,Bulk Data Transfer Protocol,,[RFC969][David_Clark] 49 | 31,MFE-NSP,MFE Network Services Protocol,,"[Shuttleworth, B., ""A Documentary of MFENet, a National 50 | Computer Network"", UCRL-52317, Lawrence Livermore Labs, 51 | Livermore, California, June 1977.][Barry_Howard]" 52 | 32,MERIT-INP,MERIT Internodal Protocol,,[Hans_Werner_Braun] 53 | 33,DCCP,Datagram Congestion Control Protocol,,[RFC4340] 54 | 34,3PC,Third Party Connect Protocol,,[Stuart_A_Friedberg] 55 | 35,IDPR,Inter-Domain Policy Routing Protocol,,[Martha_Steenstrup] 56 | 36,XTP,XTP,,[Greg_Chesson] 57 | 37,DDP,Datagram Delivery Protocol,,[Wesley_Craig] 58 | 38,IDPR-CMTP,IDPR Control Message Transport 
Proto,,[Martha_Steenstrup] 59 | 39,TP++,TP++ Transport Protocol,,[Dirk_Fromhein] 60 | 40,IL,IL Transport Protocol,,[Dave_Presotto] 61 | 41,IPv6,IPv6 encapsulation,,[RFC2473] 62 | 42,SDRP,Source Demand Routing Protocol,,[Deborah_Estrin] 63 | 43,IPv6-Route,Routing Header for IPv6,Y,[Steve_Deering] 64 | 44,IPv6-Frag,Fragment Header for IPv6,Y,[Steve_Deering] 65 | 45,IDRP,Inter-Domain Routing Protocol,,[Sue_Hares] 66 | 46,RSVP,Reservation Protocol,,[RFC2205][RFC3209][Bob_Braden] 67 | 47,GRE,Generic Routing Encapsulation,,[RFC2784][Tony_Li] 68 | 48,DSR,Dynamic Source Routing Protocol,,[RFC4728] 69 | 49,BNA,BNA,,[Gary Salamon] 70 | 50,ESP,Encap Security Payload,Y,[RFC4303] 71 | 51,AH,Authentication Header,Y,[RFC4302] 72 | 52,I-NLSP,Integrated Net Layer Security TUBA,,[K_Robert_Glenn] 73 | 53,SWIPE (deprecated),IP with Encryption,,[John_Ioannidis] 74 | 54,NARP,NBMA Address Resolution Protocol,,[RFC1735] 75 | 55,MOBILE,IP Mobility,,[Charlie_Perkins] 76 | 56,TLSP,"Transport Layer Security Protocol 77 | using Kryptonet key management",,[Christer_Oberg] 78 | 57,SKIP,SKIP,,[Tom_Markson] 79 | 58,IPv6-ICMP,ICMP for IPv6,,[RFC2460] 80 | 59,IPv6-NoNxt,No Next Header for IPv6,,[RFC2460] 81 | 60,IPv6-Opts,Destination Options for IPv6,Y,[RFC2460] 82 | 61,,any host internal protocol,,[Internet_Assigned_Numbers_Authority] 83 | 62,CFTP,CFTP,,"[Forsdick, H., ""CFTP"", Network Message, Bolt Beranek and 84 | Newman, January 1982.][Harry_Forsdick]" 85 | 63,,any local network,,[Internet_Assigned_Numbers_Authority] 86 | 64,SAT-EXPAK,SATNET and Backroom EXPAK,,[Steven_Blumenthal] 87 | 65,KRYPTOLAN,Kryptolan,,[Paul Liu] 88 | 66,RVD,MIT Remote Virtual Disk Protocol,,[Michael_Greenwald] 89 | 67,IPPC,Internet Pluribus Packet Core,,[Steven_Blumenthal] 90 | 68,,any distributed file system,,[Internet_Assigned_Numbers_Authority] 91 | 69,SAT-MON,SATNET Monitoring,,[Steven_Blumenthal] 92 | 70,VISA,VISA Protocol,,[Gene_Tsudik] 93 | 71,IPCV,Internet Packet Core Utility,,[Steven_Blumenthal] 94 | 
72,CPNX,Computer Protocol Network Executive,,[David Mittnacht] 95 | 73,CPHB,Computer Protocol Heart Beat,,[David Mittnacht] 96 | 74,WSN,Wang Span Network,,[Victor Dafoulas] 97 | 75,PVP,Packet Video Protocol,,[Steve_Casner] 98 | 76,BR-SAT-MON,Backroom SATNET Monitoring,,[Steven_Blumenthal] 99 | 77,SUN-ND,SUN ND PROTOCOL-Temporary,,[William_Melohn] 100 | 78,WB-MON,WIDEBAND Monitoring,,[Steven_Blumenthal] 101 | 79,WB-EXPAK,WIDEBAND EXPAK,,[Steven_Blumenthal] 102 | 80,ISO-IP,ISO Internet Protocol,,[Marshall_T_Rose] 103 | 81,VMTP,VMTP,,[Dave_Cheriton] 104 | 82,SECURE-VMTP,SECURE-VMTP,,[Dave_Cheriton] 105 | 83,VINES,VINES,,[Brian Horn] 106 | 84,TTP,Transaction Transport Protocol,,[Jim_Stevens] 107 | 84,IPTM,Internet Protocol Traffic Manager,,[Jim_Stevens] 108 | 85,NSFNET-IGP,NSFNET-IGP,,[Hans_Werner_Braun] 109 | 86,DGP,Dissimilar Gateway Protocol,,"[M/A-COM Government Systems, ""Dissimilar Gateway Protocol 110 | Specification, Draft Version"", Contract no. CS901145, 111 | November 16, 1987.][Mike_Little]" 112 | 87,TCF,TCF,,[Guillermo_A_Loyola] 113 | 88,EIGRP,EIGRP,,[RFC7868] 114 | 89,OSPFIGP,OSPFIGP,,[RFC1583][RFC2328][RFC5340][John_Moy] 115 | 90,Sprite-RPC,Sprite RPC Protocol,,"[Welch, B., ""The Sprite Remote Procedure Call System"", 116 | Technical Report, UCB/Computer Science Dept., 86/302, 117 | University of California at Berkeley, June 1986.][Bruce Willins]" 118 | 91,LARP,Locus Address Resolution Protocol,,[Brian Horn] 119 | 92,MTP,Multicast Transport Protocol,,[Susie_Armstrong] 120 | 93,AX.25,AX.25 Frames,,[Brian_Kantor] 121 | 94,IPIP,IP-within-IP Encapsulation Protocol,,[John_Ioannidis] 122 | 95,MICP (deprecated),Mobile Internetworking Control Pro.,,[John_Ioannidis] 123 | 96,SCC-SP,Semaphore Communications Sec. 
Pro.,,[Howard_Hart] 124 | 97,ETHERIP,Ethernet-within-IP Encapsulation,,[RFC3378] 125 | 98,ENCAP,Encapsulation Header,,[RFC1241][Robert_Woodburn] 126 | 99,,any private encryption scheme,,[Internet_Assigned_Numbers_Authority] 127 | 100,GMTP,GMTP,,[[RXB5]] 128 | 101,IFMP,Ipsilon Flow Management Protocol,,"[Bob_Hinden][November 1995, 1997.]" 129 | 102,PNNI,PNNI over IP,,[Ross_Callon] 130 | 103,PIM,Protocol Independent Multicast,,[RFC7761][Dino_Farinacci] 131 | 104,ARIS,ARIS,,[Nancy_Feldman] 132 | 105,SCPS,SCPS,,[Robert_Durst] 133 | 106,QNX,QNX,,[Michael_Hunter] 134 | 107,A/N,Active Networks,,[Bob_Braden] 135 | 108,IPComp,IP Payload Compression Protocol,,[RFC2393] 136 | 109,SNP,Sitara Networks Protocol,,[Manickam_R_Sridhar] 137 | 110,Compaq-Peer,Compaq Peer Protocol,,[Victor_Volpe] 138 | 111,IPX-in-IP,IPX in IP,,[CJ_Lee] 139 | 112,VRRP,Virtual Router Redundancy Protocol,,[RFC5798] 140 | 113,PGM,PGM Reliable Transport Protocol,,[Tony_Speakman] 141 | 114,,any 0-hop protocol,,[Internet_Assigned_Numbers_Authority] 142 | 115,L2TP,Layer Two Tunneling Protocol,,[RFC3931][Bernard_Aboba] 143 | 116,DDX,D-II Data Exchange (DDX),,[John_Worley] 144 | 117,IATP,Interactive Agent Transfer Protocol,,[John_Murphy] 145 | 118,STP,Schedule Transfer Protocol,,[Jean_Michel_Pittet] 146 | 119,SRP,SpectraLink Radio Protocol,,[Mark_Hamilton] 147 | 120,UTI,UTI,,[Peter_Lothberg] 148 | 121,SMP,Simple Message Protocol,,[Leif_Ekblad] 149 | 122,SM (deprecated),Simple Multicast Protocol,,[Jon_Crowcroft][draft-perlman-simple-multicast] 150 | 123,PTP,Performance Transparency Protocol,,[Michael_Welzl] 151 | 124,ISIS over IPv4,,,[Tony_Przygienda] 152 | 125,FIRE,,,[Criag_Partridge] 153 | 126,CRTP,Combat Radio Transport Protocol,,[Robert_Sautter] 154 | 127,CRUDP,Combat Radio User Datagram,,[Robert_Sautter] 155 | 128,SSCOPMCE,,,[Kurt_Waber] 156 | 129,IPLT,,,[[Hollbach]] 157 | 130,SPS,Secure Packet Shield,,[Bill_McIntosh] 158 | 131,PIPE,Private IP Encapsulation within IP,,[Bernhard_Petri] 159 | 132,SCTP,Stream 
Control Transmission Protocol,,[Randall_R_Stewart] 160 | 133,FC,Fibre Channel,,[Murali_Rajagopal][RFC6172] 161 | 134,RSVP-E2E-IGNORE,,,[RFC3175] 162 | 135,Mobility Header,,Y,[RFC6275] 163 | 136,UDPLite,,,[RFC3828] 164 | 137,MPLS-in-IP,,,[RFC4023] 165 | 138,manet,MANET Protocols,,[RFC5498] 166 | 139,HIP,Host Identity Protocol,Y,[RFC7401] 167 | 140,Shim6,Shim6 Protocol,Y,[RFC5533] 168 | 141,WESP,Wrapped Encapsulating Security Payload,,[RFC5840] 169 | 142,ROHC,Robust Header Compression,,[RFC5858] 170 | 143-252,,Unassigned,,[Internet_Assigned_Numbers_Authority] 171 | 253,,Use for experimentation and testing,Y,[RFC3692] 172 | 254,,Use for experimentation and testing,Y,[RFC3692] 173 | 255,Reserved,,,[Internet_Assigned_Numbers_Authority] 174 | -------------------------------------------------------------------------------- /netflow/netflow.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. 2 | // source: netflow.proto 3 | // DO NOT EDIT! 4 | 5 | /* 6 | Package netflow is a generated protocol buffer package. 7 | 8 | It is generated from these files: 9 | netflow.proto 10 | 11 | It has these top-level messages: 12 | Pfx 13 | Flow 14 | Flows 15 | */ 16 | package netflow 17 | 18 | import proto "github.com/golang/protobuf/proto" 19 | import fmt "fmt" 20 | import math "math" 21 | 22 | // Reference imports to suppress errors if they are not otherwise used. 23 | var _ = proto.Marshal 24 | var _ = fmt.Errorf 25 | var _ = math.Inf 26 | 27 | // This is a compile-time assertion to ensure that this generated file 28 | // is compatible with the proto package it is being compiled against. 29 | // A compilation error at this line likely means your copy of the 30 | // proto package needs to be updated. 
31 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 32 | 33 | // Pfx defines an IP prefix 34 | type Pfx struct { 35 | // IPv4 or IPv6 address 36 | IP []byte `protobuf:"bytes,1,opt,name=IP,proto3" json:"IP,omitempty"` 37 | // Netmask 38 | Mask []byte `protobuf:"bytes,2,opt,name=mask,proto3" json:"mask,omitempty"` 39 | } 40 | 41 | func (m *Pfx) Reset() { *m = Pfx{} } 42 | func (m *Pfx) String() string { return proto.CompactTextString(m) } 43 | func (*Pfx) ProtoMessage() {} 44 | func (*Pfx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 45 | 46 | func (m *Pfx) GetIP() []byte { 47 | if m != nil { 48 | return m.IP 49 | } 50 | return nil 51 | } 52 | 53 | func (m *Pfx) GetMask() []byte { 54 | if m != nil { 55 | return m.Mask 56 | } 57 | return nil 58 | } 59 | 60 | // Flow defines a network flow 61 | type Flow struct { 62 | // Router flow was received from 63 | Router []byte `protobuf:"bytes,1,opt,name=router,proto3" json:"router,omitempty"` 64 | // Address family 65 | Family uint32 `protobuf:"varint,2,opt,name=family" json:"family,omitempty"` 66 | // SRC IP address 67 | SrcAddr []byte `protobuf:"bytes,3,opt,name=src_addr,json=srcAddr,proto3" json:"src_addr,omitempty"` 68 | // DST IP address 69 | DstAddr []byte `protobuf:"bytes,4,opt,name=dst_addr,json=dstAddr,proto3" json:"dst_addr,omitempty"` 70 | // Protocol 71 | Protocol uint32 `protobuf:"varint,5,opt,name=protocol" json:"protocol,omitempty"` 72 | // Number of packets 73 | Packets uint32 `protobuf:"varint,6,opt,name=packets" json:"packets,omitempty"` 74 | // Size of flow 75 | Size uint64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"` 76 | // SNMP interface id flow was received on 77 | IntIn uint32 `protobuf:"varint,8,opt,name=int_in,json=intIn" json:"int_in,omitempty"` 78 | // SNMP interface if flow was transmitted on 79 | IntOut uint32 `protobuf:"varint,9,opt,name=int_out,json=intOut" json:"int_out,omitempty"` 80 | // Next Hop IP address 81 | NextHop []byte 
`protobuf:"bytes,10,opt,name=next_hop,json=nextHop,proto3" json:"next_hop,omitempty"` 82 | // SRC ASN 83 | SrcAs uint32 `protobuf:"varint,11,opt,name=src_as,json=srcAs" json:"src_as,omitempty"` 84 | // DST ASN 85 | DstAs uint32 `protobuf:"varint,12,opt,name=dst_as,json=dstAs" json:"dst_as,omitempty"` 86 | // NEXT HOP ASN 87 | NextHopAs uint32 `protobuf:"varint,13,opt,name=next_hop_as,json=nextHopAs" json:"next_hop_as,omitempty"` 88 | // Unix timestamp 89 | Timestamp int64 `protobuf:"varint,14,opt,name=timestamp" json:"timestamp,omitempty"` 90 | // SRC prefix 91 | SrcPfx *Pfx `protobuf:"bytes,15,opt,name=src_pfx,json=srcPfx" json:"src_pfx,omitempty"` 92 | // DST perfix 93 | DstPfx *Pfx `protobuf:"bytes,16,opt,name=dst_pfx,json=dstPfx" json:"dst_pfx,omitempty"` 94 | // SRC port 95 | SrcPort uint32 `protobuf:"varint,17,opt,name=src_port,json=srcPort" json:"src_port,omitempty"` 96 | // DST port 97 | DstPort uint32 `protobuf:"varint,18,opt,name=dst_port,json=dstPort" json:"dst_port,omitempty"` 98 | } 99 | 100 | func (m *Flow) Reset() { *m = Flow{} } 101 | func (m *Flow) String() string { return proto.CompactTextString(m) } 102 | func (*Flow) ProtoMessage() {} 103 | func (*Flow) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 104 | 105 | func (m *Flow) GetRouter() []byte { 106 | if m != nil { 107 | return m.Router 108 | } 109 | return nil 110 | } 111 | 112 | func (m *Flow) GetFamily() uint32 { 113 | if m != nil { 114 | return m.Family 115 | } 116 | return 0 117 | } 118 | 119 | func (m *Flow) GetSrcAddr() []byte { 120 | if m != nil { 121 | return m.SrcAddr 122 | } 123 | return nil 124 | } 125 | 126 | func (m *Flow) GetDstAddr() []byte { 127 | if m != nil { 128 | return m.DstAddr 129 | } 130 | return nil 131 | } 132 | 133 | func (m *Flow) GetProtocol() uint32 { 134 | if m != nil { 135 | return m.Protocol 136 | } 137 | return 0 138 | } 139 | 140 | func (m *Flow) GetPackets() uint32 { 141 | if m != nil { 142 | return m.Packets 143 | } 144 | return 0 145 | } 
146 | 147 | func (m *Flow) GetSize() uint64 { 148 | if m != nil { 149 | return m.Size 150 | } 151 | return 0 152 | } 153 | 154 | func (m *Flow) GetIntIn() uint32 { 155 | if m != nil { 156 | return m.IntIn 157 | } 158 | return 0 159 | } 160 | 161 | func (m *Flow) GetIntOut() uint32 { 162 | if m != nil { 163 | return m.IntOut 164 | } 165 | return 0 166 | } 167 | 168 | func (m *Flow) GetNextHop() []byte { 169 | if m != nil { 170 | return m.NextHop 171 | } 172 | return nil 173 | } 174 | 175 | func (m *Flow) GetSrcAs() uint32 { 176 | if m != nil { 177 | return m.SrcAs 178 | } 179 | return 0 180 | } 181 | 182 | func (m *Flow) GetDstAs() uint32 { 183 | if m != nil { 184 | return m.DstAs 185 | } 186 | return 0 187 | } 188 | 189 | func (m *Flow) GetNextHopAs() uint32 { 190 | if m != nil { 191 | return m.NextHopAs 192 | } 193 | return 0 194 | } 195 | 196 | func (m *Flow) GetTimestamp() int64 { 197 | if m != nil { 198 | return m.Timestamp 199 | } 200 | return 0 201 | } 202 | 203 | func (m *Flow) GetSrcPfx() *Pfx { 204 | if m != nil { 205 | return m.SrcPfx 206 | } 207 | return nil 208 | } 209 | 210 | func (m *Flow) GetDstPfx() *Pfx { 211 | if m != nil { 212 | return m.DstPfx 213 | } 214 | return nil 215 | } 216 | 217 | func (m *Flow) GetSrcPort() uint32 { 218 | if m != nil { 219 | return m.SrcPort 220 | } 221 | return 0 222 | } 223 | 224 | func (m *Flow) GetDstPort() uint32 { 225 | if m != nil { 226 | return m.DstPort 227 | } 228 | return 0 229 | } 230 | 231 | // Flows defines a groups of flows 232 | type Flows struct { 233 | // Group of flows 234 | Flows []*Flow `protobuf:"bytes,1,rep,name=flows" json:"flows,omitempty"` 235 | } 236 | 237 | func (m *Flows) Reset() { *m = Flows{} } 238 | func (m *Flows) String() string { return proto.CompactTextString(m) } 239 | func (*Flows) ProtoMessage() {} 240 | func (*Flows) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } 241 | 242 | func (m *Flows) GetFlows() []*Flow { 243 | if m != nil { 244 | return m.Flows 245 | } 246 
| return nil 247 | } 248 | 249 | func init() { 250 | proto.RegisterType((*Pfx)(nil), "netflow.pfx") 251 | proto.RegisterType((*Flow)(nil), "netflow.Flow") 252 | proto.RegisterType((*Flows)(nil), "netflow.Flows") 253 | } 254 | 255 | func init() { proto.RegisterFile("netflow.proto", fileDescriptor0) } 256 | 257 | var fileDescriptor0 = []byte{ 258 | // 383 bytes of a gzipped FileDescriptorProto 259 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x51, 0x5d, 0xab, 0xd3, 0x40, 260 | 0x10, 0x25, 0x4d, 0x9b, 0xb4, 0x93, 0xe6, 0xaa, 0x0b, 0xea, 0x28, 0x22, 0xa1, 0x22, 0x44, 0x90, 261 | 0xfb, 0x70, 0xfd, 0x05, 0xf7, 0x45, 0xec, 0x93, 0x25, 0x7f, 0x20, 0xc4, 0x7c, 0x60, 0xb8, 0x49, 262 | 0x76, 0xd9, 0x99, 0xd2, 0xe8, 0xbf, 0xf6, 0x1f, 0xc8, 0xec, 0xa6, 0xf5, 0xc5, 0xb7, 0x3d, 0xe7, 263 | 0xcc, 0x9c, 0x3d, 0x33, 0x03, 0xe9, 0xd4, 0x72, 0x37, 0xe8, 0xcb, 0xbd, 0xb1, 0x9a, 0xb5, 0x8a, 264 | 0x17, 0x78, 0xf8, 0x04, 0xa1, 0xe9, 0x66, 0x75, 0x07, 0xab, 0xe3, 0x09, 0x83, 0x2c, 0xc8, 0xf7, 265 | 0xc5, 0xea, 0x78, 0x52, 0x0a, 0xd6, 0x63, 0x45, 0x4f, 0xb8, 0x72, 0x8c, 0x7b, 0x1f, 0xfe, 0x84, 266 | 0xb0, 0xfe, 0x3a, 0xe8, 0x8b, 0x7a, 0x05, 0x91, 0xd5, 0x67, 0x6e, 0xed, 0xd2, 0xb0, 0x20, 0xe1, 267 | 0xbb, 0x6a, 0xec, 0x87, 0x5f, 0xae, 0x2d, 0x2d, 0x16, 0xa4, 0xde, 0xc0, 0x96, 0x6c, 0x5d, 0x56, 268 | 0x4d, 0x63, 0x31, 0x74, 0x1d, 0x31, 0xd9, 0xfa, 0xb1, 0x69, 0xac, 0x48, 0x0d, 0xb1, 0x97, 0xd6, 269 | 0x5e, 0x6a, 0x88, 0x9d, 0xf4, 0x16, 0xb6, 0x2e, 0x6b, 0xad, 0x07, 0xdc, 0x38, 0xbf, 0x1b, 0x56, 270 | 0x08, 0xb1, 0xa9, 0xea, 0xa7, 0x96, 0x09, 0x23, 0x27, 0x5d, 0xa1, 0x04, 0xa7, 0xfe, 0x77, 0x8b, 271 | 0x71, 0x16, 0xe4, 0xeb, 0xc2, 0xbd, 0xd5, 0x4b, 0x88, 0xfa, 0x89, 0xcb, 0x7e, 0xc2, 0xad, 0x2b, 272 | 0xde, 0xf4, 0x13, 0x1f, 0x27, 0xf5, 0x1a, 0x62, 0xa1, 0xf5, 0x99, 0x71, 0xe7, 0xf3, 0xf6, 0x13, 273 | 0x7f, 0x3f, 0xb3, 0x84, 0x9a, 0xda, 0x99, 0xcb, 0x9f, 0xda, 0x20, 0xf8, 0x50, 0x82, 0xbf, 0x69, 274 | 0x23, 0x56, 0x6e, 0x14, 0xc2, 0xc4, 0x5b, 0xc9, 0x20, 0x24, 0xb4, 
0x1b, 0x83, 0x70, 0xef, 0x69, 275 | 0x19, 0x82, 0xd4, 0x7b, 0x48, 0xae, 0x46, 0xa2, 0xa5, 0x4e, 0xdb, 0x2d, 0x5e, 0x8f, 0xa4, 0xde, 276 | 0xc1, 0x8e, 0xfb, 0xb1, 0x25, 0xae, 0x46, 0x83, 0x77, 0x59, 0x90, 0x87, 0xc5, 0x3f, 0x42, 0x7d, 277 | 0x04, 0x59, 0x53, 0x69, 0xba, 0x19, 0x9f, 0x65, 0x41, 0x9e, 0x3c, 0xec, 0xef, 0x6f, 0x47, 0xec, 278 | 0xe6, 0x42, 0x82, 0x9c, 0xba, 0x59, 0xca, 0xe4, 0x6f, 0x29, 0x7b, 0xfe, 0xbf, 0xb2, 0x86, 0x58, 279 | 0xca, 0x96, 0x23, 0x18, 0x6d, 0x19, 0x5f, 0xf8, 0x9d, 0x89, 0x81, 0xb6, 0x7c, 0x3d, 0x82, 0x93, 280 | 0x94, 0x97, 0xa4, 0x49, 0x5b, 0x3e, 0x7c, 0x86, 0x8d, 0x9c, 0x9c, 0xd4, 0x07, 0xd8, 0x88, 0x25, 281 | 0x61, 0x90, 0x85, 0x79, 0xf2, 0x90, 0xde, 0xfe, 0x10, 0xb9, 0xf0, 0xda, 0x8f, 0xc8, 0x1d, 0xe8, 282 | 0xcb, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0xa2, 0x1d, 0x5c, 0x6d, 0x02, 0x00, 0x00, 283 | } 284 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
(Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2015 Google Inc 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /database/database.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | // Package database keeps track of flow information 13 | package database 14 | 15 | import ( 16 | "compress/gzip" 17 | "fmt" 18 | "net" 19 | "os" 20 | "sync" 21 | "sync/atomic" 22 | "time" 23 | "unsafe" 24 | 25 | "github.com/golang/glog" 26 | "github.com/golang/protobuf/proto" 27 | "github.com/google/tflow2/avltree" 28 | "github.com/google/tflow2/netflow" 29 | "github.com/google/tflow2/nfserver" 30 | ) 31 | 32 | // TimeGroup groups all indices to flows of a particular router at a particular 33 | // time into one object 34 | type TimeGroup struct { 35 | Any map[int]*avltree.Tree // Workaround: Why a map? Because: cannot assign to flows[fl.Timestamp][rtr].Any 36 | SrcAddr map[string]*avltree.Tree 37 | DstAddr map[string]*avltree.Tree 38 | Protocol map[uint32]*avltree.Tree 39 | IntIn map[uint32]*avltree.Tree 40 | IntOut map[uint32]*avltree.Tree 41 | NextHop map[string]*avltree.Tree 42 | SrcAs map[uint32]*avltree.Tree 43 | DstAs map[uint32]*avltree.Tree 44 | NextHopAs map[uint32]*avltree.Tree 45 | SrcPfx map[string]*avltree.Tree 46 | DstPfx map[string]*avltree.Tree 47 | SrcPort map[uint32]*avltree.Tree 48 | DstPort map[uint32]*avltree.Tree 49 | Locks *LockGroup 50 | } 51 | 52 | // LockGroup is a group of locks suitable to lock any particular member of TimeGroup 53 | type LockGroup struct { 54 | Any sync.RWMutex 55 | SrcAddr sync.RWMutex 56 | DstAddr sync.RWMutex 57 | Protocol sync.RWMutex 58 | IntIn sync.RWMutex 59 | IntOut sync.RWMutex 60 | NextHop sync.RWMutex 61 | SrcAs sync.RWMutex 62 | DstAs sync.RWMutex 63 | NextHopAs sync.RWMutex 64 | SrcPfx sync.RWMutex 65 | DstPfx sync.RWMutex 66 | SrcPort sync.RWMutex 67 | DstPort sync.RWMutex 68 | } 69 | 70 | // FlowsByTimeRtr holds all keys (and thus is the only way) to our flows 71 | type FlowsByTimeRtr map[int64]map[string]TimeGroup 72 | 73 | // FlowDatabase represents a flow database object 74 | type FlowDatabase struct { 75 | flows FlowsByTimeRtr 76 | lock sync.RWMutex 77 | maxAge int64 78 | aggregation int64 
79 | lastDump int64 80 | compLevel int 81 | samplerate int 82 | storage string 83 | debug int 84 | anonymize bool 85 | Input chan *netflow.Flow 86 | } 87 | 88 | // New creates a new FlowDatabase and returns a pointer to it 89 | func New(aggregation int64, maxAge int64, numAddWorker int, samplerate int, debug int, compLevel int, storage string, anonymize bool) *FlowDatabase { 90 | flowDB := &FlowDatabase{ 91 | maxAge: maxAge, 92 | aggregation: aggregation, 93 | compLevel: compLevel, 94 | samplerate: samplerate, 95 | Input: make(chan *netflow.Flow), 96 | lastDump: time.Now().Unix(), 97 | storage: storage, 98 | debug: debug, 99 | flows: make(FlowsByTimeRtr), 100 | anonymize: anonymize, 101 | } 102 | 103 | for i := 0; i < numAddWorker; i++ { 104 | go func() { 105 | for { 106 | fl := <-flowDB.Input 107 | flowDB.Add(fl) 108 | } 109 | }() 110 | 111 | go func() { 112 | for { 113 | // Set a timer and wait for our next run 114 | event := time.NewTimer(time.Duration(flowDB.aggregation) * time.Second) 115 | <-event.C 116 | flowDB.CleanUp() 117 | } 118 | }() 119 | 120 | go func() { 121 | for { 122 | // Set a timer and wait for our next run 123 | event := time.NewTimer(time.Duration(flowDB.aggregation) * time.Second) 124 | <-event.C 125 | flowDB.Dumper() 126 | } 127 | }() 128 | } 129 | return flowDB 130 | } 131 | 132 | // Add adds flow `fl` to database fdb 133 | func (fdb *FlowDatabase) Add(fl *netflow.Flow) { 134 | // build indices for map access 135 | rtrip := net.IP(fl.Router) 136 | rtr := rtrip.String() 137 | srcAddr := net.IP(fl.SrcAddr).String() 138 | dstAddr := net.IP(fl.DstAddr).String() 139 | nextHopAddr := net.IP(fl.NextHop).String() 140 | srcPfx := fl.SrcPfx.String() 141 | dstPfx := fl.DstPfx.String() 142 | 143 | fdb.lock.Lock() 144 | // Check if timestamp entry exists already. If not, create it. 145 | if _, ok := fdb.flows[fl.Timestamp]; !ok { 146 | fdb.flows[fl.Timestamp] = make(map[string]TimeGroup) 147 | } 148 | 149 | // Check if router entry exists already. 
If not, create it. 150 | if _, ok := fdb.flows[fl.Timestamp][rtr]; !ok { 151 | fdb.flows[fl.Timestamp][rtr] = TimeGroup{ 152 | Any: make(map[int]*avltree.Tree), 153 | SrcAddr: make(map[string]*avltree.Tree), 154 | DstAddr: make(map[string]*avltree.Tree), 155 | Protocol: make(map[uint32]*avltree.Tree), 156 | IntIn: make(map[uint32]*avltree.Tree), 157 | IntOut: make(map[uint32]*avltree.Tree), 158 | NextHop: make(map[string]*avltree.Tree), 159 | SrcAs: make(map[uint32]*avltree.Tree), 160 | DstAs: make(map[uint32]*avltree.Tree), 161 | NextHopAs: make(map[uint32]*avltree.Tree), 162 | SrcPfx: make(map[string]*avltree.Tree), 163 | DstPfx: make(map[string]*avltree.Tree), 164 | SrcPort: make(map[uint32]*avltree.Tree), 165 | DstPort: make(map[uint32]*avltree.Tree), 166 | Locks: &LockGroup{}, 167 | } 168 | } 169 | fdb.lock.Unlock() 170 | 171 | fdb.lock.RLock() 172 | defer fdb.lock.RUnlock() 173 | if _, ok := fdb.flows[fl.Timestamp]; !ok { 174 | glog.Warningf("stopped adding data for %d: already deleted", fl.Timestamp) 175 | return 176 | } 177 | 178 | locks := fdb.flows[fl.Timestamp][rtr].Locks 179 | 180 | // Start the actual insertion into indices 181 | locks.Any.Lock() 182 | if fdb.flows[fl.Timestamp][rtr].Any[0] == nil { 183 | fdb.flows[fl.Timestamp][rtr].Any[0] = avltree.New() 184 | } 185 | fdb.flows[fl.Timestamp][rtr].Any[0].Insert(fl, fl, ptrIsSmaller) 186 | locks.Any.Unlock() 187 | 188 | locks.SrcAddr.Lock() 189 | if fdb.flows[fl.Timestamp][rtr].SrcAddr[srcAddr] == nil { 190 | fdb.flows[fl.Timestamp][rtr].SrcAddr[srcAddr] = avltree.New() 191 | } 192 | fdb.flows[fl.Timestamp][rtr].SrcAddr[srcAddr].Insert(fl, fl, ptrIsSmaller) 193 | locks.SrcAddr.Unlock() 194 | 195 | locks.DstAddr.Lock() 196 | if fdb.flows[fl.Timestamp][rtr].DstAddr[dstAddr] == nil { 197 | fdb.flows[fl.Timestamp][rtr].DstAddr[dstAddr] = avltree.New() 198 | } 199 | fdb.flows[fl.Timestamp][rtr].DstAddr[dstAddr].Insert(fl, fl, ptrIsSmaller) 200 | locks.DstAddr.Unlock() 201 | 202 | locks.Protocol.Lock() 203 | 
if fdb.flows[fl.Timestamp][rtr].Protocol[fl.Protocol] == nil { 204 | fdb.flows[fl.Timestamp][rtr].Protocol[fl.Protocol] = avltree.New() 205 | } 206 | fdb.flows[fl.Timestamp][rtr].Protocol[fl.Protocol].Insert(fl, fl, ptrIsSmaller) 207 | locks.Protocol.Unlock() 208 | 209 | locks.IntIn.Lock() 210 | if fdb.flows[fl.Timestamp][rtr].IntIn[fl.IntIn] == nil { 211 | fdb.flows[fl.Timestamp][rtr].IntIn[fl.IntIn] = avltree.New() 212 | } 213 | fdb.flows[fl.Timestamp][rtr].IntIn[fl.IntIn].Insert(fl, fl, ptrIsSmaller) 214 | locks.IntIn.Unlock() 215 | 216 | locks.IntOut.Lock() 217 | if fdb.flows[fl.Timestamp][rtr].IntOut[fl.IntOut] == nil { 218 | fdb.flows[fl.Timestamp][rtr].IntOut[fl.IntOut] = avltree.New() 219 | } 220 | fdb.flows[fl.Timestamp][rtr].IntOut[fl.IntOut].Insert(fl, fl, ptrIsSmaller) 221 | locks.IntOut.Unlock() 222 | 223 | locks.NextHop.Lock() 224 | if fdb.flows[fl.Timestamp][rtr].NextHop[nextHopAddr] == nil { 225 | fdb.flows[fl.Timestamp][rtr].NextHop[nextHopAddr] = avltree.New() 226 | } 227 | fdb.flows[fl.Timestamp][rtr].NextHop[nextHopAddr].Insert(fl, fl, ptrIsSmaller) 228 | locks.NextHop.Unlock() 229 | 230 | locks.SrcAs.Lock() 231 | if fdb.flows[fl.Timestamp][rtr].SrcAs[fl.SrcAs] == nil { 232 | fdb.flows[fl.Timestamp][rtr].SrcAs[fl.SrcAs] = avltree.New() 233 | } 234 | fdb.flows[fl.Timestamp][rtr].SrcAs[fl.SrcAs].Insert(fl, fl, ptrIsSmaller) 235 | locks.SrcAs.Unlock() 236 | 237 | locks.DstAs.Lock() 238 | if fdb.flows[fl.Timestamp][rtr].DstAs[fl.DstAs] == nil { 239 | fdb.flows[fl.Timestamp][rtr].DstAs[fl.DstAs] = avltree.New() 240 | } 241 | fdb.flows[fl.Timestamp][rtr].DstAs[fl.DstAs].Insert(fl, fl, ptrIsSmaller) 242 | locks.DstAs.Unlock() 243 | 244 | locks.NextHopAs.Lock() 245 | if fdb.flows[fl.Timestamp][rtr].NextHopAs[fl.NextHopAs] == nil { 246 | fdb.flows[fl.Timestamp][rtr].NextHopAs[fl.NextHopAs] = avltree.New() 247 | } 248 | fdb.flows[fl.Timestamp][rtr].NextHopAs[fl.NextHopAs].Insert(fl, fl, ptrIsSmaller) 249 | locks.NextHopAs.Unlock() 250 | 251 | 
locks.SrcPfx.Lock() 252 | if fdb.flows[fl.Timestamp][rtr].SrcPfx[srcPfx] == nil { 253 | fdb.flows[fl.Timestamp][rtr].SrcPfx[srcPfx] = avltree.New() 254 | } 255 | fdb.flows[fl.Timestamp][rtr].SrcPfx[srcPfx].Insert(fl, fl, ptrIsSmaller) 256 | locks.SrcPfx.Unlock() 257 | 258 | locks.DstPfx.Lock() 259 | if fdb.flows[fl.Timestamp][rtr].DstPfx[dstPfx] == nil { 260 | fdb.flows[fl.Timestamp][rtr].DstPfx[dstPfx] = avltree.New() 261 | } 262 | fdb.flows[fl.Timestamp][rtr].DstPfx[dstPfx].Insert(fl, fl, ptrIsSmaller) 263 | locks.DstPfx.Unlock() 264 | 265 | locks.SrcPort.Lock() 266 | if fdb.flows[fl.Timestamp][rtr].SrcPort[fl.SrcPort] == nil { 267 | fdb.flows[fl.Timestamp][rtr].SrcPort[fl.SrcPort] = avltree.New() 268 | } 269 | fdb.flows[fl.Timestamp][rtr].SrcPort[fl.SrcPort].Insert(fl, fl, ptrIsSmaller) 270 | locks.SrcPort.Unlock() 271 | } 272 | 273 | // CleanUp deletes all flows from database `fdb` that are older than `maxAge` seconds 274 | func (fdb *FlowDatabase) CleanUp() { 275 | now := time.Now().Unix() 276 | now = now - now%fdb.aggregation 277 | 278 | fdb.lock.Lock() 279 | defer fdb.lock.Unlock() 280 | for ts := range fdb.flows { 281 | if ts < now-fdb.maxAge { 282 | delete(fdb.flows, ts) 283 | } 284 | } 285 | } 286 | 287 | // Dumper dumps all flows in `fdb` to hard drive that haven't been dumped yet 288 | func (fdb *FlowDatabase) Dumper() { 289 | fdb.lock.RLock() 290 | defer fdb.lock.RUnlock() 291 | 292 | min := atomic.LoadInt64(&fdb.lastDump) 293 | now := time.Now().Unix() 294 | max := (now - now%fdb.aggregation) - 2*fdb.aggregation 295 | atomic.StoreInt64(&fdb.lastDump, max) 296 | 297 | for ts := range fdb.flows { 298 | if ts < min || ts > max { 299 | continue 300 | } 301 | for router := range fdb.flows[ts] { 302 | go fdb.dumpToDisk(ts, router) 303 | } 304 | atomic.StoreInt64(&fdb.lastDump, ts) 305 | } 306 | } 307 | 308 | func (fdb *FlowDatabase) dumpToDisk(ts int64, router string) { 309 | fdb.lock.RLock() 310 | tree := fdb.flows[ts][router].Any[0] 311 | 
fdb.lock.RUnlock() 312 | 313 | flows := &netflow.Flows{} 314 | 315 | tree.Each(dump, fdb.anonymize, flows) 316 | 317 | if fdb.debug > 1 { 318 | glog.Warningf("flows contains %d flows", len(flows.Flows)) 319 | } 320 | buffer, err := proto.Marshal(flows) 321 | if err != nil { 322 | glog.Errorf("unable to marshal flows into pb: %v", err) 323 | return 324 | } 325 | 326 | ymd := fmt.Sprintf("%04d-%02d-%02d", time.Unix(ts, 0).Year(), time.Unix(ts, 0).Month(), time.Unix(ts, 0).Day()) 327 | os.Mkdir(fmt.Sprintf("%s/%s", fdb.storage, ymd), 0700) 328 | 329 | fh, err := os.Create(fmt.Sprintf("%s/%s/nf-%d-%s.tflow2.pb.gzip", fdb.storage, ymd, ts, router)) 330 | if err != nil { 331 | glog.Errorf("couldn't create file: %v", err) 332 | } 333 | defer fh.Close() 334 | 335 | // Compress data before writing it out to the disk 336 | gz, err := gzip.NewWriterLevel(fh, fdb.compLevel) 337 | if err != nil { 338 | glog.Errorf("invalud gzip compression level: %v", err) 339 | return 340 | } 341 | _, err = gz.Write(buffer) 342 | gz.Close() 343 | 344 | if err != nil { 345 | glog.Errorf("failed to write file: %v", err) 346 | } 347 | } 348 | 349 | func dump(node *avltree.TreeNode, vals ...interface{}) { 350 | anonymize := vals[0].(bool) 351 | flows := vals[1].(*netflow.Flows) 352 | flow := node.Value.(*netflow.Flow) 353 | flowcopy := *flow 354 | 355 | if anonymize { 356 | // Remove information about particular IP addresses for privacy reason 357 | flowcopy.SrcAddr = []byte{0, 0, 0, 0} 358 | flowcopy.DstAddr = []byte{0, 0, 0, 0} 359 | } 360 | 361 | flows.Flows = append(flows.Flows, &flowcopy) 362 | } 363 | 364 | // ptrIsSmaller checks if uintptr c1 is smaller than uintptr c2 365 | func ptrIsSmaller(c1 interface{}, c2 interface{}) bool { 366 | x := uintptr(unsafe.Pointer(c1.(*netflow.Flow))) 367 | y := uintptr(unsafe.Pointer(c2.(*netflow.Flow))) 368 | 369 | return x < y 370 | } 371 | 372 | // uint64IsSmaller checks if uint64 c1 is smaller than uint64 c2 373 | func uint64IsSmaller(c1 interface{}, 
c2 interface{}) bool { 374 | return c1.(uint64) < c2.(uint64) 375 | } 376 | 377 | // uint64IsSmaller checks if int64 c1 is small than int64 c2 378 | func int64IsSmaller(c1 interface{}, c2 interface{}) bool { 379 | return c1.(int64) < c2.(int64) 380 | } 381 | 382 | // dumpFlows dumps all flows a tree `tree` 383 | func dumpFlows(tree *avltree.TreeNode) { 384 | tree.Each(printNode) 385 | } 386 | 387 | // printNode dumps the flow of `node` on the screen 388 | func printNode(node *avltree.TreeNode, vals ...interface{}) { 389 | fl := node.Value.(*netflow.Flow) 390 | nfserver.Dump(fl) 391 | } 392 | -------------------------------------------------------------------------------- /avltree/avtltree.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | // Package avltree provides an universal AVL tree 13 | package avltree 14 | 15 | import ( 16 | "fmt" 17 | "sync" 18 | 19 | "github.com/golang/glog" 20 | ) 21 | 22 | // Comparable is an interface used to pass compare functions to this avltree 23 | type Comparable func(c1 interface{}, c2 interface{}) bool 24 | 25 | // EachFunc is an interface used to pass a function to the each() method 26 | type EachFunc func(node *TreeNode, vals ...interface{}) 27 | 28 | // Tree represents a tree 29 | type Tree struct { 30 | root *TreeNode 31 | lock sync.RWMutex 32 | Count int 33 | } 34 | 35 | // TreeNode represents a node in a tree 36 | type TreeNode struct { 37 | left *TreeNode 38 | right *TreeNode 39 | key interface{} 40 | Value interface{} 41 | height int64 42 | issmaller Comparable 43 | } 44 | 45 | // max gives returns the maximum of a and b 46 | func max(a, b int64) int64 { 47 | if a > b { 48 | return a 49 | } 50 | return b 51 | } 52 | 53 | // getHeight return the height of tree with root `root` 54 | func (root *TreeNode) getHeight() int64 { 55 | if root != nil { 56 | return root.height 57 | } 58 | return -1 59 | } 60 | 61 | // TreeMinValueNode returns the node with the minimal key in the tree 62 | func (root *TreeNode) minValueNode() *TreeNode { 63 | for root.left != nil { 64 | return root.left.minValueNode() 65 | } 66 | return nil 67 | } 68 | 69 | // search searches element with key `key` in tree with root `root` 70 | // in case the searched element doesn't exist nil is returned 71 | func (root *TreeNode) search(key interface{}) *TreeNode { 72 | if root.key == key { 73 | return root 74 | } 75 | 76 | if root.issmaller(key, root.key) { 77 | if root.left == nil { 78 | return nil 79 | } 80 | return root.left.search(key) 81 | } 82 | 83 | if root.right == nil { 84 | return nil 85 | } 86 | return root.right.search(key) 87 | } 88 | 89 | // getBalance return difference of height of left and right 90 | // subtrees of tree with root `root` 91 | func (root *TreeNode) 
getBalance() int64 { 92 | if root == nil { 93 | return 0 94 | } 95 | return root.left.getHeight() - root.right.getHeight() 96 | } 97 | 98 | // leftRotate rotates tree with root `root` to the left and 99 | // returns it's new root 100 | func (root *TreeNode) leftRotate() *TreeNode { 101 | node := root.right 102 | root.right = node.left 103 | node.left = root 104 | 105 | root.height = max(root.left.getHeight(), root.right.getHeight()) + 1 106 | node.height = max(node.right.getHeight(), node.left.getHeight()) + 1 107 | return node 108 | } 109 | 110 | // leftRightRotate performs a left-right rotation of tree with root 111 | // `root` and returns it's new root 112 | func (root *TreeNode) leftRightRotate() *TreeNode { 113 | root.left = root.left.leftRotate() 114 | root = root.rightRotate() 115 | return root 116 | } 117 | 118 | // rightRotate performs a right rotation of tree with root `root` 119 | // and returns it's new root 120 | func (root *TreeNode) rightRotate() *TreeNode { 121 | node := root.left 122 | root.left = node.right 123 | node.right = root 124 | root.height = max(root.left.getHeight(), root.right.getHeight()) + 1 125 | node.height = max(node.left.getHeight(), node.right.getHeight()) + 1 126 | return node 127 | } 128 | 129 | // rightLeftRotate preforms a right-left rotation of tree with root 130 | // `root` and returns it's new root 131 | func (root *TreeNode) rightLeftRotate() *TreeNode { 132 | root.right = root.right.rightRotate() 133 | root = root.leftRotate() 134 | return root 135 | } 136 | 137 | // delete deletes node with key `key` from tree. 
If necessary rebalancing 138 | // is done and new root is returned 139 | func (root *TreeNode) delete(key interface{}) *TreeNode { 140 | if root == nil { 141 | return nil 142 | } 143 | 144 | if root.issmaller(key, root.key) { 145 | root.left = root.left.delete(key) 146 | } else if key == root.key { 147 | if root.left == nil && root.right == nil { 148 | return nil 149 | } else if root.left == nil { 150 | return root.left 151 | } else if root.right == nil { 152 | return root.right 153 | } 154 | 155 | tmp := root.minValueNode() 156 | root.key = tmp.key 157 | root.Value = tmp.Value 158 | root.right = root.right.delete(tmp.key) 159 | 160 | root.height = max(root.left.getHeight(), root.right.getHeight()) + 1 161 | balance := root.getBalance() 162 | if balance > 1 && root.left.getBalance() >= 0 { 163 | return root.rightRotate() 164 | } else if balance > 1 && root.left.getBalance() < 0 { 165 | return root.leftRightRotate() 166 | } else if balance < -1 && root.right.getBalance() <= 0 { 167 | return root.leftRotate() 168 | } else if balance < -1 && root.right.getBalance() > 0 { 169 | return root.rightLeftRotate() 170 | } 171 | } else { 172 | root.right = root.right.delete(key) 173 | } 174 | 175 | return root 176 | } 177 | 178 | // isEqual is a generic function that compares a and b of any comprable type 179 | // return true if a and b are equal, otherwise false 180 | func isEqual(a interface{}, b interface{}) bool { 181 | return a == b 182 | } 183 | 184 | // New simply returns a new (empty) tree 185 | func New() *Tree { 186 | return &Tree{} 187 | } 188 | 189 | // Insert inserts an element to tree with root `t` 190 | func (t *Tree) Insert(key interface{}, value interface{}, issmaller Comparable) (new *TreeNode, err error) { 191 | if t == nil { 192 | return nil, fmt.Errorf("unable to insert into nil tree") 193 | } 194 | t.lock.Lock() 195 | defer t.lock.Unlock() 196 | t.root, new = t.root.insert(key, value, issmaller) 197 | t.Count++ 198 | return new, nil 199 | } 200 | 201 | // 
insert inserts an element into tree with root `root` 202 | func (root *TreeNode) insert(key interface{}, value interface{}, issmaller Comparable) (*TreeNode, *TreeNode) { 203 | if root == nil { 204 | root = &TreeNode{ 205 | left: nil, 206 | right: nil, 207 | key: key, 208 | Value: value, 209 | height: 0, 210 | issmaller: issmaller, 211 | } 212 | return root, root 213 | } 214 | 215 | if isEqual(key, root.key) { 216 | return root, root 217 | } 218 | 219 | var new *TreeNode 220 | if root.issmaller(key, root.key) { 221 | root.left, new = root.left.insert(key, value, issmaller) 222 | if root.left.getHeight()-root.right.getHeight() == 2 { 223 | if root.issmaller(key, root.left.key) { 224 | root = root.rightRotate() 225 | } else { 226 | root = root.leftRightRotate() 227 | } 228 | } 229 | } else { 230 | root.right, new = root.right.insert(key, value, issmaller) 231 | if root.right.getHeight()-root.left.getHeight() == 2 { 232 | if (!root.issmaller(key, root.right.key)) && !isEqual(key, root.right.key) { 233 | root = root.leftRotate() 234 | } else { 235 | root = root.rightLeftRotate() 236 | } 237 | } 238 | } 239 | 240 | root.height = max(root.left.getHeight(), root.right.getHeight()) + 1 241 | return root, new 242 | } 243 | 244 | // Exists checks if a node with key `key` exists in tree `t` 245 | func (t *Tree) Exists(key interface{}) bool { 246 | if t == nil { 247 | return false 248 | } 249 | return t.root.exists(key) 250 | } 251 | 252 | // exists recursively searches through tree with root `root` for element with 253 | // key `key` 254 | func (root *TreeNode) exists(key interface{}) bool { 255 | if root == nil { 256 | return false 257 | } 258 | 259 | if isEqual(key, root.key) { 260 | return true 261 | } 262 | 263 | if root.issmaller(key, root.key) { 264 | if root.left == nil { 265 | return false 266 | } 267 | return root.left.exists(key) 268 | } 269 | if root.right == nil { 270 | return false 271 | } 272 | return root.right.exists(key) 273 | } 274 | 275 | // Intersection 
finds common elements in trees `t` and `x` and returns them in a new tree 276 | func (t *Tree) Intersection(x *Tree) (res *Tree) { 277 | if t == nil || x == nil { 278 | return nil 279 | } 280 | res = New() 281 | t.lock.RLock() 282 | x.lock.RLock() 283 | defer t.lock.RUnlock() 284 | defer x.lock.RUnlock() 285 | 286 | n := 0 287 | newRoot := t.root.intersection(x.root, res.root, &n) 288 | 289 | return &Tree{ 290 | root: newRoot, 291 | Count: n, 292 | } 293 | } 294 | 295 | // Intersection builds a tree of common elements of all trees in `candidates` 296 | func Intersection(candidates []*Tree) (res *Tree) { 297 | n := len(candidates) 298 | if n == 0 { 299 | return nil 300 | } 301 | 302 | if n == 1 { 303 | return candidates[0] 304 | } 305 | 306 | chA := make([]chan *Tree, n/2) 307 | chB := make([]chan *Tree, n/2) 308 | chRet := make([]chan *Tree, n/2) 309 | 310 | // Start a go routine that builds intersection of each pair of candidates 311 | for i := 0; i < n/2; i++ { 312 | chA[i] = make(chan *Tree) 313 | chB[i] = make(chan *Tree) 314 | chRet[i] = make(chan *Tree) 315 | go func(chA chan *Tree, chB chan *Tree, chRes chan *Tree) { 316 | a := <-chA 317 | b := <-chB 318 | if a == nil || b == nil { 319 | chRes <- nil 320 | return 321 | } 322 | 323 | glog.Infof("finding common elements in %d and %d elements", a.Count, b.Count) 324 | chRes <- a.Intersection(b) 325 | }(chA[i], chB[i], chRet[i]) 326 | chA[i] <- candidates[i*2] 327 | chB[i] <- candidates[i*2+1] 328 | } 329 | 330 | results := make([]*Tree, 0) 331 | 332 | // If amount of candidate trees is uneven we have to add last tree to results 333 | if n%2 == 1 { 334 | results = append(results, candidates[n-1]) 335 | } 336 | 337 | // Fetch results 338 | for i := 0; i < n/2; i++ { 339 | results = append(results, <-chRet[i]) 340 | } 341 | 342 | // If we only have one tree left over, we're done 343 | if len(results) != 1 { 344 | return Intersection(results) 345 | } 346 | 347 | return results[0] 348 | } 349 | 350 | // intersection 
recursively finds common elements in tree with roots `root` and `b` 351 | // and returns the result in a new tree 352 | func (root *TreeNode) intersection(b *TreeNode, res *TreeNode, n *int) *TreeNode { 353 | if root == nil || b == nil { 354 | return res 355 | } 356 | 357 | if root.left != nil { 358 | res = root.left.intersection(b, res, n) 359 | } 360 | if root.right != nil { 361 | res = root.right.intersection(b, res, n) 362 | } 363 | if b.exists(root.key) { 364 | res, _ = res.insert(root.key, root.key, root.issmaller) 365 | *n++ 366 | } 367 | 368 | return res 369 | } 370 | 371 | // Each can be used to traverse tree `t` and call function f with params vals... 372 | // for each node in the tree 373 | func (t *Tree) Each(f EachFunc, vals ...interface{}) { 374 | if t == nil { 375 | return 376 | } 377 | 378 | t.lock.RLock() 379 | defer t.lock.RUnlock() 380 | t.root.Each(f, vals...) 381 | } 382 | 383 | // Each recursively traverses tree `tree` and calls functions f with params vals... 384 | // for each node in the tree 385 | func (root *TreeNode) Each(f EachFunc, vals ...interface{}) { 386 | if root == nil { 387 | return 388 | } 389 | f(root, vals...) 390 | if root.left != nil { 391 | root.left.Each(f, vals...) 392 | } 393 | if root.right != nil { 394 | root.right.Each(f, vals...) 395 | } 396 | } 397 | 398 | // Dump dumps tree `t` into a slice and returns it 399 | func (t *Tree) Dump() (res []interface{}) { 400 | if t == nil { 401 | return 402 | } 403 | 404 | t.lock.RLock() 405 | defer t.lock.RUnlock() 406 | return t.root.dump() 407 | } 408 | 409 | // dump recursively dumps all nodes of tree with root `t` into a slice and returns it 410 | func (root *TreeNode) dump() (res []interface{}) { 411 | if root == nil { 412 | return res 413 | } 414 | if root.left != nil { 415 | tmp := root.left.dump() 416 | res = append(res, tmp...) 417 | } 418 | res = append(res, root.Value) 419 | if root.right != nil { 420 | tmp := root.right.dump() 421 | res = append(res, tmp...) 
422 | } 423 | return res 424 | } 425 | 426 | // TopN finds the the `n` biggest elements in tree `t` and returns them in a slice 427 | func (t *Tree) TopN(n int) (res []interface{}) { 428 | if t == nil { 429 | return 430 | } 431 | 432 | t.lock.RLock() 433 | defer t.lock.RUnlock() 434 | return t.root.topN(n) 435 | } 436 | 437 | // topN recursively traverses tree with root `t` to find biggest `n` elements. 438 | // Top elements are returned as a slice 439 | func (root *TreeNode) topN(n int) (res []interface{}) { 440 | if root == nil { 441 | return res 442 | } 443 | if root.right != nil { 444 | tmp := root.right.topN(n) 445 | for _, k := range tmp { 446 | if len(res) == n { 447 | return res 448 | } 449 | res = append(res, k) 450 | } 451 | } 452 | 453 | if len(res) < n { 454 | res = append(res, root.Value) 455 | } 456 | 457 | if len(res) == n { 458 | return res 459 | } 460 | 461 | if root.left != nil { 462 | tmp := root.left.topN(n - len(res)) 463 | for _, k := range tmp { 464 | if len(res) == n { 465 | return res 466 | } 467 | res = append(res, k) 468 | } 469 | } 470 | return res 471 | } 472 | -------------------------------------------------------------------------------- /database/database_query.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package database 13 | 14 | import ( 15 | "compress/gzip" 16 | "encoding/json" 17 | "fmt" 18 | "io/ioutil" 19 | "net" 20 | "os" 21 | "strconv" 22 | "strings" 23 | "sync" 24 | "time" 25 | 26 | "github.com/golang/glog" 27 | "github.com/golang/protobuf/proto" 28 | "github.com/google/tflow2/avltree" 29 | "github.com/google/tflow2/convert" 30 | "github.com/google/tflow2/netflow" 31 | "github.com/google/tflow2/stats" 32 | ) 33 | 34 | // BreakDownMap defines by what fields data should be broken down in a query 35 | type BreakDownMap struct { 36 | Router bool 37 | Family bool 38 | SrcAddr bool 39 | DstAddr bool 40 | Protocol bool 41 | IntIn bool 42 | IntOut bool 43 | NextHop bool 44 | SrcAsn bool 45 | DstAsn bool 46 | NextHopAsn bool 47 | SrcPfx bool 48 | DstPfx bool 49 | SrcPort bool 50 | DstPort bool 51 | } 52 | 53 | // Condition represents a query condition 54 | type Condition struct { 55 | Field int 56 | Operator int 57 | Operand []byte 58 | } 59 | 60 | // ConditionExt is external representation of a query condition 61 | type ConditionExt struct { 62 | Field int 63 | Operator int 64 | Operand string 65 | } 66 | 67 | // Conditions represents a set of conditions of a query 68 | type Conditions []Condition 69 | 70 | // ConditionsExt is external representation of conditions of a query 71 | type ConditionsExt []ConditionExt 72 | 73 | // QueryExt represents a query in the way it is received from the frontend 74 | type QueryExt struct { 75 | Cond ConditionsExt 76 | Breakdown BreakDownMap 77 | TopN int 78 | } 79 | 80 | // Query is the internal representation of a query 81 | type Query struct { 82 | Cond Conditions 83 | Breakdown BreakDownMap 84 | TopN int 85 | } 86 | 87 | type concurrentResSum struct { 88 | Values map[string]uint64 89 | Lock sync.Mutex 90 | } 91 | 92 | // These constants are used in communication with the frontend 93 | const ( 94 | OpEqual = 0 95 | OpUnequal = 1 96 | OpSmaller = 2 97 | OpGreater = 3 98 | FieldTimestamp = 0 99 | FieldRouter = 1 100 | 
FieldSrcAddr = 2 101 | FieldDstAddr = 3 102 | FieldProtocol = 4 103 | FieldIntIn = 5 104 | FieldIntOut = 6 105 | FieldNextHop = 7 106 | FieldSrcAs = 8 107 | FieldDstAs = 9 108 | FieldNextHopAs = 10 109 | FieldSrcPfx = 11 110 | FieldDstPfx = 12 111 | FieldSrcPort = 13 112 | FieldDstPort = 14 113 | ) 114 | 115 | // translateQuery translates a query from external representation to internal representaion 116 | func translateQuery(e QueryExt) (Query, error) { 117 | var q Query 118 | q.Breakdown = e.Breakdown 119 | q.TopN = e.TopN 120 | 121 | for _, c := range e.Cond { 122 | var operand []byte 123 | 124 | switch c.Field { 125 | case FieldTimestamp: 126 | op, err := strconv.Atoi(c.Operand) 127 | if err != nil { 128 | return q, err 129 | } 130 | operand = convert.Int64Byte(int64(op)) 131 | 132 | case FieldProtocol: 133 | op, err := strconv.Atoi(c.Operand) 134 | if err != nil { 135 | return q, err 136 | } 137 | operand = convert.Uint16Byte(uint16(op)) 138 | 139 | case FieldSrcPort: 140 | op, err := strconv.Atoi(c.Operand) 141 | if err != nil { 142 | return q, err 143 | } 144 | operand = convert.Uint16Byte(uint16(op)) 145 | 146 | case FieldDstPort: 147 | op, err := strconv.Atoi(c.Operand) 148 | if err != nil { 149 | return q, err 150 | } 151 | operand = convert.Uint16Byte(uint16(op)) 152 | 153 | case FieldSrcAddr: 154 | operand = convert.IPByteSlice(c.Operand) 155 | 156 | case FieldDstAddr: 157 | operand = convert.IPByteSlice(c.Operand) 158 | 159 | case FieldRouter: 160 | operand = convert.IPByteSlice(c.Operand) 161 | 162 | case FieldIntIn: 163 | op, err := strconv.Atoi(c.Operand) 164 | if err != nil { 165 | return q, err 166 | } 167 | operand = convert.Uint16Byte(uint16(op)) 168 | 169 | case FieldIntOut: 170 | op, err := strconv.Atoi(c.Operand) 171 | if err != nil { 172 | return q, err 173 | } 174 | operand = convert.Uint16Byte(uint16(op)) 175 | 176 | case FieldNextHop: 177 | operand = convert.IPByteSlice(c.Operand) 178 | 179 | case FieldSrcAs: 180 | op, err := 
strconv.Atoi(c.Operand) 181 | if err != nil { 182 | return q, err 183 | } 184 | operand = convert.Uint32Byte(uint32(op)) 185 | 186 | case FieldDstAs: 187 | op, err := strconv.Atoi(c.Operand) 188 | if err != nil { 189 | return q, err 190 | } 191 | operand = convert.Uint32Byte(uint32(op)) 192 | 193 | case FieldNextHopAs: 194 | op, err := strconv.Atoi(c.Operand) 195 | if err != nil { 196 | return q, err 197 | } 198 | operand = convert.Uint32Byte(uint32(op)) 199 | 200 | case FieldSrcPfx: 201 | _, pfx, err := net.ParseCIDR(string(c.Operand)) 202 | if err != nil { 203 | return q, err 204 | } 205 | operand = []byte(pfx.String()) 206 | 207 | case FieldDstPfx: 208 | _, pfx, err := net.ParseCIDR(string(c.Operand)) 209 | if err != nil { 210 | return q, err 211 | } 212 | operand = []byte(pfx.String()) 213 | } 214 | 215 | q.Cond = append(q.Cond, Condition{ 216 | Field: c.Field, 217 | Operator: c.Operator, 218 | Operand: operand, 219 | }) 220 | } 221 | 222 | return q, nil 223 | } 224 | 225 | // loadFromDisc loads netflow data from disk into in memory data structure 226 | func (fdb *FlowDatabase) loadFromDisc(ts int64, router string, query Query, ch chan map[string]uint64, resSum *concurrentResSum) { 227 | res := avltree.New() 228 | ymd := fmt.Sprintf("%04d-%02d-%02d", time.Unix(ts, 0).Year(), time.Unix(ts, 0).Month(), time.Unix(ts, 0).Day()) 229 | filename := fmt.Sprintf("%s/%s/nf-%d-%s.tflow2.pb.gzip", fdb.storage, ymd, ts, router) 230 | fh, err := os.Open(filename) 231 | if err != nil { 232 | if fdb.debug > 0 { 233 | glog.Errorf("unable to open file: %v", err) 234 | } 235 | ch <- nil 236 | return 237 | } 238 | if fdb.debug > 1 { 239 | glog.Infof("sucessfully opened file: %s", filename) 240 | } 241 | defer fh.Close() 242 | 243 | gz, err := gzip.NewReader(fh) 244 | if err != nil { 245 | glog.Errorf("unable to create gzip reader: %v", err) 246 | ch <- nil 247 | return 248 | } 249 | defer gz.Close() 250 | 251 | buffer, err := ioutil.ReadAll(gz) 252 | if err != nil { 253 | 
glog.Errorf("unable to gunzip: %v", err) 254 | ch <- nil 255 | return 256 | } 257 | 258 | // Unmarshal protobuf 259 | flows := netflow.Flows{} 260 | err = proto.Unmarshal(buffer, &flows) 261 | if err != nil { 262 | glog.Errorf("unable to unmarshal protobuf: %v", err) 263 | ch <- nil 264 | return 265 | } 266 | 267 | if fdb.debug > 1 { 268 | glog.Infof("file %s contains %d flows", filename, len(flows.Flows)) 269 | } 270 | 271 | // Validate flows and add them to res tree 272 | for _, fl := range flows.Flows { 273 | if validateFlow(fl, query) { 274 | res.Insert(fl, fl, ptrIsSmaller) 275 | } 276 | } 277 | 278 | // Breakdown 279 | resTime := make(map[string]uint64) 280 | res.Each(breakdown, query.Breakdown, resSum, resTime) 281 | 282 | ch <- resTime 283 | } 284 | 285 | func validateFlow(fl *netflow.Flow, query Query) bool { 286 | for _, c := range query.Cond { 287 | switch c.Field { 288 | case FieldTimestamp: 289 | continue 290 | case FieldRouter: 291 | continue 292 | case FieldProtocol: 293 | if fl.Protocol != uint32(convert.Uint16b(c.Operand)) { 294 | return false 295 | } 296 | continue 297 | case FieldSrcAddr: 298 | if net.IP(fl.SrcAddr).String() != net.IP(c.Operand).String() { 299 | return false 300 | } 301 | continue 302 | case FieldDstAddr: 303 | if net.IP(fl.DstAddr).String() != net.IP(c.Operand).String() { 304 | return false 305 | } 306 | continue 307 | case FieldIntIn: 308 | if fl.IntIn != uint32(convert.Uint16b(c.Operand)) { 309 | return false 310 | } 311 | continue 312 | case FieldIntOut: 313 | if fl.IntOut != uint32(convert.Uint16b(c.Operand)) { 314 | return false 315 | } 316 | continue 317 | case FieldNextHop: 318 | if net.IP(fl.NextHop).String() != net.IP(c.Operand).String() { 319 | return false 320 | } 321 | continue 322 | case FieldSrcAs: 323 | if fl.SrcAs != convert.Uint32b(c.Operand) { 324 | return false 325 | } 326 | continue 327 | case FieldDstAs: 328 | if fl.DstAs != convert.Uint32b(c.Operand) { 329 | return false 330 | } 331 | continue 332 | case 
FieldNextHopAs: 333 | if fl.NextHopAs != convert.Uint32b(c.Operand) { 334 | return false 335 | } 336 | case FieldSrcPort: 337 | if fl.SrcPort != uint32(convert.Uint16b(c.Operand)) { 338 | return false 339 | } 340 | continue 341 | case FieldDstPort: 342 | if fl.DstPort != uint32(convert.Uint16b(c.Operand)) { 343 | return false 344 | } 345 | continue 346 | case FieldSrcPfx: 347 | if fl.SrcPfx.String() != string(c.Operand) { 348 | return false 349 | } 350 | continue 351 | case FieldDstPfx: 352 | if fl.DstPfx.String() != string(c.Operand) { 353 | return false 354 | } 355 | continue 356 | } 357 | } 358 | return true 359 | } 360 | 361 | // RunQuery executes a query and returns sends the result as JSON on `w` 362 | func (fdb *FlowDatabase) RunQuery(query string) ([][]string, error) { 363 | queryStart := time.Now() 364 | stats.GlobalStats.Queries++ 365 | var qe QueryExt 366 | err := json.Unmarshal([]byte(query), &qe) 367 | if err != nil { 368 | glog.Warningf("Unable unmarshal json query: %s", query) 369 | return nil, err 370 | } 371 | q, err := translateQuery(qe) 372 | if err != nil { 373 | glog.Warningf("Unable to translate query") 374 | return nil, err 375 | } 376 | 377 | // Determine router 378 | rtr := "" 379 | for _, c := range q.Cond { 380 | if c.Field == FieldRouter { 381 | iprtr := net.IP(c.Operand) 382 | rtr = iprtr.String() 383 | } 384 | } 385 | if rtr == "" { 386 | glog.Warningf("Router is mandatory cirteria") 387 | return nil, err 388 | } 389 | 390 | var start int64 391 | end := time.Now().Unix() 392 | 393 | // Determine time window 394 | for _, c := range q.Cond { 395 | if c.Field != FieldTimestamp { 396 | continue 397 | } 398 | switch c.Operator { 399 | case OpGreater: 400 | start = int64(convert.Uint64b(c.Operand)) 401 | case OpSmaller: 402 | end = int64(convert.Uint64b(c.Operand)) 403 | } 404 | } 405 | 406 | // Allign start point to `aggregation` raster 407 | start = start - (start % fdb.aggregation) 408 | 409 | resSum := &concurrentResSum{} 410 | 
resSum.Values = make(map[string]uint64) 411 | resTime := make(map[int64]map[string]uint64) 412 | resChannels := make(map[int64]chan map[string]uint64) 413 | 414 | for ts := start; ts < end; ts += fdb.aggregation { 415 | resChannels[ts] = make(chan map[string]uint64) 416 | fdb.lock.RLock() 417 | if _, ok := fdb.flows[ts]; !ok { 418 | fdb.lock.RUnlock() 419 | go fdb.loadFromDisc(ts, rtr, q, resChannels[ts], resSum) 420 | fdb.lock.RLock() 421 | if _, ok := fdb.flows[ts]; !ok { 422 | fdb.lock.RUnlock() 423 | continue 424 | } 425 | } 426 | 427 | // candidates keeps a list of all trees that fulfill the queries criteria 428 | candidates := make([]*avltree.Tree, 0) 429 | for _, c := range q.Cond { 430 | if fdb.debug > 1 { 431 | glog.Infof("Adding tree to cancidates list: Field: %d, Value: %d", c.Field, c.Operand) 432 | } 433 | switch c.Field { 434 | case FieldTimestamp: 435 | continue 436 | case FieldRouter: 437 | continue 438 | case FieldProtocol: 439 | candidates = append(candidates, fdb.flows[ts][rtr].Protocol[uint32(convert.Uint16b(c.Operand))]) 440 | case FieldSrcAddr: 441 | candidates = append(candidates, fdb.flows[ts][rtr].SrcAddr[net.IP(c.Operand).String()]) 442 | case FieldDstAddr: 443 | candidates = append(candidates, fdb.flows[ts][rtr].DstAddr[net.IP(c.Operand).String()]) 444 | case FieldIntIn: 445 | candidates = append(candidates, fdb.flows[ts][rtr].IntIn[uint32(convert.Uint16b(c.Operand))]) 446 | case FieldIntOut: 447 | candidates = append(candidates, fdb.flows[ts][rtr].IntOut[uint32(convert.Uint16b(c.Operand))]) 448 | case FieldNextHop: 449 | candidates = append(candidates, fdb.flows[ts][rtr].NextHop[net.IP(c.Operand).String()]) 450 | case FieldSrcAs: 451 | candidates = append(candidates, fdb.flows[ts][rtr].SrcAs[convert.Uint32b(c.Operand)]) 452 | case FieldDstAs: 453 | candidates = append(candidates, fdb.flows[ts][rtr].DstAs[convert.Uint32b(c.Operand)]) 454 | case FieldNextHopAs: 455 | candidates = append(candidates, 
fdb.flows[ts][rtr].NextHopAs[convert.Uint32b(c.Operand)]) 456 | case FieldSrcPort: 457 | candidates = append(candidates, fdb.flows[ts][rtr].SrcPort[uint32(convert.Uint16b(c.Operand))]) 458 | case FieldDstPort: 459 | candidates = append(candidates, fdb.flows[ts][rtr].DstPort[uint32(convert.Uint16b(c.Operand))]) 460 | case FieldSrcPfx: 461 | candidates = append(candidates, fdb.flows[ts][rtr].SrcPfx[string(c.Operand)]) 462 | case FieldDstPfx: 463 | candidates = append(candidates, fdb.flows[ts][rtr].DstPfx[string(c.Operand)]) 464 | } 465 | } 466 | 467 | if len(candidates) == 0 { 468 | candidates = append(candidates, fdb.flows[ts][rtr].Any[0]) 469 | } 470 | fdb.lock.RUnlock() 471 | 472 | go func(candidates []*avltree.Tree, ch chan map[string]uint64, ts int64) { 473 | if fdb.debug > 1 { 474 | glog.Infof("candidate trees: %d (%d)", len(candidates), ts) 475 | } 476 | 477 | // Find common elements of candidate trees 478 | res := avltree.Intersection(candidates) 479 | if res == nil { 480 | glog.Warningf("Interseciton Result was empty!") 481 | res = fdb.flows[ts][rtr].Any[0] 482 | } 483 | 484 | // Breakdown 485 | resTime := make(map[string]uint64) 486 | res.Each(breakdown, q.Breakdown, resSum, resTime) 487 | ch <- resTime 488 | }(candidates, resChannels[ts], ts) 489 | } 490 | 491 | // Reading results from go routines 492 | glog.Infof("Awaiting results from go routines") 493 | for ts := start; ts < end; ts += fdb.aggregation { 494 | glog.Infof("Waiting for results for ts %d", ts) 495 | resTime[ts] = <-resChannels[ts] 496 | } 497 | glog.Infof("Done reading results") 498 | 499 | // Build list of all keys 500 | keys := make([]string, 0) 501 | 502 | // Build Tree Bytes -> Key to allow efficient finding of top n flows 503 | var btree = avltree.New() 504 | for k, b := range resSum.Values { 505 | keys = append(keys, k) 506 | btree.Insert(b, k, uint64IsSmaller) 507 | } 508 | 509 | // Find top n keys 510 | topKeysList := btree.TopN(q.TopN) 511 | topKeys := make(map[string]int) 512 | 
for _, v := range topKeysList { 513 | topKeys[v.(string)] = 1 514 | } 515 | 516 | // Find all timestamps we have and get them sorted 517 | tsTree := avltree.New() 518 | for ts := range resTime { 519 | tsTree.Insert(ts, ts, int64IsSmaller) 520 | } 521 | timestamps := tsTree.Dump() 522 | 523 | queryResult := make([][]string, 0) 524 | 525 | // Construct table header 526 | headLine := make([]string, 0) 527 | headLine = append(headLine, "Time") 528 | for _, k := range topKeysList { 529 | headLine = append(headLine, k.(string)) 530 | } 531 | headLine = append(headLine, "Rest") 532 | queryResult = append(queryResult, headLine) 533 | 534 | for _, ts := range timestamps { 535 | line := make([]string, 0) 536 | t := time.Unix(ts.(int64), 0) 537 | line = append(line, fmt.Sprintf("%02d:%02d:%02d", t.Hour(), t.Minute(), t.Second())) 538 | 539 | // Top flows 540 | buckets := resTime[ts.(int64)] 541 | for _, k := range topKeysList { 542 | if _, ok := buckets[k.(string)]; !ok { 543 | line = append(line, "0") 544 | } else { 545 | line = append(line, fmt.Sprintf("%d", buckets[k.(string)]/uint64(fdb.aggregation)*8*uint64(fdb.samplerate))) 546 | } 547 | } 548 | 549 | // Rest 550 | var rest uint64 551 | for k, v := range buckets { 552 | if _, ok := topKeys[k]; ok { 553 | continue 554 | } 555 | rest += v 556 | } 557 | line = append(line, fmt.Sprintf("%d", rest)) 558 | queryResult = append(queryResult, line) 559 | } 560 | 561 | glog.Infof("Query %s took %d ns\n", query, time.Since(queryStart)) 562 | return queryResult, nil 563 | } 564 | 565 | // breakdown build all possible relevant keys of flows for flows in tree `node` 566 | // and builds sums for each key in order to allow us to find top combinations 567 | func breakdown(node *avltree.TreeNode, vals ...interface{}) { 568 | if len(vals) != 3 { 569 | glog.Errorf("lacking arguments") 570 | return 571 | } 572 | 573 | bd := vals[0].(BreakDownMap) 574 | sums := vals[1].(*concurrentResSum) 575 | buckets := vals[2].(map[string]uint64) 576 | fl 
:= node.Value.(*netflow.Flow) 577 | 578 | // Build format string to build key 579 | srcAddr := "_" 580 | dstAddr := "_" 581 | protocol := "_" 582 | intIn := "_" 583 | intOut := "_" 584 | nextHop := "_" 585 | srcAs := "_" 586 | dstAs := "_" 587 | nextHopAs := "_" 588 | srcPfx := "_" 589 | dstPfx := "_" 590 | srcPort := "_" 591 | dstPort := "_" 592 | 593 | if bd.SrcAddr { 594 | srcAddr = fmt.Sprintf("Src:%s", net.IP(fl.SrcAddr).String()) 595 | } 596 | if bd.DstAddr { 597 | dstAddr = fmt.Sprintf("Dst:%s", net.IP(fl.DstAddr).String()) 598 | } 599 | if bd.Protocol { 600 | protocol = fmt.Sprintf("Proto:%d", fl.Protocol) 601 | } 602 | if bd.IntIn { 603 | intIn = fmt.Sprintf("IntIn:%d", fl.IntIn) 604 | } 605 | if bd.IntOut { 606 | intOut = fmt.Sprintf("IntOut:%d", fl.IntOut) 607 | } 608 | if bd.NextHop { 609 | nextHop = fmt.Sprintf("NH:%s", net.IP(fl.NextHop).String()) 610 | } 611 | if bd.SrcAsn { 612 | srcAs = fmt.Sprintf("SrcAS:%d", fl.SrcAs) 613 | } 614 | if bd.DstAsn { 615 | dstAs = fmt.Sprintf("DstAS:%d", fl.DstAs) 616 | } 617 | if bd.NextHopAsn { 618 | nextHopAs = fmt.Sprintf("NH_AS:%d", fl.NextHopAs) 619 | } 620 | if bd.SrcPfx { 621 | if fl.SrcPfx != nil { 622 | pfx := net.IPNet{ 623 | IP: fl.SrcPfx.IP, 624 | Mask: fl.SrcPfx.Mask, 625 | } 626 | srcPfx = fmt.Sprintf("SrcNet:%s", pfx.String()) 627 | } else { 628 | srcPfx = fmt.Sprintf("SrcNet:0.0.0.0/0") 629 | } 630 | } 631 | if bd.DstPfx { 632 | if fl.DstPfx != nil { 633 | pfx := net.IPNet{ 634 | IP: fl.DstPfx.IP, 635 | Mask: fl.DstPfx.Mask, 636 | } 637 | dstPfx = fmt.Sprintf("DstNet:%s", pfx.String()) 638 | } else { 639 | dstPfx = fmt.Sprintf("DstNet:0.0.0.0/0") 640 | } 641 | } 642 | if bd.SrcPort { 643 | srcPort = fmt.Sprintf("SrcPort:%d", fl.SrcPort) 644 | } 645 | if bd.DstPort { 646 | dstPort = fmt.Sprintf("DstPort:%d", fl.DstPort) 647 | } 648 | 649 | // Build key 650 | key := fmt.Sprintf("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s", srcAddr, dstAddr, protocol, intIn, intOut, nextHop, srcAs, dstAs, nextHopAs, srcPfx, 
dstPfx, srcPort, dstPort) 651 | 652 | // Remove underscores from key 653 | key = strings.Replace(key, ",_,", ",", -1) 654 | key = strings.Replace(key, "_,", "", -1) 655 | key = strings.Replace(key, ",_", "", -1) 656 | 657 | // Remove leading and trailing commas 658 | parts := strings.Split(key, "") 659 | first := 0 660 | last := len(parts) - 1 661 | if parts[0] == "," { 662 | first++ 663 | } 664 | if parts[last] == "," { 665 | last-- 666 | } 667 | key = strings.Join(parts[first:last+1], "") 668 | 669 | // Build sum for key 670 | if _, ok := buckets[key]; !ok { 671 | buckets[key] = fl.Size 672 | } else { 673 | buckets[key] += fl.Size 674 | } 675 | 676 | // Build overall sum 677 | sums.Lock.Lock() 678 | if _, ok := sums.Values[key]; !ok { 679 | sums.Values[key] = fl.Size 680 | } else { 681 | sums.Values[key] += fl.Size 682 | } 683 | sums.Lock.Unlock() 684 | } 685 | --------------------------------------------------------------------------------