├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── assets └── image.png ├── cmd └── flowhouse │ ├── config.yaml │ ├── config │ └── config.go │ └── main.go ├── go.mod ├── go.sum └── pkg ├── clickhousegw ├── clickhousegw.go └── clickhousegw_test.go ├── flowhouse └── flowhouse.go ├── frontend ├── assets │ ├── flowhouse.js │ └── index.html ├── bindata.go ├── frontend.go ├── frontend_test.go └── result.go ├── intfmapper ├── device.go └── intfmapper.go ├── ipannotator └── ipannotator.go ├── models └── flow │ └── flow.go ├── packet ├── ipfix │ ├── decode.go │ ├── decode_test.go │ ├── field_db.go │ ├── options_templates.go │ ├── packet.go │ ├── template_test.go │ └── templates.go ├── nf9 │ ├── decode.go │ ├── decode_test.go │ ├── field_db.go │ ├── packet.go │ └── templates.go ├── packet │ ├── dot1q.go │ ├── ethernet.go │ ├── ethernet_test.go │ ├── ipv4.go │ ├── ipv6.go │ ├── tcp.go │ └── udp.go └── sflow │ ├── decode.go │ ├── decode_test.go │ └── packet.go ├── routemirror ├── route_mirror.go ├── router.go └── vrf.go └── servers ├── aggregator └── aggregator.go ├── ipfix ├── ipfix_server.go ├── ipfix_template_cache.go └── sample_rate_cache.go └── sflow ├── sfserver.go └── sfserver_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | .idea -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: default test vendor vendor-deps container push gitlab_ci_check apply-vendor-lock prepare-vendor-updates 2 | 3 | all: bindata build 4 | 5 | build: 6 | cd cmd/flowhouse; go build 7 | 8 | bindata: 9 | cd pkg/frontend; go-bindata -pkg frontend assets/ 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # flowhouse 2 | 3 | Flowhouse is a [Clickhouse](https://clickhouse.tech/) based sFlow + IPFIX collector and web based analyzer that offers rich annotation and querying features. 4 | 5 | ![web ui flowhouse](assets/image.png) 6 | 7 | ## Interface Name Discovery 8 | 9 | Discovery of interface names is supported using SNMP v2 and v3. The database always stores interface namens. Not IDs. 10 | 11 | ## Static Meta Data Annotations 12 | 13 | Static meta data annotations are supported by the use of Clickhouse dicts. 
14 | 15 | `config.yaml` snippet: 16 | ``` 17 | dicts: 18 | - field: "agent" 19 | dict: "ip_addrs" 20 | expr: "tuple(IPv6NumToString(%s))" 21 | - field: "src_ip_addr" 22 | dict: "ip_addrs" 23 | expr: "tuple(IPv6NumToString(%s))" 24 | - field: "dst_ip_addr" 25 | dict: "ip_addrs" 26 | expr: "tuple(IPv6NumToString(%s))" 27 | ``` 28 | 29 | Enable dict on a clickhouse-server (extract from v24.12.3.47) 30 | ``` 31 | user@host ~ % cat /etc/clickhouse-server/config.d/ip_addrs_dictionary.xml 32 | 33 | 34 | 35 | ip_addrs 36 | 37 | 38 | /etc/clickhouse-server/ips.csv 39 | CSV 40 | 41 | 42 | 300 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | address 51 | String 52 | 53 | 54 | 55 | hostname 56 | String 57 | ? 58 | 59 | 60 | interface 61 | String 62 | ? 63 | 64 | 65 | role 66 | String 67 | ? 68 | 69 | 70 | site 71 | String 72 | ? 73 | 74 | 75 | region 76 | String 77 | ? 78 | 79 | 80 | asn 81 | UInt32 82 | 0 83 | 84 | 85 | 86 | 87 | ``` 88 | 89 | CSV for a dict: 90 | ``` 91 | user@host ~ % cat /etc/clickhouse-server/ips.csv 92 | 192.0.2.1,core01.pop01,et-0/0/0.0,backbone-router,FRA01,eu-central,4200001234 93 | 192.0.2.3,core02.pop02,et-0/0/1.0,backbone-router,FRA02,eu-central,4200001234 94 | 10.0.0.1,srv01,ens3,server,DUB01,eu-west,4200002947 95 | ``` 96 | 97 | 98 | ## Dynamic Routing Meta Data Annotations 99 | 100 | Dynamic routing meta data annotations like source and destination prefix, source, destination and nexthop ASN are supported 101 | on the basis of the [BIO routing RIS](https://github.com/bio-routing/bio-rd/tree/master/cmd/ris). 
102 | 103 | ## Installation 104 | ```go get github.com/bio-routing/flowhouse/cmd/flowhouse``` 105 | 106 | ```go install github.com/bio-routing/flowhouse/cmd/flowhouse``` 107 | 108 | ## Configuration 109 | 110 | `config.yaml` example: [cmd/flowhouse/config.yaml](https://github.com/bio-routing/flowhouse/blob/master/cmd/flowhouse/config.yaml) 111 | 112 | 113 | Format is defined here: [https://github.com/bio-routing/flowhouse/blob/master/cmd/flowhouse/config/config.go#L21](https://github.com/bio-routing/flowhouse/blob/master/cmd/flowhouse/config/config.go#L21) 114 | 115 | ## Running 116 | ``` 117 | user@host ~ % flowhouse --help 118 | Usage of flowhouse: 119 | -config.file string 120 | Config file path (YAML) (default "config.yaml") 121 | -debug 122 | Enable debug logging 123 | ``` 124 | -------------------------------------------------------------------------------- /assets/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bio-routing/flowhouse/fdc798c58712dde038678ab91126a53ddc055c36/assets/image.png -------------------------------------------------------------------------------- /cmd/flowhouse/config.yaml: -------------------------------------------------------------------------------- 1 | ris_timeout: 10 2 | listen_sflow: ":6343" 3 | listen_ipfix: ":2055" 4 | listen_http: ":9991" 5 | default_vrf: "0:0" 6 | disable_ip_annotator: true 7 | snmp: 8 | version: 2 9 | community: "PLEASE-CHANGE-ME" 10 | user: "PLEASE-CHANGE-ME" 11 | auth-key: "PLEASE-CHANGE-ME" 12 | privacy-passphrase: "PLEASE-CHANGE-ME" 13 | clickhouse: 14 | address: "localhost:9000" 15 | user: "PLEASE-CHANGE-ME" 16 | password: "PLEASE-CHANGE-ME" 17 | database: "flows" 18 | dicts: 19 | - field: "agent" 20 | dict: "ip_addrs" 21 | expr: "tuple(IPv6NumToString(%s))" 22 | - field: "src_ip_addr" 23 | dict: "ip_addrs" 24 | expr: "tuple(IPv6NumToString(%s))" 25 | - field: "dst_ip_addr" 26 | dict: "ip_addrs" 27 | expr: 
"tuple(IPv6NumToString(%s))" 28 | routers: 29 | - name: "core01.pop01" 30 | address: 192.0.2.1 31 | ris_instances: 32 | - "ris01.pop01:4321" 33 | vrfs: ["0:0"] 34 | - name: "core02.pop02" 35 | address: 192.0.2.2 36 | -------------------------------------------------------------------------------- /cmd/flowhouse/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "io/ioutil" 5 | 6 | "github.com/bio-routing/bio-rd/routingtable/vrf" 7 | "github.com/bio-routing/flowhouse/pkg/clickhousegw" 8 | "github.com/bio-routing/flowhouse/pkg/frontend" 9 | "github.com/pkg/errors" 10 | "gopkg.in/yaml.v2" 11 | 12 | bnet "github.com/bio-routing/bio-rd/net" 13 | ) 14 | 15 | const ( 16 | listenSFlowDefault = ":6343" 17 | listenHTTPDefault = ":9991" 18 | ) 19 | 20 | // Config represents a config file 21 | type Config struct { 22 | RISTimeout uint64 `yaml:"ris_timeout"` 23 | SNMP *SNMPConfig `yaml:"snmp"` 24 | DefaultVRF string `yaml:"default_vrf"` 25 | defaultVRF uint64 26 | ListenSFlow string `yaml:"listen_sflow"` 27 | ListenIPFIX string `yaml:"listen_ipfix"` 28 | ListenHTTP string `yaml:"listen_http"` 29 | Dicts frontend.Dicts `yaml:"dicts"` 30 | Clickhouse *clickhousegw.ClickhouseConfig `yaml:"clickhouse"` 31 | Routers []*Router `yaml:"routers"` 32 | DisableIPAnnotator bool `yaml:"disable_ip_annotator"` 33 | } 34 | 35 | type SNMPConfig struct { 36 | Version uint `yaml:"version"` 37 | Community string `yaml:"community"` 38 | User string `yaml:"user"` 39 | AuthPassphrase string `yaml:"auth-key"` 40 | PrivacyPassphrase string `yaml:"privacy-passphrase"` 41 | } 42 | 43 | func (c *Config) load() error { 44 | if c.RISTimeout == 0 { 45 | c.RISTimeout = 10 46 | } 47 | 48 | if c.ListenSFlow == "" { 49 | c.ListenSFlow = listenSFlowDefault 50 | } 51 | 52 | if c.ListenHTTP == "" { 53 | c.ListenHTTP = listenHTTPDefault 54 | } 55 | 56 | if c.DefaultVRF != "" { 57 | vrfID, err := 
vrf.ParseHumanReadableRouteDistinguisher(c.DefaultVRF) 58 | if err != nil { 59 | return errors.Wrap(err, "Unable to perse default VRF") 60 | } 61 | 62 | c.defaultVRF = vrfID 63 | } 64 | 65 | for _, r := range c.Routers { 66 | err := r.load() 67 | if err != nil { 68 | return errors.Wrapf(err, "Unable to load config for router %q", r.Name) 69 | } 70 | } 71 | 72 | return nil 73 | } 74 | 75 | // GetDefaultVRF gets the default VRF id 76 | func (c *Config) GetDefaultVRF() uint64 { 77 | return c.defaultVRF 78 | } 79 | 80 | // Router represents a router 81 | type Router struct { 82 | Name string `yaml:"name"` 83 | Address string `yaml:"address"` 84 | address bnet.IP 85 | RISInstances []string `yaml:"ris_instances"` 86 | VRFs []string `yaml:"vrfs"` 87 | vrfs []uint64 88 | } 89 | 90 | // GetAddress gets a routers address 91 | func (r *Router) GetAddress() bnet.IP { 92 | return r.address 93 | } 94 | 95 | // GetVRFs gets a routers VRFs 96 | func (r *Router) GetVRFs() []uint64 { 97 | return r.vrfs 98 | } 99 | 100 | func (r *Router) load() error { 101 | a, err := bnet.IPFromString(r.Address) 102 | if err != nil { 103 | return errors.Wrap(err, "Unable to parse IP address") 104 | } 105 | 106 | r.address = a 107 | 108 | for _, x := range r.VRFs { 109 | vrfRD, err := vrf.ParseHumanReadableRouteDistinguisher(x) 110 | if err != nil { 111 | return errors.Wrapf(err, "Unable to parse VRF RD %q", x) 112 | } 113 | 114 | r.vrfs = append(r.vrfs, vrfRD) 115 | } 116 | 117 | return nil 118 | } 119 | 120 | // GetConfig gets the configuration 121 | func GetConfig(fp string) (*Config, error) { 122 | fc, err := ioutil.ReadFile(fp) 123 | if err != nil { 124 | return nil, errors.Wrap(err, "Unable to read file") 125 | } 126 | 127 | c := &Config{} 128 | err = yaml.Unmarshal(fc, c) 129 | if err != nil { 130 | return nil, errors.Wrap(err, "Unable to unmarshal") 131 | } 132 | 133 | err = c.Validate() 134 | if err != nil { 135 | return nil, errors.Wrap(err, "Unable to validate config") 136 | } 137 | 
c.load() 138 | 139 | return c, nil 140 | } 141 | 142 | func (c *Config) Validate() error { 143 | if c.Clickhouse.Sharded && c.Clickhouse.Cluster == "" { 144 | return errors.New("cluster must be set when Clickhouse is replicated") 145 | } 146 | return nil 147 | } 148 | 149 | // GetRISList gets a list of all referenced RIS instances 150 | func (c *Config) GetRISList() []string { 151 | m := make(map[string]struct{}) 152 | 153 | for _, rtr := range c.Routers { 154 | for _, x := range rtr.RISInstances { 155 | m[x] = struct{}{} 156 | } 157 | } 158 | 159 | ret := make([]string, 0) 160 | for k := range m { 161 | ret = append(ret, k) 162 | } 163 | 164 | return ret 165 | } 166 | -------------------------------------------------------------------------------- /cmd/flowhouse/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "sync" 6 | "time" 7 | 8 | "github.com/bio-routing/flowhouse/cmd/flowhouse/config" 9 | "github.com/bio-routing/flowhouse/pkg/flowhouse" 10 | 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | var ( 15 | configFilePath = flag.String("config.file", "config.yaml", "Config file path (YAML)") 16 | debug = flag.Bool("debug", false, "Enable debug logging") 17 | ) 18 | 19 | func main() { 20 | flag.Parse() 21 | 22 | if *debug { 23 | log.SetLevel(log.DebugLevel) 24 | log.Debug("logLevel: DEBUG") 25 | } else { 26 | log.SetLevel(log.InfoLevel) 27 | } 28 | 29 | cfg, err := config.GetConfig(*configFilePath) 30 | if err != nil { 31 | log.WithError(err).Fatal("Unable to get config") 32 | } 33 | 34 | fhcfg := &flowhouse.Config{ 35 | ChCfg: cfg.Clickhouse, 36 | SNMP: cfg.SNMP, 37 | RISTimeout: time.Duration(cfg.RISTimeout) * time.Second, 38 | ListenSflow: cfg.ListenSFlow, 39 | ListenIPFIX: cfg.ListenIPFIX, 40 | ListenHTTP: cfg.ListenHTTP, 41 | DefaultVRF: cfg.GetDefaultVRF(), 42 | Dicts: cfg.Dicts, 43 | DisableIPAnnotator: cfg.DisableIPAnnotator, 44 | } 45 | 46 | fh, err := 
flowhouse.New(fhcfg) 47 | if err != nil { 48 | log.WithError(err).Fatal("Unable to create flowhouse instance") 49 | } 50 | 51 | for _, rtr := range cfg.Routers { 52 | fh.AddAgent(rtr.Name, rtr.GetAddress(), rtr.RISInstances, rtr.GetVRFs()) 53 | } 54 | 55 | var wg sync.WaitGroup 56 | wg.Add(1) 57 | go func() { 58 | defer wg.Done() 59 | fh.Run() 60 | }() 61 | 62 | wg.Wait() 63 | } 64 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/bio-routing/flowhouse 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.4 6 | 7 | require ( 8 | github.com/ClickHouse/clickhouse-go v1.4.1 9 | github.com/bio-routing/bio-rd v0.0.3-pre5 10 | github.com/bio-routing/tflow2 v0.0.0-20200122091514-89924193643e 11 | github.com/gosnmp/gosnmp v1.38.0 12 | github.com/pkg/errors v0.9.1 13 | github.com/prometheus/client_golang v1.11.1 14 | github.com/sirupsen/logrus v1.6.0 15 | github.com/stretchr/testify v1.9.0 16 | google.golang.org/grpc v1.56.3 17 | gopkg.in/yaml.v2 v2.3.0 18 | ) 19 | 20 | require ( 21 | github.com/beorn7/perks v1.0.1 // indirect 22 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 23 | github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect 24 | github.com/davecgh/go-spew v1.1.1 // indirect 25 | github.com/golang/protobuf v1.5.3 // indirect 26 | github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect 27 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 28 | github.com/pmezard/go-difflib v1.0.0 // indirect 29 | github.com/prometheus/client_model v0.2.0 // indirect 30 | github.com/prometheus/common v0.26.0 // indirect 31 | github.com/prometheus/procfs v0.6.0 // indirect 32 | golang.org/x/net v0.38.0 // indirect 33 | golang.org/x/sys v0.31.0 // indirect 34 | golang.org/x/text v0.23.0 // indirect 35 | google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect 36 | 
google.golang.org/protobuf v1.30.0 // indirect 37 | gopkg.in/yaml.v3 v3.0.1 // indirect 38 | ) 39 | -------------------------------------------------------------------------------- /pkg/clickhousegw/clickhousegw.go: -------------------------------------------------------------------------------- 1 | package clickhousegw 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "net" 7 | "strings" 8 | "time" 9 | 10 | "github.com/bio-routing/flowhouse/pkg/models/flow" 11 | "github.com/pkg/errors" 12 | 13 | "github.com/ClickHouse/clickhouse-go" 14 | 15 | bnet "github.com/bio-routing/bio-rd/net" 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | const tableName = "flows" 20 | 21 | // ClickHouseGateway is a wrapper for Clickhouse 22 | type ClickHouseGateway struct { 23 | cfg *ClickhouseConfig 24 | db *sql.DB 25 | } 26 | 27 | // ClickhouseConfig represents a clickhouse client config 28 | type ClickhouseConfig struct { 29 | Host string `yaml:"host"` 30 | Address string `yaml:"address"` 31 | User string `yaml:"user"` 32 | Password string `yaml:"password"` 33 | Database string `yaml:"database"` 34 | Sharded bool `yaml:"sharded"` 35 | Cluster string `yaml:"cluster"` 36 | Secure bool `yaml:"secure"` 37 | } 38 | 39 | // New instantiates a new ClickHouseGateway 40 | func New(cfg *ClickhouseConfig) (*ClickHouseGateway, error) { 41 | dsn := fmt.Sprintf("tcp://%s?username=%s&password=%s&database=%s&read_timeout=10&write_timeout=20&secure=%t", 42 | cfg.Address, cfg.User, cfg.Password, cfg.Database, cfg.Secure) 43 | c, err := sql.Open("clickhouse", dsn) 44 | if err != nil { 45 | return nil, errors.Wrap(err, "sql.Open failed") 46 | } 47 | 48 | err = c.Ping() 49 | if err != nil { 50 | if exception, ok := err.(*clickhouse.Exception); ok { 51 | return nil, errors.Wrapf(err, "[%d] %s \n%s", exception.Code, exception.Message, exception.StackTrace) 52 | } 53 | 54 | return nil, errors.Wrap(err, "c.Ping failed") 55 | } 56 | 57 | chgw := &ClickHouseGateway{ 58 | cfg: cfg, 59 | db: c, 60 | } 61 | 
62 | err = chgw.createFlowsSchemaIfNotExists() 63 | if err != nil { 64 | log.Errorf("Unable to create flows schema: %v", err) 65 | } 66 | 67 | return chgw, nil 68 | } 69 | 70 | func (c *ClickHouseGateway) createFlowsSchemaIfNotExists() error { 71 | zookeeperPathTimestamp := time.Now().Unix() 72 | _, err := c.db.Exec(c.getCreateTableSchemaDDL(true, zookeeperPathTimestamp)) 73 | 74 | if err != nil { 75 | return errors.Wrap(err, "Query failed") 76 | } 77 | 78 | if c.cfg.Sharded { 79 | _, err = c.db.Exec(c.getCreateTableSchemaDDL(false, zookeeperPathTimestamp)) 80 | } 81 | if err != nil { 82 | return errors.Wrap(err, "Query failed") 83 | } 84 | 85 | return nil 86 | } 87 | 88 | func (c *ClickHouseGateway) getCreateTableSchemaDDL(isBaseTable bool, zookeeperPathPrefix int64) string { 89 | tableDDl := ` 90 | CREATE TABLE IF NOT EXISTS %s%s ( 91 | agent IPv6, 92 | int_in String, 93 | int_out String, 94 | tos UInt8, 95 | dscp UInt8, 96 | src_ip_addr IPv6, 97 | dst_ip_addr IPv6, 98 | src_ip_pfx_addr IPv6, 99 | src_ip_pfx_len UInt8, 100 | dst_ip_pfx_addr IPv6, 101 | dst_ip_pfx_len UInt8, 102 | nexthop IPv6, 103 | next_asn UInt32, 104 | src_asn UInt32, 105 | dst_asn UInt32, 106 | ip_protocol UInt8, 107 | src_port UInt16, 108 | dst_port UInt16, 109 | timestamp DateTime, 110 | size UInt64, 111 | packets UInt64, 112 | samplerate UInt64 113 | ) ENGINE = %s 114 | PARTITION BY toStartOfTenMinutes(timestamp) 115 | ORDER BY (timestamp) 116 | %s 117 | SETTINGS index_granularity = 8192 118 | ` 119 | ttl := "TTL timestamp + INTERVAL 14 DAY" 120 | onClusterStatement := "" 121 | if c.cfg.Sharded { 122 | onClusterStatement = " ON CLUSTER " + c.cfg.Cluster 123 | } 124 | 125 | if isBaseTable { 126 | return fmt.Sprintf(tableDDl, c.getBaseTableName(), onClusterStatement, c.getBaseTableEngineDDL(zookeeperPathPrefix), ttl) 127 | } else { 128 | return fmt.Sprintf(tableDDl, tableName, onClusterStatement, c.getDistributedTableDDl(), "") 129 | } 130 | } 131 | 132 | func (c *ClickHouseGateway) 
getBaseTableName() string { 133 | if c.cfg.Sharded { 134 | return "_" + c.cfg.Database + "." + tableName + "_base" 135 | } 136 | 137 | return tableName 138 | } 139 | 140 | func (c *ClickHouseGateway) getBaseTableEngineDDL(zookeeperPathPrefix int64) string { 141 | if c.cfg.Sharded { 142 | // TODO: make zookeeper path configurable 143 | return fmt.Sprintf( 144 | "ReplicatedMergeTree('/clickhouse/tables/{shard}/%s/%s_%d', '{replica}')", 145 | c.cfg.Database, 146 | tableName, 147 | zookeeperPathPrefix) 148 | } 149 | 150 | return "MergeTree()" 151 | } 152 | func (c *ClickHouseGateway) getDistributedTableDDl() string { 153 | return fmt.Sprintf( 154 | "Distributed(%s, %s, %s, %s)", 155 | c.cfg.Cluster, 156 | "_"+c.cfg.Database, 157 | tableName+"_base", 158 | "rand()") 159 | } 160 | 161 | // InsertFlows inserts flows into clickhouse 162 | func (c *ClickHouseGateway) InsertFlows(flows []*flow.Flow) error { 163 | tx, err := c.db.Begin() 164 | if err != nil { 165 | return errors.Wrap(err, "Begin failed") 166 | } 167 | 168 | stmt, err := tx.Prepare(`INSERT INTO flows ( 169 | agent, 170 | int_in, 171 | int_out, 172 | tos, 173 | dscp, 174 | src_ip_addr, 175 | dst_ip_addr, 176 | src_ip_pfx_addr, 177 | src_ip_pfx_len, 178 | dst_ip_pfx_addr, 179 | dst_ip_pfx_len, 180 | nexthop, 181 | next_asn, 182 | src_asn, 183 | dst_asn, 184 | ip_protocol, 185 | src_port, 186 | dst_port, 187 | timestamp, 188 | size, 189 | packets, 190 | samplerate 191 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
, ?, ?, ?)`) 192 | defer stmt.Close() 193 | if err != nil { 194 | return errors.Wrap(err, "Prepare failed") 195 | } 196 | 197 | for _, fl := range flows { 198 | _, err := stmt.Exec( 199 | fl.Agent.ToNetIP(), 200 | fl.IntIn, 201 | fl.IntOut, 202 | fl.TOS, 203 | dscp(fl.TOS), 204 | fl.SrcAddr.ToNetIP(), 205 | fl.DstAddr.ToNetIP(), 206 | addrToNetIP(fl.SrcPfx.Addr()), 207 | fl.SrcPfx.Pfxlen(), 208 | addrToNetIP(fl.DstPfx.Addr()), 209 | fl.DstPfx.Pfxlen(), 210 | fl.NextHop.ToNetIP(), 211 | fl.NextAs, 212 | fl.SrcAs, 213 | fl.DstAs, 214 | fl.Protocol, 215 | fl.SrcPort, 216 | fl.DstPort, 217 | fl.Timestamp, 218 | fl.Size, 219 | fl.Packets, 220 | fl.Samplerate, 221 | ) 222 | if err != nil { 223 | return errors.Wrap(err, "Exec failed") 224 | } 225 | } 226 | 227 | err = tx.Commit() 228 | if err != nil { 229 | return errors.Wrap(err, "Commit failed") 230 | } 231 | 232 | return nil 233 | } 234 | 235 | func dscp(tos uint8) uint8 { 236 | // DSCP is the first 6 bits of the TOS field 237 | 238 | return tos >> 2 239 | } 240 | 241 | func addrToNetIP(addr *bnet.IP) net.IP { 242 | if addr == nil { 243 | return net.IP([]byte{0, 0, 0, 0}) 244 | } 245 | 246 | return addr.ToNetIP() 247 | } 248 | 249 | // Close closes the database handler 250 | func (c *ClickHouseGateway) Close() { 251 | c.db.Close() 252 | } 253 | 254 | // GetColumnValues gets all unique values of a column 255 | func (c *ClickHouseGateway) GetColumnValues(columnName string) ([]string, error) { 256 | columnName = strings.Replace(columnName, " ", "", -1) 257 | 258 | query := fmt.Sprintf("SELECT %s FROM flows GROUP BY %s", columnName, columnName) 259 | res, err := c.db.Query(query) 260 | defer res.Close() 261 | 262 | if err != nil { 263 | return nil, errors.Wrap(err, "Exec failed") 264 | } 265 | 266 | result := make([]string, 0) 267 | 268 | for { 269 | v := "" 270 | res.Scan(&v) 271 | 272 | result = append(result, v) 273 | if !res.Next() { 274 | break 275 | } 276 | } 277 | 278 | return result, nil 279 | } 280 | 281 | // 
GetDictValues gets all values of a certain dicts attribute 282 | func (c *ClickHouseGateway) GetDictValues(dictName string, attr string) ([]string, error) { 283 | dictName = strings.Replace(dictName, " ", "", -1) 284 | attr = strings.Replace(attr, " ", "", -1) 285 | 286 | query := fmt.Sprintf("SELECT %s FROM dictionary(%s) GROUP BY %s", attr, dictName, attr) 287 | res, err := c.db.Query(query) 288 | defer res.Close() 289 | 290 | if err != nil { 291 | return nil, errors.Wrap(err, "Exec failed") 292 | } 293 | 294 | result := make([]string, 0) 295 | 296 | for { 297 | v := "" 298 | res.Scan(&v) 299 | 300 | result = append(result, v) 301 | if !res.Next() { 302 | break 303 | } 304 | } 305 | 306 | return result, nil 307 | } 308 | 309 | // GetDictFields gets the names of all fields in a dictionary 310 | func (c *ClickHouseGateway) GetDictFields(dictName string) ([]string, error) { 311 | dictName = strings.Replace(dictName, " ", "", -1) 312 | 313 | query := fmt.Sprintf("SELECT attribute.names FROM system.dictionaries WHERE name = '%s';", dictName) 314 | res, err := c.db.Query(query) 315 | defer res.Close() 316 | if err != nil { 317 | return nil, errors.Wrap(err, "Exec failed") 318 | } 319 | 320 | result := make([]string, 0) 321 | res.Next() 322 | err = res.Scan(&result) 323 | if err != nil { 324 | return nil, err 325 | } 326 | 327 | return result, nil 328 | } 329 | 330 | // DescribeTable gets the names of all fields of a table 331 | func (c *ClickHouseGateway) DescribeTable(tableName string) ([]string, error) { 332 | tableName = strings.Replace(tableName, " ", "", -1) 333 | 334 | query := fmt.Sprintf("DESCRIBE %s", tableName) 335 | res, err := c.db.Query(query) 336 | defer res.Close() 337 | 338 | if err != nil { 339 | return nil, errors.Wrap(err, "Exec failed") 340 | } 341 | 342 | result := make([]string, 0) 343 | 344 | for { 345 | name := "" 346 | trash := "" 347 | res.Scan(&name, &trash, &trash, &trash, &trash, &trash, &trash) 348 | 349 | result = append(result, name) 350 
| if !res.Next() { 351 | break 352 | } 353 | } 354 | 355 | return result, nil 356 | } 357 | 358 | // GetDatabaseName gets the databases name 359 | func (c *ClickHouseGateway) GetDatabaseName() string { 360 | return c.cfg.Database 361 | } 362 | 363 | // Query executs an SQL query 364 | func (c *ClickHouseGateway) Query(q string) (*sql.Rows, error) { 365 | return c.db.Query(q) 366 | } 367 | -------------------------------------------------------------------------------- /pkg/clickhousegw/clickhousegw_test.go: -------------------------------------------------------------------------------- 1 | package clickhousegw 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestClickHouseGateway_getCreateTableSchemaDDL(t *testing.T) { 11 | zookeeperPathPrefix := time.Now().Unix() 12 | type fields struct { 13 | cfg *ClickhouseConfig 14 | db *sql.DB 15 | } 16 | type args struct { 17 | isBaseTable bool 18 | zookeeperPathPrefix int64 19 | } 20 | tests := []struct { 21 | name string 22 | fields fields 23 | args args 24 | want string 25 | }{ 26 | { 27 | name: "Test getCreateTableSchemaDDL for simple MergeTree", 28 | fields: fields{ 29 | cfg: &ClickhouseConfig{ 30 | Database: "test", 31 | Sharded: false, 32 | }, 33 | }, 34 | args: args{ 35 | isBaseTable: true, 36 | zookeeperPathPrefix: zookeeperPathPrefix, 37 | }, 38 | want: ` 39 | CREATE TABLE IF NOT EXISTS flows ( 40 | agent IPv6, 41 | int_in String, 42 | int_out String, 43 | src_ip_addr IPv6, 44 | dst_ip_addr IPv6, 45 | src_ip_pfx_addr IPv6, 46 | src_ip_pfx_len UInt8, 47 | dst_ip_pfx_addr IPv6, 48 | dst_ip_pfx_len UInt8, 49 | nexthop IPv6, 50 | next_asn UInt32, 51 | src_asn UInt32, 52 | dst_asn UInt32, 53 | ip_protocol UInt8, 54 | src_port UInt16, 55 | dst_port UInt16, 56 | timestamp DateTime, 57 | size UInt64, 58 | packets UInt64, 59 | samplerate UInt64 60 | ) ENGINE = MergeTree() 61 | PARTITION BY toStartOfTenMinutes(timestamp) 62 | ORDER BY (timestamp) 63 | TTL timestamp + INTERVAL 14 DAY 
64 | SETTINGS index_granularity = 8192 65 | `, 66 | }, 67 | { 68 | name: "Test getCreateTableSchemaDDL for sharded base table with engine ReplicatedMergeTree", 69 | fields: fields{ 70 | cfg: &ClickhouseConfig{ 71 | Database: "test", 72 | Cluster: "test_cluster", 73 | Sharded: true, 74 | }, 75 | }, 76 | args: args{ 77 | isBaseTable: true, 78 | zookeeperPathPrefix: zookeeperPathPrefix, 79 | }, 80 | want: fmt.Sprintf(` 81 | CREATE TABLE IF NOT EXISTS _test.flows_base ON CLUSTER test_cluster ( 82 | agent IPv6, 83 | int_in String, 84 | int_out String, 85 | src_ip_addr IPv6, 86 | dst_ip_addr IPv6, 87 | src_ip_pfx_addr IPv6, 88 | src_ip_pfx_len UInt8, 89 | dst_ip_pfx_addr IPv6, 90 | dst_ip_pfx_len UInt8, 91 | nexthop IPv6, 92 | next_asn UInt32, 93 | src_asn UInt32, 94 | dst_asn UInt32, 95 | ip_protocol UInt8, 96 | src_port UInt16, 97 | dst_port UInt16, 98 | timestamp DateTime, 99 | size UInt64, 100 | packets UInt64, 101 | samplerate UInt64 102 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/test/flows_%d', '{replica}') 103 | PARTITION BY toStartOfTenMinutes(timestamp) 104 | ORDER BY (timestamp) 105 | TTL timestamp + INTERVAL 14 DAY 106 | SETTINGS index_granularity = 8192 107 | `, zookeeperPathPrefix), 108 | }, 109 | { 110 | name: "Test getCreateTableSchemaDDL for Distributed Table", 111 | fields: fields{ 112 | cfg: &ClickhouseConfig{ 113 | Database: "test", 114 | Sharded: true, 115 | Cluster: "test_cluster", 116 | }, 117 | }, 118 | args: args{ 119 | isBaseTable: false, 120 | zookeeperPathPrefix: zookeeperPathPrefix, 121 | }, 122 | want: ` 123 | CREATE TABLE IF NOT EXISTS flows ON CLUSTER test_cluster ( 124 | agent IPv6, 125 | int_in String, 126 | int_out String, 127 | src_ip_addr IPv6, 128 | dst_ip_addr IPv6, 129 | src_ip_pfx_addr IPv6, 130 | src_ip_pfx_len UInt8, 131 | dst_ip_pfx_addr IPv6, 132 | dst_ip_pfx_len UInt8, 133 | nexthop IPv6, 134 | next_asn UInt32, 135 | src_asn UInt32, 136 | dst_asn UInt32, 137 | ip_protocol UInt8, 138 | src_port UInt16, 139 | 
dst_port UInt16, 140 | timestamp DateTime, 141 | size UInt64, 142 | packets UInt64, 143 | samplerate UInt64 144 | ) ENGINE = Distributed(test_cluster, _test, flows_base, rand()) 145 | PARTITION BY toStartOfTenMinutes(timestamp) 146 | ORDER BY (timestamp) 147 | 148 | SETTINGS index_granularity = 8192 149 | `, 150 | }, 151 | } 152 | for _, tt := range tests { 153 | t.Run(tt.name, func(t *testing.T) { 154 | c := &ClickHouseGateway{ 155 | cfg: tt.fields.cfg, 156 | db: tt.fields.db, 157 | } 158 | if got := c.getCreateTableSchemaDDL(tt.args.isBaseTable, zookeeperPathPrefix); got != tt.want { 159 | t.Errorf("getCreateTableSchemaDDL() = %v, want %v", got, tt.want) 160 | } 161 | }) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /pkg/flowhouse/flowhouse.go: -------------------------------------------------------------------------------- 1 | package flowhouse 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "runtime" 7 | "runtime/debug" 8 | "time" 9 | 10 | "github.com/bio-routing/bio-rd/util/grpc/clientmanager" 11 | "github.com/bio-routing/flowhouse/cmd/flowhouse/config" 12 | "github.com/bio-routing/flowhouse/pkg/clickhousegw" 13 | "github.com/bio-routing/flowhouse/pkg/frontend" 14 | "github.com/bio-routing/flowhouse/pkg/intfmapper" 15 | "github.com/bio-routing/flowhouse/pkg/ipannotator" 16 | "github.com/bio-routing/flowhouse/pkg/models/flow" 17 | "github.com/bio-routing/flowhouse/pkg/routemirror" 18 | "github.com/bio-routing/flowhouse/pkg/servers/ipfix" 19 | "github.com/bio-routing/flowhouse/pkg/servers/sflow" 20 | "github.com/pkg/errors" 21 | "github.com/prometheus/client_golang/prometheus/promhttp" 22 | "google.golang.org/grpc" 23 | "google.golang.org/grpc/keepalive" 24 | 25 | bnet "github.com/bio-routing/bio-rd/net" 26 | log "github.com/sirupsen/logrus" 27 | ) 28 | 29 | // Flowhouse is an clickhouse based sflow collector 30 | type Flowhouse struct { 31 | cfg *Config 32 | ifMapper *intfmapper.IntfMapper 33 | routeMirror 
*routemirror.RouteMirror 34 | grpcClientManager *clientmanager.ClientManager 35 | ipa *ipannotator.IPAnnotator 36 | sfs *sflow.SflowServer 37 | ifxs *ipfix.IPFIXServer 38 | chgw *clickhousegw.ClickHouseGateway 39 | fe *frontend.Frontend 40 | flowsRX chan []*flow.Flow 41 | } 42 | 43 | // Config is flow house instances configuration 44 | type Config struct { 45 | ChCfg *clickhousegw.ClickhouseConfig 46 | SNMP *config.SNMPConfig 47 | RISTimeout time.Duration 48 | ListenSflow string 49 | ListenIPFIX string 50 | ListenHTTP string 51 | DefaultVRF uint64 52 | Dicts frontend.Dicts 53 | DisableIPAnnotator bool 54 | } 55 | 56 | // ClickhouseConfig represents a clickhouse client config 57 | type ClickhouseConfig struct { 58 | Host string 59 | Address string 60 | User string 61 | Password string 62 | Database string 63 | } 64 | 65 | // New creates a new flowhouse instance 66 | func New(cfg *Config) (*Flowhouse, error) { 67 | fh := &Flowhouse{ 68 | cfg: cfg, 69 | ifMapper: intfmapper.New(), 70 | routeMirror: routemirror.New(), 71 | grpcClientManager: clientmanager.New(), 72 | flowsRX: make(chan []*flow.Flow, 1024), 73 | } 74 | 75 | if !cfg.DisableIPAnnotator { 76 | fh.ipa = ipannotator.New(fh.routeMirror) 77 | } 78 | 79 | sfs, err := sflow.New(fh.cfg.ListenSflow, runtime.NumCPU(), fh.flowsRX, fh.ifMapper) 80 | if err != nil { 81 | return nil, errors.Wrap(err, "Unable to start sflow server") 82 | } 83 | fh.sfs = sfs 84 | 85 | ifxs, err := ipfix.New(fh.cfg.ListenIPFIX, runtime.NumCPU(), fh.flowsRX, fh.ifMapper) 86 | if err != nil { 87 | return nil, errors.Wrap(err, "Unable to start IPFIX server") 88 | } 89 | fh.ifxs = ifxs 90 | 91 | chgw, err := clickhousegw.New(fh.cfg.ChCfg) 92 | if err != nil { 93 | return nil, errors.Wrap(err, "Unable to create clickhouse wrapper") 94 | } 95 | fh.chgw = chgw 96 | 97 | fh.fe = frontend.New(fh.chgw, cfg.Dicts) 98 | return fh, nil 99 | } 100 | 101 | // AddAgent adds an agent 102 | func (f *Flowhouse) AddAgent(name string, addr bnet.IP, risAddrs 
[]string, vrfs []uint64) { 103 | if f.cfg.SNMP != nil { 104 | f.ifMapper.AddDevice(addr, f.cfg.SNMP) 105 | } 106 | 107 | rtSource := make([]*grpc.ClientConn, 0) 108 | for _, risAddr := range risAddrs { 109 | f.grpcClientManager.AddIfNotExists(risAddr, grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{ 110 | Time: f.cfg.RISTimeout, 111 | Timeout: f.cfg.RISTimeout, 112 | PermitWithoutStream: true, 113 | })) 114 | 115 | rtSource = append(rtSource, f.grpcClientManager.Get(risAddr)) 116 | } 117 | 118 | for _, v := range vrfs { 119 | f.routeMirror.AddTarget(name, addr, rtSource, v) 120 | } 121 | } 122 | 123 | // Run runs flowhouse 124 | func (f *Flowhouse) Run() { 125 | f.installHTTPHandlers(f.fe) 126 | go http.ListenAndServe(f.cfg.ListenHTTP, nil) 127 | log.WithField("address", f.cfg.ListenHTTP).Info("Listening for HTTP requests") 128 | 129 | for { 130 | flows := <-f.flowsRX 131 | 132 | if f.ipa != nil { 133 | for _, fl := range flows { 134 | fl.VRFIn = f.cfg.DefaultVRF 135 | fl.VRFOut = f.cfg.DefaultVRF 136 | 137 | err := f.ipa.Annotate(fl) 138 | if err != nil { 139 | log.WithError(err).Info("Annotating failed") 140 | } 141 | } 142 | } 143 | 144 | err := f.chgw.InsertFlows(flows) 145 | if err != nil { 146 | log.WithError(err).Error("Insert failed") 147 | } 148 | } 149 | } 150 | 151 | func recoveryMiddleware(next http.Handler) http.Handler { 152 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 153 | defer func() { 154 | if err := recover(); err != nil { 155 | log.Printf("PANIC: %v\n%s", err, debug.Stack()) 156 | http.Error(w, 157 | fmt.Sprintf("Internal server error: %v", err), 158 | http.StatusInternalServerError) 159 | } 160 | }() 161 | next.ServeHTTP(w, r) 162 | }) 163 | } 164 | 165 | func (f *Flowhouse) installHTTPHandlers(fe *frontend.Frontend) { 166 | http.HandleFunc("/", fe.IndexHandler) 167 | http.HandleFunc("/flowhouse.js", fe.FlowhouseJSHandler) 168 | http.Handle("/query", 
recoveryMiddleware(http.HandlerFunc(fe.QueryHandler(false)))) 169 | http.Handle("/query/flat", recoveryMiddleware(http.HandlerFunc(fe.QueryHandler(true)))) 170 | http.Handle("/dict_values/", recoveryMiddleware(http.HandlerFunc(fe.GetDictValues))) 171 | http.Handle("/metrics", promhttp.Handler()) 172 | } 173 | -------------------------------------------------------------------------------- /pkg/frontend/assets/flowhouse.js: -------------------------------------------------------------------------------- 1 | var filtersCount = 0; 2 | 3 | $(document).ready(function() { 4 | var start = formatTimestamp(new Date(((new Date() / 1000) - 900 - new Date().getTimezoneOffset() * 60)* 1000)); 5 | if ($("#time_start").val() == "") { 6 | $("#time_start").val(start); 7 | } 8 | 9 | var end = formatTimestamp(new Date(((new Date() / 1000) - new Date().getTimezoneOffset() * 60)* 1000)); 10 | if ($("#time_end").val() == "") { 11 | $("#time_end").val(end); 12 | } 13 | 14 | $("#filterPlus").click(addFilter); 15 | $("form").on('submit', submitQuery); 16 | 17 | google.charts.load('current', { 18 | 'packages': ['corechart'] 19 | }); 20 | 21 | window.onhashchange = function () { 22 | google.charts.setOnLoadCallback(drawChart); 23 | } 24 | 25 | google.charts.setOnLoadCallback(drawChart); 26 | 27 | populateFields(); 28 | }); 29 | 30 | function addFilter() { 31 | const filterTemplate = $("#filterTemplate").html().replace(/__NUM__/g, filtersCount); 32 | $("#filters").append(filterTemplate); 33 | 34 | const $filterField = $(`#filter_field\\[${filtersCount}\\]`); 35 | const $filterValue = $(`#filter_value\\[${filtersCount}\\]`); 36 | const $filterRemove = $(`#filter_remove\\[${filtersCount}\\]`); 37 | 38 | $filterField.change(function() { 39 | const fieldName = $(this).val(); 40 | const filterNum = $(this).attr("id").match(/\d+/)[0]; 41 | $filterValue.attr("name", fieldName); 42 | loadValues(filterNum, fieldName); 43 | }); 44 | 45 | $filterRemove.click(function() { 46 | 
$(this).closest('.row').remove(); 47 | }); 48 | 49 | filtersCount++; 50 | } 51 | 52 | 53 | function parseParams(str) { 54 | return str.split('&').reduce(function(params, param) { 55 | const [key, value] = param.split('=').map(decodeURIComponent); 56 | params[key] = value.replace(/\+/g, ' '); 57 | return params; 58 | }, {}); 59 | } 60 | 61 | function populateFields() { 62 | var query = location.href.split("#")[1]; 63 | if (!query) { 64 | return; 65 | } 66 | 67 | var queryEquations = query.split('&'); 68 | for (var i = 0; i < queryEquations.length; i++) { 69 | var e = queryEquations[i].split('='); 70 | var k = e[0]; 71 | var v = decodeURIComponent(e[1]); 72 | 73 | if (k == "breakdown") { 74 | $("#breakdown option[value=" + v + "]").attr('selected', 'selected'); 75 | continue; 76 | } 77 | 78 | if (k == "time_start") { 79 | $("#time_start").val(v); 80 | continue; 81 | } 82 | 83 | if (k == "time_end") { 84 | $("#time_end").val(v); 85 | continue; 86 | } 87 | 88 | if (k == "topFlows") { 89 | $("#topFlows").val(v); 90 | continue; 91 | } 92 | 93 | if (k.match(/^filter_field/)) { 94 | continue; 95 | } 96 | 97 | var fieldIndex = addFilter(); 98 | $("#filter_field\\[" + fieldIndex + "\\]").val(k); 99 | $("#filter_field\\[" + fieldIndex + "\\]").trigger("change"); 100 | $("#filter_value\\[" + fieldIndex + "\\]").val(v); 101 | } 102 | } 103 | 104 | function submitQuery() { 105 | event.preventDefault(); 106 | 107 | // Validate 'topFlows' box 108 | const topFlows = $('#topFlows').val(); 109 | const topFlowsInt = parseInt(topFlows, 10); 110 | if (isNaN(topFlowsInt) || topFlowsInt < 1 || topFlowsInt > 10000) { 111 | alert("Incorrect 'Top Flows': please enter a valid integer between 1 and 10000."); 112 | return false; 113 | } 114 | 115 | params = $('form').serialize(); 116 | params += '&topFlows=' + encodeURIComponent(topFlows); 117 | location.href = "#" + params 118 | return false 119 | } 120 | 121 | function drawChart() { 122 | var query = location.href.split("#")[1] 123 | if 
(!query) { 124 | return; 125 | } 126 | 127 | $.ajax({ 128 | type: "GET", 129 | url: "/query?" + query, 130 | dataType: "text", 131 | success: function(rdata, status, xhr) { 132 | if (rdata == undefined) { 133 | $("#chart_div").text("No data found"); 134 | return; 135 | } 136 | renderChart(rdata); 137 | }, 138 | error: function(xhr) { 139 | showPopup( 140 | "Internal server error", 141 | "danger", 142 | 20000, 143 | xhr.responseText 144 | ); 145 | $("#chart_div").empty(); 146 | document.getElementById('custom_legend').innerHTML = ''; 147 | } 148 | }); 149 | } 150 | 151 | function renderChart(rdata) { 152 | pres = Papa.parse(rdata.trim()); 153 | 154 | var filtered = [pres.data[0]]; 155 | for (const row of pres.data) { 156 | const hasNonZero = row.slice(1).some(val => { 157 | const num = parseFloat((val || '').trim()); 158 | return !isNaN(num) && num !== 0; 159 | }); 160 | if (hasNonZero) { 161 | filtered.push(row); 162 | } 163 | } 164 | 165 | var data = []; 166 | for (var i = 0; i < filtered.length; i++) { 167 | data[i] = []; 168 | for (var j = 0; j < filtered[i].length; j++) { 169 | var x = filtered[i][j]; 170 | if (i !== 0 && j !== 0) { 171 | x = parseFloat((x || '').trim()); 172 | if (isNaN(x)) x = 0; 173 | } 174 | data[i][j] = x; 175 | } 176 | } 177 | 178 | if (!window.seriesVisibility || window.seriesVisibility.length !== data[0].length - 1) { 179 | window.seriesVisibility = Array(data[0].length - 1).fill(true); 180 | } 181 | 182 | var filteredData = []; 183 | for (var i = 0; i < data.length; i++) { 184 | var row = [data[i][0]]; 185 | for (var j = 1; j < data[i].length; j++) { 186 | if (window.seriesVisibility[j - 1]) { 187 | row.push(data[i][j]); 188 | } 189 | } 190 | filteredData.push(row); 191 | } 192 | 193 | if (filteredData[0].length < 2) { 194 | showPopup("No series selected. 
Please select at least one flow to display the chart.", "danger"); 195 | $("#chart_div").empty(); 196 | document.getElementById('custom_legend').innerHTML = ''; 197 | return; 198 | } 199 | 200 | 201 | var chartData = google.visualization.arrayToDataTable(filteredData); 202 | 203 | var options = { 204 | isStacked: false, 205 | title: 'Flow Mbps', 206 | titleTextStyle: { 207 | fontSize: 24, 208 | bold: true, 209 | color: '#333' 210 | }, 211 | hAxis: { 212 | title: 'Time', 213 | slantedText: true, 214 | slantedTextAngle: 60, 215 | showTextEvery: 10, 216 | titleTextStyle: { 217 | color: '#333', 218 | italic: false, 219 | bold: true, 220 | fontSize: 18 221 | }, 222 | gridlines: { 223 | color: '#f3f3f3', 224 | count: 10 225 | }, 226 | minorGridlines: { 227 | color: '#e9e9e9' 228 | }, 229 | textStyle: { 230 | color: '#333', 231 | fontSize: 12 232 | } 233 | }, 234 | vAxis: { 235 | minValue: 0, 236 | title: 'Megabits per second', 237 | titleTextStyle: { 238 | color: '#333', 239 | italic: false, 240 | bold: true, 241 | fontSize: 18 242 | }, 243 | gridlines: { 244 | color: '#f3f3f3', 245 | count: 10 246 | }, 247 | minorGridlines: { 248 | color: '#e9e9e9' 249 | }, 250 | textStyle: { 251 | color: '#333', 252 | fontSize: 12 253 | } 254 | }, 255 | height: screen.height * 0.7, 256 | chartArea: { 257 | width: '90%', 258 | height: '70%', 259 | top: '5%', 260 | backgroundColor: { 261 | stroke: '#ccc', 262 | strokeWidth: 1 263 | } 264 | }, 265 | backgroundColor: '#ffffff', 266 | colors: ['#2196F3', '#4CAF50', '#FFC107', '#FF5722', '#9C27B0'], 267 | animation: { 268 | startup: true, 269 | duration: 1000, 270 | easing: 'out' 271 | }, 272 | legend: { 273 | position: 'none' 274 | }, 275 | tooltip: { 276 | textStyle: { 277 | color: '#333', 278 | fontSize: 12 279 | }, 280 | showColorCode: true 281 | }, 282 | lineWidth: 2, 283 | pointSize: 1, 284 | series: { 285 | 0: { lineDashStyle: [4, 4] }, 286 | 1: { lineDashStyle: [2, 2] }, 287 | 2: { lineDashStyle: [4, 2] }, 288 | 3: { lineDashStyle: 
[2, 4] }, 289 | 4: { lineDashStyle: [1, 1] } 290 | } 291 | }; 292 | 293 | var chart = new google.visualization.AreaChart(document.getElementById('chart_div')); 294 | chart.draw(chartData, options); 295 | 296 | renderLegendTable(); 297 | 298 | function renderLegendTable() { 299 | const flowStats = []; 300 | for (let i = 1; i < data[0].length; i++) { 301 | let max = -Infinity; 302 | for (let j = 1; j < data.length; j++) { 303 | const val = data[j][i]; 304 | if (typeof val === "number" && !isNaN(val)) { 305 | if (val > max) max = val; 306 | } 307 | } 308 | flowStats.push({ 309 | index: i, 310 | label: data[0][i], 311 | max: max === -Infinity ? 0 : max 312 | }); 313 | } 314 | 315 | // Sorting logic 316 | if (!window.legendSort) window.legendSort = { key: "label", asc: true }; 317 | const sortKey = window.legendSort.key; 318 | const sortAsc = window.legendSort.asc; 319 | 320 | flowStats.sort((a, b) => { 321 | switch (sortKey) { 322 | case "label": 323 | return sortAsc 324 | ? a.label.localeCompare(b.label) 325 | : b.label.localeCompare(a.label); 326 | case "max": 327 | return sortAsc 328 | ? a.max - b.max 329 | : b.max - a.max; 330 | default: 331 | return 0; 332 | } 333 | }); 334 | 335 | const customLegendDiv = document.getElementById('custom_legend'); 336 | customLegendDiv.innerHTML = ` 337 |
338 | Usage:
339 | • Click a flow to show only that flow. Click again to show all.
340 | • Ctrl/Cmd/Option + Click to add or remove flows.
341 | • Click a column header to sort the legend. 342 |
343 | `; 344 | 345 | const colors = options.colors; 346 | const table = document.createElement('table'); 347 | table.classList.add('table', 'table-sm', 'table-bordered'); 348 | const thead = document.createElement('thead'); 349 | const headRow = document.createElement('tr'); 350 | 351 | function makeHeaderCell(text, key) { 352 | const th = document.createElement('th'); 353 | th.textContent = text; 354 | th.style.cursor = 'pointer'; 355 | th.style.userSelect = 'none'; 356 | th.addEventListener('click', (e) => { 357 | if (window.legendSort.key === key) { 358 | window.legendSort.asc = !window.legendSort.asc; 359 | } else { 360 | window.legendSort.key = key; 361 | // Default: sort by MAX Mbps descending, FLOW ascending 362 | window.legendSort.asc = (key === 'label'); 363 | } 364 | renderLegendTable(); 365 | e.stopPropagation(); 366 | }); 367 | if (window.legendSort.key === key) { 368 | th.textContent += window.legendSort.asc ? ' ▲' : ' ▼'; 369 | } 370 | return th; 371 | } 372 | 373 | headRow.appendChild(document.createElement('th')); // color cell (empty) 374 | headRow.appendChild(makeHeaderCell('FLOW', 'label')); 375 | headRow.appendChild(makeHeaderCell('MAX Mbps', 'max')); 376 | thead.appendChild(headRow); 377 | 378 | const tbody = document.createElement('tbody'); 379 | 380 | for (const stat of flowStats) { 381 | const i = stat.index; 382 | const row = document.createElement('tr'); 383 | const colorCell = document.createElement('td'); 384 | colorCell.style.backgroundColor = colors[(i - 1) % colors.length]; 385 | colorCell.style.width = '20px'; 386 | const labelCell = document.createElement('td'); 387 | labelCell.textContent = stat.label; 388 | const maxCell = document.createElement('td'); 389 | maxCell.textContent = stat.max.toFixed(1); 390 | row.appendChild(colorCell); 391 | row.appendChild(labelCell); 392 | row.appendChild(maxCell); 393 | tbody.appendChild(row); 394 | 395 | if (!window.seriesVisibility[i - 1]) { 396 | row.style.opacity = '0.4'; 397 | } else { 398 | 
row.style.opacity = '1.0'; 399 | } 400 | 401 | row.addEventListener('click', function(event) { 402 | const visibleCount = window.seriesVisibility.filter(Boolean).length; 403 | if (event.ctrlKey || event.metaKey || event.altKey) { 404 | if (window.seriesVisibility[i - 1] && visibleCount === 1) { 405 | window.seriesVisibility = Array(data[0].length - 1).fill(true); 406 | } else { 407 | window.seriesVisibility[i - 1] = !window.seriesVisibility[i - 1]; 408 | } 409 | } else { 410 | if (window.seriesVisibility[i - 1] && visibleCount === 1) { 411 | window.seriesVisibility = Array(data[0].length - 1).fill(true); 412 | } else { 413 | window.seriesVisibility = Array(data[0].length - 1).fill(false); 414 | window.seriesVisibility[i - 1] = true; 415 | } 416 | } 417 | renderChart(rdata); 418 | }); 419 | } 420 | 421 | table.appendChild(thead); 422 | table.appendChild(tbody); 423 | customLegendDiv.appendChild(table); 424 | } 425 | } 426 | 427 | function formatTimestamp(date) { 428 | return date.toISOString().substr(0, 16) 429 | } 430 | 431 | function showPopup(message, type="danger", timeout=15000, details=null) { 432 | const container = $("#popup-container"); 433 | const alertId = "popup-" + Date.now() + Math.floor(Math.random()*10000); 434 | let detailsHtml = ""; 435 | if (details) { 436 | const detailsId = alertId + "-details"; 437 | detailsHtml = ` 438 |
439 | Show Details 440 |
${$('
').text(details).html()}
455 |
456 | `; 457 | } 458 | const alert = $(` 459 | 466 | `); 467 | container.append(alert); 468 | setTimeout(() => { 469 | alert.alert('close'); 470 | }, timeout); 471 | } 472 | 473 | function loadValues(filterNum, field) { 474 | return $.getJSON("/dict_values/"+field, function(data) { 475 | $("#filter_value\\[" + filterNum + "\\]").autocomplete({ 476 | source: data, 477 | }); 478 | }); 479 | } 480 | -------------------------------------------------------------------------------- /pkg/frontend/assets/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Flowhouse 9 | 29 | 30 | 31 | 34 |
35 |
36 | 97 |
98 |
99 |
100 |
101 |
102 |
103 | 107 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /pkg/frontend/frontend_test.go: -------------------------------------------------------------------------------- 1 | package frontend 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestFormatPrefixCondition(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | pfx string 13 | fieldName string 14 | expected string 15 | wantFail bool 16 | }{ 17 | { 18 | name: "Test #1", 19 | pfx: "8.8.8.0/24", 20 | fieldName: "src_pfx", 21 | expected: "src_pfx_addr = IPv4ToIPv6(IPv4StringToNum('8.8.8.0')) AND src_pfx_len = 24", 22 | wantFail: false, 23 | }, 24 | { 25 | name: "Test #2", 26 | pfx: "2001:db8::/48", 27 | fieldName: "src_pfx", 28 | expected: "src_pfx_addr = IPv6StringToNum('2001:DB8:0:0:0:0:0:0') AND src_pfx_len = 48", 29 | wantFail: false, 30 | }, 31 | { 32 | name: "Test #3", 33 | pfx: "2001:db8::/48XXX", 34 | fieldName: "src_pfx", 35 | wantFail: true, 36 | }, 37 | } 38 | 39 | for _, test := range tests { 40 | res, err := formatPrefixCondition(test.fieldName, test.pfx) 41 | if test.wantFail && err == nil { 42 | t.Errorf("Unexpected success for test %s", test.name) 43 | continue 44 | } 45 | 46 | if !test.wantFail && err != nil { 47 | t.Errorf("Unexpected failure for test %s", test.name) 48 | continue 49 | } 50 | 51 | assert.Equal(t, test.expected, res, test.name) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /pkg/frontend/result.go: -------------------------------------------------------------------------------- 1 | package frontend 2 | 3 | import ( 4 | "encoding/csv" 5 | "fmt" 6 | "io" 7 | "sort" 8 | "time" 9 | ) 10 | 11 | type void struct{} 12 | 13 | type result struct { 14 | keys map[string]void 15 | data map[time.Time]map[string]uint64 // timestamps -> keys -> values 16 | } 17 | 18 | func newResult() *result { 19 | 
return &result{ 20 | keys: make(map[string]void), 21 | data: make(map[time.Time]map[string]uint64), 22 | } 23 | } 24 | 25 | func (r *result) add(ts time.Time, key string, value uint64) { 26 | r.keys[key] = void{} 27 | if _, exists := r.data[ts]; !exists { 28 | r.data[ts] = make(map[string]uint64) 29 | } 30 | 31 | r.data[ts][key] = value 32 | } 33 | 34 | func (r *result) csv(w io.Writer) error { 35 | cw := csv.NewWriter(w) 36 | defer cw.Flush() 37 | 38 | header := make([]string, 0) 39 | header = append(header, "timestamp") 40 | keys := r.getKeysSorted() 41 | for _, k := range keys { 42 | header = append(header, k) 43 | } 44 | 45 | err := cw.Write(header) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | for _, ts := range r.getTimestampsSorted() { 51 | record := make([]string, 0) 52 | record = append(record, ts.Format(time.RFC3339)) 53 | 54 | for _, k := range keys { 55 | record = append(record, fmt.Sprintf("%d", r.data[ts][k])) 56 | } 57 | 58 | err := cw.Write(record) 59 | if err != nil { 60 | return err 61 | } 62 | } 63 | 64 | return nil 65 | } 66 | 67 | // returns a CSV with just 3 columns: timestamp, dimension, value 68 | func (r *result) csvFlat(w io.Writer) error { 69 | cw := csv.NewWriter(w) 70 | defer cw.Flush() 71 | 72 | header := []string{"timestamp", "dimension", "value"} 73 | err := cw.Write(header) 74 | if err != nil { 75 | return err 76 | } 77 | 78 | keys := r.getKeysSorted() 79 | for _, ts := range r.getTimestampsSorted() { 80 | for _, k := range keys { 81 | record := []string{ 82 | ts.Format(time.RFC3339), 83 | k, 84 | fmt.Sprintf("%d", r.data[ts][k]), 85 | } 86 | err := cw.Write(record) 87 | if err != nil { 88 | return err 89 | } 90 | } 91 | } 92 | 93 | return nil 94 | } 95 | 96 | func (r *result) getKeysSorted() []string { 97 | keys := make([]string, len(r.keys)) 98 | i := 0 99 | for k := range r.keys { 100 | keys[i] = k 101 | i++ 102 | } 103 | 104 | sort.Slice(keys, func(i, j int) bool { 105 | return keys[i] < keys[j] 106 | }) 107 | 108 | 
return keys 109 | } 110 | 111 | func (r *result) getTimestampsSorted() []time.Time { 112 | res := make([]time.Time, len(r.data)) 113 | 114 | i := 0 115 | for ts := range r.data { 116 | res[i] = ts 117 | i++ 118 | } 119 | 120 | sort.Slice(res, func(i, j int) bool { 121 | return res[i].Before(res[j]) 122 | }) 123 | 124 | return res 125 | } 126 | -------------------------------------------------------------------------------- /pkg/intfmapper/device.go: -------------------------------------------------------------------------------- 1 | package intfmapper 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | "sync" 7 | "time" 8 | 9 | "github.com/bio-routing/flowhouse/cmd/flowhouse/config" 10 | "github.com/gosnmp/gosnmp" 11 | "github.com/pkg/errors" 12 | 13 | bnet "github.com/bio-routing/bio-rd/net" 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | const ( 18 | ifNameOID = "1.3.6.1.2.1.31.1.1.1.1" 19 | snmpPort = 161 20 | timeout = time.Second * 30 21 | ) 22 | 23 | type device struct { 24 | addr bnet.IP 25 | snmpCfg *config.SNMPConfig 26 | interfacesByID map[uint32]*netIf 27 | interfacesByName map[string]*netIf 28 | interfacesMu sync.RWMutex 29 | stopCh chan struct{} 30 | wg sync.WaitGroup 31 | ticker *time.Ticker 32 | } 33 | 34 | func newDevice(addr bnet.IP, snmpCfg *config.SNMPConfig) *device { 35 | d := &device{ 36 | addr: addr, 37 | snmpCfg: snmpCfg, 38 | interfacesByID: make(map[uint32]*netIf), 39 | interfacesByName: make(map[string]*netIf), 40 | ticker: time.NewTicker(time.Minute * 2), 41 | } 42 | 43 | d.startCollector() 44 | return d 45 | } 46 | 47 | func (d *device) update(interfaces []*netIf) { 48 | interfacesByID := make(map[uint32]*netIf) 49 | interfacesByName := make(map[string]*netIf) 50 | for _, ifa := range interfaces { 51 | interfacesByID[ifa.id] = ifa 52 | interfacesByName[ifa.name] = ifa 53 | } 54 | 55 | d.interfacesMu.Lock() 56 | defer d.interfacesMu.Unlock() 57 | 58 | d.interfacesByID = interfacesByID 59 | d.interfacesByName = interfacesByName 60 | } 
61 | 62 | type netIf struct { 63 | id uint32 64 | name string 65 | } 66 | 67 | func (d *device) startCollector() { 68 | d.wg.Add(1) 69 | go d.collector() 70 | } 71 | 72 | func (d *device) collector() { 73 | defer d.wg.Done() 74 | 75 | for { 76 | err := d.collect() 77 | if err != nil { 78 | log.WithError(err).Warning("Collecting failed") 79 | continue 80 | } 81 | 82 | select { 83 | case <-d.stopCh: 84 | return 85 | case <-d.ticker.C: 86 | } 87 | } 88 | } 89 | 90 | func (d *device) collect() error { 91 | s := &gosnmp.GoSNMP{ 92 | Target: d.addr.String(), 93 | Port: snmpPort, 94 | Community: d.snmpCfg.Community, 95 | Version: gosnmp.Version2c, 96 | Timeout: timeout, 97 | Retries: 0, 98 | ExponentialTimeout: false, 99 | UseUnconnectedUDPSocket: true, 100 | } 101 | 102 | if d.snmpCfg.Version == 3 { 103 | s.Community = "" 104 | s.Version = gosnmp.Version3 105 | s.SecurityModel = gosnmp.UserSecurityModel 106 | s.MsgFlags = gosnmp.AuthPriv 107 | s.SecurityParameters = &gosnmp.UsmSecurityParameters{ 108 | UserName: d.snmpCfg.User, 109 | AuthenticationProtocol: gosnmp.SHA, 110 | AuthenticationPassphrase: d.snmpCfg.AuthPassphrase, 111 | PrivacyProtocol: gosnmp.AES, 112 | PrivacyPassphrase: d.snmpCfg.PrivacyPassphrase, 113 | } 114 | } 115 | 116 | err := s.Connect() 117 | if err != nil { 118 | return errors.Wrap(err, "Unable to connect") 119 | } 120 | 121 | defer s.Conn.Close() 122 | 123 | interfaces := make([]*netIf, 0) 124 | err = s.BulkWalk(ifNameOID, func(pdu gosnmp.SnmpPDU) error { 125 | oid := strings.Split(pdu.Name, ".") 126 | id, err := strconv.Atoi(oid[len(oid)-1]) 127 | if err != nil { 128 | return errors.Wrap(err, "Unable to convert interface id") 129 | } 130 | 131 | if pdu.Type != gosnmp.OctetString { 132 | return errors.Errorf("Unexpected PDU type: %d", pdu.Type) 133 | } 134 | 135 | name := string(pdu.Value.([]byte)) 136 | interfaces = append(interfaces, &netIf{ 137 | id: uint32(id), 138 | name: name, 139 | }) 140 | 141 | return nil 142 | }) 143 | if err != nil { 
144 | return errors.Wrap(err, "BulkWalk failed for "+d.addr.String()) 145 | } 146 | 147 | d.update(interfaces) 148 | return nil 149 | } 150 | 151 | func (d *device) resolve(ifID uint32) string { 152 | d.interfacesMu.RLock() 153 | defer d.interfacesMu.RUnlock() 154 | 155 | if _, exists := d.interfacesByID[ifID]; !exists { 156 | return "" 157 | } 158 | 159 | return d.interfacesByID[ifID].name 160 | } 161 | -------------------------------------------------------------------------------- /pkg/intfmapper/intfmapper.go: -------------------------------------------------------------------------------- 1 | package intfmapper 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/bio-routing/flowhouse/cmd/flowhouse/config" 8 | 9 | bnet "github.com/bio-routing/bio-rd/net" 10 | log "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // IntfMapper allows mapping interface IDs into Names and vice versa 14 | type IntfMapper struct { 15 | devices map[bnet.IP]*device 16 | devicesMu sync.RWMutex 17 | } 18 | 19 | // New creates a new IntfMapper 20 | func New() *IntfMapper { 21 | return &IntfMapper{ 22 | devices: make(map[bnet.IP]*device), 23 | } 24 | } 25 | 26 | // Resolve resolves an agents interface ID into is' name 27 | func (im *IntfMapper) Resolve(agent bnet.IP, ifID uint32) string { 28 | im.devicesMu.RLock() 29 | defer im.devicesMu.RUnlock() 30 | 31 | if _, exists := im.devices[agent]; !exists { 32 | log.Warningf("IntfMapper: Device %q not found", agent.String()) 33 | return "" 34 | } 35 | 36 | return im.devices[agent].resolve(ifID) 37 | } 38 | 39 | // AddDevice adds a device 40 | func (im *IntfMapper) AddDevice(addr bnet.IP, snmpCfg *config.SNMPConfig) error { 41 | im.devicesMu.Lock() 42 | defer im.devicesMu.Unlock() 43 | 44 | if _, exists := im.devices[addr]; exists { 45 | return fmt.Errorf("Device exists already") 46 | } 47 | 48 | im.devices[addr] = newDevice(addr, snmpCfg) 49 | 50 | return nil 51 | } 52 | 
-------------------------------------------------------------------------------- /pkg/ipannotator/ipannotator.go: -------------------------------------------------------------------------------- 1 | package ipannotator 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/bio-routing/flowhouse/pkg/models/flow" 7 | "github.com/bio-routing/flowhouse/pkg/routemirror" 8 | "github.com/pkg/errors" 9 | ) 10 | 11 | type IPAnnotator struct { 12 | rm *routemirror.RouteMirror 13 | } 14 | 15 | func New(rm *routemirror.RouteMirror) *IPAnnotator { 16 | return &IPAnnotator{ 17 | rm: rm, 18 | } 19 | } 20 | 21 | func (ipa *IPAnnotator) Annotate(fl *flow.Flow) error { 22 | srt, err := ipa.rm.LPM(fl.Agent.String(), fl.VRFIn, fl.SrcAddr) 23 | if err != nil { 24 | return errors.Wrapf(err, "Unable to get route for source address %s", fl.SrcAddr.String()) 25 | } 26 | 27 | if srt == nil { 28 | return fmt.Errorf("No route found for %s", fl.SrcAddr.String()) 29 | } 30 | 31 | fl.SrcPfx = *srt.Prefix() 32 | srcFirstASPathSeg := srt.BestPath().BGPPath.ASPath.GetFirstSequenceSegment() 33 | if srcFirstASPathSeg != nil { 34 | srcASN := srcFirstASPathSeg.GetFirstASN() 35 | if srcASN != nil { 36 | fl.SrcAs = *srcASN 37 | } 38 | } 39 | 40 | drt, err := ipa.rm.LPM(fl.Agent.String(), fl.VRFOut, fl.DstAddr) 41 | if err != nil { 42 | return errors.Wrapf(err, "Unable to get route for destination address %s", fl.DstAddr.String()) 43 | } 44 | 45 | if drt == nil { 46 | return fmt.Errorf("No route found for %s", fl.DstAddr.String()) 47 | } 48 | 49 | fl.DstPfx = *drt.Prefix() 50 | dstLastASPathSeg := drt.BestPath().BGPPath.ASPath.GetLastSequenceSegment() 51 | if dstLastASPathSeg != nil { 52 | dstASN := dstLastASPathSeg.GetLastASN() 53 | if dstASN != nil { 54 | fl.DstAs = *dstASN 55 | } 56 | } 57 | 58 | dstFirstASPathSeg := drt.BestPath().BGPPath.ASPath.GetFirstSequenceSegment() 59 | if dstFirstASPathSeg != nil { 60 | nextASN := dstFirstASPathSeg.GetFirstASN() 61 | if nextASN != nil { 62 | fl.NextAs = *nextASN 63 | } 
64 | } 65 | 66 | return nil 67 | } 68 | -------------------------------------------------------------------------------- /pkg/models/flow/flow.go: -------------------------------------------------------------------------------- 1 | package flow 2 | 3 | import ( 4 | "fmt" 5 | 6 | bnet "github.com/bio-routing/bio-rd/net" 7 | ) 8 | 9 | // Flow defines a network flow 10 | type Flow struct { 11 | Agent bnet.IP 12 | TOS uint8 13 | SrcPort uint16 14 | DstPort uint16 15 | SrcAs uint32 16 | DstAs uint32 17 | NextAs uint32 18 | IntIn string 19 | IntOut string 20 | Packets uint64 21 | Protocol uint8 22 | Family uint8 23 | Timestamp int64 24 | Size uint64 25 | Samplerate uint64 26 | SrcAddr bnet.IP 27 | DstAddr bnet.IP 28 | NextHop bnet.IP 29 | SrcPfx bnet.Prefix 30 | DstPfx bnet.Prefix 31 | VRFIn uint64 32 | VRFOut uint64 33 | } 34 | 35 | // Add adds up to flows 36 | func (fl *Flow) Add(a *Flow) { 37 | fl.Size += a.Size 38 | fl.Packets += a.Packets 39 | } 40 | 41 | // Dump dumps the flow 42 | func (fl *Flow) Dump() { 43 | fmt.Printf("--------------------------------\n") 44 | fmt.Printf("Flow dump:\n") 45 | fmt.Printf("Router: %s\n", fl.Agent.String()) 46 | fmt.Printf("Family: %d\n", fl.Family) 47 | fmt.Printf("SrcAddr: %s\n", fl.SrcAddr.String()) 48 | fmt.Printf("DstAddr: %s\n", fl.DstAddr.String()) 49 | fmt.Printf("Protocol: %d\n", fl.Protocol) 50 | fmt.Printf("NextHop: %s\n", fl.NextHop.String()) 51 | fmt.Printf("IntIn: %s\n", fl.IntIn) 52 | fmt.Printf("IntOut: %s\n", fl.IntOut) 53 | fmt.Printf("TOS/COS: %d\n", fl.TOS) 54 | fmt.Printf("Packets: %d\n", fl.Packets) 55 | fmt.Printf("Bytes: %d\n", fl.Size) 56 | fmt.Printf("--------------------------------\n") 57 | } 58 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/decode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 
2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package ipfix 13 | 14 | import ( 15 | "fmt" 16 | "unsafe" 17 | 18 | "github.com/bio-routing/tflow2/convert" 19 | "github.com/pkg/errors" 20 | ) 21 | 22 | const ( 23 | // numPreAllocTmplRecs is the number of elements to pre allocate in TemplateRecords slice 24 | numPreAllocRecs = 20 25 | ) 26 | 27 | // SetIDTemplateMax is the maximum FlowSetID being used for templates according to RFC3954 28 | const SetIDTemplateMax = 255 29 | 30 | // TemplateSetID is the set ID reserved for template sets 31 | const TemplateSetID = 2 32 | 33 | // errorIncompatibleVersion prints an error message in case the detected version is not supported 34 | func errorIncompatibleVersion(version uint16) error { 35 | return errors.Errorf("IPFIX: Incompatible protocol version v%d, only v10 is supported", version) 36 | } 37 | 38 | // Decode is the main function of this package. It converts raw packet bytes to Packet struct. 39 | func Decode(raw []byte) (*Packet, error) { 40 | data := convert.Reverse(raw) //TODO: Make it endian aware. 
This assumes a little endian machine 41 | 42 | pSize := len(data) 43 | bufSize := 1500 44 | buffer := [1500]byte{} 45 | 46 | if pSize > bufSize { 47 | panic("Buffer too small") 48 | } 49 | 50 | // copy data into array as arrays allow us to cast the shit out of it 51 | for i := 0; i < pSize; i++ { 52 | buffer[bufSize-pSize+i] = data[i] 53 | } 54 | 55 | bufferPtr := unsafe.Pointer(&buffer) 56 | bufferMinPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(pSize)) 57 | headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeader)) 58 | 59 | var packet Packet 60 | packet.Buffer = buffer[:] 61 | packet.Header = (*Header)(headerPtr) 62 | 63 | if packet.Header.Version != 10 { 64 | return nil, errorIncompatibleVersion(packet.Header.Version) 65 | } 66 | 67 | //Pre-allocate some room for templates to avoid later copying 68 | packet.Templates = make([]*TemplateRecords, 0, numPreAllocRecs) 69 | packet.OptionsTemplateRecords = make([]*OptionsTemplateRecords, 0) 70 | 71 | for uintptr(headerPtr) > uintptr(bufferMinPtr) { 72 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfFlowSetHeader) 73 | 74 | fls := &FlowSet{ 75 | Header: (*FlowSetHeader)(ptr), 76 | } 77 | 78 | switch fls.Header.SetID { 79 | case TemplateSetID: 80 | err := decodeTemplate(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader) 81 | if err != nil { 82 | return nil, errors.Wrap(err, "Unable to decode template") 83 | } 84 | case OptionsTemplateSetID: 85 | err := decodeOptionsTemplate(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader) 86 | if err != nil { 87 | return nil, errors.Wrap(err, "Unable to decode template") 88 | } 89 | default: 90 | decodeData(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader) 91 | } 92 | 93 | headerPtr = unsafe.Pointer(uintptr(headerPtr) - uintptr(fls.Header.Length)) 94 | } 95 | 96 | return &packet, nil 97 | } 98 | 99 | // decodeData decodes a flowSet from `packet` 100 | func decodeData(packet *Packet, headerPtr 
unsafe.Pointer, size uintptr) { 101 | flsh := (*FlowSetHeader)(unsafe.Pointer(headerPtr)) 102 | data := unsafe.Pointer(uintptr(headerPtr) - uintptr(flsh.Length)) 103 | 104 | fls := &FlowSet{ 105 | Header: flsh, 106 | Records: (*(*[1<<31 - 1]byte)(data))[sizeOfFlowSetHeader:flsh.Length], 107 | } 108 | 109 | packet.FlowSets = append(packet.FlowSets, fls) 110 | } 111 | 112 | // decodeTemplate decodes a template from `packet` 113 | func decodeTemplate(packet *Packet, p unsafe.Pointer, size uintptr) error { 114 | min := uintptr(p) - size 115 | for uintptr(p) > min { 116 | p = unsafe.Pointer(uintptr(p) - sizeOfTemplateRecordHeader) 117 | tmplRecs := &TemplateRecords{} 118 | tmplRecs.Header = (*TemplateRecordHeader)(unsafe.Pointer(p)) 119 | tmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) 120 | 121 | if uintptr(p)-uintptr(tmplRecs.Header.FieldCount*uint16(sizeOfTemplateRecord)) < min { 122 | return fmt.Errorf("invalid ipfix template: buffer underrun") 123 | } 124 | 125 | for i := uint16(0); i < tmplRecs.Header.FieldCount; i++ { 126 | p = unsafe.Pointer(uintptr(p) - sizeOfTemplateRecord) 127 | rec := (*TemplateRecord)(unsafe.Pointer(p)) 128 | 129 | if rec.isEnterprise() { 130 | return fmt.Errorf("enterprise TLV currently not supported") 131 | } 132 | 133 | tmplRecs.Records = append(tmplRecs.Records, rec) 134 | } 135 | 136 | packet.Templates = append(packet.Templates, tmplRecs) 137 | } 138 | 139 | return nil 140 | } 141 | 142 | // decodeOptionsTemplate decodes a template from `packet` 143 | func decodeOptionsTemplate(packet *Packet, p unsafe.Pointer, size uintptr) error { 144 | min := uintptr(p) - size 145 | for uintptr(p) > min { 146 | p = unsafe.Pointer(uintptr(p) - sizeOfOptionsTemplateRecordHeader) 147 | optTmplRecs := &OptionsTemplateRecords{} 148 | optTmplRecs.Header = (*OptionsTemplateRecordHeader)(unsafe.Pointer(p)) 149 | optTmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) 150 | 151 | if 
uintptr(p)-uintptr(optTmplRecs.Header.TotelFieldCount*uint16(sizeOfTemplateRecord)) < min { 152 | return fmt.Errorf("invalid ipfix options template: buffer underrun") 153 | } 154 | 155 | for i := uint16(0); i < optTmplRecs.Header.TotelFieldCount; i++ { 156 | p = unsafe.Pointer(uintptr(p) - sizeOfTemplateRecord) 157 | rec := (*TemplateRecord)(unsafe.Pointer(p)) 158 | if rec.isEnterprise() { 159 | return fmt.Errorf("enterprise TLV currently not supported") 160 | } 161 | 162 | optTmplRecs.Records = append(optTmplRecs.Records, rec) 163 | } 164 | 165 | packet.OptionsTemplateRecords = append(packet.OptionsTemplateRecords, optTmplRecs) 166 | } 167 | 168 | return nil 169 | } 170 | 171 | // PrintHeader prints the header of `packet` 172 | func PrintHeader(p *Packet) { 173 | fmt.Printf("Version: %d\n", p.Header.Version) 174 | fmt.Printf("Length: %d\n", p.Header.Length) 175 | fmt.Printf("UnixSecs: %d\n", p.Header.ExportTime) 176 | fmt.Printf("Sequence: %d\n", p.Header.SequenceNumber) 177 | fmt.Printf("DomainId: %d\n", p.Header.DomainID) 178 | } 179 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/decode_test.go: -------------------------------------------------------------------------------- 1 | package ipfix 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestDecode(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | input []byte 13 | expected *Packet 14 | wantFail bool 15 | }{ 16 | { 17 | name: "Template", 18 | input: []byte{ 19 | 0x00, 0x0a, // Version 20 | 0x00, 0x24, // Length 21 | 0x68, 0x3d, 0x5a, 0xc1, // Timestamp 22 | 0x34, 0x0f, 0xb8, 0x58, // FlowSequence 23 | 0x00, 0x08, 0x00, 0x01, // Observation Domain ID 24 | 0x00, 0x02, // FlowSet ID = 2 = template 25 | 0x00, 0x14, // FlowSet length 26 | 0x01, 0x00, // Template ID 27 | 0x00, 0x03, // Field count 28 | 0x00, 0x08, 0x00, 0x04, 29 | 0x00, 0x0c, 0x00, 0x04, 30 | 0x00, 0x05, 0x00, 0x01, 31 | }, 32 | 
expected: &Packet{ 33 | Header: &Header{ 34 | Version: 10, 35 | Length: 36, 36 | ExportTime: 1748851393, 37 | DomainID: 524289, 38 | SequenceNumber: 873445464, 39 | }, 40 | Templates: []*TemplateRecords{ 41 | { 42 | Header: &TemplateRecordHeader{ 43 | TemplateID: 256, 44 | FieldCount: 3, 45 | }, 46 | Records: []*TemplateRecord{ 47 | { 48 | Type: 8, 49 | Length: 4, 50 | }, 51 | { 52 | Type: 12, 53 | Length: 4, 54 | }, 55 | { 56 | Type: 5, 57 | Length: 1, 58 | }, 59 | }, 60 | }, 61 | }, 62 | OptionsTemplateRecords: make([]*OptionsTemplateRecords, 0), 63 | }, 64 | }, 65 | { 66 | name: "Options Template", 67 | input: []byte{ 68 | 0x00, 0x0a, // Version 69 | 0x00, 0x46, // Length 70 | 0x68, 0x3d, 0x5a, 0xc1, // Timestamp 71 | 0x00, 0x00, 0x46, 0xe9, // Sequence Number 72 | 0x00, 0x08, 0x00, 0x00, // Observation Domain ID 73 | 0x00, 0x03, // FlowSetID 74 | 0x00, 0x36, // FlowSet Length 75 | 0x02, 0x00, // Template ID = 512 76 | 0x00, 0x0b, // Total Field Count 77 | 0x00, 0x01, // Scope Field Count 78 | 0x00, 0x90, 0x00, 0x04, 79 | 0x00, 0x29, 0x00, 0x08, 80 | 0x00, 0x2a, 0x00, 0x08, 81 | 0x00, 0xa0, 0x00, 0x08, 82 | 0x00, 0x82, 0x00, 0x04, 83 | 0x00, 0x83, 0x00, 0x10, 84 | 0x00, 0x22, 0x00, 0x04, 85 | 0x00, 0x24, 0x00, 0x02, 86 | 0x00, 0x25, 0x00, 0x02, 87 | 0x00, 0xd6, 0x00, 0x01, 88 | 0x00, 0xd7, 0x00, 0x01, 89 | }, 90 | expected: &Packet{ 91 | Header: &Header{ 92 | Version: 10, 93 | Length: 70, 94 | ExportTime: 1748851393, 95 | DomainID: 524288, 96 | SequenceNumber: 18153, 97 | }, 98 | Templates: make([]*TemplateRecords, 0), 99 | OptionsTemplateRecords: []*OptionsTemplateRecords{ 100 | { 101 | Header: &OptionsTemplateRecordHeader{ 102 | TemplateID: 512, 103 | TotelFieldCount: 11, 104 | ScopeFieldCound: 1, 105 | }, 106 | Records: []*TemplateRecord{ 107 | { 108 | Type: 144, 109 | Length: 4, 110 | }, 111 | { 112 | Type: 41, 113 | Length: 8, 114 | }, 115 | { 116 | Type: 42, 117 | Length: 8, 118 | }, 119 | { 120 | Type: 160, 121 | Length: 8, 122 | }, 123 | { 124 | Type: 
130, 125 | Length: 4, 126 | }, 127 | { 128 | Type: 131, 129 | Length: 16, 130 | }, 131 | { 132 | Type: 34, 133 | Length: 4, 134 | }, 135 | { 136 | Type: 36, 137 | Length: 2, 138 | }, 139 | { 140 | Type: 37, 141 | Length: 2, 142 | }, 143 | { 144 | Type: 214, 145 | Length: 1, 146 | }, 147 | { 148 | Type: 215, 149 | Length: 1, 150 | }, 151 | }, 152 | }, 153 | }, 154 | }, 155 | }, 156 | } 157 | 158 | for _, test := range tests { 159 | p, err := Decode(test.input) 160 | if err == nil && test.wantFail { 161 | t.Errorf("unexpected success for %q", test.name) 162 | continue 163 | } 164 | 165 | if err != nil && !test.wantFail { 166 | t.Errorf("unexpected failure for %q: %v", test.name, err) 167 | continue 168 | } 169 | 170 | p.Buffer = nil 171 | assert.Equalf(t, test.expected, p, test.name) 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/field_db.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package ipfix 13 | 14 | const ( 15 | InBytes = 1 16 | InPkts = 2 17 | Flows = 3 18 | Protocol = 4 19 | SrcTos = 5 20 | TCPFlags = 6 21 | L4SrcPort = 7 22 | IPv4SrcAddr = 8 23 | SrcMask = 9 24 | InputSnmp = 10 25 | L4DstPort = 11 26 | IPv4DstAddr = 12 27 | DstMask = 13 28 | OutputSnmp = 14 29 | IPv4NextHop = 15 30 | SrcAs = 16 31 | DstAs = 17 32 | BGPIPv4NextHop = 18 33 | MulDstPkts = 19 34 | MulDstBytes = 20 35 | LastSwitched = 21 36 | FirstSwitched = 22 37 | OutBytes = 23 38 | OutPkts = 24 39 | MinPktLngth = 25 40 | MaxPktLngth = 26 41 | IPv6SrcAddr = 27 42 | IPv6DstAddr = 28 43 | IPv6SrcMask = 29 44 | IPv6DstMask = 30 45 | IPv6FlowLabel = 31 46 | IcmpType = 32 47 | MulIgmpType = 33 48 | SamplingInterval = 34 49 | SamplingAlgorithm = 35 50 | FlowActiveTimeout = 36 51 | FlowInactiveTimeout = 37 52 | EngineType = 38 53 | EngineID = 39 54 | TotalBytesExp = 40 55 | TotalPktsExp = 41 56 | TotalFlowsExp = 42 57 | VendorProprietary43 = 43 58 | IPv4SrcPrefix = 44 59 | IPv4DstPrefix = 45 60 | MplsTopLabelType = 46 61 | MplsTopLabelIPAddr = 47 62 | FlowSamplerID = 48 63 | FlowSamplerMode = 49 64 | FlowSamplerRandomInterval = 50 65 | VendorProprietary51 = 51 66 | MinTTL = 52 67 | MaxTTL = 53 68 | IPv4Ident = 54 69 | DstTos = 55 70 | InSrcMac = 56 71 | OutDstMac = 57 72 | SrcVlan = 58 73 | DstVlan = 59 74 | IPProtocolVersion = 60 75 | Direction = 61 76 | IPv6NextHop = 62 77 | BgpIPv6NextHop = 63 78 | IPv6OptionsHeaders = 64 79 | VendorProprietary65 = 65 80 | VendorProprietary66 = 66 81 | VendorProprietary67 = 67 82 | VendorProprietary68 = 68 83 | VendorProprietary69 = 69 84 | MplsLabel1 = 70 85 | MplsLabel2 = 71 86 | MplsLabel3 = 72 87 | MplsLabel4 = 73 88 | MplsLabel5 = 74 89 | MplsLabel6 = 75 90 | MplsLabel7 = 76 91 | MplsLabel8 = 77 92 | MplsLabel9 = 78 93 | MplsLabel10 = 79 94 | InDstMac = 80 95 | OutSrcMac = 81 96 | IfName = 82 97 | IfDesc = 83 98 | SamplerName = 84 99 | InPermanentBytes = 85 100 | InPermanentPkts = 86 101 | VendorProprietary87 = 87 102 | 
FragmentOffset = 88 103 | ForwardingStatus = 89 104 | MplsPalRd = 90 105 | MplsPrefixLen = 91 106 | SrcTrafficIndex = 92 107 | DstTrafficIndex = 93 108 | ApplicationDescription = 94 109 | ApplicationTag = 95 110 | ApplicationName = 96 111 | SamplingPacketInterval = 305 112 | ) 113 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/options_templates.go: -------------------------------------------------------------------------------- 1 | package ipfix 2 | 3 | import ( 4 | "unsafe" 5 | ) 6 | 7 | // OptionsTemplateSetID is the set ID reserved for options template sets 8 | const OptionsTemplateSetID = 3 9 | 10 | // OptionsTemplateRecordHeader represents the header of a options template record 11 | type OptionsTemplateRecordHeader struct { 12 | ScopeFieldCound uint16 13 | 14 | // Totel number of fields in this Otions Template Record. Because a Template FlowSet 15 | // usually contains multiple Template Records, this field allows the 16 | // Collector to determine the end of the current Template Record and 17 | // the start of the next. 18 | TotelFieldCount uint16 19 | 20 | // Each of the newly generated Template Records is given a unique 21 | // Template ID. This uniqueness is local to the Observation Domain that 22 | // generated the Template ID. Template IDs of Data FlowSets are numbered 23 | // from 256 to 65535. 24 | TemplateID uint16 25 | } 26 | 27 | var sizeOfOptionsTemplateRecordHeader = unsafe.Sizeof(OptionsTemplateRecordHeader{}) 28 | 29 | // OptionsTemplateRecords is a single template that describes structure of an options Flow Record 30 | // (actual Netflow data). 31 | type OptionsTemplateRecords struct { 32 | Header *OptionsTemplateRecordHeader 33 | 34 | // List of fields in this Template Record. 
35 | Records []*TemplateRecord 36 | } 37 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/packet.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | // Package ipfix provides structures and functions to decode and analyze 13 | // IPFIX packets. 14 | // 15 | // This package does only packet decoding in a single packet context. It keeps 16 | // no state when decoding multiple packets. As a result Data FlowSets can not be 17 | // decoded during initial packet decoding. To decode Data FlowSets user must 18 | // keep track of Template Records and Options Template Records manually. 19 | // 20 | // Examples of IPFIX packets: 21 | // 22 | // 1. An IPFIX Message consisting of interleaved Template, Data, and 23 | // Options Template Sets, as shown in Figure C. Here, Template and 24 | // Options Template Sets are transmitted "on demand", before the 25 | // first Data Set whose structure they define. 26 | // 27 | // +--------+--------------------------------------------------------+ 28 | // | | +----------+ +---------+ +-----------+ +---------+ | 29 | // |Message | | Template | | Data | | Options | | Data | | 30 | // | Header | | Set | | Set | ... 
| Template | | Set | | 31 | // | | | | | | | Set | | | | 32 | // | | +----------+ +---------+ +-----------+ +---------+ | 33 | // +--------+--------------------------------------------------------+ 34 | // 35 | // +--------+----------------------------------------------+ 36 | // | | +---------+ +---------+ +---------+ | 37 | // |Message | | Data | | Data | | Data | | 38 | // | Header | | Set | ... | Set | ... | Set | | 39 | // | | +---------+ +---------+ +---------+ | 40 | // +--------+----------------------------------------------+ 41 | // 42 | // Figure D: IPFIX Message: Example 2 43 | // 44 | // 3. An IPFIX Message consisting entirely of Template and Options 45 | // Template Sets, as shown in Figure E. Such a message can be used 46 | // to define or redefine Templates and Options Templates in bulk. 47 | // 48 | // +--------+-------------------------------------------------+ 49 | // | | +----------+ +----------+ +----------+ | 50 | // |Message | | Template | | Template | | Options | | 51 | // | Header | | Set | ... | Set | ... | Template | | 52 | // | | | | | | | Set | | 53 | // | | +----------+ +----------+ +----------+ | 54 | // +--------+-------------------------------------------------+ 55 | // 56 | // Example of struct hierarchy after packet decoding: 57 | // 58 | // Package 59 | // | 60 | // +--TemplateFlowSet 61 | // | | 62 | // | +--TemplateRecord 63 | // | | | 64 | // | | +--Field 65 | // | | +--... 66 | // | | +--Field 67 | // | | 68 | // | +--... 69 | // | | 70 | // | +--TemplateRecord 71 | // | | 72 | // | +--Field 73 | // | +--... 74 | // | +--Field 75 | // | 76 | // +--DataFlowSet 77 | // | 78 | // +--... 79 | // | 80 | // +--OptionsTemplateFlowSet 81 | // | | 82 | // | +--OptionsTemplateRecord 83 | // | | | 84 | // | | +--Field (scope) 85 | // | | +--... (scope) 86 | // | | +--Field (scope) 87 | // | | | 88 | // | | +--Field (option) 89 | // | | +--... (option) 90 | // | | +--Field (option) 91 | // | | 92 | // | +--... 
93 | // | | 94 | // | +--OptionsTemplateRecord 95 | // | | 96 | // | +--Field (scope) 97 | // | +--... (scope) 98 | // | +--Field (scope) 99 | // | | 100 | // | +--Field (option) 101 | // | +--... (option) 102 | // | +--Field (option) 103 | // | 104 | // +--DataFlowSet 105 | // 106 | // When matched with appropriate template Data FlowSet can be decoded to list of 107 | // Flow Data Records or list of Options Data Records. Struct hierarchy example: 108 | // 109 | // []FlowDataRecord 110 | // | 111 | // +--FlowDataRecord 112 | // | | 113 | // | +--[]byte 114 | // | +--... 115 | // | +--[]byte 116 | // | 117 | // +--... 118 | // | 119 | // +--FlowDataRecord 120 | // | 121 | // +--[]byte 122 | // +--... 123 | // +--[]byte 124 | // 125 | // []OptionsDataRecord 126 | // | 127 | // +--OptionsDataRecord 128 | // | | 129 | // | +--[]byte (scope) 130 | // | +--... (scope) 131 | // | +--[]byte (scope) 132 | // | | 133 | // | +--[]byte (option) 134 | // | +--... (option) 135 | // | +--[]byte (option) 136 | // | 137 | // +--... 138 | // | 139 | // +--OptionsDataRecord 140 | // | 141 | // +--[]byte 142 | // +--... 143 | // +--[]byte 144 | // | 145 | // +--[]byte (option) 146 | // +--... (option) 147 | // +--[]byte (option) 148 | // 149 | // Most of structure names and comments are taken directly from RFC 7011. 150 | // Reading the IPFIX protocol specification is highly recommended before 151 | // using this package. 152 | package ipfix 153 | 154 | import "unsafe" 155 | 156 | // Header is an IPFIX message header 157 | type Header struct { 158 | // A 32-bit value that identifies the Exporter Observation Domain. 159 | DomainID uint32 160 | 161 | // Incremental sequence counter of all Export Packets sent from the 162 | // current Observation Domain by the Exporter. 163 | SequenceNumber uint32 164 | 165 | // Time in seconds since 0000 UTC 1970, at which the Export Packet 166 | // leaves the Exporter. 
167 | ExportTime uint32 168 | 169 | // The total number of bytes in this Export Packet 170 | Length uint16 171 | 172 | // Version of Flow Record format exported in this packet. The value of 173 | //this field is 9 for the current version. 174 | Version uint16 175 | } 176 | 177 | // FlowSet represents a FlowSet as described in RFC7011 178 | type FlowSet struct { 179 | Header *FlowSetHeader 180 | Records []byte 181 | } 182 | 183 | // FlowSetHeader is a decoded representation of the header of a Set 184 | type FlowSetHeader struct { 185 | Length uint16 186 | SetID uint16 187 | } 188 | 189 | var sizeOfFlowSetHeader = unsafe.Sizeof(FlowSetHeader{}) 190 | 191 | // Packet is a decoded representation of a single NetFlow v9 UDP packet. 192 | type Packet struct { 193 | // A pointer to the packets headers 194 | Header *Header 195 | 196 | // A slice of pointers to FlowSet. Each element is instance of (Data)FlowSet 197 | // found in this packet 198 | FlowSets []*FlowSet 199 | 200 | // A slice of pointers to TemplateRecords. Each element is instance of TemplateRecords 201 | // representing a template found in this packet. 202 | Templates []*TemplateRecords 203 | 204 | // A slice of pointers to OptionsTemplateRecords. Each element is instance of OptionsTemplateRecords 205 | // representing an options template found in this packet. 206 | OptionsTemplateRecords []*OptionsTemplateRecords 207 | 208 | // Buffer is a slice pointing to the original byte array that this packet was decoded from. 209 | // This field is only populated if debug level is at least 2 210 | Buffer []byte 211 | } 212 | 213 | var sizeOfHeader = unsafe.Sizeof(Header{}) 214 | 215 | // GetTemplateRecords generate a list of all Template Records in the packet. 216 | // Template Records can be used to decode Data FlowSets to Data Records. 
217 | func (p *Packet) GetTemplateRecords() []*TemplateRecords { 218 | return p.Templates 219 | } 220 | 221 | func (p *Packet) GetOptionTemplateRecords() []*OptionsTemplateRecords { 222 | return p.OptionsTemplateRecords 223 | } 224 | 225 | // DataFlowSets generate a list of all Data FlowSets in the packet. If matched 226 | // with appropriate templates Data FlowSets can be decoded to Data Records or 227 | // Options Data Records. 228 | func (p *Packet) DataFlowSets() []*FlowSet { 229 | return p.FlowSets 230 | } 231 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/template_test.go: -------------------------------------------------------------------------------- 1 | package ipfix 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestIsEnterprise(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | tmpl *TemplateRecord 13 | expected bool 14 | }{ 15 | { 16 | name: "test #1", 17 | tmpl: &TemplateRecord{ 18 | Type: 0, 19 | }, 20 | expected: false, 21 | }, 22 | { 23 | name: "test #2", 24 | tmpl: &TemplateRecord{ 25 | Type: 65535, 26 | }, 27 | expected: true, 28 | }, 29 | { 30 | name: "test #3", 31 | tmpl: &TemplateRecord{ 32 | Type: 32768, 33 | }, 34 | expected: true, 35 | }, 36 | { 37 | name: "test #4", 38 | tmpl: &TemplateRecord{ 39 | Type: 32767, 40 | }, 41 | expected: false, 42 | }, 43 | } 44 | 45 | for _, test := range tests { 46 | assert.Equal(t, test.expected, test.tmpl.isEnterprise(), test.name) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /pkg/packet/ipfix/templates.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 
4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package ipfix 13 | 14 | import ( 15 | "unsafe" 16 | ) 17 | 18 | const ( 19 | // numPreAllocFlowDataRecs is number of elements to pre allocate in DataRecs slice 20 | numPreAllocFlowDataRecs = 20 21 | ) 22 | 23 | // TemplateRecordHeader represents the header of a template record 24 | type TemplateRecordHeader struct { 25 | // Number of fields in this Template Record. Because a Template FlowSet 26 | // usually contains multiple Template Records, this field allows the 27 | // Collector to determine the end of the current Template Record and 28 | // the start of the next. 29 | FieldCount uint16 30 | 31 | // Each of the newly generated Template Records is given a unique 32 | // Template ID. This uniqueness is local to the Observation Domain that 33 | // generated the Template ID. Template IDs of Data FlowSets are numbered 34 | // from 256 to 65535. 35 | TemplateID uint16 36 | } 37 | 38 | var sizeOfTemplateRecordHeader = unsafe.Sizeof(TemplateRecordHeader{}) 39 | 40 | // TemplateRecords is a single template that describes structure of a Flow Record 41 | // (actual Netflow data). 42 | type TemplateRecords struct { 43 | Header *TemplateRecordHeader 44 | 45 | // List of fields in this Template Record. 46 | Records []*TemplateRecord 47 | } 48 | 49 | // TemplateRecord represents a Template Record as described in RFC3954 50 | type TemplateRecord struct { 51 | // The length (in bytes) of the field. 52 | Length uint16 53 | 54 | // A numeric value that represents the type of field. 
55 | Type uint16 56 | } 57 | 58 | func (tmpl *TemplateRecord) isEnterprise() bool { 59 | return tmpl.Type&0x8000 == 0x8000 60 | } 61 | 62 | // FlowDataRecord is actual NetFlow data. This structure does not contain any 63 | // information about the actual data meaning. It must be combined with 64 | // corresponding TemplateRecord to be decoded to a single NetFlow data row. 65 | type FlowDataRecord struct { 66 | // List of Flow Data Record values stored in raw format as []byte 67 | Values [][]byte 68 | } 69 | 70 | // sizeOfTemplateRecord is the raw size of a TemplateRecord 71 | var sizeOfTemplateRecord = unsafe.Sizeof(TemplateRecord{}) 72 | 73 | // DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to 74 | // a list of Flow Data Records. 75 | func DecodeFlowSet(set FlowSet, templateRecords []*TemplateRecord) (list []FlowDataRecord) { 76 | var record FlowDataRecord 77 | 78 | // Pre-allocate some room for flows 79 | list = make([]FlowDataRecord, 0, numPreAllocFlowDataRecs) 80 | 81 | // Assume total record length must be >= 4, otherwise it is impossible 82 | // to distinguish between padding and new record. Padding MUST be 83 | // supported. 
84 | n := len(set.Records) 85 | count := 0 86 | 87 | for n >= 4 { 88 | record.Values, count = parseFieldValues(set.Records[0:n], templateRecords) 89 | if record.Values == nil { 90 | return 91 | } 92 | 93 | list = append(list, record) 94 | n -= count 95 | } 96 | 97 | return list 98 | } 99 | 100 | // parseFieldValues reads actual fields values from a Data Record utilizing a template 101 | func parseFieldValues(data []byte, fields []*TemplateRecord) ([][]byte, int) { 102 | count := 0 103 | n := len(data) 104 | values := make([][]byte, len(fields)) 105 | 106 | for i, f := range fields { 107 | if n < int(f.Length) { 108 | return nil, 0 109 | } 110 | 111 | values[i] = data[n-int(f.Length) : n] 112 | count += int(f.Length) 113 | n -= int(f.Length) 114 | } 115 | 116 | return values, count 117 | } 118 | -------------------------------------------------------------------------------- /pkg/packet/nf9/decode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package nf9 13 | 14 | import ( 15 | "fmt" 16 | "net" 17 | "unsafe" 18 | 19 | "github.com/bio-routing/tflow2/convert" 20 | "github.com/pkg/errors" 21 | ) 22 | 23 | const ( 24 | // numPreAllocTmplRecs is the number of elements to pre allocate in TemplateRecords slice 25 | numPreAllocRecs = 20 26 | ) 27 | 28 | // FlowSetIDTemplateMax is the maximum FlowSetID being used for templates according to RFC3954 29 | const FlowSetIDTemplateMax = 255 30 | 31 | // TemplateFlowSetID is the FlowSetID reserved for template flow sets 32 | const TemplateFlowSetID = 0 33 | 34 | // OptionTemplateFlowSetID is the FlowSetID reserved for option template flow sets 35 | const OptionTemplateFlowSetID = 1 36 | 37 | // errorIncompatibleVersion prints an error message in case the detected version is not supported 38 | func errorIncompatibleVersion(version uint16) error { 39 | return errors.Errorf("NF9: Incompatible protocol version v%d, only v9 is supported", version) 40 | } 41 | 42 | // Decode is the main function of this package. It converts raw packet bytes to Packet struct. 43 | func Decode(raw []byte, remote net.IP) (*Packet, error) { 44 | data := convert.Reverse(raw) //TODO: Make it endian aware. 
This assumes a little endian machine 45 | 46 | pSize := len(data) 47 | bufSize := 1500 48 | buffer := [1500]byte{} 49 | 50 | if pSize > bufSize { 51 | panic("Buffer too small\n") 52 | } 53 | 54 | // copy data into array as arrays allow us to cast the shit out of it 55 | for i := 0; i < pSize; i++ { 56 | buffer[bufSize-pSize+i] = data[i] 57 | } 58 | 59 | bufferPtr := unsafe.Pointer(&buffer) 60 | bufferMinPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(pSize)) 61 | headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeader)) 62 | 63 | var packet Packet 64 | packet.Buffer = buffer[:] 65 | packet.Header = (*Header)(headerPtr) 66 | 67 | if packet.Header.Version != 9 { 68 | return nil, errorIncompatibleVersion(packet.Header.Version) 69 | } 70 | 71 | //Pre-allocate some room for templates to avoid later copying 72 | packet.Templates = make([]*TemplateRecords, 0, numPreAllocRecs) 73 | 74 | for uintptr(headerPtr) > uintptr(bufferMinPtr) { 75 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfFlowSetHeader) 76 | 77 | fls := &FlowSet{ 78 | Header: (*FlowSetHeader)(ptr), 79 | } 80 | 81 | if fls.Header.FlowSetID == TemplateFlowSetID { 82 | // Template 83 | decodeTemplate(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader, remote) 84 | } else if fls.Header.FlowSetID == OptionTemplateFlowSetID { 85 | // Option Template 86 | decodeOption(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader, remote) 87 | } else if fls.Header.FlowSetID > FlowSetIDTemplateMax { 88 | // Actual data packet 89 | decodeData(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader) 90 | } 91 | 92 | headerPtr = unsafe.Pointer(uintptr(headerPtr) - uintptr(fls.Header.Length)) 93 | } 94 | 95 | return &packet, nil 96 | } 97 | 98 | // decodeOption decodes an option template from `packet` 99 | func decodeOption(packet *Packet, end unsafe.Pointer, size uintptr, remote net.IP) { 100 | min := uintptr(end) - size 101 | 102 | for uintptr(end) > 
min { 103 | headerPtr := unsafe.Pointer(uintptr(end) - sizeOfOptionsTemplateRecordHeader) 104 | 105 | tmplRecs := &TemplateRecords{} 106 | hdr := (*OptionsTemplateRecordHeader)(unsafe.Pointer(headerPtr)) 107 | tmplRecs.Header = &TemplateRecordHeader{TemplateID: hdr.TemplateID} 108 | tmplRecs.Packet = packet 109 | tmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) 110 | 111 | ptr := headerPtr 112 | // Process option scopes 113 | for i := uint16(0); i < hdr.OptionScopeLength/uint16(sizeOfOptionScope); i++ { 114 | optScope := (*OptionScope)(ptr) 115 | tmplRecs.OptionScopes = append(tmplRecs.OptionScopes, optScope) 116 | ptr = unsafe.Pointer(uintptr(ptr) - sizeOfOptionScope) 117 | } 118 | 119 | // Process option fields 120 | for i := uint16(0); i < hdr.OptionLength/uint16(sizeOfTemplateRecord); i++ { 121 | opt := (*TemplateRecord)(ptr) 122 | tmplRecs.Records = append(tmplRecs.Records, opt) 123 | ptr = unsafe.Pointer(uintptr(ptr) - sizeOfTemplateRecord) 124 | } 125 | 126 | //packet.OptionsTemplates = append(packet.OptionsTemplates, tmplRecs) 127 | packet.Templates = append(packet.Templates, tmplRecs) 128 | 129 | end = unsafe.Pointer(uintptr(end) - uintptr(hdr.OptionScopeLength) - uintptr(hdr.OptionLength) - sizeOfOptionsTemplateRecordHeader) 130 | } 131 | } 132 | 133 | // decodeTemplate decodes a template from `packet` 134 | func decodeTemplate(packet *Packet, end unsafe.Pointer, size uintptr, remote net.IP) { 135 | min := uintptr(end) - size 136 | for uintptr(end) > min { 137 | headerPtr := unsafe.Pointer(uintptr(end) - sizeOfTemplateRecordHeader) 138 | 139 | tmplRecs := &TemplateRecords{} 140 | tmplRecs.Header = (*TemplateRecordHeader)(unsafe.Pointer(headerPtr)) 141 | tmplRecs.Packet = packet 142 | tmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) 143 | 144 | ptr := unsafe.Pointer(uintptr(headerPtr) - sizeOfTemplateRecordHeader) 145 | var i uint16 146 | for i = 0; i < tmplRecs.Header.FieldCount; i++ { 147 | rec := 
(*TemplateRecord)(unsafe.Pointer(ptr)) 148 | tmplRecs.Records = append(tmplRecs.Records, rec) 149 | ptr = unsafe.Pointer(uintptr(ptr) - sizeOfTemplateRecord) 150 | } 151 | 152 | packet.Templates = append(packet.Templates, tmplRecs) 153 | end = unsafe.Pointer(uintptr(end) - uintptr(tmplRecs.Header.FieldCount)*sizeOfTemplateRecord - sizeOfTemplateRecordHeader) 154 | } 155 | } 156 | 157 | // decodeData decodes a flowSet from `packet` 158 | func decodeData(packet *Packet, headerPtr unsafe.Pointer, size uintptr) { 159 | flsh := (*FlowSetHeader)(unsafe.Pointer(headerPtr)) 160 | data := unsafe.Pointer(uintptr(headerPtr) - uintptr(flsh.Length)) 161 | 162 | fls := &FlowSet{ 163 | Header: flsh, 164 | Flows: (*(*[1<<31 - 1]byte)(data))[sizeOfFlowSetHeader:flsh.Length], 165 | } 166 | 167 | packet.FlowSets = append(packet.FlowSets, fls) 168 | } 169 | 170 | // PrintHeader prints the header of `packet` 171 | func PrintHeader(p *Packet) { 172 | fmt.Printf("Version: %d\n", p.Header.Version) 173 | fmt.Printf("Count: %d\n", p.Header.Count) 174 | fmt.Printf("SysUpTime: %d\n", p.Header.SysUpTime) 175 | fmt.Printf("UnixSecs: %d\n", p.Header.UnixSecs) 176 | fmt.Printf("Sequence: %d\n", p.Header.SequenceNumber) 177 | fmt.Printf("SourceId: %d\n", p.Header.SourceID) 178 | } 179 | -------------------------------------------------------------------------------- /pkg/packet/nf9/decode_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | 12 | package nf9 13 | 14 | import ( 15 | "net" 16 | "testing" 17 | 18 | "github.com/bio-routing/tflow2/convert" 19 | ) 20 | 21 | /*func TestDecode(t *testing.T) { 22 | s := []byte{0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 249, 0, 6, 187, 71, 213, 103, 123, 68, 213, 103, 10, 5, 0, 0, 11, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 26, 187, 1, 239, 181, 153, 192, 66, 185, 34, 93, 13, 31, 65, 195, 66, 185, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 249, 0, 6, 183, 71, 213, 103, 7, 39, 213, 103, 224, 156, 0, 0, 153, 2, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 30, 80, 0, 105, 187, 153, 192, 66, 185, 136, 100, 80, 151, 65, 195, 66, 185, 128, 0, 221, 1, 0, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 6, 180, 71, 213, 103, 164, 62, 213, 103, 160, 0, 0, 0, 4, 0, 0, 0, 21, 0, 28, 0, 21, 0, 28, 0, 16, 80, 0, 87, 204, 185, 192, 66, 185, 147, 23, 217, 172, 93, 193, 66, 185, 64, 0, 223, 1, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 137, 0, 6, 191, 71, 213, 103, 248, 44, 213, 103, 125, 17, 0, 0, 57, 0, 0, 0, 21, 0, 72, 0, 21, 0, 72, 0, 24, 187, 1, 145, 226, 185, 192, 66, 185, 88, 160, 125, 74, 84, 193, 66, 185, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 124, 61, 213, 103, 164, 0, 0, 0, 3, 0, 0, 0, 39, 0, 22, 0, 39, 0, 22, 0, 19, 89, 216, 80, 0, 235, 5, 64, 100, 41, 193, 66, 185, 243, 121, 19, 50, 128, 0, 221, 1, 0, 0, 0, 221, 134, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 185, 71, 213, 103, 234, 62, 213, 103, 201, 53, 0, 0, 177, 0, 0, 0, 21, 0, 73, 0, 21, 0, 73, 0, 24, 187, 1, 181, 211, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 35, 0, 14, 64, 80, 20, 0, 42, 179, 79, 172, 109, 9, 172, 109, 133, 55, 19, 15, 48, 96, 34, 3, 42, 104, 0, 222, 1, 0, 0, 8, 237, 240, 
149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 226, 68, 213, 103, 201, 21, 0, 0, 18, 0, 0, 0, 116, 0, 22, 0, 116, 0, 22, 0, 26, 172, 230, 187, 1, 101, 0, 64, 100, 49, 193, 66, 185, 36, 107, 175, 54, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 222, 67, 213, 103, 211, 5, 0, 0, 6, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 27, 80, 0, 243, 165, 153, 192, 66, 185, 138, 98, 227, 172, 65, 195, 66, 185, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 188, 71, 213, 103, 188, 71, 213, 103, 122, 0, 0, 0, 1, 0, 0, 0, 184, 0, 15, 0, 184, 0, 15, 0, 24, 145, 193, 230, 15, 213, 1, 64, 100, 16, 193, 66, 185, 210, 7, 182, 193, 188, 0, 221, 1, 0, 0, 0, 221, 134, 212, 186, 30, 36, 78, 204, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 179, 71, 213, 103, 215, 49, 213, 103, 248, 17, 0, 0, 13, 0, 0, 0, 119, 0, 16, 0, 119, 0, 16, 0, 26, 2, 201, 187, 1, 220, 90, 4, 46, 254, 94, 0, 2, 0, 0, 0, 0, 0, 0, 128, 254, 34, 44, 143, 56, 96, 67, 7, 176, 0, 70, 21, 1, 96, 34, 3, 42, 142, 0, 0, 0, 12, 176, 206, 250, 14, 3, 19, 240, 128, 40, 3, 42, 104, 0, 222, 1, 0, 0, 0, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 183, 71, 213, 103, 47, 68, 213, 103, 54, 23, 0, 0, 10, 0, 0, 0, 73, 0, 22, 0, 73, 0, 22, 0, 26, 79, 154, 187, 1, 59, 4, 64, 100, 85, 193, 66, 185, 43, 156, 16, 199, 68, 0, 221, 1, 0, 0, 0, 221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 6, 179, 71, 213, 103, 179, 71, 213, 103, 61, 0, 0, 0, 1, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 16, 80, 0, 251, 209, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 16, 32, 0, 0, 0, 0, 0, 0, 33, 8, 1, 64, 80, 20, 0, 42, 159, 9, 125, 55, 155, 45, 217, 165, 2, 0, 20, 1, 96, 34, 3, 42, 100, 0, 220, 1, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 1, 179, 71, 213, 103, 19, 59, 213, 103, 152, 0, 0, 0, 2, 0, 0, 0, 15, 0, 93, 0, 15, 0, 93, 0, 3, 3, 0, 0, 153, 192, 66, 185, 119, 160, 222, 68, 
31, 194, 66, 185, 60, 0, 228, 1, 2, 0, 0, 1, 6, 0, 56, 0, 6, 0, 80, 0, 1, 0, 5, 0, 1, 0, 4, 0, 4, 0, 21, 0, 4, 0, 22, 0, 4, 0, 1, 0, 4, 0, 2, 0, 2, 0, 253, 0, 2, 0, 252, 0, 2, 0, 14, 0, 2, 0, 10, 0, 2, 0, 11, 0, 2, 0, 7, 0, 4, 0, 15, 0, 4, 0, 12, 0, 4, 0, 8, 0, 18, 0, 228, 1, 80, 0, 0, 0, 0, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 192, 71, 213, 103, 192, 71, 213, 103, 52, 0, 0, 0, 1, 0, 0, 0, 21, 0, 178, 0, 21, 0, 178, 0, 16, 187, 1, 62, 139, 185, 192, 66, 185, 168, 8, 125, 74, 54, 194, 66, 185, 68, 0, 221, 1, 0, 0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 17, 189, 71, 213, 103, 189, 71, 213, 103, 76, 0, 0, 0, 1, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 0, 123, 0, 234, 170, 153, 192, 66, 185, 221, 186, 9, 5, 65, 195, 66, 185, 64, 0, 223, 1, 0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 188, 71, 213, 103, 103, 71, 213, 103, 247, 0, 0, 0, 3, 0, 0, 0, 26, 0, 21, 0, 26, 0, 21, 0, 26, 46, 155, 80, 0, 81, 4, 64, 100, 102, 193, 66, 185, 46, 208, 58, 216, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 179, 71, 213, 103, 101, 71, 213, 103, 247, 0, 0, 0, 3, 0, 0, 0, 26, 0, 21, 0, 26, 0, 21, 0, 26, 145, 155, 80, 0, 81, 4, 64, 100, 102, 193, 66, 185, 46, 208, 58, 216, 128, 0, 221, 1, 0, 0, 0, 221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 180, 71, 213, 103, 134, 71, 213, 103, 38, 3, 0, 0, 2, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 24, 187, 1, 218, 156, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 78, 0, 1, 64, 80, 20, 0, 42, 35, 211, 203, 103, 92, 74, 192, 76, 7, 0, 20, 1, 96, 34, 3, 42, 104, 0, 222, 1, 0, 0, 0, 0, 167, 51, 204, 11, 128, 207, 118, 88, 75, 91, 213, 103, 19, 0, 9, 0} 23 | s = convert.Reverse(s) 24 | 25 | packet, err := Decode(s, net.IP([]byte{1, 1, 1, 1})) 26 | if err != nil { 27 | t.Errorf("Decoding packet failed: %v\n", err) 28 | } 29 | 30 | flowSet := []byte{0, 0, 0, 
221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 180, 71, 213, 103, 134, 71, 213, 103, 38, 3, 0, 0, 2, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 24, 187, 1, 218, 156, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 78, 0, 1, 64, 80, 20, 0, 42, 35, 211, 203, 103, 92, 74, 192, 76, 7, 0, 20, 1, 96, 34, 3, 42}

	if !testEq(packet.FlowSets[0].Flows, flowSet) {
		t.Errorf("Decoded FlowSet is not the expected one. Got: %v, Expected: %v\n", packet.FlowSets[0].Flows, flowSet)
	}
}*/

// TestDecode2 decodes a small, hand-crafted NetFlow v9 packet that carries a
// single options template FlowSet and checks that decoding succeeds.
func TestDecode2(t *testing.T) {
	s := []byte{
		8, 0, // Length
		44, 0, // Type

		8, 0, // Length
		43, 0, // Type

		8, 0, // Length
		42, 0, // Type

		8, 0, // Length
		41, 0, // Type

		4, 0, // Scope 1 Field Length
		1, 0, // Scope 1 Field Type = 1 = System

		16, 0, // OptionLength
		4, 0, // OptionScopeLength
		10, 1, // TemplateID

		30, 0, // Length
		1, 0, // FlowSetID

		0, 0, 0, 0, //Source ID
		167, 51, 204, 11, // Sequence Number
		128, 207, 118, 88, // UNIX secs
		75, 91, 213, 103, // sysUpTime
		1, 0, // Count
		9, 0} // Version
	s = convert.Reverse(s)

	_, err := Decode(s, net.IP([]byte{1, 1, 1, 1}))
	if err != nil {
		t.Errorf("Decoding packet failed: %v\n", err)
	}

}

// testEq reports whether two byte slices carry identical content. Two nil
// slices compare equal; a nil slice and a non-nil slice do not.
func testEq(a, b []byte) bool {
	// Exactly one operand being nil means inequality. Both-nil falls
	// through to the length comparison (0 == 0) and reports equal.
	if (a == nil) != (b == nil) {
		return false
	}

	if len(a) != len(b) {
		return false
	}

	for i, v := range a {
		if v != b[i] {
			return false
		}
	}

	return true
}
-------------------------------------------------------------------------------- /pkg/packet/nf9/field_db.go: --------------------------------------------------------------------------------
// Copyright 2017 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nf9

// Field type IDs used in NetFlow v9 template records, matching the field type
// definitions of the NetFlow v9 export format (RFC 3954, section 8). The
// value of a TemplateRecord.Type refers to one of these IDs.
const (
	InBytes                   = 1
	InPkts                    = 2
	Flows                     = 3
	Protocol                  = 4
	SrcTos                    = 5
	TCPFlags                  = 6
	L4SrcPort                 = 7
	IPv4SrcAddr               = 8
	SrcMask                   = 9
	InputSnmp                 = 10
	L4DstPort                 = 11
	IPv4DstAddr               = 12
	DstMask                   = 13
	OutputSnmp                = 14
	IPv4NextHop               = 15
	SrcAs                     = 16
	DstAs                     = 17
	BGPIPv4NextHop            = 18
	MulDstPkts                = 19
	MulDstBytes               = 20
	LastSwitched              = 21
	FirstSwitched             = 22
	OutBytes                  = 23
	OutPkts                   = 24
	MinPktLngth               = 25
	MaxPktLngth               = 26
	IPv6SrcAddr               = 27
	IPv6DstAddr               = 28
	IPv6SrcMask               = 29
	IPv6DstMask               = 30
	IPv6FlowLabel             = 31
	IcmpType                  = 32
	MulIgmpType               = 33
	SamplingInterval          = 34
	SamplingAlgorithm         = 35
	FlowActiveTimeout         = 36
	FlowInactiveTimeout       = 37
	EngineType                = 38
	EngineID                  = 39
	TotalBytesExp             = 40
	TotalPktsExp              = 41
	TotalFlowsExp             = 42
	VendorProprietary43       = 43
	IPv4SrcPrefix             = 44
	IPv4DstPrefix             = 45
	MplsTopLabelType          = 46
	MplsTopLabelIPAddr        = 47
	FlowSamplerID             = 48
	FlowSamplerMode           = 49
	FlowSamplerRandomInterval = 50
	VendorProprietary51       = 51
	MinTTL                    = 52
	MaxTTL                    = 53
	IPv4Ident                 = 54
	DstTos                    = 55
	InSrcMac                  = 56
	OutDstMac                 = 57
	SrcVlan                   = 58
	DstVlan                   = 59
	IPProtocolVersion         = 60
	Direction                 = 61
	IPv6NextHop               = 62
	BgpIPv6NextHop            = 63
	IPv6OptionsHeaders        = 64
	VendorProprietary65       = 65
	VendorProprietary66       = 66
	VendorProprietary67       = 67
	VendorProprietary68       = 68
	VendorProprietary69       = 69
	MplsLabel1                = 70
	MplsLabel2                = 71
	MplsLabel3                = 72
	MplsLabel4                = 73
	MplsLabel5                = 74
	MplsLabel6                = 75
	MplsLabel7                = 76
	MplsLabel8                = 77
	MplsLabel9                = 78
	MplsLabel10               = 79
	InDstMac                  = 80
	OutSrcMac                 = 81
	IfName                    = 82
	IfDesc                    = 83
	SamplerName               = 84
	InPermanentBytes          = 85
	InPermanentPkts           = 86
	VendorProprietary87       = 87
	FragmentOffset            = 88
	ForwardingStatus          = 89
	MplsPalRd                 = 90
	MplsPrefixLen             = 91
	SrcTrafficIndex           = 92
	DstTrafficIndex           = 93
	ApplicationDescription    = 94
	ApplicationTag            = 95
	ApplicationName           = 96
)
-------------------------------------------------------------------------------- /pkg/packet/nf9/packet.go: --------------------------------------------------------------------------------
// Copyright 2017 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package nf9 provides structures and functions to decode and analyze
// NetFlow v9 packets.
//
// This package does only packet decoding in a single packet context. It keeps
// no state when decoding multiple packets. As a result Data FlowSets can not be
// decoded during initial packet decoding.
To decode Data FlowSets user must 18 | // keep track of Template Records and Options Template Records manually. 19 | // 20 | // Examples of NetFlow v9 packets: 21 | // 22 | // +--------+--------------------------------------------------------+ 23 | // | | +----------+ +---------+ +-----------+ +---------+ | 24 | // | Packet | | Template | | Data | | Options | | Data | | 25 | // | Header | | FlowSet | | FlowSet | ... | Template | | FlowSet | | 26 | // | | | | | | | FlowSet | | | | 27 | // | | +----------+ +---------+ +-----------+ +---------+ | 28 | // +--------+--------------------------------------------------------+ 29 | // 30 | // +--------+----------------------------------------------+ 31 | // | | +---------+ +---------+ +---------+ | 32 | // | Packet | | Data | ... | Data | ... | Data | | 33 | // | Header | | FlowSet | ... | FlowSet | ... | FlowSet | | 34 | // | | +---------+ +---------+ +---------+ | 35 | // +--------+----------------------------------------------+ 36 | // 37 | // +--------+-------------------------------------------------+ 38 | // | | +----------+ +----------+ +----------+ | 39 | // | Packet | | Template | | Template | | Options | | 40 | // | Header | | FlowSet | ... | FlowSet | ... | Template | | 41 | // | | | | | | | FlowSet | | 42 | // | | +----------+ +----------+ +----------+ | 43 | // +--------+-------------------------------------------------+ 44 | // 45 | // Example of struct hierarchy after packet decoding: 46 | // Package 47 | // | 48 | // +--TemplateFlowSet 49 | // | | 50 | // | +--TemplateRecord 51 | // | | | 52 | // | | +--Field 53 | // | | +--... 54 | // | | +--Field 55 | // | | 56 | // | +--... 57 | // | | 58 | // | +--TemplateRecord 59 | // | | 60 | // | +--Field 61 | // | +--... 62 | // | +--Field 63 | // | 64 | // +--DataFlowSet 65 | // | 66 | // +--... 67 | // | 68 | // +--OptionsTemplateFlowSet 69 | // | | 70 | // | +--OptionsTemplateRecord 71 | // | | | 72 | // | | +--Field (scope) 73 | // | | +--... 
(scope) 74 | // | | +--Field (scope) 75 | // | | | 76 | // | | +--Field (option) 77 | // | | +--... (option) 78 | // | | +--Field (option) 79 | // | | 80 | // | +--... 81 | // | | 82 | // | +--OptionsTemplateRecord 83 | // | | 84 | // | +--Field (scope) 85 | // | +--... (scope) 86 | // | +--Field (scope) 87 | // | | 88 | // | +--Field (option) 89 | // | +--... (option) 90 | // | +--Field (option) 91 | // | 92 | // +--DataFlowSet 93 | // 94 | // When matched with appropriate template Data FlowSet can be decoded to list of 95 | // Flow Data Records or list of Options Data Records. Struct hierarchy example: 96 | // 97 | // []FlowDataRecord 98 | // | 99 | // +--FlowDataRecord 100 | // | | 101 | // | +--[]byte 102 | // | +--... 103 | // | +--[]byte 104 | // | 105 | // +--... 106 | // | 107 | // +--FlowDataRecord 108 | // | 109 | // +--[]byte 110 | // +--... 111 | // +--[]byte 112 | // 113 | // []OptionsDataRecord 114 | // | 115 | // +--OptionsDataRecord 116 | // | | 117 | // | +--[]byte (scope) 118 | // | +--... (scope) 119 | // | +--[]byte (scope) 120 | // | | 121 | // | +--[]byte (option) 122 | // | +--... (option) 123 | // | +--[]byte (option) 124 | // | 125 | // +--... 126 | // | 127 | // +--OptionsDataRecord 128 | // | 129 | // +--[]byte 130 | // +--... 131 | // +--[]byte 132 | // | 133 | // +--[]byte (option) 134 | // +--... (option) 135 | // +--[]byte (option) 136 | // 137 | // Most of structure names and comments are taken directly from RFC 3954. 138 | // Reading the NetFlow v9 protocol specification is highly recommended before 139 | // using this package. 140 | package nf9 141 | 142 | import "unsafe" 143 | 144 | // Header is the NetFlow version 9 header 145 | type Header struct { 146 | // A 32-bit value that identifies the Exporter Observation Domain. 147 | SourceID uint32 148 | 149 | // Incremental sequence counter of all Export Packets sent from the 150 | // current Observation Domain by the Exporter. 
	// NOTE(review): the fields of Header are declared in reverse wire
	// order because Decode byte-reverses the whole packet before casting
	// the buffer onto this struct (Version is first on the wire).
	SequenceNumber uint32

	// Time in seconds since 0000 UTC 1970, at which the Export Packet
	// leaves the Exporter.
	UnixSecs uint32

	// Time in milliseconds since this device was first booted.
	SysUpTime uint32

	// The total number of records in the Export Packet, which is the sum
	// of Options FlowSet records, Template FlowSet records, and Data
	// FlowSet records.
	Count uint16

	// Version of Flow Record format exported in this packet. The value of
	// this field is 9 for the current version.
	Version uint16
}

// FlowSet represents a FlowSet as described in RFC3954
type FlowSet struct {
	Header *FlowSetHeader

	// Raw bytes of the FlowSet's records, without the FlowSet header
	// (see decodeData, which slices the header off).
	Flows []byte
}

// FlowSetHeader is a decoded representation of the header of a FlowSet
type FlowSetHeader struct {
	Length    uint16
	FlowSetID uint16
}

var sizeOfFlowSetHeader = unsafe.Sizeof(FlowSetHeader{})

// Packet is a decoded representation of a single NetFlow v9 UDP packet.
type Packet struct {
	// A pointer to the packets headers
	Header *Header

	// A slice of pointers to FlowSet. Each element is instance of (Data)FlowSet
	// found in this packet
	FlowSets []*FlowSet

	// A slice of pointers to TemplateRecords. Each element is instance of TemplateRecords
	// representing a template found in this packet.
	Templates []*TemplateRecords

	// Buffer is a slice pointing to the original byte array that this packet was decoded from.
	// NOTE(review): a previous comment claimed this is only populated at
	// debug level >= 2, but Decode currently always sets it — confirm.
	Buffer []byte
}

var sizeOfHeader = unsafe.Sizeof(Header{})

// GetTemplateRecords returns a list of all Template Records in the packet.
// Template Records can be used to decode Data FlowSets to Data Records.
func (p *Packet) GetTemplateRecords() []*TemplateRecords {
	return p.Templates
}

// DataFlowSets generate a list of all Data FlowSets in the packet. If matched
// with appropriate templates Data FlowSets can be decoded to Data Records or
// Options Data Records.
func (p *Packet) DataFlowSets() []*FlowSet {
	return p.FlowSets
}
-------------------------------------------------------------------------------- /pkg/packet/nf9/templates.go: --------------------------------------------------------------------------------
// Copyright 2017 Google Inc. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nf9

import "unsafe"

const (
	// numPreAllocFlowDataRecs is number of elements to pre allocate in DataRecs slice
	numPreAllocFlowDataRecs = 20
)

// Raw wire sizes of the fixed-size header structs, used for pointer
// arithmetic while decoding.
var (
	sizeOfTemplateRecordHeader        = unsafe.Sizeof(TemplateRecordHeader{})
	sizeOfOptionsTemplateRecordHeader = unsafe.Sizeof(OptionsTemplateRecordHeader{})
	sizeOfOptionScope                 = unsafe.Sizeof(OptionScope{})
)

// TemplateRecordHeader represents the header of a template record
type TemplateRecordHeader struct {
	// Number of fields in this Template Record. Because a Template FlowSet
	// usually contains multiple Template Records, this field allows the
	// Collector to determine the end of the current Template Record and
	// the start of the next.
	FieldCount uint16

	// Each of the newly generated Template Records is given a unique
	// Template ID. This uniqueness is local to the Observation Domain that
	// generated the Template ID. Template IDs of Data FlowSets are numbered
	// from 256 to 65535.
	TemplateID uint16
}

// OptionsTemplateRecordHeader represents the header of an option template record
type OptionsTemplateRecordHeader struct {
	// The length (in bytes) of any options field definitions
	// contained in this Options Template Record.
	OptionLength uint16

	// The length (in bytes) of the scope field definitions contained in
	// this Options Template Record. decodeOption divides this by the size
	// of an OptionScope to obtain the number of scopes.
	OptionScopeLength uint16

	// Each of the newly generated Template Records is given a unique
	// Template ID. This uniqueness is local to the Observation Domain that
	// generated the Template ID. Template IDs of Data FlowSets are numbered
	// from 256 to 65535.
	TemplateID uint16
}

// TemplateRecords is a single template that describes structure of a Flow Record
// (actual Netflow data).
type TemplateRecords struct {
	Header *TemplateRecordHeader

	// List of scopes
	OptionScopes []*OptionScope

	// List of fields in this Template Record.
	Records []*TemplateRecord

	// Packet points back to the packet this template was decoded from
	// (set by decodeTemplate/decodeOption).
	Packet *Packet

	// Values holds raw field values; its intended use is not evident from
	// this file — TODO(review): confirm against callers.
	Values [][]byte
}

// OptionScope represents an option scope in an options template flowset
type OptionScope struct {
	// The length (in bytes) of the Scope field, as it would appear in
	//an Options Data Record.
81 | ScopeFieldLength uint16 82 | 83 | //A numeric value that represents the type of field that would 84 | //appear in the Options Template Record. Refer to the Field Type 85 | //Definitions section. 86 | ScopeFieldType uint16 87 | } 88 | 89 | //TemplateRecord represents a Template Record as described in RFC3954 90 | type TemplateRecord struct { 91 | // The length (in bytes) of the field. 92 | Length uint16 93 | 94 | // A numeric value that represents the type of field. 95 | Type uint16 96 | } 97 | 98 | // FlowDataRecord is actual NetFlow data. This structure does not contain any 99 | // information about the actual data meaning. It must be combined with 100 | // corresponding TemplateRecord to be decoded to a single NetFlow data row. 101 | type FlowDataRecord struct { 102 | // List of Flow Data Record values stored in raw format as []byte 103 | Values [][]byte 104 | } 105 | 106 | // sizeOfTemplateRecord is the raw size of a TemplateRecord 107 | var sizeOfTemplateRecord = unsafe.Sizeof(TemplateRecord{}) 108 | 109 | // DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to 110 | // a list of Flow Data Records. 111 | /*func (dtpl *TemplateRecords) DecodeFlowSet(set FlowSet) (list []FlowDataRecord) { 112 | if set.Header.FlowSetID != dtpl.Header.TemplateID { 113 | return nil 114 | } 115 | var record FlowDataRecord 116 | 117 | // Pre-allocate some room for flows 118 | list = make([]FlowDataRecord, 0, numPreAllocFlowDataRecs) 119 | 120 | // Assume total record length must be >= 4, otherwise it is impossible 121 | // to distinguish between padding and new record. Padding MUST be 122 | // supported. 
123 | n := len(set.Flows) 124 | count := 0 125 | 126 | for n >= 4 { 127 | record.Values, count = parseFieldValues(set.Flows[0:n], dtpl.Records) 128 | if record.Values == nil { 129 | return 130 | } 131 | list = append(list, record) 132 | n = n - count 133 | } 134 | 135 | return 136 | }*/ 137 | 138 | // DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to 139 | // a list of Flow Data Records. 140 | func DecodeFlowSet(templateRecords []*TemplateRecord, set FlowSet) (list []FlowDataRecord) { 141 | var record FlowDataRecord 142 | 143 | // Pre-allocate some room for flows 144 | list = make([]FlowDataRecord, 0, numPreAllocFlowDataRecs) 145 | 146 | // Assume total record length must be >= 4, otherwise it is impossible 147 | // to distinguish between padding and new record. Padding MUST be 148 | // supported. 149 | n := len(set.Flows) 150 | count := 0 151 | 152 | for n >= 4 { 153 | record.Values, count = parseFieldValues(set.Flows[0:n], templateRecords) 154 | if record.Values == nil { 155 | return 156 | } 157 | list = append(list, record) 158 | n = n - count 159 | } 160 | 161 | return 162 | } 163 | 164 | // parseFieldValues reads actual fields values from a Data Record utilizing a template 165 | func parseFieldValues(flows []byte, fields []*TemplateRecord) ([][]byte, int) { 166 | count := 0 167 | n := len(flows) 168 | values := make([][]byte, len(fields)) 169 | for i, f := range fields { 170 | if n < int(f.Length) { 171 | return nil, 0 172 | } 173 | values[i] = flows[n-int(f.Length) : n] 174 | count += int(f.Length) 175 | n -= int(f.Length) 176 | } 177 | return values, count 178 | } 179 | -------------------------------------------------------------------------------- /pkg/packet/packet/dot1q.go: -------------------------------------------------------------------------------- 1 | package packet 2 | 3 | import ( 4 | "fmt" 5 | "unsafe" 6 | ) 7 | 8 | var ( 9 | // SizeOfDot1Q is the size of an Dot1Q header in bytes 10 | SizeOfDot1Q = 
unsafe.Sizeof(Dot1Q{}) 11 | ) 12 | 13 | // Dot1Q represents an 802.1q header 14 | type Dot1Q struct { 15 | EtherType uint16 16 | TCI uint16 17 | } 18 | 19 | // DecodeDot1Q decodes an 802.1q header 20 | func DecodeDot1Q(raw unsafe.Pointer, length uint32) (*Dot1Q, error) { 21 | if SizeOfEthernetII > uintptr(length) { 22 | return nil, fmt.Errorf("frame is too short: %d", length) 23 | } 24 | 25 | ptr := unsafe.Pointer(uintptr(raw) - SizeOfDot1Q) 26 | dot1qHeader := (*Dot1Q)(ptr) 27 | 28 | return dot1qHeader, nil 29 | } 30 | -------------------------------------------------------------------------------- /pkg/packet/packet/ethernet.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package packet 13 | 14 | import ( 15 | "github.com/pkg/errors" 16 | "net" 17 | "unsafe" 18 | 19 | "github.com/bio-routing/tflow2/convert" 20 | ) 21 | 22 | const ( 23 | // EtherTypeARP is Address Resolution Protocol EtherType value 24 | EtherTypeARP = 0x0806 25 | 26 | // EtherTypeIPv4 is Internet Protocol version 4 EtherType value 27 | EtherTypeIPv4 = 0x0800 28 | 29 | // EtherTypeIPv6 is Internet Protocol Version 6 EtherType value 30 | EtherTypeIPv6 = 0x86DD 31 | 32 | // EtherTypeLACP is Link Aggregation Control Protocol EtherType value 33 | EtherTypeLACP = 0x8809 34 | 35 | // EtherTypeIEEE8021Q is VLAN-tagged frame (IEEE 802.1Q) EtherType value 36 | EtherTypeIEEE8021Q = 0x8100 37 | ) 38 | 39 | var ( 40 | // SizeOfEthernetII is the size of an EthernetII header in bytes 41 | SizeOfEthernetII = unsafe.Sizeof(ethernetII{}) 42 | ) 43 | 44 | // EthernetHeader represents layer two IEEE 802.11 45 | type EthernetHeader struct { 46 | SrcMAC net.HardwareAddr 47 | DstMAC net.HardwareAddr 48 | EtherType uint16 49 | } 50 | 51 | type ethernetII struct { 52 | EtherType uint16 53 | SrcMAC [6]byte 54 | DstMAC [6]byte 55 | } 56 | 57 | // DecodeEthernet decodes an EthernetII header 58 | func DecodeEthernet(raw unsafe.Pointer, length uint32) (*EthernetHeader, error) { 59 | if SizeOfEthernetII > uintptr(length) { 60 | return nil, errors.Errorf("Frame is too short: %d", length) 61 | } 62 | 63 | ptr := unsafe.Pointer(uintptr(raw) - SizeOfEthernetII) 64 | ethHeader := (*ethernetII)(ptr) 65 | 66 | srcMAC := ethHeader.SrcMAC[:] 67 | dstMAC := ethHeader.DstMAC[:] 68 | 69 | srcMAC = convert.Reverse(srcMAC) 70 | dstMAC = convert.Reverse(dstMAC) 71 | 72 | h := &EthernetHeader{ 73 | SrcMAC: net.HardwareAddr(srcMAC), 74 | DstMAC: net.HardwareAddr(dstMAC), 75 | EtherType: ethHeader.EtherType, 76 | } 77 | 78 | return h, nil 79 | } 80 | -------------------------------------------------------------------------------- /pkg/packet/packet/ethernet_test.go: 
-------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 11 | package packet 12 | 13 | import ( 14 | "testing" 15 | "unsafe" 16 | ) 17 | 18 | func TestDecode(t *testing.T) { 19 | data := []byte{ 20 | 128, // Header Length 21 | 92, 180, 133, 203, // ACK Number 22 | 31, 4, 191, 24, // Sequence Number 23 | 222, 148, // DST port 24 | 80, 0, // SRC port 25 | 26 | 19, 131, 191, 87, // DST IP 27 | 238, 153, 37, 185, // SRC IP 28 | 186, 25, // Header Checksum 29 | 6, // Protocol 30 | 62, // TTL 31 | 0, 64, // Flags + Fragment offset 32 | 131, 239, // Identifier 33 | 212, 5, // Total Length 34 | 0, // TOS 35 | 69, // Version + Length 36 | 37 | 0, 8, // EtherType 38 | 185, 28, 4, 113, 78, 32, // Source MAC 39 | 148, 2, 127, 31, 113, 128, // Destination MAC 40 | } 41 | 42 | pSize := len(data) 43 | bufSize := 128 44 | buffer := [128]byte{} 45 | 46 | if pSize > bufSize { 47 | panic("Buffer too small\n") 48 | } 49 | 50 | // copy data into array as arrays allow us to cast the shit out of it 51 | for i := 0; i < pSize; i++ { 52 | buffer[bufSize-pSize+i] = data[i] 53 | } 54 | 55 | bufferPtr := unsafe.Pointer(&buffer) 56 | headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize)) 57 | 58 | etherHeader, err := DecodeEthernet(headerPtr, 128) 59 | if err != nil { 60 | t.Errorf("Decoding packet failed: %v\n", err) 61 | } 62 | 63 
| if etherHeader.DstMAC.String() != "80:71:1f:7f:02:94" { 64 | t.Errorf("Unexpected DST MAC address. Expected %s. Got %s", "80:71:1f:7f:02:94", etherHeader.DstMAC.String()) 65 | } 66 | 67 | if etherHeader.SrcMAC.String() != "20:4e:71:04:1c:b9" { 68 | t.Errorf("Unexpected DST MAC address. Expected %s. Got %s", "20:4e:71:04:1c:b9", etherHeader.SrcMAC.String()) 69 | } 70 | 71 | if etherHeader.EtherType != EtherTypeIPv4 { 72 | t.Errorf("Unexpected ethertyp. Expected %d. Got %d", EtherTypeIPv4, etherHeader.EtherType) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /pkg/packet/packet/ipv4.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package packet 13 | 14 | import ( 15 | "unsafe" 16 | 17 | "github.com/pkg/errors" 18 | ) 19 | 20 | var ( 21 | SizeOfIPv4Header = unsafe.Sizeof(IPv4Header{}) 22 | ) 23 | 24 | type IPv4Header struct { 25 | DstAddr [4]byte 26 | SrcAddr [4]byte 27 | HeaderChecksum uint16 28 | Protocol uint8 29 | TTL uint8 30 | FlagsFragmentOffset uint16 31 | Identification uint16 32 | TotalLength uint16 33 | TOS uint8 34 | VersionHeaderLength uint8 35 | } 36 | 37 | func DecodeIPv4(raw unsafe.Pointer, length uint32) (*IPv4Header, error) { 38 | if SizeOfIPv4Header > uintptr(length) { 39 | return nil, errors.Errorf("frame is too short: %d", length) 40 | } 41 | 42 | return (*IPv4Header)(unsafe.Pointer(uintptr(raw) - SizeOfIPv4Header)), nil 43 | } 44 | -------------------------------------------------------------------------------- /pkg/packet/packet/ipv6.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package packet 13 | 14 | import ( 15 | "github.com/pkg/errors" 16 | "unsafe" 17 | ) 18 | 19 | var ( 20 | SizeOfIPv6Header = unsafe.Sizeof(IPv6Header{}) 21 | ) 22 | 23 | type IPv6Header struct { 24 | DstAddr [16]byte 25 | SrcAddr [16]byte 26 | HopLimit uint8 27 | NextHeader uint8 28 | PayloadLength uint16 29 | VersionTrafficClassFlowLabel uint32 30 | } 31 | 32 | func DecodeIPv6(raw unsafe.Pointer, length uint32) (*IPv6Header, error) { 33 | if SizeOfIPv6Header > uintptr(length) { 34 | return nil, errors.Errorf("Frame is too short: %d", length) 35 | } 36 | 37 | return (*IPv6Header)(unsafe.Pointer(uintptr(raw) - SizeOfIPv6Header)), nil 38 | } 39 | -------------------------------------------------------------------------------- /pkg/packet/packet/tcp.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package packet 13 | 14 | import ( 15 | "unsafe" 16 | 17 | "github.com/pkg/errors" 18 | ) 19 | 20 | const ( 21 | TCP = 6 22 | ) 23 | 24 | var ( 25 | SizeOfTCPHeader = unsafe.Sizeof(TCPHeader{}) 26 | ) 27 | 28 | type TCPHeader struct { 29 | UrgentPointer uint16 30 | Checksum uint16 31 | Window uint16 32 | Flags uint8 33 | DataOffset uint8 34 | ACKNumber uint32 35 | SequenceNumber uint32 36 | DstPort uint16 37 | SrcPort uint16 38 | } 39 | 40 | func DecodeTCP(raw unsafe.Pointer, length uint32) (*TCPHeader, error) { 41 | if SizeOfTCPHeader > uintptr(length) { 42 | return nil, errors.Errorf("Frame is too short: %d", length) 43 | } 44 | 45 | return (*TCPHeader)(unsafe.Pointer(uintptr(raw) - SizeOfTCPHeader)), nil 46 | } 47 | -------------------------------------------------------------------------------- /pkg/packet/packet/udp.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package packet 13 | 14 | import ( 15 | "unsafe" 16 | 17 | "github.com/pkg/errors" 18 | ) 19 | 20 | const ( 21 | // UDP IP protocol number 22 | UDP = 17 23 | ) 24 | 25 | var ( 26 | // SizeOfUDPHeader is the size of a UDP header in bytes 27 | SizeOfUDPHeader = unsafe.Sizeof(UDPHeader{}) 28 | ) 29 | 30 | // UDPHeader represents a UDP header 31 | type UDPHeader struct { 32 | Checksum uint16 33 | Length uint16 34 | DstPort uint16 35 | SrcPort uint16 36 | } 37 | 38 | // DecodeUDP decodes a UDP header 39 | func DecodeUDP(raw unsafe.Pointer, length uint32) (*UDPHeader, error) { 40 | if SizeOfTCPHeader > uintptr(length) { 41 | return nil, errors.Errorf("Frame is too short: %d", length) 42 | } 43 | 44 | return (*UDPHeader)(unsafe.Pointer(uintptr(raw) - SizeOfUDPHeader)), nil 45 | } 46 | -------------------------------------------------------------------------------- /pkg/packet/sflow/decode.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | 12 | package sflow 13 | 14 | import ( 15 | "net" 16 | "unsafe" 17 | 18 | "github.com/bio-routing/tflow2/convert" 19 | "github.com/pkg/errors" 20 | 21 | log "github.com/sirupsen/logrus" 22 | ) 23 | 24 | const ( 25 | dataFlowSample = 1 26 | expandedFlowSample = 3 27 | dataCounterSample = 2 28 | standardSflow = 0 29 | rawPacketHeader = 1 30 | extendedSwitchData = 1001 31 | extendedRouterData = 1002 32 | ) 33 | 34 | // errorIncompatibleVersion prints an error message in case the detected version is not supported 35 | func errorIncompatibleVersion(version uint32) error { 36 | return errors.Errorf("Sflow: Incompatible protocol version v%d, only v5 is supported", version) 37 | } 38 | 39 | // Decode is the main function of this package. It converts raw packet bytes to Packet struct. 40 | func Decode(raw []byte) (*Packet, error) { 41 | data := convert.Reverse(raw) //TODO: Make it endian aware. This assumes a little endian machine 42 | 43 | pSize := len(data) 44 | bufSize := 1500 45 | buffer := [1500]byte{} 46 | 47 | if pSize > bufSize { 48 | panic("Buffer too small\n") 49 | } 50 | 51 | // copy data into array as arrays allow us to cast the shit out of it 52 | for i := 0; i < pSize; i++ { 53 | buffer[bufSize-pSize+i] = data[i] 54 | } 55 | 56 | bufferPtr := unsafe.Pointer(&buffer) 57 | //bufferMinPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(pSize)) 58 | headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeaderTop)) 59 | 60 | var p Packet 61 | p.Buffer = buffer[:] 62 | p.headerTop = (*headerTop)(headerPtr) 63 | 64 | if p.headerTop.Version != 5 { 65 | return nil, errorIncompatibleVersion(p.Header.Version) 66 | } 67 | 68 | agentAddressLen := uint64(0) 69 | switch p.headerTop.AgentAddressType { 70 | default: 71 | return nil, errors.Errorf("Unknown AgentAddressType %d", p.headerTop.AgentAddressType) 72 | case 1: 73 | agentAddressLen = 4 74 | case 2: 75 | agentAddressLen = 16 76 | } 77 | 78 | headerBottomPtr := 
unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeaderTop) - uintptr(agentAddressLen) - uintptr(sizeOfHeaderBottom)) 79 | p.headerBottom = (*headerBottom)(headerBottomPtr) 80 | 81 | h := Header{ 82 | Version: p.headerTop.Version, 83 | AgentAddressType: p.headerTop.AgentAddressType, 84 | AgentAddress: getNetIP(headerPtr, agentAddressLen), 85 | SubAgentID: p.headerBottom.SubAgentID, 86 | SequenceNumber: p.headerBottom.SequenceNumber, 87 | SysUpTime: p.headerBottom.SysUpTime, 88 | NumSamples: p.headerBottom.NumSamples, 89 | } 90 | p.Header = &h 91 | 92 | flowSamples, err := decodeFlowSamples(headerBottomPtr, h.NumSamples) 93 | if err != nil { 94 | return nil, errors.Wrap(err, "Unable to dissect flows") 95 | } 96 | p.FlowSamples = flowSamples 97 | 98 | return &p, nil 99 | } 100 | 101 | func extractEnterpriseFormat(sfType uint32) (sfTypeEnterprise uint32, sfTypeFormat uint32) { 102 | return sfType >> 12, sfType & 0xfff 103 | } 104 | 105 | func decodeFlowSamples(samplesPtr unsafe.Pointer, NumSamples uint32) ([]*FlowSample, error) { 106 | flowSamples := make([]*FlowSample, 0) 107 | for i := uint32(0); i < NumSamples; i++ { 108 | sfTypeEnterprise, sfTypeFormat := extractEnterpriseFormat(*(*uint32)(unsafe.Pointer(uintptr(samplesPtr) - uintptr(4)))) 109 | 110 | if sfTypeEnterprise != 0 { 111 | return nil, errors.Errorf("Unknown Enterprise: %d", sfTypeEnterprise) 112 | } 113 | 114 | sampleLengthPtr := unsafe.Pointer(uintptr(samplesPtr) - uintptr(8)) 115 | sampleLength := *(*uint32)(sampleLengthPtr) 116 | 117 | if sfTypeFormat == dataFlowSample { 118 | fs, err := decodeFlowSample(samplesPtr) 119 | if err != nil { 120 | return nil, errors.Wrap(err, "Unable to decode flow sample") 121 | } 122 | flowSamples = append(flowSamples, fs) 123 | } else if sfTypeFormat == expandedFlowSample { 124 | fs, err := decodeExpandedFlowSample(samplesPtr) 125 | if err != nil { 126 | return nil, errors.Wrap(err, "Unable to decode flow sample") 127 | } 128 | flowSamples = 
append(flowSamples, fs) 129 | } 130 | 131 | samplesPtr = unsafe.Pointer(uintptr(samplesPtr) - uintptr(sampleLength+8)) 132 | } 133 | 134 | return flowSamples, nil 135 | } 136 | 137 | func decodeFlowSample(flowSamplePtr unsafe.Pointer) (*FlowSample, error) { 138 | flowSamplePtr = unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(sizeOfFlowSampleHeader)) 139 | fsh := (*FlowSampleHeader)(flowSamplePtr) 140 | 141 | return _decodeFlowSample(flowSamplePtr, fsh) 142 | } 143 | 144 | func decodeExpandedFlowSample(flowSamplePtr unsafe.Pointer) (*FlowSample, error) { 145 | flowSamplePtr = unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(sizeOfExpandedFlowSampleHeader)) 146 | fsh := (*ExpandedFlowSampleHeader)(flowSamplePtr).toFlowSampleHeader() 147 | 148 | return _decodeFlowSample(flowSamplePtr, fsh) 149 | } 150 | 151 | func _decodeFlowSample(flowSamplePtr unsafe.Pointer, fsh *FlowSampleHeader) (*FlowSample, error) { 152 | var rph *RawPacketHeader 153 | var rphd unsafe.Pointer 154 | var erd *ExtendedRouterData 155 | var esd *ExtendedSwitchData 156 | 157 | for i := uint32(0); i < fsh.FlowRecord; i++ { 158 | sfTypeEnterprise, sfTypeFormat := extractEnterpriseFormat(*(*uint32)(unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(4)))) 159 | flowDataLength := *(*uint32)(unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(8))) 160 | 161 | if sfTypeEnterprise == standardSflow { 162 | var err error 163 | switch sfTypeFormat { 164 | case rawPacketHeader: 165 | rph = decodeRawPacketHeader(flowSamplePtr) 166 | rphd = unsafe.Pointer(uintptr(flowSamplePtr) - sizeOfRawPacketHeader) 167 | 168 | case extendedRouterData: 169 | erd, err = decodeExtendRouterData(flowSamplePtr) 170 | if err != nil { 171 | return nil, errors.Wrap(err, "Unable to decide extended router data") 172 | } 173 | 174 | case extendedSwitchData: 175 | esd, err = decodeExtendedSwitchData(flowSamplePtr) 176 | if err != nil { 177 | return nil, errors.Wrap(err, "Unable to decide extended switch data") 178 | } 179 | 180 | default: 181 | 
log.Infof("Unknown sfTypeFormat %d\n", sfTypeFormat) 182 | } 183 | 184 | } 185 | 186 | flowSamplePtr = unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(8) - uintptr(flowDataLength)) 187 | } 188 | 189 | fs := &FlowSample{ 190 | FlowSampleHeader: fsh, 191 | RawPacketHeader: rph, 192 | Data: rphd, 193 | DataLen: rph.OriginalPacketLength, 194 | ExtendedSwitchData: esd, 195 | ExtendedRouterData: erd, 196 | } 197 | 198 | return fs, nil 199 | } 200 | 201 | func decodeRawPacketHeader(rphPtr unsafe.Pointer) *RawPacketHeader { 202 | rphPtr = unsafe.Pointer(uintptr(rphPtr) - uintptr(sizeOfRawPacketHeader)) 203 | rph := (*RawPacketHeader)(rphPtr) 204 | return rph 205 | } 206 | 207 | func decodeExtendRouterData(erhPtr unsafe.Pointer) (*ExtendedRouterData, error) { 208 | erhTopPtr := unsafe.Pointer(uintptr(erhPtr) - uintptr(sizeOfextendedRouterDataTop)) 209 | erhTop := (*extendedRouterDataTop)(erhTopPtr) 210 | 211 | addressLen := uint64(0) 212 | switch erhTop.AddressType { 213 | default: 214 | return nil, errors.Errorf("Unknown AgentAddressType %d", erhTop.AddressType) 215 | case 1: 216 | addressLen = 4 217 | case 2: 218 | addressLen = 16 219 | } 220 | 221 | erhBottomPtr := unsafe.Pointer(uintptr(erhTopPtr) - uintptr(sizeOfextendedRouterDataBottom) - uintptr(addressLen) - uintptr(sizeOfextendedRouterDataBottom)) 222 | erhBottom := (*extendedRouterDataBottom)(erhBottomPtr) 223 | 224 | return &ExtendedRouterData{ 225 | EnterpriseType: erhTop.EnterpriseType, 226 | FlowDataLength: erhTop.FlowDataLength, 227 | AddressType: erhTop.AddressType, 228 | NextHop: getNetIP(unsafe.Pointer(uintptr(erhTopPtr)), addressLen), 229 | NextHopSourceMask: erhBottom.NextHopSourceMask, 230 | NextHopDestinationMask: erhBottom.NextHopDestinationMask, 231 | }, nil 232 | } 233 | 234 | func decodeExtendedSwitchData(eshPtr unsafe.Pointer) (*ExtendedSwitchData, error) { 235 | eshPtr = unsafe.Pointer(uintptr(eshPtr) - uintptr(sizeOfExtendedSwitchData)) 236 | esh := (*ExtendedSwitchData)(eshPtr) 237 | 238 | 
eshCopy := *esh 239 | return &eshCopy, nil 240 | } 241 | 242 | func getNetIP(headerPtr unsafe.Pointer, addressLen uint64) net.IP { 243 | ptr := unsafe.Pointer(uintptr(headerPtr) - uintptr(1)) 244 | addr := make([]byte, addressLen) 245 | for i := uint64(0); i < addressLen; i++ { 246 | addr[i] = *(*byte)(unsafe.Pointer(uintptr(ptr) - uintptr(i))) 247 | } 248 | 249 | return net.IP(addr) 250 | } 251 | -------------------------------------------------------------------------------- /pkg/packet/sflow/decode_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 EXARING AG. All Rights Reserved. 2 | // Licensed under the Apache License, Version 2.0 (the "License"); 3 | // you may not use this file except in compliance with the License. 4 | // You may obtain a copy of the License at 5 | // http://www.apache.org/licenses/LICENSE-2.0 6 | // Unless required by applicable law or agreed to in writing, software 7 | // distributed under the License is distributed on an "AS IS" BASIS, 8 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | // See the License for the specific language governing permissions and 10 | // limitations under the License. 
11 | package sflow 12 | 13 | import ( 14 | "fmt" 15 | "testing" 16 | 17 | "github.com/bio-routing/tflow2/convert" 18 | ) 19 | 20 | func TestDecode(t *testing.T) { 21 | s := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 0, 0, 0, 32, 0, 0, 0, 62, 190, 59, 194, 1, 0, 0, 0, 16, 0, 0, 0, 234, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 237, 199, 45, 191, 139, 110, 125, 230, 182, 29, 57, 172, 218, 131, 46, 119, 222, 169, 239, 221, 168, 115, 245, 18, 162, 61, 247, 165, 225, 137, 141, 210, 165, 115, 237, 171, 115, 10, 153, 41, 121, 49, 57, 188, 199, 201, 25, 85, 91, 144, 240, 211, 169, 192, 41, 161, 202, 222, 113, 99, 33, 78, 210, 92, 70, 28, 134, 39, 126, 255, 10, 8, 1, 1, 0, 0, 118, 202, 230, 1, 16, 128, 78, 151, 101, 60, 114, 24, 235, 218, 161, 4, 80, 0, 127, 251, 90, 95, 2, 153, 37, 185, 194, 50, 6, 63, 0, 64, 128, 86, 180, 5, 0, 69, 0, 8, 236, 43, 4, 113, 78, 32, 82, 114, 59, 217, 103, 216, 128, 0, 0, 0, 4, 0, 0, 0, 198, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 190, 2, 0, 0, 168, 2, 0, 0, 0, 0, 0, 0, 64, 127, 94, 90, 224, 3, 0, 0, 144, 2, 0, 0, 197, 164, 97, 81, 232, 0, 0, 0, 1, 0, 0, 0, 22, 0, 0, 0, 22 | 32, 0, 0, 0, 23 | 62, 190, 59, 194, // 
Next-Hop 24 | 1, 0, 0, 0, // Address Family 25 | 16, 0, 0, 0, // Flow Data Length 26 | 234, 3, 0, 0, // Enterprise/Type (Extended router data) 27 | 28 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 75, 93, 7, 11, 45, 17, 165, 149, 120, 168, 247, 10, 136, 114, 169, 85, 104, 20, 124, 203, 71, 138, 96, 64, 49, 131, 198, 14, 182, 117, 228, 255, 19, 147, 111, 15, 10, 33, 225, 93, 118, 40, 164, 113, 66, 24, 150, 16, 218, 69, 118, 184, 150, 106, 186, 60, 41, 243, 231, 211, 233, 0, 131, 153, 43, 0, 3, 148, 69, 3, 10, 8, 1, 1, 0, 0, 233, 206, 130, 1, 16, 128, 172, 10, 7, 23, 40, 164, 166, 29, 62, 63, 80, 0, 43, 248, 17, 31, 4, 153, 37, 185, 46, 251, 6, 63, 0, 64, 174, 209, 180, 5, 0, 69, 0, 8, 236, 43, 4, 113, 78, 32, 82, 114, 59, 217, 103, 216, 128, 0, 0, 0, 4, 0, 0, 0, 198, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 190, 2, 0, 0, 170, 2, 0, 0, 0, 0, 0, 0, 96, 123, 94, 90, 224, 3, 0, 0, 144, 2, 0, 0, 196, 164, 97, 81, 232, 0, 0, 0, 1, 0, 0, 0, 14, 0, 0, 0, 32, 0, 0, 0, 57, 96, 89, 195, 1, 0, 0, 0, 16, 0, 0, 0, 234, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 215, 208, 48, 29, 1, 33, 28, 71, 110, 205, 210, 148, 225, 14, 237, 179, 197, 53, 4, 58, 246, 63, 228, 230, 166, 133, 111, 70, 124, 147, 240, 222, 21, 201, 13, 213, 140, 73, 144, 70, 156, 85, 47, 29, 86, 176, 195, 134, 78, 168, 63, 135, 252, 8, 80, 190, 183, 194, 133, 210, 26, 105, 239, 144, 29, 0, 2, 76, 160, 139, 10, 8, 1, 1, 0, 0, 167, 74, 239, 0, 16, 128, 210, 21, 9, 11, 29, 195, 141, 208, 244, 155, 80, 0, 91, 117, 210, 92, 4, 153, 37, 185, 251, 64, 6, 63, 0, 64, 209, 208, 212, 5, 0, 69, 0, 8, 188, 28, 4, 113, 78, 32, 3, 248, 103, 156, 181, 132, 128, 0, 0, 0, 4, 0, 0, 0, 230, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 149, 2, 0, 0, 170, 2, 0, 0, 0, 0, 0, 0, 96, 133, 157, 123, 224, 3, 0, 0, 149, 2, 0, 0, 116, 98, 15, 54, 232, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 32, 0, 0, 0, 33, 250, 157, 62, 1, 0, 0, 0, 16, 0, 0, 0, 
234, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 193, 111, 105, 60, 190, 220, 121, 229, 158, 159, 65, 27, 79, 59, 89, 152, 153, 147, 249, 41, 34, 174, 115, 106, 7, 8, 148, 19, 165, 47, 135, 86, 42, 17, 129, 84, 254, 130, 222, 106, 42, 106, 209, 185, 205, 208, 71, 17, 126, 140, 32, 197, 254, 206, 15, 11, 174, 65, 151, 178, 9, 214, 21, 70, 123, 1, 217, 142, 46, 12, 10, 8, 1, 1, 0, 0, 80, 121, 23, 4, 16, 128, 116, 173, 164, 116, 56, 194, 157, 44, 176, 189, 80, 0, 246, 113, 186, 87, 3, 153, 37, 185, 75, 197, 6, 63, 0, 64, 255, 84, 212, 5, 0, 69, 0, 8, 185, 28, 4, 113, 78, 32, 148, 2, 127, 31, 113, 128, 128, 0, 0, 0, 4, 0, 0, 0, 230, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 146, 2, 0, 0, 171, 2, 0, 0, 0, 0, 0, 0, 128, 85, 79, 192, 224, 3, 0, 0, 146, 2, 0, 0, 211, 127, 173, 95, 232, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 29 | 32, 0, 0, 0, 30 | 33, 250, 157, 62, // Next-Hop 31 | 1, 0, 0, 0, // Address Family 32 | 16, 0, 0, 0, // Flow Data Length 33 | 234, 3, 0, 0, // Enterprise/Type (Extended router data) 34 | 35 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 210, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 209, 50, 196, 16, 191, 134, 236, 166, 206, 27, 249, 140, 64, 231, 148, 246, 19, 88, 36, 9, 167, 240, 97, 133, 46, 175, 100, 47, 143, 160, 84, 35, 234, 71, 176, 116, 103, 119, 151, 133, 184, 52, 169, 202, 53, 231, 149, 40, 16, 81, 31, 242, 100, 122, 152, 78, 32, 133, 116, 22, 89, 122, 149, 27, 64, 0, 173, 248, 203, 199, 10, 8, 1, 1, 0, 0, 199, 212, 235, 0, 16, 36 | 128, // Header Length 37 | 92, 180, 133, 203, // ACK Number 38 | 31, 4, 191, 24, // Sequence Number 39 | 222, 148, // DST port 40 | 80, 0, // SRC port 41 | 42 | 19, 131, 191, 87, // DST IP 43 | 238, 153, 37, 185, // SRC IP 44 | 186, 25, // Header Checksum 45 | 6, // Protocol 46 | 62, // TTL 47 | 0, 64, // Flags + Fragment offset 48 | 131, 239, // Identifier 49 | 212, 5, // Total Length 50 | 0, // TOS 51 | 69, // Version + Length 52 | 53 | 0, 8, // EtherType 54 | 185, 28, 4, 
113, 78, 32, // Source MAC 55 | 148, 2, 127, 31, 113, 128, // Destination MAC 56 | 57 | 128, 0, 0, 0, // Original Packet length 58 | 4, 0, 0, 0, // Payload removed 59 | 230, 5, 0, 0, // Frame length 60 | 1, 0, 0, 0, // Header Protocol 61 | 144, 0, 0, 0, // Flow Data Length 62 | 1, 0, 0, 0, // Enterprise/Type 63 | 64 | 3, 0, 0, 0, // Flow Record count 65 | 146, 2, 0, 0, // Output interface 66 | 7, 2, 0, 0, // Input interface 67 | 0, 0, 0, 0, // Dropped Packets 68 | 160, 81, 79, 192, // Sampling Pool 69 | 224, 3, 0, 0, // Sampling Rate 70 | 146, 2, 0, 0, // Source ID + Index 71 | 210, 127, 173, 95, // Sequence Number 72 | 232, 0, 0, 0, // sample length 73 | 1, 0, 0, 0, // Enterprise/Type 74 | 75 | 5, 0, 0, 0, // NumSamples 76 | 111, 0, 0, 0, // SysUpTime 77 | 222, 0, 0, 0, // Sequence Number 78 | 0, 0, 0, 0, // Sub-AgentID 79 | 14, 19, 205, 10, // Agent Address 80 | 1, 0, 0, 0, // Agent Address Type 81 | 5, 0, 0, 0, // Version 82 | } 83 | s = convert.Reverse(s) 84 | 85 | packet, err := Decode(s) 86 | if err != nil { 87 | t.Errorf("Decoding packet failed: %v\n", err) 88 | } 89 | 90 | if packet.Header.AgentAddress.String() != "10.205.19.14" { 91 | t.Errorf("Incorrect AgentAddress: Exptected 10.205.19.14 got %s", packet.Header.AgentAddress.String()) 92 | } 93 | } 94 | 95 | func dump(packet *Packet) { 96 | fmt.Printf("PACKET DUMP:\n") 97 | for _, fs := range packet.FlowSamples { 98 | if fs.ExtendedRouterData != nil { 99 | fmt.Printf("Extended router data:\n") 100 | fmt.Printf("Next-Hop: %s\n", fs.ExtendedRouterData.NextHop.String()) 101 | } 102 | if fs.RawPacketHeader != nil { 103 | fmt.Printf("Raw packet header:\n") 104 | fmt.Printf("OriginalPacketLength: %d\n", fs.RawPacketHeader.OriginalPacketLength) 105 | fmt.Printf("Original Packet:\n") 106 | } 107 | } 108 | } 109 | 110 | func testEq(a, b []byte) bool { 111 | 112 | if a == nil && b == nil { 113 | return true 114 | } 115 | 116 | if a == nil || b == nil { 117 | return false 118 | } 119 | 120 | if len(a) != len(b) { 
// Copyright 2017 EXARING AG. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Packet is a decoded representation of a single sflow UDP packet.
type Packet struct {
	// A pointer to the packets headers
	Header       *Header
	headerTop    *headerTop
	headerBottom *headerBottom

	// A slice of pointers to FlowSet. Each element is instance of (Data)FlowSet
	FlowSamples []*FlowSample

	// Buffer is a slice pointing to the original byte array that this packet was decoded from.
	// This field is only populated if debug level is at least 2
	Buffer []byte
}

// Sizes of the wire-format structs below, precomputed for the decoder.
// NOTE(review): the decoder appears to map raw bytes onto these structs,
// so the field order of every struct in this file mirrors the on-wire
// layout — do not reorder fields.
var (
	sizeOfHeaderTop                = unsafe.Sizeof(headerTop{})
	sizeOfHeaderBottom             = unsafe.Sizeof(headerBottom{})
	sizeOfFlowSampleHeader         = unsafe.Sizeof(FlowSampleHeader{})
	sizeOfExpandedFlowSampleHeader = unsafe.Sizeof(ExpandedFlowSampleHeader{})
	sizeOfRawPacketHeader          = unsafe.Sizeof(RawPacketHeader{})
	sizeofExtendedRouterData       = unsafe.Sizeof(ExtendedRouterData{})
	sizeOfextendedRouterDataTop    = unsafe.Sizeof(extendedRouterDataTop{})
	sizeOfextendedRouterDataBottom = unsafe.Sizeof(extendedRouterDataBottom{})
	sizeOfExtendedSwitchData       = unsafe.Sizeof(ExtendedSwitchData{})
)

// Header is an sflow version 5 header in its decoded form.
type Header struct {
	Version          uint32
	AgentAddressType uint32
	AgentAddress     net.IP
	SubAgentID       uint32
	SequenceNumber   uint32
	SysUpTime        uint32
	NumSamples       uint32
}

// headerTop is the leading fixed-size part of the sflow header
// (everything up to the variable-length agent address).
type headerTop struct {
	AgentAddressType uint32
	Version          uint32
}

// headerBottom is the trailing fixed-size part of the sflow header
// (everything after the variable-length agent address).
type headerBottom struct {
	NumSamples     uint32
	SysUpTime      uint32
	SequenceNumber uint32
	SubAgentID     uint32
}

// FlowSample is an sflow version 5 flow sample.
type FlowSample struct {
	FlowSampleHeader         *FlowSampleHeader
	ExpandedFlowSampleHeader *ExpandedFlowSampleHeader
	RawPacketHeader          *RawPacketHeader
	Data                     unsafe.Pointer
	DataLen                  uint32
	ExtendedSwitchData       *ExtendedSwitchData
	ExtendedRouterData       *ExtendedRouterData
}

// FlowSampleHeader is an sflow version 5 flow sample header.
type FlowSampleHeader struct {
	FlowRecord         uint32
	OutputIf           uint32
	InputIf            uint32
	DroppedPackets     uint32
	SamplePool         uint32
	SamplingRate       uint32
	SourceIDClassIndex uint32
	SequenceNumber     uint32
	SampleLength       uint32
	EnterpriseType     uint32
}

// ExpandedFlowSampleHeader is an sflow version 5 expanded flow sample
// header. The blank fields are the upper halves of the widened
// interface/source-ID fields that the compact header lacks.
type ExpandedFlowSampleHeader struct {
	FlowRecord         uint32
	OutputIf           uint32
	_                  uint32
	InputIf            uint32
	_                  uint32
	DroppedPackets     uint32
	SamplePool         uint32
	SamplingRate       uint32
	SourceIDClassIndex uint32
	_                  uint32
	SequenceNumber     uint32
	SampleLength       uint32
	EnterpriseType     uint32
}

// toFlowSampleHeader converts the expanded header into the compact
// FlowSampleHeader form, dropping the expanded-only padding words.
func (e *ExpandedFlowSampleHeader) toFlowSampleHeader() *FlowSampleHeader {
	fsh := FlowSampleHeader{
		FlowRecord:         e.FlowRecord,
		OutputIf:           e.OutputIf,
		InputIf:            e.InputIf,
		DroppedPackets:     e.DroppedPackets,
		SamplePool:         e.SamplePool,
		SamplingRate:       e.SamplingRate,
		SourceIDClassIndex: e.SourceIDClassIndex,
		SequenceNumber:     e.SequenceNumber,
		SampleLength:       e.SampleLength,
		EnterpriseType:     e.EnterpriseType,
	}

	return &fsh
}

// RawPacketHeader is a raw packet header.
type RawPacketHeader struct {
	OriginalPacketLength uint32
	PayloadRemoved       uint32
	FrameLength          uint32
	HeaderProtocol       uint32
	FlowDataLength       uint32
	EnterpriseType       uint32
}

// extendedRouterDataTop is the fixed-size part preceding the
// variable-length next-hop address.
type extendedRouterDataTop struct {
	AddressType    uint32
	FlowDataLength uint32
	EnterpriseType uint32
}

// extendedRouterDataBottom is the fixed-size part following the
// variable-length next-hop address.
type extendedRouterDataBottom struct {
	NextHopDestinationMask uint32
	NextHopSourceMask      uint32
}

// ExtendedRouterData represents sflow version 5 extended router data.
type ExtendedRouterData struct {
	NextHopDestinationMask uint32
	NextHopSourceMask      uint32
	NextHop                net.IP
	AddressType            uint32
	FlowDataLength         uint32
	EnterpriseType         uint32
}

// ExtendedSwitchData represents sflow version 5 extended switch data.
type ExtendedSwitchData struct {
	OutgoingPriority uint32
	OutgoingVLAN     uint32
	IncomingPriority uint32
	IncomingVLAN     uint32
	FlowDataLength   uint32
	EnterpriseType   uint32
}
-------------------------------------------------------------------------------- /pkg/routemirror/route_mirror.go: -------------------------------------------------------------------------------- 1 | package routemirror 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/bio-routing/bio-rd/route" 8 | "google.golang.org/grpc" 9 | 10 | bnet "github.com/bio-routing/bio-rd/net" 11 | ) 12 | 13 | // RouteMirror is a RIS based route mirror 14 | type RouteMirror struct { 15 | routers map[string]*router 16 | routersMu sync.RWMutex 17 | } 18 | 19 | // New creates a new RouteMirror 20 | func New() *RouteMirror { 21 | return &RouteMirror{ 22 | routers: make(map[string]*router), 23 | } 24 | } 25 | 26 | // AddTarget adds a target 27 | func (r *RouteMirror) AddTarget(name string, address bnet.IP, sources []*grpc.ClientConn, vrfRD uint64) { 28 | r.routersMu.Lock() 29 | defer r.routersMu.Unlock() 30 | 31 | rtr := r.addRouterIfNotExists(name, address, sources) 32 | rtr.addVRFIfNotExists(vrfRD) 33 | } 34 | 35 | func (r *RouteMirror) addRouterIfNotExists(name string, address bnet.IP, sources []*grpc.ClientConn) *router { 36 | if _, exists := r.routers[name]; exists { 37 | return r.routers[name] 38 | } 39 | 40 | rtr := newRouter(name, address, sources) 41 | r.routers[name] = rtr 42 | 43 | return rtr 44 | } 45 | 46 | // Stop stops the route mirror 47 | func (r *RouteMirror) Stop() { 48 | r.routersMu.Lock() 49 | defer r.routersMu.Unlock() 50 | 51 | for _, rtr := range r.routers { 52 | rtr.stop() 53 | } 54 | } 55 | 56 | func (r *RouteMirror) getRouter(needle string) *router { 57 | r.routersMu.RLock() 58 | defer r.routersMu.RUnlock() 59 | 60 | for _, rtr := range r.routers { 61 | if rtr.address.String() == needle { 62 | return rtr 63 | } 64 | } 65 | 66 | return nil 67 | } 68 | 69 | // LPM preforms a Longest Prefix Match against a routers VRF 70 | func (r *RouteMirror) LPM(rtrAddr string, vrfRD uint64, addr bnet.IP) (*route.Route, error) { 71 | rtr := r.getRouter(rtrAddr) 72 | if rtr 
== nil { 73 | return nil, fmt.Errorf("Router %s not found", rtrAddr) 74 | } 75 | 76 | afi := uint8(6) 77 | pfxLen := uint8(128) 78 | if addr.IsIPv4() { 79 | afi = 4 80 | pfxLen = 32 81 | } 82 | 83 | v := rtr.getVRF(vrfRD) 84 | if v == nil { 85 | return nil, fmt.Errorf("Invalid VRF %d pn %s", vrfRD, rtrAddr) 86 | } 87 | 88 | rib := v.getLocRIB(afi) 89 | routes := rib.LPM(bnet.NewPfx(addr, pfxLen).Ptr()) 90 | 91 | if len(routes) == 0 { 92 | return nil, nil 93 | } 94 | 95 | return routes[len(routes)-1], nil 96 | } 97 | -------------------------------------------------------------------------------- /pkg/routemirror/router.go: -------------------------------------------------------------------------------- 1 | package routemirror 2 | 3 | import ( 4 | "fmt" 5 | 6 | bnet "github.com/bio-routing/bio-rd/net" 7 | "google.golang.org/grpc" 8 | ) 9 | 10 | type router struct { 11 | name string 12 | address bnet.IP 13 | sources []*grpc.ClientConn 14 | vrfs map[uint64]*routerVRF 15 | } 16 | 17 | func newRouter(name string, address bnet.IP, sources []*grpc.ClientConn) *router { 18 | return &router{ 19 | name: name, 20 | address: address, 21 | sources: sources, 22 | vrfs: make(map[uint64]*routerVRF), 23 | } 24 | } 25 | 26 | func (r *router) addVRFIfNotExists(rd uint64) *routerVRF { 27 | if _, exists := r.vrfs[rd]; !exists { 28 | r.vrfs[rd] = newRouterVRF(r, rd) 29 | for _, s := range r.sources { 30 | r.vrfs[rd].addRIS(s) 31 | } 32 | } 33 | 34 | return r.vrfs[rd] 35 | } 36 | 37 | func (r *router) getVRF(rd uint64) *routerVRF { 38 | if _, exists := r.vrfs[rd]; !exists { 39 | return nil 40 | } 41 | 42 | return r.vrfs[rd] 43 | } 44 | 45 | func (r *router) removeVRF(rd uint64) error { 46 | if _, exists := r.vrfs[rd]; !exists { 47 | return fmt.Errorf("VRF %d not found", rd) 48 | } 49 | 50 | // TODO: Implement 51 | return nil 52 | } 53 | 54 | func (r *router) stop() { 55 | for _, v := range r.vrfs { 56 | v.stop() 57 | } 58 | } 59 | 
-------------------------------------------------------------------------------- /pkg/routemirror/vrf.go: -------------------------------------------------------------------------------- 1 | package routemirror 2 | 3 | import ( 4 | "github.com/bio-routing/bio-rd/cmd/ris/api" 5 | "github.com/bio-routing/bio-rd/risclient" 6 | "github.com/bio-routing/bio-rd/routingtable/locRIB" 7 | "github.com/bio-routing/bio-rd/routingtable/mergedlocrib" 8 | "google.golang.org/grpc" 9 | ) 10 | 11 | type routerVRF struct { 12 | router *router 13 | rd uint64 14 | locRIBIPv4 *locRIB.LocRIB 15 | locRIBIPv6 *locRIB.LocRIB 16 | mergedLocRIBIPv4 *mergedlocrib.MergedLocRIB 17 | mergedLocRIBIPv6 *mergedlocrib.MergedLocRIB 18 | risClients []*risclient.RISClient 19 | } 20 | 21 | func newRouterVRF(router *router, vrfRD uint64) *routerVRF { 22 | v := &routerVRF{ 23 | router: router, 24 | rd: vrfRD, 25 | locRIBIPv4: locRIB.New("inet.0"), 26 | locRIBIPv6: locRIB.New("inet6.0"), 27 | risClients: make([]*risclient.RISClient, 0), 28 | } 29 | 30 | v.mergedLocRIBIPv4 = mergedlocrib.New(v.locRIBIPv4) 31 | v.mergedLocRIBIPv6 = mergedlocrib.New(v.locRIBIPv6) 32 | 33 | return v 34 | } 35 | 36 | func (v *routerVRF) stop() { 37 | for _, rc := range v.risClients { 38 | rc.Stop() 39 | } 40 | } 41 | 42 | func (v *routerVRF) addRIS(cc *grpc.ClientConn) { 43 | for _, afi := range []uint8{0, 1} { 44 | c := v.mergedLocRIBIPv4 45 | if afi == 1 { 46 | c = v.mergedLocRIBIPv6 47 | } 48 | 49 | rc := risclient.New(&risclient.Request{ 50 | Router: v.router.address.String(), 51 | VRFRD: v.rd, 52 | AFI: api.ObserveRIBRequest_AFISAFI(afi), 53 | }, cc, c) 54 | 55 | v.risClients = append(v.risClients, rc) 56 | rc.Start() 57 | } 58 | } 59 | 60 | func (v *routerVRF) getLocRIB(afi uint8) *locRIB.LocRIB { 61 | if afi == 6 { 62 | return v.locRIBIPv6 63 | } 64 | 65 | return v.locRIBIPv4 66 | } 67 | -------------------------------------------------------------------------------- /pkg/servers/aggregator/aggregator.go: 
-------------------------------------------------------------------------------- 1 | package aggregator 2 | 3 | import ( 4 | "time" 5 | 6 | bnet "github.com/bio-routing/bio-rd/net" 7 | "github.com/bio-routing/flowhouse/pkg/models/flow" 8 | ) 9 | 10 | const ( 11 | AggregationWindowSeconds = 10 12 | ) 13 | 14 | type Key struct { 15 | Agent bnet.IP 16 | Src bnet.IP 17 | Dst bnet.IP 18 | Sport uint16 19 | Dport uint16 20 | Protocol uint8 21 | } 22 | 23 | type Aggregator struct { 24 | data map[Key]*flow.Flow 25 | stopCh chan struct{} 26 | ingress chan *flow.Flow 27 | output chan []*flow.Flow 28 | currentUnixTimeSeconds int64 29 | } 30 | 31 | func New(output chan []*flow.Flow) *Aggregator { 32 | a := &Aggregator{ 33 | data: make(map[Key]*flow.Flow), 34 | stopCh: make(chan struct{}), 35 | ingress: make(chan *flow.Flow), 36 | output: output, 37 | } 38 | 39 | go a.service() 40 | return a 41 | } 42 | 43 | func (a *Aggregator) Stop() { 44 | close(a.stopCh) 45 | } 46 | 47 | func FlowToKey(fl *flow.Flow) Key { 48 | return Key{ 49 | Agent: fl.Agent, 50 | Src: fl.SrcAddr, 51 | Dst: fl.DstAddr, 52 | Sport: fl.SrcPort, 53 | Dport: fl.DstPort, 54 | Protocol: fl.Protocol, 55 | } 56 | } 57 | 58 | func (a *Aggregator) IsStopped() bool { 59 | select { 60 | case <-a.stopCh: 61 | return true 62 | default: 63 | return false 64 | } 65 | } 66 | 67 | func (a *Aggregator) service() { 68 | for { 69 | if a.IsStopped() { 70 | return 71 | } 72 | 73 | fl := <-a.ingress 74 | a.Ingest(fl) 75 | 76 | } 77 | } 78 | 79 | func (a *Aggregator) Ingest(fl *flow.Flow) { 80 | currentUnixTimeSeconds := time.Now().Unix() 81 | currentUnixTimeSeconds -= currentUnixTimeSeconds % AggregationWindowSeconds 82 | 83 | if a.currentUnixTimeSeconds < currentUnixTimeSeconds { 84 | a.flush() 85 | a.currentUnixTimeSeconds = currentUnixTimeSeconds 86 | } 87 | 88 | fl.Timestamp = currentUnixTimeSeconds 89 | a.add(fl) 90 | } 91 | 92 | func (a *Aggregator) add(fl *flow.Flow) { 93 | k := FlowToKey(fl) 94 | 95 | if _, exists := 
a.data[k]; !exists { 96 | a.data[k] = fl 97 | return 98 | } 99 | 100 | a.data[k].Add(fl) 101 | } 102 | 103 | func (a *Aggregator) GetIngress() chan<- *flow.Flow { 104 | return a.ingress 105 | } 106 | 107 | func (a *Aggregator) flush() { 108 | s := make([]*flow.Flow, len(a.data)) 109 | 110 | i := 0 111 | for _, fl := range a.data { 112 | s[i] = fl 113 | i++ 114 | } 115 | 116 | a.output <- s 117 | a.data = make(map[Key]*flow.Flow) 118 | } 119 | -------------------------------------------------------------------------------- /pkg/servers/ipfix/ipfix_server.go: -------------------------------------------------------------------------------- 1 | package ipfix 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "runtime/debug" 7 | "strconv" 8 | "strings" 9 | "sync" 10 | 11 | bnet "github.com/bio-routing/bio-rd/net" 12 | "github.com/bio-routing/flowhouse/pkg/models/flow" 13 | "github.com/bio-routing/flowhouse/pkg/packet/ipfix" 14 | "github.com/bio-routing/flowhouse/pkg/servers/aggregator" 15 | "github.com/bio-routing/tflow2/convert" 16 | "github.com/pkg/errors" 17 | 18 | log "github.com/sirupsen/logrus" 19 | ) 20 | 21 | type InterfaceResolver interface { 22 | Resolve(agent bnet.IP, ifID uint32) string 23 | } 24 | 25 | // fieldMap describes what information is at what index in the slice 26 | // that we get from decoding a netflow packet 27 | type fieldMap struct { 28 | srcAddr int 29 | dstAddr int 30 | protocol int 31 | packets int 32 | size int 33 | intIn int 34 | intOut int 35 | nextHop int 36 | family int 37 | vlan int 38 | ts int 39 | srcAsn int 40 | dstAsn int 41 | srcPort int 42 | dstPort int 43 | samplingPacketInterval int 44 | srcTos int 45 | srcMask int 46 | dstMask int 47 | srcMask6 int 48 | dstMask6 int 49 | samplingInterval int 50 | } 51 | 52 | type IPFIXServer struct { 53 | // tmplCache is used to save received flow templates 54 | // for later lookup in order to decode netflow packets 55 | tmplCache *templateCache 56 | conn *net.UDPConn 57 | ifResolver InterfaceResolver 
58 | output chan []*flow.Flow 59 | wg sync.WaitGroup 60 | stopCh chan struct{} 61 | aggregator *aggregator.Aggregator 62 | sampleRateCache *sampleRateCache 63 | } 64 | 65 | // New creates and starts a new `IPFIXServer` instance 66 | func New(listen string, numReaders int, output chan []*flow.Flow, ifResolver InterfaceResolver) (*IPFIXServer, error) { 67 | ipf := &IPFIXServer{ 68 | tmplCache: newTemplateCache(), 69 | ifResolver: ifResolver, 70 | stopCh: make(chan struct{}), 71 | output: output, 72 | aggregator: aggregator.New(output), 73 | sampleRateCache: newSampleRateCache(), 74 | } 75 | 76 | addr, err := net.ResolveUDPAddr("udp", listen) 77 | if err != nil { 78 | return nil, errors.Wrap(err, "Unable to resolve UDP address") 79 | } 80 | 81 | con, err := net.ListenUDP("udp", addr) 82 | if err != nil { 83 | return nil, errors.Wrap(err, "ListenUDP failed") 84 | } 85 | ipf.conn = con 86 | 87 | ipf.startService(numReaders) 88 | return ipf, nil 89 | } 90 | 91 | func (ipf *IPFIXServer) startService(numReaders int) { 92 | for i := 0; i < numReaders; i++ { 93 | ipf.wg.Add(1) 94 | go func() { 95 | defer ipf.wg.Done() 96 | err := ipf.packetWorker() 97 | if err != nil { 98 | log.WithError(err).Error("packetWorker failed") 99 | } 100 | }() 101 | } 102 | } 103 | 104 | // Stop closes the socket and stops the workers 105 | func (ipf *IPFIXServer) Stop() { 106 | log.Info("Stopping IPFIX server") 107 | debug.PrintStack() 108 | close(ipf.stopCh) 109 | ipf.conn.Close() 110 | ipf.aggregator.Stop() 111 | ipf.wg.Wait() 112 | } 113 | 114 | // packetWorker reads sflow packet from socket and handsoff processing to ??? 
115 | func (ipf *IPFIXServer) packetWorker() error { 116 | buffer := make([]byte, 8960) 117 | for { 118 | if ipf.stopped() { 119 | return nil 120 | } 121 | 122 | length, remote, err := ipf.conn.ReadFromUDP(buffer) 123 | if err == io.EOF { 124 | return nil 125 | } 126 | 127 | if err != nil { 128 | return errors.Wrap(err, "ReadFromUDP failed") 129 | } 130 | 131 | remote4 := remote.IP.To4() 132 | if remote4 != nil { 133 | remote.IP = remote4 134 | } 135 | 136 | remoteAddr, err := bnet.IPFromBytes([]byte(remote.IP)) 137 | if err != nil { 138 | return errors.Wrapf(err, "Unable to convert net.IP to bnet.IP: %q", remote) 139 | } 140 | 141 | ipf.processPacket(remoteAddr, buffer[:length]) 142 | } 143 | } 144 | 145 | func (ipf *IPFIXServer) stopped() bool { 146 | select { 147 | case <-ipf.stopCh: 148 | return true 149 | default: 150 | return false 151 | } 152 | } 153 | 154 | func (ipf *IPFIXServer) processPacket(agent bnet.IP, buffer []byte) { 155 | pkt, err := ipfix.Decode(buffer) 156 | if err != nil { 157 | log.WithError(err).Error("Unable to decode IPFIX packet") 158 | return 159 | } 160 | 161 | ipf.updateTemplateCache(agent, pkt) 162 | ipf.processFlowSets(agent, pkt.Header.DomainID, pkt.DataFlowSets(), int64(pkt.Header.ExportTime)) 163 | } 164 | 165 | // processFlowSets iterates over flowSets and calls processFlowSet() for each flow set 166 | func (ipf *IPFIXServer) processFlowSets(remote bnet.IP, observationDomainID uint32, flowSets []*ipfix.FlowSet, ts int64) { 167 | addr := remote.String() 168 | for _, set := range flowSets { 169 | template, isOpts := ipf.tmplCache.get(remote, observationDomainID, set.Header.SetID) 170 | 171 | if template == nil { 172 | templateKey := makeTemplateKey(addr, observationDomainID, set.Header.SetID) 173 | log.Debugf("Template for given FlowSet not found: %s", templateKey) 174 | 175 | continue 176 | } 177 | 178 | records := ipfix.DecodeFlowSet(*set, template) 179 | if records == nil { 180 | log.Warning("Error decoding FlowSet") 181 | 
continue 182 | } 183 | 184 | ipf.processFlowSet(template, records, remote, observationDomainID, ts, isOpts) 185 | } 186 | } 187 | 188 | // process generates Flow elements from records and pushes them into the `receiver` channel 189 | func (ipf *IPFIXServer) processFlowSet(template []*ipfix.TemplateRecord, records []ipfix.FlowDataRecord, agent bnet.IP, observationDomainID uint32, ts int64, isOpts bool) { 190 | fm := generateFieldMap(template) 191 | 192 | for _, r := range records { 193 | if isOpts { 194 | if fm.samplingInterval > 0 { 195 | sampleRate := convert.Uint32(r.Values[fm.samplingInterval]) 196 | ipf.sampleRateCache.set(agent, observationDomainID, sampleRate) 197 | } 198 | 199 | continue 200 | } 201 | 202 | fl := &flow.Flow{ 203 | Agent: agent, 204 | Timestamp: ts, 205 | } 206 | 207 | if fm.family >= 0 { 208 | fl.Family = uint8(fm.family) 209 | } 210 | 211 | if fm.packets >= 0 { 212 | fl.Packets = convert.Uint64(r.Values[fm.packets]) 213 | } 214 | 215 | if fm.size >= 0 { 216 | fl.Size = uint64(convert.Uint32(r.Values[fm.size])) 217 | } 218 | 219 | if fm.protocol >= 0 { 220 | fl.Protocol = uint8(convert.Uint16(r.Values[fm.protocol])) 221 | } 222 | 223 | if fm.intIn >= 0 { 224 | fl.IntIn = ipf.ifResolver.Resolve(agent, convert.Uint32(r.Values[fm.intIn])) 225 | } 226 | 227 | if fm.intOut >= 0 { 228 | fl.IntOut = ipf.ifResolver.Resolve(agent, convert.Uint32(r.Values[fm.intOut])) 229 | } 230 | 231 | if fm.srcPort >= 0 { 232 | fl.SrcPort = convert.Uint16(r.Values[fm.srcPort]) 233 | } 234 | 235 | if fm.dstPort >= 0 { 236 | fl.DstPort = convert.Uint16(r.Values[fm.dstPort]) 237 | } 238 | 239 | if fm.srcAddr >= 0 { 240 | fl.SrcAddr = bnet.IPv4FromBytes(convert.Reverse(r.Values[fm.srcAddr])) 241 | } 242 | 243 | if fm.dstAddr >= 0 { 244 | fl.DstAddr = bnet.IPv4FromBytes(convert.Reverse(r.Values[fm.dstAddr])) 245 | } 246 | 247 | if fm.nextHop >= 0 { 248 | fl.NextHop = bnet.IPv4FromBytes(convert.Reverse(r.Values[fm.nextHop])) 249 | } 250 | 251 | if fm.srcTos >= 0 { 252 | 
fl.TOS = uint8(r.Values[fm.srcTos][0]) 253 | } 254 | 255 | if fm.dstAsn >= 0 { 256 | fl.DstAs = convert.Uint32(r.Values[fm.dstAsn]) 257 | } 258 | 259 | if fm.srcAsn >= 0 { 260 | fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn]) 261 | } 262 | 263 | if fm.srcMask > 0 { 264 | mask := uint8(r.Values[fm.srcMask][0]) 265 | p := bnet.NewPfx(fl.SrcAddr, mask) 266 | p.BaseAddr() 267 | fl.SrcPfx = bnet.NewPfx(*p.BaseAddr(), mask) 268 | } 269 | 270 | if fm.dstMask > 0 { 271 | mask := uint8(r.Values[fm.dstMask][0]) 272 | p := bnet.NewPfx(fl.DstAddr, mask) 273 | p.BaseAddr() 274 | fl.DstPfx = bnet.NewPfx(*p.BaseAddr(), mask) 275 | } 276 | 277 | if fm.srcMask6 > 0 { 278 | mask := uint8(r.Values[fm.srcMask6][0]) 279 | p := bnet.NewPfx(fl.SrcAddr, mask) 280 | p.BaseAddr() 281 | fl.SrcPfx = bnet.NewPfx(*p.BaseAddr(), mask) 282 | } 283 | 284 | if fm.dstMask6 > 0 { 285 | mask := uint8(r.Values[fm.dstMask6][0]) 286 | p := bnet.NewPfx(fl.DstAddr, mask) 287 | p.BaseAddr() 288 | fl.DstPfx = bnet.NewPfx(*p.BaseAddr(), mask) 289 | } 290 | 291 | fl.Samplerate = uint64(ipf.sampleRateCache.get(agent, observationDomainID)) 292 | 293 | ipf.aggregator.GetIngress() <- fl 294 | } 295 | } 296 | 297 | // generateFieldMap processes a TemplateRecord and populates a fieldMap accordingly 298 | // the FieldMap can then be used to read fields from a flow 299 | func generateFieldMap(template []*ipfix.TemplateRecord) *fieldMap { 300 | fm := fieldMap{ 301 | srcAddr: -1, 302 | dstAddr: -1, 303 | protocol: -1, 304 | packets: -1, 305 | size: -1, 306 | intIn: -1, 307 | intOut: -1, 308 | nextHop: -1, 309 | family: -1, 310 | vlan: -1, 311 | ts: -1, 312 | srcAsn: -1, 313 | dstAsn: -1, 314 | srcPort: -1, 315 | dstPort: -1, 316 | samplingPacketInterval: -1, 317 | srcTos: -1, 318 | srcMask: -1, 319 | dstMask: -1, 320 | srcMask6: -1, 321 | dstMask6: -1, 322 | samplingInterval: -1, 323 | } 324 | 325 | i := -1 326 | for _, f := range template { 327 | i++ 328 | 329 | switch f.Type { 330 | case ipfix.IPv4SrcAddr: 331 | 
fm.srcAddr = i 332 | fm.family = 4 333 | case ipfix.IPv6SrcAddr: 334 | fm.srcAddr = i 335 | fm.family = 6 336 | case ipfix.IPv4DstAddr: 337 | fm.dstAddr = i 338 | case ipfix.IPv6DstAddr: 339 | fm.dstAddr = i 340 | case ipfix.InBytes: 341 | fm.size = i 342 | case ipfix.Protocol: 343 | fm.protocol = i 344 | case ipfix.InPkts: 345 | fm.packets = i 346 | case ipfix.InputSnmp: 347 | fm.intIn = i 348 | case ipfix.OutputSnmp: 349 | fm.intOut = i 350 | case ipfix.IPv4NextHop: 351 | fm.nextHop = i 352 | case ipfix.IPv6NextHop: 353 | fm.nextHop = i 354 | case ipfix.L4SrcPort: 355 | fm.srcPort = i 356 | case ipfix.L4DstPort: 357 | fm.dstPort = i 358 | case ipfix.SrcAs: 359 | fm.srcAsn = i 360 | case ipfix.DstAs: 361 | fm.dstAsn = i 362 | case ipfix.SamplingPacketInterval: 363 | fm.samplingPacketInterval = i 364 | case ipfix.SrcTos: 365 | fm.srcTos = i 366 | case ipfix.SrcMask: 367 | fm.srcMask = i 368 | case ipfix.DstMask: 369 | fm.dstMask = i 370 | case ipfix.IPv6SrcMask: 371 | fm.srcMask6 = i 372 | case ipfix.IPv6DstMask: 373 | fm.dstMask6 = i 374 | case ipfix.SamplingInterval: 375 | fm.samplingInterval = i 376 | } 377 | } 378 | 379 | return &fm 380 | } 381 | 382 | // updateTemplateCache updates the template cache 383 | func (ipf *IPFIXServer) updateTemplateCache(remote bnet.IP, p *ipfix.Packet) { 384 | templRecs := p.GetTemplateRecords() 385 | for _, tr := range templRecs { 386 | ipf.tmplCache.set(remote, p.Header.DomainID, tr.Header.TemplateID, tr.Records, false) 387 | } 388 | 389 | optTemplRecs := p.GetOptionTemplateRecords() 390 | for _, tr := range optTemplRecs { 391 | ipf.tmplCache.set(remote, p.Header.DomainID, tr.Header.TemplateID, tr.Records, true) 392 | } 393 | } 394 | 395 | // makeTemplateKey creates a string of the 3 tuple router address, source id and template id 396 | func makeTemplateKey(addr string, sourceID uint32, templateID uint16) string { 397 | keyParts := []string{ 398 | addr, 399 | strconv.Itoa(int(sourceID)), 400 | strconv.Itoa(int(templateID)), 401 
| } 402 | return strings.Join(keyParts, "|") 403 | } 404 | -------------------------------------------------------------------------------- /pkg/servers/ipfix/ipfix_template_cache.go: -------------------------------------------------------------------------------- 1 | package ipfix 2 | 3 | import ( 4 | "sync" 5 | 6 | bnet "github.com/bio-routing/bio-rd/net" 7 | "github.com/bio-routing/flowhouse/pkg/packet/ipfix" 8 | ) 9 | 10 | type templateCacheKey struct { 11 | agent bnet.IP 12 | observationDomain uint32 13 | templateID uint16 14 | } 15 | 16 | func newTemplateCacheKey(agent bnet.IP, observationDomain uint32, templateID uint16) templateCacheKey { 17 | return templateCacheKey{ 18 | agent: agent, 19 | observationDomain: observationDomain, 20 | templateID: templateID, 21 | } 22 | } 23 | 24 | type templateCache struct { 25 | cache map[templateCacheKey]*templateCacheEntry 26 | lock sync.RWMutex 27 | } 28 | 29 | type templateCacheEntry struct { 30 | isOptionsTemplate bool 31 | records []*ipfix.TemplateRecord 32 | } 33 | 34 | // newTemplateCache creates and initializes a new `templateCache` instance 35 | func newTemplateCache() *templateCache { 36 | return &templateCache{ 37 | cache: make(map[templateCacheKey]*templateCacheEntry), 38 | } 39 | } 40 | 41 | func (c *templateCache) set(rtr bnet.IP, domainID uint32, templateID uint16, records []*ipfix.TemplateRecord, opts bool) { 42 | k := newTemplateCacheKey(rtr, domainID, templateID) 43 | v := &templateCacheEntry{ 44 | isOptionsTemplate: opts, 45 | records: records, 46 | } 47 | 48 | c.lock.Lock() 49 | defer c.lock.Unlock() 50 | 51 | c.cache[k] = v 52 | } 53 | 54 | func (c *templateCache) get(rtr bnet.IP, domainID uint32, templateID uint16) ([]*ipfix.TemplateRecord, bool) { 55 | k := newTemplateCacheKey(rtr, domainID, templateID) 56 | 57 | c.lock.RLock() 58 | defer c.lock.RUnlock() 59 | 60 | e, found := c.cache[k] 61 | if !found { 62 | return nil, false 63 | } 64 | 65 | return e.records, e.isOptionsTemplate 66 | } 67 | 
-------------------------------------------------------------------------------- /pkg/servers/ipfix/sample_rate_cache.go: -------------------------------------------------------------------------------- 1 | package ipfix 2 | 3 | import ( 4 | "sync" 5 | 6 | bnet "github.com/bio-routing/bio-rd/net" 7 | ) 8 | 9 | type sampleRateCacheKey struct { 10 | agent bnet.IP 11 | observationDomainID uint32 12 | } 13 | 14 | func newSampleRateCacheKey(agent bnet.IP, observationDomainID uint32) sampleRateCacheKey { 15 | return sampleRateCacheKey{ 16 | agent: agent, 17 | observationDomainID: observationDomainID, 18 | } 19 | } 20 | 21 | type sampleRateCache struct { 22 | data map[sampleRateCacheKey]uint32 23 | dataMu sync.RWMutex 24 | } 25 | 26 | func newSampleRateCache() *sampleRateCache { 27 | return &sampleRateCache{ 28 | data: make(map[sampleRateCacheKey]uint32), 29 | } 30 | } 31 | 32 | func (src *sampleRateCache) get(agent bnet.IP, observationDomainID uint32) uint32 { 33 | src.dataMu.RLock() 34 | defer src.dataMu.RUnlock() 35 | 36 | return src.data[newSampleRateCacheKey(agent, observationDomainID)] 37 | } 38 | 39 | func (src *sampleRateCache) set(agent bnet.IP, observationDomainID uint32, rate uint32) { 40 | src.dataMu.Lock() 41 | defer src.dataMu.Unlock() 42 | 43 | src.data[newSampleRateCacheKey(agent, observationDomainID)] = rate 44 | } 45 | -------------------------------------------------------------------------------- /pkg/servers/sflow/sfserver.go: -------------------------------------------------------------------------------- 1 | // Package sfserver provides sflow collection services via UDP and passes flows into aggregator layer 2 | package sflow 3 | 4 | import ( 5 | "fmt" 6 | "io" 7 | "net" 8 | "runtime/debug" 9 | "sync" 10 | "time" 11 | "unsafe" 12 | 13 | "github.com/bio-routing/tflow2/convert" 14 | "github.com/pkg/errors" 15 | "github.com/prometheus/client_golang/prometheus" 16 | "github.com/prometheus/client_golang/prometheus/promauto" 17 | 18 | bnet 
"github.com/bio-routing/bio-rd/net" 19 | "github.com/bio-routing/flowhouse/pkg/models/flow" 20 | "github.com/bio-routing/flowhouse/pkg/packet/packet" 21 | "github.com/bio-routing/flowhouse/pkg/packet/sflow" 22 | "github.com/bio-routing/flowhouse/pkg/servers/aggregator" 23 | log "github.com/sirupsen/logrus" 24 | ) 25 | 26 | var labels []string 27 | 28 | func init() { 29 | labels = []string{ 30 | "agent", 31 | } 32 | } 33 | 34 | type InterfaceResolver interface { 35 | Resolve(agent bnet.IP, ifID uint32) string 36 | } 37 | 38 | // SflowServer represents a sflow Collector instance 39 | type SflowServer struct { 40 | aggregator *aggregator.Aggregator 41 | conn *net.UDPConn 42 | ifResolver InterfaceResolver 43 | wg sync.WaitGroup 44 | stopCh chan struct{} 45 | packetsReceived *prometheus.CounterVec 46 | flowSamplesReceived *prometheus.CounterVec 47 | flowNoRawPktHeader *prometheus.CounterVec 48 | flowNoData *prometheus.CounterVec 49 | flowUnknownProtocol *prometheus.CounterVec 50 | flowEthernetDecodeErrors *prometheus.CounterVec 51 | flowUnknownEtherType *prometheus.CounterVec 52 | flowDot1qDecodeErrors *prometheus.CounterVec 53 | flowIPv4DecodeErrors *prometheus.CounterVec 54 | flowIPv6DecodeErrors *prometheus.CounterVec 55 | flowTCPDecodeErros *prometheus.CounterVec 56 | flowUDPDecodeErros *prometheus.CounterVec 57 | } 58 | 59 | // New creates and starts a new `SflowServer` instance 60 | func New(listen string, numReaders int, output chan []*flow.Flow, ifResolver InterfaceResolver) (*SflowServer, error) { 61 | sfs := &SflowServer{ 62 | aggregator: aggregator.New(output), 63 | ifResolver: ifResolver, 64 | packetsReceived: promauto.NewCounterVec(prometheus.CounterOpts{ 65 | Namespace: "flowhouse", 66 | Subsystem: "sflow", 67 | Name: "received_packets", 68 | Help: "Received sflow packets", 69 | }, labels), 70 | flowSamplesReceived: promauto.NewCounterVec(prometheus.CounterOpts{ 71 | Namespace: "flowhouse", 72 | Subsystem: "sflow", 73 | Name: "flow_samples_received", 74 | 
Help: "Flow samples received", 75 | }, labels), 76 | flowNoRawPktHeader: promauto.NewCounterVec(prometheus.CounterOpts{ 77 | Namespace: "flowhouse", 78 | Subsystem: "sflow", 79 | Name: "flow_samples_no_raw_pkt_header", 80 | Help: "Flow samples without raw packet header", 81 | }, labels), 82 | flowNoData: promauto.NewCounterVec(prometheus.CounterOpts{ 83 | Namespace: "flowhouse", 84 | Subsystem: "sflow", 85 | Name: "flow_no_data", 86 | Help: "Flow samples without data", 87 | }, labels), 88 | flowUnknownProtocol: promauto.NewCounterVec(prometheus.CounterOpts{ 89 | Namespace: "flowhouse", 90 | Subsystem: "sflow", 91 | Name: "flow_samples_unknown_protocol", 92 | Help: "Flow samples unknown protocol", 93 | }, labels), 94 | flowEthernetDecodeErrors: promauto.NewCounterVec(prometheus.CounterOpts{ 95 | Namespace: "flowhouse", 96 | Subsystem: "sflow", 97 | Name: "flow_samples_ethernet_decode_errors", 98 | Help: "Flow samples ethernet decode errors", 99 | }, labels), 100 | flowUnknownEtherType: promauto.NewCounterVec(prometheus.CounterOpts{ 101 | Namespace: "flowhouse", 102 | Subsystem: "sflow", 103 | Name: "flow_samples_unknown_ether_type", 104 | Help: "Flow samples unknown ether type", 105 | }, labels), 106 | flowDot1qDecodeErrors: promauto.NewCounterVec(prometheus.CounterOpts{ 107 | Namespace: "flowhouse", 108 | Subsystem: "sflow", 109 | Name: "flow_samples_dot1q_decode_errors", 110 | Help: "Flow samples Dot1Q decode errors", 111 | }, labels), 112 | flowIPv4DecodeErrors: promauto.NewCounterVec(prometheus.CounterOpts{ 113 | Namespace: "flowhouse", 114 | Subsystem: "sflow", 115 | Name: "flow_samples_ipv4_decode_errors", 116 | Help: "Flow samples IPv4 decode errors", 117 | }, labels), 118 | flowIPv6DecodeErrors: promauto.NewCounterVec(prometheus.CounterOpts{ 119 | Namespace: "flowhouse", 120 | Subsystem: "sflow", 121 | Name: "flow_samples_ipv6_decode_errors", 122 | Help: "Flow samples IPv6 decode errors", 123 | }, labels), 124 | flowTCPDecodeErros: 
promauto.NewCounterVec(prometheus.CounterOpts{ 125 | Namespace: "flowhouse", 126 | Subsystem: "sflow", 127 | Name: "flow_samples_tcp_decode_errors", 128 | Help: "Flow samples TCP decode errors", 129 | }, labels), 130 | flowUDPDecodeErros: promauto.NewCounterVec(prometheus.CounterOpts{ 131 | Namespace: "flowhouse", 132 | Subsystem: "sflow", 133 | Name: "flow_samples_udp_decode_errors", 134 | Help: "Flow samples UDP decode errors", 135 | }, labels), 136 | stopCh: make(chan struct{}), 137 | } 138 | 139 | addr, err := net.ResolveUDPAddr("udp", listen) 140 | if err != nil { 141 | return nil, errors.Wrap(err, "Unable to resolve UDP address") 142 | } 143 | 144 | con, err := net.ListenUDP("udp", addr) 145 | if err != nil { 146 | return nil, errors.Wrap(err, "ListenUDP failed") 147 | } 148 | sfs.conn = con 149 | 150 | sfs.startService(numReaders) 151 | return sfs, nil 152 | } 153 | 154 | func (sfs *SflowServer) startService(numReaders int) { 155 | for i := 0; i < numReaders; i++ { 156 | sfs.wg.Add(1) 157 | go func() { 158 | defer sfs.wg.Done() 159 | err := sfs.packetWorker() 160 | if err != nil { 161 | log.WithError(err).Error("packetWorker failed") 162 | } 163 | }() 164 | } 165 | } 166 | 167 | // Stop closes the socket and stops the workers 168 | func (sfs *SflowServer) Stop() { 169 | log.Info("Stopping SflowServer") 170 | debug.PrintStack() 171 | close(sfs.stopCh) 172 | sfs.aggregator.Stop() 173 | sfs.conn.Close() 174 | sfs.wg.Wait() 175 | } 176 | 177 | // packetWorker reads sflow packet from socket and handsoff processing to ??? 
178 | func (sfs *SflowServer) packetWorker() error { 179 | buffer := make([]byte, 8960) 180 | for { 181 | if sfs.stopped() { 182 | return nil 183 | } 184 | 185 | length, remote, err := sfs.conn.ReadFromUDP(buffer) 186 | if err == io.EOF { 187 | return nil 188 | } 189 | 190 | if err != nil { 191 | return errors.Wrap(err, "ReadFromUDP failed") 192 | } 193 | 194 | remote4 := remote.IP.To4() 195 | if remote4 != nil { 196 | remote.IP = remote4 197 | } 198 | 199 | remoteAddr, err := bnet.IPFromBytes([]byte(remote.IP)) 200 | if err != nil { 201 | return errors.Wrapf(err, "Unable to convert net.IP to bnet.IP: %q", remote) 202 | } 203 | 204 | sfs.packetsReceived.WithLabelValues(remoteAddr.String()).Inc() 205 | sfs.processPacket(remoteAddr, buffer[:length]) 206 | } 207 | } 208 | 209 | func (sfs *SflowServer) stopped() bool { 210 | select { 211 | case <-sfs.stopCh: 212 | return true 213 | default: 214 | return false 215 | } 216 | } 217 | 218 | // processPacket takes a raw sflow packet, send it to the decoder and passes the decoded packet to the aggregator 219 | func (sfs *SflowServer) processPacket(agent bnet.IP, buffer []byte) { 220 | agentStr := agent.String() 221 | 222 | p, err := sflow.Decode(buffer) 223 | if err != nil { 224 | log.WithError(err).Error("Unable to decode sflow packet") 225 | return 226 | } 227 | 228 | for _, fs := range p.FlowSamples { 229 | sfs.flowSamplesReceived.WithLabelValues(agentStr).Inc() 230 | 231 | if fs.RawPacketHeader == nil { 232 | sfs.flowNoRawPktHeader.WithLabelValues(agentStr).Inc() 233 | continue 234 | } 235 | 236 | if fs.Data == nil { 237 | sfs.flowNoData.WithLabelValues(agentStr).Inc() 238 | continue 239 | } 240 | 241 | if fs.RawPacketHeader.HeaderProtocol != 1 { 242 | sfs.flowUnknownProtocol.WithLabelValues(agentStr).Inc() 243 | continue 244 | } 245 | 246 | ether, err := packet.DecodeEthernet(fs.Data, fs.RawPacketHeader.OriginalPacketLength) 247 | if err != nil { 248 | sfs.flowEthernetDecodeErrors.WithLabelValues(agentStr).Inc() 249 | 
log.WithError(err).Debug("Unable to decode ethernet packet") 250 | continue 251 | } 252 | fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfEthernetII) 253 | fs.DataLen -= uint32(packet.SizeOfEthernetII) 254 | 255 | fl := &flow.Flow{ 256 | Agent: agent, 257 | IntIn: sfs.ifResolver.Resolve(agent, fs.FlowSampleHeader.InputIf), 258 | IntOut: sfs.ifResolver.Resolve(agent, fs.FlowSampleHeader.OutputIf), 259 | Size: uint64(fs.RawPacketHeader.FrameLength), 260 | Packets: 1, 261 | Timestamp: time.Now().Unix(), 262 | Samplerate: uint64(fs.FlowSampleHeader.SamplingRate), 263 | } 264 | 265 | if fl.IntIn == "" { 266 | fl.IntIn += fmt.Sprintf("%d", fs.FlowSampleHeader.InputIf) 267 | } 268 | 269 | if fl.IntOut == "" { 270 | fl.IntOut += fmt.Sprintf("%d", fs.FlowSampleHeader.OutputIf) 271 | } 272 | 273 | if fs.ExtendedRouterData != nil { 274 | nh, err := bnet.IPFromBytes([]byte(fs.ExtendedRouterData.NextHop)) 275 | if err == nil { 276 | fl.NextHop = nh 277 | } 278 | } 279 | 280 | if fs.ExtendedSwitchData != nil { 281 | fl.IntIn += fmt.Sprintf(".%d", fs.ExtendedSwitchData.IncomingVLAN) 282 | fl.IntOut += fmt.Sprintf(".%d", fs.ExtendedSwitchData.OutgoingVLAN) 283 | } 284 | 285 | sfs.processEthernet(agentStr, ether.EtherType, fs, fl) 286 | sfs.aggregator.GetIngress() <- fl 287 | } 288 | } 289 | 290 | func (sfs *SflowServer) processEthernet(agentStr string, ethType uint16, fs *sflow.FlowSample, fl *flow.Flow) { 291 | if ethType == packet.EtherTypeIPv4 { 292 | sfs.processIPv4Packet(agentStr, fs, fl) 293 | } else if ethType == packet.EtherTypeIPv6 { 294 | sfs.processIPv6Packet(agentStr, fs, fl) 295 | } else if ethType == packet.EtherTypeARP || ethType == packet.EtherTypeLACP { 296 | return 297 | } else if ethType == packet.EtherTypeIEEE8021Q { 298 | sfs.processDot1QPacket(agentStr, fs, fl) 299 | } else { 300 | sfs.flowUnknownEtherType.WithLabelValues(agentStr).Inc() 301 | log.Debugf("Unknown EtherType: 0x%x", ethType) 302 | } 303 | } 304 | 305 | func (sfs *SflowServer) 
processDot1QPacket(agentStr string, fs *sflow.FlowSample, fl *flow.Flow) { 306 | dot1q, err := packet.DecodeDot1Q(fs.Data, fs.DataLen) 307 | if err != nil { 308 | sfs.flowDot1qDecodeErrors.WithLabelValues(agentStr).Inc() 309 | log.WithError(err).Debug("Unable to decode dot1q header") 310 | } 311 | fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfDot1Q) 312 | fs.DataLen -= uint32(packet.SizeOfDot1Q) 313 | 314 | sfs.processEthernet(agentStr, dot1q.EtherType, fs, fl) 315 | } 316 | 317 | func (sfs *SflowServer) processIPv4Packet(agentStr string, fs *sflow.FlowSample, fl *flow.Flow) { 318 | fl.Family = 4 319 | ipv4, err := packet.DecodeIPv4(fs.Data, fs.DataLen) 320 | if err != nil { 321 | sfs.flowIPv4DecodeErrors.WithLabelValues(agentStr).Inc() 322 | log.WithError(err).Debug("Unable to decode IPv4 packet") 323 | } 324 | fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfIPv4Header) 325 | fs.DataLen -= uint32(packet.SizeOfIPv4Header) 326 | 327 | fl.TOS = ipv4.TOS 328 | fl.SrcAddr, _ = bnet.IPFromBytes(convert.Reverse(ipv4.SrcAddr[:])) 329 | fl.DstAddr, _ = bnet.IPFromBytes(convert.Reverse(ipv4.DstAddr[:])) 330 | fl.Protocol = uint8(ipv4.Protocol) 331 | switch ipv4.Protocol { 332 | case packet.TCP: 333 | if err := getTCP(fs.Data, fs.DataLen, fl); err != nil { 334 | sfs.flowTCPDecodeErros.WithLabelValues(agentStr).Inc() 335 | log.WithError(err).Debug("Unable to decode TCP") 336 | } 337 | case packet.UDP: 338 | if err := getUDP(fs.Data, fs.DataLen, fl); err != nil { 339 | sfs.flowUDPDecodeErros.WithLabelValues(agentStr).Inc() 340 | log.WithError(err).Debug("Unable to decode UDP") 341 | } 342 | } 343 | } 344 | 345 | func (sfs *SflowServer) processIPv6Packet(agentStr string, fs *sflow.FlowSample, fl *flow.Flow) { 346 | fl.Family = 6 347 | ipv6, err := packet.DecodeIPv6(fs.Data, fs.DataLen) 348 | if err != nil { 349 | sfs.flowIPv6DecodeErrors.WithLabelValues(agentStr).Inc() 350 | log.WithError(err).Debug("Unable to decode IPv6 packet") 351 | } 352 | fs.Data = 
unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfIPv6Header) 353 | fs.DataLen -= uint32(packet.SizeOfIPv6Header) 354 | 355 | fl.TOS = extractTrafficClass(ipv6.VersionTrafficClassFlowLabel) 356 | fl.SrcAddr, _ = bnet.IPFromBytes(convert.Reverse(ipv6.SrcAddr[:])) 357 | fl.DstAddr, _ = bnet.IPFromBytes(convert.Reverse(ipv6.DstAddr[:])) 358 | fl.Protocol = uint8(ipv6.NextHeader) 359 | switch ipv6.NextHeader { 360 | case packet.TCP: 361 | if err := getTCP(fs.Data, fs.DataLen, fl); err != nil { 362 | sfs.flowTCPDecodeErros.WithLabelValues(agentStr).Inc() 363 | log.WithError(err).Debug("Unable to decode TCP") 364 | } 365 | case packet.UDP: 366 | if err := getUDP(fs.Data, fs.DataLen, fl); err != nil { 367 | sfs.flowUDPDecodeErros.WithLabelValues(agentStr).Inc() 368 | log.WithError(err).Debug("Unable to decode UDP") 369 | } 370 | } 371 | } 372 | 373 | func extractTrafficClass(versionTrafficClassFlowLabel uint32) uint8 { 374 | versionTrafficClassFlowLabel &= 0x0FF00000 375 | return uint8(versionTrafficClassFlowLabel >> 20) 376 | } 377 | 378 | func getUDP(udpPtr unsafe.Pointer, length uint32, fl *flow.Flow) error { 379 | udp, err := packet.DecodeUDP(udpPtr, length) 380 | if err != nil { 381 | return errors.Wrap(err, "Unable to decode UDP datagram") 382 | } 383 | 384 | fl.SrcPort = udp.SrcPort 385 | fl.DstPort = udp.DstPort 386 | 387 | return nil 388 | } 389 | 390 | func getTCP(tcpPtr unsafe.Pointer, length uint32, fl *flow.Flow) error { 391 | tcp, err := packet.DecodeTCP(tcpPtr, length) 392 | if err != nil { 393 | return errors.Wrap(err, "Unable to decode TCP segment") 394 | } 395 | 396 | fl.SrcPort = tcp.SrcPort 397 | fl.DstPort = tcp.DstPort 398 | 399 | return nil 400 | } 401 | 402 | // Dump dumps a flow on the screen 403 | func Dump(fl *flow.Flow) { 404 | fmt.Printf("--------------------------------\n") 405 | fmt.Printf("Flow dump:\n") 406 | fmt.Printf("Agent: %s\n", fl.Agent.String()) 407 | fmt.Printf("Family: %d\n", fl.Family) 408 | fmt.Printf("SrcAddr: %s\n", 
fl.SrcAddr.String()) 409 | fmt.Printf("DstAddr: %s\n", fl.DstAddr.String()) 410 | fmt.Printf("Protocol: %d\n", fl.Protocol) 411 | fmt.Printf("NextHop: %s\n", fl.NextHop.String()) 412 | fmt.Printf("IntIn: %s\n", fl.IntIn) 413 | fmt.Printf("IntOut: %s\n", fl.IntOut) 414 | fmt.Printf("Packets: %d\n", fl.Packets) 415 | fmt.Printf("Bytes: %d\n", fl.Size) 416 | fmt.Printf("--------------------------------\n") 417 | } 418 | -------------------------------------------------------------------------------- /pkg/servers/sflow/sfserver_test.go: -------------------------------------------------------------------------------- 1 | package sflow 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/bio-routing/tflow2/convert" 7 | ) 8 | 9 | func TestExtractTrafficClass(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | input uint32 13 | expected uint8 14 | }{ 15 | { 16 | name: "Test case 1", 17 | input: convert.Uint32b([]byte{0, 0, 0xff, 0xff}), 18 | expected: 0, 19 | }, 20 | { 21 | name: "Test case 2", 22 | input: convert.Uint32b([]byte{0xcf, 0xf0, 0xac, 0xab}), 23 | expected: 255, 24 | }, 25 | } 26 | 27 | for _, test := range tests { 28 | t.Run(test.name, func(t *testing.T) { 29 | result := extractTrafficClass(test.input) 30 | if result != test.expected { 31 | t.Errorf("Expected %d, got %d", test.expected, result) 32 | } 33 | }) 34 | } 35 | } 36 | --------------------------------------------------------------------------------