├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── assets
└── image.png
├── cmd
└── flowhouse
│ ├── config.yaml
│ ├── config
│ └── config.go
│ └── main.go
├── go.mod
├── go.sum
└── pkg
├── clickhousegw
├── clickhousegw.go
└── clickhousegw_test.go
├── flowhouse
└── flowhouse.go
├── frontend
├── assets
│ ├── flowhouse.js
│ └── index.html
├── bindata.go
├── frontend.go
├── frontend_test.go
└── result.go
├── intfmapper
├── device.go
└── intfmapper.go
├── ipannotator
└── ipannotator.go
├── models
└── flow
│ └── flow.go
├── packet
├── ipfix
│ ├── decode.go
│ ├── decode_test.go
│ ├── field_db.go
│ ├── options_templates.go
│ ├── packet.go
│ ├── template_test.go
│ └── templates.go
├── nf9
│ ├── decode.go
│ ├── decode_test.go
│ ├── field_db.go
│ ├── packet.go
│ └── templates.go
├── packet
│ ├── dot1q.go
│ ├── ethernet.go
│ ├── ethernet_test.go
│ ├── ipv4.go
│ ├── ipv6.go
│ ├── tcp.go
│ └── udp.go
└── sflow
│ ├── decode.go
│ ├── decode_test.go
│ └── packet.go
├── routemirror
├── route_mirror.go
├── router.go
└── vrf.go
└── servers
├── aggregator
└── aggregator.go
├── ipfix
├── ipfix_server.go
├── ipfix_template_cache.go
└── sample_rate_cache.go
└── sflow
├── sfserver.go
└── sfserver_test.go
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: default test vendor vendor-deps container push gitlab_ci_check apply-vendor-lock prepare-vendor-updates
2 |
3 | all: bindata build
4 |
5 | build:
6 | cd cmd/flowhouse; go build
7 |
8 | bindata:
9 | cd pkg/frontend; go-bindata -pkg frontend assets/
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # flowhouse
2 |
3 | Flowhouse is a [Clickhouse](https://clickhouse.tech/) based sFlow + IPFIX collector and web based analyzer that offers rich annotation and querying features.
4 |
5 | 
6 |
7 | ## Interface Name Discovery
8 |
9 | Discovery of interface names is supported using SNMP v2 and v3. The database always stores interface names, not IDs.
10 |
11 | ## Static Meta Data Annotations
12 |
13 | Static meta data annotations are supported by the use of Clickhouse dicts.
14 |
15 | `config.yaml` snippet:
16 | ```
17 | dicts:
18 | - field: "agent"
19 | dict: "ip_addrs"
20 | expr: "tuple(IPv6NumToString(%s))"
21 | - field: "src_ip_addr"
22 | dict: "ip_addrs"
23 | expr: "tuple(IPv6NumToString(%s))"
24 | - field: "dst_ip_addr"
25 | dict: "ip_addrs"
26 | expr: "tuple(IPv6NumToString(%s))"
27 | ```
28 |
29 | Enable dict on a clickhouse-server (extract from v24.12.3.47)
30 | ```
31 | user@host ~ % cat /etc/clickhouse-server/config.d/ip_addrs_dictionary.xml
32 |
33 |
34 |
35 | ip_addrs
36 |
37 |
38 | /etc/clickhouse-server/ips.csv
39 | CSV
40 |
41 |
42 | 300
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 | address
51 | String
52 |
53 |
54 |
55 | hostname
56 | String
57 | ?
58 |
59 |
60 | interface
61 | String
62 | ?
63 |
64 |
65 | role
66 | String
67 | ?
68 |
69 |
70 | site
71 | String
72 | ?
73 |
74 |
75 | region
76 | String
77 | ?
78 |
79 |
80 | asn
81 | UInt32
82 | 0
83 |
84 |
85 |
86 |
87 | ```
88 |
89 | CSV for a dict:
90 | ```
91 | user@host ~ % cat /etc/clickhouse-server/ips.csv
92 | 192.0.2.1,core01.pop01,et-0/0/0.0,backbone-router,FRA01,eu-central,4200001234
93 | 192.0.2.3,core02.pop02,et-0/0/1.0,backbone-router,FRA02,eu-central,4200001234
94 | 10.0.0.1,srv01,ens3,server,DUB01,eu-west,4200002947
95 | ```
96 |
97 |
98 | ## Dynamic Routing Meta Data Annotations
99 |
100 | Dynamic routing meta data annotations like source and destination prefix, source, destination and nexthop ASN are supported
101 | on the basis of the [BIO routing RIS](https://github.com/bio-routing/bio-rd/tree/master/cmd/ris).
102 |
103 | ## Installation
104 | ```go get github.com/bio-routing/flowhouse/cmd/flowhouse```
105 |
106 | ```go install github.com/bio-routing/flowhouse/cmd/flowhouse```
107 |
108 | ## Configuration
109 |
110 | `config.yaml` example: [cmd/flowhouse/config.yaml](https://github.com/bio-routing/flowhouse/blob/master/cmd/flowhouse/config.yaml)
111 |
112 |
113 | Format is defined here: [https://github.com/bio-routing/flowhouse/blob/master/cmd/flowhouse/config/config.go#L21](https://github.com/bio-routing/flowhouse/blob/master/cmd/flowhouse/config/config.go#L21)
114 |
115 | ## Running
116 | ```
117 | user@host ~ % flowhouse --help
118 | Usage of flowhouse:
119 | -config.file string
120 | Config file path (YAML) (default "config.yaml")
121 | -debug
122 | Enable debug logging
123 | ```
124 |
--------------------------------------------------------------------------------
/assets/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bio-routing/flowhouse/fdc798c58712dde038678ab91126a53ddc055c36/assets/image.png
--------------------------------------------------------------------------------
/cmd/flowhouse/config.yaml:
--------------------------------------------------------------------------------
1 | ris_timeout: 10
2 | listen_sflow: ":6343"
3 | listen_ipfix: ":2055"
4 | listen_http: ":9991"
5 | default_vrf: "0:0"
6 | disable_ip_annotator: true
7 | snmp:
8 | version: 2
9 | community: "PLEASE-CHANGE-ME"
10 | user: "PLEASE-CHANGE-ME"
11 | auth-key: "PLEASE-CHANGE-ME"
12 | privacy-passphrase: "PLEASE-CHANGE-ME"
13 | clickhouse:
14 | address: "localhost:9000"
15 | user: "PLEASE-CHANGE-ME"
16 | password: "PLEASE-CHANGE-ME"
17 | database: "flows"
18 | dicts:
19 | - field: "agent"
20 | dict: "ip_addrs"
21 | expr: "tuple(IPv6NumToString(%s))"
22 | - field: "src_ip_addr"
23 | dict: "ip_addrs"
24 | expr: "tuple(IPv6NumToString(%s))"
25 | - field: "dst_ip_addr"
26 | dict: "ip_addrs"
27 | expr: "tuple(IPv6NumToString(%s))"
28 | routers:
29 | - name: "core01.pop01"
30 | address: 192.0.2.1
31 | ris_instances:
32 | - "ris01.pop01:4321"
33 | vrfs: ["0:0"]
34 | - name: "core02.pop02"
35 | address: 192.0.2.2
36 |
--------------------------------------------------------------------------------
/cmd/flowhouse/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "io/ioutil"
5 |
6 | "github.com/bio-routing/bio-rd/routingtable/vrf"
7 | "github.com/bio-routing/flowhouse/pkg/clickhousegw"
8 | "github.com/bio-routing/flowhouse/pkg/frontend"
9 | "github.com/pkg/errors"
10 | "gopkg.in/yaml.v2"
11 |
12 | bnet "github.com/bio-routing/bio-rd/net"
13 | )
14 |
const (
	// Defaults applied by Config.load when the corresponding listen
	// addresses are left empty in the YAML file.
	listenSFlowDefault = ":6343"
	listenHTTPDefault  = ":9991"
)

// Config represents a config file
type Config struct {
	RISTimeout         uint64                         `yaml:"ris_timeout"` // seconds; 0 means "use default" (10)
	SNMP               *SNMPConfig                    `yaml:"snmp"`
	DefaultVRF         string                         `yaml:"default_vrf"` // human readable RD, e.g. "0:0"
	defaultVRF         uint64                         // parsed form of DefaultVRF, filled by load()
	ListenSFlow        string                         `yaml:"listen_sflow"`
	ListenIPFIX        string                         `yaml:"listen_ipfix"`
	ListenHTTP         string                         `yaml:"listen_http"`
	Dicts              frontend.Dicts                 `yaml:"dicts"`
	Clickhouse         *clickhousegw.ClickhouseConfig `yaml:"clickhouse"`
	Routers            []*Router                      `yaml:"routers"`
	DisableIPAnnotator bool                           `yaml:"disable_ip_annotator"`
}

// SNMPConfig holds the credentials used for interface name discovery.
// Community is used for SNMP v2; User/AuthPassphrase/PrivacyPassphrase
// apply to SNMP v3.
type SNMPConfig struct {
	Version           uint   `yaml:"version"`
	Community         string `yaml:"community"`
	User              string `yaml:"user"`
	AuthPassphrase    string `yaml:"auth-key"`
	PrivacyPassphrase string `yaml:"privacy-passphrase"`
}
42 |
43 | func (c *Config) load() error {
44 | if c.RISTimeout == 0 {
45 | c.RISTimeout = 10
46 | }
47 |
48 | if c.ListenSFlow == "" {
49 | c.ListenSFlow = listenSFlowDefault
50 | }
51 |
52 | if c.ListenHTTP == "" {
53 | c.ListenHTTP = listenHTTPDefault
54 | }
55 |
56 | if c.DefaultVRF != "" {
57 | vrfID, err := vrf.ParseHumanReadableRouteDistinguisher(c.DefaultVRF)
58 | if err != nil {
59 | return errors.Wrap(err, "Unable to perse default VRF")
60 | }
61 |
62 | c.defaultVRF = vrfID
63 | }
64 |
65 | for _, r := range c.Routers {
66 | err := r.load()
67 | if err != nil {
68 | return errors.Wrapf(err, "Unable to load config for router %q", r.Name)
69 | }
70 | }
71 |
72 | return nil
73 | }
74 |
// GetDefaultVRF gets the default VRF id (the parsed form of the
// "default_vrf" YAML field; zero if unset).
func (c *Config) GetDefaultVRF() uint64 {
	return c.defaultVRF
}

// Router represents a router
type Router struct {
	Name         string   `yaml:"name"`
	Address      string   `yaml:"address"` // textual IP, parsed by load()
	address      bnet.IP  // parsed form of Address
	RISInstances []string `yaml:"ris_instances"` // RIS endpoints ("host:port")
	VRFs         []string `yaml:"vrfs"`          // human readable RDs
	vrfs         []uint64 // parsed forms of VRFs
}

// GetAddress gets a routers address
func (r *Router) GetAddress() bnet.IP {
	return r.address
}

// GetVRFs gets a routers VRFs
func (r *Router) GetVRFs() []uint64 {
	return r.vrfs
}
99 |
100 | func (r *Router) load() error {
101 | a, err := bnet.IPFromString(r.Address)
102 | if err != nil {
103 | return errors.Wrap(err, "Unable to parse IP address")
104 | }
105 |
106 | r.address = a
107 |
108 | for _, x := range r.VRFs {
109 | vrfRD, err := vrf.ParseHumanReadableRouteDistinguisher(x)
110 | if err != nil {
111 | return errors.Wrapf(err, "Unable to parse VRF RD %q", x)
112 | }
113 |
114 | r.vrfs = append(r.vrfs, vrfRD)
115 | }
116 |
117 | return nil
118 | }
119 |
120 | // GetConfig gets the configuration
121 | func GetConfig(fp string) (*Config, error) {
122 | fc, err := ioutil.ReadFile(fp)
123 | if err != nil {
124 | return nil, errors.Wrap(err, "Unable to read file")
125 | }
126 |
127 | c := &Config{}
128 | err = yaml.Unmarshal(fc, c)
129 | if err != nil {
130 | return nil, errors.Wrap(err, "Unable to unmarshal")
131 | }
132 |
133 | err = c.Validate()
134 | if err != nil {
135 | return nil, errors.Wrap(err, "Unable to validate config")
136 | }
137 | c.load()
138 |
139 | return c, nil
140 | }
141 |
142 | func (c *Config) Validate() error {
143 | if c.Clickhouse.Sharded && c.Clickhouse.Cluster == "" {
144 | return errors.New("cluster must be set when Clickhouse is replicated")
145 | }
146 | return nil
147 | }
148 |
149 | // GetRISList gets a list of all referenced RIS instances
150 | func (c *Config) GetRISList() []string {
151 | m := make(map[string]struct{})
152 |
153 | for _, rtr := range c.Routers {
154 | for _, x := range rtr.RISInstances {
155 | m[x] = struct{}{}
156 | }
157 | }
158 |
159 | ret := make([]string, 0)
160 | for k := range m {
161 | ret = append(ret, k)
162 | }
163 |
164 | return ret
165 | }
166 |
--------------------------------------------------------------------------------
/cmd/flowhouse/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "sync"
6 | "time"
7 |
8 | "github.com/bio-routing/flowhouse/cmd/flowhouse/config"
9 | "github.com/bio-routing/flowhouse/pkg/flowhouse"
10 |
11 | log "github.com/sirupsen/logrus"
12 | )
13 |
14 | var (
15 | configFilePath = flag.String("config.file", "config.yaml", "Config file path (YAML)")
16 | debug = flag.Bool("debug", false, "Enable debug logging")
17 | )
18 |
19 | func main() {
20 | flag.Parse()
21 |
22 | if *debug {
23 | log.SetLevel(log.DebugLevel)
24 | log.Debug("logLevel: DEBUG")
25 | } else {
26 | log.SetLevel(log.InfoLevel)
27 | }
28 |
29 | cfg, err := config.GetConfig(*configFilePath)
30 | if err != nil {
31 | log.WithError(err).Fatal("Unable to get config")
32 | }
33 |
34 | fhcfg := &flowhouse.Config{
35 | ChCfg: cfg.Clickhouse,
36 | SNMP: cfg.SNMP,
37 | RISTimeout: time.Duration(cfg.RISTimeout) * time.Second,
38 | ListenSflow: cfg.ListenSFlow,
39 | ListenIPFIX: cfg.ListenIPFIX,
40 | ListenHTTP: cfg.ListenHTTP,
41 | DefaultVRF: cfg.GetDefaultVRF(),
42 | Dicts: cfg.Dicts,
43 | DisableIPAnnotator: cfg.DisableIPAnnotator,
44 | }
45 |
46 | fh, err := flowhouse.New(fhcfg)
47 | if err != nil {
48 | log.WithError(err).Fatal("Unable to create flowhouse instance")
49 | }
50 |
51 | for _, rtr := range cfg.Routers {
52 | fh.AddAgent(rtr.Name, rtr.GetAddress(), rtr.RISInstances, rtr.GetVRFs())
53 | }
54 |
55 | var wg sync.WaitGroup
56 | wg.Add(1)
57 | go func() {
58 | defer wg.Done()
59 | fh.Run()
60 | }()
61 |
62 | wg.Wait()
63 | }
64 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/bio-routing/flowhouse
2 |
3 | go 1.23.0
4 |
5 | toolchain go1.23.4
6 |
7 | require (
8 | github.com/ClickHouse/clickhouse-go v1.4.1
9 | github.com/bio-routing/bio-rd v0.0.3-pre5
10 | github.com/bio-routing/tflow2 v0.0.0-20200122091514-89924193643e
11 | github.com/gosnmp/gosnmp v1.38.0
12 | github.com/pkg/errors v0.9.1
13 | github.com/prometheus/client_golang v1.11.1
14 | github.com/sirupsen/logrus v1.6.0
15 | github.com/stretchr/testify v1.9.0
16 | google.golang.org/grpc v1.56.3
17 | gopkg.in/yaml.v2 v2.3.0
18 | )
19 |
20 | require (
21 | github.com/beorn7/perks v1.0.1 // indirect
22 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
23 | github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
24 | github.com/davecgh/go-spew v1.1.1 // indirect
25 | github.com/golang/protobuf v1.5.3 // indirect
26 | github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
27 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
28 | github.com/pmezard/go-difflib v1.0.0 // indirect
29 | github.com/prometheus/client_model v0.2.0 // indirect
30 | github.com/prometheus/common v0.26.0 // indirect
31 | github.com/prometheus/procfs v0.6.0 // indirect
32 | golang.org/x/net v0.38.0 // indirect
33 | golang.org/x/sys v0.31.0 // indirect
34 | golang.org/x/text v0.23.0 // indirect
35 | google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
36 | google.golang.org/protobuf v1.30.0 // indirect
37 | gopkg.in/yaml.v3 v3.0.1 // indirect
38 | )
39 |
--------------------------------------------------------------------------------
/pkg/clickhousegw/clickhousegw.go:
--------------------------------------------------------------------------------
1 | package clickhousegw
2 |
3 | import (
4 | "database/sql"
5 | "fmt"
6 | "net"
7 | "strings"
8 | "time"
9 |
10 | "github.com/bio-routing/flowhouse/pkg/models/flow"
11 | "github.com/pkg/errors"
12 |
13 | "github.com/ClickHouse/clickhouse-go"
14 |
15 | bnet "github.com/bio-routing/bio-rd/net"
16 | log "github.com/sirupsen/logrus"
17 | )
18 |
// tableName is the logical table all flows are written to and read from.
const tableName = "flows"

// ClickHouseGateway is a wrapper for Clickhouse
type ClickHouseGateway struct {
	cfg *ClickhouseConfig
	db  *sql.DB
}

// ClickhouseConfig represents a clickhouse client config
type ClickhouseConfig struct {
	Host     string `yaml:"host"` // NOTE(review): appears unused here; Address is what New() dials — confirm
	Address  string `yaml:"address"`
	User     string `yaml:"user"`
	Password string `yaml:"password"`
	Database string `yaml:"database"`
	Sharded  bool   `yaml:"sharded"` // if true, a Distributed table fronting per-shard base tables is created
	Cluster  string `yaml:"cluster"` // required when Sharded is true
	Secure   bool   `yaml:"secure"`  // enable TLS on the native protocol connection
}
38 |
39 | // New instantiates a new ClickHouseGateway
40 | func New(cfg *ClickhouseConfig) (*ClickHouseGateway, error) {
41 | dsn := fmt.Sprintf("tcp://%s?username=%s&password=%s&database=%s&read_timeout=10&write_timeout=20&secure=%t",
42 | cfg.Address, cfg.User, cfg.Password, cfg.Database, cfg.Secure)
43 | c, err := sql.Open("clickhouse", dsn)
44 | if err != nil {
45 | return nil, errors.Wrap(err, "sql.Open failed")
46 | }
47 |
48 | err = c.Ping()
49 | if err != nil {
50 | if exception, ok := err.(*clickhouse.Exception); ok {
51 | return nil, errors.Wrapf(err, "[%d] %s \n%s", exception.Code, exception.Message, exception.StackTrace)
52 | }
53 |
54 | return nil, errors.Wrap(err, "c.Ping failed")
55 | }
56 |
57 | chgw := &ClickHouseGateway{
58 | cfg: cfg,
59 | db: c,
60 | }
61 |
62 | err = chgw.createFlowsSchemaIfNotExists()
63 | if err != nil {
64 | log.Errorf("Unable to create flows schema: %v", err)
65 | }
66 |
67 | return chgw, nil
68 | }
69 |
70 | func (c *ClickHouseGateway) createFlowsSchemaIfNotExists() error {
71 | zookeeperPathTimestamp := time.Now().Unix()
72 | _, err := c.db.Exec(c.getCreateTableSchemaDDL(true, zookeeperPathTimestamp))
73 |
74 | if err != nil {
75 | return errors.Wrap(err, "Query failed")
76 | }
77 |
78 | if c.cfg.Sharded {
79 | _, err = c.db.Exec(c.getCreateTableSchemaDDL(false, zookeeperPathTimestamp))
80 | }
81 | if err != nil {
82 | return errors.Wrap(err, "Query failed")
83 | }
84 |
85 | return nil
86 | }
87 |
// getCreateTableSchemaDDL renders the CREATE TABLE statement for the
// flows schema. isBaseTable selects between the storage table
// (MergeTree / ReplicatedMergeTree, with a 14 day TTL) and the
// Distributed front table used in sharded mode (no TTL).
// zookeeperPathPrefix is a timestamp baked into the replicated
// table's zookeeper path so re-creations get a fresh path.
func (c *ClickHouseGateway) getCreateTableSchemaDDL(isBaseTable bool, zookeeperPathPrefix int64) string {
	tableDDl := `
	CREATE TABLE IF NOT EXISTS %s%s (
		agent IPv6,
		int_in String,
		int_out String,
		tos UInt8,
		dscp UInt8,
		src_ip_addr IPv6,
		dst_ip_addr IPv6,
		src_ip_pfx_addr IPv6,
		src_ip_pfx_len UInt8,
		dst_ip_pfx_addr IPv6,
		dst_ip_pfx_len UInt8,
		nexthop IPv6,
		next_asn UInt32,
		src_asn UInt32,
		dst_asn UInt32,
		ip_protocol UInt8,
		src_port UInt16,
		dst_port UInt16,
		timestamp DateTime,
		size UInt64,
		packets UInt64,
		samplerate UInt64
	) ENGINE = %s
	PARTITION BY toStartOfTenMinutes(timestamp)
	ORDER BY (timestamp)
	%s
	SETTINGS index_granularity = 8192
	`
	ttl := "TTL timestamp + INTERVAL 14 DAY"
	// In sharded mode DDL must run on every node of the cluster.
	onClusterStatement := ""
	if c.cfg.Sharded {
		onClusterStatement = " ON CLUSTER " + c.cfg.Cluster
	}

	if isBaseTable {
		return fmt.Sprintf(tableDDl, c.getBaseTableName(), onClusterStatement, c.getBaseTableEngineDDL(zookeeperPathPrefix), ttl)
	} else {
		return fmt.Sprintf(tableDDl, tableName, onClusterStatement, c.getDistributedTableDDl(), "")
	}
}
131 |
132 | func (c *ClickHouseGateway) getBaseTableName() string {
133 | if c.cfg.Sharded {
134 | return "_" + c.cfg.Database + "." + tableName + "_base"
135 | }
136 |
137 | return tableName
138 | }
139 |
140 | func (c *ClickHouseGateway) getBaseTableEngineDDL(zookeeperPathPrefix int64) string {
141 | if c.cfg.Sharded {
142 | // TODO: make zookeeper path configurable
143 | return fmt.Sprintf(
144 | "ReplicatedMergeTree('/clickhouse/tables/{shard}/%s/%s_%d', '{replica}')",
145 | c.cfg.Database,
146 | tableName,
147 | zookeeperPathPrefix)
148 | }
149 |
150 | return "MergeTree()"
151 | }
152 | func (c *ClickHouseGateway) getDistributedTableDDl() string {
153 | return fmt.Sprintf(
154 | "Distributed(%s, %s, %s, %s)",
155 | c.cfg.Cluster,
156 | "_"+c.cfg.Database,
157 | tableName+"_base",
158 | "rand()")
159 | }
160 |
161 | // InsertFlows inserts flows into clickhouse
162 | func (c *ClickHouseGateway) InsertFlows(flows []*flow.Flow) error {
163 | tx, err := c.db.Begin()
164 | if err != nil {
165 | return errors.Wrap(err, "Begin failed")
166 | }
167 |
168 | stmt, err := tx.Prepare(`INSERT INTO flows (
169 | agent,
170 | int_in,
171 | int_out,
172 | tos,
173 | dscp,
174 | src_ip_addr,
175 | dst_ip_addr,
176 | src_ip_pfx_addr,
177 | src_ip_pfx_len,
178 | dst_ip_pfx_addr,
179 | dst_ip_pfx_len,
180 | nexthop,
181 | next_asn,
182 | src_asn,
183 | dst_asn,
184 | ip_protocol,
185 | src_port,
186 | dst_port,
187 | timestamp,
188 | size,
189 | packets,
190 | samplerate
191 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? , ?, ?, ?)`)
192 | defer stmt.Close()
193 | if err != nil {
194 | return errors.Wrap(err, "Prepare failed")
195 | }
196 |
197 | for _, fl := range flows {
198 | _, err := stmt.Exec(
199 | fl.Agent.ToNetIP(),
200 | fl.IntIn,
201 | fl.IntOut,
202 | fl.TOS,
203 | dscp(fl.TOS),
204 | fl.SrcAddr.ToNetIP(),
205 | fl.DstAddr.ToNetIP(),
206 | addrToNetIP(fl.SrcPfx.Addr()),
207 | fl.SrcPfx.Pfxlen(),
208 | addrToNetIP(fl.DstPfx.Addr()),
209 | fl.DstPfx.Pfxlen(),
210 | fl.NextHop.ToNetIP(),
211 | fl.NextAs,
212 | fl.SrcAs,
213 | fl.DstAs,
214 | fl.Protocol,
215 | fl.SrcPort,
216 | fl.DstPort,
217 | fl.Timestamp,
218 | fl.Size,
219 | fl.Packets,
220 | fl.Samplerate,
221 | )
222 | if err != nil {
223 | return errors.Wrap(err, "Exec failed")
224 | }
225 | }
226 |
227 | err = tx.Commit()
228 | if err != nil {
229 | return errors.Wrap(err, "Commit failed")
230 | }
231 |
232 | return nil
233 | }
234 |
// dscp extracts the DSCP value from a TOS byte. DSCP occupies the
// upper six bits of the TOS field; the lowest two bits are ECN.
func dscp(tos uint8) uint8 {
	const ecnBits = 2
	return tos >> ecnBits
}
240 |
241 | func addrToNetIP(addr *bnet.IP) net.IP {
242 | if addr == nil {
243 | return net.IP([]byte{0, 0, 0, 0})
244 | }
245 |
246 | return addr.ToNetIP()
247 | }
248 |
// Close closes the database handler
func (c *ClickHouseGateway) Close() {
	// NOTE(review): the error returned by db.Close is discarded here, so a
	// failed close is invisible to callers.
	c.db.Close()
}
253 |
254 | // GetColumnValues gets all unique values of a column
255 | func (c *ClickHouseGateway) GetColumnValues(columnName string) ([]string, error) {
256 | columnName = strings.Replace(columnName, " ", "", -1)
257 |
258 | query := fmt.Sprintf("SELECT %s FROM flows GROUP BY %s", columnName, columnName)
259 | res, err := c.db.Query(query)
260 | defer res.Close()
261 |
262 | if err != nil {
263 | return nil, errors.Wrap(err, "Exec failed")
264 | }
265 |
266 | result := make([]string, 0)
267 |
268 | for {
269 | v := ""
270 | res.Scan(&v)
271 |
272 | result = append(result, v)
273 | if !res.Next() {
274 | break
275 | }
276 | }
277 |
278 | return result, nil
279 | }
280 |
281 | // GetDictValues gets all values of a certain dicts attribute
282 | func (c *ClickHouseGateway) GetDictValues(dictName string, attr string) ([]string, error) {
283 | dictName = strings.Replace(dictName, " ", "", -1)
284 | attr = strings.Replace(attr, " ", "", -1)
285 |
286 | query := fmt.Sprintf("SELECT %s FROM dictionary(%s) GROUP BY %s", attr, dictName, attr)
287 | res, err := c.db.Query(query)
288 | defer res.Close()
289 |
290 | if err != nil {
291 | return nil, errors.Wrap(err, "Exec failed")
292 | }
293 |
294 | result := make([]string, 0)
295 |
296 | for {
297 | v := ""
298 | res.Scan(&v)
299 |
300 | result = append(result, v)
301 | if !res.Next() {
302 | break
303 | }
304 | }
305 |
306 | return result, nil
307 | }
308 |
309 | // GetDictFields gets the names of all fields in a dictionary
310 | func (c *ClickHouseGateway) GetDictFields(dictName string) ([]string, error) {
311 | dictName = strings.Replace(dictName, " ", "", -1)
312 |
313 | query := fmt.Sprintf("SELECT attribute.names FROM system.dictionaries WHERE name = '%s';", dictName)
314 | res, err := c.db.Query(query)
315 | defer res.Close()
316 | if err != nil {
317 | return nil, errors.Wrap(err, "Exec failed")
318 | }
319 |
320 | result := make([]string, 0)
321 | res.Next()
322 | err = res.Scan(&result)
323 | if err != nil {
324 | return nil, err
325 | }
326 |
327 | return result, nil
328 | }
329 |
330 | // DescribeTable gets the names of all fields of a table
331 | func (c *ClickHouseGateway) DescribeTable(tableName string) ([]string, error) {
332 | tableName = strings.Replace(tableName, " ", "", -1)
333 |
334 | query := fmt.Sprintf("DESCRIBE %s", tableName)
335 | res, err := c.db.Query(query)
336 | defer res.Close()
337 |
338 | if err != nil {
339 | return nil, errors.Wrap(err, "Exec failed")
340 | }
341 |
342 | result := make([]string, 0)
343 |
344 | for {
345 | name := ""
346 | trash := ""
347 | res.Scan(&name, &trash, &trash, &trash, &trash, &trash, &trash)
348 |
349 | result = append(result, name)
350 | if !res.Next() {
351 | break
352 | }
353 | }
354 |
355 | return result, nil
356 | }
357 |
// GetDatabaseName gets the databases name as set in the gateway's
// clickhouse configuration.
func (c *ClickHouseGateway) GetDatabaseName() string {
	return c.cfg.Database
}
362 |
// Query executes an SQL query against the underlying database and returns
// the raw result rows. The caller is responsible for closing the returned
// *sql.Rows.
func (c *ClickHouseGateway) Query(q string) (*sql.Rows, error) {
	return c.db.Query(q)
}
367 |
--------------------------------------------------------------------------------
/pkg/clickhousegw/clickhousegw_test.go:
--------------------------------------------------------------------------------
1 | package clickhousegw
2 |
3 | import (
4 | "database/sql"
5 | "fmt"
6 | "testing"
7 | "time"
8 | )
9 |
10 | func TestClickHouseGateway_getCreateTableSchemaDDL(t *testing.T) {
11 | zookeeperPathPrefix := time.Now().Unix()
12 | type fields struct {
13 | cfg *ClickhouseConfig
14 | db *sql.DB
15 | }
16 | type args struct {
17 | isBaseTable bool
18 | zookeeperPathPrefix int64
19 | }
20 | tests := []struct {
21 | name string
22 | fields fields
23 | args args
24 | want string
25 | }{
26 | {
27 | name: "Test getCreateTableSchemaDDL for simple MergeTree",
28 | fields: fields{
29 | cfg: &ClickhouseConfig{
30 | Database: "test",
31 | Sharded: false,
32 | },
33 | },
34 | args: args{
35 | isBaseTable: true,
36 | zookeeperPathPrefix: zookeeperPathPrefix,
37 | },
38 | want: `
39 | CREATE TABLE IF NOT EXISTS flows (
40 | agent IPv6,
41 | int_in String,
42 | int_out String,
43 | src_ip_addr IPv6,
44 | dst_ip_addr IPv6,
45 | src_ip_pfx_addr IPv6,
46 | src_ip_pfx_len UInt8,
47 | dst_ip_pfx_addr IPv6,
48 | dst_ip_pfx_len UInt8,
49 | nexthop IPv6,
50 | next_asn UInt32,
51 | src_asn UInt32,
52 | dst_asn UInt32,
53 | ip_protocol UInt8,
54 | src_port UInt16,
55 | dst_port UInt16,
56 | timestamp DateTime,
57 | size UInt64,
58 | packets UInt64,
59 | samplerate UInt64
60 | ) ENGINE = MergeTree()
61 | PARTITION BY toStartOfTenMinutes(timestamp)
62 | ORDER BY (timestamp)
63 | TTL timestamp + INTERVAL 14 DAY
64 | SETTINGS index_granularity = 8192
65 | `,
66 | },
67 | {
68 | name: "Test getCreateTableSchemaDDL for sharded base table with engine ReplicatedMergeTree",
69 | fields: fields{
70 | cfg: &ClickhouseConfig{
71 | Database: "test",
72 | Cluster: "test_cluster",
73 | Sharded: true,
74 | },
75 | },
76 | args: args{
77 | isBaseTable: true,
78 | zookeeperPathPrefix: zookeeperPathPrefix,
79 | },
80 | want: fmt.Sprintf(`
81 | CREATE TABLE IF NOT EXISTS _test.flows_base ON CLUSTER test_cluster (
82 | agent IPv6,
83 | int_in String,
84 | int_out String,
85 | src_ip_addr IPv6,
86 | dst_ip_addr IPv6,
87 | src_ip_pfx_addr IPv6,
88 | src_ip_pfx_len UInt8,
89 | dst_ip_pfx_addr IPv6,
90 | dst_ip_pfx_len UInt8,
91 | nexthop IPv6,
92 | next_asn UInt32,
93 | src_asn UInt32,
94 | dst_asn UInt32,
95 | ip_protocol UInt8,
96 | src_port UInt16,
97 | dst_port UInt16,
98 | timestamp DateTime,
99 | size UInt64,
100 | packets UInt64,
101 | samplerate UInt64
102 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/test/flows_%d', '{replica}')
103 | PARTITION BY toStartOfTenMinutes(timestamp)
104 | ORDER BY (timestamp)
105 | TTL timestamp + INTERVAL 14 DAY
106 | SETTINGS index_granularity = 8192
107 | `, zookeeperPathPrefix),
108 | },
109 | {
110 | name: "Test getCreateTableSchemaDDL for Distributed Table",
111 | fields: fields{
112 | cfg: &ClickhouseConfig{
113 | Database: "test",
114 | Sharded: true,
115 | Cluster: "test_cluster",
116 | },
117 | },
118 | args: args{
119 | isBaseTable: false,
120 | zookeeperPathPrefix: zookeeperPathPrefix,
121 | },
122 | want: `
123 | CREATE TABLE IF NOT EXISTS flows ON CLUSTER test_cluster (
124 | agent IPv6,
125 | int_in String,
126 | int_out String,
127 | src_ip_addr IPv6,
128 | dst_ip_addr IPv6,
129 | src_ip_pfx_addr IPv6,
130 | src_ip_pfx_len UInt8,
131 | dst_ip_pfx_addr IPv6,
132 | dst_ip_pfx_len UInt8,
133 | nexthop IPv6,
134 | next_asn UInt32,
135 | src_asn UInt32,
136 | dst_asn UInt32,
137 | ip_protocol UInt8,
138 | src_port UInt16,
139 | dst_port UInt16,
140 | timestamp DateTime,
141 | size UInt64,
142 | packets UInt64,
143 | samplerate UInt64
144 | ) ENGINE = Distributed(test_cluster, _test, flows_base, rand())
145 | PARTITION BY toStartOfTenMinutes(timestamp)
146 | ORDER BY (timestamp)
147 |
148 | SETTINGS index_granularity = 8192
149 | `,
150 | },
151 | }
152 | for _, tt := range tests {
153 | t.Run(tt.name, func(t *testing.T) {
154 | c := &ClickHouseGateway{
155 | cfg: tt.fields.cfg,
156 | db: tt.fields.db,
157 | }
158 | if got := c.getCreateTableSchemaDDL(tt.args.isBaseTable, zookeeperPathPrefix); got != tt.want {
159 | t.Errorf("getCreateTableSchemaDDL() = %v, want %v", got, tt.want)
160 | }
161 | })
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/pkg/flowhouse/flowhouse.go:
--------------------------------------------------------------------------------
1 | package flowhouse
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | "runtime"
7 | "runtime/debug"
8 | "time"
9 |
10 | "github.com/bio-routing/bio-rd/util/grpc/clientmanager"
11 | "github.com/bio-routing/flowhouse/cmd/flowhouse/config"
12 | "github.com/bio-routing/flowhouse/pkg/clickhousegw"
13 | "github.com/bio-routing/flowhouse/pkg/frontend"
14 | "github.com/bio-routing/flowhouse/pkg/intfmapper"
15 | "github.com/bio-routing/flowhouse/pkg/ipannotator"
16 | "github.com/bio-routing/flowhouse/pkg/models/flow"
17 | "github.com/bio-routing/flowhouse/pkg/routemirror"
18 | "github.com/bio-routing/flowhouse/pkg/servers/ipfix"
19 | "github.com/bio-routing/flowhouse/pkg/servers/sflow"
20 | "github.com/pkg/errors"
21 | "github.com/prometheus/client_golang/prometheus/promhttp"
22 | "google.golang.org/grpc"
23 | "google.golang.org/grpc/keepalive"
24 |
25 | bnet "github.com/bio-routing/bio-rd/net"
26 | log "github.com/sirupsen/logrus"
27 | )
28 |
// Flowhouse is an clickhouse based sflow collector
type Flowhouse struct {
	cfg               *Config                        // instance configuration
	ifMapper          *intfmapper.IntfMapper         // maps interface IDs to names (via SNMP, see AddAgent)
	routeMirror       *routemirror.RouteMirror       // local mirror of routes learned from RIS sources
	grpcClientManager *clientmanager.ClientManager   // shared gRPC connections to RIS backends
	ipa               *ipannotator.IPAnnotator       // annotates flows with routing data; nil if disabled
	sfs               *sflow.SflowServer             // sflow receiver
	ifxs              *ipfix.IPFIXServer             // IPFIX receiver
	chgw              *clickhousegw.ClickHouseGateway // clickhouse writer
	fe                *frontend.Frontend             // HTTP frontend
	flowsRX           chan []*flow.Flow              // receive channel fed by the sflow/IPFIX servers
}
42 |
// Config is flow house instances configuration
type Config struct {
	ChCfg              *clickhousegw.ClickhouseConfig // clickhouse connection settings
	SNMP               *config.SNMPConfig             // optional SNMP settings; nil disables interface mapping
	RISTimeout         time.Duration                  // keepalive time/timeout for RIS gRPC connections
	ListenSflow        string                         // listen address for the sflow server
	ListenIPFIX        string                         // listen address for the IPFIX server
	ListenHTTP         string                         // listen address for the HTTP frontend
	DefaultVRF         uint64                         // VRF assigned to flows before annotation
	Dicts              frontend.Dicts                 // dictionaries exposed by the frontend
	DisableIPAnnotator bool                           // if true, flows are not annotated with routing data
}
55 |
// ClickhouseConfig represents a clickhouse client config
//
// NOTE(review): this type appears unused within this file — Config takes a
// *clickhousegw.ClickhouseConfig instead. Verify whether this duplicate can
// be removed.
type ClickhouseConfig struct {
	Host     string
	Address  string
	User     string
	Password string
	Database string
}
64 |
65 | // New creates a new flowhouse instance
66 | func New(cfg *Config) (*Flowhouse, error) {
67 | fh := &Flowhouse{
68 | cfg: cfg,
69 | ifMapper: intfmapper.New(),
70 | routeMirror: routemirror.New(),
71 | grpcClientManager: clientmanager.New(),
72 | flowsRX: make(chan []*flow.Flow, 1024),
73 | }
74 |
75 | if !cfg.DisableIPAnnotator {
76 | fh.ipa = ipannotator.New(fh.routeMirror)
77 | }
78 |
79 | sfs, err := sflow.New(fh.cfg.ListenSflow, runtime.NumCPU(), fh.flowsRX, fh.ifMapper)
80 | if err != nil {
81 | return nil, errors.Wrap(err, "Unable to start sflow server")
82 | }
83 | fh.sfs = sfs
84 |
85 | ifxs, err := ipfix.New(fh.cfg.ListenIPFIX, runtime.NumCPU(), fh.flowsRX, fh.ifMapper)
86 | if err != nil {
87 | return nil, errors.Wrap(err, "Unable to start IPFIX server")
88 | }
89 | fh.ifxs = ifxs
90 |
91 | chgw, err := clickhousegw.New(fh.cfg.ChCfg)
92 | if err != nil {
93 | return nil, errors.Wrap(err, "Unable to create clickhouse wrapper")
94 | }
95 | fh.chgw = chgw
96 |
97 | fh.fe = frontend.New(fh.chgw, cfg.Dicts)
98 | return fh, nil
99 | }
100 |
101 | // AddAgent adds an agent
102 | func (f *Flowhouse) AddAgent(name string, addr bnet.IP, risAddrs []string, vrfs []uint64) {
103 | if f.cfg.SNMP != nil {
104 | f.ifMapper.AddDevice(addr, f.cfg.SNMP)
105 | }
106 |
107 | rtSource := make([]*grpc.ClientConn, 0)
108 | for _, risAddr := range risAddrs {
109 | f.grpcClientManager.AddIfNotExists(risAddr, grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{
110 | Time: f.cfg.RISTimeout,
111 | Timeout: f.cfg.RISTimeout,
112 | PermitWithoutStream: true,
113 | }))
114 |
115 | rtSource = append(rtSource, f.grpcClientManager.Get(risAddr))
116 | }
117 |
118 | for _, v := range vrfs {
119 | f.routeMirror.AddTarget(name, addr, rtSource, v)
120 | }
121 | }
122 |
123 | // Run runs flowhouse
124 | func (f *Flowhouse) Run() {
125 | f.installHTTPHandlers(f.fe)
126 | go http.ListenAndServe(f.cfg.ListenHTTP, nil)
127 | log.WithField("address", f.cfg.ListenHTTP).Info("Listening for HTTP requests")
128 |
129 | for {
130 | flows := <-f.flowsRX
131 |
132 | if f.ipa != nil {
133 | for _, fl := range flows {
134 | fl.VRFIn = f.cfg.DefaultVRF
135 | fl.VRFOut = f.cfg.DefaultVRF
136 |
137 | err := f.ipa.Annotate(fl)
138 | if err != nil {
139 | log.WithError(err).Info("Annotating failed")
140 | }
141 | }
142 | }
143 |
144 | err := f.chgw.InsertFlows(flows)
145 | if err != nil {
146 | log.WithError(err).Error("Insert failed")
147 | }
148 | }
149 | }
150 |
151 | func recoveryMiddleware(next http.Handler) http.Handler {
152 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
153 | defer func() {
154 | if err := recover(); err != nil {
155 | log.Printf("PANIC: %v\n%s", err, debug.Stack())
156 | http.Error(w,
157 | fmt.Sprintf("Internal server error: %v", err),
158 | http.StatusInternalServerError)
159 | }
160 | }()
161 | next.ServeHTTP(w, r)
162 | })
163 | }
164 |
// installHTTPHandlers registers the frontend's HTTP endpoints on the default
// ServeMux: the index page, the JS asset, the query endpoints (wrapped in
// the panic-recovery middleware), dictionary value lookup and Prometheus
// metrics.
func (f *Flowhouse) installHTTPHandlers(fe *frontend.Frontend) {
	http.HandleFunc("/", fe.IndexHandler)
	http.HandleFunc("/flowhouse.js", fe.FlowhouseJSHandler)
	http.Handle("/query", recoveryMiddleware(http.HandlerFunc(fe.QueryHandler(false))))
	http.Handle("/query/flat", recoveryMiddleware(http.HandlerFunc(fe.QueryHandler(true))))
	http.Handle("/dict_values/", recoveryMiddleware(http.HandlerFunc(fe.GetDictValues)))
	http.Handle("/metrics", promhttp.Handler())
}
173 |
--------------------------------------------------------------------------------
/pkg/frontend/assets/flowhouse.js:
--------------------------------------------------------------------------------
// Number of filter rows created so far; used to generate unique element IDs
// for each filter's field/value/remove controls.
var filtersCount = 0;

$(document).ready(function() {
    // Default the start time to 15 minutes ago, shifted by the timezone
    // offset so the formatted value reads as local time.
    // NOTE(review): formatTimestamp is defined elsewhere — presumably it
    // formats a Date for the time inputs; verify.
    var start = formatTimestamp(new Date(((new Date() / 1000) - 900 - new Date().getTimezoneOffset() * 60)* 1000));
    if ($("#time_start").val() == "") {
        $("#time_start").val(start);
    }

    // Default the end time to "now" (same timezone shift as above).
    var end = formatTimestamp(new Date(((new Date() / 1000) - new Date().getTimezoneOffset() * 60)* 1000));
    if ($("#time_end").val() == "") {
        $("#time_end").val(end);
    }

    $("#filterPlus").click(addFilter);
    $("form").on('submit', submitQuery);

    google.charts.load('current', {
        'packages': ['corechart']
    });

    // Re-draw whenever the query (stored in the URL hash) changes.
    window.onhashchange = function () {
        google.charts.setOnLoadCallback(drawChart);
    }

    // Initial draw for a page loaded with a hash already present.
    google.charts.setOnLoadCallback(drawChart);

    populateFields();
});
29 |
// addFilter appends a new filter row (cloned from #filterTemplate) and wires
// up its field-change and remove handlers.
//
// Returns the numeric index of the newly created filter row. The original
// implementation returned undefined, which broke populateFields(): it does
// `var fieldIndex = addFilter();` and builds element selectors from that
// index.
function addFilter() {
    const index = filtersCount;

    const filterTemplate = $("#filterTemplate").html().replace(/__NUM__/g, index);
    $("#filters").append(filterTemplate);

    const $filterField = $(`#filter_field\\[${index}\\]`);
    const $filterValue = $(`#filter_value\\[${index}\\]`);
    const $filterRemove = $(`#filter_remove\\[${index}\\]`);

    // When the filter's field changes, rename the value input after the
    // selected field and load that field's possible values.
    $filterField.change(function() {
        const fieldName = $(this).val();
        const filterNum = $(this).attr("id").match(/\d+/)[0];
        $filterValue.attr("name", fieldName);
        loadValues(filterNum, fieldName);
    });

    // Remove the whole filter row when its remove button is clicked.
    $filterRemove.click(function() {
        $(this).closest('.row').remove();
    });

    filtersCount++;
    return index;
}
51 |
52 |
// parseParams parses a URL-encoded query string ("a=1&b=2") into a plain
// object, decoding percent-escapes and turning '+' in values into spaces.
//
// A parameter without '=' maps to the empty string; the original code threw
// a TypeError in that case because `value` was undefined.
function parseParams(str) {
    return str.split('&').reduce(function(params, param) {
        const [key, value = ''] = param.split('=').map(decodeURIComponent);
        params[key] = value.replace(/\+/g, ' ');
        return params;
    }, {});
}
60 |
// populateFields restores the form state (breakdown, time range, topFlows
// and filter rows) from the query string stored in the URL hash.
function populateFields() {
    var query = location.href.split("#")[1];
    if (!query) {
        return;
    }

    var queryEquations = query.split('&');
    for (var i = 0; i < queryEquations.length; i++) {
        var e = queryEquations[i].split('=');
        var k = e[0];
        var v = decodeURIComponent(e[1]);

        if (k == "breakdown") {
            $("#breakdown option[value=" + v + "]").attr('selected', 'selected');
            continue;
        }

        if (k == "time_start") {
            $("#time_start").val(v);
            continue;
        }

        if (k == "time_end") {
            $("#time_end").val(v);
            continue;
        }

        if (k == "topFlows") {
            $("#topFlows").val(v);
            continue;
        }

        // filter_field entries carry no value of their own; the actual
        // field/value pair is reconstructed from the remaining keys below.
        if (k.match(/^filter_field/)) {
            continue;
        }

        // Any other key is treated as a filter: create a row and fill it in.
        // NOTE(review): this expects addFilter() to return the new row's
        // index — verify, since a missing return would leave fieldIndex
        // undefined and the selectors below would match nothing.
        var fieldIndex = addFilter();
        $("#filter_field\\[" + fieldIndex + "\\]").val(k);
        $("#filter_field\\[" + fieldIndex + "\\]").trigger("change");
        $("#filter_value\\[" + fieldIndex + "\\]").val(v);
    }
}
103 |
// submitQuery validates the form, serializes it into the URL hash (which
// triggers drawChart via onhashchange) and suppresses the normal submit.
//
// Fixes: accept the event object jQuery passes to the handler instead of
// relying on the non-standard global window.event, and declare `params`
// locally instead of creating an implicit global.
function submitQuery(event) {
    event.preventDefault();

    // Validate 'topFlows' box
    const topFlows = $('#topFlows').val();
    const topFlowsInt = parseInt(topFlows, 10);
    if (isNaN(topFlowsInt) || topFlowsInt < 1 || topFlowsInt > 10000) {
        alert("Incorrect 'Top Flows': please enter a valid integer between 1 and 10000.");
        return false;
    }

    let params = $('form').serialize();
    params += '&topFlows=' + encodeURIComponent(topFlows);
    location.href = "#" + params;
    return false;
}
120 |
// drawChart fetches the query encoded in the URL hash from the backend and
// hands the CSV payload to renderChart; errors are surfaced via showPopup
// and the chart area is cleared.
function drawChart() {
    const query = location.href.split("#")[1];
    if (!query) {
        return;
    }

    const onSuccess = function(rdata, status, xhr) {
        if (rdata == undefined) {
            $("#chart_div").text("No data found");
            return;
        }
        renderChart(rdata);
    };

    const onError = function(xhr) {
        showPopup(
            "Internal server error",
            "danger",
            20000,
            xhr.responseText
        );
        $("#chart_div").empty();
        document.getElementById('custom_legend').innerHTML = '';
    };

    $.ajax({
        type: "GET",
        url: "/query?" + query,
        dataType: "text",
        success: onSuccess,
        error: onError
    });
}
150 |
151 | function renderChart(rdata) {
152 | pres = Papa.parse(rdata.trim());
153 |
154 | var filtered = [pres.data[0]];
155 | for (const row of pres.data) {
156 | const hasNonZero = row.slice(1).some(val => {
157 | const num = parseFloat((val || '').trim());
158 | return !isNaN(num) && num !== 0;
159 | });
160 | if (hasNonZero) {
161 | filtered.push(row);
162 | }
163 | }
164 |
165 | var data = [];
166 | for (var i = 0; i < filtered.length; i++) {
167 | data[i] = [];
168 | for (var j = 0; j < filtered[i].length; j++) {
169 | var x = filtered[i][j];
170 | if (i !== 0 && j !== 0) {
171 | x = parseFloat((x || '').trim());
172 | if (isNaN(x)) x = 0;
173 | }
174 | data[i][j] = x;
175 | }
176 | }
177 |
178 | if (!window.seriesVisibility || window.seriesVisibility.length !== data[0].length - 1) {
179 | window.seriesVisibility = Array(data[0].length - 1).fill(true);
180 | }
181 |
182 | var filteredData = [];
183 | for (var i = 0; i < data.length; i++) {
184 | var row = [data[i][0]];
185 | for (var j = 1; j < data[i].length; j++) {
186 | if (window.seriesVisibility[j - 1]) {
187 | row.push(data[i][j]);
188 | }
189 | }
190 | filteredData.push(row);
191 | }
192 |
193 | if (filteredData[0].length < 2) {
194 | showPopup("No series selected. Please select at least one flow to display the chart.", "danger");
195 | $("#chart_div").empty();
196 | document.getElementById('custom_legend').innerHTML = '';
197 | return;
198 | }
199 |
200 |
201 | var chartData = google.visualization.arrayToDataTable(filteredData);
202 |
203 | var options = {
204 | isStacked: false,
205 | title: 'Flow Mbps',
206 | titleTextStyle: {
207 | fontSize: 24,
208 | bold: true,
209 | color: '#333'
210 | },
211 | hAxis: {
212 | title: 'Time',
213 | slantedText: true,
214 | slantedTextAngle: 60,
215 | showTextEvery: 10,
216 | titleTextStyle: {
217 | color: '#333',
218 | italic: false,
219 | bold: true,
220 | fontSize: 18
221 | },
222 | gridlines: {
223 | color: '#f3f3f3',
224 | count: 10
225 | },
226 | minorGridlines: {
227 | color: '#e9e9e9'
228 | },
229 | textStyle: {
230 | color: '#333',
231 | fontSize: 12
232 | }
233 | },
234 | vAxis: {
235 | minValue: 0,
236 | title: 'Megabits per second',
237 | titleTextStyle: {
238 | color: '#333',
239 | italic: false,
240 | bold: true,
241 | fontSize: 18
242 | },
243 | gridlines: {
244 | color: '#f3f3f3',
245 | count: 10
246 | },
247 | minorGridlines: {
248 | color: '#e9e9e9'
249 | },
250 | textStyle: {
251 | color: '#333',
252 | fontSize: 12
253 | }
254 | },
255 | height: screen.height * 0.7,
256 | chartArea: {
257 | width: '90%',
258 | height: '70%',
259 | top: '5%',
260 | backgroundColor: {
261 | stroke: '#ccc',
262 | strokeWidth: 1
263 | }
264 | },
265 | backgroundColor: '#ffffff',
266 | colors: ['#2196F3', '#4CAF50', '#FFC107', '#FF5722', '#9C27B0'],
267 | animation: {
268 | startup: true,
269 | duration: 1000,
270 | easing: 'out'
271 | },
272 | legend: {
273 | position: 'none'
274 | },
275 | tooltip: {
276 | textStyle: {
277 | color: '#333',
278 | fontSize: 12
279 | },
280 | showColorCode: true
281 | },
282 | lineWidth: 2,
283 | pointSize: 1,
284 | series: {
285 | 0: { lineDashStyle: [4, 4] },
286 | 1: { lineDashStyle: [2, 2] },
287 | 2: { lineDashStyle: [4, 2] },
288 | 3: { lineDashStyle: [2, 4] },
289 | 4: { lineDashStyle: [1, 1] }
290 | }
291 | };
292 |
293 | var chart = new google.visualization.AreaChart(document.getElementById('chart_div'));
294 | chart.draw(chartData, options);
295 |
296 | renderLegendTable();
297 |
298 | function renderLegendTable() {
299 | const flowStats = [];
300 | for (let i = 1; i < data[0].length; i++) {
301 | let max = -Infinity;
302 | for (let j = 1; j < data.length; j++) {
303 | const val = data[j][i];
304 | if (typeof val === "number" && !isNaN(val)) {
305 | if (val > max) max = val;
306 | }
307 | }
308 | flowStats.push({
309 | index: i,
310 | label: data[0][i],
311 | max: max === -Infinity ? 0 : max
312 | });
313 | }
314 |
315 | // Sorting logic
316 | if (!window.legendSort) window.legendSort = { key: "label", asc: true };
317 | const sortKey = window.legendSort.key;
318 | const sortAsc = window.legendSort.asc;
319 |
320 | flowStats.sort((a, b) => {
321 | switch (sortKey) {
322 | case "label":
323 | return sortAsc
324 | ? a.label.localeCompare(b.label)
325 | : b.label.localeCompare(a.label);
326 | case "max":
327 | return sortAsc
328 | ? a.max - b.max
329 | : b.max - a.max;
330 | default:
331 | return 0;
332 | }
333 | });
334 |
335 | const customLegendDiv = document.getElementById('custom_legend');
336 | customLegendDiv.innerHTML = `
337 |
338 | Usage:
339 | • Click a flow to show only that flow. Click again to show all.
340 | • Ctrl/Cmd/Option + Click to add or remove flows.
341 | • Click a column header to sort the legend.
342 |