3 | Date: Wed, 17 Jul 2024 18:27:41 +0200
4 | Subject: [PATCH] Add support for prepatching
5 |
6 | ---
7 | internal/bzlmod/go_deps.bzl | 13 +++++++++++++
8 | internal/go_repository.bzl | 10 +++++++++-
9 | 2 files changed, 22 insertions(+), 1 deletion(-)
10 |
11 | diff --git a/internal/bzlmod/go_deps.bzl b/internal/bzlmod/go_deps.bzl
12 | index dcd0db3..7170506 100644
13 | --- a/internal/bzlmod/go_deps.bzl
14 | +++ b/internal/bzlmod/go_deps.bzl
15 | @@ -159,6 +159,9 @@ def _get_build_extra_args(path, gazelle_overrides, gazelle_default_attributes):
16 | def _get_patches(path, module_overrides):
17 | return _get_override_or_default(module_overrides, struct(), {}, path, [], "patches")
18 |
19 | +def _get_pre_patches(path, module_overrides):
20 | + return _get_override_or_default(module_overrides, struct(), {}, path, [], "pre_patches")
21 | +
22 | def _get_patch_args(path, module_overrides):
23 | override = _get_override_or_default(module_overrides, struct(), {}, path, None, "patch_strip")
24 | return ["-p{}".format(override)] if override else []
25 | @@ -235,6 +238,7 @@ def _process_gazelle_override(gazelle_override_tag):
26 | def _process_module_override(module_override_tag):
27 | return struct(
28 | patches = module_override_tag.patches,
29 | + pre_patches = module_override_tag.pre_patches,
30 | patch_strip = module_override_tag.patch_strip,
31 | )
32 |
33 | @@ -243,6 +247,7 @@ def _process_archive_override(archive_override_tag):
34 | urls = archive_override_tag.urls,
35 | sha256 = archive_override_tag.sha256,
36 | strip_prefix = archive_override_tag.strip_prefix,
37 | + pre_patches = archive_override_tag.pre_patches,
38 | patches = archive_override_tag.patches,
39 | patch_strip = archive_override_tag.patch_strip,
40 | )
41 | @@ -611,6 +616,7 @@ def _go_deps_impl(module_ctx):
42 | "build_directives": _get_directives(path, gazelle_overrides, gazelle_default_attributes),
43 | "build_file_generation": _get_build_file_generation(path, gazelle_overrides, gazelle_default_attributes),
44 | "build_extra_args": _get_build_extra_args(path, gazelle_overrides, gazelle_default_attributes),
45 | + "pre_patches": _get_pre_patches(path, module_overrides),
46 | "patches": _get_patches(path, module_overrides),
47 | "patch_args": _get_patch_args(path, module_overrides),
48 | "debug_mode": debug_mode,
49 | @@ -622,6 +628,7 @@ def _go_deps_impl(module_ctx):
50 | "urls": archive_override.urls,
51 | "strip_prefix": archive_override.strip_prefix,
52 | "sha256": archive_override.sha256,
53 | + "pre_patches": _get_pre_patches(path, archive_overrides),
54 | "patches": _get_patches(path, archive_overrides),
55 | "patch_args": _get_patch_args(path, archive_overrides),
56 | })
57 | @@ -775,6 +782,9 @@ _archive_override_tag = tag_class(
58 | SHA-256 sum of the downloaded archive. When set, Bazel will verify the archive
59 | against this sum before extracting it.""",
60 | ),
61 | + "pre_patches": attr.label_list(
62 | + doc = "A list of patches to apply to the repository before gazelle runs.",
63 | + ),
64 | "patches": attr.label_list(
65 | doc = "A list of patches to apply to the repository *after* gazelle runs.",
66 | ),
67 | @@ -813,6 +823,9 @@ _module_override_tag = tag_class(
68 | extension within this Bazel module.""",
69 | mandatory = True,
70 | ),
71 | + "pre_patches": attr.label_list(
72 | + doc = "A list of patches to apply to the repository before gazelle runs.",
73 | + ),
74 | "patches": attr.label_list(
75 | doc = "A list of patches to apply to the repository *after* gazelle runs.",
76 | ),
77 | diff --git a/internal/go_repository.bzl b/internal/go_repository.bzl
78 | index 48a9d14..e3efa5b 100644
79 | --- a/internal/go_repository.bzl
80 | +++ b/internal/go_repository.bzl
81 | @@ -286,6 +286,11 @@ def _go_repository_impl(ctx):
82 | if result.return_code:
83 | fail("%s: %s" % (ctx.name, result.stderr))
84 |
85 | + # TODO(lorenz): Replace this with patch() once the patches argument no longer gets merged with
86 | + # the attribute pulled from ctx.
87 | + for p in ctx.attr.pre_patches:
88 | + ctx.patch(p, 1)
89 | +
90 | # Repositories are fetched. Determine if build file generation is needed.
91 | build_file_names = ctx.attr.build_file_name.split(",")
92 | existing_build_file = ""
93 | @@ -623,7 +628,10 @@ go_repository = repository_rule(
94 | prefixed with `#` automatically. A common use case is to pass a list of
95 | Gazelle directives.""",
96 | ),
97 | -
98 | + # Patches to apply before running gazelle.
99 | + "pre_patches": attr.label_list(
100 | + doc = "A list of patches to apply to the repository before gazelle runs.",
101 | + ),
102 | # Patches to apply after running gazelle.
103 | "patches": attr.label_list(
104 | doc = "A list of patches to apply to the repository after gazelle runs.",
105 | --
106 | 2.44.1
107 |
--------------------------------------------------------------------------------
/deploy/single-node/schema/functions.cue:
--------------------------------------------------------------------------------
1 | package schema
2 |
3 | import "strings"
4 |
5 | import "list"
6 |
// HostToString resolves a (sampler, host address) pair to its user-configured
// description from the HostNames dictionary, falling back to the plain
// address rendered by IPv6ToString when no entry exists.
function: HostToString: {
	arguments: ["Sampler", "Host"]
	query: "dictGetStringOrDefault('HostNames', 'Description', (IPv6NumToString(Sampler), Host), IPv6ToString(Host))"
}
11 |
// SamplerToString resolves a sampler address to its configured description
// from the SamplerConfig dictionary, falling back to the textual address.
function: SamplerToString: {
	arguments: ["Sampler"]
	query: "coalesce(dictGet('SamplerConfig', 'Description', IPv6NumToString(Sampler)), IPv6ToString(Sampler))"
}
16 |
// ASNToString renders an AS number as "<name> AS<number>", where the name is
// looked up in the autnums dictionary and truncated to 25 characters.
function: ASNToString: {
	arguments: ["ASN"]
	query: "substring(dictGetString('autnums', 'name', toUInt64(ASN)), 1, 25) || ' AS' || toString(ASN)"
}
21 |
// VLANToString resolves a (sampler, VLAN ID) pair to its user-configured
// description from the VlanNames dictionary, falling back to the VLAN ID
// rendered as text.
//
// Fix: dictGetStringOrDefault requires the default value to match the
// attribute type (String); the numeric VLAN column must be wrapped in
// toString(), mirroring how HostToString stringifies its fallback.
function: VLANToString: {
	arguments: ["Sampler", "VLAN"]
	query: "dictGetStringOrDefault('VlanNames', 'Description', (IPv6NumToString(Sampler), VLAN), toString(VLAN))"
}
26 |
// InterfaceToString renders "<sampler> - <ifindex>", appending
// " [<description>]" only when the InterfaceNames dictionary has an entry
// for the (sampler, interface) pair.
function: InterfaceToString: {
	arguments: ["Sampler", "Interface"]
	query: #"""
		if(
			isNull(dictGetOrNull('InterfaceNames', 'Description', (IPv6NumToString(Sampler), Interface))),
			SamplerToString(Sampler) || ' - ' || toString(Interface),
			SamplerToString(Sampler) || ' - ' || toString(Interface) || ' [' ||
			dictGetString('InterfaceNames', 'Description', (IPv6NumToString(Sampler), Interface)) || ']'
		)
		"""#
}
38 |
// isIncomingFlow builds a SQL expression classifying a flow as incoming.
// Per-sampler rules from #Config.sampler are evaluated first (each yields
// NULL for non-matching samplers); flows with no matching rule fall back to
// FlowDirection == 0 via coalesce().
function: isIncomingFlow: {
	arguments: ["SamplerAddress", "SrcAddr", "DstAddr", "SrcAS", "DstAS", "FlowDirection"]

	#ColumnExpression: {
		// Pseudo-columns expanding to CIDR range checks.
		// Fix: the previous handler strings had unbalanced parentheses
		// ("toString(SamplerAddress, toIPv6Net('…'))") and produced invalid
		// SQL. isIPAddressInRange takes two arguments: the stringified
		// address column and the (possibly v4-mapped) network from
		// toIPv6Net.
		_function_column_handler: {
			SamplerAddressInRange: "isIPAddressInRange(toString(SamplerAddress), toIPv6Net('\(_strValue)'))"
			SrcAddrInRange:        "isIPAddressInRange(toString(SrcAddr), toIPv6Net('\(_strValue)'))"
			DstAddrInRange:        "isIPAddressInRange(toString(DstAddr), toIPv6Net('\(_strValue)'))"
		}
		_function_columns: [ for k, _ in _function_column_handler {k}]

		// column must be either a real flow column or one of the
		// pseudo-columns above; anything else fails unification.
		_valid: true & (list.Contains(arguments, column) | list.Contains(_function_columns, column))
		column: string
		value: string | int
		_strValue: "\(value)"

		// Default: string comparison against the stringified column.
		#out: string | *"toString(\(column)) == '\(_strValue)'"

		// if the value is an int, we can skip toString
		if (value & int) != _|_ {
			#out: "\(column) == \(_strValue)"
		}

		// if it is a special handler
		if list.Contains(_function_columns, column) {
			#out: _function_column_handler[column]
		}
	}

	// Wraps a sampler's OR-joined expressions in a guard on SamplerAddress;
	// evaluates to NULL for flows from other samplers so coalesce() can
	// fall through.
	#SamplerExpression: {
		sampler: string
		expressions: [...string]
		#out: """
			if(
				toString(SamplerAddress) == '\(sampler)',
				\(strings.Join(expressions, " OR ")),
				NULL
			)
			"""
	}

	// dear reader I am very sorry for this monstrosity, but when I used a pretty version with placeholders,
	// CUE just stackoverflowed. I hope that with a future version this can be pretty again, but for now
	// we have to live with it.
	_expressions: [
		for device, cfg in #Config.sampler if len(cfg.isIncomingFlow) != 0 {

			// join the column expressions together
			(#SamplerExpression & {
				sampler: device
				_valid: true & len(expressions) > 0

				expressions: [ for e in cfg.isIncomingFlow if len(e) != 0 {
					// for each instance create the column expressions and join them by AND
					_expressions: [ for c, v in e {

						// create the sql expression for this column
						(#ColumnExpression & {
							column: c
							value: v
						}).#out
					}]

					"( " + strings.Join(_expressions, " AND ") + " )"
				}]
			}).#out
		},

		// Fallback to FlowDirection == 0
		"FlowDirection == 0",
	]

	query: "coalesce( \(strings.Join(_expressions, ",\n")) )"
}
113 |
// toIPv6Net converts an IPv4 CIDR (e.g. "10.0.0.0/8") into its IPv6-mapped
// equivalent ("::ffff:10.0.0.0/104"): the address is prefixed with ::ffff:
// and the prefix length is shifted by 96 bits. IPv6 CIDRs pass through
// unchanged.
function: toIPv6Net: {
	arguments: ["Net"]
	query: #"""
		if(
			isIPv4String(splitByChar('/', Net)[1]),
			'::ffff:' || arrayStringConcat(
				// arrayMap over [address, prefixlen]: only the second element
				// (i == true) gets +96 applied.
				arrayMap((v, i) -> if(i, toString(toInt8(v) + 96), v), splitByChar('/', Net), [false, true]),
				'/'),
			Net
		)
		"""#
}
126 |
// IPv6ToString renders an IPv6 value as text, printing IPv6-mapped IPv4
// addresses (::ffff:a.b.c.d, i.e. 10 zero bytes + 2 0xFF bytes) in plain
// dotted-quad form instead of the mapped notation.
// NOTE(review): substring(..., 13, 16) requests 16 bytes starting at offset
// 13 of a 16-byte value; this relies on ClickHouse clamping the length to
// the remaining 4 bytes — presumably meant to be length 4. Confirm.
function: IPv6ToString: {
	arguments: ["Address"]
	query: #"""
		if(
			startsWith(reinterpret(Address, 'FixedString(16)'), repeat(unhex('00'), 10) || repeat(unhex('FF'), 2)),
			IPv4NumToString(CAST(reinterpret(reverse(substring(reinterpret(Address, 'FixedString(16)'), 13, 16)), 'UInt32') AS IPv4)),
			IPv6NumToString(Address)
		)
		"""#
}
137 |
// ParseGoFlowAddress converts a raw 16-byte address as emitted by goflow
// into an IPv6 value. goflow left-aligns IPv4 addresses (4 address bytes
// followed by 12 zero bytes), so such values are re-packed into the
// IPv6-mapped IPv4 layout (::ffff:a.b.c.d) before casting.
function: ParseGoFlowAddress: {
	arguments: ["Address"]
	query: #"""
		if(
			-- endsWith IPv6v4NullPadding
			endsWith(reinterpret(Address, 'FixedString(16)'), repeat(unhex('00'), 12)),
			-- prepend ::ffff:
			CAST(toFixedString(repeat(unhex('00'), 10) || repeat(unhex('FF'), 2) || substring(reinterpret(Address, 'FixedString(16)'), 1, 4), 16) AS IPv6),
			CAST(Address AS IPv6)
		)
		"""#
}
150 |
// switchEndian reverses the byte order of a binary string: hex-encode,
// walk the hex digit pairs back-to-front, concatenate, and decode again.
function: switchEndian: {
	arguments: ["s"]
	query:
		#"""
		unhex(
			arrayStringConcat(
				arrayMap(x -> substring(hex(s), x, 2), reverse(range(1, length(s) * 2, 2)))
			)
		)
		"""#
}
162 |
// ParseFastNetMonAddress converts a FastNetMon address blob into an IPv6
// value: 4-byte values are little-endian IPv4 (hence switchEndian before
// reinterpreting as UInt32) mapped via IPv4ToIPv6; anything else is padded
// to 16 bytes and cast to IPv6 directly.
function: ParseFastNetMonAddress: {
	arguments: ["Address"]
	query: #"""
		if(
			length(Address) == 4,
			IPv4ToIPv6(CAST(reinterpret(switchEndian(Address), 'UInt32') AS IPv4)),
			CAST(toFixedString(Address, 16) AS IPv6)
		)
		"""#
}
173 |
// ColumnIndex returns the 1-based position of a column within a table,
// read from system.columns. Currently required to reference fields such as
// SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS and FlowDirection by
// position; see isIncomingFlow for usage.
function: ColumnIndex: {
	arguments: ["Database", "Table", "Column"]
	query: #"""
		(SELECT position from system.columns where database = Database and table = Table and name = Column)
		"""#
}
183 |
--------------------------------------------------------------------------------
/cmd/reconciler/reconciler.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "crypto/sha256"
6 | "database/sql"
7 | "encoding/hex"
8 | "fmt"
9 | "github.com/ClickHouse/clickhouse-go/v2"
10 | "log"
11 | )
12 |
// Reconciler applies the configured ClickHouse schema — SQL functions,
// source tables and materialized views — to a running ClickHouse instance,
// creating, replacing or recreating objects whose definitions have drifted.
type Reconciler struct {
	conn clickhouse.Conn // open connection to the ClickHouse server
	cfg  *Config         // desired schema configuration
}
17 |
18 | func (r *Reconciler) Reconcile() error {
19 | for _, function := range r.cfg.Functions {
20 | if err := r.reconcileFunction(function); err != nil {
21 | return err
22 | }
23 | }
24 |
25 | for _, table := range r.cfg.SourceTables {
26 | if err := r.reconcileTable(table); err != nil {
27 | return err
28 | }
29 | }
30 |
31 | for _, view := range r.cfg.MaterializedViews {
32 | if err := r.reconcileMaterializedView(view); err != nil {
33 | return err
34 | }
35 | }
36 |
37 | return nil
38 | }
39 |
40 | func (r *Reconciler) reconcileTable(t Table) error {
41 | currentQuery, err := r.fetchTable(t.Name)
42 | if err != nil && err != sql.ErrNoRows {
43 | return err
44 | }
45 |
46 | createQuery, err := t.CreateQuery(r.cfg.Database)
47 | if err != nil {
48 | return err
49 | }
50 |
51 | if currentQuery != "" {
52 | // fetchTable returns the CREATE TABLE statement
53 | equal, err := r.isEqual(createQuery, currentQuery)
54 | if err != nil {
55 | return err
56 | }
57 |
58 | // current table is equal -> skip
59 | if equal {
60 | log.Printf("table %q is equal: skipping", t.Name)
61 | return nil
62 | }
63 |
64 | log.Printf("table %q is not equal: dropping", t.Name)
65 | if err := r.conn.Exec(context.Background(), t.DropQuery(r.cfg.Database)); err != nil {
66 | return err
67 | }
68 | }
69 |
70 | log.Printf("table %q is missing: creating", t.Name)
71 | // create missing view
72 | return r.conn.Exec(context.Background(), createQuery)
73 | }
74 |
// reconcileMaterializedView ensures the materialized view mv exists and is
// up to date. Equality is tracked via a SHA-256 hash of the desired CREATE
// statement that is persisted in the table's comment (see the trailing
// ALTER TABLE ... MODIFY COMMENT) — presumably because the server rewrites
// MV CREATE statements, making direct text comparison unreliable; confirm.
func (r *Reconciler) reconcileMaterializedView(mv MaterializedView) error {
	// err stays sql.ErrNoRows when the view does not exist; that value is
	// re-checked below to decide whether a drop is needed.
	currentHashString, err := r.fetchMaterializedView(mv.Name)
	if err != nil && err != sql.ErrNoRows {
		return err
	}

	// Hash of the desired definition, compared against the stored comment.
	newHash := sha256.Sum256([]byte(mv.CreateQuery(r.cfg.Database)))
	newHashString := hex.EncodeToString(newHash[:])

	if err != sql.ErrNoRows {
		// current mv is equal -> skip
		if newHashString == currentHashString {
			log.Printf("materializedview %q is equal: skipping", mv.Name)
			return nil
		}

		log.Printf("materializedview %q is not equal: dropping", mv.Name)
		if err := r.conn.Exec(context.Background(), mv.DropQuery(r.cfg.Database)); err != nil {
			return err
		}
	}

	log.Printf("materializedview %q is missing: creating", mv.Name)
	// create missing view
	if err := r.conn.Exec(context.Background(), mv.CreateQuery(r.cfg.Database)); err != nil {
		return err
	}

	// Record the hash of the just-applied definition so the next run can
	// detect drift.
	return r.conn.Exec(context.Background(), fmt.Sprintf("ALTER TABLE %s.%s MODIFY COMMENT ?", r.cfg.Database, mv.Name), newHashString)
}
105 |
106 | func (r *Reconciler) reconcileFunction(f Function) error {
107 | currentQuery, err := r.fetchFunction(f.Name)
108 | if err != nil && err != sql.ErrNoRows {
109 | return err
110 | }
111 |
112 | if currentQuery != "" {
113 | // fetchFunction returns the original CREATE FUNCTION statement
114 | equal, err := r.isEqual(f.CreateQuery(), currentQuery)
115 | if err != nil {
116 | return err
117 | }
118 |
119 | // current function is equal -> skip
120 | if equal {
121 | log.Printf("function %q is equal: skipping", f.Name)
122 | return nil
123 | }
124 |
125 | log.Printf("function %q is not equal: replacing", f.Name)
126 | // replace function
127 | return r.conn.Exec(context.Background(), f.CreateOrReplaceQuery())
128 | }
129 |
130 | // create missing function
131 | return r.conn.Exec(context.Background(), f.CreateQuery())
132 | }
133 |
134 | func (r *Reconciler) fetchTable(name string) (string, error) {
135 | row := r.conn.QueryRow(context.Background(),
136 | "SELECT create_table_query FROM system.tables WHERE database = ? AND name = ?",
137 | r.cfg.Database, name)
138 | if err := row.Err(); err != nil {
139 | return "", err
140 | }
141 |
142 | var createTableQuery string
143 | if err := row.Scan(&createTableQuery); err != nil {
144 | return "", err
145 | }
146 |
147 | return createTableQuery, nil
148 | }
149 |
150 | func (r *Reconciler) fetchFunction(name string) (string, error) {
151 | row := r.conn.QueryRow(context.Background(),
152 | "SELECT create_query FROM system.functions WHERE name = ?",
153 | name)
154 | if err := row.Err(); err != nil {
155 | return "", err
156 | }
157 |
158 | var createQuery string
159 | if err := row.Scan(&createQuery); err != nil {
160 | return "", err
161 | }
162 |
163 | return createQuery, nil
164 | }
165 |
166 | func (r *Reconciler) fetchMaterializedView(name string) (string, error) {
167 | row := r.conn.QueryRow(context.Background(),
168 | "SELECT comment FROM system.tables WHERE database = ? AND name = ?",
169 | "default", name)
170 | if err := row.Err(); err != nil {
171 | return "", err
172 | }
173 |
174 | var comment string
175 | if err := row.Scan(&comment); err != nil {
176 | return "", err
177 | }
178 |
179 | return comment, nil
180 | }
181 |
182 | func (r *Reconciler) formatQuery(query string) (string, error) {
183 | row := r.conn.QueryRow(context.Background(), "SELECT formatQuery(?)", query)
184 | if err := row.Err(); err != nil {
185 | return "", err
186 | }
187 |
188 | var result string
189 | if err := row.Scan(&result); err != nil {
190 | return "", err
191 | }
192 |
193 | return result, nil
194 | }
195 |
196 | func (r *Reconciler) isEqual(want, is string) (bool, error) {
197 | if want == "" {
198 | return false, fmt.Errorf("missing %q", "want")
199 | }
200 | if is == "" {
201 | return false, fmt.Errorf("missing %q", "is")
202 | }
203 |
204 | var err error
205 | want, err = r.formatQuery(want)
206 | if err != nil {
207 | return false, fmt.Errorf("formatting %q: %v", "want", err)
208 | }
209 |
210 | is, err = r.formatQuery(is)
211 | if err != nil {
212 | return false, fmt.Errorf("formatting %q: %v", "is", err)
213 | }
214 |
215 | return want == is, nil
216 | }
217 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/monogon-dev/netmeta
2 |
3 | go 1.23
4 |
5 | require (
6 | cuelang.org/go v0.11.0
7 | github.com/ClickHouse/clickhouse-go/v2 v2.28.3
8 | github.com/emicklei/proto v1.13.2
9 | github.com/gopacket/gopacket v1.3.1
10 | github.com/huandu/go-sqlbuilder v1.32.0
11 | github.com/netsampler/goflow2 v1.3.7
12 | github.com/netsampler/goflow2/v2 v2.2.1
13 | github.com/osrg/gobgp v2.0.0+incompatible
14 | github.com/pressly/goose/v3 v3.22.1
15 | github.com/sirupsen/logrus v1.9.3
16 | github.com/vishvananda/netlink v1.3.0
17 | k8s.io/klog/v2 v2.130.1
18 | )
19 |
20 | require (
21 | cuelabs.dev/go/oci/ociregistry v0.0.0-20240906074133-82eb438dd565 // indirect
22 | filippo.io/edwards25519 v1.1.0 // indirect
23 | github.com/ClickHouse/ch-go v0.61.5 // indirect
24 | github.com/Shopify/sarama v1.38.1 // indirect
25 | github.com/andybalholm/brotli v1.1.0 // indirect
26 | github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
27 | github.com/beorn7/perks v1.0.1 // indirect
28 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
29 | github.com/cockroachdb/apd/v3 v3.2.1 // indirect
30 | github.com/coder/websocket v1.8.12 // indirect
31 | github.com/davecgh/go-spew v1.1.1 // indirect
32 | github.com/dustin/go-humanize v1.0.1 // indirect
33 | github.com/eapache/go-resiliency v1.3.0 // indirect
34 | github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect
35 | github.com/eapache/queue v1.1.0 // indirect
36 | github.com/elastic/go-sysinfo v1.11.2 // indirect
37 | github.com/elastic/go-windows v1.0.1 // indirect
38 | github.com/go-faster/city v1.0.1 // indirect
39 | github.com/go-faster/errors v0.7.1 // indirect
40 | github.com/go-logr/logr v1.4.1 // indirect
41 | github.com/go-sql-driver/mysql v1.8.1 // indirect
42 | github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
43 | github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
44 | github.com/golang-sql/sqlexp v0.1.0 // indirect
45 | github.com/golang/protobuf v1.5.4 // indirect
46 | github.com/golang/snappy v0.0.4 // indirect
47 | github.com/google/uuid v1.6.0 // indirect
48 | github.com/hashicorp/errwrap v1.0.0 // indirect
49 | github.com/hashicorp/go-multierror v1.1.1 // indirect
50 | github.com/hashicorp/go-uuid v1.0.3 // indirect
51 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
52 | github.com/huandu/xstrings v1.4.0 // indirect
53 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
54 | github.com/jackc/pgpassfile v1.0.0 // indirect
55 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
56 | github.com/jackc/pgx/v5 v5.7.1 // indirect
57 | github.com/jackc/puddle/v2 v2.2.2 // indirect
58 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect
59 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
60 | github.com/jcmturner/gofork v1.7.6 // indirect
61 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
62 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect
63 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
64 | github.com/jonboulle/clockwork v0.4.0 // indirect
65 | github.com/klauspost/compress v1.17.9 // indirect
66 | github.com/libp2p/go-reuseport v0.4.0 // indirect
67 | github.com/mattn/go-isatty v0.0.20 // indirect
68 | github.com/mfridman/interpolate v0.0.2 // indirect
69 | github.com/mfridman/xflag v0.0.0-20240825232106-efb77353e578 // indirect
70 | github.com/microsoft/go-mssqldb v1.7.2 // indirect
71 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect
72 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
73 | github.com/ncruces/go-strftime v0.1.9 // indirect
74 | github.com/opencontainers/go-digest v1.0.0 // indirect
75 | github.com/opencontainers/image-spec v1.1.0 // indirect
76 | github.com/paulmach/orb v0.11.1 // indirect
77 | github.com/pelletier/go-toml/v2 v2.2.3 // indirect
78 | github.com/pierrec/lz4/v4 v4.1.21 // indirect
79 | github.com/pkg/errors v0.9.1 // indirect
80 | github.com/prometheus/client_golang v1.20.0 // indirect
81 | github.com/prometheus/client_model v0.6.1 // indirect
82 | github.com/prometheus/common v0.55.0 // indirect
83 | github.com/prometheus/procfs v0.15.1 // indirect
84 | github.com/protocolbuffers/txtpbfmt v0.0.0-20240823084532-8e6b51fa9bef // indirect
85 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
86 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
87 | github.com/rogpeppe/go-internal v1.13.1 // indirect
88 | github.com/segmentio/asm v1.2.0 // indirect
89 | github.com/sethvargo/go-retry v0.3.0 // indirect
90 | github.com/shopspring/decimal v1.4.0 // indirect
91 | github.com/spf13/cobra v1.8.1 // indirect
92 | github.com/spf13/pflag v1.0.5 // indirect
93 | github.com/tetratelabs/wazero v1.6.0 // indirect
94 | github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d // indirect
95 | github.com/vertica/vertica-sql-go v1.3.3 // indirect
96 | github.com/vishvananda/netns v0.0.4 // indirect
97 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect
98 | github.com/xdg-go/scram v1.1.2 // indirect
99 | github.com/xdg-go/stringprep v1.0.4 // indirect
100 | github.com/ydb-platform/ydb-go-genproto v0.0.0-20240528144234-5d5a685e41f7 // indirect
101 | github.com/ydb-platform/ydb-go-sdk/v3 v3.80.2 // indirect
102 | github.com/ziutek/mymysql v1.5.4 // indirect
103 | go.opentelemetry.io/otel v1.26.0 // indirect
104 | go.opentelemetry.io/otel/trace v1.26.0 // indirect
105 | go.uber.org/multierr v1.11.0 // indirect
106 | golang.org/x/crypto v0.28.0 // indirect
107 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect
108 | golang.org/x/mod v0.21.0 // indirect
109 | golang.org/x/net v0.30.0 // indirect
110 | golang.org/x/oauth2 v0.23.0 // indirect
111 | golang.org/x/sync v0.8.0 // indirect
112 | golang.org/x/sys v0.26.0 // indirect
113 | golang.org/x/text v0.19.0 // indirect
114 | golang.org/x/tools v0.26.0 // indirect
115 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
116 | google.golang.org/grpc v1.62.1 // indirect
117 | google.golang.org/protobuf v1.34.2 // indirect
118 | gopkg.in/yaml.v2 v2.4.0 // indirect
119 | gopkg.in/yaml.v3 v3.0.1 // indirect
120 | howett.net/plist v1.0.0 // indirect
121 | modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
122 | modernc.org/mathutil v1.6.0 // indirect
123 | modernc.org/memory v1.8.0 // indirect
124 | modernc.org/sqlite v1.33.0 // indirect
125 | modernc.org/strutil v1.2.0 // indirect
126 | modernc.org/token v1.1.0 // indirect
127 | )
128 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NetMeta
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 | | ⚠️ **PRE-RELEASE**: This is a work in progress - please watch this repo for news. |
18 | |-------------------------------------------------------------------------------------------|
19 |
20 | NetMeta is a scalable network observability toolkit optimized for performance.
21 |
22 | Flows are not pre-aggregated and stored with one second resolution. This allows for
23 | queries in arbitrary dimensions with high-fidelity graphs.
24 |
25 | ## ⚡️ Features
26 |
27 | NetMeta captures, aggregates and analyzes events from a variety of data sources:
28 |
29 | * sFlow
30 | * NetFlow/IPFIX
31 | * Port Mirror (AF_PACKET)
32 | * Linux NFLOG (soon)
33 | * Linux conntrack (soon)
34 | * Scamper traces (soon)
35 | * GCP VPC Flow Logs (soon)
36 | * AWS VPC Flow Logs (soon)
37 |
The pros and cons of each, and more information about event ingestion, can be found [here](doc/ingest.md).
39 |
NetMeta can also integrate FastNetMon attack notifications into the dashboards.
The docs for setting this up, and for other Grafana-related settings, can be found [here](doc/grafana.md).
42 |
43 | Sampling rate is detected automatically. Different devices with different sampling rates can be mixed.
44 | IPv6 is fully supported throughout the stack.
45 |
46 | ## 👀 Demo
47 |
48 |
49 |
50 |
51 |
52 | ## 💥 API Stability
53 |
54 | NetMeta is **beta software** and subject to change. It exposes the following APIs:
55 |
56 | * The cluster configuration file for single-node deployments.
57 | * ClickHouse SQL schema for raw database access.
58 | * Protobuf schemas for ingestion for writing custom processors.
59 |
Once NetMeta has stabilized, these APIs will be stable and backwards compatible.
61 |
62 | ## 🛠 Deployment
63 | ### [Single-node deployment](deploy/single-node/README.md)
64 |
65 | NetMeta includes a production-ready single node deployment that scales to up to ~100k events/s and billions of database
rows. More information can be found [here](deploy/single-node/README.md)
67 |
68 | Ingestion performance is limited by CPU performance and disk bandwidth.
69 | Query performance is limited by disk and memory bandwidth, as well as total amount of available memory for larger
70 | in-memory aggregations.
71 |
72 | Most reads/writes are sequential due to heavy use of batching in all parts of the stack,
73 | and it works fine even on network storage or spinning disks. We recommend local NVMe drives for best performance.
74 |
NetMeta can scale to millions of events per second in multi-node deployments.
76 |
77 | ### Multi-node deployment
78 |
79 | We are currently finalizing the design for multi-node deployments. Please contact us if you're interested in
80 | large-scale deployments - we want your feedback!
81 |
82 | ### Monogon OS
83 |
84 | NetMeta will be a first-class citizen on [Monogon OS](https://monogon.tech/monogon_os.html) - stay tuned!
85 |
86 | ### ☸️ Kubernetes
87 |
88 | NetMeta works on any Kubernetes cluster that supports LoadBalancer and Ingress objects and can provision storage.
It's up to you to carefully read the deployment code and cluster role assignments to make sure it works with your
90 | cluster.
91 | Note that we use two operators, which require cluster-admin permissions since CRDs are global
92 | ([Strimzi](https://strimzi.io/docs/master) for Kafka
93 | and [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)).
94 |
95 | All pieces of NetMeta are installed into a single namespace. By default, this is ``default``, which is
96 | probably not what
97 | you want.
98 | You can change the target namespace in the deployment config.
99 |
100 | Please contact us if you need help porting NetMeta to an existing k8s cluster.
101 |
102 | ## 💼 Support
103 |
104 | [Please contact us](https://monogon.tech/pricing.html)
105 | for support and consulting. If you are using NetMeta in production, we'd love to hear from you!
106 |
107 | ## 🧩 Related
108 |
109 | NetMeta is powered by a number of great open source projects, we use:
110 |
111 | - [ClickHouse](https://clickhouse.tech) as the main database
112 | - [Kafka](https://kafka.apache.org) as a queue in front of ClickHouse
113 | - [Grafana](https://grafana.com/) with
114 | - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana) as frontend
115 | - [goflow](https://github.com/cloudflare/goflow) as the sFlow/Netflow collector
116 | - [Strimzi](https://strimzi.io/) to deploy Kafka,
117 | - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator) to deploy ClickHouse, as well as
118 | - [Kubernetes](https://kubernetes.io/) and Rancher's [k3s](https://k3s.io/).
119 |
120 | ## 🏰 Architecture
121 |
122 | ```mermaid
123 | flowchart TD;
124 | sFlow --> goflow
125 | IPFIX --> goflow
126 | Netflow --> goflow
127 |
128 | kafka[Kafka Broker]
    clickhouse["ClickHouse Server<br/>MergeTree (hourly partitions)"]
130 |
131 | goflow --> kafka
132 | ntm-agent --> kafka
133 | kafka --> clickhouse
134 | clickhouse --> |SQL| grafana[Grafana]
135 | asmap --> clickhouse
136 | nexthop --> clickhouse
137 | resolver --> clickhouse
138 |
139 | ```
140 |
141 | ---
142 |
143 | (C) 2022 [Monogon SE](https://monogon.tech).
144 |
145 | This software is provided "as-is" and
146 | without any express or implied warranties, including, without limitation, the implied warranties of
147 | merchantability and fitness for a particular purpose.
148 |
--------------------------------------------------------------------------------
/deploy/single-node/k8s/clickhouse/files.cue:
--------------------------------------------------------------------------------
1 | package clickhouse
2 |
3 | import (
4 | "strings"
5 | "strconv"
6 | "netmeta.monogon.tech/xml"
7 | )
8 |
// Template for file-backed (TSV) ClickHouse dictionaries: every entry added
// to _files below supplies its dictionary settings (cfg) and tab-separated
// payload (data); the template derives the full dictionary definition.
_files: [NAME=string]: {
	// Per-dictionary layout and structure, supplied by each instance.
	cfg: {
		layout: _
		structure: _
	}
	// Raw TSV payload written alongside the generated config.
	data: _

	// Complete dictionary definition as marshalled to ClickHouse XML: the
	// instance cfg plus the shared file source pointing at the generated TSV.
	_cfg: yandex: dictionary: {
		cfg

		name: NAME
		source: [{
			file: {
				path: "/etc/clickhouse-server/config.d/\(NAME).tsv"
				format: "TSV"
			}
			// Missing values are represented as the literal "NULL" in the TSV.
			settings: format_tsv_null_representation: "NULL"
		}]
		// Re-read the dictionary file every 60 seconds.
		lifetime: 60
	}
}
31 |
// Iterate over all defined files in _files and generate, for each
// dictionary, its XML config file ("<name>.conf") and its TSV data file
// ("<name>.tsv") in the ClickHouse installation.
ClickHouseInstallation: netmeta: spec: configuration: files: {
	for k, v in _files {
		"\(k).conf": (xml.#Marshal & {in: v._cfg}).out
		"\(k).tsv": v.data
	}
}
39 |
// Dictionary for user-defined interface name lookup, keyed by
// (sampler device, interface index); one TSV row per configured interface.
_files: InterfaceNames: {
	data: strings.Join([ for s in #Config.sampler for i in s.interface {
		strings.Join([s.device, "\(i.id)", i.description], "\t")
	}], "\n")

	cfg: {
		layout: complex_key_hashed: null
		structure: {
			key: [{
				attribute: {
					name: "Device"
					type: "String"
				}
			}, {
				attribute: {
					name: "Index"
					type: "UInt32"
				}
			}]
			attribute: {
				name: "Description"
				type: "String"
				null_value: null
			}
		}
	}
}
68 |
// Dictionary for user-defined sampler settings lookup, keyed by sampler
// device. Zero sampling rate and empty description are emitted as "NULL"
// (matching format_tsv_null_representation in the template above).
// NOTE(review): structure is a list here, while InterfaceNames/VlanNames use
// a struct — presumably both marshal acceptably via xml.#Marshal; confirm.
_files: SamplerConfig: {
	data: strings.Join([ for s in #Config.sampler {
		// 0 means "not configured" -> NULL, otherwise the literal rate.
		let samplingRate = [
			if s.samplingRate == 0 {
				"NULL"
			},
			"\(s.samplingRate)",
		][0]

		// Empty description -> NULL, otherwise the literal text.
		let description = [
			if s.description == "" {
				"NULL"
			},
			"\(s.description)",
		][0]

		strings.Join([s.device, samplingRate, description, strconv.FormatBool(s.anonymizeAddresses)], "\t")
	}], "\n")

	cfg: {
		layout: complex_key_hashed: null
		structure: [{
			key: [{
				attribute: {
					name: "Device"
					type: "String"
				}
			}]
		}, {
			attribute: {
				name: "SamplingRate"
				type: "Nullable(UInt64)"
				null_value: null
			}
		}, {
			attribute: {
				name: "Description"
				type: "Nullable(String)"
				null_value: null
			}
		}, {
			attribute: {
				name: "AnonymizeAddresses"
				type: "Bool"
				null_value: false
			}
		}]
	}
}
119 |
// Dictionary for user-defined VLAN name lookup, keyed by
// (sampler device, VLAN id); one TSV row per configured VLAN.
_files: VlanNames: {
	data: strings.Join([ for s in #Config.sampler for v in s.vlan {
		strings.Join([s.device, "\(v.id)", v.description], "\t")
	}], "\n")

	cfg: {
		layout: complex_key_hashed: null
		structure: {
			key: [{
				attribute: {
					name: "Device"
					type: "String"
				}
			}, {
				attribute: {
					name: "Index"
					type: "UInt32"
				}
			}]
			attribute: {
				name: "Description"
				type: "String"
				null_value: null
			}
		}
	}
}
148 |
// Dictionary for user-defined host name lookup.
// data: one row per (sampler, host) — Sampler \t Device \t Description.
_files: HostNames: {
	data: strings.Join([
		for s in #Config.sampler
		for h in s.host {
			strings.Join([s.device, h.device, h.description], "\t")
		},
	], "\n")

	cfg: {
		layout: complex_key_hashed: null
		structure: {
			// Composite key: (Sampler, Device).
			key: [
				{attribute: {name: "Sampler", type: "String"}},
				{attribute: {name: "Device", type: "String"}},
			]
			attribute: {name: "Description", type: "String", null_value: null}
		}
	}
}
177 |
// Dictionary of user-supplied ASN metadata.
// data: one row per entry — asn \t name \t country. It is unioned with the
// RIS-sourced data by the "autnums" dictionary defined further below.
_files: user_autnums: {
	data: strings.Join([
		for _, e in #Config.userData.autnums {
			strings.Join(["\(e.asn)", e.name, e.country], "\t")
		},
	], "\n")

	cfg: {
		layout: flat: null
		structure: [
			{id: {name: "asnum"}},
			{attribute: {name: "name", type: "String", null_value: null}},
			{attribute: {name: "country", type: "String", null_value: null}},
		]
	}
}
202 |
// IP-prefix → origin-AS dictionary, fetched hourly from the risinfo service.
ClickHouseInstallation: netmeta: spec: configuration: files: "risinfo.conf": (xml.#Marshal & {in: {
	yandex: dictionary: {
		name: "risinfo"
		source: http: {
			url:    "http://risinfo/rib.tsv"
			format: "TabSeparated"
		}
		lifetime: 3600
		layout: ip_trie: access_to_key_from_attributes: true
		structure: {
			key: attribute: {
				name: "prefix"
				type: "String"
			}
			attribute: {
				name:       "asnum"
				type:       "UInt32"
				null_value: 0
			}
		}
	}
}}).out
223 |
// Combined ASN name/country dictionary: unions the RIS-sourced
// risinfo_autnums dictionary with the user-supplied user_autnums rows.
ClickHouseInstallation: netmeta: spec: configuration: files: "autnums.conf": (xml.#Marshal & {in: {
	yandex: dictionary: {
		name: "autnums"
		// Sourced from ClickHouse itself (system `dictionaries` database)
		// rather than an external HTTP endpoint.
		source: clickhouse: {
			query:
				#"""
				SELECT * FROM dictionaries.risinfo_autnums
				UNION ALL
				SELECT * FROM dictionaries.user_autnums
				"""#
		}
		lifetime: 3600 // refresh every hour
		layout: flat: null
		structure: [{
			id: name: "asnum"
		}, {
			attribute: {
				name: "name"
				type: "String"
				null_value: null
			}
		}, {
			attribute: {
				name: "country"
				type: "String"
				null_value: null
			}
		}]
	}
}}).out
254 |
// ASN metadata dictionary fetched daily from the risinfo service; one of
// the two inputs unioned by the "autnums" dictionary above.
ClickHouseInstallation: netmeta: spec: configuration: files: "risinfo_autnums.conf": (xml.#Marshal & {in: {
	yandex: dictionary: {
		name: "risinfo_autnums"
		source: http: {
			url:    "http://risinfo/autnums.tsv"
			format: "TabSeparated"
		}
		lifetime: 86400
		layout: flat: null
		structure: [
			{id: {name: "asnum"}},
			{attribute: {name: "name", type: "String", null_value: null}},
			{attribute: {name: "country", type: "String", null_value: null}},
		]
	}
}}).out
281 |
// Registers the ClickHouse user-defined function formatQuery(String) -> String,
// which shells out to `clickhouse format --oneline` to normalize a query.
ClickHouseInstallation: netmeta: spec: configuration: files: "format_function.xml": (xml.#Marshal & {in: {
	yandex: functions: {
		type: "executable"
		name: "formatQuery"
		return_type: "String"
		argument: [{
			type: "String"
			name: "query"
		}]
		// One input/output line per invocation.
		format: "LineAsString"
		command: "clickhouse format --oneline"
		// "0": run the command through sh -c rather than as a direct binary
		// from user_scripts — presumably needed for the flag parsing; confirm.
		execute_direct: "0"
	}
}}).out
296 |
--------------------------------------------------------------------------------
/deploy/dashboards/NetMeta_Relations.cue:
--------------------------------------------------------------------------------
1 | package dashboards
2 |
3 | import "list"
4 |
// Raw SQL for the AS-relation sankey panels, keyed by the exact panel title
// that looks them up (see _asRelations below). All queries depend on:
//   - the Grafana $__timeFilter macro,
//   - the shared \#(_filtersWithHost) filter snippet spliced into WHERE,
//   - ClickHouse helpers provisioned elsewhere (ASNToString,
//     InterfaceToString, isIncomingFlow).
// Bytes are scaled by SamplingRate and reported in KiB.
_asRelationQueries: {
	// Remote source AS → local destination AS, inbound flows only.
	"Inbound traffic relations (Top 20)":
		#"""
		SELECT
			ASNToString(SrcAS) AS SrcASName,
			ASNToString(DstAS) AS DstASName,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		AND isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection)
		GROUP BY SrcAS, DstAS
		ORDER BY Bytes DESC
		LIMIT 20
		"""#

	// Local destination AS → remote source AS, outbound flows only.
	"Outbound traffic relations (Top 20)":
		#"""
		SELECT
			ASNToString(DstAS) AS DstASName,
			ASNToString(SrcAS) AS SrcASName,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		AND NOT isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection)
		GROUP BY SrcAS, DstAS
		ORDER BY Bytes DESC
		LIMIT 30
		"""#

	// Same as inbound, with the egress interface as a middle sankey stage.
	"Inbound traffic relations via interface (Top 30)":
		#"""
		SELECT
			ASNToString(SrcAS) AS SrcASName,
			InterfaceToString(SamplerAddress, OutIf) AS OutIfName,
			ASNToString(DstAS) AS DstASName,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		AND isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection)
		GROUP BY SrcAS, DstAS, SamplerAddress, OutIf
		ORDER BY Bytes DESC
		LIMIT 30
		"""#

	// Same as outbound, with the egress interface as a middle sankey stage.
	"Outbound traffic relations via interface (Top 30)":
		#"""
		SELECT
			ASNToString(DstAS) AS DstASName,
			InterfaceToString(SamplerAddress, OutIf) AS OutIfName,
			ASNToString(SrcAS) AS SrcASName,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		AND NOT isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection)
		GROUP BY SrcAS, DstAS, SamplerAddress, OutIf
		ORDER BY Bytes DESC
		LIMIT 30
		"""#
}
68 |
// Panel group for the "AS Relations" dashboard row: one row header plus
// four sankey panels. Each panel's SQL is looked up from _asRelationQueries
// by its own title, so titles must match the query map keys exactly.
_asRelations: [{
	title: "AS Relations"
	gridPos: y: 6
	type: "row"
}, {
	title: "Inbound traffic relations (Top 20)"
	type:  "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 0, y: 7}
	options: {nodePadding: 6, nodeWidth: 23, iteration: 15}
	targets: [{rawSql: _asRelationQueries[title]}]
}, {
	title: "Outbound traffic relations (Top 20)"
	type:  "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 12, y: 7}
	options: {nodePadding: 6, nodeWidth: 23, iteration: 15}
	targets: [{rawSql: _asRelationQueries[title]}]
}, {
	title: "Inbound traffic relations via interface (Top 30)"
	type:  "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 0, y: 31}
	options: {nodePadding: 6, nodeWidth: 23, iteration: 15}
	targets: [{rawSql: _asRelationQueries[title]}]
}, {
	title: "Outbound traffic relations via interface (Top 30)"
	type:  "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 12, y: 31}
	options: {nodePadding: 6, nodeWidth: 23, iteration: 15}
	targets: [{rawSql: _asRelationQueries[title]}]
}]
114 |
// Raw SQL for the per-flow sankey panels, keyed by panel title.
// $showHostnames is a Grafana dashboard variable: when set, addresses are
// rendered via reverse lookup (HostToString), otherwise as plain IPv6 text.
_topFlowSankeyQueries: {
	// Heaviest 30 flows aggregated per (source IP, destination IP).
	"Top 30 Flows (per IP)":
		#"""
		SELECT
			if($showHostnames, HostToString(SamplerAddress, SrcAddr), IPv6ToString(SrcAddr)) AS Src,
			if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) AS Dst,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		GROUP BY SamplerAddress, SrcAddr, DstAddr
		ORDER BY Bytes DESC
		LIMIT 30
		"""#

	// Heaviest 30 flows additionally keyed by IP protocol and port, with
	// "host proto+port" labels on both sankey ends.
	"Top 30 Flows (per IP+Port)":
		#"""
		SELECT
			if($showHostnames, HostToString(SamplerAddress, SrcAddr), IPv6ToString(SrcAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(SrcPort) as Src,
			if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(DstPort) as Dst,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		GROUP BY SamplerAddress, SrcAddr, SrcPort, Proto, DstAddr, DstPort
		ORDER BY Bytes DESC
		LIMIT 30
		"""#
}
144 |
// Panel group for the top-flow sankey row: row header plus two panels whose
// SQL comes from _topFlowSankeyQueries, looked up by panel title.
// Fix: the row header was "AS Relations" — a copy-paste duplicate of the
// _asRelations row title — even though this row contains the top-flow
// panels; renamed to describe its actual content.
_topFlowSankey: [{
	title: "Top Flows"
	gridPos: y: 32
	type: "row"
}, {
	title: "Top 30 Flows (per IP)"
	type: "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 0, y: 33}
	options: nodePadding: 6
	options: nodeWidth: 28
	targets: [{
		rawSql: _topFlowSankeyQueries[title]
	}]
}, {
	title: "Top 30 Flows (per IP+Port)"
	type: "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 12, y: 33}
	// Wider padding: the per-port variant has more, longer node labels.
	options: nodePadding: 11
	options: nodeWidth: 28
	targets: [{
		rawSql: _topFlowSankeyQueries[title]
	}]
}]
168 |
// Raw SQL for the per-ASN service sankeys, keyed by panel title.
_flowsPerASNQueries: {
	// Inbound: remote source ASN → local destination service (host proto+port).
	"Top 30 ASN per service (inbound)":
		#"""
		SELECT
			ASNToString(SrcAS) AS SrcASName,
			if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(DstPort) as Dst,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		AND isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection)
		GROUP BY SrcAS, SamplerAddress, DstAddr, Proto, DstPort
		ORDER BY Bytes DESC
		LIMIT 30
		"""#

	// NOTE(review): identical to the inbound query except for the negated
	// isIncomingFlow — it still selects and groups by the *source* AS and the
	// *destination* service. For outbound traffic the remote peer is usually
	// DstAS and the local service the source endpoint; confirm SrcAS/DstAddr
	// are intended here and not a copy-paste leftover.
	"Top 30 ASN per service (outbound)":
		#"""
		SELECT
			ASNToString(SrcAS) AS SrcASName,
			if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(DstPort) as Dst,
			(sum(Bytes * SamplingRate) / 1024) as Bytes
		FROM flows_raw
		WHERE $__timeFilter(TimeReceived)
		\#(_filtersWithHost)
		AND NOT isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection)
		GROUP BY SrcAS, SamplerAddress, DstAddr, Proto, DstPort
		ORDER BY Bytes DESC
		LIMIT 30
		"""#
}
200 |
// Panel group for the "Flows per ASN" row: row header plus two sankey panels
// whose SQL is looked up from _flowsPerASNQueries by the panel title.
_flowsPerASN: [{
	title: "Flows per ASN"
	gridPos: y: 34
	type: "row"
}, {
	title: "Top 30 ASN per service (inbound)"
	type:  "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 0, y: 60}
	options: {nodePadding: 11, nodeWidth: 28}
	targets: [{rawSql: _flowsPerASNQueries[title]}]
}, {
	title: "Top 30 ASN per service (outbound)"
	type:  "netsage-sankey-panel"
	gridPos: {h: 24, w: 12, x: 12, y: 60}
	options: {nodePadding: 11, nodeWidth: 28}
	targets: [{rawSql: _flowsPerASNQueries[title]}]
}]
224 |
// Grafana dashboard assembling the panel groups defined above, in order.
dashboards: "Traffic Relations": {
	#folder: "NetMeta" // provisioning folder the dashboard is filed under
	title: "Traffic Relations"
	uid: "5pH2j5ank" // fixed UID so dashboard links survive re-provisioning
	// _disclaimerPanels/_infoPanels are shared groups defined elsewhere in
	// this package.
	_panels: list.Concat([
		_disclaimerPanels,
		_infoPanels,
		_asRelations,
		_topFlowSankey,
		_flowsPerASN,
	])
}
237 |
--------------------------------------------------------------------------------