├── .bazelversion ├── third_party ├── gazelle │ ├── BUILD.bazel │ └── add-prepatching.patch ├── rules_go │ ├── BUILD.bazel │ ├── disable-cgo.patch │ └── rules_go_absolute_embedsrc.patch ├── rules_oci │ ├── BUILD.bazel │ └── pr-715.patch ├── grafana │ ├── BUILD.bazel │ └── defs.bzl └── goflow │ ├── binaries.go │ ├── BUILD.bazel │ └── readd-flowdirection.patch ├── .gitignore ├── cmd ├── reconciler │ ├── placeholder.s │ ├── BUILD.bazel │ ├── quote.go │ ├── main.go │ ├── visitor.go │ ├── config.go │ └── reconciler.go ├── risinfo │ └── BUILD.bazel └── portmirror │ ├── BUILD.bazel │ ├── frame.go │ ├── portmirror.go │ └── iface.go ├── cue.mod ├── module.cue ├── BUILD.bazel ├── gen │ ├── k8s.io │ │ ├── api │ │ │ ├── core │ │ │ │ └── v1 │ │ │ │ │ ├── register_go_gen.cue │ │ │ │ │ ├── doc_go_gen.cue │ │ │ │ │ ├── well_known_taints_go_gen.cue │ │ │ │ │ └── well_known_labels_go_gen.cue │ │ │ ├── apps │ │ │ │ └── v1 │ │ │ │ │ └── register_go_gen.cue │ │ │ └── rbac │ │ │ │ └── v1 │ │ │ │ └── register_go_gen.cue │ │ ├── apimachinery │ │ │ └── pkg │ │ │ │ ├── runtime │ │ │ │ ├── embedded_go_gen.cue │ │ │ │ ├── types_proto_go_gen.cue │ │ │ │ ├── conversion_go_gen.cue │ │ │ │ ├── allocator_go_gen.cue │ │ │ │ ├── converter_go_gen.cue │ │ │ │ ├── negotiate_go_gen.cue │ │ │ │ ├── swagger_doc_generator_go_gen.cue │ │ │ │ ├── helper_go_gen.cue │ │ │ │ ├── codec_go_gen.cue │ │ │ │ ├── doc_go_gen.cue │ │ │ │ └── types_go_gen.cue │ │ │ │ ├── types │ │ │ │ ├── doc_go_gen.cue │ │ │ │ ├── namespacedname_go_gen.cue │ │ │ │ ├── uid_go_gen.cue │ │ │ │ ├── patch_go_gen.cue │ │ │ │ └── nodename_go_gen.cue │ │ │ │ ├── apis │ │ │ │ └── meta │ │ │ │ │ └── v1 │ │ │ │ │ ├── register_go_gen.cue │ │ │ │ │ ├── duration_go_gen.cue │ │ │ │ │ ├── micro_time_go_gen.cue │ │ │ │ │ ├── time_go_gen.cue │ │ │ │ │ ├── time_proto_go_gen.cue │ │ │ │ │ ├── watch_go_gen.cue │ │ │ │ │ ├── meta_go_gen.cue │ │ │ │ │ └── group_version_go_gen.cue │ │ │ │ ├── watch │ │ │ │ ├── doc_go_gen.cue │ │ │ │ ├── filter_go_gen.cue │ │ 
│ │ ├── streamwatcher_go_gen.cue │ │ │ │ ├── mux_go_gen.cue │ │ │ │ └── watch_go_gen.cue │ │ │ │ ├── api │ │ │ │ └── resource │ │ │ │ │ ├── suffix_go_gen.cue │ │ │ │ │ ├── math_go_gen.cue │ │ │ │ │ ├── amount_go_gen.cue │ │ │ │ │ └── quantity_go_gen.cue │ │ │ │ └── util │ │ │ │ └── intstr │ │ │ │ └── intstr_go_gen.cue │ │ └── apiextensions-apiserver │ │ │ └── pkg │ │ │ └── apis │ │ │ └── apiextensions │ │ │ └── v1 │ │ │ ├── doc_go_gen.cue │ │ │ └── register_go_gen.cue │ └── github.com │ │ └── monogon-dev │ │ └── netmeta │ │ └── reconciler │ │ └── config_go_gen.cue └── pkg │ └── netmeta.monogon.tech │ └── xml │ └── xml.cue ├── deploy ├── single-node │ ├── schema │ │ ├── 0002_create_dictionaries_database.sql │ │ ├── DBManager.proto │ │ ├── files.cue │ │ ├── schema.cue │ │ ├── tables.cue │ │ ├── 0001_create_flows_raw.sql │ │ ├── traffic_data.proto │ │ ├── FlowMessage.proto │ │ ├── views.cue │ │ └── functions.cue │ ├── diff_tool.cue │ ├── tests │ │ ├── README.md │ │ └── base.cue │ ├── k8s │ │ ├── grafana │ │ │ └── routes.cue │ │ ├── risinfo │ │ │ └── risinfo.cue │ │ ├── portmirror │ │ │ └── deployment.cue │ │ ├── reconciler │ │ │ └── reconciler.cue │ │ ├── goflow │ │ │ └── deployment.cue │ │ ├── kafka │ │ │ ├── kafka.cue │ │ │ └── metrics.cue │ │ ├── traefik │ │ │ └── deployment.cue │ │ └── clickhouse │ │ │ ├── clickhouse.cue │ │ │ ├── static_files.cue │ │ │ └── files.cue │ ├── dump_tool.cue │ ├── apply_tool.cue │ ├── BUILD.bazel │ └── config_legacy.cue ├── deps.go ├── k8s-base │ ├── strimzi-kafka-operator │ │ └── README.md │ ├── traefik │ │ ├── README.md │ │ └── kubernetes-crd-rbac.cue │ └── clickhouse-operator │ │ └── README.md ├── BUILD.bazel ├── go.mod └── dashboards │ ├── General_Home.cue │ └── NetMeta_Relations.cue ├── tools.go ├── .bazelproject ├── .aspect └── bazelrc │ ├── BUILD.bazel │ ├── debug.bazelrc │ ├── bazel7.bazelrc │ ├── convenience.bazelrc │ ├── performance.bazelrc │ ├── ci.bazelrc │ └── correctness.bazelrc ├── .github └── workflows │ └── 
build_images.yml ├── doc ├── grafana.md └── ingest.md ├── .bazelrc ├── BUILD.bazel ├── UPGRADE.md ├── MODULE.bazel ├── go.mod └── README.md /.bazelversion: -------------------------------------------------------------------------------- 1 | 7.4.0 -------------------------------------------------------------------------------- /third_party/gazelle/BUILD.bazel: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /third_party/rules_go/BUILD.bazel: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /third_party/rules_oci/BUILD.bazel: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *_local.cue 3 | deploy/single-node/out 4 | .ijwb 5 | bazel-* 6 | -------------------------------------------------------------------------------- /cmd/reconciler/placeholder.s: -------------------------------------------------------------------------------- 1 | // To allow the use of go:linkname a placeholder asm file is required -------------------------------------------------------------------------------- /third_party/grafana/BUILD.bazel: -------------------------------------------------------------------------------- 1 | load(":defs.bzl", "grafana_image") 2 | 3 | grafana_image() 4 | -------------------------------------------------------------------------------- /cue.mod/module.cue: -------------------------------------------------------------------------------- 1 | module: "github.com/monogon-dev/netmeta" 2 | 3 | language: { 4 | version: "v0.9.0" 5 | } 6 | -------------------------------------------------------------------------------- 
/cue.mod/BUILD.bazel: -------------------------------------------------------------------------------- 1 | filegroup( 2 | name = "cue.mod", 3 | srcs = glob(["**/*.cue"]), 4 | visibility = ["//deploy:__pkg__"], 5 | ) 6 | -------------------------------------------------------------------------------- /third_party/goflow/binaries.go: -------------------------------------------------------------------------------- 1 | //go:build goflow 2 | 3 | package goflow 4 | 5 | // Build goflow container 6 | import ( 7 | _ "github.com/netsampler/goflow2/cmd/goflow2" 8 | ) 9 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/api/core/v1/register_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/api/core/v1 4 | 5 | package v1 6 | 7 | #GroupName: "" 8 | -------------------------------------------------------------------------------- /deploy/single-node/schema/0002_create_dictionaries_database.sql: -------------------------------------------------------------------------------- 1 | -- +goose Up 2 | CREATE DATABASE IF NOT EXISTS dictionaries Engine=Dictionary; 3 | 4 | -- +goose Down 5 | DROP DATABASE dictionaries; -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/api/apps/v1/register_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/api/apps/v1 4 | 5 | package v1 6 | 7 | #GroupName: "apps" 8 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/api/core/v1/doc_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 
2 | 3 | //cue:generate cue get go k8s.io/api/core/v1 4 | 5 | // Package v1 is the v1 version of the core API. 6 | package v1 7 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/api/rbac/v1/register_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/api/rbac/v1 4 | 5 | package v1 6 | 7 | #GroupName: "rbac.authorization.k8s.io" 8 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/embedded_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | _#encodable: _ 8 | -------------------------------------------------------------------------------- /deploy/single-node/diff_tool.cue: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | command: diff: { 4 | task: kube: { 5 | kind: "exec" 6 | cmd: "k3s kubectl diff --server-side --force-conflicts -f -" 7 | stdin: all_objects_yaml 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/types/doc_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/types 4 | 5 | // Package types implements various generic types used throughout kubernetes. 
6 | package types 7 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/doc_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 4 | 5 | // Package v1 is the v1 version of the API. 6 | package v1 7 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/types_proto_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | #ProtobufMarshaller: _ 8 | 9 | #ProtobufReverseMarshaller: _ 10 | -------------------------------------------------------------------------------- /deploy/deps.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | 3 | package deploy 4 | 5 | // Static imports for Cue definitions 6 | 7 | import ( 8 | _ "k8s.io/api/apps/v1" 9 | _ "k8s.io/api/core/v1" 10 | _ "k8s.io/api/rbac/v1" 11 | _ "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 12 | ) 13 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/register_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 
2 | 3 | //cue:generate cue get go k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 4 | 5 | package v1 6 | 7 | #GroupName: "apiextensions.k8s.io" 8 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/register_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | #GroupName: "meta.k8s.io" 8 | 9 | #WatchEventKind: "WatchEvent" 10 | -------------------------------------------------------------------------------- /deploy/single-node/tests/README.md: -------------------------------------------------------------------------------- 1 | # Config Tests 2 | 3 | To ensure we don't break existing configs, we validate the output against known values. To add a new one you can explore the result of the test configs with: 4 | ``` 5 | cue export ./tests -e 'test."empty sampler".out.k8s' --out cue 6 | ``` -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | //go:build tools 2 | 3 | package NetMeta 4 | 5 | // Static imports for command-line dependencies we build from source. 6 | 7 | import ( 8 | _ "cuelang.org/go/cmd/cue" 9 | _ "github.com/netsampler/goflow2/v2/cmd/goflow2" 10 | _ "github.com/pressly/goose/v3/cmd/goose" 11 | ) 12 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/watch/doc_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/watch 4 | 5 | // Package watch contains a generic watchable interface, and a fake for 6 | // testing code that uses the watch interface. 
7 | package watch 8 | -------------------------------------------------------------------------------- /deploy/single-node/schema/DBManager.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package netmeta; 3 | 4 | import "google/protobuf/descriptor.proto"; 5 | 6 | extend google.protobuf.FieldOptions { 7 | optional string column_type = 50000; 8 | optional string column_name = 50001; 9 | optional bool column_skip = 50002; 10 | } -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/api/resource/suffix_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/api/resource 4 | 5 | package resource 6 | 7 | _#suffix: string 8 | 9 | // suffixer can interpret and construct suffixes. 10 | _#suffixer: _ 11 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/conversion_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | // Package runtime defines conversions between generic types and structs to map query strings 6 | // to struct objects. 7 | package runtime 8 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/types/namespacedname_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 
2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/types 4 | 5 | package types 6 | 7 | #NamespacedName: { 8 | Namespace: string 9 | Name: string 10 | } 11 | 12 | #Separator: 47 // '/' 13 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/watch/filter_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/watch 4 | 5 | package watch 6 | 7 | // Recorder records all events that are sent from the watch until it is closed. 8 | #Recorder: { 9 | Interface: #Interface 10 | } 11 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/allocator_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // SimpleAllocator a wrapper around make([]byte) 8 | // conforms to the MemoryAllocator interface 9 | #SimpleAllocator: { 10 | } 11 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/converter_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // UnstructuredConverter is an interface for converting between interface{} 8 | // and map[string]interface representation. 
9 | #UnstructuredConverter: _ 10 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/negotiate_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // NegotiateError is returned when a ClientNegotiator is unable to locate 8 | // a serializer for the requested operation. 9 | #NegotiateError: { 10 | ContentType: string 11 | Stream: bool 12 | } 13 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/grafana/routes.cue: -------------------------------------------------------------------------------- 1 | package grafana 2 | 3 | IngressRoute: "grafana-tls": spec: { 4 | entryPoints: ["websecure"] 5 | routes: [ 6 | { 7 | match: "Host(`\(#Config.publicHostname)`) && PathPrefix(`/`)" 8 | kind: "Rule" 9 | services: [ 10 | { 11 | name: "grafana" 12 | port: 80 13 | }, 14 | ] 15 | }, 16 | ] 17 | tls: certResolver: "publicHostnameResolver" 18 | } 19 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/duration_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | // Duration is a wrapper around time.Duration which supports correct 8 | // marshaling to YAML and JSON. In particular, it marshals into strings, which 9 | // can be used as map keys in json. 
10 | #Duration: _ 11 | -------------------------------------------------------------------------------- /.bazelproject: -------------------------------------------------------------------------------- 1 | directories: 2 | # Add the directories you want added as source here 3 | # By default, we've added your entire workspace ('.') 4 | . 5 | 6 | # Automatically includes all relevant targets under the 'directories' above 7 | derive_targets_from_directories: false 8 | 9 | targets: 10 | # If source code isn't resolving, add additional targets that compile it here 11 | //... 12 | 13 | additional_languages: 14 | go 15 | python -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/types/uid_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/types 4 | 5 | package types 6 | 7 | // UID is a type that holds unique ID values, including UUIDs. Because we 8 | // don't ONLY use UUIDs, this is an alias to string. Being a type captures 9 | // intent and helps make sure that UIDs and names do not get conflated. 
10 | #UID: string 11 | -------------------------------------------------------------------------------- /deploy/single-node/schema/files.cue: -------------------------------------------------------------------------------- 1 | @extern(embed) 2 | 3 | package schema 4 | 5 | file: "DBManager.proto": _ @embed(file=DBManager.proto,type=text) 6 | 7 | file: "FlowMessage.proto": _ @embed(file=FlowMessage.proto,type=text) 8 | 9 | // FastNetMon traffic format 10 | // https://github.com/pavel-odintsov/fastnetmon/blob/master/src/traffic_output_formats/protobuf/traffic_data.proto 11 | file: "traffic_data.proto": _ @embed(file=traffic_data.proto,type=text) -------------------------------------------------------------------------------- /.aspect/bazelrc/BUILD.bazel: -------------------------------------------------------------------------------- 1 | "Aspect bazelrc presets; see https://docs.aspect.build/guides/bazelrc" 2 | 3 | load("@aspect_bazel_lib//lib:bazelrc_presets.bzl", "write_aspect_bazelrc_presets") 4 | 5 | write_aspect_bazelrc_presets( 6 | name = "update_aspect_bazelrc_presets", 7 | presets = [ 8 | "bazel7", 9 | "ci", 10 | "convenience", 11 | "correctness", 12 | "debug", 13 | "performance", 14 | ], 15 | ) 16 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // Pair of strings. We keed the name of fields and the doc 8 | #Pair: { 9 | Name: string 10 | Doc: string 11 | } 12 | 13 | // KubeTypes is an array to represent all available types in a parsed file. 
[0] is for the type itself 14 | #KubeTypes: [...#Pair] 15 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | #RFC3339Micro: "2006-01-02T15:04:05.000000Z07:00" 8 | 9 | // MicroTime is version of Time with microsecond level precision. 10 | // 11 | // +protobuf.options.marshal=false 12 | // +protobuf.as=Timestamp 13 | // +protobuf.options.(gogoproto.goproto_stringer)=false 14 | #MicroTime: _ 15 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/watch/streamwatcher_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/watch 4 | 5 | package watch 6 | 7 | // Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. 8 | #Decoder: _ 9 | 10 | // Reporter hides the details of how an error is turned into a runtime.Object for 11 | // reporting on a watch stream since this package may not import a higher level report. 12 | #Reporter: _ 13 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/api/resource/math_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/api/resource 4 | 5 | package resource 6 | 7 | // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64. 8 | // It is also the maximum decimal digits that can be represented with an int64. 
9 | _#maxInt64Factors: 18 10 | 11 | _#mostNegative: -9223372036854775808 12 | 13 | _#mostPositive: 9223372036854775807 14 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/time_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | // Time is a wrapper around time.Time which supports correct 8 | // marshaling to YAML and JSON. Wrappers are provided for many 9 | // of the factory methods that the time package offers. 10 | // 11 | // +protobuf.options.marshal=false 12 | // +protobuf.as=Timestamp 13 | // +protobuf.options.(gogoproto.goproto_stringer)=false 14 | #Time: _ 15 | -------------------------------------------------------------------------------- /deploy/k8s-base/strimzi-kafka-operator/README.md: -------------------------------------------------------------------------------- 1 | # Strimzi Kafka Operator 2 | 3 | The CUE schema is generated via these commands: 4 | - `curl "https://strimzi.io/install/latest?namespace=default" > strimzi-cluster-operator.yaml` 5 | - `cue import strimzi-cluster-operator.yaml -l kind -l metadata.name -p k8s -f` 6 | - `rm strimzi-cluster-operator.yaml` 7 | 8 | The upstream file contains an empty yaml object at the end of the stream which you have to delete. Else you will get the following error: 9 | ``` 10 | error evaluating label kind: reference "kind" not found 11 | ``` 12 | -------------------------------------------------------------------------------- /third_party/rules_go/disable-cgo.patch: -------------------------------------------------------------------------------- 1 | diff --git a/BUILD.bazel b/BUILD.bazel 2 | --- a/BUILD.bazel 3 | +++ b/BUILD.bazel 4 | @@ -40,12 +40,9 @@ 5 | # rules_go but not Gazelle, including our own go_bazel_tests. 
6 | 7 | stdlib( 8 | name = "stdlib", 9 | - cgo_context_data = select({ 10 | - "//go/platform:internal_cgo_off": None, 11 | - "//conditions:default": ":cgo_context_data", 12 | - }), 13 | + cgo_context_data = None, 14 | visibility = ["//visibility:public"], 15 | ) 16 | 17 | # default_nogo is the nogo target that nogo references by default. It 18 | -------------------------------------------------------------------------------- /deploy/BUILD.bazel: -------------------------------------------------------------------------------- 1 | load("@rules_multirun//:defs.bzl", "multirun") 2 | 3 | # Use //deploy:all_files for accessing all relevant files. 4 | filegroup( 5 | name = "all_files", 6 | srcs = glob(["**/*"]) + [ 7 | "//cue.mod", 8 | "//deploy/single-node:all_files", 9 | ], 10 | visibility = ["//visibility:public"], 11 | ) 12 | 13 | multirun( 14 | name = "oci_load", 15 | commands = [ 16 | "//cmd/portmirror:portmirror_load", 17 | "//cmd/reconciler:reconciler_load", 18 | "//cmd/risinfo:risinfo_load", 19 | ], 20 | jobs = 0, 21 | ) 22 | -------------------------------------------------------------------------------- /deploy/k8s-base/traefik/README.md: -------------------------------------------------------------------------------- 1 | # Traefik 2 | 3 | The CUE schema is generated via these commands: 4 | - `wget https://raw.githubusercontent.com/traefik/traefik/v2.9.1/docs/content/reference/dynamic-configuration/kubernetes-crd-definition-v1.yml` 5 | - `wget https://raw.githubusercontent.com/traefik/traefik/v2.9.1/docs/content/reference/dynamic-configuration/kubernetes-crd-rbac.yml` 6 | - `cue import kubernetes-crd-definition-v1.yml -l kind -l metadata.name -p k8s -f` 7 | - `cue import kubernetes-crd-rbac.yml -l kind -l metadata.name -p k8s -f` 8 | - `rm kubernetes-crd-definition-v1.yml kubernetes-crd-rbac.yml` 9 | -------------------------------------------------------------------------------- /third_party/goflow/BUILD.bazel: 
-------------------------------------------------------------------------------- 1 | load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load") 2 | load("@rules_pkg//pkg:tar.bzl", "pkg_tar") 3 | 4 | pkg_tar( 5 | name = "goflow_tar", 6 | srcs = ["@com_github_netsampler_goflow2//cmd/goflow2"], 7 | ) 8 | 9 | oci_image( 10 | name = "goflow_image", 11 | base = "@distroless_base", 12 | entrypoint = ["/goflow"], 13 | tars = [":goflow_tar"], 14 | visibility = ["//visibility:public"], 15 | ) 16 | 17 | oci_load( 18 | name = "goflow_load", 19 | image = ":goflow_image", 20 | repo_tags = ["github.com/monogon-dev/netmeta/goflow:latest"], 21 | visibility = ["//visibility:public"], 22 | ) 23 | -------------------------------------------------------------------------------- /deploy/single-node/dump_tool.cue: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "encoding/json" 5 | "strings" 6 | "tool/file" 7 | ) 8 | 9 | command: dump: { 10 | task: print: { 11 | kind: "print" 12 | text: all_objects_yaml 13 | } 14 | } 15 | 16 | command: dump_dashboards: { 17 | outDir: file.MkdirAll & { 18 | path: "out/dashboards" 19 | } 20 | 21 | for k, v in netmeta.dashboards { 22 | let fileName = "\(strings.ToLower(strings.Replace(k, " ", "_", -1))).json" 23 | "\(outDir.path)/\(fileName)": file.Create & { 24 | $after: outDir 25 | filename: "\(outDir.path)/\(fileName)" 26 | contents: json.Indent(json.Marshal(v), "", " ") 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /.github/workflows/build_images.yml: -------------------------------------------------------------------------------- 1 | on: [ push ] 2 | jobs: 3 | build: 4 | runs-on: ubuntu-latest 5 | if: github.ref == 'refs/heads/main' 6 | steps: 7 | - uses: actions/checkout@v3 8 | - uses: bazelbuild/setup-bazelisk@v2 9 | - name: Mount bazel cache 10 | uses: actions/cache@v3 11 | with: 12 | path: "~/.cache/bazel" 13 | key: bazel 
14 | - name: Login to GitHub Container Registry 15 | uses: docker/login-action@v2 16 | with: 17 | registry: ghcr.io 18 | username: ${{ github.actor }} 19 | password: ${{ secrets.GITHUB_TOKEN }} 20 | - run: bazel run //:push 21 | -------------------------------------------------------------------------------- /third_party/goflow/readd-flowdirection.patch: -------------------------------------------------------------------------------- 1 | diff --git a/pb/flow.proto b/pb/flow.proto 2 | --- a/pb/flow.proto 3 | +++ b/pb/flow.proto 4 | @@ -1,7 +1,7 @@ 5 | syntax = "proto3"; 6 | package flowpb; 7 | -option go_package = "github.com/netsampler/goflow2/pb;flowpb"; 8 | +option go_package = "github.com/netsampler/goflow2/v2/pb;flowpb"; 9 | 10 | message FlowMessage { 11 | 12 | enum FlowType { 13 | @@ -16,9 +16,9 @@ 14 | uint64 time_received_ns = 110; 15 | uint32 sequence_num = 4; 16 | uint64 sampling_rate = 3; 17 | 18 | - //uint32 flow_direction = 42; 19 | + uint32 flow_direction = 42; 20 | 21 | // Sampler information 22 | bytes sampler_address = 11; 23 | 24 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/helper_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // MultiObjectTyper returns the types of objects across multiple schemes in order. 8 | #MultiObjectTyper: [...#ObjectTyper] 9 | 10 | _#defaultFramer: { 11 | } 12 | 13 | // WithVersionEncoder serializes an object and ensures the GVK is set. 14 | #WithVersionEncoder: { 15 | Version: #GroupVersioner 16 | Encoder: #Encoder 17 | ObjectTyper: #ObjectTyper 18 | } 19 | 20 | // WithoutVersionDecoder clears the group version kind of a deserialized object. 
21 | #WithoutVersionDecoder: { 22 | Decoder: #Decoder 23 | } 24 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/watch/mux_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/watch 4 | 5 | package watch 6 | 7 | // FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch 8 | // channel is full. 9 | #FullChannelBehavior: int // #enumFullChannelBehavior 10 | 11 | #enumFullChannelBehavior: 12 | #WaitIfChannelFull | 13 | #DropIfChannelFull 14 | 15 | #values_FullChannelBehavior: { 16 | WaitIfChannelFull: #WaitIfChannelFull 17 | DropIfChannelFull: #DropIfChannelFull 18 | } 19 | 20 | #WaitIfChannelFull: #FullChannelBehavior & 0 21 | #DropIfChannelFull: #FullChannelBehavior & 1 22 | 23 | _#incomingQueueLength: 25 24 | 25 | _#internalRunFunctionMarker: "internal-do-function" 26 | -------------------------------------------------------------------------------- /third_party/rules_go/rules_go_absolute_embedsrc.patch: -------------------------------------------------------------------------------- 1 | This adds support for workspace-relative Go embeds. 
2 | 3 | diff --git a/go/tools/builders/compilepkg.go b/go/tools/builders/compilepkg.go 4 | index 09e3ef6e..4623f803 100644 5 | --- a/go/tools/builders/compilepkg.go 6 | +++ b/go/tools/builders/compilepkg.go 7 | @@ -379,6 +379,13 @@ func compileArchive( 8 | } 9 | } 10 | } 11 | + // Sort by length descending to not get wrong roots 12 | + sort.Slice(embedRoots, func(i, j int) bool { 13 | + return len(embedRoots[i]) > len(embedRoots[j]) 14 | + }) 15 | + for _, root := range embedRoots { 16 | + embedRootDirs = append(embedRootDirs, abs(root)) 17 | + } 18 | embedcfgPath, err := buildEmbedcfgFile(srcs.goSrcs, embedSrcs, embedRootDirs, workDir) 19 | if err != nil { 20 | return err -------------------------------------------------------------------------------- /doc/grafana.md: -------------------------------------------------------------------------------- 1 | # Grafana 2 | ## Exporting Dashboards 3 | It is possible to export all Grafana dashboards to JSON files for easier import into a self-managed Grafana instance. 4 | Use the following command to dump all dashboards: 5 | ``` 6 | ~/NetMeta/deploy $ cue dump_dashboards ./... 7 | ``` 8 | 9 | ## FastNetMon Integration 10 | 11 | NetMeta supports displaying FastNetMon attack notifications if a FastNetMon InfluxDB datasource is available in the same Grafana instance. 12 | 13 | To configure FastNetMon to write attack notifications you have to enable them via: 14 | ``` 15 | sudo fcli set main influxdb_attack_notification enable 16 | sudo fcli commit 17 | ``` 18 | 19 | More info is available in the FastNetMon docs: https://fastnetmon.com/docs-fnm-advanced/fastnetmon-attack-notification-in-grafana/ 20 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/types/patch_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 
2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/types 4 | 5 | package types 6 | 7 | // Similarly to above, these are constants to support HTTP PATCH utilized by 8 | // both the client and server that didn't make sense for a whole package to be 9 | // dedicated to. 10 | #PatchType: string // #enumPatchType 11 | 12 | #enumPatchType: 13 | #JSONPatchType | 14 | #MergePatchType | 15 | #StrategicMergePatchType | 16 | #ApplyPatchType 17 | 18 | #JSONPatchType: #PatchType & "application/json-patch+json" 19 | #MergePatchType: #PatchType & "application/merge-patch+json" 20 | #StrategicMergePatchType: #PatchType & "application/strategic-merge-patch+json" 21 | #ApplyPatchType: #PatchType & "application/apply-patch+yaml" 22 | -------------------------------------------------------------------------------- /.aspect/bazelrc/debug.bazelrc: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | # Use `bazel test --config=debug` to enable these settings # 3 | ############################################################ 4 | 5 | # Stream stdout/stderr output from each test in real-time. 6 | # Docs: https://bazel.build/docs/user-manual#test-output 7 | test:debug --test_output=streamed 8 | 9 | # Run one test at a time. 10 | # Docs: https://bazel.build/reference/command-line-reference#flag--test_strategy 11 | test:debug --test_strategy=exclusive 12 | 13 | # Prevent long running tests from timing out. 14 | # Docs: https://bazel.build/docs/user-manual#test-timeout 15 | test:debug --test_timeout=9999 16 | 17 | # Always run tests even if they have cached results. 
18 | # Docs: https://bazel.build/docs/user-manual#cache-test-results 19 | test:debug --nocache_test_results 20 | -------------------------------------------------------------------------------- /deploy/k8s-base/clickhouse-operator/README.md: -------------------------------------------------------------------------------- 1 | # Clickhouse Operator 2 | 3 | The CUE schema is generated via these commands: 4 | - `curl -s https://raw.githubusercontent.com/Altinity/clickhouse-operator/master/deploy/operator/clickhouse-operator-install-template.yaml | \ 5 | OPERATOR_IMAGE="altinity/clickhouse-operator:0.21.0@sha256:8f481827d60398d0c553ea7c1726c0acec4ca893af710333ea5aa795ca96c0b9" \ 6 | OPERATOR_NAMESPACE="default" \ 7 | OPERATOR_IMAGE_PULL_POLICY="IfNotPresent" \ 8 | METRICS_EXPORTER_IMAGE="altinity/metrics-exporter:0.21.0@sha256:a9743f3c012400e122abc470a3e4c95a7ab25ab3025df1e6d7f98af75f627215" \ 9 | METRICS_EXPORTER_NAMESPACE="default" \ 10 | METRICS_EXPORTER_IMAGE_PULL_POLICY="IfNotPresent" \ 11 | envsubst > clickhouse-operator.yaml` 12 | - `cue import clickhouse-operator.yaml -l kind -l metadata.name -p k8s -f` 13 | - `rm clickhouse-operator.yaml` 14 | -------------------------------------------------------------------------------- /deploy/single-node/apply_tool.cue: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import "encoding/yaml" 4 | 5 | command: apply: { 6 | task: apply: { 7 | $after: task.prereqs 8 | kind: "exec" 9 | cmd: "k3s kubectl apply --server-side --force-conflicts --all -f -" 10 | stdin: all_objects_yaml 11 | } 12 | } 13 | 14 | command: "apply-prune": { 15 | task: kube: { 16 | kind: "exec" 17 | cmd: "k3s kubectl apply --server-side --force-conflicts --all -f - --prune=true" 18 | stdin: all_objects_yaml 19 | } 20 | } 21 | 22 | // Applies configmaps only 23 | command: "apply-cm": { 24 | task: apply: { 25 | kind: "exec" 26 | cmd: "k3s kubectl apply --server-side --force-conflicts --all -f -" 27 | 
stdin: yaml.MarshalStream([ for v in all_objects if v.kind == "ConfigMap" {v}]) 28 | stdout: string 29 | } 30 | task: applyDisplay: { 31 | kind: "print" 32 | text: task.apply.stdout 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /.bazelrc: -------------------------------------------------------------------------------- 1 | # Import Aspect bazelrc presets 2 | import %workspace%/.aspect/bazelrc/bazel7.bazelrc 3 | import %workspace%/.aspect/bazelrc/convenience.bazelrc 4 | import %workspace%/.aspect/bazelrc/correctness.bazelrc 5 | import %workspace%/.aspect/bazelrc/debug.bazelrc 6 | import %workspace%/.aspect/bazelrc/performance.bazelrc 7 | 8 | build --@rules_go//go/config:pure 9 | build --@rules_go//go/config:static 10 | 11 | build --verbose_failures 12 | build --worker_sandboxing 13 | build --experimental_output_directory_naming_scheme=diff_against_dynamic_baseline 14 | build --action_env BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 15 | common --incompatible_enable_proto_toolchain_resolution 16 | 17 | build:linux --sandbox_add_mount_pair=/tmp 18 | build:macos --sandbox_add_mount_pair=/var/tmp 19 | build:windows --sandbox_add_mount_pair=C:\Temp 20 | 21 | test --sandbox_default_allow_network=false 22 | test --test_output=errors 23 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/util/intstr/intstr_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/util/intstr 4 | 5 | package intstr 6 | 7 | // IntOrString is a type that can hold an int32 or a string. When used in 8 | // JSON or YAML marshalling and unmarshalling, it produces or consumes the 9 | // inner type. This allows you to have, for example, a JSON field that can 10 | // accept a name or number. 
11 | // TODO: Rename to Int32OrString 12 | // 13 | // +protobuf=true 14 | // +protobuf.options.(gogoproto.goproto_stringer)=false 15 | // +k8s:openapi-gen=true 16 | #IntOrString: _ 17 | 18 | // Type represents the stored type of IntOrString. 19 | #Type: int64 // #enumType 20 | 21 | #enumType: 22 | #Int | 23 | #String 24 | 25 | #values_Type: { 26 | Int: #Int 27 | String: #String 28 | } 29 | 30 | #Int: #Type & 0 31 | #String: #Type & 1 32 | -------------------------------------------------------------------------------- /deploy/single-node/schema/schema.cue: -------------------------------------------------------------------------------- 1 | package schema 2 | 3 | #Config: { 4 | fastNetMon?: { 5 | ... 6 | } 7 | sampler: [DEVICE=string]: isIncomingFlow: [...{[COLUMN=string]: string | int}] 8 | } 9 | 10 | // Function contains all information to create or benchmark a UDF inside Clickhouse 11 | function: [NAME=string]: { 12 | name: NAME 13 | arguments: [...string] 14 | query: string 15 | } 16 | 17 | // MaterializedView contains all information to create a MaterializedView inside Clickhouse 18 | // It also allows to benchmark the select statement inside the MV 19 | view: [NAME=string]: { 20 | name: NAME 21 | to: string 22 | from: string 23 | query: string 24 | } 25 | 26 | // Table contains the Name, the Type, additional Settings and 27 | // a reference to a Message inside the Protobuf file 28 | table: [NAME=string]:{ 29 | name: NAME 30 | schema: string 31 | engine: string 32 | settings: [string]: _ 33 | } 34 | -------------------------------------------------------------------------------- /deploy/single-node/schema/tables.cue: -------------------------------------------------------------------------------- 1 | package schema 2 | 3 | table: flows_queue: { 4 | schema: "FlowMessage.proto:FlowMessage" 5 | engine: "Kafka" 6 | settings: kafka_broker_list: "netmeta-kafka-bootstrap:9092" 7 | settings: kafka_topic_list: "flow-messages" 8 | settings: kafka_group_name: "clickhouse" 
9 | settings: kafka_format: "Protobuf" 10 | settings: kafka_schema: schema 11 | settings: kafka_max_block_size: int & 1048576 12 | } 13 | 14 | if #Config.fastNetMon != _|_ { 15 | table: fastnetmon_queue: { 16 | schema: "traffic_data.proto:TrafficData" 17 | engine: "Kafka" 18 | settings: kafka_broker_list: "netmeta-kafka-bootstrap:9092" 19 | settings: kafka_topic_list: "fastnetmon" 20 | settings: kafka_group_name: "clickhouse" 21 | settings: kafka_format: "ProtobufSingle" 22 | settings: kafka_schema: schema 23 | settings: kafka_max_block_size: int & 1048576 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/codec_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // codec binds an encoder and decoder. 8 | _#codec: { 9 | Encoder: #Encoder 10 | Decoder: #Decoder 11 | } 12 | 13 | // NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding. 14 | #NoopEncoder: { 15 | Decoder: #Decoder 16 | } 17 | 18 | _#noopEncoderIdentifier: #Identifier & "noop" 19 | 20 | // NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. 
21 | #NoopDecoder: { 22 | Encoder: #Encoder 23 | } 24 | 25 | _#base64Serializer: { 26 | Encoder: #Encoder 27 | Decoder: #Decoder 28 | } 29 | 30 | _#internalGroupVersionerIdentifier: "internal" 31 | _#disabledGroupVersionerIdentifier: "disabled" 32 | 33 | _#internalGroupVersioner: { 34 | } 35 | 36 | _#disabledGroupVersioner: { 37 | } 38 | -------------------------------------------------------------------------------- /deploy/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/monogon-dev/netmeta/deploy 2 | 3 | go 1.19 4 | 5 | require ( 6 | k8s.io/api v0.25.3 7 | k8s.io/apiextensions-apiserver v0.25.3 8 | ) 9 | 10 | require ( 11 | github.com/go-logr/logr v1.2.3 // indirect 12 | github.com/gogo/protobuf v1.3.2 // indirect 13 | github.com/google/gofuzz v1.2.0 // indirect 14 | github.com/json-iterator/go v1.1.12 // indirect 15 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 16 | github.com/modern-go/reflect2 v1.0.2 // indirect 17 | golang.org/x/net v0.0.0-20220927171203-f486391704dc // indirect 18 | golang.org/x/text v0.3.7 // indirect 19 | gopkg.in/inf.v0 v0.9.1 // indirect 20 | gopkg.in/yaml.v2 v2.4.0 // indirect 21 | k8s.io/apimachinery v0.25.3 // indirect 22 | k8s.io/klog/v2 v2.70.1 // indirect 23 | k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect 24 | sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect 25 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 26 | ) 27 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 
2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | // Timestamp is a struct that is equivalent to Time, but intended for 8 | // protobuf marshalling/unmarshalling. It is generated into a serialization 9 | // that matches Time. Do not use in Go structs. 10 | #Timestamp: { 11 | // Represents seconds of UTC time since Unix epoch 12 | // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 13 | // 9999-12-31T23:59:59Z inclusive. 14 | seconds: int64 @go(Seconds) @protobuf(1,varint,opt) 15 | 16 | // Non-negative fractions of a second at nanosecond resolution. Negative 17 | // second values with fractions must still have non-negative nanos values 18 | // that count forward in time. Must be from 0 to 999,999,999 19 | // inclusive. This field may be limited in precision depending on context. 20 | nanos: int32 @go(Nanos) @protobuf(2,varint,opt) 21 | } 22 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/watch_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | import ( 8 | "k8s.io/apimachinery/pkg/runtime" 9 | "k8s.io/apimachinery/pkg/watch" 10 | ) 11 | 12 | // Event represents a single event to a watched resource. 13 | // 14 | // +protobuf=true 15 | // +k8s:deepcopy-gen=true 16 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 17 | #WatchEvent: { 18 | type: string @go(Type) @protobuf(1,bytes,opt) 19 | 20 | // Object is: 21 | // * If Type is Added or Modified: the new state of the object. 22 | // * If Type is Deleted: the state of the object immediately before deletion. 23 | // * If Type is Error: *Status is recommended; other types may make sense 24 | // depending on context. 
25 | object: runtime.#RawExtension @go(Object) @protobuf(2,bytes,opt) 26 | } 27 | 28 | // InternalEvent makes watch.Event versioned 29 | // +protobuf=false 30 | #InternalEvent: watch.#Event 31 | -------------------------------------------------------------------------------- /cue.mod/pkg/netmeta.monogon.tech/xml/xml.cue: -------------------------------------------------------------------------------- 1 | package xml 2 | 3 | import "text/template" 4 | 5 | _template: #""" 6 | {{- define "node" -}} 7 | {{- range $key, $value := . -}} 8 | {{- $type := $value | printf "%#T" -}} 9 | {{- if eq "map[string]interface {}" $type -}} 10 | {{- if eq 0 (len $value) -}} 11 | <{{ $key }}/> 12 | {{- else -}} 13 | <{{ $key }}>{{ template "node" $value }} 14 | {{- end -}} 15 | {{- else if eq "[]interface {}" $type -}} 16 | <{{ $key }}> 17 | {{- range $v := $value -}} 18 | {{- template "node" $v -}} 19 | {{- end -}} 20 | 21 | {{- else if eq nil $value -}} 22 | <{{ $key }}/> 23 | {{- else -}} 24 | <{{ $key }}>{{ $value }} 25 | {{- end -}} 26 | {{- end -}} 27 | {{- end -}} 28 | 29 | {{ template "node" . }} 30 | """# 31 | 32 | #Marshal: { 33 | IN=in: _ 34 | out: template.Execute(_template, IN) 35 | } 36 | 37 | _xmlTest: #Marshal & { 38 | in: { 39 | a: b: c: "d" 40 | a: c: {} 41 | } 42 | } 43 | 44 | _xmlTest: out: "d" 45 | -------------------------------------------------------------------------------- /.aspect/bazelrc/bazel7.bazelrc: -------------------------------------------------------------------------------- 1 | # Speed up all builds by not checking if external repository files have been modified. 2 | # Docs: https://github.com/bazelbuild/bazel/blob/1af61b21df99edc2fc66939cdf14449c2661f873/src/main/java/com/google/devtools/build/lib/bazel/repository/RepositoryOptions.java#L244 3 | common --noexperimental_check_external_repository_files 4 | 5 | # Don't report when the root module's lower bound for a dependency happens to be less than the resolved version. 
6 | # This is expected and should NOT prompt an engineer to update our lower bound to match. 7 | # WARNING: For repository 'aspect_bazel_lib', the root module requires module version aspect_bazel_lib@1.30.2, 8 | # but got aspect_bazel_lib@1.31.2 in the resolved dependency graph. 9 | common --check_direct_dependencies=off 10 | 11 | # Directories used by sandboxed non-worker execution may be reused to avoid unnecessary setup costs. 12 | # Save time on Sandbox creation and deletion when many of the same kind of action run during the 13 | # build. 14 | # Docs: https://bazel.build/reference/command-line-reference#flag--reuse_sandbox_directories 15 | build --reuse_sandbox_directories 16 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/api/resource/amount_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/api/resource 4 | 5 | package resource 6 | 7 | // Scale is used for getting and setting the base-10 scaled value. 8 | // Base-2 scales are omitted for mathematical simplicity. 9 | // See Quantity.ScaledValue for more details. 10 | #Scale: int32 // #enumScale 11 | 12 | #enumScale: 13 | #Nano | 14 | #Micro | 15 | #Milli | 16 | #Kilo | 17 | #Mega | 18 | #Giga | 19 | #Tera | 20 | #Peta | 21 | #Exa 22 | 23 | #values_Scale: { 24 | Nano: #Nano 25 | Micro: #Micro 26 | Milli: #Milli 27 | Kilo: #Kilo 28 | Mega: #Mega 29 | Giga: #Giga 30 | Tera: #Tera 31 | Peta: #Peta 32 | Exa: #Exa 33 | } 34 | 35 | #Nano: #Scale & -9 36 | #Micro: #Scale & -6 37 | #Milli: #Scale & -3 38 | #Kilo: #Scale & 3 39 | #Mega: #Scale & 6 40 | #Giga: #Scale & 9 41 | #Tera: #Scale & 12 42 | #Peta: #Scale & 15 43 | #Exa: #Scale & 18 44 | 45 | // infDecAmount implements common operations over an inf.Dec that are specific to the quantity 46 | // representation. 
47 | _#infDecAmount: string 48 | -------------------------------------------------------------------------------- /cmd/risinfo/BUILD.bazel: -------------------------------------------------------------------------------- 1 | load("@rules_go//go:def.bzl", "go_binary", "go_library") 2 | load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load") 3 | load("@rules_pkg//pkg:tar.bzl", "pkg_tar") 4 | 5 | go_library( 6 | name = "risinfo_lib", 7 | srcs = ["risinfo.go"], 8 | importpath = "github.com/monogon-dev/netmeta/cmd/risinfo", 9 | visibility = ["//visibility:private"], 10 | deps = [ 11 | "@com_github_osrg_gobgp//pkg/packet/bgp", 12 | "@com_github_osrg_gobgp//pkg/packet/mrt", 13 | "@io_k8s_klog_v2//:klog", 14 | ], 15 | ) 16 | 17 | go_binary( 18 | name = "risinfo", 19 | embed = [":risinfo_lib"], 20 | visibility = ["//visibility:public"], 21 | ) 22 | 23 | pkg_tar( 24 | name = "risinfo_tar", 25 | srcs = [":risinfo"], 26 | ) 27 | 28 | oci_image( 29 | name = "risinfo_image", 30 | base = "@distroless_base", 31 | entrypoint = ["/risinfo"], 32 | tars = [":risinfo_tar"], 33 | visibility = ["//visibility:public"], 34 | ) 35 | 36 | oci_load( 37 | name = "risinfo_load", 38 | image = ":risinfo_image", 39 | repo_tags = ["github.com/monogon-dev/netmeta/risinfo:latest"], 40 | visibility = ["//visibility:public"], 41 | ) 42 | -------------------------------------------------------------------------------- /deploy/single-node/BUILD.bazel: -------------------------------------------------------------------------------- 1 | load("//build/cue:def.bzl", "cue_export") 2 | load("//build/cue/local-images:def.bzl", "cue_local_images") 3 | 4 | filegroup( 5 | name = "all_files", 6 | srcs = glob(["**/*"]), 7 | path = "", 8 | visibility = ["//deploy:__pkg__"], 9 | ) 10 | 11 | cue_local_images( 12 | name = "local_images", 13 | images = { 14 | "risinfo": "//cmd/risinfo:risinfo_load", 15 | "goflow": "//third_party/goflow:goflow_load", 16 | "portmirror": "//cmd/portmirror:portmirror_load", 17 | 
"reconciler": "//cmd/reconciler:reconciler_load", 18 | "grafana": "//third_party/grafana:grafana_load", 19 | }, 20 | ) 21 | 22 | # To allow short name 23 | alias( 24 | name = "single-node", 25 | actual = ":manifests", 26 | ) 27 | 28 | cue_export( 29 | name = "manifests", 30 | srcs = [ 31 | ":local_images", 32 | "//deploy:all_files", 33 | ], 34 | out = "text", 35 | args = [ 36 | "./deploy/single-node:k8s", 37 | "$(location :local_images)", 38 | ], 39 | expression = "all_objects_yaml", 40 | outfile = "netmeta.yaml", 41 | ) 42 | 43 | # TODO: add generator for initial config_local 44 | # TODO: clean up go.mod and deps.go 45 | # TODO: add nix flake? 46 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/risinfo/risinfo.cue: -------------------------------------------------------------------------------- 1 | package risinfo 2 | 3 | #Config: { 4 | digest: string 5 | image: string 6 | } 7 | 8 | Service: risinfo: spec: { 9 | ports: [{ 10 | name: "risinfo" 11 | protocol: "TCP" 12 | port: 80 13 | targetPort: "api" 14 | }] 15 | selector: Deployment["risinfo"].spec.selector.matchLabels 16 | } 17 | 18 | PersistentVolumeClaim: "risinfo-cache": {} 19 | 20 | Deployment: risinfo: { 21 | M=metadata: labels: app: "risinfo" 22 | 23 | spec: { 24 | selector: matchLabels: app: M.labels.app 25 | template: { 26 | metadata: labels: app: M.labels.app 27 | 28 | // Trigger redeployment when digest changes. 
29 | metadata: annotations: "meta/local-image-digest": #Config.digest 30 | 31 | spec: containers: [{ 32 | name: "risinfo" 33 | imagePullPolicy: "Never" 34 | image: #Config.image 35 | args: ["-cacheDir=/cache"] 36 | ports: [{ 37 | name: "api" 38 | containerPort: 8080 39 | protocol: "TCP" 40 | }] 41 | 42 | volumeMounts: [{ 43 | mountPath: "/cache" 44 | name: "risinfo-cache" 45 | }]}, 46 | ] 47 | 48 | spec: volumes: [{ 49 | name: "risinfo-cache" 50 | persistentVolumeClaim: claimName: "risinfo-cache" 51 | }] 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /cue.mod/gen/github.com/monogon-dev/netmeta/reconciler/config_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go command-line-arguments 4 | 5 | package main 6 | 7 | // Function contains all information to create or benchmark a UDF inside Clickhouse 8 | #Function: { 9 | name: string @go(Name) 10 | arguments: [...string] @go(Arguments,[]string) 11 | query: string @go(Query) 12 | } 13 | 14 | // MaterializedView contains all information to create a MaterializedView inside Clickhouse 15 | // It also allows to benchmark the select statement inside the MV 16 | #MaterializedView: { 17 | name: string @go(Name) 18 | to: string @go(To) 19 | from: string @go(From) 20 | query: string @go(Query) 21 | } 22 | 23 | #Settings: [string]: _ 24 | 25 | // Table contains the Name, the Type, additional Settings and 26 | // a reference to a Message inside the Protobuf file 27 | #Table: { 28 | name: string @go(Name) 29 | schema: string @go(Schema) 30 | engine: string @go(Engine) 31 | settings: #Settings @go(Settings) 32 | } 33 | 34 | #Config: { 35 | database: string @go(Database) 36 | functions: [...#Function] @go(Functions,[]Function) 37 | materialized_views: [...#MaterializedView] @go(MaterializedViews,[]MaterializedView) 38 | source_tables: [...#Table] 
@go(SourceTables,[]Table) 39 | } 40 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/portmirror/deployment.cue: -------------------------------------------------------------------------------- 1 | package portmirror 2 | 3 | import "net" 4 | 5 | #Config: { 6 | digest: string 7 | image: string 8 | interfaces: string 9 | sampleRate: int 10 | samplerAddress: net.IP 11 | } 12 | 13 | Deployment: portmirror: { 14 | M=metadata: labels: app: "portmirror" 15 | spec: { 16 | strategy: type: "Recreate" 17 | selector: matchLabels: app: M.labels.app 18 | template: { 19 | metadata: labels: app: M.labels.app 20 | 21 | // Trigger redeployment when digest changes. 22 | metadata: annotations: "meta/local-image-digest": #Config.digest 23 | 24 | spec: { 25 | // We need to get access to the Host Interfaces 26 | hostNetwork: true 27 | dnsPolicy: "ClusterFirstWithHostNet" 28 | containers: [ 29 | { 30 | name: "portmirror" 31 | image: #Config.image 32 | 33 | args: [ 34 | "-transport.kafka.brokers=netmeta-kafka-bootstrap:9092", 35 | "-transport.kafka.log.err=true", 36 | "-transport.kafka.brokers=netmeta-kafka-bootstrap:9092", 37 | "-format.protobuf.fixedlen=true", 38 | "-loglevel=debug", 39 | "-iface=\(#Config.interfaces)", 40 | "-samplerate=\(#Config.sampleRate)", 41 | "-sampler-address=\(#Config.samplerAddress)", 42 | ] 43 | securityContext: capabilities: add: ["NET_ADMIN"] 44 | }, 45 | ] 46 | } 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /cmd/reconciler/BUILD.bazel: -------------------------------------------------------------------------------- 1 | load("@rules_go//go:def.bzl", "go_binary", "go_library") 2 | load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load") 3 | load("@rules_pkg//pkg:tar.bzl", "pkg_tar") 4 | 5 | go_library( 6 | name = "reconciler_lib", 7 | srcs = [ 8 | "config.go", 9 | "main.go", 10 | "placeholder.s", 11 | "quote.go", 12 | "reconciler.go", 13 | 
"visitor.go", 14 | ], 15 | importpath = "github.com/monogon-dev/netmeta/cmd/reconciler", 16 | visibility = ["//visibility:private"], 17 | deps = [ 18 | "@com_github_clickhouse_clickhouse_go_v2//:clickhouse-go", 19 | "@com_github_emicklei_proto//:proto", 20 | "@com_github_huandu_go_sqlbuilder//:go-sqlbuilder", 21 | ], 22 | ) 23 | 24 | go_binary( 25 | name = "reconciler", 26 | embed = [":reconciler_lib"], 27 | visibility = ["//visibility:public"], 28 | ) 29 | 30 | pkg_tar( 31 | name = "reconciler_tar", 32 | srcs = [":reconciler"], 33 | ) 34 | 35 | oci_image( 36 | name = "reconciler_image", 37 | base = "@distroless_base", 38 | entrypoint = ["/reconciler"], 39 | tars = [":reconciler_tar"], 40 | visibility = ["//visibility:public"], 41 | ) 42 | 43 | oci_load( 44 | name = "reconciler_load", 45 | image = ":reconciler_image", 46 | repo_tags = ["github.com/monogon-dev/netmeta/reconciler:latest"], 47 | visibility = ["//visibility:public"], 48 | ) 49 | -------------------------------------------------------------------------------- /deploy/single-node/config_legacy.cue: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | #InterfaceMap: { 4 | // Router source address (IPv6 or pseudo-IPv4 mapped address like ::ffff:100.0.0.1, and for the portmirror ::ffff:100.0.0.1) 5 | device: string 6 | // Numeric interface Index (often known as the "SNMP ID") 7 | idx: uint 8 | // Human-readable interface description to show in the frontend 9 | description: string 10 | } 11 | 12 | // deprecated config parameter 13 | #LegacyNetMetaConfig: { 14 | // List of router interfaces to resolve to names 15 | interfaceMap: [...#InterfaceMap] 16 | } 17 | 18 | #InterfaceMapConverter: { 19 | IN=in: [...#InterfaceMap] 20 | out: #SamplerConfig 21 | 22 | out: { 23 | for i in IN { 24 | "\(i.device)": interface: "\(i.idx)": description: "\(i.description)" 25 | } 26 | } 27 | } 28 | 29 | // A small test to verify that the #InterfaceMapConverter converts the 
structs correctly 30 | _interfaceMapConverterTest: #InterfaceMapConverter & { 31 | in: [ 32 | {device: "::ffff:100.0.0.1", idx: 858, description: "TRANSIT-ABC"}, 33 | {device: "::ffff:100.0.0.1", idx: 1126, description: "PEERING-YOLO-COLO"}, 34 | ] 35 | 36 | out: close({ 37 | "::ffff:100.0.0.1": { 38 | interface: "858": description: "TRANSIT-ABC" 39 | interface: "1126": description: "PEERING-YOLO-COLO" 40 | } 41 | }) 42 | } 43 | 44 | // set all interfaces that are defined in the old interfaceMap in the new schema 45 | netmeta: config: sampler: (#InterfaceMapConverter & {in: netmeta.config.interfaceMap}).out 46 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/meta_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | // TODO: move this, Object, List, and Type to a different package 8 | #ObjectMetaAccessor: _ 9 | 10 | // Object lets you work with object metadata from any of the versioned or 11 | // internal API objects. Attempting to set or retrieve a field on an object that does 12 | // not support that field (Name, UID, Namespace on lists) will be a no-op and return 13 | // a default value. 14 | #Object: _ 15 | 16 | // ListMetaAccessor retrieves the list interface from an object 17 | #ListMetaAccessor: _ 18 | 19 | // Common lets you work with core metadata from any of the versioned or 20 | // internal API objects. Attempting to set or retrieve a field on an object that does 21 | // not support that field will be a no-op and return a default value. 22 | // TODO: move this, and TypeMeta and ListMeta, to a different package 23 | #Common: _ 24 | 25 | // ListInterface lets you work with list metadata from any of the versioned or 26 | // internal API objects. 
Attempting to set or retrieve a field on an object that does 27 | // not support that field will be a no-op and return a default value. 28 | // TODO: move this, and TypeMeta and ListMeta, to a different package 29 | #ListInterface: _ 30 | 31 | // Type exposes the type and APIVersion of versioned or internal API objects. 32 | // TODO: move this, and TypeMeta and ListMeta, to a different package 33 | #Type: _ 34 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/watch/watch_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/watch 4 | 5 | package watch 6 | 7 | import "k8s.io/apimachinery/pkg/runtime" 8 | 9 | // Interface can be implemented by anything that knows how to watch and report changes. 10 | #Interface: _ 11 | 12 | // EventType defines the possible types of events. 13 | #EventType: string // #enumEventType 14 | 15 | #enumEventType: 16 | #Added | 17 | #Modified | 18 | #Deleted | 19 | #Bookmark | 20 | #Error 21 | 22 | #Added: #EventType & "ADDED" 23 | #Modified: #EventType & "MODIFIED" 24 | #Deleted: #EventType & "DELETED" 25 | #Bookmark: #EventType & "BOOKMARK" 26 | #Error: #EventType & "ERROR" 27 | 28 | // Event represents a single event to a watched resource. 29 | // +k8s:deepcopy-gen=true 30 | #Event: { 31 | Type: #EventType 32 | 33 | // Object is: 34 | // * If Type is Added or Modified: the new state of the object. 35 | // * If Type is Deleted: the state of the object immediately before deletion. 36 | // * If Type is Bookmark: the object (instance of a type being watched) where 37 | // only ResourceVersion field is set. On successful restart of watch from a 38 | // bookmark resourceVersion, client is guaranteed to not get repeat event 39 | // nor miss any events. 
40 | // * If Type is Error: *api.Status is recommended; other types may make sense 41 | // depending on context. 42 | Object: runtime.#Object 43 | } 44 | 45 | // RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. 46 | #RaceFreeFakeWatcher: { 47 | Stopped: bool 48 | } 49 | -------------------------------------------------------------------------------- /.aspect/bazelrc/convenience.bazelrc: -------------------------------------------------------------------------------- 1 | # Attempt to build & test every target whose prerequisites were successfully built. 2 | # Docs: https://bazel.build/docs/user-manual#keep-going 3 | build --keep_going 4 | 5 | # Output test errors to stderr so users don't have to `cat` or open test failure log files when test 6 | # fail. This makes the log noisier in exchange for reducing the time-to-feedback on test failures for 7 | # users. 8 | # Docs: https://bazel.build/docs/user-manual#test-output 9 | test --test_output=errors 10 | 11 | # Show the output files created by builds that requested more than one target. This helps users 12 | # locate the build outputs in more cases 13 | # Docs: https://bazel.build/docs/user-manual#show-result 14 | build --show_result=20 15 | 16 | # Bazel picks up host-OS-specific config lines from bazelrc files. For example, if the host OS is 17 | # Linux and you run bazel build, Bazel picks up lines starting with build:linux. Supported OS 18 | # identifiers are `linux`, `macos`, `windows`, `freebsd`, and `openbsd`. Enabling this flag is 19 | # equivalent to using `--config=linux` on Linux, `--config=windows` on Windows, etc. 20 | # Docs: https://bazel.build/reference/command-line-reference#flag--enable_platform_specific_config 21 | common --enable_platform_specific_config 22 | 23 | # Output a heap dump if an OOM is thrown during a Bazel invocation 24 | # (including OOMs due to `--experimental_oom_more_eagerly_threshold`). 25 | # The dump will be written to `/.heapdump.hprof`. 
26 | # You may need to configure CI to capture this artifact and upload for later use. 27 | # Docs: https://bazel.build/reference/command-line-reference#flag--heap_dump_on_oom 28 | common --heap_dump_on_oom 29 | -------------------------------------------------------------------------------- /cmd/portmirror/BUILD.bazel: -------------------------------------------------------------------------------- 1 | load("@rules_go//go:def.bzl", "go_binary", "go_library") 2 | load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load") 3 | load("@rules_pkg//pkg:tar.bzl", "pkg_tar") 4 | 5 | go_library( 6 | name = "portmirror_lib", 7 | srcs = [ 8 | "frame.go", 9 | "iface.go", 10 | "portmirror.go", 11 | ], 12 | importpath = "github.com/monogon-dev/netmeta/cmd/portmirror", 13 | visibility = ["//visibility:private"], 14 | deps = [ 15 | "@com_github_gopacket_gopacket//:gopacket", 16 | "@com_github_gopacket_gopacket//afpacket", 17 | "@com_github_gopacket_gopacket//layers", 18 | "@com_github_netsampler_goflow2//format", 19 | "@com_github_netsampler_goflow2//format/protobuf", 20 | "@com_github_netsampler_goflow2//pb", 21 | "@com_github_netsampler_goflow2//transport", 22 | "@com_github_netsampler_goflow2//transport/kafka", 23 | "@com_github_sirupsen_logrus//:logrus", 24 | "@com_github_vishvananda_netlink//:netlink", 25 | ], 26 | ) 27 | 28 | go_binary( 29 | name = "portmirror", 30 | embed = [":portmirror_lib"], 31 | visibility = ["//visibility:public"], 32 | ) 33 | 34 | pkg_tar( 35 | name = "portmirror_tar", 36 | srcs = [":portmirror"], 37 | ) 38 | 39 | oci_image( 40 | name = "portmirror_image", 41 | base = "@distroless_base", 42 | entrypoint = ["/portmirror"], 43 | tars = [":portmirror_tar"], 44 | visibility = ["//visibility:public"], 45 | ) 46 | 47 | oci_load( 48 | name = "portmirror_load", 49 | image = ":portmirror_image", 50 | repo_tags = ["github.com/monogon-dev/netmeta/portmirror:latest"], 51 | visibility = ["//visibility:public"], 52 | ) 53 | 
-------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/doc_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | // Package runtime includes helper functions for working with API objects 6 | // that follow the kubernetes API object conventions, which are: 7 | // 8 | // 0. Your API objects have a common metadata struct member, TypeMeta. 9 | // 10 | // 1. Your code refers to an internal set of API objects. 11 | // 12 | // 2. In a separate package, you have an external set of API objects. 13 | // 14 | // 3. The external set is considered to be versioned, and no breaking 15 | // changes are ever made to it (fields may be added but not changed 16 | // or removed). 17 | // 18 | // 4. As your api evolves, you'll make an additional versioned package 19 | // with every major change. 20 | // 21 | // 5. Versioned packages have conversion functions which convert to 22 | // and from the internal version. 23 | // 24 | // 6. You'll continue to support older versions according to your 25 | // deprecation policy, and you can easily provide a program/library 26 | // to update old versions into new versions because of 5. 27 | // 28 | // 7. All of your serializations and deserializations are handled in a 29 | // centralized place. 30 | // 31 | // Package runtime provides a conversion helper to make 5 easy, and the 32 | // Encode/Decode/DecodeInto trio to accomplish 7. You can also register 33 | // additional "codecs" which use a version of your choice. It's 34 | // recommended that you register your types with runtime in your 35 | // package's init function. 36 | // 37 | // As a bonus, a few common types useful from all api objects and versions 38 | // are provided in types.go. 
39 | package runtime 40 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/types/nodename_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/types 4 | 5 | package types 6 | 7 | // NodeName is a type that holds a api.Node's Name identifier. 8 | // Being a type captures intent and helps make sure that the node name 9 | // is not confused with similar concepts (the hostname, the cloud provider id, 10 | // the cloud provider name etc) 11 | // 12 | // To clarify the various types: 13 | // 14 | // - Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. 15 | // Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. 16 | // 17 | // - Hostname is the hostname of the local machine (from uname -n). 18 | // However, some components allow the user to pass in a --hostname-override flag, 19 | // which will override this in most places. In the absence of anything more meaningful, 20 | // kubelet will use Hostname as the Node.Name when it creates the Node. 21 | // 22 | // * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. 23 | // 24 | // For GCE, InstanceName is the Name of an Instance object in the GCE API. On GCE, Instance.Name becomes the 25 | // Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up 26 | // to the cloudprovider how to do this mapping. 27 | // 28 | // For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the 29 | // PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if 30 | // we are using a custom DHCP domain it won't be. 
31 | #NodeName: string 32 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/api/core/v1/well_known_taints_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/api/core/v1 4 | 5 | package v1 6 | 7 | // TaintNodeNotReady will be added when node is not ready 8 | // and removed when node becomes ready. 9 | #TaintNodeNotReady: "node.kubernetes.io/not-ready" 10 | 11 | // TaintNodeUnreachable will be added when node becomes unreachable 12 | // (corresponding to NodeReady status ConditionUnknown) 13 | // and removed when node becomes reachable (NodeReady status ConditionTrue). 14 | #TaintNodeUnreachable: "node.kubernetes.io/unreachable" 15 | 16 | // TaintNodeUnschedulable will be added when node becomes unschedulable 17 | // and removed when node becomes schedulable. 18 | #TaintNodeUnschedulable: "node.kubernetes.io/unschedulable" 19 | 20 | // TaintNodeMemoryPressure will be added when node has memory pressure 21 | // and removed when node has enough memory. 22 | #TaintNodeMemoryPressure: "node.kubernetes.io/memory-pressure" 23 | 24 | // TaintNodeDiskPressure will be added when node has disk pressure 25 | // and removed when node has enough disk. 26 | #TaintNodeDiskPressure: "node.kubernetes.io/disk-pressure" 27 | 28 | // TaintNodeNetworkUnavailable will be added when node's network is unavailable 29 | // and removed when network becomes ready. 30 | #TaintNodeNetworkUnavailable: "node.kubernetes.io/network-unavailable" 31 | 32 | // TaintNodePIDPressure will be added when node has pid pressure 33 | // and removed when node has enough pid. 
34 | #TaintNodePIDPressure: "node.kubernetes.io/pid-pressure" 35 | 36 | // TaintNodeOutOfService can be added when node is out of service in case of 37 | // a non-graceful shutdown 38 | #TaintNodeOutOfService: "node.kubernetes.io/out-of-service" 39 | -------------------------------------------------------------------------------- /deploy/k8s-base/traefik/kubernetes-crd-rbac.cue: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | ClusterRole: "traefik-ingress-controller": { 4 | apiVersion: "rbac.authorization.k8s.io/v1" 5 | kind: "ClusterRole" 6 | metadata: name: "traefik-ingress-controller" 7 | 8 | rules: [{ 9 | apiGroups: [ 10 | "", 11 | ] 12 | resources: [ 13 | "services", 14 | "endpoints", 15 | "secrets", 16 | ] 17 | verbs: [ 18 | "get", 19 | "list", 20 | "watch", 21 | ] 22 | }, { 23 | apiGroups: [ 24 | "extensions", 25 | "networking.k8s.io", 26 | ] 27 | resources: [ 28 | "ingresses", 29 | "ingressclasses", 30 | ] 31 | verbs: [ 32 | "get", 33 | "list", 34 | "watch", 35 | ] 36 | }, { 37 | apiGroups: [ 38 | "extensions", 39 | "networking.k8s.io", 40 | ] 41 | resources: [ 42 | "ingresses/status", 43 | ] 44 | verbs: [ 45 | "update", 46 | ] 47 | }, { 48 | apiGroups: [ 49 | "traefik.containo.us", 50 | ] 51 | resources: [ 52 | "middlewares", 53 | "middlewaretcps", 54 | "ingressroutes", 55 | "traefikservices", 56 | "ingressroutetcps", 57 | "ingressrouteudps", 58 | "tlsoptions", 59 | "tlsstores", 60 | "serverstransports", 61 | ] 62 | verbs: [ 63 | "get", 64 | "list", 65 | "watch", 66 | ] 67 | }] 68 | } 69 | ClusterRoleBinding: "traefik-ingress-controller": { 70 | apiVersion: "rbac.authorization.k8s.io/v1" 71 | kind: "ClusterRoleBinding" 72 | metadata: name: "traefik-ingress-controller" 73 | 74 | roleRef: { 75 | apiGroup: "rbac.authorization.k8s.io" 76 | kind: "ClusterRole" 77 | name: "traefik-ingress-controller" 78 | } 79 | subjects: [{ 80 | kind: "ServiceAccount" 81 | name: "traefik-ingress-controller" 82 | 
namespace: "default" 83 | }] 84 | } 85 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/reconciler/reconciler.cue: -------------------------------------------------------------------------------- 1 | package reconciler 2 | 3 | import ( 4 | reconciler "github.com/monogon-dev/netmeta/reconciler:main" 5 | "encoding/json" 6 | "encoding/hex" 7 | "crypto/sha256" 8 | ) 9 | 10 | #Config: { 11 | digest: string 12 | image: string 13 | files: [string]: string 14 | config: reconciler.#Config 15 | databaseHost: string 16 | databaseUser: string 17 | databasePass: string 18 | } 19 | 20 | ConfigMap: "reconciler-config": data: { 21 | for name, content in #Config.files { 22 | "\(name)": content 23 | } 24 | "config.json": json.Marshal(#Config.config) 25 | } 26 | 27 | Deployment: reconciler: { 28 | M=metadata: labels: app: "reconciler" 29 | 30 | spec: { 31 | selector: matchLabels: app: M.labels.app 32 | template: { 33 | metadata: labels: app: M.labels.app 34 | 35 | // Trigger redeployment when digest changes. 
36 | metadata: annotations: "meta/local-image-digest": #Config.digest 37 | metadata: annotations: "meta/config-digest": hex.Encode(sha256.Sum256(json.Marshal(ConfigMap["reconciler-config"].data))) 38 | 39 | spec: containers: [{ 40 | name: "reconciler" 41 | imagePullPolicy: "Never" 42 | image: #Config.image 43 | env: [{ 44 | name: "DB_HOST" 45 | value: #Config.databaseHost 46 | }, { 47 | name: "DB_USER" 48 | value: #Config.databaseUser 49 | }, { 50 | name: "DB_PASS" 51 | value: #Config.databasePass 52 | }] 53 | workingDir: "/app" 54 | 55 | volumeMounts: [ for f, _ in ConfigMap["reconciler-config"].data { 56 | mountPath: "/app/\(f)" 57 | subPath: "\(f)" 58 | name: "reconciler-config" 59 | }] 60 | }] 61 | 62 | spec: volumes: [{ 63 | name: "reconciler-config" 64 | configMap: name: "reconciler-config" 65 | }] 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /BUILD.bazel: -------------------------------------------------------------------------------- 1 | load("@buildifier_prebuilt//:rules.bzl", "buildifier", "buildifier_test") 2 | load("@gazelle//:def.bzl", "gazelle", "gazelle_test") 3 | load("@rules_multirun//:defs.bzl", "command", "multirun") 4 | 5 | gazelle(name = "gazelle") 6 | 7 | gazelle_test( 8 | name = "gazelle_test", 9 | workspace = "//:MODULE.bazel", 10 | ) 11 | 12 | BUILDIFIER_EXCLUDES = [ 13 | "./.git/*", 14 | "./.ijwb/*", 15 | ] 16 | 17 | # Buildifier formats all Starlark files. 
18 | buildifier( 19 | name = "buildifier", 20 | exclude_patterns = BUILDIFIER_EXCLUDES, 21 | lint_mode = "fix", 22 | mode = "fix", 23 | ) 24 | 25 | buildifier_test( 26 | name = "buildifier_test", 27 | exclude_patterns = BUILDIFIER_EXCLUDES, 28 | lint_mode = "warn", 29 | no_sandbox = True, 30 | workspace = "//:MODULE.bazel", 31 | ) 32 | 33 | # Shortcut for the Go SDK 34 | alias( 35 | name = "go", 36 | actual = "@rules_go//go", 37 | visibility = ["//visibility:public"], 38 | ) 39 | 40 | alias( 41 | name = "cue", 42 | actual = "@org_cuelang_go//cmd/cue", 43 | visibility = ["//visibility:public"], 44 | ) 45 | 46 | command( 47 | name = "go-mod-tidy", 48 | arguments = [ 49 | "mod", 50 | "tidy", 51 | ], 52 | command = ":go", 53 | ) 54 | 55 | # Shortcut to update go.mod, gazelle files and formatting. 56 | multirun( 57 | name = "tidy", 58 | commands = [ 59 | ":go-mod-tidy", 60 | ":gazelle", 61 | ":buildifier", 62 | ], 63 | ) 64 | 65 | platform( 66 | name = "linux_x86", 67 | constraint_values = [ 68 | "@platforms//os:linux", 69 | "@platforms//cpu:x86_64", 70 | ], 71 | ) 72 | 73 | platform( 74 | name = "linux_arm64", 75 | constraint_values = [ 76 | "@platforms//os:linux", 77 | "@platforms//cpu:arm64", 78 | ], 79 | ) 80 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/goflow/deployment.cue: -------------------------------------------------------------------------------- 1 | package goflow 2 | 3 | import "strings" 4 | 5 | #Config: { 6 | digest: string 7 | image: string 8 | ports: { 9 | netflow: int 10 | netflowLegacy: int 11 | sflow: int 12 | } 13 | } 14 | 15 | Deployment: goflow: { 16 | M=metadata: labels: app: "goflow" 17 | spec: { 18 | strategy: type: "Recreate" 19 | selector: matchLabels: app: M.labels.app 20 | template: { 21 | metadata: labels: app: M.labels.app 22 | 23 | // Trigger redeployment when digest changes. 
24 | metadata: annotations: "meta/local-image-digest": #Config.digest 25 | 26 | spec: { 27 | // k3s does not support IPv6 networking, so we run goflow in the host network namespace. 28 | hostNetwork: true 29 | dnsPolicy: "ClusterFirstWithHostNet" 30 | containers: [ 31 | { 32 | name: "goflow" 33 | image: #Config.image 34 | 35 | // Removed in 58175b24, explicitly zero it for backwards compatibility. 36 | command: [] 37 | 38 | args: [ 39 | "-format=pb", 40 | "-transport=kafka", 41 | "-transport.kafka.brokers=netmeta-kafka-bootstrap:9092", 42 | "-format.protobuf.fixedlen=true", 43 | "-loglevel=debug", 44 | "-metrics.addr=127.0.0.1:18080", 45 | "-listen=" + strings.Join([ 46 | "sflow://:\(#Config.ports.sflow)", 47 | "netflow://:\(#Config.ports.netflow)", 48 | "nfl://:\(#Config.ports.netflowLegacy)", 49 | ], ","), 50 | ] 51 | 52 | ports: [ 53 | {name: "netflow-legacy", containerPort: #Config.ports.netflowLegacy, protocol: "UDP"}, 54 | {name: "netflow", containerPort: #Config.ports.netflow, protocol: "UDP"}, 55 | {name: "sflow", containerPort: #Config.ports.sflow, protocol: "UDP"}, 56 | ] 57 | }, 58 | ] 59 | } 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /deploy/dashboards/General_Home.cue: -------------------------------------------------------------------------------- 1 | package dashboards 2 | 3 | dashboards: Home: { 4 | #defaultParams: false 5 | title: "Home" 6 | uid: "NJoydihVk" 7 | _panels: [{ 8 | gridPos: {h: 3, w: 24, x: 0, y: 0} 9 | options: { 10 | content: 11 | #""" 12 |
13 |

Welcome to NetMeta

14 |
15 |

Need help?

16 |
17 | 19 | Issue Tracker 20 | 21 | 23 | Contact Us 24 | 25 |
26 |
27 |
28 | """# 29 | mode: "html" 30 | } 31 | type: "text" 32 | title: "" 33 | }, { 34 | gridPos: {h: 6, w: 12, x: 0, y: 3} 35 | options: { 36 | maxItems: 0 37 | tags: ["netmeta"] 38 | } 39 | title: "NetMeta Dashboards" 40 | type: "dashlist" 41 | }, { 42 | gridPos: {h: 9, w: 12, x: 0, y: 9} 43 | options: { 44 | query: "Clickhouse -" 45 | } 46 | title: "Clickhouse Dashboards" 47 | type: "dashlist" 48 | }, { 49 | gridPos: {h: 15, w: 12, x: 12, y: 3} 50 | options: { 51 | feedUrl: "https://netmeta-cache.leoluk.de/v1/releases.atom" 52 | showImage: true 53 | } 54 | title: "NetMeta News" 55 | type: "news" 56 | }] 57 | } 58 | -------------------------------------------------------------------------------- /cmd/portmirror/frame.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net" 5 | 6 | "github.com/gopacket/gopacket" 7 | "github.com/gopacket/gopacket/layers" 8 | ) 9 | 10 | func macToUint64(mac net.HardwareAddr) (u uint64) { 11 | return uint64(mac[0])<<40 | uint64(mac[1])<<32 | (uint64(mac[2])<<24 | uint64(mac[3])<<16 | uint64(mac[4])<<8 | uint64(mac[5])) 12 | } 13 | 14 | type frameInfo struct { 15 | EthernetType uint32 16 | SrcMAC net.HardwareAddr 17 | DstMAC net.HardwareAddr 18 | 19 | Protocol uint32 20 | SrcIP net.IP 21 | DstIP net.IP 22 | IPTOS uint8 23 | IPTTL uint8 24 | FlowLabel uint32 25 | 26 | TCPFlags uint32 27 | SrcPort uint32 28 | DstPort uint32 29 | SeqNum uint32 30 | } 31 | 32 | func readFrameInfo(packet gopacket.Packet) (info frameInfo) { 33 | for _, layer := range packet.Layers() { 34 | switch layer.(type) { 35 | case *layers.Ethernet: 36 | info.EthernetType = uint32(layer.(*layers.Ethernet).EthernetType) 37 | info.SrcMAC = layer.(*layers.Ethernet).SrcMAC 38 | info.DstMAC = layer.(*layers.Ethernet).DstMAC 39 | 40 | case *layers.IPv4: 41 | info.Protocol = uint32(layer.(*layers.IPv4).Protocol) 42 | info.SrcIP = layer.(*layers.IPv4).SrcIP 43 | info.DstIP = layer.(*layers.IPv4).DstIP 44 | 
info.IPTOS = layer.(*layers.IPv4).TOS 45 | info.IPTTL = layer.(*layers.IPv4).TTL 46 | 47 | case *layers.IPv6: 48 | info.Protocol = uint32(layer.(*layers.IPv6).NextHeader) 49 | info.SrcIP = layer.(*layers.IPv6).SrcIP 50 | info.DstIP = layer.(*layers.IPv6).DstIP 51 | info.FlowLabel = layer.(*layers.IPv6).FlowLabel 52 | 53 | case *layers.TCP: 54 | info.SrcPort = uint32(layer.(*layers.TCP).SrcPort) 55 | info.DstPort = uint32(layer.(*layers.TCP).DstPort) 56 | info.TCPFlags = uint32(layer.(*layers.TCP).Contents[13]) 57 | info.SeqNum = layer.(*layers.TCP).Seq 58 | 59 | case *layers.UDP: 60 | info.SrcPort = uint32(layer.(*layers.UDP).SrcPort) 61 | info.DstPort = uint32(layer.(*layers.UDP).DstPort) 62 | } 63 | } 64 | 65 | return 66 | } 67 | -------------------------------------------------------------------------------- /.aspect/bazelrc/performance.bazelrc: -------------------------------------------------------------------------------- 1 | # Don't apply `--noremote_upload_local_results` and `--noremote_accept_cached` to the disk cache. 2 | # If you have both `--noremote_upload_local_results` and `--disk_cache`, then this fixes a bug where 3 | # Bazel doesn't write to the local disk cache as it treats as a remote cache. 4 | # Docs: https://bazel.build/reference/command-line-reference#flag--incompatible_remote_results_ignore_disk 5 | build --incompatible_remote_results_ignore_disk 6 | 7 | # Directories used by sandboxed non-worker execution may be reused to avoid unnecessary setup costs. 8 | # Save time on Sandbox creation and deletion when many of the same kind of action run during the 9 | # build. 
10 | # No longer experimental in Bazel 6: https://github.com/bazelbuild/bazel/commit/c1a95501a5611878e5cc43a3cc531f2b9e47835b 11 | # Docs: https://bazel.build/reference/command-line-reference#flag--reuse_sandbox_directories 12 | build --experimental_reuse_sandbox_directories 13 | 14 | # Do not build runfiles symlink forests for external repositories under 15 | # `.runfiles/wsname/external/repo` (in addition to `.runfiles/repo`). This reduces runfiles & 16 | # sandbox creation times & prevents accidentally depending on this feature which may flip to off by 17 | # default in the future. Note, some rules may fail under this flag, please file issues with the rule 18 | # author. 19 | # Docs: https://bazel.build/reference/command-line-reference#flag--legacy_external_runfiles 20 | build --nolegacy_external_runfiles 21 | 22 | # Avoid creating a runfiles tree for binaries or tests until it is needed. 23 | # Docs: https://bazel.build/reference/command-line-reference#flag--build_runfile_links 24 | # See https://github.com/bazelbuild/bazel/issues/6627 25 | # 26 | # This may break local workflows that `build` a binary target, then run the resulting program 27 | # outside of `bazel run`. In those cases, the script will need to call 28 | # `bazel build --build_runfile_links //my/binary:target` and then execute the resulting program. 
29 | build --nobuild_runfile_links 30 | -------------------------------------------------------------------------------- /deploy/single-node/schema/0001_create_flows_raw.sql: -------------------------------------------------------------------------------- 1 | -- +goose Up 2 | CREATE TABLE IF NOT EXISTS flows_raw 3 | ( 4 | -- Generated Date field for partitioning 5 | Date Date, 6 | 7 | -- Raw fields 8 | 9 | FlowType Enum8( 10 | 'FLOWUNKNOWN' = 0, 11 | 'SFLOW_5' = 1, 12 | 'NETFLOW_V5' = 2, 13 | 'NETFLOW_V9' = 3, 14 | 'IPFIX' = 4 15 | ), 16 | 17 | SequenceNum UInt64, 18 | 19 | TimeReceived UInt64, 20 | SamplingRate UInt64, 21 | 22 | FlowDirection UInt8, 23 | 24 | SamplerAddress IPv6, 25 | 26 | TimeFlowStart UInt64, 27 | TimeFlowEnd UInt64, 28 | 29 | Bytes UInt64, 30 | Packets UInt64, 31 | 32 | SrcAddr IPv6, 33 | DstAddr IPv6, 34 | 35 | EType UInt16, 36 | 37 | Proto UInt8, 38 | 39 | SrcPort UInt32, 40 | DstPort UInt32, 41 | 42 | InIf UInt32, 43 | OutIf UInt32, 44 | 45 | SrcMac UInt64, 46 | DstMac UInt64, 47 | 48 | SrcVlan UInt32, 49 | DstVlan UInt32, 50 | VlanId UInt32, 51 | 52 | IngressVrfId UInt32, 53 | EgressVrfId UInt32, 54 | 55 | IPTos UInt8, 56 | ForwardingStatus UInt8, 57 | IPTTL UInt8, 58 | TCPFlags UInt8, 59 | IcmpType UInt8, 60 | IcmpCode UInt8, 61 | IPv6FlowLabel UInt32, 62 | 63 | FragmentId UInt32, 64 | FragmentOffset UInt32, 65 | 66 | BiFlowDirection UInt8, 67 | 68 | SrcAS UInt32, 69 | DstAS UInt32, 70 | 71 | NextHop IPv6, 72 | NextHopAS UInt32, 73 | 74 | SrcNet UInt8, 75 | DstNet UInt8 76 | ) ENGINE = MergeTree() 77 | PARTITION BY Date 78 | ORDER BY (TimeReceived, FlowDirection, SamplerAddress, SrcAS, DstAS, SrcAddr, DstAddr) 79 | TTL Date + INTERVAL 1 WEEK; 80 | 81 | -- +goose Down 82 | DROP TABLE IF EXISTS flows_raw; -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/apis/meta/v1/group_version_go_gen.cue: -------------------------------------------------------------------------------- 1 
| // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/apis/meta/v1 4 | 5 | package v1 6 | 7 | // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying 8 | // concepts during lookup stages without having partially valid types 9 | // 10 | // +protobuf.options.(gogoproto.goproto_stringer)=false 11 | #GroupResource: { 12 | group: string @go(Group) @protobuf(1,bytes,opt) 13 | resource: string @go(Resource) @protobuf(2,bytes,opt) 14 | } 15 | 16 | // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion 17 | // to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling 18 | // 19 | // +protobuf.options.(gogoproto.goproto_stringer)=false 20 | #GroupVersionResource: { 21 | group: string @go(Group) @protobuf(1,bytes,opt) 22 | version: string @go(Version) @protobuf(2,bytes,opt) 23 | resource: string @go(Resource) @protobuf(3,bytes,opt) 24 | } 25 | 26 | // GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying 27 | // concepts during lookup stages without having partially valid types 28 | // 29 | // +protobuf.options.(gogoproto.goproto_stringer)=false 30 | #GroupKind: { 31 | group: string @go(Group) @protobuf(1,bytes,opt) 32 | kind: string @go(Kind) @protobuf(2,bytes,opt) 33 | } 34 | 35 | // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion 36 | // to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling 37 | // 38 | // +protobuf.options.(gogoproto.goproto_stringer)=false 39 | #GroupVersionKind: { 40 | group: string @go(Group) @protobuf(1,bytes,opt) 41 | version: string @go(Version) @protobuf(2,bytes,opt) 42 | kind: string @go(Kind) @protobuf(3,bytes,opt) 43 | } 44 | 45 | // GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
46 | // 47 | // +protobuf.options.(gogoproto.goproto_stringer)=false 48 | #GroupVersion: _ 49 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/kafka/kafka.cue: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | #Config: { 4 | goflowTopicRetention: int 5 | enableExternalKafkaListener: bool 6 | advertisedKafkaHost: string 7 | fastNetMon?: { 8 | topicRetention: int 9 | ... 10 | } 11 | } 12 | 13 | KafkaTopic: "flow-messages": { 14 | metadata: labels: "strimzi.io/cluster": "netmeta" 15 | spec: { 16 | partitions: 1 17 | replicas: 1 18 | config: "retention.bytes": "\(#Config.goflowTopicRetention)" 19 | } 20 | } 21 | 22 | if #Config.fastNetMon.topicRetention != _|_ { 23 | KafkaTopic: "fastnetmon": { 24 | metadata: labels: "strimzi.io/cluster": "netmeta" 25 | spec: { 26 | partitions: 1 27 | replicas: 1 28 | config: "retention.bytes": "\(#Config.fastNetMon.topicRetention)" 29 | } 30 | } 31 | } 32 | 33 | Kafka: "netmeta": spec: { 34 | kafka: { 35 | version: "3.2.3" 36 | replicas: 1 37 | listeners: [{ 38 | name: "plain" 39 | port: 9092 40 | type: "internal" 41 | tls: false 42 | }, { 43 | name: "tls" 44 | port: 9093 45 | type: "internal" 46 | tls: true 47 | authentication: type: "tls" 48 | }, 49 | if #Config.enableExternalKafkaListener { 50 | { 51 | name: "external" 52 | port: 9094 53 | type: "nodeport" 54 | tls: false 55 | configuration: bootstrap: nodePort: 32100 56 | configuration: brokers: [{broker: 0, advertisedHost: #Config.advertisedKafkaHost}] 57 | } 58 | }, 59 | ] 60 | config: { 61 | "offsets.topic.replication.factor": 1 62 | "transaction.state.log.replication.factor": 1 63 | "transaction.state.log.min.isr": 1 64 | "log.message.format.version": "2.4" 65 | } 66 | storage: { 67 | type: "jbod" 68 | volumes: [{ 69 | id: 0 70 | type: "persistent-claim" 71 | size: "100Gi" 72 | deleteClaim: false 73 | }] 74 | } 75 | jvmOptions: javaSystemProperties: [{name: 
"log4j2.formatMsgNoLookups", value: "true"}] 76 | } 77 | zookeeper: { 78 | replicas: 1 79 | storage: { 80 | type: "persistent-claim" 81 | size: "100Gi" 82 | deleteClaim: false 83 | } 84 | jvmOptions: javaSystemProperties: [{name: "log4j2.formatMsgNoLookups", value: "true"}] 85 | } 86 | entityOperator: { 87 | topicOperator: { 88 | jvmOptions: javaSystemProperties: [{name: "log4j2.formatMsgNoLookups", value: "true"}] 89 | } 90 | userOperator: { 91 | jvmOptions: javaSystemProperties: [{name: "log4j2.formatMsgNoLookups", value: "true"}] 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /deploy/single-node/schema/traffic_data.proto: -------------------------------------------------------------------------------- 1 | // FastNetMon traffic format 2 | // https://github.com/pavel-odintsov/fastnetmon/blob/master/src/traffic_output_formats/protobuf/traffic_data.proto 3 | syntax = "proto3"; 4 | 5 | enum TrafficDirection { 6 | // Value is not set 7 | TRAFFIC_DIRECTION_UNKNOWN = 0; 8 | 9 | // Traffic is coming to our address space 10 | TRAFFIC_DIRECTION_INCOMING = 1; 11 | 12 | // Traffic is coming from our address space 13 | TRAFFIC_DIRECTION_OUTGOING = 2; 14 | 15 | // Traffic where both source and destination IPs do not belong to our address space or non IP traffic (for example ARP) 16 | TRAFFIC_DIRECTION_OTHER = 3; 17 | 18 | // Traffic is going from our address space to our address space 19 | TRAFFIC_DIRECTION_INTERNAL = 4; 20 | }; 21 | 22 | enum TelemetryType { 23 | TELEMETRY_TYPE_UNKNOWN = 0; 24 | TELEMETRY_TYPE_MIRROR = 1; 25 | TELEMETRY_TYPE_SFLOW = 2; 26 | TELEMETRY_TYPE_NETFLOW = 3; 27 | TELEMETRY_TYPE_TERA_FLOW = 4; 28 | } 29 | 30 | // Our unified flow - packet message 31 | message TrafficData { 32 | // Timestamp in seconds 33 | uint64 timestamp_seconds = 1; 34 | 35 | // Timestamp in milliseconds 36 | uint64 timestamp_milliseconds = 2; 37 | 38 | // Type of plugin which received traffic 39 | TelemetryType 
telemetry_type = 3; 40 | 41 | // IP protocol version: 4 or 6 42 | uint32 ip_version = 4; 43 | 44 | TrafficDirection traffic_direction = 5; 45 | 46 | // Sampling ratio 47 | uint64 sampling_ratio = 6; 48 | 49 | // Protocol field from IP packet 50 | uint32 protocol = 7; 51 | 52 | // Source and destination IPs for IPv4 (4 bytes) and IPv6 (16 bytes) 53 | bytes source_ip = 8; 54 | bytes destination_ip = 9; 55 | 56 | // Ports for UDP and TCP protocols 57 | uint32 source_port = 10; 58 | uint32 destination_port = 11; 59 | 60 | // Number of transferred packets 61 | uint64 packets = 12; 62 | 63 | // Total length in bytes for transferred packets 64 | uint64 octets = 13; 65 | 66 | // TTL for IPv4 or Hop Limit for IPv6 67 | uint32 ttl = 14; 68 | 69 | // TCP flags encoded in bit set 70 | uint32 tcp_flags = 15; 71 | 72 | bool ip_fragmented = 16; 73 | bool ip_dont_fragment = 17; 74 | 75 | // Input and output interfaces 76 | uint64 input_interface = 18; 77 | uint64 output_interface = 19; 78 | 79 | // Autonomous system numbers 80 | uint32 source_asn = 20; 81 | uint32 destination_asn = 21; 82 | 83 | // IPv4 or IPv6 address of device which sent traffic data 84 | bytes agent_address = 22; 85 | } 86 | -------------------------------------------------------------------------------- /third_party/grafana/defs.bzl: -------------------------------------------------------------------------------- 1 | load("@rules_oci//oci:defs.bzl", "oci_image", "oci_load") 2 | load("@rules_pkg//pkg:tar.bzl", "pkg_tar") 3 | 4 | # TODO(tim): Migrate this to a bazel extension 5 | _grafana_plugin_attrs = { 6 | "version": attr.string( 7 | mandatory = True, 8 | doc = "The plugin version", 9 | ), 10 | "plugin_name": attr.string( 11 | mandatory = True, 12 | doc = "The plugin name", 13 | ), 14 | } 15 | 16 | def _grafana_plugin_impl(ctx): 17 | ctx.download_and_extract( 18 | url = "https://grafana.com/api/plugins/%s/versions/%s/download?os=%s&arch=%s" % (ctx.attr.plugin_name, ctx.attr.version, os(ctx), arch(ctx)), 19 | 
# Pinned Grafana release. The download URL and the archive strip prefix below
# must always refer to the same release, so it is defined exactly once here.
_GRAFANA_VERSION = "11.4.0"

def _grafana_plugin_impl(ctx):
    """Downloads a Grafana plugin zip for the host OS/arch and exposes all of its files."""
    ctx.download_and_extract(
        url = "https://grafana.com/api/plugins/%s/versions/%s/download?os=%s&arch=%s" % (
            ctx.attr.plugin_name,
            ctx.attr.version,
            os(ctx),
            arch(ctx),
        ),
        type = "zip",
    )

    ctx.file("BUILD.bazel", """
package(default_visibility = ["//visibility:public"])

filegroup(
    name = "files",
    srcs = glob(["**"]),
)
""")

grafana_plugin = repository_rule(
    attrs = _grafana_plugin_attrs,
    implementation = _grafana_plugin_impl,
)

def _grafana_impl(ctx):
    """Downloads the pinned Grafana OSS release for the host OS/arch."""
    ctx.download_and_extract(
        url = "https://dl.grafana.com/oss/release/grafana-{VERSION}.{OS}-{ARCH}.tar.gz".format(
            VERSION = _GRAFANA_VERSION,
            OS = os(ctx),
            ARCH = arch(ctx),
        ),
        # Upstream tarballs extract into a "grafana-v<version>" directory even
        # though the file name has no "v" prefix.
        stripPrefix = "grafana-v" + _GRAFANA_VERSION,
    )
    ctx.file("BUILD.bazel", 'exports_files(["bin/grafana-server"])')

grafana = repository_rule(
    implementation = _grafana_impl,
)

def os(ctx):
    """Maps the repository context's OS name to Grafana's download naming scheme."""
    os = ctx.os.name.lower()
    if os == "mac os x":
        return "darwin"
    return os

def arch(ctx):
    """Maps the repository context's CPU architecture to Grafana's download naming scheme."""
    arch = ctx.os.arch.lower()
    if arch == "aarch64":
        return "arm64"
    return arch

def grafana_plugin_layer(name):
    """Packages the external repo @<name> as a tar layer rooted at Grafana's plugin dir.

    Returns the label of the generated pkg_tar target so it can be used in
    an oci_image `tars` list.
    """
    pkg_tar(
        name = name,
        srcs = ["@{}//:files".format(name)],
        package_dir = "/var/lib/grafana/plugins/{}".format(name),
    )

    return ":{}".format(name)

def grafana_image():
    """Declares the Grafana OCI image (base release + plugin layers) and an oci_load target."""
    oci_image(
        name = "grafana_image",
        base = "@grafana",
        tars = [
            grafana_plugin_layer("netsage-sankey-panel"),
            grafana_plugin_layer("grafana-clickhouse-datasource"),
        ],
        visibility = ["//visibility:public"],
    )

    oci_load(
        name = "grafana_load",
        image = ":grafana_image",
        repo_tags = ["github.com/monogon-dev/netmeta/grafana:latest"],
        visibility = ["//visibility:public"],
    )
// QuoteSingleQuote returns s as a single-quoted string literal, escaping its
// contents exactly like strconv.Quote would but using ' as the delimiter.
func QuoteSingleQuote(s string) string {
	return quoteWith(s, '\'')
}

// The code below is derived from strconv/quote.go, trimmed down to the
// features needed here.

const lowerhex = "0123456789abcdef"

// quoteWith wraps s in the given quote byte, escaping as needed.
func quoteWith(s string, quote byte) string {
	// 3*len(s)/2 leaves headroom for a modest amount of escaping.
	return string(appendQuotedWith(make([]byte, 0, 3*len(s)/2), s, quote))
}

// appendQuotedWith appends the quoted form of s to buf and returns the
// extended buffer.
func appendQuotedWith(buf []byte, s string, quote byte) []byte {
	// Grow once up front: the result is at least len(s)+2 bytes, and growing
	// here avoids repeated reallocation for large inputs.
	if cap(buf)-len(buf) < len(s) {
		grown := make([]byte, len(buf), len(buf)+1+len(s)+1)
		copy(grown, buf)
		buf = grown
	}
	buf = append(buf, quote)
	for i := 0; i < len(s); {
		r, size := rune(s[i]), 1
		if r >= utf8.RuneSelf {
			r, size = utf8.DecodeRuneInString(s[i:])
		}
		if size == 1 && r == utf8.RuneError {
			// A lone invalid UTF-8 byte is emitted as \xNN.
			buf = append(buf, '\\', 'x', lowerhex[s[i]>>4], lowerhex[s[i]&0xF])
		} else {
			buf = appendEscapedRune(buf, r, quote)
		}
		i += size
	}
	return append(buf, quote)
}

// appendEscapedRune appends r to buf, escaped as needed inside a string
// delimited by quote.
func appendEscapedRune(buf []byte, r rune, quote byte) []byte {
	// The delimiter and backslash are always backslash-escaped.
	if r == rune(quote) || r == '\\' {
		return append(buf, '\\', byte(r))
	}
	if strconv.IsPrint(r) {
		return utf8.AppendRune(buf, r)
	}
	// Well-known control characters get their mnemonic escapes.
	switch r {
	case '\a':
		return append(buf, `\a`...)
	case '\b':
		return append(buf, `\b`...)
	case '\f':
		return append(buf, `\f`...)
	case '\n':
		return append(buf, `\n`...)
	case '\r':
		return append(buf, `\r`...)
	case '\t':
		return append(buf, `\t`...)
	case '\v':
		return append(buf, `\v`...)
	}
	// Everything else is hex-escaped at the narrowest width that fits.
	switch {
	case r < ' ' || r == 0x7f:
		return append(buf, '\\', 'x', lowerhex[byte(r)>>4], lowerhex[byte(r)&0xF])
	case !utf8.ValidRune(r):
		r = 0xFFFD
		fallthrough
	case r < 0x10000:
		buf = append(buf, `\u`...)
		for shift := 12; shift >= 0; shift -= 4 {
			buf = append(buf, lowerhex[r>>uint(shift)&0xF])
		}
	default:
		buf = append(buf, `\U`...)
		for shift := 28; shift >= 0; shift -= 4 {
			buf = append(buf, lowerhex[r>>uint(shift)&0xF])
		}
	}
	return buf
}
76 | default: 77 | switch { 78 | case r < ' ' || r == 0x7f: 79 | buf = append(buf, `\x`...) 80 | buf = append(buf, lowerhex[byte(r)>>4]) 81 | buf = append(buf, lowerhex[byte(r)&0xF]) 82 | case !utf8.ValidRune(r): 83 | r = 0xFFFD 84 | fallthrough 85 | case r < 0x10000: 86 | buf = append(buf, `\u`...) 87 | for s := 12; s >= 0; s -= 4 { 88 | buf = append(buf, lowerhex[r>>uint(s)&0xF]) 89 | } 90 | default: 91 | buf = append(buf, `\U`...) 92 | for s := 28; s >= 0; s -= 4 { 93 | buf = append(buf, lowerhex[r>>uint(s)&0xF]) 94 | } 95 | } 96 | } 97 | return buf 98 | } 99 | -------------------------------------------------------------------------------- /cmd/portmirror/portmirror.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "net" 7 | "os" 8 | "os/signal" 9 | "sync" 10 | "time" 11 | 12 | _ "github.com/netsampler/goflow2/format/protobuf" 13 | "github.com/netsampler/goflow2/transport" 14 | _ "github.com/netsampler/goflow2/transport/kafka" 15 | "github.com/sirupsen/logrus" 16 | ) 17 | 18 | var ( 19 | interfaces string 20 | sampleRate int 21 | logLevel string 22 | workerCount int 23 | fanoutBase int 24 | samplerAddressString string 25 | samplerAddress net.IP 26 | ) 27 | 28 | func init() { 29 | flag.StringVar(&interfaces, "iface", "", "which interface to use in the following format: RX_NAME:TX_NAME,RX_NAME:TX_NAME") 30 | flag.IntVar(&sampleRate, "samplerate", 1000, "the samplerate to use") 31 | flag.StringVar(&logLevel, "loglevel", "info", "Log level") 32 | flag.IntVar(&workerCount, "workercount", 8, "Number of workers per interface") 33 | flag.IntVar(&fanoutBase, "fanoutBase", 42, "fanout group base id") 34 | flag.StringVar(&samplerAddressString, "sampler-address", "127.0.0.1", "The address the instance use as SamplerAddress") 35 | } 36 | 37 | func main() { 38 | flag.Parse() 39 | 40 | lvl, _ := logrus.ParseLevel(logLevel) 41 | logrus.SetLevel(lvl) 42 | 43 | samplerAddress = 
net.ParseIP(samplerAddressString) 44 | if samplerAddress == nil { 45 | logrus.Fatalf("invalid sampler-address provided: %q", samplerAddressString) 46 | } 47 | 48 | tapPairs := loadConfig() 49 | 50 | kafka, err := transport.FindTransport(context.Background(), "kafka") 51 | if err != nil { 52 | logrus.Fatal(err) 53 | } 54 | 55 | var startGroup, endGroup sync.WaitGroup 56 | startGroup.Add(workerCount * len(tapPairs) * 2) 57 | endGroup.Add(workerCount * len(tapPairs) * 2) 58 | 59 | ctx, cancelFunc := context.WithCancel(context.Background()) 60 | for _, tp := range tapPairs { 61 | logrus.Infof("Starting workers on pair: RX: %q - TX: %q", tp.RX.name, tp.TX.name) 62 | for i := 0; i < workerCount; i++ { 63 | go tp.TX.Worker(ctx, &startGroup, &endGroup, kafka) 64 | } 65 | 66 | for i := 0; i < workerCount; i++ { 67 | go tp.RX.Worker(ctx, &startGroup, &endGroup, kafka) 68 | } 69 | } 70 | logrus.Infof("Waiting for workers to become ready...") 71 | startGroup.Wait() 72 | logrus.Infof("Lets go!") 73 | 74 | c := make(chan os.Signal, 1) 75 | signal.Notify(c, os.Interrupt) 76 | 77 | <-c 78 | logrus.Println("Got Interrupt. 
Exiting...") 79 | cancelFunc() 80 | 81 | logrus.Println("Waiting a maxiumum of 10 Seconds for goroutines to shutdown") 82 | timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second) 83 | go func() { 84 | endGroup.Wait() 85 | cancel() 86 | }() 87 | 88 | defer cancel() 89 | <-timeout.Done() 90 | } 91 | -------------------------------------------------------------------------------- /cmd/reconciler/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "flag" 7 | "fmt" 8 | "github.com/ClickHouse/clickhouse-go/v2" 9 | "github.com/emicklei/proto" 10 | "github.com/huandu/go-sqlbuilder" 11 | "log" 12 | "os" 13 | "strings" 14 | "time" 15 | ) 16 | 17 | var ( 18 | cfg = flag.String("config", "config.json", "The config file to use") 19 | dbHost = mustEnv("DB_HOST") 20 | dbUser = mustEnv("DB_USER") 21 | dbPass = mustEnv("DB_PASS") 22 | ) 23 | 24 | func loadConfig() (*Config, error) { 25 | cfgFile, err := os.Open(*cfg) 26 | if err != nil { 27 | return nil, err 28 | } 29 | defer cfgFile.Close() 30 | 31 | var cfg Config 32 | d := json.NewDecoder(cfgFile) 33 | d.DisallowUnknownFields() 34 | if err := d.Decode(&cfg); err != nil { 35 | return nil, err 36 | } 37 | 38 | return &cfg, nil 39 | } 40 | 41 | func mustEnv(name string) string { 42 | v, found := os.LookupEnv(name) 43 | if !found { 44 | log.Fatalf("missing env: %s", name) 45 | } 46 | 47 | return v 48 | } 49 | 50 | func main() { 51 | flag.Parse() 52 | 53 | cfg, err := loadConfig() 54 | if err != nil { 55 | log.Fatalf("loading config: %v", err) 56 | } 57 | 58 | conn, err := clickhouse.Open(&clickhouse.Options{ 59 | Addr: []string{dbHost}, 60 | Auth: clickhouse.Auth{ 61 | Database: cfg.Database, 62 | Username: dbUser, 63 | Password: dbPass, 64 | }, 65 | }) 66 | if err != nil { 67 | log.Fatal(err) 68 | } 69 | 70 | if err := conn.Ping(context.Background()); err != nil { 71 | log.Fatal(err) 72 | } 73 | 74 | r 
:= &Reconciler{ 75 | conn: conn, 76 | cfg: cfg, 77 | } 78 | 79 | for { 80 | if err := r.Reconcile(); err != nil { 81 | log.Println(err) 82 | } 83 | log.Println("reconcile done. sleeping for some time") 84 | time.Sleep(1 * time.Minute) 85 | } 86 | } 87 | 88 | func loadTableSchema(schema string, builder *sqlbuilder.CreateTableBuilder) error { 89 | n := strings.SplitN(schema, ":", 2) 90 | if len(n) != 2 { 91 | return fmt.Errorf("invalid source table schema: %v", schema) 92 | } 93 | 94 | path, msgName := n[0], n[1] 95 | f, err := os.Open(path) 96 | if err != nil { 97 | return err 98 | } 99 | defer f.Close() 100 | 101 | parser := proto.NewParser(f) 102 | definition, err := parser.Parse() 103 | if err != nil { 104 | return err 105 | } 106 | 107 | cv := &clickhouseVisitor{ 108 | enumTypes: make(map[string]map[string]int), 109 | builder: builder, 110 | } 111 | 112 | var msg *proto.Message 113 | proto.Walk(definition, 114 | proto.WithEnum(func(e *proto.Enum) { 115 | e.Accept(cv) 116 | }), 117 | proto.WithMessage(func(message *proto.Message) { 118 | if message.Name == msgName { 119 | msg = message 120 | } 121 | })) 122 | if msg == nil { 123 | return fmt.Errorf("can't find message inside %q: %v", path, msgName) 124 | } 125 | 126 | // we have to evaluate the message after the proto.Walk 127 | // else it's not guaranteed that all enums are discovered 128 | msg.Accept(cv) 129 | 130 | return nil 131 | } 132 | -------------------------------------------------------------------------------- /cmd/reconciler/visitor.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "math" 7 | "sort" 8 | "strings" 9 | 10 | "github.com/emicklei/proto" 11 | "github.com/huandu/go-sqlbuilder" 12 | ) 13 | 14 | type clickhouseVisitor struct { 15 | proto.NoopVisitor 16 | enumTypes map[string]map[string]int 17 | builder *sqlbuilder.CreateTableBuilder 18 | } 19 | 20 | func (c *clickhouseVisitor) VisitMessage(m 
*proto.Message) { 21 | for _, v := range m.Elements { 22 | v.Accept(c) 23 | } 24 | } 25 | 26 | func (c *clickhouseVisitor) VisitNormalField(i *proto.NormalField) { 27 | columnName := i.Name 28 | columnType := c.ToColumnType(i.Type) 29 | 30 | for _, option := range i.Options { 31 | switch option.Name { 32 | case "(column_type)": 33 | columnType = option.Constant.Source 34 | case "(column_name)": 35 | columnName = option.Constant.Source 36 | case "(column_skip)": 37 | // just exit the visitor and ignore the rest of the logic 38 | return 39 | default: 40 | log.Printf("unknown option %q on %q! Skipping!", option.Name, i.Name) 41 | } 42 | } 43 | 44 | c.builder.Define(columnName, columnType) 45 | } 46 | 47 | func (c *clickhouseVisitor) VisitEnumField(i *proto.EnumField) { 48 | c.enumTypes[i.Parent.(*proto.Enum).Name][i.Name] = i.Integer 49 | } 50 | 51 | func (c *clickhouseVisitor) VisitEnum(e *proto.Enum) { 52 | c.enumTypes[e.Name] = make(map[string]int) 53 | for _, v := range e.Elements { 54 | v.Accept(c) 55 | } 56 | } 57 | 58 | type enumOption struct { 59 | value int 60 | name string 61 | } 62 | 63 | func (e enumOption) String() string { 64 | return fmt.Sprintf("%s = %d", QuoteSingleQuote(e.name), e.value) 65 | } 66 | 67 | func (c *clickhouseVisitor) ToColumnType(t string) string { 68 | if enum, found := c.enumTypes[t]; found { 69 | var enumOptions []enumOption 70 | 71 | for s, i := range enum { 72 | enumOptions = append(enumOptions, enumOption{ 73 | value: i, 74 | name: s, 75 | }) 76 | } 77 | 78 | size := int(math.Log2(float64(len(enumOptions)))) 79 | switch { 80 | case size <= 8: 81 | size = 8 82 | case size <= 16: 83 | size = 16 84 | default: 85 | size = 0 86 | } 87 | 88 | t := fmt.Sprintf("Enum%d", size) 89 | if size == 0 { 90 | t = "Enum" 91 | } 92 | 93 | // Enums are sorted inside clickhouse 94 | sort.SliceStable(enumOptions, func(i, j int) bool { 95 | return enumOptions[i].value < enumOptions[j].value 96 | }) 97 | 98 | var enumOptionStrings []string 99 | for _, 
option := range enumOptions { 100 | enumOptionStrings = append(enumOptionStrings, option.String()) 101 | } 102 | 103 | return fmt.Sprintf("%s(%s)", t, strings.Join(enumOptionStrings, ", ")) 104 | } 105 | 106 | switch t { 107 | case "int32": 108 | return "Int32" 109 | case "uint32": 110 | return "UInt32" 111 | case "int64": 112 | return "Int64" 113 | case "uint64": 114 | return "UInt64" 115 | case "bytes": 116 | return "String" 117 | case "bool": 118 | return "Bool" 119 | case "string": 120 | return "String" 121 | default: 122 | panic("type not implemented: " + t) 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/api/core/v1/well_known_labels_go_gen.cue: -------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/api/core/v1 4 | 5 | package v1 6 | 7 | #LabelHostname: "kubernetes.io/hostname" 8 | #LabelTopologyZone: "topology.kubernetes.io/zone" 9 | #LabelTopologyRegion: "topology.kubernetes.io/region" 10 | 11 | // These label have been deprecated since 1.17, but will be supported for 12 | // the foreseeable future, to accommodate things like long-lived PVs that 13 | // use them. New users should prefer the "topology.kubernetes.io/*" 14 | // equivalents. 15 | #LabelFailureDomainBetaZone: "failure-domain.beta.kubernetes.io/zone" 16 | #LabelFailureDomainBetaRegion: "failure-domain.beta.kubernetes.io/region" 17 | 18 | // Retained for compat when vendored. Do not use these consts in new code. 
19 | #LabelZoneFailureDomain: "failure-domain.beta.kubernetes.io/zone" 20 | #LabelZoneRegion: "failure-domain.beta.kubernetes.io/region" 21 | #LabelZoneFailureDomainStable: "topology.kubernetes.io/zone" 22 | #LabelZoneRegionStable: "topology.kubernetes.io/region" 23 | #LabelInstanceType: "beta.kubernetes.io/instance-type" 24 | #LabelInstanceTypeStable: "node.kubernetes.io/instance-type" 25 | #LabelOSStable: "kubernetes.io/os" 26 | #LabelArchStable: "kubernetes.io/arch" 27 | 28 | // LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0. 29 | // It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763) 30 | #LabelWindowsBuild: "node.kubernetes.io/windows-build" 31 | 32 | // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) 33 | #LabelNamespaceSuffixKubelet: "kubelet.kubernetes.io" 34 | 35 | // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) 36 | #LabelNamespaceSuffixNode: "node.kubernetes.io" 37 | 38 | // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled 39 | #LabelNamespaceNodeRestriction: "node-restriction.kubernetes.io" 40 | 41 | // IsHeadlessService is added by Controller to an Endpoint denoting if its parent 42 | // Service is Headless. The existence of this label can be used further by other 43 | // controllers and kube-proxy to check if the Endpoint objects should be replicated when 44 | // using Headless Services 45 | #IsHeadlessService: "service.kubernetes.io/headless" 46 | 47 | // LabelNodeExcludeBalancers specifies that the node should not be considered as a target 48 | // for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only 49 | // understand nodes). 
For services that use externalTrafficPolicy=Local, this may mean that 50 | // any backends on excluded nodes are not reachable by those external load-balancers. 51 | // Implementations of this exclusion may vary based on provider. 52 | #LabelNodeExcludeBalancers: "node.kubernetes.io/exclude-from-external-load-balancers" 53 | 54 | // LabelMetadataName is the label name which, in-tree, is used to automatically label namespaces, so they can be selected easily by tools which require definitive labels 55 | #LabelMetadataName: "kubernetes.io/metadata.name" 56 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/traefik/deployment.cue: -------------------------------------------------------------------------------- 1 | package traefik 2 | 3 | import ( 4 | "list" 5 | ) 6 | 7 | #Config: { 8 | ports: { 9 | http: int 10 | https: int 11 | clickhouse: int 12 | } 13 | 14 | letsencryptMode: "staging" | "production" | "off" 15 | letsencryptAccountMail: string 16 | enableClickhouseIngress: bool 17 | } 18 | 19 | ServiceAccount: "traefik-ingress-controller": {} 20 | 21 | PersistentVolumeClaim: "traefik-data": {} 22 | 23 | TLSOption: default: spec: { 24 | minVersion: "VersionTLS12" 25 | cipherSuites: [ 26 | "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", 27 | "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", 28 | "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", 29 | "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", 30 | "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", 31 | "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", 32 | ] 33 | } 34 | 35 | Deployment: traefik: { 36 | metadata: labels: app: "traefik" 37 | spec: { 38 | replicas: 1 39 | strategy: type: "Recreate" 40 | selector: matchLabels: app: "traefik" 41 | template: { 42 | metadata: labels: app: "traefik" 43 | spec: { 44 | // The poor-man's LoadBalancer implementation in k3s does not support IPv6 and is limited to legacy IPv4. 
45 | // Bypass this inconvenience by running traefik in the host network namespace. 46 | hostNetwork: true 47 | serviceAccountName: "traefik-ingress-controller" 48 | containers: [{ 49 | name: "traefik" 50 | image: "docker.io/traefik:v2.9.1@sha256:3eaf50b7874f63567ee5d18498536928faef199302c805cb6b766e06b649302d" 51 | 52 | let _args = [ 53 | [ 54 | "--accesslog", 55 | "--entrypoints.web.Address=:\(#Config.ports.http)", 56 | "--entrypoints.websecure.Address=:\(#Config.ports.https)", 57 | "--providers.kubernetescrd", 58 | ], 59 | if #Config.letsencryptMode != "off" { 60 | [ 61 | "--certificatesresolvers.publicHostnameResolver.acme.tlschallenge", 62 | "--certificatesresolvers.publicHostnameResolver.acme.email=\(#Config.letsencryptAccountMail)", 63 | "--certificatesresolvers.publicHostnameResolver.acme.storage=/data/acme-\(#Config.letsencryptMode).json", 64 | ] 65 | }, 66 | if #Config.letsencryptMode == "staging" { 67 | [ 68 | "--certificatesresolvers.publicHostnameResolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory", 69 | ] 70 | }, 71 | if #Config.enableClickhouseIngress { 72 | [ 73 | "--entrypoints.clickhouse.Address=:\(#Config.ports.clickhouse)", 74 | ] 75 | }, 76 | ] 77 | 78 | args: list.FlattenN(_args, 1) 79 | 80 | ports: [{ 81 | name: "web" 82 | containerPort: #Config.ports.http 83 | protocol: "TCP" 84 | }, { 85 | name: "websecure" 86 | containerPort: #Config.ports.https 87 | protocol: "TCP" 88 | }, { 89 | name: "clickhouse" 90 | containerPort: #Config.ports.clickhouse 91 | protocol: "TCP" 92 | }] 93 | 94 | volumeMounts: [{ 95 | mountPath: "/data" 96 | name: "traefik-data" 97 | }] 98 | }] 99 | restartPolicy: "Always" 100 | volumes: [{ 101 | name: "traefik-data" 102 | persistentVolumeClaim: claimName: "traefik-data" 103 | }] 104 | } 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/runtime/types_go_gen.cue: 
-------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/runtime 4 | 5 | package runtime 6 | 7 | // TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, 8 | // like this: 9 | // 10 | // type MyAwesomeAPIObject struct { 11 | // runtime.TypeMeta `json:",inline"` 12 | // ... // other fields 13 | // } 14 | // 15 | // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind 16 | // 17 | // TypeMeta is provided here for convenience. You may use it directly from this package or define 18 | // your own with the same fields. 19 | // 20 | // +k8s:deepcopy-gen=false 21 | // +protobuf=true 22 | // +k8s:openapi-gen=true 23 | #TypeMeta: { 24 | // +optional 25 | apiVersion?: string @go(APIVersion) @protobuf(1,bytes,opt) 26 | 27 | // +optional 28 | kind?: string @go(Kind) @protobuf(2,bytes,opt) 29 | } 30 | 31 | #ContentTypeJSON: "application/json" 32 | #ContentTypeYAML: "application/yaml" 33 | #ContentTypeProtobuf: "application/vnd.kubernetes.protobuf" 34 | 35 | // RawExtension is used to hold extensions in external versions. 36 | // 37 | // To use this, make a field which has RawExtension as its type in your external, versioned 38 | // struct, and Object in your internal struct. You also need to register your 39 | // various plugin types. 
40 | // 41 | // // Internal package: 42 | // 43 | // type MyAPIObject struct { 44 | // runtime.TypeMeta `json:",inline"` 45 | // MyPlugin runtime.Object `json:"myPlugin"` 46 | // } 47 | // 48 | // type PluginA struct { 49 | // AOption string `json:"aOption"` 50 | // } 51 | // 52 | // // External package: 53 | // 54 | // type MyAPIObject struct { 55 | // runtime.TypeMeta `json:",inline"` 56 | // MyPlugin runtime.RawExtension `json:"myPlugin"` 57 | // } 58 | // 59 | // type PluginA struct { 60 | // AOption string `json:"aOption"` 61 | // } 62 | // 63 | // // On the wire, the JSON will look something like this: 64 | // 65 | // { 66 | // "kind":"MyAPIObject", 67 | // "apiVersion":"v1", 68 | // "myPlugin": { 69 | // "kind":"PluginA", 70 | // "aOption":"foo", 71 | // }, 72 | // } 73 | // 74 | // So what happens? Decode first uses json or yaml to unmarshal the serialized data into 75 | // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. 76 | // The next step is to copy (using pkg/conversion) into the internal struct. The runtime 77 | // package's DefaultScheme has conversion functions installed which will unpack the 78 | // JSON stored in RawExtension, turning it into the correct object type, and storing it 79 | // in the Object. (TODO: In the case where the object is of an unknown type, a 80 | // runtime.Unknown object will be created and stored.) 81 | // 82 | // +k8s:deepcopy-gen=true 83 | // +protobuf=true 84 | // +k8s:openapi-gen=true 85 | #RawExtension: _ 86 | 87 | // Unknown allows api objects with unknown types to be passed-through. This can be used 88 | // to deal with the API objects from a plug-in. Unknown objects still have functioning 89 | // TypeMeta features-- kind, version, etc. 90 | // TODO: Make this object have easy access to field based accessors and settors for 91 | // metadata and field mutatation. 
92 | // 93 | // +k8s:deepcopy-gen=true 94 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 95 | // +protobuf=true 96 | // +k8s:openapi-gen=true 97 | #Unknown: _ 98 | -------------------------------------------------------------------------------- /UPGRADE.md: -------------------------------------------------------------------------------- 1 | # Upgrade guidance 2 | ## Upgrade from stable to main 3 | 4 | Upgrading from the stable branch to the main branch, or upgrading on the main branch from a commit before Oct 20, 2022, requires a manual migration with downtime. 5 | 6 | ``` 7 | # Be sure to have apparmor-utils installed (apparmor_parser is required by k3s) 8 | 9 | ./install.sh 10 | 11 | ./scripts/build_containers.sh 12 | cd deploy/single-node/ 13 | 14 | # Edit your config_local.cue 15 | # Replace clickhouseOperatorPassword with clickhouseAdminPassword 16 | 17 | kubectl delete crd \ 18 | kafkaconnectors.kafka.strimzi.io \ 19 | kafkaconnectors.kafka.strimzi.io \ 20 | kafkaconnects.kafka.strimzi.io \ 21 | kafkamirrormaker2s.kafka.strimzi.io \ 22 | kafkas.kafka.strimzi.io \ 23 | kafkamirrormakers.kafka.strimzi.io \ 24 | kafkabridges.kafka.strimzi.io 25 | 26 | kubectl patch clickhouseinstallation netmeta --type json -p='[{"op": "remove", "path": "/spec/templates/podTemplates/0/podDistribution"}]' 27 | kubectl patch clickhouseinstallation netmeta --type json -p='[{"op": "remove", "path": "/spec/templates/podTemplates/0/zone/values"}]' 28 | kubectl patch clickhouseinstallation netmeta --type json -p='[{"op": "remove", "path": "/status/errors"}]' 29 | 30 | cue apply 31 | 32 | # Wait for pods to rollout 33 | kubectl get pods -w 34 | 35 | kubectl exec -i chi-netmeta-netmeta-0-0-0 -c clickhouse -- clickhouse-client -q 'DROP TABLE flows_queue' 36 | kubectl exec -i chi-netmeta-netmeta-0-0-0 -c clickhouse -- clickhouse-client -q 'DROP VIEW flows_raw_view' 37 | 38 | GOOSE_DRIVER=clickhouse \ 39 | GOOSE_DB_USER=admin \ 40 | GOOSE_DB_PASS=$(cue export -e 
netmeta.config.clickhouseAdminPassword --out text) \ 41 | GOOSE_DB_ADDR=$(kubectl get pod chi-netmeta-netmeta-0-0-0 --template '{{.status.podIP}}') \ 42 | GOOSE_DBSTRING="tcp://$GOOSE_DB_USER:$GOOSE_DB_PASS@$GOOSE_DB_ADDR:9000/default" \ 43 | GOOSE_MIGRATION_DIR="schema/" \ 44 | goose up 45 | 46 | # If you access NetMeta from external infrastructure (e.g. your own Grafana), you have to change the username from `clickhouse_operator` to `admin` 47 | ``` 48 | 49 | ## Migrate to IPv4-mapped IPv6 addresses 50 | This can take a long time... 51 | 52 | ```clickhouse 53 | INSERT INTO flows_raw 54 | WITH 55 | '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' AS IPv6v4NullPadding, 56 | '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' AS IPv6Null, 57 | '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff' AS IPv6v4RFCPadding 58 | SELECT * REPLACE ( 59 | if(startsWith(SamplerAddress, IPv6v4NullPadding), 60 | reinterpret(toFixedString(IPv6v4RFCPadding || substr(SamplerAddress, 13, 16), 16), 'IPv6'), SamplerAddress) AS SamplerAddress, 61 | if(startsWith(SrcAddr, IPv6v4NullPadding), 62 | reinterpret(toFixedString(IPv6v4RFCPadding || substr(SrcAddr, 13, 16), 16), 'IPv6'), SrcAddr) AS SrcAddr, 63 | if(startsWith(DstAddr, IPv6v4NullPadding), 64 | reinterpret(toFixedString(IPv6v4RFCPadding || substr(DstAddr, 13, 16), 16), 'IPv6'), DstAddr) AS DstAddr, 65 | if(startsWith(NextHop, IPv6v4NullPadding) and NextHop != IPv6Null, 66 | reinterpret(toFixedString(IPv6v4RFCPadding || substr(NextHop, 13, 16), 16), 'IPv6'), NextHop) AS NextHop 67 | ) 68 | FROM flows_raw; 69 | 70 | -- Delete data with the old non-compliant mapping 71 | ALTER TABLE flows_raw 72 | DELETE WHERE 73 | startsWith(SamplerAddress, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') OR 74 | startsWith(SrcAddr, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') OR 75 | startsWith(DstAddr, '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'); 76 | ``` 
// Function contains all information to create or benchmark a UDF inside Clickhouse
type Function struct {
	Name      string   `json:"name"`
	Arguments []string `json:"arguments"`
	Query     string   `json:"query"`
}

// CreateQuery renders the CREATE FUNCTION statement for this UDF.
func (f *Function) CreateQuery() string {
	args := strings.Join(f.Arguments, ", ")
	return "CREATE FUNCTION " + f.Name + " AS (" + args + ") -> " + f.Query
}

// CreateOrReplaceQuery renders the idempotent CREATE OR REPLACE FUNCTION
// statement for this UDF.
func (f *Function) CreateOrReplaceQuery() string {
	args := strings.Join(f.Arguments, ", ")
	return "CREATE OR REPLACE FUNCTION " + f.Name + " AS (" + args + ") -> " + f.Query
}

// MaterializedView contains all information to create a MaterializedView inside Clickhouse
// It also allows to benchmark the select statement inside the MV
type MaterializedView struct {
	Name  string `json:"name"`
	To    string `json:"to"`
	From  string `json:"from"`
	Query string `json:"query"`
}

// DropQuery renders the DROP VIEW statement for this view in the given database.
func (mv MaterializedView) DropQuery(database string) string {
	return "DROP VIEW " + database + "." + mv.Name
}

// SelectQuery returns the view's SELECT with the %%from%% placeholder
// resolved to the fully-qualified source table.
func (mv MaterializedView) SelectQuery(database string) string {
	return strings.Replace(mv.Query, "%%from%%", database+"."+mv.From, 1)
}

// CreateQuery renders the CREATE MATERIALIZED VIEW statement, resolving the
// %%from%% placeholder to the fully-qualified source table.
func (mv MaterializedView) CreateQuery(database string) string {
	stmt := "CREATE MATERIALIZED VIEW " + mv.Name + " TO " + mv.To + " AS " + mv.Query
	return strings.Replace(stmt, "%%from%%", database+"."+mv.From, 1)
}
fmt.Sprintf("%s = %d", k, v)) 57 | case float64: 58 | settings = append(settings, fmt.Sprintf("%s = %.0f", k, v)) 59 | case string: 60 | settings = append(settings, fmt.Sprintf("%s = %s", k, QuoteSingleQuote(v.(string)))) 61 | default: 62 | settings = append(settings, fmt.Sprintf("%s = %s", k, v)) 63 | } 64 | } 65 | 66 | // clickhouse sorts the settings internally 67 | sort.Strings(settings) 68 | 69 | return strings.Join(settings, ", ") 70 | } 71 | 72 | // Table contains the Name, the Type, additional Settings and 73 | // a reference to a Message inside the Protobuf file 74 | type Table struct { 75 | Name string `json:"name"` 76 | Schema string `json:"schema"` 77 | Engine string `json:"engine"` 78 | Settings Settings `json:"settings"` 79 | } 80 | 81 | func (t Table) CreateQuery(database string) (string, error) { 82 | builder := sqlbuilder. 83 | CreateTable(database + "." + t.Name) 84 | 85 | if err := loadTableSchema(t.Schema, builder); err != nil { 86 | return "", err 87 | } 88 | 89 | builder.SQL("ENGINE = " + t.Engine) 90 | 91 | if len(t.Settings) != 0 { 92 | builder.SQL("SETTINGS " + t.Settings.String()) 93 | } 94 | 95 | return builder.String(), nil 96 | } 97 | 98 | func (t Table) DropQuery(database string) string { 99 | return fmt.Sprintf("DROP TABLE %s.%s", database, t.Name) 100 | } 101 | 102 | type Config struct { 103 | Database string `json:"database"` 104 | Functions []Function `json:"functions"` 105 | MaterializedViews []MaterializedView `json:"materialized_views"` 106 | SourceTables []Table `json:"source_tables"` 107 | } 108 | -------------------------------------------------------------------------------- /.aspect/bazelrc/ci.bazelrc: -------------------------------------------------------------------------------- 1 | # Set this flag to enable re-tries of failed tests on CI. 2 | # When any test target fails, try one or more times. This applies regardless of whether the "flaky" 3 | # tag appears on the target definition. 
4 | # This is a tradeoff: legitimately failing tests will take longer to report, 5 | # but we can paper over flaky tests that pass most of the time. 6 | # The alternative is to mark every flaky test with the `flaky = True` attribute, but this requires 7 | # the buildcop to make frequent code edits. 8 | # Not recommended for local builds so that the flakiness is observed during development and thus 9 | # is more likely to get fixed. 10 | # Note that when passing after the first attempt, Bazel will give a special "FLAKY" status. 11 | # Docs: https://bazel.build/docs/user-manual#flaky-test-attempts 12 | test --flaky_test_attempts=2 13 | 14 | # Announce all announces command options read from the bazelrc file(s) when starting up at the 15 | # beginning of each Bazel invocation. This is very useful on CI to be able to inspect what Bazel rc 16 | # settings are being applied on each run. 17 | # Docs: https://bazel.build/docs/user-manual#announce-rc 18 | build --announce_rc 19 | 20 | # Add a timestamp to each message generated by Bazel specifying the time at which the message was 21 | # displayed. 22 | # Docs: https://bazel.build/docs/user-manual#show-timestamps 23 | build --show_timestamps 24 | 25 | # Only show progress every 60 seconds on CI. 26 | # We want to find a compromise between printing often enough to show that the build isn't stuck, 27 | # but not so often that we produce a long log file that requires a lot of scrolling. 28 | # https://bazel.build/reference/command-line-reference#flag--show_progress_rate_limit 29 | build --show_progress_rate_limit=60 30 | 31 | # Use cursor controls in screen output. 32 | # Docs: https://bazel.build/docs/user-manual#curses 33 | build --curses=yes 34 | 35 | # Use colors to highlight output on the screen. Set to `no` if your CI does not display colors. 36 | # Docs: https://bazel.build/docs/user-manual#color 37 | build --color=yes 38 | 39 | # The terminal width in columns. 
Configure this to override the default value based on what your CI system renders. 40 | # Docs: https://github.com/bazelbuild/bazel/blob/1af61b21df99edc2fc66939cdf14449c2661f873/src/main/java/com/google/devtools/build/lib/runtime/UiOptions.java#L151 41 | build --terminal_columns=143 42 | 43 | ###################################### 44 | # Generic remote cache configuration # 45 | ###################################### 46 | 47 | # Only download remote outputs of top level targets to the local machine. 48 | # Docs: https://bazel.build/reference/command-line-reference#flag--remote_download_toplevel 49 | build --remote_download_toplevel 50 | 51 | # The maximum amount of time to wait for remote execution and cache calls. 52 | # https://bazel.build/reference/command-line-reference#flag--remote_timeout 53 | build --remote_timeout=3600 54 | 55 | # Upload locally executed action results to the remote cache. 56 | # Docs: https://bazel.build/reference/command-line-reference#flag--remote_upload_local_results 57 | build --remote_upload_local_results 58 | 59 | # Fall back to standalone local execution strategy if remote execution fails. If the grpc remote 60 | # cache connection fails, it will fail the build, add this so it falls back to the local cache. 61 | # Docs: https://bazel.build/reference/command-line-reference#flag--remote_local_fallback 62 | build --remote_local_fallback 63 | 64 | # Fixes builds hanging on CI that get the TCP connection closed without sending RST packets. 
65 | # Docs: https://bazel.build/reference/command-line-reference#flag--grpc_keepalive_time 66 | build --grpc_keepalive_time=30s 67 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/clickhouse/clickhouse.cue: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | ) 7 | 8 | // A stripped down version of the #SamplerConfig found in deploy/single-node/config.cue 9 | #SamplerConfig: [string]: { 10 | device: string 11 | samplingRate: int 12 | anonymizeAddresses: bool 13 | description: string 14 | interface: [string]: { 15 | id: int 16 | description: string 17 | } 18 | vlan: [string]: { 19 | id: int 20 | description: string 21 | } 22 | host: [string]: { 23 | device: string 24 | description: string 25 | } 26 | ... 27 | } 28 | 29 | #UserData: { 30 | autnums: [string]: { 31 | asn: int 32 | name: string 33 | country: string 34 | } 35 | } 36 | 37 | #Config: { 38 | clickhouseAdminPassword: string 39 | clickhouseReadonlyPassword: string 40 | enableClickhouseIngress: bool | *false 41 | sampler: #SamplerConfig 42 | userData: #UserData 43 | } 44 | 45 | ClickHouseInstallation: netmeta: spec: { 46 | defaults: templates: { 47 | dataVolumeClaimTemplate: "local-pv" 48 | logVolumeClaimTemplate: "local-pv" 49 | serviceTemplate: "chi-service-internal" 50 | podTemplate: "clickhouse-static" 51 | } 52 | configuration: { 53 | settings: { 54 | format_schema_path: "/etc/clickhouse-server/config.d/" 55 | dictionaries_config: "config.d/*.conf" 56 | user_defined_executable_functions_config: "config.d/*_function.xml" 57 | http_port: 8123 58 | "access_control_improvements/settings_constraints_replace_previous": true 59 | } 60 | clusters: [ 61 | { 62 | name: "netmeta" 63 | layout: { 64 | shardsCount: 1 65 | replicasCount: 1 66 | } 67 | }, 68 | ] 69 | users: { 70 | "default/access_management": 1 71 | "admin/password_sha256_hex": 
hex.Encode(sha256.Sum256(#Config.clickhouseAdminPassword)) 72 | "admin/networks/ip": "::/0" 73 | "readonly/password_sha256_hex": hex.Encode(sha256.Sum256(#Config.clickhouseReadonlyPassword)) 74 | "readonly/networks/ip": "::/0" 75 | "readonly/profile": "readonly" 76 | } 77 | profiles: { 78 | "readonly/readonly": "1" 79 | "readonly/constraints/additional_table_filters/changeable_in_readonly": "" 80 | } 81 | files: [string]: string 82 | } 83 | templates: { 84 | volumeClaimTemplates: [{ 85 | name: "local-pv" 86 | spec: { 87 | accessModes: [ 88 | "ReadWriteOnce", 89 | ] 90 | resources: requests: storage: "100Gi" 91 | } 92 | }] 93 | serviceTemplates: [{ 94 | name: "chi-service-internal" 95 | generateName: "clickhouse-{chi}" 96 | spec: ports: [ 97 | {name: "http", port: 8123, targetPort: port, protocol: "TCP"}, 98 | {name: "tcp", port: 9000, targetPort: port, protocol: "TCP"}, 99 | ] 100 | }] 101 | podTemplates: [{ 102 | name: "clickhouse-static" 103 | spec: { 104 | securityContext: { 105 | runAsUser: 101 106 | runAsGroup: 101 107 | fsGroup: 101 108 | } 109 | containers: [{ 110 | name: "clickhouse" 111 | image: "docker.io/clickhouse/clickhouse-server:23.2.4.12-alpine@sha256:0a63183a4da2923868cc0e58285e404a4a60e1be657c5b7005e736e91cfd4da9" 112 | resources: {} 113 | }] 114 | } 115 | }] 116 | } 117 | } 118 | 119 | if #Config.enableClickhouseIngress { 120 | IngressRoute: "clickhouse-ingress": spec: { 121 | entryPoints: ["clickhouse"] 122 | routes: [ 123 | { 124 | match: "PathPrefix(`/`)" 125 | kind: "Rule" 126 | services: [ 127 | { 128 | name: "clickhouse-netmeta" 129 | port: "http" 130 | }, 131 | ] 132 | }, 133 | ] 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /deploy/single-node/schema/FlowMessage.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package netmeta; 3 | 4 | // Adapter from/compatible with cloudflare/goflow. 
5 | 6 | message FlowMessage { 7 | enum FlowType { 8 | FLOWUNKNOWN = 0; 9 | SFLOW_5 = 1; 10 | NETFLOW_V5 = 2; 11 | NETFLOW_V9 = 3; 12 | IPFIX = 4; 13 | } 14 | FlowType Type = 1 [(column_name) = "FlowType"]; 15 | 16 | uint64 TimeReceived = 2; 17 | uint32 SequenceNum = 4; 18 | uint64 SamplingRate = 3; 19 | 20 | uint32 FlowDirection = 42 [(column_type) = "UInt8"]; 21 | 22 | // Sampler information 23 | bytes SamplerAddress = 11 [(column_type) = "FixedString(16)"]; 24 | 25 | // Found inside packet 26 | uint64 TimeFlowStart = 38; 27 | uint64 TimeFlowEnd = 5; 28 | 29 | // Size of the sampled packet 30 | uint64 Bytes = 9; 31 | uint64 Packets = 10; 32 | 33 | // Source/destination addresses 34 | bytes SrcAddr = 6 [(column_type) = "FixedString(16)"]; 35 | bytes DstAddr = 7 [(column_type) = "FixedString(16)"]; 36 | 37 | // Layer 3 protocol (IPv4/IPv6/ARP/MPLS...) 38 | uint32 Etype = 30 [(column_type) = "UInt16"]; 39 | 40 | // Layer 4 protocol 41 | uint32 Proto = 20 [(column_type) = "UInt8"]; 42 | 43 | // Ports for UDP and TCP 44 | uint32 SrcPort = 21; 45 | uint32 DstPort = 22; 46 | 47 | // Interfaces 48 | uint32 InIf = 18; 49 | uint32 OutIf = 19; 50 | 51 | // Ethernet information 52 | uint64 SrcMac = 27; 53 | uint64 DstMac = 28; 54 | 55 | // Vlan 56 | uint32 SrcVlan = 33; 57 | uint32 DstVlan = 34; 58 | // 802.1q VLAN in sampled packet 59 | uint32 VlanId = 29; 60 | 61 | // VRF 62 | uint32 IngressVrfID = 39; 63 | uint32 EgressVrfID = 40; 64 | 65 | // IP and TCP special flags 66 | uint32 IPTos = 23 [(column_type) = "UInt8"]; 67 | uint32 ForwardingStatus = 24 [(column_type) = "UInt8"]; 68 | uint32 IPTTL = 25 [(column_type) = "UInt8"]; 69 | uint32 TCPFlags = 26 [(column_type) = "UInt8"]; 70 | uint32 IcmpType = 31 [(column_type) = "UInt8"]; 71 | uint32 IcmpCode = 32 [(column_type) = "UInt8"]; 72 | uint32 IPv6FlowLabel = 37; 73 | 74 | // Fragments (IPv4/IPv6) 75 | uint32 FragmentId = 35; 76 | uint32 FragmentOffset = 36; 77 | 78 | uint32 BiFlowDirection = 41 [(column_type) = "UInt8"]; 
79 | 80 | // Autonomous system information 81 | uint32 SrcAS = 14; 82 | uint32 DstAS = 15; 83 | 84 | bytes NextHop = 12 [(column_type) = "FixedString(16)"]; 85 | uint32 NextHopAS = 13; 86 | 87 | // Prefix size 88 | uint32 SrcNet = 16 [(column_type) = "UInt8"]; 89 | uint32 DstNet = 17 [(column_type) = "UInt8"]; 90 | 91 | // IP encapsulation information 92 | bool HasEncap = 43 [(column_skip) = true]; 93 | bytes SrcAddrEncap = 44 [(column_skip) = true]; 94 | bytes DstAddrEncap = 45 [(column_skip) = true]; 95 | uint32 ProtoEncap = 46 [(column_skip) = true]; 96 | uint32 EtypeEncap = 47 [(column_skip) = true]; 97 | 98 | uint32 IPTosEncap = 48 [(column_skip) = true]; 99 | uint32 IPTTLEncap = 49 [(column_skip) = true]; 100 | uint32 IPv6FlowLabelEncap = 50 [(column_skip) = true]; 101 | uint32 FragmentIdEncap = 51 [(column_skip) = true]; 102 | uint32 FragmentOffsetEncap = 52 [(column_skip) = true]; 103 | 104 | // MPLS information 105 | bool HasMPLS = 53 [(column_skip) = true]; 106 | uint32 MPLSCount = 54 [(column_skip) = true]; 107 | uint32 MPLS1TTL = 55 [(column_skip) = true]; // First TTL 108 | uint32 MPLS1Label = 56 [(column_skip) = true]; // First Label 109 | uint32 MPLS2TTL = 57 [(column_skip) = true]; // Second TTL 110 | uint32 MPLS2Label = 58 [(column_skip) = true]; // Second Label 111 | uint32 MPLS3TTL = 59 [(column_skip) = true]; // Third TTL 112 | uint32 MPLS3Label = 60 [(column_skip) = true]; // Third Label 113 | uint32 MPLSLastTTL = 61 [(column_skip) = true]; // Last TTL 114 | uint32 MPLSLastLabel = 62 [(column_skip) = true]; // Last Label 115 | 116 | // PPP information 117 | bool HasPPP = 63 [(column_skip) = true]; 118 | uint32 PPPAddressControl = 64 [(column_skip) = true]; 119 | 120 | // Custom fields: start after ID 1000: 121 | // uint32 MyCustomField = 1000; 122 | } 123 | -------------------------------------------------------------------------------- /MODULE.bazel: -------------------------------------------------------------------------------- 1 | 
bazel_dep(name = "gazelle") 2 | single_version_override( 3 | module_name = "gazelle", 4 | patch_strip = 1, 5 | patches = [ 6 | # Snatched from monogon-dev/monogon to support patches to 7 | # goflow for adding flowdirection. 8 | "//third_party/gazelle:add-prepatching.patch", 9 | ], 10 | version = "0.40.0", 11 | ) 12 | 13 | bazel_dep(name = "rules_go") 14 | single_version_override( 15 | module_name = "rules_go", 16 | patch_strip = 1, 17 | patches = [ 18 | # Kick out cgo support from stdlib as the zig toolchain 19 | # doesn't like darwin right now. 20 | "//third_party/rules_go:disable-cgo.patch", 21 | "//third_party/rules_go:rules_go_absolute_embedsrc.patch", 22 | ], 23 | version = "0.50.1", 24 | ) 25 | 26 | bazel_dep(name = "rules_oci", version = "2.0.1") 27 | bazel_dep(name = "rules_pkg", version = "1.0.1") 28 | bazel_dep(name = "aspect_bazel_lib", version = "2.9.0") 29 | bazel_dep(name = "hermetic_cc_toolchain", version = "3.1.0") 30 | bazel_dep(name = "toolchains_protoc", version = "0.3.3") 31 | bazel_dep(name = "rules_multirun", version = "0.9.0") 32 | bazel_dep(name = "buildifier_prebuilt", version = "8.0.1") 33 | 34 | toolchains = use_extension("@hermetic_cc_toolchain//toolchain:ext.bzl", "toolchains") 35 | use_repo(toolchains, "zig_sdk") 36 | 37 | register_toolchains( 38 | "@zig_sdk//toolchain:linux_amd64_musl", 39 | "@zig_sdk//toolchain:linux_arm64_musl", 40 | "@zig_sdk//toolchain:windows_amd64", 41 | "@zig_sdk//toolchain:windows_arm64", 42 | "@zig_sdk//toolchain:darwin_amd64", 43 | "@zig_sdk//toolchain:darwin_arm64", 44 | ) 45 | 46 | go_sdk = use_extension("@rules_go//go:extensions.bzl", "go_sdk") 47 | go_sdk.download(version = "1.23.1") 48 | 49 | go_deps = use_extension("@gazelle//:extensions.bzl", "go_deps") 50 | go_deps.from_file(go_mod = "//:go.mod") 51 | use_repo( 52 | go_deps, 53 | "com_github_clickhouse_clickhouse_go_v2", 54 | "com_github_emicklei_proto", 55 | "com_github_gopacket_gopacket", 56 | "com_github_huandu_go_sqlbuilder", 57 | 
"com_github_netsampler_goflow2", 58 | "com_github_netsampler_goflow2_v2", 59 | "com_github_osrg_gobgp", 60 | "com_github_pressly_goose_v3", 61 | "com_github_sirupsen_logrus", 62 | "com_github_vishvananda_netlink", 63 | "io_k8s_klog_v2", 64 | "org_cuelang_go", 65 | ) 66 | 67 | oci = use_extension("@rules_oci//oci:extensions.bzl", "oci") 68 | oci.pull( 69 | name = "distroless_base", 70 | digest = "sha256:7a4bffcb07307d97aa731b50cb6ab22a68a8314426f4e4428335939b5b1943a5", 71 | image = "gcr.io/distroless/base", 72 | platforms = [ 73 | "linux/amd64", 74 | "linux/arm/v7", 75 | "linux/arm64/v8", 76 | "linux/ppc64le", 77 | "linux/s390x", 78 | ], 79 | ) 80 | oci.pull( 81 | name = "grafana", 82 | digest = "sha256:2a73ae33c9f0c51af6eced2ef185d5d3682b4c378c4fdd6941a14e8ea4a3e95b", 83 | image = "index.docker.io/grafana/grafana", 84 | platforms = [ 85 | "linux/amd64", 86 | "linux/arm64/v8", 87 | "linux/arm/v7", 88 | ], 89 | ) 90 | use_repo( 91 | oci, 92 | "distroless_base", 93 | "distroless_base_linux_amd64", 94 | "distroless_base_linux_arm64_v8", 95 | "distroless_base_linux_arm_v7", 96 | "distroless_base_linux_ppc64le", 97 | "distroless_base_linux_s390x", 98 | "grafana", 99 | "grafana_linux_amd64", 100 | "grafana_linux_arm64_v8", 101 | "grafana_linux_arm_v7", 102 | ) 103 | 104 | grafana_plugin = use_repo_rule("//third_party/grafana:defs.bzl", "grafana_plugin") 105 | 106 | grafana_plugin( 107 | name = "netsage-sankey-panel", 108 | plugin_name = "netsage-sankey-panel", 109 | version = "1.1.3", 110 | ) 111 | 112 | grafana_plugin( 113 | name = "grafana-clickhouse-datasource", 114 | plugin_name = "grafana-clickhouse-datasource", 115 | version = "4.5.0", 116 | ) 117 | 118 | grafana = use_repo_rule("//third_party/grafana:defs.bzl", "grafana") 119 | 120 | grafana( 121 | name = "grafana_bin", 122 | ) 123 | -------------------------------------------------------------------------------- /cue.mod/gen/k8s.io/apimachinery/pkg/api/resource/quantity_go_gen.cue: 
-------------------------------------------------------------------------------- 1 | // Code generated by cue get go. DO NOT EDIT. 2 | 3 | //cue:generate cue get go k8s.io/apimachinery/pkg/api/resource 4 | 5 | package resource 6 | 7 | // Quantity is a fixed-point representation of a number. 8 | // It provides convenient marshaling/unmarshaling in JSON and YAML, 9 | // in addition to String() and AsInt64() accessors. 10 | // 11 | // The serialization format is: 12 | // 13 | // ``` 14 | // ::= 15 | // 16 | // (Note that may be empty, from the "" case in .) 17 | // 18 | // ::= 0 | 1 | ... | 9 19 | // ::= | 20 | // ::= | . | . | . 21 | // ::= "+" | "-" 22 | // ::= | 23 | // ::= | | 24 | // ::= Ki | Mi | Gi | Ti | Pi | Ei 25 | // 26 | // (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) 27 | // 28 | // ::= m | "" | k | M | G | T | P | E 29 | // 30 | // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) 31 | // 32 | // ::= "e" | "E" 33 | // ``` 34 | // 35 | // No matter which of the three exponent forms is used, no quantity may represent 36 | // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal 37 | // places. Numbers larger or more precise will be capped or rounded up. 38 | // (E.g.: 0.1m will rounded up to 1m.) 39 | // This may be extended in the future if we require larger or smaller quantities. 40 | // 41 | // When a Quantity is parsed from a string, it will remember the type of suffix 42 | // it had, and will use the same type again when it is serialized. 43 | // 44 | // Before serializing, Quantity will be put in "canonical form". 45 | // This means that Exponent/suffix will be adjusted up or down (with a 46 | // corresponding increase or decrease in Mantissa) such that: 47 | // 48 | // - No precision is lost 49 | // - No fractional digits will be emitted 50 | // - The exponent (or suffix) is as large as possible. 
51 | // 52 | // The sign will be omitted unless the number is negative. 53 | // 54 | // Examples: 55 | // 56 | // - 1.5 will be serialized as "1500m" 57 | // - 1.5Gi will be serialized as "1536Mi" 58 | // 59 | // Note that the quantity will NEVER be internally represented by a 60 | // floating point number. That is the whole point of this exercise. 61 | // 62 | // Non-canonical values will still parse as long as they are well formed, 63 | // but will be re-emitted in their canonical form. (So always use canonical 64 | // form, or don't diff.) 65 | // 66 | // This format is intended to make it difficult to use these numbers without 67 | // writing some sort of special handling code in the hopes that that will 68 | // cause implementors to also use a fixed point implementation. 69 | // 70 | // +protobuf=true 71 | // +protobuf.embed=string 72 | // +protobuf.options.marshal=false 73 | // +protobuf.options.(gogoproto.goproto_stringer)=false 74 | // +k8s:deepcopy-gen=true 75 | // +k8s:openapi-gen=true 76 | #Quantity: _ 77 | 78 | // CanonicalValue allows a quantity amount to be converted to a string. 79 | #CanonicalValue: _ 80 | 81 | // Format lists the three possible formattings of a quantity. 82 | #Format: string // #enumFormat 83 | 84 | #enumFormat: 85 | #DecimalExponent | 86 | #BinarySI | 87 | #DecimalSI 88 | 89 | #DecimalExponent: #Format & "DecimalExponent" 90 | #BinarySI: #Format & "BinarySI" 91 | #DecimalSI: #Format & "DecimalSI" 92 | 93 | // splitREString is used to separate a number from its suffix; as such, 94 | // this is overly permissive, but that's OK-- it will be checked later. 95 | _#splitREString: "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" 96 | 97 | _#int64QuantityExpectedBytes: 18 98 | 99 | // QuantityValue makes it possible to use a Quantity as value for a command 100 | // line parameter. 
101 | // 102 | // +protobuf=true 103 | // +protobuf.embed=string 104 | // +protobuf.options.marshal=false 105 | // +protobuf.options.(gogoproto.goproto_stringer)=false 106 | // +k8s:deepcopy-gen=true 107 | #QuantityValue: _ 108 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/kafka/metrics.cue: -------------------------------------------------------------------------------- 1 | package kafka 2 | 3 | import "encoding/yaml" 4 | 5 | Kafka: "netmeta": spec: { 6 | kafkaExporter: { 7 | topicRegex: ".*" 8 | groupRegex: ".*" 9 | } 10 | 11 | kafka: metricsConfig: { 12 | type: "jmxPrometheusExporter" 13 | valueFrom: configMapKeyRef: { 14 | name: "kafka-config" 15 | key: "jmx-exporter" 16 | } 17 | } 18 | } 19 | 20 | ConfigMap: "kafka-config": data: "jmx-exporter": yaml.Marshal({ 21 | // Inspired by config from Kafka 2.0.0 example rules: 22 | // https://github.com/prometheus/jmx_exporter/blob/master/example_configs/kafka-2_0_0.yml 23 | lowercaseOutputName: true 24 | rules: [{ 25 | // Special cases and very specific rules 26 | pattern: "kafka.server<>Value" 27 | name: "kafka_server_$1_$2" 28 | type: "GAUGE" 29 | labels: { 30 | clientId: "$3" 31 | topic: "$4" 32 | partition: "$5" 33 | } 34 | }, { 35 | pattern: "kafka.server<>Value" 36 | name: "kafka_server_$1_$2" 37 | type: "GAUGE" 38 | labels: { 39 | clientId: "$3" 40 | broker: "$4:$5" 41 | } 42 | }, { 43 | // Some percent metrics use MeanRate attribute 44 | // Ex) kafka.server<>MeanRate 45 | pattern: "kafka.(\\w+)<>MeanRate" 46 | name: "kafka_$1_$2_$3_percent" 47 | type: "GAUGE" 48 | }, { 49 | // Generic gauges for percents 50 | pattern: "kafka.(\\w+)<>Value" 51 | name: "kafka_$1_$2_$3_percent" 52 | type: "GAUGE" 53 | }, { 54 | pattern: "kafka.(\\w+)<>Value" 55 | name: "kafka_$1_$2_$3_percent" 56 | type: "GAUGE" 57 | labels: $4: "$5" 58 | }, { 59 | // Generic per-second counters with 0-2 key/value pairs 60 | pattern: "kafka.(\\w+)<>Count" 61 | name: 
"kafka_$1_$2_$3_total" 62 | type: "COUNTER" 63 | labels: { 64 | $4: "$5" 65 | $6: "$7" 66 | } 67 | }, { 68 | pattern: "kafka.(\\w+)<>Count" 69 | name: "kafka_$1_$2_$3_total" 70 | type: "COUNTER" 71 | labels: $4: "$5" 72 | }, { 73 | pattern: "kafka.(\\w+)<>Count" 74 | name: "kafka_$1_$2_$3_total" 75 | type: "COUNTER" 76 | }, { 77 | // Generic gauges with 0-2 key/value pairs 78 | pattern: "kafka.(\\w+)<>Value" 79 | name: "kafka_$1_$2_$3" 80 | type: "GAUGE" 81 | labels: { 82 | $4: "$5" 83 | $6: "$7" 84 | } 85 | }, { 86 | pattern: "kafka.(\\w+)<>Value" 87 | name: "kafka_$1_$2_$3" 88 | type: "GAUGE" 89 | labels: $4: "$5" 90 | }, { 91 | pattern: "kafka.(\\w+)<>Value" 92 | name: "kafka_$1_$2_$3" 93 | type: "GAUGE" 94 | }, { 95 | // Emulate Prometheus 'Summary' metrics for the exported 'Histogram's. 96 | // Note that these are missing the '_sum' metric! 97 | pattern: "kafka.(\\w+)<>Count" 98 | name: "kafka_$1_$2_$3_count" 99 | type: "COUNTER" 100 | labels: { 101 | $4: "$5" 102 | $6: "$7" 103 | } 104 | }, { 105 | pattern: "kafka.(\\w+)<>(\\d+)thPercentile" 106 | name: "kafka_$1_$2_$3" 107 | type: "GAUGE" 108 | labels: { 109 | $4: "$5" 110 | $6: "$7" 111 | quantile: "0.$8" 112 | } 113 | }, { 114 | pattern: "kafka.(\\w+)<>Count" 115 | name: "kafka_$1_$2_$3_count" 116 | type: "COUNTER" 117 | labels: $4: "$5" 118 | }, { 119 | pattern: "kafka.(\\w+)<>(\\d+)thPercentile" 120 | name: "kafka_$1_$2_$3" 121 | type: "GAUGE" 122 | labels: { 123 | $4: "$5" 124 | quantile: "0.$6" 125 | } 126 | }, { 127 | pattern: "kafka.(\\w+)<>Count" 128 | name: "kafka_$1_$2_$3_count" 129 | type: "COUNTER" 130 | }, { 131 | pattern: "kafka.(\\w+)<>(\\d+)thPercentile" 132 | name: "kafka_$1_$2_$3" 133 | type: "GAUGE" 134 | labels: quantile: "0.$4" 135 | }] 136 | }) 137 | -------------------------------------------------------------------------------- /third_party/rules_oci/pr-715.patch: -------------------------------------------------------------------------------- 1 | From 
399a62ece130a6f78b472cca4c513a0714a9fa18 Mon Sep 17 00:00:00 2001 2 | From: Red Daly 3 | Date: Fri, 11 Oct 2024 07:22:54 -0700 4 | Subject: [PATCH 1/2] feat: Output the image id of an oci_load target. 5 | 6 | --- 7 | oci/private/image_id.sh.tpl | 18 +++++++++++ 8 | oci/private/load.bzl | 29 +++++++++++++++++ 9 | oci/private/load.sh.tpl | 1 + 10 | 4 files changed, 48 insertions(+), 64 deletions(-) 11 | create mode 100644 oci/private/image_id.sh.tpl 12 | delete mode 100644 oci/private/push.sh.tpl 13 | 14 | diff --git a/oci/private/image_id.sh.tpl b/oci/private/image_id.sh.tpl 15 | new file mode 100644 16 | index 00000000..71f5fb83 17 | --- /dev/null 18 | +++ b/oci/private/image_id.sh.tpl 19 | @@ -0,0 +1,18 @@ 20 | +#!/usr/bin/env bash 21 | +# Read the configuration json sha256 digest from the manifest.json file passed 22 | +# as the first positional argument. Output that sha256 to the 23 | +# file provided as the second positional argument. 24 | +set -o pipefail -o errexit -o nounset 25 | + 26 | +readonly JQ="{{jq_path}}" 27 | + 28 | +CONFIG_PATH=$("$JQ" -r '.[0].Config' "$1") 29 | +# CONFIG_PATH will be blobs/sha256/ 30 | +SHA256_OF_CONFIG="${CONFIG_PATH#blobs/sha256/}" 31 | + 32 | +if [[ "$SHA256_OF_CONFIG" == "$CONFIG_PATH" ]]; then 33 | + echo "Error: Failed to extract SHA256 digest from CONFIG_PATH" >&2 34 | + exit 1 35 | +fi 36 | + 37 | +echo "$SHA256_OF_CONFIG" > "$2" 38 | \ No newline at end of file 39 | diff --git a/oci/private/load.bzl b/oci/private/load.bzl 40 | index 525d0278..07f8c410 100644 41 | --- a/oci/private/load.bzl 42 | +++ b/oci/private/load.bzl 43 | @@ -129,6 +129,7 @@ attrs = { 44 | allow_single_file = True, 45 | ), 46 | "_tarball_sh": attr.label(allow_single_file = True, default = "//oci/private:tarball.sh.tpl"), 47 | + "_image_id_sh": attr.label(allow_single_file = True, default = "//oci/private:image_id.sh.tpl"), 48 | "_runfiles": attr.label(default = "@bazel_tools//tools/bash/runfiles"), 49 | "_windows_constraint": attr.label(default = 
"@platforms//os:windows"), 50 | } 51 | @@ -143,6 +144,7 @@ def _load_impl(ctx): 52 | 53 | mtree_spec = ctx.actions.declare_file("{}/tarball.spec".format(ctx.label.name)) 54 | executable = ctx.actions.declare_file("{}/load.sh".format(ctx.label.name)) 55 | + image_id_sh = ctx.actions.declare_file("{}/image_id.sh".format(ctx.label.name)) 56 | manifest_json = ctx.actions.declare_file("{}/manifest.json".format(ctx.label.name)) 57 | 58 | # Represents either manifest.json or index.json depending on the image format 59 | @@ -181,6 +183,32 @@ def _load_impl(ctx): 60 | ], 61 | mnemonic = "OCITarballManifest", 62 | ) 63 | + ctx.actions.expand_template( 64 | + template = ctx.file._image_id_sh, 65 | + output = image_id_sh, 66 | + is_executable = True, 67 | + substitutions = { 68 | + "{{jq_path}}": jq.jqinfo.bin.path, 69 | + }, 70 | + ) 71 | + image_id = ctx.actions.declare_file("{}/image_id.txt".format(ctx.label.name)) 72 | + image_id_args = ctx.actions.args() 73 | + image_id_args.add(manifest_json) 74 | + image_id_args.add(image_id) 75 | + ctx.actions.run( 76 | + executable = image_id_sh, 77 | + inputs = [manifest_json], 78 | + outputs = [image_id], 79 | + arguments = [image_id_args], 80 | + tools = [ 81 | + jq.jqinfo.bin, 82 | + ], 83 | + env = { 84 | + "MANIFEST_JSON_PATH": manifest_json.path, 85 | + "OUTPUT_PATH": image_id.path, 86 | + }, 87 | + mnemonic = "ImageId", 88 | + ) 89 | 90 | # This action produces a large output and should rarely be used as it puts load on the cache. 
91 | # It will only run if the "tarball" output_group is explicitly requested 92 | @@ -228,6 +256,7 @@ def _load_impl(ctx): 93 | DefaultInfo( 94 | runfiles = runfiles, 95 | executable = runnable_loader, 96 | + files = depset([image_id]), 97 | ), 98 | OutputGroupInfo(tarball = depset([tarball])), 99 | ] 100 | diff --git a/oci/private/load.sh.tpl b/oci/private/load.sh.tpl 101 | index ccd4d6ea..0c0be29b 100644 102 | --- a/oci/private/load.sh.tpl 103 | +++ b/oci/private/load.sh.tpl 104 | @@ -8,6 +8,7 @@ runfiles_export_envvars 105 | readonly TAR="$(rlocation "{{tar}}")" 106 | readonly MTREE="$(rlocation "{{mtree_path}}")" 107 | readonly LOADER="$(rlocation "{{loader}}")" 108 | +readonly JQ="$(rlocation "{{jq_path}}")" 109 | 110 | if [ -f "$LOADER" ]; then 111 | CONTAINER_CLI="$LOADER" 112 | -------------------------------------------------------------------------------- /deploy/single-node/tests/base.cue: -------------------------------------------------------------------------------- 1 | package tests 2 | 3 | import netmeta "github.com/monogon-dev/netmeta/deploy/single-node:k8s" 4 | 5 | test: [string]: T={ 6 | config: netmeta.#NetMetaConfig 7 | out: (netmeta & {netmeta: config: T.config}) 8 | asserts: true 9 | } 10 | 11 | _requiredDefaults: { 12 | grafanaInitialAdminPassword: "grafanaInitialAdminPassword" 13 | clickhouseAdminPassword: "clickhouseAdminPassword" 14 | clickhouseReadonlyPassword: "clickhouseReadonlyPassword" 15 | sessionSecret: "sessionSecret" 16 | 17 | publicHostname: "publicHostname" 18 | } 19 | 20 | test: "traefik: letsencrypt: off": T={ 21 | config: { 22 | _requiredDefaults 23 | letsencryptMode: "off" 24 | } 25 | 26 | asserts: T.out.k8s.Deployment.traefik.spec.template.spec.containers[0].args == ["--accesslog", "--entrypoints.web.Address=:80", "--entrypoints.websecure.Address=:443", "--providers.kubernetescrd"] 27 | } 28 | 29 | test: "traefik: letsencrypt: staging": T={ 30 | config: { 31 | _requiredDefaults 32 | letsencryptMode: "staging" 33 | 
letsencryptAccountMail: "letsencrypt@example.com" 34 | } 35 | 36 | asserts: T.out.k8s.Deployment.traefik.spec.template.spec.containers[0].args == ["--accesslog", "--entrypoints.web.Address=:80", "--entrypoints.websecure.Address=:443", "--providers.kubernetescrd", "--certificatesresolvers.publicHostnameResolver.acme.tlschallenge", "--certificatesresolvers.publicHostnameResolver.acme.email=letsencrypt@example.com", "--certificatesresolvers.publicHostnameResolver.acme.storage=/data/acme-staging.json", "--certificatesresolvers.publicHostnameResolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory"] 37 | } 38 | 39 | test: "traefik: letsencrypt: production": T={ 40 | config: { 41 | _requiredDefaults 42 | letsencryptMode: "production" 43 | letsencryptAccountMail: "letsencrypt@example.com" 44 | } 45 | 46 | asserts: T.out.k8s.Deployment.traefik.spec.template.spec.containers[0].args == ["--accesslog", "--entrypoints.web.Address=:80", "--entrypoints.websecure.Address=:443", "--providers.kubernetescrd", "--certificatesresolvers.publicHostnameResolver.acme.tlschallenge", "--certificatesresolvers.publicHostnameResolver.acme.email=letsencrypt@example.com", "--certificatesresolvers.publicHostnameResolver.acme.storage=/data/acme-production.json"] 47 | } 48 | 49 | 50 | test: "clickhouse: minimal config": T={ 51 | config: { 52 | _requiredDefaults 53 | letsencryptMode: "off" 54 | } 55 | 56 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."InterfaceNames.tsv" == "" 57 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."SamplerConfig.tsv" == "" 58 | } 59 | 60 | test: "clickhouse: empty sampler": T={ 61 | config: { 62 | _requiredDefaults 63 | letsencryptMode: "off" 64 | 65 | sampler: "::ffff:100.0.0.1": { 66 | } 67 | } 68 | 69 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."InterfaceNames.tsv" == "" 70 | asserts: 
T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."SamplerConfig.tsv" == "::ffff:100.0.0.1\tNULL\tNULL\tfalse" 71 | } 72 | 73 | test: "clickhouse: sampler with desc": T={ 74 | config: { 75 | _requiredDefaults 76 | letsencryptMode: "off" 77 | 78 | sampler: "::ffff:100.0.0.1": { 79 | description: "foo" 80 | } 81 | } 82 | 83 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."InterfaceNames.tsv" == "" 84 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."SamplerConfig.tsv" == "::ffff:100.0.0.1\tNULL\tfoo\tfalse" 85 | } 86 | 87 | test: "clickhouse: sampler with interface": T={ 88 | config: { 89 | _requiredDefaults 90 | letsencryptMode: "off" 91 | 92 | sampler: "::ffff:100.0.0.1": { 93 | interface: "858": description: "TRANSIT-ABC" 94 | } 95 | 96 | } 97 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."InterfaceNames.tsv" == "::ffff:100.0.0.1\t858\tTRANSIT-ABC" 98 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."SamplerConfig.tsv" == "::ffff:100.0.0.1\tNULL\tNULL\tfalse" 99 | } 100 | 101 | test: "clickhouse: sampler with desc and interfaces": T={ 102 | config: { 103 | _requiredDefaults 104 | letsencryptMode: "off" 105 | 106 | sampler: "::ffff:100.0.0.1": { 107 | description: "foo" 108 | interface: "858": description: "TRANSIT-ABC" 109 | } 110 | } 111 | 112 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."InterfaceNames.tsv" == "::ffff:100.0.0.1\t858\tTRANSIT-ABC" 113 | asserts: T.out.k8s.ClickHouseInstallation.netmeta.spec.configuration.files."SamplerConfig.tsv" == "::ffff:100.0.0.1\tNULL\tfoo\tfalse" 114 | } 115 | -------------------------------------------------------------------------------- /deploy/single-node/schema/views.cue: -------------------------------------------------------------------------------- 1 | package schema 2 | 3 | view: flows_raw_view: { 4 | from: "flows_queue" 5 | to: "flows_raw" 6 | query: 
#""" 7 | SELECT * REPLACE ( 8 | if( 9 | dictGet('SamplerConfig', 'AnonymizeAddresses', IPv6NumToString(SamplerAddress)), 10 | toIPv6(cutIPv6(SrcAddr, 8, 1)), 11 | SrcAddr 12 | ) AS SrcAddr, 13 | if( 14 | dictGet('SamplerConfig', 'AnonymizeAddresses', IPv6NumToString(SamplerAddress)), 15 | toIPv6(cutIPv6(DstAddr, 8, 1)), 16 | DstAddr 17 | ) AS DstAddr 18 | ) 19 | FROM ( 20 | SELECT toDate(TimeReceived) AS Date, 21 | * REPLACE ( 22 | ParseGoFlowAddress(SamplerAddress) AS SamplerAddress, 23 | ParseGoFlowAddress(SrcAddr) AS SrcAddr, 24 | ParseGoFlowAddress(DstAddr) AS DstAddr, 25 | if( 26 | NextHop != toFixedString('', 16), 27 | ParseGoFlowAddress(NextHop), 28 | toIPv6('::') 29 | ) AS NextHop, 30 | if( 31 | SrcAS == 0, 32 | dictGetUInt32('risinfo', 'asnum', SrcAddr), 33 | SrcAS 34 | ) as SrcAS, 35 | if( 36 | DstAS == 0, 37 | dictGetUInt32('risinfo', 'asnum', DstAddr), 38 | DstAS 39 | ) as DstAS, 40 | coalesce( 41 | dictGet('SamplerConfig', 'SamplingRate', IPv6NumToString(SamplerAddress)), 42 | SamplingRate 43 | ) as SamplingRate 44 | ) 45 | FROM %%from%% 46 | ) 47 | """# 48 | } 49 | 50 | if #Config.fastNetMon != _|_ { 51 | view: fastnetmon_view: { 52 | from: "fastnetmon_queue" 53 | to: "flows_raw" 54 | query: 55 | // language=clickhouse 56 | #""" 57 | SELECT * REPLACE( 58 | if( 59 | SrcAS == 0, 60 | dictGetUInt32('risinfo', 'asnum', SrcAddr), 61 | SrcAS 62 | ) as SrcAS, 63 | if( 64 | DstAS == 0, 65 | dictGetUInt32('risinfo', 'asnum', DstAddr), 66 | DstAS 67 | ) as DstAS, 68 | if( 69 | dictGet('SamplerConfig', 'AnonymizeAddresses', IPv6NumToString(SamplerAddress)), 70 | toIPv6(cutIPv6(SrcAddr, 8, 1)), 71 | SrcAddr 72 | ) AS SrcAddr, 73 | if( 74 | dictGet('SamplerConfig', 'AnonymizeAddresses', IPv6NumToString(SamplerAddress)), 75 | toIPv6(cutIPv6(DstAddr, 8, 1)), 76 | DstAddr 77 | ) AS DstAddr 78 | ) 79 | FROM ( 80 | SELECT toDate(timestamp_seconds) as Date, 81 | 0 as FlowType, 82 | timestamp_seconds as TimeReceived, 83 | sampling_ratio as SamplingRate, 84 | 
traffic_direction as FlowDirection, 85 | ParseFastNetMonAddress(agent_address) AS SamplerAddress, 86 | octets as Bytes, 87 | packets as Packets, 88 | ParseFastNetMonAddress(source_ip) AS SrcAddr, 89 | ParseFastNetMonAddress(destination_ip) AS DstAddr, 90 | protocol as Proto, 91 | source_port as SrcPort, 92 | destination_port as DstPort, 93 | input_interface as InIf, 94 | output_interface as OutIf, 95 | ttl as IPTTL, 96 | tcp_flags as TCPFlags, 97 | source_asn as SrcAS, 98 | destination_asn as DstAS 99 | FROM %%from%% 100 | ) 101 | """# 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /cmd/portmirror/iface.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "strings" 7 | "sync" 8 | "sync/atomic" 9 | 10 | "github.com/gopacket/gopacket" 11 | "github.com/gopacket/gopacket/afpacket" 12 | "github.com/gopacket/gopacket/layers" 13 | "github.com/netsampler/goflow2/format" 14 | flowpb "github.com/netsampler/goflow2/pb" 15 | "github.com/netsampler/goflow2/transport" 16 | "github.com/sirupsen/logrus" 17 | "github.com/vishvananda/netlink" 18 | ) 19 | 20 | type tapInterface struct { 21 | name string 22 | ifIndex int 23 | isRX bool 24 | counter uint64 25 | } 26 | 27 | type tapPair struct { 28 | samplerAddress net.IP 29 | RX *tapInterface 30 | TX *tapInterface 31 | } 32 | 33 | func loadConfig() []*tapPair { 34 | if len(interfaces) == 0 { 35 | logrus.Fatal("please provide interface pairs") 36 | } 37 | 38 | var tapPairs []*tapPair 39 | for _, v := range strings.Split(interfaces, ",") { 40 | if !strings.Contains(v, ":") { 41 | logrus.Fatalf("invalid interface pair given: %q", v) 42 | } 43 | 44 | split := strings.Split(v, ":") 45 | if len(split) != 2 { 46 | logrus.Fatalf("invalid interface pair given: %q", v) 47 | } 48 | 49 | rxName, txName := split[0], split[1] 50 | rxLink, err := netlink.LinkByName(rxName) 51 | if err != nil { 52 | 
logrus.Fatalf("cant find interface: %q", rxName) 53 | } 54 | 55 | if err := netlink.SetPromiscOn(rxLink); err != nil { 56 | logrus.Fatalf("cant set promisc mode on interface: %q", rxName) 57 | } 58 | 59 | txLink, err := netlink.LinkByName(txName) 60 | if err != nil { 61 | logrus.Fatalf("cant find interface: %q", txName) 62 | } 63 | 64 | if err := netlink.SetPromiscOn(txLink); err != nil { 65 | logrus.Fatalf("cant set promisc mode on interface: %q", txName) 66 | } 67 | 68 | t := &tapPair{ 69 | samplerAddress: samplerAddress, 70 | RX: &tapInterface{ 71 | name: rxName, 72 | isRX: true, 73 | ifIndex: rxLink.Attrs().Index, 74 | }, 75 | TX: &tapInterface{ 76 | name: txName, 77 | ifIndex: txLink.Attrs().Index, 78 | }, 79 | } 80 | 81 | tapPairs = append(tapPairs, t) 82 | } 83 | 84 | return tapPairs 85 | } 86 | 87 | func (ti *tapInterface) Worker(ctx context.Context, startGroup *sync.WaitGroup, endGroup *sync.WaitGroup, t *transport.Transport) { 88 | defer endGroup.Done() 89 | 90 | fmt, err := format.FindFormat(ctx, "pb") 91 | if err != nil { 92 | logrus.Fatalf("fetching formatter: %v", err) 93 | } 94 | 95 | handle, err := afpacket.NewTPacket( 96 | afpacket.OptInterface(ti.name), 97 | afpacket.SocketRaw, 98 | afpacket.TPacketVersion3, 99 | ) 100 | if err != nil { 101 | logrus.Fatalf("opening interface %q: %v", ti.name, err) 102 | } 103 | err = handle.SetFanout(afpacket.FanoutLoadBalance, uint16(fanoutBase*ti.ifIndex)) 104 | if err != nil { 105 | logrus.Fatalf("setting fanout on interface %q: %v", ti.name, err) 106 | } 107 | defer handle.Close() 108 | 109 | startGroup.Done() 110 | startGroup.Wait() 111 | 112 | var flowDirection uint32 113 | var inIf, outIf uint32 114 | if ti.isRX { 115 | flowDirection = 0 // Inbound traffic because this is a RX Mirror interface 116 | inIf = uint32(ti.ifIndex) 117 | } else { 118 | flowDirection = 1 // Outbound traffic because this is a TX Mirror interface 119 | outIf = uint32(ti.ifIndex) 120 | } 121 | 122 | for { 123 | select { 124 | case 
<-ctx.Done(): 125 | return 126 | default: 127 | } 128 | 129 | data, ci, _ := handle.ZeroCopyReadPacketData() 130 | if atomic.AddUint64(&ti.counter, 1)%uint64(sampleRate) != 0 { 131 | continue 132 | } 133 | 134 | info := readFrameInfo(gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.Default)) 135 | msg := &flowpb.FlowMessage{ 136 | Type: flowpb.FlowMessage_SFLOW_5, 137 | TimeReceived: uint64(ci.Timestamp.Unix()), 138 | SequenceNum: info.SeqNum, 139 | SamplingRate: uint64(sampleRate), 140 | FlowDirection: flowDirection, 141 | SamplerAddress: samplerAddress, 142 | TimeFlowStart: uint64(ci.Timestamp.Unix()), 143 | TimeFlowEnd: uint64(ci.Timestamp.Unix()), 144 | Bytes: uint64(ci.Length), 145 | Packets: 1, 146 | SrcAddr: info.SrcIP, 147 | DstAddr: info.DstIP, 148 | Etype: info.EthernetType, 149 | Proto: info.Protocol, 150 | SrcPort: info.SrcPort, 151 | DstPort: info.DstPort, 152 | InIf: inIf, 153 | OutIf: outIf, 154 | SrcMac: macToUint64(info.SrcMAC), 155 | DstMac: macToUint64(info.DstMAC), 156 | IpTos: uint32(info.IPTOS), 157 | IpTtl: uint32(info.IPTTL), 158 | TcpFlags: info.TCPFlags, 159 | Ipv6FlowLabel: info.FlowLabel, 160 | } 161 | 162 | key, value, err := fmt.Format(msg) 163 | if err != nil { 164 | return 165 | } 166 | 167 | if err := t.Send(key, value); err != nil { 168 | return 169 | } 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /doc/ingest.md: -------------------------------------------------------------------------------- 1 | # Ingest 2 | 3 | ## sFlow vs Netflow/IPFIX 4 | 5 | **sFlow** is a very simple protocol. In its simplest form, every n-th packet is sampled and the packet header is sent 6 | to the collector. That's it. The rate of samples sent to the collector and the bandwidth required is very predictable 7 | and proportional to observed pps. 
8 | 9 | **Netflow/IPFIX** also samples every n-th packet, but pre-aggregates data on the network device, typically identified by 10 | its 5-tuple, and exports metadata for all active flows at regular intervals. Netflow aggregation is stateful and the 11 | device needs to maintain a flow table. This is particularly useful for use cases that care about individual flows 12 | (connections), like network security monitoring. As long as most packets belong to a small number of flows, much fewer 13 | samples are sent to the collector at the same sampling rate. 14 | 15 | Of course, pre-aggregation means that we lose data about the individual packets. This is fine for use cases like traffic 16 | accounting, but has disadvantages for observability: 17 | 18 | 19 | * Resolution is inherently limited by the aggregation interval (flow timeout). The lower end for this is typically 10s 20 | or more - you couldn't distinguish a 1s burst at 100 Gbps from a 10s burst at 10 Gbps. 21 | 22 | * Does this flow consist of only SYN packets? SYN-ACKs? A mixture of both? We can't tell, because the TCP flags in the 23 | flow metadata are a union of all individual packet's TCP flags. 24 | 25 | * In adversarial network conditions, like DDoS attacks using random source IP and ports, each packet can represent a new 26 | flow. This can quickly fill up the flow table, resulting in dropped flows, losing visibility in a situation where it 27 | would be particularly useful. The rate of samples sent to the collector is hard to reason about, since it is 28 | implementation-specific and depends on flow cardinality, table size and timeouts. 29 | 30 | * Maintaining the stateful flow table in the router is very expensive, especially at high rates. Modern column stores 31 | like ClickHouse are extremely efficient at aggregating data in arbitrary dimensions - there's no need to do it on the router. 32 | 33 | If possible, we recommend using sFlow. For a 10 Gbps link, a typical sampling rate is 1:2000. 
34 | A worst-case flood at line rate would generate 7kpps of sFlow samples. 35 | 36 | If Netflow/IPFIX is used, make sure to pick an appropriate sampling rate and flow table size for worst-case workloads. 37 | There are many different implementations that perform very differently - refer to vendor documentation for specifics. 38 | 39 | Depending on your aggregation interval, you may want to set a minimum display interval in the NetMeta config: 40 | 41 | dashboardDisplay: minInterval: "15s" 42 | 43 | ## Host sFlow collector 44 | 45 | We recommend [hsflowd](https://github.com/sflow/host-sflow) for host-based flow collection. 46 | 47 | Example /etc/hsflowd.conf config: 48 | 49 | sflow { 50 | collector { ip=flowmon.example.com udpport=6343 } 51 | nflog { group = 5 probability = 0.0025 } 52 | } 53 | 54 | You need to create iptables (or nftables) rules that send samples to hsflowd's nflog group. 55 | An example config for plain iptables looks like this: 56 | 57 | ```bash 58 | # hsflowd sampling. Probability needs to match /etc/hsflowd.conf (it will be used to calculate sampling rate). 59 | 60 | MOD_STATISTIC="-m statistic --mode random --probability 0.0025" 61 | NFLOG_CONFIG="--nflog-group 5 --nflog-prefix SFLOW" 62 | INTERFACE=eno1 63 | 64 | iptables -t raw -I PREROUTING -i $INTERFACE -j NFLOG $MOD_STATISTIC $NFLOG_CONFIG 65 | iptables -t nat -I POSTROUTING -o $INTERFACE -j NFLOG $MOD_STATISTIC $NFLOG_CONFIG 66 | iptables -t nat -I OUTPUT -o $INTERFACE -j NFLOG $MOD_STATISTIC $NFLOG_CONFIG 67 | ip6tables -t raw -I PREROUTING -i $INTERFACE -j NFLOG $MOD_STATISTIC $NFLOG_CONFIG 68 | ip6tables -t nat -I POSTROUTING -o $INTERFACE -j NFLOG $MOD_STATISTIC $NFLOG_CONFIG 69 | ip6tables -t nat -I OUTPUT -o $INTERFACE -j NFLOG $MOD_STATISTIC $NFLOG_CONFIG 70 | ``` 71 | 72 | For Linux hosts, there's a custom agent on the roadmap that directly pushes to Kafka to avoid the sFlow detour 73 | and get better data that includes the flow direction. 
74 | In the meantime, hsflowd is your best bet for collecting samples from a host. 75 | 76 | ## Port Mirror collector 77 | 78 | If you have an interface pair that receives a copy of your traffic like a port mirror or fibre tap, you can use 79 | NetMeta's integrated collector to directly sample traffic without requiring an sFlow collector. 80 | 81 | You can either deploy the collector to a remote host, or have it deployed automatically on your monitoring host: 82 | 83 | ```cue 84 | netmeta: config: { 85 | [...] 86 | portMirror: { 87 | interfaces: "tap_rx:tap_tx,tap2_rx:tap2_tx" 88 | sampleRate: 100 89 | } 90 | } 91 | ``` 92 | 93 | You have to configure which interfaces the collector should listen on. You can have multiple pairs by 94 | separating them with a comma. You can also configure the sample rate. 95 | -------------------------------------------------------------------------------- /.aspect/bazelrc/correctness.bazelrc: -------------------------------------------------------------------------------- 1 | # Do not upload locally executed action results to the remote cache. 2 | # This should be the default for local builds so local builds cannot poison the remote cache. 3 | # It should be flipped to `--remote_upload_local_results` on CI 4 | # by using `--bazelrc=.aspect/bazelrc/ci.bazelrc`. 5 | # Docs: https://bazel.build/reference/command-line-reference#flag--remote_upload_local_results 6 | build --noremote_upload_local_results 7 | 8 | # Don't allow network access for build actions in the sandbox. 9 | # Ensures that you don't accidentally make non-hermetic actions/tests which depend on remote 10 | # services. 11 | # Developers should tag targets with `tags=["requires-network"]` to opt-out of the enforcement. 12 | # Docs: https://bazel.build/reference/command-line-reference#flag--sandbox_default_allow_network 13 | build --sandbox_default_allow_network=false 14 | 15 | # Warn if a test's timeout is significantly longer than the test's actual execution time. 
16 | # Bazel's default for test_timeout is medium (5 min), but most tests should instead be short (1 min). 17 | # While a test's timeout should be set such that it is not flaky, a test that has a highly 18 | # over-generous timeout can hide real problems that crop up unexpectedly. 19 | # For instance, a test that normally executes in a minute or two should not have a timeout of 20 | # ETERNAL or LONG as these are much, much too generous. 21 | # Docs: https://bazel.build/docs/user-manual#test-verbose-timeout-warnings 22 | test --test_verbose_timeout_warnings 23 | 24 | # Allow the Bazel server to check directory sources for changes. Ensures that the Bazel server 25 | # notices when a directory changes, if you have a directory listed in the srcs of some target. 26 | # Recommended when using 27 | # [copy_directory](https://github.com/bazel-contrib/bazel-lib/blob/main/docs/copy_directory.md) and 28 | # [rules_js](https://github.com/aspect-build/rules_js) since npm package are source directories 29 | # inputs to copy_directory actions. 30 | # Docs: https://bazel.build/reference/command-line-reference#flag--host_jvm_args 31 | startup --host_jvm_args=-DBAZEL_TRACK_SOURCE_DIRECTORIES=1 32 | 33 | # Allow exclusive tests to run in the sandbox. Fixes a bug where Bazel doesn't enable sandboxing for 34 | # tests with `tags=["exclusive"]`. 35 | # Docs: https://bazel.build/reference/command-line-reference#flag--incompatible_exclusive_test_sandboxed 36 | test --incompatible_exclusive_test_sandboxed 37 | 38 | # Use a static value for `PATH` and does not inherit `LD_LIBRARY_PATH`. Doesn't let environment 39 | # variables like `PATH` sneak into the build, which can cause massive cache misses when they change. 40 | # Use `--action_env=ENV_VARIABLE` if you want to inherit specific environment variables from the 41 | # client, but note that doing so can prevent cross-user caching if a shared cache is used. 
42 | # Docs: https://bazel.build/reference/command-line-reference#flag--incompatible_strict_action_env 43 | build --incompatible_strict_action_env 44 | 45 | # Propagate tags from a target declaration to the actions' execution requirements. 46 | # Ensures that tags applied in your BUILD file, like `tags=["no-remote"]` 47 | # get propagated to actions created by the rule. 48 | # Without this option, you rely on rules authors to manually check the tags you passed 49 | # and apply relevant ones to the actions they create. 50 | # See https://github.com/bazelbuild/bazel/issues/8830 for details. 51 | # Docs: https://bazel.build/reference/command-line-reference#flag--experimental_allow_tags_propagation 52 | build --experimental_allow_tags_propagation 53 | fetch --experimental_allow_tags_propagation 54 | query --experimental_allow_tags_propagation 55 | 56 | # Do not automatically create `__init__.py` files in the runfiles of Python targets. Fixes the wrong 57 | # default that comes from Google's internal monorepo by using `__init__.py` to delimit a Python 58 | # package. Precisely, when a `py_binary` or `py_test` target has `legacy_create_init` set to `auto (the 59 | # default), it is treated as false if and only if this flag is set. See 60 | # https://github.com/bazelbuild/bazel/issues/10076. 61 | # Docs: https://bazel.build/reference/command-line-reference#flag--incompatible_default_to_explicit_init_py 62 | build --incompatible_default_to_explicit_init_py 63 | 64 | # Set default value of `allow_empty` to `False` in `glob()`. This prevents a common mistake when 65 | # attempting to use `glob()` to match files in a subdirectory that is opaque to the current package 66 | # because it contains a BUILD file. See https://github.com/bazelbuild/bazel/issues/8195. 
67 | # Docs: https://bazel.build/reference/command-line-reference#flag--incompatible_disallow_empty_glob 68 | common --incompatible_disallow_empty_glob 69 | 70 | # Always download coverage files for tests from the remote cache. By default, coverage files are not 71 | # downloaded on test result cache hits when --remote_download_minimal is enabled, making it impossible 72 | # to generate a full coverage report. 73 | # Docs: https://bazel.build/reference/command-line-reference#flag--experimental_fetch_all_coverage_outputs 74 | # detching remote cache results 75 | test --experimental_fetch_all_coverage_outputs 76 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/clickhouse/static_files.cue: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | schema "github.com/monogon-dev/netmeta/deploy/single-node/schema" 5 | ) 6 | 7 | // curl -s https://www.iana.org/assignments/protocol-numbers/protocol-numbers-1.csv | awk -F ',' '{ print $1 "\t" $2 }' 8 | // plus some manual post-processing. Most of them are never seen on the wire, but doesn't hurt to have the full list. 
9 | _files: IPProtocols: cfg: { 10 | layout: flat: null 11 | structure: { 12 | id: name: "ID" 13 | attribute: { 14 | name: "Name" 15 | type: "String" 16 | null_value: null 17 | } 18 | } 19 | } 20 | _files: IPProtocols: data: #""" 21 | 0 NULL 22 | 1 ICMP 23 | 2 IGMP 24 | 3 GGP 25 | 4 IPv4 26 | 5 ST 27 | 6 TCP 28 | 7 CBT 29 | 8 EGP 30 | 9 IGP 31 | 10 BBN-RCC-MON 32 | 11 NVP-II 33 | 12 PUP 34 | 14 EMCON 35 | 15 XNET 36 | 16 CHAOS 37 | 17 UDP 38 | 18 MUX 39 | 19 DCN-MEAS 40 | 20 HMP 41 | 21 PRM 42 | 22 XNS-IDP 43 | 23 TRUNK-1 44 | 24 TRUNK-2 45 | 25 LEAF-1 46 | 26 LEAF-2 47 | 27 RDP 48 | 28 IRTP 49 | 29 ISO-TP4 50 | 30 NETBLT 51 | 31 MFE-NSP 52 | 32 MERIT-INP 53 | 33 DCCP 54 | 34 3PC 55 | 35 IDPR 56 | 36 XTP 57 | 37 DDP 58 | 38 IDPR-CMTP 59 | 39 TP++ 60 | 40 IL 61 | 41 IPv6 62 | 42 SDRP 63 | 43 IPv6-Route 64 | 44 IPv6-Frag 65 | 45 IDRP 66 | 46 RSVP 67 | 47 GRE 68 | 48 DSR 69 | 49 BNA 70 | 50 ESP 71 | 51 AH 72 | 52 I-NLSP 73 | 54 NARP 74 | 55 MOBILE 75 | 56 TLSP 76 | 57 SKIP 77 | 58 IPv6-ICMP 78 | 59 IPv6-NoNxt 79 | 60 IPv6-Opts 80 | 62 CFTP 81 | 64 SAT-EXPAK 82 | 65 KRYPTOLAN 83 | 66 RVD 84 | 67 IPPC 85 | 69 SAT-MON 86 | 70 VISA 87 | 71 IPCV 88 | 72 CPNX 89 | 73 CPHB 90 | 74 WSN 91 | 75 PVP 92 | 76 BR-SAT-MON 93 | 77 SUN-ND 94 | 78 WB-MON 95 | 79 WB-EXPAK 96 | 80 ISO-IP 97 | 81 VMTP 98 | 82 SECURE-VMTP 99 | 83 VINES 100 | 84 TTP 101 | 84 IPTM 102 | 85 NSFNET-IGP 103 | 86 DGP 104 | 87 TCF 105 | 88 EIGRP 106 | 89 OSPFIGP 107 | 90 Sprite-RPC 108 | 91 LARP 109 | 92 MTP 110 | 93 AX.25 111 | 94 IPIP 112 | 96 SCC-SP 113 | 97 ETHERIP 114 | 98 ENCAP 115 | 100 GMTP 116 | 101 IFMP 117 | 102 PNNI 118 | 103 PIM 119 | 104 ARIS 120 | 105 SCPS 121 | 106 QNX 122 | 107 A/N 123 | 108 IPComp 124 | 109 SNP 125 | 110 Compaq-Peer 126 | 111 IPX-in-IP 127 | 112 VRRP 128 | 113 PGM 129 | 115 L2TP 130 | 116 DDX 131 | 117 IATP 132 | 118 STP 133 | 119 SRP 134 | 120 UTI 135 | 121 SMP 136 | 123 PTP 137 | 124 ISISv4 138 | 125 FIRE 139 | 126 CRTP 140 | 127 CRUDP 141 | 128 SSCOPMCE 142 | 129 IPLT 143 | 
130 SPS 144 | 131 PIPE 145 | 132 SCTP 146 | 133 FC 147 | 134 RSVP-E2E-IGNORE 148 | 135 Mobility Header 149 | 136 UDPLite 150 | 137 MPLS-in-IP 151 | 138 manet 152 | 139 HIP 153 | 140 Shim6 154 | 141 WESP 155 | 142 ROHC 156 | 143 Ethernet 157 | """# 158 | 159 | // Adapted from https://en.wikipedia.org/wiki/EtherType 160 | // Full list at https://www.iana.org/assignments/ieee-802-numbers/ieee-802-numbers.xhtml 161 | // 162 | // print('\n'.join('\t'.join((str(int(a, 16)), b)) for a, b in (x.strip().split('\t') for x in inf.strip().split('\n')))) 163 | _files: EtherTypes: cfg: { 164 | layout: flat: null 165 | structure: { 166 | id: name: "ID" 167 | attribute: { 168 | name: "Name" 169 | type: "String" 170 | null_value: 0 171 | } 172 | } 173 | } 174 | _files: EtherTypes: data: #""" 175 | 0 NULL 176 | 2048 IPv4 177 | 2054 ARP 178 | 2114 Wake-on-LAN 179 | 8944 AVTP 180 | 8947 IETF TRILL 181 | 8938 Stream Reservation Protocol 182 | 24578 DEC MOP RC 183 | 24579 DECnet Phase IV 184 | 24580 DEC LAT 185 | 32821 RARP 186 | 32923 AppleTalk 187 | 33011 AppleTalk AARP 188 | 33024 IEEE 802.1Q 189 | 33026 SLPP 190 | 33027 VLACP 191 | 33079 IPX 192 | 33284 QNX Qnet 193 | 34525 IPv6 194 | 34824 Ethernet flow control 195 | 34825 LACP 196 | 34841 CobraNet 197 | 34887 MPLS unicast 198 | 34888 MPLS multicast 199 | 34915 PPPoE Discovery 200 | 34916 PPPoE Session 201 | 34939 HomePlug 1.0 MME 202 | 34958 EAP over LAN 203 | 34962 PROFINET Protocol 204 | 34970 HyperSCSI 205 | 34978 ATA over Ethernet 206 | 34980 EtherCAT Protocol 207 | 34984 Q-in-Q S-Tag 208 | 34987 Powerlink 209 | 35000 GOOSE 210 | 35001 GSE 211 | 35002 Sampled Value Transmission 212 | 35007 MikroTik RoMON 213 | 35020 LLDP 214 | 35021 SERCOS III 215 | 35036 WSMP 216 | 35043 IEC62439-2 217 | 35045 MACSEC 218 | 35047 IEEE 802.1ah PBB 219 | 35063 Precision Time Protocol 220 | 35064 NC-SI 221 | 35067 PRP 222 | 35074 ITU-T Y.1731 (OAM) 223 | 35078 FCoE 224 | 35092 FCoE Init 225 | 35093 RDMA RoCE 226 | 35101 TTE 227 | 35119 HSR 228 | 
36864 ECTP 229 | 37120 IEEE 802.1Q double tag 230 | 61889 IEEE 802.1CB 231 | """# 232 | 233 | // Source: https://tools.ietf.org/html/rfc793 234 | _files: TCPFlags: cfg: { 235 | layout: flat: null 236 | structure: { 237 | id: name: "ID" 238 | attribute: { 239 | name: "Name" 240 | type: "String" 241 | null_value: null 242 | } 243 | } 244 | } 245 | _files: TCPFlags: data: #""" 246 | 1 FIN 247 | 2 SYN 248 | 4 RST 249 | 8 PSH 250 | 16 ACK 251 | 32 URG 252 | 64 ECE 253 | 128 CWR 254 | """# 255 | 256 | ClickHouseInstallation: netmeta: spec: configuration: files: { 257 | for k, v in schema.file { 258 | "\(k)": v 259 | } 260 | } -------------------------------------------------------------------------------- /third_party/gazelle/add-prepatching.patch: -------------------------------------------------------------------------------- 1 | From 6d876e488124d7f0f6d660164c112a1a5d375218 Mon Sep 17 00:00:00 2001 2 | From: Tim Windelschmidt 3 | Date: Wed, 17 Jul 2024 18:27:41 +0200 4 | Subject: [PATCH] Add support for prepatching 5 | 6 | --- 7 | internal/bzlmod/go_deps.bzl | 13 +++++++++++++ 8 | internal/go_repository.bzl | 10 +++++++++- 9 | 2 files changed, 22 insertions(+), 1 deletion(-) 10 | 11 | diff --git a/internal/bzlmod/go_deps.bzl b/internal/bzlmod/go_deps.bzl 12 | index dcd0db3..7170506 100644 13 | --- a/internal/bzlmod/go_deps.bzl 14 | +++ b/internal/bzlmod/go_deps.bzl 15 | @@ -159,6 +159,9 @@ def _get_build_extra_args(path, gazelle_overrides, gazelle_default_attributes): 16 | def _get_patches(path, module_overrides): 17 | return _get_override_or_default(module_overrides, struct(), {}, path, [], "patches") 18 | 19 | +def _get_pre_patches(path, module_overrides): 20 | + return _get_override_or_default(module_overrides, struct(), {}, path, [], "pre_patches") 21 | + 22 | def _get_patch_args(path, module_overrides): 23 | override = _get_override_or_default(module_overrides, struct(), {}, path, None, "patch_strip") 24 | return ["-p{}".format(override)] if override else [] 25 | 
@@ -235,6 +238,7 @@ def _process_gazelle_override(gazelle_override_tag): 26 | def _process_module_override(module_override_tag): 27 | return struct( 28 | patches = module_override_tag.patches, 29 | + pre_patches = module_override_tag.pre_patches, 30 | patch_strip = module_override_tag.patch_strip, 31 | ) 32 | 33 | @@ -243,6 +247,7 @@ def _process_archive_override(archive_override_tag): 34 | urls = archive_override_tag.urls, 35 | sha256 = archive_override_tag.sha256, 36 | strip_prefix = archive_override_tag.strip_prefix, 37 | + pre_patches = archive_override_tag.pre_patches, 38 | patches = archive_override_tag.patches, 39 | patch_strip = archive_override_tag.patch_strip, 40 | ) 41 | @@ -611,6 +616,7 @@ def _go_deps_impl(module_ctx): 42 | "build_directives": _get_directives(path, gazelle_overrides, gazelle_default_attributes), 43 | "build_file_generation": _get_build_file_generation(path, gazelle_overrides, gazelle_default_attributes), 44 | "build_extra_args": _get_build_extra_args(path, gazelle_overrides, gazelle_default_attributes), 45 | + "pre_patches": _get_pre_patches(path, module_overrides), 46 | "patches": _get_patches(path, module_overrides), 47 | "patch_args": _get_patch_args(path, module_overrides), 48 | "debug_mode": debug_mode, 49 | @@ -622,6 +628,7 @@ def _go_deps_impl(module_ctx): 50 | "urls": archive_override.urls, 51 | "strip_prefix": archive_override.strip_prefix, 52 | "sha256": archive_override.sha256, 53 | + "pre_patches": _get_pre_patches(path, archive_overrides), 54 | "patches": _get_patches(path, archive_overrides), 55 | "patch_args": _get_patch_args(path, archive_overrides), 56 | }) 57 | @@ -775,6 +782,9 @@ _archive_override_tag = tag_class( 58 | SHA-256 sum of the downloaded archive. 
When set, Bazel will verify the archive 59 | against this sum before extracting it.""", 60 | ), 61 | + "pre_patches": attr.label_list( 62 | + doc = "A list of patches to apply to the repository before gazelle runs.", 63 | + ), 64 | "patches": attr.label_list( 65 | doc = "A list of patches to apply to the repository *after* gazelle runs.", 66 | ), 67 | @@ -813,6 +823,9 @@ _module_override_tag = tag_class( 68 | extension within this Bazel module.""", 69 | mandatory = True, 70 | ), 71 | + "pre_patches": attr.label_list( 72 | + doc = "A list of patches to apply to the repository before gazelle runs.", 73 | + ), 74 | "patches": attr.label_list( 75 | doc = "A list of patches to apply to the repository *after* gazelle runs.", 76 | ), 77 | diff --git a/internal/go_repository.bzl b/internal/go_repository.bzl 78 | index 48a9d14..e3efa5b 100644 79 | --- a/internal/go_repository.bzl 80 | +++ b/internal/go_repository.bzl 81 | @@ -286,6 +286,11 @@ def _go_repository_impl(ctx): 82 | if result.return_code: 83 | fail("%s: %s" % (ctx.name, result.stderr)) 84 | 85 | + # TODO(lorenz): Replace this with patch() once the patches argument no longer gets merged with 86 | + # the attribute pulled from ctx. 87 | + for p in ctx.attr.pre_patches: 88 | + ctx.patch(p, 1) 89 | + 90 | # Repositories are fetched. Determine if build file generation is needed. 91 | build_file_names = ctx.attr.build_file_name.split(",") 92 | existing_build_file = "" 93 | @@ -623,7 +628,10 @@ go_repository = repository_rule( 94 | prefixed with `#` automatically. A common use case is to pass a list of 95 | Gazelle directives.""", 96 | ), 97 | - 98 | + # Patches to apply before running gazelle. 99 | + "pre_patches": attr.label_list( 100 | + doc = "A list of patches to apply to the repository before gazelle runs.", 101 | + ), 102 | # Patches to apply after running gazelle. 
103 | "patches": attr.label_list( 104 | doc = "A list of patches to apply to the repository after gazelle runs.", 105 | -- 106 | 2.44.1 107 | -------------------------------------------------------------------------------- /deploy/single-node/schema/functions.cue: -------------------------------------------------------------------------------- 1 | package schema 2 | 3 | import "strings" 4 | 5 | import "list" 6 | 7 | function: HostToString: { 8 | arguments: ["Sampler", "Host"] 9 | query: "dictGetStringOrDefault('HostNames', 'Description', (IPv6NumToString(Sampler), Host), IPv6ToString(Host))" 10 | } 11 | 12 | function: SamplerToString: { 13 | arguments: ["Sampler"] 14 | query: "coalesce(dictGet('SamplerConfig', 'Description', IPv6NumToString(Sampler)), IPv6ToString(Sampler))" 15 | } 16 | 17 | function: ASNToString: { 18 | arguments: ["ASN"] 19 | query: "substring(dictGetString('autnums', 'name', toUInt64(ASN)), 1, 25) || ' AS' || toString(ASN)" 20 | } 21 | 22 | function: VLANToString: { 23 | arguments: ["Sampler", "VLAN"] 24 | query: "dictGetStringOrDefault('VlanNames', 'Description', (IPv6NumToString(Sampler), VLAN), VLAN)" 25 | } 26 | 27 | function: InterfaceToString: { 28 | arguments: ["Sampler", "Interface"] 29 | query: #""" 30 | if( 31 | isNull(dictGetOrNull('InterfaceNames', 'Description', (IPv6NumToString(Sampler), Interface))), 32 | SamplerToString(Sampler) || ' - ' || toString(Interface), 33 | SamplerToString(Sampler) || ' - ' || toString(Interface) || ' [' || 34 | dictGetString('InterfaceNames', 'Description', (IPv6NumToString(Sampler), Interface)) || ']' 35 | ) 36 | """# 37 | } 38 | 39 | function: isIncomingFlow: { 40 | arguments: ["SamplerAddress", "SrcAddr", "DstAddr", "SrcAS", "DstAS", "FlowDirection"] 41 | 42 | #ColumnExpression: { 43 | _function_column_handler: { 44 | SamplerAddressInRange: "isIPAddressInRange(toString(SamplerAddress, toIPv6Net('\(_strValue)'))" 45 | SrcAddrInRange: "isIPAddressInRange(toString(SrcAddr, toIPv6Net('\(_strValue)'))" 46 
| DstAddrInRange: "isIPAddressInRange(toString(DstAddr, toIPv6Net('\(_strValue)'))" 47 | } 48 | _function_columns: [ for k, _ in _function_column_handler {k}] 49 | 50 | _valid: true & (list.Contains(arguments, column) | list.Contains(_function_columns, column)) 51 | column: string 52 | value: string | int 53 | _strValue: "\(value)" 54 | 55 | #out: string | *"toString(\(column)) == '\(_strValue)'" 56 | 57 | // if the value is an int, we can skip toString 58 | if (value & int) != _|_ { 59 | #out: "\(column) == \(_strValue)" 60 | } 61 | 62 | // if it is a special handler 63 | if list.Contains(_function_columns, column) { 64 | #out: _function_column_handler[column] 65 | } 66 | } 67 | 68 | #SamplerExpression: { 69 | sampler: string 70 | expressions: [...string] 71 | #out: """ 72 | if( 73 | toString(SamplerAddress) == '\(sampler)', 74 | \(strings.Join(expressions, " OR ")), 75 | NULL 76 | ) 77 | """ 78 | } 79 | 80 | // dear reader I am very sorry for this monstrosity, but when I used a pretty version with placeholders, 81 | // CUE just stackoverflowed. I hope that with a future version this can be pretty again, but for now 82 | // we have to live with it. 
83 | _expressions: [ 84 | for device, cfg in #Config.sampler if len(cfg.isIncomingFlow) != 0 { 85 | 86 | // join the column expressions together 87 | (#SamplerExpression & { 88 | sampler: device 89 | _valid: true & len(expressions) > 0 90 | 91 | expressions: [ for e in cfg.isIncomingFlow if len(e) != 0 { 92 | // for each instance create the column expressions and join them by AND 93 | _expressions: [ for c, v in e { 94 | 95 | // create the sql expression for this column 96 | (#ColumnExpression & { 97 | column: c 98 | value: v 99 | }).#out 100 | }] 101 | 102 | "( " + strings.Join(_expressions, " AND ") + " )" 103 | }] 104 | }).#out 105 | }, 106 | 107 | // Fallback to FlowDirection == 0 108 | "FlowDirection == 0", 109 | ] 110 | 111 | query: "coalesce( \(strings.Join(_expressions, ",\n")) )" 112 | } 113 | 114 | function: toIPv6Net: { 115 | arguments: ["Net"] 116 | query: #""" 117 | if( 118 | isIPv4String(splitByChar('/', Net)[1]), 119 | '::ffff:' || arrayStringConcat( 120 | arrayMap((v, i) -> if(i, toString(toInt8(v) + 96), v), splitByChar('/', Net), [false, true]), 121 | '/'), 122 | Net 123 | ) 124 | """# 125 | } 126 | 127 | function: IPv6ToString: { 128 | arguments: ["Address"] 129 | query: #""" 130 | if( 131 | startsWith(reinterpret(Address, 'FixedString(16)'), repeat(unhex('00'), 10) || repeat(unhex('FF'), 2)), 132 | IPv4NumToString(CAST(reinterpret(reverse(substring(reinterpret(Address, 'FixedString(16)'), 13, 16)), 'UInt32') AS IPv4)), 133 | IPv6NumToString(Address) 134 | ) 135 | """# 136 | } 137 | 138 | function: ParseGoFlowAddress: { 139 | arguments: ["Address"] 140 | query: #""" 141 | if( 142 | -- endsWith IPv6v4NullPadding 143 | endsWith(reinterpret(Address, 'FixedString(16)'), repeat(unhex('00'), 12)), 144 | -- prepend ::ffff: 145 | CAST(toFixedString(repeat(unhex('00'), 10) || repeat(unhex('FF'), 2) || substring(reinterpret(Address, 'FixedString(16)'), 1, 4), 16) AS IPv6), 146 | CAST(Address AS IPv6) 147 | ) 148 | """# 149 | } 150 | 151 | function: 
switchEndian: { 152 | arguments: ["s"] 153 | query: 154 | #""" 155 | unhex( 156 | arrayStringConcat( 157 | arrayMap(x -> substring(hex(s), x, 2), reverse(range(1, length(s) * 2, 2))) 158 | ) 159 | ) 160 | """# 161 | } 162 | 163 | function: ParseFastNetMonAddress: { 164 | arguments: ["Address"] 165 | query: #""" 166 | if( 167 | length(Address) == 4, 168 | IPv4ToIPv6(CAST(reinterpret(switchEndian(Address), 'UInt32') AS IPv4)), 169 | CAST(toFixedString(Address, 16) AS IPv6) 170 | ) 171 | """# 172 | } 173 | 174 | // Wrapper for returning the ColumnIndex of a table 175 | // currently required to select a field inside a SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection 176 | // see isIncomingFlow for usage 177 | function: ColumnIndex: { 178 | arguments: ["Database", "Table", "Column"] 179 | query: #""" 180 | (SELECT position from system.columns where database = Database and table = Table and name = Column) 181 | """# 182 | } 183 | -------------------------------------------------------------------------------- /cmd/reconciler/reconciler.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "database/sql" 7 | "encoding/hex" 8 | "fmt" 9 | "github.com/ClickHouse/clickhouse-go/v2" 10 | "log" 11 | ) 12 | 13 | type Reconciler struct { 14 | conn clickhouse.Conn 15 | cfg *Config 16 | } 17 | 18 | func (r *Reconciler) Reconcile() error { 19 | for _, function := range r.cfg.Functions { 20 | if err := r.reconcileFunction(function); err != nil { 21 | return err 22 | } 23 | } 24 | 25 | for _, table := range r.cfg.SourceTables { 26 | if err := r.reconcileTable(table); err != nil { 27 | return err 28 | } 29 | } 30 | 31 | for _, view := range r.cfg.MaterializedViews { 32 | if err := r.reconcileMaterializedView(view); err != nil { 33 | return err 34 | } 35 | } 36 | 37 | return nil 38 | } 39 | 40 | func (r *Reconciler) reconcileTable(t Table) error { 41 | currentQuery, err := 
r.fetchTable(t.Name) 42 | if err != nil && err != sql.ErrNoRows { 43 | return err 44 | } 45 | 46 | createQuery, err := t.CreateQuery(r.cfg.Database) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | if currentQuery != "" { 52 | // fetchTable returns the CREATE TABLE statement 53 | equal, err := r.isEqual(createQuery, currentQuery) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | // current table is equal -> skip 59 | if equal { 60 | log.Printf("table %q is equal: skipping", t.Name) 61 | return nil 62 | } 63 | 64 | log.Printf("table %q is not equal: dropping", t.Name) 65 | if err := r.conn.Exec(context.Background(), t.DropQuery(r.cfg.Database)); err != nil { 66 | return err 67 | } 68 | } 69 | 70 | log.Printf("table %q is missing: creating", t.Name) 71 | // create missing view 72 | return r.conn.Exec(context.Background(), createQuery) 73 | } 74 | 75 | func (r *Reconciler) reconcileMaterializedView(mv MaterializedView) error { 76 | currentHashString, err := r.fetchMaterializedView(mv.Name) 77 | if err != nil && err != sql.ErrNoRows { 78 | return err 79 | } 80 | 81 | newHash := sha256.Sum256([]byte(mv.CreateQuery(r.cfg.Database))) 82 | newHashString := hex.EncodeToString(newHash[:]) 83 | 84 | if err != sql.ErrNoRows { 85 | // current mv is equal -> skip 86 | if newHashString == currentHashString { 87 | log.Printf("materializedview %q is equal: skipping", mv.Name) 88 | return nil 89 | } 90 | 91 | log.Printf("materializedview %q is not equal: dropping", mv.Name) 92 | if err := r.conn.Exec(context.Background(), mv.DropQuery(r.cfg.Database)); err != nil { 93 | return err 94 | } 95 | } 96 | 97 | log.Printf("materializedview %q is missing: creating", mv.Name) 98 | // create missing view 99 | if err := r.conn.Exec(context.Background(), mv.CreateQuery(r.cfg.Database)); err != nil { 100 | return err 101 | } 102 | 103 | return r.conn.Exec(context.Background(), fmt.Sprintf("ALTER TABLE %s.%s MODIFY COMMENT ?", r.cfg.Database, mv.Name), newHashString) 104 | } 105 | 106 | 
func (r *Reconciler) reconcileFunction(f Function) error { 107 | currentQuery, err := r.fetchFunction(f.Name) 108 | if err != nil && err != sql.ErrNoRows { 109 | return err 110 | } 111 | 112 | if currentQuery != "" { 113 | // fetchFunction returns the original CREATE FUNCTION statement 114 | equal, err := r.isEqual(f.CreateQuery(), currentQuery) 115 | if err != nil { 116 | return err 117 | } 118 | 119 | // current function is equal -> skip 120 | if equal { 121 | log.Printf("function %q is equal: skipping", f.Name) 122 | return nil 123 | } 124 | 125 | log.Printf("function %q is not equal: replacing", f.Name) 126 | // replace function 127 | return r.conn.Exec(context.Background(), f.CreateOrReplaceQuery()) 128 | } 129 | 130 | // create missing function 131 | return r.conn.Exec(context.Background(), f.CreateQuery()) 132 | } 133 | 134 | func (r *Reconciler) fetchTable(name string) (string, error) { 135 | row := r.conn.QueryRow(context.Background(), 136 | "SELECT create_table_query FROM system.tables WHERE database = ? AND name = ?", 137 | r.cfg.Database, name) 138 | if err := row.Err(); err != nil { 139 | return "", err 140 | } 141 | 142 | var createTableQuery string 143 | if err := row.Scan(&createTableQuery); err != nil { 144 | return "", err 145 | } 146 | 147 | return createTableQuery, nil 148 | } 149 | 150 | func (r *Reconciler) fetchFunction(name string) (string, error) { 151 | row := r.conn.QueryRow(context.Background(), 152 | "SELECT create_query FROM system.functions WHERE name = ?", 153 | name) 154 | if err := row.Err(); err != nil { 155 | return "", err 156 | } 157 | 158 | var createQuery string 159 | if err := row.Scan(&createQuery); err != nil { 160 | return "", err 161 | } 162 | 163 | return createQuery, nil 164 | } 165 | 166 | func (r *Reconciler) fetchMaterializedView(name string) (string, error) { 167 | row := r.conn.QueryRow(context.Background(), 168 | "SELECT comment FROM system.tables WHERE database = ? 
AND name = ?", 169 | "default", name) 170 | if err := row.Err(); err != nil { 171 | return "", err 172 | } 173 | 174 | var comment string 175 | if err := row.Scan(&comment); err != nil { 176 | return "", err 177 | } 178 | 179 | return comment, nil 180 | } 181 | 182 | func (r *Reconciler) formatQuery(query string) (string, error) { 183 | row := r.conn.QueryRow(context.Background(), "SELECT formatQuery(?)", query) 184 | if err := row.Err(); err != nil { 185 | return "", err 186 | } 187 | 188 | var result string 189 | if err := row.Scan(&result); err != nil { 190 | return "", err 191 | } 192 | 193 | return result, nil 194 | } 195 | 196 | func (r *Reconciler) isEqual(want, is string) (bool, error) { 197 | if want == "" { 198 | return false, fmt.Errorf("missing %q", "want") 199 | } 200 | if is == "" { 201 | return false, fmt.Errorf("missing %q", "is") 202 | } 203 | 204 | var err error 205 | want, err = r.formatQuery(want) 206 | if err != nil { 207 | return false, fmt.Errorf("formatting %q: %v", "want", err) 208 | } 209 | 210 | is, err = r.formatQuery(is) 211 | if err != nil { 212 | return false, fmt.Errorf("formatting %q: %v", "is", err) 213 | } 214 | 215 | return want == is, nil 216 | } 217 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/monogon-dev/netmeta 2 | 3 | go 1.23 4 | 5 | require ( 6 | cuelang.org/go v0.11.0 7 | github.com/ClickHouse/clickhouse-go/v2 v2.28.3 8 | github.com/emicklei/proto v1.13.2 9 | github.com/gopacket/gopacket v1.3.1 10 | github.com/huandu/go-sqlbuilder v1.32.0 11 | github.com/netsampler/goflow2 v1.3.7 12 | github.com/netsampler/goflow2/v2 v2.2.1 13 | github.com/osrg/gobgp v2.0.0+incompatible 14 | github.com/pressly/goose/v3 v3.22.1 15 | github.com/sirupsen/logrus v1.9.3 16 | github.com/vishvananda/netlink v1.3.0 17 | k8s.io/klog/v2 v2.130.1 18 | ) 19 | 20 | require ( 21 | 
cuelabs.dev/go/oci/ociregistry v0.0.0-20240906074133-82eb438dd565 // indirect 22 | filippo.io/edwards25519 v1.1.0 // indirect 23 | github.com/ClickHouse/ch-go v0.61.5 // indirect 24 | github.com/Shopify/sarama v1.38.1 // indirect 25 | github.com/andybalholm/brotli v1.1.0 // indirect 26 | github.com/antlr4-go/antlr/v4 v4.13.0 // indirect 27 | github.com/beorn7/perks v1.0.1 // indirect 28 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 29 | github.com/cockroachdb/apd/v3 v3.2.1 // indirect 30 | github.com/coder/websocket v1.8.12 // indirect 31 | github.com/davecgh/go-spew v1.1.1 // indirect 32 | github.com/dustin/go-humanize v1.0.1 // indirect 33 | github.com/eapache/go-resiliency v1.3.0 // indirect 34 | github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect 35 | github.com/eapache/queue v1.1.0 // indirect 36 | github.com/elastic/go-sysinfo v1.11.2 // indirect 37 | github.com/elastic/go-windows v1.0.1 // indirect 38 | github.com/go-faster/city v1.0.1 // indirect 39 | github.com/go-faster/errors v0.7.1 // indirect 40 | github.com/go-logr/logr v1.4.1 // indirect 41 | github.com/go-sql-driver/mysql v1.8.1 // indirect 42 | github.com/golang-jwt/jwt/v4 v4.5.0 // indirect 43 | github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect 44 | github.com/golang-sql/sqlexp v0.1.0 // indirect 45 | github.com/golang/protobuf v1.5.4 // indirect 46 | github.com/golang/snappy v0.0.4 // indirect 47 | github.com/google/uuid v1.6.0 // indirect 48 | github.com/hashicorp/errwrap v1.0.0 // indirect 49 | github.com/hashicorp/go-multierror v1.1.1 // indirect 50 | github.com/hashicorp/go-uuid v1.0.3 // indirect 51 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 52 | github.com/huandu/xstrings v1.4.0 // indirect 53 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 54 | github.com/jackc/pgpassfile v1.0.0 // indirect 55 | github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect 56 | github.com/jackc/pgx/v5 v5.7.1 
// indirect 57 | github.com/jackc/puddle/v2 v2.2.2 // indirect 58 | github.com/jcmturner/aescts/v2 v2.0.0 // indirect 59 | github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect 60 | github.com/jcmturner/gofork v1.7.6 // indirect 61 | github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect 62 | github.com/jcmturner/rpc/v2 v2.0.3 // indirect 63 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect 64 | github.com/jonboulle/clockwork v0.4.0 // indirect 65 | github.com/klauspost/compress v1.17.9 // indirect 66 | github.com/libp2p/go-reuseport v0.4.0 // indirect 67 | github.com/mattn/go-isatty v0.0.20 // indirect 68 | github.com/mfridman/interpolate v0.0.2 // indirect 69 | github.com/mfridman/xflag v0.0.0-20240825232106-efb77353e578 // indirect 70 | github.com/microsoft/go-mssqldb v1.7.2 // indirect 71 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect 72 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 73 | github.com/ncruces/go-strftime v0.1.9 // indirect 74 | github.com/opencontainers/go-digest v1.0.0 // indirect 75 | github.com/opencontainers/image-spec v1.1.0 // indirect 76 | github.com/paulmach/orb v0.11.1 // indirect 77 | github.com/pelletier/go-toml/v2 v2.2.3 // indirect 78 | github.com/pierrec/lz4/v4 v4.1.21 // indirect 79 | github.com/pkg/errors v0.9.1 // indirect 80 | github.com/prometheus/client_golang v1.20.0 // indirect 81 | github.com/prometheus/client_model v0.6.1 // indirect 82 | github.com/prometheus/common v0.55.0 // indirect 83 | github.com/prometheus/procfs v0.15.1 // indirect 84 | github.com/protocolbuffers/txtpbfmt v0.0.0-20240823084532-8e6b51fa9bef // indirect 85 | github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect 86 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect 87 | github.com/rogpeppe/go-internal v1.13.1 // indirect 88 | github.com/segmentio/asm v1.2.0 // indirect 89 | github.com/sethvargo/go-retry v0.3.0 // indirect 90 | 
github.com/shopspring/decimal v1.4.0 // indirect 91 | github.com/spf13/cobra v1.8.1 // indirect 92 | github.com/spf13/pflag v1.0.5 // indirect 93 | github.com/tetratelabs/wazero v1.6.0 // indirect 94 | github.com/tursodatabase/libsql-client-go v0.0.0-20240902231107-85af5b9d094d // indirect 95 | github.com/vertica/vertica-sql-go v1.3.3 // indirect 96 | github.com/vishvananda/netns v0.0.4 // indirect 97 | github.com/xdg-go/pbkdf2 v1.0.0 // indirect 98 | github.com/xdg-go/scram v1.1.2 // indirect 99 | github.com/xdg-go/stringprep v1.0.4 // indirect 100 | github.com/ydb-platform/ydb-go-genproto v0.0.0-20240528144234-5d5a685e41f7 // indirect 101 | github.com/ydb-platform/ydb-go-sdk/v3 v3.80.2 // indirect 102 | github.com/ziutek/mymysql v1.5.4 // indirect 103 | go.opentelemetry.io/otel v1.26.0 // indirect 104 | go.opentelemetry.io/otel/trace v1.26.0 // indirect 105 | go.uber.org/multierr v1.11.0 // indirect 106 | golang.org/x/crypto v0.28.0 // indirect 107 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect 108 | golang.org/x/mod v0.21.0 // indirect 109 | golang.org/x/net v0.30.0 // indirect 110 | golang.org/x/oauth2 v0.23.0 // indirect 111 | golang.org/x/sync v0.8.0 // indirect 112 | golang.org/x/sys v0.26.0 // indirect 113 | golang.org/x/text v0.19.0 // indirect 114 | golang.org/x/tools v0.26.0 // indirect 115 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect 116 | google.golang.org/grpc v1.62.1 // indirect 117 | google.golang.org/protobuf v1.34.2 // indirect 118 | gopkg.in/yaml.v2 v2.4.0 // indirect 119 | gopkg.in/yaml.v3 v3.0.1 // indirect 120 | howett.net/plist v1.0.0 // indirect 121 | modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect 122 | modernc.org/mathutil v1.6.0 // indirect 123 | modernc.org/memory v1.8.0 // indirect 124 | modernc.org/sqlite v1.33.0 // indirect 125 | modernc.org/strutil v1.2.0 // indirect 126 | modernc.org/token v1.1.0 // indirect 127 | ) 128 | 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NetMeta 2 |

3 | 4 | github issues 5 | 6 | 7 | github forks 8 | 9 | 10 | github stars 11 | 12 | 13 | github license 14 | 15 |

16 | 17 | | ⚠️ **PRE-RELEASE**: This is a work in progress - please watch this repo for news. | 18 | |-------------------------------------------------------------------------------------------| 19 | 20 | NetMeta is a scalable network observability toolkit optimized for performance. 21 | 22 | Flows are not pre-aggregated and stored with one second resolution. This allows for 23 | queries in arbitrary dimensions with high-fidelity graphs. 24 | 25 | ## ⚡️ Features 26 | 27 | NetMeta captures, aggregates and analyzes events from a variety of data sources: 28 | 29 | * sFlow 30 | * NetFlow/IPFIX 31 | * Port Mirror (AF_PACKET) 32 | * Linux NFLOG (soon) 33 | * Linux conntrack (soon) 34 | * Scamper traces (soon) 35 | * GCP VPC Flow Logs (soon) 36 | * AWS VPC Flow Logs (soon) 37 | 38 | The different pros/cons between these and more information about the ingestion of events can be found [here](doc/ingest.md) 39 | 40 | NetMeta also allows integrating FastNetMon Attack notifications on the Dashboards. 41 | The docs to set up this and other Grafana related settings can be found [here](doc/grafana.md) 42 | 43 | Sampling rate is detected automatically. Different devices with different sampling rates can be mixed. 44 | IPv6 is fully supported throughout the stack. 45 | 46 | ## 👀 Demo 47 | 48 | 49 | 50 | 51 | 52 | ## 💥 API Stability 53 | 54 | NetMeta is **beta software** and subject to change. It exposes the following APIs: 55 | 56 | * The cluster configuration file for single-node deployments. 57 | * ClickHouse SQL schema for raw database access. 58 | * Protobuf schemas for ingestion for writing custom processors. 59 | 60 | Once NetMeta has stabilized, these APIs will be stable and backwards compatible. 61 | 62 | ## 🛠 Deployment 63 | ### [Single-node deployment](deploy/single-node/README.md) 64 | 65 | NetMeta includes a production-ready single node deployment that scales to up to ~100k events/s and billions of database 66 | rows. 
More information can be found [here](deploy/single-node/README.md) 67 | 68 | Ingestion performance is limited by CPU performance and disk bandwidth. 69 | Query performance is limited by disk and memory bandwidth, as well as total amount of available memory for larger 70 | in-memory aggregations. 71 | 72 | Most reads/writes are sequential due to heavy use of batching in all parts of the stack, 73 | and it works fine even on network storage or spinning disks. We recommend local NVMe drives for best performance. 74 | 75 | NetMeta can scale to millions of events per second in multi-node deployments. 76 | 77 | ### Multi-node deployment 78 | 79 | We are currently finalizing the design for multi-node deployments. Please contact us if you're interested in 80 | large-scale deployments - we want your feedback! 81 | 82 | ### Monogon OS 83 | 84 | NetMeta will be a first-class citizen on [Monogon OS](https://monogon.tech/monogon_os.html) - stay tuned! 85 | 86 | ### ☸️ Kubernetes 87 | 88 | NetMeta works on any Kubernetes cluster that supports LoadBalancer and Ingress objects and can provision storage. 89 | It's up to you to carefully read the deployment code and cluster role assignments to make sure it works with your 90 | cluster. 91 | Note that we use two operators, which require cluster-admin permissions since CRDs are global 92 | ([Strimzi](https://strimzi.io/docs/master) for Kafka 93 | and [clickhouse-operator](https://github.com/Altinity/clickhouse-operator)). 94 | 95 | All pieces of NetMeta are installed into a single namespace. By default, this is ``default``, which is 96 | probably not what 97 | you want. 98 | You can change the target namespace in the deployment config. 99 | 100 | Please contact us if you need help porting NetMeta to an existing k8s cluster. 101 | 102 | ## 💼 Support 103 | 104 | [Please contact us](https://monogon.tech/pricing.html) 105 | for support and consulting. If you are using NetMeta in production, we'd love to hear from you! 
106 | 107 | ## 🧩 Related 108 | 109 | NetMeta is powered by a number of great open source projects, we use: 110 | 111 | - [ClickHouse](https://clickhouse.tech) as the main database 112 | - [Kafka](https://kafka.apache.org) as a queue in front of ClickHouse 113 | - [Grafana](https://grafana.com/) with 114 | - [clickhouse-grafana](https://github.com/Vertamedia/clickhouse-grafana) as frontend 115 | - [goflow](https://github.com/cloudflare/goflow) as the sFlow/Netflow collector 116 | - [Strimzi](https://strimzi.io/) to deploy Kafka, 117 | - [clickhouse-operator](https://github.com/Altinity/clickhouse-operator) to deploy ClickHouse, as well as 118 | - [Kubernetes](https://kubernetes.io/) and Rancher's [k3s](https://k3s.io/). 119 | 120 | ## 🏰 Architecture 121 | 122 | ```mermaid 123 | flowchart TD; 124 | sFlow --> goflow 125 | IPFIX --> goflow 126 | Netflow --> goflow 127 | 128 | kafka[Kafka Broker] 129 | clickhouse["ClickHouse Server
MergeTree
(hourly partitions)"] 130 | 131 | goflow --> kafka 132 | ntm-agent --> kafka 133 | kafka --> clickhouse 134 | clickhouse --> |SQL| grafana[Grafana] 135 | asmap --> clickhouse 136 | nexthop --> clickhouse 137 | resolver --> clickhouse 138 | 139 | ``` 140 | 141 | --- 142 | 143 | (C) 2022 [Monogon SE](https://monogon.tech). 144 | 145 | This software is provided "as-is" and 146 | without any express or implied warranties, including, without limitation, the implied warranties of 147 | merchantability and fitness for a particular purpose. 148 | -------------------------------------------------------------------------------- /deploy/single-node/k8s/clickhouse/files.cue: -------------------------------------------------------------------------------- 1 | package clickhouse 2 | 3 | import ( 4 | "strings" 5 | "strconv" 6 | "netmeta.monogon.tech/xml" 7 | ) 8 | 9 | // template for TSV dictionaries 10 | _files: [NAME=string]: { 11 | cfg: { 12 | layout: _ 13 | structure: _ 14 | } 15 | data: _ 16 | 17 | _cfg: yandex: dictionary: { 18 | cfg 19 | 20 | name: NAME 21 | source: [{ 22 | file: { 23 | path: "/etc/clickhouse-server/config.d/\(NAME).tsv" 24 | format: "TSV" 25 | } 26 | settings: format_tsv_null_representation: "NULL" 27 | }] 28 | lifetime: 60 29 | } 30 | } 31 | 32 | // Iterate over all defined files in _files and generate the config files for clickhouse 33 | ClickHouseInstallation: netmeta: spec: configuration: files: { 34 | for k, v in _files { 35 | "\(k).conf": (xml.#Marshal & {in: v._cfg}).out 36 | "\(k).tsv": v.data 37 | } 38 | } 39 | 40 | // Dictionary for user-defined interface name lookup 41 | _files: InterfaceNames: { 42 | data: strings.Join([ for s in #Config.sampler for i in s.interface { 43 | strings.Join([s.device, "\(i.id)", i.description], "\t") 44 | }], "\n") 45 | 46 | cfg: { 47 | layout: complex_key_hashed: null 48 | structure: { 49 | key: [{ 50 | attribute: { 51 | name: "Device" 52 | type: "String" 53 | } 54 | }, { 55 | attribute: { 56 | name: "Index" 57 | 
type: "UInt32" 58 | } 59 | }] 60 | attribute: { 61 | name: "Description" 62 | type: "String" 63 | null_value: null 64 | } 65 | } 66 | } 67 | } 68 | 69 | // Dictionary for user-defined sampler settings lookup 70 | _files: SamplerConfig: { 71 | data: strings.Join([ for s in #Config.sampler { 72 | let samplingRate = [ 73 | if s.samplingRate == 0 { 74 | "NULL" 75 | }, 76 | "\(s.samplingRate)", 77 | ][0] 78 | 79 | let description = [ 80 | if s.description == "" { 81 | "NULL" 82 | }, 83 | "\(s.description)", 84 | ][0] 85 | 86 | strings.Join([s.device, samplingRate, description, strconv.FormatBool(s.anonymizeAddresses)], "\t") 87 | }], "\n") 88 | 89 | cfg: { 90 | layout: complex_key_hashed: null 91 | structure: [{ 92 | key: [{ 93 | attribute: { 94 | name: "Device" 95 | type: "String" 96 | } 97 | }] 98 | }, { 99 | attribute: { 100 | name: "SamplingRate" 101 | type: "Nullable(UInt64)" 102 | null_value: null 103 | } 104 | }, { 105 | attribute: { 106 | name: "Description" 107 | type: "Nullable(String)" 108 | null_value: null 109 | } 110 | }, { 111 | attribute: { 112 | name: "AnonymizeAddresses" 113 | type: "Bool" 114 | null_value: false 115 | } 116 | }] 117 | } 118 | } 119 | 120 | // Dictionary for user-defined vlan name lookup 121 | _files: VlanNames: { 122 | data: strings.Join([ for s in #Config.sampler for v in s.vlan { 123 | strings.Join([s.device, "\(v.id)", v.description], "\t") 124 | }], "\n") 125 | 126 | cfg: { 127 | layout: complex_key_hashed: null 128 | structure: { 129 | key: [{ 130 | attribute: { 131 | name: "Device" 132 | type: "String" 133 | } 134 | }, { 135 | attribute: { 136 | name: "Index" 137 | type: "UInt32" 138 | } 139 | }] 140 | attribute: { 141 | name: "Description" 142 | type: "String" 143 | null_value: null 144 | } 145 | } 146 | } 147 | } 148 | 149 | // Dictionary for user-defined host name lookup 150 | _files: HostNames: { 151 | data: strings.Join([ for s in #Config.sampler for h in s.host { 152 | strings.Join([s.device, h.device, h.description], 
"\t") 153 | }], "\n") 154 | 155 | cfg: { 156 | layout: complex_key_hashed: null 157 | structure: { 158 | key: [{ 159 | attribute: { 160 | name: "Sampler" 161 | type: "String" 162 | } 163 | }, { 164 | attribute: { 165 | name: "Device" 166 | type: "String" 167 | } 168 | }] 169 | attribute: { 170 | name: "Description" 171 | type: "String" 172 | null_value: null 173 | } 174 | } 175 | } 176 | } 177 | 178 | _files: user_autnums: { 179 | data: strings.Join([ for _, e in #Config.userData.autnums { 180 | strings.Join(["\(e.asn)", e.name, e.country], "\t") 181 | }], "\n") 182 | 183 | cfg: { 184 | layout: flat: null 185 | structure: [{ 186 | id: name: "asnum" 187 | }, { 188 | attribute: { 189 | name: "name" 190 | type: "String" 191 | null_value: null 192 | } 193 | }, { 194 | attribute: { 195 | name: "country" 196 | type: "String" 197 | null_value: null 198 | } 199 | }] 200 | } 201 | } 202 | 203 | ClickHouseInstallation: netmeta: spec: configuration: files: "risinfo.conf": (xml.#Marshal & {in: { 204 | yandex: dictionary: { 205 | name: "risinfo" 206 | source: http: { 207 | url: "http://risinfo/rib.tsv" 208 | format: "TabSeparated" 209 | } 210 | lifetime: 3600 211 | layout: ip_trie: access_to_key_from_attributes: true 212 | structure: key: attribute: { 213 | name: "prefix" 214 | type: "String" 215 | } 216 | structure: attribute: { 217 | name: "asnum" 218 | type: "UInt32" 219 | null_value: 0 220 | } 221 | } 222 | }}).out 223 | 224 | ClickHouseInstallation: netmeta: spec: configuration: files: "autnums.conf": (xml.#Marshal & {in: { 225 | yandex: dictionary: { 226 | name: "autnums" 227 | source: clickhouse: { 228 | query: 229 | #""" 230 | SELECT * FROM dictionaries.risinfo_autnums 231 | UNION ALL 232 | SELECT * FROM dictionaries.user_autnums 233 | """# 234 | } 235 | lifetime: 3600 236 | layout: flat: null 237 | structure: [{ 238 | id: name: "asnum" 239 | }, { 240 | attribute: { 241 | name: "name" 242 | type: "String" 243 | null_value: null 244 | } 245 | }, { 246 | attribute: { 247 
| name: "country" 248 | type: "String" 249 | null_value: null 250 | } 251 | }] 252 | } 253 | }}).out 254 | 255 | ClickHouseInstallation: netmeta: spec: configuration: files: "risinfo_autnums.conf": (xml.#Marshal & {in: { 256 | yandex: dictionary: { 257 | name: "risinfo_autnums" 258 | source: http: { 259 | url: "http://risinfo/autnums.tsv" 260 | format: "TabSeparated" 261 | } 262 | lifetime: 86400 263 | layout: flat: null 264 | structure: [{ 265 | id: name: "asnum" 266 | }, { 267 | attribute: { 268 | name: "name" 269 | type: "String" 270 | null_value: null 271 | } 272 | }, { 273 | attribute: { 274 | name: "country" 275 | type: "String" 276 | null_value: null 277 | } 278 | }] 279 | } 280 | }}).out 281 | 282 | ClickHouseInstallation: netmeta: spec: configuration: files: "format_function.xml": (xml.#Marshal & {in: { 283 | yandex: functions: { 284 | type: "executable" 285 | name: "formatQuery" 286 | return_type: "String" 287 | argument: [{ 288 | type: "String" 289 | name: "query" 290 | }] 291 | format: "LineAsString" 292 | command: "clickhouse format --oneline" 293 | execute_direct: "0" 294 | } 295 | }}).out 296 | -------------------------------------------------------------------------------- /deploy/dashboards/NetMeta_Relations.cue: -------------------------------------------------------------------------------- 1 | package dashboards 2 | 3 | import "list" 4 | 5 | _asRelationQueries: { 6 | "Inbound traffic relations (Top 20)": 7 | #""" 8 | SELECT 9 | ASNToString(SrcAS) AS SrcASName, 10 | ASNToString(DstAS) AS DstASName, 11 | (sum(Bytes * SamplingRate) / 1024) as Bytes 12 | FROM flows_raw 13 | WHERE $__timeFilter(TimeReceived) 14 | \#(_filtersWithHost) 15 | AND isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection) 16 | GROUP BY SrcAS, DstAS 17 | ORDER BY Bytes DESC 18 | LIMIT 20 19 | """# 20 | 21 | "Outbound traffic relations (Top 20)": 22 | #""" 23 | SELECT 24 | ASNToString(DstAS) AS DstASName, 25 | ASNToString(SrcAS) AS SrcASName, 26 | 
(sum(Bytes * SamplingRate) / 1024) as Bytes 27 | FROM flows_raw 28 | WHERE $__timeFilter(TimeReceived) 29 | \#(_filtersWithHost) 30 | AND NOT isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection) 31 | GROUP BY SrcAS, DstAS 32 | ORDER BY Bytes DESC 33 | LIMIT 20 34 | """# 35 | 36 | "Inbound traffic relations via interface (Top 30)": 37 | #""" 38 | SELECT 39 | ASNToString(SrcAS) AS SrcASName, 40 | InterfaceToString(SamplerAddress, OutIf) AS OutIfName, 41 | ASNToString(DstAS) AS DstASName, 42 | (sum(Bytes * SamplingRate) / 1024) as Bytes 43 | FROM flows_raw 44 | WHERE $__timeFilter(TimeReceived) 45 | \#(_filtersWithHost) 46 | AND isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection) 47 | GROUP BY SrcAS, DstAS, SamplerAddress, OutIf 48 | ORDER BY Bytes DESC 49 | LIMIT 30 50 | """# 51 | 52 | "Outbound traffic relations via interface (Top 30)": 53 | #""" 54 | SELECT 55 | ASNToString(DstAS) AS DstASName, 56 | InterfaceToString(SamplerAddress, OutIf) AS OutIfName, 57 | ASNToString(SrcAS) AS SrcASName, 58 | (sum(Bytes * SamplingRate) / 1024) as Bytes 59 | FROM flows_raw 60 | WHERE $__timeFilter(TimeReceived) 61 | \#(_filtersWithHost) 62 | AND NOT isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection) 63 | GROUP BY SrcAS, DstAS, SamplerAddress, OutIf 64 | ORDER BY Bytes DESC 65 | LIMIT 30 66 | """# 67 | } 68 | 69 | _asRelations: [{ 70 | title: "AS Relations" 71 | gridPos: y: 6 72 | type: "row" 73 | }, { 74 | title: "Inbound traffic relations (Top 20)" 75 | type: "netsage-sankey-panel" 76 | gridPos: {h: 24, w: 12, x: 0, y: 7} 77 | options: nodePadding: 6 78 | options: nodeWidth: 23 79 | options: iteration: 15 80 | targets: [{ 81 | rawSql: _asRelationQueries[title] 82 | }] 83 | }, { 84 | title: "Outbound traffic relations (Top 20)" 85 | type: "netsage-sankey-panel" 86 | gridPos: {h: 24, w: 12, x: 12, y: 7} 87 | options: nodePadding: 6 88 | options: nodeWidth: 23 89 | options: iteration: 15 90 | targets: [{ 
91 | rawSql: _asRelationQueries[title] 92 | }] 93 | }, { 94 | title: "Inbound traffic relations via interface (Top 30)" 95 | type: "netsage-sankey-panel" 96 | gridPos: {h: 24, w: 12, x: 0, y: 31} 97 | options: nodePadding: 6 98 | options: nodeWidth: 23 99 | options: iteration: 15 100 | targets: [{ 101 | rawSql: _asRelationQueries[title] 102 | }] 103 | }, { 104 | title: "Outbound traffic relations via interface (Top 30)" 105 | type: "netsage-sankey-panel" 106 | gridPos: {h: 24, w: 12, x: 12, y: 31} 107 | options: nodePadding: 6 108 | options: nodeWidth: 23 109 | options: iteration: 15 110 | targets: [{ 111 | rawSql: _asRelationQueries[title] 112 | }] 113 | }] 114 | 115 | _topFlowSankeyQueries: { 116 | "Top 30 Flows (per IP)": 117 | #""" 118 | SELECT 119 | if($showHostnames, HostToString(SamplerAddress, SrcAddr), IPv6ToString(SrcAddr)) AS Src, 120 | if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) AS Dst, 121 | (sum(Bytes * SamplingRate) / 1024) as Bytes 122 | FROM flows_raw 123 | WHERE $__timeFilter(TimeReceived) 124 | \#(_filtersWithHost) 125 | GROUP BY SamplerAddress, SrcAddr, DstAddr 126 | ORDER BY Bytes DESC 127 | LIMIT 30 128 | """# 129 | 130 | "Top 30 Flows (per IP+Port)": 131 | #""" 132 | SELECT 133 | if($showHostnames, HostToString(SamplerAddress, SrcAddr), IPv6ToString(SrcAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(SrcPort) as Src, 134 | if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(DstPort) as Dst, 135 | (sum(Bytes * SamplingRate) / 1024) as Bytes 136 | FROM flows_raw 137 | WHERE $__timeFilter(TimeReceived) 138 | \#(_filtersWithHost) 139 | GROUP BY SamplerAddress, SrcAddr, SrcPort, Proto, DstAddr, DstPort 140 | ORDER BY Bytes DESC 141 | LIMIT 30 142 | """# 143 | } 144 | 145 | _topFlowSankey: [{ 146 | title: "AS Relations" 147 | gridPos: y: 32 148 | type: "row" 149 | }, { 150 | 
title: "Top 30 Flows (per IP)" 151 | type: "netsage-sankey-panel" 152 | gridPos: {h: 24, w: 12, x: 0, y: 33} 153 | options: nodePadding: 6 154 | options: nodeWidth: 28 155 | targets: [{ 156 | rawSql: _topFlowSankeyQueries[title] 157 | }] 158 | }, { 159 | title: "Top 30 Flows (per IP+Port)" 160 | type: "netsage-sankey-panel" 161 | gridPos: {h: 24, w: 12, x: 12, y: 33} 162 | options: nodePadding: 11 163 | options: nodeWidth: 28 164 | targets: [{ 165 | rawSql: _topFlowSankeyQueries[title] 166 | }] 167 | }] 168 | 169 | _flowsPerASNQueries: { 170 | "Top 30 ASN per service (inbound)": 171 | #""" 172 | SELECT 173 | ASNToString(SrcAS) AS SrcASName, 174 | if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(DstPort) as Dst, 175 | (sum(Bytes * SamplingRate) / 1024) as Bytes 176 | FROM flows_raw 177 | WHERE $__timeFilter(TimeReceived) 178 | \#(_filtersWithHost) 179 | AND isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection) 180 | GROUP BY SrcAS, SamplerAddress, DstAddr, Proto, DstPort 181 | ORDER BY Bytes DESC 182 | LIMIT 30 183 | """# 184 | 185 | "Top 30 ASN per service (outbound)": 186 | #""" 187 | SELECT 188 | ASNToString(SrcAS) AS SrcASName, 189 | if($showHostnames, HostToString(SamplerAddress, DstAddr), IPv6ToString(DstAddr)) || ' ' || dictGetString('IPProtocols', 'Name', toUInt64(Proto)) || toString(DstPort) as Dst, 190 | (sum(Bytes * SamplingRate) / 1024) as Bytes 191 | FROM flows_raw 192 | WHERE $__timeFilter(TimeReceived) 193 | \#(_filtersWithHost) 194 | AND NOT isIncomingFlow(SamplerAddress, SrcAddr, DstAddr, SrcAS, DstAS, FlowDirection) 195 | GROUP BY SrcAS, SamplerAddress, DstAddr, Proto, DstPort 196 | ORDER BY Bytes DESC 197 | LIMIT 30 198 | """# 199 | } 200 | 201 | _flowsPerASN: [{ 202 | title: "Flows per ASN" 203 | gridPos: y: 34 204 | type: "row" 205 | }, { 206 | title: "Top 30 ASN per service (inbound)" 207 | type: 
"netsage-sankey-panel" 208 | gridPos: {h: 24, w: 12, x: 0, y: 60} 209 | options: nodePadding: 11 210 | options: nodeWidth: 28 211 | targets: [{ 212 | rawSql: _flowsPerASNQueries[title] 213 | }] 214 | }, { 215 | title: "Top 30 ASN per service (outbound)" 216 | type: "netsage-sankey-panel" 217 | gridPos: {h: 24, w: 12, x: 12, y: 60} 218 | options: nodePadding: 11 219 | options: nodeWidth: 28 220 | targets: [{ 221 | rawSql: _flowsPerASNQueries[title] 222 | }] 223 | }] 224 | 225 | dashboards: "Traffic Relations": { 226 | #folder: "NetMeta" 227 | title: "Traffic Relations" 228 | uid: "5pH2j5ank" 229 | _panels: list.Concat([ 230 | _disclaimerPanels, 231 | _infoPanels, 232 | _asRelations, 233 | _topFlowSankey, 234 | _flowsPerASN, 235 | ]) 236 | } 237 | --------------------------------------------------------------------------------