= ({
11 | selectedNamespace,
12 | onNamespaceChange,
13 | namespaces = ['default'],
14 | }) => {
15 | return (
16 |
17 |
18 |
21 |
35 |
36 | );
37 | };
38 |
39 | export default NamespaceSelector;
40 |
--------------------------------------------------------------------------------
/controller/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "kguardian"
3 | version = "1.6.0"
4 | license = "BUSL-1.1"
5 | edition = "2021"
6 |
7 | [build-dependencies]
8 | libbpf-cargo = "0.25.0"
9 | vmlinux = { version = "0.0", git = "https://github.com/libbpf/vmlinux.h.git", rev = "8f91e9fd5b488ff57074e589e3960940f3387830" }
10 |
11 | [dependencies]
12 | anyhow = "1.0.96"
13 | libbpf-rs = "0.25.0"
14 | libbpf-sys = "1.5.1"
15 | plain = "0.2"
16 | time = { version = "0.3", features = ["formatting", "local-offset", "macros"]}
17 | tokio = { version = "1.48", features = ["macros", "rt-multi-thread", "fs", "time"] }
18 | kube = { version = "2.0.0", features = ["runtime", "derive"] }
19 | k8s-openapi = { version = "0.26", features = ["latest"] }
20 | futures = "0.3.31"
21 | thiserror = "2.0.17"
22 | tracing = "0.1.37"
23 | serde = "1.0.228"
24 | serde_derive = "1.0.228"
25 | serde_json = "1.0.143"
26 | tracing-subscriber = { version = "0.3.16", features = ["json", "env-filter", "local-time"] }
27 | containerd-client = "0.6.0"
28 | regex = "1.11.2"
29 | procfs = "0.18.0"
30 | reqwest = { version = "0.12.23", features = ["json"] }
31 | uuid = { version = "1.18.0", features = ["v4"]}
32 | chrono = { version = "0.4.41", features = ["serde"] }
33 | openssl = { version = "0.10.73", features = ["vendored"] }
34 | lazy_static = "1.5.0"
35 | libseccomp = "0.4"
36 | moka = { version = "0.12.10", features = ["future"]}
37 | dashmap = "6.1"
38 |
--------------------------------------------------------------------------------
/frontend/public/vite.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/api-reference/endpoints/pods.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Pod Endpoints"
3 | description: "Add and retrieve pod metadata"
4 | ---
5 |
6 | ## POST /pods
7 |
8 | Add or update pod metadata.
9 |
10 | ### Request
11 |
12 | ```json
13 | {
14 | "pod_ip": "10.244.1.5",
15 | "pod_name": "my-app-7d9f6b8c4-x5z2w",
16 | "pod_namespace": "production"
17 | }
18 | ```
19 |
20 | ### Response
21 |
22 | ```json
23 | {
24 | "uuid": "550e8400-e29b-41d4-a716-446655440000",
25 | "pod_ip": "10.244.1.5",
26 | "pod_name": "my-app-7d9f6b8c4-x5z2w",
27 | "pod_namespace": "production"
28 | }
29 | ```
30 |
31 | ---
32 |
33 | ## POST /pod/spec
34 |
35 | Add full pod specification including labels and metadata.
36 |
37 | ### Request
38 |
39 | ```json
40 | {
41 | "pod_ip": "10.244.1.5",
42 | "pod_name": "my-app",
43 | "pod_namespace": "production",
44 | "pod_obj": {
45 | "metadata": {
46 | "labels": {
47 | "app": "my-app",
48 | "version": "v1.0.0"
49 | }
50 | },
51 | "spec": { /* full pod spec */ }
52 | }
53 | }
54 | ```
55 |
56 | ---
57 |
58 | ## GET /pod/ip/:ip
59 |
60 | Retrieve pod details by IP address.
61 |
62 | ### Example
63 |
64 | ```bash
65 | curl http://localhost:9090/pod/ip/10.244.1.5
66 | ```
67 |
68 | ### Response
69 |
70 | ```json
71 | {
72 | "uuid": "550e8400-e29b-41d4-a716-446655440000",
73 | "pod_ip": "10.244.1.5",
74 | "pod_name": "my-app",
75 | "pod_namespace": "production",
76 | "pod_obj": { /* full pod spec */ }
77 | }
78 | ```
79 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Mintlify Starter Kit
2 |
3 | Use the starter kit to get your docs deployed and ready to customize.
4 |
5 | Click the green **Use this template** button at the top of this repo to copy the Mintlify starter kit. The starter kit contains examples with
6 |
7 | - Guide pages
8 | - Navigation
9 | - Customizations
10 | - API reference pages
11 | - Use of popular components
12 |
13 | **[Follow the full quickstart guide](https://starter.mintlify.com/quickstart)**
14 |
15 | ## Development
16 |
17 | Install the [Mintlify CLI](https://www.npmjs.com/package/mint) to preview your documentation changes locally. To install, use the following command:
18 |
19 | ```
20 | npm i -g mint
21 | ```
22 |
23 | Run the following command at the root of your documentation, where your `docs.json` is located:
24 |
25 | ```
26 | mint dev
27 | ```
28 |
29 | View your local preview at `http://localhost:3000`.
30 |
31 | ## Publishing changes
32 |
33 | Install our GitHub app from your [dashboard](https://dashboard.mintlify.com/settings/organization/github-app) to propagate changes from your repo to your deployment. Changes are deployed to production automatically after pushing to the default branch.
34 |
35 | ## Need help?
36 |
37 | ### Troubleshooting
38 |
39 | - If your dev environment isn't running: Run `mint update` to ensure you have the most recent version of the CLI.
40 | - If a page loads as a 404: Make sure you are running in a folder with a valid `docs.json`.
41 |
42 | ### Resources
43 | - [Mintlify documentation](https://mintlify.com/docs)
44 |
--------------------------------------------------------------------------------
/docs/cli/overview.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "CLI Overview"
3 | description: "kubectl kguardian command reference"
4 | icon: "terminal"
5 | ---
6 |
7 | ## kubectl kguardian
8 |
9 | The kguardian CLI is a kubectl plugin that generates security policies from observed runtime behavior.
10 |
11 | ## Installation
12 |
13 | See the [Installation Guide](/installation#install-the-cli-plugin) for detailed instructions.
14 |
15 | ## Global Flags
16 |
17 | Available for all commands:
18 |
19 | | Flag | Description | Default |
20 | |------|-------------|---------|
21 | | `--kubeconfig` | Path to kubeconfig file | `$KUBECONFIG` or `~/.kube/config` |
22 | | `--context` | Kubernetes context to use | Current context |
23 | | `-n, --namespace` | Namespace scope | Current namespace |
24 | | `--debug` | Enable debug logging | `false` |
25 |
26 | ## Commands
27 |
28 |
29 |
30 | Generate Network Policies from observed traffic
31 |
32 |
33 | Generate Seccomp profiles from syscall usage
34 |
35 |
36 |
37 | ## Examples
38 |
39 | ```bash
40 | # Generate network policy for a pod
41 | kubectl kguardian gen networkpolicy my-app -n production
42 |
43 | # Generate seccomp for all pods in namespace
44 | kubectl kguardian gen seccomp --all -n staging
45 |
46 | # Generate Cilium policies cluster-wide
47 | kubectl kguardian gen netpol -A --type cilium
48 | ```
49 |
--------------------------------------------------------------------------------
/advisor/pkg/k8s/labels_test.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | api "github.com/kguardian-dev/kguardian/advisor/pkg/api"
8 | v1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/client-go/kubernetes"
11 | )
12 |
13 | func TestDetectSelectorLabels(t *testing.T) {
14 | clientset := &kubernetes.Clientset{}
15 | pod := &v1.Pod{
16 | ObjectMeta: metav1.ObjectMeta{
17 | Labels: map[string]string{
18 | "app": "test-app",
19 | },
20 | },
21 | }
22 | podDetail := &api.PodDetail{
23 | Pod: v1.Pod{
24 | ObjectMeta: metav1.ObjectMeta{
25 | Labels: map[string]string{
26 | "app": "test-app",
27 | },
28 | },
29 | },
30 | }
31 | serviceDetail := &api.SvcDetail{
32 | Service: v1.Service{
33 | Spec: v1.ServiceSpec{
34 | Selector: map[string]string{
35 | "app": "test-app",
36 | },
37 | },
38 | },
39 | }
40 |
41 | labels1, err1 := detectSelectorLabels(clientset, pod)
42 | assert.NoError(t, err1)
43 | assert.Equal(t, map[string]string{"app": "test-app"}, labels1)
44 |
45 | labels2, err2 := detectSelectorLabels(clientset, podDetail)
46 | assert.NoError(t, err2)
47 | assert.Equal(t, map[string]string{"app": "test-app"}, labels2)
48 |
49 | labels3, err3 := detectSelectorLabels(clientset, serviceDetail)
50 | assert.NoError(t, err3)
51 | assert.Equal(t, map[string]string{"app": "test-app"}, labels3)
52 |
53 | _, err4 := detectSelectorLabels(clientset, "unknown type")
54 | assert.Error(t, err4)
55 | }
56 |
--------------------------------------------------------------------------------
/docs/cli/gen-networkpolicy.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "gen networkpolicy"
3 | description: "Generate Kubernetes or Cilium Network Policies"
4 | icon: "network-wired"
5 | ---
6 |
7 | ## Synopsis
8 |
9 | Generate Network Policies based on observed pod traffic.
10 |
11 | ```bash
12 | kubectl kguardian gen networkpolicy [POD_NAME] [flags]
13 | kubectl kguardian gen netpol [POD_NAME] [flags] # Alias
14 | ```
15 |
16 | ## Flags
17 |
18 | | Flag | Type | Description | Default |
19 | |------|------|-------------|---------|
20 | | `-n, --namespace` | string | Namespace of the pod | Current namespace |
21 | | `-a, --all` | bool | Generate for all pods in namespace | `false` |
22 | | `-A, --all-namespaces` | bool | Generate for all pods cluster-wide | `false` |
23 | | `-t, --type` | string | Policy type: `kubernetes` or `cilium` | `kubernetes` |
24 | | `--output-dir` | string | Directory to save policies | `network-policies` |
25 | | `--dry-run` | bool | Generate without applying | `true` |
26 |
27 | ## Examples
28 |
29 | ```bash
30 | # Single pod
31 | kubectl kguardian gen networkpolicy my-app -n prod --output-dir ./policies
32 |
33 | # All pods in namespace
34 | kubectl kguardian gen netpol --all -n staging --output-dir ./staging-policies
35 |
36 | # Cilium policies cluster-wide
37 | kubectl kguardian gen netpol -A --type cilium --output-dir ./cilium
38 |
39 | # Generate and apply
40 | kubectl kguardian gen netpol my-app --dry-run=false
41 | ```
42 |
43 | ---
44 |
45 | See [Generating Network Policies](/guides/generating-network-policies) for detailed usage.
46 |
--------------------------------------------------------------------------------
/.github/workflows/charts-release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: "Charts: Release to GHCR OCI"
4 |
5 | on:
6 | workflow_dispatch: {}
7 | push:
8 | tags:
9 | - "chart/v*"
10 |
11 | env:
12 | CHARTS_SRC_DIR: "kguardian"
13 | TARGET_REGISTRY: ghcr.io
14 |
jobs:
  release-charts:
    name: Release Charts
    runs-on: ubuntu-latest
    # Least-privilege GITHUB_TOKEN scopes (was: write-all). This job only
    # checks out the repo and pushes OCI chart packages to GHCR, so it needs
    # read on contents and write on packages — nothing else.
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Login to GitHub Container Registry
        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
        with:
          registry: ${{ env.TARGET_REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Install Helm
        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4

      - name: Publish Helm Chart to GHCR
        run: |
          helm package charts/${{ env.CHARTS_SRC_DIR }}
          helm push kguardian-*.tgz oci://${{ env.TARGET_REGISTRY }}/${{ github.repository_owner }}/charts
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
47 |
--------------------------------------------------------------------------------
/.versionrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "types": [
3 | {
4 | "type": "feat",
5 | "section": "Features"
6 | },
7 | {
8 | "type": "fix",
9 | "section": "Bug Fixes"
10 | },
11 | {
12 | "type": "perf",
13 | "section": "Performance Improvements"
14 | },
15 | {
16 | "type": "revert",
17 | "section": "Reverts"
18 | },
19 | {
20 | "type": "docs",
21 | "section": "Documentation"
22 | },
23 | {
24 | "type": "refactor",
25 | "section": "Code Refactoring"
26 | },
27 | {
28 | "type": "test",
29 | "section": "Tests",
30 | "hidden": true
31 | },
32 | {
33 | "type": "build",
34 | "section": "Build System",
35 | "hidden": true
36 | },
37 | {
38 | "type": "ci",
39 | "section": "Continuous Integration",
40 | "hidden": true
41 | },
42 | {
43 | "type": "chore",
44 | "section": "Miscellaneous Chores",
45 | "hidden": true
46 | },
47 | {
48 | "type": "style",
49 | "section": "Styles",
50 | "hidden": true
51 | }
52 | ],
53 | "commitUrlFormat": "https://github.com/kguardian-dev/kguardian/commit/{{hash}}",
54 | "compareUrlFormat": "https://github.com/kguardian-dev/kguardian/compare/{{previousTag}}...{{currentTag}}",
55 | "issueUrlFormat": "https://github.com/kguardian-dev/kguardian/issues/{{id}}",
56 | "userUrlFormat": "https://github.com/{{user}}",
57 | "releaseCommitMessageFormat": "chore(release): {{currentTag}}",
58 | "issuePrefixes": ["#"]
59 | }
60 |
--------------------------------------------------------------------------------
/docs/concepts/seccomp-profiles.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Seccomp Profiles"
3 | description: "Understanding seccomp and how kguardian generates syscall allowlists"
4 | icon: "shield-check"
5 | ---
6 |
7 | ## What is Seccomp?
8 |
9 | Seccomp (Secure Computing Mode) is a Linux kernel feature that restricts which **system calls** (syscalls) a process can make.
10 |
11 | ### Why Limit Syscalls?
12 |
13 | Most applications use only 50-100 of Linux's 300+ syscalls. Blocking unused syscalls:
14 | - Reduces attack surface
15 | - Prevents privilege escalation exploits
16 | - Stops malicious code from using dangerous syscalls
17 |
18 | ## How kguardian Generates Profiles
19 |
20 | 1. **Observes** all syscalls made by the container via eBPF
21 | 2. **Aggregates** unique syscall names over observation period
22 | 3. **Generates** JSON profile with allowlist
23 |
24 | Example generated profile:
25 |
26 | ```json
27 | {
28 | "defaultAction": "SCMP_ACT_ERRNO",
29 | "architectures": ["SCMP_ARCH_X86_64"],
30 | "syscalls": [
31 | {
32 | "names": ["read", "write", "open", "close", "socket", "connect"],
33 | "action": "SCMP_ACT_ALLOW"
34 | }
35 | ]
36 | }
37 | ```
38 |
39 | ## Actions
40 |
41 | - `SCMP_ACT_ALLOW`: Allow the syscall
42 | - `SCMP_ACT_ERRNO`: Block with error (default for unlisted)
43 | - `SCMP_ACT_LOG`: Log the syscall but allow it
- `SCMP_ACT_KILL`: Kill the offending thread (alias of `SCMP_ACT_KILL_THREAD`; use `SCMP_ACT_KILL_PROCESS` to terminate the whole process)
45 |
46 | ---
47 |
48 | **Next steps:**
49 | - [Generate Seccomp Profiles](/guides/generating-seccomp-profiles)
50 | - [CLI Reference](/cli/gen-seccomp)
51 |
--------------------------------------------------------------------------------
/mcp-server/logger/logger.go:
--------------------------------------------------------------------------------
1 | package logger
2 |
import (
	"os"
	"strings"

	"github.com/sirupsen/logrus"
)
8 |
9 | var Log *logrus.Logger
10 |
// Init initializes the global logger with the specified log level.
// It must be called before WithField/WithFields, which dereference Log.
func Init(level string) {
	Log = logrus.New()

	// Set output to stdout
	Log.SetOutput(os.Stdout)

	// Use JSON formatter for structured logging; FieldMap renames the
	// default logrus keys to timestamp/level/message for log collectors.
	Log.SetFormatter(&logrus.JSONFormatter{
		TimestampFormat: "2006-01-02T15:04:05.000Z07:00",
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime: "timestamp",
			logrus.FieldKeyLevel: "level",
			logrus.FieldKeyMsg: "message",
		},
	})

	// Set log level (unrecognized values fall back to Info in parseLogLevel)
	Log.SetLevel(parseLogLevel(level))
}
31 |
32 | // parseLogLevel converts a string log level to logrus.Level
33 | func parseLogLevel(level string) logrus.Level {
34 | switch level {
35 | case "debug", "DEBUG":
36 | return logrus.DebugLevel
37 | case "info", "INFO":
38 | return logrus.InfoLevel
39 | case "warn", "WARN", "warning", "WARNING":
40 | return logrus.WarnLevel
41 | case "error", "ERROR":
42 | return logrus.ErrorLevel
43 | default:
44 | return logrus.InfoLevel
45 | }
46 | }
47 |
// WithField creates a new logger entry with an additional field.
// NOTE: Log is nil until Init is called; calling this earlier will panic.
func WithField(key string, value interface{}) *logrus.Entry {
	return Log.WithField(key, value)
}
52 |
// WithFields creates a new logger entry with multiple additional fields.
// NOTE: Log is nil until Init is called; calling this earlier will panic.
func WithFields(fields logrus.Fields) *logrus.Entry {
	return Log.WithFields(fields)
}
57 |
--------------------------------------------------------------------------------
/Taskfile.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://taskfile.dev/schema.json
3 | version: '3'
4 |
5 | includes:
6 | advisor: .taskfiles/Advisor
7 | broker: .taskfiles/Broker
8 | controller: .taskfiles/Controller
9 | ui: .taskfiles/UI
10 |
11 | vars:
12 | IMAGE_VERSION: local
13 |
14 | tasks:
15 | all:
16 | # deps: [kind]
17 | desc: "Run all tasks"
18 | cmds:
19 | #- task: advisor:all
20 | - task: broker:all
21 | - task: controller:all
22 |
23 | # kind:
24 | # desc: Create fresh kind cluster
25 | # cmds:
26 | # - kind delete cluster || true
27 | # - kind create cluster
28 |
29 |
30 | install:oci:
31 | deps: [all]
32 | desc: "Install from OCI registry (production-style install)"
33 | cmds:
34 | - helm upgrade kguardian oci://ghcr.io/kguardian-dev/charts/kguardian
35 | --namespace kguardian --create-namespace --set controller.image.tag={{.IMAGE_VERSION}}
36 | --set broker.image.tag={{.IMAGE_VERSION}}
37 | --set controller.image.pullPolicy=IfNotPresent
38 | --set broker.image.pullPolicy=IfNotPresent
39 | --set controller.initContainer.image.pullPolicy=IfNotPresent
40 |
41 | preflight:
42 | desc: "Run preflight checks for all components"
43 | cmds:
44 | - task: advisor:preflight
45 | - task: broker:preflight
46 | - task: controller:preflight
47 | - task: ui:preflight
48 |
49 | preflight-all:
50 | desc: "Run preflight checks and then all tasks"
51 | cmds:
52 | - task: preflight
53 | - task: all
54 |
--------------------------------------------------------------------------------
/docs/logo/light.svg:
--------------------------------------------------------------------------------
1 |
29 |
--------------------------------------------------------------------------------
/docs/logo/dark.svg:
--------------------------------------------------------------------------------
1 |
29 |
--------------------------------------------------------------------------------
/charts/kguardian/templates/frontend/ingress.yaml:
--------------------------------------------------------------------------------
{{- /*
Frontend Ingress: serves the UI at / and routes /api to the broker service.
Rendered only when .Values.frontend.ingress.enabled is true; className and
TLS blocks are emitted only when the corresponding values are set.
*/}}
{{- if .Values.frontend.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "kguardian.name" . }}-frontend
  labels:
    {{- include "kguardian.labels" . | nindent 4 }}
  {{- with .Values.frontend.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.frontend.ingress.className }}
  ingressClassName: {{ .Values.frontend.ingress.className }}
  {{- end }}
  {{- if .Values.frontend.ingress.tls }}
  tls:
    {{- range .Values.frontend.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.frontend.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          # Frontend static files
          - path: /
            pathType: Prefix
            backend:
              service:
                name: {{ $.Values.frontend.service.name }}
                port:
                  number: {{ $.Values.frontend.service.port }}
          # Proxy /api requests to broker
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: {{ $.Values.broker.service.name }}
                port:
                  number: {{ $.Values.broker.service.port }}
    {{- end }}
{{- end }}
--------------------------------------------------------------------------------
/docs/cli/gen-seccomp.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "gen seccomp"
3 | description: "Generate Seccomp profiles from syscall observations"
4 | icon: "shield-check"
5 | ---
6 |
7 | ## Synopsis
8 |
9 | Generate Seccomp profiles based on observed syscall usage.
10 |
11 | ```bash
12 | kubectl kguardian gen seccomp [POD_NAME] [flags]
13 | kubectl kguardian gen secp [POD_NAME] [flags] # Alias
14 | ```
15 |
16 | ## Flags
17 |
18 | | Flag | Type | Description | Default |
19 | |------|------|-------------|---------|
20 | | `-n, --namespace` | string | Namespace of the pod | Current namespace |
21 | | `-a, --all` | bool | Generate for all pods in namespace | `false` |
22 | | `-A, --all-namespaces` | bool | Generate for all pods cluster-wide | `false` |
23 | | `--output-dir` | string | Directory to save profiles | `seccomp-profiles` |
24 | | `--default-action` | string | Action for unlisted syscalls | `SCMP_ACT_ERRNO` |
25 |
26 | ## Default Actions
27 |
28 | - `SCMP_ACT_ERRNO` - Return error for unlisted syscalls (recommended)
29 | - `SCMP_ACT_LOG` - Log unlisted syscalls but allow them (audit mode)
30 | - `SCMP_ACT_KILL` - Kill process on unlisted syscall (strictest)
31 |
32 | ## Examples
33 |
34 | ```bash
35 | # Single pod
36 | kubectl kguardian gen seccomp my-app -n prod --output-dir ./seccomp
37 |
38 | # All pods with logging for unlisted
39 | kubectl kguardian gen secp --all -n staging --default-action SCMP_ACT_LOG
40 |
41 | # Cluster-wide with strict mode
42 | kubectl kguardian gen secp -A --default-action SCMP_ACT_KILL
43 | ```
44 |
45 | ---
46 |
47 | See [Generating Seccomp Profiles](/guides/generating-seccomp-profiles) for detailed usage.
48 |
--------------------------------------------------------------------------------
/frontend/src/types/index.ts:
--------------------------------------------------------------------------------
1 | // Matches broker's PodDetail type
2 | export interface PodInfo {
3 | pod_name: string;
4 | pod_ip: string;
5 | pod_namespace: string | null;
6 | pod_obj?: any;
7 | time_stamp: string;
8 | node_name: string;
9 | is_dead: boolean;
10 | pod_identity?: string | null;
11 | workload_selector_labels?: Record | null;
12 | }
13 |
// Matches broker's PodTraffic type.
// One row per observed connection/flow involving a pod.
export interface NetworkTraffic {
  uuid: string;
  pod_name: string | null;
  pod_namespace: string | null;
  pod_ip: string | null;
  pod_port: string | null;
  ip_protocol: string | null;
  traffic_type: string | null;
  traffic_in_out_ip: string | null; // remote peer address
  traffic_in_out_port: string | null; // remote peer port
  decision: string | null; // ALLOW or DROP
  time_stamp: string;
}
28 |
// Matches broker's PodSyscalls type.
// Aggregated syscall observations for one pod.
export interface SyscallInfo {
  pod_name: string;
  pod_namespace: string;
  syscalls: string; // Comma-separated string
  arch: string; // e.g. x86_64 — architecture the syscalls were observed on
  time_stamp: string;
}
37 |
// One node in the pod graph: a workload identity with its pods,
// observed traffic, and (optionally) observed syscalls.
export interface PodNodeData {
  id: string;
  label: string;
  pod: PodInfo; // Primary pod (for backward compatibility and single-pod identities)
  pods: PodInfo[]; // All pods in this identity group
  traffic: NetworkTraffic[];
  syscalls?: SyscallInfo[];
  isExpanded: boolean; // UI state: whether the node is currently expanded
}
47 |
// Matches broker's SvcDetail type
export interface ServiceInfo {
  svc_ip: string;
  svc_name: string | null;
  svc_namespace: string | null;
  service_spec?: any; // Full Kubernetes Service object
}
55 |
--------------------------------------------------------------------------------
/frontend/src/contexts/ThemeContext.tsx:
--------------------------------------------------------------------------------
1 | import React, { createContext, useContext, useEffect, useState } from 'react';
2 |
3 | type Theme = 'light' | 'dark';
4 |
5 | interface ThemeContextType {
6 | theme: Theme;
7 | toggleTheme: () => void;
8 | setTheme: (theme: Theme) => void;
9 | }
10 |
11 | const ThemeContext = createContext(undefined);
12 |
13 | export const ThemeProvider: React.FC<{ children: React.ReactNode }> = ({ children }) => {
14 | // Initialize theme from localStorage or default to dark
15 | const [theme, setThemeState] = useState(() => {
16 | const savedTheme = localStorage.getItem('kguardian-theme');
17 | return (savedTheme === 'light' || savedTheme === 'dark') ? savedTheme : 'dark';
18 | });
19 |
20 | // Apply theme to document root
21 | useEffect(() => {
22 | const root = document.documentElement;
23 | root.classList.remove('light', 'dark');
24 | root.classList.add(theme);
25 | localStorage.setItem('kguardian-theme', theme);
26 | }, [theme]);
27 |
28 | const toggleTheme = () => {
29 | setThemeState(prev => prev === 'dark' ? 'light' : 'dark');
30 | };
31 |
32 | const setTheme = (newTheme: Theme) => {
33 | setThemeState(newTheme);
34 | };
35 |
36 | return (
37 |
38 | {children}
39 |
40 | );
41 | };
42 |
// Access the theme context. Throws when called outside a ThemeProvider,
// so callers never have to handle an undefined context value.
export const useTheme = () => {
  const context = useContext(ThemeContext);
  if (context === undefined) {
    throw new Error('useTheme must be used within a ThemeProvider');
  }
  return context;
};
50 |
--------------------------------------------------------------------------------
/docs/essentials/images.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Images and embeds'
3 | description: 'Add image, video, and other HTML elements'
4 | icon: 'image'
5 | ---
6 |
7 |
11 |
12 | ## Image
13 |
14 | ### Using Markdown
15 |
16 | The [markdown syntax](https://www.markdownguide.org/basic-syntax/#images) lets you add images using the following code
17 |
18 | ```md
19 | 
20 | ```
21 |
22 | Note that the image file size must be less than 5MB. Otherwise, we recommend hosting on a service like [Cloudinary](https://cloudinary.com/) or [S3](https://aws.amazon.com/s3/). You can then use that URL and embed.
23 |
24 | ### Using embeds
25 |
26 | To get more customizability with images, you can also use [embeds](/writing-content/embed) to add images
27 |
28 | ```html
29 |
30 | ```
31 |
32 | ## Embeds and HTML elements
33 |
34 |
44 |
45 |
46 |
47 |
48 |
49 | Mintlify supports [HTML tags in Markdown](https://www.markdownguide.org/basic-syntax/#html). This is helpful if you prefer HTML tags to Markdown syntax, and lets you create documentation with infinite flexibility.
50 |
51 |
52 |
53 | ### iFrames
54 |
55 | Loads another HTML page within the document. Most commonly used for embedding videos.
56 |
57 | ```html
58 |
59 | ```
60 |
--------------------------------------------------------------------------------
/mcp-server/tools/cluster_pods.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
10 | "github.com/modelcontextprotocol/go-sdk/mcp"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
// ClusterPodsInput defines the input parameters (no params needed)
type ClusterPodsInput struct{}

// ClusterPodsOutput defines the output structure.
// Data carries the broker response, re-serialized as indented JSON.
type ClusterPodsOutput struct {
	Data string `json:"data" jsonschema:"All pod details in the cluster in JSON format"`
}

// ClusterPodsHandler handles the get_cluster_pods tool by delegating
// pod lookups to the broker HTTP client.
type ClusterPodsHandler struct {
	client *BrokerClient
}
26 |
27 | // Call implements the tool handler
28 | func (h ClusterPodsHandler) Call(
29 | ctx context.Context,
30 | req *mcp.CallToolRequest,
31 | input ClusterPodsInput,
32 | ) (*mcp.CallToolResult, ClusterPodsOutput, error) {
33 | startTime := time.Now()
34 | logger.Log.Info("Received get_cluster_pods request")
35 |
36 | data, err := h.client.GetAllPods()
37 | if err != nil {
38 | logger.Log.WithFields(logrus.Fields{
39 | "error": err.Error(),
40 | "total_duration": time.Since(startTime).String(),
41 | }).Error("Error fetching cluster pods")
42 | return nil, ClusterPodsOutput{}, fmt.Errorf("error fetching cluster pods: %w", err)
43 | }
44 |
45 | jsonData, err := json.MarshalIndent(data, "", " ")
46 | if err != nil {
47 | logger.Log.WithField("error", err.Error()).Error("Error marshaling response")
48 | return nil, ClusterPodsOutput{}, fmt.Errorf("error marshaling response: %w", err)
49 | }
50 |
51 | logger.Log.WithFields(logrus.Fields{
52 | "response_bytes": len(jsonData),
53 | "total_duration": time.Since(startTime).String(),
54 | }).Info("Successfully fetched cluster pods")
55 |
56 | return nil, ClusterPodsOutput{Data: string(jsonData)}, nil
57 | }
58 |
--------------------------------------------------------------------------------
/frontend/src/types/seccompProfile.ts:
--------------------------------------------------------------------------------
// A seccomp profile document: a default action plus optional
// per-syscall overrides and target architectures.
export interface SeccompProfile {
  defaultAction: SeccompAction; // applied to any syscall not matched in `syscalls`
  architectures?: string[]; // e.g. 'SCMP_ARCH_X86_64'; see ARCHITECTURES below
  syscalls?: SeccompSyscall[];
}
6 |
// A group of syscall names that share one action.
export interface SeccompSyscall {
  names: string[];
  action: SeccompAction;
}
11 |
// Union of all seccomp actions understood by the profile format.
export type SeccompAction =
  | 'SCMP_ACT_ALLOW'
  | 'SCMP_ACT_ERRNO'
  | 'SCMP_ACT_KILL'
  | 'SCMP_ACT_KILL_PROCESS'
  | 'SCMP_ACT_KILL_THREAD'
  | 'SCMP_ACT_LOG'
  | 'SCMP_ACT_TRACE'
  | 'SCMP_ACT_TRAP';
21 |
// Runtime list of all actions (mirrors the SeccompAction union above).
export const SECCOMP_ACTIONS: SeccompAction[] = [
  'SCMP_ACT_ALLOW',
  'SCMP_ACT_ERRNO',
  'SCMP_ACT_KILL',
  'SCMP_ACT_KILL_PROCESS',
  'SCMP_ACT_KILL_THREAD',
  'SCMP_ACT_LOG',
  'SCMP_ACT_TRACE',
  'SCMP_ACT_TRAP',
];
32 |
33 | // Action descriptions from https://kubernetes.io/docs/reference/node/seccomp/
34 | export const SECCOMP_ACTION_DESCRIPTIONS: Record = {
35 | 'SCMP_ACT_ALLOW': 'Allow the syscall to be executed',
36 | 'SCMP_ACT_ERRNO': 'Return an error code (reject syscall)',
37 | 'SCMP_ACT_KILL': 'Kill only the thread',
38 | 'SCMP_ACT_KILL_PROCESS': 'Kill the entire process',
39 | 'SCMP_ACT_KILL_THREAD': 'Kill only the thread',
40 | 'SCMP_ACT_LOG': 'Allow the syscall and log it to syslog or auditd',
41 | 'SCMP_ACT_TRACE': 'Notify a tracing process with the specified value',
42 | 'SCMP_ACT_TRAP': 'Throw a SIGSYS signal',
43 | };
44 |
// Architectures that may appear in a profile's `architectures` field.
export const ARCHITECTURES = [
  'SCMP_ARCH_X86_64',
  'SCMP_ARCH_X86',
  'SCMP_ARCH_X32',
  'SCMP_ARCH_ARM',
  'SCMP_ARCH_AARCH64',
  'SCMP_ARCH_MIPS',
  'SCMP_ARCH_MIPS64',
  'SCMP_ARCH_MIPS64N32',
  'SCMP_ARCH_MIPSEL',
  'SCMP_ARCH_MIPSEL64',
  'SCMP_ARCH_MIPSEL64N32',
  'SCMP_ARCH_PPC',
  'SCMP_ARCH_PPC64',
  'SCMP_ARCH_PPC64LE',
  'SCMP_ARCH_S390',
  'SCMP_ARCH_S390X',
];
63 |
--------------------------------------------------------------------------------
/docs/concepts/ebpf-monitoring.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "eBPF Monitoring"
3 | description: "How kguardian uses eBPF for kernel-level observability"
4 | icon: "radar"
5 | ---
6 |
7 | ## What is eBPF?
8 |
9 | eBPF (extended Berkeley Packet Filter) is a revolutionary Linux kernel technology that allows running custom programs inside the kernel without changing kernel source code or loading kernel modules.
10 |
11 | ## How kguardian Uses eBPF
12 |
13 | kguardian attaches eBPF programs to kernel hooks to observe pod behavior:
14 |
15 | ### Network Traffic Monitoring
16 |
17 | **Hook points:**
18 | - `tcp_connect` - Outbound TCP connections
19 | - `tcp_sendmsg` / `tcp_recvmsg` - Data transmission
20 | - `udp_sendmsg` / `udp_recvmsg` - UDP traffic
21 |
22 | **Captured data:**
23 | - Source and destination IP addresses
24 | - Source and destination ports
25 | - Protocol (TCP/UDP)
26 | - Network namespace (to map to containers)
27 |
28 | ### Syscall Monitoring
29 |
30 | **Hook points:**
31 | - `sys_enter_*` - Entry to any syscall
32 | - `sys_exit_*` - Exit from syscall
33 |
34 | **Captured data:**
35 | - Syscall name (e.g., `open`, `read`, `socket`)
36 | - Process ID and container namespace
37 | - Architecture (x86_64, arm64, etc.)
38 |
39 | ## Why eBPF?
40 |
41 |
42 |
43 | ~1-2% CPU overhead vs 10-20% for proxy-based solutions
44 |
45 |
46 | Verifier ensures programs can't crash the kernel
47 |
48 |
49 | No code changes, sidecars, or pod restarts needed
50 |
51 |
52 | See everything, including encrypted connections
53 |
54 |
55 |
56 | ---
57 |
58 | **Learn more:**
59 | - [Architecture Overview](/architecture)
60 | - [Controller Implementation](/development/controller)
61 |
--------------------------------------------------------------------------------
/.github/workflows/charts-lint.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: Lint and Test Charts
4 |
5 | on:
6 | pull_request:
7 | branches:
8 | - main
9 | paths:
10 | - '.github/workflows/charts-lint.yaml'
11 | - 'charts/**'
12 |
13 | jobs:
14 | lint-test:
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Checkout
18 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
19 | with:
20 | fetch-depth: 0
21 |
22 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6
23 | with:
24 | python-version: '3.11'
25 | check-latest: true
26 |
27 | - name: Set up Helm
28 | uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
29 | with:
30 | version: v3.14.4
31 |
32 | - name: Set up chart-testing
33 | uses: helm/chart-testing-action@6ec842c01de15ebb84c8627d2744a0c2f2755c9f # v2.8.0
34 |
35 | - name: Run chart-testing (list-changed)
36 | id: list-changed
37 | run: |
38 | changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
39 | if [[ -n "$changed" ]]; then
40 | echo "changed=true" >> "$GITHUB_OUTPUT"
41 | fi
42 |
43 | - name: Run chart-testing (lint)
44 | if: steps.list-changed.outputs.changed == 'true'
45 | run: ct lint --config .github/ct.yaml --target-branch ${{ github.event.repository.default_branch }}
46 |
47 | - name: Create kind cluster
48 | if: steps.list-changed.outputs.changed == 'true'
49 | uses: helm/kind-action@92086f6be054225fa813e0a4b13787fc9088faab # v1.13.0
50 |
51 | - name: Run chart-testing (install)
52 | if: steps.list-changed.outputs.changed == 'true'
53 | run: ct install --config .github/ct.yaml --target-branch ${{ github.event.repository.default_branch }}
54 |
--------------------------------------------------------------------------------
/advisor/pkg/api/pod_syscall.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "io"
7 | "net/http"
8 | "strings"
9 | "time"
10 |
11 | "github.com/rs/zerolog/log"
12 | )
13 |
14 | type PodSysCall struct {
15 | Syscalls []string `json:"syscalls"`
16 | Arch string `json:"arch"`
17 | }
18 |
19 | type PodSysCallResponse struct {
20 | PodName string `json:"pod_name"`
21 | PodNamespace string `json:"pod_namespace"`
22 | Syscalls string `json:"syscalls"`
23 | Arch string `json:"arch"`
24 | }
25 |
26 | func GetPodSysCall(podName string) (PodSysCall, error) {
27 | time.Sleep(3 * time.Second)
28 | apiURL := "http://127.0.0.1:9090/pod/syscalls/" + podName
29 |
30 | resp, err := http.Get(apiURL)
31 | if err != nil {
32 | log.Error().Err(err).Msg("GetPodSysCall: Error making GET request")
33 | return PodSysCall{}, err
34 | }
35 | defer func() {
36 | if closeErr := resp.Body.Close(); closeErr != nil {
37 | log.Error().Err(closeErr).Msg("GetPodSysCall: Error closing response body")
38 | }
39 | }()
40 |
41 | if resp.StatusCode != http.StatusOK {
42 | return PodSysCall{}, fmt.Errorf("GetPodSysCall: received non-OK HTTP status code: %v", resp.StatusCode)
43 | }
44 |
45 | body, err := io.ReadAll(resp.Body)
46 | if err != nil {
47 | log.Error().Err(err).Msg("GetPodSysCall: Error reading response body")
48 | return PodSysCall{}, err
49 | }
50 |
51 | var podSysCallsResponse []PodSysCallResponse
52 | if err := json.Unmarshal(body, &podSysCallsResponse); err != nil {
53 | log.Error().Err(err).Msg("GetPodSysCall: Error unmarshalling JSON")
54 | return PodSysCall{}, err
55 | }
56 |
57 | if len(podSysCallsResponse) == 0 {
58 | return PodSysCall{}, fmt.Errorf("GetPodSysCall: No pod syscall found in database")
59 | }
60 |
61 | var podSysCalls PodSysCall
62 |
63 | podSysCalls.Syscalls = strings.Split(podSysCallsResponse[0].Syscalls, ",")
64 | podSysCalls.Arch = podSysCallsResponse[0].Arch
65 |
66 | return podSysCalls, nil
67 | }
68 |
--------------------------------------------------------------------------------
/controller/src/bpf/syscall.bpf.c:
--------------------------------------------------------------------------------
1 | #include "vmlinux.h"
2 | #include
3 | #include
4 | #include
5 | #include
6 | #include "helper.h"
7 |
8 | struct
9 | {
10 | __uint(type, BPF_MAP_TYPE_RINGBUF);
11 | __uint(max_entries, 128 * 1024); // 128KB ring buffer
12 | } syscall_events SEC(".maps");
13 |
14 | struct data_t
15 | {
16 | __u64 inum;
17 | __u64 sysnbr;
18 | };
19 |
20 | SEC("tracepoint/raw_syscalls/sys_enter")
21 | int trace_execve(struct trace_event_raw_sys_enter *ctx)
22 | {
23 | struct task_struct *task;
24 | u32 *inum = 0;
25 |
26 | task = (struct task_struct *)bpf_get_current_task();
27 | __u64 net_ns = BPF_CORE_READ(task, nsproxy, net_ns, ns.inum);
28 |
29 | // Early exit if not in tracked namespace
30 | inum = bpf_map_lookup_elem(&inode_num, &net_ns);
31 | if (!inum)
32 | return 0;
33 |
34 | // Filter syscalls using allowlist if populated
35 | // If allowlist is empty (no entries), trace all syscalls (backward compatible)
36 | u32 syscall_id = (__u32)ctx->id;
37 | u32 *allowed = bpf_map_lookup_elem(&allowed_syscalls, &syscall_id);
38 |
39 | // If allowlist has entries, only trace allowed syscalls
40 | // Check if map is populated by testing a known syscall (0)
41 | u32 zero = 0;
42 | u32 *test = bpf_map_lookup_elem(&allowed_syscalls, &zero);
43 |
44 | // If allowlist is populated (test returns non-NULL) but current syscall not found, skip
45 | if (test && !allowed)
46 | return 0;
47 |
48 | // Reserve space in ring buffer
49 | struct data_t *data;
50 | data = bpf_ringbuf_reserve(&syscall_events, sizeof(*data), 0);
51 | if (!data)
52 | return 0; // Buffer full, drop event
53 |
54 | // Fill event data
55 | data->sysnbr = ctx->id;
56 | data->inum = net_ns;
57 |
58 | // Submit to userspace
59 | bpf_ringbuf_submit(data, 0);
60 |
61 | return 0;
62 | }
63 |
64 | char LICENSE[] SEC("license") = "GPL";
65 |
--------------------------------------------------------------------------------
/docs/concepts/network-policies.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Network Policies"
3 | description: "Understanding Kubernetes Network Policies and how kguardian generates them"
4 | icon: "network-wired"
5 | ---
6 |
7 | ## What are Network Policies?
8 |
9 | Kubernetes Network Policies are firewall rules for your pods. They control:
10 | - **Ingress**: What can connect TO your pod
11 | - **Egress**: What your pod can connect TO
12 |
13 | Without Network Policies, all pods can communicate with all other pods (flat network).
14 |
15 | ## Structure of a Network Policy
16 |
17 | ```yaml
18 | apiVersion: networking.k8s.io/v1
19 | kind: NetworkPolicy
20 | metadata:
21 | name: my-app
22 | spec:
23 | podSelector: # Which pods this policy applies to
24 | matchLabels:
25 | app: my-app
26 | policyTypes:
27 | - Ingress
28 | - Egress
29 | ingress: # Allow incoming from...
30 | - from:
31 | - podSelector:
32 | matchLabels:
33 | app: frontend
34 | ports:
35 | - port: 8080
36 | egress: # Allow outgoing to...
37 | - to:
38 | - podSelector:
39 | matchLabels:
40 | app: database
41 | ```
42 |
43 | ## How kguardian Generates Policies
44 |
45 | 1. **Observes traffic** via eBPF for 5+ minutes
46 | 2. **Identifies peers** by resolving IPs to pods/services
47 | 3. **Groups rules** by protocol and port
48 | 4. **Deduplicates** to create minimal policies
49 | 5. **Generates YAML** ready to apply
50 |
51 | ## Default-Deny Strategy
52 |
53 | Best practice: Start with default-deny, then allowlist:
54 |
55 | ```yaml
56 | # 1. Deny all traffic
57 | apiVersion: networking.k8s.io/v1
58 | kind: NetworkPolicy
59 | metadata:
60 | name: default-deny
61 | spec:
62 | podSelector: {}
63 | policyTypes:
64 | - Ingress
65 | - Egress
66 |
67 | # 2. Apply kguardian-generated allowlist policies
68 | ```
69 |
70 | ---
71 |
72 | **Next steps:**
73 | - [Generate Network Policies](/guides/generating-network-policies)
74 | - [Cilium Policies](/advanced/cilium-policies)
75 |
--------------------------------------------------------------------------------
/charts/kguardian/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "kguardian.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "kguardian.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | This works around a problem within Helm discussed here
28 | https://github.com/helm/helm/issues/5358
29 | */}}
30 | {{- define "kguardian.namespace" -}}
31 | {{ .Values.namespace.name | default .Release.Namespace }}
32 | {{- end -}}
33 |
34 | {{/*
35 | Create chart name and version as used by the chart label.
36 | */}}
37 | {{- define "kguardian.chart" -}}
38 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
39 | {{- end }}
40 |
41 | {{/*
42 | Common labels
43 | */}}
44 | {{- define "kguardian.labels" -}}
45 | {{ include "kguardian.selectorLabels" . }}
46 | app.kubernetes.io/managed-by: {{ .Release.Service }}
47 | {{- if .Values.global.labels}}
48 | {{ toYaml .Values.global.labels }}
49 | {{- end }}
50 | {{- end }}
51 |
52 | {{/*
53 | Common Annotations
54 | */}}
55 | {{- define "kguardian.annotations" -}}
56 | {{- if .Values.global.annotations -}}
57 | {{- toYaml .Values.global.annotations | nindent 2 }}
58 | {{- end }}
59 | {{- end }}
60 |
61 | {{/*
62 | Selector labels
63 | */}}
64 | {{- define "kguardian.selectorLabels" -}}
65 | app.kubernetes.io/instance: {{ .Release.Name }}
66 | {{- end }}
67 |
--------------------------------------------------------------------------------
/controller/build.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::ffi::OsStr;
3 | use std::path::PathBuf;
4 |
5 | use libbpf_cargo::SkeletonBuilder;
6 | use vmlinux;
7 |
8 | const SYSCALL_SRC: &str = "src/bpf/syscall.bpf.c";
9 | const TCP_PROBE_SRC: &str = "src/bpf/network_probe.bpf.c";
10 | const PACKET_DROP_SRC: &str = "src/bpf/netpolicy_drop.bpf.c";
11 |
12 | fn main() {
13 | let out = PathBuf::from(
14 | env::var_os("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set in build script"),
15 | )
16 | .join("src")
17 | .join("bpf")
18 | .join("syscall.skel.rs");
19 |
20 | let pkt_drop_out = PathBuf::from(
21 | env::var_os("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set in build script"),
22 | )
23 | .join("src")
24 | .join("bpf")
25 | .join("netpolicy_drop.skel.rs");
26 |
27 | let tcp_probe_out = PathBuf::from(
28 | env::var_os("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR must be set in build script"),
29 | )
30 | .join("src")
31 | .join("bpf")
32 | .join("network_probe.skel.rs");
33 |
34 | let arch = env::var("CARGO_CFG_TARGET_ARCH")
35 | .expect("CARGO_CFG_TARGET_ARCH must be set in build script");
36 |
37 | SkeletonBuilder::new()
38 | .source(SYSCALL_SRC)
39 | .clang_args([
40 | OsStr::new("-I"),
41 | vmlinux::include_path_root().join(&arch).as_os_str(),
42 | ])
43 | .build_and_generate(&out)
44 | .unwrap();
45 |
46 | SkeletonBuilder::new()
47 | .source(TCP_PROBE_SRC)
48 | .clang_args([
49 | OsStr::new("-I"),
50 | vmlinux::include_path_root().join(&arch).as_os_str(),
51 | ])
52 | .build_and_generate(&tcp_probe_out)
53 | .unwrap();
54 |
55 | SkeletonBuilder::new()
56 | .source(PACKET_DROP_SRC)
57 | .clang_args([
58 | OsStr::new("-I"),
59 | vmlinux::include_path_root().join(arch).as_os_str(),
60 | ])
61 | .build_and_generate(&pkt_drop_out)
62 | .unwrap();
63 |
64 | println!("cargo:rerun-if-changed=src/bpf");
65 | }
66 |
--------------------------------------------------------------------------------
/docs/api-reference/introduction.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Broker API Reference"
3 | description: "REST API for querying kguardian telemetry data"
4 | icon: "code"
5 | ---
6 |
7 | ## Overview
8 |
9 | The kguardian Broker exposes a REST API for querying collected telemetry data. The CLI uses this API internally, but you can also integrate directly with it for custom workflows.
10 |
11 | ## Base URL
12 |
13 | ```
14 | http://kguardian-broker.kguardian.svc.cluster.local:9090
15 | ```
16 |
17 | For external access, use port-forwarding:
18 |
19 | ```bash
20 | kubectl port-forward -n kguardian svc/kguardian-broker 9090:9090
21 | ```
22 |
23 | ## Authentication
24 |
25 |
26 | The current version (v1.0.0) has **no authentication**. The API should only be accessible within the cluster network. For production deployments, consider using NetworkPolicies, service mesh, or an API gateway with authentication.
27 |
28 |
29 | ## API Endpoints
30 |
31 |
32 |
33 | Add and retrieve pod metadata
34 |
35 |
36 | Query network traffic data
37 |
38 |
39 | Retrieve syscall observations
40 |
41 |
42 | Service IP to metadata mapping
43 |
44 |
45 |
46 | ## Response Format
47 |
48 | All responses are JSON with standard HTTP status codes:
49 |
50 | - `200 OK` - Success
51 | - `400 Bad Request` - Invalid parameters
52 | - `404 Not Found` - Resource not found
53 | - `500 Internal Server Error` - Server error
54 |
55 | ## Common Patterns
56 |
57 | ### Pagination
58 |
59 | Currently not supported. All matching records are returned.
60 |
61 | ### Filtering
62 |
63 | Some endpoints support filtering by namespace, pod name, or time range (coming soon).
64 |
65 | ### Rate Limiting
66 |
67 | No rate limiting is currently enforced.
68 |
--------------------------------------------------------------------------------
/llm-bridge/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## [1.1.1](https://github.com/kguardian-dev/kguardian/compare/llm-bridge/v1.1.0...llm-bridge/v1.1.1) (2025-12-12)
4 |
5 |
6 | ### Bug Fixes
7 |
8 | * llm with mcp ([0797192](https://github.com/kguardian-dev/kguardian/commit/079719225cabfdae169556af303a09c01d7e2243))
9 | * llm with mcp ([453003f](https://github.com/kguardian-dev/kguardian/commit/453003ff9fbf2b00be1bb12d5c4f75b9f398727a))
10 |
11 | ## [1.1.0](https://github.com/kguardian-dev/kguardian/compare/llm-bridge/v1.0.0...llm-bridge/v1.1.0) (2025-11-06)
12 |
13 |
14 | ### Features
15 |
16 | * add LLM + MCP ([0364874](https://github.com/kguardian-dev/kguardian/commit/03648744eabcf6005ff6a35cf761df608e239a81))
17 | * add LLM + MCP integration ([a165a51](https://github.com/kguardian-dev/kguardian/commit/a165a5168ef91afe71bdb17e726baeb5df024511))
18 |
19 |
20 | ### Bug Fixes
21 |
22 | * connect llm-bridge to MCP server for all 6 tools ([d0e8d5a](https://github.com/kguardian-dev/kguardian/commit/d0e8d5a588ea7ddc46700de3f2c7b27875aba5f8))
23 | * **deps:** update dependency dotenv to v17 ([1f234d3](https://github.com/kguardian-dev/kguardian/commit/1f234d35873d01b7c828965d65d04979cfb82926))
24 | * **deps:** update dependency dotenv to v17 ([def6bc7](https://github.com/kguardian-dev/kguardian/commit/def6bc7d92db00c8a29bd3700c1f914c0f918a43))
25 | * **deps:** update dependency express to v5 ([a42ebe6](https://github.com/kguardian-dev/kguardian/commit/a42ebe65ff6d95cfd2c503fc23618aca31608260))
26 | * **deps:** update dependency express to v5 ([a735730](https://github.com/kguardian-dev/kguardian/commit/a73573040ae9914ea9f3dbdb90e7266fa223d0c3))
27 | * **deps:** update dependency zod to v4 ([29ac796](https://github.com/kguardian-dev/kguardian/commit/29ac79631a978fbf2434868f229f97c0efbec763))
28 | * **deps:** update dependency zod to v4 ([7e71d16](https://github.com/kguardian-dev/kguardian/commit/7e71d160fced4cd46cfd0aeb0854ab3724169e57))
29 | * docker builds ([0a449c8](https://github.com/kguardian-dev/kguardian/commit/0a449c859b93e839333955bcb6dd574042eaedc1))
30 | * resolve OpenAI 400 error with proper tool message formatting ([b0e3adc](https://github.com/kguardian-dev/kguardian/commit/b0e3adcd1d4aad8078e74d87fd1d5bde9616a431))
31 |
--------------------------------------------------------------------------------
/docs/concepts/overview.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Core Concepts"
3 | description: "Understanding the fundamentals of kguardian's security approach"
4 | icon: "book-open"
5 | ---
6 |
7 | ## What is kguardian?
8 |
9 | kguardian implements **observation-based security** - it watches what your applications actually do at runtime and generates security policies that match that behavior. This is fundamentally different from traditional approaches where you manually write policies and hope they're correct.
10 |
11 | ## Key Concepts
12 |
13 | ### eBPF-Powered Observability
14 |
15 | kguardian uses eBPF (extended Berkeley Packet Filter) technology to observe kernel-level events:
16 |
17 | - **Network connections**: TCP/UDP socket operations
18 | - **System calls**: File access, process creation, etc.
19 | - **Zero code changes**: No sidecars, agents, or instrumentation needed
20 |
21 |
22 | eBPF runs safely in the kernel with minimal overhead (~1-2% CPU), making it perfect for production observability.
23 |
24 |
25 | ### Least-Privilege Security
26 |
27 | The core principle: **allow only what's actually used**.
28 |
29 | Instead of guessing what your app needs, kguardian:
30 | 1. Observes actual behavior
31 | 2. Generates policies that allow exactly that behavior
32 | 3. Blocks everything else (default-deny)
33 |
34 | ### Runtime to Policy Pipeline
35 |
36 | ```mermaid
37 | graph LR
38 | A[Application Runs] --> B[eBPF Observes]
39 | B --> C[Controller Enriches]
40 | C --> D[Broker Stores]
41 | D --> E[CLI Analyzes]
42 | E --> F[Policy Generated]
43 | ```
44 |
45 | ---
46 |
47 |
48 |
49 | Deep dive into how eBPF observability works
50 |
51 |
52 | Understanding Kubernetes network policies
53 |
54 |
55 | What are seccomp profiles and why they matter
56 |
57 |
58 | See how all components work together
59 |
60 |
61 |
--------------------------------------------------------------------------------
/broker/src/schema.rs:
--------------------------------------------------------------------------------
1 | // @generated automatically by Diesel CLI.
2 |
3 | diesel::table! {
4 | pod_details (pod_ip) {
5 | pod_name -> Varchar,
6 | pod_ip -> Varchar,
7 | pod_namespace -> Nullable,
8 | pod_obj -> Nullable,
9 | time_stamp -> Timestamp,
10 | node_name -> Varchar,
11 | is_dead -> Bool,
12 | pod_identity -> Nullable,
13 | workload_selector_labels -> Nullable,
14 | }
15 | }
16 |
17 | diesel::table! {
18 | pod_traffic (uuid) {
19 | uuid -> Varchar,
20 | pod_name -> Nullable,
21 | pod_namespace -> Nullable,
22 | pod_ip -> Nullable,
23 | pod_port -> Nullable,
24 | ip_protocol -> Nullable,
25 | traffic_type -> Nullable,
26 | traffic_in_out_ip -> Nullable,
27 | traffic_in_out_port -> Nullable,
28 | decision -> Nullable,
29 | time_stamp -> Timestamp,
30 | }
31 | }
32 |
33 | diesel::table! {
34 | pod_packet_drop (uuid) {
35 | uuid -> Varchar,
36 | pod_name -> Nullable,
37 | pod_namespace -> Nullable,
38 | pod_ip -> Nullable,
39 | pod_port -> Nullable,
40 | ip_protocol -> Nullable,
41 | traffic_type -> Nullable,
42 | traffic_in_out_ip -> Nullable,
43 | traffic_in_out_port -> Nullable,
44 | drop_reason -> Nullable,
45 | time_stamp -> Timestamp,
46 | }
47 | }
48 |
49 | diesel::table! {
50 | pod_syscalls (pod_name) {
51 | pod_name -> Varchar,
52 | pod_namespace -> Varchar,
53 | syscalls -> Varchar,
54 | arch -> Varchar,
55 | time_stamp -> Timestamp,
56 | }
57 | }
58 |
59 | diesel::table! {
60 | svc_details (svc_ip) {
61 | svc_ip -> Varchar,
62 | svc_name -> Nullable,
63 | svc_namespace -> Nullable,
64 | service_spec -> Nullable,
65 | time_stamp -> Timestamp,
66 | }
67 | }
68 |
69 | diesel::allow_tables_to_appear_in_same_query!(pod_details, pod_traffic, svc_details, pod_syscalls,);
70 |
--------------------------------------------------------------------------------
/mcp-server/tools/pod_details.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
10 | "github.com/modelcontextprotocol/go-sdk/mcp"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
14 | // PodDetailsInput defines the input parameters for getting pod details by IP
15 | type PodDetailsInput struct {
16 | IP string `json:"ip" jsonschema:"The IP address of the pod to query"`
17 | }
18 |
19 | // PodDetailsOutput defines the output structure
20 | type PodDetailsOutput struct {
21 | Data string `json:"data" jsonschema:"Pod details in JSON format"`
22 | }
23 |
24 | // PodDetailsHandler handles the get_pod_details tool
25 | type PodDetailsHandler struct {
26 | client *BrokerClient
27 | }
28 |
29 | // Call implements the tool handler
30 | func (h PodDetailsHandler) Call(
31 | ctx context.Context,
32 | req *mcp.CallToolRequest,
33 | input PodDetailsInput,
34 | ) (*mcp.CallToolResult, PodDetailsOutput, error) {
35 | startTime := time.Now()
36 | logger.Log.WithField("ip", input.IP).Info("Received get_pod_details request")
37 |
38 | if input.IP == "" {
39 | logger.Log.Error("IP address is required but not provided")
40 | return nil, PodDetailsOutput{}, fmt.Errorf("IP address is required")
41 | }
42 |
43 | data, err := h.client.GetPodByIP(input.IP)
44 | if err != nil {
45 | logger.Log.WithFields(logrus.Fields{
46 | "ip": input.IP,
47 | "error": err.Error(),
48 | "total_duration": time.Since(startTime).String(),
49 | }).Error("Error fetching pod details")
50 | return nil, PodDetailsOutput{}, fmt.Errorf("error fetching pod details: %w", err)
51 | }
52 |
53 | jsonData, err := json.MarshalIndent(data, "", " ")
54 | if err != nil {
55 | logger.Log.WithField("error", err.Error()).Error("Error marshaling response")
56 | return nil, PodDetailsOutput{}, fmt.Errorf("error marshaling response: %w", err)
57 | }
58 |
59 | logger.Log.WithFields(logrus.Fields{
60 | "ip": input.IP,
61 | "response_bytes": len(jsonData),
62 | "total_duration": time.Since(startTime).String(),
63 | }).Info("Successfully fetched pod details")
64 |
65 | return nil, PodDetailsOutput{Data: string(jsonData)}, nil
66 | }
67 |
--------------------------------------------------------------------------------
/advisor/pkg/k8s/portforward_test.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "k8s.io/client-go/kubernetes"
9 | "k8s.io/client-go/rest"
10 | )
11 |
12 | func TestPortForward(t *testing.T) {
13 | // Test basic validation failures
14 | // Test nil config
15 | stopChan, errChan, done := PortForward(nil)
16 | select {
17 | case err := <-errChan:
18 | assert.Error(t, err)
19 | assert.Contains(t, err.Error(), "nil Kubernetes configuration")
20 | case <-time.After(time.Second):
21 | t.Fatal("Expected error but none received")
22 | }
23 | <-done // Wait for done signal
24 | close(stopChan) // Clean up
25 |
26 | // Test nil clientset
27 | nilClientConfig := &Config{
28 | Clientset: nil,
29 | Config: &rest.Config{},
30 | }
31 | stopChan, errChan, done = PortForward(nilClientConfig)
32 | select {
33 | case err := <-errChan:
34 | assert.Error(t, err)
35 | assert.Contains(t, err.Error(), "nil Kubernetes clientset")
36 | case <-time.After(time.Second):
37 | t.Fatal("Expected error but none received")
38 | }
39 | <-done // Wait for done signal
40 | close(stopChan) // Clean up
41 |
42 | // Test nil REST config
43 | nilRestConfig := &Config{
44 | Clientset: &kubernetes.Clientset{},
45 | Config: nil,
46 | }
47 | stopChan, errChan, done = PortForward(nilRestConfig)
48 | select {
49 | case err := <-errChan:
50 | assert.Error(t, err)
51 | assert.Contains(t, err.Error(), "nil REST configuration")
52 | case <-time.After(time.Second):
53 | t.Fatal("Expected error but none received")
54 | }
55 | <-done // Wait for done signal
56 | close(stopChan) // Clean up
57 | }
58 |
59 | func TestWriterFunc(t *testing.T) {
60 | // Test the writerFunc adapter
61 | var called bool
62 | var capturedData []byte
63 |
64 | // Create a writerFunc that captures the data
65 | w := writerFunc(func(p []byte) (int, error) {
66 | called = true
67 | capturedData = make([]byte, len(p))
68 | copy(capturedData, p)
69 | return len(p), nil
70 | })
71 |
72 | testData := []byte("test data")
73 | n, err := w.Write(testData)
74 |
75 | assert.NoError(t, err)
76 | assert.Equal(t, len(testData), n)
77 | assert.True(t, called)
78 | assert.Equal(t, testData, capturedData)
79 | }
80 |
--------------------------------------------------------------------------------
/.github/workflows/advisor-test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: Build and Test Advisor
4 |
on:
  workflow_dispatch: {}
  push:
    branches:
      - main
    paths:
      # Trigger when this workflow itself changes (the previous filter
      # referenced build-go.yaml, so edits to this file never ran CI).
      - .github/workflows/advisor-test.yaml
      # 'advisor/*.go' only matched top-level files; advisor code lives in
      # cmd/ and pkg/ subdirectories, so match the whole tree.
      - 'advisor/**'
  pull_request:
    branches:
      - main
    paths:
      - .github/workflows/advisor-test.yaml
      - 'advisor/**'
19 |
20 | permissions:
21 | contents: read
22 |
23 | defaults:
24 | run:
25 | working-directory: ./advisor
26 |
27 | jobs:
28 | lint:
29 | runs-on: ubuntu-latest
30 | strategy:
31 | matrix:
32 | go-version: [ '1.24.x' ]
33 | steps:
34 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
35 |
36 | - name: Setup Go
37 | uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6
38 | with:
39 | go-version: ${{ matrix.go-version }}
40 | cache: false
41 |
42 | - name: golangci-lint
43 | uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9
44 | with:
45 | version: latest
46 | working-directory: ./advisor
47 | args: --timeout=5m
48 |
49 | build:
50 | runs-on: ubuntu-latest
51 | needs: [lint]
52 | strategy:
53 | matrix:
54 | go-version: [ '1.24.x' ]
55 | steps:
56 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
57 |
58 | - name: Setup Go ${{ matrix.go-version }}
59 | uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6
60 | with:
61 | go-version: ${{ matrix.go-version }}
62 |
63 | - name: Install dependencies
64 | run: go get -v ./...
65 |
66 | - name: Build
67 | run: go build -v ./...
68 |
69 | - name: Test with the Go CLI
70 | run: go test -v ./... > test-results-${{ matrix.go-version }}.json
71 |
72 | - name: Upload Go test results
73 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
74 | with:
75 | name: test-results-${{ matrix.go-version }}
76 | path: test-results-${{ matrix.go-version }}.json
77 |
--------------------------------------------------------------------------------
/advisor/cmd/version.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "fmt"
5 | "runtime"
6 |
7 | log "github.com/rs/zerolog/log"
8 | "github.com/spf13/cobra"
9 | "github.com/kguardian-dev/kguardian/advisor/pkg/k8s"
10 | )
11 |
12 | // Version information - these will be set during build
13 | var (
14 | Version = "development"
15 | BuildDate = "unknown"
16 | GitCommit = "unknown"
17 | )
18 |
19 | var versionCmd = &cobra.Command{
20 | Use: "version",
21 | Short: "Print the client and server version information",
22 | Long: `Display the client version and, if connected to a Kubernetes server, the server version as well.`,
23 | Run: func(cmd *cobra.Command, args []string) {
24 | // Set up the logger first, so we get useful debug output
25 | setupLogger()
26 |
27 | // Display client version information
28 | fmt.Printf("Client Version:\n")
29 | fmt.Printf(" Version: %s\n", Version)
30 | fmt.Printf(" Git Commit: %s\n", GitCommit)
31 | fmt.Printf(" Build Date: %s\n", BuildDate)
32 | fmt.Printf(" Go Version: %s\n", runtime.Version())
33 | fmt.Printf(" Platform: %s/%s\n", runtime.GOOS, runtime.GOARCH)
34 |
35 | // Try to get server version information
36 | fmt.Printf("\nServer Version:\n")
37 |
38 | // Get Kubernetes config
39 | config, err := k8s.GetConfig(true) // Use dry-run mode
40 | if err != nil {
41 | log.Debug().Err(err).Msg("Failed to get Kubernetes configuration")
42 | fmt.Printf(" Unable to connect to Kubernetes server: %v\n", err)
43 | return
44 | }
45 |
46 | if config.Clientset == nil {
47 | log.Debug().Msg("Kubernetes clientset is nil")
48 | fmt.Printf(" Not connected to a Kubernetes server\n")
49 | return
50 | }
51 |
52 | // Get server version
53 | serverVersion, err := config.Clientset.Discovery().ServerVersion()
54 | if err != nil {
55 | log.Debug().Err(err).Msg("Failed to get server version")
56 | fmt.Printf(" Unable to retrieve server version: %v\n", err)
57 | return
58 | }
59 |
60 | fmt.Printf(" Version: %s\n", serverVersion.GitVersion)
61 | fmt.Printf(" Platform: %s/%s\n", serverVersion.Platform, serverVersion.GoVersion)
62 | fmt.Printf(" Build Date: %s\n", serverVersion.BuildDate)
63 | },
64 | }
65 |
66 | func init() {
67 | rootCmd.AddCommand(versionCmd)
68 | }
69 |
--------------------------------------------------------------------------------
/.github/workflows/frontend-release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: Build and Push UI Docker Images
4 |
5 | on:
6 | workflow_dispatch:
7 | push:
8 | tags:
9 | - "frontend/v*"
10 |
11 | jobs:
12 | build_and_push:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
17 |
18 | - name: Cache Node modules
19 | uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5
20 | with:
21 | path: |
22 | ~/.npm
23 | frontend/node_modules
24 | key: ${{ runner.os }}-node-${{ hashFiles('frontend/package-lock.json') }}
25 | restore-keys: |
26 | ${{ runner.os }}-node-
27 |
28 | - name: Set up QEMU
29 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
30 | with:
31 | platforms: all
32 |
33 | - name: Set up Docker Buildx
34 | id: buildx
35 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
36 | with:
37 | install: true
38 | version: latest
39 |
40 | - name: Login to GHCR
41 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
42 | with:
43 | registry: ghcr.io
44 | username: ${{ github.actor }}
45 | password: ${{ secrets.GITHUB_TOKEN }}
46 |
47 | - name: Extract tag name
48 | id: extract_tag
49 | run: |
50 | TAG="${GITHUB_REF#refs/tags/}"
51 | VERSION="${TAG#frontend/}"
52 | echo "TAG_NAME=${VERSION}" >> $GITHUB_OUTPUT
53 |
54 | - name: Build and Push
55 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
56 | with:
57 | context: frontend/
58 | file: frontend/Dockerfile
59 | platforms: linux/amd64,linux/arm64
60 | push: true
61 | tags: |
62 | ghcr.io/kguardian-dev/kguardian/frontend:${{ steps.extract_tag.outputs.TAG_NAME }}
63 | ghcr.io/kguardian-dev/kguardian/frontend:latest
64 | cache-from: type=gha
65 | cache-to: type=gha,mode=max
66 |
--------------------------------------------------------------------------------
/frontend/src/services/aiApi.ts:
--------------------------------------------------------------------------------
1 | import axios from 'axios';
2 |
// LLM Bridge URL - use relative path for proxy in production, or direct URL in development
// In production (Vite preview), this proxies through /llm-api to the llm-bridge service
// In development, this can connect directly to localhost:8080 or use the dev proxy
const LLM_BRIDGE_URL = import.meta.env.PROD ? '/llm-api' : (import.meta.env.VITE_LLM_BRIDGE_URL || 'http://localhost:8080');

/** LLM backends the bridge can route a chat request to. */
export type LLMProvider = 'openai' | 'anthropic' | 'gemini' | 'copilot';

/** One prior conversation turn, sent back to the bridge for context. */
export interface HistoryMessage {
  role: 'user' | 'assistant' | 'system';
  content: string;
}

/** Request body for POST /api/chat (snake_case wire format). */
export interface ChatMessage {
  message: string;
  conversation_id?: string;
  provider?: LLMProvider;
  model?: string;
  system_prompt?: string;
  history?: HistoryMessage[];
}

/** Response body returned by the bridge for a chat request. */
export interface ChatResponse {
  message: string;
  provider: LLMProvider;
  model: string;
  conversation_id?: string;
}
30 |
31 | /**
32 | * Send a chat message to the AI assistant
33 | * @param message The user's message
34 | * @param history Optional: Previous conversation messages for context
35 | * @param provider Optional: Specify which LLM provider to use (openai, anthropic, gemini, copilot)
36 | * @param conversationId Optional: Continue an existing conversation
37 | * @returns The AI's response
38 | */
39 | export async function sendChatMessage(
40 | message: string,
41 | history?: HistoryMessage[],
42 | provider?: LLMProvider,
43 | conversationId?: string
44 | ): Promise {
45 | try {
46 | const response = await axios.post(`${LLM_BRIDGE_URL}/api/chat`, {
47 | message,
48 | history,
49 | provider,
50 | conversationId,
51 | });
52 |
53 | return response.data;
54 | } catch (error) {
55 | if (axios.isAxiosError(error)) {
56 | const errorMessage = error.response?.data?.error || error.message;
57 | const details = error.response?.data?.details;
58 | throw new Error(
59 | `Failed to get AI response: ${errorMessage}${details ? ` - ${details}` : ''}`
60 | );
61 | }
62 | throw new Error('An unexpected error occurred while calling the AI API');
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/frontend/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from 'vite'
2 | import react from '@vitejs/plugin-react'
3 |
4 | // https://vite.dev/config/
5 | export default defineConfig({
6 | plugins: [react()],
7 |
8 | // Development server configuration
9 | server: {
10 | allowedHosts: true,
11 | proxy: {
12 | '/api': {
13 | target: process.env.VITE_API_URL || 'http://localhost:9090',
14 | changeOrigin: true,
15 | rewrite: (path) => path.replace(/^\/api/, ''),
16 | },
17 | '/llm-api': {
18 | target: process.env.VITE_LLM_BRIDGE_URL || 'http://localhost:8080',
19 | changeOrigin: true,
20 | rewrite: (path) => path.replace(/^\/llm-api/, ''),
21 | },
22 | },
23 | },
24 |
25 | // Production build configuration
26 | build: {
27 | // Output directory for production build
28 | outDir: 'dist',
29 |
30 | // Generate sourcemaps for production debugging (optional, disable for smaller builds)
31 | sourcemap: false,
32 |
33 | // Target modern browsers for smaller bundles
34 | target: 'esnext',
35 |
36 | // Optimize chunk splitting
37 | rollupOptions: {
38 | output: {
39 | // Manual chunk splitting for better caching
40 | manualChunks: {
41 | // Vendor chunks
42 | 'react-vendor': ['react', 'react-dom'],
43 | 'react-flow-vendor': ['reactflow'],
44 | },
45 | },
46 | },
47 |
48 | // Chunk size warning limit (500 KB)
49 | chunkSizeWarningLimit: 500,
50 |
51 | // Minification
52 | minify: 'esbuild',
53 |
54 | // Asset optimization
55 | assetsInlineLimit: 4096, // 4kb - inline assets smaller than this
56 | },
57 |
58 | // Preview server configuration (for production)
59 | preview: {
60 | port: 5173,
61 | host: '0.0.0.0',
62 | strictPort: true,
63 | allowedHosts: true,
64 | proxy: {
65 | '/api': {
66 | target: process.env.VITE_API_URL || 'http://localhost:9090',
67 | changeOrigin: true,
68 | rewrite: (path) => path.replace(/^\/api/, ''),
69 | },
70 | '/llm-api': {
71 | target: process.env.VITE_LLM_BRIDGE_URL || 'http://localhost:8080',
72 | changeOrigin: true,
73 | rewrite: (path) => path.replace(/^\/llm-api/, ''),
74 | },
75 | },
76 | },
77 | })
78 |
--------------------------------------------------------------------------------
/mcp-server/tools/service_details.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
10 | "github.com/modelcontextprotocol/go-sdk/mcp"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
// ServiceDetailsInput defines the input parameters for getting service details by IP
type ServiceDetailsInput struct {
	IP string `json:"ip" jsonschema:"The IP address of the service to query"`
}

// ServiceDetailsOutput defines the output structure
type ServiceDetailsOutput struct {
	// Data holds the broker's service record serialized as indented JSON.
	Data string `json:"data" jsonschema:"Service details in JSON format"`
}

// ServiceDetailsHandler handles the get_service_details tool
type ServiceDetailsHandler struct {
	// client queries the kguardian broker for service records.
	client *BrokerClient
}
28 |
29 | // Call implements the tool handler
30 | func (h ServiceDetailsHandler) Call(
31 | ctx context.Context,
32 | req *mcp.CallToolRequest,
33 | input ServiceDetailsInput,
34 | ) (*mcp.CallToolResult, ServiceDetailsOutput, error) {
35 | startTime := time.Now()
36 | logger.Log.WithField("ip", input.IP).Info("Received get_service_details request")
37 |
38 | if input.IP == "" {
39 | logger.Log.Error("IP address is required but not provided")
40 | return nil, ServiceDetailsOutput{}, fmt.Errorf("IP address is required")
41 | }
42 |
43 | data, err := h.client.GetServiceByIP(input.IP)
44 | if err != nil {
45 | logger.Log.WithFields(logrus.Fields{
46 | "ip": input.IP,
47 | "error": err.Error(),
48 | "total_duration": time.Since(startTime).String(),
49 | }).Error("Error fetching service details")
50 | return nil, ServiceDetailsOutput{}, fmt.Errorf("error fetching service details: %w", err)
51 | }
52 |
53 | jsonData, err := json.MarshalIndent(data, "", " ")
54 | if err != nil {
55 | logger.Log.WithField("error", err.Error()).Error("Error marshaling response")
56 | return nil, ServiceDetailsOutput{}, fmt.Errorf("error marshaling response: %w", err)
57 | }
58 |
59 | logger.Log.WithFields(logrus.Fields{
60 | "ip": input.IP,
61 | "response_bytes": len(jsonData),
62 | "total_duration": time.Since(startTime).String(),
63 | }).Info("Successfully fetched service details")
64 |
65 | return nil, ServiceDetailsOutput{Data: string(jsonData)}, nil
66 | }
67 |
--------------------------------------------------------------------------------
/.github/workflows/mcp-server-release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: Build and Push MCP Server Docker Images
4 |
5 | on:
6 | workflow_dispatch:
7 | push:
8 | tags:
9 | - "mcp-server/v*"
10 |
11 | jobs:
12 | build_and_push:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
17 |
18 | - name: Cache Go modules
19 | uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5
20 | with:
21 | path: |
22 | ~/.cache/go-build
23 | ~/go/pkg/mod
24 | key: ${{ runner.os }}-go-mcp-server-${{ hashFiles('mcp-server/go.sum') }}
25 | restore-keys: |
26 | ${{ runner.os }}-go-mcp-server-
27 | ${{ runner.os }}-go-
28 |
29 | - name: Set up QEMU
30 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
31 | with:
32 | platforms: all
33 |
34 | - name: Set up Docker Buildx
35 | id: buildx
36 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
37 | with:
38 | install: true
39 | version: latest
40 |
41 | - name: Login to GHCR
42 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
43 | with:
44 | registry: ghcr.io
45 | username: ${{ github.actor }}
46 | password: ${{ secrets.GITHUB_TOKEN }}
47 |
48 | - name: Extract tag name
49 | id: extract_tag
50 | run: |
51 | TAG="${GITHUB_REF#refs/tags/}"
52 | VERSION="${TAG#mcp-server/}"
53 | echo "TAG_NAME=${VERSION}" >> $GITHUB_OUTPUT
54 |
55 | - name: Build and Push
56 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
57 | with:
58 | context: mcp-server/
59 | file: mcp-server/Dockerfile
60 | platforms: linux/amd64,linux/arm64
61 | push: true
62 | tags: |
63 | ghcr.io/kguardian-dev/kguardian/mcp-server:${{ steps.extract_tag.outputs.TAG_NAME }}
64 | ghcr.io/kguardian-dev/kguardian/mcp-server:latest
65 | cache-from: type=gha
66 | cache-to: type=gha,mode=max
67 |
--------------------------------------------------------------------------------
/docs/essentials/navigation.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Navigation'
3 | description: 'The navigation field in docs.json defines the pages that go in the navigation menu'
4 | icon: 'map'
5 | ---
6 |
7 | The navigation menu is the list of links shown on every page of your website.
8 |
9 | You will likely update `docs.json` every time you add a new page. Pages do not show up automatically.
10 |
11 | ## Navigation syntax
12 |
13 | Our navigation syntax is recursive which means you can make nested navigation groups. You don't need to include `.mdx` in page names.
14 |
15 |
16 |
17 | ```json Regular Navigation
18 | "navigation": {
19 | "tabs": [
20 | {
21 | "tab": "Docs",
22 | "groups": [
23 | {
24 | "group": "Getting Started",
25 | "pages": ["quickstart"]
26 | }
27 | ]
28 | }
29 | ]
30 | }
31 | ```
32 |
33 | ```json Nested Navigation
34 | "navigation": {
35 | "tabs": [
36 | {
37 | "tab": "Docs",
38 | "groups": [
39 | {
40 | "group": "Getting Started",
41 | "pages": [
42 | "quickstart",
43 | {
44 | "group": "Nested Reference Pages",
45 | "pages": ["nested-reference-page"]
46 | }
47 | ]
48 | }
49 | ]
50 | }
51 | ]
52 | }
53 | ```
54 |
55 |
56 |
57 | ## Folders
58 |
59 | Simply put your MDX files in folders and update the paths in `docs.json`.
60 |
61 | For example, to have a page at `https://yoursite.com/your-folder/your-page` you would make a folder called `your-folder` containing an MDX file called `your-page.mdx`.
62 |
63 |
64 |
65 | You cannot use `api` for the name of a folder unless you nest it inside another folder. Mintlify uses Next.js which reserves the top-level `api` folder for internal server calls. A folder name such as `api-reference` would be accepted.
66 |
67 |
68 |
69 | ```json Navigation With Folder
70 | "navigation": {
71 | "tabs": [
72 | {
73 | "tab": "Docs",
74 | "groups": [
75 | {
76 | "group": "Group Name",
77 | "pages": ["your-folder/your-page"]
78 | }
79 | ]
80 | }
81 | ]
82 | }
83 | ```
84 |
85 | ## Hidden pages
86 |
87 | MDX files not included in `docs.json` will not show up in the sidebar but are accessible through the search bar and by linking directly to them.
88 |
--------------------------------------------------------------------------------
/.github/workflows/llm-bridge-release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: Build and Push LLM Bridge Docker Images
4 |
5 | on:
6 | workflow_dispatch:
7 | push:
8 | tags:
9 | - "llm-bridge/v*"
10 |
11 | jobs:
12 | build_and_push:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
17 |
18 | - name: Cache Node modules
19 | uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5
20 | with:
21 | path: |
22 | ~/.npm
23 | llm-bridge/node_modules
24 | key: ${{ runner.os }}-node-llm-bridge-${{ hashFiles('llm-bridge/package-lock.json') }}
25 | restore-keys: |
26 | ${{ runner.os }}-node-llm-bridge-
27 | ${{ runner.os }}-node-
28 |
29 | - name: Set up QEMU
30 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
31 | with:
32 | platforms: all
33 |
34 | - name: Set up Docker Buildx
35 | id: buildx
36 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
37 | with:
38 | install: true
39 | version: latest
40 |
41 | - name: Login to GHCR
42 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
43 | with:
44 | registry: ghcr.io
45 | username: ${{ github.actor }}
46 | password: ${{ secrets.GITHUB_TOKEN }}
47 |
48 | - name: Extract tag name
49 | id: extract_tag
50 | run: |
51 | TAG="${GITHUB_REF#refs/tags/}"
52 | VERSION="${TAG#llm-bridge/}"
53 | echo "TAG_NAME=${VERSION}" >> $GITHUB_OUTPUT
54 |
55 | - name: Build and Push
56 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
57 | with:
58 | context: llm-bridge/
59 | file: llm-bridge/Dockerfile
60 | platforms: linux/amd64,linux/arm64
61 | push: true
62 | tags: |
63 | ghcr.io/kguardian-dev/kguardian/llm-bridge:${{ steps.extract_tag.outputs.TAG_NAME }}
64 | ghcr.io/kguardian-dev/kguardian/llm-bridge:latest
65 | cache-from: type=gha
66 | cache-to: type=gha,mode=max
67 |
--------------------------------------------------------------------------------
/charts/kguardian/README.md.gotmpl:
--------------------------------------------------------------------------------
1 | # kguardian Helm Chart
2 |
3 | This chart bootstraps the [kguardian]() controlplane onto a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
4 |
5 | {{ template "chart.versionBadge" . }}
6 |
7 | ## Overview
8 |
9 | This Helm chart deploys:
10 |
11 | - A kguardian control plane configured to your specifications
12 | - Additional features and components (optional)
13 |
14 | ## Prerequisites
15 |
16 | - Linux Kernel 6.2+
17 | - Kubernetes 1.19+
18 | - kubectl v1.19+
19 | - Helm 3.0+
20 |
21 | ## Install the Chart
22 |
23 | To install the chart with the release name `kguardian`:
24 |
25 | ### Install from OCI Registry (Recommended)
26 |
27 | ```bash
28 | helm install kguardian oci://ghcr.io/kguardian-dev/charts/kguardian \
29 | --namespace kguardian \
30 | --create-namespace
31 | ```
32 |
33 | You can also specify a version:
34 |
35 | ```bash
36 | helm install kguardian oci://ghcr.io/kguardian-dev/charts/kguardian \
37 | --version 1.1.1 \
38 | --namespace kguardian \
39 | --create-namespace
40 | ```
41 |
42 | **Note:** *If you have [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/) enabled for your cluster, you will need to add the following labels to the namespace in which the chart is deployed.*
43 |
44 | Example:
45 |
46 | ```yaml
47 | apiVersion: v1
48 | kind: Namespace
49 | metadata:
50 | labels:
51 | pod-security.kubernetes.io/enforce: privileged
52 | pod-security.kubernetes.io/warn: privileged
53 | name: kguardian
54 | ```
55 |
56 | ## Directory Structure
57 |
58 | The following shows the directory structure of the Helm chart.
59 |
60 | ```bash
61 | charts/kguardian/
62 | ├── .helmignore # Contains patterns to ignore when packaging Helm charts.
63 | ├── Chart.yaml # Information about your chart
64 | ├── values.yaml # The default values for your templates
65 | ├── charts/ # Charts that this chart depends on
66 | └── templates/ # The template files
67 | └── tests/ # The test files
68 | ```
69 |
70 | ## Configuration
71 |
72 | The following table lists the configurable parameters of the kguardian chart and their default values.
73 |
74 | {{ template "chart.valuesTable" . }}
75 |
76 | ## Uninstalling the Chart
77 |
78 | To uninstall/delete the `kguardian` release:
79 |
80 | ```bash
81 | helm uninstall kguardian
82 | ```
83 |
--------------------------------------------------------------------------------
/advisor/pkg/network/types_test.go:
--------------------------------------------------------------------------------
1 | package network
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | "github.com/kguardian-dev/kguardian/advisor/pkg/api"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 |
11 | func TestGetPolicyName(t *testing.T) {
12 | assert.Equal(t, "test-pod-standard-policy", GetPolicyName("test-pod", "standard-policy"))
13 | assert.Equal(t, "another-pod-cilium-policy", GetPolicyName("another-pod", "cilium-policy"))
14 | }
15 |
// TestCreateStandardLabels verifies the generated app.kubernetes.io labels.
func TestCreateStandardLabels(t *testing.T) {
	expected := map[string]string{
		"app.kubernetes.io/name":      "my-pod",
		"app.kubernetes.io/component": "networkpolicy",
		"app.kubernetes.io/part-of":   "kguardian",
	}
	assert.Equal(t, expected, CreateStandardLabels("my-pod", "networkpolicy"))
}
24 |
// TestCreateTypeMeta verifies Kind and APIVersion are passed through.
func TestCreateTypeMeta(t *testing.T) {
	expected := metav1.TypeMeta{
		Kind:       "NetworkPolicy",
		APIVersion: "networking.k8s.io/v1",
	}
	assert.Equal(t, expected, CreateTypeMeta("NetworkPolicy", "networking.k8s.io/v1"))
}
32 |
// TestCreateObjectMeta verifies name, namespace, and labels are passed through.
func TestCreateObjectMeta(t *testing.T) {
	labels := map[string]string{"app": "test"}
	expected := metav1.ObjectMeta{
		Name:      "test-name",
		Namespace: "test-ns",
		Labels:    labels,
	}
	assert.Equal(t, expected, CreateObjectMeta("test-name", "test-ns", labels))
}
42 |
43 | func TestIsIngressTraffic(t *testing.T) {
44 | podDetail := &api.PodDetail{PodIP: "192.168.1.100"}
45 |
46 | ingressTraffic := api.PodTraffic{TrafficType: "INGRESS"}
47 | assert.True(t, IsIngressTraffic(ingressTraffic, podDetail))
48 |
49 | egressTraffic := api.PodTraffic{TrafficType: "EGRESS"}
50 | assert.False(t, IsIngressTraffic(egressTraffic, podDetail))
51 |
52 | otherTraffic := api.PodTraffic{TrafficType: "OTHER"}
53 | assert.False(t, IsIngressTraffic(otherTraffic, podDetail))
54 | }
55 |
56 | func TestIsEgressTraffic(t *testing.T) {
57 | podDetail := &api.PodDetail{PodIP: "192.168.1.100"}
58 |
59 | ingressTraffic := api.PodTraffic{TrafficType: "INGRESS"}
60 | assert.False(t, IsEgressTraffic(ingressTraffic, podDetail))
61 |
62 | egressTraffic := api.PodTraffic{TrafficType: "EGRESS"}
63 | assert.True(t, IsEgressTraffic(egressTraffic, podDetail))
64 |
65 | otherTraffic := api.PodTraffic{TrafficType: "OTHER"}
66 | assert.False(t, IsEgressTraffic(otherTraffic, podDetail))
67 | }
68 |
--------------------------------------------------------------------------------
/mcp-server/tools/syscalls.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
10 | "github.com/modelcontextprotocol/go-sdk/mcp"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
// SyscallsInput defines the input parameters for the syscalls tool
type SyscallsInput struct {
	Namespace string `json:"namespace" jsonschema:"The Kubernetes namespace of the pod"`
	PodName   string `json:"pod_name" jsonschema:"The name of the pod"`
}

// SyscallsOutput defines the output for the syscalls tool
type SyscallsOutput struct {
	// Data holds the broker's syscall records serialized as indented JSON.
	Data string `json:"data" jsonschema:"Syscall data in JSON format"`
}

// SyscallsHandler handles the get_pod_syscalls tool
type SyscallsHandler struct {
	// client queries the kguardian broker for captured syscalls.
	client *BrokerClient
}
29 |
30 | // Call implements the tool handler
31 | func (h SyscallsHandler) Call(
32 | ctx context.Context,
33 | req *mcp.CallToolRequest,
34 | input SyscallsInput,
35 | ) (*mcp.CallToolResult, SyscallsOutput, error) {
36 | startTime := time.Now()
37 | logger.Log.WithFields(logrus.Fields{
38 | "namespace": input.Namespace,
39 | "pod_name": input.PodName,
40 | }).Info("Received get_pod_syscalls request")
41 |
42 | // Fetch data from broker
43 | data, err := h.client.GetPodSyscalls(input.Namespace, input.PodName)
44 | if err != nil {
45 | logger.Log.WithFields(logrus.Fields{
46 | "namespace": input.Namespace,
47 | "pod_name": input.PodName,
48 | "error": err.Error(),
49 | "total_duration": time.Since(startTime).String(),
50 | }).Error("Error fetching syscalls")
51 | return nil, SyscallsOutput{}, fmt.Errorf("error fetching syscalls: %w", err)
52 | }
53 |
54 | // Convert to JSON string
55 | jsonData, err := json.MarshalIndent(data, "", " ")
56 | if err != nil {
57 | logger.Log.WithField("error", err.Error()).Error("Error marshaling response")
58 | return nil, SyscallsOutput{}, fmt.Errorf("error marshaling response: %w", err)
59 | }
60 |
61 | logger.Log.WithFields(logrus.Fields{
62 | "namespace": input.Namespace,
63 | "pod_name": input.PodName,
64 | "response_bytes": len(jsonData),
65 | "total_duration": time.Since(startTime).String(),
66 | }).Info("Successfully fetched syscalls")
67 |
68 | return nil, SyscallsOutput{
69 | Data: string(jsonData),
70 | }, nil
71 | }
72 |
--------------------------------------------------------------------------------
/controller/src/bpf/helper.h:
--------------------------------------------------------------------------------
1 | #include "vmlinux.h"
2 | #include
3 | #include
4 | #include
5 | #include
6 |
// Use LRU_HASH for automatic eviction of stale entries.
// Keyed by a network-namespace inode number (u64); presence of a key marks
// the namespace as tracked. Presumably populated from user space — confirm
// against the loader.
struct
{
    __uint(type, BPF_MAP_TYPE_LRU_HASH);
    __uint(max_entries, 10240);
    __type(key, u64);
    __type(value, u32);
} inode_num SEC(".maps");

// IPv4 addresses (u32) to exclude from traffic capture; only key
// membership is consulted (see should_filter_traffic below).
struct
{
    __uint(type, BPF_MAP_TYPE_LRU_HASH);
    __uint(max_entries, 10240);
    __type(key, u32);
    __type(value, u32);
} ignore_ips SEC(".maps");

// Syscall numbers (u32) considered allowed. Presumably populated from user
// space — not referenced in this header.
struct
{
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 512);
    __type(key, u32);
    __type(value, u32);
} allowed_syscalls SEC(".maps");
31 |
32 | // Common filtering helper to avoid code duplication
33 | // Optimized to check cheap conditions first before map lookups
34 | static __always_inline bool should_filter_traffic(__u32 saddr, __u32 daddr)
35 | {
36 | // Fast path: check cheap conditions first (no map lookups)
37 |
38 | // Filter same source and destination
39 | if (saddr == daddr)
40 | return true;
41 |
42 | // Filter localhost (127.0.0.1) - 0x7F000001 in network byte order is 0x0100007F
43 | __u32 localhost = 0x0100007F;
44 | if (saddr == localhost || daddr == localhost)
45 | return true;
46 |
47 | // Filter zero addresses
48 | if (saddr == 0 || daddr == 0)
49 | return true;
50 |
51 | // Slow path: map lookups only if cheap checks passed
52 | // Check ignore list (typically empty or small, so lookups are rare)
53 | if (bpf_map_lookup_elem(&ignore_ips, &saddr))
54 | return true;
55 |
56 | if (bpf_map_lookup_elem(&ignore_ips, &daddr))
57 | return true;
58 |
59 | return false;
60 | }
61 |
// Helper to get user space inode and validate it exists.
// Reads the socket's network-namespace inode and checks it is present in
// the inode_num map (i.e. the namespace is tracked).
//
// NOTE(review): on success this writes the namespace inode (the map KEY)
// into *inum_out, not the looked-up map VALUE that the local name
// "user_space_inum_ptr" suggests — confirm callers expect the key here.
static __always_inline bool get_and_validate_inum(struct sock *sk, __u64 *inum_out)
{
    // A NULL socket cannot be attributed to any namespace.
    if (!sk)
        return false;

    __u32 net_ns_inum = 0;
    BPF_CORE_READ_INTO(&net_ns_inum, sk, __sk_common.skc_net.net, ns.inum);

    __u64 key = (__u64)net_ns_inum;
    __u32 *user_space_inum_ptr = bpf_map_lookup_elem(&inode_num, &key);

    // Not in the map: namespace is not tracked, caller should skip.
    if (!user_space_inum_ptr)
        return false;

    *inum_out = key;
    return true;
}
--------------------------------------------------------------------------------
/advisor/pkg/k8s/generic.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "context"
5 |
6 | log "github.com/rs/zerolog/log"
7 | corev1 "k8s.io/api/core/v1"
8 | )
9 |
// Version is set at build time using -ldflags
var Version = "development" // default value

// ModeType defines the mode of operation for generating network policies
type ModeType int

const (
	// SinglePod targets one named pod in one namespace.
	SinglePod ModeType = iota
	// AllPodsInNamespace targets every running pod in one namespace.
	AllPodsInNamespace
	// AllPodsInAllNamespaces targets every running pod cluster-wide.
	AllPodsInAllNamespaces
)

// GenerateOptions holds options for the GenerateNetworkPolicy function
type GenerateOptions struct {
	Mode      ModeType
	PodName   string // Used if Mode is SinglePod
	Namespace string // Used if Mode is AllPodsInNamespace or SinglePod
}
28 |
29 | // Exportable function variables for testing - REMOVED
30 |
31 | func GetResource(options GenerateOptions, config *Config) []corev1.Pod {
32 | var pods []corev1.Pod
33 | ctx := context.TODO() // Or pass a context if available
34 |
35 | switch options.Mode {
36 | case SinglePod:
37 | // Fetch the specified pod
38 | fetchedPod, err := GetPod(ctx, config, options.Namespace, options.PodName)
39 | if err != nil {
40 | // Log the error and return an empty slice instead of fatally exiting.
41 | log.Error().Err(err).Msgf("failed to get running pod %s in namespace %s", options.PodName, options.Namespace)
42 | return []corev1.Pod{}
43 | }
44 | pods = append(pods, *fetchedPod)
45 |
46 | case AllPodsInNamespace:
47 | // Fetch all running pods in the given namespace
48 | fetchedPods, err := GetPodsInNamespace(ctx, config, options.Namespace)
49 | if err != nil {
50 | log.Error().Err(err).Msgf("failed to fetch running pods in namespace %s", options.Namespace)
51 | // Return empty list on error, or handle differently as needed
52 | return []corev1.Pod{}
53 | }
54 | pods = append(pods, fetchedPods...)
55 |
56 | case AllPodsInAllNamespaces:
57 | // Fetch all running pods in all namespaces
58 | fetchedPods, err := GetAllPodsInAllNamespaces(ctx, config)
59 | if err != nil {
60 | log.Error().Err(err).Msgf("failed to fetch all running pods in all namespaces")
61 | // Return empty list on error, or handle differently as needed
62 | return []corev1.Pod{}
63 | }
64 | pods = append(pods, fetchedPods...)
65 | default:
66 | log.Error().Msgf("Unknown mode type: %v", options.Mode)
67 | return []corev1.Pod{}
68 | }
69 | return pods
70 | }
71 |
--------------------------------------------------------------------------------
/frontend/src/index.css:
--------------------------------------------------------------------------------
1 | @import "tailwindcss";
2 |
3 | /* Internal theme variables */
4 | :root,
5 | :root.dark {
6 | --theme-bg-dark: #0E1726;
7 | --theme-bg-darker: #0A0F1C;
8 | --theme-bg-card: #1A2332;
9 | --theme-border: #2A3647;
10 | --theme-text-primary: #F3F4F6;
11 | --theme-text-secondary: #9CA3AF;
12 | --theme-text-tertiary: #6B7280;
13 | }
14 |
15 | /* Light theme override */
16 | :root.light {
17 | --theme-bg-dark: #F3F4F6;
18 | --theme-bg-darker: #FFFFFF;
19 | --theme-bg-card: #F9FAFB;
20 | --theme-border: #E5E7EB;
21 | --theme-text-primary: #111827;
22 | --theme-text-secondary: #4B5563;
23 | --theme-text-tertiary: #6B7280;
24 | }
25 |
26 | @theme {
27 | /* Hubble colors using theme-aware CSS variables */
28 | --color-hubble-dark: var(--theme-bg-dark);
29 | --color-hubble-darker: var(--theme-bg-darker);
30 | --color-hubble-card: var(--theme-bg-card);
31 | --color-hubble-border: var(--theme-border);
32 |
33 | /* Theme-aware text colors (generates text-primary, text-secondary, text-tertiary) */
34 | --color-primary: var(--theme-text-primary);
35 | --color-secondary: var(--theme-text-secondary);
36 | --color-tertiary: var(--theme-text-tertiary);
37 |
38 | /* Accent colors (same for both themes) */
39 | --color-hubble-accent: #3B82F6;
40 | --color-hubble-success: #10B981;
41 | --color-hubble-warning: #F59E0B;
42 | --color-hubble-error: #EF4444;
43 | }
44 |
45 | @layer base {
46 | body {
47 | background-color: var(--theme-bg-darker);
48 | color: var(--theme-text-primary);
49 | margin: 0;
50 | font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
51 | transition: background-color 0.3s ease, color 0.3s ease;
52 | }
53 |
54 | #root {
55 | width: 100%;
56 | height: 100vh;
57 | }
58 | }
59 |
60 | /* React Flow custom styles */
61 | .react-flow {
62 | @apply bg-hubble-dark;
63 | }
64 |
65 | .react-flow__node {
66 | @apply rounded-lg shadow-lg;
67 | }
68 |
69 | .react-flow__edge-path {
70 | stroke-width: 2;
71 | }
72 |
73 | .react-flow__handle {
74 | @apply opacity-0;
75 | }
76 |
77 | .react-flow__controls {
78 | @apply bg-hubble-card border-hubble-border;
79 | }
80 |
81 | .react-flow__controls-button {
82 | @apply bg-hubble-card border-hubble-border text-secondary;
83 | }
84 |
85 | .react-flow__controls-button:hover {
86 | @apply bg-hubble-dark;
87 | }
88 |
--------------------------------------------------------------------------------
/.github/workflows/broker-release.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: Build and Push Broker Docker Images
4 |
5 | on:
6 | workflow_dispatch:
7 | push:
8 | tags:
9 | - "broker/v*"
10 |
11 | jobs:
12 | build_and_push:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
17 |
18 | - name: Cache Rust dependencies
19 | uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5
20 | with:
21 | path: |
22 | ~/.cargo/bin/
23 | ~/.cargo/registry/index/
24 | ~/.cargo/registry/cache/
25 | ~/.cargo/git/db/
26 | broker/target/
27 | key: ${{ runner.os }}-cargo-broker-${{ hashFiles('broker/Cargo.lock') }}
28 | restore-keys: |
29 | ${{ runner.os }}-cargo-broker-
30 | ${{ runner.os }}-cargo-
31 |
32 | - name: Set up QEMU
33 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
34 | with:
35 | platforms: all
36 |
37 | - name: Set up Docker Buildx
38 | id: buildx
39 | uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
40 | with:
41 | install: true
42 | version: latest
43 |
44 | - name: Login to GHCR
45 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
46 | with:
47 | registry: ghcr.io
48 | username: ${{ github.actor }}
49 | password: ${{ secrets.GITHUB_TOKEN }}
50 |
51 | - name: Extract tag name
52 | id: extract_tag
53 | run: |
54 | TAG="${GITHUB_REF#refs/tags/}"
55 | VERSION="${TAG#broker/}"
56 | echo "TAG_NAME=${VERSION}" >> $GITHUB_OUTPUT
57 |
58 | - name: Build and Push
59 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
60 | with:
61 | context: broker/
62 | file: broker/Dockerfile
63 | platforms: linux/amd64 #,linux/arm64
64 | push: true
65 | tags: |
66 | ghcr.io/kguardian-dev/kguardian/broker:${{ steps.extract_tag.outputs.TAG_NAME }}
67 | ghcr.io/kguardian-dev/kguardian/broker:latest
68 | cache-from: type=gha
69 | cache-to: type=gha,mode=max
70 |
--------------------------------------------------------------------------------
/.github/workflows/renovate.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json
3 | name: "Renovate"
4 |
5 | on:
6 | merge_group:
7 | workflow_dispatch:
8 | inputs:
9 | # https://docs.renovatebot.com/self-hosted-configuration/#dryrun
10 | dryRun:
11 | description: "Dry Run"
12 | default: "false"
13 | required: false
14 | # https://docs.renovatebot.com/examples/self-hosting/#about-the-log-level-numbers
15 | logLevel:
16 | description: "Log Level"
17 | default: "debug"
18 | required: false
19 | version:
20 | description: Renovate version
21 | default: latest
22 | required: false
23 | schedule:
24 | - cron: "0 * * * *"
25 | push:
26 | branches:
27 | - main
28 | paths:
29 | - .github/workflows/renovate.yaml
30 | - .github/renovate.json5
31 | - .github/renovate/**.json5
32 |
33 | concurrency:
34 | group: ${{ github.workflow }}-${{ github.event.number || github.ref }}
35 | cancel-in-progress: true
36 |
37 | env:
38 | LOG_LEVEL: "${{ inputs.logLevel || 'debug' }}"
39 | RENOVATE_AUTODISCOVER: true
40 | RENOVATE_AUTODISCOVER_FILTER: "${{ github.repository }}"
41 |   RENOVATE_DRY_RUN: "${{ inputs.dryRun == 'true' }}"
42 | RENOVATE_PLATFORM: github
43 | RENOVATE_PLATFORM_COMMIT: true
44 | WORKFLOW_RENOVATE_VERSION: "${{ inputs.version || 'latest' }}"
45 |
46 | jobs:
47 | renovate:
48 | name: Renovate
49 | runs-on: ubuntu-latest
50 | steps:
51 | - name: Checkout
52 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
53 |
54 | - name: Validate Renovate Configuration
55 | uses: suzuki-shunsuke/github-action-renovate-config-validator@c22827f47f4f4a5364bdba19e1fe36907ef1318e # v1.1.1
56 |
57 | - name: Generate Token
58 | uses: actions/create-github-app-token@29824e69f54612133e76f7eaac726eef6c875baf # v2
59 | id: app-token
60 | with:
61 | app-id: "${{ secrets.ARX_GITHUB_APP_ID }}"
62 | private-key: "${{ secrets.ARX_GITHUB_APP_PRIVATE_KEY }}"
63 |
64 | - name: Renovate
65 | uses: renovatebot/github-action@502904f1cefdd70cba026cb1cbd8c53a1443e91b # v44.1.0
66 | with:
67 | configurationFile: .github/renovate.json5
68 | token: "x-access-token:${{ steps.app-token.outputs.token }}"
69 | renovate-version: "${{ env.WORKFLOW_RENOVATE_VERSION }}"
70 |
--------------------------------------------------------------------------------
/charts/kguardian/templates/database/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ .Values.database.name }}
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app.kubernetes.io/name: {{ .Values.database.service.name }}
10 | template:
11 | metadata:
12 | {{- with .Values.database.podAnnotations }}
13 | annotations:
14 | {{- toYaml . | nindent 8 }}
15 | {{- end }}
16 | labels:
17 | {{- include "kguardian.labels" . | nindent 8 }}
18 | app.kubernetes.io/name: {{ .Values.database.service.name }}
19 | spec:
20 | serviceAccountName: {{ default "database" .Values.database.serviceAccount.name }}
21 | automountServiceAccountToken: true
22 | securityContext:
23 | {{- toYaml .Values.database.podSecurityContext | nindent 8 }}
24 | containers:
25 | - name: postgresdb
26 | {{- if .Values.database.image.sha }}
27 | image: "{{ .Values.database.image.repository }}@{{ .Values.database.image.sha }}"
28 | {{- else }}
29 | image: "{{ .Values.database.image.repository }}:{{ .Values.database.image.tag }}"
30 | {{- end }}
31 | imagePullPolicy: {{ .Values.database.image.pullPolicy }}
32 | ports:
33 | - containerPort: 5432
34 | securityContext:
35 | {{- toYaml .Values.database.securityContext | nindent 12 }}
36 | env:
37 | - name: POSTGRES_USER
38 | value: rust
39 | - name: POSTGRES_HOST_AUTH_METHOD
40 | value: trust
41 | - name: POSTGRES_DB
42 | value: kube
43 | volumeMounts:
44 |             - mountPath: /var/lib/postgresql/data
45 | name: db-data
46 | volumes:
47 | - name: db-data
48 | {{- if .Values.database.persistence.enabled }}
49 | persistentVolumeClaim:
50 | claimName: {{ .Values.database.persistence.existingClaim }}
51 | {{- else }}
52 | emptyDir: {}
53 | {{- end -}}
54 | {{- with .Values.database.nodeSelector }}
55 | nodeSelector:
56 | {{- toYaml . | nindent 8 }}
57 | {{- end }}
58 | {{- with .Values.database.affinity }}
59 | affinity:
60 | {{- toYaml . | nindent 8 }}
61 | {{- end }}
62 | {{- with .Values.database.tolerations }}
63 | tolerations:
64 | {{- toYaml . | nindent 8 }}
65 | {{- end }}
66 |
--------------------------------------------------------------------------------
/mcp-server/go.sum:
--------------------------------------------------------------------------------
1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
4 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
5 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
6 | github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q=
7 | github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
8 | github.com/modelcontextprotocol/go-sdk v1.1.0 h1:Qjayg53dnKC4UZ+792W21e4BpwEZBzwgRW6LrjLWSwA=
9 | github.com/modelcontextprotocol/go-sdk v1.1.0/go.mod h1:6fM3LCm3yV7pAs8isnKLn07oKtB0MP9LHd3DfAcKw10=
10 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
11 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
12 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
13 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
14 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
15 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
16 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
17 | github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
18 | github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
19 | golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
20 | golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
21 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
22 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
23 | golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
24 | golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
25 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
26 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
27 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
28 |
--------------------------------------------------------------------------------
/mcp-server/tools/cluster_traffic.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
10 | "github.com/modelcontextprotocol/go-sdk/mcp"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
// ClusterTrafficInput defines the input parameters for the
// get_cluster_traffic tool (the tool takes no parameters).
type ClusterTrafficInput struct{}

// ClusterTrafficOutput defines the output structure returned to the MCP client.
type ClusterTrafficOutput struct {
	// Data holds all pod traffic records in the cluster, serialized as
	// indented JSON by Call.
	Data string `json:"data" jsonschema:"All pod traffic data in the cluster in JSON format"`
}

// ClusterTrafficHandler handles the get_cluster_traffic tool; it proxies
// the request to the broker service through the shared BrokerClient.
type ClusterTrafficHandler struct {
	client *BrokerClient
}
26 |
27 | // Call implements the tool handler
28 | func (h ClusterTrafficHandler) Call(
29 | ctx context.Context,
30 | req *mcp.CallToolRequest,
31 | input ClusterTrafficInput,
32 | ) (*mcp.CallToolResult, ClusterTrafficOutput, error) {
33 | startTime := time.Now()
34 | logger.Log.Info("Received get_cluster_traffic request")
35 |
36 | fetchStart := time.Now()
37 | data, err := h.client.GetAllPodTraffic()
38 | fetchDuration := time.Since(fetchStart)
39 |
40 | if err != nil {
41 | logger.Log.WithFields(logrus.Fields{
42 | "error": err.Error(),
43 | "fetch_duration": fetchDuration.String(),
44 | "total_duration": time.Since(startTime).String(),
45 | }).Error("Error fetching cluster traffic")
46 | return nil, ClusterTrafficOutput{}, fmt.Errorf("error fetching cluster traffic: %w", err)
47 | }
48 |
49 | marshalStart := time.Now()
50 | jsonData, err := json.MarshalIndent(data, "", " ")
51 | marshalDuration := time.Since(marshalStart)
52 |
53 | if err != nil {
54 | logger.Log.WithFields(logrus.Fields{
55 | "error": err.Error(),
56 | "fetch_duration": fetchDuration.String(),
57 | "marshal_duration": marshalDuration.String(),
58 | "total_duration": time.Since(startTime).String(),
59 | }).Error("Error marshaling response")
60 | return nil, ClusterTrafficOutput{}, fmt.Errorf("error marshaling response: %w", err)
61 | }
62 |
63 | totalDuration := time.Since(startTime)
64 | logger.Log.WithFields(logrus.Fields{
65 | "response_bytes": len(jsonData),
66 | "fetch_duration": fetchDuration.String(),
67 | "marshal_duration": marshalDuration.String(),
68 | "total_duration": totalDuration.String(),
69 | }).Info("Successfully fetched cluster traffic")
70 |
71 | return nil, ClusterTrafficOutput{Data: string(jsonData)}, nil
72 | }
73 |
--------------------------------------------------------------------------------
/mcp-server/tools/network_traffic.go:
--------------------------------------------------------------------------------
1 | package tools
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
10 | "github.com/modelcontextprotocol/go-sdk/mcp"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
// NetworkTrafficInput defines the input parameters for the network traffic tool.
type NetworkTrafficInput struct {
	// Namespace and PodName together identify the pod whose traffic is queried.
	Namespace string `json:"namespace" jsonschema:"The Kubernetes namespace of the pod"`
	PodName   string `json:"pod_name" jsonschema:"The name of the pod"`
}

// NetworkTrafficOutput defines the output for the network traffic tool.
type NetworkTrafficOutput struct {
	// Data holds the pod's network traffic records, serialized as indented
	// JSON by Call.
	Data string `json:"data" jsonschema:"Network traffic data in JSON format"`
}

// NetworkTrafficHandler handles the get_pod_network_traffic tool; it proxies
// the request to the broker service through the shared BrokerClient.
type NetworkTrafficHandler struct {
	client *BrokerClient
}
29 |
30 | // Call implements the tool handler
31 | func (h NetworkTrafficHandler) Call(
32 | ctx context.Context,
33 | req *mcp.CallToolRequest,
34 | input NetworkTrafficInput,
35 | ) (*mcp.CallToolResult, NetworkTrafficOutput, error) {
36 | startTime := time.Now()
37 | logger.Log.WithFields(logrus.Fields{
38 | "namespace": input.Namespace,
39 | "pod_name": input.PodName,
40 | }).Info("Received get_pod_network_traffic request")
41 |
42 | // Fetch data from broker
43 | data, err := h.client.GetPodNetworkTraffic(input.Namespace, input.PodName)
44 | if err != nil {
45 | logger.Log.WithFields(logrus.Fields{
46 | "namespace": input.Namespace,
47 | "pod_name": input.PodName,
48 | "error": err.Error(),
49 | "total_duration": time.Since(startTime).String(),
50 | }).Error("Error fetching network traffic")
51 | return nil, NetworkTrafficOutput{}, fmt.Errorf("error fetching network traffic: %w", err)
52 | }
53 |
54 | // Convert to JSON string
55 | jsonData, err := json.MarshalIndent(data, "", " ")
56 | if err != nil {
57 | logger.Log.WithField("error", err.Error()).Error("Error marshaling response")
58 | return nil, NetworkTrafficOutput{}, fmt.Errorf("error marshaling response: %w", err)
59 | }
60 |
61 | logger.Log.WithFields(logrus.Fields{
62 | "namespace": input.Namespace,
63 | "pod_name": input.PodName,
64 | "response_bytes": len(jsonData),
65 | "total_duration": time.Since(startTime).String(),
66 | }).Info("Successfully fetched network traffic")
67 |
68 | return nil, NetworkTrafficOutput{
69 | Data: string(jsonData),
70 | }, nil
71 | }
72 |
--------------------------------------------------------------------------------
/charts/kguardian/templates/mcp-server/mcpserver-crd.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.mcpServer.enabled .Values.mcpServer.useKmcp }}
2 | ---
3 | # MCPServer Custom Resource (managed by kmcp controller)
4 | # This replaces the standard Deployment when kmcp is enabled
5 | apiVersion: kagent.dev/v1alpha1
6 | kind: MCPServer
7 | metadata:
8 | name: {{ .Values.mcpServer.service.name }}
9 | namespace: {{ include "kguardian.namespace" . }}
10 | labels:
11 | {{- include "kguardian.labels" . | nindent 4 }}
12 | spec:
13 | # Deployment configuration - defines the container-based MCP server
14 | deployment:
15 | # Container image for the MCP server
16 | {{- if .Values.mcpServer.image.sha }}
17 | image: {{ .Values.mcpServer.image.repository }}@{{ .Values.mcpServer.image.sha }}
18 | {{- else }}
19 | image: {{ .Values.mcpServer.image.repository }}:{{ .Values.mcpServer.image.tag }}
20 | {{- end }}
21 |
22 | # Port the MCP server listens on
23 | port: {{ .Values.mcpServer.container.port }}
24 |
25 | # Environment variables (as key-value map)
26 | env:
27 | BROKER_URL: "http://{{ .Values.broker.service.name }}.{{ include "kguardian.namespace" . }}.svc.cluster.local:{{ .Values.broker.container.port }}"
28 | PORT: "{{ .Values.mcpServer.container.port }}"
29 | {{- range .Values.mcpServer.env }}
30 | {{ .name }}: {{ .value | quote }}
31 | {{- end }}
32 |
33 | # Optional: Secret references for sensitive data
34 | {{- with .Values.mcpServer.kmcp.secretRefs }}
35 | secretRefs:
36 | {{- toYaml . | nindent 6 }}
37 | {{- end }}
38 |
39 | # Transport type - how clients communicate with the MCP server
40 | # Valid values: stdio, http
41 | transportType: {{ .Values.mcpServer.kmcp.transport.type | default "http" }}
42 |
43 | # HTTP transport configuration
44 | {{- if or (eq (.Values.mcpServer.kmcp.transport.type | default "http") "http") (not .Values.mcpServer.kmcp.transport.type) }}
45 | httpTransport:
46 | # Target port for HTTP service
47 | targetPort: {{ .Values.mcpServer.container.port }}
48 | # Path where MCP server is accessible
49 | path: {{ .Values.mcpServer.kmcp.transport.path | default "/" }}
50 | {{- end }}
51 |
52 | # Optional: Authentication configuration
53 | {{- with .Values.mcpServer.kmcp.authn }}
54 | authn:
55 | {{- toYaml . | nindent 4 }}
56 | {{- end }}
57 |
58 | # Optional: Authorization rules (CEL-based)
59 | {{- with .Values.mcpServer.kmcp.authz }}
60 | authz:
61 | {{- toYaml . | nindent 4 }}
62 | {{- end }}
63 | {{- end }}
64 |
--------------------------------------------------------------------------------
/charts/kguardian/templates/broker/deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: {{ include "kguardian.name" . }}-broker
6 | labels:
7 | {{- include "kguardian.labels" . | nindent 4 }}
8 | spec:
9 | {{- if not .Values.broker.autoscaling.enabled }}
10 | replicas: {{ .Values.broker.replicaCount }}
11 | {{- end }}
12 | selector:
13 | matchLabels:
14 | app.kubernetes.io/name: {{ .Values.broker.service.name }}
15 | template:
16 | metadata:
17 | {{- with .Values.broker.podAnnotations }}
18 | annotations:
19 | {{- toYaml . | nindent 8 }}
20 | {{- end }}
21 | labels:
22 | {{- include "kguardian.labels" . | nindent 8 }}
23 | app.kubernetes.io/name: {{ .Values.broker.service.name }}
24 | spec:
25 | automountServiceAccountToken: true
26 | {{- with .Values.broker.imagePullSecrets }}
27 | imagePullSecrets:
28 | {{- toYaml . | nindent 8 }}
29 | {{- end }}
30 | serviceAccountName: {{ default "broker" .Values.broker.serviceAccount.name }}
31 | securityContext:
32 | {{- toYaml .Values.broker.podSecurityContext | nindent 8 }}
33 | containers:
34 | - name: broker
35 | securityContext:
36 | {{- toYaml .Values.broker.securityContext | nindent 12 }}
37 | {{- if .Values.broker.image.sha }}
38 | image: "{{ .Values.broker.image.repository }}@{{ .Values.broker.image.sha }}"
39 | {{- else }}
40 | image: "{{ .Values.broker.image.repository }}:{{ .Values.broker.image.tag }}"
41 | {{- end }}
42 | imagePullPolicy: {{ .Values.broker.image.pullPolicy }}
43 | ports:
44 | - name: http
45 | containerPort: {{ .Values.broker.container.port }}
46 | protocol: TCP
47 | env:
48 | - name: DATABASE_URL
49 | value: "postgres://rust@{{ .Values.database.service.name }}.{{ include "kguardian.namespace" . }}.svc.cluster.local:{{ .Values.database.container.port }}/kube"
50 | {{- with .Values.broker.resources }}
51 | resources:
52 | {{- toYaml . | nindent 12 }}
53 | {{- end }}
54 | {{- with .Values.broker.nodeSelector }}
55 | nodeSelector:
56 | {{- toYaml . | nindent 8 }}
57 | {{- end }}
58 | {{- with .Values.broker.affinity }}
59 | affinity:
60 | {{- toYaml . | nindent 8 }}
61 | {{- end }}
62 | {{- with .Values.broker.tolerations }}
63 | tolerations:
64 | {{- toYaml . | nindent 8 }}
65 | {{- end }}
66 |
--------------------------------------------------------------------------------
/scripts/quick-install.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Installs the latest kguardian "advisor" release binary as a kubectl
# plugin named kubectl-kguardian under /usr/local/bin.

# Treat unset variables as errors; failures of individual commands are
# checked explicitly below so we can print friendly messages.
set -u

# Define the GitHub owner and repository
GITHUB_OWNER="kguardian-dev"
GITHUB_REPO="kguardian"
RELEASE_BINARY_NAME="advisor"
BINARY_NAME="kguardian"
INSTALL_DIR="/usr/local/bin"
TMP_DIR=$(mktemp -d)
BINARY_PATH="$TMP_DIR/$BINARY_NAME"

echo "Starting the installation of kubectl-$BINARY_NAME..."

# Trap to ensure that the temporary directory gets cleaned up on any exit.
cleanup() {
    echo "Cleaning up temporary files..."
    rm -rf "$TMP_DIR"
}
trap cleanup EXIT

# Detect OS and Arch, normalizing uname output to Go release naming.
# Note: Apple Silicon reports "arm64" (not "aarch64") from uname -m.
echo "Detecting OS and architecture..."
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m)
case "$ARCH" in
    x86_64) ARCH="amd64" ;;
    aarch64 | arm64) ARCH="arm64" ;;
esac

echo "Detected OS: $OS, Arch: $ARCH"

# Get the latest advisor release tag.
# NOTE: only inspects the first 100 releases returned by the API.
echo "Fetching the latest advisor release tag..."
LATEST_RELEASE_TAG=$(curl -fsSL "https://api.github.com/repos/$GITHUB_OWNER/$GITHUB_REPO/releases?per_page=100" | \
    grep '"tag_name"' | \
    grep 'advisor/' | \
    cut -d '"' -f 4 | \
    sed 's/advisor\///' | \
    sort -V -r | \
    head -n 1 | \
    sed 's/^/advisor\//')

# Check if the latest release was found
if [ -z "$LATEST_RELEASE_TAG" ]; then
    echo "Error: Failed to fetch the latest advisor release."
    exit 1
fi

echo "Latest advisor release tag: $LATEST_RELEASE_TAG"

# Construct the download URL
BINARY_URL="https://github.com/$GITHUB_OWNER/$GITHUB_REPO/releases/download/$LATEST_RELEASE_TAG/$RELEASE_BINARY_NAME-$OS-$ARCH"
echo "Download URL: $BINARY_URL"

# Download the release and set it as executable.
# -f makes curl fail on HTTP errors (e.g. 404) instead of saving the error
# page as the "binary" — the previous bare `$?` check never caught those.
echo "Downloading the kubectl-$BINARY_NAME binary..."
if ! curl -fsSL "$BINARY_URL" -o "$BINARY_PATH"; then
    echo "Error: Failed to download the binary."
    exit 1
fi

chmod +x "$BINARY_PATH"

# Notify user about the need for elevated permissions
echo "The kubectl-$BINARY_NAME binary needs to be moved to $INSTALL_DIR, which requires elevated permissions."
echo "You may need to provide your password for sudo access."

# Move the binary to /usr/local/bin and rename it; fail loudly if sudo is
# refused instead of claiming success afterwards.
if ! sudo mv "$BINARY_PATH" "$INSTALL_DIR/kubectl-$BINARY_NAME"; then
    echo "Error: Failed to install the binary to $INSTALL_DIR."
    exit 1
fi

echo "Installation successful! 'kubectl-$BINARY_NAME' is now available in your PATH."
echo "You can start using it with 'kubectl $BINARY_NAME'."

# Temporary-directory cleanup is handled by the EXIT trap.
--------------------------------------------------------------------------------
/broker/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::error::Error;
2 |
3 | use actix_cors::Cors;
4 | use actix_web::{get, web, App, HttpResponse, HttpServer};
5 | use api::{
6 | add_pod_details, add_pods, add_pods_batch, add_pods_syscalls, add_svc_details, mark_pod_dead,
7 | establish_connection, get_pod_by_ip, get_pod_by_name, get_pod_details, get_pod_syscall_name, get_pod_traffic,
8 | get_pod_traffic_name, get_pods_by_node, get_svc_by_ip,
9 | };
10 |
11 | use diesel::r2d2;
12 | use telemetry::init_logging;
13 | mod telemetry;
14 |
15 | use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
16 | use tracing::info;
17 | pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./db/migrations");
18 |
19 | type DB = diesel::pg::Pg;
20 |
21 | fn run_migrations(
22 | connection: &mut impl MigrationHarness,
23 | ) -> Result<(), Box> {
24 | connection.run_pending_migrations(MIGRATIONS)?;
25 | Ok(())
26 | }
27 |
#[actix_web::main]
async fn main() -> Result<(), std::io::Error> {
    // Logging must be configured before anything else logs.
    init_logging();
    // Build an r2d2 connection pool from the configured Postgres manager;
    // startup is aborted (expect/panic) if the database is unreachable,
    // since the broker cannot serve anything without it.
    let manager = establish_connection();
    let pool = r2d2::Pool::builder()
        .build(manager)
        .expect("Failed to create pool.");
    // RUN the migration schema
    let mut x = pool.get().unwrap();
    let r = run_migrations(&mut x);
    if let Err(e) = r {
        panic!("DB Set up failed {}", e);
    } else {
        info!("DB setup success");
    }
    HttpServer::new(move || {
        // NOTE(review): this CORS policy is fully permissive (any origin,
        // method, and header) — confirm the broker is only ever reachable
        // from inside the cluster.
        let cors = Cors::default()
            .allow_any_origin()
            .allow_any_method()
            .allow_any_header()
            .max_age(3600);

        // Each worker gets a clone of the shared pool; every API handler
        // below is registered on this App.
        App::new()
            .wrap(cors)
            .app_data(web::Data::new(pool.clone()))
            .service(add_pods)
            .service(add_pods_batch)
            .service(add_pod_details)
            .service(add_pods_syscalls)
            .service(get_pod_traffic)
            .service(get_pod_details)
            .service(add_svc_details)
            .service(get_pod_by_ip)
            .service(get_pod_by_name)
            .service(get_svc_by_ip)
            .service(get_pod_traffic_name)
            .service(get_pod_syscall_name)
            .service(get_pods_by_node)
            .service(mark_pod_dead)
            .service(health_check)
    })
    // Listen on all interfaces; 9090 matches the in-cluster service port
    // other components use to reach the broker.
    .bind(("0.0.0.0", 9090))?
    .run()
    .await
}
73 |
74 | #[get("/health")]
75 | pub async fn health_check() -> HttpResponse {
76 | HttpResponse::Ok()
77 | .content_type("application/json")
78 | .body("Healthy!")
79 | }
80 |
--------------------------------------------------------------------------------
/frontend/src/utils/seccompProfileGenerator.ts:
--------------------------------------------------------------------------------
1 | import type { PodNodeData } from '../types';
2 | import type { SeccompProfile, SeccompSyscall } from '../types/seccompProfile';
3 | import { parseSyscallString } from './syscalls';
4 |
5 | export function generateSeccompProfile(pod: PodNodeData): SeccompProfile {
6 | // Collect all unique syscalls from the pod's observed behavior
7 | const uniqueSyscalls = new Set();
8 |
9 | pod.syscalls?.forEach((syscallRecord) => {
10 | if (syscallRecord.syscalls) {
11 | // Split comma-separated syscalls and validate
12 | const { valid } = parseSyscallString(syscallRecord.syscalls);
13 |
14 | // Add valid syscalls to set
15 | valid.forEach(syscall => uniqueSyscalls.add(syscall));
16 | }
17 | });
18 |
19 | // Create syscall rules - group all observed syscalls into one allow rule
20 | const syscallRules: SeccompSyscall[] = [];
21 |
22 | if (uniqueSyscalls.size > 0) {
23 | syscallRules.push({
24 | names: Array.from(uniqueSyscalls).sort(),
25 | action: 'SCMP_ACT_ALLOW',
26 | });
27 | }
28 |
29 | // Create the seccomp profile
30 | const profile: SeccompProfile = {
31 | defaultAction: 'SCMP_ACT_ERRNO', // Default to deny all syscalls not explicitly allowed
32 | architectures: [
33 | 'SCMP_ARCH_X86_64',
34 | 'SCMP_ARCH_X86',
35 | 'SCMP_ARCH_X32',
36 | ],
37 | syscalls: syscallRules,
38 | };
39 |
40 | return profile;
41 | }
42 |
43 | export function profileToJSON(profile: SeccompProfile): string {
44 | return JSON.stringify(profile, null, 2);
45 | }
46 |
47 | export function profileToYAML(profile: SeccompProfile, resourceName: string, namespace: string): string {
48 | const yaml: string[] = [];
49 |
50 | // Create a Kubernetes SeccompProfile CRD format
51 | yaml.push('apiVersion: security.kubernetes.io/v1alpha1');
52 | yaml.push('kind: SeccompProfile');
53 | yaml.push('metadata:');
54 | yaml.push(` name: ${resourceName}-seccomp`);
55 | yaml.push(` namespace: ${namespace}`);
56 | yaml.push('spec:');
57 | yaml.push(` defaultAction: ${profile.defaultAction}`);
58 |
59 | if (profile.architectures && profile.architectures.length > 0) {
60 | yaml.push(' architectures:');
61 | profile.architectures.forEach(arch => {
62 | yaml.push(` - ${arch}`);
63 | });
64 | }
65 |
66 | if (profile.syscalls && profile.syscalls.length > 0) {
67 | yaml.push(' syscalls:');
68 | profile.syscalls.forEach((syscall) => {
69 | yaml.push(' - names:');
70 | syscall.names.forEach(name => {
71 | yaml.push(` - ${name}`);
72 | });
73 | yaml.push(` action: ${syscall.action}`);
74 | });
75 | }
76 |
77 | return yaml.join('\n');
78 | }
79 |
--------------------------------------------------------------------------------
/mcp-server/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "net/http"
6 | "os"
7 | "os/signal"
8 | "syscall"
9 | "time"
10 |
11 | "github.com/kguardian-dev/kguardian/mcp-server/logger"
12 | "github.com/kguardian-dev/kguardian/mcp-server/tools"
13 | "github.com/modelcontextprotocol/go-sdk/mcp"
14 | "github.com/sirupsen/logrus"
15 | )
16 |
// main wires the kguardian MCP server: configures logging, registers the
// broker-backed tools, serves them over streamable HTTP, and shuts down
// gracefully on SIGINT/SIGTERM.
func main() {
	// Initialize logger; LOG_LEVEL defaults to "info" when unset.
	logLevel := os.Getenv("LOG_LEVEL")
	if logLevel == "" {
		logLevel = "info"
	}
	logger.Init(logLevel)

	// Get configuration from environment. The default BROKER_URL assumes
	// the in-cluster service DNS name; PORT defaults to 8081.
	brokerURL := os.Getenv("BROKER_URL")
	if brokerURL == "" {
		brokerURL = "http://broker.kguardian.svc.cluster.local:9090"
	}

	port := os.Getenv("PORT")
	if port == "" {
		port = "8081"
	}

	logger.Log.WithFields(logrus.Fields{
		"port":       port,
		"broker_url": brokerURL,
		"log_level":  logLevel,
	}).Info("Initializing kguardian MCP server")

	// Create MCP server
	server := mcp.NewServer(
		&mcp.Implementation{
			Name:    "kguardian-mcp",
			Version: "1.0.0",
		},
		nil,
	)

	// Register tools
	tools.RegisterTools(server, brokerURL)

	// Create HTTP handler using StreamableHTTPHandler; every incoming
	// request is served by the same MCP server instance.
	handler := mcp.NewStreamableHTTPHandler(func(req *http.Request) *mcp.Server {
		return server
	}, nil)

	// Setup HTTP server
	httpServer := &http.Server{
		Addr:         ":" + port,
		Handler:      handler,
		ReadTimeout:  30 * time.Second,
		WriteTimeout: 120 * time.Second, // Allow enough time for broker queries and large responses
		IdleTimeout:  120 * time.Second,
	}

	// Start server in a goroutine so main can block on the signal channel.
	go func() {
		logger.Log.WithFields(logrus.Fields{
			"port":    port,
			"address": ":" + port,
		}).Info("kguardian MCP server starting")

		// ErrServerClosed is the expected result of Shutdown, not a failure.
		if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			logger.Log.WithField("error", err.Error()).Error("Failed to start HTTP server")
			os.Exit(1)
		}
	}()

	// Wait for interrupt signal
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	sig := <-quit

	logger.Log.WithField("signal", sig.String()).Info("Received shutdown signal")

	// Graceful shutdown: give in-flight requests up to 10s to drain.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := httpServer.Shutdown(ctx); err != nil {
		logger.Log.WithField("error", err.Error()).Error("Server forced to shutdown")
	}

	logger.Log.Info("Server stopped gracefully")
}
98 |
--------------------------------------------------------------------------------
/advisor/pkg/k8s/labels.go:
--------------------------------------------------------------------------------
1 | package k8s
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | api "github.com/kguardian-dev/kguardian/advisor/pkg/api"
8 | v1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/client-go/kubernetes"
11 | )
12 |
13 | // DetectLabels detects the labels of a pod.
14 | func detectSelectorLabels(clientset *kubernetes.Clientset, origin interface{}) (map[string]string, error) {
15 | // Use type assertion to check the specific type
16 | switch o := origin.(type) {
17 | case *v1.Pod:
18 | return GetOwnerRef(clientset, o)
19 | case *api.PodDetail:
20 | return GetOwnerRef(clientset, &o.Pod)
21 | case *api.SvcDetail:
22 | var svc v1.Service
23 | svc = o.Service
24 | return svc.Spec.Selector, nil
25 | default:
26 | return nil, fmt.Errorf("detectSelectorLabels: unknown type")
27 | }
28 | }
29 |
30 | func GetOwnerRef(clientset *kubernetes.Clientset, pod *v1.Pod) (map[string]string, error) {
31 | ctx := context.TODO()
32 |
33 | // Check if the Pod has an owner
34 | if len(pod.OwnerReferences) > 0 {
35 | owner := pod.OwnerReferences[0]
36 |
37 | // TODO: If the resource no longer exists but the database has the log/entry this will cause it to break for this netpol
38 |
39 | // Based on the owner, get the controller object to check its labels
40 | switch owner.Kind {
41 | case "ReplicaSet":
42 | replicaSet, err := clientset.AppsV1().ReplicaSets(pod.Namespace).Get(ctx, owner.Name, metav1.GetOptions{})
43 | if err != nil {
44 | return nil, err
45 | }
46 | deployment, err := clientset.AppsV1().Deployments(pod.Namespace).Get(ctx, replicaSet.OwnerReferences[0].Name, metav1.GetOptions{})
47 | if err != nil {
48 | return nil, err
49 | }
50 | return deployment.Spec.Selector.MatchLabels, nil
51 |
52 | case "StatefulSet":
53 | statefulSet, err := clientset.AppsV1().StatefulSets(pod.Namespace).Get(ctx, owner.Name, metav1.GetOptions{})
54 | if err != nil {
55 | return nil, err
56 | }
57 | return statefulSet.Spec.Selector.MatchLabels, nil
58 |
59 | case "DaemonSet":
60 | daemonSet, err := clientset.AppsV1().DaemonSets(pod.Namespace).Get(ctx, owner.Name, metav1.GetOptions{})
61 | if err != nil {
62 | return nil, err
63 | }
64 | return daemonSet.Spec.Selector.MatchLabels, nil
65 |
66 | case "Job":
67 | job, err := clientset.BatchV1().Jobs(pod.Namespace).Get(ctx, owner.Name, metav1.GetOptions{})
68 | if err != nil {
69 | return nil, err
70 | }
71 | return job.Spec.Selector.MatchLabels, nil
72 |
73 | // Add more controller kinds here if needed
74 |
75 | default:
76 | return nil, fmt.Errorf("unknown or unsupported ownerReference: %s", owner.String())
77 | }
78 | }
79 | return pod.Labels, nil
80 | }
81 |
--------------------------------------------------------------------------------
/docs/ai-tools/claude-code.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Claude Code setup"
3 | description: "Configure Claude Code for your documentation workflow"
4 | icon: "asterisk"
5 | ---
6 |
7 | Claude Code is Anthropic's official CLI tool. This guide will help you set up Claude Code to help you write and maintain your documentation.
8 |
9 | ## Prerequisites
10 |
11 | - Active Claude subscription (Pro, Max, or API access)
12 |
13 | ## Setup
14 |
15 | 1. Install Claude Code globally:
16 |
17 | ```bash
18 | npm install -g @anthropic-ai/claude-code
19 | ```
20 |
21 | 2. Navigate to your docs directory.
22 | 3. (Optional) Add the `CLAUDE.md` file below to your project.
23 | 4. Run `claude` to start.
24 |
25 | ## Create `CLAUDE.md`
26 |
27 | Create a `CLAUDE.md` file at the root of your documentation repository to train Claude Code on your specific documentation standards:
28 |
29 | ````markdown
30 | # Mintlify documentation
31 |
32 | ## Working relationship
33 | - You can push back on ideas—this can lead to better documentation. Cite sources and explain your reasoning when you do so
34 | - ALWAYS ask for clarification rather than making assumptions
35 | - NEVER lie, guess, or make up information
36 |
37 | ## Project context
38 | - Format: MDX files with YAML frontmatter
39 | - Config: docs.json for navigation, theme, settings
40 | - Components: Mintlify components
41 |
42 | ## Content strategy
43 | - Document just enough for user success - not too much, not too little
44 | - Prioritize accuracy and usability of information
45 | - Make content evergreen when possible
46 | - Search for existing information before adding new content. Avoid duplication unless it is done for a strategic reason
47 | - Check existing patterns for consistency
48 | - Start by making the smallest reasonable changes
49 |
50 | ## Frontmatter requirements for pages
51 | - title: Clear, descriptive page title
52 | - description: Concise summary for SEO/navigation
53 |
54 | ## Writing standards
55 | - Second-person voice ("you")
56 | - Prerequisites at start of procedural content
57 | - Test all code examples before publishing
58 | - Match style and formatting of existing pages
59 | - Include both basic and advanced use cases
60 | - Language tags on all code blocks
61 | - Alt text on all images
62 | - Relative paths for internal links
63 |
64 | ## Git workflow
65 | - NEVER use --no-verify when committing
66 | - Ask how to handle uncommitted changes before starting
67 | - Create a new branch when no clear branch exists for changes
68 | - Commit frequently throughout development
69 | - NEVER skip or disable pre-commit hooks
70 |
71 | ## Do not
72 | - Skip frontmatter on any MDX file
73 | - Use absolute URLs for internal links
74 | - Include untested code examples
75 | - Make assumptions - always ask for clarification
76 | ````
77 |
--------------------------------------------------------------------------------
/frontend/src/hooks/policyEditor/usePolicyExport.ts:
--------------------------------------------------------------------------------
1 | import { useState } from 'react';
2 | import type { NetworkPolicy } from '../../types/networkPolicy';
3 | import type { SeccompProfile } from '../../types/seccompProfile';
4 | import { policyToYAML } from '../../utils/networkPolicyGenerator';
5 | import { profileToYAML, profileToJSON } from '../../utils/seccompProfileGenerator';
6 |
/** Which kind of policy is being exported: a Kubernetes NetworkPolicy or a seccomp profile. */
export type PolicyType = 'network' | 'seccomp';

/** Inputs for {@link usePolicyExport}. Exactly one of `policy` / `seccompProfile` is expected per `policyType`. */
interface UsePolicyExportProps {
  /** Selects which policy object below is serialized. */
  policyType: PolicyType;
  /** NetworkPolicy to export; used when `policyType === 'network'`. */
  policy: NetworkPolicy | null;
  /** Seccomp profile to export; used when `policyType === 'seccomp'`. */
  seccompProfile: SeccompProfile | null;
  /** Pod name — fallback resource name and download-filename stem. */
  podName: string;
  /** Preferred resource name for the seccomp YAML (e.g. workload identity); falls back to `podName`. */
  podIdentity?: string;
  /** Namespace embedded in the generated seccomp YAML resource. */
  podNamespace: string;
  /** For seccomp: `true` exports YAML (SeccompProfile CR), `false` exports raw JSON. */
  yamlView: boolean;
}
18 |
19 | export const usePolicyExport = ({
20 | policyType,
21 | policy,
22 | seccompProfile,
23 | podName,
24 | podIdentity,
25 | podNamespace,
26 | yamlView,
27 | }: UsePolicyExportProps) => {
28 | const [copiedToClipboard, setCopiedToClipboard] = useState(false);
29 |
30 | const getExportContent = (): string | null => {
31 | if (policyType === 'network' && policy) {
32 | return policyToYAML(policy);
33 | } else if (policyType === 'seccomp' && seccompProfile) {
34 | // Use pod identity for resource name, fallback to pod name
35 | const resourceName = podIdentity || podName;
36 | return yamlView
37 | ? profileToYAML(seccompProfile, resourceName, podNamespace)
38 | : profileToJSON(seccompProfile);
39 | }
40 | return null;
41 | };
42 |
43 | const handleCopy = () => {
44 | const content = getExportContent();
45 | if (!content) return;
46 |
47 | navigator.clipboard.writeText(content);
48 | setCopiedToClipboard(true);
49 | setTimeout(() => setCopiedToClipboard(false), 2000);
50 | };
51 |
52 | const handleDownload = () => {
53 | const content = getExportContent();
54 | if (!content) return;
55 |
56 | let filename: string;
57 | let mimeType: string;
58 |
59 | if (policyType === 'network' && policy) {
60 | filename = `${policy.metadata.name}.yaml`;
61 | mimeType = 'text/yaml';
62 | } else if (policyType === 'seccomp') {
63 | if (yamlView) {
64 | filename = `${podName}-seccomp.yaml`;
65 | mimeType = 'text/yaml';
66 | } else {
67 | filename = `${podName}-seccomp.json`;
68 | mimeType = 'application/json';
69 | }
70 | } else {
71 | return;
72 | }
73 |
74 | const blob = new Blob([content], { type: mimeType });
75 | const url = URL.createObjectURL(blob);
76 | const a = document.createElement('a');
77 | a.href = url;
78 | a.download = filename;
79 | a.click();
80 | URL.revokeObjectURL(url);
81 | };
82 |
83 | return {
84 | copiedToClipboard,
85 | handleCopy,
86 | handleDownload,
87 | };
88 | };
89 |
--------------------------------------------------------------------------------