├── plugins ├── README.md └── iptracker │ └── iptracker_networkpolicy.go ├── OWNERS ├── pkg ├── network │ ├── test-data │ │ ├── udp.pcap │ │ └── tcp-ipv4.pcap │ └── packet.go ├── networkpolicy │ ├── helpers.go │ ├── logging.go │ ├── engine.go │ └── helpers_test.go ├── api │ ├── helper.go │ ├── kubenetworkpolicies.proto │ └── interfaces.go ├── podinfo │ ├── fallback_provider.go │ └── podinfo.go ├── ipcache │ ├── ipcache.go │ ├── local_test.go │ ├── local.go │ ├── ipcache_test.go │ ├── etcd_test.go │ ├── bbolt_test.go │ ├── lru_test.go │ ├── lru.go │ └── bbolt.go ├── dataplane │ ├── conntrack.go │ ├── conntrack_test.go │ └── metrics.go ├── dns │ ├── domainmap.go │ └── domainmap_test.go ├── cmd │ └── cmd.go └── runner │ └── bounded_frequency_runner.go ├── docs └── testing │ ├── network_policies_latency.png │ ├── network_policies_packet_rate.png │ ├── job_poller.yaml │ ├── backend.yaml │ ├── README.md │ └── monitoring.yaml ├── code-of-conduct.md ├── charts └── kube-network-policies │ ├── Chart.yaml │ ├── .helmignore │ ├── values.yaml │ └── templates │ ├── serviceaccount.yaml │ ├── _helpers.tpl │ └── daemonset.yaml ├── hack ├── build-image.sh ├── lint.sh ├── generate-proto.sh ├── init-buildx.sh └── boskos.sh ├── .gitignore ├── cloudbuild.yaml ├── .github ├── workflows │ ├── test.yaml │ ├── bats.yml │ ├── e2e.yml │ └── iptracker.yml └── dependabot.yml ├── RELEASE.md ├── SECURITY_CONTACTS ├── .golangci.yaml ├── Dockerfile ├── Dockerfile.iptracker ├── SECURITY.md ├── tests ├── README.md ├── setup_suite.bash ├── e2e_npa_v1alpha2.bats └── e2e_iptracker.bats ├── CONTRIBUTING.md ├── install.yaml ├── install-anp.yaml ├── install-cnp.yaml ├── Makefile ├── install-iptracker.yaml ├── go.mod └── cmd └── kube-network-policies ├── standard └── main.go ├── npa-v1alpha1 └── main.go ├── iptracker └── main.go └── npa-v1alpha2 └── main.go /plugins/README.md: -------------------------------------------------------------------------------- 1 | Source code for compile-time plugins 2 | 
-------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - aojea 5 | - danwinship 6 | - thockin 7 | -------------------------------------------------------------------------------- /pkg/network/test-data/udp.pcap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes-sigs/kube-network-policies/HEAD/pkg/network/test-data/udp.pcap -------------------------------------------------------------------------------- /pkg/network/test-data/tcp-ipv4.pcap: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes-sigs/kube-network-policies/HEAD/pkg/network/test-data/tcp-ipv4.pcap -------------------------------------------------------------------------------- /docs/testing/network_policies_latency.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes-sigs/kube-network-policies/HEAD/docs/testing/network_policies_latency.png -------------------------------------------------------------------------------- /docs/testing/network_policies_packet_rate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes-sigs/kube-network-policies/HEAD/docs/testing/network_policies_packet_rate.png -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Community Code of Conduct 2 | 3 | Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) 4 | -------------------------------------------------------------------------------- 
/charts/kube-network-policies/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kube-network-policies 3 | description: A Helm chart for Kubernetes Network Policies 4 | type: application 5 | version: 0.0.1 6 | appVersion: "0.0.1" 7 | -------------------------------------------------------------------------------- /hack/build-image.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/bin/bash 3 | 4 | set -o errexit -o nounset -o pipefail 5 | 6 | # cd to the repo root 7 | REPO_ROOT=$(git rev-parse --show-toplevel) 8 | cd "${REPO_ROOT}" 9 | 10 | docker build . -t aojea/kube-netpol:"${1:-test}" 11 | -------------------------------------------------------------------------------- /hack/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
8 | 9 | cd $REPO_ROOT 10 | docker run --rm -v $(pwd):/app -w /app golangci/golangci-lint:v2.1.6 golangci-lint run -v 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | bin/ 17 | dist/ 18 | _artifacts/ 19 | -------------------------------------------------------------------------------- /cloudbuild.yaml: -------------------------------------------------------------------------------- 1 | # See https://cloud.google.com/cloud-build/docs/build-config 2 | options: 3 | substitution_option: ALLOW_LOOSE 4 | machineType: E2_HIGHCPU_32 5 | steps: 6 | - name: gcr.io/k8s-staging-test-infra/gcb-docker-gcloud 7 | entrypoint: make 8 | env: 9 | - REGISTRY=gcr.io/k8s-staging-networking 10 | - IMAGE_NAME=kube-network-policies 11 | args: ['release'] 12 | -------------------------------------------------------------------------------- /docs/testing/job_poller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: abtest 5 | spec: 6 | completions: 50 7 | parallelism: 10 8 | template: 9 | spec: 10 | containers: 11 | - name: ab 12 | image: httpd:2 13 | command: ["ab", "-n", "10000", "-c", "1000", "-v", "1", "http://test-service:80/"] 14 | restartPolicy: Never 15 | -------------------------------------------------------------------------------- /hack/generate-proto.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | 
REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 8 | 9 | cd $REPO_ROOT 10 | go install google.golang.org/protobuf/cmd/protoc-gen-go@latest 11 | go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest 12 | protoc --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. pkg/api/kubenetworkpolicies.proto 13 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | test: 7 | strategy: 8 | fail-fast: false 9 | matrix: 10 | go-version: [1.24.x] 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/setup-go@v6 14 | with: 15 | go-version: ${{ matrix.go-version }} 16 | - uses: actions/checkout@v5 17 | - run: sudo make test 18 | - run: make lint 19 | 20 | -------------------------------------------------------------------------------- /charts/kube-network-policies/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Process 2 | 3 | The Kubernetes Template Project is released on an as-needed basis. The process is as follows: 4 | 5 | 1. An issue is proposing a new release with a changelog since the last release 6 | 1. All [OWNERS](OWNERS) must LGTM this release 7 | 1. 
An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` 8 | 1. The release issue is closed 9 | 1. An announcement email is sent to `dev@kubernetes.io` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` 10 | -------------------------------------------------------------------------------- /pkg/networkpolicy/helpers.go: -------------------------------------------------------------------------------- 1 | package networkpolicy 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | "k8s.io/apimachinery/pkg/labels" 6 | "k8s.io/klog/v2" 7 | ) 8 | 9 | // MatchesSelector returns true if the selector matches the given labels. 10 | func MatchesSelector(selector *metav1.LabelSelector, lbls map[string]string) bool { 11 | s, err := metav1.LabelSelectorAsSelector(selector) 12 | if err != nil { 13 | klog.Errorf("error parsing label selector: %v", err) 14 | return false 15 | } 16 | return s.Matches(labels.Set(lbls)) 17 | } 18 | -------------------------------------------------------------------------------- /SECURITY_CONTACTS: -------------------------------------------------------------------------------- 1 | # Defined below are the security contacts for this repo. 2 | # 3 | # They are the contact point for the Security Response Committee to reach out 4 | # to for triaging and handling of incoming issues. 5 | # 6 | # The below names agree to abide by the 7 | # [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) 8 | # and will be removed and replaced if they violate that agreement. 
9 | # 10 | # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE 11 | # INSTRUCTIONS AT https://kubernetes.io/security/ 12 | 13 | aojea 14 | danwinship 15 | thockin 16 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | tests: false 4 | linters: 5 | default: none 6 | enable: 7 | - errcheck 8 | - gocritic 9 | - govet 10 | - ineffassign 11 | - staticcheck 12 | exclusions: 13 | generated: lax 14 | presets: 15 | - comments 16 | - common-false-positives 17 | - legacy 18 | - std-error-handling 19 | rules: 20 | - linters: 21 | - staticcheck 22 | path: pkg/networkpolicy/metrics.go 23 | paths: 24 | - third_party$ 25 | - builtin$ 26 | - examples$ 27 | formatters: 28 | exclusions: 29 | generated: lax 30 | paths: 31 | - third_party$ 32 | - builtin$ 33 | - examples$ 34 | -------------------------------------------------------------------------------- /charts/kube-network-policies/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: registry.k8s.io/networking/kube-network-policies 3 | pullPolicy: IfNotPresent 4 | tag: "v0.6.0" 5 | 6 | nameOverride: "" 7 | fullnameOverride: "" 8 | 9 | adminNetworkPolicy: true 10 | 11 | serviceAccount: 12 | annotations: {} 13 | name: kube-network-policies 14 | 15 | daemonset: 16 | annotations: {} 17 | labels: {} 18 | nodeSelector: 19 | kubernetes.io/os: linux 20 | tolerations: 21 | - operator: Exists 22 | effect: NoSchedule 23 | extraEnv: [] 24 | securityContext: 25 | privileged: true 26 | capabilities: 27 | add: ["NET_ADMIN"] 28 | resources: 29 | requests: 30 | cpu: "100m" 31 | memory: "50Mi" 32 | -------------------------------------------------------------------------------- /docs/testing/backend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: server-deployment 5 | labels: 6 | app: MyApp 7 | spec: 8 | replicas: 10 9 | selector: 10 | matchLabels: 11 | app: MyApp 12 | template: 13 | metadata: 14 | labels: 15 | app: MyApp 16 | spec: 17 | containers: 18 | - name: agnhost 19 | image: k8s.gcr.io/e2e-test-images/agnhost:2.39 20 | args: 21 | - netexec 22 | - --http-port=80 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: test-service 28 | spec: 29 | type: ClusterIP 30 | selector: 31 | app: MyApp 32 | ports: 33 | - protocol: TCP 34 | port: 80 35 | targetPort: 80 36 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an ARG to select which build target to compile and use 2 | ARG TARGET_BUILD=standard 3 | 4 | FROM --platform=$BUILDPLATFORM golang:1.24 AS builder 5 | WORKDIR /src 6 | 7 | # Get target architecture for cross-compilation 8 | ARG TARGETOS 9 | ARG TARGETARCH 10 | ARG TARGET_BUILD 11 | 12 | COPY go.mod go.sum ./ 13 | RUN go mod download 14 | COPY . . 
15 | 16 | # Build the specific binary based on the build argument and target architecture 17 | RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} make build-${TARGET_BUILD} 18 | 19 | # STEP 2: Build small image 20 | FROM gcr.io/distroless/static-debian12 21 | ARG TARGET_BUILD 22 | COPY --from=builder /src/bin/kube-network-policies-${TARGET_BUILD} /bin/netpol 23 | 24 | # The entrypoint is always the same, regardless of the build 25 | CMD ["/bin/netpol"] -------------------------------------------------------------------------------- /Dockerfile.iptracker: -------------------------------------------------------------------------------- 1 | # Use an ARG to select which build target to compile and use 2 | ARG TARGET_BUILD=standard 3 | 4 | FROM --platform=$BUILDPLATFORM golang:1.24 AS builder 5 | 6 | WORKDIR /src 7 | 8 | # Get target architecture for cross-compilation 9 | ARG TARGETOS 10 | ARG TARGETARCH 11 | ARG TARGET_BUILD 12 | 13 | COPY . . 14 | RUN go mod download 15 | 16 | # Build the specific binary based on the build argument and target architecture 17 | RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} make build-kube-ip-tracker-${TARGET_BUILD} 18 | 19 | # STEP 2: Build small image 20 | FROM gcr.io/distroless/static-debian12 21 | ARG TARGET_BUILD 22 | COPY --from=builder /src/bin/kube-ip-tracker-${TARGET_BUILD} /bin/kube-ip-tracker 23 | 24 | # The entrypoint is always the same, regardless of the build 25 | CMD ["/bin/kube-ip-tracker"] 26 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | groups: 13 | k8s-deps: 14 | patterns: 15 | - "*k8s.io*" 16 | etcd-deps: 17 | patterns: 18 | - "*go.etcd.io*" 19 | 20 | - package-ecosystem: "github-actions" 21 | directory: "/" 22 | schedule: 23 | interval: "weekly" 24 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Security Announcements 4 | 5 | Join the [kubernetes-security-announce] group for security and vulnerability announcements. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | Instructions for reporting a vulnerability can be found on the 10 | [Kubernetes Security and Disclosure Information] page. 11 | 12 | ## Supported Versions 13 | 14 | Information about supported Kubernetes versions can be found on the 15 | [Kubernetes version and version skew support policy] page on the Kubernetes website. 16 | 17 | [kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce 18 | [Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions 19 | [Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability 20 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # Integration tests 2 | 3 | 4 | 1. 
Install `bats` https://bats-core.readthedocs.io/en/stable/installation.html 5 | 6 | 2. Install `kind` https://kind.sigs.k8s.io/ 7 | 8 | 3. Run `bats tests/` 9 | 10 | ## Troubleshooting test failures 11 | 12 | `bats -x -o _artifacts --print-output-on-failure --filter "network policy drops established connections" tests/e2e_standard.bats` 13 | 14 | You can modify or comment the `tests/setup_suite.bash` hooks to avoid creating and recreating the cluster. 15 | 16 | ```diff 17 | diff --git a/tests/setup_suite.bash b/tests/setup_suite.bash 18 | index f34cc39..8006903 100644 19 | --- a/tests/setup_suite.bash 20 | +++ b/tests/setup_suite.bash 21 | @@ -29,5 +29,5 @@ EOF 22 | 23 | function teardown_suite { 24 | kind export logs "$BATS_TEST_DIRNAME"/../_artifacts --name "$CLUSTER_NAME" 25 | - kind delete cluster --name "$CLUSTER_NAME" 26 | + # kind delete cluster --name "$CLUSTER_NAME" 27 | } 28 | ``` -------------------------------------------------------------------------------- /.github/workflows/bats.yml: -------------------------------------------------------------------------------- 1 | name: bats 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | tags: 8 | - 'v*' 9 | pull_request: 10 | branches: [ main ] 11 | workflow_dispatch: 12 | 13 | env: 14 | GO_VERSION: "1.24" 15 | K8S_VERSION: "v1.33.1" 16 | KIND_VERSION: "v0.29.0" 17 | IMAGE_NAME: registry.k8s.io/networking/kube-network-policies 18 | KIND_CLUSTER_NAME: kind 19 | 20 | jobs: 21 | bats_tests: 22 | runs-on: ubuntu-22.04 23 | name: Bats e2e tests 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v5 27 | - name: Setup Bats and bats libs 28 | id: setup-bats 29 | uses: bats-core/bats-action@3.0.1 30 | - name: Bats tests 31 | shell: bash 32 | env: 33 | BATS_LIB_PATH: ${{ steps.setup-bats.outputs.lib-path }} 34 | TERM: xterm 35 | run: bats -o _artifacts --print-output-on-failure tests/ 36 | 37 | - name: Upload logs 38 | if: always() 39 | uses: actions/upload-artifact@v5 40 | with: 41 | name: kind-logs-${{ 
env.JOB_NAME }}-${{ github.run_id }} 42 | path: ./_artifacts 43 | 44 | -------------------------------------------------------------------------------- /pkg/api/helper.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "time" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | // NewPodInfo creates a PodInfo object from a Pod and its corresponding Namespace and Node labels. 10 | // This helper is useful for populators that have access to the full Kubernetes objects since 11 | // contain all the necessary information for Network Policies selectors 12 | func NewPodInfo(pod *v1.Pod, nsLabels map[string]string, nodeLabels map[string]string, clusterID string) *PodInfo { 13 | if pod == nil { 14 | return nil 15 | } 16 | 17 | containerPorts := make([]*ContainerPort, 0) 18 | for _, container := range pod.Spec.Containers { 19 | for _, port := range container.Ports { 20 | containerPorts = append(containerPorts, &ContainerPort{ 21 | Name: port.Name, 22 | Port: port.ContainerPort, 23 | Protocol: string(port.Protocol), 24 | }) 25 | } 26 | } 27 | 28 | return &PodInfo{ 29 | Name: pod.Name, 30 | Labels: pod.Labels, 31 | ContainerPorts: containerPorts, 32 | Namespace: &Namespace{ 33 | Name: pod.Namespace, 34 | Labels: nsLabels, 35 | }, 36 | Node: &Node{ 37 | Name: pod.Spec.NodeName, 38 | Labels: nodeLabels, 39 | }, 40 | ClusterId: clusterID, 41 | LastUpdated: time.Now().Unix(), // TODO: maybe get it from the managedFields metadata 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /tests/setup_suite.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | function setup_suite { 6 | export BATS_TEST_TIMEOUT=120 7 | # Define the name of the kind cluster 8 | export CLUSTER_NAME="netpol-test-cluster" 9 | if kind get clusters | grep -q "^${CLUSTER_NAME}$"; then 10 | echo "Kind cluster ${CLUSTER_NAME} 
already exists. Skipping creation." 11 | kind get kubeconfig --name "$CLUSTER_NAME" > "$BATS_SUITE_TMPDIR/kubeconfig" 12 | export KUBECONFIG="$BATS_SUITE_TMPDIR/kubeconfig" 13 | return 14 | fi 15 | # Create cluster 16 | cat < 14 | 15 | - [Contributor License Agreement](https://git.k8s.io/community/CLA.md) - Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests 16 | - [Kubernetes Contributor Guide](https://k8s.dev/guide) - Main contributor documentation, or you can just jump directly to the [contributing page](https://k8s.dev/docs/guide/contributing/) 17 | - [Contributor Cheat Sheet](https://k8s.dev/cheatsheet) - Common resources for existing developers 18 | 19 | ## Mentorship 20 | 21 | - [Mentoring Initiatives](https://k8s.dev/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 22 | 23 | ## Contact Information 24 | 25 | - [Slack channel](https://kubernetes.slack.com/messages/sig-network) 26 | - [Mailing list](https://groups.google.com/g/kubernetes-sig-network) 27 | -------------------------------------------------------------------------------- /pkg/api/kubenetworkpolicies.proto: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | syntax = "proto3"; 17 | 18 | package sigs.k8s.io.kubenetworkpolicies; 19 | 20 | option go_package = "sigs.k8s.io/kube-network-policies/pkg/api"; 21 | 22 | // Represents a Kubernetes Namespace 23 | message Namespace { 24 | string name = 1; 25 | map labels = 2; 26 | } 27 | 28 | // Represents a Kubernetes Node 29 | message Node { 30 | string name = 1; 31 | map labels = 2; 32 | } 33 | 34 | // ContainerPort mimics the Kubernetes ContainerPort type. 35 | message ContainerPort { 36 | string name = 1; 37 | int32 port = 2; 38 | string protocol = 3; 39 | } 40 | 41 | 42 | // PodInfo contains the necessary information to match on network policies 43 | message PodInfo { 44 | // The name of the Pod 45 | string name = 1; 46 | // The labels of the Pod 47 | map labels = 2; 48 | // The ports of the Containers on the Pod 49 | repeated ContainerPort container_ports = 3; 50 | Namespace namespace = 4; 51 | Node node = 5; 52 | int64 last_updated = 6; 53 | string cluster_id = 7; 54 | } -------------------------------------------------------------------------------- /pkg/podinfo/fallback_provider.go: -------------------------------------------------------------------------------- 1 | // pkg/podinfo/fallback_provider.go 2 | 3 | package podinfo 4 | 5 | import ( 6 | "k8s.io/klog/v2" 7 | "sigs.k8s.io/kube-network-policies/pkg/api" 8 | ) 9 | 10 | // FallbackPodInfoProvider uses a primary provider (like a remote ipcache) and 11 | // falls back to a local, NRI-based provider for immediate information on new pods. 12 | // This solves the race condition where a pod's traffic is evaluated before its 13 | // information has propagated to the central cache. 14 | type FallbackPodInfoProvider struct { 15 | primaryProvider api.PodInfoProvider // The ipcache client 16 | localProvider api.PodInfoProvider // The local, NRI-based provider 17 | } 18 | 19 | var _ api.PodInfoProvider = &FallbackPodInfoProvider{} 20 | 21 | // NewFallbackPodInfoProvider creates a new FallbackPodInfoProvider. 
22 | func NewFallbackPodInfoProvider( 23 | primaryProvider api.PodInfoProvider, 24 | localProvider api.PodInfoProvider, 25 | ) api.PodInfoProvider { 26 | return &FallbackPodInfoProvider{ 27 | primaryProvider: primaryProvider, 28 | localProvider: localProvider, 29 | } 30 | } 31 | 32 | // GetPodInfoByIP implements the api.PodInfoProvider interface. 33 | func (p *FallbackPodInfoProvider) GetPodInfoByIP(ip string) (*api.PodInfo, bool) { 34 | // 1. Try the primary provider (ipcache) first. This is the authoritative source. 35 | if podInfo, found := p.primaryProvider.GetPodInfoByIP(ip); found { 36 | return podInfo, true 37 | } 38 | 39 | // 2. If not found, fall back to the local NRI-based provider for immediate data. 40 | klog.V(4).Infof("IP %s not found in primary provider, falling back to local NRI cache", ip) 41 | return p.localProvider.GetPodInfoByIP(ip) 42 | } 43 | -------------------------------------------------------------------------------- /pkg/ipcache/ipcache.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "sigs.k8s.io/kube-network-policies/pkg/api" 21 | ) 22 | 23 | // Store defines the interface for a key-value store that holds PodInfo. 
24 | type Store interface { 25 | GetPodInfoByIP(ip string) (*api.PodInfo, bool) 26 | Upsert(ip string, info *api.PodInfo) error 27 | Delete(ip string) error 28 | List() ([]*api.PodInfo, error) 29 | Clear() error // Clear removes all entries from the store. 30 | Close() error 31 | } 32 | 33 | // SyncMetadata contains the necessary information for a client to resume 34 | // watching for changes from the correct point and ensure server identity. 35 | type SyncMetadata struct { 36 | Revision int64 37 | ClusterID uint64 38 | MemberID uint64 39 | } 40 | 41 | // SyncMetadataStore defines the interface for a store that persists synchronization metadata. 42 | type SyncMetadataStore interface { 43 | // GetSyncMetadata retrieves the last saved synchronization state. 44 | GetSyncMetadata() (*SyncMetadata, error) 45 | 46 | // SetSyncMetadata saves the current synchronization state. 47 | SetSyncMetadata(meta *SyncMetadata) error 48 | } 49 | -------------------------------------------------------------------------------- /hack/init-buildx.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2020 The Kubernetes Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | set -o errexit -o nounset -o pipefail 17 | 18 | # We can skip setup if the current builder already has multi-arch 19 | # AND if it isn't the docker driver, which doesn't work 20 | current_builder="$(docker buildx inspect)" 21 | # linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6 22 | if ! grep -q "^Driver: docker$" <<<"${current_builder}" && \ 23 | grep -q "linux/amd64" <<<"${current_builder}" && \ 24 | grep -q "linux/arm64" <<<"${current_builder}"; then 25 | exit 0 26 | fi 27 | 28 | # Ensure qemu is in binfmt_misc 29 | # Docker desktop already has these in versions recent enough to have buildx 30 | # We only need to do this setup on linux hosts 31 | if [ "$(uname)" == 'Linux' ]; then 32 | # NOTE: this is pinned to a digest for a reason! 33 | docker run --rm --privileged tonistiigi/binfmt:qemu-v7.0.0-28@sha256:66e11bea77a5ea9d6f0fe79b57cd2b189b5d15b93a2bdb925be22949232e4e55 --install all 34 | fi 35 | 36 | # Ensure we use a builder that can leverage it (the default on linux will not) 37 | docker buildx rm knp-builder || true 38 | docker buildx create --use --name=knp-builder 39 | -------------------------------------------------------------------------------- /charts/kube-network-policies/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ include "kube-network-policies.serviceAccountName" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "kube-network-policies.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | --- 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | kind: ClusterRole 15 | metadata: 16 | name: {{ include "kube-network-policies.serviceAccountName" . }} 17 | labels: 18 | {{- include "kube-network-policies.labels" . 
| nindent 4 }} 19 | rules: 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - pods 24 | - namespaces 25 | {{- if .Values.baselineAdminNetworkPolicy }} 26 | - nodes 27 | {{- end }} 28 | verbs: 29 | - list 30 | - watch 31 | - apiGroups: 32 | - "networking.k8s.io" 33 | resources: 34 | - networkpolicies 35 | verbs: 36 | - list 37 | - watch 38 | {{- if .Values.baselineAdminNetworkPolicy }} 39 | - apiGroups: 40 | - "policy.networking.k8s.io" 41 | resources: 42 | - adminnetworkpolicies 43 | - baselineadminnetworkpolicies 44 | verbs: 45 | - list 46 | - watch 47 | {{- end }} 48 | --- 49 | apiVersion: rbac.authorization.k8s.io/v1 50 | kind: ClusterRoleBinding 51 | metadata: 52 | name: {{ include "kube-network-policies.serviceAccountName" . }} 53 | labels: 54 | {{- include "kube-network-policies.labels" . | nindent 4 }} 55 | roleRef: 56 | apiGroup: rbac.authorization.k8s.io 57 | kind: ClusterRole 58 | name: {{ include "kube-network-policies.serviceAccountName" . }} 59 | subjects: 60 | - kind: ServiceAccount 61 | name: {{ include "kube-network-policies.serviceAccountName" . }} 62 | namespace: {{ .Release.Namespace }} 63 | -------------------------------------------------------------------------------- /charts/kube-network-policies/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "kube-network-policies.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 
12 | */}} 13 | {{- define "kube-network-policies.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "kube-network-policies.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "kube-network-policies.labels" -}} 37 | helm.sh/chart: {{ include "kube-network-policies.chart" . }} 38 | {{ include "kube-network-policies.selectorLabels" . }} 39 | k8s-app: {{ include "kube-network-policies.name" . }} 40 | tier: node 41 | {{- if .Chart.AppVersion }} 42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 43 | {{- end }} 44 | app.kubernetes.io/managed-by: {{ .Release.Service }} 45 | {{- end }} 46 | 47 | {{/* 48 | Selector labels 49 | */}} 50 | {{- define "kube-network-policies.selectorLabels" -}} 51 | app: {{ include "kube-network-policies.name" . }} 52 | {{- end }} 53 | 54 | {{/* 55 | Create the name of the service account to use 56 | */}} 57 | {{- define "kube-network-policies.serviceAccountName" -}} 58 | {{- if .Values.serviceAccount.create }} 59 | {{- default (include "kube-network-policies.fullname" .) 
.Values.serviceAccount.name }} 60 | {{- else }} 61 | {{- default "default" .Values.serviceAccount.name }} 62 | {{- end }} 63 | {{- end }} 64 | -------------------------------------------------------------------------------- /pkg/ipcache/local_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUTHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "testing" 21 | 22 | "github.com/google/go-cmp/cmp" 23 | "google.golang.org/protobuf/testing/protocmp" 24 | "sigs.k8s.io/kube-network-policies/pkg/api" 25 | ) 26 | 27 | func TestLocalIPCache(t *testing.T) { 28 | cache := NewLocalIPCache() 29 | 30 | tests := []struct { 31 | name string 32 | ip string 33 | podInfo *api.PodInfo 34 | }{ 35 | { 36 | name: "ipv4", 37 | ip: "192.168.1.1", 38 | podInfo: &api.PodInfo{ 39 | Name: "pod1", 40 | }, 41 | }, 42 | { 43 | name: "ipv6", 44 | ip: "2001:db8::1", 45 | podInfo: &api.PodInfo{ 46 | Name: "pod2", 47 | }, 48 | }, 49 | } 50 | 51 | for _, tt := range tests { 52 | t.Run(tt.name, func(t *testing.T) { 53 | // Test Upsert 54 | err := cache.Upsert(tt.ip, tt.podInfo) 55 | if err != nil { 56 | t.Fatalf("Upsert() error = %v", err) 57 | } 58 | 59 | // Test Get 60 | got, found := cache.GetPodInfoByIP(tt.ip) 61 | if !found { 62 | t.Fatalf("GetPodInfoForIP() not found for ip %s", tt.ip) 63 | } 64 | // Use protocmp.Transform() for a correct 
protobuf comparison 65 | if diff := cmp.Diff(tt.podInfo, got, protocmp.Transform()); diff != "" { 66 | t.Errorf("GetPodInfoForIP() mismatch (-want +got):\n%s", diff) 67 | } 68 | 69 | // Test Delete 70 | err = cache.Delete(tt.ip) 71 | if err != nil { 72 | t.Fatalf("Delete() error = %v", err) 73 | } 74 | 75 | // Verify deletion 76 | _, found = cache.GetPodInfoByIP(tt.ip) 77 | if found { 78 | t.Fatalf("GetPodInfoForIP() found for ip %s after deletion", tt.ip) 79 | } 80 | }) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /pkg/ipcache/local.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "net/netip" 21 | "sync" 22 | 23 | "sigs.k8s.io/kube-network-policies/pkg/api" 24 | ) 25 | 26 | // LocalIPCache is an in-memory implementation of the IPCache interface. 27 | type LocalIPCache struct { 28 | mu sync.RWMutex 29 | data map[netip.Addr]*api.PodInfo 30 | } 31 | 32 | var _ Store = &LocalIPCache{} 33 | var _ api.PodInfoProvider = &LocalIPCache{} 34 | 35 | // NewLocalIPCache creates a new in-memory IP cache. 
36 | func NewLocalIPCache() *LocalIPCache { 37 | return &LocalIPCache{ 38 | data: make(map[netip.Addr]*api.PodInfo), 39 | } 40 | } 41 | 42 | func (c *LocalIPCache) GetPodInfoByIP(ip string) (*api.PodInfo, bool) { 43 | c.mu.RLock() 44 | defer c.mu.RUnlock() 45 | key, err := netip.ParseAddr(ip) 46 | if err != nil { 47 | return nil, false 48 | } 49 | info, found := c.data[key] 50 | return info, found 51 | } 52 | 53 | func (c *LocalIPCache) Upsert(ip string, info *api.PodInfo) error { 54 | c.mu.Lock() 55 | defer c.mu.Unlock() 56 | key, err := netip.ParseAddr(ip) 57 | if err != nil { 58 | return err 59 | } 60 | c.data[key] = info 61 | return nil 62 | } 63 | 64 | func (c *LocalIPCache) Delete(ip string) error { 65 | c.mu.Lock() 66 | defer c.mu.Unlock() 67 | key, err := netip.ParseAddr(ip) 68 | if err != nil { 69 | return err 70 | } 71 | delete(c.data, key) 72 | return nil 73 | } 74 | 75 | func (c *LocalIPCache) List() ([]*api.PodInfo, error) { 76 | c.mu.RLock() 77 | defer c.mu.RUnlock() 78 | var list []*api.PodInfo 79 | for _, info := range c.data { 80 | list = append(list, info) 81 | } 82 | return list, nil 83 | } 84 | 85 | func (c *LocalIPCache) Clear() error { 86 | c.mu.Lock() 87 | defer c.mu.Unlock() 88 | c.data = make(map[netip.Addr]*api.PodInfo) 89 | return nil 90 | } 91 | 92 | func (c *LocalIPCache) Close() error { 93 | return nil 94 | } 95 | -------------------------------------------------------------------------------- /pkg/api/interfaces.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "net/netip" 7 | 8 | "sigs.k8s.io/kube-network-policies/pkg/network" 9 | ) 10 | 11 | // PodInfoProvider defines an interface for components that can provide PodInfo. 12 | type PodInfoProvider interface { 13 | GetPodInfoByIP(podIP string) (*PodInfo, bool) 14 | } 15 | 16 | // DomainResolver provides an interface for resolving domain names to IP addresses. 
17 | type DomainResolver interface { 18 | ContainsIP(domain string, ip net.IP) bool 19 | } 20 | 21 | // SyncFunc is a callback function that an evaluator can invoke to trigger 22 | // a dataplane reconciliation. 23 | type SyncFunc func() 24 | 25 | // Verdict represents the outcome of a packet evaluation. 26 | type Verdict int 27 | 28 | const ( 29 | // VerdictAccept allows the packet. In a directional pipeline, this means 30 | // the packet is allowed for that stage. 31 | VerdictAccept Verdict = iota 32 | // VerdictDeny denies the packet. This is a final decision for that direction. 33 | VerdictDeny 34 | // VerdictNext continues to the next evaluator in the pipeline. 35 | VerdictNext 36 | ) 37 | 38 | // PolicyEvaluator is the complete interface for a policy plugin. 39 | // It is responsible for both evaluating packets against its policies and 40 | // providing the necessary configuration to the dataplane. 41 | type PolicyEvaluator interface { 42 | // Name returns the identifier for this evaluator. 43 | Name() string 44 | // Ready returns true if the evaluator is initialized and ready to work. 45 | Ready() bool 46 | 47 | // EvaluateIngress/EvaluateEgress perform the runtime packet evaluation. 48 | EvaluateIngress(ctx context.Context, p *network.Packet, srcPod, dstPod *PodInfo) (Verdict, error) 49 | EvaluateEgress(ctx context.Context, p *network.Packet, srcPod, dstPod *PodInfo) (Verdict, error) 50 | 51 | // SetDataplaneSyncCallback allows the dataplane to provide a callback function. 52 | // The evaluator MUST call this function whenever its state changes in a way 53 | // that requires the dataplane rules to be re-synced. 54 | SetDataplaneSyncCallback(syncFn SyncFunc) 55 | 56 | // ManagedIPs returns the set of Pod IPs that this policy evaluator manages. 57 | // The dataplane uses this to build optimized nftables sets. 58 | // It can also return 'divertAll = true' to signal that all traffic 59 | // must be sent to the nfqueue, disabling the IP set optimization. 
60 | ManagedIPs(ctx context.Context) (ips []netip.Addr, divertAll bool, err error) 61 | } 62 | -------------------------------------------------------------------------------- /pkg/networkpolicy/logging.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: APACHE-2.0 2 | 3 | package networkpolicy 4 | 5 | import ( 6 | "context" 7 | "net/netip" 8 | 9 | "k8s.io/klog/v2" 10 | "sigs.k8s.io/kube-network-policies/pkg/api" 11 | "sigs.k8s.io/kube-network-policies/pkg/network" 12 | ) 13 | 14 | // LoggingPolicy implements the PolicyEvaluator interface to log packet details. 15 | // It is intended to be the first evaluator in the engine to provide visibility. 16 | type LoggingPolicy struct{} 17 | 18 | // NewLoggingPolicy creates a new logging policy evaluator. 19 | func NewLoggingPolicy() *LoggingPolicy { 20 | return &LoggingPolicy{} 21 | } 22 | 23 | // Name returns the name of the policy evaluator. 24 | func (l *LoggingPolicy) Name() string { 25 | return "LoggingPolicy" 26 | } 27 | 28 | func (l *LoggingPolicy) Ready() bool { 29 | return true 30 | } 31 | 32 | func (l *LoggingPolicy) SetDataplaneSyncCallback(syncFn api.SyncFunc) { 33 | // No-op for AdminNetworkPolicy as it doesn't directly control dataplane rules. 34 | // The controller will handle syncing based on policy changes. 35 | } 36 | 37 | func (l *LoggingPolicy) ManagedIPs(ctx context.Context) ([]netip.Addr, bool, error) { 38 | // ManagedIPs returns nil as the logging policy does not manage any IPs. 39 | // It also returns false for divertAll as it does not divert all traffic. 40 | return nil, false, nil 41 | } 42 | 43 | // EvaluateIngress logs the details of an ingress packet and passes it to the next evaluator. 
44 | func (l *LoggingPolicy) EvaluateIngress(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) { 45 | logPacket(ctx, "Ingress", p, srcPod, dstPod) 46 | return api.VerdictNext, nil 47 | } 48 | 49 | // EvaluateEgress logs the details of an egress packet and passes it to the next evaluator. 50 | func (l *LoggingPolicy) EvaluateEgress(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) { 51 | logPacket(ctx, "Egress", p, srcPod, dstPod) 52 | return api.VerdictNext, nil 53 | } 54 | 55 | // logPacket is a helper function to format and write the log message. 56 | func logPacket(ctx context.Context, direction string, p *network.Packet, srcPod, dstPod *api.PodInfo) { 57 | logger := klog.FromContext(ctx) 58 | 59 | srcPodStr, dstPodStr := "external", "external" 60 | if srcPod != nil { 61 | srcPodStr = srcPod.Namespace.Name + "/" + srcPod.Name 62 | } 63 | if dstPod != nil { 64 | dstPodStr = dstPod.Namespace.Name + "/" + dstPod.Name 65 | } 66 | 67 | logger.Info("Evaluating packet", 68 | "direction", direction, 69 | "srcPod", srcPodStr, 70 | "dstPod", dstPodStr, 71 | "packet", p, 72 | ) 73 | } 74 | -------------------------------------------------------------------------------- /charts/kube-network-policies/templates/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: {{ template "kube-network-policies.fullname" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "kube-network-policies.labels" . | nindent 4 }} 8 | {{- with .Values.daemonset.labels }} 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | {{- with .Values.daemonset.annotations }} 12 | annotations: 13 | {{- toYaml . | nindent 4 }} 14 | {{- end }} 15 | spec: 16 | selector: 17 | matchLabels: 18 | {{- include "kube-network-policies.selectorLabels" . 
| nindent 6 }} 19 | template: 20 | metadata: 21 | labels: 22 | {{- include "kube-network-policies.labels" . | nindent 8 }} 23 | {{- with .Values.daemonset.labels }} 24 | {{- toYaml . | nindent 8 }} 25 | {{- end }} 26 | spec: 27 | hostNetwork: true 28 | dnsPolicy: ClusterFirst 29 | {{- with .Values.daemonset.nodeSelector }} 30 | nodeSelector: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | {{- with .Values.daemonset.tolerations }} 34 | tolerations: 35 | {{- toYaml . | nindent 8 }} 36 | {{- end }} 37 | serviceAccountName: {{ include "kube-network-policies.serviceAccountName" . }} 38 | containers: 39 | - name: kube-network-policies 40 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 41 | args: 42 | - /bin/netpol 43 | - --hostname-override=$(MY_NODE_NAME) 44 | - --v=2 45 | {{- if .Values.adminNetworkPolicy }} 46 | - --nfqueue-id=99 47 | - --admin-network-policy=true 48 | - --baseline-admin-network-policy=true 49 | {{- else }} 50 | - --nfqueue-id=98 51 | {{- end }} 52 | {{- with .Values.daemonset.resources }} 53 | resources: 54 | {{- toYaml . | nindent 12 }} 55 | {{- end }} 56 | {{- with .Values.daemonset.securityContext }} 57 | securityContext: 58 | {{- toYaml . | nindent 12 }} 59 | {{- end }} 60 | env: 61 | - name: MY_NODE_NAME 62 | valueFrom: 63 | fieldRef: 64 | fieldPath: spec.nodeName 65 | {{- with .Values.daemonset.extraEnv }} 66 | {{- toYaml . 
| nindent 12 }} 67 | {{- end }} 68 | volumeMounts: 69 | - name: lib-modules 70 | mountPath: /lib/modules 71 | readOnly: true 72 | volumes: 73 | - name: lib-modules 74 | hostPath: 75 | path: /lib/modules 76 | -------------------------------------------------------------------------------- /install.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: kube-network-policies 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | - namespaces 12 | verbs: 13 | - list 14 | - watch 15 | - apiGroups: 16 | - "networking.k8s.io" 17 | resources: 18 | - networkpolicies 19 | verbs: 20 | - list 21 | - watch 22 | --- 23 | kind: ClusterRoleBinding 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | metadata: 26 | name: kube-network-policies 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: kube-network-policies 31 | subjects: 32 | - kind: ServiceAccount 33 | name: kube-network-policies 34 | namespace: kube-system 35 | --- 36 | apiVersion: v1 37 | kind: ServiceAccount 38 | metadata: 39 | name: kube-network-policies 40 | namespace: kube-system 41 | --- 42 | apiVersion: apps/v1 43 | kind: DaemonSet 44 | metadata: 45 | name: kube-network-policies 46 | namespace: kube-system 47 | labels: 48 | tier: node 49 | app: kube-network-policies 50 | k8s-app: kube-network-policies 51 | spec: 52 | selector: 53 | matchLabels: 54 | app: kube-network-policies 55 | template: 56 | metadata: 57 | labels: 58 | tier: node 59 | app: kube-network-policies 60 | k8s-app: kube-network-policies 61 | spec: 62 | hostNetwork: true 63 | nodeSelector: 64 | kubernetes.io/os: linux 65 | tolerations: 66 | - operator: Exists 67 | effect: NoSchedule 68 | serviceAccountName: kube-network-policies 69 | containers: 70 | - name: kube-network-policies 71 | image: registry.k8s.io/networking/kube-network-policies:v0.9.2 72 | args: 73 | - 
/bin/netpol 74 | - --hostname-override=$(MY_NODE_NAME) 75 | - --v=2 76 | - --nfqueue-id=98 77 | volumeMounts: 78 | - name: nri-plugin 79 | mountPath: /var/run/nri 80 | - name: netns 81 | mountPath: /var/run/netns 82 | mountPropagation: HostToContainer 83 | resources: 84 | requests: 85 | cpu: "100m" 86 | memory: "50Mi" 87 | securityContext: 88 | privileged: true 89 | capabilities: 90 | add: ["NET_ADMIN"] 91 | env: 92 | - name: MY_NODE_NAME 93 | valueFrom: 94 | fieldRef: 95 | fieldPath: spec.nodeName 96 | volumes: 97 | - name: nri-plugin 98 | hostPath: 99 | path: /var/run/nri 100 | - name: netns 101 | hostPath: 102 | path: /var/run/netns 103 | --- 104 | -------------------------------------------------------------------------------- /pkg/podinfo/podinfo.go: -------------------------------------------------------------------------------- 1 | package podinfo 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | coreinformers "k8s.io/client-go/informers/core/v1" 6 | corelisters "k8s.io/client-go/listers/core/v1" 7 | "k8s.io/client-go/tools/cache" 8 | "sigs.k8s.io/kube-network-policies/pkg/api" 9 | ) 10 | 11 | // InformerProvider is an implementation of Provider that uses 12 | // Kubernetes informers and an optional NRI plugin to find pod information. 13 | type InformerProvider struct { 14 | podIndexer cache.Indexer 15 | nsLister corelisters.NamespaceLister 16 | nodeLister corelisters.NodeLister 17 | resolvers []IPResolver 18 | } 19 | 20 | // NewInformerProvider creates a new pod info provider. 21 | // The nodeLister can be nil if node information is not required. 
22 | func NewInformerProvider( 23 | podInformer coreinformers.PodInformer, 24 | nsInfomer coreinformers.NamespaceInformer, 25 | nodeInformer coreinformers.NodeInformer, 26 | resolvers []IPResolver, 27 | ) *InformerProvider { 28 | provider := &InformerProvider{ 29 | podIndexer: podInformer.Informer().GetIndexer(), 30 | nsLister: nsInfomer.Lister(), 31 | resolvers: resolvers, 32 | } 33 | 34 | // nodeInformer is optional only used for AdminNetworkPolicies 35 | if nodeInformer != nil { 36 | provider.nodeLister = nodeInformer.Lister() 37 | } 38 | 39 | return provider 40 | } 41 | 42 | // getPodByIP finds a running pod by its IP address using the informer index 43 | // and falling back to the NRI plugin if available. 44 | func (p *InformerProvider) getPodByIP(podIP string) (*v1.Pod, bool) { 45 | for _, resolver := range p.resolvers { 46 | if podKey, ok := resolver.LookupPod(podIP); ok { 47 | obj, exists, err := p.podIndexer.GetByKey(podKey) 48 | if err == nil && exists { 49 | return obj.(*v1.Pod), true 50 | } 51 | } 52 | } 53 | if len(p.resolvers) > 0 { 54 | return nil, false 55 | } 56 | 57 | // if not resolver is provided use a linear search 58 | for _, obj := range p.podIndexer.List() { 59 | pod, ok := obj.(*v1.Pod) 60 | if !ok || pod.Spec.HostNetwork || len(pod.Status.PodIP) == 0 { 61 | continue 62 | } 63 | 64 | for _, ip := range pod.Status.PodIPs { 65 | if ip.IP == podIP { 66 | return pod, true 67 | } 68 | } 69 | } 70 | return nil, false 71 | } 72 | 73 | // GetPodInfoByIP implements the Provider interface. 
74 | func (p *InformerProvider) GetPodInfoByIP(podIP string) (*api.PodInfo, bool) { 75 | pod, ok := p.getPodByIP(podIP) 76 | if !ok { 77 | return nil, false 78 | } 79 | 80 | var nsLabels, nodeLabels map[string]string 81 | 82 | if p.nsLister != nil { 83 | ns, err := p.nsLister.Get(pod.Namespace) 84 | if err == nil { 85 | nsLabels = ns.Labels 86 | } 87 | } 88 | 89 | if p.nodeLister != nil { 90 | node, err := p.nodeLister.Get(pod.Spec.NodeName) 91 | if err == nil { 92 | nodeLabels = node.Labels 93 | } 94 | } 95 | 96 | return api.NewPodInfo(pod, nsLabels, nodeLabels, ""), true 97 | } 98 | -------------------------------------------------------------------------------- /install-anp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: kube-network-policies 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | - namespaces 12 | - nodes 13 | verbs: 14 | - list 15 | - watch 16 | - apiGroups: 17 | - "networking.k8s.io" 18 | resources: 19 | - networkpolicies 20 | verbs: 21 | - list 22 | - watch 23 | - apiGroups: 24 | - "policy.networking.k8s.io" 25 | resources: 26 | - adminnetworkpolicies 27 | - baselineadminnetworkpolicies 28 | verbs: 29 | - list 30 | - watch 31 | --- 32 | kind: ClusterRoleBinding 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | metadata: 35 | name: kube-network-policies 36 | roleRef: 37 | apiGroup: rbac.authorization.k8s.io 38 | kind: ClusterRole 39 | name: kube-network-policies 40 | subjects: 41 | - kind: ServiceAccount 42 | name: kube-network-policies 43 | namespace: kube-system 44 | --- 45 | apiVersion: v1 46 | kind: ServiceAccount 47 | metadata: 48 | name: kube-network-policies 49 | namespace: kube-system 50 | --- 51 | apiVersion: apps/v1 52 | kind: DaemonSet 53 | metadata: 54 | name: kube-network-policies 55 | namespace: kube-system 56 | labels: 57 | tier: node 58 | app: kube-network-policies 59 | 
k8s-app: kube-network-policies 60 | spec: 61 | selector: 62 | matchLabels: 63 | app: kube-network-policies 64 | template: 65 | metadata: 66 | labels: 67 | tier: node 68 | app: kube-network-policies 69 | k8s-app: kube-network-policies 70 | spec: 71 | hostNetwork: true 72 | nodeSelector: 73 | kubernetes.io/os: linux 74 | tolerations: 75 | - operator: Exists 76 | effect: NoSchedule 77 | serviceAccountName: kube-network-policies 78 | containers: 79 | - name: kube-network-policies 80 | image: registry.k8s.io/networking/kube-network-policies:v0.8.0 81 | args: 82 | - /bin/netpol 83 | - --hostname-override=$(MY_NODE_NAME) 84 | - --v=4 85 | - --nfqueue-id=89 86 | volumeMounts: 87 | - name: nri-plugin 88 | mountPath: /var/run/nri 89 | - name: netns 90 | mountPath: /var/run/netns 91 | mountPropagation: HostToContainer 92 | resources: 93 | requests: 94 | cpu: "100m" 95 | memory: "50Mi" 96 | securityContext: 97 | privileged: true 98 | capabilities: 99 | add: ["NET_ADMIN"] 100 | env: 101 | - name: MY_NODE_NAME 102 | valueFrom: 103 | fieldRef: 104 | fieldPath: spec.nodeName 105 | volumes: 106 | - name: nri-plugin 107 | hostPath: 108 | path: /var/run/nri 109 | - name: netns 110 | hostPath: 111 | path: /var/run/netns 112 | --- 113 | -------------------------------------------------------------------------------- /pkg/dataplane/conntrack.go: -------------------------------------------------------------------------------- 1 | package dataplane 2 | 3 | import ( 4 | "github.com/vishvananda/netlink" 5 | "golang.org/x/sys/unix" 6 | v1 "k8s.io/api/core/v1" 7 | "k8s.io/klog/v2" 8 | "sigs.k8s.io/kube-network-policies/pkg/network" 9 | ) 10 | 11 | var ( 12 | mapIPFamilyToString = map[uint8]v1.IPFamily{ 13 | unix.AF_INET: v1.IPv4Protocol, 14 | unix.AF_INET6: v1.IPv6Protocol, 15 | } 16 | mapProtocolToString = map[uint8]v1.Protocol{ 17 | unix.IPPROTO_TCP: v1.ProtocolTCP, 18 | unix.IPPROTO_UDP: v1.ProtocolUDP, 19 | unix.IPPROTO_SCTP: v1.ProtocolSCTP, 20 | } 21 | ) 22 | 23 | func 
PacketFromFlow(flow *netlink.ConntrackFlow) *network.Packet { 24 | if flow == nil { 25 | return nil 26 | } 27 | packet := network.Packet{ 28 | SrcIP: flow.Forward.SrcIP, 29 | DstIP: flow.Reverse.SrcIP, 30 | SrcPort: int(flow.Forward.SrcPort), 31 | DstPort: int(flow.Reverse.SrcPort), 32 | } 33 | 34 | if family, ok := mapIPFamilyToString[flow.FamilyType]; ok { 35 | packet.Family = family 36 | } else { 37 | klog.InfoS("Unknown IP family", "family", flow.FamilyType, "flow", flow) 38 | return nil 39 | } 40 | 41 | if protocol, ok := mapProtocolToString[flow.Forward.Protocol]; ok { 42 | packet.Proto = protocol 43 | } else { 44 | klog.InfoS("Unknown protocol", "protocol", flow.Forward.Protocol, "flow", flow) 45 | return nil 46 | } 47 | 48 | return &packet 49 | } 50 | 51 | // generateLabelMask creates a 16-byte (128-bit) mask with a single bit set at the 52 | // specified bitIndex. 53 | // If the bit index is out of the valid range [0, 127], it returns a 16-byte 54 | // slice of all zeros. 55 | // This function implements a Big Endia 128-bit layout. This means the 56 | // most significant byte (containing bits 127-120) is at index 0 of the 57 | // slice, and the least significant *byte* (containing bits 7-0) is at 58 | // index 15. 59 | func generateLabelMask(bitIndex int) []byte { 60 | labelMask := make([]byte, 16) 61 | if bitIndex < 0 || bitIndex > 127 { 62 | return labelMask 63 | } 64 | 65 | arrayIndex := len(labelMask) - (bitIndex / 8) - 1 66 | bitPos := uint(bitIndex % 8) 67 | mask := uint8(1) << bitPos 68 | labelMask[arrayIndex] = mask 69 | return labelMask 70 | } 71 | 72 | // clearLabelBit clears a specific bit in a 16-byte (128-bit) label and returns 73 | // a new 16-byte slice with the modified label. The original slice (currentLabel) 74 | // is not modified. 75 | // If currentLabel is not 16 bytes long, it returns a new, empty 16-byte slice. 76 | // If bitIndex is out of the valid range [0, 127], it returns a copy of the 77 | // original label. 
// clearLabelBit returns a new 16-byte (128-bit) label equal to currentLabel
// with the bit at bitIndex cleared. The input slice is never modified.
//
// If currentLabel is not exactly 16 bytes long, a fresh all-zero 16-byte
// slice is returned. If bitIndex is outside [0, 127], an unmodified copy of
// the label is returned. Layout is big-endian: byte 0 holds bits 127-120
// and byte 15 holds bits 7-0.
func clearLabelBit(currentLabel []byte, bitIndex int) []byte {
	out := make([]byte, 16)
	if len(currentLabel) != 16 {
		return out
	}
	copy(out, currentLabel)

	if bitIndex < 0 || bitIndex > 127 {
		return out
	}

	byteIdx := len(out) - 1 - bitIndex/8
	// &^ is Go's AND NOT: clear exactly the targeted bit.
	out[byteIdx] &^= uint8(1) << uint(bitIndex%8)
	return out
}
node 67 | app: kube-network-policies 68 | k8s-app: kube-network-policies 69 | spec: 70 | hostNetwork: true 71 | dnsPolicy: ClusterFirst 72 | nodeSelector: 73 | kubernetes.io/os: linux 74 | tolerations: 75 | - operator: Exists 76 | effect: NoSchedule 77 | serviceAccountName: kube-network-policies 78 | containers: 79 | - name: kube-network-policies 80 | image: registry.k8s.io/networking/kube-network-policies:v0.9.2-npa-v1alpha2 81 | args: 82 | - /bin/netpol 83 | - --hostname-override=$(MY_NODE_NAME) 84 | - --v=4 85 | - --nfqueue-id=89 86 | volumeMounts: 87 | - name: nri-plugin 88 | mountPath: /var/run/nri 89 | - name: netns 90 | mountPath: /var/run/netns 91 | mountPropagation: HostToContainer 92 | resources: 93 | requests: 94 | cpu: "100m" 95 | memory: "50Mi" 96 | securityContext: 97 | privileged: true 98 | capabilities: 99 | add: ["NET_ADMIN"] 100 | env: 101 | - name: MY_NODE_NAME 102 | valueFrom: 103 | fieldRef: 104 | fieldPath: spec.nodeName 105 | volumes: 106 | - name: nri-plugin 107 | hostPath: 108 | path: /var/run/nri 109 | - name: netns 110 | hostPath: 111 | path: /var/run/netns 112 | --- 113 | -------------------------------------------------------------------------------- /pkg/ipcache/ipcache_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "testing" 21 | 22 | "github.com/google/go-cmp/cmp" 23 | "google.golang.org/protobuf/testing/protocmp" 24 | v1 "k8s.io/api/core/v1" 25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 | 27 | "sigs.k8s.io/kube-network-policies/pkg/api" 28 | ) 29 | 30 | func TestPodAndNamespaceToPodInfo(t *testing.T) { 31 | pod := &v1.Pod{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: "test-pod", 34 | Namespace: "test-ns", 35 | Labels: map[string]string{"app": "test"}, 36 | }, 37 | Spec: v1.PodSpec{ 38 | NodeName: "test-node", 39 | Containers: []v1.Container{ 40 | { 41 | Name: "container-1", 42 | Ports: []v1.ContainerPort{ 43 | { 44 | Name: "http", 45 | ContainerPort: 80, 46 | Protocol: v1.ProtocolTCP, 47 | }, 48 | { 49 | Name: "metrics", 50 | ContainerPort: 9090, 51 | Protocol: v1.ProtocolTCP, 52 | }, 53 | }, 54 | }, 55 | { 56 | Name: "container-2", 57 | Ports: []v1.ContainerPort{ 58 | { 59 | Name: "dns", 60 | ContainerPort: 53, 61 | Protocol: v1.ProtocolUDP, 62 | }, 63 | }, 64 | }, 65 | }, 66 | }, 67 | } 68 | 69 | namespace := &v1.Namespace{ 70 | ObjectMeta: metav1.ObjectMeta{ 71 | Name: "test-ns", 72 | Labels: map[string]string{"kubernetes.io/metadata.name": "test-ns"}, 73 | }, 74 | } 75 | 76 | expectedPodInfo := &api.PodInfo{ 77 | Name: "test-pod", 78 | Labels: map[string]string{"app": "test"}, 79 | Namespace: &api.Namespace{ 80 | Name: "test-ns", 81 | Labels: map[string]string{"kubernetes.io/metadata.name": "test-ns"}, 82 | }, 83 | Node: &api.Node{ 84 | Name: "test-node", 85 | }, 86 | ClusterId: "test-cluster", 87 | ContainerPorts: []*api.ContainerPort{ 88 | {Name: "http", Port: 80, Protocol: "TCP"}, 89 | {Name: "metrics", Port: 9090, Protocol: "TCP"}, 90 | {Name: "dns", Port: 53, Protocol: "UDP"}, 91 | }, 92 | } 93 | 94 | podInfo := api.NewPodInfo(pod, namespace.Labels, nil, "test-cluster") 95 | podInfo.LastUpdated = 0 // clear it for the comparison 96 | 97 | if diff := cmp.Diff(expectedPodInfo, podInfo, 
protocmp.Transform()); diff != "" { 98 | t.Errorf("PodAndNamespaceToPodInfo() mismatch (-want +got):\n%s", diff) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /hack/boskos.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2024 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Source this file to be able to acquire a boskos project using 18 | # the acquire_project function. Must be run in prow, since acquiring 19 | # a boskos project expects the owner to be the prow job name (JOB_NAME). 20 | # Can set a custom boskos url by passing the BOSKOS_URL env var. 21 | 22 | set -o errexit 23 | set -o nounset 24 | set -o pipefail 25 | set -o xtrace 26 | 27 | if [[ -z "${BOSKOS_URL:-}" ]]; then 28 | BOSKOS_URL="http://boskos.test-pods.svc.cluster.local" 29 | fi 30 | 31 | # Acquires a "gce" project from boskos. Returns project by setting/exporting PROJECT env var. 32 | # Parameter: JOB_NAME is an env var set by prow. Parameter: BOSKOS_URL is an env var set above 33 | # by either passing in the env var, or using the default url. Starts/runs a heartbeat with 34 | # the returned bosko project. Returns an error if unable to acquire the boskos project. 
35 | acquire_project() { 36 | local project="" 37 | local project_type="gce-project" 38 | 39 | boskos_response=$(curl -X POST "${BOSKOS_URL}/acquire?type=${project_type}&state=free&dest=busy&owner=${JOB_NAME}") 40 | echo 41 | echo "DEBUG--Boskos Response: ${boskos_response}" 42 | echo 43 | if project=$(echo "${boskos_response}" | jq -r '.name'); then 44 | echo "Using GCP project: ${project}" 45 | PROJECT="${project}" 46 | export PROJECT 47 | heartbeat_project_forever & 48 | BOSKOS_HEARTBEAT_PID=$! 49 | export BOSKOS_HEARTBEAT_PID 50 | else 51 | (>&2 echo "ERROR: failed to acquire GCP project. boskos response was: ${boskos_response}") 52 | exit 1 53 | fi 54 | } 55 | 56 | # release the project back to boskos 57 | release_project() { 58 | curl -X POST "${BOSKOS_URL}/release?name=${PROJECT}&owner=${JOB_NAME}&dest=dirty" 59 | } 60 | 61 | # send a heartbeat to boskos for the project 62 | heartbeat_project() { 63 | curl -X POST "${BOSKOS_URL}/update?name=${PROJECT}&state=busy&owner=${JOB_NAME}" > /dev/null 2>&1 64 | } 65 | 66 | # heartbeat_project in an infinite loop 67 | heartbeat_project_forever() { 68 | set +x; 69 | local heartbeat_seconds=10 70 | while : 71 | do 72 | # always heartbeat, ignore failures 73 | heartbeat_project || true 74 | sleep ${heartbeat_seconds} 75 | done 76 | } 77 | 78 | cleanup_boskos () { 79 | # stop heartbeating 80 | kill "${BOSKOS_HEARTBEAT_PID}" || true 81 | # mark the project as dirty 82 | release_project 83 | } 84 | -------------------------------------------------------------------------------- /pkg/ipcache/etcd_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "fmt" 21 | "net" 22 | "testing" 23 | 24 | "github.com/google/go-cmp/cmp" 25 | "google.golang.org/protobuf/testing/protocmp" 26 | "sigs.k8s.io/kube-network-policies/pkg/api" 27 | ) 28 | 29 | func newTestEtcdStore(t *testing.T) *EtcdStore { 30 | t.Helper() 31 | etcdDir := t.TempDir() 32 | 33 | // Find a free port for the etcd server 34 | l, err := net.Listen("tcp", "127.0.0.1:0") 35 | if err != nil { 36 | t.Fatalf("failed to find free port: %v", err) 37 | } 38 | listenURL := fmt.Sprintf("http://%s", l.Addr().String()) 39 | l.Close() 40 | 41 | store, err := NewEtcdStore(listenURL, etcdDir) 42 | if err != nil { 43 | t.Fatalf("failed to create test etcd store: %v", err) 44 | } 45 | t.Cleanup(func() { store.Close() }) 46 | return store 47 | } 48 | 49 | func TestEtcdStore_Store(t *testing.T) { 50 | store := newTestEtcdStore(t) 51 | 52 | podInfo1 := &api.PodInfo{Name: "pod1"} 53 | podInfo2 := &api.PodInfo{Name: "pod2"} 54 | 55 | tests := []struct { 56 | name string 57 | ip string 58 | podInfo *api.PodInfo 59 | }{ 60 | { 61 | name: "pod1", 62 | ip: "192.168.1.1", 63 | podInfo: podInfo1, 64 | }, 65 | { 66 | name: "pod2", 67 | ip: "192.168.1.2", 68 | podInfo: podInfo2, 69 | }, 70 | } 71 | 72 | // Test Upsert and Get 73 | for _, tt := range tests { 74 | t.Run(tt.name, func(t *testing.T) { 75 | err := store.Upsert(tt.ip, tt.podInfo) 76 | if err != nil { 77 | t.Fatalf("Upsert() error = %v", err) 78 | } 79 | 80 | got, found := store.GetPodInfoByIP(tt.ip) 81 | if !found { 82 | 
t.Fatalf("GetPodInfoByIP() not found for ip %s", tt.ip) 83 | } 84 | if diff := cmp.Diff(tt.podInfo, got, protocmp.Transform()); diff != "" { 85 | t.Errorf("GetPodInfoByIP() mismatch (-want +got):\n%s", diff) 86 | } 87 | }) 88 | } 89 | 90 | // Test List 91 | list, err := store.List() 92 | if err != nil { 93 | t.Fatalf("List() error = %v", err) 94 | } 95 | if len(list) != 2 { 96 | t.Errorf("len(List()) = %d; want 2", len(list)) 97 | } 98 | 99 | // Test Delete 100 | err = store.Delete("192.168.1.1") 101 | if err != nil { 102 | t.Fatalf("Delete() error = %v", err) 103 | } 104 | 105 | // Verify deletion 106 | _, found := store.GetPodInfoByIP("192.168.1.1") 107 | if found { 108 | t.Error("item found after deletion") 109 | } 110 | 111 | // Verify that the other item still exists 112 | _, found = store.GetPodInfoByIP("192.168.1.2") 113 | if !found { 114 | t.Error("item not found after deleting another item") 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /pkg/dns/domainmap.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: APACHE-2.0 2 | 3 | package dns 4 | 5 | import ( 6 | "net" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/armon/go-radix" 12 | "k8s.io/utils/clock" 13 | ) 14 | 15 | type ipEntries map[string]time.Time // ip : expire time 16 | 17 | type domainMap struct { 18 | mu sync.RWMutex 19 | clock clock.Clock 20 | tree *radix.Tree 21 | } 22 | 23 | // reverseDomain reverses a domain name string. 
24 | func reverseDomain(domain string) string { 25 | parts := strings.Split(domain, ".") 26 | for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { 27 | parts[i], parts[j] = parts[j], parts[i] 28 | } 29 | return strings.Join(parts, ".") 30 | } 31 | 32 | func (i *domainMap) add(domain string, ips []net.IP, ttl int) { 33 | i.mu.Lock() 34 | defer i.mu.Unlock() 35 | // reverse the domain since the radix tree match on prefixes 36 | domain = reverseDomain(domain) 37 | // default ttl 38 | var finalTTL time.Duration 39 | if ttl == 0 { 40 | finalTTL = expireTimeout 41 | } else { 42 | finalTTL = time.Duration(ttl) * time.Second 43 | } 44 | // cap max ttl 45 | if finalTTL > maxTTL { 46 | finalTTL = maxTTL 47 | } 48 | expireTime := i.clock.Now().Add(finalTTL) 49 | var entries ipEntries 50 | v, ok := i.tree.Get(domain) 51 | if !ok { 52 | entries = make(ipEntries) 53 | } else { 54 | entries = v.(ipEntries) 55 | } 56 | for _, ip := range ips { 57 | entries[ip.String()] = expireTime 58 | } 59 | i.tree.Insert(domain, entries) 60 | } 61 | 62 | // contains returns true if the given domain contains the specified IP 63 | func (i *domainMap) containsIP(domain string, ip net.IP) bool { 64 | i.mu.RLock() 65 | defer i.mu.RUnlock() 66 | 67 | // reverse the domain since the radix tree match on prefixes 68 | domain = reverseDomain(domain) 69 | // wildcard 70 | var foundInWildcard bool 71 | if strings.HasSuffix(domain, "*") { 72 | i.tree.WalkPrefix(strings.TrimSuffix(domain, "*"), func(d string, v interface{}) bool { 73 | entries, ok := v.(ipEntries) 74 | if !ok { 75 | return false 76 | } 77 | if v, ok := entries[ip.String()]; ok && v.After(i.clock.Now()) { 78 | foundInWildcard = true 79 | return true 80 | } 81 | return false 82 | }) 83 | return foundInWildcard 84 | } else { 85 | // exact match 86 | v, ok := i.tree.Get(domain) 87 | if !ok { 88 | return false 89 | } 90 | 91 | entries, ok := v.(ipEntries) 92 | if !ok { 93 | return false 94 | } 95 | 96 | // check if the entry is still valid 97 | 
if v, ok := entries[ip.String()]; ok && v.After(i.clock.Now()) { 98 | return true 99 | } 100 | return false 101 | } 102 | } 103 | 104 | func (i *domainMap) gc() { 105 | i.mu.Lock() 106 | defer i.mu.Unlock() 107 | now := i.clock.Now() 108 | newTree := radix.New() 109 | i.tree.Walk(func(domain string, v interface{}) bool { 110 | entries, ok := v.(ipEntries) 111 | if !ok { 112 | return false 113 | } 114 | newEntries := make(ipEntries) 115 | for ip, expiredTime := range entries { 116 | if expiredTime.After(now) { 117 | newEntries[ip] = expiredTime 118 | } 119 | } 120 | if len(newEntries) > 0 { 121 | newTree.Insert(domain, newEntries) 122 | } 123 | return false 124 | }) 125 | 126 | i.tree = nil 127 | i.tree = newTree 128 | } 129 | -------------------------------------------------------------------------------- /pkg/ipcache/bbolt_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | "github.com/google/go-cmp/cmp" 24 | "google.golang.org/protobuf/testing/protocmp" 25 | "sigs.k8s.io/kube-network-policies/pkg/api" 26 | ) 27 | 28 | func newTestBoltStore(t *testing.T) *BoltStore { 29 | t.Helper() 30 | dbPath := filepath.Join(t.TempDir(), "test.db") 31 | store, err := NewBoltStore(dbPath) 32 | if err != nil { 33 | t.Fatalf("failed to create test bolt store: %v", err) 34 | } 35 | t.Cleanup(func() { store.Close() }) 36 | return store 37 | } 38 | 39 | func TestBoltStore_Store(t *testing.T) { 40 | store := newTestBoltStore(t) 41 | 42 | podInfo1 := &api.PodInfo{Name: "pod1"} 43 | podInfo2 := &api.PodInfo{Name: "pod2"} 44 | 45 | tests := []struct { 46 | name string 47 | ip string 48 | podInfo *api.PodInfo 49 | }{ 50 | { 51 | name: "pod1", 52 | ip: "192.168.1.1", 53 | podInfo: podInfo1, 54 | }, 55 | { 56 | name: "pod2", 57 | ip: "192.168.1.2", 58 | podInfo: podInfo2, 59 | }, 60 | } 61 | 62 | // Test Upsert and Get 63 | for _, tt := range tests { 64 | t.Run(tt.name, func(t *testing.T) { 65 | err := store.Upsert(tt.ip, tt.podInfo) 66 | if err != nil { 67 | t.Fatalf("Upsert() error = %v", err) 68 | } 69 | 70 | got, found := store.GetPodInfoByIP(tt.ip) 71 | if !found { 72 | t.Fatalf("GetPodInfoByIP() not found for ip %s", tt.ip) 73 | } 74 | if diff := cmp.Diff(tt.podInfo, got, protocmp.Transform()); diff != "" { 75 | t.Errorf("GetPodInfoByIP() mismatch (-want +got):\n%s", diff) 76 | } 77 | }) 78 | } 79 | 80 | // Test List 81 | list, err := store.List() 82 | if err != nil { 83 | t.Fatalf("List() error = %v", err) 84 | } 85 | if len(list) != 2 { 86 | t.Errorf("len(List()) = %d; want 2", len(list)) 87 | } 88 | 89 | // Test Delete 90 | err = store.Delete("192.168.1.1") 91 | if err != nil { 92 | t.Fatalf("Delete() error = %v", err) 93 | } 94 | 95 | // Verify deletion 96 | _, found := store.GetPodInfoByIP("192.168.1.1") 97 | if found { 98 | t.Error("item found after 
deletion") 99 | } 100 | 101 | // Verify that the other item still exists 102 | _, found = store.GetPodInfoByIP("192.168.1.2") 103 | if !found { 104 | t.Error("item not found after deleting another item") 105 | } 106 | } 107 | 108 | func TestBoltStore_SyncMetadata(t *testing.T) { 109 | store := newTestBoltStore(t) 110 | 111 | metadata := &SyncMetadata{ 112 | Revision: 12345, 113 | ClusterID: 67890, 114 | MemberID: 54321, 115 | } 116 | 117 | err := store.SetSyncMetadata(metadata) 118 | if err != nil { 119 | t.Fatalf("SetSyncMetadata() error = %v", err) 120 | } 121 | 122 | got, err := store.GetSyncMetadata() 123 | if err != nil { 124 | t.Fatalf("GetSyncMetadata() error = %v", err) 125 | } 126 | 127 | if diff := cmp.Diff(metadata, got); diff != "" { 128 | t.Errorf("GetSyncMetadata() mismatch (-want +got):\n%s", diff) 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /pkg/ipcache/lru_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "testing" 21 | 22 | "github.com/google/go-cmp/cmp" 23 | "google.golang.org/protobuf/testing/protocmp" 24 | "sigs.k8s.io/kube-network-policies/pkg/api" 25 | ) 26 | 27 | func TestLRUStore(t *testing.T) { 28 | store := NewLocalIPCache() 29 | lruStore := NewLRUStore(store, 2) 30 | 31 | podInfo1 := &api.PodInfo{Name: "pod1"} 32 | podInfo2 := &api.PodInfo{Name: "pod2"} 33 | podInfo3 := &api.PodInfo{Name: "pod3"} 34 | 35 | tests := []struct { 36 | name string 37 | ip string 38 | podInfo *api.PodInfo 39 | }{ 40 | { 41 | name: "pod1", 42 | ip: "192.168.1.1", 43 | podInfo: podInfo1, 44 | }, 45 | { 46 | name: "pod2", 47 | ip: "192.168.1.2", 48 | podInfo: podInfo2, 49 | }, 50 | { 51 | name: "pod3", 52 | ip: "192.168.1.3", 53 | podInfo: podInfo3, 54 | }, 55 | } 56 | 57 | // Test Upsert and Get 58 | for _, tt := range tests { 59 | t.Run(tt.name, func(t *testing.T) { 60 | err := lruStore.Upsert(tt.ip, tt.podInfo) 61 | if err != nil { 62 | t.Fatalf("Upsert() error = %v", err) 63 | } 64 | 65 | // Verify that the item is in the LRU cache 66 | got, found := lruStore.lru.Get(tt.ip) 67 | if !found { 68 | t.Fatalf("item not found in LRU cache for ip %s", tt.ip) 69 | } 70 | if diff := cmp.Diff(tt.podInfo, got.(*api.PodInfo), protocmp.Transform()); diff != "" { 71 | t.Errorf("LRU cache mismatch (-want +got):\n%s", diff) 72 | } 73 | 74 | // Verify that the item is in the underlying store 75 | got, found = store.GetPodInfoByIP(tt.ip) 76 | if !found { 77 | t.Fatalf("item not found in store for ip %s", tt.ip) 78 | } 79 | if diff := cmp.Diff(tt.podInfo, got, protocmp.Transform()); diff != "" { 80 | t.Errorf("store mismatch (-want +got):\n%s", diff) 81 | } 82 | }) 83 | } 84 | 85 | // Test LRU eviction 86 | _, found := lruStore.lru.Get("192.168.1.1") 87 | if found { 88 | t.Error("expected item to be evicted from LRU cache") 89 | } 90 | 91 | // Test Delete 92 | err := lruStore.Delete("192.168.1.2") 93 | if err != nil { 94 | 
t.Fatalf("Delete() error = %v", err) 95 | } 96 | 97 | // Verify that the item is not in the LRU cache 98 | _, found = lruStore.lru.Get("192.168.1.2") 99 | if found { 100 | t.Error("item found in LRU cache after deletion") 101 | } 102 | 103 | // Verify that the item is not in the underlying store 104 | _, found = store.GetPodInfoByIP("192.168.1.2") 105 | if found { 106 | t.Error("item found in store after deletion") 107 | } 108 | 109 | // Test List 110 | list, err := lruStore.List() 111 | if err != nil { 112 | t.Fatalf("List() error = %v", err) 113 | } 114 | if len(list) != 2 { 115 | t.Errorf("len(List()) = %d; want 2", len(list)) 116 | } 117 | 118 | // Test List with no store 119 | lruStoreNoStore := NewLRUStore(nil, 2) 120 | _, err = lruStoreNoStore.List() 121 | if err == nil { 122 | t.Errorf("List() with no store should return an error") 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /pkg/cmd/cmd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "net/http" 8 | "os" 9 | "runtime/debug" 10 | 11 | "github.com/prometheus/client_golang/prometheus/promhttp" 12 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 13 | "k8s.io/klog/v2" 14 | 15 | "sigs.k8s.io/kube-network-policies/pkg/dataplane" 16 | "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" 17 | ) 18 | 19 | // Options contains the common command-line options. 20 | type Options struct { 21 | Kubeconfig string 22 | FailOpen bool 23 | QueueID int 24 | MetricsBindAddress string 25 | HostnameOverride string 26 | NetfilterBug1766Fix bool 27 | DisableNRI bool 28 | StrictMode bool 29 | } 30 | 31 | // NewOptions creates a new Options object with default values. 32 | func NewOptions() *Options { 33 | return &Options{} 34 | } 35 | 36 | // AddFlags adds the common flags to the provided flag set. 
37 | func (o *Options) AddFlags(fs *flag.FlagSet) { 38 | fs.StringVar(&o.Kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file") 39 | fs.BoolVar(&o.FailOpen, "fail-open", true, "If set, don't drop packets if the controller is not running") 40 | fs.IntVar(&o.QueueID, "nfqueue-id", 100, "Number of the nfqueue used") 41 | fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":9080", "The IP address and port for the metrics server to serve on") 42 | fs.StringVar(&o.HostnameOverride, "hostname-override", "", "If non-empty, will be used as the name of the Node that kube-network-policies is running on. If unset, the node name is assumed to be the same as the node's hostname.") 43 | fs.BoolVar(&o.NetfilterBug1766Fix, "netfilter-bug-1766-fix", true, "If set, process DNS packets on the PREROUTING hooks to avoid the race condition on the conntrack subsystem, not needed for kernels 6.12+ (see https://bugzilla.netfilter.org/show_bug.cgi?id=1766)") 44 | fs.BoolVar(&o.DisableNRI, "disable-nri", false, "If set, disable NRI, that is used to get the Pod IP information directly from the runtime to avoid the race explained in https://issues.k8s.io/85966") 45 | fs.BoolVar(&o.StrictMode, "strict-mode", true, "If set, changes to network policies also affect established connections") 46 | 47 | fs.Usage = func() { 48 | fmt.Fprint(os.Stderr, "Usage: kube-network-policies [options]\n\n") 49 | fs.PrintDefaults() 50 | } 51 | } 52 | 53 | // Start starts the common application components. 
54 | func Start(ctx context.Context, policyEngine *networkpolicy.PolicyEngine, dpConfig dataplane.Config, metricsBindAddress string) { 55 | logger := klog.FromContext(ctx) 56 | 57 | printVersion() 58 | 59 | // Start metrics server 60 | http.Handle("/metrics", promhttp.Handler()) 61 | go func() { 62 | err := http.ListenAndServe(metricsBindAddress, nil) 63 | if err != nil { 64 | utilruntime.HandleError(fmt.Errorf("metrics server failed: %w", err)) 65 | } 66 | }() 67 | 68 | // Start dataplane controller 69 | networkPolicyController, err := dataplane.NewController( 70 | policyEngine, 71 | dpConfig, 72 | ) 73 | if err != nil { 74 | logger.Error(err, "failed to create dataplane controller") 75 | // It's better to crash loud 76 | panic(err) 77 | } 78 | go func() { 79 | if err := networkPolicyController.Run(ctx); err != nil { 80 | utilruntime.HandleError(fmt.Errorf("dataplane controller failed: %w", err)) 81 | } 82 | }() 83 | } 84 | 85 | func printVersion() { 86 | info, ok := debug.ReadBuildInfo() 87 | if !ok { 88 | return 89 | } 90 | var vcsRevision, vcsTime string 91 | for _, f := range info.Settings { 92 | switch f.Key { 93 | case "vcs.revision": 94 | vcsRevision = f.Value 95 | case "vcs.time": 96 | vcsTime = f.Value 97 | } 98 | } 99 | klog.Infof("kube-network-policies go %s build: %s time: %s", info.GoVersion, vcsRevision, vcsTime) 100 | } 101 | -------------------------------------------------------------------------------- /pkg/ipcache/lru.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | package ipcache 17 | 18 | import ( 19 | "errors" 20 | "sync" 21 | 22 | "k8s.io/klog/v2" 23 | "k8s.io/utils/lru" 24 | "sigs.k8s.io/kube-network-policies/pkg/api" 25 | ) 26 | 27 | // LRUStore is a decorator for a Store that adds an in-memory LRU cache. 28 | // It ensures that the LRU cache is kept consistent with the underlying store 29 | // for operations passing through it. 30 | type LRUStore struct { 31 | mu sync.Mutex 32 | lru *lru.Cache 33 | store Store 34 | } 35 | 36 | var _ Store = &LRUStore{} 37 | var _ api.PodInfoProvider = &LRUStore{} 38 | 39 | // NewLRUStore creates a new LRUStore. 40 | // Size 0 means no limit. 41 | func NewLRUStore(store Store, size int) *LRUStore { 42 | return &LRUStore{ 43 | lru: lru.New(size), 44 | store: store, 45 | } 46 | } 47 | 48 | // Get first checks the LRU cache. If the item is not found, it falls back 49 | // to the underlying store and adds the item to the LRU cache for future lookups. 50 | func (s *LRUStore) GetPodInfoByIP(ip string) (*api.PodInfo, bool) { 51 | s.mu.Lock() 52 | defer s.mu.Unlock() 53 | klog.V(7).Infof("Get LRU(%s)", ip) 54 | if val, ok := s.lru.Get(ip); ok { 55 | return val.(*api.PodInfo), true 56 | } 57 | if s.store != nil { 58 | klog.V(7).Infof("Get Store(%s)", ip) 59 | info, found := s.store.GetPodInfoByIP(ip) 60 | if found { 61 | s.lru.Add(ip, info) 62 | } 63 | return info, found 64 | } 65 | 66 | return nil, false 67 | } 68 | 69 | // Upsert adds/updates the item in the LRU cache and then passes the operation 70 | // to the underlying store. 
71 | func (s *LRUStore) Upsert(ip string, info *api.PodInfo) error { 72 | s.mu.Lock() 73 | defer s.mu.Unlock() 74 | klog.V(7).Infof("Upsert LRU(%s)", ip) 75 | s.lru.Add(ip, info) 76 | if s.store != nil { 77 | klog.V(7).Infof("Upsert Store(%s)", ip) 78 | return s.store.Upsert(ip, info) 79 | } 80 | return nil 81 | } 82 | 83 | // Delete removes the item from the LRU cache and then passes the operation 84 | // to the underlying store. 85 | func (s *LRUStore) Delete(ip string) error { 86 | s.mu.Lock() 87 | defer s.mu.Unlock() 88 | klog.V(7).Infof("Delete LRU(%s)", ip) 89 | s.lru.Remove(ip) 90 | if s.store != nil { 91 | klog.V(7).Infof("Delete Store(%s)", ip) 92 | return s.store.Delete(ip) 93 | } 94 | return nil 95 | 96 | } 97 | 98 | // List returns all items from the underlying store. 99 | // Note: This operation does not interact with the LRU cache and will 100 | // return an error if the store is not configured. 101 | func (s *LRUStore) List() ([]*api.PodInfo, error) { 102 | s.mu.Lock() 103 | defer s.mu.Unlock() 104 | if s.store != nil { 105 | return s.store.List() 106 | } 107 | // The LRU cache itself does not support listing all items. 108 | return nil, errors.New("List operation is not supported for in-memory-only cache") 109 | } 110 | 111 | // Clear removes all items from the underlying store and the LRU cache. 112 | func (s *LRUStore) Clear() error { 113 | s.mu.Lock() 114 | defer s.mu.Unlock() 115 | s.lru.Clear() // Clear the LRU cache 116 | if s.store != nil { 117 | return s.store.Clear() 118 | } 119 | return nil 120 | } 121 | 122 | // Close closes the underlying store, if it exists. 
123 | func (s *LRUStore) Close() error { 124 | s.mu.Lock() 125 | defer s.mu.Unlock() 126 | if s.store != nil { 127 | return s.store.Close() 128 | } 129 | return nil 130 | } 131 | -------------------------------------------------------------------------------- /pkg/dns/domainmap_test.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: APACHE-2.0 2 | 3 | package dns 4 | 5 | import ( 6 | "net" 7 | "testing" 8 | "time" 9 | 10 | "github.com/armon/go-radix" 11 | testingclock "k8s.io/utils/clock/testing" 12 | ) 13 | 14 | func TestReverseDomain(t *testing.T) { 15 | tests := []struct { 16 | input string 17 | want string 18 | }{ 19 | {"", ""}, 20 | {"com", "com"}, 21 | {"example.com", "com.example"}, 22 | {"www.example.com", "com.example.www"}, 23 | {"a.b.c.d", "d.c.b.a"}, 24 | {"*.b.c.d", "d.c.b.*"}, 25 | } 26 | for _, tt := range tests { 27 | t.Run(tt.input, func(t *testing.T) { 28 | if got := reverseDomain(tt.input); got != tt.want { 29 | t.Errorf("reverseDomain(%q) = %q, want %q", tt.input, got, tt.want) 30 | } 31 | }) 32 | } 33 | } 34 | 35 | func TestIPCache(t *testing.T) { 36 | clock := testingclock.NewFakeClock(time.Now()) 37 | c := &domainMap{ 38 | clock: clock, 39 | tree: radix.New(), 40 | } 41 | 42 | hostv4 := "host.com" 43 | hostv6 := "hostv6.com" 44 | ip4 := net.ParseIP("1.2.3.4") 45 | ip4_2 := net.ParseIP("5.6.7.8") 46 | ip6 := net.ParseIP("2001:db8::1") 47 | 48 | // Test adding and retrieving IPv4 and IPv6 entries 49 | c.add(hostv4, []net.IP{ip4, ip4_2}, int(maxTTL.Seconds())) 50 | c.add(hostv6, []net.IP{ip6}, int(maxTTL.Seconds())) 51 | 52 | if ok := c.containsIP(hostv4, ip4); !ok { 53 | t.Errorf("Failed to retrieve IPv4 entry") 54 | } 55 | if ok := c.containsIP(hostv4, ip4_2); !ok { 56 | t.Errorf("Failed to retrieve IPv4 entry") 57 | } 58 | if ok := c.containsIP(hostv6, ip6); !ok { 59 | t.Errorf("Failed to retrieve IPv6 entry") 60 | } 61 | 62 | // Test retrieving non-existent entry 63 | if ok := 
c.containsIP("nonexistent.com", ip4); ok { 64 | t.Errorf("Retrieved non-existent entry") 65 | } 66 | 67 | // Test expire entries 68 | clock.SetTime(clock.Now().Add(time.Hour)) 69 | 70 | if ok := c.containsIP(hostv4, ip4); ok { 71 | t.Errorf("Unexpected entry") 72 | } 73 | if ok := c.containsIP(hostv6, ip6); ok { 74 | t.Errorf("Unexpected entry") 75 | } 76 | 77 | } 78 | 79 | func TestIPCacheGC(t *testing.T) { 80 | clock := testingclock.NewFakeClock(time.Now()) 81 | c := &domainMap{ 82 | clock: clock, 83 | tree: radix.New(), 84 | } 85 | 86 | hostv4 := "host.com" 87 | hostv6 := "hostv6.com" 88 | ip4 := net.ParseIP("1.2.3.4") 89 | ip6 := net.ParseIP("2001:db8::1") 90 | 91 | // Test adding and retrieving IPv4 and IPv6 entries 92 | c.add(hostv4, []net.IP{ip4}, int(expireTimeout.Seconds())) 93 | c.add(hostv6, []net.IP{ip6}, int(maxTTL.Seconds())) 94 | 95 | if ok := c.containsIP(hostv4, ip4); !ok { 96 | t.Errorf("Failed to retrieve IPv4 entry") 97 | } 98 | if ok := c.containsIP(hostv6, ip6); !ok { 99 | t.Errorf("Failed to retrieve IPv6 entry") 100 | } 101 | // Test expire entries 102 | clock.SetTime(clock.Now().Add(maxTTL - 1*time.Second)) 103 | c.gc() 104 | 105 | if ok := c.containsIP(hostv4, ip4); ok { 106 | t.Errorf("Unexpected entry") 107 | } 108 | if ok := c.containsIP(hostv6, ip6); !ok { 109 | t.Errorf("expected entry") 110 | } 111 | } 112 | 113 | func TestDomainMap_Wildcard(t *testing.T) { 114 | now := time.Now() 115 | clock := testingclock.NewFakeClock(now) 116 | c := &domainMap{ 117 | clock: clock, 118 | tree: radix.New(), 119 | } 120 | 121 | wildcardHost := "*.example.com" 122 | specificHost := "www.example.com" 123 | otherHost := "test.org" 124 | ipExample := net.ParseIP("5.6.7.8") 125 | ipOther := net.ParseIP("4.3.2.1") 126 | 127 | c.add(specificHost, []net.IP{ipExample}, int(expireTimeout.Seconds())) 128 | c.add(otherHost, []net.IP{ipOther}, int(expireTimeout.Seconds())) 129 | 130 | t.Run("Wildcard match", func(t *testing.T) { 131 | if ok := 
c.containsIP(wildcardHost, ipExample); !ok { 132 | t.Errorf("containsIP(%q, %q) with wildcard = false, want true", wildcardHost, ipExample) 133 | } 134 | }) 135 | 136 | t.Run("Wildcard no match IP", func(t *testing.T) { 137 | if ok := c.containsIP(wildcardHost, ipOther); ok { 138 | t.Errorf("containsIP(%q, %q) with wildcard = true, want false", wildcardHost, ipOther) 139 | } 140 | }) 141 | 142 | t.Run("Wildcard no match domain", func(t *testing.T) { 143 | if ok := c.containsIP("another.domain.org", ipExample); ok { 144 | t.Errorf("containsIP(%q, %q) with wildcard = true, want false", "another.domain.org", ipExample) 145 | } 146 | }) 147 | 148 | t.Run("Wildcard expiration", func(t *testing.T) { 149 | clock.SetTime(now.Add(expireTimeout).Add(time.Second)) 150 | if ok := c.containsIP(wildcardHost, ipExample); ok { 151 | t.Errorf("containsIP(%q, %q) after wildcard expiration = true, want false", specificHost, ipExample) 152 | } 153 | }) 154 | } 155 | -------------------------------------------------------------------------------- /tests/e2e_npa_v1alpha2.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup_file() { 4 | export REGISTRY="registry.k8s.io/networking" 5 | export IMAGE_NAME="kube-network-policies" 6 | export TAG="test" 7 | 8 | # Build the image for the specific binary and architecture 9 | ( 10 | cd "$BATS_TEST_DIRNAME"/.. 
11 | TAG="$TAG" make image-build-npa-v1alpha2 12 | ) 13 | 14 | # Apply CRDs required ClusterNetworkPolicy, use experimental for FQDN support 15 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/network-policy-api/main/config/crd/experimental/policy.networking.k8s.io_clusternetworkpolicies.yaml 16 | 17 | # Load the Docker image into the kind cluster 18 | kind load docker-image "$REGISTRY/$IMAGE_NAME:$TAG"-npa-v1alpha2 --name "$CLUSTER_NAME" 19 | 20 | # Install kube-network-policies 21 | _install=$(sed "s#$REGISTRY/$IMAGE_NAME.*#$REGISTRY/$IMAGE_NAME:$TAG-npa-v1alpha2#" < "$BATS_TEST_DIRNAME"/../install-cnp.yaml) 22 | printf '%s' "${_install}" | kubectl apply -f - 23 | kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-network-policies 24 | } 25 | 26 | teardown_file() { 27 | _install=$(sed "s#$REGISTRY/$IMAGE_NAME.*#$REGISTRY/$IMAGE_NAME:$TAG-npa-v1alpha2#" < "$BATS_TEST_DIRNAME"/../install-cnp.yaml) 28 | printf '%s' "${_install}" | kubectl delete -f - 29 | 30 | kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/network-policy-api/main/config/crd/experimental/policy.networking.k8s.io_clusternetworkpolicies.yaml 31 | } 32 | 33 | setup() { 34 | kubectl create namespace dev 35 | kubectl label namespace/dev purpose=testing 36 | 37 | kubectl create namespace prod 38 | kubectl label namespace/prod purpose=production 39 | } 40 | 41 | teardown() { 42 | kubectl delete namespace prod 43 | kubectl delete namespace dev 44 | } 45 | 46 | @test "Maintaining an allowlist of domains" { 47 | # https://network-policy-api.sigs.k8s.io/npeps/npep-133-fqdn-egress-selector/#maintaining-an-allowlist-of-domains 48 | 49 | kubectl apply -f - < 27 | Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ 28 | Licensed to The Apache Software Foundation, http://www.apache.org/ 29 | 30 | Benchmarking test-service (be patient) 31 | Completed 1000 requests 32 | Completed 2000 requests 33 | Completed 3000 requests 34 | 
Completed 4000 requests 35 | Completed 5000 requests 36 | Completed 6000 requests 37 | Completed 7000 requests 38 | Completed 8000 requests 39 | Completed 9000 requests 40 | Completed 10000 requests 41 | Finished 10000 requests 42 | 43 | 44 | Server Software: 45 | Server Hostname: test-service 46 | Server Port: 80 47 | 48 | Document Path: / 49 | Document Length: 60 bytes 50 | 51 | Concurrency Level: 1000 52 | Time taken for tests: 4.317 seconds 53 | Complete requests: 10000 54 | Failed requests: 1274 55 | (Connect: 0, Receive: 0, Length: 1274, Exceptions: 0) 56 | Total transferred: 1768597 bytes 57 | HTML transferred: 598597 bytes 58 | Requests per second: 2316.61 [#/sec] (mean) 59 | Time per request: 431.666 [ms] (mean) 60 | Time per request: 0.432 [ms] (mean, across all concurrent requests) 61 | Transfer rate: 400.11 [Kbytes/sec] received 62 | 63 | Connection Times (ms) 64 | min mean[+/-sd] median max 65 | Connect: 0 188 571.9 4 4121 66 | Processing: 0 2 5.3 0 42 67 | Waiting: 0 1 2.8 0 32 68 | Total: 0 190 571.8 5 4122 69 | 70 | Percentage of the requests served within a certain time (ms) 71 | 50% 5 72 | 66% 7 73 | 75% 22 74 | 80% 24 75 | 90% 1023 76 | 95% 1046 77 | 98% 2063 78 | 99% 3080 79 | 100% 4122 (longest request) 80 | ``` 81 | 82 | You have to tune your system as it is most likely you reach limits in some of the different resources, specially in the conntrack table 83 | 84 | ``` 85 | [1825525.815672] net_ratelimit: 411 callbacks suppressed 86 | [1825525.815676] nf_conntrack: nf_conntrack: table full, dropping packet 87 | [1825525.827617] nf_conntrack: nf_conntrack: table full, dropping packet 88 | [1825525.834317] nf_conntrack: nf_conntrack: table full, dropping packet 89 | [1825525.841058] nf_conntrack: nf_conntrack: table full, dropping packet 90 | [1825525.847764] nf_conntrack: nf_conntrack: table full, dropping packet 91 | [1825525.854458] nf_conntrack: nf_conntrack: table full, dropping packet 92 | [1825525.861131] nf_conntrack: nf_conntrack: table 
full, dropping packet 93 | [1825525.867814] nf_conntrack: nf_conntrack: table full, dropping packet 94 | [1825525.874505] nf_conntrack: nf_conntrack: table full, dropping packet 95 | [1825525.881186] nf_conntrack: nf_conntrack: table full, dropping packet 96 | ``` 97 | 98 | Check the current max number of conntrack entries allowed and tune accordingly 99 | 100 | ``` 101 | cat /proc/sys/net/netfilter/nf_conntrack_max 102 | 262144 103 | ``` 104 | 105 | 106 | 4. Observe the metrics in prometheus or grafana 107 | 108 | 109 | ![Packet Processing Latency](network_policies_latency.png "Packet Processing Latency") 110 | ![Packet Rate](network_policies_packet_rate.png "Packet Rate") 111 | 112 | 113 | ## Future work 114 | 115 | We are interested in understanding the following variables 116 | 117 | * Memory and CPU consumption 118 | * Latency on packet processing 119 | * Latency to apply a network policy since it has been created 120 | 121 | This can be microbenchmarked easily, using one Node or a Kind cluster and adding fake nodes and pods https://developer.ibm.com/tutorials/awb-using-kwok-to-simulate-a-large-kubernetes-openshift-cluster/ and running scenarios in just one node with the different variables 122 | 123 | 124 | Inputs: 125 | 126 | * New connections per second 127 | * Number of Pods on the cluster (affected or not affected by network policies) 128 | * Number of Network Policies impacting the connections 129 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REPO_ROOT:=${CURDIR} 2 | OUT_DIR=$(REPO_ROOT)/bin 3 | 4 | # Go build settings 5 | GO111MODULE=on 6 | CGO_ENABLED=0 7 | export GO111MODULE CGO_ENABLED 8 | 9 | # Docker image settings 10 | IMAGE_NAME?=kube-network-policies 11 | REGISTRY?=gcr.io/k8s-staging-networking 12 | TAG?=$(shell echo "$$(date +v%Y%m%d)-$$(git describe --always --dirty)") 13 |
PLATFORMS?=linux/amd64,linux/arm64,linux/s390x 14 | 15 | .PHONY: all build build-standard build-npa-v1alpha1 build-npa-v1alpha2 build-iptracker build-kube-ip-tracker-standard 16 | 17 | build: build-standard build-npa-v1alpha1 build-npa-v1alpha2 build-iptracker build-kube-ip-tracker-standard 18 | 19 | build-standard: 20 | @echo "Building standard binary..." 21 | GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o ./bin/kube-network-policies-standard ./cmd/kube-network-policies/standard 22 | 23 | build-npa-v1alpha1: 24 | @echo "Building npa-v1alpha1 binary..." 25 | GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o ./bin/kube-network-policies-npa-v1alpha1 ./cmd/kube-network-policies/npa-v1alpha1 26 | 27 | build-npa-v1alpha2: 28 | @echo "Building npa-v1alpha2 binary..." 29 | GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o ./bin/kube-network-policies-npa-v1alpha2 ./cmd/kube-network-policies/npa-v1alpha2 30 | 31 | build-iptracker: 32 | @echo "Building iptracker binary..." 33 | GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o ./bin/kube-network-policies-iptracker ./cmd/kube-network-policies/iptracker 34 | 35 | build-kube-ip-tracker-standard: 36 | @echo "Building kube-ip-tracker binary..." 37 | GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o ./bin/kube-ip-tracker-standard ./cmd/kube-ip-tracker/standard 38 | 39 | clean: 40 | rm -rf "$(OUT_DIR)/" 41 | 42 | test: 43 | CGO_ENABLED=1 go test -short -v -race -count 1 ./... 44 | 45 | lint: 46 | hack/lint.sh 47 | 48 | update: 49 | go mod tidy 50 | 51 | proto: 52 | hack/generate-proto.sh 53 | 54 | .PHONY: ensure-buildx 55 | ensure-buildx: 56 | ./hack/init-buildx.sh 57 | 58 | # Individual image build targets (load into local docker) 59 | image-build-standard: build-standard 60 | docker buildx build . \ 61 | --build-arg TARGET_BUILD=standard \ 62 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)" \ 63 | --load 64 | 65 | image-build-npa-v1alpha1: build-npa-v1alpha1 66 | docker buildx build . 
\ 67 | --build-arg TARGET_BUILD=npa-v1alpha1 \ 68 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)-npa-v1alpha1" \ 69 | --load 70 | 71 | image-build-npa-v1alpha2: build-npa-v1alpha2 72 | docker buildx build . \ 73 | --build-arg TARGET_BUILD=npa-v1alpha2 \ 74 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)-npa-v1alpha2" \ 75 | --load 76 | 77 | image-build-iptracker: build-iptracker 78 | docker buildx build . \ 79 | --build-arg TARGET_BUILD=iptracker \ 80 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)-iptracker" \ 81 | --load 82 | 83 | image-build-kube-ip-tracker-standard: build-kube-ip-tracker-standard 84 | docker buildx build . -f Dockerfile.iptracker \ 85 | --build-arg TARGET_BUILD=standard \ 86 | --tag="${REGISTRY}/kube-ip-tracker:$(TAG)" \ 87 | --load 88 | 89 | # Individual image push targets (multi-platform) 90 | image-push-standard: build-standard 91 | docker buildx build . \ 92 | --build-arg TARGET_BUILD=standard \ 93 | --platform="${PLATFORMS}" \ 94 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)" \ 95 | --push 96 | 97 | image-push-npa-v1alpha1: build-npa-v1alpha1 98 | docker buildx build . \ 99 | --build-arg TARGET_BUILD=npa-v1alpha1 \ 100 | --platform="${PLATFORMS}" \ 101 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)-npa-v1alpha1" \ 102 | --push 103 | 104 | image-push-npa-v1alpha2: build-npa-v1alpha2 105 | docker buildx build . \ 106 | --build-arg TARGET_BUILD=npa-v1alpha2 \ 107 | --platform="${PLATFORMS}" \ 108 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)-npa-v1alpha2" \ 109 | --push 110 | 111 | image-push-iptracker: build-iptracker 112 | docker buildx build . \ 113 | --build-arg TARGET_BUILD=iptracker \ 114 | --platform="${PLATFORMS}" \ 115 | --tag="${REGISTRY}/$(IMAGE_NAME):$(TAG)-iptracker" \ 116 | --push 117 | 118 | image-push-kube-ip-tracker-standard: build-kube-ip-tracker-standard 119 | docker buildx build . 
-f Dockerfile.iptracker \ 120 | --build-arg TARGET_BUILD=standard \ 121 | --platform="${PLATFORMS}" \ 122 | --tag="${REGISTRY}/kube-ip-tracker:$(TAG)" \ 123 | --push 124 | 125 | # --- Aggregate Targets --- 126 | .PHONY: images-build images-push release 127 | 128 | # Build all image variants and load them into the local Docker daemon 129 | images-build: ensure-buildx image-build-standard image-build-npa-v1alpha1 image-build-npa-v1alpha2 image-build-iptracker image-build-kube-ip-tracker-standard 130 | 131 | # Build and push all multi-platform image variants to the registry 132 | images-push: ensure-buildx image-push-standard image-push-npa-v1alpha1 image-push-npa-v1alpha2 image-push-iptracker image-push-kube-ip-tracker-standard 133 | 134 | # The main release target, which pushes all images 135 | release: images-push -------------------------------------------------------------------------------- /install-iptracker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: kube-ip-tracker 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - nodes 11 | - pods 12 | - namespaces 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | --- 18 | kind: ClusterRoleBinding 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | metadata: 21 | name: kube-ip-tracker 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: ClusterRole 25 | name: kube-ip-tracker 26 | subjects: 27 | - kind: ServiceAccount 28 | name: kube-ip-tracker 29 | namespace: kube-system 30 | --- 31 | apiVersion: v1 32 | kind: ServiceAccount 33 | metadata: 34 | name: kube-ip-tracker 35 | namespace: kube-system 36 | --- 37 | apiVersion: v1 38 | kind: Service 39 | metadata: 40 | name: kube-ip-tracker 41 | namespace: kube-system 42 | spec: 43 | clusterIP: None 44 | selector: 45 | app: kube-ip-tracker 46 | ports: 47 | - name: grpc 48 | port: 10999 49 | targetPort: 10999 50 | --- 51 
| apiVersion: apps/v1 52 | kind: Deployment 53 | metadata: 54 | name: kube-ip-tracker 55 | namespace: kube-system 56 | labels: 57 | app: kube-ip-tracker 58 | k8s-app: kube-ip-tracker 59 | spec: 60 | replicas: 2 61 | selector: 62 | matchLabels: 63 | app: kube-ip-tracker 64 | template: 65 | metadata: 66 | labels: 67 | app: kube-ip-tracker 68 | k8s-app: kube-ip-tracker 69 | spec: 70 | hostNetwork: true # do not depend on the network of the cluster 71 | serviceAccountName: kube-ip-tracker 72 | containers: 73 | - name: kube-ip-tracker 74 | image: registry.k8s.io/networking/kube-ip-tracker:v0.9.2 75 | args: 76 | - /bin/kube-ip-tracker 77 | - --listen-address=http://0.0.0.0:10999 78 | - --v=2 79 | ports: 80 | - containerPort: 10999 81 | name: grpc 82 | resources: 83 | requests: 84 | cpu: "100m" 85 | memory: "50Mi" 86 | --- 87 | kind: ClusterRole 88 | apiVersion: rbac.authorization.k8s.io/v1 89 | metadata: 90 | name: kube-network-policies 91 | rules: 92 | - apiGroups: 93 | - "" 94 | resources: 95 | - namespaces 96 | verbs: 97 | - get 98 | - list 99 | - watch 100 | - apiGroups: 101 | - "networking.k8s.io" 102 | resources: 103 | - networkpolicies 104 | verbs: 105 | - list 106 | - watch 107 | --- 108 | kind: ClusterRoleBinding 109 | apiVersion: rbac.authorization.k8s.io/v1 110 | metadata: 111 | name: kube-network-policies 112 | roleRef: 113 | apiGroup: rbac.authorization.k8s.io 114 | kind: ClusterRole 115 | name: kube-network-policies 116 | subjects: 117 | - kind: ServiceAccount 118 | name: kube-network-policies 119 | namespace: kube-system 120 | --- 121 | apiVersion: v1 122 | kind: ServiceAccount 123 | metadata: 124 | name: kube-network-policies 125 | namespace: kube-system 126 | --- 127 | apiVersion: apps/v1 128 | kind: DaemonSet 129 | metadata: 130 | name: kube-network-policies 131 | namespace: kube-system 132 | labels: 133 | tier: node 134 | app: kube-network-policies 135 | k8s-app: kube-network-policies 136 | spec: 137 | selector: 138 | matchLabels: 139 | app: 
kube-network-policies 140 | template: 141 | metadata: 142 | labels: 143 | tier: node 144 | app: kube-network-policies 145 | k8s-app: kube-network-policies 146 | spec: 147 | hostNetwork: true 148 | dnsPolicy: ClusterFirstWithHostNet 149 | nodeSelector: 150 | kubernetes.io/os: linux 151 | tolerations: 152 | - operator: Exists 153 | effect: NoSchedule 154 | serviceAccountName: kube-network-policies 155 | containers: 156 | - name: kube-network-policies 157 | image: registry.k8s.io/networking/kube-network-policies:v0.9.2-iptracker 158 | args: 159 | - /bin/netpol 160 | - --hostname-override=$(MY_NODE_NAME) 161 | - --v=2 162 | - --nfqueue-id=198 163 | - --ip-tracker-address=kube-ip-tracker.kube-system.svc.cluster.local:10999 164 | volumeMounts: 165 | - name: nri-plugin 166 | mountPath: /var/run/nri 167 | - name: netns 168 | mountPath: /var/run/netns 169 | mountPropagation: HostToContainer 170 | resources: 171 | requests: 172 | cpu: "100m" 173 | memory: "50Mi" 174 | securityContext: 175 | privileged: true 176 | capabilities: 177 | add: ["NET_ADMIN"] 178 | env: 179 | - name: MY_NODE_NAME 180 | valueFrom: 181 | fieldRef: 182 | fieldPath: spec.nodeName 183 | volumes: 184 | - name: nri-plugin 185 | hostPath: 186 | path: /var/run/nri 187 | - name: netns 188 | hostPath: 189 | path: /var/run/netns 190 | --- 191 | -------------------------------------------------------------------------------- /pkg/networkpolicy/engine.go: -------------------------------------------------------------------------------- 1 | // SPDX-License-Identifier: APACHE-2.0 2 | 3 | package networkpolicy 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "net/netip" 9 | 10 | "k8s.io/apimachinery/pkg/util/sets" 11 | "k8s.io/klog/v2" 12 | "sigs.k8s.io/kube-network-policies/pkg/api" 13 | "sigs.k8s.io/kube-network-policies/pkg/network" 14 | ) 15 | 16 | // EvaluatorFunc is the function signature for any logic that evaluates a packet. 
17 | type EvaluatorFunc func(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) 18 | 19 | // Evaluator is a function that can determine a verdict for a packet. 20 | type Evaluator struct { 21 | Name string 22 | Evaluate EvaluatorFunc 23 | } 24 | 25 | // PolicyEngine orchestrates network policy evaluation by running a fixed 26 | // sequence of policy-specific evaluators. 27 | type PolicyEngine struct { 28 | podInfoProvider api.PodInfoProvider 29 | evaluators []api.PolicyEvaluator 30 | } 31 | 32 | // NewPolicyEngine creates a new engine with a predefined evaluation order. 33 | func NewPolicyEngine(podInfoProvider api.PodInfoProvider, evaluators []api.PolicyEvaluator) *PolicyEngine { 34 | return &PolicyEngine{ 35 | podInfoProvider: podInfoProvider, 36 | evaluators: evaluators, 37 | } 38 | } 39 | 40 | // EvaluatePacket runs the full ingress and egress evaluation pipelines. 41 | func (e *PolicyEngine) EvaluatePacket(ctx context.Context, packet *network.Packet) (bool, error) { 42 | logger := klog.FromContext(ctx) 43 | 44 | // Only run podInfoProvider once per packet to guarantee consistency 45 | // across the pipeline and for efficiency. 46 | srcPod, _ := e.podInfoProvider.GetPodInfoByIP(packet.SrcIP.String()) 47 | dstPod, _ := e.podInfoProvider.GetPodInfoByIP(packet.DstIP.String()) 48 | 49 | // 1. Evaluate Egress 50 | verdict, err := e.runEgressPipeline(ctx, packet, srcPod, dstPod) 51 | if err != nil { 52 | logger.Error(err, "Egress pipeline evaluation failed") 53 | return false, err 54 | } 55 | if verdict == api.VerdictDeny { 56 | logger.V(2).Info("Packet denied by egress policy") 57 | return false, nil 58 | } 59 | 60 | // 2. 
Evaluate Ingress 61 | verdict, err = e.runIngressPipeline(ctx, packet, srcPod, dstPod) 62 | if err != nil { 63 | logger.Error(err, "Ingress pipeline evaluation failed") 64 | return false, err 65 | } 66 | if verdict == api.VerdictDeny { 67 | logger.V(2).Info("Packet denied by ingress policy") 68 | return false, nil 69 | } 70 | 71 | logger.V(2).Info("Packet accepted by policy") 72 | return true, nil 73 | } 74 | 75 | // runEgressPipeline executes the sequence of egress evaluators. 76 | func (e *PolicyEngine) runEgressPipeline(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) { 77 | for _, evaluator := range e.evaluators { 78 | verdict, err := evaluator.EvaluateEgress(ctx, p, srcPod, dstPod) 79 | if err != nil { 80 | return api.VerdictDeny, err 81 | } 82 | // Accept or Deny are final verdicts 83 | if verdict != api.VerdictNext { 84 | return verdict, nil 85 | } 86 | } 87 | return api.VerdictAccept, nil 88 | } 89 | 90 | // runIngressPipeline executes the sequence of ingress evaluators. 91 | func (e *PolicyEngine) runIngressPipeline(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) { 92 | for _, evaluator := range e.evaluators { 93 | verdict, err := evaluator.EvaluateIngress(ctx, p, srcPod, dstPod) 94 | if err != nil { 95 | return api.VerdictDeny, err 96 | } 97 | // Accept or Deny are final verdicts 98 | if verdict != api.VerdictNext { 99 | return verdict, nil 100 | } 101 | } 102 | return api.VerdictAccept, nil 103 | } 104 | 105 | // SetDataplaneSyncCallbacks iterates through all evaluators and registers the 106 | // dataplane's sync function with each one. 107 | func (e *PolicyEngine) SetDataplaneSyncCallbacks(syncFn api.SyncFunc) { 108 | for _, evaluator := range e.evaluators { 109 | evaluator.SetDataplaneSyncCallback(syncFn) 110 | } 111 | } 112 | 113 | // Ready returns true if all evaluators are ready. 
114 | func (e *PolicyEngine) Ready() bool { 115 | for _, evaluator := range e.evaluators { 116 | if !evaluator.Ready() { 117 | return false 118 | } 119 | } 120 | return true 121 | } 122 | 123 | // GetManagedIPs aggregates the IPs and diversion signals from all registered evaluators. 124 | // This is the single method the dataplane controller will call to get its configuration. 125 | func (e *PolicyEngine) GetManagedIPs(ctx context.Context) (allIPs []netip.Addr, divertAll bool, err error) { 126 | ipSet := sets.New[netip.Addr]() 127 | 128 | for _, evaluator := range e.evaluators { 129 | ips, divert, err := evaluator.ManagedIPs(ctx) 130 | if err != nil { 131 | return nil, false, fmt.Errorf("failed to get managed IPs from evaluator %s: %w", evaluator.Name(), err) 132 | } 133 | 134 | // If any single evaluator requires diverting all traffic, the whole system must. 135 | if divert { 136 | return nil, true, nil 137 | } 138 | 139 | // Add the IPs from this evaluator to the global set to handle duplicates. 
140 | for _, ip := range ips { 141 | ipSet.Insert(ip) 142 | } 143 | } 144 | 145 | return ipSet.UnsortedList(), false, nil 146 | } 147 | -------------------------------------------------------------------------------- /plugins/iptracker/iptracker_networkpolicy.go: -------------------------------------------------------------------------------- 1 | package iptracker 2 | 3 | import ( 4 | "context" 5 | "net/netip" 6 | 7 | networkingv1 "k8s.io/api/networking/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/labels" 10 | networkinginformers "k8s.io/client-go/informers/networking/v1" 11 | networkinglisters "k8s.io/client-go/listers/networking/v1" 12 | "k8s.io/client-go/tools/cache" 13 | "k8s.io/klog/v2" 14 | "sigs.k8s.io/kube-network-policies/pkg/api" 15 | "sigs.k8s.io/kube-network-policies/pkg/network" 16 | "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" 17 | ) 18 | 19 | // IPTrackerNetworkPolicy implements the PolicyEvaluator interface for standard Kubernetes NetworkPolicies using iptracker. 20 | type IPTrackerNetworkPolicy struct { 21 | clusterID string 22 | nodeName string 23 | 24 | networkpolicyLister networkinglisters.NetworkPolicyLister 25 | networkpoliciesSynced cache.InformerSynced 26 | syncCallback api.SyncFunc 27 | } 28 | 29 | // Ensure IPTrackerNetworkPolicy implements the PolicyEvaluator interface. 30 | var _ api.PolicyEvaluator = &IPTrackerNetworkPolicy{} 31 | 32 | // NewIPTrackerNetworkPolicy creates a new IPTrackerNetworkPolicy implementation. 
33 | func NewIPTrackerNetworkPolicy( 34 | clusterID string, 35 | nodeName string, 36 | networkpolicyInformer networkinginformers.NetworkPolicyInformer, 37 | ) *IPTrackerNetworkPolicy { 38 | s := &IPTrackerNetworkPolicy{ 39 | clusterID: clusterID, 40 | nodeName: nodeName, 41 | networkpolicyLister: networkpolicyInformer.Lister(), 42 | networkpoliciesSynced: networkpolicyInformer.Informer().HasSynced, 43 | syncCallback: func() {}, 44 | } 45 | 46 | _, _ = networkpolicyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ 47 | AddFunc: func(obj interface{}) { s.syncCallback() }, 48 | UpdateFunc: func(old, cur interface{}) { s.syncCallback() }, 49 | DeleteFunc: func(obj interface{}) { s.syncCallback() }, 50 | }) 51 | 52 | return s 53 | } 54 | 55 | // Name returns the name of the evaluator. 56 | func (s *IPTrackerNetworkPolicy) Name() string { 57 | return "IPTrackerNetworkPolicy" 58 | } 59 | 60 | // SetDataplaneSyncCallback stores the sync function provided by the controller. 61 | func (s *IPTrackerNetworkPolicy) SetDataplaneSyncCallback(syncFn api.SyncFunc) { 62 | if syncFn != nil { 63 | s.syncCallback = syncFn 64 | } 65 | } 66 | 67 | // Ready returns true if all required informers have synced. 68 | func (s *IPTrackerNetworkPolicy) Ready() bool { 69 | return s.networkpoliciesSynced() 70 | } 71 | 72 | // ManagedIPs returns the IP addresses of all local pods that are selected by a NetworkPolicy. 73 | func (s *IPTrackerNetworkPolicy) ManagedIPs(ctx context.Context) ([]netip.Addr, bool, error) { 74 | return nil, true, nil 75 | } 76 | 77 | // EvaluateIngress evaluates the ingress traffic for a pod. 
78 | func (s *IPTrackerNetworkPolicy) EvaluateIngress(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) { 79 | logger := klog.FromContext(ctx) 80 | 81 | policies := s.getNetworkPoliciesForPod(dstPod) 82 | if len(policies) == 0 { 83 | logger.V(2).Info("Ingress NetworkPolicies does not apply") 84 | return api.VerdictNext, nil 85 | } 86 | if !networkpolicy.EvaluatePolicyDirection(ctx, policies, networkingv1.PolicyTypeIngress, dstPod, p.DstPort, srcPod, p.SrcIP, p.SrcPort, p.Proto) { 87 | return api.VerdictDeny, nil 88 | } 89 | return api.VerdictAccept, nil 90 | } 91 | 92 | // EvaluateEgress evaluates the egress traffic for a pod. 93 | func (s *IPTrackerNetworkPolicy) EvaluateEgress(ctx context.Context, p *network.Packet, srcPod, dstPod *api.PodInfo) (api.Verdict, error) { 94 | logger := klog.FromContext(ctx) 95 | 96 | policies := s.getNetworkPoliciesForPod(srcPod) 97 | if len(policies) == 0 { 98 | logger.V(2).Info("Egress NetworkPolicies does not apply") 99 | return api.VerdictNext, nil 100 | } 101 | 102 | if !networkpolicy.EvaluatePolicyDirection(ctx, policies, networkingv1.PolicyTypeEgress, srcPod, p.SrcPort, dstPod, p.DstIP, p.DstPort, p.Proto) { 103 | return api.VerdictDeny, nil 104 | } 105 | return api.VerdictAccept, nil 106 | } 107 | 108 | func (s *IPTrackerNetworkPolicy) getNetworkPoliciesForPod(pod *api.PodInfo) []*networkingv1.NetworkPolicy { 109 | if pod == nil { 110 | return nil 111 | } 112 | // keeping the scope local to not enforce policies for a pod in another node 113 | // or it will be difficult to debug and increase the blast radius of bugs 114 | // in the agent. 115 | if pod.Node.Name != s.nodeName { 116 | return nil 117 | } 118 | 119 | // If the pod has a ClusterId set, and it's not the current cluster's ID, then this agent shouldn't manage it.
120 | if pod.ClusterId != "" && pod.ClusterId != s.clusterID { 121 | return nil 122 | } 123 | 124 | networkPolices, err := s.networkpolicyLister.NetworkPolicies(pod.Namespace.Name).List(labels.Everything()) 125 | if err != nil { 126 | return nil 127 | } 128 | var result []*networkingv1.NetworkPolicy 129 | for _, policy := range networkPolices { 130 | podSelector, err := metav1.LabelSelectorAsSelector(&policy.Spec.PodSelector) 131 | if err != nil { 132 | continue 133 | } 134 | if podSelector.Matches(labels.Set(pod.Labels)) { 135 | result = append(result, policy) 136 | } 137 | } 138 | return result 139 | } 140 | -------------------------------------------------------------------------------- /pkg/network/packet.go: -------------------------------------------------------------------------------- 1 | package network 2 | 3 | import ( 4 | "encoding/binary" 5 | "encoding/hex" 6 | "fmt" 7 | "net" 8 | "syscall" 9 | 10 | v1 "k8s.io/api/core/v1" 11 | ) 12 | 13 | type Packet struct { 14 | ID uint32 15 | Family v1.IPFamily 16 | SrcIP net.IP 17 | DstIP net.IP 18 | Proto v1.Protocol 19 | SrcPort int 20 | DstPort int 21 | Payload []byte 22 | } 23 | 24 | var ErrorTooShort = fmt.Errorf("packet too short") 25 | var ErrorCorrupted = fmt.Errorf("packet corrupted") 26 | 27 | func (p Packet) String() string { 28 | return fmt.Sprintf("[%d] %s:%d %s:%d %s\n%s", p.ID, p.SrcIP.String(), p.SrcPort, p.DstIP.String(), p.DstPort, p.Proto, hex.Dump(p.Payload)) 29 | } 30 | 31 | // This function is used for JSON output (interface logr.Marshaler) 32 | func (p Packet) MarshalLog() any { 33 | return &struct { 34 | ID uint32 35 | Family v1.IPFamily 36 | SrcIP net.IP 37 | DstIP net.IP 38 | Proto v1.Protocol 39 | SrcPort int 40 | DstPort int 41 | }{ 42 | p.ID, 43 | p.Family, 44 | p.SrcIP, 45 | p.DstIP, 46 | p.Proto, 47 | p.SrcPort, 48 | p.DstPort, 49 | } 50 | } 51 | 52 | // https://en.wikipedia.org/wiki/Internet_Protocol_version_4#Packet_structure 53 | // https://en.wikipedia.org/wiki/IPv6_packet 54 | // 
https://github.com/golang/net/blob/master/ipv4/header.go 55 | func ParsePacket(b []byte) (Packet, error) { 56 | t := Packet{} 57 | if len(b) < 20 { 58 | // 20 is the minimum length of an IPv4 header (IPv6 is 40) 59 | return t, ErrorTooShort 60 | } 61 | version := int(b[0] >> 4) 62 | // initialize variables 63 | var protocol, l4offset, nxtHeader int 64 | switch version { 65 | case 4: 66 | t.Family = v1.IPv4Protocol 67 | hdrlen := int(b[0]&0x0f) * 4 // (header length in 32-bit words) 68 | if hdrlen < 20 { 69 | return t, ErrorCorrupted 70 | } 71 | l4offset = hdrlen 72 | if l4offset >= len(b) { 73 | return t, ErrorTooShort 74 | } 75 | t.SrcIP = net.IPv4(b[12], b[13], b[14], b[15]) 76 | t.DstIP = net.IPv4(b[16], b[17], b[18], b[19]) 77 | protocol = int(b[9]) 78 | // IPv4 fragments: 79 | // Since the conntracker is always used in K8s, IPv4 fragments 80 | // will never be passed via the nfqueue. Packets are 81 | // re-assembled by the kernel. Please see: 82 | // https://unix.stackexchange.com/questions/650790/unwanted-defragmentation-of-forwarded-ipv4-packets 83 | case 6: 84 | t.Family = v1.IPv6Protocol 85 | if len(b) < 48 { 86 | // 40 is the minimum length of an IPv6 header, and 8 is 87 | // the minimum lenght of an extension or L4 header 88 | return t, ErrorTooShort 89 | } 90 | t.SrcIP = make(net.IP, net.IPv6len) 91 | copy(t.SrcIP, b[8:24]) 92 | t.DstIP = make(net.IP, net.IPv6len) 93 | copy(t.DstIP, b[24:40]) 94 | // Handle extension headers. 95 | nxtHeader = int(b[6]) 96 | l4offset = 40 97 | for nxtHeader == syscall.IPPROTO_DSTOPTS || nxtHeader == syscall.IPPROTO_HOPOPTS || nxtHeader == syscall.IPPROTO_ROUTING { 98 | // These headers have a lenght in 8-octet units, not 99 | // including the first 8 octets 100 | nxtHeader = int(b[l4offset]) 101 | l4offset += (8 + int(b[l4offset+1])*8) 102 | // Now l4offset points to either another extension header, 103 | // or an L4 header. 
So we must have at least 8 byte data 104 | // after this (minimum extension header size) 105 | if (l4offset + 8) >= len(b) { 106 | return t, ErrorTooShort 107 | } 108 | } 109 | if nxtHeader == syscall.IPPROTO_FRAGMENT { 110 | // Only the first fragment has the L4 header 111 | fragOffset := int(binary.BigEndian.Uint16(b[l4offset+2 : l4offset+4])) 112 | if fragOffset&0xfff8 == 0 { 113 | nxtHeader = int(b[l4offset]) 114 | l4offset += 8 115 | // Here it's assumed that the fragment is the last 116 | // extension header before the L4 header. But more 117 | // IPPROTO_DSTOPTS are allowed by the recommended order. 118 | // TODO: handle extra IPPROTO_DSTOPTS. 119 | } else { 120 | // If this is NOT the first fragment, we have no L4 121 | // header and the payload begins after this 122 | // header. Return a packet with t.proto unset 123 | return t, nil 124 | } 125 | } 126 | protocol = nxtHeader 127 | default: 128 | return t, fmt.Errorf("unknown version %d", version) 129 | } 130 | 131 | // The payload follows immediately after the L4 header, pointed 132 | // out by 'l4offset'. 
So payloadOffset will be (l4offset + the 133 | // L4header len) The L4header len is 8 byte for udp and sctp, but 134 | // may vary for tcp (the dataOffset) 135 | var payloadOffset int 136 | switch protocol { 137 | case syscall.IPPROTO_TCP: 138 | t.Proto = v1.ProtocolTCP 139 | dataOffset := int(b[l4offset+12]>>4) * 4 140 | if dataOffset < 20 { 141 | return t, ErrorCorrupted 142 | } 143 | payloadOffset = l4offset + dataOffset 144 | case syscall.IPPROTO_UDP: 145 | t.Proto = v1.ProtocolUDP 146 | payloadOffset = l4offset + 8 147 | case syscall.IPPROTO_SCTP: 148 | t.Proto = v1.ProtocolSCTP 149 | payloadOffset = l4offset + 8 150 | default: 151 | // Return a packet with t.proto unset, and ports 0 152 | return t, nil 153 | 154 | } 155 | if payloadOffset > len(b) { 156 | // If the payloadOffset is beyond the packet size, we have an 157 | // incomplete L4 header 158 | return t, ErrorTooShort 159 | } 160 | t.SrcPort = int(binary.BigEndian.Uint16(b[l4offset : l4offset+2])) 161 | t.DstPort = int(binary.BigEndian.Uint16(b[l4offset+2 : l4offset+4])) 162 | 163 | // TODO allow to filter by the payload 164 | t.Payload = b[payloadOffset:] 165 | return t, nil 166 | } 167 | -------------------------------------------------------------------------------- /docs/testing/monitoring.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring 6 | --- 7 | apiVersion: v1 8 | kind: Service 9 | metadata: 10 | name: prometheus-service 11 | namespace: monitoring 12 | annotations: 13 | prometheus.io/scrape: 'true' 14 | prometheus.io/port: '9090' 15 | spec: 16 | selector: 17 | app: prometheus-server 18 | type: NodePort 19 | ports: 20 | - port: 8080 21 | targetPort: 9090 22 | --- 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | kind: ClusterRole 25 | metadata: 26 | name: prometheus 27 | rules: 28 | - apiGroups: [""] 29 | resources: 30 | - nodes 31 | - nodes/proxy 32 | - services 33 | - 
endpoints 34 | - pods 35 | verbs: ["get", "list", "watch"] 36 | - apiGroups: 37 | - extensions 38 | resources: 39 | - ingresses 40 | verbs: ["get", "list", "watch"] 41 | - nonResourceURLs: ["/metrics"] 42 | verbs: ["get"] 43 | --- 44 | apiVersion: rbac.authorization.k8s.io/v1 45 | kind: ClusterRoleBinding 46 | metadata: 47 | name: prometheus 48 | roleRef: 49 | apiGroup: rbac.authorization.k8s.io 50 | kind: ClusterRole 51 | name: prometheus 52 | subjects: 53 | - kind: ServiceAccount 54 | name: default 55 | namespace: monitoring 56 | --- 57 | apiVersion: v1 58 | kind: ConfigMap 59 | metadata: 60 | name: prometheus-server-conf 61 | labels: 62 | name: prometheus-server-conf 63 | namespace: monitoring 64 | data: 65 | prometheus.yml: |- 66 | global: 67 | scrape_interval: 5s 68 | evaluation_interval: 5s 69 | scrape_configs: 70 | - job_name: 'kubernetes-apiservers' 71 | kubernetes_sd_configs: 72 | - role: endpoints 73 | scheme: https 74 | tls_config: 75 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 76 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 77 | relabel_configs: 78 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] 79 | action: keep 80 | regex: default;kubernetes;https 81 | 82 | - job_name: 'kubernetes-controller-manager' 83 | honor_labels: true 84 | scheme: https 85 | tls_config: 86 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 87 | insecure_skip_verify: true 88 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 89 | static_configs: 90 | - targets: 91 | - 127.0.0.1:10257 92 | 93 | - job_name: 'kubernetes-nodes' 94 | scheme: https 95 | tls_config: 96 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 97 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 98 | kubernetes_sd_configs: 99 | - role: node 100 | relabel_configs: 101 | - action: labelmap 102 | regex: 
__meta_kubernetes_node_label_(.+) 103 | - target_label: __address__ 104 | replacement: localhost:6443 105 | - source_labels: [__meta_kubernetes_node_name] 106 | regex: (.+) 107 | target_label: __metrics_path__ 108 | replacement: /api/v1/nodes/${1}/proxy/metrics 109 | 110 | - job_name: 'kubernetes-cadvisor' 111 | scheme: https 112 | tls_config: 113 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 114 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token 115 | kubernetes_sd_configs: 116 | - role: node 117 | relabel_configs: 118 | - action: labelmap 119 | regex: __meta_kubernetes_node_label_(.+) 120 | - target_label: __address__ 121 | replacement: localhost:6443 122 | - source_labels: [__meta_kubernetes_node_name] 123 | regex: (.+) 124 | target_label: __metrics_path__ 125 | replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor 126 | 127 | - job_name: 'kubernetes-network-policies' 128 | honor_labels: true 129 | kubernetes_sd_configs: 130 | - role: pod 131 | relabel_configs: 132 | - action: keep 133 | source_labels: 134 | - __meta_kubernetes_namespace 135 | - __meta_kubernetes_pod_name 136 | separator: '/' 137 | regex: 'kube-system/kube-network-policies.+' 138 | - source_labels: 139 | - __address__ 140 | action: replace 141 | target_label: __address__ 142 | regex: (.+?)(\\:\\d+)? 
143 | replacement: $1:9080/metrics 144 | --- 145 | apiVersion: v1 146 | kind: Pod 147 | metadata: 148 | name: prometheus 149 | namespace: monitoring 150 | labels: 151 | app: prometheus-server 152 | spec: 153 | hostNetwork: true 154 | nodeSelector: 155 | node-role.kubernetes.io/control-plane: "" 156 | tolerations: 157 | - key: CriticalAddonsOnly 158 | operator: Exists 159 | - effect: NoSchedule 160 | key: node-role.kubernetes.io/master 161 | - effect: NoSchedule 162 | key: node-role.kubernetes.io/control-plane 163 | containers: 164 | - name: prometheus 165 | image: prom/prometheus:v2.51.1 166 | args: 167 | - "--config.file=/etc/prometheus/prometheus.yml" 168 | - "--storage.tsdb.path=/prometheus/" 169 | - "--web.enable-admin-api" 170 | ports: 171 | - containerPort: 9090 172 | volumeMounts: 173 | - name: prometheus-config-volume 174 | mountPath: /etc/prometheus/ 175 | - name: prometheus-storage-volume 176 | mountPath: /prometheus/ 177 | volumes: 178 | - name: prometheus-config-volume 179 | configMap: 180 | defaultMode: 420 181 | name: prometheus-server-conf 182 | - name: prometheus-storage-volume 183 | emptyDir: {} -------------------------------------------------------------------------------- /pkg/runner/bounded_frequency_runner.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2017 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package runner 18 | 19 | import ( 20 | "fmt" 21 | "time" 22 | 23 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 24 | "k8s.io/klog/v2" 25 | "k8s.io/utils/clock" 26 | ) 27 | 28 | // BoundedFrequencyRunner manages runs of a user-provided work function. 29 | type BoundedFrequencyRunner struct { 30 | name string // the name of this instance 31 | 32 | minInterval time.Duration // the min time between runs 33 | retryInterval time.Duration // the time between a run and a retry 34 | maxInterval time.Duration // the max time between runs 35 | 36 | run chan struct{} // try an async run 37 | 38 | fn func() error // the work function 39 | minIntervalTimer clock.Timer 40 | nextRunTimer clock.Timer // Combined timer for maxInterval and retryInterval logic 41 | clock clock.Clock 42 | } 43 | 44 | // NewBoundedFrequencyRunner creates and returns a new BoundedFrequencyRunner. 45 | // This runner manages the execution frequency of the provided function `fn`. 46 | // 47 | // The runner guarantees two properties: 48 | // 1. Minimum Interval (`minInterval`): At least `minInterval` must pass between 49 | // the *completion* of one execution and the *start* of the next. Calls to 50 | // `Run()` during this cooldown period are coalesced and deferred until the 51 | // interval expires. This prevents burst executions. 52 | // 2. Maximum Interval (`maxInterval`): The function `fn` is guaranteed to run 53 | // at least once per `maxInterval`, ensuring periodic execution even without 54 | // explicit `Run()` calls (e.g., for refreshing state). 55 | // 56 | // `maxInterval` must be greater than or equal to `minInterval`; otherwise, 57 | // this function will panic. 58 | // 59 | // If `fn` returns an error, then it will be run again no later than `retryInterval` 60 | // (unless another trigger, like `Run()` or `maxInterval`, causes it to run sooner). Any 61 | // successful run will abort the retry attempt. 
62 | func NewBoundedFrequencyRunner(name string, fn func() error, minInterval, retryInterval, maxInterval time.Duration) *BoundedFrequencyRunner { 63 | return construct(name, fn, minInterval, retryInterval, maxInterval, clock.RealClock{}) 64 | } 65 | 66 | // Make an instance with dependencies injected. 67 | func construct(name string, fn func() error, minInterval, retryInterval, maxInterval time.Duration, clock clock.Clock) *BoundedFrequencyRunner { 68 | if maxInterval < minInterval { 69 | panic(fmt.Sprintf("%s: maxInterval (%v) must be >= minInterval (%v)", name, maxInterval, minInterval)) 70 | } 71 | 72 | bfr := &BoundedFrequencyRunner{ 73 | name: name, 74 | fn: fn, 75 | 76 | minInterval: minInterval, 77 | retryInterval: retryInterval, 78 | maxInterval: maxInterval, 79 | 80 | run: make(chan struct{}, 1), 81 | clock: clock, 82 | } 83 | 84 | return bfr 85 | } 86 | 87 | // Loop handles the periodic timer and run requests. This is expected to be 88 | // called as a goroutine. 89 | func (bfr *BoundedFrequencyRunner) Loop(stop <-chan struct{}) { 90 | klog.V(3).InfoS("Loop running", "runner", bfr.name) 91 | defer close(bfr.run) 92 | 93 | bfr.minIntervalTimer = bfr.clock.NewTimer(bfr.minInterval) 94 | defer bfr.minIntervalTimer.Stop() 95 | 96 | // Initialize nextRunTimer with maxInterval 97 | bfr.nextRunTimer = bfr.clock.NewTimer(bfr.maxInterval) 98 | defer bfr.nextRunTimer.Stop() 99 | 100 | for { 101 | select { 102 | case <-stop: 103 | klog.V(3).InfoS("Loop stopping", "runner", bfr.name) 104 | return 105 | case <-bfr.nextRunTimer.C(): // Wait on the single timer 106 | case <-bfr.run: 107 | } 108 | 109 | // stop the timers here to allow the tests using the fake clock to synchronize 110 | // with the fakeClock.HasWaiters() method. The timers are reset after the function 111 | // is executed. 
112 | bfr.minIntervalTimer.Stop()
113 | bfr.nextRunTimer.Stop()
// NOTE(review): Stop() does not drain the timer channels. If a timer had
// already fired before Stop(), its stale tick remains queued, so a later
// select on the same channel can wake up early after Reset(). The run
// coalescing in this loop keeps that harmless, but confirm against the
// clock.Timer / time.Timer.Reset contract if exact intervals matter.
114 |
115 | var err error
116 | // avoid crashing if the function executed crashes
117 | func() {
118 | defer utilruntime.HandleCrash()
119 | err = bfr.fn()
120 | }()
121 |
122 | // Determine the next interval based on the result
123 | nextInterval := bfr.maxInterval
124 | if err != nil {
125 | // If error, ensure next run is within retryInterval and maxInterval
126 | if bfr.retryInterval < nextInterval {
127 | nextInterval = bfr.retryInterval
128 | }
129 | klog.V(3).InfoS("scheduling retry", "runner", bfr.name, "interval", nextInterval, "error", err)
130 | }
131 | // Reset the timers
// The next run happens at the earliest of an explicit Run() signal or
// nextRunTimer (maxInterval, shortened to retryInterval on error), but never
// before minIntervalTimer fires in the select below.
132 | bfr.minIntervalTimer.Reset(bfr.minInterval)
133 | bfr.nextRunTimer.Reset(nextInterval)
134 |
135 | // Wait for minInterval before looping
136 | select {
137 | case <-stop:
138 | klog.V(3).InfoS("Loop stopping", "runner", bfr.name)
139 | return
140 | case <-bfr.minIntervalTimer.C():
141 | }
142 | }
143 | }
144 |
145 | // Run the work function as soon as possible. If this is called while Loop is not
146 | // running, the call may be deferred indefinitely.
147 | // Once there is a queued request to call the work function, further calls to
148 | // Run() will have no effect until after it runs.
149 | func (bfr *BoundedFrequencyRunner) Run() {
150 | // If bfr.run is empty, push an element onto it. Otherwise, do nothing.
151 | select { 152 | case bfr.run <- struct{}{}: 153 | default: 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module sigs.k8s.io/kube-network-policies 2 | 3 | go 1.24.3 4 | 5 | require ( 6 | github.com/armon/go-radix v1.0.0 7 | github.com/containerd/nri v0.10.0 8 | github.com/florianl/go-nfqueue/v2 v2.0.2 9 | github.com/google/go-cmp v0.7.0 10 | github.com/google/nftables v0.3.0 11 | github.com/mdlayher/netlink v1.8.0 12 | github.com/prometheus/client_golang v1.23.2 13 | github.com/vishvananda/netlink v1.3.2-0.20251022194116-03b8f90390d9 14 | github.com/vishvananda/netns v0.0.5 15 | go.etcd.io/bbolt v1.4.3 16 | go.etcd.io/etcd/api/v3 v3.6.6 17 | go.etcd.io/etcd/client/v3 v3.6.6 18 | go.etcd.io/etcd/server/v3 v3.6.6 19 | golang.org/x/net v0.46.0 20 | golang.org/x/sys v0.37.0 21 | golang.org/x/time v0.14.0 22 | google.golang.org/protobuf v1.36.10 23 | k8s.io/api v0.34.2 24 | k8s.io/apimachinery v0.34.2 25 | k8s.io/client-go v0.34.2 26 | k8s.io/component-base v0.34.2 27 | k8s.io/component-helpers v0.34.2 28 | k8s.io/klog/v2 v2.130.1 29 | k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 30 | // Temporarily reference latest network-policy-api in anticipation of Beta. 
31 | sigs.k8s.io/network-policy-api v0.1.8-0.20251003212904-40eeb18096dd 32 | ) 33 | 34 | require ( 35 | github.com/beorn7/perks v1.0.1 // indirect 36 | github.com/blang/semver/v4 v4.0.0 // indirect 37 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect 38 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 39 | github.com/containerd/log v0.1.0 // indirect 40 | github.com/containerd/ttrpc v1.2.7 // indirect 41 | github.com/coreos/go-semver v0.3.1 // indirect 42 | github.com/coreos/go-systemd/v22 v22.5.0 // indirect 43 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 44 | github.com/dustin/go-humanize v1.0.1 // indirect 45 | github.com/emicklei/go-restful/v3 v3.12.2 // indirect 46 | github.com/fxamacker/cbor/v2 v2.9.0 // indirect 47 | github.com/go-logr/logr v1.4.2 // indirect 48 | github.com/go-logr/stdr v1.2.2 // indirect 49 | github.com/go-logr/zapr v1.3.0 // indirect 50 | github.com/go-openapi/jsonpointer v0.21.1 // indirect 51 | github.com/go-openapi/jsonreference v0.21.0 // indirect 52 | github.com/go-openapi/swag v0.23.1 // indirect 53 | github.com/gogo/protobuf v1.3.2 // indirect 54 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect 55 | github.com/golang/protobuf v1.5.4 // indirect 56 | github.com/google/btree v1.1.3 // indirect 57 | github.com/google/gnostic-models v0.7.0 // indirect 58 | github.com/google/uuid v1.6.0 // indirect 59 | github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect 60 | github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1 // indirect 61 | github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect 62 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect 63 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 64 | github.com/jonboulle/clockwork v0.5.0 // indirect 65 | github.com/josharian/intern v1.0.0 // indirect 66 | github.com/json-iterator/go v1.1.12 // indirect 67 | github.com/knqyf263/go-plugin v0.9.0 // indirect 68 | 
github.com/mailru/easyjson v0.9.0 // indirect 69 | github.com/mdlayher/socket v0.5.1 // indirect 70 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 71 | github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect 72 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 73 | github.com/opencontainers/runtime-spec v1.2.1 // indirect 74 | github.com/pkg/errors v0.9.1 // indirect 75 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 76 | github.com/prometheus/client_model v0.6.2 // indirect 77 | github.com/prometheus/common v0.66.1 // indirect 78 | github.com/prometheus/procfs v0.16.1 // indirect 79 | github.com/sirupsen/logrus v1.9.3 // indirect 80 | github.com/soheilhy/cmux v0.1.5 // indirect 81 | github.com/spf13/cobra v1.9.1 // indirect 82 | github.com/spf13/pflag v1.0.6 // indirect 83 | github.com/tetratelabs/wazero v1.9.0 // indirect 84 | github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect 85 | github.com/x448/float16 v0.8.4 // indirect 86 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect 87 | go.etcd.io/etcd/client/pkg/v3 v3.6.6 // indirect 88 | go.etcd.io/etcd/pkg/v3 v3.6.6 // indirect 89 | go.etcd.io/raft/v3 v3.6.0 // indirect 90 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 91 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect 92 | go.opentelemetry.io/otel v1.35.0 // indirect 93 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect 94 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect 95 | go.opentelemetry.io/otel/metric v1.35.0 // indirect 96 | go.opentelemetry.io/otel/sdk v1.34.0 // indirect 97 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 98 | go.opentelemetry.io/proto/otlp v1.5.0 // indirect 99 | go.uber.org/multierr v1.11.0 // indirect 100 | go.uber.org/zap v1.27.0 // indirect 101 | 
go.yaml.in/yaml/v2 v2.4.2 // indirect 102 | go.yaml.in/yaml/v3 v3.0.4 // indirect 103 | golang.org/x/crypto v0.43.0 // indirect 104 | golang.org/x/oauth2 v0.30.0 // indirect 105 | golang.org/x/sync v0.17.0 // indirect 106 | golang.org/x/term v0.36.0 // indirect 107 | golang.org/x/text v0.30.0 // indirect 108 | google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect 109 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250505200425-f936aa4a68b2 // indirect 110 | google.golang.org/grpc v1.72.1 // indirect 111 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 112 | gopkg.in/inf.v0 v0.9.1 // indirect 113 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect 114 | gopkg.in/yaml.v3 v3.0.1 // indirect 115 | k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect 116 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect 117 | sigs.k8s.io/randfill v1.0.0 // indirect 118 | sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect 119 | sigs.k8s.io/yaml v1.6.0 // indirect 120 | ) 121 | -------------------------------------------------------------------------------- /pkg/ipcache/bbolt.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2025 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package ipcache 18 | 19 | import ( 20 | "encoding/binary" 21 | "errors" 22 | "time" 23 | 24 | "go.etcd.io/bbolt" 25 | bbolterrors "go.etcd.io/bbolt/errors" 26 | "google.golang.org/protobuf/proto" 27 | "sigs.k8s.io/kube-network-policies/pkg/api" 28 | ) 29 | 30 | var ( 31 | // Bucket names 32 | dataBucketName = []byte("ipcache_data") 33 | metaBucketName = []byte("ipcache_meta") 34 | 35 | // Metadata keys 36 | lastRevisionKey = []byte("lastRevision") 37 | lastClusterIDKey = []byte("lastClusterID") 38 | lastMemberIDKey = []byte("lastMemberID") 39 | 40 | errNotFound = errors.New("not found") 41 | ) 42 | 43 | // BoltStore implements both the Store and SyncMetadataStore interfaces. 44 | type BoltStore struct { 45 | db *bbolt.DB 46 | } 47 | 48 | var _ Store = &BoltStore{} 49 | var _ SyncMetadataStore = &BoltStore{} 50 | var _ api.PodInfoProvider = &BoltStore{} 51 | 52 | // NewBoltStore creates or opens a BoltDB database and ensures the required buckets exist. 53 | func NewBoltStore(path string) (*BoltStore, error) { 54 | db, err := bbolt.Open(path, 0600, &bbolt.Options{Timeout: 1 * time.Second}) 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | // Ensure both data and metadata buckets exist. 
60 | err = db.Update(func(tx *bbolt.Tx) error {
61 | if _, err := tx.CreateBucketIfNotExists(dataBucketName); err != nil {
62 | return err
63 | }
64 | if _, err := tx.CreateBucketIfNotExists(metaBucketName); err != nil {
65 | return err
66 | }
67 | return nil
68 | })
69 | if err != nil {
70 | db.Close()
71 | return nil, err
72 | }
73 |
74 | return &BoltStore{db: db}, nil
75 | }
76 |
77 | // --- Store Interface Implementation ---
78 |
// GetPodInfoByIP looks up the PodInfo stored under the given IP key in a
// read-only transaction. It returns (nil, false) both when the key is absent
// and when the stored value fails to unmarshal.
79 | func (s *BoltStore) GetPodInfoByIP(ip string) (*api.PodInfo, bool) {
80 | var podInfo api.PodInfo
81 | err := s.db.View(func(tx *bbolt.Tx) error {
82 | b := tx.Bucket(dataBucketName)
83 | val := b.Get([]byte(ip))
84 | if val == nil {
85 | return errNotFound
86 | }
87 | return proto.Unmarshal(val, &podInfo)
88 | })
89 |
90 | if err != nil {
91 | return nil, false
92 | }
93 | return &podInfo, true
94 | }
95 |
// Upsert serializes info with protobuf and stores it under the IP key,
// creating or overwriting the entry in a single read-write transaction.
96 | func (s *BoltStore) Upsert(ip string, info *api.PodInfo) error {
97 | return s.db.Update(func(tx *bbolt.Tx) error {
98 | b := tx.Bucket(dataBucketName)
99 | data, err := proto.Marshal(info)
100 | if err != nil {
101 | return err
102 | }
103 | return b.Put([]byte(ip), data)
104 | })
105 | }
106 |
// Delete removes the entry for the given IP. Per the bbolt API,
// Bucket.Delete on a missing key is a no-op, so deleting a non-existent
// entry does not return an error.
107 | func (s *BoltStore) Delete(ip string) error {
108 | return s.db.Update(func(tx *bbolt.Tx) error {
109 | b := tx.Bucket(dataBucketName)
110 | return b.Delete([]byte(ip))
111 | })
112 | }
113 |
// List returns all stored PodInfo entries. Entries whose stored bytes fail
// to unmarshal are silently skipped (err == nil guard below) rather than
// aborting the whole listing.
114 | func (s *BoltStore) List() ([]*api.PodInfo, error) {
115 | var infos []*api.PodInfo
116 | err := s.db.View(func(tx *bbolt.Tx) error {
117 | b := tx.Bucket(dataBucketName)
118 | return b.ForEach(func(k, v []byte) error {
119 | var podInfo api.PodInfo
120 | if err := proto.Unmarshal(v, &podInfo); err == nil {
121 | infos = append(infos, &podInfo)
122 | }
123 | return nil
124 | })
125 | })
126 | if err != nil {
127 | return nil, err
128 | }
129 | return infos, nil
130 | }
131 |
132 | // Clear atomically deletes and recreates the main data bucket, effectively
133 | // removing all entries.
134 | func (s *BoltStore) Clear() error { 135 | return s.db.Update(func(tx *bbolt.Tx) error { 136 | if err := tx.DeleteBucket(dataBucketName); err != nil { 137 | if !errors.Is(err, bbolterrors.ErrBucketNotFound) { 138 | return err 139 | } 140 | } 141 | _, err := tx.CreateBucket(dataBucketName) 142 | if err != nil { 143 | return err 144 | } 145 | if err := tx.DeleteBucket(metaBucketName); err != nil { 146 | if !errors.Is(err, bbolterrors.ErrBucketNotFound) { 147 | return err 148 | } 149 | } 150 | _, err = tx.CreateBucket(metaBucketName) 151 | if err != nil { 152 | return err 153 | } 154 | return nil 155 | }) 156 | } 157 | 158 | func (s *BoltStore) Close() error { 159 | return s.db.Close() 160 | } 161 | 162 | // --- SyncMetadataStore Interface Implementation --- 163 | 164 | func (s *BoltStore) GetSyncMetadata() (*SyncMetadata, error) { 165 | meta := &SyncMetadata{} 166 | err := s.db.View(func(tx *bbolt.Tx) error { 167 | b := tx.Bucket(metaBucketName) 168 | 169 | revBytes := b.Get(lastRevisionKey) 170 | if revBytes != nil { 171 | meta.Revision = int64(binary.BigEndian.Uint64(revBytes)) 172 | } 173 | clusterIDBytes := b.Get(lastClusterIDKey) 174 | if clusterIDBytes != nil { 175 | meta.ClusterID = binary.BigEndian.Uint64(clusterIDBytes) 176 | } 177 | memberIDBytes := b.Get(lastMemberIDKey) 178 | if memberIDBytes != nil { 179 | meta.MemberID = binary.BigEndian.Uint64(memberIDBytes) 180 | } 181 | return nil 182 | }) 183 | return meta, err 184 | } 185 | 186 | func (s *BoltStore) SetSyncMetadata(meta *SyncMetadata) error { 187 | return s.db.Update(func(tx *bbolt.Tx) error { 188 | b := tx.Bucket(metaBucketName) 189 | 190 | revBytes := make([]byte, 8) 191 | binary.BigEndian.PutUint64(revBytes, uint64(meta.Revision)) 192 | if err := b.Put(lastRevisionKey, revBytes); err != nil { 193 | return err 194 | } 195 | 196 | clusterIDBytes := make([]byte, 8) 197 | binary.BigEndian.PutUint64(clusterIDBytes, meta.ClusterID) 198 | if err := b.Put(lastClusterIDKey, clusterIDBytes); err != 
nil { 199 | return err 200 | } 201 | 202 | memberIDBytes := make([]byte, 8) 203 | binary.BigEndian.PutUint64(memberIDBytes, meta.MemberID) 204 | return b.Put(lastMemberIDKey, memberIDBytes) 205 | }) 206 | } 207 | -------------------------------------------------------------------------------- /cmd/kube-network-policies/standard/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "net" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | "time" 12 | 13 | "sigs.k8s.io/kube-network-policies/pkg/api" 14 | "sigs.k8s.io/kube-network-policies/pkg/cmd" 15 | "sigs.k8s.io/kube-network-policies/pkg/dataplane" 16 | "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" 17 | "sigs.k8s.io/kube-network-policies/pkg/podinfo" 18 | 19 | "k8s.io/apimachinery/pkg/api/meta" 20 | "k8s.io/client-go/informers" 21 | "k8s.io/client-go/tools/clientcmd" 22 | 23 | "k8s.io/client-go/kubernetes" 24 | "k8s.io/client-go/rest" 25 | "k8s.io/component-base/logs" 26 | logsapi "k8s.io/component-base/logs/api/v1" 27 | _ "k8s.io/component-base/logs/json/register" 28 | nodeutil "k8s.io/component-helpers/node/util" 29 | "k8s.io/klog/v2" 30 | ) 31 | 32 | // This is a pattern to ensure that deferred functions executes before os.Exit 33 | func main() { 34 | os.Exit(run()) 35 | } 36 | 37 | func run() int { 38 | // Setup logging 39 | logCfg := logsapi.NewLoggingConfiguration() 40 | logsapi.AddGoFlags(logCfg, flag.CommandLine) 41 | 42 | // Setup flags 43 | opts := cmd.NewOptions() 44 | opts.AddFlags(flag.CommandLine) 45 | 46 | flag.Parse() 47 | 48 | // init logging 49 | logs.InitLogs() 50 | if err := logsapi.ValidateAndApply(logCfg, nil); err != nil { 51 | fmt.Fprintf(os.Stderr, "%v\n", err) 52 | return 1 53 | } 54 | 55 | // Create a context for structured logging, and catch termination signals 56 | ctx, cancel := signal.NotifyContext( 57 | context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM) 58 
| defer cancel() 59 | 60 | logger := klog.FromContext(ctx) 61 | logger.Info("called", "args", flag.Args()) 62 | 63 | flag.VisitAll(func(flag *flag.Flag) { 64 | logger.Info("flag", "name", flag.Name, "value", flag.Value) 65 | }) 66 | 67 | if _, _, err := net.SplitHostPort(opts.MetricsBindAddress); err != nil { 68 | logger.Error(err, "parsing metrics bind address", "address", opts.MetricsBindAddress) 69 | return 1 70 | } 71 | 72 | nodeName, err := nodeutil.GetHostname(opts.HostnameOverride) 73 | if err != nil { 74 | klog.Fatalf("can not obtain the node name, use the hostname-override flag if you want to set it to a specific value: %v", err) 75 | } 76 | 77 | dpCfg := dataplane.Config{ 78 | FailOpen: opts.FailOpen, 79 | QueueID: opts.QueueID, 80 | NetfilterBug1766Fix: opts.NetfilterBug1766Fix, 81 | StrictMode: opts.StrictMode, 82 | } 83 | 84 | var config *rest.Config 85 | if opts.Kubeconfig != "" { 86 | config, err = clientcmd.BuildConfigFromFlags("", opts.Kubeconfig) 87 | } else { 88 | // creates the in-cluster config 89 | config, err = rest.InClusterConfig() 90 | } 91 | if err != nil { 92 | klog.Fatalf("can not create client-go configuration: %v", err) 93 | } 94 | 95 | // use protobuf for better performance at scale 96 | // https://kubernetes.io/docs/reference/using-api/api-concepts/#alternate-representations-of-resources 97 | config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" 98 | config.ContentType = "application/vnd.kubernetes.protobuf" 99 | 100 | // creates the clientset 101 | clientset, err := kubernetes.NewForConfig(config) 102 | if err != nil { 103 | panic(err.Error()) 104 | } 105 | 106 | informersFactory := informers.NewSharedInformerFactory(clientset, 0) 107 | nsInformer := informersFactory.Core().V1().Namespaces() 108 | networkPolicyInfomer := informersFactory.Networking().V1().NetworkPolicies() 109 | podInformer := informersFactory.Core().V1().Pods() 110 | 111 | // Set the memory-saving transform function on the pod 
informer.
112 | err = podInformer.Informer().SetTransform(func(obj interface{}) (interface{}, error) {
113 | if accessor, err := meta.Accessor(obj); err == nil {
114 | accessor.SetManagedFields(nil)
115 | }
116 | return obj, nil
117 | })
118 | if err != nil {
119 | klog.Fatalf("Failed to set pod informer transform: %v", err)
120 | }
121 |
122 | // Create the Pod IP resolvers.
123 | // First, given an IP address they return the Pod name/namespace.
124 | informerResolver, err := podinfo.NewInformerResolver(podInformer.Informer())
125 | if err != nil {
126 | klog.Fatalf("Failed to create informer resolver: %v", err)
127 | }
128 | resolvers := []podinfo.IPResolver{informerResolver}
129 |
130 | // Create an NRI Pod IP resolver if enabled, since NRI connects to the container runtime
131 | // the Pod and IP information is provided at the time the Pod Sandbox is created and before
132 | // the containers start running, so policies can be enforced without race conditions.
133 | if !opts.DisableNRI {
134 | nriIPResolver, err := podinfo.NewNRIResolver(ctx, nodeName, nil)
135 | if err != nil {
136 | klog.Infof("failed to create NRI plugin, using apiserver information only: %v", err)
// BUGFIX: only register the NRI resolver when it was created successfully.
// Previously the resolver value was appended even when NewNRIResolver
// returned an error, contradicting the "using apiserver information only"
// fallback above and risking a nil resolver in the list.
137 | } else {
138 | resolvers = append(resolvers, nriIPResolver)
}
139 | }
140 |
141 | // Create the pod info provider to obtain the Pod information
142 | // necessary for the network policy evaluation, it uses the resolvers
143 | // to obtain the key (Pod name and namespace) and use the informers to obtain
144 | // the labels that are necessary to match the network policies.
145 | podInfoProvider := podinfo.NewInformerProvider(
146 | podInformer,
147 | nsInformer,
148 | nil,
149 | resolvers)
150 |
151 | // Create the evaluators for the Pipeline to process the packets
152 | // and take a network policy action. The evaluators are processed
153 | // by the order in the array.
154 | evaluators := []api.PolicyEvaluator{}
155 |
156 | // Logging evaluator must go first if enabled.
157 | if klog.V(2).Enabled() { 158 | evaluators = append(evaluators, networkpolicy.NewLoggingPolicy()) 159 | } 160 | 161 | // Standard Network Policy goes after AdminNetworkPolicy and before BaselineAdminNetworkPolicy 162 | evaluators = append(evaluators, networkpolicy.NewStandardNetworkPolicy( 163 | nodeName, 164 | nsInformer, 165 | podInformer, 166 | networkPolicyInfomer, 167 | )) 168 | 169 | informersFactory.Start(ctx.Done()) 170 | 171 | cmd.Start(ctx, networkpolicy.NewPolicyEngine(podInfoProvider, evaluators), dpCfg, opts.MetricsBindAddress) 172 | 173 | <-ctx.Done() 174 | logger.Info("Received termination signal, starting cleanup...") 175 | // grace period to cleanup resources 176 | time.Sleep(5 * time.Second) 177 | logger.Info("Cleanup completed, exiting...") 178 | return 0 179 | } 180 | -------------------------------------------------------------------------------- /.github/workflows/e2e.yml: -------------------------------------------------------------------------------- 1 | name: e2e 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | tags: 8 | - 'v*' 9 | pull_request: 10 | branches: [ main ] 11 | workflow_dispatch: 12 | 13 | env: 14 | GO_VERSION: "1.24" 15 | K8S_VERSION: "v1.33.1" 16 | KIND_VERSION: "v0.29.0" 17 | KIND_CLUSTER_NAME: kind 18 | 19 | permissions: write-all 20 | 21 | jobs: 22 | build: 23 | name: build 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: Set up Go 27 | uses: actions/setup-go@v6 28 | with: 29 | go-version: ${{ env.GO_VERSION }} 30 | id: go 31 | 32 | - name: Check out code 33 | uses: actions/checkout@v5 34 | 35 | - name: Build 36 | run: | 37 | REGISTRY="registry.k8s.io/networking" IMAGE_NAME="kube-network-policies" TAG="test" make image-build-standard 38 | mkdir _output 39 | docker save registry.k8s.io/networking/kube-network-policies:test > _output/kube-network-policies-image.tar 40 | 41 | - uses: actions/upload-artifact@v5 42 | with: 43 | name: test-image 44 | path: _output/kube-network-policies-image.tar 45 | 46 | e2e: 47 | 
name: e2e 48 | runs-on: ubuntu-22.04 49 | timeout-minutes: 100 50 | needs: 51 | - build 52 | strategy: 53 | fail-fast: false 54 | matrix: 55 | # TODO add "dual", waiting on KEP https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/3705-cloud-node-ips 56 | ipFamily: ["ipv4", "ipv6"] 57 | # TODO add nftables (beta in 1.31) waiting for 1.31 images 58 | proxyMode: ["iptables", "ipvs"] 59 | env: 60 | JOB_NAME: "kube-network-policies-${{ matrix.ipFamily }}-${{ matrix.proxyMode }}" 61 | IP_FAMILY: ${{ matrix.ipFamily }} 62 | KUBEPROXY_MODE: ${{ matrix.proxyMode }} 63 | steps: 64 | - name: Check out code 65 | uses: actions/checkout@v5 66 | 67 | - name: Enable ipv4 and ipv6 forwarding 68 | run: | 69 | sudo sysctl -w net.ipv6.conf.all.forwarding=1 70 | sudo sysctl -w net.ipv4.ip_forward=1 71 | 72 | - name: Set up environment (download dependencies) 73 | run: | 74 | TMP_DIR=$(mktemp -d) 75 | # Test binaries 76 | curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/kubernetes-test-linux-amd64.tar.gz -o ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz 77 | tar xvzf ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz \ 78 | --directory ${TMP_DIR} \ 79 | --strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test 80 | # kubectl 81 | curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl 82 | # kind 83 | curl -Lo ${TMP_DIR}/kind https://kind.sigs.k8s.io/dl/${{ env.KIND_VERSION }}/kind-linux-amd64 84 | # Install 85 | sudo cp ${TMP_DIR}/ginkgo /usr/local/bin/ginkgo 86 | sudo cp ${TMP_DIR}/e2e.test /usr/local/bin/e2e.test 87 | sudo cp ${TMP_DIR}/kubectl /usr/local/bin/kubectl 88 | sudo cp ${TMP_DIR}/kind /usr/local/bin/kind 89 | sudo chmod +x /usr/local/bin/ginkgo 90 | sudo chmod +x /usr/local/bin/e2e.test 91 | sudo chmod +x /usr/local/bin/kubectl 92 | sudo chmod +x /usr/local/bin/kind 93 | 94 | - name: Create multi node cluster 95 | run: | 96 | # output_dir 97 | mkdir -p _artifacts 98 | # create cluster 99 | cat < 
_artifacts/kubeconfig.conf 115 | 116 | - uses: actions/download-artifact@v6 117 | with: 118 | name: test-image 119 | 120 | - name: Install kube-network-policies 121 | run: | 122 | # stop kindnet of applying network policies 123 | kubectl -n kube-system set image ds kindnet kindnet-cni=docker.io/kindest/kindnetd:v20230809-80a64d96 124 | # preload kube-network-policies image 125 | docker load --input kube-network-policies-image.tar 126 | /usr/local/bin/kind load docker-image registry.k8s.io/networking/kube-network-policies:test --name ${{ env.KIND_CLUSTER_NAME}} 127 | sed -i s#registry.k8s.io/networking/kube-network-policies.*#registry.k8s.io/networking/kube-network-policies:test# install.yaml 128 | /usr/local/bin/kubectl apply -f ./install.yaml 129 | 130 | - name: Get Cluster status 131 | run: | 132 | # wait network is ready 133 | sleep 5 134 | /usr/local/bin/kubectl get nodes -o wide 135 | /usr/local/bin/kubectl get pods -A 136 | /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns 137 | /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods --namespace=kube-system -l app=kube-network-policies 138 | 139 | - name: Run tests 140 | run: | 141 | export KUBERNETES_CONFORMANCE_TEST='y' 142 | export E2E_REPORT_DIR=${PWD}/_artifacts 143 | 144 | # Run tests 145 | /usr/local/bin/ginkgo --nodes=25 \ 146 | --focus="Netpol" \ 147 | /usr/local/bin/e2e.test \ 148 | -- \ 149 | --kubeconfig=${PWD}/_artifacts/kubeconfig.conf \ 150 | --provider=local \ 151 | --dump-logs-on-failure=false \ 152 | --report-dir=${E2E_REPORT_DIR} \ 153 | --disable-log-dump=true 154 | 155 | - name: Upload Junit Reports 156 | if: always() 157 | uses: actions/upload-artifact@v5 158 | with: 159 | name: kind-junit-${{ env.JOB_NAME }}-${{ github.run_id }} 160 | path: './_artifacts/*.xml' 161 | 162 | - name: Export logs 163 | if: always() 164 | run: | 165 | /usr/local/bin/kind export logs --name ${KIND_CLUSTER_NAME} ./_artifacts/logs 166 | 
167 | - name: Upload logs 168 | if: always() 169 | uses: actions/upload-artifact@v5 170 | with: 171 | name: kind-logs-${{ env.JOB_NAME }}-${{ github.run_id }} 172 | path: ./_artifacts/logs 173 | -------------------------------------------------------------------------------- /tests/e2e_iptracker.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | setup_file() { 4 | export REGISTRY="registry.k8s.io/networking" 5 | export IMAGE_NAME="kube-network-policies" 6 | export TAG="test" 7 | 8 | # Build the image for the iptracker binary and amd64 architecture 9 | ( 10 | cd "$BATS_TEST_DIRNAME"/.. 11 | TAG="$TAG" make image-build-iptracker 12 | TAG="$TAG" make image-build-kube-ip-tracker-standard 13 | ) 14 | 15 | # Load the Docker image into the kind cluster 16 | kind load docker-image "$REGISTRY/$IMAGE_NAME:$TAG"-iptracker --name "$CLUSTER_NAME" 17 | kind load docker-image "$REGISTRY/kube-ip-tracker:$TAG" --name "$CLUSTER_NAME" 18 | 19 | # Install kube-network-policies 20 | _install=$(sed "s#$REGISTRY/$IMAGE_NAME.*#$REGISTRY/$IMAGE_NAME:$TAG-iptracker#" < "$BATS_TEST_DIRNAME"/../install-iptracker.yaml | sed "s#$REGISTRY/kube-ip-tracker.*#$REGISTRY/kube-ip-tracker:$TAG#") 21 | printf '%s' "${_install}" | kubectl apply -f - 22 | kubectl wait --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-network-policies 23 | } 24 | 25 | teardown_file() { 26 | _install=$(sed "s#$REGISTRY/$IMAGE_NAME.*#$REGISTRY/$IMAGE_NAME:$TAG-iptracker#" < "$BATS_TEST_DIRNAME"/../install-iptracker.yaml | sed "s#$REGISTRY/kube-ip-tracker.*#$REGISTRY/kube-ip-tracker:$TAG#") 27 | printf '%s' "${_install}" | kubectl delete -f - 28 | } 29 | 30 | setup() { 31 | kubectl create namespace dev 32 | kubectl label namespace/dev purpose=testing 33 | 34 | kubectl create namespace prod 35 | kubectl label namespace/prod purpose=production 36 | } 37 | 38 | teardown() { 39 | kubectl delete namespace prod 40 | kubectl delete namespace dev 41 
| } 42 | 43 | # https://github.com/kubernetes-sigs/kube-network-policies/issues/150 44 | @test "iptracker: liveness probes" { 45 | kubectl apply -f - </dev/null || echo "") 176 | test -n "$TARGET_IPv6" 177 | 178 | # query should work 179 | output=$(kubectl exec client-pod -n dev -- ping6 -c 2 -W 5 "$TARGET_IPv6" > /dev/null 2>&1 && echo ok || echo fail) 180 | test "$output" = "ok" 181 | 182 | kubectl apply -f - < /dev/null 2>&1 && echo ok || echo fail) 202 | test "$output" = "fail" 203 | 204 | # cleanup 205 | kubectl -n dev delete pod client-pod 206 | kubectl -n prod delete pod target-pod 207 | kubectl -n prod delete networkpolicy allow-same-namespace 208 | } 209 | -------------------------------------------------------------------------------- /pkg/dataplane/conntrack_test.go: -------------------------------------------------------------------------------- 1 | package dataplane 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | ) 7 | 8 | func TestGenerateLabelMask(t *testing.T) { 9 | // The expected results are derived from the nftables debug output, 10 | // serialized as a 16-byte Big-Endian array (MSW first, LSW last). 11 | tests := []struct { 12 | name string 13 | bitIndex int 14 | expected string // Expected 16-byte hex string 15 | }{ 16 | { 17 | name: "Bit 10 (LSW)", 18 | bitIndex: 10, 19 | // Bit 10 is 2^10 = 0x400. This is in the LSW (last 8 bytes). 20 | expected: "00000000000000000000000000000400", 21 | }, 22 | { 23 | name: "Bit 126 (MSW)", 24 | bitIndex: 126, 25 | // Bit 126 is 2^62 within the 64-bit MSW (first 8 bytes). 0x4000000000000000 26 | expected: "40000000000000000000000000000000", 27 | }, 28 | { 29 | name: "Bit 127 (MSW)", 30 | bitIndex: 127, 31 | // Bit 127 is 2^63 within the 64-bit MSW (first 8 bytes). 0x8000000000000000 32 | expected: "80000000000000000000000000000000", 33 | }, 34 | { 35 | name: "Bit 0 (LSW Start)", 36 | bitIndex: 0, 37 | // 2^0 = 0x1. In the LSW (last byte). 
38 | expected: "00000000000000000000000000000001", 39 | }, 40 | { 41 | name: "Bit 63 (LSW End)", 42 | bitIndex: 63, 43 | // 2^63 = 0x8000000000000000. In the LSW (last 8 bytes). 44 | expected: "00000000000000008000000000000000", 45 | }, 46 | { 47 | name: "Bit 64 (MSW Start)", 48 | bitIndex: 64, 49 | // 2^0 (within the MSW). In the MSW (first 8 bytes). 50 | expected: "00000000000000010000000000000000", 51 | }, 52 | { 53 | name: "Out of Range (128)", 54 | bitIndex: 128, 55 | // Expected 16 zero bytes: "00...00" 56 | expected: "00000000000000000000000000000000", 57 | }, 58 | { 59 | name: "Out of Range (-1)", 60 | bitIndex: -1, 61 | // Expected 16 zero bytes: "00...00" 62 | expected: "00000000000000000000000000000000", 63 | }, 64 | } 65 | 66 | for _, tt := range tests { 67 | t.Run(tt.name, func(t *testing.T) { 68 | // Call the function 69 | result := generateLabelMask(tt.bitIndex) 70 | 71 | // Convert result to hex string for easy comparison 72 | actualHex := hex.EncodeToString(result) 73 | 74 | // Compare the actual result with the expected hex string 75 | if actualHex != tt.expected { 76 | t.Errorf("generateLabelMask() for index %d:\n Got: %v\n Want: %v", tt.bitIndex, actualHex, tt.expected) 77 | } 78 | }) 79 | } 80 | } 81 | 82 | // TestClearLabelBit tests the clearLabelBit function across various scenarios. 83 | func TestClearLabelBit(t *testing.T) { 84 | // Helper function to convert a hex string to a byte slice 85 | mustDecodeHex := func(s string) []byte { 86 | b, err := hex.DecodeString(s) 87 | if err != nil { 88 | panic(err) 89 | } 90 | return b 91 | } 92 | 93 | // A base label with bits 10, 63, 64, and 127 set. 
94 | // Bit 127 (MSW: 0x8000000000000000) 95 | // Bit 64 (MSW: 0x0000000000000001) 96 | // Bit 63 (LSW: 0x8000000000000000) 97 | // Bit 10 (LSW: 0x0000000000000400) 98 | // Base Hex: 80000000000000018000000000000400 99 | baseLabelHex := "80000000000000018000000000000400" 100 | baseLabel := mustDecodeHex(baseLabelHex) 101 | 102 | tests := []struct { 103 | name string 104 | initialLabel []byte 105 | bitIndex int 106 | expectedHex string 107 | expectChange bool // Used to verify if the original array remains untouched 108 | }{ 109 | { 110 | name: "Clear Bit 10 (LSW Middle)", 111 | initialLabel: baseLabel, 112 | bitIndex: 10, 113 | // Expected: Bit 10 (0x400) cleared -> 8000...018000...0000 114 | expectedHex: "80000000000000018000000000000000", 115 | expectChange: true, 116 | }, 117 | { 118 | name: "Clear Bit 127 (MSW End)", 119 | initialLabel: baseLabel, 120 | bitIndex: 127, 121 | // Expected: Bit 127 (0x80...) cleared -> 0000...018000...0400 122 | expectedHex: "00000000000000018000000000000400", 123 | expectChange: true, 124 | }, 125 | { 126 | name: "Clear Bit 63 (LSW End Boundary)", 127 | initialLabel: baseLabel, 128 | bitIndex: 63, 129 | // Expected: Bit 63 (0x80...) 
cleared -> 8000...010000...0400 130 | expectedHex: "80000000000000010000000000000400", 131 | expectChange: true, 132 | }, 133 | { 134 | name: "Clear Bit 64 (MSW Start Boundary)", 135 | initialLabel: baseLabel, 136 | bitIndex: 64, 137 | // Expected: Bit 64 (0x01) cleared -> 8000...008000...0400 138 | expectedHex: "80000000000000008000000000000400", 139 | expectChange: true, 140 | }, 141 | { 142 | name: "Clear Bit 0 (LSW Start Boundary)", 143 | initialLabel: mustDecodeHex("00000000000000000000000000000001"), // Only bit 0 set 144 | bitIndex: 0, 145 | // Expected: All zeros 146 | expectedHex: "00000000000000000000000000000000", 147 | expectChange: true, 148 | }, 149 | { 150 | name: "Clear Bit Already Zero (Bit 50)", 151 | initialLabel: baseLabel, 152 | bitIndex: 50, // Bit 50 is zero in the base label 153 | expectedHex: baseLabelHex, 154 | expectChange: true, // A copy is still returned, but the content is the same 155 | }, 156 | { 157 | name: "Out of Range (128)", 158 | initialLabel: baseLabel, 159 | bitIndex: 128, 160 | expectedHex: baseLabelHex, 161 | expectChange: true, // A copy is still returned, but the content is the same 162 | }, 163 | { 164 | name: "Out of Range (-1)", 165 | initialLabel: baseLabel, 166 | bitIndex: -1, 167 | expectedHex: baseLabelHex, 168 | expectChange: true, // A copy is still returned, but the content is the same 169 | }, 170 | { 171 | name: "Invalid Length (10 bytes)", 172 | initialLabel: mustDecodeHex("F0F0F0F0F0"), // Only 5 bytes 173 | bitIndex: 10, 174 | expectedHex: "00000000000000000000000000000000", // Should return 16 zero bytes 175 | expectChange: true, 176 | }, 177 | } 178 | 179 | for _, tt := range tests { 180 | t.Run(tt.name, func(t *testing.T) { 181 | // Save the original hex string for verification 182 | originalHex := hex.EncodeToString(tt.initialLabel) 183 | 184 | // Execute the function 185 | result := clearLabelBit(tt.initialLabel, tt.bitIndex) 186 | 187 | actualHex := hex.EncodeToString(result) 188 | if actualHex != 
tt.expectedHex { 189 | t.Errorf("Result Mismatch for index %d:\n Got: %s\n Want: %s", tt.bitIndex, actualHex, tt.expectedHex) 190 | } 191 | 192 | if len(tt.initialLabel) == 16 && originalHex != hex.EncodeToString(tt.initialLabel) { 193 | t.Errorf("Original array was modified!\n Initial: %s\n After call: %s", originalHex, hex.EncodeToString(tt.initialLabel)) 194 | } 195 | }) 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /pkg/dataplane/metrics.go: -------------------------------------------------------------------------------- 1 | package dataplane 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "io" 8 | "os" 9 | "strconv" 10 | "strings" 11 | "sync" 12 | 13 | "github.com/prometheus/client_golang/prometheus" 14 | "k8s.io/klog/v2" 15 | ) 16 | 17 | var ( 18 | packetProcessingHist = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 19 | Namespace: "kube_network_policies", 20 | Name: "packet_process_time", 21 | Help: "Time it has taken to process each packet (microseconds)", 22 | Buckets: []float64{1, 10, 50, 200, 500, 750, 1000, 2000, 5000, 10000, 100000}, 23 | }, []string{"protocol", "family"}) 24 | 25 | packetProcessingSum = prometheus.NewSummary(prometheus.SummaryOpts{ 26 | Namespace: "kube_network_policies", 27 | Name: "packet_process_duration_microseconds", 28 | Help: "A summary of the packet processing durations in microseconds.", 29 | Objectives: map[float64]float64{ 30 | 0.5: 0.05, // 50th percentile with a max. absolute error of 0.05. 31 | 0.9: 0.01, // 90th percentile with a max. absolute error of 0.01. 32 | 0.99: 0.001, // 99th percentile with a max. absolute error of 0.001. 
		},
	})

	// packetCounterVec counts processed packets, partitioned by protocol,
	// IP family and the final verdict applied to the packet.
	packetCounterVec = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "kube_network_policies",
		Name:      "packet_count",
		Help:      "Number of packets",
	}, []string{"protocol", "family", "verdict"})

	nfqueueQueueTotal = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "kube_network_policies",
		Name:      "nfqueue_queue_total",
		Help:      "The number of packets currently queued and waiting to be processed by the application",
	}, []string{"queue"})

	nfqueueQueueDropped = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "kube_network_policies",
		Name:      "nfqueue_queue_dropped",
		Help:      "Number of packets that had to be dropped by the kernel because too many packets are already waiting for user space to send back the mandatory accept/drop verdicts",
	}, []string{"queue"})

	nfqueueUserDropped = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "kube_network_policies",
		Name:      "nfqueue_user_dropped",
		Help:      "Number of packets that were dropped within the netlink subsystem. Such drops usually happen when the corresponding socket buffer is full; that is, user space is not able to read messages fast enough",
	}, []string{"queue"})

	nfqueuePacketID = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "kube_network_policies",
		Name:      "nfqueue_packet_id",
		Help:      "ID of the most recent packet queued.",
	}, []string{"queue"})
)

// registerMetricsOnce guards registerMetrics so the collectors are
// registered with the default Prometheus registry at most once.
var registerMetricsOnce sync.Once

// registerMetrics registers the kube-network-policies dataplane metrics.
// registerMetrics registers the Prometheus collectors declared above with
// the default registry. It is safe to call repeatedly: registerMetricsOnce
// ensures the MustRegister calls run only on the first invocation.
// NOTE(review): ctx is accepted but unused in this function — confirm
// whether it is kept for call-site symmetry or can be dropped.
func registerMetrics(ctx context.Context) {
	registerMetricsOnce.Do(func() {
		klog.Infof("Registering metrics")
		prometheus.MustRegister(packetProcessingHist)
		prometheus.MustRegister(packetProcessingSum)
		prometheus.MustRegister(packetCounterVec)
		prometheus.MustRegister(nfqueueQueueTotal)
		prometheus.MustRegister(nfqueueQueueDropped)
		prometheus.MustRegister(nfqueueUserDropped)
		prometheus.MustRegister(nfqueuePacketID)
	})
}

// nfnetlinkQueue is one parsed row of /proc/net/netfilter/nfnetlink_queue.
// https://man7.org/linux/man-pages/man5/proc.5.html
type nfnetlinkQueue struct {
	queue_number  string // The ID of the queue. This matches what is specified in the --queue-num or --queue-balance options.
	peer_portid   int    // The netlink port ID subscribed to the queue.
	queue_total   int    // The number of packets currently queued and waiting to be processed by the application.
	copy_mode     int    // The copy mode of the queue. It is either 1 (metadata only) or 2 (also copy payload data to user space).
	copy_range    int    // Copy range; that is, how many bytes of packet payload should be copied to user space at most.
	queue_dropped int    // Number of packets that had to be dropped by the kernel because too many packets are already waiting for user space to send back the mandatory accept/drop verdicts.
	user_dropped  int    // Number of packets that were dropped within the netlink subsystem. Such drops usually happen when the corresponding socket buffer is full; that is, user space is not able to read messages fast enough.
	id_sequence   int    // sequence number. Every queued packet is associated with a (32-bit) monotonically increasing sequence number. This shows the ID of the most recent packet queued.
	// dummy int // Field is always '1' and is ignored, only kept for compatibility reasons.
94 | } 95 | 96 | func readNfnetlinkQueueStats() ([]nfnetlinkQueue, error) { 97 | const maxBufferSize = 1024 * 1024 98 | 99 | f, err := os.Open("/proc/net/netfilter/nfnetlink_queue") 100 | if err != nil { 101 | return nil, err 102 | } 103 | defer f.Close() 104 | 105 | entries := []nfnetlinkQueue{} 106 | reader := io.LimitReader(f, maxBufferSize) 107 | 108 | scanner := bufio.NewScanner(reader) 109 | for scanner.Scan() { 110 | fields := strings.Fields(scanner.Text()) 111 | if len(fields) != 9 { 112 | return nil, fmt.Errorf("unexpected number of entries, got %d expected %d", len(fields), 9) 113 | } 114 | 115 | queue_number := fields[0] 116 | 117 | peer_portid, err := parseNfqueueField(fields[1]) 118 | if err != nil { 119 | return nil, err 120 | } 121 | queue_total, err := parseNfqueueField(fields[2]) 122 | if err != nil { 123 | return nil, err 124 | } 125 | copy_mode, err := parseNfqueueField(fields[3]) 126 | if err != nil { 127 | return nil, err 128 | } 129 | copy_range, err := parseNfqueueField(fields[4]) 130 | if err != nil { 131 | return nil, err 132 | } 133 | queue_dropped, err := parseNfqueueField(fields[5]) 134 | if err != nil { 135 | return nil, err 136 | } 137 | user_dropped, err := parseNfqueueField(fields[6]) 138 | if err != nil { 139 | return nil, err 140 | } 141 | id_sequence, err := parseNfqueueField(fields[7]) 142 | if err != nil { 143 | return nil, err 144 | } 145 | 146 | nfqueueEntry := nfnetlinkQueue{ 147 | queue_number: queue_number, 148 | peer_portid: peer_portid, 149 | queue_total: queue_total, 150 | copy_mode: copy_mode, 151 | copy_range: copy_range, 152 | queue_dropped: queue_dropped, 153 | user_dropped: user_dropped, 154 | id_sequence: id_sequence, 155 | } 156 | 157 | entries = append(entries, nfqueueEntry) 158 | } 159 | return entries, nil 160 | } 161 | 162 | func parseNfqueueField(field string) (int, error) { 163 | val, err := strconv.Atoi(field) 164 | if err != nil { 165 | return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) 166 | 
} 167 | return val, err 168 | } 169 | -------------------------------------------------------------------------------- /pkg/networkpolicy/helpers_test.go: -------------------------------------------------------------------------------- 1 | package networkpolicy 2 | 3 | import ( 4 | "testing" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // TestMatchesSelector contains the unit tests. 10 | func TestMatchesSelector(t *testing.T) { 11 | // Define the test table 12 | tests := []struct { 13 | name string 14 | selector *metav1.LabelSelector 15 | labels map[string]string 16 | want bool 17 | }{ 18 | { 19 | name: "nil selector should not match", 20 | selector: nil, 21 | labels: map[string]string{"app": "test"}, 22 | want: false, 23 | }, 24 | { 25 | name: "empty selector matches any labels", 26 | selector: &metav1.LabelSelector{}, 27 | labels: map[string]string{"app": "test"}, 28 | want: true, 29 | }, 30 | { 31 | name: "empty selector matches empty labels", 32 | selector: &metav1.LabelSelector{}, 33 | labels: map[string]string{}, 34 | want: true, 35 | }, 36 | { 37 | name: "empty selector matches nil labels", 38 | selector: &metav1.LabelSelector{}, 39 | labels: nil, 40 | want: true, 41 | }, 42 | { 43 | name: "MatchLabels: simple match", 44 | selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}, 45 | labels: map[string]string{"app": "test"}, 46 | want: true, 47 | }, 48 | { 49 | name: "MatchLabels: subset match", 50 | selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}, 51 | labels: map[string]string{"app": "test", "env": "prod"}, 52 | want: true, 53 | }, 54 | { 55 | name: "MatchLabels: multi-label match", 56 | selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test", "env": "prod"}}, 57 | labels: map[string]string{"app": "test", "env": "prod"}, 58 | want: true, 59 | }, 60 | { 61 | name: "MatchLabels: value mismatch", 62 | selector: &metav1.LabelSelector{MatchLabels: 
map[string]string{"app": "test"}}, 63 | labels: map[string]string{"app": "wrong"}, 64 | want: false, 65 | }, 66 | { 67 | name: "MatchLabels: key missing", 68 | selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}, 69 | labels: map[string]string{"environment": "prod"}, 70 | want: false, 71 | }, 72 | { 73 | name: "MatchLabels: label empty", 74 | selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}, 75 | labels: map[string]string{}, 76 | want: false, 77 | }, 78 | { 79 | name: "MatchLabels: label missing", 80 | selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "test"}}, 81 | labels: nil, 82 | want: false, 83 | }, 84 | { 85 | name: "MatchExpressions: 'In' operator match", 86 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 87 | {Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend", "backend"}}, 88 | }}, 89 | labels: map[string]string{"tier": "frontend"}, 90 | want: true, 91 | }, 92 | { 93 | name: "MatchExpressions: 'In' operator no match", 94 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 95 | {Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend", "backend"}}, 96 | }}, 97 | labels: map[string]string{"tier": "database"}, 98 | want: false, 99 | }, 100 | { 101 | name: "MatchExpressions: 'NotIn' operator match", 102 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 103 | {Key: "tier", Operator: metav1.LabelSelectorOpNotIn, Values: []string{"frontend", "backend"}}, 104 | }}, 105 | labels: map[string]string{"tier": "database"}, 106 | want: true, 107 | }, 108 | { 109 | name: "MatchExpressions: 'NotIn' operator no match", 110 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 111 | {Key: "tier", Operator: metav1.LabelSelectorOpNotIn, Values: []string{"frontend", "backend"}}, 112 | }}, 113 | labels: 
map[string]string{"tier": "frontend"}, 114 | want: false, 115 | }, 116 | { 117 | name: "MatchExpressions: 'Exists' operator match", 118 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 119 | {Key: "env", Operator: metav1.LabelSelectorOpExists}, 120 | }}, 121 | labels: map[string]string{"env": "production"}, 122 | want: true, 123 | }, 124 | { 125 | name: "MatchExpressions: 'Exists' operator no match", 126 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 127 | {Key: "env", Operator: metav1.LabelSelectorOpExists}, 128 | }}, 129 | labels: map[string]string{"app": "test"}, 130 | want: false, 131 | }, 132 | { 133 | name: "MatchExpressions: 'DoesNotExist' operator match", 134 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 135 | {Key: "env", Operator: metav1.LabelSelectorOpDoesNotExist}, 136 | }}, 137 | labels: map[string]string{"app": "test"}, 138 | want: true, 139 | }, 140 | { 141 | name: "MatchExpressions: 'DoesNotExist' operator no match", 142 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 143 | {Key: "env", Operator: metav1.LabelSelectorOpDoesNotExist}, 144 | }}, 145 | labels: map[string]string{"app": "test", "env": "prod"}, 146 | want: false, 147 | }, 148 | { 149 | name: "Combined: MatchLabels and MatchExpressions both match", 150 | selector: &metav1.LabelSelector{ 151 | MatchLabels: map[string]string{"app": "database"}, 152 | MatchExpressions: []metav1.LabelSelectorRequirement{ 153 | {Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"cache", "storage"}}, 154 | }, 155 | }, 156 | labels: map[string]string{"app": "database", "tier": "storage"}, 157 | want: true, 158 | }, 159 | { 160 | name: "Combined: MatchLabels fail", 161 | selector: &metav1.LabelSelector{ 162 | MatchLabels: map[string]string{"app": "database"}, 163 | MatchExpressions: []metav1.LabelSelectorRequirement{ 164 | {Key: "tier", 
Operator: metav1.LabelSelectorOpIn, Values: []string{"cache", "storage"}}, 165 | }, 166 | }, 167 | labels: map[string]string{"app": "wrong", "tier": "storage"}, 168 | want: false, 169 | }, 170 | { 171 | name: "Combined: MatchExpressions fail", 172 | selector: &metav1.LabelSelector{ 173 | MatchLabels: map[string]string{"app": "database"}, 174 | MatchExpressions: []metav1.LabelSelectorRequirement{ 175 | {Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"cache", "storage"}}, 176 | }, 177 | }, 178 | labels: map[string]string{"app": "database", "tier": "frontend"}, 179 | want: false, 180 | }, 181 | { 182 | name: "Invalid operator should not match", 183 | selector: &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{ 184 | {Key: "tier", Operator: "bad-operator", Values: []string{"val"}}, 185 | }}, 186 | labels: map[string]string{"tier": "val"}, 187 | want: false, // Fails because LabelSelectorAsSelector returns an error 188 | }, 189 | } 190 | 191 | // Run the tests 192 | for _, tt := range tests { 193 | t.Run(tt.name, func(t *testing.T) { 194 | if got := MatchesSelector(tt.selector, tt.labels); got != tt.want { 195 | t.Errorf("matchesSelector() = %v, want %v", got, tt.want) 196 | } 197 | }) 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /.github/workflows/iptracker.yml: -------------------------------------------------------------------------------- 1 | name: e2e_iptracker 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | tags: 8 | - 'v*' 9 | pull_request: 10 | branches: [ main ] 11 | workflow_dispatch: 12 | 13 | env: 14 | GO_VERSION: "1.24" 15 | K8S_VERSION: "v1.33.1" 16 | KIND_VERSION: "v0.29.0" 17 | KIND_CLUSTER_NAME: kind 18 | 19 | permissions: write-all 20 | 21 | jobs: 22 | build: 23 | name: build 24 | runs-on: ubuntu-latest 25 | steps: 26 | - name: Set up Go 27 | uses: actions/setup-go@v6 28 | with: 29 | go-version: ${{ env.GO_VERSION }} 30 | id: go 31 | 32 | - name: 
Check out code 33 | uses: actions/checkout@v5 34 | 35 | - name: Build 36 | run: | 37 | mkdir _output 38 | # Build kube-network-policies image 39 | REGISTRY="registry.k8s.io/networking" IMAGE_NAME="kube-network-policies" TAG="test" make image-build-iptracker 40 | docker save registry.k8s.io/networking/kube-network-policies:test-iptracker > _output/kube-network-policies-image.tar 41 | # Build kube-ip-tracker image 42 | REGISTRY="registry.k8s.io/networking" IMAGE_NAME="kube-ip-tracker" TAG="test" make image-build-kube-ip-tracker-standard 43 | docker save registry.k8s.io/networking/kube-ip-tracker:test > _output/kube-ip-tracker-image.tar 44 | 45 | - uses: actions/upload-artifact@v5 46 | with: 47 | name: test-image 48 | path: _output/kube-network-policies-image.tar 49 | 50 | - uses: actions/upload-artifact@v5 51 | with: 52 | name: test-image-tracker 53 | path: _output/kube-ip-tracker-image.tar 54 | 55 | e2e_iptracker: 56 | name: e2e 57 | runs-on: ubuntu-22.04 58 | timeout-minutes: 100 59 | needs: 60 | - build 61 | strategy: 62 | fail-fast: false 63 | matrix: 64 | # TODO add "dual", waiting on KEP https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/3705-cloud-node-ips 65 | ipFamily: ["ipv4", "ipv6"] 66 | proxyMode: ["iptables"] 67 | env: 68 | JOB_NAME: "kube-network-policies-${{ matrix.ipFamily }}-${{ matrix.proxyMode }}" 69 | IP_FAMILY: ${{ matrix.ipFamily }} 70 | KUBEPROXY_MODE: ${{ matrix.proxyMode }} 71 | steps: 72 | - name: Check out code 73 | uses: actions/checkout@v5 74 | 75 | - name: Enable ipv4 and ipv6 forwarding 76 | run: | 77 | sudo sysctl -w net.ipv6.conf.all.forwarding=1 78 | sudo sysctl -w net.ipv4.ip_forward=1 79 | 80 | - name: Set up environment (download dependencies) 81 | run: | 82 | TMP_DIR=$(mktemp -d) 83 | # Test binaries 84 | curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/kubernetes-test-linux-amd64.tar.gz -o ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz 85 | tar xvzf ${TMP_DIR}/kubernetes-test-linux-amd64.tar.gz \ 86 | 
--directory ${TMP_DIR} \ 87 | --strip-components=3 kubernetes/test/bin/ginkgo kubernetes/test/bin/e2e.test 88 | # kubectl 89 | curl -L https://dl.k8s.io/${{ env.K8S_VERSION }}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl 90 | # kind 91 | curl -Lo ${TMP_DIR}/kind https://kind.sigs.k8s.io/dl/${{ env.KIND_VERSION }}/kind-linux-amd64 92 | # Install 93 | sudo cp ${TMP_DIR}/ginkgo /usr/local/bin/ginkgo 94 | sudo cp ${TMP_DIR}/e2e.test /usr/local/bin/e2e.test 95 | sudo cp ${TMP_DIR}/kubectl /usr/local/bin/kubectl 96 | sudo cp ${TMP_DIR}/kind /usr/local/bin/kind 97 | sudo chmod +x /usr/local/bin/ginkgo 98 | sudo chmod +x /usr/local/bin/e2e.test 99 | sudo chmod +x /usr/local/bin/kubectl 100 | sudo chmod +x /usr/local/bin/kind 101 | 102 | - name: Create multi node cluster 103 | run: | 104 | # output_dir 105 | mkdir -p _artifacts 106 | # create cluster 107 | cat < _artifacts/kubeconfig.conf 123 | 124 | - uses: actions/download-artifact@v6 125 | with: 126 | name: test-image 127 | 128 | - uses: actions/download-artifact@v6 129 | with: 130 | name: test-image-tracker 131 | 132 | - name: Install kube-network-policies 133 | run: | 134 | # stop kindnet of applying network policies 135 | kubectl -n kube-system set image ds kindnet kindnet-cni=docker.io/kindest/kindnetd:v20230809-80a64d96 136 | # preload kube-network-policies image 137 | docker load --input kube-network-policies-image.tar 138 | /usr/local/bin/kind load docker-image registry.k8s.io/networking/kube-network-policies:test-iptracker --name ${{ env.KIND_CLUSTER_NAME}} 139 | sed -i s#registry.k8s.io/networking/kube-network-policies.*#registry.k8s.io/networking/kube-network-policies:test-iptracker# install-iptracker.yaml 140 | # preload kube-ip-tracker image 141 | docker load --input kube-ip-tracker-image.tar 142 | /usr/local/bin/kind load docker-image registry.k8s.io/networking/kube-ip-tracker:test --name ${{ env.KIND_CLUSTER_NAME}} 143 | sed -i 
s#registry.k8s.io/networking/kube-ip-tracker.*#registry.k8s.io/networking/kube-ip-tracker:test# install-iptracker.yaml 144 | /usr/local/bin/kubectl apply -f ./install-iptracker.yaml 145 | 146 | - name: Get Cluster status 147 | run: | 148 | # wait network is ready 149 | sleep 5 150 | /usr/local/bin/kubectl get nodes -o wide 151 | /usr/local/bin/kubectl get pods -A 152 | /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods --namespace=kube-system -l k8s-app=kube-dns 153 | /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods --namespace=kube-system -l app=kube-network-policies 154 | /usr/local/bin/kubectl wait --timeout=1m --for=condition=ready pods --namespace=kube-system -l app=kube-ip-tracker 155 | 156 | - name: Run tests 157 | run: | 158 | export KUBERNETES_CONFORMANCE_TEST='y' 159 | export E2E_REPORT_DIR=${PWD}/_artifacts 160 | 161 | # Run tests 162 | /usr/local/bin/ginkgo --nodes=25 \ 163 | --focus="Netpol" \ 164 | /usr/local/bin/e2e.test \ 165 | -- \ 166 | --kubeconfig=${PWD}/_artifacts/kubeconfig.conf \ 167 | --provider=local \ 168 | --dump-logs-on-failure=false \ 169 | --report-dir=${E2E_REPORT_DIR} \ 170 | --disable-log-dump=true 171 | 172 | - name: Upload Junit Reports 173 | if: always() 174 | uses: actions/upload-artifact@v5 175 | with: 176 | name: kind-junit-${{ env.JOB_NAME }}-${{ github.run_id }} 177 | path: './_artifacts/*.xml' 178 | 179 | - name: Export logs 180 | if: always() 181 | run: | 182 | /usr/local/bin/kind export logs --name ${KIND_CLUSTER_NAME} ./_artifacts/logs 183 | 184 | - name: Upload logs 185 | if: always() 186 | uses: actions/upload-artifact@v5 187 | with: 188 | name: kind-logs-${{ env.JOB_NAME }}-${{ github.run_id }} 189 | path: ./_artifacts/logs 190 | -------------------------------------------------------------------------------- /cmd/kube-network-policies/npa-v1alpha1/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | 
"context" 5 | "flag" 6 | "fmt" 7 | "net" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | "time" 12 | 13 | "sigs.k8s.io/kube-network-policies/pkg/api" 14 | "sigs.k8s.io/kube-network-policies/pkg/cmd" 15 | "sigs.k8s.io/kube-network-policies/pkg/dataplane" 16 | "sigs.k8s.io/kube-network-policies/pkg/dns" 17 | "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" 18 | "sigs.k8s.io/kube-network-policies/pkg/podinfo" 19 | pluginsnpav1alpha1 "sigs.k8s.io/kube-network-policies/plugins/npa-v1alpha1" 20 | npaclient "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned" 21 | npainformers "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions" 22 | 23 | "k8s.io/apimachinery/pkg/api/meta" 24 | "k8s.io/client-go/informers" 25 | "k8s.io/client-go/tools/clientcmd" 26 | 27 | "k8s.io/client-go/kubernetes" 28 | "k8s.io/client-go/rest" 29 | "k8s.io/component-base/logs" 30 | logsapi "k8s.io/component-base/logs/api/v1" 31 | _ "k8s.io/component-base/logs/json/register" 32 | nodeutil "k8s.io/component-helpers/node/util" 33 | "k8s.io/klog/v2" 34 | ) 35 | 36 | // This is a pattern to ensure that deferred functions executes before os.Exit 37 | func main() { 38 | os.Exit(run()) 39 | } 40 | 41 | func run() int { 42 | // Setup logging 43 | logCfg := logsapi.NewLoggingConfiguration() 44 | logsapi.AddGoFlags(logCfg, flag.CommandLine) 45 | 46 | // Setup flags 47 | opts := cmd.NewOptions() 48 | opts.AddFlags(flag.CommandLine) 49 | 50 | flag.Parse() 51 | 52 | // init logging 53 | logs.InitLogs() 54 | if err := logsapi.ValidateAndApply(logCfg, nil); err != nil { 55 | fmt.Fprintf(os.Stderr, "%v\n", err) 56 | return 1 57 | } 58 | 59 | // Create a context for structured logging, and catch termination signals 60 | ctx, cancel := signal.NotifyContext( 61 | context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM) 62 | defer cancel() 63 | 64 | logger := klog.FromContext(ctx) 65 | logger.Info("called", "args", flag.Args()) 66 | 67 | flag.VisitAll(func(flag *flag.Flag) { 68 | 
logger.Info("flag", "name", flag.Name, "value", flag.Value) 69 | }) 70 | 71 | if _, _, err := net.SplitHostPort(opts.MetricsBindAddress); err != nil { 72 | logger.Error(err, "parsing metrics bind address", "address", opts.MetricsBindAddress) 73 | return 1 74 | } 75 | 76 | nodeName, err := nodeutil.GetHostname(opts.HostnameOverride) 77 | if err != nil { 78 | klog.Fatalf("can not obtain the node name, use the hostname-override flag if you want to set it to a specific value: %v", err) 79 | } 80 | 81 | dpCfg := dataplane.Config{ 82 | FailOpen: opts.FailOpen, 83 | QueueID: opts.QueueID, 84 | NetfilterBug1766Fix: opts.NetfilterBug1766Fix, 85 | } 86 | 87 | var config *rest.Config 88 | if opts.Kubeconfig != "" { 89 | config, err = clientcmd.BuildConfigFromFlags("", opts.Kubeconfig) 90 | } else { 91 | // creates the in-cluster config 92 | config, err = rest.InClusterConfig() 93 | } 94 | if err != nil { 95 | klog.Fatalf("can not create client-go configuration: %v", err) 96 | } 97 | 98 | // use protobuf for better performance at scale 99 | // https://kubernetes.io/docs/reference/using-api/api-concepts/#alternate-representations-of-resources 100 | npaConfig := config // shallow copy because CRDs does not support proto 101 | config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" 102 | config.ContentType = "application/vnd.kubernetes.protobuf" 103 | 104 | // creates the clientset 105 | clientset, err := kubernetes.NewForConfig(config) 106 | if err != nil { 107 | panic(err.Error()) 108 | } 109 | 110 | informersFactory := informers.NewSharedInformerFactory(clientset, 0) 111 | nsInformer := informersFactory.Core().V1().Namespaces() 112 | networkPolicyInfomer := informersFactory.Networking().V1().NetworkPolicies() 113 | podInformer := informersFactory.Core().V1().Pods() 114 | nodeInformer := informersFactory.Core().V1().Nodes() 115 | 116 | // Set the memory-saving transform function on the pod informer. 
117 | err = podInformer.Informer().SetTransform(func(obj interface{}) (interface{}, error) { 118 | if accessor, err := meta.Accessor(obj); err == nil { 119 | accessor.SetManagedFields(nil) 120 | } 121 | return obj, nil 122 | }) 123 | if err != nil { 124 | klog.Fatalf("Failed to set pod informer transform: %v", err) 125 | } 126 | 127 | npaClient, err := npaclient.NewForConfig(npaConfig) 128 | if err != nil { 129 | klog.Fatalf("Failed to create Network client: %v", err) 130 | } 131 | npaInformerFactory := npainformers.NewSharedInformerFactory(npaClient, 0) 132 | anpInformer := npaInformerFactory.Policy().V1alpha1().AdminNetworkPolicies() 133 | banpInformer := npaInformerFactory.Policy().V1alpha1().BaselineAdminNetworkPolicies() 134 | 135 | // Create the Pod IP resolvers. 136 | // First, given an IP address they return the Pod name/namespace. 137 | informerResolver, err := podinfo.NewInformerResolver(podInformer.Informer()) 138 | if err != nil { 139 | klog.Fatalf("Failed to create informer resolver: %v", err) 140 | } 141 | resolvers := []podinfo.IPResolver{informerResolver} 142 | 143 | // Create an NRI Pod IP resolver if enabled, since NRI connects to the container runtime 144 | // the Pod and IP information is provided at the time the Pod Sandbox is created and before 145 | // the containers start running, so policies can be enforced without race conditions. 146 | if !opts.DisableNRI { 147 | nriIPResolver, err := podinfo.NewNRIResolver(ctx, nodeName, nil) 148 | if err != nil { 149 | klog.Infof("failed to create NRI plugin, using apiserver information only: %v", err) 150 | } 151 | resolvers = append(resolvers, nriIPResolver) 152 | } 153 | 154 | // Create the pod info provider to obtain the Pod information 155 | // necessary for the network policy evaluation, it uses the resolvers 156 | // to obtain the key (Pod name and namespace) and use the informers to obtain 157 | // the labels that are necessary to match the network policies. 
158 | podInfoProvider := podinfo.NewInformerProvider( 159 | podInformer, 160 | nsInformer, 161 | nodeInformer, 162 | resolvers) 163 | 164 | // Create the evaluators for the Pipeline to process the packets 165 | // and take a network policy action. The evaluators are processed 166 | // by the order in the array. 167 | evaluators := []api.PolicyEvaluator{} 168 | 169 | // Logging evaluator must go first if enabled. 170 | if klog.V(2).Enabled() { 171 | evaluators = append(evaluators, networkpolicy.NewLoggingPolicy()) 172 | } 173 | 174 | // Admin Network Policy need to associate IP addresses to Domains 175 | // NewDomainCache implements the interface DomainResolver using 176 | // nftables to create a cache with the resolved IP addresses from the 177 | // Pod domain queries. 178 | domainResolver := dns.NewDomainCache(opts.QueueID + 1) 179 | go func() { 180 | err := domainResolver.Run(ctx) 181 | if err != nil { 182 | klog.Infof("domain cache controller exited: %v", err) 183 | } 184 | }() 185 | 186 | evaluators = append(evaluators, pluginsnpav1alpha1.NewAdminNetworkPolicy( 187 | anpInformer, 188 | domainResolver, 189 | )) 190 | 191 | // Standard Network Policy goes after AdminNetworkPolicy and before BaselineAdminNetworkPolicy 192 | evaluators = append(evaluators, networkpolicy.NewStandardNetworkPolicy( 193 | nodeName, 194 | nsInformer, 195 | podInformer, 196 | networkPolicyInfomer, 197 | )) 198 | 199 | evaluators = append(evaluators, pluginsnpav1alpha1.NewBaselineAdminNetworkPolicy( 200 | banpInformer, 201 | )) 202 | 203 | informersFactory.Start(ctx.Done()) 204 | npaInformerFactory.Start(ctx.Done()) 205 | 206 | cmd.Start(ctx, networkpolicy.NewPolicyEngine(podInfoProvider, evaluators), dpCfg, opts.MetricsBindAddress) 207 | 208 | <-ctx.Done() 209 | logger.Info("Received termination signal, starting cleanup...") 210 | // grace period to cleanup resources 211 | time.Sleep(5 * time.Second) 212 | logger.Info("Cleanup completed, exiting...") 213 | return 0 214 | } 215 | 
-------------------------------------------------------------------------------- /cmd/kube-network-policies/iptracker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "flag" 8 | "fmt" 9 | "net" 10 | "os" 11 | "os/signal" 12 | "path/filepath" 13 | "syscall" 14 | "time" 15 | 16 | nodeutil "k8s.io/component-helpers/node/util" 17 | "sigs.k8s.io/kube-network-policies/pkg/api" 18 | "sigs.k8s.io/kube-network-policies/pkg/cmd" 19 | "sigs.k8s.io/kube-network-policies/pkg/dataplane" 20 | "sigs.k8s.io/kube-network-policies/pkg/ipcache" 21 | "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" 22 | "sigs.k8s.io/kube-network-policies/pkg/podinfo" 23 | pluginsiptracker "sigs.k8s.io/kube-network-policies/plugins/iptracker" 24 | 25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 | "k8s.io/client-go/informers" 27 | "k8s.io/client-go/kubernetes" 28 | "k8s.io/client-go/rest" 29 | "k8s.io/client-go/tools/clientcmd" 30 | "k8s.io/component-base/logs" 31 | logsapi "k8s.io/component-base/logs/api/v1" 32 | _ "k8s.io/component-base/logs/json/register" 33 | "k8s.io/klog/v2" 34 | ) 35 | 36 | var ( 37 | ipTrackerAddress string 38 | ipTrackerCAFile string 39 | ipTrackerCertFile string 40 | ipTrackerKeyFile string 41 | ) 42 | 43 | func init() { 44 | flag.StringVar(&ipTrackerAddress, "ip-tracker-address", "", "The IP address and port for the IP tracker to serve on, if empty it will use the Kubernetes API") 45 | flag.StringVar(&ipTrackerCAFile, "ip-tracker-ca-file", "", "The CA file for the IP tracker") 46 | flag.StringVar(&ipTrackerCertFile, "ip-tracker-cert-file", "", "The certificate file for the IP tracker") 47 | flag.StringVar(&ipTrackerKeyFile, "ip-tracker-key-file", "", "The key file for the IP tracker") 48 | 49 | } 50 | 51 | // This is a pattern to ensure that deferred functions executes before os.Exit 52 | func main() { 53 | os.Exit(run()) 54 | } 55 | 56 | 
func newTLSConfig(caFile, certFile, keyFile string) (*tls.Config, error) { 57 | // Load client cert 58 | cert, err := tls.LoadX509KeyPair(certFile, keyFile) 59 | if err != nil { 60 | return nil, fmt.Errorf("failed to load client key pair: %w", err) 61 | } 62 | 63 | // Load CA cert 64 | caCert, err := os.ReadFile(caFile) 65 | if err != nil { 66 | return nil, fmt.Errorf("failed to read CA file: %w", err) 67 | } 68 | caCertPool := x509.NewCertPool() 69 | caCertPool.AppendCertsFromPEM(caCert) 70 | 71 | return &tls.Config{ 72 | Certificates: []tls.Certificate{cert}, 73 | RootCAs: caCertPool, 74 | }, nil 75 | } 76 | 77 | func run() int { 78 | // Setup logging 79 | logCfg := logsapi.NewLoggingConfiguration() 80 | logsapi.AddGoFlags(logCfg, flag.CommandLine) 81 | 82 | // Setup flags 83 | opts := cmd.NewOptions() 84 | opts.AddFlags(flag.CommandLine) 85 | 86 | flag.Parse() 87 | 88 | // init logging 89 | logs.InitLogs() 90 | if err := logsapi.ValidateAndApply(logCfg, nil); err != nil { 91 | fmt.Fprintf(os.Stderr, "%v\n", err) 92 | return 1 93 | } 94 | 95 | // Create a context for structured logging, and catch termination signals 96 | ctx, cancel := signal.NotifyContext( 97 | context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM) 98 | defer cancel() 99 | 100 | logger := klog.FromContext(ctx) 101 | logger.Info("called", "args", flag.Args()) 102 | 103 | flag.VisitAll(func(flag *flag.Flag) { 104 | logger.Info("flag", "name", flag.Name, "value", flag.Value) 105 | }) 106 | 107 | if _, _, err := net.SplitHostPort(opts.MetricsBindAddress); err != nil { 108 | logger.Error(err, "parsing metrics bind address", "address", opts.MetricsBindAddress) 109 | return 1 110 | } 111 | 112 | if ipTrackerAddress == "" { 113 | logger.Info("ip-tracker address required") 114 | return 1 115 | } 116 | 117 | nodeName, err := nodeutil.GetHostname(opts.HostnameOverride) 118 | if err != nil { 119 | klog.Fatalf("can not obtain the node name, use the hostname-override flag if you want to set it 
to a specific value: %v", err) 120 | } 121 | 122 | dpCfg := dataplane.Config{ 123 | FailOpen: opts.FailOpen, 124 | QueueID: opts.QueueID, 125 | NetfilterBug1766Fix: opts.NetfilterBug1766Fix, 126 | StrictMode: opts.StrictMode, 127 | } 128 | 129 | var config *rest.Config 130 | if opts.Kubeconfig != "" { 131 | config, err = clientcmd.BuildConfigFromFlags("", opts.Kubeconfig) 132 | } else { 133 | // creates the in-cluster config 134 | config, err = rest.InClusterConfig() 135 | } 136 | if err != nil { 137 | klog.Fatalf("can not create client-go configuration: %v", err) 138 | } 139 | 140 | // use protobuf for better performance at scale 141 | // https://kubernetes.io/docs/reference/using-api/api-concepts/#alternate-representations-of-resources 142 | config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" 143 | config.ContentType = "application/vnd.kubernetes.protobuf" 144 | 145 | // creates the clientset 146 | clientset, err := kubernetes.NewForConfig(config) 147 | if err != nil { 148 | panic(err.Error()) 149 | } 150 | 151 | nsKubeSystem, err := clientset.CoreV1().Namespaces().Get(ctx, metav1.NamespaceSystem, metav1.GetOptions{}) 152 | if err != nil { 153 | klog.Fatalf("Failed to get kube-system namespace: %v", err) 154 | } 155 | clusterID := string(nsKubeSystem.UID) 156 | 157 | informersFactory := informers.NewSharedInformerFactory(clientset, 0) 158 | networkPolicyInfomer := informersFactory.Networking().V1().NetworkPolicies() 159 | 160 | // Create the pod info provider to obtain the Pod information 161 | // necessary for the network policy evaluation, it uses the resolvers 162 | // to obtain the key (Pod name and namespace) and use the informers to obtain 163 | // the labels that are necessary to match the network policies. 
164 | dbPath := filepath.Join(os.TempDir(), "ipcache.bolt") 165 | boltStore, err := ipcache.NewBoltStore(dbPath) 166 | if err != nil { 167 | klog.Fatalf("Failed to create bolt store: %v", err) 168 | } 169 | lruStore := ipcache.NewLRUStore(boltStore, 256) 170 | var tlsConfig *tls.Config 171 | if ipTrackerCAFile != "" && ipTrackerCertFile != "" && ipTrackerKeyFile != "" { 172 | tlsConfig, err = newTLSConfig(ipTrackerCAFile, ipTrackerCertFile, ipTrackerKeyFile) 173 | if err != nil { 174 | klog.Fatalf("Failed to create TLS config: %v", err) 175 | } 176 | } 177 | ipcacheClient, err := ipcache.NewClient(ctx, ipTrackerAddress, tlsConfig, lruStore, boltStore, nodeName) 178 | if err != nil { 179 | klog.Fatalf("Failed to create ipcache client: %v", err) 180 | } 181 | var podInfoProvider api.PodInfoProvider 182 | // Create an NRI Pod IP resolver if enabled, since NRI connects to the container runtime 183 | // the Pod and IP information is provided at the time the Pod Sandbox is created and before 184 | // the containers start running, so policies can be enforced without race conditions. 185 | if !opts.DisableNRI { 186 | nriIPResolver, err := podinfo.NewNRIResolver(ctx, nodeName, informersFactory.Core().V1().Namespaces()) 187 | if err != nil { 188 | klog.Infof("failed to create NRI plugin, using apiserver information only: %v", err) 189 | } 190 | podInfoProvider = podinfo.NewFallbackPodInfoProvider(ipcacheClient, nriIPResolver) 191 | } else { 192 | podInfoProvider = ipcacheClient 193 | } 194 | 195 | // Create the evaluators for the Pipeline to process the packets 196 | // and take a network policy action. The evaluators are processed 197 | // by the order in the array. 198 | evaluators := []api.PolicyEvaluator{} 199 | 200 | // Logging evaluator must go first if enabled. 
201 | if klog.V(2).Enabled() { 202 | evaluators = append(evaluators, networkpolicy.NewLoggingPolicy()) 203 | } 204 | 205 | evaluators = append(evaluators, pluginsiptracker.NewIPTrackerNetworkPolicy(clusterID, nodeName, networkPolicyInfomer)) 206 | 207 | informersFactory.Start(ctx.Done()) 208 | 209 | cmd.Start(ctx, networkpolicy.NewPolicyEngine(podInfoProvider, evaluators), dpCfg, opts.MetricsBindAddress) 210 | 211 | <-ctx.Done() 212 | logger.Info("Received termination signal, starting cleanup...") 213 | // grace period to cleanup resources 214 | time.Sleep(5 * time.Second) 215 | logger.Info("Cleanup completed, exiting...") 216 | return 0 217 | } 218 | -------------------------------------------------------------------------------- /cmd/kube-network-policies/npa-v1alpha2/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "net" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | "time" 12 | 13 | "sigs.k8s.io/kube-network-policies/pkg/api" 14 | "sigs.k8s.io/kube-network-policies/pkg/cmd" 15 | "sigs.k8s.io/kube-network-policies/pkg/dataplane" 16 | "sigs.k8s.io/kube-network-policies/pkg/dns" 17 | "sigs.k8s.io/kube-network-policies/pkg/networkpolicy" 18 | "sigs.k8s.io/kube-network-policies/pkg/podinfo" 19 | pluginsnpav1alpha2 "sigs.k8s.io/kube-network-policies/plugins/npa-v1alpha2" 20 | npav1alpha2 "sigs.k8s.io/network-policy-api/apis/v1alpha2" 21 | npaclient "sigs.k8s.io/network-policy-api/pkg/client/clientset/versioned" 22 | npainformers "sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions" 23 | 24 | "k8s.io/apimachinery/pkg/api/meta" 25 | "k8s.io/client-go/informers" 26 | "k8s.io/client-go/tools/clientcmd" 27 | 28 | "k8s.io/client-go/kubernetes" 29 | "k8s.io/client-go/rest" 30 | "k8s.io/component-base/logs" 31 | logsapi "k8s.io/component-base/logs/api/v1" 32 | _ "k8s.io/component-base/logs/json/register" 33 | nodeutil 
"k8s.io/component-helpers/node/util"
	"k8s.io/klog/v2"
)

// main delegates to run so that deferred functions execute before os.Exit.
func main() {
	os.Exit(run())
}

// run wires up logging, flags, the Kubernetes and network-policy-api clients
// and the policy evaluators, then blocks until the process receives a
// termination signal. It returns the process exit code so that main can call
// os.Exit after all deferred cleanup has executed (hence no klog.Fatalf in here).
func run() int {
	// Setup logging
	logCfg := logsapi.NewLoggingConfiguration()
	logsapi.AddGoFlags(logCfg, flag.CommandLine)

	// Setup flags
	opts := cmd.NewOptions()
	opts.AddFlags(flag.CommandLine)

	flag.Parse()

	// init logging
	logs.InitLogs()
	if err := logsapi.ValidateAndApply(logCfg, nil); err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		return 1
	}

	// Create a context for structured logging, and catch termination signals
	ctx, cancel := signal.NotifyContext(
		context.Background(), os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer cancel()

	logger := klog.FromContext(ctx)
	logger.Info("called", "args", flag.Args())

	flag.VisitAll(func(flag *flag.Flag) {
		logger.Info("flag", "name", flag.Name, "value", flag.Value)
	})

	if _, _, err := net.SplitHostPort(opts.MetricsBindAddress); err != nil {
		logger.Error(err, "parsing metrics bind address", "address", opts.MetricsBindAddress)
		return 1
	}

	nodeName, err := nodeutil.GetHostname(opts.HostnameOverride)
	if err != nil {
		// Use logger.Error + return rather than klog.Fatalf so the deferred
		// cancel above still runs, per the documented pattern in main.
		logger.Error(err, "can not obtain the node name, use the hostname-override flag if you want to set it to a specific value")
		return 1
	}

	dpCfg := dataplane.Config{
		FailOpen:            opts.FailOpen,
		QueueID:             opts.QueueID,
		NetfilterBug1766Fix: opts.NetfilterBug1766Fix,
		StrictMode:          opts.StrictMode,
	}

	var config *rest.Config
	if opts.Kubeconfig != "" {
		config, err = clientcmd.BuildConfigFromFlags("", opts.Kubeconfig)
	} else {
		// creates the in-cluster config
		config, err = rest.InClusterConfig()
	}
	if err != nil {
		logger.Error(err, "can not create client-go configuration")
		return 1
	}

	// use protobuf for better performance at scale
	// https://kubernetes.io/docs/reference/using-api/api-concepts/#alternate-representations-of-resources
	//
	// BUG FIX: the previous code did `npaConfig := config`, which copies only
	// the *pointer*, so the protobuf content-type mutations below leaked into
	// the CRD client config even though CRDs do not support protobuf.
	// rest.CopyConfig produces a genuine copy.
	npaConfig := rest.CopyConfig(config)
	config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
	config.ContentType = "application/vnd.kubernetes.protobuf"

	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		logger.Error(err, "can not create kubernetes clientset")
		return 1
	}

	informersFactory := informers.NewSharedInformerFactory(clientset, 0)
	nsInformer := informersFactory.Core().V1().Namespaces()
	networkPolicyInformer := informersFactory.Networking().V1().NetworkPolicies()
	podInformer := informersFactory.Core().V1().Pods()
	nodeInformer := informersFactory.Core().V1().Nodes()

	// Set the memory-saving transform function on the pod informer:
	// managed fields are never needed for policy evaluation.
	err = podInformer.Informer().SetTransform(func(obj interface{}) (interface{}, error) {
		if accessor, err := meta.Accessor(obj); err == nil {
			accessor.SetManagedFields(nil)
		}
		return obj, nil
	})
	if err != nil {
		logger.Error(err, "failed to set pod informer transform")
		return 1
	}

	npaClient, err := npaclient.NewForConfig(npaConfig)
	if err != nil {
		logger.Error(err, "failed to create network-policy-api client")
		return 1
	}
	npaInformerFactory := npainformers.NewSharedInformerFactory(npaClient, 0)
	cnpInformer := npaInformerFactory.Policy().V1alpha2().ClusterNetworkPolicies()

	// Create the Pod IP resolvers.
	// First, given an IP address they return the Pod name/namespace.
	informerResolver, err := podinfo.NewInformerResolver(podInformer.Informer())
	if err != nil {
		logger.Error(err, "failed to create informer resolver")
		return 1
	}
	resolvers := []podinfo.IPResolver{informerResolver}

	// Create an NRI Pod IP resolver if enabled, since NRI connects to the container runtime
	// the Pod and IP information is provided at the time the Pod Sandbox is created and before
	// the containers start running, so policies can be enforced without race conditions.
	if !opts.DisableNRI {
		nriIPResolver, err := podinfo.NewNRIResolver(ctx, nodeName, nil)
		if err != nil {
			// Best effort: fall back to apiserver information only.
			// NOTE(review): nriIPResolver may be nil here and is still
			// appended below — verify the resolver chain tolerates nil.
			klog.Infof("failed to create NRI plugin, using apiserver information only: %v", err)
		}
		resolvers = append(resolvers, nriIPResolver)
	}

	// Create the pod info provider to obtain the Pod information
	// necessary for the network policy evaluation, it uses the resolvers
	// to obtain the key (Pod name and namespace) and use the informers to obtain
	// the labels that are necessary to match the network policies.
	podInfoProvider := podinfo.NewInformerProvider(
		podInformer,
		nsInformer,
		nodeInformer,
		resolvers)

	// Create the evaluators for the Pipeline to process the packets
	// and take a network policy action. The evaluators are processed
	// by the order in the array.
	evaluators := []api.PolicyEvaluator{}

	// Logging evaluator must go first if enabled.
	if klog.V(2).Enabled() {
		evaluators = append(evaluators, networkpolicy.NewLoggingPolicy())
	}

	// Cluster Network Policies need to associate IP addresses to domains.
	// NewDomainCache implements the DomainResolver interface using nftables
	// to cache the IP addresses resolved by Pod DNS queries.
	domainResolver := dns.NewDomainCache(opts.QueueID + 1)
	go func() {
		if err := domainResolver.Run(ctx); err != nil {
			klog.Infof("domain cache controller exited: %v", err)
		}
	}()

	evaluators = append(evaluators, pluginsnpav1alpha2.NewClusterNetworkPolicy(
		npav1alpha2.AdminTier,
		cnpInformer,
		domainResolver,
	))

	// Standard Network Policy goes after the Admin tier and before the Baseline tier.
	evaluators = append(evaluators, networkpolicy.NewStandardNetworkPolicy(
		nodeName,
		nsInformer,
		podInformer,
		networkPolicyInformer,
	))

	evaluators = append(evaluators, pluginsnpav1alpha2.NewClusterNetworkPolicy(
		npav1alpha2.BaselineTier,
		cnpInformer,
		domainResolver,
	))

	informersFactory.Start(ctx.Done())
	npaInformerFactory.Start(ctx.Done())

	cmd.Start(ctx, networkpolicy.NewPolicyEngine(podInfoProvider, evaluators), dpCfg, opts.MetricsBindAddress)

	<-ctx.Done()
	logger.Info("Received termination signal, starting cleanup...")
	// grace period to cleanup resources
	time.Sleep(5 * time.Second)
	logger.Info("Cleanup completed, exiting...")
	return 0
}