├── .github ├── .codecov.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── support-request-question.md ├── actions │ ├── build-and-push-image │ │ └── action.yaml │ └── install-dependencies │ │ └── action.yaml ├── dependabot.yml └── workflows │ ├── e2e-conformance.yaml │ ├── manual-e2e-tests.yaml │ ├── performance-tests.yaml │ └── pr-tests.yaml ├── .gitignore ├── .go-version ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── Dockerfile.test ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── THIRD-PARTY ├── api └── v1alpha1 │ ├── groupversion_info.go │ ├── policyendpoints_types.go │ └── zz_generated.deepcopy.go ├── cmd ├── cli │ ├── cli-selector │ │ ├── cli-all.go │ │ └── cli.go │ └── main.go └── cliv6 │ ├── cli-selector-v6 │ ├── cli-all.go │ └── cli.go │ └── main.go ├── controllers ├── policyendpoints_controller.go └── policyendpoints_controller_test.go ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt ├── main.go ├── mocks └── controller-runtime │ └── client │ └── client_mocks.go ├── pkg ├── aws │ ├── cloud.go │ ├── cloud_config.go │ └── services │ │ ├── cloudwatchlogs.go │ │ └── ec2_metadata.go ├── clihelper │ └── show.go ├── config │ ├── controller_config.go │ └── runtime_config.go ├── ebpf │ ├── bpf_client.go │ ├── bpf_client_mock.go │ ├── bpf_client_test.go │ ├── c │ │ ├── helper.h │ │ ├── tc.v4egress.bpf.c │ │ ├── tc.v4ingress.bpf.c │ │ ├── tc.v6egress.bpf.c │ │ ├── tc.v6ingress.bpf.c │ │ ├── v4events.bpf.c │ │ └── v6events.bpf.c │ ├── conntrack │ │ └── conntrack_client.go │ └── events │ │ └── events.go ├── logger │ ├── logger.go │ └── zaplogger.go ├── metrics │ └── metrics.go ├── rpc │ └── rpc_handler.go ├── rpcclient │ └── client_wrapper.go ├── utils │ ├── cp │ │ └── cp.go │ ├── imds │ │ └── imds.go │ ├── utils.go │ └── utils_test.go └── version │ └── version.go ├── scripts ├── README.md ├── ebpf_sdk_override │ ├── cleanup.sh │ └── setup.sh ├── gen_mocks.sh ├── lib │ ├── cleanup.sh │ ├── cloudwatch.sh │ ├── cluster.sh │ ├── common.sh │ ├── network-policy.sh │ ├── tests.sh │ └── verify_test_results.py ├── run-cyclonus-tests.sh ├── run-tests.sh ├── test │ └── check-cleanup-pod.yaml └── update-node-agent-image.sh └── test ├── agent ├── Dockerfile ├── README.md ├── cmd │ └── check-bpf-cleanup-agent │ │ └── main.go ├── go.mod └── go.sum ├── framework ├── framework.go ├── manifest │ ├── container.go │ ├── deployment.go │ ├── networkpolicy.go │ ├── networkpolicyrules.go │ ├── pod.go │ └── service.go ├── options.go ├── resources │ └── k8s │ │ ├── deployment │ │ └── resource.go │ │ ├── namespace │ │ └── resource.go │ │ ├── networkpolicy │ │ └── resource.go │ │ ├── pod │ │ └── resource.go │ │ └── service │ │ └── resource.go └── utils │ ├── poll.go │ └── utils.go └── integration ├── policy ├── default_allow_test.go ├── policy_suite_test.go └── policy_test.go └── strict ├── strict_mode_suite_test.go └── strict_mode_test.go /.github/.codecov.yml: -------------------------------------------------------------------------------- 1 | # To validate: 2 | # cat codecov.yml | curl --data-binary @- https://codecov.io/validate 3 | 4 | codecov: 5 | # Avoid "Missing base report" 6 | # https://docs.codecov.io/docs/comparing-commits 7 | allow_coverage_offsets: true 8 | notify: 9 | require_ci_to_pass: yes 10 | 11 | coverage: 12 | precision: 2 13 | round: down 14 | range: "50...75" 15 | 16 | status: 17 | project: 18 | default: 19 | threshold: 1 20 | unittest: 21 | threshold: 1 22 | only_pulls: true 23 | flags: 24 | - "unittest" 25 | # Disable patch since 
it is noisy and not correct 26 | patch: 27 | default: 28 | enabled: no 29 | if_not_found: success 30 | 31 | comment: false 32 | 33 | ignore: 34 | - "api/v1alpha1/**/*" 35 | - "hack/**/*" 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Report a bug in aws-network-policy-agent project. 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | 16 | 17 | **What happened**: 18 | 21 | 22 | **Attach logs** 23 | 26 | 27 | **What you expected to happen**: 28 | 29 | **How to reproduce it (as minimally and precisely as possible)**: 30 | 31 | **Anything else we need to know?**: 32 | 33 | **Environment**: 34 | - Kubernetes version (use `kubectl version`): 35 | - CNI Version 36 | - Network Policy Agent Version 37 | - OS (e.g: `cat /etc/os-release`): 38 | - Kernel (e.g. `uname -a`): 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an enhancement to the aws-network-policy-agent project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | 17 | 18 | **What would you like to be added**: 19 | 20 | **Why is this needed**: 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/support-request-question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Support Request/Question 3 | about: Support request or question relating to aws-network-policy-agent project. 4 | title: '' 5 | labels: needs investigation, question 6 | assignees: '' 7 | 8 | --- 9 | 10 | 16 | 17 | **What happened**: 18 | 19 | 24 | 25 | **Environment**: 26 | - Kubernetes version (use `kubectl version`): 27 | - CNI Version 28 | - Network Policy Agent Version 29 | - OS (e.g: `cat /etc/os-release`): 30 | - Kernel (e.g. `uname -a`): 31 | -------------------------------------------------------------------------------- /.github/actions/build-and-push-image/action.yaml: -------------------------------------------------------------------------------- 1 | name: Build Image and Push 2 | description: 'Builds Multi-arch Network Policy Agent image and pushes to ECR' 3 | inputs: 4 | aws-region: 5 | description: AWS region 6 | required: true 7 | outputs: 8 | image_uri: 9 | description: "Network Policy Agent Image" 10 | value: ${{ steps.build.outputs.image_uri }} 11 | runs: 12 | using: "composite" 13 | steps: 14 | - name: Set up Docker QEMU 15 | uses: docker/setup-qemu-action@v2 16 | - name: Set up Docker Buildx 17 | uses: docker/setup-buildx-action@v2 18 | - name: Build and Push Image 19 | id: build 20 | shell: bash 21 | env: 22 | REGION: ${{ inputs.aws-region }} 23 | AWS_ECR_REPO_NAME: amazon/aws-network-policy-agent 24 | run: | 25 | IMAGE_VERSION=$(git rev-parse HEAD) 26 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 27 | AWS_ECR_REGISTRY="$AWS_ACCOUNT_ID.dkr.ecr.$REGION.amazonaws.com" 28 | 29 | aws ecr get-login-password --region $REGION | docker login --username AWS --password-stdin ${AWS_ECR_REGISTRY} 30 | if ! 
`aws ecr describe-repositories --registry-id $AWS_ACCOUNT_ID --repository-names $AWS_ECR_REPO_NAME >/dev/null 2>&1`; then 31 | echo "creating ECR repo with name $AWS_ECR_REPO_NAME" 32 | aws ecr create-repository --repository-name $AWS_ECR_REPO_NAME 33 | fi 34 | 35 | if [[ $(aws ecr batch-get-image --repository-name=$AWS_ECR_REPO_NAME --image-ids imageTag=$IMAGE_VERSION \ 36 | --query 'images[].imageId.imageTag' --region $REGION) != "[]" ]]; then 37 | echo "Image $AWS_ECR_REPO_NAME:$IMAGE_VERSION already exists. Skipping image build." 38 | else 39 | echo "Building AWS Network Policy Agent latest image" 40 | 41 | docker buildx create --name="network-policy-agent-builder" --buildkitd-flags '--allow-insecure-entitlement network.host' --use >/dev/null 42 | make multi-arch-build-and-push VERSION=$IMAGE_VERSION IMAGE=$AWS_ECR_REGISTRY/$AWS_ECR_REPO_NAME 43 | 44 | docker buildx rm network-policy-agent-builder 45 | fi 46 | image_uri=$AWS_ECR_REGISTRY/$AWS_ECR_REPO_NAME:$IMAGE_VERSION 47 | echo "image_uri=$(echo $image_uri)" >> $GITHUB_OUTPUT 48 | -------------------------------------------------------------------------------- /.github/actions/install-dependencies/action.yaml: -------------------------------------------------------------------------------- 1 | name: InstallDependencies 2 | description: 'Installs Go, Docker, Ginkgo, EKSCTL binaries' 3 | runs: 4 | using: "composite" 5 | steps: 6 | - uses: actions/setup-go@v4 7 | with: 8 | go-version-file: go.mod 9 | check-latest: true 10 | - name: Set up ginkgo 11 | shell: bash 12 | run: | 13 | # Install ginkgo version from go.mod 14 | go install -mod=mod github.com/onsi/ginkgo/v2/ginkgo 15 | - name: Set up eksctl 16 | shell: bash 17 | run: | 18 | curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp 19 | sudo mv /tmp/eksctl /usr/local/bin/ -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # See https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates#package-ecosystem 2 | version: 2 3 | updates: 4 | - package-ecosystem: "gomod" 5 | directory: "/" 6 | schedule: 7 | interval: "monthly" 8 | -------------------------------------------------------------------------------- /.github/workflows/e2e-conformance.yaml: -------------------------------------------------------------------------------- 1 | name: E2E Conformance Tests 2 | 3 | on: 4 | workflow_dispatch: {} 5 | schedule: 6 | - cron: "0 0 * * *" # Run Everyday at Midnight 7 | 8 | permissions: 9 | id-token: write 10 | contents: read 11 | 12 | jobs: 13 | build-image: 14 | if: github.repository == 'aws/aws-network-policy-agent' 15 | runs-on: ubuntu-latest 16 | outputs: 17 | AWS_EKS_NODEAGENT_IMAGE: ${{steps.build-and-push-image.outputs.image_uri}} 18 | steps: 19 | - name: Checkout latest commit 20 | uses: actions/checkout@v3 21 | - name: Install Dependencies 22 | uses: ./.github/actions/install-dependencies 23 | - uses: aws-actions/configure-aws-credentials@v3 24 | with: 25 | role-to-assume: ${{ secrets.OSS_ROLE_ARN }} 26 | aws-region: us-west-2 27 | role-duration-seconds: 3600 # 1 hour 28 | - name: Build and Push Network Policy Image 29 | id: build-and-push-image 30 | uses: ./.github/actions/build-and-push-image 31 | with: 32 | aws-region: us-west-2 33 | e2e-conformance-tests: 34 | needs: build-image 35 | strategy: 36 | fail-fast: false 
37 | matrix: 38 | ip-family: [ IPv4, IPv6 ] 39 | instance-type: ["t3.large", "t4g.large"] 40 | # kubernetes-versions: ["1.25", "1.26", "1.27"] 41 | if: github.repository == 'aws/aws-network-policy-agent' 42 | runs-on: ubuntu-latest 43 | steps: 44 | - name: Checkout latest commit 45 | uses: actions/checkout@v3 46 | - name: Install Dependencies 47 | uses: ./.github/actions/install-dependencies 48 | - uses: aws-actions/configure-aws-credentials@v3 49 | with: 50 | role-to-assume: ${{ secrets.OSS_ROLE_ARN }} 51 | aws-region: us-west-2 52 | role-duration-seconds: 21600 # 6 hours 53 | - name: Run e2e conformance test 54 | env: 55 | RUN_CONFORMANCE_TESTS: true 56 | IP_FAMILY: ${{ matrix.ip-family }} 57 | INSTANCE_TYPE: ${{ matrix.instance-type }} 58 | AWS_EKS_NODEAGENT_IMAGE: ${{ needs.build-image.outputs.AWS_EKS_NODEAGENT_IMAGE }} 59 | TEST_IMAGE_REGISTRY: ${{ secrets.TEST_IMAGE_REGISTRY }} 60 | run: | 61 | ./scripts/run-tests.sh -------------------------------------------------------------------------------- /.github/workflows/manual-e2e-tests.yaml: -------------------------------------------------------------------------------- 1 | name: Manual E2E tests 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | instance_type: 7 | type: choice 8 | description: Select Instance type (ARM - t4g.large, AMD - t3.large, GPU - p3.8xlarge) 9 | options: 10 | - t3.large 11 | - t4g.large 12 | - p3.8xlarge 13 | 14 | permissions: 15 | id-token: write 16 | contents: read 17 | 18 | jobs: 19 | manual-e2e-conformance-tests: 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | ip-family: [ IPv4, IPv6 ] 24 | kubernetes-versions: ["1.25", "1.26", "1.27", "1.28"] 25 | if: github.repository == 'aws/aws-network-policy-agent' 26 | runs-on: ubuntu-latest 27 | steps: 28 | - name: Checkout latest commit 29 | uses: actions/checkout@v3 30 | - name: Install Dependencies 31 | uses: ./.github/actions/install-dependencies 32 | - uses: aws-actions/configure-aws-credentials@v3 33 | with: 34 | role-to-assume: ${{ secrets.OSS_ROLE_ARN }} 35 | aws-region: us-west-2 36 | role-duration-seconds: 18000 # 5 hours 37 | - name: Run manual e2e conformance test 38 | env: 39 | RUN_CONFORMANCE_TESTS: true 40 | IP_FAMILY: ${{ matrix.ip-family }} 41 | INSTANCE_TYPE: ${{ github.event.inputs.instance_type }} 42 | K8S_VERSION: ${{ matrix.kubernetes-versions }} 43 | TEST_IMAGE_REGISTRY: ${{ secrets.TEST_IMAGE_REGISTRY }} 44 | run: | 45 | ./scripts/run-tests.sh 46 | -------------------------------------------------------------------------------- /.github/workflows/performance-tests.yaml: -------------------------------------------------------------------------------- 1 | name: Performance tests 2 | 3 | on: 4 | workflow_dispatch: {} 5 | schedule: 6 | - cron: "0 9 * * 2" # every Tuesday 7 | 8 | permissions: 9 | id-token: write 10 | contents: read 11 | 12 | jobs: 13 | build-image: 14 | if: github.repository == 'aws/aws-network-policy-agent' 15 | runs-on: ubuntu-latest 16 | outputs: 17 | AWS_EKS_NODEAGENT_IMAGE: ${{steps.build-and-push-image.outputs.image_uri}} 18 | steps: 19 | - name: Checkout latest commit 20 | uses: actions/checkout@v3 21 | - name: Install Dependencies 22 | uses: ./.github/actions/install-dependencies 23 | - uses: aws-actions/configure-aws-credentials@v3 24 | with: 25 | role-to-assume: ${{ secrets.OSS_ROLE_ARN }} 26 | aws-region: us-west-2 27 | role-duration-seconds: 3600 # 1 hour 28 | - name: Build and Push Network Policy Image 29 | id: build-and-push-image 30 | uses: ./.github/actions/build-and-push-image 31 | with: 32 | aws-region: 
us-west-2 33 | performance-tests: 34 | needs: build-image 35 | strategy: 36 | fail-fast: false 37 | matrix: 38 | ip-family: [ IPv4, IPv6 ] 39 | # kubernetes-versions: ["1.25", "1.26", "1.27"] 40 | if: github.repository == 'aws/aws-network-policy-agent' 41 | runs-on: ubuntu-latest 42 | steps: 43 | - name: Checkout latest commit 44 | uses: actions/checkout@v3 45 | - name: Install Dependencies 46 | uses: ./.github/actions/install-dependencies 47 | - uses: aws-actions/configure-aws-credentials@v3 48 | with: 49 | role-to-assume: ${{ secrets.OSS_ROLE_ARN }} 50 | aws-region: us-west-2 51 | role-duration-seconds: 18000 # 5 hours 52 | - name: Run performance tests 53 | env: 54 | RUN_PERFORMANCE_TESTS: true 55 | NODES_CAPACITY: 3 56 | INSTANCE_TYPE: c5.xlarge 57 | IP_FAMILY: ${{ matrix.ip-family }} 58 | AWS_EKS_NODEAGENT_IMAGE: ${{ needs.build-image.outputs.AWS_EKS_NODEAGENT_IMAGE }} 59 | run: | 60 | ./scripts/run-tests.sh -------------------------------------------------------------------------------- /.github/workflows/pr-tests.yaml: -------------------------------------------------------------------------------- 1 | name: Automatic Pull Request test 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - "main" 7 | - "release*" 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | unit-test: 14 | name: Unit test 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout latest commit in the PR 18 | uses: actions/checkout@v3 19 | - name: Set up Go 20 | uses: actions/setup-go@v4 21 | with: 22 | go-version-file: go.mod 23 | check-latest: true 24 | cache-dependency-path: "**/go.sum" 25 | - uses: actions/cache@v3 26 | with: 27 | path: | 28 | ~/go/bin 29 | - name: Set up tools 30 | run: | 31 | go install golang.org/x/lint/golint@latest 32 | go install golang.org/x/tools/cmd/goimports@latest 33 | - name: Run code checks 34 | run: | 35 | make check-format 36 | make vet 37 | - name: Build 38 | run: make build-linux 39 | - name: Unit test 40 | run: make test 41 | - name: Upload code coverage 42 | uses: codecov/codecov-action@v3 43 | docker-build: 44 | name: Build Docker images 45 | runs-on: ubuntu-latest 46 | steps: 47 | - name: Checkout latest commit in the PR 48 | uses: actions/checkout@v3 49 | - name: Set up QEMU 50 | uses: docker/setup-qemu-action@v2 51 | - name: Set up Docker Buildx 52 | uses: docker/setup-buildx-action@v2 53 | - name: Build Network Policy Agent images 54 | run: make docker-buildx 55 | deprecated-apigroups: 56 | name: Detect deprecated apiGroups 57 | runs-on: ubuntu-latest 58 | steps: 59 | - uses: actions/checkout@v3 60 | - run: | 61 | version=$(curl -sL https://api.github.com/repos/FairwindsOps/pluto/releases/latest | jq -r ".tag_name") 62 | number=${version:1} 63 | wget https://github.com/FairwindsOps/pluto/releases/download/${version}/pluto_${number}_linux_amd64.tar.gz 64 | sudo tar -C /usr/local -xzf pluto_${number}_linux_amd64.tar.gz 65 | - run: | 66 | /usr/local/pluto detect-files -d . 67 | vuln_check: 68 | runs-on: ubuntu-latest 69 | timeout-minutes: 5 70 | steps: 71 | - name: Checkout 72 | uses: actions/checkout@v3 73 | - name: Setup Go Version 74 | run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV 75 | - uses: actions/setup-go@v4 76 | with: 77 | go-version: ${{ env.GO_VERSION }} 78 | cache-dependency-path: "**/go.sum" 79 | - name: Install `govulncheck` 80 | run: go install golang.org/x/vuln/cmd/govulncheck@latest 81 | - name: Run `govulncheck` 82 | run: ~/go/bin/govulncheck ./... 
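      # Note: the same gates can be reproduced locally before opening a PR,
      # assuming the Makefile targets invoked in the steps above:
      #   make check-format vet build-linux test
      #   go install golang.org/x/vuln/cmd/govulncheck@latest && ~/go/bin/govulncheck ./...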
83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | coverage.txt 3 | *.h 4 | *.o 5 | aws-eks-na-cli 6 | aws-eks-na-cli-v6 7 | controller 8 | bin/ 9 | config/ 10 | vendor/ 11 | scripts/results.log 12 | amazon-network-policy-controller-k8s 13 | 14 | # Test build files 15 | test/build/ 16 | -------------------------------------------------------------------------------- /.go-version: -------------------------------------------------------------------------------- 1 | 1.24.2 2 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @aws/eks-networking -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. 
Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | ARG golang_image 3 | 4 | FROM $golang_image as builder 5 | 6 | ARG TARGETOS 7 | ARG TARGETARCH 8 | 9 | # Env configuration 10 | ENV GOPROXY=direct 11 | 12 | WORKDIR /workspace 13 | 14 | COPY go.mod go.sum ./ 15 | # cache deps before building and copying source so that we don't need to re-download as much 16 | # and so that source changes don't invalidate our downloaded layer 17 | RUN go mod download 18 | 19 | COPY . ./ 20 | 21 | RUN make build-linux 22 | 23 | # Vmlinux 24 | FROM public.ecr.aws/amazonlinux/amazonlinux:2023 as vmlinuxbuilder 25 | WORKDIR /vmlinuxbuilder 26 | RUN yum update -y && \ 27 | yum install -y iproute procps-ng && \ 28 | yum install -y llvm clang make gcc && \ 29 | yum install -y kernel-devel elfutils-libelf-devel zlib-devel libbpf-devel bpftool && \ 30 | yum clean all 31 | COPY . ./ 32 | RUN make vmlinuxh 33 | 34 | # Build BPF 35 | FROM public.ecr.aws/amazonlinux/amazonlinux:2 as bpfbuilder 36 | WORKDIR /bpfbuilder 37 | RUN yum update -y && \ 38 | yum install -y iproute procps-ng && \ 39 | yum install -y llvm clang make gcc && \ 40 | yum install -y kernel-devel elfutils-libelf-devel zlib-devel libbpf-devel && \ 41 | yum clean all 42 | 43 | COPY . ./ 44 | COPY --from=vmlinuxbuilder /vmlinuxbuilder/pkg/ebpf/c/vmlinux.h ./pkg/ebpf/c/ 45 | RUN make build-bpf 46 | 47 | # Container base image 48 | FROM public.ecr.aws/eks-distro-build-tooling/eks-distro-minimal-base-glibc:latest.2 49 | 50 | WORKDIR / 51 | COPY --from=builder /workspace/controller . 52 | COPY --from=builder /workspace/aws-eks-na-cli . 53 | COPY --from=builder /workspace/aws-eks-na-cli-v6 . 54 | COPY --from=bpfbuilder /bpfbuilder/pkg/ebpf/c/tc.v4ingress.bpf.o . 55 | COPY --from=bpfbuilder /bpfbuilder/pkg/ebpf/c/tc.v4egress.bpf.o . 56 | COPY --from=bpfbuilder /bpfbuilder/pkg/ebpf/c/tc.v6ingress.bpf.o . 
57 | COPY --from=bpfbuilder /bpfbuilder/pkg/ebpf/c/tc.v6egress.bpf.o . 58 | COPY --from=bpfbuilder /bpfbuilder/pkg/ebpf/c/v4events.bpf.o . 59 | COPY --from=bpfbuilder /bpfbuilder/pkg/ebpf/c/v6events.bpf.o . 60 | 61 | ENTRYPOINT ["/controller"] 62 | -------------------------------------------------------------------------------- /Dockerfile.test: -------------------------------------------------------------------------------- 1 | ARG golang_image 2 | 3 | FROM $golang_image 4 | 5 | WORKDIR /go/src/github.com/aws/aws-network-policy-agent 6 | 7 | # Force the go compiler to use modules. 8 | ENV GO111MODULE=on 9 | ENV GOPROXY=direct 10 | 11 | # Add goimports 12 | RUN go install golang.org/x/tools/cmd/goimports@latest 13 | 14 | # go.mod and go.sum go into their own layers. 15 | COPY go.mod . 16 | COPY go.sum . 17 | 18 | # This ensures `go mod download` happens only when go.mod and go.sum change. 19 | RUN go mod download 20 | 21 | COPY . . 22 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the networking v1alpha1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=networking.k8s.aws 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "networking.k8s.aws", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1alpha1/policyendpoints_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | networking "k8s.io/api/networking/v1" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | // PolicyReference is the reference to the network policy resource 26 | type PolicyReference struct { 27 | // Name is the name of the Policy 28 | Name string `json:"name"` 29 | 30 | // Namespace is the namespace of the Policy 31 | Namespace string `json:"namespace"` 32 | } 33 | 34 | type NetworkAddress string 35 | 36 | // Port contains information about the transport port/protocol 37 | type Port struct { 38 | // Protocol specifies the transport protocol, default TCP 39 | Protocol *corev1.Protocol `json:"protocol,omitempty"` 40 | 41 | // Port specifies the numerical port for the protocol. If empty applies to all ports 42 | Port *int32 `json:"port,omitempty"` 43 | 44 | // Endport specifies the port range port to endPort 45 | // port must be defined and an integer, endPort > port 46 | EndPort *int32 `json:"endPort,omitempty"` 47 | } 48 | 49 | // EndpointInfo defines the network endpoint information for the policy ingress/egress 50 | type EndpointInfo struct { 51 | // CIDR is the network address(s) of the endpoint 52 | CIDR NetworkAddress `json:"cidr"` 53 | 54 | // Except is the exceptions to the CIDR ranges mentioned above. 55 | Except []NetworkAddress `json:"except,omitempty"` 56 | 57 | // Ports is the list of ports 58 | Ports []Port `json:"ports,omitempty"` 59 | } 60 | 61 | // PodEndpoint defines the summary information for the pods 62 | type PodEndpoint struct { 63 | // HostIP is the IP address of the host the pod is currently running on 64 | HostIP NetworkAddress `json:"hostIP"` 65 | // PodIP is the IP address of the pod 66 | PodIP NetworkAddress `json:"podIP"` 67 | // Name is the pod name 68 | Name string `json:"name"` 69 | // Namespace is the pod namespace 70 | Namespace string `json:"namespace"` 71 | } 72 | 73 | // PolicyEndpointSpec defines the desired state of PolicyEndpoint 74 | type PolicyEndpointSpec struct { 75 | // PodSelector is the podSelector from the policy resource 76 | PodSelector *metav1.LabelSelector `json:"podSelector,omitempty"` 77 | 78 | // PolicyRef is a reference to the Kubernetes NetworkPolicy resource. 79 | PolicyRef PolicyReference `json:"policyRef"` 80 | 81 | // PodIsolation specifies whether the pod needs to be isolated for a 82 | // particular traffic direction Ingress or Egress, or both. If default isolation is not 83 | // specified, and there are no ingress/egress rules, then the pod is not isolated 84 | // from the point of view of this policy. This follows the NetworkPolicy spec.PolicyTypes. 
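	// For example (illustrative, following upstream NetworkPolicy semantics): a
	// policy with policyTypes: ["Ingress"] yields PodIsolation: ["Ingress"], and
	// a policy that omits policyTypes but defines ingress rules implies Ingress
	// isolation.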
85 | PodIsolation []networking.PolicyType `json:"podIsolation,omitempty"` 86 | 87 | // PodSelectorEndpoints contains information about the pods 88 | // matching the podSelector 89 | PodSelectorEndpoints []PodEndpoint `json:"podSelectorEndpoints,omitempty"` 90 | 91 | // Ingress is the list of ingress rules containing resolved network addresses 92 | Ingress []EndpointInfo `json:"ingress,omitempty"` 93 | 94 | // Egress is the list of egress rules containing resolved network addresses 95 | Egress []EndpointInfo `json:"egress,omitempty"` 96 | } 97 | 98 | // PolicyEndpointStatus defines the observed state of PolicyEndpoint 99 | type PolicyEndpointStatus struct { 100 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 101 | // Important: Run "make" to regenerate code after modifying this file 102 | } 103 | 104 | //+kubebuilder:object:root=true 105 | //+kubebuilder:subresource:status 106 | 107 | // PolicyEndpoint is the Schema for the policyendpoints API 108 | type PolicyEndpoint struct { 109 | metav1.TypeMeta `json:",inline"` 110 | metav1.ObjectMeta `json:"metadata,omitempty"` 111 | 112 | Spec PolicyEndpointSpec `json:"spec,omitempty"` 113 | Status PolicyEndpointStatus `json:"status,omitempty"` 114 | } 115 | 116 | //+kubebuilder:object:root=true 117 | 118 | // PolicyEndpointList contains a list of PolicyEndpoint 119 | type PolicyEndpointList struct { 120 | metav1.TypeMeta `json:",inline"` 121 | metav1.ListMeta `json:"metadata,omitempty"` 122 | Items []PolicyEndpoint `json:"items"` 123 | } 124 | 125 | func init() { 126 | SchemeBuilder.Register(&PolicyEndpoint{}, &PolicyEndpointList{}) 127 | } 128 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | 3 | /* 4 | Copyright 2023. 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | */ 18 | 19 | // Code generated by controller-gen. DO NOT EDIT. 20 | 21 | package v1alpha1 22 | 23 | import ( 24 | v1 "k8s.io/api/core/v1" 25 | networkingv1 "k8s.io/api/networking/v1" 26 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 27 | runtime "k8s.io/apimachinery/pkg/runtime" 28 | ) 29 | 30 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 31 | func (in *EndpointInfo) DeepCopyInto(out *EndpointInfo) { 32 | *out = *in 33 | if in.Except != nil { 34 | in, out := &in.Except, &out.Except 35 | *out = make([]NetworkAddress, len(*in)) 36 | copy(*out, *in) 37 | } 38 | if in.Ports != nil { 39 | in, out := &in.Ports, &out.Ports 40 | *out = make([]Port, len(*in)) 41 | for i := range *in { 42 | (*in)[i].DeepCopyInto(&(*out)[i]) 43 | } 44 | } 45 | } 46 | 47 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInfo. 
48 | func (in *EndpointInfo) DeepCopy() *EndpointInfo { 49 | if in == nil { 50 | return nil 51 | } 52 | out := new(EndpointInfo) 53 | in.DeepCopyInto(out) 54 | return out 55 | } 56 | 57 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 58 | func (in *PodEndpoint) DeepCopyInto(out *PodEndpoint) { 59 | *out = *in 60 | } 61 | 62 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodEndpoint. 63 | func (in *PodEndpoint) DeepCopy() *PodEndpoint { 64 | if in == nil { 65 | return nil 66 | } 67 | out := new(PodEndpoint) 68 | in.DeepCopyInto(out) 69 | return out 70 | } 71 | 72 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 73 | func (in *PolicyEndpoint) DeepCopyInto(out *PolicyEndpoint) { 74 | *out = *in 75 | out.TypeMeta = in.TypeMeta 76 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 77 | in.Spec.DeepCopyInto(&out.Spec) 78 | out.Status = in.Status 79 | } 80 | 81 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyEndpoint. 82 | func (in *PolicyEndpoint) DeepCopy() *PolicyEndpoint { 83 | if in == nil { 84 | return nil 85 | } 86 | out := new(PolicyEndpoint) 87 | in.DeepCopyInto(out) 88 | return out 89 | } 90 | 91 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 92 | func (in *PolicyEndpoint) DeepCopyObject() runtime.Object { 93 | if c := in.DeepCopy(); c != nil { 94 | return c 95 | } 96 | return nil 97 | } 98 | 99 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 100 | func (in *PolicyEndpointList) DeepCopyInto(out *PolicyEndpointList) { 101 | *out = *in 102 | out.TypeMeta = in.TypeMeta 103 | in.ListMeta.DeepCopyInto(&out.ListMeta) 104 | if in.Items != nil { 105 | in, out := &in.Items, &out.Items 106 | *out = make([]PolicyEndpoint, len(*in)) 107 | for i := range *in { 108 | (*in)[i].DeepCopyInto(&(*out)[i]) 109 | } 110 | } 111 | } 112 | 113 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyEndpointList. 114 | func (in *PolicyEndpointList) DeepCopy() *PolicyEndpointList { 115 | if in == nil { 116 | return nil 117 | } 118 | out := new(PolicyEndpointList) 119 | in.DeepCopyInto(out) 120 | return out 121 | } 122 | 123 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 124 | func (in *PolicyEndpointList) DeepCopyObject() runtime.Object { 125 | if c := in.DeepCopy(); c != nil { 126 | return c 127 | } 128 | return nil 129 | } 130 | 131 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
132 | func (in *PolicyEndpointSpec) DeepCopyInto(out *PolicyEndpointSpec) { 133 | *out = *in 134 | if in.PodSelector != nil { 135 | in, out := &in.PodSelector, &out.PodSelector 136 | *out = new(metav1.LabelSelector) 137 | (*in).DeepCopyInto(*out) 138 | } 139 | out.PolicyRef = in.PolicyRef 140 | if in.PodIsolation != nil { 141 | in, out := &in.PodIsolation, &out.PodIsolation 142 | *out = make([]networkingv1.PolicyType, len(*in)) 143 | copy(*out, *in) 144 | } 145 | if in.PodSelectorEndpoints != nil { 146 | in, out := &in.PodSelectorEndpoints, &out.PodSelectorEndpoints 147 | *out = make([]PodEndpoint, len(*in)) 148 | copy(*out, *in) 149 | } 150 | if in.Ingress != nil { 151 | in, out := &in.Ingress, &out.Ingress 152 | *out = make([]EndpointInfo, len(*in)) 153 | for i := range *in { 154 | (*in)[i].DeepCopyInto(&(*out)[i]) 155 | } 156 | } 157 | if in.Egress != nil { 158 | in, out := &in.Egress, &out.Egress 159 | *out = make([]EndpointInfo, len(*in)) 160 | for i := range *in { 161 | (*in)[i].DeepCopyInto(&(*out)[i]) 162 | } 163 | } 164 | } 165 | 166 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyEndpointSpec. 167 | func (in *PolicyEndpointSpec) DeepCopy() *PolicyEndpointSpec { 168 | if in == nil { 169 | return nil 170 | } 171 | out := new(PolicyEndpointSpec) 172 | in.DeepCopyInto(out) 173 | return out 174 | } 175 | 176 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 177 | func (in *PolicyEndpointStatus) DeepCopyInto(out *PolicyEndpointStatus) { 178 | *out = *in 179 | } 180 | 181 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyEndpointStatus. 182 | func (in *PolicyEndpointStatus) DeepCopy() *PolicyEndpointStatus { 183 | if in == nil { 184 | return nil 185 | } 186 | out := new(PolicyEndpointStatus) 187 | in.DeepCopyInto(out) 188 | return out 189 | } 190 | 191 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 192 | func (in *PolicyReference) DeepCopyInto(out *PolicyReference) { 193 | *out = *in 194 | } 195 | 196 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyReference. 197 | func (in *PolicyReference) DeepCopy() *PolicyReference { 198 | if in == nil { 199 | return nil 200 | } 201 | out := new(PolicyReference) 202 | in.DeepCopyInto(out) 203 | return out 204 | } 205 | 206 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 207 | func (in *Port) DeepCopyInto(out *Port) { 208 | *out = *in 209 | if in.Protocol != nil { 210 | in, out := &in.Protocol, &out.Protocol 211 | *out = new(v1.Protocol) 212 | **out = **in 213 | } 214 | if in.Port != nil { 215 | in, out := &in.Port, &out.Port 216 | *out = new(int32) 217 | **out = **in 218 | } 219 | if in.EndPort != nil { 220 | in, out := &in.EndPort, &out.EndPort 221 | *out = new(int32) 222 | **out = **in 223 | } 224 | } 225 | 226 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. 
227 | func (in *Port) DeepCopy() *Port { 228 | if in == nil { 229 | return nil 230 | } 231 | out := new(Port) 232 | in.DeepCopyInto(out) 233 | return out 234 | } 235 | -------------------------------------------------------------------------------- /cmd/cli/cli-selector/cli-all.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/aws/aws-network-policy-agent/pkg/clihelper" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var subCmd = &cobra.Command{ 12 | Use: "ebpf", 13 | Aliases: []string{"ebpf"}, 14 | Short: "Dump all ebpf related data", 15 | Run: func(cmd *cobra.Command, args []string) { 16 | 17 | }, 18 | } 19 | 20 | var progCmd = &cobra.Command{ 21 | Use: "progs", 22 | Aliases: []string{"p"}, 23 | Short: "Dump all ebpf program related data", 24 | Args: cobra.ExactArgs(0), 25 | Run: func(cmd *cobra.Command, args []string) { 26 | err := clihelper.ProgShow() 27 | if err != nil { 28 | fmt.Println("Failed to execute the cmd - ", err) 29 | } 30 | }, 31 | } 32 | 33 | var mapCmd = &cobra.Command{ 34 | Use: "maps", 35 | Aliases: []string{"m"}, 36 | Short: "Dump all ebpf maps related data", 37 | Args: cobra.ExactArgs(0), 38 | Run: func(cmd *cobra.Command, args []string) { 39 | err := clihelper.MapShow() 40 | if err != nil { 41 | fmt.Println("Failed to execute the cmd - ", err) 42 | } 43 | }, 44 | } 45 | 46 | var ebpfdataCmd = &cobra.Command{ 47 | Use: "loaded-ebpfdata", 48 | Aliases: []string{"e"}, 49 | Short: "Dump all ebpf related data", 50 | Args: cobra.ExactArgs(0), 51 | Run: func(cmd *cobra.Command, args []string) { 52 | err := clihelper.Show() 53 | if err != nil { 54 | fmt.Println("Failed to execute the cmd - ", err) 55 | } 56 | }, 57 | } 58 | 59 | var mapWalkCmd = &cobra.Command{ 60 | Use: "dump-maps", 61 | Aliases: []string{"d"}, 62 | Short: "Dump all ebpf maps related data", 63 | Args: cobra.ExactArgs(1), 64 | Run: func(cmd *cobra.Command, args []string) { 65 | mapID := args[0] 66 | strMapID, _ := strconv.Atoi(mapID) 67 | err := clihelper.MapWalk(strMapID) 68 | if err != nil { 69 | fmt.Println("Failed to execute the cmd - ", err) 70 | } 71 | }, 72 | } 73 | 74 | func init() { 75 | 76 | subCmd.AddCommand(progCmd) 77 | subCmd.AddCommand(mapCmd) 78 | subCmd.AddCommand(ebpfdataCmd) 79 | subCmd.AddCommand(mapWalkCmd) 80 | } 81 | -------------------------------------------------------------------------------- /cmd/cli/cli-selector/cli.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | var rootCmd = &cobra.Command{ 8 | Use: "aws-eks-na-cli", 9 | Short: "aws-eks-na-cli - a CLI to dump BPF states", 10 | Long: `aws-eks-na-cli CLI can be used to dump eBPF maps, 11 | programs, qdiscs and so on`, 12 | Run: func(cmd *cobra.Command, args []string) { 13 | 14 | }, 15 | } 16 | 17 | func init() { 18 | rootCmd.AddCommand(subCmd) 19 | } 20 | 21 | func Execute() error { 22 | return rootCmd.Execute() 23 | } 24 | -------------------------------------------------------------------------------- /cmd/cli/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | cli "github.com/aws/aws-network-policy-agent/cmd/cli/cli-selector" 8 | ) 9 | 10 | func main() { 11 | 12 | if err := cli.Execute(); err != nil { 13 | fmt.Fprintf(os.Stderr, "There was an error while executing your CLI '%s'", err) 14 | os.Exit(1) 15 | } 16 
| } 17 | -------------------------------------------------------------------------------- /cmd/cliv6/cli-selector-v6/cli-all.go: -------------------------------------------------------------------------------- 1 | package cliv6 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/aws/aws-network-policy-agent/pkg/clihelper" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var subCmd = &cobra.Command{ 12 | Use: "ebpf", 13 | Aliases: []string{"ebpf"}, 14 | Short: "Dump all ebpf related data", 15 | Run: func(cmd *cobra.Command, args []string) { 16 | 17 | }, 18 | } 19 | 20 | var progCmd = &cobra.Command{ 21 | Use: "progs", 22 | Aliases: []string{"p"}, 23 | Short: "Dump all ebpf program related data", 24 | Args: cobra.ExactArgs(0), 25 | Run: func(cmd *cobra.Command, args []string) { 26 | err := clihelper.ProgShow() 27 | if err != nil { 28 | fmt.Println("Failed to execute the cmd - ", err) 29 | } 30 | }, 31 | } 32 | 33 | var mapCmd = &cobra.Command{ 34 | Use: "maps", 35 | Aliases: []string{"m"}, 36 | Short: "Dump all ebpf maps related data", 37 | Args: cobra.ExactArgs(0), 38 | Run: func(cmd *cobra.Command, args []string) { 39 | err := clihelper.MapShow() 40 | if err != nil { 41 | fmt.Println("Failed to execute the cmd - ", err) 42 | } 43 | }, 44 | } 45 | 46 | var ebpfdataCmd = &cobra.Command{ 47 | Use: "loaded-ebpfdata", 48 | Aliases: []string{"e"}, 49 | Short: "Dump all ebpf related data", 50 | Args: cobra.ExactArgs(0), 51 | Run: func(cmd *cobra.Command, args []string) { 52 | err := clihelper.Show() 53 | if err != nil { 54 | fmt.Println("Failed to execute the cmd - ", err) 55 | } 56 | }, 57 | } 58 | 59 | var mapWalkCmd = &cobra.Command{ 60 | Use: "dump-maps", 61 | Aliases: []string{"d"}, 62 | Short: "Dump all ebpf maps related data", 63 | Args: cobra.ExactArgs(1), 64 | Run: func(cmd *cobra.Command, args []string) { 65 | mapID := args[0] 66 | strMapID, _ := strconv.Atoi(mapID) 67 | err := clihelper.MapWalkv6(strMapID) 68 | if err != nil { 69 | fmt.Println("Failed to execute the cmd - ", err) 70 | } 71 | }, 72 | } 73 | 74 | func init() { 75 | 76 | subCmd.AddCommand(progCmd) 77 | subCmd.AddCommand(mapCmd) 78 | subCmd.AddCommand(ebpfdataCmd) 79 | subCmd.AddCommand(mapWalkCmd) 80 | } 81 | -------------------------------------------------------------------------------- /cmd/cliv6/cli-selector-v6/cli.go: -------------------------------------------------------------------------------- 1 | package cliv6 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | ) 6 | 7 | var rootCmd = &cobra.Command{ 8 | Use: "aws-eks-na-cli", 9 | Short: "aws-eks-na-cli - a CLI to dump BPF states", 10 | Long: `aws-eks-na-cli CLI can be used to dump eBPF maps, 11 | programs, qdiscs and so on`, 12 | Run: func(cmd *cobra.Command, args []string) { 13 | 14 | }, 15 | } 16 | 17 | func init() { 18 | rootCmd.AddCommand(subCmd) 19 | } 20 | 21 | func Execute() error { 22 | return rootCmd.Execute() 23 | } 24 | -------------------------------------------------------------------------------- /cmd/cliv6/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | cli "github.com/aws/aws-network-policy-agent/cmd/cliv6/cli-selector-v6" 8 | ) 9 | 10 | func main() { 11 | 12 | if err := cli.Execute(); err != nil { 13 | fmt.Fprintf(os.Stderr, "There was an error while executing your CLI '%s'", err) 14 | os.Exit(1) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /go.mod: 
-------------------------------------------------------------------------------- 1 | module github.com/aws/aws-network-policy-agent 2 | 3 | go 1.24.2 4 | 5 | require ( 6 | github.com/aws/amazon-vpc-cni-k8s v1.19.3-rc1 7 | github.com/aws/aws-ebpf-sdk-go v1.0.12 8 | github.com/aws/aws-sdk-go v1.55.5 9 | github.com/go-logr/logr v1.4.2 10 | github.com/go-logr/zapr v1.3.0 11 | github.com/golang/mock v1.6.0 12 | github.com/google/go-cmp v0.7.0 13 | github.com/google/uuid v1.6.0 14 | github.com/hashicorp/go-multierror v1.1.1 15 | github.com/onsi/ginkgo/v2 v2.23.4 16 | github.com/onsi/gomega v1.36.3 17 | github.com/pkg/errors v0.9.1 18 | github.com/prometheus/client_golang v1.20.5 19 | github.com/spf13/cobra v1.8.1 20 | github.com/spf13/pflag v1.0.5 21 | github.com/stretchr/testify v1.10.0 22 | github.com/vishvananda/netlink v1.3.0 23 | go.uber.org/zap v1.27.0 24 | golang.org/x/sys v0.33.0 25 | google.golang.org/grpc v1.69.2 26 | google.golang.org/protobuf v1.36.5 27 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 28 | k8s.io/api v0.31.3 29 | k8s.io/apimachinery v0.31.3 30 | k8s.io/client-go v0.31.3 31 | sigs.k8s.io/controller-runtime v0.19.3 32 | ) 33 | 34 | require ( 35 | github.com/beorn7/perks v1.0.1 // indirect 36 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 37 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 38 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 39 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 40 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 41 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 42 | github.com/go-openapi/jsonreference v0.20.2 // indirect 43 | github.com/go-openapi/swag v0.22.4 // indirect 44 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 45 | github.com/gogo/protobuf v1.3.2 // indirect 46 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 47 | github.com/golang/protobuf v1.5.4 // indirect 48 | github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect 49 | github.com/google/gofuzz v1.2.0 // indirect 50 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 51 | github.com/hashicorp/errwrap v1.1.0 // indirect 52 | github.com/imdario/mergo v0.3.16 // indirect 53 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 54 | github.com/jmespath/go-jmespath v0.4.0 // indirect 55 | github.com/josharian/intern v1.0.0 // indirect 56 | github.com/json-iterator/go v1.1.12 // indirect 57 | github.com/klauspost/compress v1.17.9 // indirect 58 | github.com/mailru/easyjson v0.7.7 // indirect 59 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 60 | github.com/modern-go/reflect2 v1.0.2 // indirect 61 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 62 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 63 | github.com/prometheus/client_model v0.6.1 // indirect 64 | github.com/prometheus/common v0.62.0 // indirect 65 | github.com/prometheus/procfs v0.15.1 // indirect 66 | github.com/samber/lo v1.50.0 67 | github.com/vishvananda/netns v0.0.4 // indirect 68 | github.com/x448/float16 v0.8.4 // indirect 69 | go.uber.org/automaxprocs v1.6.0 // indirect 70 | go.uber.org/multierr v1.11.0 // indirect 71 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect 72 | golang.org/x/net v0.40.0 // indirect 73 | golang.org/x/oauth2 v0.27.0 // indirect 74 | golang.org/x/term v0.32.0 // indirect 75 | golang.org/x/text v0.25.0 // indirect 76 | golang.org/x/time v0.5.0 // 
indirect 77 | golang.org/x/tools v0.31.0 // indirect 78 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 79 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241015192408-796eee8c2d53 // indirect 80 | gopkg.in/inf.v0 v0.9.1 // indirect 81 | gopkg.in/yaml.v2 v2.4.0 // indirect 82 | gopkg.in/yaml.v3 v3.0.1 // indirect 83 | k8s.io/apiextensions-apiserver v0.31.3 // indirect 84 | k8s.io/klog/v2 v2.130.1 // indirect 85 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect 86 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect 87 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 88 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 89 | sigs.k8s.io/yaml v1.4.0 // indirect 90 | ) 91 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "os" 21 | 22 | "github.com/aws/aws-network-policy-agent/pkg/rpc" 23 | 24 | "github.com/aws/aws-network-policy-agent/pkg/logger" 25 | 26 | "github.com/spf13/pflag" 27 | 28 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 29 | // to ensure that exec-entrypoint and run can make use of them. 
30 | _ "k8s.io/client-go/plugin/pkg/client/auth" 31 | 32 | policyk8sawsv1 "github.com/aws/aws-network-policy-agent/api/v1alpha1" 33 | "github.com/aws/aws-network-policy-agent/controllers" 34 | "github.com/aws/aws-network-policy-agent/pkg/config" 35 | "github.com/aws/aws-network-policy-agent/pkg/metrics" 36 | "k8s.io/apimachinery/pkg/runtime" 37 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 38 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 39 | ctrl "sigs.k8s.io/controller-runtime" 40 | "sigs.k8s.io/controller-runtime/pkg/healthz" 41 | //+kubebuilder:scaffold:imports 42 | ) 43 | 44 | var ( 45 | scheme = runtime.NewScheme() 46 | ) 47 | 48 | func init() { 49 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 50 | 51 | utilruntime.Must(policyk8sawsv1.AddToScheme(scheme)) 52 | //+kubebuilder:scaffold:scheme 53 | } 54 | 55 | func main() { 56 | initLogger := logger.New("info", "") 57 | 58 | ctrlConfig, err := loadControllerConfig() 59 | if err != nil { 60 | initLogger.Errorf("unable to load policy endpoint controller config %v", err) 61 | os.Exit(1) 62 | } 63 | 64 | log := logger.New(ctrlConfig.LogLevel, ctrlConfig.LogFile) 65 | log.Infof("Starting network policy agent with log level: %s", ctrlConfig.LogLevel) 66 | 67 | ctrl.SetLogger(logger.GetControllerRuntimeLogger()) 68 | restCFG, err := config.BuildRestConfig(ctrlConfig.RuntimeConfig) 69 | if err != nil { 70 | log.Errorf("unable to build REST config %v", err) 71 | os.Exit(1) 72 | } 73 | 74 | runtimeOpts := config.BuildRuntimeOptions(ctrlConfig.RuntimeConfig, scheme) 75 | mgr, err := ctrl.NewManager(restCFG, runtimeOpts) 76 | if err != nil { 77 | log.Errorf("unable to create controller manager %v", err) 78 | os.Exit(1) 79 | } 80 | 81 | err = ctrlConfig.ValidControllerFlags() 82 | if err != nil { 83 | log.Errorf("Controller flags validation failed %v", err) 84 | os.Exit(1) 85 | } 86 | 87 | ctx := ctrl.SetupSignalHandler() 88 | var policyEndpointController *controllers.PolicyEndpointsReconciler 89 | if ctrlConfig.EnableNetworkPolicy { 90 | log.Info("Network Policy is enabled, registering the policyEndpointController...") 91 | policyEndpointController, err = controllers.NewPolicyEndpointsReconciler(mgr.GetClient(), 92 | ctrlConfig.EnablePolicyEventLogs, ctrlConfig.EnableCloudWatchLogs, 93 | ctrlConfig.EnableIPv6, ctrlConfig.EnableNetworkPolicy, ctrlConfig.ConntrackCacheCleanupPeriod, ctrlConfig.ConntrackCacheTableSize) 94 | if err != nil { 95 | log.Errorf("unable to setup controller, PolicyEndpoints init failed %v", err) 96 | os.Exit(1) 97 | } 98 | 99 | if err = policyEndpointController.SetupWithManager(ctx, mgr); err != nil { 100 | log.Errorf("unable to create controller PolicyEndpoints %v", err) 101 | os.Exit(1) 102 | } 103 | } else { 104 | log.Info("Network Policy is disabled, skip the policyEndpointController registration") 105 | } 106 | 107 | //+kubebuilder:scaffold:builder 108 | 109 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 110 | log.Errorf("unable to set up health check %v", err) 111 | os.Exit(1) 112 | } 113 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 114 | log.Errorf("unable to set up ready check %v", err) 115 | os.Exit(1) 116 | } 117 | 118 | // CNI makes rpc calls to NP agent regardless NP is enabled or not 119 | // need to start rpc always 120 | go func() { 121 | if err := rpc.RunRPCHandler(policyEndpointController); err != nil { 122 | log.Errorf("Failed to set up gRPC Handler %v", err) 123 | os.Exit(1) 124 | } 125 | }() 126 | 127 | go metrics.ServeMetrics() 128 | 
129 | log.Info("starting manager") 130 | if err := mgr.Start(ctx); err != nil { 131 | log.Errorf("problem running manager %v", err) 132 | os.Exit(1) 133 | } 134 | 135 | } 136 | 137 | // loadControllerConfig loads the controller configuration 138 | func loadControllerConfig() (config.ControllerConfig, error) { 139 | controllerConfig := config.ControllerConfig{} 140 | fs := pflag.NewFlagSet("", pflag.ExitOnError) 141 | controllerConfig.BindFlags(fs) 142 | 143 | if err := fs.Parse(os.Args); err != nil { 144 | return controllerConfig, err 145 | } 146 | 147 | return controllerConfig, nil 148 | } 149 | -------------------------------------------------------------------------------- /mocks/controller-runtime/client/client_mocks.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: sigs.k8s.io/controller-runtime/pkg/client (interfaces: Client) 3 | 4 | // Package mock_client is a generated GoMock package. 5 | package mock_client 6 | 7 | import ( 8 | context "context" 9 | reflect "reflect" 10 | 11 | gomock "github.com/golang/mock/gomock" 12 | meta "k8s.io/apimachinery/pkg/api/meta" 13 | runtime "k8s.io/apimachinery/pkg/runtime" 14 | schema "k8s.io/apimachinery/pkg/runtime/schema" 15 | types "k8s.io/apimachinery/pkg/types" 16 | client "sigs.k8s.io/controller-runtime/pkg/client" 17 | ) 18 | 19 | // MockClient is a mock of Client interface. 20 | type MockClient struct { 21 | ctrl *gomock.Controller 22 | recorder *MockClientMockRecorder 23 | } 24 | 25 | // MockClientMockRecorder is the mock recorder for MockClient. 26 | type MockClientMockRecorder struct { 27 | mock *MockClient 28 | } 29 | 30 | // NewMockClient creates a new mock instance. 31 | func NewMockClient(ctrl *gomock.Controller) *MockClient { 32 | mock := &MockClient{ctrl: ctrl} 33 | mock.recorder = &MockClientMockRecorder{mock} 34 | return mock 35 | } 36 | 37 | // EXPECT returns an object that allows the caller to indicate expected use. 38 | func (m *MockClient) EXPECT() *MockClientMockRecorder { 39 | return m.recorder 40 | } 41 | 42 | // Create mocks base method. 43 | func (m *MockClient) Create(arg0 context.Context, arg1 client.Object, arg2 ...client.CreateOption) error { 44 | m.ctrl.T.Helper() 45 | varargs := []interface{}{arg0, arg1} 46 | for _, a := range arg2 { 47 | varargs = append(varargs, a) 48 | } 49 | ret := m.ctrl.Call(m, "Create", varargs...) 50 | ret0, _ := ret[0].(error) 51 | return ret0 52 | } 53 | 54 | // Create indicates an expected call of Create. 55 | func (mr *MockClientMockRecorder) Create(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { 56 | mr.mock.ctrl.T.Helper() 57 | varargs := append([]interface{}{arg0, arg1}, arg2...) 58 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockClient)(nil).Create), varargs...) 59 | } 60 | 61 | // Delete mocks base method. 62 | func (m *MockClient) Delete(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteOption) error { 63 | m.ctrl.T.Helper() 64 | varargs := []interface{}{arg0, arg1} 65 | for _, a := range arg2 { 66 | varargs = append(varargs, a) 67 | } 68 | ret := m.ctrl.Call(m, "Delete", varargs...) 69 | ret0, _ := ret[0].(error) 70 | return ret0 71 | } 72 | 73 | // Delete indicates an expected call of Delete. 74 | func (mr *MockClientMockRecorder) Delete(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { 75 | mr.mock.ctrl.T.Helper() 76 | varargs := append([]interface{}{arg0, arg1}, arg2...) 
77 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), varargs...) 78 | } 79 | 80 | // DeleteAllOf mocks base method. 81 | func (m *MockClient) DeleteAllOf(arg0 context.Context, arg1 client.Object, arg2 ...client.DeleteAllOfOption) error { 82 | m.ctrl.T.Helper() 83 | varargs := []interface{}{arg0, arg1} 84 | for _, a := range arg2 { 85 | varargs = append(varargs, a) 86 | } 87 | ret := m.ctrl.Call(m, "DeleteAllOf", varargs...) 88 | ret0, _ := ret[0].(error) 89 | return ret0 90 | } 91 | 92 | // DeleteAllOf indicates an expected call of DeleteAllOf. 93 | func (mr *MockClientMockRecorder) DeleteAllOf(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { 94 | mr.mock.ctrl.T.Helper() 95 | varargs := append([]interface{}{arg0, arg1}, arg2...) 96 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllOf", reflect.TypeOf((*MockClient)(nil).DeleteAllOf), varargs...) 97 | } 98 | 99 | // Get mocks base method. 100 | func (m *MockClient) Get(arg0 context.Context, arg1 types.NamespacedName, arg2 client.Object, arg3 ...client.GetOption) error { 101 | m.ctrl.T.Helper() 102 | varargs := []interface{}{arg0, arg1, arg2} 103 | for _, a := range arg3 { 104 | varargs = append(varargs, a) 105 | } 106 | ret := m.ctrl.Call(m, "Get", varargs...) 107 | ret0, _ := ret[0].(error) 108 | return ret0 109 | } 110 | 111 | // Get indicates an expected call of Get. 112 | func (mr *MockClientMockRecorder) Get(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { 113 | mr.mock.ctrl.T.Helper() 114 | varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 115 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), varargs...) 116 | } 117 | 118 | // GroupVersionKindFor mocks base method. 119 | func (m *MockClient) GroupVersionKindFor(arg0 runtime.Object) (schema.GroupVersionKind, error) { 120 | m.ctrl.T.Helper() 121 | ret := m.ctrl.Call(m, "GroupVersionKindFor", arg0) 122 | ret0, _ := ret[0].(schema.GroupVersionKind) 123 | ret1, _ := ret[1].(error) 124 | return ret0, ret1 125 | } 126 | 127 | // GroupVersionKindFor indicates an expected call of GroupVersionKindFor. 128 | func (mr *MockClientMockRecorder) GroupVersionKindFor(arg0 interface{}) *gomock.Call { 129 | mr.mock.ctrl.T.Helper() 130 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GroupVersionKindFor", reflect.TypeOf((*MockClient)(nil).GroupVersionKindFor), arg0) 131 | } 132 | 133 | // IsObjectNamespaced mocks base method. 134 | func (m *MockClient) IsObjectNamespaced(arg0 runtime.Object) (bool, error) { 135 | m.ctrl.T.Helper() 136 | ret := m.ctrl.Call(m, "IsObjectNamespaced", arg0) 137 | ret0, _ := ret[0].(bool) 138 | ret1, _ := ret[1].(error) 139 | return ret0, ret1 140 | } 141 | 142 | // IsObjectNamespaced indicates an expected call of IsObjectNamespaced. 143 | func (mr *MockClientMockRecorder) IsObjectNamespaced(arg0 interface{}) *gomock.Call { 144 | mr.mock.ctrl.T.Helper() 145 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsObjectNamespaced", reflect.TypeOf((*MockClient)(nil).IsObjectNamespaced), arg0) 146 | } 147 | 148 | // List mocks base method. 149 | func (m *MockClient) List(arg0 context.Context, arg1 client.ObjectList, arg2 ...client.ListOption) error { 150 | m.ctrl.T.Helper() 151 | varargs := []interface{}{arg0, arg1} 152 | for _, a := range arg2 { 153 | varargs = append(varargs, a) 154 | } 155 | ret := m.ctrl.Call(m, "List", varargs...) 
156 | ret0, _ := ret[0].(error) 157 | return ret0 158 | } 159 | 160 | // List indicates an expected call of List. 161 | func (mr *MockClientMockRecorder) List(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { 162 | mr.mock.ctrl.T.Helper() 163 | varargs := append([]interface{}{arg0, arg1}, arg2...) 164 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockClient)(nil).List), varargs...) 165 | } 166 | 167 | // Patch mocks base method. 168 | func (m *MockClient) Patch(arg0 context.Context, arg1 client.Object, arg2 client.Patch, arg3 ...client.PatchOption) error { 169 | m.ctrl.T.Helper() 170 | varargs := []interface{}{arg0, arg1, arg2} 171 | for _, a := range arg3 { 172 | varargs = append(varargs, a) 173 | } 174 | ret := m.ctrl.Call(m, "Patch", varargs...) 175 | ret0, _ := ret[0].(error) 176 | return ret0 177 | } 178 | 179 | // Patch indicates an expected call of Patch. 180 | func (mr *MockClientMockRecorder) Patch(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { 181 | mr.mock.ctrl.T.Helper() 182 | varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 183 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Patch", reflect.TypeOf((*MockClient)(nil).Patch), varargs...) 184 | } 185 | 186 | // RESTMapper mocks base method. 187 | func (m *MockClient) RESTMapper() meta.RESTMapper { 188 | m.ctrl.T.Helper() 189 | ret := m.ctrl.Call(m, "RESTMapper") 190 | ret0, _ := ret[0].(meta.RESTMapper) 191 | return ret0 192 | } 193 | 194 | // RESTMapper indicates an expected call of RESTMapper. 195 | func (mr *MockClientMockRecorder) RESTMapper() *gomock.Call { 196 | mr.mock.ctrl.T.Helper() 197 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RESTMapper", reflect.TypeOf((*MockClient)(nil).RESTMapper)) 198 | } 199 | 200 | // Scheme mocks base method. 201 | func (m *MockClient) Scheme() *runtime.Scheme { 202 | m.ctrl.T.Helper() 203 | ret := m.ctrl.Call(m, "Scheme") 204 | ret0, _ := ret[0].(*runtime.Scheme) 205 | return ret0 206 | } 207 | 208 | // Scheme indicates an expected call of Scheme. 209 | func (mr *MockClientMockRecorder) Scheme() *gomock.Call { 210 | mr.mock.ctrl.T.Helper() 211 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Scheme", reflect.TypeOf((*MockClient)(nil).Scheme)) 212 | } 213 | 214 | // Status mocks base method. 215 | func (m *MockClient) Status() client.SubResourceWriter { 216 | m.ctrl.T.Helper() 217 | ret := m.ctrl.Call(m, "Status") 218 | ret0, _ := ret[0].(client.SubResourceWriter) 219 | return ret0 220 | } 221 | 222 | // Status indicates an expected call of Status. 223 | func (mr *MockClientMockRecorder) Status() *gomock.Call { 224 | mr.mock.ctrl.T.Helper() 225 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockClient)(nil).Status)) 226 | } 227 | 228 | // SubResource mocks base method. 229 | func (m *MockClient) SubResource(arg0 string) client.SubResourceClient { 230 | m.ctrl.T.Helper() 231 | ret := m.ctrl.Call(m, "SubResource", arg0) 232 | ret0, _ := ret[0].(client.SubResourceClient) 233 | return ret0 234 | } 235 | 236 | // SubResource indicates an expected call of SubResource. 237 | func (mr *MockClientMockRecorder) SubResource(arg0 interface{}) *gomock.Call { 238 | mr.mock.ctrl.T.Helper() 239 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockClient)(nil).SubResource), arg0) 240 | } 241 | 242 | // Update mocks base method. 
243 | func (m *MockClient) Update(arg0 context.Context, arg1 client.Object, arg2 ...client.UpdateOption) error { 244 | m.ctrl.T.Helper() 245 | varargs := []interface{}{arg0, arg1} 246 | for _, a := range arg2 { 247 | varargs = append(varargs, a) 248 | } 249 | ret := m.ctrl.Call(m, "Update", varargs...) 250 | ret0, _ := ret[0].(error) 251 | return ret0 252 | } 253 | 254 | // Update indicates an expected call of Update. 255 | func (mr *MockClientMockRecorder) Update(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { 256 | mr.mock.ctrl.T.Helper() 257 | varargs := append([]interface{}{arg0, arg1}, arg2...) 258 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockClient)(nil).Update), varargs...) 259 | } 260 | -------------------------------------------------------------------------------- /pkg/aws/cloud.go: -------------------------------------------------------------------------------- 1 | package aws 2 | 3 | import ( 4 | "github.com/aws/aws-network-policy-agent/pkg/aws/services" 5 | "github.com/aws/aws-network-policy-agent/pkg/utils" 6 | "github.com/aws/aws-sdk-go/aws" 7 | "github.com/aws/aws-sdk-go/aws/ec2metadata" 8 | "github.com/aws/aws-sdk-go/aws/endpoints" 9 | "github.com/aws/aws-sdk-go/aws/session" 10 | "github.com/aws/aws-sdk-go/service/ec2" 11 | "github.com/aws/aws-sdk-go/service/ec2/ec2iface" 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | const ( 16 | resourceID = "resource-id" 17 | resourceKey = "key" 18 | ) 19 | 20 | var ( 21 | clusterNameTags = []string{ 22 | "aws:eks:cluster-name", 23 | } 24 | ) 25 | 26 | type Cloud interface { 27 | //CloudWatch provides API access to AWS Cloudwatch Service 28 | CloudWatchLogs() services.CloudWatchLogs 29 | 30 | // AccountID provides AccountID for the kubernetes cluster 31 | AccountID() string 32 | 33 | // Region for the kubernetes cluster 34 | Region() string 35 | 36 | // Cluster Name 37 | ClusterName() string 38 | } 39 | 40 | func NewCloud(cfg CloudConfig) (Cloud, error) { 41 | sess := session.Must(session.NewSession(aws.NewConfig())) 42 | //injectUserAgent(&sess.Handlers) 43 | 44 | metadata := services.NewEC2Metadata(sess) 45 | if len(cfg.Region) == 0 { 46 | region, err := metadata.Region() 47 | if err != nil { 48 | return nil, errors.Wrap(err, "failed to introspect region from EC2Metadata, specify --aws-region instead if EC2Metadata is unavailable") 49 | } 50 | cfg.Region = region 51 | } 52 | 53 | awsCfg := aws.NewConfig().WithRegion(cfg.Region).WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint) 54 | sess = sess.Copy(awsCfg) 55 | 56 | instanceIdentityDocument, err := metadata.GetInstanceIdentityDocument() 57 | if err != nil { 58 | return nil, errors.Wrap(err, "failed to get instanceIdentityDocument from EC2Metadata") 59 | } 60 | ec2ServiceClient := ec2.New(sess) 61 | cfg.ClusterName = getClusterName(ec2ServiceClient, instanceIdentityDocument) 62 | 63 | return &defaultCloud{ 64 | cfg: cfg, 65 | cloudWatchlogs: services.NewCloudWatchLogs(sess), 66 | }, nil 67 | } 68 | 69 | var _ Cloud = &defaultCloud{} 70 | 71 | type defaultCloud struct { 72 | cfg CloudConfig 73 | 74 | cloudWatchlogs services.CloudWatchLogs 75 | } 76 | 77 | func (c *defaultCloud) CloudWatchLogs() services.CloudWatchLogs { 78 | return c.cloudWatchlogs 79 | } 80 | 81 | func (c *defaultCloud) AccountID() string { 82 | return c.cfg.AccountID 83 | } 84 | 85 | func (c *defaultCloud) Region() string { 86 | return c.cfg.Region 87 | } 88 | 89 | func (c *defaultCloud) ClusterName() string { 90 | return c.cfg.ClusterName 91 | } 92 | 93 | func 
getClusterName(ec2ServiceClient ec2iface.EC2API, instanceIdentityDocument ec2metadata.EC2InstanceIdentityDocument) string { 94 | var clusterName string 95 | var err error 96 | for _, tag := range clusterNameTags { 97 | clusterName, err = getClusterTag(tag, ec2ServiceClient, instanceIdentityDocument) 98 | if err == nil && clusterName != "" { 99 | break 100 | } 101 | } 102 | if clusterName == "" { 103 | clusterName = utils.DEFAULT_CLUSTER_NAME 104 | } 105 | return clusterName 106 | } 107 | 108 | // getClusterTag is used to retrieve a tag from the ec2 instance 109 | func getClusterTag(tagKey string, ec2ServiceClient ec2iface.EC2API, instanceIdentityDocument ec2metadata.EC2InstanceIdentityDocument) (string, error) { 110 | input := ec2.DescribeTagsInput{ 111 | Filters: []*ec2.Filter{ 112 | { 113 | Name: aws.String(resourceID), 114 | Values: []*string{ 115 | aws.String(instanceIdentityDocument.InstanceID), 116 | }, 117 | }, { 118 | Name: aws.String(resourceKey), 119 | Values: []*string{ 120 | aws.String(tagKey), 121 | }, 122 | }, 123 | }, 124 | } 125 | 126 | //log.Infof("Calling DescribeTags with key %s", tagKey) 127 | results, err := ec2ServiceClient.DescribeTags(&input) 128 | if err != nil { 129 | return "", errors.Wrap(err, "GetClusterTag: Unable to obtain EC2 instance tags") 130 | } 131 | 132 | if len(results.Tags) < 1 { 133 | return "", errors.Errorf("GetClusterTag: No tag matching key: %s", tagKey) 134 | } 135 | 136 | return aws.StringValue(results.Tags[0].Value), nil 137 | } 138 | -------------------------------------------------------------------------------- /pkg/aws/cloud_config.go: -------------------------------------------------------------------------------- 1 | package aws 2 | 3 | import ( 4 | "github.com/spf13/pflag" 5 | ) 6 | 7 | const ( 8 | flagAWSRegion = "aws-region" 9 | flagAWSAccountID = "aws-account-id" 10 | ) 11 | 12 | type CloudConfig struct { 13 | // AWS Region for the kubernetes cluster 14 | Region string 15 | // AccountID for the kubernetes cluster 16 | AccountID string 17 | // Cluster Name for the kubernetes cluster 18 | ClusterName string 19 | } 20 | 21 | func (cfg *CloudConfig) BindFlags(fs *pflag.FlagSet) { 22 | fs.StringVar(&cfg.Region, flagAWSRegion, "", "AWS Region for the kubernetes cluster") 23 | fs.StringVar(&cfg.AccountID, flagAWSAccountID, "", "AWS AccountID for the kubernetes cluster") 24 | } 25 | -------------------------------------------------------------------------------- /pkg/aws/services/cloudwatchlogs.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws/session" 5 | "github.com/aws/aws-sdk-go/service/cloudwatchlogs" 6 | "github.com/aws/aws-sdk-go/service/cloudwatchlogs/cloudwatchlogsiface" 7 | ) 8 | 9 | type CloudWatchLogs interface { 10 | cloudwatchlogsiface.CloudWatchLogsAPI 11 | } 12 | 13 | func NewCloudWatchLogs(session *session.Session) CloudWatchLogs { 14 | return &defaultCloudWatchLogs{ 15 | CloudWatchLogsAPI: cloudwatchlogs.New(session), 16 | } 17 | } 18 | 19 | type defaultCloudWatchLogs struct { 20 | cloudwatchlogsiface.CloudWatchLogsAPI 21 | } 22 | -------------------------------------------------------------------------------- /pkg/aws/services/ec2_metadata.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws/ec2metadata" 5 | "github.com/aws/aws-sdk-go/aws/session" 6 | ) 7 | 8 | type EC2Metadata interface { 9 | Region() (string, 
error) 10 | GetInstanceIdentityDocument() (ec2metadata.EC2InstanceIdentityDocument, error) 11 | } 12 | 13 | // NewEC2Metadata constructs new EC2Metadata implementation. 14 | func NewEC2Metadata(session *session.Session) EC2Metadata { 15 | return &defaultEC2Metadata{ 16 | EC2Metadata: ec2metadata.New(session), 17 | } 18 | } 19 | 20 | type defaultEC2Metadata struct { 21 | *ec2metadata.EC2Metadata 22 | } 23 | -------------------------------------------------------------------------------- /pkg/config/controller_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/spf13/pflag" 7 | ) 8 | 9 | const ( 10 | flagLogLevel = "log-level" 11 | flagLogFile = "log-file" 12 | flagMaxConcurrentReconciles = "max-concurrent-reconciles" 13 | defaultLogLevel = "debug" 14 | defaultLogFile = "/var/log/aws-routed-eni/network-policy-agent.log" 15 | defaultMaxConcurrentReconciles = 3 16 | defaultConntrackCacheCleanupPeriod = 300 17 | defaultConntrackCacheTableSize = 512 * 1024 18 | flagEnablePolicyEventLogs = "enable-policy-event-logs" 19 | flagEnableCloudWatchLogs = "enable-cloudwatch-logs" 20 | flagEnableIPv6 = "enable-ipv6" 21 | flagEnableNetworkPolicy = "enable-network-policy" 22 | flagConntrackCacheCleanupPeriod = "conntrack-cache-cleanup-period" 23 | flagConntrackCacheTableSize = "conntrack-cache-table-size" 24 | ) 25 | 26 | // ControllerConfig contains the controller configuration 27 | type ControllerConfig struct { 28 | // Log level for the controller logs 29 | LogLevel string 30 | // Local log file for Network Policy Agent 31 | LogFile string 32 | // MaxConcurrentReconciles specifies the max number of reconcile loops 33 | MaxConcurrentReconciles int 34 | // Enable Policy decision logs 35 | EnablePolicyEventLogs bool 36 | // Enable Policy decision logs streaming to CloudWatch 37 | EnableCloudWatchLogs bool 38 | // Enable IPv6 mode 39 | EnableIPv6 bool 40 | // Enable Network Policy 41 | EnableNetworkPolicy bool 42 | // ConntrackCacheCleanupPeriod specifies the cleanup period 43 | ConntrackCacheCleanupPeriod int 44 | // ConntrackTableSize specifies the conntrack table size for the agent 45 | ConntrackCacheTableSize int 46 | // Configurations for the Controller Runtime 47 | RuntimeConfig RuntimeConfig 48 | } 49 | 50 | func (cfg *ControllerConfig) BindFlags(fs *pflag.FlagSet) { 51 | fs.StringVar(&cfg.LogLevel, flagLogLevel, defaultLogLevel, 52 | "Set the controller log level - info, debug") 53 | fs.StringVar(&cfg.LogFile, flagLogFile, defaultLogFile, ""+ 54 | "Set the controller log file - if not specified logs are written to stdout") 55 | fs.IntVar(&cfg.MaxConcurrentReconciles, flagMaxConcurrentReconciles, defaultMaxConcurrentReconciles, ""+ 56 | "Maximum number of concurrent reconcile loops") 57 | fs.BoolVar(&cfg.EnablePolicyEventLogs, flagEnablePolicyEventLogs, false, "If enabled, policy decision logs will be collected & logged") 58 | fs.BoolVar(&cfg.EnableCloudWatchLogs, flagEnableCloudWatchLogs, false, "If enabled, policy decision logs will be streamed to CloudWatch, requires \"enable-policy-event-logs=true\"") 59 | fs.BoolVar(&cfg.EnableIPv6, flagEnableIPv6, false, "If enabled, Network Policy agent will operate in IPv6 mode") 60 | fs.BoolVar(&cfg.EnableNetworkPolicy, flagEnableNetworkPolicy, false, "If enabled, Network Policy agent will initialize BPF maps and start reconciler") 61 | fs.IntVar(&cfg.ConntrackCacheCleanupPeriod, flagConntrackCacheCleanupPeriod, defaultConntrackCacheCleanupPeriod, ""+ 
62 | "Cleanup interval for network policy agent conntrack cache") 63 | fs.IntVar(&cfg.ConntrackCacheTableSize, flagConntrackCacheTableSize, defaultConntrackCacheTableSize, ""+ 64 | "Table size for network policy agent conntrack cache") 65 | 66 | cfg.RuntimeConfig.BindFlags(fs) 67 | } 68 | 69 | // Validate controller flags 70 | func (cfg *ControllerConfig) ValidControllerFlags() error { 71 | // Validate conntrack cache table size 72 | if cfg.ConntrackCacheTableSize < (32*1024) || cfg.ConntrackCacheTableSize > (1024*1024) { 73 | return errors.New("Invalid conntrack cache table size, should be between 32K and 1024K") 74 | } 75 | return nil 76 | } 77 | -------------------------------------------------------------------------------- /pkg/config/runtime_config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "time" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/runtime" 8 | "k8s.io/client-go/rest" 9 | "k8s.io/client-go/tools/clientcmd" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" 12 | 13 | "github.com/spf13/pflag" 14 | ) 15 | 16 | const ( 17 | flagKubeconfig = "kubeconfig" 18 | flagMetricsBindAddr = "metrics-bind-addr" 19 | flagHealthProbeBindAddr = "health-probe-bind-addr" 20 | 21 | defaultKubeconfig = "" 22 | defaultWatchNamespace = corev1.NamespaceAll 23 | defaultMetricsAddr = ":8162" 24 | defaultHealthProbeBindAddress = ":8163" 25 | defaultQPS = 20 26 | defaultBurst = 100 27 | ) 28 | 29 | // RuntimeConfig stores the configuration for the controller-runtime 30 | type RuntimeConfig struct { 31 | APIServer string 32 | KubeConfig string 33 | MetricsBindAddress string 34 | HealthProbeBindAddress string 35 | SyncPeriod time.Duration 36 | } 37 | 38 | func (c *RuntimeConfig) BindFlags(fs *pflag.FlagSet) { 39 | fs.StringVar(&c.KubeConfig, flagKubeconfig, defaultKubeconfig, 40 | "Path to the kubeconfig file containing authorization and API server information.") 41 | fs.StringVar(&c.MetricsBindAddress, flagMetricsBindAddr, defaultMetricsAddr, 42 | "The address the metric endpoint binds to.") 43 | fs.StringVar(&c.HealthProbeBindAddress, flagHealthProbeBindAddr, defaultHealthProbeBindAddress, 44 | "The address the health probes binds to.") 45 | } 46 | 47 | // BuildRestConfig builds the REST config for the controller runtime 48 | func BuildRestConfig(rtCfg RuntimeConfig) (*rest.Config, error) { 49 | var restCFG *rest.Config 50 | var err error 51 | if rtCfg.KubeConfig == "" { 52 | restCFG, err = rest.InClusterConfig() 53 | } else { 54 | restCFG, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig( 55 | &clientcmd.ClientConfigLoadingRules{ExplicitPath: rtCfg.KubeConfig}, &clientcmd.ConfigOverrides{}).ClientConfig() 56 | } 57 | if err != nil { 58 | return nil, err 59 | } 60 | restCFG.QPS = defaultQPS 61 | restCFG.Burst = defaultBurst 62 | return restCFG, nil 63 | } 64 | 65 | // BuildRuntimeOptions builds the options for the controller runtime based on config 66 | func BuildRuntimeOptions(rtCfg RuntimeConfig, scheme *runtime.Scheme) ctrl.Options { 67 | return ctrl.Options{ 68 | Scheme: scheme, 69 | Metrics: metricsserver.Options{BindAddress: rtCfg.MetricsBindAddress}, 70 | HealthProbeBindAddress: rtCfg.HealthProbeBindAddress, 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /pkg/ebpf/bpf_client_mock.go: -------------------------------------------------------------------------------- 1 | 
package ebpf 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // NewMockBpfClient is an exported helper for tests that returns a mock implementation of BpfClient. 8 | // This function is intended for use in tests in other packages. 9 | func NewMockBpfClient() BpfClient { 10 | return &bpfClient{ 11 | policyEndpointeBPFContext: new(sync.Map), 12 | IngressPodToProgMap: new(sync.Map), 13 | EgressPodToProgMap: new(sync.Map), 14 | IngressProgToPodsMap: new(sync.Map), 15 | EgressProgToPodsMap: new(sync.Map), 16 | GlobalMaps: new(sync.Map), 17 | nodeIP: "127.0.0.1", 18 | enableIPv6: false, 19 | hostMask: "/32", 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /pkg/ebpf/c/helper.h: -------------------------------------------------------------------------------- 1 | #ifndef __HELPERS_H 2 | #define __HELPERS_H 3 | 4 | enum { 5 | BPF_F_NO_PREALLOC = 1, 6 | BPF_F_NO_COMMON_LRU = 2, 7 | BPF_F_NUMA_NODE = 4, 8 | BPF_F_RDONLY = 8, 9 | BPF_F_WRONLY = 16, 10 | BPF_F_STACK_BUILD_ID = 32, 11 | BPF_F_ZERO_SEED = 64, 12 | BPF_F_RDONLY_PROG = 128, 13 | BPF_F_WRONLY_PROG = 256, 14 | BPF_F_CLONE = 512, 15 | BPF_F_MMAPABLE = 1024, 16 | BPF_F_PRESERVE_ELEMS = 2048, 17 | BPF_F_INNER_MAP = 4096, 18 | }; 19 | 20 | #endif 21 | -------------------------------------------------------------------------------- /pkg/ebpf/c/tc.v4egress.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | #include 3 | #include 4 | #include 5 | 6 | #define BPF_F_NO_PREALLOC 1 7 | #define ETH_HLEN 14 8 | #define BPF_MAP_ID_INGRESS_MAP 2 9 | #define MAX_RULES 256 10 | #define MIN_RULES 128 11 | #define PIN_GLOBAL_NS 2 12 | #define RESERVED_IP_PROTOCOL 255 13 | #define ANY_IP_PROTOCOL 254 14 | #define ANY_PORT 0 15 | #define MAX_PORT_PROTOCOL 24 16 | #define CT_VAL_DEFAULT_ALLOW 0 17 | #define CT_VAL_POLICIES_APPLIED 1 18 | #define POLICIES_APPLIED 0 19 | #define DEFAULT_ALLOW 1 20 | #define DEFAULT_DENY 2 21 | 22 | struct bpf_map_def_pvt { 23 | __u32 type; 24 | __u32 key_size; 25 | __u32 value_size; 26 | __u32 max_entries; 27 | __u32 map_flags; 28 | __u32 pinning; 29 | __u32 inner_map_fd; 30 | }; 31 | 32 | struct keystruct 33 | { 34 | __u32 prefix_len; 35 | __u8 ip[4]; 36 | }; 37 | 38 | struct lpm_trie_key { 39 | __u32 prefixlen; 40 | __u32 ip; 41 | }; 42 | 43 | struct lpm_trie_val { 44 | __u32 protocol; 45 | __u32 start_port; 46 | __u32 end_port; 47 | }; 48 | 49 | struct conntrack_key { 50 | __u32 src_ip; 51 | __u16 src_port; 52 | __u32 dest_ip; 53 | __u16 dest_port; 54 | __u8 protocol; 55 | __u32 owner_ip; 56 | }; 57 | 58 | struct conntrack_value { 59 | __u8 val; // 0 => default-allow, 1 => policies-applied 60 | }; 61 | 62 | struct data_t { 63 | __u32 src_ip; 64 | __u32 src_port; 65 | __u32 dest_ip; 66 | __u32 dest_port; 67 | __u32 protocol; 68 | __u32 verdict; 69 | __u32 packet_sz; 70 | __u8 is_egress; 71 | }; 72 | 73 | struct bpf_map_def_pvt SEC("maps") egress_map = { 74 | .type = BPF_MAP_TYPE_LPM_TRIE, 75 | .key_size = sizeof(struct lpm_trie_key), 76 | .value_size = sizeof(struct lpm_trie_val[MAX_PORT_PROTOCOL]), 77 | .max_entries = 65536, 78 | .map_flags = BPF_F_NO_PREALLOC, 79 | .pinning = PIN_GLOBAL_NS, 80 | }; 81 | 82 | struct pod_state { 83 | __u8 state; // 0 => POLICIES_APPLIED, 1 => DEFAULT_ALLOW, 2 => DEFAULT_DENY 84 | }; 85 | 86 | struct bpf_map_def_pvt SEC("maps") egress_pod_state_map = { 87 | .type = BPF_MAP_TYPE_HASH, 88 | .key_size = sizeof(__u32), // default key = 0. 
We are storing a single state per pod identifier 89 | .value_size = sizeof(struct pod_state), 90 | .max_entries = 1, 91 | .map_flags = BPF_F_NO_PREALLOC, 92 | .pinning = PIN_GLOBAL_NS, 93 | }; 94 | 95 | struct bpf_map_def_pvt aws_conntrack_map; 96 | struct bpf_map_def_pvt policy_events; 97 | 98 | static inline int evaluateByLookUp(struct keystruct trie_key, struct conntrack_key flow_key, struct pod_state *pst, struct data_t evt, struct iphdr *ip, __u32 l4_dst_port) { 99 | struct lpm_trie_val *trie_val; 100 | //Check if it's in the allowed list 101 | trie_val = bpf_map_lookup_elem(&egress_map, &trie_key); 102 | if (trie_val == NULL) { 103 | evt.verdict = 0; 104 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 105 | return BPF_DROP; 106 | } 107 | 108 | for (int i = 0; i < MAX_PORT_PROTOCOL; i++, trie_val++){ 109 | if (trie_val->protocol == RESERVED_IP_PROTOCOL) { 110 | evt.verdict = 0; 111 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 112 | return BPF_DROP; 113 | } 114 | 115 | if ((trie_val->protocol == ANY_IP_PROTOCOL) || (trie_val->protocol == ip->protocol && 116 | ((trie_val->start_port == ANY_PORT) || (l4_dst_port == trie_val->start_port) || 117 | (l4_dst_port > trie_val->start_port && l4_dst_port <= trie_val->end_port)))) { 118 | //Inject in to conntrack map 119 | struct conntrack_value new_flow_val = {}; 120 | if (pst->state == DEFAULT_ALLOW) { 121 | new_flow_val.val = CT_VAL_DEFAULT_ALLOW; 122 | } else { 123 | new_flow_val.val = CT_VAL_POLICIES_APPLIED; 124 | } 125 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, &new_flow_val, 0); // 0 - BPF_ANY 126 | evt.verdict = 1; 127 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 128 | return BPF_OK; 129 | } 130 | } 131 | evt.verdict = 0; 132 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 133 | return BPF_DROP; 134 | } 135 | 136 | SEC("tc_cls") 137 | int handle_egress(struct __sk_buff *skb) 138 | { 139 | struct keystruct trie_key; 140 | __u32 l4_src_port = 0; 141 | __u32 l4_dst_port = 0; 142 | struct conntrack_key flow_key; 143 | struct conntrack_value *flow_val; 144 | struct conntrack_key reverse_flow_key; 145 | struct conntrack_value *reverse_flow_val; 146 | void *data_end = (void *)(long)skb->data_end; 147 | void *data = (void *)(long)skb->data; 148 | __u8 src_ip[4]; 149 | 150 | __builtin_memset(&flow_key, 0, sizeof(flow_key)); 151 | __builtin_memset(&src_ip, 0, sizeof(src_ip)); 152 | __builtin_memset(&reverse_flow_key, 0, sizeof(reverse_flow_key)); 153 | 154 | 155 | struct ethhdr *ether = data; 156 | if (data + sizeof(*ether) > data_end) { 157 | return BPF_OK; 158 | } 159 | 160 | if (ether->h_proto == 0x08U) { // htons(ETH_P_IP) -> 0x08U 161 | data += sizeof(*ether); 162 | struct iphdr *ip = data; 163 | struct tcphdr *l4_tcp_hdr = data + sizeof(struct iphdr); 164 | struct udphdr *l4_udp_hdr = data + sizeof(struct iphdr); 165 | struct sctphdr *l4_sctp_hdr = data + sizeof(struct iphdr); 166 | 167 | if (data + sizeof(*ip) > data_end) { 168 | return BPF_OK; 169 | } 170 | if (ip->version != 4) { 171 | return BPF_OK; 172 | } 173 | 174 | switch (ip->protocol) { 175 | case IPPROTO_TCP: 176 | if (data + sizeof(*ip) + sizeof(*l4_tcp_hdr) > data_end) { 177 | return BPF_OK; 178 | } 179 | l4_src_port = (((((unsigned short)(l4_tcp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_tcp_hdr->source) & 0xFF00) >> 8)); 180 | l4_dst_port = (((((unsigned short)(l4_tcp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_tcp_hdr->dest) & 0xFF00) >> 8)); 181 | break; 182 | case IPPROTO_UDP: 183 | if (data + 
sizeof(*ip) + sizeof(*l4_udp_hdr) > data_end) { 184 | return BPF_OK; 185 | } 186 | l4_src_port = (((((unsigned short)(l4_udp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_udp_hdr->source) & 0xFF00) >> 8)); 187 | l4_dst_port = (((((unsigned short)(l4_udp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_udp_hdr->dest) & 0xFF00) >> 8)); 188 | break; 189 | case IPPROTO_SCTP: 190 | if (data + sizeof(*ip) + sizeof(*l4_sctp_hdr) > data_end) { 191 | return BPF_OK; 192 | } 193 | l4_src_port = (((((unsigned short)(l4_sctp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_sctp_hdr->source) & 0xFF00) >> 8)); 194 | l4_dst_port = (((((unsigned short)(l4_sctp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_sctp_hdr->dest) & 0xFF00) >> 8)); 195 | break; 196 | } 197 | 198 | trie_key.prefix_len = 32; 199 | trie_key.ip[0] = ip->daddr & 0xff; 200 | trie_key.ip[1] = (ip->daddr >> 8) & 0xff; 201 | trie_key.ip[2] = (ip->daddr >> 16) & 0xff; 202 | trie_key.ip[3] = (ip->daddr >> 24) & 0xff; 203 | 204 | src_ip[0] = ip->saddr & 0xff; 205 | src_ip[1] = (ip->saddr >> 8) & 0xff; 206 | src_ip[2] = (ip->saddr >> 16) & 0xff; 207 | src_ip[3] = (ip->saddr >> 24) & 0xff; 208 | 209 | // Check for an existing flow in the conntrack table 210 | flow_key.src_ip = ip->saddr; 211 | flow_key.src_port = l4_src_port; 212 | flow_key.dest_ip = ip->daddr; 213 | flow_key.dest_port = l4_dst_port; 214 | flow_key.protocol = ip->protocol; 215 | flow_key.owner_ip = ip->saddr; 216 | 217 | struct data_t evt = {}; 218 | evt.src_ip = flow_key.src_ip; 219 | evt.src_port = flow_key.src_port; 220 | evt.dest_ip = flow_key.dest_ip; 221 | evt.dest_port = flow_key.dest_port; 222 | evt.protocol = flow_key.protocol; 223 | evt.is_egress = 1; 224 | evt.packet_sz = skb->len; 225 | __u32 key = 0; 226 | struct pod_state *pst = bpf_map_lookup_elem(&egress_pod_state_map, &key); 227 | // There should always be an entry in pod_state_map. pst returned in above line should never be null. 
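    // Fail closed if the state entry is missing: drop the packet and emit a
    // verdict=0 event on the policy_events ring buffer, just like a policy deny.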
228 | if (pst == NULL) { 229 | evt.verdict = 0; 230 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 231 | return BPF_DROP; 232 | } 233 | 234 | if (pst->state == DEFAULT_DENY) { 235 | evt.verdict = 0; 236 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 237 | return BPF_DROP; 238 | } 239 | 240 | //Check if it's an existing flow 241 | flow_val = bpf_map_lookup_elem(&aws_conntrack_map, &flow_key); 242 | 243 | if (flow_val != NULL) { 244 | // If it's a "default allow" flow, check if pod has flipped to "policies applied" state 245 | if (flow_val->val == CT_VAL_DEFAULT_ALLOW && pst->state == DEFAULT_ALLOW) { 246 | return BPF_OK; 247 | } 248 | if (flow_val->val == CT_VAL_POLICIES_APPLIED && pst->state == POLICIES_APPLIED) { 249 | return BPF_OK; 250 | } 251 | if (flow_val->val == CT_VAL_POLICIES_APPLIED && pst->state == DEFAULT_ALLOW) { 252 | flow_val->val = CT_VAL_DEFAULT_ALLOW; 253 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, flow_val, 0); // 0 -> BPF_ANY 254 | return BPF_OK; 255 | } 256 | if (flow_val->val == CT_VAL_DEFAULT_ALLOW && pst->state == POLICIES_APPLIED) { 257 | int ret = evaluateByLookUp(trie_key, flow_key, pst, evt, ip, l4_dst_port); 258 | if (ret == BPF_DROP) { 259 | bpf_map_delete_elem(&aws_conntrack_map, &flow_key); 260 | return BPF_DROP; 261 | } 262 | return BPF_OK; 263 | } 264 | } 265 | 266 | //Check for the reverse flow entry in the conntrack table 267 | reverse_flow_key.src_ip = ip->daddr; 268 | reverse_flow_key.src_port = l4_dst_port; 269 | reverse_flow_key.dest_ip = ip->saddr; 270 | reverse_flow_key.dest_port = l4_src_port; 271 | reverse_flow_key.protocol = ip->protocol; 272 | reverse_flow_key.owner_ip = ip->saddr; 273 | 274 | //Check if it's a response packet 275 | reverse_flow_val = bpf_map_lookup_elem(&aws_conntrack_map, &reverse_flow_key); 276 | 277 | if (reverse_flow_val != NULL) { 278 | return BPF_OK; 279 | } 280 | 281 | if (pst->state == DEFAULT_ALLOW) { 282 | struct conntrack_value new_flow_val = {}; 283 | new_flow_val.val = CT_VAL_DEFAULT_ALLOW; 284 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, &new_flow_val, 0); // 0 - BPF_ANY 285 | evt.verdict = 1; 286 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 287 | return BPF_OK; 288 | } 289 | 290 | return evaluateByLookUp(trie_key, flow_key, pst, evt, ip, l4_dst_port); 291 | 292 | } 293 | return BPF_OK; 294 | } 295 | char _license[] SEC("license") = "GPL"; 296 | -------------------------------------------------------------------------------- /pkg/ebpf/c/tc.v4ingress.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | #include 3 | #include 4 | #include 5 | 6 | #define BPF_F_NO_PREALLOC 1 7 | #define ETH_HLEN 14 8 | #define BPF_MAP_ID_INGRESS_MAP 2 9 | #define MAX_RULES 256 10 | #define MIN_RULES 128 11 | #define PIN_GLOBAL_NS 2 12 | #define RESERVED_IP_PROTOCOL 255 13 | #define ANY_IP_PROTOCOL 254 14 | #define ANY_PORT 0 15 | #define MAX_PORT_PROTOCOL 24 16 | #define CT_VAL_DEFAULT_ALLOW 0 17 | #define CT_VAL_POLICIES_APPLIED 1 18 | #define POLICIES_APPLIED 0 19 | #define DEFAULT_ALLOW 1 20 | #define DEFAULT_DENY 2 21 | 22 | struct bpf_map_def_pvt { 23 | __u32 type; 24 | __u32 key_size; 25 | __u32 value_size; 26 | __u32 max_entries; 27 | __u32 map_flags; 28 | __u32 pinning; 29 | __u32 inner_map_fd; 30 | }; 31 | 32 | struct keystruct 33 | { 34 | __u32 prefix_len; 35 | __u8 ip[4]; 36 | }; 37 | 38 | struct lpm_trie_key { 39 | __u32 prefixlen; 40 | __u32 ip; 41 | }; 42 | 43 | struct lpm_trie_val { 44 | __u32 
protocol; 45 | __u32 start_port; 46 | __u32 end_port; 47 | }; 48 | 49 | struct conntrack_key { 50 | __u32 src_ip; 51 | __u16 src_port; 52 | __u32 dest_ip; 53 | __u16 dest_port; 54 | __u8 protocol; 55 | __u32 owner_ip; 56 | }; 57 | 58 | struct conntrack_value { 59 | __u8 val; // 0 => default-allow, 1 => policies-applied 60 | }; 61 | 62 | struct data_t { 63 | __u32 src_ip; 64 | __u32 src_port; 65 | __u32 dest_ip; 66 | __u32 dest_port; 67 | __u32 protocol; 68 | __u32 verdict; 69 | __u32 packet_sz; 70 | __u8 is_egress; 71 | }; 72 | 73 | struct bpf_map_def_pvt SEC("maps") ingress_map = { 74 | .type = BPF_MAP_TYPE_LPM_TRIE, 75 | .key_size =sizeof(struct lpm_trie_key), 76 | .value_size = sizeof(struct lpm_trie_val[MAX_PORT_PROTOCOL]), 77 | .max_entries = 65536, 78 | .map_flags = BPF_F_NO_PREALLOC, 79 | .pinning = PIN_GLOBAL_NS, 80 | }; 81 | 82 | struct pod_state { 83 | __u8 state; // 0 => POLICIES_APPLIED, 1 => DEFAULT_ALLOW, 2 => DEFAULT_DENY 84 | }; 85 | 86 | struct bpf_map_def_pvt SEC("maps") ingress_pod_state_map = { 87 | .type = BPF_MAP_TYPE_HASH, 88 | .key_size = sizeof(__u32), // default key = 0. We are storing a single state per pod identifier 89 | .value_size = sizeof(struct pod_state), 90 | .max_entries = 1, 91 | .map_flags = BPF_F_NO_PREALLOC, 92 | .pinning = PIN_GLOBAL_NS, 93 | }; 94 | 95 | struct bpf_map_def_pvt aws_conntrack_map; 96 | struct bpf_map_def_pvt policy_events; 97 | 98 | static inline int evaluateByLookUp(struct keystruct trie_key, struct conntrack_key flow_key, struct pod_state *pst, struct data_t evt, struct iphdr *ip, __u32 l4_dst_port) { 99 | struct lpm_trie_val *trie_val; 100 | //Check if it's in the allowed list 101 | trie_val = bpf_map_lookup_elem(&ingress_map, &trie_key); 102 | if (trie_val == NULL) { 103 | evt.verdict = 0; 104 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 105 | return BPF_DROP; 106 | } 107 | 108 | for (int i = 0; i < MAX_PORT_PROTOCOL; i++, trie_val++){ 109 | if (trie_val->protocol == RESERVED_IP_PROTOCOL) { 110 | evt.verdict = 0; 111 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 112 | return BPF_DROP; 113 | } 114 | 115 | if ((trie_val->protocol == ANY_IP_PROTOCOL) || (trie_val->protocol == ip->protocol && 116 | ((trie_val->start_port == ANY_PORT) || (l4_dst_port == trie_val->start_port) || 117 | (l4_dst_port > trie_val->start_port && l4_dst_port <= trie_val->end_port)))) { 118 | //Inject in to conntrack map 119 | struct conntrack_value new_flow_val = {}; 120 | if (pst->state == DEFAULT_ALLOW) { 121 | new_flow_val.val = CT_VAL_DEFAULT_ALLOW; 122 | } else { 123 | new_flow_val.val = CT_VAL_POLICIES_APPLIED; 124 | } 125 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, &new_flow_val, 0); // 0 - BPF_ANY 126 | evt.verdict = 1; 127 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 128 | return BPF_OK; 129 | } 130 | } 131 | evt.verdict = 0; 132 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 133 | return BPF_DROP; 134 | } 135 | 136 | SEC("tc_cls") 137 | int handle_ingress(struct __sk_buff *skb) 138 | { 139 | struct keystruct trie_key; 140 | __u32 l4_src_port = 0; 141 | __u32 l4_dst_port = 0; 142 | struct conntrack_key flow_key; 143 | struct conntrack_value *flow_val; 144 | struct conntrack_key reverse_flow_key; 145 | struct conntrack_value *reverse_flow_val; 146 | void *data_end = (void *)(long)skb->data_end; 147 | void *data = (void *)(long)skb->data; 148 | __u8 dest_ip[4]; 149 | 150 | __builtin_memset(&flow_key, 0, sizeof(flow_key)); 151 | __builtin_memset(&dest_ip, 0, sizeof(dest_ip)); 152 | 
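    // Lookup keys are zeroed before use so that struct padding bytes stay
    // deterministic; conntrack map lookups compare the raw key bytes.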
__builtin_memset(&reverse_flow_key, 0, sizeof(reverse_flow_key)); 153 | 154 | struct ethhdr *ether = data; 155 | if (data + sizeof(*ether) > data_end) { 156 | return BPF_OK; 157 | } 158 | 159 | if (ether->h_proto == 0x08U) { // htons(ETH_P_IP) -> 0x08U 160 | data += sizeof(*ether); 161 | struct iphdr *ip = data; 162 | struct tcphdr *l4_tcp_hdr = data + sizeof(struct iphdr); 163 | struct udphdr *l4_udp_hdr = data + sizeof(struct iphdr); 164 | struct sctphdr *l4_sctp_hdr = data + sizeof(struct iphdr); 165 | 166 | if (data + sizeof(*ip) > data_end) { 167 | return BPF_OK; 168 | } 169 | if (ip->version != 4) { 170 | return BPF_OK; 171 | } 172 | 173 | switch (ip->protocol) { 174 | case IPPROTO_TCP: 175 | if (data + sizeof(*ip) + sizeof(*l4_tcp_hdr) > data_end) { 176 | return BPF_OK; 177 | } 178 | l4_src_port = (((((unsigned short)(l4_tcp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_tcp_hdr->source) & 0xFF00) >> 8)); 179 | l4_dst_port = (((((unsigned short)(l4_tcp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_tcp_hdr->dest) & 0xFF00) >> 8)); 180 | break; 181 | case IPPROTO_UDP: 182 | if (data + sizeof(*ip) + sizeof(*l4_udp_hdr) > data_end) { 183 | return BPF_OK; 184 | } 185 | l4_src_port = (((((unsigned short)(l4_udp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_udp_hdr->source) & 0xFF00) >> 8)); 186 | l4_dst_port = (((((unsigned short)(l4_udp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_udp_hdr->dest) & 0xFF00) >> 8)); 187 | break; 188 | case IPPROTO_SCTP: 189 | if (data + sizeof(*ip) + sizeof(*l4_sctp_hdr) > data_end) { 190 | return BPF_OK; 191 | } 192 | l4_src_port = (((((unsigned short)(l4_sctp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_sctp_hdr->source) & 0xFF00) >> 8)); 193 | l4_dst_port = (((((unsigned short)(l4_sctp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_sctp_hdr->dest) & 0xFF00) >> 8)); 194 | break; 195 | } 196 | 197 | trie_key.prefix_len = 32; 198 | trie_key.ip[0] = ip->saddr & 0xff; 199 | trie_key.ip[1] = (ip->saddr >> 8) & 0xff; 200 | trie_key.ip[2] = (ip->saddr >> 16) & 0xff; 201 | trie_key.ip[3] = (ip->saddr >> 24) & 0xff; 202 | 203 | dest_ip[0] = ip->daddr & 0xff; 204 | dest_ip[1] = (ip->daddr >> 8) & 0xff; 205 | dest_ip[2] = (ip->daddr >> 16) & 0xff; 206 | dest_ip[3] = (ip->daddr >> 24) & 0xff; 207 | 208 | //Check for the an existing flow in the conntrack table 209 | flow_key.src_ip = ip->saddr; 210 | flow_key.src_port = l4_src_port; 211 | flow_key.dest_ip = ip->daddr; 212 | flow_key.dest_port = l4_dst_port; 213 | flow_key.protocol = ip->protocol; 214 | flow_key.owner_ip = ip->daddr; 215 | 216 | struct data_t evt = {}; 217 | evt.src_ip = flow_key.src_ip; 218 | evt.src_port = flow_key.src_port; 219 | evt.dest_ip = flow_key.dest_ip; 220 | evt.dest_port = flow_key.dest_port; 221 | evt.protocol = flow_key.protocol; 222 | evt.packet_sz = skb->len; 223 | evt.is_egress = 0; 224 | 225 | __u32 key = 0; 226 | struct pod_state *pst = bpf_map_lookup_elem(&ingress_pod_state_map, &key); 227 | // There should always be an entry in pod_state_map. pst returned in above line should never be null. 
228 | if (pst == NULL) { 229 | evt.verdict = 0; 230 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 231 | return BPF_DROP; 232 | } 233 | 234 | if (pst->state == DEFAULT_DENY) { 235 | evt.verdict = 0; 236 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 237 | return BPF_DROP; 238 | } 239 | 240 | //Check if it's an existing flow 241 | flow_val = bpf_map_lookup_elem(&aws_conntrack_map, &flow_key); 242 | 243 | if (flow_val != NULL) { 244 | // If it's a "default allow" flow, check if pod has flipped to "policies applied" state 245 | if (flow_val->val == CT_VAL_DEFAULT_ALLOW && pst->state == DEFAULT_ALLOW) { 246 | return BPF_OK; 247 | } 248 | if (flow_val->val == CT_VAL_POLICIES_APPLIED && pst->state == POLICIES_APPLIED) { 249 | return BPF_OK; 250 | } 251 | if (flow_val->val == CT_VAL_POLICIES_APPLIED && pst->state == DEFAULT_ALLOW) { 252 | flow_val->val = CT_VAL_DEFAULT_ALLOW; 253 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, flow_val, 0); // 0 -> BPF_ANY 254 | return BPF_OK; 255 | } 256 | if (flow_val->val == CT_VAL_DEFAULT_ALLOW && pst->state == POLICIES_APPLIED) { 257 | int ret = evaluateByLookUp(trie_key, flow_key, pst, evt, ip, l4_dst_port); 258 | if (ret == BPF_DROP) { 259 | bpf_map_delete_elem(&aws_conntrack_map, &flow_key); 260 | return BPF_DROP; 261 | } 262 | return BPF_OK; 263 | } 264 | } 265 | 266 | //Check for the reverse flow entry in the conntrack table 267 | reverse_flow_key.src_ip = ip->daddr; 268 | reverse_flow_key.src_port = l4_dst_port; 269 | reverse_flow_key.dest_ip = ip->saddr; 270 | reverse_flow_key.dest_port = l4_src_port; 271 | reverse_flow_key.protocol = ip->protocol; 272 | reverse_flow_key.owner_ip = ip->daddr; 273 | 274 | 275 | //Check if it's a response packet 276 | reverse_flow_val = (struct conntrack_value *)bpf_map_lookup_elem(&aws_conntrack_map, &reverse_flow_key); 277 | if (reverse_flow_val != NULL) { 278 | return BPF_OK; 279 | } 280 | 281 | if (pst->state == DEFAULT_ALLOW) { 282 | struct conntrack_value new_flow_val = {}; 283 | new_flow_val.val = CT_VAL_DEFAULT_ALLOW; 284 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, &new_flow_val, 0); // 0 - BPF_ANY 285 | evt.verdict = 1; 286 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 287 | return BPF_OK; 288 | } 289 | 290 | return evaluateByLookUp(trie_key, flow_key, pst, evt, ip, l4_dst_port); 291 | } 292 | return BPF_OK; 293 | } 294 | 295 | char _license[] SEC("license") = "GPL"; 296 | -------------------------------------------------------------------------------- /pkg/ebpf/c/tc.v6egress.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | #include 3 | #include 4 | #include 5 | 6 | #define BPF_F_NO_PREALLOC 1 7 | #define ETH_HLEN 14 8 | #define BPF_MAP_ID_INGRESS_MAP 2 9 | #define MAX_RULES 256 10 | #define MIN_RULES 128 11 | #define PIN_GLOBAL_NS 2 12 | #define RESERVED_IP_PROTOCOL 255 13 | #define ANY_IP_PROTOCOL 254 14 | #define ANY_PORT 0 15 | #define MAX_PORT_PROTOCOL 24 16 | #define CT_VAL_DEFAULT_ALLOW 0 17 | #define CT_VAL_POLICIES_APPLIED 1 18 | #define POLICIES_APPLIED 0 19 | #define DEFAULT_ALLOW 1 20 | #define DEFAULT_DENY 2 21 | 22 | struct bpf_map_def_pvt { 23 | __u32 type; 24 | __u32 key_size; 25 | __u32 value_size; 26 | __u32 max_entries; 27 | __u32 map_flags; 28 | __u32 pinning; 29 | __u32 inner_map_fd; 30 | }; 31 | 32 | struct keystruct 33 | { 34 | __u32 prefix_len; 35 | __u8 ip[16]; 36 | }; 37 | 38 | struct lpm_trie_key { 39 | __u32 prefixlen; 40 | __u8 ip[16]; 41 | }; 42 | 43 | struct 
lpm_trie_val { 44 | __u32 protocol; 45 | __u32 start_port; 46 | __u32 end_port; 47 | }; 48 | 49 | 50 | struct conntrack_key { 51 | struct in6_addr saddr; 52 | __u16 src_port; 53 | struct in6_addr daddr; 54 | __u16 dest_port; 55 | __u8 protocol; 56 | struct in6_addr owner_addr; 57 | }; 58 | 59 | struct conntrack_value { 60 | __u8 val; // 0 => default-allow, 1 => policies-applied 61 | }; 62 | 63 | struct data_t { 64 | struct in6_addr src_ip; 65 | __u32 src_port; 66 | struct in6_addr dest_ip; 67 | __u32 dest_port; 68 | __u32 protocol; 69 | __u32 verdict; 70 | __u32 packet_sz; 71 | __u8 is_egress; 72 | }; 73 | 74 | struct bpf_map_def_pvt SEC("maps") egress_map = { 75 | .type = BPF_MAP_TYPE_LPM_TRIE, 76 | .key_size = sizeof(struct lpm_trie_key), 77 | .value_size = sizeof(struct lpm_trie_val[MAX_PORT_PROTOCOL]), 78 | .max_entries = 65536, 79 | .map_flags = BPF_F_NO_PREALLOC, 80 | .pinning = PIN_GLOBAL_NS, 81 | }; 82 | 83 | struct pod_state { 84 | __u8 state; // 0 => POLICIES_APPLIED, 1 => DEFAULT_ALLOW, 2 => DEFAULT_DENY 85 | }; 86 | 87 | struct bpf_map_def_pvt SEC("maps") egress_pod_state_map = { 88 | .type = BPF_MAP_TYPE_HASH, 89 | .key_size = sizeof(__u32), // default key = 0. We are storing a single state per pod identifier 90 | .value_size = sizeof(struct pod_state), 91 | .max_entries = 1, 92 | .map_flags = BPF_F_NO_PREALLOC, 93 | .pinning = PIN_GLOBAL_NS, 94 | }; 95 | 96 | struct bpf_map_def_pvt aws_conntrack_map; 97 | struct bpf_map_def_pvt policy_events; 98 | 99 | static inline int evaluateByLookUp(struct keystruct trie_key, struct conntrack_key flow_key, struct pod_state *pst, struct data_t evt, struct ipv6hdr *ip, __u32 l4_dst_port) { 100 | struct lpm_trie_val *trie_val; 101 | //Check if it's in the allowed list 102 | trie_val = bpf_map_lookup_elem(&egress_map, &trie_key); 103 | if (trie_val == NULL) { 104 | evt.verdict = 0; 105 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 106 | return BPF_DROP; 107 | } 108 | 109 | for (int i = 0; i < MAX_PORT_PROTOCOL; i++, trie_val++){ 110 | if (trie_val->protocol == RESERVED_IP_PROTOCOL) { 111 | evt.verdict = 0; 112 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 113 | return BPF_DROP; 114 | } 115 | 116 | if ((trie_val->protocol == ANY_IP_PROTOCOL) || (trie_val->protocol == ip->nexthdr && 117 | ((trie_val->start_port == ANY_PORT) || (l4_dst_port == trie_val->start_port) || 118 | (l4_dst_port > trie_val->start_port && l4_dst_port <= trie_val->end_port)))) { 119 | //Inject in to conntrack map 120 | struct conntrack_value new_flow_val = {}; 121 | if (pst->state == DEFAULT_ALLOW) { 122 | new_flow_val.val = CT_VAL_DEFAULT_ALLOW; 123 | } else { 124 | new_flow_val.val = CT_VAL_POLICIES_APPLIED; 125 | } 126 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, &new_flow_val, 0); // 0 - BPF_ANY 127 | evt.verdict = 1; 128 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 129 | return BPF_OK; 130 | } 131 | } 132 | evt.verdict = 0; 133 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 134 | return BPF_DROP; 135 | } 136 | 137 | SEC("tc_cls") 138 | int handle_egress(struct __sk_buff *skb) 139 | { 140 | 141 | struct keystruct trie_key; 142 | struct lpm_trie_val *trie_val; 143 | __u16 l4_src_port = 0; 144 | __u16 l4_dst_port = 0; 145 | struct conntrack_key flow_key; 146 | struct conntrack_value *flow_val; 147 | struct conntrack_key reverse_flow_key; 148 | struct conntrack_value *reverse_flow_val; 149 | struct data_t evt = {}; 150 | void *data_end = (void *)(long)skb->data_end; 151 | void *data = (void *)(long)skb->data; 
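    // data/data_end bound every header access below; the verifier rejects the
    // program unless each read is preceded by an explicit length check.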
152 | 153 | __builtin_memset(&flow_key, 0, sizeof(flow_key)); 154 | __builtin_memset(&reverse_flow_key, 0, sizeof(reverse_flow_key)); 155 | 156 | struct ethhdr *ether = data; 157 | if (data + sizeof(*ether) > data_end) { 158 | return BPF_OK; 159 | } 160 | 161 | if (ether->h_proto == 0xdd86) { // htons(ETH_P_IPV6) -> 0x086ddU 162 | data += sizeof(*ether); 163 | struct ipv6hdr *ip = data; 164 | struct tcphdr *l4_tcp_hdr = data + sizeof(struct ipv6hdr); 165 | struct udphdr *l4_udp_hdr = data + sizeof(struct ipv6hdr); 166 | struct sctphdr *l4_sctp_hdr = data + sizeof(struct ipv6hdr); 167 | 168 | if (data + sizeof(*ip) > data_end) { 169 | return BPF_OK; 170 | } 171 | 172 | if (ip->version != 6) { 173 | return BPF_OK; 174 | } 175 | 176 | //ICMPv6 - Neighbor Discovery Packets 177 | if (ip->nexthdr == 58) { 178 | return BPF_OK; 179 | } 180 | 181 | switch (ip->nexthdr) { 182 | case IPPROTO_TCP: 183 | if (data + sizeof(*ip) + sizeof(*l4_tcp_hdr) > data_end) { 184 | return BPF_OK; 185 | } 186 | l4_src_port = (((((unsigned short)(l4_tcp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_tcp_hdr->source) & 0xFF00) >> 8)); 187 | l4_dst_port = (((((unsigned short)(l4_tcp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_tcp_hdr->dest) & 0xFF00) >> 8)); 188 | break; 189 | case IPPROTO_UDP: 190 | if (data + sizeof(*ip) + sizeof(*l4_udp_hdr) > data_end) { 191 | return BPF_OK; 192 | } 193 | l4_src_port = (((((unsigned short)(l4_udp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_udp_hdr->source) & 0xFF00) >> 8)); 194 | l4_dst_port = (((((unsigned short)(l4_udp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_udp_hdr->dest) & 0xFF00) >> 8)); 195 | break; 196 | case IPPROTO_SCTP: 197 | if (data + sizeof(*ip) + sizeof(*l4_sctp_hdr) > data_end) { 198 | return BPF_OK; 199 | } 200 | l4_src_port = (((((unsigned short)(l4_sctp_hdr->source) & 0xFF)) << 8) | (((unsigned short)(l4_sctp_hdr->source) & 0xFF00) >> 8)); 201 | l4_dst_port = (((((unsigned short)(l4_sctp_hdr->dest) & 0xFF)) << 8) | (((unsigned short)(l4_sctp_hdr->dest) & 0xFF00) >> 8)); 202 | break; 203 | } 204 | 205 | trie_key.prefix_len = 128; 206 | 207 | //Fill the IP Key to be used for lookup 208 | for (int i=0; i<16; i++){ 209 | trie_key.ip[i] = ip->daddr.in6_u.u6_addr8[i]; 210 | } 211 | 212 | //Check for the an existing flow in the conntrack table 213 | flow_key.saddr = ip->saddr; 214 | flow_key.daddr = ip->daddr; 215 | flow_key.src_port = l4_src_port; 216 | flow_key.dest_port = l4_dst_port; 217 | flow_key.protocol = ip->nexthdr; 218 | flow_key.owner_addr = ip->saddr; 219 | 220 | evt.src_ip = ip->saddr; 221 | evt.dest_ip = ip->daddr; 222 | evt.src_port = flow_key.src_port; 223 | evt.dest_port = flow_key.dest_port; 224 | evt.protocol = flow_key.protocol; 225 | evt.is_egress = 1; 226 | evt.packet_sz = skb->len; 227 | 228 | __u32 key = 0; 229 | struct pod_state *pst = bpf_map_lookup_elem(&egress_pod_state_map, &key); 230 | // There should always be an entry in pod_state_map. pst returned in above line should never be null. 
231 | if (pst == NULL) { 232 | evt.verdict = 0; 233 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 234 | return BPF_DROP; 235 | } 236 | 237 | if (pst->state == DEFAULT_DENY) { 238 | evt.verdict = 0; 239 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 240 | return BPF_DROP; 241 | } 242 | 243 | //Check if it's an existing flow 244 | flow_val = (struct conntrack_value *)bpf_map_lookup_elem(&aws_conntrack_map, &flow_key); 245 | if (flow_val != NULL) { 246 | // If it's a "default allow" flow, check if pod has flipped to "policies applied" state 247 | if (flow_val->val == CT_VAL_DEFAULT_ALLOW && pst->state == DEFAULT_ALLOW) { 248 | return BPF_OK; 249 | } 250 | if (flow_val->val == CT_VAL_POLICIES_APPLIED && pst->state == POLICIES_APPLIED) { 251 | return BPF_OK; 252 | } 253 | if (flow_val->val == CT_VAL_POLICIES_APPLIED && pst->state == DEFAULT_ALLOW) { 254 | flow_val->val = CT_VAL_DEFAULT_ALLOW; 255 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, flow_val, 0); // 0 -> BPF_ANY 256 | return BPF_OK; 257 | } 258 | if (flow_val->val == CT_VAL_DEFAULT_ALLOW && pst->state == POLICIES_APPLIED) { 259 | int ret = evaluateByLookUp(trie_key, flow_key, pst, evt, ip, l4_dst_port); 260 | if (ret == BPF_DROP) { 261 | bpf_map_delete_elem(&aws_conntrack_map, &flow_key); 262 | return BPF_DROP; 263 | } 264 | return BPF_OK; 265 | } 266 | } 267 | 268 | //Check for the reverse flow entry in the conntrack table 269 | reverse_flow_key.saddr = ip->daddr; 270 | reverse_flow_key.daddr = ip->saddr; 271 | reverse_flow_key.src_port = l4_dst_port; 272 | reverse_flow_key.dest_port = l4_src_port; 273 | reverse_flow_key.protocol = ip->nexthdr; 274 | reverse_flow_key.owner_addr = ip->saddr; 275 | 276 | //Check if it's a response packet 277 | reverse_flow_val = (struct conntrack_value *)bpf_map_lookup_elem(&aws_conntrack_map, &reverse_flow_key); 278 | if (reverse_flow_val != NULL) { 279 | return BPF_OK; 280 | } 281 | 282 | if (pst->state == DEFAULT_ALLOW) { 283 | struct conntrack_value new_flow_val = {}; 284 | new_flow_val.val = CT_VAL_DEFAULT_ALLOW; 285 | bpf_map_update_elem(&aws_conntrack_map, &flow_key, &new_flow_val, 0); // 0 - BPF_ANY 286 | evt.verdict = 1; 287 | bpf_ringbuf_output(&policy_events, &evt, sizeof(evt), 0); 288 | return BPF_OK; 289 | } 290 | 291 | return evaluateByLookUp(trie_key, flow_key, pst, evt, ip, l4_dst_port); 292 | } 293 | return BPF_OK; 294 | } 295 | 296 | char _license[] SEC("license") = "GPL"; 297 | -------------------------------------------------------------------------------- /pkg/ebpf/c/v4events.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | #include 3 | #include 4 | #include 5 | 6 | struct bpf_map_def_pvt { 7 | __u32 type; 8 | __u32 key_size; 9 | __u32 value_size; 10 | __u32 max_entries; 11 | __u32 map_flags; 12 | __u32 pinning; 13 | __u32 inner_map_fd; 14 | }; 15 | 16 | #define PIN_GLOBAL_NS 2 17 | #define BPF_MAP_TYPE_RINGBUF 27 18 | 19 | struct data_t { 20 | __u32 src_ip; 21 | __u32 src_port; 22 | __u32 dest_ip; 23 | __u32 dest_port; 24 | __u32 protocol; 25 | __u32 verdict; 26 | __u32 packet_sz; 27 | __u8 is_egress; 28 | }; 29 | 30 | struct conntrack_key { 31 | __u32 src_ip; 32 | __u16 src_port; 33 | __u32 dest_ip; 34 | __u16 dest_port; 35 | __u8 protocol; 36 | __u32 owner_ip; 37 | }; 38 | 39 | struct conntrack_value { 40 | __u8 val; 41 | }; 42 | 43 | struct bpf_map_def_pvt SEC("maps") aws_conntrack_map = { 44 | .type = BPF_MAP_TYPE_LRU_HASH, 45 | .key_size =sizeof(struct conntrack_key), 46 | 
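    // LRU hash pinned in the global namespace so the per-pod tc programs share
    // one conntrack table; the agent may size it via --conntrack-cache-table-size
    // (default 512 * 1024 entries).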
.value_size = sizeof(struct conntrack_value), 47 | .max_entries = 512 * 1024, 48 | .pinning = PIN_GLOBAL_NS, 49 | }; 50 | 51 | struct bpf_map_def_pvt SEC("maps") policy_events = { 52 | .type = BPF_MAP_TYPE_RINGBUF, 53 | .max_entries = 512 * 1024, 54 | .pinning = PIN_GLOBAL_NS, 55 | }; 56 | 57 | char _license[] SEC("license") = "GPL"; 58 | -------------------------------------------------------------------------------- /pkg/ebpf/c/v6events.bpf.c: -------------------------------------------------------------------------------- 1 | #include "vmlinux.h" 2 | #include 3 | #include 4 | #include 5 | 6 | struct bpf_map_def_pvt { 7 | __u32 type; 8 | __u32 key_size; 9 | __u32 value_size; 10 | __u32 max_entries; 11 | __u32 map_flags; 12 | __u32 pinning; 13 | __u32 inner_map_fd; 14 | }; 15 | 16 | #define PIN_GLOBAL_NS 2 17 | 18 | #define BPF_MAP_TYPE_RINGBUF 27 19 | 20 | struct data_t { 21 | __u8 src_ip[16]; 22 | __u32 src_port; 23 | __u8 dest_ip[16]; 24 | __u32 dest_port; 25 | __u32 protocol; 26 | __u32 verdict; 27 | __u32 packet_sz; 28 | __u8 is_egress; 29 | }; 30 | 31 | struct conntrack_key { 32 | struct in6_addr saddr; 33 | __u16 src_port; 34 | struct in6_addr daddr; 35 | __u16 dest_port; 36 | __u8 protocol; 37 | struct in6_addr owner_addr; 38 | }; 39 | 40 | 41 | struct conntrack_value { 42 | __u8 val; 43 | }; 44 | 45 | struct bpf_map_def_pvt SEC("maps") aws_conntrack_map = { 46 | .type = BPF_MAP_TYPE_LRU_HASH, 47 | .key_size =sizeof(struct conntrack_key), 48 | .value_size = sizeof(struct conntrack_value), 49 | .max_entries = 512 * 1024, 50 | .pinning = PIN_GLOBAL_NS, 51 | }; 52 | 53 | 54 | struct bpf_map_def_pvt SEC("maps") policy_events = { 55 | .type = BPF_MAP_TYPE_RINGBUF, 56 | .max_entries = 512 * 1024, 57 | .pinning = PIN_GLOBAL_NS, 58 | }; 59 | 60 | char _license[] SEC("license") = "GPL"; 61 | -------------------------------------------------------------------------------- /pkg/logger/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "github.com/go-logr/logr" 5 | "github.com/go-logr/zapr" 6 | ) 7 | 8 | var log Logger 9 | 10 | type Fields map[string]interface{} 11 | 12 | type Logger interface { 13 | Debugf(format string, args ...interface{}) 14 | Debug(msg string) 15 | Infof(format string, args ...interface{}) 16 | Info(msg string) 17 | Warnf(format string, args ...interface{}) 18 | Warn(msg string) 19 | Errorf(format string, args ...interface{}) 20 | Error(msg string) 21 | Fatalf(format string, args ...interface{}) 22 | Panicf(format string, args ...interface{}) 23 | WithFields(keyValues Fields) Logger 24 | } 25 | 26 | var ( 27 | DEFAULT_LOG_LEVEL = "info" 28 | DEFAULT_LOG_LOCATION = "/var/log/aws-routed-eni/network-policy-agent.log" 29 | ) 30 | 31 | func New(logLevel string, logLocation string) Logger { 32 | inputLogConfig := &Configuration{ 33 | LogLevel: logLevel, 34 | LogLocation: logLocation, 35 | } 36 | log = inputLogConfig.newZapLogger() 37 | return log 38 | } 39 | 40 | func Get() Logger { 41 | if log == nil { 42 | log = New(DEFAULT_LOG_LEVEL, DEFAULT_LOG_LOCATION) 43 | log.Warn("Logger was not initialized explicitly, using default logger.") 44 | } 45 | return log 46 | } 47 | 48 | func GetControllerRuntimeLogger() logr.Logger { 49 | zapSugared := Get().(*structuredLogger).zapLogger 50 | return zapr.NewLogger(zapSugared.Desugar()) 51 | } 52 | -------------------------------------------------------------------------------- /pkg/logger/zaplogger.go: 
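A minimal usage sketch for this logger package (logger.go above, zaplogger.go below); the example main package is illustrative and not part of the repository:

package main

import "github.com/aws/aws-network-policy-agent/pkg/logger"

func main() {
	// Initialize once at startup; "stdout" routes the JSON-encoded zap output to stdout.
	lg := logger.New("debug", "stdout")
	// Structured fields are passed as key/value pairs via WithFields.
	lg.WithFields(logger.Fields{"component": "example"}).Info("logger initialized")
	lg.Infof("serving metrics on port %d", 61680)
	// Elsewhere, logger.Get() returns the same shared instance (or a default one if New was never called).
	logger.Get().Debug("reusing the shared logger")
}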
-------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"). 4 | // You may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | //limitations under the License. 14 | 15 | package logger 16 | 17 | import ( 18 | "os" 19 | "strings" 20 | 21 | "go.uber.org/zap" 22 | "go.uber.org/zap/zapcore" 23 | lumberjack "gopkg.in/natefinch/lumberjack.v2" 24 | ) 25 | 26 | type structuredLogger struct { 27 | zapLogger *zap.SugaredLogger 28 | } 29 | 30 | // Configuration stores the config for the logger 31 | type Configuration struct { 32 | LogLevel string 33 | LogLocation string 34 | } 35 | 36 | // getZapLevel converts log level string to zapcore.Level 37 | func getZapLevel(inputLogLevel string) zapcore.Level { 38 | lvl := strings.ToLower(inputLogLevel) 39 | 40 | switch lvl { 41 | case "debug": 42 | return zapcore.DebugLevel 43 | case "info": 44 | return zapcore.InfoLevel 45 | case "warn": 46 | return zapcore.WarnLevel 47 | case "error": 48 | return zapcore.ErrorLevel 49 | case "fatal": 50 | return zapcore.FatalLevel 51 | default: 52 | return zapcore.DebugLevel 53 | } 54 | } 55 | 56 | func getEncoder() zapcore.Encoder { 57 | encoderConfig := zap.NewProductionEncoderConfig() 58 | encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder 59 | return zapcore.NewJSONEncoder(encoderConfig) 60 | } 61 | 62 | func (logConfig *Configuration) newZapLogger() *structuredLogger { //Logger { 63 | var cores []zapcore.Core 64 | 65 | logLevel := getZapLevel(logConfig.LogLevel) 66 | 67 | writer := getLogFilePath(logConfig.LogLocation) 68 | 69 | cores = append(cores, zapcore.NewCore(getEncoder(), writer, logLevel)) 70 | 71 | combinedCore := zapcore.NewTee(cores...) 72 | 73 | logger := zap.New(combinedCore, 74 | zap.AddCaller(), 75 | zap.AddCallerSkip(2), 76 | ) 77 | defer logger.Sync() 78 | 79 | sugar := logger.Sugar() 80 | return &structuredLogger{ 81 | zapLogger: sugar, 82 | } 83 | } 84 | 85 | // getLogFilePath returns the writer 86 | func getLogFilePath(logFilePath string) zapcore.WriteSyncer { 87 | var writer zapcore.WriteSyncer 88 | 89 | if logFilePath == "" { 90 | writer = zapcore.Lock(os.Stderr) 91 | } else if strings.ToLower(logFilePath) != "stdout" { 92 | writer = getLogWriter(logFilePath) 93 | } else { 94 | writer = zapcore.Lock(os.Stdout) 95 | } 96 | 97 | return writer 98 | } 99 | 100 | // getLogWriter is for lumberjack 101 | func getLogWriter(logFilePath string) zapcore.WriteSyncer { 102 | lumberJackLogger := &lumberjack.Logger{ 103 | Filename: logFilePath, 104 | MaxSize: 200, 105 | MaxBackups: 8, 106 | MaxAge: 30, 107 | Compress: true, 108 | } 109 | return zapcore.AddSync(lumberJackLogger) 110 | } 111 | 112 | func (l *structuredLogger) Debugf(format string, args ...interface{}) { 113 | l.zapLogger.Debugf(format, args...) 
114 | } 115 | 116 | func (l *structuredLogger) Debug(msg string) { 117 | l.zapLogger.Desugar().Debug(msg) 118 | } 119 | 120 | func (l *structuredLogger) Infof(format string, args ...interface{}) { 121 | l.zapLogger.Infof(format, args...) 122 | } 123 | 124 | func (l *structuredLogger) Info(msg string) { 125 | l.zapLogger.Desugar().Info(msg) 126 | } 127 | 128 | func (l *structuredLogger) Warnf(format string, args ...interface{}) { 129 | l.zapLogger.Warnf(format, args...) 130 | } 131 | 132 | func (l *structuredLogger) Warn(msg string) { 133 | l.zapLogger.Desugar().Warn(msg) 134 | } 135 | 136 | func (l *structuredLogger) Errorf(format string, args ...interface{}) { 137 | l.zapLogger.Errorf(format, args...) 138 | } 139 | 140 | func (l *structuredLogger) Error(msg string) { 141 | l.zapLogger.Desugar().Error(msg) 142 | } 143 | 144 | func (l *structuredLogger) Fatalf(format string, args ...interface{}) { 145 | l.zapLogger.Fatalf(format, args...) 146 | } 147 | 148 | func (l *structuredLogger) Panicf(format string, args ...interface{}) { 149 | l.zapLogger.Panicf(format, args...) 150 | } 151 | 152 | func (logf *structuredLogger) WithFields(fields Fields) Logger { 153 | var f = make([]interface{}, 0) 154 | for k, v := range fields { 155 | f = append(f, k) 156 | f = append(f, v) 157 | } 158 | newLogger := logf.zapLogger.With(f...) 159 | return &structuredLogger{newLogger} 160 | } 161 | -------------------------------------------------------------------------------- /pkg/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "net/http" 5 | "strconv" 6 | "sync" 7 | "time" 8 | 9 | "github.com/aws/amazon-vpc-cni-k8s/pkg/utils/retry" 10 | "github.com/aws/aws-network-policy-agent/pkg/logger" 11 | "github.com/prometheus/client_golang/prometheus/promhttp" 12 | ) 13 | 14 | const ( 15 | // metricsPort is the port for prometheus metrics 16 | metricsPort = 61680 17 | ) 18 | 19 | func log() logger.Logger { 20 | return logger.Get() 21 | } 22 | 23 | func ServeMetrics() { 24 | log().Infof("Serving metrics on port %d", metricsPort) 25 | server := setupMetricsServer() 26 | for { 27 | once := sync.Once{} 28 | _ = retry.WithBackoff(retry.NewSimpleBackoff(time.Second, time.Minute, 0.2, 2), func() error { 29 | err := server.ListenAndServe() 30 | once.Do(func() { 31 | log().Errorf("Error running http API: %v", err) 32 | }) 33 | return err 34 | }) 35 | } 36 | } 37 | 38 | func setupMetricsServer() *http.Server { 39 | serveMux := http.NewServeMux() 40 | serveMux.Handle("/metrics", promhttp.Handler()) 41 | server := &http.Server{ 42 | Addr: ":" + strconv.Itoa(metricsPort), 43 | Handler: serveMux, 44 | ReadTimeout: 5 * time.Second, 45 | WriteTimeout: 5 * time.Second, 46 | } 47 | return server 48 | } 49 | -------------------------------------------------------------------------------- /pkg/rpc/rpc_handler.go: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | // not use this file except in compliance with the License. A copy of the 5 | // License is located at 6 | // 7 | // http://aws.amazon.com/apache2.0/ 8 | // 9 | // or in the "license" file accompanying this file. This file is distributed 10 | // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 | // express or implied. 
See the License for the specific language governing 12 | // permissions and limitations under the License. 13 | 14 | package rpc 15 | 16 | import ( 17 | "context" 18 | "net" 19 | "sync" 20 | 21 | "github.com/aws/aws-network-policy-agent/controllers" 22 | "github.com/aws/aws-network-policy-agent/pkg/logger" 23 | "github.com/aws/aws-network-policy-agent/pkg/utils" 24 | 25 | "github.com/aws/amazon-vpc-cni-k8s/rpc" 26 | "github.com/pkg/errors" 27 | "google.golang.org/grpc" 28 | "google.golang.org/grpc/health" 29 | healthpb "google.golang.org/grpc/health/grpc_health_v1" 30 | "google.golang.org/grpc/reflection" 31 | "k8s.io/apimachinery/pkg/types" 32 | ) 33 | 34 | func log() logger.Logger { 35 | return logger.Get() 36 | } 37 | 38 | var ( 39 | POLICIES_APPLIED = 0 40 | DEFAULT_ALLOW = 1 41 | DEFAULT_DENY = 2 42 | ) 43 | 44 | const ( 45 | npgRPCaddress = "127.0.0.1:50052" 46 | grpcHealthServiceName = "grpc.health.v1.np-agent" 47 | ) 48 | 49 | // server controls RPC service responses. 50 | type server struct { 51 | policyReconciler *controllers.PolicyEndpointsReconciler 52 | } 53 | 54 | // EnforceNpToPod processes CNI Enforce NP network request 55 | func (s *server) EnforceNpToPod(ctx context.Context, in *rpc.EnforceNpRequest) (*rpc.EnforceNpReply, error) { 56 | if s.policyReconciler == nil || s.policyReconciler.GeteBPFClient() == nil { 57 | log().Debug("Network policy is disabled, returning success") 58 | success := rpc.EnforceNpReply{ 59 | Success: true, 60 | } 61 | return &success, nil 62 | } 63 | 64 | log().Infof("Received Enforce Network Policy Request for Pod: %s Namespace: %s Mode: %s", in.K8S_POD_NAME, in.K8S_POD_NAMESPACE, in.NETWORK_POLICY_MODE) 65 | var err error 66 | 67 | if !utils.IsValidNetworkPolicyEnforcingMode(in.NETWORK_POLICY_MODE) { 68 | err = errors.New("Invalid Network Policy Mode") 69 | log().Errorf("Network Policy Mode validation failed: %s, error: %v", in.NETWORK_POLICY_MODE, err) 70 | return nil, err 71 | } 72 | 73 | s.policyReconciler.SetNetworkPolicyMode(in.NETWORK_POLICY_MODE) 74 | podIdentifier := utils.GetPodIdentifier(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) 75 | isFirstPodInPodIdentifier := s.policyReconciler.GeteBPFClient().IsFirstPodInPodIdentifier(podIdentifier) 76 | err = s.policyReconciler.GeteBPFClient().AttacheBPFProbes(types.NamespacedName{Name: in.K8S_POD_NAME, Namespace: in.K8S_POD_NAMESPACE}, 77 | podIdentifier) 78 | if err != nil { 79 | log().Errorf("Attaching eBPF probe failed for pod: %s namespace: %s, error: %v", in.K8S_POD_NAME, in.K8S_POD_NAMESPACE, err) 80 | return nil, err 81 | } 82 | 83 | // We attempt to program eBPF firewall map entries for this pod, if the local agent is aware of the policies 84 | // configured against it. For example, if this is a new replica of an existing pod/deployment then the local 85 | // node agent will have the policy information available to it. If not, we will leave the pod in default allow 86 | // or default deny state based on NP mode until the Network Policy controller reconciles existing policies 87 | // against this pod. 
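	// For orientation: the CNI plugin reaches this handler over the loopback gRPC endpoint defined by
	// npgRPCaddress above. A rough client-side sketch, assuming the protoc-generated
	// rpc.NewNPBackendClient constructor (illustrative; the field values are examples, not taken from
	// this repository):
	//
	//   conn, err := grpc.Dial("127.0.0.1:50052", grpc.WithTransportCredentials(insecure.NewCredentials()))
	//   client := rpc.NewNPBackendClient(conn)
	//   reply, err := client.EnforceNpToPod(ctx, &rpc.EnforceNpRequest{
	//       K8S_POD_NAME:        "app-0",
	//       K8S_POD_NAMESPACE:   "default",
	//       NETWORK_POLICY_MODE: "standard",
	//   })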
88 | 89 | // Check if there are active policies against the new pod and if there are other pods on the local node that share 90 | // the eBPF firewall maps with the newly launched pod, if already present we can skip the map update and return 91 | policiesAvailableInLocalCache := s.policyReconciler.ArePoliciesAvailableInLocalCache(podIdentifier) 92 | if policiesAvailableInLocalCache && isFirstPodInPodIdentifier { 93 | // If we're here, then the local agent knows the list of active policies that apply to this pod and 94 | // this is the first pod of it's type to land on the local node/cluster 95 | log().Info("Active policies present against this pod and this is a new Pod to the local node, configuring firewall rules....") 96 | 97 | //Derive Ingress and Egress Firewall Rules and Update the relevant eBPF maps 98 | ingressRules, egressRules, _ := 99 | s.policyReconciler.DeriveFireWallRulesPerPodIdentifier(podIdentifier, in.K8S_POD_NAMESPACE) 100 | 101 | err = s.policyReconciler.GeteBPFClient().UpdateEbpfMaps(podIdentifier, ingressRules, egressRules) 102 | if err != nil { 103 | log().Errorf("Map update(s) failed for podIdentifier: %s, error: %v", podIdentifier, err) 104 | return nil, err 105 | } 106 | } else { 107 | // If no active policies present against this pod identifier, set pod_state to default_allow or default_deny 108 | if !policiesAvailableInLocalCache { 109 | log().Debugf("No active policies present for podIdentifier: %s", podIdentifier) 110 | if utils.IsStrictMode(in.NETWORK_POLICY_MODE) { 111 | log().Infof("Updating pod_state map to default_deny for podIdentifier: %s", podIdentifier) 112 | err = s.policyReconciler.GeteBPFClient().UpdatePodStateEbpfMaps(podIdentifier, DEFAULT_DENY, true, true) 113 | if err != nil { 114 | log().Errorf("Map update(s) failed for podIdentifier: %s, error: %v", podIdentifier, err) 115 | return nil, err 116 | } 117 | } else { 118 | log().Infof("Updating pod_state map to default_allow for podIdentifier: %s", podIdentifier) 119 | err = s.policyReconciler.GeteBPFClient().UpdatePodStateEbpfMaps(podIdentifier, DEFAULT_ALLOW, true, true) 120 | if err != nil { 121 | log().Errorf("Map update(s) failed for podIdentifier: %s, error: %v", podIdentifier, err) 122 | return nil, err 123 | } 124 | } 125 | } else { 126 | log().Info("Pod shares the eBPF firewall maps with other local pods. 
No Map update required..") 127 | } 128 | } 129 | 130 | resp := rpc.EnforceNpReply{ 131 | Success: err == nil, 132 | } 133 | return &resp, nil 134 | } 135 | 136 | // DeletePodNp processes CNI Delete Pod NP network request 137 | func (s *server) DeletePodNp(ctx context.Context, in *rpc.DeleteNpRequest) (*rpc.DeleteNpReply, error) { 138 | if s.policyReconciler == nil || s.policyReconciler.GeteBPFClient() == nil { 139 | log().Debug("Network policy is disabled, returning success") 140 | success := rpc.DeleteNpReply{ 141 | Success: true, 142 | } 143 | return &success, nil 144 | } 145 | 146 | log().Infof("Received Delete Network Policy Request for Pod: %s Namespace: %s", in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) 147 | var err error 148 | podIdentifier := utils.GetPodIdentifier(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) 149 | 150 | value, _ := s.policyReconciler.GeteBPFClient().GetDeletePodIdentifierLockMap().LoadOrStore(podIdentifier, &sync.Mutex{}) 151 | deletePodIdentifierLock := value.(*sync.Mutex) 152 | deletePodIdentifierLock.Lock() 153 | log().Debugf("Got the deletePodIdentifierLock for Pod: %s Namespace: %s PodIdentifier: %s", in.K8S_POD_NAME, in.K8S_POD_NAMESPACE, podIdentifier) 154 | 155 | isProgFdShared, err := s.policyReconciler.IsProgFdShared(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) 156 | s.policyReconciler.GeteBPFClient().DeletePodFromIngressProgPodCaches(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) 157 | s.policyReconciler.GeteBPFClient().DeletePodFromEgressProgPodCaches(in.K8S_POD_NAME, in.K8S_POD_NAMESPACE) 158 | if err == nil && !isProgFdShared { 159 | err = s.policyReconciler.GeteBPFClient().DeleteBPFProgramAndMaps(podIdentifier) 160 | if err != nil { 161 | log().Errorf("BPF programs and Maps delete failed for podIdentifier: %s, error: %v", podIdentifier, err) 162 | } 163 | deletePodIdentifierLock.Unlock() 164 | s.policyReconciler.GeteBPFClient().GetDeletePodIdentifierLockMap().Delete(podIdentifier) 165 | } else { 166 | deletePodIdentifierLock.Unlock() 167 | } 168 | resp := rpc.DeleteNpReply{ 169 | Success: true, 170 | } 171 | return &resp, nil 172 | } 173 | 174 | // RunRPCHandler handles request from gRPC 175 | func RunRPCHandler(policyReconciler *controllers.PolicyEndpointsReconciler) error { 176 | log().Infof("Serving RPC Handler on Address: %s", npgRPCaddress) 177 | listener, err := net.Listen("tcp", npgRPCaddress) 178 | if err != nil { 179 | log().Errorf("Failed to listen gRPC port: %v", err) 180 | return errors.Wrap(err, "network policy agent: failed to listen to gRPC port") 181 | } 182 | grpcServer := grpc.NewServer() 183 | rpc.RegisterNPBackendServer(grpcServer, &server{policyReconciler: policyReconciler}) 184 | healthServer := health.NewServer() 185 | // No need to ever change this to HealthCheckResponse_NOT_SERVING since it's a local service only 186 | healthServer.SetServingStatus(grpcHealthServiceName, healthpb.HealthCheckResponse_SERVING) 187 | healthpb.RegisterHealthServer(grpcServer, healthServer) 188 | 189 | // Register reflection service on gRPC server. 
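	// With reflection enabled below and the health service registered above, the local endpoint can be
	// exercised with standard tooling; for example (illustrative commands, assuming grpcurl and
	// grpc_health_probe are installed on the node):
	//   grpcurl -plaintext 127.0.0.1:50052 list
	//   grpc_health_probe -addr=127.0.0.1:50052 -service=grpc.health.v1.np-agent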
190 | reflection.Register(grpcServer) 191 | if err := grpcServer.Serve(listener); err != nil { 192 | log().Errorf("Failed to start server on gRPC port: %v", err) 193 | return errors.Wrap(err, "network policy agent: failed to start server on gRPC port") 194 | } 195 | log().Info("Done with RPC Handler initialization") 196 | return nil 197 | } 198 | -------------------------------------------------------------------------------- /pkg/rpcclient/client_wrapper.go: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"). You may 4 | // not use this file except in compliance with the License. A copy of the 5 | // License is located at 6 | // 7 | // http://aws.amazon.com/apache2.0/ 8 | // 9 | // or in the "license" file accompanying this file. This file is distributed 10 | // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 11 | // express or implied. See the License for the specific language governing 12 | // permissions and limitations under the License. 13 | 14 | package rpcclient 15 | 16 | import ( 17 | "context" 18 | 19 | "google.golang.org/grpc" 20 | "google.golang.org/grpc/credentials/insecure" 21 | ) 22 | 23 | // GRPCClient is the ipamd client Dial interface 24 | type GRPCClient interface { 25 | Dial(ctx context.Context, target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) 26 | } 27 | 28 | type NPAgentRPC struct{} 29 | 30 | // New creates a new GRPCClient 31 | func New() GRPCClient { 32 | return &NPAgentRPC{} 33 | } 34 | 35 | func (n *NPAgentRPC) Dial(ctx context.Context, target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { 36 | return grpc.DialContext(ctx, target, opts...) 37 | } 38 | 39 | func GetDefaultServiceRetryConfig() grpc.DialOption { 40 | 41 | // The retry policy for the request made to IPAM server.
It waits for the IPAM GRPC to be up before initiating retry policy 42 | config := `{ 43 | "methodConfig": [{ 44 | "name": [{"service": "rpc.ConfigServerBackend"}], 45 | "waitForReady": true, 46 | "retryPolicy": { 47 | "MaxAttempts": 5, 48 | "InitialBackoff": "0.5s", 49 | "MaxBackoff": "10s", 50 | "BackoffMultiplier": 1.1, 51 | "RetryableStatusCodes": [ "UNAVAILABLE", "ABORTED", "UNKNOWN"] 52 | } 53 | }] 54 | }` 55 | return grpc.WithDefaultServiceConfig(config) 56 | } 57 | 58 | func GetInsecureConnectionType() grpc.DialOption { 59 | return grpc.WithTransportCredentials(insecure.NewCredentials()) 60 | } 61 | -------------------------------------------------------------------------------- /pkg/utils/cp/cp.go: -------------------------------------------------------------------------------- 1 | package cp 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | 8 | "github.com/aws/aws-network-policy-agent/pkg/logger" 9 | ) 10 | 11 | var ( 12 | EKS_CLI_BINARY = "aws-eks-na-cli" 13 | EKS_V6_CLI_BINARY = "aws-eks-na-cli-v6" 14 | ) 15 | 16 | func log() logger.Logger { 17 | return logger.Get() 18 | } 19 | 20 | func cp(src, dst string) error { 21 | sourceFileStat, err := os.Stat(src) 22 | if err != nil { 23 | return err 24 | } 25 | 26 | if !sourceFileStat.Mode().IsRegular() { 27 | return fmt.Errorf("%s is not a regular file", src) 28 | } 29 | 30 | source, err := os.Open(src) 31 | if err != nil { 32 | return err 33 | } 34 | defer source.Close() 35 | 36 | destination, err := os.Create(dst) 37 | if err != nil { 38 | return err 39 | } 40 | defer destination.Close() 41 | _, err = io.Copy(destination, source) 42 | return err 43 | } 44 | 45 | func CopyFile(src, dst string) (err error) { 46 | dstTmp := fmt.Sprintf("%s.tmp", dst) 47 | if err := cp(src, dstTmp); err != nil { 48 | return fmt.Errorf("failed to copy file: %s", err) 49 | } 50 | 51 | err = os.Rename(dstTmp, dst) 52 | if err != nil { 53 | return fmt.Errorf("failed to rename file: %s", err) 54 | } 55 | 56 | si, err := os.Stat(src) 57 | if err != nil { 58 | return fmt.Errorf("failed to stat file: %s", err) 59 | } 60 | err = os.Chmod(dst, si.Mode()) 61 | if err != nil { 62 | return fmt.Errorf("failed to chmod file: %s", err) 63 | } 64 | 65 | return nil 66 | } 67 | 68 | func InstallBPFBinaries(pluginBins []string, hostCNIBinPath string) error { 69 | log().Info("Let's install BPF Binaries on to the host path.....") 70 | for _, plugin := range pluginBins { 71 | targetPlugin := plugin 72 | 73 | // CLI binary should always refer to aws-eks-na-cli 74 | if plugin == EKS_V6_CLI_BINARY { 75 | targetPlugin = EKS_CLI_BINARY 76 | } 77 | 78 | target := fmt.Sprintf("%s%s", hostCNIBinPath, targetPlugin) 79 | source := fmt.Sprintf("%s", plugin) 80 | log().Infof("Installing BPF Binary..target %s source %s", target, source) 81 | 82 | if err := CopyFile(source, target); err != nil { 83 | log().Errorf("Failed to install target %s error %v", target, err) 84 | } 85 | log().Infof("Successfully installed - binary %s", target) 86 | } 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /pkg/utils/imds/imds.go: -------------------------------------------------------------------------------- 1 | package imds 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/aws/aws-sdk-go/aws" 7 | ec2metadatasvc "github.com/aws/aws-sdk-go/aws/ec2metadata" 8 | "github.com/aws/aws-sdk-go/aws/session" 9 | ) 10 | 11 | // EC2Metadata wraps the methods from the amazon-sdk-go's ec2metadata package 12 | type EC2Metadata interface { 13 | GetMetadata(path 
string) (string, error) 14 | Region() (string, error) 15 | } 16 | 17 | func GetMetaData(key string) (string, error) { 18 | awsSession := session.Must(session.NewSession(aws.NewConfig(). 19 | WithMaxRetries(10), 20 | )) 21 | var ec2Metadata EC2Metadata 22 | ec2Metadata = ec2metadatasvc.New(awsSession) 23 | requestedData, err := ec2Metadata.GetMetadata(key) 24 | if err != nil { 25 | return "", fmt.Errorf("get instance metadata: failed to retrieve %s - %s", key, err) 26 | } 27 | return requestedData, nil 28 | } 29 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var ( 4 | GitVersion string 5 | GitCommit string 6 | BuildDate string 7 | ) 8 | -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | ## Integration Test scripts 2 | 3 | This package contains shell scripts and libraries used for running e2e integration tests. 4 | 5 | ### Shell scripts 6 | 7 | `run-tests.sh` - Runs various integration test suites against the current revision in the invoking directory. This script is primarily used for running tests in GitHub Actions. 8 | 9 | `run-cyclonus-tests.sh` - Runs cyclonus tests against an existing cluster and validates the output. 10 | 11 | `update-node-agent-image.sh` - Updates the node agent image in the cluster to the image specified in the `AWS_EKS_NODEAGENT` parameter using the helm chart. 12 | 13 | #### Tests 14 | The following tests can be run with the `run-tests.sh` script; setting the respective environment variable to true enables them: 15 | 1. Conformance Tests - `RUN_CONFORMANCE_TESTS` 16 | 2.
Performance Tests - `RUN_PERFORMANCE_TESTS` 17 | 18 | 19 | #### Conformance tests 20 | This runs the upstream cyclonus test suite for testing network policy enforcement. 21 | 22 | 23 | #### Performance tests 24 | For now, this runs the upstream cyclonus tests and only collects memory metrics during the run. 25 | -------------------------------------------------------------------------------- /scripts/ebpf_sdk_override/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf ./vendor 4 | go mod edit -dropreplace github.com/aws/aws-ebpf-sdk-go 5 | go mod tidy 6 | -------------------------------------------------------------------------------- /scripts/ebpf_sdk_override/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | mkdir -p ./vendor/github.com/aws 6 | 7 | SDK_VENDOR_PATH=./vendor/github.com/aws/aws-ebpf-sdk-go 8 | 9 | # Clone the SDK to the vendor path (removing an old one if necessary) 10 | rm -rf $SDK_VENDOR_PATH 11 | git clone --depth 1 git@github.com:aws/aws-ebpf-sdk-go.git $SDK_VENDOR_PATH 12 | 13 | # Use the vendored version of aws-ebpf-sdk-go 14 | go mod edit -replace github.com/aws/aws-ebpf-sdk-go=./vendor/github.com/aws/aws-ebpf-sdk-go 15 | go mod tidy 16 | -------------------------------------------------------------------------------- /scripts/gen_mocks.sh: -------------------------------------------------------------------------------- 1 | # Note: Put all external mocks in the ./mocks directory 2 | # The mocks specific to this project go alongside the original package 3 | 4 | MOCKGEN=${MOCKGEN:-~/go/bin/mockgen} 5 | 6 | $MOCKGEN -package=mock_client -destination=./mocks/controller-runtime/client/client_mocks.go sigs.k8s.io/controller-runtime/pkg/client Client -------------------------------------------------------------------------------- /scripts/lib/cleanup.sh: -------------------------------------------------------------------------------- 1 | 2 | function check_path_cleanup(){ 3 | 4 | local worker_nodes=$(kubectl get nodes -o custom-columns=NAME:.metadata.name --no-headers) 5 | for node in $worker_nodes 6 | do 7 | export NODE=$node 8 | envsubst '$NODE' < ${DIR}/test/check-cleanup-pod.yaml > ${DIR}/test/check-cleanup-pod-$node.yaml 9 | kubectl apply -f ${DIR}/test/check-cleanup-pod-$node.yaml -n default 10 | rm -rf ${DIR}/test/check-cleanup-pod-$node.yaml 11 | done 12 | sleep 20 13 | 14 | for node in $worker_nodes 15 | do 16 | if [[ $(kubectl get pods -n default $node -ojsonpath="{.status.phase}") == "Failed" ]]; then 17 | echo "BPF files not cleaned up on $node" 18 | kubectl logs $node -n default 19 | TEST_FAILED=true 20 | else 21 | echo "BPF files were cleaned up from the node $node" 22 | fi 23 | kubectl delete pods $node -n default 24 | done 25 | 26 | } -------------------------------------------------------------------------------- /scripts/lib/cloudwatch.sh: -------------------------------------------------------------------------------- 1 | function install_cloudwatch_agent(){ 2 | 3 | local perf_cluster_name="" 4 | if [[ $IP_FAMILY == "IPv4" ]]; then 5 | perf_cluster_name="eks-network-policy-perf-v4" 6 | else 7 | perf_cluster_name="eks-network-policy-perf-v6" 8 | fi 9 | 10 | echo "Create IAM Service Account for CW agent" 11 | kubectl create ns $CW_NAMESPACE 12 | 13 | eksctl create iamserviceaccount \ 14 | --cluster $CLUSTER_NAME \ 15 | --name cloudwatch-agent \ 16 | --namespace $CW_NAMESPACE \ 17 | --attach-policy-arn $CW_POLICY_ARN \ 18 |
--approve 19 | 20 | echo "Install Cloudwatch Agent DS" 21 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cwagent/cwagent-serviceaccount.yaml 22 | 23 | echo '{ "logs": { "metrics_collected": { "kubernetes": { "metrics_collection_interval": 30, "cluster_name": "'${perf_cluster_name}'" }},"force_flush_interval": 5 }}' | jq > cwagentconfig.json 24 | kubectl create cm -n $CW_NAMESPACE cwagentconfig --from-file cwagentconfig.json 25 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cwagent/cwagent-daemonset.yaml 26 | 27 | # Allow CW agent to startup and push initial logs 28 | sleep 60 29 | } 30 | 31 | function uninstall_cloudwatch_agent(){ 32 | 33 | eksctl delete iamserviceaccount \ 34 | --cluster $CLUSTER_NAME \ 35 | --name cloudwatch-agent \ 36 | --namespace $CW_NAMESPACE || echo " IAM Service Account role not found" 37 | 38 | rm -rf cwagentconfig.json || echo "CW agent config not found" 39 | kubectl delete namespace $CW_NAMESPACE || echo "No namespace: $CW_NAMESPACE found" 40 | } -------------------------------------------------------------------------------- /scripts/lib/cluster.sh: -------------------------------------------------------------------------------- 1 | 2 | 3 | function load_default_values(){ 4 | 5 | CLUSTER_NAME=network-policy-${RANDOM} 6 | REGION="${REGION:=us-west-2}" 7 | AMI_FAMILY="${AMI_FAMILY:=AmazonLinux2}" 8 | NODEGROUP_TYPE="${NODEGROUP_TYPE:=linux}" 9 | NODES_CAPACITY="${NODES_CAPACITY:=3}" 10 | INSTANCE_TYPE="${INSTANCE_TYPE:=t3.large}" 11 | K8S_VERSION="${K8S_VERSION:=""}" 12 | IP_FAMILY="${IP_FAMILY:=IPv4}" 13 | CW_NAMESPACE="${CW_NAMESPACE:=amazon-cloudwatch}" 14 | CW_POLICY_ARN="${CW_POLICY_ARN:=arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy}" 15 | ENDPOINT_FLAG="${ENDPOINT_FLAG:=""}" 16 | HELM_EXTRA_ARGS="${HELM_EXTRA_ARGS:=""}" 17 | 18 | # If Kubernetes version is not passed then use the latest available version 19 | if [[ -z $K8S_VERSION ]]; then 20 | K8S_VERSION=$(eksctl utils describe-cluster-versions --region $REGION | jq -r '.clusterVersions[0].ClusterVersion') 21 | fi 22 | 23 | } 24 | 25 | function create_cluster(){ 26 | 27 | cat <<EOF > eks-cluster.yaml 28 | apiVersion: eksctl.io/v1alpha5 29 | iam: 30 | withOIDC: true 31 | addons: 32 | - name: vpc-cni 33 | - name: coredns 34 | - name: kube-proxy 35 | kind: ClusterConfig 36 | kubernetesNetworkConfig: 37 | ipFamily: ${IP_FAMILY} 38 | managedNodeGroups: 39 | - amiFamily: ${AMI_FAMILY} 40 | desiredCapacity: ${NODES_CAPACITY} 41 | instanceType: ${INSTANCE_TYPE} 42 | labels: 43 | alpha.eksctl.io/cluster-name: ${CLUSTER_NAME} 44 | alpha.eksctl.io/nodegroup-name: ${CLUSTER_NAME}-${NODEGROUP_TYPE}-nodes 45 | maxSize: ${NODES_CAPACITY} 46 | minSize: 1 47 | name: ${CLUSTER_NAME}-${NODEGROUP_TYPE} 48 | tags: 49 | alpha.eksctl.io/nodegroup-name: ${CLUSTER_NAME}-${NODEGROUP_TYPE}-nodes 50 | alpha.eksctl.io/nodegroup-type: managed 51 | metadata: 52 | name: ${CLUSTER_NAME} 53 | region: ${REGION} 54 | version: "${K8S_VERSION}" 55 | EOF 56 | 57 | eksctl create cluster -f ./eks-cluster.yaml 58 | 59 | echo "Nodes AMI version for cluster: $CLUSTER_NAME" 60 | kubectl get nodes -owide 61 | 62 | local providerID=$(kubectl get nodes -ojson | jq -r '.items[0].spec.providerID') 63 | local amiID=$(aws ec2 describe-instances
--instance-ids ${providerID##*/} --region $REGION | jq -r '.Reservations[].Instances[].ImageId') 64 | echo "Nodes AMI ID: $amiID" 65 | } 66 | 67 | function delete_cluster(){ 68 | 69 | eksctl delete cluster -f ./eks-cluster.yaml --disable-nodegroup-eviction || echo "Cluster Delete failed" 70 | rm -rf ./eks-cluster.yaml || echo "Cluster config file not found" 71 | } -------------------------------------------------------------------------------- /scripts/lib/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | check_is_installed() { 4 | local __name="$1" 5 | if ! is_installed "$__name"; then 6 | echo "Please install $__name before running this script." 7 | exit 1 8 | fi 9 | } 10 | 11 | is_installed() { 12 | local __name="$1" 13 | if $(which $__name >/dev/null 2>&1); then 14 | return 0 15 | else 16 | return 1 17 | fi 18 | } 19 | -------------------------------------------------------------------------------- /scripts/lib/network-policy.sh: -------------------------------------------------------------------------------- 1 | 2 | function load_addon_details() { 3 | 4 | ADDON_NAME="vpc-cni" 5 | echo "loading $ADDON_NAME addon details" 6 | LATEST_ADDON_VERSION=$(aws eks describe-addon-versions $ENDPOINT_FLAG --addon-name $ADDON_NAME --kubernetes-version $K8S_VERSION --region $REGION | jq '.addons[0].addonVersions[0].addonVersion' -r) 7 | get_service_account_role_arn 8 | } 9 | 10 | function wait_for_addon_status() { 11 | local expected_status=$1 12 | local retry_attempt=0 13 | if [ "$expected_status" = "DELETED" ]; then 14 | while $(aws eks describe-addon $ENDPOINT_FLAG --cluster-name $CLUSTER_NAME --addon-name $ADDON_NAME --region $REGION >> /dev/null); do 15 | if [ $retry_attempt -ge 30 ]; then 16 | echo "failed to delete addon, quitting after too many attempts" 17 | exit 1 18 | fi 19 | echo "addon is still not deleted" 20 | sleep 5 21 | ((retry_attempt=retry_attempt+1)) 22 | done 23 | echo "addon deleted" 24 | 25 | sleep 10 26 | return 27 | fi 28 | 29 | retry_attempt=0 30 | while true 31 | do 32 | STATUS=$(aws eks describe-addon $ENDPOINT_FLAG --cluster-name "$CLUSTER_NAME" --addon-name $ADDON_NAME --region "$REGION" | jq -r '.addon.status') 33 | if [ "$STATUS" = "$expected_status" ]; then 34 | echo "addon status matches expected status" 35 | return 36 | fi 37 | 38 | if [ $retry_attempt -ge 30 ]; then 39 | echo "failed to get desired add-on status: $STATUS, quitting after too many attempts" 40 | exit 1 41 | fi 42 | echo "addon status is not equal to $expected_status" 43 | sleep 10 44 | ((retry_attempt=retry_attempt+1)) 45 | done 46 | } 47 | 48 | function install_network_policy_mao() { 49 | 50 | local addon_version=$1 51 | if DESCRIBE_ADDON=$(aws eks describe-addon $ENDPOINT_FLAG --cluster-name $CLUSTER_NAME --addon-name $ADDON_NAME --region $REGION); then 52 | local current_addon_version=$(echo "$DESCRIBE_ADDON" | jq '.addon.addonVersion' -r) 53 | echo "deleting the existing addon version $current_addon_version" 54 | aws eks delete-addon $ENDPOINT_FLAG --cluster-name $CLUSTER_NAME --addon-name $ADDON_NAME --region $REGION 55 | wait_for_addon_status "DELETED" 56 | fi 57 | 58 | echo "Installing addon $addon_version with network policy enabled" 59 | 60 | SA_ROLE_ARN_ARG="" 61 | if [ "$EXISTING_SERVICE_ACCOUNT_ROLE_ARN" != "null" ]; then 62 | SA_ROLE_ARN_ARG="--service-account-role-arn $EXISTING_SERVICE_ACCOUNT_ROLE_ARN" 63 | fi 64 | 65 | aws eks create-addon \ 66 | --cluster-name $CLUSTER_NAME \ 67 | --addon-name $ADDON_NAME \ 68 | --configuration-value
'{"enableNetworkPolicy": "true"}' \ 69 | --resolve-conflicts OVERWRITE \ 70 | --addon-version $addon_version \ 71 | --region $REGION $ENDPOINT_FLAG $SA_ROLE_ARN_ARG 72 | 73 | wait_for_addon_status "ACTIVE" 74 | } 75 | 76 | function get_service_account_role_arn(){ 77 | EXISTING_SERVICE_ACCOUNT_ROLE_ARN=$(kubectl get serviceaccount -n kube-system aws-node -o json | jq '.metadata.annotations."eks.amazonaws.com/role-arn"' -r) 78 | } 79 | 80 | function install_network_policy_helm(){ 81 | 82 | helm repo add eks https://aws.github.io/eks-charts 83 | 84 | if [[ $IP_FAMILY == "IPv4" ]]; then 85 | ENABLE_IPv4=true 86 | ENABLE_IPv6=false 87 | ENABLE_PREFIX_DELEGATION=false 88 | else 89 | ENABLE_IPv4=false 90 | ENABLE_IPv6=true 91 | ENABLE_PREFIX_DELEGATION=true 92 | fi 93 | 94 | get_service_account_role_arn 95 | 96 | if [[ ! -z $EXISTING_SERVICE_ACCOUNT_ROLE_ARN ]]; then 97 | HELM_EXTRA_ARGS+=" --set serviceAccount.annotations.\eks\.amazonaws\.com/role-arn=$EXISTING_SERVICE_ACCOUNT_ROLE_ARN" 98 | fi 99 | 100 | echo "Updating annotations and labels on existing resources" 101 | resources=("daemonSet/aws-node" "clusterRole/aws-node" "clusterRoleBinding/aws-node" "serviceAccount/aws-node" "configmap/amazon-vpc-cni") 102 | for kind in ${resources[@]}; do 103 | echo "setting annotations and labels on $kind" 104 | kubectl -n kube-system annotate --overwrite $kind meta.helm.sh/release-name=aws-vpc-cni meta.helm.sh/release-namespace=kube-system || echo "Unable to annotate $kind" 105 | kubectl -n kube-system label --overwrite $kind app.kubernetes.io/managed-by=Helm || echo "Unable to label $kind" 106 | done 107 | 108 | echo "Installing/Updating the aws-vpc-cni helm chart with enableNetworkPolicy=true" 109 | helm upgrade --install aws-vpc-cni eks/aws-vpc-cni --wait --timeout 300s \ 110 | --namespace kube-system \ 111 | --set enableNetworkPolicy=true \ 112 | --set originalMatchLabels=true \ 113 | --set init.env.ENABLE_IPv6=$ENABLE_IPv6 \ 114 | --set env.ENABLE_IPv6=$ENABLE_IPv6 \ 115 | --set nodeAgent.enableIpv6=$ENABLE_IPv6 \ 116 | --set env.ENABLE_PREFIX_DELEGATION=$ENABLE_PREFIX_DELEGATION \ 117 | --set env.ENABLE_IPv4=$ENABLE_IPv4 $HELM_EXTRA_ARGS 118 | 119 | } -------------------------------------------------------------------------------- /scripts/lib/tests.sh: -------------------------------------------------------------------------------- 1 | function generate_manifest_and_apply(){ 2 | 3 | # Use Upstream images by default 4 | IMAGE_REPOSITORY_PARAMETER="" 5 | CYCLONUS_IMAGE_REPOSITORY="mfenwick100" 6 | 7 | if [[ $TEST_IMAGE_REGISTRY != "registry.k8s.io" ]]; then 8 | IMAGE_REPOSITORY_PARAMETER="- --image-repository=$TEST_IMAGE_REGISTRY" 9 | CYCLONUS_IMAGE_REPOSITORY=${TEST_IMAGE_REGISTRY}/networking-e2e-test-images 10 | fi 11 | 12 | cat < /dev/null 2>&1 && break 63 | done 64 | 65 | kubectl logs -n netpol job/cyclonus > ${DIR}/results.log 66 | kubectl get pods -A -owide 67 | 68 | # Cleanup after test finishes 69 | kubectl delete clusterrolebinding cyclonus 70 | kubectl delete ns netpol x y z 71 | 72 | cat ${DIR}/results.log 73 | 74 | echo "Verify results against expected" 75 | python3 ${DIR}/lib/verify_test_results.py -f ${DIR}/results.log -ip $IP_FAMILY || TEST_FAILED=true 76 | } 77 | 78 | function run_performance_tests(){ 79 | run_cyclonus_tests 80 | } 81 | -------------------------------------------------------------------------------- /scripts/lib/verify_test_results.py: -------------------------------------------------------------------------------- 1 | import re 2 | import sys 3 | import argparse 4 | 
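# Example invocation, mirroring how scripts/lib/tests.sh calls this script after a cyclonus run
# (the file path here is illustrative):
#   python3 verify_test_results.py -f results.log -ip IPv4
# The script exits non-zero when any testcase+step falls short of its expected probe count or when
# the number of captured test cases differs from 112, which is what lets the calling shell script
# set TEST_FAILED.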
5 | 6 | def main(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument("-f", "--file-name",default="", dest="file_name",help="Cyclonus results log file") 9 | parser.add_argument("-ip", "--ip-family",default="IPv4", dest="ip_family",help="IP Family of the cluster") 10 | args = parser.parse_args() 11 | 12 | # Cyclonus runs 112 test cases in total with each having some steps. Each step runs 81 probes in total across TCP, UDP and SCTP protocol 13 | # AWS Network Policy doesn't support all these combinations. We maintain a mapping of the test number and the number of 14 | # probes that are expected to pass on each testcase+step combination for IPv4 and IPv6 cluster. 15 | # For the test numbers not included in this map, it is expected that all the probes should be passing 16 | if args.ip_family == "IPv6": 17 | expected_results={ 2:{'Step 1': 80}, 3:{'Step 1': 80}, 8:{'Step 1': 80}, 12:{'Step 1': 64}, 23:{'Step 1': 80}, 25:{'Step 1': 80}, 26:{'Step 1': 80}, 28:{'Step 1': 80}, 29:{'Step 1': 80}, 31:{'Step 1': 50}, 32:{'Step 1': 64}, 98:{'Step 1': 79}, 102:{'Step 1': 71}, 104:{'Step 1': 71}, 106:{'Step 1': 71}, 108:{'Step 1': 71}, 111:{'Step 1': 79}, 112:{'Step 1': 80} } 18 | else: 19 | expected_results={ 2:{'Step 1': 80}, 3:{'Step 1': 80}, 8:{'Step 1': 80}, 12:{'Step 1': 80}, 23:{'Step 1': 80}, 25:{'Step 1': 80}, 26:{'Step 1': 80}, 28:{'Step 1': 80}, 29:{'Step 1': 80}, 31:{'Step 1': 50}, 32:{'Step 1': 64}, 98:{'Step 1': 80}, 111:{'Step 1': 80}, 112:{'Step 1': 80}} 20 | 21 | results = capture_results(args.file_name) 22 | verify_results(results,expected_results) 23 | 24 | def capture_results(file_name): 25 | results = {} 26 | rowbreak = False 27 | start_capture = False 28 | test_number = 0 29 | with open(file_name, 'r') as filedata: 30 | for data in filedata: 31 | if start_capture: 32 | if len(data.strip()) == 0: 33 | break 34 | elif data.startswith("+---"): 35 | rowbreak = True 36 | else: 37 | keys = [x.strip() for x in data.split('|')] 38 | if keys[1] == "TEST": 39 | continue 40 | elif rowbreak: 41 | if keys[2] in ["passed", "failed"]: 42 | test_number = int(keys[1].split(":")[0]) 43 | results[test_number] = {} 44 | else: 45 | # Capture all retries for a testcase+step combination to verify 46 | step = keys[3].split(",")[0] 47 | if step not in results[test_number]: 48 | results[test_number][step] = [] 49 | results[test_number][step].append([int(keys[4]),int(keys[5]),int(keys[6])]) 50 | rowbreak = False 51 | else: 52 | continue 53 | elif "SummaryTable:" in data: 54 | start_capture = True 55 | else: 56 | continue 57 | return results 58 | 59 | 60 | def verify_results(results,expected_results): 61 | 62 | is_test_run_failed = False 63 | for test_number in results.keys(): 64 | for step in results[test_number].keys(): 65 | is_test_case_failed = True 66 | expected_correct = 0 67 | 68 | # Verifiying result from each retry for testcase+step 69 | for try_result in results[test_number][step]: 70 | count_failed, count_correct, count_ignored = try_result 71 | # Expected correct count by default for a testcase+step 72 | expected_correct = count_failed + count_correct + count_ignored 73 | 74 | if test_number in expected_results.keys(): 75 | if step in expected_results[test_number]: 76 | expected_correct = expected_results[test_number][step] 77 | 78 | # Check if the number of probes passed in testcase+step are as expected 79 | if count_correct >= expected_correct: 80 | print("Test Number:{test_number} | {step} | Passed -> Correct:{count_correct} Expected:{expected_correct}".format( 81 | 
test_number=test_number,step=step, 82 | count_correct=try_result[1],expected_correct=expected_correct 83 | )) 84 | is_test_case_failed = False 85 | break 86 | 87 | if is_test_case_failed: 88 | print("Test Number:{test_number} | {step} | Failed -> Try results: {probes} Expected:{expected_correct}".format( 89 | test_number=test_number,step=step, 90 | probes=results[test_number][step],expected_correct=expected_correct 91 | )) 92 | # Mark the entire test run as failed since at least one test deviated from the expected results 93 | is_test_run_failed=True 94 | 95 | if is_test_run_failed or len(results) !=112: 96 | sys.exit(1) 97 | else: 98 | sys.exit(0) 99 | 100 | if __name__ == "__main__": 101 | main() 102 | -------------------------------------------------------------------------------- /scripts/run-cyclonus-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The script runs Network Policy Cyclonus tests on an existing cluster 4 | # Parameters: 5 | # CLUSTER_NAME: name of the cluster 6 | # KUBECONFIG: Set the variable to the cluster kubeconfig file path 7 | # REGION: defaults to us-west-2 8 | # IP_FAMILY: defaults to IPv4 9 | # ADDON_VERSION: Optional, defaults to the latest version 10 | # ENDPOINT: Optional 11 | # DEPLOY_NETWORK_POLICY_CONTROLLER_ON_DATAPLANE: Optional, defaults to false 12 | # NP_CONTROLLER_ENDPOINT_CHUNK_SIZE: Optional 13 | # AWS_EKS_NODEAGENT: Optional 14 | # AWS_CNI_IMAGE: Optional 15 | # AWS_CNI_IMAGE_INIT: Optional 16 | 17 | set -euoE pipefail 18 | DIR=$(cd "$(dirname "$0")"; pwd) 19 | GINKGO_TEST_BUILD_DIR="$DIR/../test/build" 20 | 21 | source ${DIR}/lib/cleanup.sh 22 | source ${DIR}/lib/network-policy.sh 23 | source ${DIR}/lib/tests.sh 24 | 25 | : "${ENDPOINT_FLAG:=""}" 26 | : "${ENDPOINT:=""}" 27 | : "${ADDON_VERSION:=""}" 28 | : "${IP_FAMILY:="IPv4"}" 29 | : "${REGION:="us-west-2"}" 30 | : "${SKIP_ADDON_INSTALLATION:="false"}" 31 | : "${SKIP_MAKE_TEST_BINARIES:="false"}" 32 | : "${ENABLE_STRICT_MODE:="false"}" 33 | : "${K8S_VERSION:=""}" 34 | : "${TEST_IMAGE_REGISTRY:="registry.k8s.io"}" 35 | : "${PROD_IMAGE_REGISTRY:=""}" 36 | : "${DEPLOY_NETWORK_POLICY_CONTROLLER_ON_DATAPLANE:="false"}" 37 | : "${NP_CONTROLLER_ENDPOINT_CHUNK_SIZE:=""}" 38 | : "${KUBE_CONFIG_PATH:=$KUBECONFIG}" 39 | 40 | TEST_FAILED="false" 41 | 42 | if [[ ! -z $ENDPOINT ]]; then 43 | ENDPOINT_FLAG="--endpoint-url $ENDPOINT" 44 | fi 45 | 46 | if [[ -z $K8S_VERSION ]]; then 47 | K8S_VERSION=$(aws eks describe-cluster $ENDPOINT_FLAG --name $CLUSTER_NAME --region $REGION | jq -r '.cluster.version') 48 | fi 49 | 50 | echo "Running Cyclonus e2e tests with the following variables 51 | CLUSTER_NAME: $CLUSTER_NAME 52 | REGION: $REGION 53 | IP_FAMILY: $IP_FAMILY 54 | 55 | Optional args 56 | ENDPOINT: $ENDPOINT 57 | ADDON_VERSION: $ADDON_VERSION 58 | K8S_VERSION: $K8S_VERSION 59 | " 60 | 61 | echo "Nodes AMI version for cluster: $CLUSTER_NAME" 62 | kubectl get nodes -owide 63 | 64 | PROVIDER_ID=$(kubectl get nodes -ojson | jq -r '.items[0].spec.providerID') 65 | AMI_ID=$(aws ec2 describe-instances --instance-ids ${PROVIDER_ID##*/} --region $REGION | jq -r '.Reservations[].Instances[].ImageId') 66 | echo "Nodes AMI ID: $AMI_ID" 67 | 68 | if [[ $SKIP_ADDON_INSTALLATION == "false" ]]; then 69 | load_addon_details 70 | 71 | if [[ ! -z $ADDON_VERSION ]]; then 72 | # Install the specified addon version 73 | install_network_policy_mao $ADDON_VERSION 74 | elif [[ !
-z $LATEST_ADDON_VERSION ]]; then 75 | # Install the latest addon version for the k8s version, if available 76 | install_network_policy_mao $LATEST_ADDON_VERSION 77 | else 78 | # Fall back to installing the latest version using helm 79 | install_network_policy_helm 80 | fi 81 | else 82 | echo "Skipping addons installation. Make sure you have enabled network policy support in your cluster before executing the test" 83 | fi 84 | 85 | if [[ $DEPLOY_NETWORK_POLICY_CONTROLLER_ON_DATAPLANE == "true" ]]; then 86 | make deploy-network-policy-controller-on-dataplane NP_CONTROLLER_IMAGE=$PROD_IMAGE_REGISTRY NP_CONTROLLER_ENDPOINT_CHUNK_SIZE=$NP_CONTROLLER_ENDPOINT_CHUNK_SIZE 87 | fi 88 | 89 | run_cyclonus_tests 90 | 91 | check_path_cleanup 92 | 93 | if [[ $ENABLE_STRICT_MODE == "true" ]]; then 94 | 95 | if [[ $SKIP_MAKE_TEST_BINARIES == "false" ]]; then 96 | echo "Making ginkgo test binaries" 97 | (cd $DIR/../ && make build-test-binaries) 98 | else 99 | echo "Skipping making ginkgo test binaries" 100 | fi 101 | 102 | echo "Enable network policy strict mode" 103 | kubectl set env daemonset aws-node -n kube-system -c aws-node NETWORK_POLICY_ENFORCING_MODE=strict 104 | 105 | echo "Check aws-node daemonset status" 106 | kubectl rollout status ds/aws-node -n kube-system --timeout=300s 107 | 108 | CGO_ENABLED=0 ginkgo -v -timeout 15m --no-color --fail-on-pending $GINKGO_TEST_BUILD_DIR/strict.test -- --cluster-kubeconfig=$KUBE_CONFIG_PATH --cluster-name=$CLUSTER_NAME --test-image-registry=$TEST_IMAGE_REGISTRY --ip-family=$IP_FAMILY || TEST_FAILED="true" 109 | 110 | fi 111 | 112 | if [[ $TEST_FAILED == "true" ]]; then 113 | echo "Test run failed" 114 | exit 1 115 | fi 116 | -------------------------------------------------------------------------------- /scripts/run-tests.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | set -Eeuox pipefail 3 | 4 | DIR=$(cd "$(dirname "$0")"; pwd) 5 | 6 | source ${DIR}/lib/common.sh 7 | source ${DIR}/lib/cleanup.sh 8 | source ${DIR}/lib/cloudwatch.sh 9 | source ${DIR}/lib/cluster.sh 10 | source ${DIR}/lib/network-policy.sh 11 | source ${DIR}/lib/tests.sh 12 | 13 | RUN_PERFORMANCE_TESTS="${RUN_PERFORMANCE_TESTS:=false}" 14 | RUN_CONFORMANCE_TESTS="${RUN_CONFORMANCE_TESTS:=false}" 15 | AWS_EKS_NODEAGENT_IMAGE="${AWS_EKS_NODEAGENT_IMAGE:=""}" 16 | TEST_IMAGE_REGISTRY="${TEST_IMAGE_REGISTRY:="registry.k8s.io"}" 17 | TEST_FAILED="false" 18 | 19 | cleanup() { 20 | 21 | if [[ $RUN_PERFORMANCE_TESTS == "true" ]]; then 22 | uninstall_cloudwatch_agent 23 | fi 24 | 25 | delete_cluster 26 | } 27 | 28 | trap cleanup EXIT 29 | 30 | check_is_installed aws 31 | check_is_installed eksctl 32 | check_is_installed helm 33 | 34 | load_default_values 35 | create_cluster 36 | 37 | make update-node-agent-image AWS_EKS_NODEAGENT=$AWS_EKS_NODEAGENT_IMAGE IP_FAMILY=$IP_FAMILY 38 | 39 | if [[ $RUN_PERFORMANCE_TESTS == "true" ]]; then 40 | echo "Running Performance tests" 41 | install_cloudwatch_agent 42 | run_performance_tests 43 | elif [[ $RUN_CONFORMANCE_TESTS == "true" ]]; then 44 | echo "Running Conformance tests" 45 | run_cyclonus_tests 46 | fi 47 | 48 | check_path_cleanup 49 | 50 | if [[ $TEST_FAILED == "true" ]]; then 51 | echo "Test run failed, check failures" 52 | exit 1 53 | fi -------------------------------------------------------------------------------- /scripts/test/check-cleanup-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: $NODE 5 | spec: 6 | restartPolicy: Never 7 | nodeName: $NODE 8 | containers: 9 | - image: public.ecr.aws/r7y6e9p2/test-agent:latest 10 | name: check-bpf-cleanup 11 | command: ["./check-bpf-cleanup-agent"] 12 | volumeMounts: 13 | - mountPath: /tmp/sys/ 14 | name: bpf-volume 15 | volumes: 16 | - name: bpf-volume 17 | hostPath: 18 | path: /sys/ 19 | type: DirectoryOrCreate 20 | -------------------------------------------------------------------------------- /scripts/update-node-agent-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Use this script to set the aws-eks-nodeagent image on aws-node daemonset using the latest helm chart 3 | 4 | # Parameters: 5 | # KUBECONFIG: path to the kubeconfig file, default ~/.kube/config 6 | # IP_FAMILY: defaults to IPv4 7 | # AWS_EKS_NODEAGENT: node agent image 8 | 9 | set -e 10 | DIR=$(cd "$(dirname "$0")"; pwd) 11 | 12 | : "${IP_FAMILY:="IPv4"}" 13 | HELM_EXTRA_ARGS="" 14 | 15 | source ${DIR}/lib/network-policy.sh 16 | 17 | if [[ !
-z $AWS_EKS_NODEAGENT ]]; then 18 | echo "Replacing Node Agent Image in aws-vpc-cni helm chart with $AWS_EKS_NODEAGENT" 19 | HELM_EXTRA_ARGS+=" --set nodeAgent.image.override=$AWS_EKS_NODEAGENT" 20 | else 21 | echo "Installing the latest aws-vpc-cni helm chart with default values" 22 | fi 23 | 24 | install_network_policy_helm 25 | 26 | echo "Check aws-node daemonset status" 27 | kubectl rollout status ds/aws-node -n kube-system --timeout=300s -------------------------------------------------------------------------------- /test/agent/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG golang_image 2 | 3 | FROM $golang_image as builder 4 | 5 | WORKDIR /workspace 6 | ENV GOPROXY direct 7 | 8 | COPY go.mod go.mod 9 | 10 | COPY go.sum go.sum 11 | 12 | RUN go mod download 13 | 14 | COPY cmd cmd 15 | 16 | # Package all testing binaries into one docker file 17 | # which can be used for different test scenarios 18 | RUN CGO_ENABLED=0 GOOS=linux GO111MODULE=on go build \ 19 | -a -o check-bpf-cleanup-agent cmd/check-bpf-cleanup-agent/main.go 20 | 21 | FROM public.ecr.aws/eks-distro-build-tooling/eks-distro-minimal-base-iptables:latest.2 22 | 23 | WORKDIR / 24 | COPY --from=builder /workspace/ . 25 | -------------------------------------------------------------------------------- /test/agent/README.md: -------------------------------------------------------------------------------- 1 | ## Test Agent 2 | 3 | This test agent package contains binaries to run the e2e tests 4 | 5 | ### check-bpf-cleanup-agent 6 | 7 | This agent mounts the host file system and checks if the files under `program` and `maps` are cleaned up after finishing the tests 8 | -------------------------------------------------------------------------------- /test/agent/cmd/check-bpf-cleanup-agent/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "strings" 8 | ) 9 | 10 | const baseDir = "/tmp" 11 | const mapsPath = "/sys/fs/bpf/globals/aws/maps" 12 | const programsPath = "/sys/fs/bpf/globals/aws/programs" 13 | const coreDnsPrefix = "coredns" 14 | 15 | func getPodPrefix() string { 16 | podName := os.Getenv("NODE_NAME") 17 | if podName == "" { 18 | log.Println("NODE_NAME environment variable is not set. Cleanup pod filtering may not work correctly.") 19 | return "" 20 | } 21 | 22 | // Split by "." to isolate the first part 23 | parts := strings.SplitN(podName, ".", 2) // Get "ip-192-168-59-7" and ignore the rest 24 | if len(parts) < 2 { 25 | log.Printf("Unexpected node name format: %s", podName) 26 | return "" 27 | } 28 | 29 | // Replace the first "." 
with "_" to match the BPF map format 30 | prefix := strings.Replace(parts[0], ".", "_", 1) + "_" 31 | 32 | log.Printf("Using prefix to ignore cleanup pod maps/programs: %s", prefix) 33 | return prefix 34 | } 35 | 36 | func leakedMapsFound() error { 37 | 38 | if _, err := os.Stat(baseDir + mapsPath); os.IsNotExist(err) { 39 | log.Printf("Maps directory doesn't exist on the node") 40 | return nil 41 | } 42 | 43 | f, err := os.Open(baseDir + mapsPath) 44 | if err != nil { 45 | return err 46 | } 47 | defer f.Close() 48 | 49 | files, err := f.Readdir(0) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | for _, v := range files { 55 | if v.Name() != "global_aws_conntrack_map" && v.Name() != "global_policy_events" && !strings.HasPrefix(v.Name(), getPodPrefix()) && !strings.HasPrefix(v.Name(), coreDnsPrefix) { 56 | return fmt.Errorf("BPF Maps folder is not cleaned up (except conntrack, policy_events, coreDNS): %v", v.Name()) 57 | } 58 | } 59 | 60 | log.Printf("BPF Maps are cleaned up") 61 | return nil 62 | } 63 | 64 | func leakedProgsFound() error { 65 | 66 | if _, err := os.Stat(baseDir + programsPath); os.IsNotExist(err) { 67 | log.Printf("Programs directory doesn't exist on the node") 68 | return nil 69 | } 70 | 71 | f, err := os.Open(baseDir + programsPath) 72 | if err != nil { 73 | return err 74 | } 75 | defer f.Close() 76 | 77 | files, err := f.Readdir(0) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | for _, v := range files { 83 | progName := v.Name() 84 | // Ignore programs that belong to the cleanup pod 85 | if !strings.HasPrefix(progName, getPodPrefix()) && !strings.HasPrefix(progName, coreDnsPrefix) { 86 | return fmt.Errorf("BPF Programs folder is not cleaned up: %v", progName) 87 | } 88 | } 89 | 90 | log.Printf("BPF Programs are cleaned up") 91 | return nil 92 | } 93 | 94 | func main() { 95 | 96 | err := leakedMapsFound() 97 | if err != nil { 98 | log.Fatal(err) 99 | } 100 | 101 | err = leakedProgsFound() 102 | if err != nil { 103 | log.Fatal(err) 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /test/agent/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/aws/aws-network-policy-agent/test/agent 2 | 3 | go 1.20 -------------------------------------------------------------------------------- /test/agent/go.sum: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-network-policy-agent/92a9c0f9048b30a4f118df677ba99e0f15e71dc2/test/agent/go.sum -------------------------------------------------------------------------------- /test/framework/framework.go: -------------------------------------------------------------------------------- 1 | package framework 2 | 3 | import ( 4 | "github.com/aws/aws-network-policy-agent/test/framework/resources/k8s/deployment" 5 | "github.com/aws/aws-network-policy-agent/test/framework/resources/k8s/namespace" 6 | "github.com/aws/aws-network-policy-agent/test/framework/resources/k8s/networkpolicy" 7 | "github.com/aws/aws-network-policy-agent/test/framework/resources/k8s/pod" 8 | "github.com/aws/aws-network-policy-agent/test/framework/resources/k8s/service" 9 | . 
"github.com/onsi/gomega" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/client-go/kubernetes" 12 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 13 | "k8s.io/client-go/tools/clientcmd" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | ) 16 | 17 | type Framework struct { 18 | Options Options 19 | K8sClient client.Client 20 | DeploymentManager deployment.Manager 21 | NamespaceManager namespace.Manager 22 | ServiceManager service.Manager 23 | PodManager pod.Manager 24 | NetworkPolicyManager networkpolicy.Manager 25 | } 26 | 27 | func New(options Options) *Framework { 28 | err := options.Validate() 29 | Expect(err).NotTo(HaveOccurred()) 30 | 31 | config, err := clientcmd.BuildConfigFromFlags("", options.KubeConfig) 32 | Expect(err).NotTo(HaveOccurred()) 33 | 34 | clientset, err := kubernetes.NewForConfig(config) 35 | Expect(err).NotTo(HaveOccurred()) 36 | 37 | k8sSchema := runtime.NewScheme() 38 | clientgoscheme.AddToScheme(k8sSchema) 39 | 40 | k8sClient, err := client.New(config, client.Options{Scheme: k8sSchema}) 41 | Expect(err).NotTo(HaveOccurred()) 42 | 43 | return &Framework{ 44 | K8sClient: k8sClient, 45 | DeploymentManager: deployment.NewManager(k8sClient), 46 | NamespaceManager: namespace.NewManager(k8sClient), 47 | PodManager: pod.NewManager(k8sClient, clientset), 48 | NetworkPolicyManager: networkpolicy.NewManager(k8sClient), 49 | ServiceManager: service.NewManager(k8sClient), 50 | Options: options, 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /test/framework/manifest/container.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | "fmt" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | type Container struct { 10 | name string 11 | image string 12 | imagePullPolicy v1.PullPolicy 13 | command []string 14 | args []string 15 | containerPorts []v1.ContainerPort 16 | resources v1.ResourceRequirements 17 | imageRepository string 18 | volumeMount []v1.VolumeMount 19 | } 20 | 21 | func NewBusyBoxContainerBuilder() *Container { 22 | return &Container{ 23 | name: "busybox", 24 | image: "e2e-test-images/busybox:1.29-4", 25 | imagePullPolicy: v1.PullIfNotPresent, 26 | command: []string{"sleep", "3600"}, 27 | args: []string{}, 28 | volumeMount: []v1.VolumeMount{}, 29 | } 30 | } 31 | 32 | func NewAgnHostContainerBuilder() *Container { 33 | return &Container{ 34 | name: "agnhost", 35 | image: "e2e-test-images/agnhost:2.45", 36 | command: []string{"/bin/sh", "-c"}, 37 | imagePullPolicy: v1.PullIfNotPresent, 38 | } 39 | } 40 | 41 | func (c *Container) Name(name string) *Container { 42 | c.name = name 43 | return c 44 | } 45 | 46 | func (c *Container) Image(image string) *Container { 47 | c.image = image 48 | return c 49 | } 50 | 51 | func (c *Container) ImagePullPolicy(policy v1.PullPolicy) *Container { 52 | c.imagePullPolicy = policy 53 | return c 54 | } 55 | 56 | func (c *Container) Command(cmd []string) *Container { 57 | c.command = cmd 58 | return c 59 | } 60 | 61 | func (c *Container) Args(arg []string) *Container { 62 | c.args = arg 63 | return c 64 | } 65 | 66 | func (c *Container) AddContainerPort(containerPort v1.ContainerPort) *Container { 67 | c.containerPorts = append(c.containerPorts, containerPort) 68 | return c 69 | } 70 | 71 | func (c *Container) Resources(resource v1.ResourceRequirements) *Container { 72 | c.resources = resource 73 | return c 74 | } 75 | 76 | func (c *Container) ImageRepository(imageRepository string) *Container { 77 | 
c.imageRepository = fmt.Sprintf("%s/", imageRepository) 78 | return c 79 | } 80 | 81 | func (c *Container) AddVolumeMount(volumeMount v1.VolumeMount) *Container { 82 | c.volumeMount = append(c.volumeMount, volumeMount) 83 | return c 84 | } 85 | 86 | func (w *Container) Build() v1.Container { 87 | return v1.Container{ 88 | Name: w.name, 89 | Image: fmt.Sprintf("%s%s", w.imageRepository, w.image), 90 | Command: w.command, 91 | Args: w.args, 92 | ImagePullPolicy: w.imagePullPolicy, 93 | Ports: w.containerPorts, 94 | Resources: w.resources, 95 | VolumeMounts: w.volumeMount, 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /test/framework/manifest/deployment.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws" 5 | v1 "k8s.io/api/apps/v1" 6 | corev1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | type DeploymentBuilder struct { 11 | namespace string 12 | name string 13 | replicas int 14 | os string 15 | container corev1.Container 16 | labels map[string]string 17 | terminationGracePeriod int 18 | nodeSelector map[string]string 19 | volume []corev1.Volume 20 | } 21 | 22 | func NewDefaultDeploymentBuilder() *DeploymentBuilder { 23 | return &DeploymentBuilder{ 24 | namespace: "default", 25 | name: "deployment", 26 | replicas: 2, 27 | nodeSelector: map[string]string{"kubernetes.io/os": "linux"}, 28 | os: "linux", 29 | container: NewBusyBoxContainerBuilder().Build(), 30 | labels: map[string]string{}, 31 | terminationGracePeriod: 0, 32 | volume: []corev1.Volume{}, 33 | } 34 | } 35 | 36 | func (d *DeploymentBuilder) Namespace(namespace string) *DeploymentBuilder { 37 | d.namespace = namespace 38 | return d 39 | } 40 | 41 | func (d *DeploymentBuilder) TerminationGracePeriod(tg int) *DeploymentBuilder { 42 | d.terminationGracePeriod = tg 43 | return d 44 | } 45 | 46 | func (d *DeploymentBuilder) Name(name string) *DeploymentBuilder { 47 | d.name = name 48 | return d 49 | } 50 | 51 | func (d *DeploymentBuilder) Replicas(replicas int) *DeploymentBuilder { 52 | d.replicas = replicas 53 | return d 54 | } 55 | 56 | func (d *DeploymentBuilder) OS(os string) *DeploymentBuilder { 57 | d.os = os 58 | return d 59 | } 60 | 61 | func (d *DeploymentBuilder) Container(container corev1.Container) *DeploymentBuilder { 62 | d.container = container 63 | return d 64 | } 65 | 66 | func (d *DeploymentBuilder) AddLabel(labelKey string, labelValue string) *DeploymentBuilder { 67 | d.labels[labelKey] = labelValue 68 | return d 69 | } 70 | 71 | func (d *DeploymentBuilder) NodeSelector(selector map[string]string) *DeploymentBuilder { 72 | d.nodeSelector = selector 73 | return d 74 | } 75 | 76 | func (d *DeploymentBuilder) AddVolume(volume corev1.Volume) *DeploymentBuilder { 77 | d.volume = append(d.volume, volume) 78 | return d 79 | } 80 | 81 | func (d *DeploymentBuilder) Build() *v1.Deployment { 82 | return &v1.Deployment{ 83 | ObjectMeta: metav1.ObjectMeta{ 84 | Name: d.name, 85 | Namespace: d.namespace, 86 | }, 87 | Spec: v1.DeploymentSpec{ 88 | Replicas: aws.Int32(int32(d.replicas)), 89 | Selector: &metav1.LabelSelector{ 90 | MatchLabels: d.labels, 91 | }, 92 | Template: corev1.PodTemplateSpec{ 93 | ObjectMeta: metav1.ObjectMeta{ 94 | Labels: d.labels, 95 | }, 96 | Spec: corev1.PodSpec{ 97 | Containers: []corev1.Container{d.container}, 98 | NodeSelector: d.nodeSelector, 99 | TerminationGracePeriodSeconds: 
aws.Int64(int64(d.terminationGracePeriod)), 100 | Volumes: d.volume, 101 | }, 102 | }, 103 | }, 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /test/framework/manifest/networkpolicy.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | network "k8s.io/api/networking/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | type NetworkPolicyBuilder struct { 9 | namespace string 10 | name string 11 | podSelector map[string]string 12 | egressRules []network.NetworkPolicyEgressRule 13 | ingressRules []network.NetworkPolicyIngressRule 14 | ingress bool 15 | egress bool 16 | } 17 | 18 | func (n *NetworkPolicyBuilder) Build() *network.NetworkPolicy { 19 | 20 | netpol := &network.NetworkPolicy{ 21 | ObjectMeta: metav1.ObjectMeta{ 22 | Name: n.name, 23 | Namespace: n.namespace, 24 | }, 25 | Spec: network.NetworkPolicySpec{ 26 | PodSelector: metav1.LabelSelector{ 27 | MatchLabels: n.podSelector, 28 | }, 29 | }, 30 | } 31 | 32 | if len(n.ingressRules) > 0 { 33 | n.ingress = true 34 | netpol.Spec.Ingress = n.ingressRules 35 | } 36 | 37 | if len(n.egressRules) > 0 { 38 | n.egress = true 39 | netpol.Spec.Egress = n.egressRules 40 | } 41 | 42 | if n.egress { 43 | netpol.Spec.PolicyTypes = append(netpol.Spec.PolicyTypes, network.PolicyTypeEgress) 44 | } 45 | 46 | if n.ingress { 47 | netpol.Spec.PolicyTypes = append(netpol.Spec.PolicyTypes, network.PolicyTypeIngress) 48 | } 49 | 50 | return netpol 51 | } 52 | 53 | func NewNetworkPolicyBuilder() *NetworkPolicyBuilder { 54 | return &NetworkPolicyBuilder{ 55 | namespace: "default", 56 | name: "default-network-policy", 57 | podSelector: map[string]string{}, 58 | egressRules: []network.NetworkPolicyEgressRule{}, 59 | ingressRules: []network.NetworkPolicyIngressRule{}, 60 | } 61 | } 62 | 63 | func (n *NetworkPolicyBuilder) Name(name string) *NetworkPolicyBuilder { 64 | n.name = name 65 | return n 66 | } 67 | 68 | func (n *NetworkPolicyBuilder) Namespace(namespace string) *NetworkPolicyBuilder { 69 | n.namespace = namespace 70 | return n 71 | } 72 | 73 | func (n *NetworkPolicyBuilder) PodSelector(labelKey string, labelValue string) *NetworkPolicyBuilder { 74 | n.podSelector[labelKey] = labelValue 75 | return n 76 | } 77 | 78 | func (n *NetworkPolicyBuilder) AddEgressRule(egressRule network.NetworkPolicyEgressRule) *NetworkPolicyBuilder { 79 | n.egressRules = append(n.egressRules, egressRule) 80 | return n 81 | } 82 | 83 | func (n *NetworkPolicyBuilder) AddIngressRule(ingressRule network.NetworkPolicyIngressRule) *NetworkPolicyBuilder { 84 | n.ingressRules = append(n.ingressRules, ingressRule) 85 | return n 86 | } 87 | 88 | func (n *NetworkPolicyBuilder) SetPolicyType(ingress bool, egress bool) *NetworkPolicyBuilder { 89 | n.ingress = ingress 90 | n.egress = egress 91 | return n 92 | } 93 | -------------------------------------------------------------------------------- /test/framework/manifest/networkpolicyrules.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | network "k8s.io/api/networking/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "k8s.io/apimachinery/pkg/util/intstr" 8 | ) 9 | 10 | type IngressRuleBuilder struct { 11 | From []network.NetworkPolicyPeer 12 | Port []network.NetworkPolicyPort 13 | } 14 | 15 | type EgressRuleBuilder struct { 16 | To []network.NetworkPolicyPeer 17 | Port 
[]network.NetworkPolicyPort 18 | } 19 | 20 | func NewIngressRuleBuilder() *IngressRuleBuilder { 21 | return &IngressRuleBuilder{ 22 | From: []network.NetworkPolicyPeer{}, 23 | Port: []network.NetworkPolicyPort{}, 24 | } 25 | } 26 | 27 | func (ir *IngressRuleBuilder) Build() network.NetworkPolicyIngressRule { 28 | obj := network.NetworkPolicyIngressRule{} 29 | if len(ir.From) > 0 { 30 | obj.From = ir.From 31 | } 32 | if len(ir.Port) > 0 { 33 | obj.Ports = ir.Port 34 | } 35 | return obj 36 | } 37 | 38 | func (ir *IngressRuleBuilder) AddPeer(nsSelector map[string]string, podSelector map[string]string, acceptCIDR string) *IngressRuleBuilder { 39 | peerObj := network.NetworkPolicyPeer{} 40 | 41 | if podSelector != nil { 42 | peerObj.PodSelector = &metav1.LabelSelector{ 43 | MatchLabels: podSelector, 44 | } 45 | } 46 | 47 | if nsSelector != nil { 48 | peerObj.NamespaceSelector = &metav1.LabelSelector{ 49 | MatchLabels: nsSelector, 50 | } 51 | } 52 | 53 | if acceptCIDR != "" { 54 | peerObj.IPBlock = &network.IPBlock{ 55 | CIDR: acceptCIDR, 56 | } 57 | } 58 | ir.From = append(ir.From, peerObj) 59 | return ir 60 | } 61 | 62 | func (ir *IngressRuleBuilder) AddPort(port int, protocol v1.Protocol) *IngressRuleBuilder { 63 | portObj := network.NetworkPolicyPort{ 64 | Protocol: &protocol, 65 | } 66 | 67 | if port != -1 { 68 | val := intstr.FromInt(port) 69 | portObj.Port = &val 70 | } 71 | 72 | ir.Port = append(ir.Port, portObj) 73 | return ir 74 | } 75 | 76 | func NewEgressRuleBuilder() *EgressRuleBuilder { 77 | return &EgressRuleBuilder{ 78 | To: []network.NetworkPolicyPeer{}, 79 | Port: []network.NetworkPolicyPort{}, 80 | } 81 | } 82 | 83 | func (er *EgressRuleBuilder) Build() network.NetworkPolicyEgressRule { 84 | obj := network.NetworkPolicyEgressRule{} 85 | if len(er.To) > 0 { 86 | obj.To = er.To 87 | } 88 | if len(er.Port) > 0 { 89 | obj.Ports = er.Port 90 | } 91 | return obj 92 | } 93 | 94 | func (er *EgressRuleBuilder) AddPeer(nsSelector map[string]string, podSelector map[string]string, acceptCIDR string) *EgressRuleBuilder { 95 | peerObj := network.NetworkPolicyPeer{} 96 | if podSelector != nil { 97 | peerObj.PodSelector = &metav1.LabelSelector{ 98 | MatchLabels: podSelector, 99 | } 100 | } 101 | if nsSelector != nil { 102 | peerObj.NamespaceSelector = &metav1.LabelSelector{ 103 | MatchLabels: nsSelector, 104 | } 105 | } 106 | 107 | if acceptCIDR != "" { 108 | peerObj.IPBlock = &network.IPBlock{ 109 | CIDR: acceptCIDR, 110 | } 111 | } 112 | 113 | er.To = append(er.To, peerObj) 114 | return er 115 | } 116 | 117 | func (er *EgressRuleBuilder) AddPort(port int, protocol v1.Protocol) *EgressRuleBuilder { 118 | portObj := network.NetworkPolicyPort{ 119 | Protocol: &protocol, 120 | } 121 | 122 | if port != -1 { 123 | val := intstr.FromInt(port) 124 | portObj.Port = &val 125 | } 126 | 127 | er.Port = append(er.Port, portObj) 128 | return er 129 | } 130 | -------------------------------------------------------------------------------- /test/framework/manifest/pod.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws" 5 | v1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | type PodBuilder struct { 10 | namespace string 11 | name string 12 | serviceAccountName string 13 | container v1.Container 14 | os string 15 | labels map[string]string 16 | annotations map[string]string 17 | terminationGracePeriod int 18 | restartPolicy v1.RestartPolicy 19 | nodeName string 20 | 
volume []v1.Volume 21 | } 22 | 23 | func (p *PodBuilder) Build() *v1.Pod { 24 | return &v1.Pod{ 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: p.name, 27 | Namespace: p.namespace, 28 | Labels: p.labels, 29 | Annotations: p.annotations, 30 | }, 31 | Spec: v1.PodSpec{ 32 | NodeName: p.nodeName, 33 | ServiceAccountName: p.serviceAccountName, 34 | Containers: []v1.Container{p.container}, 35 | NodeSelector: map[string]string{"kubernetes.io/os": p.os}, 36 | TerminationGracePeriodSeconds: aws.Int64(int64(p.terminationGracePeriod)), 37 | RestartPolicy: p.restartPolicy, 38 | Volumes: p.volume, 39 | }, 40 | } 41 | } 42 | 43 | func NewDefaultPodBuilder() *PodBuilder { 44 | return &PodBuilder{ 45 | namespace: "default", 46 | name: "pod", 47 | container: NewBusyBoxContainerBuilder().Build(), 48 | os: "linux", 49 | labels: map[string]string{}, 50 | annotations: map[string]string{}, 51 | terminationGracePeriod: 10, 52 | restartPolicy: v1.RestartPolicyNever, 53 | volume: []v1.Volume{}, 54 | } 55 | } 56 | 57 | func (p *PodBuilder) Namespace(namespace string) *PodBuilder { 58 | p.namespace = namespace 59 | return p 60 | } 61 | 62 | func (p *PodBuilder) Name(name string) *PodBuilder { 63 | p.name = name 64 | return p 65 | } 66 | 67 | func (p *PodBuilder) Container(container v1.Container) *PodBuilder { 68 | p.container = container 69 | return p 70 | } 71 | 72 | func (p *PodBuilder) OS(os string) *PodBuilder { 73 | p.os = os 74 | return p 75 | } 76 | 77 | func (p *PodBuilder) RestartPolicy(policy v1.RestartPolicy) *PodBuilder { 78 | p.restartPolicy = policy 79 | return p 80 | } 81 | 82 | func (p *PodBuilder) AddLabel(labelkey string, labelValue string) *PodBuilder { 83 | p.labels[labelkey] = labelValue 84 | return p 85 | } 86 | 87 | func (p *PodBuilder) Annotations(annotations map[string]string) *PodBuilder { 88 | p.annotations = annotations 89 | return p 90 | } 91 | 92 | func (p *PodBuilder) ServiceAccount(serviceAccountName string) *PodBuilder { 93 | p.serviceAccountName = serviceAccountName 94 | return p 95 | } 96 | 97 | func (p *PodBuilder) TerminationGracePeriod(terminationGracePeriod int) *PodBuilder { 98 | p.terminationGracePeriod = terminationGracePeriod 99 | return p 100 | } 101 | 102 | func (p *PodBuilder) NodeName(nodeName string) *PodBuilder { 103 | p.nodeName = nodeName 104 | return p 105 | } 106 | 107 | func (p *PodBuilder) AddVolume(volume v1.Volume) *PodBuilder { 108 | p.volume = append(p.volume, volume) 109 | return p 110 | } 111 | -------------------------------------------------------------------------------- /test/framework/manifest/service.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "k8s.io/apimachinery/pkg/util/intstr" 7 | ) 8 | 9 | type ServiceBuilder struct { 10 | name string 11 | namespace string 12 | port int32 13 | nodePort int32 14 | protocol v1.Protocol 15 | selector map[string]string 16 | serviceType v1.ServiceType 17 | } 18 | 19 | func NewHTTPService() *ServiceBuilder { 20 | return &ServiceBuilder{ 21 | port: 80, 22 | protocol: v1.ProtocolTCP, 23 | selector: map[string]string{}, 24 | serviceType: v1.ServiceTypeClusterIP, 25 | } 26 | } 27 | 28 | func (s *ServiceBuilder) Name(name string) *ServiceBuilder { 29 | s.name = name 30 | return s 31 | } 32 | 33 | func (s *ServiceBuilder) Namespace(namespace string) *ServiceBuilder { 34 | s.namespace = namespace 35 | return s 36 | } 37 | 38 | func (s *ServiceBuilder) Port(port 
int32) *ServiceBuilder { 39 | s.port = port 40 | return s 41 | } 42 | 43 | func (s *ServiceBuilder) NodePort(nodePort int32) *ServiceBuilder { 44 | s.nodePort = nodePort 45 | return s 46 | } 47 | 48 | func (s *ServiceBuilder) Protocol(protocol v1.Protocol) *ServiceBuilder { 49 | s.protocol = protocol 50 | return s 51 | } 52 | 53 | func (s *ServiceBuilder) Selector(labelKey string, labelVal string) *ServiceBuilder { 54 | s.selector[labelKey] = labelVal 55 | return s 56 | } 57 | 58 | func (s *ServiceBuilder) ServiceType(serviceType v1.ServiceType) *ServiceBuilder { 59 | s.serviceType = serviceType 60 | return s 61 | } 62 | 63 | func (s *ServiceBuilder) Build() *v1.Service { 64 | return &v1.Service{ 65 | ObjectMeta: metaV1.ObjectMeta{ 66 | Name: s.name, 67 | Namespace: s.namespace, 68 | }, 69 | Spec: v1.ServiceSpec{ 70 | Ports: []v1.ServicePort{{ 71 | Name: "", 72 | Protocol: v1.ProtocolTCP, 73 | Port: s.port, 74 | TargetPort: intstr.IntOrString{IntVal: s.port}, 75 | NodePort: s.nodePort, 76 | }}, 77 | Selector: s.selector, 78 | Type: s.serviceType, 79 | }, 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /test/framework/options.go: -------------------------------------------------------------------------------- 1 | package framework 2 | 3 | import ( 4 | "flag" 5 | 6 | "github.com/pkg/errors" 7 | "k8s.io/client-go/tools/clientcmd" 8 | ) 9 | 10 | var GlobalOptions Options 11 | 12 | func init() { 13 | GlobalOptions.BindFlags() 14 | } 15 | 16 | type Options struct { 17 | AWSRegion string 18 | KubeConfig string 19 | ClusterName string 20 | NgNameLabelKey string 21 | NgNameLabelVal string 22 | EKSEndpoint string 23 | TestImageRegistry string 24 | IpFamily string 25 | } 26 | 27 | func (options *Options) BindFlags() { 28 | flag.StringVar(&options.KubeConfig, "cluster-kubeconfig", "", "Path to kubeconfig containing embedded authinfo (required)") 29 | flag.StringVar(&options.AWSRegion, "aws-region", "", `AWS Region for the kubernetes cluster`) 30 | flag.StringVar(&options.ClusterName, "cluster-name", "", `Kubernetes cluster name (required)`) 31 | flag.StringVar(&options.NgNameLabelKey, "ng-name-label-key", "eks.amazonaws.com/nodegroup", "label key used to identify nodegroup name") 32 | flag.StringVar(&options.NgNameLabelVal, "ng-name-label-val", "", "label value with the nodegroup name") 33 | flag.StringVar(&options.EKSEndpoint, "eks-endpoint", "", "optional eks api server endpoint") 34 | flag.StringVar(&options.TestImageRegistry, "test-image-registry", "617930562442.dkr.ecr.us-west-2.amazonaws.com", `AWS registry where the e2e test images are stored`) 35 | flag.StringVar(&options.IpFamily, "ip-family", "IPv4", `IP family for the cluster`) 36 | } 37 | 38 | func (options *Options) Validate() error { 39 | if len(options.KubeConfig) == 0 { 40 | return errors.Errorf("%s must be set!", clientcmd.RecommendedConfigPathFlag) 41 | } 42 | if len(options.ClusterName) == 0 { 43 | return errors.Errorf("%s must be set!", "cluster-name") 44 | } 45 | if len(options.TestImageRegistry) == 0 { 46 | return errors.Errorf("%s must be set!", "test-image-registry") 47 | } 48 | return nil 49 | } 50 | -------------------------------------------------------------------------------- /test/framework/resources/k8s/deployment/resource.go: -------------------------------------------------------------------------------- 1 | package deployment 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/aws/aws-network-policy-agent/test/framework/utils" 8 | appsv1 
"k8s.io/api/apps/v1" 9 | "k8s.io/apimachinery/pkg/types" 10 | "k8s.io/apimachinery/pkg/util/wait" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | type Manager interface { 15 | CreateAndWaitUntilDeploymentReady(ctx context.Context, dp *appsv1.Deployment) (*appsv1.Deployment, error) 16 | DeleteAndWaitUntilDeploymentDeleted(ctx context.Context, dp *appsv1.Deployment) error 17 | ScaleDeploymentAndWaitTillReady(ctx context.Context, namespace string, name string, replicas int32) error 18 | UpdateDeploymentImage(ctx context.Context, namespace string, name string, imageReplacement string) error 19 | GetDeployment(ctx context.Context, namespace string, name string) (*appsv1.Deployment, error) 20 | PatchDeployment(ctx context.Context, newDeployment *appsv1.Deployment, oldDeployment *appsv1.Deployment) error 21 | } 22 | 23 | func NewManager(k8sClient client.Client) Manager { 24 | return &defaultManager{k8sClient: k8sClient} 25 | } 26 | 27 | type defaultManager struct { 28 | k8sClient client.Client 29 | } 30 | 31 | func (m *defaultManager) CreateAndWaitUntilDeploymentReady(ctx context.Context, dp *appsv1.Deployment) (*appsv1.Deployment, error) { 32 | err := m.k8sClient.Create(ctx, dp) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | observedDP := &appsv1.Deployment{} 38 | timeout := 5 * time.Minute 39 | return observedDP, wait.PollUntilContextTimeout(ctx, utils.PollIntervalShort, timeout, true, func(context.Context) (bool, error) { 40 | if err := m.k8sClient.Get(ctx, utils.NamespacedName(dp), observedDP); err != nil { 41 | return false, err 42 | } 43 | if observedDP.Status.UpdatedReplicas == (*dp.Spec.Replicas) && 44 | observedDP.Status.Replicas == (*dp.Spec.Replicas) && 45 | observedDP.Status.AvailableReplicas == (*dp.Spec.Replicas) && 46 | observedDP.Status.ObservedGeneration >= dp.Generation { 47 | return true, nil 48 | } 49 | return false, nil 50 | }) 51 | } 52 | 53 | func (m *defaultManager) DeleteAndWaitUntilDeploymentDeleted(ctx context.Context, dp *appsv1.Deployment) error { 54 | err := m.k8sClient.Delete(ctx, dp) 55 | if err != nil { 56 | return err 57 | } 58 | return nil 59 | } 60 | 61 | func (m *defaultManager) ScaleDeploymentAndWaitTillReady(ctx context.Context, namespace string, name string, replicas int32) error { 62 | deployment, err := m.GetDeployment(ctx, namespace, name) 63 | if err != nil { 64 | return err 65 | } 66 | deploymentCopy := deployment.DeepCopy() 67 | deploymentCopy.Spec.Replicas = &replicas 68 | 69 | err = m.k8sClient.Patch(ctx, deploymentCopy, client.MergeFrom(deployment)) 70 | if err != nil { 71 | return err 72 | } 73 | return nil 74 | } 75 | 76 | func (m *defaultManager) UpdateDeploymentImage(ctx context.Context, namespace string, name string, imageReplacement string) error { 77 | deployment, err := m.GetDeployment(ctx, namespace, name) 78 | 79 | if err != nil { 80 | return err 81 | } 82 | deploymentCopy := deployment.DeepCopy() 83 | deploymentCopy.Spec.Template.Spec.Containers[0].Image = imageReplacement 84 | if err = m.k8sClient.Patch(ctx, deploymentCopy, client.MergeFrom(deployment)); err != nil { 85 | return err 86 | } 87 | 88 | return nil 89 | } 90 | 91 | func (m *defaultManager) GetDeployment(ctx context.Context, namespace string, name string) (*appsv1.Deployment, error) { 92 | deployment := &appsv1.Deployment{} 93 | namespacedName := types.NamespacedName{Namespace: namespace, Name: name} 94 | err := m.k8sClient.Get(ctx, namespacedName, deployment) 95 | if err != nil { 96 | return nil, err 97 | } 98 | return deployment, nil 99 | } 100 
| 101 | func (m *defaultManager) PatchDeployment(ctx context.Context, newDeployment *appsv1.Deployment, oldDeployment *appsv1.Deployment) error { 102 | return m.k8sClient.Patch(ctx, newDeployment, client.MergeFrom(oldDeployment)) 103 | } 104 | -------------------------------------------------------------------------------- /test/framework/resources/k8s/namespace/resource.go: -------------------------------------------------------------------------------- 1 | package namespace 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-network-policy-agent/test/framework/utils" 7 | "k8s.io/apimachinery/pkg/api/errors" 8 | "k8s.io/apimachinery/pkg/util/wait" 9 | 10 | v1 "k8s.io/api/core/v1" 11 | metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | type Manager interface { 16 | CreateNamespace(ctx context.Context, namespace string) error 17 | DeleteAndWaitTillNamespaceDeleted(ctx context.Context, namespace string) error 18 | } 19 | 20 | func NewManager(k8sClient client.Client) Manager { 21 | return &defaultManager{k8sClient: k8sClient} 22 | } 23 | 24 | type defaultManager struct { 25 | k8sClient client.Client 26 | } 27 | 28 | func (m *defaultManager) CreateNamespace(ctx context.Context, namespace string) error { 29 | return m.k8sClient.Create(ctx, &v1.Namespace{ObjectMeta: metaV1.ObjectMeta{Name: namespace}}) 30 | } 31 | 32 | func (m *defaultManager) DeleteAndWaitTillNamespaceDeleted(ctx context.Context, namespace string) error { 33 | 34 | namespaceObj := &v1.Namespace{ObjectMeta: metaV1.ObjectMeta{Name: namespace}} 35 | err := m.k8sClient.Delete(ctx, namespaceObj) 36 | if err != nil { 37 | return client.IgnoreNotFound(err) 38 | } 39 | 40 | observedNamespace := &v1.Namespace{} 41 | return wait.PollUntilContextCancel(ctx, utils.PollIntervalShort, true, func(context.Context) (done bool, err error) { 42 | err = m.k8sClient.Get(ctx, utils.NamespacedName(namespaceObj), observedNamespace) 43 | if errors.IsNotFound(err) { 44 | return true, nil 45 | } 46 | return false, err 47 | }) 48 | } 49 | -------------------------------------------------------------------------------- /test/framework/resources/k8s/networkpolicy/resource.go: -------------------------------------------------------------------------------- 1 | package networkpolicy 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-network-policy-agent/test/framework/utils" 7 | network "k8s.io/api/networking/v1" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/util/wait" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | type Manager interface { 14 | CreateNetworkPolicy(ctx context.Context, networkpolicy *network.NetworkPolicy) error 15 | DeleteNetworkPolicy(ctx context.Context, networkpolicy *network.NetworkPolicy) error 16 | } 17 | 18 | func NewManager(k8sClient client.Client) Manager { 19 | return &defaultManager{k8sClient: k8sClient} 20 | } 21 | 22 | type defaultManager struct { 23 | k8sClient client.Client 24 | } 25 | 26 | func (m *defaultManager) CreateNetworkPolicy(ctx context.Context, networkpolicy *network.NetworkPolicy) error { 27 | return m.k8sClient.Create(ctx, networkpolicy) 28 | } 29 | 30 | func (m *defaultManager) DeleteNetworkPolicy(ctx context.Context, networkpolicy *network.NetworkPolicy) error { 31 | 32 | err := m.k8sClient.Delete(ctx, networkpolicy) 33 | if err != nil { 34 | return client.IgnoreNotFound(err) 35 | } 36 | 37 | netpol := &network.NetworkPolicy{} 38 | return wait.PollUntilContextCancel(ctx, utils.PollIntervalShort, 
true, func(context.Context) (done bool, err error) { 39 | err = m.k8sClient.Get(ctx, utils.NamespacedName(networkpolicy), netpol) 40 | if errors.IsNotFound(err) { 41 | return true, nil 42 | } 43 | return false, err 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /test/framework/resources/k8s/pod/resource.go: -------------------------------------------------------------------------------- 1 | package pod 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io" 8 | "time" 9 | 10 | "github.com/aws/aws-network-policy-agent/test/framework/utils" 11 | v1 "k8s.io/api/core/v1" 12 | "k8s.io/apimachinery/pkg/api/errors" 13 | "k8s.io/apimachinery/pkg/labels" 14 | "k8s.io/apimachinery/pkg/util/wait" 15 | "k8s.io/client-go/kubernetes" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | ) 18 | 19 | type Manager interface { 20 | CreateAndWaitTillPodIsRunning(context context.Context, pod *v1.Pod, timeOut time.Duration) (*v1.Pod, error) 21 | CreateAndWaitTillPodIsCompleted(context context.Context, pod *v1.Pod) (*v1.Pod, error) 22 | DeleteAndWaitTillPodIsDeleted(context context.Context, pod *v1.Pod) error 23 | GetPodsWithLabel(context context.Context, namespace string, labelKey string, labelValue string) ([]v1.Pod, error) 24 | PatchPod(context context.Context, oldPod *v1.Pod, newPod *v1.Pod) error 25 | PodLogs(namespace string, name string) (string, error) 26 | } 27 | 28 | type defaultManager struct { 29 | k8sClient client.Client 30 | k8sClientSet *kubernetes.Clientset 31 | } 32 | 33 | func NewManager(k8sClient client.Client, k8sclientSet *kubernetes.Clientset) Manager { 34 | return &defaultManager{ 35 | k8sClient: k8sClient, 36 | k8sClientSet: k8sclientSet, 37 | } 38 | } 39 | 40 | func (d *defaultManager) CreateAndWaitTillPodIsRunning(ctx context.Context, pod *v1.Pod, timeOut time.Duration) (*v1.Pod, error) { 41 | err := d.k8sClient.Create(ctx, pod) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | updatedPod := &v1.Pod{} 47 | err = wait.PollUntilContextTimeout(ctx, utils.PollIntervalShort, timeOut, true, func(context.Context) (done bool, err error) { 48 | err = d.k8sClient.Get(ctx, utils.NamespacedName(pod), updatedPod) 49 | if err != nil { 50 | return true, err 51 | } 52 | return isPodReady(updatedPod), nil 53 | }) 54 | 55 | return updatedPod, err 56 | } 57 | 58 | func (d *defaultManager) CreateAndWaitTillPodIsCompleted(ctx context.Context, pod *v1.Pod) (*v1.Pod, error) { 59 | err := d.k8sClient.Create(ctx, pod) 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | updatedPod := &v1.Pod{} 65 | err = wait.PollUntilContextCancel(ctx, utils.PollIntervalShort, true, func(context.Context) (done bool, err error) { 66 | err = d.k8sClient.Get(ctx, utils.NamespacedName(pod), updatedPod) 67 | if err != nil { 68 | return true, err 69 | } 70 | if isPodCompleted(updatedPod) { 71 | return true, nil 72 | } 73 | if isPodFailed(updatedPod) { 74 | return true, fmt.Errorf("pod failed to start") 75 | } 76 | return false, nil 77 | }) 78 | 79 | return updatedPod, err 80 | } 81 | 82 | func (d *defaultManager) GetPodsWithLabel(context context.Context, namespace string, 83 | labelKey string, labelValue string) ([]v1.Pod, error) { 84 | 85 | podList := &v1.PodList{} 86 | err := d.k8sClient.List(context, podList, &client.ListOptions{ 87 | LabelSelector: labels.SelectorFromSet(labels.Set{labelKey: labelValue}), 88 | Namespace: namespace, 89 | }) 90 | 91 | return podList.Items, err 92 | } 93 | 94 | func (d 
*defaultManager) DeleteAndWaitTillPodIsDeleted(ctx context.Context, pod *v1.Pod) error { 95 | if err := d.k8sClient.Delete(ctx, pod); err != nil { 96 | return client.IgnoreNotFound(err) 97 | } 98 | 99 | observedPod := &v1.Pod{} 100 | return wait.PollUntilContextCancel(ctx, utils.PollIntervalShort, true, func(context.Context) (done bool, err error) { 101 | err = d.k8sClient.Get(ctx, client.ObjectKeyFromObject(pod), observedPod) 102 | if errors.IsNotFound(err) { 103 | return true, nil 104 | } 105 | return false, err 106 | }) 107 | } 108 | 109 | func (d *defaultManager) DeleteAllPodsForcefully(context context.Context, 110 | podLabelKey string, podLabelVal string) error { 111 | 112 | podList := &v1.PodList{} 113 | d.k8sClient.List(context, podList, &client.ListOptions{ 114 | LabelSelector: labels.SelectorFromSet(labels.Set{podLabelKey: podLabelVal}), 115 | }) 116 | 117 | if len(podList.Items) == 0 { 118 | return fmt.Errorf("no pods found with label %s:%s", podLabelKey, podLabelVal) 119 | } 120 | 121 | gracePeriod := int64(0) 122 | for _, pod := range podList.Items { 123 | err := d.k8sClient.Delete(context, &pod, &client.DeleteOptions{ 124 | GracePeriodSeconds: &gracePeriod, 125 | }) 126 | if err != nil { 127 | return err 128 | } 129 | } 130 | return nil 131 | } 132 | 133 | func (d *defaultManager) PatchPod(context context.Context, oldPod *v1.Pod, newPod *v1.Pod) error { 134 | return d.k8sClient.Patch(context, newPod, client.MergeFrom(oldPod)) 135 | } 136 | 137 | func isPodReady(pod *v1.Pod) bool { 138 | for _, condition := range pod.Status.Conditions { 139 | if condition.Status == v1.ConditionTrue && condition.Type == v1.PodReady { 140 | return true 141 | } 142 | } 143 | return false 144 | } 145 | 146 | func isPodCompleted(pod *v1.Pod) bool { 147 | return pod.Status.Phase == v1.PodSucceeded 148 | } 149 | 150 | func isPodFailed(pod *v1.Pod) bool { 151 | return pod.Status.Phase == v1.PodFailed 152 | } 153 | 154 | func (d *defaultManager) PodLogs(namespace string, name string) (string, error) { 155 | podLogOpts := v1.PodLogOptions{} 156 | req := d.k8sClientSet.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts) 157 | 158 | podLogs, err := req.Stream(context.Background()) 159 | if err != nil { 160 | return "error in opening stream", err 161 | } 162 | defer podLogs.Close() 163 | 164 | buf := new(bytes.Buffer) 165 | _, err = io.Copy(buf, podLogs) 166 | 167 | if err != nil { 168 | return "error in copy information from podLogs to buf", err 169 | } 170 | return buf.String(), nil 171 | } 172 | -------------------------------------------------------------------------------- /test/framework/resources/k8s/service/resource.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-network-policy-agent/test/framework/utils" 7 | v1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/types" 9 | "k8s.io/apimachinery/pkg/util/wait" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | type Manager interface { 14 | GetService(ctx context.Context, namespace string, name string) (*v1.Service, error) 15 | CreateService(ctx context.Context, service *v1.Service) (*v1.Service, error) 16 | DeleteService(ctx context.Context, service *v1.Service) error 17 | } 18 | 19 | type defaultManager struct { 20 | k8sClient client.Client 21 | } 22 | 23 | func NewManager(k8sClient client.Client) Manager { 24 | return &defaultManager{k8sClient: k8sClient} 25 | } 26 | 27 | func (s *defaultManager) GetService(ctx 
context.Context, namespace string, 28 | name string) (*v1.Service, error) { 29 | 30 | service := &v1.Service{} 31 | err := s.k8sClient.Get(ctx, types.NamespacedName{ 32 | Namespace: namespace, 33 | Name: name, 34 | }, service) 35 | 36 | return service, err 37 | } 38 | 39 | func (s *defaultManager) CreateService(ctx context.Context, service *v1.Service) (*v1.Service, error) { 40 | err := s.k8sClient.Create(ctx, service) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | observedService := &v1.Service{} 46 | return observedService, wait.PollUntil(utils.PollIntervalShort, func() (bool, error) { 47 | if err := s.k8sClient.Get(ctx, utils.NamespacedName(service), observedService); err != nil { 48 | return false, err 49 | } 50 | return true, nil 51 | }, ctx.Done()) 52 | } 53 | 54 | func (s *defaultManager) DeleteService(ctx context.Context, service *v1.Service) error { 55 | err := s.k8sClient.Delete(ctx, service) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | return nil 61 | } 62 | -------------------------------------------------------------------------------- /test/framework/utils/poll.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "time" 4 | 5 | const ( 6 | PollIntervalShort = 2 * time.Second 7 | ) 8 | -------------------------------------------------------------------------------- /test/framework/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | "k8s.io/apimachinery/pkg/types" 6 | ) 7 | 8 | // NamespacedName returns the namespaced name for k8s objects 9 | func NamespacedName(obj metav1.Object) types.NamespacedName { 10 | return types.NamespacedName{ 11 | Namespace: obj.GetNamespace(), 12 | Name: obj.GetName(), 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /test/integration/policy/default_allow_test.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/aws/aws-network-policy-agent/test/framework/manifest" 9 | . "github.com/onsi/ginkgo/v2" 10 | . "github.com/onsi/gomega" 11 | appsv1 "k8s.io/api/apps/v1" 12 | network "k8s.io/api/networking/v1" 13 | 14 | v1 "k8s.io/api/core/v1" 15 | ) 16 | 17 | var _ = Describe("Default Allow Mode Test Cases", func() { 18 | Context("When a pod is first launched and traffic starts flowing", func() { 19 | var clientPod *v1.Pod 20 | var podName = "clientpod" 21 | var delayAppDeployment *appsv1.Deployment 22 | var delayService *v1.Service 23 | var clientNetworkPolicy *network.NetworkPolicy 24 | 25 | BeforeEach(func() { 26 | By("Creating a delay server and client pod", func() { 27 | delayApp := manifest.NewBusyBoxContainerBuilder(). 28 | ImageRepository(fw.Options.TestImageRegistry). 29 | Command([]string{"/bin/sh", "-c"}). 30 | Args([]string{"while true; do { sleep 60; echo -e 'HTTP/1.1 200 OK\n\nResponse after 60 seconds'; } | nc -l -p 8080; done"}). 31 | Build() 32 | 33 | delayAppDeployment = manifest.NewDefaultDeploymentBuilder(). 34 | Name("delay-app"). 35 | Replicas(1). 36 | Namespace(namespace). 37 | AddLabel("app", "delay-app"). 38 | Container(delayApp). 39 | Build() 40 | 41 | _, err := fw.DeploymentManager.CreateAndWaitUntilDeploymentReady(ctx, delayAppDeployment) 42 | Expect(err).ToNot(HaveOccurred()) 43 | 44 | delayService = manifest.NewHTTPService(). 
45 | Name("delay-service"). 46 | Port(8080). 47 | Namespace(namespace). 48 | Selector("app", "delay-app"). 49 | Build() 50 | 51 | _, err = fw.ServiceManager.CreateService(ctx, delayService) 52 | Expect(err).ToNot(HaveOccurred()) 53 | 54 | clientApp := manifest.NewAgnHostContainerBuilder(). 55 | ImageRepository(fw.Options.TestImageRegistry). 56 | Args([]string{"while true; do wget http://delay-service.policy.svc.cluster.local:8080 --spider -T 90; if [ $? == 0 ]; then echo \"Success\"; else echo \"Fail\"; fi; sleep 60; done "}). 57 | Build() 58 | 59 | clientPod = manifest.NewDefaultPodBuilder(). 60 | Container(clientApp). 61 | Namespace(namespace). 62 | AddLabel("app", "client-app"). 63 | Name(podName). 64 | Build() 65 | 66 | _, err = fw.PodManager.CreateAndWaitTillPodIsRunning(ctx, clientPod, 2*time.Minute) 67 | Expect(err).ToNot(HaveOccurred()) 68 | 69 | }) 70 | }) 71 | 72 | It("by default should not have any drops", func() { 73 | By("Verify pod can reach the delay service", func() { 74 | time.Sleep(2 * time.Minute) 75 | logs, err := fw.PodManager.PodLogs(namespace, podName) 76 | Expect(err).ToNot(HaveOccurred()) 77 | err = processDefaultAllowLogs(logs, true, false) 78 | Expect(err).ToNot(HaveOccurred()) 79 | }) 80 | }) 81 | 82 | It("if egress connection is made before network policy is applied the response packet should be accepted", func() { 83 | By("Creating a network policy which only allows ingress from a different client", func() { 84 | 85 | // Sleep so that the first probe is made before block rules are applied on ingress 86 | time.Sleep(10 * time.Second) 87 | 88 | ingressPeer := manifest.NewIngressRuleBuilder(). 89 | AddPeer(nil, map[string]string{"app": "test-app"}, ""). 90 | AddPort(8080, v1.ProtocolTCP). 91 | Build() 92 | 93 | clientNetworkPolicy = manifest.NewNetworkPolicyBuilder(). 94 | Namespace(namespace). 95 | Name("client-egress-policy"). 96 | PodSelector("app", "client-app"). 97 | AddIngressRule(ingressPeer). 
98 | Build() 99 | 100 | err := fw.NetworkPolicyManager.CreateNetworkPolicy(ctx, clientNetworkPolicy) 101 | Expect(err).ToNot(HaveOccurred()) 102 | }) 103 | 104 | By("Check if the first response is accepted and following responses from server is denied", func() { 105 | time.Sleep(2 * time.Minute) 106 | logs, err := fw.PodManager.PodLogs(namespace, podName) 107 | Expect(err).ToNot(HaveOccurred()) 108 | err = processDefaultAllowLogs(logs, false, true) 109 | Expect(err).ToNot(HaveOccurred()) 110 | }) 111 | }) 112 | 113 | AfterEach(func() { 114 | fw.PodManager.DeleteAndWaitTillPodIsDeleted(ctx, clientPod) 115 | fw.ServiceManager.DeleteService(ctx, delayService) 116 | fw.DeploymentManager.DeleteAndWaitUntilDeploymentDeleted(ctx, delayAppDeployment) 117 | }) 118 | }) 119 | }) 120 | 121 | func processDefaultAllowLogs(podlogs string, allAllow bool, deniedAfterAllow bool) error { 122 | 123 | passFlag := false 124 | failFlag := false 125 | trafficDeniedAfterAllow := false 126 | for _, log := range strings.Split(strings.TrimSuffix(podlogs, "\n"), "\n") { 127 | if log == "Fail" { 128 | if passFlag { 129 | trafficDeniedAfterAllow = true 130 | } 131 | failFlag = true 132 | } else if log == "Success" { 133 | passFlag = true 134 | if trafficDeniedAfterAllow { 135 | return fmt.Errorf("Failed as traffic was allowed after denied") 136 | } 137 | } 138 | } 139 | 140 | if !passFlag && !failFlag { 141 | return fmt.Errorf("Error generating traffic probes") 142 | } else if allAllow && failFlag { 143 | return fmt.Errorf("Failed as all traffic probes did not get ACCEPT") 144 | } else if deniedAfterAllow && trafficDeniedAfterAllow { 145 | return fmt.Errorf("Failed as traffic probes did not get DENY first and then ACCEPT") 146 | } 147 | 148 | return nil 149 | } 150 | -------------------------------------------------------------------------------- /test/integration/policy/policy_suite_test.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/aws/aws-network-policy-agent/test/framework" 8 | . "github.com/onsi/ginkgo/v2" 9 | . "github.com/onsi/gomega" 10 | ) 11 | 12 | var ( 13 | fw *framework.Framework 14 | ctx context.Context 15 | namespace = "policy" 16 | ) 17 | 18 | func TestStrictModeNetworkPolicy(t *testing.T) { 19 | RegisterFailHandler(Fail) 20 | RunSpecs(t, "Network Policy Test Suite") 21 | } 22 | 23 | var _ = BeforeSuite(func() { 24 | fw = framework.New(framework.GlobalOptions) 25 | ctx = context.Background() 26 | 27 | err := fw.NamespaceManager.CreateNamespace(ctx, namespace) 28 | Expect(err).ToNot(HaveOccurred()) 29 | }) 30 | 31 | var _ = AfterSuite(func() { 32 | err := fw.NamespaceManager.DeleteAndWaitTillNamespaceDeleted(ctx, namespace) 33 | Expect(err).ToNot(HaveOccurred()) 34 | }) 35 | -------------------------------------------------------------------------------- /test/integration/policy/policy_test.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/aws/aws-network-policy-agent/test/framework/manifest" 9 | . "github.com/onsi/ginkgo/v2" 10 | . "github.com/onsi/gomega" 11 | v1 "k8s.io/api/core/v1" 12 | network "k8s.io/api/networking/v1" 13 | ) 14 | 15 | var _ = Describe("Network Policy Test Cases", func() { 16 | Context("A network policy is applied against a pod with '.' 
in its pod name", func() { 17 | 18 | var clientPod *v1.Pod 19 | var clientNetworkPolicy *network.NetworkPolicy 20 | var podName = "this.pod.has.dots.in.its.name" 21 | 22 | BeforeEach(func() { 23 | By("Creating a network policy that denies all egress traffic", func() { 24 | clientNetworkPolicy = manifest.NewNetworkPolicyBuilder(). 25 | Namespace(namespace). 26 | Name("deny-all-egress"). 27 | PodSelector("app", podName). 28 | SetPolicyType(false, true). 29 | Build() 30 | 31 | err := fw.NetworkPolicyManager.CreateNetworkPolicy(ctx, clientNetworkPolicy) 32 | Expect(err).ToNot(HaveOccurred()) 33 | }) 34 | 35 | By("Creating a pod which tries to reach external network", func() { 36 | agnhostContainer := manifest.NewAgnHostContainerBuilder(). 37 | ImageRepository(fw.Options.TestImageRegistry). 38 | Args([]string{"while true; do wget https://www.google.com --spider -T 1; if [ $? == 0 ]; then echo \"Success\"; else echo \"Fail\"; fi; sleep 1s; done"}). 39 | Build() 40 | 41 | clientPod = manifest.NewDefaultPodBuilder(). 42 | Container(agnhostContainer). 43 | Namespace(namespace). 44 | AddLabel("app", podName). 45 | Name(podName). 46 | Build() 47 | 48 | _, err := fw.PodManager.CreateAndWaitTillPodIsRunning(ctx, clientPod, 2*time.Minute) 49 | Expect(err).ToNot(HaveOccurred()) 50 | }) 51 | }) 52 | 53 | It("should be successfully applied and removed from the pod", func() { 54 | By("Verifying that the pod is unable to make an egress connection", func() { 55 | time.Sleep(1 * time.Minute) 56 | logs, err := fw.PodManager.PodLogs(namespace, podName) 57 | Expect(err).ToNot(HaveOccurred()) 58 | err = validateState(logs, []string{"Success", "Fail"}, []string{"Success", "Fail"}) 59 | Expect(err).ToNot(HaveOccurred()) 60 | }) 61 | 62 | By("Removing the network policy", func() { 63 | fw.NetworkPolicyManager.DeleteNetworkPolicy(ctx, clientNetworkPolicy) 64 | }) 65 | 66 | By("Verifying that the pod is once again able to make an egress connection", func() { 67 | time.Sleep(1 * time.Minute) 68 | logs, err := fw.PodManager.PodLogs(namespace, podName) 69 | Expect(err).ToNot(HaveOccurred()) 70 | err = validateState(logs, []string{"Success", "Fail", "Success"}, []string{"Success", "Fail"}) 71 | Expect(err).ToNot(HaveOccurred()) 72 | }) 73 | }) 74 | 75 | AfterEach(func() { 76 | fw.PodManager.DeleteAndWaitTillPodIsDeleted(ctx, clientPod) 77 | }) 78 | 79 | }) 80 | 81 | }) 82 | 83 | func validateState(podlogs string, expectedStates []string, possibleStates []string) error { 84 | // convert states to a "set" so we can filter out non-state log messages 85 | stateSet := make(map[string]bool) 86 | for _, state := range possibleStates { 87 | stateSet[state] = true 88 | } 89 | 90 | stateIt := 0 91 | for _, log := range strings.Split(strings.TrimSuffix(podlogs, "\n"), "\n") { 92 | if stateSet[log] && expectedStates[stateIt] != log { 93 | stateIt++ 94 | if stateIt >= len(expectedStates) { 95 | return fmt.Errorf("Connection changed to %s but we expected it to remain in %s", log, expectedStates[stateIt-1]) 96 | } 97 | } 98 | } 99 | 100 | if stateIt != len(expectedStates)-1 { 101 | return fmt.Errorf("Expected connection to transition through states %v, but observed that it only transitioned through %v", expectedStates, expectedStates[:stateIt+1]) 102 | } 103 | 104 | return nil 105 | } 106 | -------------------------------------------------------------------------------- /test/integration/strict/strict_mode_suite_test.go: -------------------------------------------------------------------------------- 1 | package strict 2 | 3 | import ( 4 
| "context" 5 | "testing" 6 | 7 | "github.com/aws/aws-network-policy-agent/test/framework" 8 | . "github.com/onsi/ginkgo/v2" 9 | . "github.com/onsi/gomega" 10 | ) 11 | 12 | var ( 13 | fw *framework.Framework 14 | ctx context.Context 15 | namespace = "strict" 16 | ) 17 | 18 | func TestStrictModeNetworkPolicy(t *testing.T) { 19 | RegisterFailHandler(Fail) 20 | RunSpecs(t, "Strict Mode Network Policy Test Suite") 21 | } 22 | 23 | var _ = BeforeSuite(func() { 24 | fw = framework.New(framework.GlobalOptions) 25 | ctx = context.Background() 26 | 27 | err := fw.NamespaceManager.CreateNamespace(ctx, namespace) 28 | Expect(err).ToNot(HaveOccurred()) 29 | }) 30 | 31 | var _ = AfterSuite(func() { 32 | err := fw.NamespaceManager.DeleteAndWaitTillNamespaceDeleted(ctx, namespace) 33 | Expect(err).ToNot(HaveOccurred()) 34 | }) 35 | -------------------------------------------------------------------------------- /test/integration/strict/strict_mode_test.go: -------------------------------------------------------------------------------- 1 | package strict 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/aws/aws-network-policy-agent/test/framework/manifest" 9 | . "github.com/onsi/ginkgo/v2" 10 | . "github.com/onsi/gomega" 11 | appsv1 "k8s.io/api/apps/v1" 12 | v1 "k8s.io/api/core/v1" 13 | network "k8s.io/api/networking/v1" 14 | ) 15 | 16 | var _ = Describe("Strict Mode Test Cases", func() { 17 | Context("when pod is launched", func() { 18 | var clientPod *v1.Pod 19 | var podName = "clientpod" 20 | 21 | BeforeEach(func() { 22 | By("Creating a pod to which tries to reach to external network", func() { 23 | agnhostContainer := manifest.NewAgnHostContainerBuilder(). 24 | ImageRepository(fw.Options.TestImageRegistry). 25 | Args([]string{"while true; do wget https://www.google.com --spider -T 1; if [ $? == 0 ]; then echo \"Success\"; else echo \"Fail\"; fi; sleep 1s; done"}). 26 | Build() 27 | 28 | clientPod = manifest.NewDefaultPodBuilder(). 29 | Container(agnhostContainer). 30 | Namespace(namespace). 31 | Name(podName). 32 | Build() 33 | 34 | _, err := fw.PodManager.CreateAndWaitTillPodIsRunning(ctx, clientPod, 2*time.Minute) 35 | Expect(err).ToNot(HaveOccurred()) 36 | }) 37 | }) 38 | 39 | It("by default should not have connectivity to external network", func() { 40 | By("Verify pod does not have external connectivity", func() { 41 | time.Sleep(1 * time.Minute) 42 | logs, err := fw.PodManager.PodLogs(namespace, podName) 43 | Expect(err).ToNot(HaveOccurred()) 44 | err = processLogs(logs, true, false, false) 45 | Expect(err).ToNot(HaveOccurred()) 46 | }) 47 | }) 48 | 49 | AfterEach(func() { 50 | fw.PodManager.DeleteAndWaitTillPodIsDeleted(ctx, clientPod) 51 | }) 52 | }) 53 | 54 | Context("when a network policy is applied which allows communication between client and server", func() { 55 | var ( 56 | serverPod *v1.Pod 57 | clientDeployment *appsv1.Deployment 58 | serverPodIP string 59 | newPod string 60 | firstPod string 61 | serverName = "serverpod" 62 | clientName = "clientdeploy" 63 | serverNetworkPolicy *network.NetworkPolicy 64 | clientNetworkPolicy *network.NetworkPolicy 65 | ) 66 | 67 | BeforeEach(func() { 68 | 69 | By("Deploying a server pod with allow all ingress network policy", func() { 70 | 71 | ingressPeer := manifest.NewIngressRuleBuilder(). 72 | AddPeer(nil, nil, "0.0.0.0/0"). 73 | AddPeer(nil, nil, "::/0"). 74 | AddPort(8080, v1.ProtocolTCP). 75 | Build() 76 | 77 | serverNetworkPolicy = manifest.NewNetworkPolicyBuilder(). 78 | Namespace(namespace). 
79 | Name("server-ingress-policy"). 80 | PodSelector("app", serverName). 81 | AddIngressRule(ingressPeer). 82 | Build() 83 | 84 | err := fw.NetworkPolicyManager.CreateNetworkPolicy(ctx, serverNetworkPolicy) 85 | Expect(err).ToNot(HaveOccurred()) 86 | 87 | serverContainer := manifest.NewAgnHostContainerBuilder(). 88 | ImageRepository(fw.Options.TestImageRegistry). 89 | Args([]string{"/agnhost netexec"}). 90 | AddContainerPort(v1.ContainerPort{ContainerPort: 8080}). 91 | Build() 92 | 93 | serverPod = manifest.NewDefaultPodBuilder(). 94 | Name(serverName). 95 | Namespace(namespace). 96 | AddLabel("app", serverName). 97 | Container(serverContainer). 98 | Build() 99 | 100 | pod, err := fw.PodManager.CreateAndWaitTillPodIsRunning(ctx, serverPod, 1*time.Minute) 101 | Expect(err).ToNot(HaveOccurred()) 102 | serverPodIP = pod.Status.PodIP 103 | }) 104 | 105 | By("Deploying a client deployment and an egress policy to allow communication with server", func() { 106 | 107 | egressPeer := manifest.NewEgressRuleBuilder(). 108 | AddPeer(nil, map[string]string{"app": serverName}, ""). 109 | AddPort(8080, v1.ProtocolTCP). 110 | Build() 111 | 112 | clientNetworkPolicy = manifest.NewNetworkPolicyBuilder(). 113 | Namespace(namespace). 114 | Name("client-egress-policy"). 115 | PodSelector("app", clientName). 116 | AddEgressRule(egressPeer). 117 | Build() 118 | 119 | if fw.Options.IpFamily == "IPv6" { 120 | serverPodIP = fmt.Sprintf("[%s]", serverPodIP) 121 | } 122 | 123 | err := fw.NetworkPolicyManager.CreateNetworkPolicy(ctx, clientNetworkPolicy) 124 | Expect(err).ToNot(HaveOccurred()) 125 | 126 | clientContainer := manifest.NewAgnHostContainerBuilder(). 127 | ImageRepository(fw.Options.TestImageRegistry). 128 | Command([]string{"/bin/sh", "-c"}). 129 | Args([]string{fmt.Sprintf("while true; do wget %s:8080 --spider -T 2; if [ $? == 0 ]; then echo \"Success\"; else echo \"Fail\"; fi; done", serverPodIP)}). 130 | Build() 131 | 132 | clientDeployment = manifest.NewDefaultDeploymentBuilder(). 133 | Name(clientName). 134 | Replicas(1). 135 | Namespace(namespace). 136 | AddLabel("app", clientName). 137 | Container(clientContainer). 
138 | Build() 139 | 140 | _, err = fw.DeploymentManager.CreateAndWaitUntilDeploymentReady(ctx, clientDeployment) 141 | Expect(err).ToNot(HaveOccurred()) 142 | }) 143 | }) 144 | 145 | It("Traffic from first replica of client deployment must be denied initially before succeeding but second replica should be instantaneous", func() { 146 | By("Verify traffic probes are denied and then accepted from first replica of client pod", func() { 147 | time.Sleep(30 * time.Second) 148 | podList, err := fw.PodManager.GetPodsWithLabel(ctx, namespace, "app", clientName) 149 | Expect(err).ToNot(HaveOccurred()) 150 | 151 | firstPod = podList[0].Name 152 | podLog, err := fw.PodManager.PodLogs(namespace, firstPod) 153 | Expect(err).ToNot(HaveOccurred()) 154 | 155 | err = processLogs(podLog, false, false, true) 156 | Expect(err).ToNot(HaveOccurred()) 157 | }) 158 | 159 | By("Scaling the client deployment to 2", func() { 160 | err := fw.DeploymentManager.ScaleDeploymentAndWaitTillReady(ctx, namespace, clientName, 2) 161 | Expect(err).ToNot(HaveOccurred()) 162 | 163 | time.Sleep(10 * time.Second) 164 | 165 | podList, err := fw.PodManager.GetPodsWithLabel(ctx, namespace, "app", clientName) 166 | Expect(err).ToNot(HaveOccurred()) 167 | 168 | for _, pod := range podList { 169 | if pod.Name != firstPod { 170 | newPod = pod.Name 171 | break 172 | } 173 | } 174 | Expect(newPod).ToNot(BeEmpty()) 175 | }) 176 | 177 | By("Verify all traffic probes are accepted from second replica of client pod", func() { 178 | time.Sleep(30 * time.Second) 179 | 180 | podLog, err := fw.PodManager.PodLogs(namespace, newPod) 181 | Expect(err).ToNot(HaveOccurred()) 182 | 183 | err = processLogs(podLog, false, true, false) 184 | Expect(err).ToNot(HaveOccurred()) 185 | }) 186 | }) 187 | 188 | AfterEach(func() { 189 | fw.PodManager.DeleteAndWaitTillPodIsDeleted(ctx, serverPod) 190 | fw.DeploymentManager.DeleteAndWaitUntilDeploymentDeleted(ctx, clientDeployment) 191 | fw.NetworkPolicyManager.DeleteNetworkPolicy(ctx, serverNetworkPolicy) 192 | fw.NetworkPolicyManager.DeleteNetworkPolicy(ctx, clientNetworkPolicy) 193 | }) 194 | }) 195 | }) 196 | 197 | func processLogs(podlogs string, allDeny bool, allAllow bool, mix bool) error { 198 | 199 | passFlag := false 200 | failFlag := false 201 | for _, log := range strings.Split(strings.TrimSuffix(podlogs, "\n"), "\n") { 202 | if log == "Fail" { 203 | if passFlag { 204 | return fmt.Errorf("Connection failed after initial success") 205 | } 206 | failFlag = true 207 | } else if log == "Success" { 208 | passFlag = true 209 | } 210 | } 211 | 212 | if !passFlag && !failFlag { 213 | return fmt.Errorf("Error generating traffic probes") 214 | } else if allDeny && passFlag { 215 | return fmt.Errorf("Failed as all traffic probes did not get DENY") 216 | } else if allAllow && failFlag { 217 | return fmt.Errorf("Failed as all traffic probes did not get ACCEPT") 218 | } else if mix && !(passFlag && failFlag) { 219 | return fmt.Errorf("Failed as traffic probes did not get DENY first and then ACCEPT") 220 | } 221 | 222 | return nil 223 | } 224 | --------------------------------------------------------------------------------
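For reference, the `check-bpf-cleanup-agent` listed above skips BPF pins that belong to the cleanup pod itself by deriving a prefix from the node name in `NODE_NAME`. The tiny standalone sketch below illustrates the effective derivation; the full node name is a made-up example, and the `strings.Replace` call in `getPodPrefix` is effectively a no-op on `parts[0]` because `SplitN` has already removed the dots, so the prefix reduces to the first DNS label plus an underscore.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical node name; only the first DNS label matters for the prefix.
	nodeName := "ip-192-168-59-7.us-west-2.compute.internal"

	parts := strings.SplitN(nodeName, ".", 2) // ["ip-192-168-59-7", "us-west-2.compute.internal"]
	prefix := parts[0] + "_"

	// The agent ignores entries under /sys/fs/bpf/globals/aws/{maps,programs}
	// (reached via the /tmp host mount) whose names start with this prefix or with "coredns".
	fmt.Println(prefix) // ip-192-168-59-7_
}
```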
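The integration suites above all follow the same pattern: build Kubernetes objects with the `test/framework/manifest` builders and drive them through the `test/framework` resource managers. The following is a minimal sketch of that flow, not code from the repository; it assumes it is called from inside a suite where `framework.GlobalOptions` has already been populated from the test flags (`--cluster-kubeconfig`, `--cluster-name`, `--test-image-registry`), and the `demo` namespace and `runDenyAllEgressProbe` helper are made up for illustration.

```go
package demo

import (
	"context"
	"time"

	"github.com/aws/aws-network-policy-agent/test/framework"
	"github.com/aws/aws-network-policy-agent/test/framework/manifest"
)

// runDenyAllEgressProbe mirrors the shape of the policy_test.go spec: apply a
// deny-all-egress NetworkPolicy, launch a client pod that probes an external
// endpoint in a loop, and return its "Success"/"Fail" log lines.
func runDenyAllEgressProbe(ctx context.Context, fw *framework.Framework) (string, error) {
	const namespace = "demo" // hypothetical namespace, assumed to be created by the suite

	// SetPolicyType(false, true) makes Build() add PolicyTypeEgress even though
	// no egress rules are attached, i.e. all egress from the selected pod is denied.
	denyAllEgress := manifest.NewNetworkPolicyBuilder().
		Namespace(namespace).
		Name("deny-all-egress").
		PodSelector("app", "client").
		SetPolicyType(false, true).
		Build()
	if err := fw.NetworkPolicyManager.CreateNetworkPolicy(ctx, denyAllEgress); err != nil {
		return "", err
	}

	// The agnhost builder defaults to `/bin/sh -c`, so the probe loop is passed as a single arg.
	clientContainer := manifest.NewAgnHostContainerBuilder().
		ImageRepository(fw.Options.TestImageRegistry).
		Args([]string{`while true; do wget https://www.google.com --spider -T 1 && echo "Success" || echo "Fail"; sleep 1s; done`}).
		Build()

	clientPod := manifest.NewDefaultPodBuilder().
		Container(clientContainer).
		Namespace(namespace).
		AddLabel("app", "client").
		Name("client").
		Build()
	if _, err := fw.PodManager.CreateAndWaitTillPodIsRunning(ctx, clientPod, 2*time.Minute); err != nil {
		return "", err
	}

	// Let the loop emit a few probes, then collect the logs; a real spec would
	// feed them into a checker such as processLogs or validateState.
	time.Sleep(1 * time.Minute)
	logs, err := fw.PodManager.PodLogs(namespace, clientPod.Name)
	if err != nil {
		return "", err
	}

	// Clean up in the same order the AfterEach blocks do.
	if err := fw.PodManager.DeleteAndWaitTillPodIsDeleted(ctx, clientPod); err != nil {
		return logs, err
	}
	return logs, fw.NetworkPolicyManager.DeleteNetworkPolicy(ctx, denyAllEgress)
}
```

With the deny-all policy in place, every probe in the returned logs should read `Fail`; deleting the policy and waiting another interval should flip the probes back to `Success`, which is the transition that `validateState` in policy_test.go asserts.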