├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── enhancement_request.md ├── pull_request_template.md └── workflows │ ├── ci-bash-checks.yaml │ ├── ci-codeql-analysis.yml │ ├── ci-e2e-checks.yaml │ ├── ci-image-build.yaml │ ├── ci-manifest-checks.yaml │ ├── ci-python-checks.yaml │ ├── ci-rego-checks.yaml │ └── ci-std-checks.yaml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── NOTICE.txt ├── README.md ├── SECURITY.md ├── app ├── magtape-init │ ├── Dockerfile │ ├── Pipfile │ ├── Pipfile.lock │ └── magtape-init.py └── magtape │ ├── Dockerfile │ ├── Pipfile │ ├── Pipfile.lock │ ├── config.py │ ├── magtape.py │ └── test │ ├── test_build_response_message.py │ ├── test_routes.py │ └── test_slack.py ├── deploy ├── install.yaml ├── manifests │ ├── kustomization.yaml │ ├── magtape-cluster-rbac.yaml │ ├── magtape-deploy.yaml │ ├── magtape-env-cm.yaml │ ├── magtape-hpa.yaml │ ├── magtape-ns-rbac.yaml │ ├── magtape-ns.yaml │ ├── magtape-opa-cm.yaml │ ├── magtape-opa-entrypoint-cm.yaml │ ├── magtape-pdb.yaml │ ├── magtape-sa.yaml │ ├── magtape-svc.yaml │ └── magtape-vwc.yaml └── overlays │ ├── development │ ├── deployment-patch.yaml │ ├── env-cm-patch.yaml │ ├── hpa-patch.yaml │ └── kustomization.yaml │ └── production │ ├── deployment-patch.yaml │ ├── env-cm-patch.yaml │ └── kustomization.yaml ├── docs ├── architecture.md ├── install.md ├── policies.md └── release.md ├── hack ├── .shellcheck-selection ├── boilerplate │ ├── boilerplate.py │ ├── boilerplate.py.txt │ └── boilerplate.sh.txt ├── build-single-manifest.sh ├── lint-shell.sh ├── patch-ca-bundle.sh ├── run-functional-tests.sh ├── run-python-tests.sh ├── run-rego-lint.sh ├── ssl-cert-gen.sh └── verify-boilerplate.sh ├── images ├── magtape-logo-1.png ├── magtape-logo-2.png ├── magtape-workflow.png ├── slack-alert-deny-screenshot.png └── slack-alert-fail-screenshot.png ├── metrics ├── grafana │ └── in-cluster-dash │ │ ├── MagTape_Cluster_Stats.json 
│ │ ├── MagTape_Namespace_Stats.json │ │ └── MagTape_Policy_Stats.json └── prometheus │ └── magtape-servicemonitor.yaml ├── policies ├── policy-emptydir-check.rego ├── policy-host-path-check.rego ├── policy-host-port-check.rego ├── policy-liveness-probe-check.rego ├── policy-node-port-range-check.rego ├── policy-pdb-check.rego ├── policy-port-name-mismatch.rego ├── policy-privileged-pod-check.rego ├── policy-readiness-probe-check.rego ├── policy-resource-limits-check.rego ├── policy-resource-requests-check.rego ├── policy-singleton-pod-check.rego └── test │ ├── test_policy-emptydir-check.rego │ ├── test_policy-emptydir-check_mock.json │ ├── test_policy-host-path-check.rego │ ├── test_policy-host-path-check_mock.json │ ├── test_policy-host-port-check.rego │ ├── test_policy-host-port-check_mock.json │ ├── test_policy-liveness-probe-check.rego │ ├── test_policy-liveness-probe-check_mock.json │ ├── test_policy-node-port-range-check.rego │ ├── test_policy-node-port-range-check_mock.json │ ├── test_policy-pdb-check.rego │ ├── test_policy-pdb-check_mock.json │ ├── test_policy-port-name-mismatch-check.rego │ ├── test_policy-port-name-mismatch-check_mock.json │ ├── test_policy-privileged-pod-check.rego │ ├── test_policy-privileged-pod-check_mock.json │ ├── test_policy-readiness-probe-check.rego │ ├── test_policy-readiness-probe-check_mock.json │ ├── test_policy-resource-limits-check.rego │ ├── test_policy-resource-limits-check_mock.json │ ├── test_policy-resource-requests-check.rego │ ├── test_policy-resource-requests-check_mock.json │ ├── test_policy-singleton-pod-check.rego │ └── test_policy-singleton-pod-check_mock.json └── testing ├── README.md ├── deployments ├── test-deploy01-response.json ├── test-deploy01.json ├── test-deploy01.yaml ├── test-deploy02-response.json ├── test-deploy02.json ├── test-deploy02.yaml ├── test-deploy03-response.json ├── test-deploy03.json ├── test-deploy03.yaml ├── test-deploy04-response.json ├── test-deploy04.json ├── test-deploy04.yaml ├── 
test-deploy05-response.json ├── test-deploy05.json ├── test-deploy05.yaml ├── test-deploy06-response.json ├── test-deploy06.json ├── test-deploy06.yaml ├── test-deploy07-response.json ├── test-deploy07.json ├── test-deploy07.yaml ├── test-deploy08-response.json ├── test-deploy08.json ├── test-deploy08.yaml ├── test-deploy09-response.json ├── test-deploy09.json ├── test-deploy09.yaml ├── test-deploy10-response.json ├── test-deploy10.json ├── test-deploy10.yaml ├── test-deploy11-response.json ├── test-deploy11.json └── test-deploy11.yaml ├── export-env.sh ├── functional-tests.yaml ├── pdbs ├── test-pdb01.yaml ├── test-pdb02.yaml ├── test-pdb03.yaml ├── test-pdb04.yaml ├── test-pdb05.yaml └── test-pdb06.yaml ├── pods ├── test-pod01-response.json ├── test-pod01.json ├── test-pod01.yaml ├── test-pod02-response.json ├── test-pod02.json ├── test-pod02.yaml ├── test-pod03.json ├── test-pod03.yaml ├── test-pod04.json ├── test-pod04.yaml ├── test-pod05.json ├── test-pod05.yaml ├── test-pod06-response.json ├── test-pod06.json └── test-pod06.yaml ├── services ├── test-svc01.json ├── test-svc01.yaml ├── test-svc02-response.json ├── test-svc02.json ├── test-svc02.yaml ├── test-svc03-response.json ├── test-svc03.json ├── test-svc03.yaml ├── test-svc04.json └── test-svc04.yaml ├── slack └── slack-alert-payload.json └── statefulsets └── test-sts1.yaml /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Lines starting with '#' are comments. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # More details are here: https://help.github.com/articles/about-codeowners/ 5 | 6 | # The '*' pattern is global owners. 7 | 8 | # Order is important. The last matching pattern has the most precedence. 9 | # The folders are ordered as follows: 10 | 11 | # In each subsection folders are ordered first by depth, then alphabetically. 12 | # This should make it easy to add new rules without breaking existing ones. 
13 | 14 | # Global rule: 15 | * @tmobile/magtape-maintainers 16 | 17 | # Rego Owners 18 | *.rego @phenixblue 19 | 20 | # CI Machinery/Tooling Owners 21 | /hack/ /.github/workflows/ /testing/ @phenixblue @ilrudie 22 | 23 | # Python Owners 24 | *.py @phenixblue 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Report a bug encountered while using MagTape 4 | labels: bug 5 | 6 | --- 7 | 8 | 10 | 11 | **What happened**: 12 | 13 | **What you expected to happen**: 14 | 15 | **How to reproduce it (as minimally and precisely as possible)**: 16 | 17 | **Anything else we need to know?**: 18 | 19 | **Environment**: 20 | 21 | - Kubernetes version (use `kubectl version`): 22 | - Cloud provider or hardware configuration: 23 | - Others: 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancement_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Enhancement Request 3 | about: Suggest an enhancement to the MagTape project 4 | labels: enhancement 5 | --- 6 | 7 | 8 | 9 | **What would you like to be added**: 10 | 11 | **Why is this needed**: 12 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 8 | 9 | **What type of PR is this?** 10 | > Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespace from that line: 11 | > 12 | > /kind bug 13 | > /kind cleanup 14 | > /kind deprecation 15 | > /kind design 16 | > /kind documentation 17 | > /kind feature 18 | > /kind release 19 | 20 | **What this PR does / why we need it**: 21 | 22 | **Which issue(s) this PR fixes**: 23 | 27 | Fixes # 
28 | 29 | **Special notes for your reviewer**: 30 | 31 | **Does this PR introduce a user-facing change?**: 32 | 37 | 38 | ```release-note 39 | 40 | ``` 41 | 42 | **Additional documentation e.g., usage docs, etc.**: 43 | 56 | 57 | ```docs 58 | 59 | ``` 60 | -------------------------------------------------------------------------------- /.github/workflows/ci-bash-checks.yaml: -------------------------------------------------------------------------------- 1 | name: bash-checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | push: 8 | branches: 9 | - master 10 | 11 | # Jobs 12 | jobs: 13 | # lint bash code 14 | shellcheck: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - name: Check out repository 19 | uses: actions/checkout@v2 20 | 21 | - name: Install shellcheck 22 | timeout-minutes: 5 23 | run: | 24 | sudo apt-get update 25 | sudo apt-get install shellcheck 26 | 27 | - name: Lint with shellcheck 28 | timeout-minutes: 5 29 | run: | 30 | make lint-shell-ci 31 | -------------------------------------------------------------------------------- /.github/workflows/ci-codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "code-ql" 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | # The branches below must be a subset of the branches above 8 | branches: [master] 9 | schedule: 10 | - cron: '0 23 * * 3' 11 | 12 | jobs: 13 | analyze: 14 | name: Analyze 15 | runs-on: ubuntu-latest 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | # Override automatic language detection by changing the below list 21 | # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] 22 | language: ['python'] 23 | # Learn more... 
24 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 25 | 26 | steps: 27 | - name: Checkout repository 28 | uses: actions/checkout@v2 29 | with: 30 | # We must fetch at least the immediate parents so that if this is 31 | # a pull request then we can checkout the head. 32 | fetch-depth: 2 33 | 34 | # If this run was triggered by a pull request event, then checkout 35 | # the head of the pull request instead of the merge commit. 36 | - run: git checkout HEAD^2 37 | if: ${{ github.event_name == 'pull_request' }} 38 | 39 | # Initializes the CodeQL tools for scanning. 40 | - name: Initialize CodeQL 41 | uses: github/codeql-action/init@v1 42 | with: 43 | languages: ${{ matrix.language }} 44 | # If you wish to specify custom queries, you can do so here or in a config file. 45 | # By default, queries listed here will override any specified in a config file. 46 | # Prefix the list here with "+" to use these queries and those in the config file. 47 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 48 | 49 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 50 | # If this step fails, then you should remove it and run the build manually (see below) 51 | - name: Autobuild 52 | uses: github/codeql-action/autobuild@v1 53 | 54 | # ℹ️ Command-line programs to run using the OS shell. 
55 | # 📚 https://git.io/JvXDl 56 | 57 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 58 | # and modify them (or add more) to build your code if your project 59 | # uses a compiled language 60 | 61 | #- run: | 62 | # make bootstrap 63 | # make release 64 | 65 | - name: Perform CodeQL Analysis 66 | uses: github/codeql-action/analyze@v1 67 | -------------------------------------------------------------------------------- /.github/workflows/ci-e2e-checks.yaml: -------------------------------------------------------------------------------- 1 | name: e2e-checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | push: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | 13 | e2e-tests: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | matrix: 17 | k8s-version: 18 | - v1.22 19 | - v1.21 20 | - v1.20 21 | - v1.19 22 | include: 23 | - k8s-version: v1.22 24 | kind-node-image: kindest/node:v1.22.7@sha256:1dfd72d193bf7da64765fd2f2898f78663b9ba366c2aa74be1fd7498a1873166 25 | - k8s-version: v1.21 26 | kind-node-image: kindest/node:v1.21.10@sha256:84709f09756ba4f863769bdcabe5edafc2ada72d3c8c44d6515fc581b66b029c 27 | - k8s-version: v1.20 28 | kind-node-image: kindest/node:v1.20.15@sha256:393bb9096c6c4d723bb17bceb0896407d7db581532d11ea2839c80b28e5d8deb 29 | - k8s-version: v1.19 30 | kind-node-image: kindest/node:v1.19.16@sha256:81f552397c1e6c1f293f967ecb1344d8857613fb978f963c30e907c32f598467 31 | 32 | name: e2e-tests for K8s ${{ matrix.k8s-version }} 33 | 34 | steps: 35 | 36 | - name: Check out the repo 37 | uses: actions/checkout@v2 38 | 39 | # Collect Release SHA Tag is used to to collect information needed later in the action and expose it so it can be referenced 40 | - name: Collect Release SHA Tag 41 | id: prep 42 | run: | 43 | echo ::set-output name=releasetag::sha-${GITHUB_SHA::7} 44 | 45 | # Part of docker/build-push-action@v2; setting up the build system 46 | - name: Set up Docker Buildx 47 | uses: docker/setup-buildx-action@v1 48 
| with: 49 | driver: docker-container 50 | driver-opts: image=moby/buildkit:buildx-stable-1 51 | use: true 52 | 53 | - name: Build magtape-init Container Image 54 | timeout-minutes: 10 55 | uses: docker/build-push-action@v2 56 | with: 57 | context: ./app/magtape-init/ 58 | # file should be specified relative to the repo root rather than relative to the context 59 | file: ./app/magtape-init/Dockerfile 60 | # Don't push the image to a registry 61 | push: false 62 | # Load image into local docker default context 63 | outputs: type=docker 64 | # Uses the releasetag output exposed by the Collect Release SHA Tag step to set the tag under v2 65 | tags: tmobile/magtape-init:${{ steps.prep.outputs.releasetag }} 66 | 67 | 68 | - name: Build magtape Container Image 69 | timeout-minutes: 10 70 | uses: docker/build-push-action@v2 71 | with: 72 | context: ./app/magtape/ 73 | # file should be specified relative to the repo root rather than relative to the context 74 | file: ./app/magtape/Dockerfile 75 | # Don't push the image to a registry 76 | push: false 77 | # Load image into local docker default context 78 | outputs: type=docker 79 | # Uses the releasetag output exposed by the Collect Release SHA Tag step to set the tag under v2 80 | tags: tmobile/magtape:${{ steps.prep.outputs.releasetag }} 81 | 82 | - name: Setup KinD Cluster 83 | timeout-minutes: 10 84 | uses: engineerd/setup-kind@v0.5.0 85 | with: 86 | version: "v0.12.0" 87 | image: ${{ matrix.kind-node-image }} 88 | 89 | - name: Install MagTape 90 | timeout-minutes: 10 91 | # kind load docker-image: loads image from docker default context into kind node image cache 92 | run: | 93 | echo "Loading MagTape images to KinD nodes" 94 | GIT_SHA=${{github.sha}} 95 | kind load docker-image tmobile/magtape-init:sha-${GIT_SHA::7} 96 | kind load docker-image tmobile/magtape:sha-${GIT_SHA::7} 97 | echo "Updating target image to use local ref" 98 | sed -i='' -E "s/(MAGTAPE_VERSION := )(.*$)/\1sha-${GIT_SHA::7}/" Makefile 99 | sed 
-i='' -E 's/(imagePullPolicy:) Always/\1 IfNotPresent/' deploy/manifests/magtape-deploy.yaml 100 | make set-release-version 101 | make build-single-manifest 102 | echo "Install MagTape" 103 | make install 104 | kubectl wait --for=condition=available --timeout=120s deployment/magtape -n magtape-system 105 | 106 | - name: Configure Test Namespace 107 | timeout-minutes: 5 108 | run: | 109 | make ns-create-test 110 | 111 | - name: Install Tools 112 | timeout-minutes: 5 113 | run: | 114 | sudo add-apt-repository ppa:rmescandon/yq 115 | sudo apt update 116 | sudo apt install yq -y 117 | yq --version 118 | 119 | - name: Execute Functional Tests 120 | timeout-minutes: 5 121 | run: | 122 | make test-functional 123 | -------------------------------------------------------------------------------- /.github/workflows/ci-image-build.yaml: -------------------------------------------------------------------------------- 1 | name: image-build 2 | 3 | on: 4 | release: 5 | # Dynamic releasetag tag is set based on the assumption this ci task only runs on release 6 | types: [published] 7 | 8 | # Variables to be used throughout the workflow manifest 9 | env: 10 | TARGET_PLATFORMS: linux/amd64,linux/arm64 11 | 12 | jobs: 13 | 14 | # Build and push magtape-init container image 15 | build-magtape-init-image: 16 | name: Build and push magtape-init images to DockerHub 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | 21 | - name: Check out the repo 22 | uses: actions/checkout@v2 23 | 24 | # Collect Release Tag is used to to collect information needed later in the action and expose it so it can be referenced 25 | - name: Collect Release Tag 26 | id: prep 27 | # GITHUB_REF variable must exist in action; this may rely on {{ on: release: types: [published] }} gating the action 28 | run: | 29 | echo ::set-output name=releasetag::${GITHUB_REF#refs/tags/} 30 | 31 | # Setup QEMU to support multi-arch builds 32 | - name: Set up QEMU 33 | uses: docker/setup-qemu-action@v1 34 | 35 | # Part of 
docker/build-push-action@v2; setting up the build system 36 | - name: Set up Docker Buildx 37 | uses: docker/setup-buildx-action@v1 38 | 39 | # Part of docker/build-push-action@v2; login to dockerhub 40 | - name: Login to DockerHub 41 | uses: docker/login-action@v1 42 | with: 43 | username: ${{ secrets.DOCKERHUB_USERNAME }} 44 | password: ${{ secrets.DOCKERHUB_PASSWORD }} 45 | 46 | - name: Build and push magtape-init image to DockerHub 47 | if: github.repository == 'tmobile/magtape' 48 | timeout-minutes: 30 49 | uses: docker/build-push-action@v2 50 | with: 51 | context: ./app/magtape-init/ 52 | # file should be specified relative to the repo root rather than relative to the context 53 | file: ./app/magtape-init/Dockerfile 54 | # Defines the target platform architectures images should be built for 55 | platforms: ${{ env.TARGET_PLATFORMS }} 56 | # push is no longer defaulted to true under v2; to push you must specify push is true 57 | push: true 58 | # Uses the releasetag output exposed by the Collect Release Tag step to set the tag under v2 59 | tags: tmobile/magtape-init:${{ steps.prep.outputs.releasetag }},tmobile/magtape-init:latest 60 | 61 | # Build and push magtape container image 62 | build-magtape-image: 63 | name: Build and push magtape images to DockerHub 64 | runs-on: ubuntu-latest 65 | 66 | steps: 67 | 68 | - name: Check out the repo 69 | uses: actions/checkout@v2 70 | 71 | # Collect Release Tag is used to to collect information needed later in the action and expose it so it can be referenced 72 | - name: Collect Release Tag 73 | id: prep 74 | # GITHUB_REF variable must exist in action; this may rely on {{ on: release: types: [published] }} gating the action 75 | run: | 76 | echo ::set-output name=releasetag::${GITHUB_REF#refs/tags/} 77 | 78 | # Setup QEMU to support multi-arch builds 79 | - name: Set up QEMU 80 | uses: docker/setup-qemu-action@v1 81 | 82 | # Part of docker/build-push-action@v2; setting up the build system 83 | - name: Set up Docker 
Buildx 84 | uses: docker/setup-buildx-action@v1 85 | 86 | # Part of docker/build-push-action@v2; login to dockerhub 87 | - name: Login to DockerHub 88 | uses: docker/login-action@v1 89 | with: 90 | username: ${{ secrets.DOCKERHUB_USERNAME }} 91 | password: ${{ secrets.DOCKERHUB_PASSWORD }} 92 | 93 | - name: Build and push magtape image to DockerHub 94 | if: github.repository == 'tmobile/magtape' 95 | timeout-minutes: 30 96 | uses: docker/build-push-action@v2 97 | with: 98 | context: ./app/magtape/ 99 | # file should be specified relative to the repo root rather than relative to the context 100 | file: ./app/magtape/Dockerfile 101 | # Defines the target platform architectures images should be built for 102 | platforms: ${{ env.TARGET_PLATFORMS }} 103 | # push is no longer defaulted to true under v2; to push you must specify push is true 104 | push: true 105 | # Uses the releasetag output exposed by the Collect Release Tag step to set the tag under v2 106 | tags: tmobile/magtape:${{ steps.prep.outputs.releasetag }},tmobile/magtape:latest 107 | -------------------------------------------------------------------------------- /.github/workflows/ci-manifest-checks.yaml: -------------------------------------------------------------------------------- 1 | name: manifest-checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | # Commenting out paths for now until GH Actions 8 | # Have good suport for required checks + path filters 9 | # https://github.community/t5/GitHub-Actions/Feature-request-conditional-required-checks/m-p/36938#M2735 10 | #paths: 11 | # - 'deploy/**' 12 | push: 13 | branches: 14 | - master 15 | 16 | # Jobs 17 | jobs: 18 | # Job to check for manifest changes 19 | compare-manifests: 20 | runs-on: ubuntu-latest 21 | 22 | steps: 23 | - uses: actions/checkout@v2 24 | 25 | - name: Setup KinD Cluster 26 | timeout-minutes: 5 27 | uses: engineerd/setup-kind@v0.5.0 28 | with: 29 | version: "v0.11.1" 30 | 31 | - name: Check for manifest changes 32 | 
timeout-minutes: 5 33 | run: | 34 | make ci-bootstrap 35 | make compare-single-manifest -------------------------------------------------------------------------------- /.github/workflows/ci-python-checks.yaml: -------------------------------------------------------------------------------- 1 | name: python-checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | push: 8 | branches: 9 | - master 10 | 11 | # Jobs 12 | jobs: 13 | # Job to lint code 14 | lint: 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | python-version: 19 | - 3.8 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v1 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | - name: Install dependencies 30 | timeout-minutes: 5 31 | run: | 32 | python -m pip install --upgrade pip 33 | 34 | - name: Lint with Black 35 | timeout-minutes: 5 36 | # Pinning Black at 19.x for now. Have seen flakey results with 20.x 37 | run: | 38 | pip install black==22.3.0 39 | make ci-lint-python 40 | # Job to execute unit tests 41 | unit-test: 42 | needs: lint 43 | runs-on: ubuntu-latest 44 | strategy: 45 | matrix: 46 | python-version: 47 | - 3.8 48 | 49 | steps: 50 | - uses: actions/checkout@v2 51 | 52 | - name: Set up Python ${{ matrix.python-version }} 53 | timeout-minutes: 5 54 | uses: actions/setup-python@v1 55 | with: 56 | python-version: ${{ matrix.python-version }} 57 | 58 | - name: Install Dependencies 59 | timeout-minutes: 5 60 | run: | 61 | python -m pip install --upgrade pip 62 | pip install pipenv 63 | export PIPENV_PIPFILE="app/magtape/Pipfile" 64 | pipenv install --system --deploy 65 | export PIPENV_PIPFILE="app/magtape-init/Pipfile" 66 | pipenv install --system --deploy 67 | 68 | - name: Execute Unit Tests 69 | timeout-minutes: 5 70 | run: | 71 | pip install coverage 72 | make test-python 73 | -------------------------------------------------------------------------------- 
/.github/workflows/ci-rego-checks.yaml: -------------------------------------------------------------------------------- 1 | name: rego-checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | push: 8 | branches: 9 | - master 10 | 11 | # Jobs 12 | jobs: 13 | # Job to lint code 14 | lint: 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | opa-version: 19 | - v0.37.2 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | 24 | - name: Install Dependencies 25 | timeout-minutes: 5 26 | run: | 27 | mkdir -p "${GITHUB_WORKSPACE}/bin" 28 | curl -L -o "${GITHUB_WORKSPACE}/bin/opa" https://openpolicyagent.org/downloads/${{ matrix.opa-version }}/opa_linux_amd64 29 | chmod +x "${GITHUB_WORKSPACE}/bin/opa" 30 | echo "${GITHUB_WORKSPACE}/bin" >> $GITHUB_PATH 31 | 32 | - name: Lint 33 | timeout-minutes: 5 34 | run: | 35 | make ci-lint-rego 36 | 37 | # Job to execute unit tests 38 | unit-test: 39 | needs: lint 40 | runs-on: ubuntu-latest 41 | strategy: 42 | matrix: 43 | opa-version: 44 | - v0.37.2 45 | 46 | steps: 47 | - uses: actions/checkout@v2 48 | 49 | - name: Install Dependencies 50 | timeout-minutes: 5 51 | run: | 52 | mkdir -p "${GITHUB_WORKSPACE}/bin" 53 | curl -L -o "${GITHUB_WORKSPACE}/bin/opa" https://openpolicyagent.org/downloads/${{ matrix.opa-version }}/opa_linux_amd64 54 | chmod +x "${GITHUB_WORKSPACE}/bin/opa" 55 | echo "${GITHUB_WORKSPACE}/bin" >> $GITHUB_PATH 56 | 57 | - name: Execute Unit Tests 58 | timeout-minutes: 5 59 | run: | 60 | make test-rego 61 | -------------------------------------------------------------------------------- /.github/workflows/ci-std-checks.yaml: -------------------------------------------------------------------------------- 1 | name: repo-checks 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | push: 8 | branches: 9 | - master 10 | 11 | # Jobs 12 | jobs: 13 | # Job to lint code 14 | boilerplate: 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v2 19 | 20 | - name: Check 
boilerplate 21 | timeout-minutes: 5 22 | run: | 23 | make boilerplate 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/ssl 2 | **/*.bak 3 | **/*.crt 4 | **/*.key 5 | **/*.pem 6 | **/*.tmp* 7 | **/*-patched.yaml 8 | **/.vscode 9 | **/.DS_Store 10 | **/.coverage 11 | **/__pycache__ 12 | **/pythonenv* 13 | **/scratch 14 | **/*slim* 15 | **= 16 | deploy/overlays/test 17 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at opensource@t-mobile.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | This NOTICE.TXT file has been created for MAGTAPE, as code editor, per the terms and 2 | conditions under the Apache license 2.0, dated January 2004. See the license at 3 | http://www.apache.org/licenses for more details. Attributions and additional notices 4 | pertaining to the source code are to be documented here. The original version of 5 | MAGTAPE was created by T-Mobile, USA, Inc. 
and released on 2020-02-29 -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Currently security updates are supported for the latest MagTape release. We will review backporting security updates to older releases on a case by case basis. 6 | 7 | ## Reporting a Vulnerability 8 | 9 | If you discover a vulnerability in MagTape or any of the project's tooling, please alert us [here](mailto:hello@magtape.io). 10 | 11 | ## Security Vulnerability Response 12 | 13 | Each report will be reviewed and receipt acknowledged within 3 business days. This will set off a security review process. 14 | 15 | Any vulnerability information shared with the security team stays within the MagTape project and will not be shared with others unless it is necessary to fix the issue. Information is shared only on a need to know basis. 16 | 17 | We ask that vulnerability reporter(s) act in good faith by not disclosing the issue to others. And we strive to act in good faith by acting swiftly, and by justly crediting the vulnerability reporter(s) in writing. 18 | 19 | As the security issue moves through triage, identification, and release the reporter of the security vulnerability will be notified. Additional questions about the vulnerability may also be asked of the reporter. 20 | 21 | ## Public Disclosure 22 | 23 | A public disclosure of security vulnerabilities is released alongside release updates or details that fix the vulnerability. We try to fully disclose vulnerabilities once a mitigation strategy is available. Our goal is to perform a release and public disclosure quickly and in a timetable that works well for users. For example, a release may be ready on a Friday but for the sake of users may be delayed to a Monday. 
24 | -------------------------------------------------------------------------------- /app/magtape-init/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | LABEL maintainer=joe@twr.io 4 | 5 | COPY ./Pipfile* /app/ 6 | 7 | WORKDIR /app 8 | 9 | RUN apk add --update --no-cache bind-tools ca-certificates gcc musl-dev python3-dev libffi-dev openssl-dev 10 | 11 | RUN pip install pipenv 12 | 13 | RUN pipenv install --system --deploy 14 | 15 | RUN apk del gcc musl-dev python3-dev libffi-dev openssl-dev 16 | 17 | COPY ./magtape-init.py /app/ 18 | 19 | CMD ["python", "magtape-init.py"] 20 | -------------------------------------------------------------------------------- /app/magtape-init/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | cryptography = "*" 10 | kubernetes = "~=23.3.0" 11 | 12 | [requires] 13 | python_version = "3.8" 14 | -------------------------------------------------------------------------------- /app/magtape/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | 3 | LABEL maintainer=joe@twr.io 4 | 5 | COPY ./Pipfile* /app/ 6 | 7 | WORKDIR /app 8 | 9 | RUN apk add --update --no-cache bind-tools ca-certificates 10 | 11 | RUN pip install pipenv 12 | 13 | RUN pipenv install --system --deploy 14 | 15 | COPY ./magtape.py /app/ 16 | COPY ./config.py /app/ 17 | 18 | CMD ["gunicorn", "magtape:app", "--config=config.py"] 19 | 20 | -------------------------------------------------------------------------------- /app/magtape/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | flask 
= "*" 10 | kubernetes = "~=23.3.0" 11 | prometheus-client = "~=0.14" 12 | prometheus-flask-exporter = "*" 13 | gunicorn = "*" 14 | werkzeug = "*" 15 | click = "~=8.1" 16 | 17 | [requires] 18 | python_version = "3.8" 19 | -------------------------------------------------------------------------------- /app/magtape/config.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 T-Mobile, USA, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | # Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of 16 | # its contributors may be used to endorse or promote products derived from this 17 | # software without specific prior written permission. 18 | 19 | from os import environ as env 20 | import multiprocessing 21 | 22 | # Gunicorn config 23 | bind = ":5000" 24 | workers = 2 25 | threads = 2 26 | certfile = "/tls/cert.pem" 27 | keyfile = "/tls/key.pem" 28 | -------------------------------------------------------------------------------- /app/magtape/test/test_routes.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2020 T-Mobile, USA, Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | # Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of 18 | # its contributors may be used to endorse or promote products derived from this 19 | # software without specific prior written permission. 20 | 21 | import json 22 | import sys 23 | import unittest 24 | from unittest.mock import patch 25 | 26 | sys.path.append("./app/magtape/") 27 | from magtape import magtape 28 | 29 | 30 | class TestRoutes(unittest.TestCase): 31 | def setUp(self): 32 | 33 | self.app = magtape.app.test_client() 34 | self.app.testing = True 35 | self.k8s_events_enabled = "FALSE" 36 | 37 | def tearDown(self): 38 | 39 | pass 40 | 41 | def test_healthz(self): 42 | 43 | """Method to test webhook /healthz route""" 44 | 45 | result = self.app.get("/healthz") 46 | 47 | self.assertEqual(result.status_code, 200) 48 | self.assertEqual(json.loads(result.data)["health"], "ok") 49 | self.assertEqual(json.loads(result.data)["pod_name"], "magtape-abc1234") 50 | 51 | @patch("magtape.magtape.build_response_message", return_value="") 52 | def test_webhook_all_pass(self, magtape_build_response_message_function): 53 | 54 | """Method to test webhook with all fail response from OPA sidecar""" 55 | 56 | with open("./testing/deployments/test-deploy01.json") as json_file: 57 | 58 | request_object_json = json.load(json_file) 59 | 60 | result = self.app.post( 61 | "/", 62 | data=json.dumps(request_object_json), 63 | headers={"Content-Type": "application/json"}, 64 | ) 65 | 66 | self.assertEqual(result.status_code, 200) 67 | 
self.assertEqual(json.loads(result.data)["response"]["allowed"], True) 68 | 69 | @patch("magtape.magtape.k8s_events_enabled", "FALSE") 70 | @patch( 71 | "magtape.magtape.build_response_message", 72 | return_value='[FAIL] HIGH - Found privileged Security Context for container "test-deploy02" (MT2001), [FAIL] LOW - Liveness Probe missing for container "test-deploy02" (MT1001), [FAIL] LOW - Readiness Probe missing for container "test-deploy02" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container "test-deploy02" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container "test-deploy02" (MT1004)', 73 | ) 74 | def test_webhook_all_fail(self, build_response_message_function): 75 | 76 | """Method to test webhook with all fail response from OPA sidecar""" 77 | 78 | with open("./testing/deployments/test-deploy02.json") as json_file: 79 | 80 | request_object_json = json.load(json_file) 81 | 82 | result = self.app.post( 83 | "/", 84 | data=json.dumps(request_object_json), 85 | headers={"Content-Type": "application/json"}, 86 | ) 87 | 88 | self.assertEqual(result.status_code, 200) 89 | self.assertEqual(json.loads(result.data)["response"]["allowed"], False) 90 | self.assertEqual( 91 | json.loads(result.data)["response"]["status"]["message"], 92 | '[FAIL] HIGH - Found privileged Security Context for container "test-deploy02" (MT2001), [FAIL] LOW - Liveness Probe missing for container "test-deploy02" (MT1001), [FAIL] LOW - Readiness Probe missing for container "test-deploy02" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container "test-deploy02" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container "test-deploy02" (MT1004)', 93 | ) 94 | 95 | 96 | if __name__ == "__main__": 97 | unittest.main() 98 | -------------------------------------------------------------------------------- /deploy/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - 
magtape-cluster-rbac.yaml 3 | - magtape-deploy.yaml 4 | - magtape-env-cm.yaml 5 | - magtape-hpa.yaml 6 | - magtape-ns-rbac.yaml 7 | - magtape-ns.yaml 8 | - magtape-opa-cm.yaml 9 | - magtape-opa-entrypoint-cm.yaml 10 | - magtape-pdb.yaml 11 | - magtape-sa.yaml 12 | - magtape-svc.yaml 13 | 14 | configMapGenerator: 15 | - name: magtape-vwc-template 16 | files: 17 | - magtape-vwc=magtape-vwc.yaml 18 | options: 19 | disableNameSuffixHash: true 20 | -------------------------------------------------------------------------------- /deploy/manifests/magtape-cluster-rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: magtape-write 5 | labels: 6 | app: magtape 7 | rules: 8 | - apiGroups: 9 | - admissionregistration.k8s.io 10 | - certificates.k8s.io 11 | - events.k8s.io 12 | - "" 13 | resources: 14 | - validatingwebhookconfigurations 15 | - certificatesigningrequests 16 | - certificatesigningrequests/approval 17 | - certificatesigningrequests/status 18 | - events 19 | - signers 20 | verbs: 21 | - get 22 | - list 23 | - watch 24 | - create 25 | - patch 26 | - update 27 | - delete 28 | - approve 29 | - sign 30 | 31 | --- 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | kind: ClusterRole 34 | metadata: 35 | name: magtape-read 36 | labels: 37 | app: magtape 38 | rules: 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - namespaces 43 | - pods 44 | - secrets 45 | - services 46 | - configmaps 47 | verbs: 48 | - get 49 | - list 50 | - watch 51 | - apiGroups: 52 | - apps 53 | - extensions 54 | resources: 55 | - deployments 56 | - daemonsets 57 | - statefulsets 58 | verbs: 59 | - get 60 | - list 61 | - watch 62 | - apiGroups: 63 | - policy 64 | resources: 65 | - poddisruptionbudgets 66 | verbs: 67 | - get 68 | - list 69 | - watch 70 | 71 | --- 72 | kind: ClusterRoleBinding 73 | apiVersion: rbac.authorization.k8s.io/v1 74 | metadata: 75 | name: 
magtape-write-crb 76 | labels: 77 | app: magtape 78 | roleRef: 79 | kind: ClusterRole 80 | name: magtape-write 81 | apiGroup: rbac.authorization.k8s.io 82 | subjects: 83 | - kind: ServiceAccount 84 | name: magtape-sa 85 | namespace: magtape-system 86 | 87 | --- 88 | kind: ClusterRoleBinding 89 | apiVersion: rbac.authorization.k8s.io/v1 90 | metadata: 91 | name: magtape-read-crb 92 | labels: 93 | app: magtape 94 | roleRef: 95 | kind: ClusterRole 96 | name: magtape-read 97 | apiGroup: rbac.authorization.k8s.io 98 | subjects: 99 | - kind: ServiceAccount 100 | name: magtape-sa 101 | namespace: magtape-system -------------------------------------------------------------------------------- /deploy/manifests/magtape-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: magtape 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: magtape 13 | template: 14 | metadata: 15 | labels: 16 | app: magtape 17 | spec: 18 | serviceAccountName: magtape-sa 19 | securityContext: 20 | runAsUser: 1900 21 | runAsGroup: 1900 22 | initContainers: 23 | - name: magtape-init 24 | image: tmobile/magtape-init:v2.4.0 25 | command: [/app/magtape-init.py] 26 | imagePullPolicy: Always 27 | securityContext: 28 | allowPrivilegeEscalation: false 29 | env: 30 | - name: MAGTAPE_POD_NAME 31 | valueFrom: 32 | fieldRef: 33 | fieldPath: metadata.name 34 | - name: MAGTAPE_NAMESPACE_NAME 35 | valueFrom: 36 | fieldRef: 37 | fieldPath: metadata.namespace 38 | envFrom: 39 | - configMapRef: 40 | name: magtape-env 41 | volumeMounts: 42 | - name: magtape-tls 43 | mountPath: /tls 44 | - name: magtape-vwc 45 | mountPath: /vwc 46 | containers: 47 | - name: magtape 48 | image: tmobile/magtape:v2.4.0 49 | ports: 50 | - containerPort: 5000 51 | command: ["gunicorn", "magtape:app", "--config=config.py"] 52 | imagePullPolicy: Always 53 | 
securityContext: 54 | allowPrivilegeEscalation: false 55 | livenessProbe: 56 | httpGet: 57 | scheme: HTTPS 58 | port: 5000 59 | path: /healthz 60 | initialDelaySeconds: 3 61 | periodSeconds: 5 62 | readinessProbe: 63 | httpGet: 64 | scheme: HTTPS 65 | port: 5000 66 | path: /healthz 67 | initialDelaySeconds: 3 68 | periodSeconds: 5 69 | resources: 70 | limits: 71 | cpu: "1" 72 | memory: 1Gi 73 | requests: 74 | cpu: 50m 75 | memory: 128Mi 76 | env: 77 | - name: MAGTAPE_POD_NAME 78 | valueFrom: 79 | fieldRef: 80 | fieldPath: metadata.name 81 | - name: MAGTAPE_NAMESPACE_NAME 82 | valueFrom: 83 | fieldRef: 84 | fieldPath: metadata.namespace 85 | envFrom: 86 | - configMapRef: 87 | name: magtape-env 88 | volumeMounts: 89 | - name: magtape-tls 90 | mountPath: /tls 91 | - name: opa 92 | image: openpolicyagent/opa:0.37.2-static 93 | args: 94 | - "run" 95 | - "--server" 96 | - "--tls-cert-file=/tls/cert.pem" 97 | - "--tls-private-key-file=/tls/key.pem" 98 | - "--addr=0.0.0.0:8443" 99 | - "--addr=http://127.0.0.1:8181" 100 | securityContext: 101 | allowPrivilegeEscalation: false 102 | readinessProbe: 103 | httpGet: 104 | scheme: HTTPS 105 | port: 8443 106 | path: /health 107 | initialDelaySeconds: 3 108 | periodSeconds: 5 109 | livenessProbe: 110 | httpGet: 111 | scheme: HTTPS 112 | port: 8443 113 | path: /health 114 | initialDelaySeconds: 3 115 | periodSeconds: 5 116 | resources: 117 | limits: 118 | cpu: "500m" 119 | memory: 1Gi 120 | requests: 121 | cpu: 50m 122 | memory: 128Mi 123 | volumeMounts: 124 | - name: magtape-tls 125 | mountPath: /tls 126 | - name: kube-mgmt 127 | image: openpolicyagent/kube-mgmt:4.1.1 128 | args: 129 | - "--policies=magtape-system" 130 | - "--require-policy-label=true" 131 | securityContext: 132 | allowPrivilegeEscalation: false 133 | resources: 134 | limits: 135 | cpu: 500m 136 | memory: 1Gi 137 | requests: 138 | cpu: 50m 139 | memory: 128Mi 140 | volumes: 141 | - name: magtape-vwc 142 | configMap: 143 | name: magtape-vwc-template 144 | items: 
145 | - key: magtape-vwc 146 | path: magtape-vwc.yaml 147 | - name: magtape-tls 148 | emptyDir: {} 149 | terminationGracePeriodSeconds: 5 150 | -------------------------------------------------------------------------------- /deploy/manifests/magtape-env-cm.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: magtape-env 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | data: 9 | FLASK_ENV: "production" 10 | PYTHONUNBUFFERED: "TRUE" 11 | MAGTAPE_CLUSTER_NAME: "test-cluster" 12 | MAGTAPE_LOG_LEVEL: "INFO" 13 | MAGTAPE_DENY_LEVEL: "LOW" 14 | MAGTAPE_K8S_EVENTS_ENABLED: "TRUE" 15 | MAGTAPE_SLACK_ENABLED: "FALSE" 16 | MAGTAPE_SLACK_PASSIVE: "FALSE" 17 | MAGTAPE_SLACK_WEBHOOK_URL_DEFAULT: "https://hooks.slack.com/services/XXXXXXXX/XXXXXXXX/XXXXXXXXXXXXXXXXXX" 18 | MAGTAPE_SLACK_USER: "mtbot" 19 | MAGTAPE_SLACK_ICON: ":magtape:" 20 | OPA_BASE_URL: "http://127.0.0.1:8181" 21 | OPA_K8S_PATH: "/v0/data/magtape" -------------------------------------------------------------------------------- /deploy/manifests/magtape-hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | labels: 5 | app: magtape 6 | magtape: hpa 7 | name: magtape 8 | namespace: magtape-system 9 | spec: 10 | maxReplicas: 6 11 | minReplicas: 3 12 | scaleTargetRef: 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | name: magtape 16 | targetCPUUtilizationPercentage: 80 -------------------------------------------------------------------------------- /deploy/manifests/magtape-ns-rbac.yaml: -------------------------------------------------------------------------------- 1 | # Define role for OPA/kube-mgmt to update configmaps with policy status. 
2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: magtape-ops 6 | namespace: magtape-system 7 | labels: 8 | app: magtape 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - secrets 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | - create 19 | - patch 20 | - update 21 | - delete 22 | - apiGroups: 23 | - "" 24 | resources: 25 | - configmaps 26 | verbs: 27 | - get 28 | - list 29 | - watch 30 | - patch 31 | - update 32 | 33 | --- 34 | kind: RoleBinding 35 | apiVersion: rbac.authorization.k8s.io/v1 36 | metadata: 37 | name: magtape-ops-rb 38 | namespace: magtape-system 39 | labels: 40 | app: magtape 41 | roleRef: 42 | kind: Role 43 | name: magtape-ops 44 | apiGroup: rbac.authorization.k8s.io 45 | subjects: 46 | - kind: ServiceAccount 47 | name: magtape-sa -------------------------------------------------------------------------------- /deploy/manifests/magtape-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: magtape-system 5 | labels: 6 | app: magtape 7 | -------------------------------------------------------------------------------- /deploy/manifests/magtape-opa-cm.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: magtape-opa-default-main 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | data: 9 | main: | 10 | package system 11 | 12 | import data.kubernetes.admission 13 | 14 | main = { 15 | "apiVersion": "admission.k8s.io/v1beta1", 16 | "kind": "AdmissionReview", 17 | "response": response, 18 | } 19 | 20 | default response = {"allowed": true} 21 | 22 | response = { 23 | "allowed": false, 24 | "status": { 25 | "reason": reason, 26 | }, 27 | } { 28 | reason = concat(", ", admission.deny) 29 | reason != "" 30 | } -------------------------------------------------------------------------------- 
/deploy/manifests/magtape-opa-entrypoint-cm.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: magtape-opa-entrypoint 5 | namespace: magtape-system 6 | labels: 7 | app: opa 8 | openpolicyagent.org/policy: rego 9 | 10 | data: 11 | magtape.rego: |- 12 | package magtape 13 | 14 | # This acts as an entrypoint to call all policies under "kubernetes.admission" 15 | 16 | decisions[{"policy": p, "reasons": reasons}] { 17 | 18 | data.kubernetes.admission[p].matches 19 | reasons := data.kubernetes.admission[p].deny 20 | 21 | } 22 | -------------------------------------------------------------------------------- /deploy/manifests/magtape-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: magtape-pdb 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | spec: 9 | minAvailable: 1 10 | selector: 11 | matchLabels: 12 | app: magtape 13 | -------------------------------------------------------------------------------- /deploy/manifests/magtape-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: magtape-sa 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | -------------------------------------------------------------------------------- /deploy/manifests/magtape-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: magtape-svc 5 | labels: 6 | app: magtape 7 | namespace: magtape-system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 443 12 | targetPort: 5000 13 | selector: 14 | app: magtape 15 | sessionAffinity: None 16 | type: ClusterIP 17 | -------------------------------------------------------------------------------- 
/deploy/manifests/magtape-vwc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: admissionregistration.k8s.io/v1 2 | kind: ValidatingWebhookConfiguration 3 | metadata: 4 | name: magtape-webhook 5 | labels: 6 | app: magtape 7 | webhooks: 8 | - name: magtape.webhook.k8s.t-mobile.com 9 | admissionReviewVersions: 10 | - v1 11 | sideEffects: None 12 | clientConfig: 13 | service: 14 | name: magtape-svc 15 | namespace: magtape-system 16 | path: "/" 17 | caBundle: 18 | failurePolicy: Fail 19 | rules: 20 | - operations: 21 | - CREATE 22 | - UPDATE 23 | apiGroups: 24 | - "*" 25 | apiVersions: 26 | - "*" 27 | resources: 28 | - "deployments" 29 | - "statefulsets" 30 | - "daemonsets" 31 | - "pods" 32 | - "poddisruptionbudgets" 33 | namespaceSelector: 34 | matchLabels: 35 | k8s.t-mobile.com/magtape: "enabled" 36 | -------------------------------------------------------------------------------- /deploy/overlays/development/deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: magtape 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | spec: 9 | # Set Replicas to 1 10 | replicas: 1 11 | template: 12 | spec: 13 | containers: 14 | - name: kube-mgmt 15 | image: openpolicyagent/kube-mgmt:0.10 16 | args: 17 | - "--policies=magtape-system" 18 | - "--require-policy-label=true" 19 | # Add Resources to Cache 20 | - "--replicate=apps/v1/deployments" 21 | - "--replicate=v1/services" 22 | - "--replicate-cluster=v1/namespaces" 23 | -------------------------------------------------------------------------------- /deploy/overlays/development/env-cm-patch.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: magtape-env 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | data: 9 | # Different name for the cluster 
10 | MAGTAPE_CLUSTER_NAME: "dev-cluster" 11 | # Set logging level to DEBUG 12 | MAGTAPE_LOG_LEVEL: "DEBUG" 13 | # Enable Slack Alerts 14 | MAGTAPE_SLACK_ENABLED: "TRUE" 15 | # Enable Passive Alerts 16 | MAGTAPE_SLACK_PASSIVE: "TRUE" 17 | -------------------------------------------------------------------------------- /deploy/overlays/development/hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: magtape 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | spec: 9 | maxReplicas: 3 10 | minReplicas: 1 11 | -------------------------------------------------------------------------------- /deploy/overlays/development/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../manifests 3 | 4 | patches: 5 | - env-cm-patch.yaml 6 | - deployment-patch.yaml 7 | - hpa-patch.yaml 8 | -------------------------------------------------------------------------------- /deploy/overlays/production/deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: magtape 5 | namespace: magtape-system 6 | labels: 7 | app: magtape 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-mgmt 13 | image: openpolicyagent/kube-mgmt:0.10 14 | args: 15 | - "--policies=magtape-system" 16 | - "--require-policy-label=true" 17 | # Add Resources to Cache 18 | - "--replicate=apps/v1/deployments" 19 | - "--replicate=v1/services" 20 | - "--replicate-cluster=v1/namespaces" 21 | -------------------------------------------------------------------------------- /deploy/overlays/production/env-cm-patch.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: magtape-env 5 | namespace: 
magtape-system 6 | labels: 7 | app: magtape 8 | data: 9 | # Different name for the cluster 10 | MAGTAPE_CLUSTER_NAME: "prod-cluster" 11 | # Set DENY_LEVEL to MED 12 | MAGTAPE_DENY_LEVEL: "MED" 13 | # Enable Slack Alerts 14 | MAGTAPE_SLACK_ENABLED: "TRUE" 15 | # Enable Passive Alerts 16 | MAGTAPE_SLACK_PASSIVE: "TRUE" 17 | -------------------------------------------------------------------------------- /deploy/overlays/production/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../../manifests 3 | 4 | patches: 5 | - env-cm-patch.yaml 6 | - deployment-patch.yaml 7 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | # MagTape Architecture 2 | 3 | ![magtape-architecture](/images/magtape-workflow.png) 4 | 5 | MagTape is a workload that contains a single init container and 3 runtime containers. 6 | 7 | ## Init Containers 8 | 9 | - magtape-init 10 | 11 | The MagTape init container takes care of generating the required cert/key pair for TLS and also manages the creation and patching of the Validating Webhook Configuration. The init container will handle rotation of the cert/key as needed if you utilize the default functionality, which leverages the Kubernetes certificates API. 12 | 13 | ## Runtime Containers 14 | 15 | - magtape 16 | - opa 17 | - kube-mgmt 18 | 19 | The MagTape app itself is a Python Flask application that hosts the required endpoints to receive Admission Requests from the Kubernetes API server. When it receives a request is makes a call to OPA (used as a sidecar in this case) to evaluate the request against the defined policies and produce a response. 
The response is specifically formatted to allow MagTape to assess additional logic and determine if: 20 | 21 | - If the request should be allowed or denied 22 | - If an alert should send 23 | - If a Kubernetes event should be created 24 | - If, and which, metrics should be incremented 25 | - Etc. 26 | 27 | The kube-mgmt container is setup to build a cache of kubernetes resources (as configured) and replicate them to OPA to allow for policies that include context outside of the request object itself. 28 | -------------------------------------------------------------------------------- /docs/release.md: -------------------------------------------------------------------------------- 1 | # Release Process 2 | 3 | ## Overview 4 | 5 | The release process consists of two phases: versioning and publishing. 6 | 7 | Versioning involves maintaining the following files: 8 | 9 | - **CHANGELOG.md** - this file contains a list of all the important changes in each release. 10 | - **Makefile** - the Makefile contains a few `*_VERSION` variables that define the version of a few components in the project. 11 | 12 | The steps below explain how to update these files. In addition, the repository 13 | should be tagged with the semantic version identifying the release. 14 | 15 | Publishing involves creating a new *Release* on GitHub with the relevant 16 | CHANGELOG.md snippet. 17 | 18 | ## Versioning 19 | 20 | 1. Fork and clone the repository. 21 | 22 | ```shell 23 | $ git clone git@github.com:phenixblue/magtape.git 24 | ``` 25 | 26 | 1. Set version variables within Makefile: 27 | 28 | ```makefile 29 | MAGTAPE_VERSION := 1.0.0 30 | OPA_VERSION := 0.16.1 31 | KUBE_MGMT_VERSION := 0.10 32 | ``` 33 | 34 | NOTE: This version info is used to populate the correct versions throughout several files in the repo. 35 | 36 | 1. Set the release version and generate new single install manifest: 37 | 38 | ```shell 39 | $ make set-release-version 40 | $ make build-single-manifest 41 | ``` 42 | 43 | 1. 
Update the demo install reference in the README (only for stable releases) 44 | 45 | ```shell 46 | $ kubectl apply -f https://raw.githubusercontent.com/tmobile/magtape//deploy/install.yaml 47 | ``` 48 | 49 | NOTE: The tip of the master branch may not always provide an ideal user experience so we should keep this link pointed at a stable release tag to provide a smooth experience for visitors browsing the repo. 50 | 51 | 1. Commit the changes, push to your fork, and open a PR. 52 | 53 | ```shell 54 | $ git commit -a -s -m "Prepare v release" 55 | $ git push 56 | ``` 57 | 58 | NOTE: Verify CI jobs complete successully, have the PR Reviewed, and then Merge the PR 59 | 60 | 1. Tag repository with release version and push tag. 61 | 62 | ``` 63 | $ git tag v 64 | $ git push origin --tags 65 | ``` 66 | 67 | NOTE: This should be done directly on the MagTape repo, not a fork (ie. You must be a Maintainer) 68 | 69 | ## Publishing 70 | 71 | 1. Open browser and go to https://github.com/tmobile/magtape/releases 72 | 73 | 1. Create a new release for the version. 74 | - Copy the changelog content into the message. 75 | 76 | NOTE: You may have to adjust the Markdown Headers (ie. `#`) since the Headers for a specific release in the CHANGELOG.md file are not top level (ie. They start with `##` instead of `#`) 77 | 78 | ## Container Images 79 | 80 | The `tmobile/magtape-init` and `tmobile/magtape` Docker images are automatically built and published to Docker Hub when a release is created. 81 | 82 | Images are published for the following platforms: 83 | 84 | - linux/amd64 85 | - linux/arm64 86 | - linux/ppc64le 87 | 88 | There are no manual steps involved here. 
89 | -------------------------------------------------------------------------------- /hack/.shellcheck-selection: -------------------------------------------------------------------------------- 1 | # Select all .sh files 2 | *.sh 3 | 4 | # Exclude the following files under temporary legacy exemption 5 | !hack/build-single-manifest.sh 6 | !hack/patch-ca-bundle.sh 7 | !hack/run-python-tests.sh 8 | !hack/ssl-cert-gen.sh 9 | !testing/export-env.sh 10 | -------------------------------------------------------------------------------- /hack/boilerplate/boilerplate.py.txt: -------------------------------------------------------------------------------- 1 | # Copyright YEAR T-Mobile, USA, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | # Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of 16 | # its contributors may be used to endorse or promote products derived from this 17 | # software without specific prior written permission. -------------------------------------------------------------------------------- /hack/boilerplate/boilerplate.sh.txt: -------------------------------------------------------------------------------- 1 | # Copyright YEAR T-Mobile, USA, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | # Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of 16 | # its contributors may be used to endorse or promote products derived from this 17 | # software without specific prior written permission. -------------------------------------------------------------------------------- /hack/lint-shell.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2020 T-Mobile, USA, Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | # Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of 18 | # its contributors may be used to endorse or promote products derived from this 19 | # software without specific prior written permission. 
################################################################################
#### Variables, Arrays, and Hashes #############################################
################################################################################

# Mode selector: "ci" restricts linting via the selection file below.
RUN_TYPE="${1:-}"
SELECTION_FILE="hack/.shellcheck-selection"

################################################################################
#### Main ######################################################################
################################################################################

# check to see if being run for ci, if yes exclude legacy scripts from linting
if [[ "${RUN_TYPE}" == "ci" ]]; then

  # Quote the expansion so an unexpected space in the path cannot word-split
  # the argument (shellcheck SC2086).
  files_to_check="$(git ls-files --exclude-from="${SELECTION_FILE}" --ignored)"

else

  files_to_check="$(git ls-files --exclude='*.sh' --ignored)"

fi

# variable to count up files that did not lint cleanly
files_with_errors=0

# Intentionally unquoted: ${files_to_check} is a newline-separated list and we
# rely on word splitting to iterate it (assumes no spaces in tracked paths).
for file in ${files_to_check}; do

  # run shellcheck, if it doesn't exit clean increment the number of files with errors
  shellcheck --color=auto "${file}" || (( files_with_errors += 1 ))

done

# if any of the files didn't come back clean from shellcheck exit with status 1
if (( files_with_errors > 0 )); then

  exit 1

fi
--------------------------------------------------------------------------------
/hack/patch-ca-bundle.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Copyright 2020 T-Mobile, USA, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This is based on existing work:
# https://github.com/morvencao/kube-mutating-webhook-tutorial/blob/master/deployment/webhook-patch-ca-bundle.sh

# Resolve the repository root. Quote every expansion and join with && so a
# failed cd cannot silently leave pwd running in the wrong directory
# (shellcheck SC2086/SC2164).
# NOTE(review): ROOT is not referenced later in this script — presumably kept
# for callers; confirm before removing.
ROOT="$(cd "$(dirname "${0}")/../.." && pwd)"

set -o errexit
set -o nounset
set -o pipefail

# Assign first, then export: "export VAR=$(cmd)" masks cmd's exit status
# (SC2155), which would let a kubectl failure slip past errexit above.
CA_BUNDLE="$(kubectl get configmap -n kube-system extension-apiserver-authentication -o=jsonpath='{.data.client-ca-file}' | base64 | tr -d '\n')"
export CA_BUNDLE

# Substitute the CA bundle into the manifest read from stdin; fall back to sed
# when envsubst is unavailable.
# NOTE(review): envsubst replaces ${CA_BUNDLE} syntax while the sed fallback
# replaces the literal token ==CA_BUNDLE== — the input template must carry the
# matching placeholder for the branch taken; verify against the manifests.
if command -v envsubst >/dev/null 2>&1; then
    envsubst
else
    sed -e "s|==CA_BUNDLE==|${CA_BUNDLE}|g"
fi
--------------------------------------------------------------------------------
/hack/run-python-tests.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Copyright 2020 T-Mobile, USA, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.

# Fail fast: without errexit, a unit-test failure from "coverage run" is masked
# by the exit status of the final "coverage report", letting CI pass even
# though tests failed.
set -o errexit
set -o pipefail

# Assumes the script is invoked from the repository root — TODO confirm.
source ./testing/export-env.sh

# Run tests and get coverage
coverage run -m unittest discover -v -s app/magtape/test/

# Generate and output coverage report
coverage report --include app/magtape/magtape.py
--------------------------------------------------------------------------------
/hack/run-rego-lint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Copyright 2020 T-Mobile, USA, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc.
# nor the names of
# its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.

# Guard against a missing opa binary: otherwise the command substitution below
# would yield empty output and the script would falsely report that all Rego
# files are formatted correctly (exit 0).
if ! command -v opa >/dev/null 2>&1; then
  echo "ERROR: \"opa\" binary not found in PATH" >&2
  exit 1
fi

# Check if there are policy files that need to be formatted
unformatted_policies=$(opa fmt -l policies/)

if [ -z "${unformatted_policies}" ]; then

  echo "Rego files are formatted correctly."

else

  echo "The following Rego files need to be formatted. Please run \"make lint-rego\""
  echo
  echo "${unformatted_policies}"
  echo
  # Show the formatting diff so the CI log explains exactly what to change.
  opa fmt -d policies/
  echo
  exit 1

fi
--------------------------------------------------------------------------------
/hack/ssl-cert-gen.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Copyright 2020 T-Mobile, USA, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
20 | # 21 | # This is based on existing work: 22 | # https://github.com/morvencao/kube-mutating-webhook-tutorial/blob/master/deployment/webhook-create-signed-cert.sh 23 | 24 | set -e 25 | 26 | usage() { 27 | cat <> ${tmpdir}/csr.conf 79 | [req] 80 | req_extensions = v3_req 81 | distinguished_name = req_distinguished_name 82 | [req_distinguished_name] 83 | [ v3_req ] 84 | basicConstraints = CA:FALSE 85 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 86 | extendedKeyUsage = serverAuth 87 | subjectAltName = @alt_names 88 | [alt_names] 89 | DNS.1 = ${service} 90 | DNS.2 = ${service}.${namespace} 91 | DNS.3 = ${service}.${namespace}.svc 92 | EOF 93 | 94 | openssl genrsa -out ${tmpdir}/server-key.pem 2048 95 | openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf 96 | 97 | # clean-up any previously created CSR for our service. Ignore errors if not present. 98 | kubectl delete csr ${csrName} 2>/dev/null || true 99 | 100 | # create server cert/key CSR and send to k8s API 101 | cat <&2 136 | exit 1 137 | fi 138 | echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem 139 | 140 | 141 | # create the secret with CA cert and server cert/key 142 | kubectl create secret generic ${secret} \ 143 | --from-file=key.pem=${tmpdir}/server-key.pem \ 144 | --from-file=cert.pem=${tmpdir}/server-cert.pem \ 145 | --dry-run -o yaml | 146 | kubectl -n ${namespace} apply -f - 147 | -------------------------------------------------------------------------------- /hack/verify-boilerplate.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2020 T-Mobile, USA, Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# This is based on existing work from the Kubernetes project:
# https://github.com/kubernetes/kubernetes/blob/master/hack/verify-boilerplate.sh

set -o errexit
set -o nounset
set -o pipefail

MAGTAPE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..

boilerDir="${MAGTAPE_ROOT}/hack/boilerplate"
boiler="${boilerDir}/boilerplate.py"

# Capture the checker's output via command substitution instead of process
# substitution: with "done < <(cmd)" a crash of cmd is invisible to
# errexit/pipefail and the script would exit 0 having checked nothing. Here a
# non-zero exit from the checker aborts the script.
# NOTE(review): assumes boilerplate.py exits 0 even when it finds offending
# files (it reports them on stdout) — confirm against hack/boilerplate/boilerplate.py.
boiler_output="$("${boiler}" "$@")"

files_need_boilerplate=()
while IFS= read -r line; do
  # Skip the single empty line a here-string yields when output is empty.
  if [[ -n "${line}" ]]; then
    files_need_boilerplate+=( "${line}" )
  fi
done <<< "${boiler_output}"

# Run boilerplate check
if [[ ${#files_need_boilerplate[@]} -gt 0 ]]; then
  for file in "${files_need_boilerplate[@]}"; do
    echo "Boilerplate header is wrong for: ${file}" >&2
  done

  exit 1
fi
--------------------------------------------------------------------------------
/images/magtape-logo-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tmobile/magtape/046017a1d2d6f090e7d74a4f7776375e4c20ed8a/images/magtape-logo-1.png
--------------------------------------------------------------------------------
/images/magtape-logo-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tmobile/magtape/046017a1d2d6f090e7d74a4f7776375e4c20ed8a/images/magtape-logo-2.png -------------------------------------------------------------------------------- /images/magtape-workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmobile/magtape/046017a1d2d6f090e7d74a4f7776375e4c20ed8a/images/magtape-workflow.png -------------------------------------------------------------------------------- /images/slack-alert-deny-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmobile/magtape/046017a1d2d6f090e7d74a4f7776375e4c20ed8a/images/slack-alert-deny-screenshot.png -------------------------------------------------------------------------------- /images/slack-alert-fail-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmobile/magtape/046017a1d2d6f090e7d74a4f7776375e4c20ed8a/images/slack-alert-fail-screenshot.png -------------------------------------------------------------------------------- /metrics/prometheus/magtape-servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | annotations: 5 | labels: 6 | component: k8s 7 | k8s-app: magtape 8 | name: magtape 9 | namespace: monitoring 10 | spec: 11 | endpoints: 12 | - interval: 30s 13 | port: https 14 | scheme: https 15 | tlsConfig: 16 | insecureSkipVerify: true 17 | namespaceSelector: 18 | matchNames: 19 | - magtape-system 20 | selector: 21 | matchLabels: 22 | app: magtape 23 | -------------------------------------------------------------------------------- /policies/policy-emptydir-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_emptydir 2 | 
3 | policy_metadata = { 4 | # Set MagTape Policy Info 5 | "name": "policy-emptydir", 6 | "severity": "MED", 7 | "errcode": "MT1009", 8 | "targets": {"Pod"}, 9 | } 10 | 11 | kind = input.request.kind.kind 12 | 13 | sizeLimit = 100 14 | 15 | matches { 16 | # Verify request object type matches targets 17 | policy_metadata.targets[kind] 18 | } 19 | 20 | deny[info] { 21 | # Find volume spec 22 | volumes := input.request.object.spec.volumes 23 | exceed_err_msg := sprintf("is greater than %v Megabytes", [sizeLimit]) 24 | 25 | # Checks emptydir configuration 26 | volume := volumes[_] 27 | name := volume.name 28 | emptydir_state := check_emptydir(volume, exceed_err_msg, sizeLimit) 29 | 30 | # Build message to return 31 | msg := sprintf("[FAIL] %v - Size limit of emptyDir volume \"%v\" %v (%v)", [policy_metadata.severity, name, emptydir_state, policy_metadata.errcode]) 32 | 33 | info := { 34 | "name": policy_metadata.name, 35 | "severity": policy_metadata.severity, 36 | "errcode": policy_metadata.errcode, 37 | "msg": msg, 38 | } 39 | } 40 | 41 | # check_emptydir accepts three values (volume, exceed_err_msg, sizeLimit) 42 | # returns whether there the sizeLimit configuration for emptyDir is present, in megaBytes, and below the sizeLimit set above 43 | check_emptydir(volume, exceed_err_msg, sizeLimit) = "is not set" { 44 | volume.emptyDir 45 | not volume.emptyDir.sizeLimit 46 | } 47 | 48 | else = "is not in Megabytes" { 49 | volume.emptyDir.sizeLimit 50 | not endswith(trim_space(volume.emptyDir.sizeLimit), "M") 51 | } 52 | 53 | else = exceed_err_msg { 54 | volume.emptyDir.sizeLimit 55 | limit := to_number(trim(trim_space(volume.emptyDir.sizeLimit), "M")) 56 | limit > sizeLimit 57 | } 58 | -------------------------------------------------------------------------------- /policies/policy-host-path-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_host_path 2 | 3 | policy_metadata = { 4 | # Set 
MagTape Policy Info 5 | "name": "policy-host-path", 6 | "severity": "MED", 7 | "errcode": "MT1010", 8 | "targets": {"Pod"}, 9 | } 10 | 11 | kind = input.request.kind.kind 12 | 13 | matches { 14 | # Verify request object type matches targets 15 | policy_metadata.targets[kind] 16 | } 17 | 18 | deny[info] { 19 | # Find volume spec 20 | volumes := input.request.object.spec.volumes 21 | 22 | # Check for hostPath in each volume spec 23 | volume := volumes[_] 24 | name := volume.name 25 | hostpath_state := check_hostpath(volume) 26 | 27 | # Build message to return 28 | msg := sprintf("[FAIL] %v - %v for volume \"%v\" (%v)", [policy_metadata.severity, hostpath_state, name, policy_metadata.errcode]) 29 | 30 | info := { 31 | "name": policy_metadata.name, 32 | "severity": policy_metadata.severity, 33 | "errcode": policy_metadata.errcode, 34 | "msg": msg, 35 | } 36 | } 37 | 38 | # check_hostpath accepts a value (volume) 39 | # returns whether there is a hostPath configured in the volume 40 | check_hostpath(volume) = "hostPath is configured" { 41 | volume.hostPath 42 | } 43 | -------------------------------------------------------------------------------- /policies/policy-host-port-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_hostport 2 | 3 | policy_metadata = { 4 | # Set MagTape Policy Info 5 | "name": "policy-hostport", 6 | "severity": "HIGH", 7 | "errcode": "MT1008", 8 | "targets": {"Pod"}, 9 | } 10 | 11 | kind = input.request.kind.kind 12 | 13 | matches { 14 | # Verify request object type matches targets 15 | policy_metadata.targets[kind] 16 | } 17 | 18 | deny[info] { 19 | # Find container spec 20 | # Since only target is Pod, containers will always be found in same place 21 | containers := input.request.object.spec.containers 22 | 23 | # Check for hostPort in each container spec 24 | container := containers[_] 25 | name := container.name 26 | port_present := check_hostport(container) 27 
| 28 | # Build message to return 29 | msg := sprintf("[FAIL] %v - %v for container \"%v\" (%v)", [policy_metadata.severity, port_present, name, policy_metadata.errcode]) 30 | 31 | info := { 32 | "name": policy_metadata.name, 33 | "severity": policy_metadata.severity, 34 | "errcode": policy_metadata.errcode, 35 | "msg": msg, 36 | } 37 | } 38 | 39 | # check_hostport accepts a value (container) 40 | # returns whether the hostPort is found in config 41 | check_hostport(container) = "hostPort is configured" { 42 | ports := container.ports[_] 43 | ports.hostPort 44 | } 45 | -------------------------------------------------------------------------------- /policies/policy-liveness-probe-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_liveness_probe 2 | 3 | policy_metadata = { 4 | # Set MagTape Policy Info 5 | "name": "policy-liveness-probe", 6 | "severity": "LOW", 7 | "errcode": "MT1001", 8 | "targets": {"Deployment", "StatefulSet", "DaemonSet", "Pod"}, 9 | } 10 | 11 | servicetype = input.request.kind.kind 12 | 13 | matches { 14 | # Verify request object type matches targets 15 | policy_metadata.targets[servicetype] 16 | } 17 | 18 | deny[info] { 19 | # Find container spec 20 | containers := find_containers(servicetype, policy_metadata) 21 | 22 | # Check for livenessProbe in each container spec 23 | container := containers[_] 24 | name := container.name 25 | not container.livenessProbe 26 | 27 | # Build message to return 28 | msg = sprintf("[FAIL] %v - Liveness Probe missing for container \"%v\" (%v)", [policy_metadata.severity, name, policy_metadata.errcode]) 29 | 30 | info := { 31 | "name": policy_metadata.name, 32 | "severity": policy_metadata.severity, 33 | "errcode": policy_metadata.errcode, 34 | "msg": msg, 35 | } 36 | } 37 | 38 | # find_containers accepts a value (k8s object type) and returns the container spec 39 | find_containers(type, metadata) = 
input.request.object.spec.containers { 40 | type == "Pod" 41 | } 42 | 43 | else = input.request.object.spec.template.spec.containers { 44 | metadata.targets[type] 45 | } 46 | -------------------------------------------------------------------------------- /policies/policy-node-port-range-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_nodeport_range 2 | 3 | import data.kubernetes.namespaces 4 | 5 | policy_metadata = { 6 | # Set MagTape Policy Info 7 | "name": "policy-nodeport-range", 8 | "severity": "MED", 9 | "errcode": "MT2002", 10 | "targets": {"Service"}, 11 | } 12 | 13 | kind = input.request.kind.kind 14 | 15 | svc_type = input.request.object.spec.type 16 | 17 | exception_val = "na" 18 | 19 | matches { 20 | # Verify request object type matches targets 21 | # Verify service is of type NodePort 22 | policy_metadata.targets[kind] 23 | svc_type == "NodePort" 24 | } 25 | 26 | # Generate violation if nodePort Range is not within allocated range 27 | deny[info] { 28 | # ns_name: namespace connected to service trying to be deployed 29 | # ports: where the hostport config is found within the service 30 | # np_range: pull the information connected to the nodeportRange label in the namespace yaml config 31 | ns_name := input.request.namespace 32 | service_name := input.request.object.metadata.name 33 | ports := input.request.object.spec.ports 34 | 35 | port := ports[_] 36 | np := port.nodePort 37 | np_range := data.kubernetes.namespaces[ns_name].metadata.annotations["k8s.t-mobile.com/nodeportRange"] 38 | port_in_range := check_nodeport_range(np, np_range) 39 | 40 | # Build message to return 41 | msg := sprintf("[FAIL] %v - nodePort %v %v for Service \"%v\" (%v)", [policy_metadata.severity, np, port_in_range, service_name, policy_metadata.errcode]) 42 | 43 | info := { 44 | "name": policy_metadata.name, 45 | "severity": policy_metadata.severity, 46 | "errcode": policy_metadata.errcode, 47 
| "msg": msg, 48 | } 49 | } 50 | 51 | # Generate violation if annotation contains anything besides #, commas, hyphen, or exception_val 52 | deny[info] { 53 | # ns_name: namespace connected to service trying to be deployed 54 | # ports: where the hostport config is found within the service 55 | # np_range: pull the information connected to the nodeportRange label in the namespace yaml config 56 | ns_name := input.request.namespace 57 | service_name := input.request.object.metadata.name 58 | ports := input.request.object.spec.ports 59 | 60 | port := ports[_] 61 | np_range := data.kubernetes.namespaces[ns_name].metadata.annotations["k8s.t-mobile.com/nodeportRange"] 62 | annotation_valid := check_annotation(np_range, exception_val) 63 | 64 | # Build message to return 65 | msg := sprintf("[FAIL] %v - Invalid data in nodePort annotation in \"%v\" namespace (%v)", [policy_metadata.severity, ns_name, policy_metadata.errcode]) 66 | info := { 67 | "name": policy_metadata.name, 68 | "severity": policy_metadata.severity, 69 | "errcode": policy_metadata.errcode, 70 | "msg": msg, 71 | } 72 | } 73 | 74 | # Check_annotation accepts two values (np, np_range) 75 | # Returns whether the nodeport range contains unknown symbols and is not the exception value 76 | check_annotation(np_range, exception_val) { 77 | not re_match(`^[-, ]*[0-9 ]+(?:-[0-9 ]+)?(,[0-9 ]+(?:-[0-9 ]+)?)*[-, ]*$`, trim_space(np_range)) 78 | lower(trim_space(np_range)) != exception_val 79 | } 80 | 81 | # Check_nodeport_range accepts two values (np, np_range) 82 | # Returns whether the nodeport(np) is within the range(np_range) 83 | check_nodeport_range(np, np_range) = "is out of defined range" { 84 | contains(np_range, "-") 85 | contains(np_range, ",") 86 | re_match(`^[-, ]*[0-9 ]+(?:-[0-9 ]+)?(,[0-9 ]+(?:-[0-9 ]+)?)*[-, ]*$`, trim_space(np_range)) 87 | range_split := split(np_range, ",") 88 | not range_matches_any(np, range_split) 89 | } 90 | 91 | else = "is out of defined range" { 92 | contains(np_range, "-") 93 | 
not contains(np_range, ",") 94 | re_match(`^[-, ]*[0-9 ]+(?:-[0-9 ]+)?(,[0-9 ]+(?:-[0-9 ]+)?)*[-, ]*$`, trim_space(np_range)) 95 | not range_matches(np, np_range) 96 | } 97 | 98 | else = "is out of defined range" { 99 | contains(np_range, ",") 100 | not contains(np_range, "-") 101 | re_match(`^[-, ]*[0-9 ]+(?:-[0-9 ]+)?(,[0-9 ]+(?:-[0-9 ]+)?)*[-, ]*$`, trim_space(np_range)) 102 | range_split := split(np_range, ",") 103 | not range_matches_any(np, range_split) 104 | } 105 | 106 | else = "is out of defined range" { 107 | not contains(np_range, ",") 108 | not contains(np_range, "-") 109 | re_match(`^\d+$`, trim_space(np_range)) 110 | to_number(trim_space(np_range)) != to_number(np) 111 | } 112 | 113 | range_matches_any(npNum, list) { 114 | range_matches(npNum, list[_]) 115 | } 116 | 117 | # Checks if nodePort is in comma separated list 118 | range_matches(npNum, list) { 119 | not contains(list, "-") 120 | not contains(list, ",") 121 | count(trim_space(list)) > 0 122 | 123 | to_number(trim_space(list)) == to_number(npNum) 124 | } 125 | 126 | # Checks if nodePort is within range 127 | range_matches(npNum, list) { 128 | contains(list, "-") 129 | range_split := split(list, "-") 130 | count(trim_space(range_split[0])) > 0 131 | count(trim_space(range_split[1])) > 0 132 | 133 | to_number(npNum) >= to_number(trim_space(range_split[0])) 134 | to_number(npNum) <= to_number(trim_space(range_split[1])) 135 | } 136 | -------------------------------------------------------------------------------- /policies/policy-pdb-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_pdb 2 | 3 | policy_metadata = { 4 | # Set MagTape Policy Info 5 | "name": "policy-pdb", 6 | "severity": "HIGH", 7 | "errcode": "MT1005", 8 | "targets": {"PodDisruptionBudget"}, 9 | } 10 | 11 | servicetype = input.request.kind.kind 12 | 13 | matches { 14 | # Verify request object type matches targets 15 | 
policy_metadata.targets[servicetype] 16 | } 17 | 18 | limits = { 19 | "minAvailable": [0, 66], 20 | "maxUnavailable": [33, 100], 21 | } 22 | 23 | # Generates a violation if the input doesn't specify a percentage (e.g., they used an absolute.) 24 | deny[info] { 25 | # Get limit type 26 | limits[name] 27 | 28 | # Get limit value 29 | value := input.request.object.spec[name] 30 | 31 | # Verify the value is a percentage 32 | [_, false] = get_percentage(value) 33 | 34 | msg := sprintf("[FAIL] %v - Value \"%v\" for \"%v\" should be a Percentage, not an Integer (%v)", [policy_metadata.severity, value, name, policy_metadata.errcode]) 35 | 36 | info := { 37 | "name": policy_metadata.name, 38 | "severity": policy_metadata.severity, 39 | "errcode": policy_metadata.errcode, 40 | "msg": msg, 41 | } 42 | } 43 | 44 | # Generates a violation if the input specifes a percentage out-of-range. 45 | deny[info] { 46 | # Get limit range 47 | range := limits[name] 48 | 49 | # Get the percentage value 50 | [percent, true] = get_percentage(input.request.object.spec[name]) 51 | 52 | # Verify the percentage is within range 53 | not within_range(percent, range) 54 | 55 | msg := sprintf("[FAIL] %v - Value (%v%%) for \"%v\" not within range %v%%-%v%% (%v)", [policy_metadata.severity, percent, name, range[0], range[1], policy_metadata.errcode]) 56 | 57 | info := { 58 | "name": policy_metadata.name, 59 | "severity": policy_metadata.severity, 60 | "errcode": policy_metadata.errcode, 61 | "msg": msg, 62 | } 63 | } 64 | 65 | within_range(x, [_min, _max]) { 66 | x >= _min 67 | x <= _max 68 | } 69 | 70 | # get_percentage accepts a value and generates a tuple containing the 71 | # numeric percentage value and a boolean value indicating whether the 72 | # input value could be converted to a numeric percentage. 
#
# Examples:
#
#   get_percentage(50) == [0, false]
#   get_percentage("50") == [0, false]
#   get_percentage("50%") == [50, true]
# Non-string inputs (e.g. absolute integers) are not percentages.
get_percentage(value) = [0, false] {
	not is_string(value)
}

# Strings without a "%" suffix are not percentages either.
else = [0, false] {
	not contains(value, "%")
}

# Strip the "%" and convert the remainder to a number.
else = [percent, true] {
	percent := to_number(trim(value, "%"))
}
--------------------------------------------------------------------------------
/policies/policy-port-name-mismatch.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_port_name_mismatch

policy_metadata = {
	# Set MagTape Policy Info
	"name": "policy-port-name-mismatch",
	"severity": "HIGH",
	"errcode": "MT1006",
	"targets": {"Service"},
}

servicetype = input.request.kind.kind

svc_name := input.request.object.metadata.name

matches {
	# Verify request object type matches targets
	policy_metadata.targets[servicetype]
}

# Deny when a Service port is logically named "http" but bound to port 443
# (the conventional HTTPS port), which suggests a name/number mismatch.
deny[info] {
	# Find service ports
	ports := input.request.object.spec.ports

	# Check all port spec's
	port := ports[_]
	port_name := port.name
	port_number := port.port

	# Check for mismatch between logical port name and port number in service spec
	port_name == "http"
	port_number == 443

	msg = sprintf("[FAIL] %v - Logical port name \"%v\" mismatch with port number \"%v\" for service \"%v\" (%v)", [policy_metadata.severity, port_name, port_number, svc_name, policy_metadata.errcode])

	info := {
		"name": policy_metadata.name,
		"severity": policy_metadata.severity,
		"errcode": policy_metadata.errcode,
		"msg": msg,
	}
}
--------------------------------------------------------------------------------
/policies/policy-privileged-pod-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_privileged_pod

policy_metadata = {
	# Set MagTape Policy Info
	"name": "policy-privileged-pod",
	"severity": "HIGH",
	"errcode": "MT2001",
	"targets": {"Deployment", "StatefulSet", "DaemonSet", "Pod"},
}

servicetype = input.request.kind.kind

matches {
	# Verify request object type matches targets
	policy_metadata.targets[servicetype]
}

deny[info] {
	# Find container spec
	containers := find_containers(servicetype, policy_metadata)

	# Check for Privileged SecurityContext in container spec
	container := containers[_]
	name := container.name
	container.securityContext.privileged

	msg = sprintf("[FAIL] %v - Found privileged Security Context for container \"%v\" (%v)", [policy_metadata.severity, name, policy_metadata.errcode])

	info := {
		"name": policy_metadata.name,
		"severity": policy_metadata.severity,
		"errcode": policy_metadata.errcode,
		"msg": msg,
	}
}

# find_containers accepts a value (k8s object type) and returns the container spec
find_containers(type, metadata) = input.request.object.spec.containers {
	type == "Pod"
}

else = input.request.object.spec.template.spec.containers {
	metadata.targets[type]
}
--------------------------------------------------------------------------------
/policies/policy-readiness-probe-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_readiness_probe

policy_metadata = {
	# Set MagTape Policy Info
	"name": "policy-readiness-probe",
	"severity": "LOW",
	"errcode": "MT1002",
	"targets": {"Deployment", "StatefulSet", "DaemonSet", "Pod"},
}

servicetype = input.request.kind.kind

matches {
	# Verify request object type matches targets
	policy_metadata.targets[servicetype]
}
deny[info] {
	# Find container spec
	containers := find_containers(servicetype, policy_metadata)

	# Check for ReadinessProbe in each container spec
	container := containers[_]
	name := container.name
	not container.readinessProbe

	# Build message to return
	msg = sprintf("[FAIL] %v - Readiness Probe missing for container \"%v\" (%v)", [policy_metadata.severity, name, policy_metadata.errcode])

	info := {
		"name": policy_metadata.name,
		"severity": policy_metadata.severity,
		"errcode": policy_metadata.errcode,
		"msg": msg,
	}
}

# find_containers accepts a value (k8s object type) and returns the container spec
find_containers(type, metadata) = input.request.object.spec.containers {
	type == "Pod"
}

else = input.request.object.spec.template.spec.containers {
	metadata.targets[type]
}
--------------------------------------------------------------------------------
/policies/policy-resource-limits-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_resource_limits

policy_metadata = {
	# Set MagTape Policy Info
	"name": "policy-resource-limits",
	"severity": "LOW",
	"errcode": "MT1003",
	"targets": {"Deployment", "StatefulSet", "DaemonSet", "Pod"},
}

servicetype = input.request.kind.kind

matches {
	# Verify request object type matches targets
	policy_metadata.targets[servicetype]
}

deny[info] {
	# Find container spec
	containers := find_containers(servicetype, policy_metadata)

	# Check for resource limits in each container spec
	container := containers[_]
	name := container.name
	resource_type := get_resource_type(container)

	# Build message to return
	msg = sprintf("[FAIL] %v - Resource limits missing (%v) for container \"%v\" (%v)",
[policy_metadata.severity, resource_type, name, policy_metadata.errcode])

	info := {
		"name": policy_metadata.name,
		"severity": policy_metadata.severity,
		"errcode": policy_metadata.errcode,
		"msg": msg,
	}
}

# find_containers accepts a value (k8s object type) and returns the container spec
find_containers(type, metadata) = input.request.object.spec.containers {
	type == "Pod"
}

else = input.request.object.spec.template.spec.containers {
	metadata.targets[type]
}

# get_resource_type accepts a value (containers) and returns the missing resource type based on missing limits
get_resource_type(container) = "CPU/MEM" {
	not container.resources.limits.cpu
	not container.resources.limits.memory
}

else = "CPU" {
	not container.resources.limits.cpu
}

else = "MEM" {
	not container.resources.limits.memory
}
--------------------------------------------------------------------------------
/policies/policy-resource-requests-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_resource_requests

policy_metadata = {
	# Set MagTape Policy Info
	"name": "policy-resource-requests",
	"severity": "LOW",
	"errcode": "MT1004",
	"targets": {"Deployment", "StatefulSet", "DaemonSet", "Pod"},
}

servicetype = input.request.kind.kind

matches {
	# Verify request object type matches targets
	policy_metadata.targets[servicetype]
}

deny[info] {
	# Find container spec
	containers := find_containers(servicetype, policy_metadata)

	# Check for resource requests in each container spec
	container := containers[_]
	name := container.name
	resource_type := get_resource_type(container)

	# Build message to return
	msg := sprintf("[FAIL] %v - Resource requests missing (%v) for
container \"%v\" (%v)", [policy_metadata.severity, resource_type, name, policy_metadata.errcode])

	info := {
		"name": policy_metadata.name,
		"severity": policy_metadata.severity,
		"errcode": policy_metadata.errcode,
		"msg": msg,
	}
}

# find_containers accepts a value (k8s object type) and returns the container spec
find_containers(type, metadata) = input.request.object.spec.containers {
	type == "Pod"
}

else = input.request.object.spec.template.spec.containers {
	metadata.targets[type]
}

# get_resource_type accepts a value (containers) and returns the missing resource type based on missing limits
get_resource_type(container) = "CPU/MEM" {
	not container.resources.requests.cpu
	not container.resources.requests.memory
}

else = "CPU" {
	not container.resources.requests.cpu
}

else = "MEM" {
	not container.resources.requests.memory
}
--------------------------------------------------------------------------------
/policies/policy-singleton-pod-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_singleton_pod_check

policy_metadata = {
	# Set MagTape Policy Info
	"name": "policy-singleton-pod-check",
	"severity": "LOW",
	"errcode": "MT1007",
	"targets": {"Pod"},
}

kind = input.request.kind.kind

matches {
	# Verify request object type matches targets
	policy_metadata.targets[kind]
}

deny[info] {
	name := input.request.object.metadata.name

	# Check for ownerReferences, will only be present if something is dependent on the Pod
	not input.request.object.metadata.ownerReferences

	# Build message to return
	msg := sprintf("[FAIL] %v - \"%v\" is a singleton pod. 
(%v)", [policy_metadata.severity, name, policy_metadata.errcode])

	info := {
		"name": policy_metadata.name,
		"severity": policy_metadata.severity,
		"errcode": policy_metadata.errcode,
		"msg": msg,
	}
}
--------------------------------------------------------------------------------
/policies/test/test_policy-emptydir-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_emptydir

test_emptydir_allowed {
	result = deny with input as data.mock.test_emptydir_allowed
	count(result) == 0
}

test_emptydir_large_denied {
	result = deny[_] with input as data.mock.test_emptydir_large_denied
	result == {
		"errcode": "MT1009",
		"msg": "[FAIL] MED - Size limit of emptyDir volume \"default-token\" is greater than 100 Megabytes (MT1009)",
		"name": "policy-emptydir",
		"severity": "MED",
	}
}

test_emptydir_wrong_ending_denied {
	result = deny[_] with input as data.mock.test_emptydir_wrong_ending_denied
	result == {
		"errcode": "MT1009",
		"msg": "[FAIL] MED - Size limit of emptyDir volume \"default-token\" is not in Megabytes (MT1009)",
		"name": "policy-emptydir",
		"severity": "MED",
	}
}

test_emptydir_not_set_denied {
	result = deny[_] with input as data.mock.test_emptydir_not_set_denied
	result == {
		"errcode": "MT1009",
		"msg": "[FAIL] MED - Size limit of emptyDir volume \"default-token\" is not set (MT1009)",
		"name": "policy-emptydir",
		"severity": "MED",
	}
}
--------------------------------------------------------------------------------
/policies/test/test_policy-host-path-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_host_path

test_host_path_allowed {
	result = deny with input as data.mock.test_host_path_allowed
count(result) == 0
}

test_host_path_denied {
	result = deny[_] with input as data.mock.test_host_path_denied
	# Use equality assertion (==) for consistency with the other policy test files
	result == {
		"errcode": "MT1010",
		"msg": "[FAIL] MED - hostPath is configured for volume \"default-token\" (MT1010)",
		"name": "policy-host-path",
		"severity": "MED",
	}
}
--------------------------------------------------------------------------------
/policies/test/test_policy-host-path-check_mock.json:
--------------------------------------------------------------------------------
{
    "mock": {
        "test_host_path_allowed": {
            "apiVersion": "admission.k8s.io/v1beta1",
            "kind": "AdmissionReview",
            "request": {
                "dryRun": false,
                "kind": {
                    "group": "",
                    "kind": "Pod",
                    "version": "v1"
                },
                "namespace": "test1",
                "object": {
                    "apiVersion": "v1",
                    "kind": "Pod",
                    "metadata": {
                        "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\",\"ports\":[{\"containerPort\":8080,\"hostPort\":8080}]}],\"volumes\":[{\"hostPath\":{\"path\":\"/data\"},\"name\":\"default-token\"}]}}\n"},
                        "creationTimestamp": "2020-02-25T19:23:08Z",
                        "labels": {"run": "toolbox"},
                        "name": "toolbox",
                        "namespace": "test1",
                        "uid": "413e9d97-5804-11ea-b876-005056a7db08"
                    },
                    "spec": {
                        "containers": [{
                            "command": [
                                "sleep",
                                "360000"
                            ],
                            "image": "jmsearcy/twrtools",
                            "imagePullPolicy": "Always",
                            "name": "toolbox",
                            "resources": {},
                            "terminationMessagePath": "/dev/termination-log",
                            "terminationMessagePolicy": "File",
                            "volumeMounts": [{
                                "mountPath":
"/var/run/secrets/kubernetes.io/serviceaccount", 39 | "name": "default-token-q999w", 40 | "readOnly": true 41 | }] 42 | }], 43 | "volumes": [{ 44 | "name": "default-token-q999w", 45 | "secret": {"secretName": "default-token-q999w"} 46 | }] 47 | }, 48 | "status": { 49 | "phase": "Pending", 50 | "qosClass": "BestEffort" 51 | } 52 | }, 53 | "oldObject": null, 54 | "operation": "CREATE", 55 | "resource": { 56 | "group": "", 57 | "resource": "pods", 58 | "version": "v1" 59 | }, 60 | "uid": "413ea31f-5804-11ea-b876-005056a7db08", 61 | "userInfo": { 62 | "groups": ["group1"], 63 | "username": "user1" 64 | } 65 | } 66 | }, 67 | "test_host_path_denied": { 68 | "apiVersion": "admission.k8s.io/v1beta1", 69 | "kind": "AdmissionReview", 70 | "request": { 71 | "dryRun": false, 72 | "kind": { 73 | "group": "", 74 | "kind": "Pod", 75 | "version": "v1" 76 | }, 77 | "namespace": "test1", 78 | "object": { 79 | "apiVersion": "v1", 80 | "kind": "Pod", 81 | "metadata": { 82 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\",\"ports\":[{\"containerPort\":8080,\"hostPort\":8080}]}],\"volumes\":[{\"hostPath\":{\"path\":\"/data\"},\"name\":\"default-token\"}]}}\n"}, 83 | "creationTimestamp": "2020-02-25T19:23:08Z", 84 | "labels": {"run": "toolbox"}, 85 | "name": "toolbox", 86 | "namespace": "test1", 87 | "uid": "413e9d97-5804-11ea-b876-005056a7db08" 88 | }, 89 | "spec": { 90 | "containers": [{ 91 | "command": [ 92 | "sleep", 93 | "360000" 94 | ], 95 | "image": "jmsearcy/twrtools", 96 | "imagePullPolicy": "Always", 97 | "name": "toolbox", 98 | "resources": {}, 99 | "terminationMessagePath": "/dev/termination-log", 100 | "terminationMessagePolicy": "File", 101 | "volumeMounts": [{ 
102 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 103 | "name": "default-token-q999w", 104 | "readOnly": true 105 | }] 106 | }], 107 | "volumes": [ 108 | { 109 | "hostPath": { 110 | "path": "/data", 111 | "type": "" 112 | }, 113 | "name": "default-token" 114 | }, 115 | { 116 | "name": "default-token-q999w", 117 | "secret": {"secretName": "default-token-q999w"} 118 | } 119 | ] 120 | }, 121 | "status": { 122 | "phase": "Pending", 123 | "qosClass": "BestEffort" 124 | } 125 | }, 126 | "oldObject": null, 127 | "operation": "CREATE", 128 | "resource": { 129 | "group": "", 130 | "resource": "pods", 131 | "version": "v1" 132 | }, 133 | "uid": "413ea31f-5804-11ea-b876-005056a7db08", 134 | "userInfo": { 135 | "groups": ["group1"], 136 | "username": "user1" 137 | } 138 | } 139 | } 140 | } 141 | } -------------------------------------------------------------------------------- /policies/test/test_policy-host-port-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_hostport 2 | 3 | test_host_port_allowed { 4 | result = deny with input as data.mock.test_host_port_allowed 5 | count(result) == 0 6 | } 7 | 8 | test_host_port_denied { 9 | result = deny[_] with input as data.mock.test_host_port_denied 10 | result == { 11 | "errcode": "MT1008", 12 | "msg": "[FAIL] HIGH - hostPort is configured for container \"toolbox\" (MT1008)", 13 | "name": "policy-hostport", 14 | "severity": "HIGH", 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /policies/test/test_policy-host-port-check_mock.json: -------------------------------------------------------------------------------- 1 | { 2 | "mock": { 3 | "test_host_port_allowed": { 4 | "apiVersion": "admission.k8s.io/v1beta1", 5 | "kind": "AdmissionReview", 6 | "request": { 7 | "dryRun": false, 8 | "kind": { 9 | "group": "", 10 | "kind": "Pod", 11 | "version": "v1" 12 | }, 13 | "namespace": "test1", 14 
| "object": { 15 | "apiVersion": "v1", 16 | "kind": "Pod", 17 | "metadata": { 18 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\",\"ports\":[{\"containerPort\":8080,\"hostPort\":8080}]}],\"volumes\":[{\"hostPath\":{\"path\":\"/data\"},\"name\":\"default-token\"}]}}\n"}, 19 | "creationTimestamp": "2020-02-25T19:23:08Z", 20 | "labels": {"run": "toolbox"}, 21 | "name": "toolbox", 22 | "namespace": "test1", 23 | "uid": "413e9d97-5804-11ea-b876-005056a7db08" 24 | }, 25 | "spec": { 26 | "containers": [{ 27 | "command": [ 28 | "sleep", 29 | "360000" 30 | ], 31 | "image": "jmsearcy/twrtools", 32 | "imagePullPolicy": "Always", 33 | "name": "toolbox", 34 | "ports": [{ 35 | "containerPort": 8080, 36 | "protocol": "TCP" 37 | }], 38 | "resources": {}, 39 | "terminationMessagePath": "/dev/termination-log", 40 | "terminationMessagePolicy": "File", 41 | "volumeMounts": [{ 42 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 43 | "name": "default-token-q999w", 44 | "readOnly": true 45 | }] 46 | }], 47 | "volumes": [{ 48 | "name": "default-token-q999w", 49 | "secret": {"secretName": "default-token-q999w"} 50 | }] 51 | }, 52 | "status": { 53 | "phase": "Pending", 54 | "qosClass": "BestEffort" 55 | } 56 | }, 57 | "oldObject": null, 58 | "operation": "CREATE", 59 | "resource": { 60 | "group": "", 61 | "resource": "pods", 62 | "version": "v1" 63 | }, 64 | "uid": "413ea31f-5804-11ea-b876-005056a7db08", 65 | "userInfo": { 66 | "groups": ["group1"], 67 | "username": "user1" 68 | } 69 | } 70 | }, 71 | "test_host_port_denied": { 72 | "apiVersion": "admission.k8s.io/v1beta1", 73 | "kind": "AdmissionReview", 74 | "request": { 75 | "dryRun": false, 76 | "kind": { 77 
| "group": "", 78 | "kind": "Pod", 79 | "version": "v1" 80 | }, 81 | "namespace": "test1", 82 | "object": { 83 | "apiVersion": "v1", 84 | "kind": "Pod", 85 | "metadata": { 86 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\",\"ports\":[{\"containerPort\":8080,\"hostPort\":8080}]}],\"volumes\":[{\"hostPath\":{\"path\":\"/data\"},\"name\":\"default-token\"}]}}\n"}, 87 | "creationTimestamp": "2020-02-25T19:23:08Z", 88 | "labels": {"run": "toolbox"}, 89 | "name": "toolbox", 90 | "namespace": "test1", 91 | "uid": "413e9d97-5804-11ea-b876-005056a7db08" 92 | }, 93 | "spec": { 94 | "containers": [{ 95 | "command": [ 96 | "sleep", 97 | "360000" 98 | ], 99 | "image": "jmsearcy/twrtools", 100 | "imagePullPolicy": "Always", 101 | "name": "toolbox", 102 | "ports": [{ 103 | "containerPort": 8080, 104 | "hostPort": 8080, 105 | "protocol": "TCP" 106 | }], 107 | "resources": {}, 108 | "terminationMessagePath": "/dev/termination-log", 109 | "terminationMessagePolicy": "File", 110 | "volumeMounts": [{ 111 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 112 | "name": "default-token-q999w", 113 | "readOnly": true 114 | }] 115 | }], 116 | "volumes": [ 117 | { 118 | "hostPath": { 119 | "path": "/data", 120 | "type": "" 121 | }, 122 | "name": "default-token" 123 | }, 124 | { 125 | "name": "default-token-q999w", 126 | "secret": {"secretName": "default-token-q999w"} 127 | } 128 | ] 129 | }, 130 | "status": { 131 | "phase": "Pending", 132 | "qosClass": "BestEffort" 133 | } 134 | }, 135 | "oldObject": null, 136 | "operation": "CREATE", 137 | "resource": { 138 | "group": "", 139 | "resource": "pods", 140 | "version": "v1" 141 | }, 142 | "uid": 
"413ea31f-5804-11ea-b876-005056a7db08",
                "userInfo": {
                    "groups": ["group1"],
                    "username": "user1"
                }
            }
        }
    }
}
--------------------------------------------------------------------------------
/policies/test/test_policy-liveness-probe-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_liveness_probe

test_liveness_probe_allowed {
	result = deny with input as data.mock.liveness_probe_allowed
	count(result) == 0
}

test_liveness_probe_denied {
	result = deny[_] with input as data.mock.liveness_probe_denied
	result == {
		"errcode": "MT1001",
		"msg": "[FAIL] LOW - Liveness Probe missing for container \"test-deploy01\" (MT1001)",
		"name": "policy-liveness-probe",
		"severity": "LOW",
	}
}

test_unmatched_resource {
	result = deny with input as data.mock.unmatched_resource
	count(result) == 0
}
--------------------------------------------------------------------------------
/policies/test/test_policy-node-port-range-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_nodeport_range

test_np_single_allowed {
	ns = data.mock.test_np_namespace_single_30100
	result = deny with input as data.mock.test_np_single_30100 with data.kubernetes.namespaces.test1 as ns
	count(result) == 0
}

test_np_range_comma_allowed {
	ns = data.mock.test_np_namespace_range_comma
	result = deny with input as data.mock.test_np_single_30100 with data.kubernetes.namespaces.test1 as ns
	count(result) == 0
}

test_np_range_dash_allowed {
	ns = data.mock.test_np_namespace_range_dash
	result = deny with input as data.mock.test_np_single_30150 with data.kubernetes.namespaces.test1 as ns
	count(result) == 0
}

test_np_range_mixed_allowed {
	ns = data.mock.test_np_namespace_range_dash_and_comma
	result = deny with input as data.mock.test_np_single_30187 with data.kubernetes.namespaces.test1 as ns
	count(result) == 0
}

test_np_range_exempt_allowed {
	ns = data.mock.test_np_namespace_exempt
	result = deny with input as data.mock.test_np_single_30187 with data.kubernetes.namespaces.test1 as ns
	count(result) == 0
}

test_np_single_denied {
	ns = data.mock.test_np_namespace_single_30100
	result = deny[_] with input as data.mock.test_np_single_30101 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - nodePort 30101 is out of defined range for Service \"test-svc\" (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_comma_denied {
	ns = data.mock.test_np_namespace_range_comma
	result = deny[_] with input as data.mock.test_np_single_30101 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - nodePort 30101 is out of defined range for Service \"test-svc\" (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_dash_denied {
	ns = data.mock.test_np_namespace_range_dash
	result = deny[_] with input as data.mock.test_np_single_30075 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - nodePort 30075 is out of defined range for Service \"test-svc\" (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_mixed_denied {
	ns = data.mock.test_np_namespace_range_dash_and_comma
	result = deny[_] with input as data.mock.test_np_single_30075 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - nodePort 30075 is out of defined range for Service \"test-svc\" (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_extra_dash_denied {
	ns = data.mock.test_np_namespace_range_extra_dash
	result = deny[_] with input as data.mock.test_np_single_30075 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - nodePort 30075 is out of defined range for Service \"test-svc\" (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_alpha_chars_denied {
	ns = data.mock.test_np_namespace_range_alpha_chars
	result = deny[_] with input as data.mock.test_np_single_30187 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - Invalid data in nodePort annotation in \"test1\" namespace (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_special_chars_denied {
	ns = data.mock.test_np_namespace_range_special_chars
	result = deny[_] with input as data.mock.test_np_single_30187 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - Invalid data in nodePort annotation in \"test1\" namespace (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}

test_np_range_empty_denied {
	ns = data.mock.test_np_namespace_empty
	result = deny[_] with input as data.mock.test_np_single_30187 with data.kubernetes.namespaces.test1 as ns
	result == {
		"errcode": "MT2002",
		"msg": "[FAIL] MED - Invalid data in nodePort annotation in \"test1\" namespace (MT2002)",
		"name": "policy-nodeport-range",
		"severity": "MED",
	}
}
--------------------------------------------------------------------------------
/policies/test/test_policy-pdb-check.rego:
--------------------------------------------------------------------------------
package kubernetes.admission.policy_pdb

test_pdb_allowed_min_high {
	result = deny with input as data.mock.test_pdb_allowed_min_high
	count(result) == 0
}

test_pdb_allowed_min_low {
	result = deny with input as data.mock.test_pdb_allowed_min_low
	count(result) == 0
}

test_pdb_denied_min_percent {
	result = deny[_] with input as data.mock.test_pdb_denied_min_percent
	result == {
		"errcode": "MT1005",
		"msg": "[FAIL] HIGH - Value (67%) for \"minAvailable\" not within range 0%-66% (MT1005)",
		"name": "policy-pdb",
		"severity": "HIGH",
	}
}

test_pdb_denied_min_int {
	result = deny[_] with input as data.mock.test_pdb_denied_min_int
	result == {
		"errcode": "MT1005",
		"msg": "[FAIL] HIGH - Value \"10\" for \"minAvailable\" should be a Percentage, not an Integer (MT1005)",
		"name": "policy-pdb",
		"severity": "HIGH",
	}
}

test_pdb_denied_max_percent {
	result = deny[_] with input as data.mock.test_pdb_denied_max_percent
	result == {
		"errcode": "MT1005",
		"msg": "[FAIL] HIGH - Value (32%) for \"maxUnavailable\" not within range 33%-100% (MT1005)",
		"name": "policy-pdb",
		"severity": "HIGH",
	}
}

test_pdb_denied_max_int {
	result = deny[_] with input as data.mock.test_pdb_denied_max_int
	result == {
		"errcode": "MT1005",
		"msg": "[FAIL] HIGH - Value \"10\" for \"maxUnavailable\" should be a Percentage, not an Integer (MT1005)",
		"name": "policy-pdb",
		"severity": "HIGH",
	}
}
--------------------------------------------------------------------------------
/policies/test/test_policy-pdb-check_mock.json:
--------------------------------------------------------------------------------
{
    "mock": {
        "test_pdb_allowed_min_high": {
            "apiVersion": "policy/v1beta1",
"kind": "PodDisruptionBudget", 6 | "metadata": {"name": "test1-pdb"}, 7 | "request": {"object": {"spec": { 8 | "minAvailable": "66%", 9 | "selector": {"matchLabels": {"app": "test1"}} 10 | }}} 11 | }, 12 | "test_pdb_allowed_min_low": { 13 | "apiVersion": "policy/v1beta1", 14 | "kind": "PodDisruptionBudget", 15 | "metadata": {"name": "test1-pdb"}, 16 | "request": {"object": {"spec": { 17 | "maxUnavailable": "33%", 18 | "selector": {"matchLabels": {"app": "test1"}} 19 | }}} 20 | }, 21 | "test_pdb_denied_min_percent": { 22 | "apiVersion": "policy/v1beta1", 23 | "kind": "PodDisruptionBudget", 24 | "metadata": {"name": "test1-pdb"}, 25 | "request": {"object": {"spec": { 26 | "minAvailable": "67%", 27 | "selector": {"matchLabels": {"app": "test1"}} 28 | }}} 29 | }, 30 | "test_pdb_denied_min_int": { 31 | "apiVersion": "policy/v1beta1", 32 | "kind": "PodDisruptionBudget", 33 | "metadata": {"name": "test1-pdb"}, 34 | "request": {"object": {"spec": { 35 | "minAvailable": "10", 36 | "selector": {"matchLabels": {"app": "test1"}} 37 | }}} 38 | }, 39 | "test_pdb_denied_max_percent": { 40 | "apiVersion": "policy/v1beta1", 41 | "kind": "PodDisruptionBudget", 42 | "metadata": {"name": "test1-pdb"}, 43 | "request": {"object": {"spec": { 44 | "maxUnavailable": "32%", 45 | "selector": {"matchLabels": {"app": "test1"}} 46 | }}} 47 | }, 48 | "test_pdb_denied_max_int": { 49 | "apiVersion": "policy/v1beta1", 50 | "kind": "PodDisruptionBudget", 51 | "metadata": {"name": "test1-pdb"}, 52 | "request": {"object": {"spec": { 53 | "maxUnavailable": "10", 54 | "selector": {"matchLabels": {"app": "test1"}} 55 | }}} 56 | } 57 | } 58 | } -------------------------------------------------------------------------------- /policies/test/test_policy-port-name-mismatch-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_port_name_mismatch 2 | 3 | test_port_name_mismatch_allowed { 4 | result = deny with input as 
data.mock.test_port_name_mismatch_allowed 5 | count(result) == 0 6 | } 7 | 8 | test_port_name_mismatch_denied { 9 | result = deny[_] with input as data.mock.test_port_name_mismatch_denied 10 | result == { 11 | "errcode": "MT1006", 12 | "msg": "[FAIL] HIGH - Logical port name \"http\" mismatch with port number \"443\" for service \"test-svc\" (MT1006)", 13 | "name": "policy-port-name-mismatch", 14 | "severity": "HIGH", 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /policies/test/test_policy-port-name-mismatch-check_mock.json: -------------------------------------------------------------------------------- 1 | { 2 | "mock": { 3 | "test_port_name_mismatch_allowed": { 4 | "apiVersion": "admission.k8s.io/v1beta1", 5 | "kind": "AdmissionReview", 6 | "request": { 7 | "dryRun": false, 8 | "kind": { 9 | "group": "", 10 | "kind": "Service", 11 | "version": "v1" 12 | }, 13 | "namespace": "default", 14 | "object": { 15 | "apiVersion": "v1", 16 | "kind": "Service", 17 | "metadata": { 18 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"labels\":{\"app\":\"test-svc\"},\"name\":\"test-svc\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"name\":\"http\",\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"app\":\"test-svc\"},\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n"}, 19 | "creationTimestamp": "2020-02-04T01:16:07Z", 20 | "labels": {"app": "test-svc"}, 21 | "name": "test-svc", 22 | "namespace": "default", 23 | "uid": "ebaa71f7-46eb-11ea-85fd-005056a7b324" 24 | }, 25 | "spec": { 26 | "clusterIP": "198.19.241.208", 27 | "ports": [{ 28 | "name": "https", 29 | "port": 443, 30 | "protocol": "TCP", 31 | "targetPort": 443 32 | }], 33 | "selector": {"app": "test-svc"}, 34 | "sessionAffinity": "None", 35 | "type": "ClusterIP" 36 | }, 37 | "status": {"loadBalancer": {}} 38 
| }, 39 | "oldObject": null, 40 | "operation": "CREATE", 41 | "resource": { 42 | "group": "", 43 | "resource": "services", 44 | "version": "v1" 45 | }, 46 | "uid": "ebaa77b8-46eb-11ea-85fd-005056a7b324", 47 | "userInfo": { 48 | "groups": [ 49 | "group1", 50 | "group2" 51 | ], 52 | "username": "user2" 53 | } 54 | } 55 | }, 56 | "test_port_name_mismatch_denied": { 57 | "apiVersion": "admission.k8s.io/v1beta1", 58 | "kind": "AdmissionReview", 59 | "request": { 60 | "dryRun": false, 61 | "kind": { 62 | "group": "", 63 | "kind": "Service", 64 | "version": "v1" 65 | }, 66 | "namespace": "default", 67 | "object": { 68 | "apiVersion": "v1", 69 | "kind": "Service", 70 | "metadata": { 71 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"labels\":{\"app\":\"test-svc\"},\"name\":\"test-svc\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"name\":\"http\",\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"app\":\"test-svc\"},\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n"}, 72 | "creationTimestamp": "2020-02-04T01:16:07Z", 73 | "labels": {"app": "test-svc"}, 74 | "name": "test-svc", 75 | "namespace": "default", 76 | "uid": "ebaa71f7-46eb-11ea-85fd-005056a7b324" 77 | }, 78 | "spec": { 79 | "clusterIP": "198.19.241.208", 80 | "ports": [{ 81 | "name": "http", 82 | "port": 443, 83 | "protocol": "TCP", 84 | "targetPort": 443 85 | }], 86 | "selector": {"app": "test-svc"}, 87 | "sessionAffinity": "None", 88 | "type": "ClusterIP" 89 | }, 90 | "status": {"loadBalancer": {}} 91 | }, 92 | "oldObject": null, 93 | "operation": "CREATE", 94 | "resource": { 95 | "group": "", 96 | "resource": "services", 97 | "version": "v1" 98 | }, 99 | "uid": "ebaa77b8-46eb-11ea-85fd-005056a7b324", 100 | "userInfo": { 101 | "groups": [ 102 | "group1", 103 | "group2" 104 | ], 105 | "username": "user2" 106 | } 107 | } 108 | } 109 | } 110 | } 
-------------------------------------------------------------------------------- /policies/test/test_policy-privileged-pod-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_privileged_pod 2 | 3 | test_privileged_pod_allowed { 4 | result = deny with input as data.mock.test_privileged_pod_allowed 5 | count(result) == 0 6 | } 7 | 8 | test_privileged_pod_denied { 9 | result = deny[_] with input as data.mock.test_privileged_pod_denied 10 | result == { 11 | "errcode": "MT2001", 12 | "msg": "[FAIL] HIGH - Found privileged Security Context for container \"test-deploy01\" (MT2001)", 13 | "name": "policy-privileged-pod", 14 | "severity": "HIGH", 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /policies/test/test_policy-privileged-pod-check_mock.json: -------------------------------------------------------------------------------- 1 | { 2 | "mock": { 3 | "test_privileged_pod_allowed": { 4 | "apiVersion": "admission.k8s.io/v1beta1", 5 | "kind": "AdmissionReview", 6 | "request": { 7 | "dryRun": false, 8 | "kind": { 9 | "group": "apps", 10 | "kind": "Deployment", 11 | "version": "v1" 12 | }, 13 | "namespace": "test1", 14 | "object": { 15 | "apiVersion": "apps/v1", 16 | "kind": "Deployment", 17 | "metadata": { 18 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy01\"},\"name\":\"test-deploy01\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy01\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy01\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"test-deploy01\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}]}}}}\n"}, 19 | "creationTimestamp": "2019-08-05T04:58:34Z", 20 | "generation": 1, 21 | "labels": {"app": "test-deploy01"}, 22 | "name": "test-deploy01", 23 | "namespace": "test1", 24 | "uid": "51ab8ca1-93c9-4c71-88d6-479610b0597a" 25 | }, 26 | "spec": {"template": { 27 | "metadata": { 28 | "creationTimestamp": null, 29 | "labels": {"app": "test-deploy01"} 30 | }, 31 | "spec": {"containers": [{ 32 | "name": "test-deploy01", 33 | "livenessProbe": { 34 | "failureThreshold": 3, 35 | "httpGet": { 36 | "httpHeaders": [{ 37 | "name": "X-Custom-Header", 38 | "value": "Awesome" 39 | }], 40 | "path": "/healthz", 41 | "port": 8080, 42 | "scheme": "HTTP" 43 | }, 44 | "initialDelaySeconds": 3, 45 | "periodSeconds": 3, 46 | "successThreshold": 1, 47 | "timeoutSeconds": 1 48 | }, 49 | "resources": { 50 | "limits": { 51 | "cpu": "50m", 52 | "memory": "128Mi" 53 | }, 54 | "requests": { 55 | "cpu": "50m", 56 | "memory": "128Mi" 57 | } 58 | }, 59 | "terminationMessagePath": "/dev/termination-log", 60 | "terminationMessagePolicy": "File" 61 | }]} 62 | }}, 63 | 
"status": {} 64 | } 65 | } 66 | }, 67 | "test_privileged_pod_denied": { 68 | "apiVersion": "admission.k8s.io/v1beta1", 69 | "kind": "AdmissionReview", 70 | "request": { 71 | "dryRun": false, 72 | "kind": { 73 | "group": "apps", 74 | "kind": "Deployment", 75 | "version": "v1" 76 | }, 77 | "namespace": "test1", 78 | "object": { 79 | "apiVersion": "apps/v1", 80 | "kind": "Deployment", 81 | "metadata": { 82 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy01\"},\"name\":\"test-deploy01\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy01\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy01\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"test-deploy01\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}]}}}}\n"}, 83 | "creationTimestamp": "2019-08-05T04:58:34Z", 84 | "generation": 1, 85 | "labels": {"app": "test-deploy01"}, 86 | "name": "test-deploy01", 87 | "namespace": "test1", 88 | "uid": "51ab8ca1-93c9-4c71-88d6-479610b0597a" 89 | }, 90 | "spec": {"template": { 91 | "metadata": { 92 | "creationTimestamp": null, 93 | "labels": {"app": "test-deploy01"} 94 | }, 95 | "spec": {"containers": [{ 96 | "name": "test-deploy01", 97 | "resources": { 98 | "limits": { 99 | "cpu": "50m", 100 | "memory": "128Mi" 101 | }, 102 | "requests": { 103 | "cpu": "50m", 104 | "memory": "128Mi" 105 | } 
106 | }, 107 | "securityContext": {"privileged": true}, 108 | "terminationMessagePath": "/dev/termination-log", 109 | "terminationMessagePolicy": "File" 110 | }]} 111 | }}, 112 | "status": {} 113 | } 114 | } 115 | } 116 | } 117 | } -------------------------------------------------------------------------------- /policies/test/test_policy-readiness-probe-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_readiness_probe 2 | 3 | test_readiness_probe_allowed { 4 | result = deny with input as data.mock.test_readiness_probe_allowed 5 | count(result) == 0 6 | } 7 | 8 | test_readiness_probe_denied { 9 | result = deny[_] with input as data.mock.test_readiness_probe_denied 10 | result == { 11 | "errcode": "MT1002", 12 | "msg": "[FAIL] LOW - Readiness Probe missing for container \"test-deploy01\" (MT1002)", 13 | "name": "policy-readiness-probe", 14 | "severity": "LOW", 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /policies/test/test_policy-readiness-probe-check_mock.json: -------------------------------------------------------------------------------- 1 | { 2 | "mock": { 3 | "test_readiness_probe_allowed": { 4 | "apiVersion": "admission.k8s.io/v1beta1", 5 | "kind": "AdmissionReview", 6 | "request": { 7 | "dryRun": false, 8 | "kind": { 9 | "group": "apps", 10 | "kind": "Deployment", 11 | "version": "v1" 12 | }, 13 | "namespace": "test1", 14 | "object": { 15 | "apiVersion": "apps/v1", 16 | "kind": "Deployment", 17 | "metadata": { 18 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy01\"},\"name\":\"test-deploy01\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy01\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy01\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"test-deploy01\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}]}}}}\n"}, 19 | "creationTimestamp": "2019-08-05T04:58:34Z", 20 | "generation": 1, 21 | "labels": {"app": "test-deploy01"}, 22 | "name": "test-deploy01", 23 | "namespace": "test1", 24 | "uid": "51ab8ca1-93c9-4c71-88d6-479610b0597a" 25 | }, 26 | "spec": {"template": { 27 | "metadata": { 28 | "creationTimestamp": null, 29 | "labels": {"app": "test-deploy01"} 30 | }, 31 | "spec": {"containers": [{ 32 | "name": "test-deploy01", 33 | "readinessProbe": { 34 | "failureThreshold": 3, 35 | "httpGet": { 36 | "httpHeaders": [{ 37 | "name": "X-Custom-Header", 38 | "value": "Awesome" 39 | }], 40 | "path": "/healthz", 41 | "port": 8080, 42 | "scheme": "HTTP" 43 | }, 44 | "initialDelaySeconds": 3, 45 | "periodSeconds": 3, 46 | "successThreshold": 1, 47 | "timeoutSeconds": 1 48 | }, 49 | "resources": { 50 | "limits": { 51 | "cpu": "50m", 52 | "memory": "128Mi" 53 | }, 54 | "requests": { 55 | "cpu": "50m", 56 | "memory": "128Mi" 57 | } 58 | }, 59 | "terminationMessagePath": "/dev/termination-log", 60 | "terminationMessagePolicy": "File" 61 | }]} 62 | }}, 63 | 
"status": {} 64 | } 65 | } 66 | }, 67 | "test_readiness_probe_denied": { 68 | "apiVersion": "admission.k8s.io/v1beta1", 69 | "kind": "AdmissionReview", 70 | "request": { 71 | "dryRun": false, 72 | "kind": { 73 | "group": "apps", 74 | "kind": "Deployment", 75 | "version": "v1" 76 | }, 77 | "namespace": "test1", 78 | "object": { 79 | "apiVersion": "apps/v1", 80 | "kind": "Deployment", 81 | "metadata": { 82 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy01\"},\"name\":\"test-deploy01\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy01\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy01\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"test-deploy01\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}]}}}}\n"}, 83 | "creationTimestamp": "2019-08-05T04:58:34Z", 84 | "generation": 1, 85 | "labels": {"app": "test-deploy01"}, 86 | "name": "test-deploy01", 87 | "namespace": "test1", 88 | "uid": "51ab8ca1-93c9-4c71-88d6-479610b0597a" 89 | }, 90 | "spec": {"template": { 91 | "metadata": { 92 | "creationTimestamp": null, 93 | "labels": {"app": "test-deploy01"} 94 | }, 95 | "spec": {"containers": [{ 96 | "name": "test-deploy01", 97 | "resources": { 98 | "limits": { 99 | "cpu": "50m", 100 | "memory": "128Mi" 101 | }, 102 | "requests": { 103 | "cpu": "50m", 104 | "memory": "128Mi" 105 | } 
106 | }, 107 | "terminationMessagePath": "/dev/termination-log", 108 | "terminationMessagePolicy": "File" 109 | }]} 110 | }}, 111 | "status": {} 112 | } 113 | } 114 | } 115 | } 116 | } -------------------------------------------------------------------------------- /policies/test/test_policy-resource-limits-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_resource_limits 2 | 3 | test_resource_limits_allowed { 4 | result = deny with input as data.mock.test_resource_limits_allowed 5 | count(result) == 0 6 | } 7 | 8 | test_limits_denied_cpu { 9 | result = deny[_] with input as data.mock.test_limits_denied_cpu 10 | result == { 11 | "errcode": "MT1003", 12 | "msg": "[FAIL] LOW - Resource limits missing (CPU) for container \"test-deploy01\" (MT1003)", 13 | "name": "policy-resource-limits", 14 | "severity": "LOW", 15 | } 16 | } 17 | 18 | test_limits_denied_mem { 19 | result = deny[_] with input as data.mock.test_limits_denied_mem 20 | result == { 21 | "errcode": "MT1003", 22 | "msg": "[FAIL] LOW - Resource limits missing (MEM) for container \"test-deploy01\" (MT1003)", 23 | "name": "policy-resource-limits", 24 | "severity": "LOW", 25 | } 26 | } 27 | 28 | test_limits_denied_mem_cpu { 29 | result = deny[_] with input as data.mock.test_limits_denied_mem_cpu 30 | result == { 31 | "errcode": "MT1003", 32 | "msg": "[FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy01\" (MT1003)", 33 | "name": "policy-resource-limits", 34 | "severity": "LOW", 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /policies/test/test_policy-resource-requests-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_resource_requests 2 | 3 | test_resource_requests_allowed { 4 | result = deny with input as data.mock.test_resource_requests_allowed 5 | count(result) == 0 6 | } 
7 | 8 | test_requests_denied_cpu { 9 | result = deny[_] with input as data.mock.test_requests_denied_cpu 10 | result == { 11 | "errcode": "MT1004", 12 | "msg": "[FAIL] LOW - Resource requests missing (CPU) for container \"test-deploy01\" (MT1004)", 13 | "name": "policy-resource-requests", 14 | "severity": "LOW", 15 | } 16 | } 17 | 18 | test_requests_denied_mem { 19 | result = deny[_] with input as data.mock.test_requests_denied_mem 20 | result == { 21 | "errcode": "MT1004", 22 | "msg": "[FAIL] LOW - Resource requests missing (MEM) for container \"test-deploy01\" (MT1004)", 23 | "name": "policy-resource-requests", 24 | "severity": "LOW", 25 | } 26 | } 27 | 28 | test_requests_denied_mem_cpu { 29 | result = deny[_] with input as data.mock.test_requests_denied_mem_cpu 30 | result == { 31 | "errcode": "MT1004", 32 | "msg": "[FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy01\" (MT1004)", 33 | "name": "policy-resource-requests", 34 | "severity": "LOW", 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /policies/test/test_policy-singleton-pod-check.rego: -------------------------------------------------------------------------------- 1 | package kubernetes.admission.policy_singleton_pod_check 2 | 3 | test_pod_singleton_allowed { 4 | result = deny with input as data.mock.test_pod_singleton_allowed 5 | count(result) == 0 6 | } 7 | 8 | test_pod_singleton_denied { 9 | result = deny[_] with input as data.mock.test_pod_singleton_denied 10 | result == { 11 | "errcode": "MT1007", 12 | "msg": "[FAIL] LOW - \"toolbox\" is a singleton pod. 
(MT1007)", 13 | "name": "policy-singleton-pod-check", 14 | "severity": "LOW", 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /policies/test/test_policy-singleton-pod-check_mock.json: -------------------------------------------------------------------------------- 1 | { 2 | "mock": { 3 | "test_pod_singleton_allowed": { 4 | "apiVersion": "admission.k8s.io/v1beta1", 5 | "kind": "AdmissionReview", 6 | "request": { 7 | "dryRun": false, 8 | "kind": { 9 | "group": "", 10 | "kind": "Pod", 11 | "version": "v1" 12 | }, 13 | "namespace": "test1", 14 | "object": { 15 | "apiVersion": "v1", 16 | "kind": "Pod", 17 | "metadata": { 18 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\",\"ownerReferences\":[{\"apiVersion\":\"v1\",\"kind\":\"Replica\",\"name\":\"my-repset\",\"uid\":\"uidexa1\"}]},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"toolbox\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}],\"volumes\":[{\"emptyDir\":{\"sizeLimit\":\"50M\"},\"name\":\"default-token\"}]}}\n"}, 19 | "creationTimestamp": "2020-03-04T21:10:51Z", 20 | "labels": {"run": "toolbox"}, 21 | "name": "toolbox", 22 | "namespace": "test1", 23 | "ownerReferences": [{ 24 | "apiVersion": "v1", 25 | "kind": "Replica", 26 | "name": "my-repset", 27 | "uid": "uidexa1" 
28 | }], 29 | "uid": "a084008f-5e5c-11ea-a33d-005056a72b7b" 30 | } 31 | } 32 | } 33 | }, 34 | "test_pod_singleton_denied": { 35 | "apiVersion": "admission.k8s.io/v1beta1", 36 | "kind": "AdmissionReview", 37 | "request": { 38 | "dryRun": false, 39 | "kind": { 40 | "group": "", 41 | "kind": "Pod", 42 | "version": "v1" 43 | }, 44 | "namespace": "test1", 45 | "object": { 46 | "apiVersion": "v1", 47 | "kind": "Pod", 48 | "metadata": { 49 | "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\",\"ownerReferences\":[{\"apiVersion\":\"v1\",\"kind\":\"Replica\",\"name\":\"my-repset\",\"uid\":\"uidexa1\"}]},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"toolbox\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}],\"volumes\":[{\"emptyDir\":{\"sizeLimit\":\"50M\"},\"name\":\"default-token\"}]}}\n"}, 50 | "creationTimestamp": "2020-03-04T21:10:51Z", 51 | "labels": {"run": "toolbox"}, 52 | "name": "toolbox", 53 | "namespace": "test1", 54 | "uid": "a084008f-5e5c-11ea-a33d-005056a72b7b" 55 | } 56 | } 57 | } 58 | } 59 | } 60 | } -------------------------------------------------------------------------------- /testing/README.md: -------------------------------------------------------------------------------- 1 | # Testing Info 2 | 3 | MagTape uses the files collected in this directory for various 
testing strategies (unit tests, functional tests, regression tests, etc.). Test files will generally fall into one of three categories: 4 | 5 | - YAML File - Used for applying directly to a Kubernetes cluster 6 | - JSON Request Object File - Used for testing the MagTape application outside of Kubernetes 7 | - JSON Response File - Used for validating responses from various functions/calls of the MagTape application 8 | 9 | ## Functional Tests 10 | 11 | Every policy for MagTape should have one or more functional tests associated with it. Functional tests are typically YAML manifests for Kubernetes resources with specific configuration to test the functionality of a policy. Manifests should be placed in a directory associated with the target resource type (i.e. `./testing/deployments/`, `./testing/services/`, etc.). 12 | 13 | The [functional-tests.yaml](./functional-tests.yaml) file contains the tests that get executed within the CI workflows and what results are expected (pass or fail). Each test should fall under the appropriate resource and result section of the file. The script field can be used to specify a bash script which can be used to execute setup, teardown and between (each manifest being applied) tasks to modify the environment making it suitable for executing the test. 14 | 15 | - **Setup** tasks would be run before any of the manifests of a specific kind/desired combination are run. 16 | - **Teardown** would run after the kind/desired combination's manifests have been tested. 17 | - **Between** is run in between applying each manifest for the associated kind/desired combination. 18 | 19 | An [example script](https://gist.github.com/ilrudie/43823733444ba7976b2f567f30706620) can be used as a starting point for implementing these setup, teardown and between functions for your tests. 
20 | 21 | ```yaml 22 | resources: 23 | - kind: deployments 24 | desired: pass 25 | script: 26 | manifests: 27 | - name: "Deployment - Pass all policies" 28 | file: test-deploy01.yaml 29 | - name: "Deployment - No Liveness Probe" 30 | file: test-deploy03.yaml 31 | - kind: deployments 32 | desired: fail 33 | script: 34 | manifests: 35 | - name: "Deployment - Fail all policies" 36 | file: test-deploy02.yaml 37 | ``` 38 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy01-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": true 4 | } 5 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy01.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy01 5 | labels: 6 | app: test-deploy01 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy01 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy01 16 | spec: 17 | containers: 18 | - name: test-deploy01 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | readinessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | limits: 42 | cpu: "50m" 43 | memory: "128Mi" 44 | requests: 45 | cpu: "50m" 46 | memory: "128Mi" 47 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy02-response.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] HIGH - Found privileged Security Context for container \"test-deploy02\" (MT2001), [FAIL] LOW - Liveness Probe missing for container \"test-deploy02\" (MT1001), [FAIL] LOW - Readiness Probe missing for container \"test-deploy02\" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy02\" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy02\" (MT1004)" 6 | }, 7 | "uid": "ec7255ef-1925-473c-902c-8a0fb9bc6b96" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy02.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "apps", 8 | "kind": "Deployment", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "apps/v1", 14 | "kind": "Deployment", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy02\"},\"name\":\"test-deploy02\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy02\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy02\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"name\":\"test-deploy02\",\"securityContext\":{\"privileged\":true}}]}}}}\n" 18 | }, 19 | "creationTimestamp": "2019-08-05T05:11:41Z", 20 | "generation": 1, 21 | "labels": { 22 | "app": "test-deploy02" 23 | }, 24 | "name": "test-deploy02", 25 | "namespace": "test1", 26 | "uid": "622f1faa-ea2d-4f33-9e13-6c1a4a84e9e3" 27 | }, 28 | "spec": { 29 | "progressDeadlineSeconds": 600, 30 | "replicas": 1, 31 | 
"revisionHistoryLimit": 10, 32 | "selector": { 33 | "matchLabels": { 34 | "app": "test-deploy02" 35 | } 36 | }, 37 | "strategy": { 38 | "rollingUpdate": { 39 | "maxSurge": "25%", 40 | "maxUnavailable": "25%" 41 | }, 42 | "type": "RollingUpdate" 43 | }, 44 | "template": { 45 | "metadata": { 46 | "creationTimestamp": null, 47 | "labels": { 48 | "app": "test-deploy02" 49 | } 50 | }, 51 | "spec": { 52 | "containers": [ 53 | { 54 | "args": [ 55 | "/server" 56 | ], 57 | "image": "k8s.gcr.io/liveness", 58 | "imagePullPolicy": "Always", 59 | "name": "test-deploy02", 60 | "resources": {}, 61 | "securityContext": { 62 | "privileged": true 63 | }, 64 | "terminationMessagePath": "/dev/termination-log", 65 | "terminationMessagePolicy": "File" 66 | } 67 | ], 68 | "dnsPolicy": "ClusterFirst", 69 | "restartPolicy": "Always", 70 | "schedulerName": "default-scheduler", 71 | "securityContext": {}, 72 | "terminationGracePeriodSeconds": 30 73 | } 74 | } 75 | }, 76 | "status": {} 77 | }, 78 | "oldObject": null, 79 | "operation": "CREATE", 80 | "options": { 81 | "apiVersion": "meta.k8s.io/v1", 82 | "kind": "CreateOptions" 83 | }, 84 | "requestKind": { 85 | "group": "apps", 86 | "kind": "Deployment", 87 | "version": "v1" 88 | }, 89 | "requestResource": { 90 | "group": "apps", 91 | "resource": "deployments", 92 | "version": "v1" 93 | }, 94 | "resource": { 95 | "group": "apps", 96 | "resource": "deployments", 97 | "version": "v1" 98 | }, 99 | "uid": "86a0903c-3887-441e-a180-e9436bfa5d14", 100 | "userInfo": { 101 | "groups": [ 102 | "system:masters", 103 | "system:authenticated" 104 | ], 105 | "username": "kubernetes-admin" 106 | } 107 | } 108 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy02.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy02 5 | labels: 6 | app: test-deploy02 7 | spec: 8 | replicas: 1 9 | 
selector: 10 | matchLabels: 11 | app: test-deploy02 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy02 16 | spec: 17 | containers: 18 | - name: test-deploy02 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | securityContext: 23 | privileged: true 24 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy03-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Liveness Probe missing for container \"test-deploy03\" (MT1001)" 6 | }, 7 | "uid": "a7947390-70b8-458b-8e52-87f52fdea04a" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy03.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "apps", 8 | "kind": "Deployment", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "apps/v1", 14 | "kind": "Deployment", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy03\"},\"name\":\"test-deploy03\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy03\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy03\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"name\":\"test-deploy03\",\"readinessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}]}}}}\n" 18 | }, 19 | "creationTimestamp": "2019-08-05T05:20:12Z", 20 | "generation": 1, 21 | "labels": { 22 | "app": "test-deploy03" 23 | }, 24 | "name": "test-deploy03", 25 | "namespace": "test1", 26 | "uid": "5a3c9553-64f6-44b6-8b97-d0d9c56c341f" 27 | }, 28 | "spec": { 29 | "progressDeadlineSeconds": 600, 30 | "replicas": 1, 31 | "revisionHistoryLimit": 10, 32 | "selector": { 33 | "matchLabels": { 34 | "app": "test-deploy03" 35 | } 36 | }, 37 | "strategy": { 38 | "rollingUpdate": { 39 | "maxSurge": "25%", 40 | "maxUnavailable": "25%" 41 | }, 42 | "type": "RollingUpdate" 43 | }, 44 | "template": { 45 | "metadata": { 46 | "creationTimestamp": null, 47 | "labels": { 48 | "app": "test-deploy03" 49 | } 50 | }, 51 | "spec": { 52 | "containers": [ 53 | { 54 | "args": [ 55 | "/server" 56 | ], 57 | "image": "k8s.gcr.io/liveness", 58 | "imagePullPolicy": "Always", 59 | "name": "test-deploy03", 60 | "readinessProbe": { 61 | "failureThreshold": 3, 62 | "httpGet": { 63 | "httpHeaders": [ 64 | { 65 | "name": "X-Custom-Header", 66 | "value": "Awesome" 67 | } 68 | ], 69 | "path": "/healthz", 70 | "port": 8080, 71 | "scheme": "HTTP" 72 | }, 73 | "initialDelaySeconds": 3, 74 | "periodSeconds": 3, 75 | "successThreshold": 1, 76 | "timeoutSeconds": 1 
77 | }, 78 | "resources": { 79 | "limits": { 80 | "cpu": "50m", 81 | "memory": "128Mi" 82 | }, 83 | "requests": { 84 | "cpu": "50m", 85 | "memory": "128Mi" 86 | } 87 | }, 88 | "terminationMessagePath": "/dev/termination-log", 89 | "terminationMessagePolicy": "File" 90 | } 91 | ], 92 | "dnsPolicy": "ClusterFirst", 93 | "restartPolicy": "Always", 94 | "schedulerName": "default-scheduler", 95 | "securityContext": {}, 96 | "terminationGracePeriodSeconds": 30 97 | } 98 | } 99 | }, 100 | "status": {} 101 | }, 102 | "oldObject": null, 103 | "operation": "CREATE", 104 | "options": { 105 | "apiVersion": "meta.k8s.io/v1", 106 | "kind": "CreateOptions" 107 | }, 108 | "requestKind": { 109 | "group": "apps", 110 | "kind": "Deployment", 111 | "version": "v1" 112 | }, 113 | "requestResource": { 114 | "group": "apps", 115 | "resource": "deployments", 116 | "version": "v1" 117 | }, 118 | "resource": { 119 | "group": "apps", 120 | "resource": "deployments", 121 | "version": "v1" 122 | }, 123 | "uid": "a7947390-70b8-458b-8e52-87f52fdea04a", 124 | "userInfo": { 125 | "groups": [ 126 | "system:masters", 127 | "system:authenticated" 128 | ], 129 | "username": "kubernetes-admin" 130 | } 131 | } 132 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy03.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy03 5 | labels: 6 | app: test-deploy03 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy03 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy03 16 | spec: 17 | containers: 18 | - name: test-deploy03 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | readinessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | resources: 32 | limits: 
33 | cpu: "50m" 34 | memory: "128Mi" 35 | requests: 36 | cpu: "50m" 37 | memory: "128Mi" 38 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy04-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Readiness Probe missing for container \"test-deploy04\" (MT1002)" 6 | }, 7 | "uid": "f34ee74a-e154-4a87-b460-f1521bf16c65" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy04.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "apps", 8 | "kind": "Deployment", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "apps/v1", 14 | "kind": "Deployment", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy04\"},\"name\":\"test-deploy04\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy04\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy04\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"livenessProbe\":{\"httpGet\":{\"httpHeaders\":[{\"name\":\"X-Custom-Header\",\"value\":\"Awesome\"}],\"path\":\"/healthz\",\"port\":8080},\"initialDelaySeconds\":3,\"periodSeconds\":3},\"name\":\"test-deploy04\",\"resources\":{\"limits\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"},\"requests\":{\"cpu\":\"50m\",\"memory\":\"128Mi\"}}}]}}}}\n" 18 | }, 19 | "creationTimestamp": "2019-08-05T05:23:18Z", 20 | "generation": 1, 21 | "labels": { 22 | 
"app": "test-deploy04" 23 | }, 24 | "name": "test-deploy04", 25 | "namespace": "test1", 26 | "uid": "2c178f37-3703-4eb4-b440-880e273c03e2" 27 | }, 28 | "spec": { 29 | "progressDeadlineSeconds": 600, 30 | "replicas": 1, 31 | "revisionHistoryLimit": 10, 32 | "selector": { 33 | "matchLabels": { 34 | "app": "test-deploy04" 35 | } 36 | }, 37 | "strategy": { 38 | "rollingUpdate": { 39 | "maxSurge": "25%", 40 | "maxUnavailable": "25%" 41 | }, 42 | "type": "RollingUpdate" 43 | }, 44 | "template": { 45 | "metadata": { 46 | "creationTimestamp": null, 47 | "labels": { 48 | "app": "test-deploy04" 49 | } 50 | }, 51 | "spec": { 52 | "containers": [ 53 | { 54 | "args": [ 55 | "/server" 56 | ], 57 | "image": "k8s.gcr.io/liveness", 58 | "imagePullPolicy": "Always", 59 | "livenessProbe": { 60 | "failureThreshold": 3, 61 | "httpGet": { 62 | "httpHeaders": [ 63 | { 64 | "name": "X-Custom-Header", 65 | "value": "Awesome" 66 | } 67 | ], 68 | "path": "/healthz", 69 | "port": 8080, 70 | "scheme": "HTTP" 71 | }, 72 | "initialDelaySeconds": 3, 73 | "periodSeconds": 3, 74 | "successThreshold": 1, 75 | "timeoutSeconds": 1 76 | }, 77 | "name": "test-deploy04", 78 | "resources": { 79 | "limits": { 80 | "cpu": "50m", 81 | "memory": "128Mi" 82 | }, 83 | "requests": { 84 | "cpu": "50m", 85 | "memory": "128Mi" 86 | } 87 | }, 88 | "terminationMessagePath": "/dev/termination-log", 89 | "terminationMessagePolicy": "File" 90 | } 91 | ], 92 | "dnsPolicy": "ClusterFirst", 93 | "restartPolicy": "Always", 94 | "schedulerName": "default-scheduler", 95 | "securityContext": {}, 96 | "terminationGracePeriodSeconds": 30 97 | } 98 | } 99 | }, 100 | "status": {} 101 | }, 102 | "oldObject": null, 103 | "operation": "CREATE", 104 | "options": { 105 | "apiVersion": "meta.k8s.io/v1", 106 | "kind": "CreateOptions" 107 | }, 108 | "requestKind": { 109 | "group": "apps", 110 | "kind": "Deployment", 111 | "version": "v1" 112 | }, 113 | "requestResource": { 114 | "group": "apps", 115 | "resource": "deployments", 116 | 
"version": "v1" 117 | }, 118 | "resource": { 119 | "group": "apps", 120 | "resource": "deployments", 121 | "version": "v1" 122 | }, 123 | "uid": "f34ee74a-e154-4a87-b460-f1521bf16c65", 124 | "userInfo": { 125 | "groups": [ 126 | "system:masters", 127 | "system:authenticated" 128 | ], 129 | "username": "kubernetes-admin" 130 | } 131 | } 132 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy04.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy04 5 | labels: 6 | app: test-deploy04 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy04 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy04 16 | spec: 17 | containers: 18 | - name: test-deploy04 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | resources: 32 | limits: 33 | cpu: "50m" 34 | memory: "128Mi" 35 | requests: 36 | cpu: "50m" 37 | memory: "128Mi" 38 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy05-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Resource requests missing (CPU) for container \"test-deploy05\" (MT1004)" 6 | }, 7 | "uid": "38abb20b-8368-48b7-a4d3-d75c2ee3a1b5" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy05.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy05 5 | labels: 6 | app: 
test-deploy05 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy05 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy05 16 | spec: 17 | containers: 18 | - name: test-deploy05 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | readinessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | limits: 42 | cpu: "50m" 43 | memory: "128Mi" 44 | requests: 45 | memory: "128Mi" 46 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy06-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Resource requests missing (MEM) for container \"test-deploy06\" (MT1004)" 6 | }, 7 | "uid": "d3e21059-ab6a-41c5-993e-147b303966a5" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy06.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy06 5 | labels: 6 | app: test-deploy06 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy06 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy06 16 | spec: 17 | containers: 18 | - name: test-deploy06 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | readinessProbe: 32 | 
httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | limits: 42 | cpu: "50m" 43 | memory: "128Mi" 44 | requests: 45 | cpu: "50m" 46 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy07-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Resource limits missing (CPU) for container \"test-deploy07\" (MT1003)" 6 | }, 7 | "uid": "e7ebc129-091c-41d4-a716-4beea34c5399" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy07.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy07 5 | labels: 6 | app: test-deploy07 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy07 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy07 16 | spec: 17 | containers: 18 | - name: test-deploy07 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | readinessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | limits: 42 | memory: "128Mi" 43 | requests: 44 | cpu: "50m" 45 | memory: "128Mi" 46 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy08-response.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Resource limits missing (MEM) for container \"test-deploy08\" (MT1003)" 6 | }, 7 | "uid": "f1424388-52d2-4887-9f64-aa09df388bb4" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy08.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy08 5 | labels: 6 | app: test-deploy08 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy08 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy08 16 | spec: 17 | containers: 18 | - name: test-deploy08 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | readinessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | limits: 42 | cpu: "50m" 43 | requests: 44 | cpu: "50m" 45 | memory: "128Mi" 46 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy09-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy09\" (MT1004)" 6 | }, 7 | "uid": "4462cbee-9c9a-47ce-b312-07939af94744" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy09.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | 
name: test-deploy09 5 | labels: 6 | app: test-deploy09 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy09 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy09 16 | spec: 17 | containers: 18 | - name: test-deploy09 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | readinessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | limits: 42 | cpu: "50m" 43 | memory: "128Mi" 44 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy10-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy10\" (MT1003)" 6 | }, 7 | "uid": "d748f395-7198-429b-b6d2-9dde803b61c3" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy10.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy10 5 | labels: 6 | app: test-deploy10 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy10 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy10 16 | spec: 17 | containers: 18 | - name: test-deploy10 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | httpHeaders: 27 | - name: X-Custom-Header 28 | value: Awesome 29 | initialDelaySeconds: 3 30 | periodSeconds: 3 31 | 
readinessProbe: 32 | httpGet: 33 | path: /healthz 34 | port: 8080 35 | httpHeaders: 36 | - name: X-Custom-Header 37 | value: Awesome 38 | initialDelaySeconds: 3 39 | periodSeconds: 3 40 | resources: 41 | requests: 42 | cpu: "50m" 43 | memory: "128Mi" 44 | -------------------------------------------------------------------------------- /testing/deployments/test-deploy11-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] LOW - Liveness Probe missing for container \"test-deploy11\" (MT1001), [FAIL] LOW - Liveness Probe missing for container \"test-deploy11-sidecar\" (MT1001), [FAIL] LOW - Readiness Probe missing for container \"test-deploy11\" (MT1002), [FAIL] LOW - Readiness Probe missing for container \"test-deploy11-sidecar\" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy11\" (MT1003), [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy11-sidecar\" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy11\" (MT1004), [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy11-sidecar\" (MT1004)" 6 | }, 7 | "uid": "4c63e05e-8550-46d3-a9d7-b616a71daacf" 8 | } 9 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy11.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "apps", 8 | "kind": "Deployment", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "apps/v1", 14 | "kind": "Deployment", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-deploy11\"},\"name\":\"test-deploy11\",\"namespace\":\"test1\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"app\":\"test-deploy11\"}},\"template\":{\"metadata\":{\"labels\":{\"app\":\"test-deploy11\"}},\"spec\":{\"containers\":[{\"args\":[\"/server\"],\"image\":\"k8s.gcr.io/liveness\",\"name\":\"test-deploy11\"},{\"args\":[\"while true; do echo 'still running'; sleep 30; done\"],\"image\":\"busybox\",\"name\":\"test-deploy11-sidecar\"}]}}}}\n" 18 | }, 19 | "creationTimestamp": "2019-08-05T05:45:20Z", 20 | "generation": 1, 21 | "labels": { 22 | "app": "test-deploy11" 23 | }, 24 | "name": "test-deploy11", 25 | "namespace": "test1", 26 | "uid": "8c732df9-5c29-497b-95c9-efca333bedae" 27 | }, 28 | "spec": { 29 | "progressDeadlineSeconds": 600, 30 | "replicas": 1, 31 | "revisionHistoryLimit": 10, 32 | "selector": { 33 | "matchLabels": { 34 | "app": "test-deploy11" 35 | } 36 | }, 37 | "strategy": { 38 | "rollingUpdate": { 39 | "maxSurge": "25%", 40 | "maxUnavailable": "25%" 41 | }, 42 | "type": "RollingUpdate" 43 | }, 44 | "template": { 45 | "metadata": { 46 | "creationTimestamp": null, 47 | "labels": { 48 | "app": "test-deploy11" 49 | } 50 | }, 51 | "spec": { 52 | "containers": [ 53 | { 54 | "args": [ 55 | "/server" 56 | ], 57 | "image": "k8s.gcr.io/liveness", 58 | "imagePullPolicy": "Always", 59 | "name": "test-deploy11", 60 | "resources": {}, 61 | "terminationMessagePath": "/dev/termination-log", 62 | "terminationMessagePolicy": "File" 63 | }, 64 | { 65 | "args": [ 66 | "while true; do echo 'still running'; sleep 30; done" 67 | ], 68 | "image": "busybox", 69 | "imagePullPolicy": "Always", 70 | "name": "test-deploy11-sidecar", 71 | "resources": {}, 72 | "terminationMessagePath": "/dev/termination-log", 73 | "terminationMessagePolicy": "File" 74 | } 75 | ], 76 | "dnsPolicy": "ClusterFirst", 77 | "restartPolicy": "Always", 78 | "schedulerName": 
"default-scheduler", 79 | "securityContext": {}, 80 | "terminationGracePeriodSeconds": 30 81 | } 82 | } 83 | }, 84 | "status": {} 85 | }, 86 | "oldObject": null, 87 | "operation": "CREATE", 88 | "options": { 89 | "apiVersion": "meta.k8s.io/v1", 90 | "kind": "CreateOptions" 91 | }, 92 | "requestKind": { 93 | "group": "apps", 94 | "kind": "Deployment", 95 | "version": "v1" 96 | }, 97 | "requestResource": { 98 | "group": "apps", 99 | "resource": "deployments", 100 | "version": "v1" 101 | }, 102 | "resource": { 103 | "group": "apps", 104 | "resource": "deployments", 105 | "version": "v1" 106 | }, 107 | "uid": "4c63e05e-8550-46d3-a9d7-b616a71daacf", 108 | "userInfo": { 109 | "groups": [ 110 | "system:masters", 111 | "system:authenticated" 112 | ], 113 | "username": "kubernetes-admin" 114 | } 115 | } 116 | } -------------------------------------------------------------------------------- /testing/deployments/test-deploy11.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: test-deploy11 5 | labels: 6 | app: test-deploy11 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: test-deploy11 12 | template: 13 | metadata: 14 | labels: 15 | app: test-deploy11 16 | spec: 17 | containers: 18 | - name: test-deploy11 19 | image: k8s.gcr.io/liveness 20 | args: 21 | - /server 22 | - name: test-deploy11-sidecar 23 | image: busybox 24 | args: 25 | - "while true; do echo 'still running'; sleep 30; done" 26 | -------------------------------------------------------------------------------- /testing/export-env.sh: -------------------------------------------------------------------------------- 1 | # Copyright 2020 T-Mobile, USA, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # 15 | # Trademark Disclaimer: Neither the name of T-Mobile, USA, Inc. nor the names of 16 | # its contributors may be used to endorse or promote products derived from this 17 | # software without specific prior written permission. 18 | 19 | export MAGTAPE_NAMESPACE_NAME="magtape-system" 20 | export MAGTAPE_POD_NAME="magtape-abc1234" 21 | export MAGTAPE_CLUSTER_NAME="test-cluster" 22 | export MAGTAPE_K8S_EVENTS_ENABLED="TRUE" 23 | export MAGTAPE_SLACK_ENABLED="FALSE" 24 | export MAGTAPE_SLACK_PASSIVE="FALSE" 25 | export MAGTAPE_SLACK_WEBHOOK_URL_DEFAULT="https://slacky.slack.slack" 26 | export MAGTAPE_SLACK_CHANNEL="test" 27 | export MAGTAPE_SLACK_USER="test" 28 | export MAGTAPE_SLACK_ICON=":magtape:" 29 | export MAGTAPE_DENY_LEVEL="LOW" 30 | export MAGTAPE_LOG_LEVEL="INFO" 31 | export OPA_BASE_URL="http://127.0.0.1:8181" 32 | export OPA_K8S_PATH="/v0/data/magtape" 33 | -------------------------------------------------------------------------------- /testing/functional-tests.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - kind: deployments 3 | desired: pass 4 | script: 5 | manifests: 6 | - name: "Deployment - Pass all policies" 7 | file: test-deploy01.yaml 8 | - name: "Deployment - No Liveness Probe" 9 | file: test-deploy03.yaml 10 | - name: "Deployment - No Readiness Probe" 11 | file: test-deploy04.yaml 12 | - name: "Deployment - No CPU Requests" 13 | file: test-deploy05.yaml 14 | - name: "Deployment - No MEM Requests" 15 | file: test-deploy06.yaml 16 | - name: 
"Deployment - No CPU Limits" 17 | file: test-deploy07.yaml 18 | - name: "Deployment - No MEM Limits" 19 | file: test-deploy08.yaml 20 | - name: "Deployment No CPU or MEM Requests" 21 | file: test-deploy09.yaml 22 | - name: "Deployment - No CPU or MEM Limits" 23 | file: test-deploy10.yaml 24 | - name: "Deployment - Multiple Containers" 25 | file: test-deploy11.yaml 26 | - kind: deployments 27 | desired: fail 28 | script: 29 | manifests: 30 | - name: "Deployment - Fail all policies" 31 | file: test-deploy02.yaml 32 | - kind: pdbs 33 | desired: pass 34 | script: 35 | manifests: 36 | - name: "PDB - minAvailable, Percent in range" 37 | file: test-pdb02.yaml 38 | - name: "PDB - maxUnavailable, Percent in range" 39 | file: test-pdb05.yaml 40 | - kind: pdbs 41 | desired: fail 42 | script: 43 | manifests: 44 | - name: "PDB - minAvailable, Integer value" 45 | file: test-pdb01.yaml 46 | - name: "PDB - minAvailable, Percent out or range" 47 | file: test-pdb03.yaml 48 | - name: "PDB - maxUnavailable, Integer value" 49 | file: test-pdb04.yaml 50 | - name: "PDB - maxUnavailable, Percent out or range" 51 | file: test-pdb06.yaml 52 | - kind: pods 53 | desired: pass 54 | script: 55 | manifests: 56 | - name: "Pod - Container with hostPath" 57 | file: test-pod02.yaml 58 | - name: "Pod - emptyDir over size limit (100M)" 59 | file: test-pod03.yaml 60 | - name: "Pod - ownerReference" 61 | file: test-pod04.yaml 62 | - name: "Pod - emptyDir under size limit (100M)" 63 | file: test-pod05.yaml 64 | - name: "Pod - emptyDir under size limit (100M) + Pod - ownerReference" 65 | file: test-pod06.yaml 66 | - kind: pods 67 | desired: fail 68 | script: 69 | manifests: 70 | - name: "Pod - Container with hostPort" 71 | file: test-pod01.yaml 72 | - kind: services 73 | desired: pass 74 | script: 75 | manifests: 76 | - name: "Service - Pass all policies" 77 | file: test-svc01.yaml 78 | - name: "Service - nodePort in range" 79 | file: test-svc02.yaml 80 | - name: "Service nodePort out of range" 81 | file: 
test-svc03.yaml 82 | - name: "Service - ???" 83 | file: test-svc04.yaml 84 | -------------------------------------------------------------------------------- /testing/pdbs/test-pdb01.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: test-pdb01 5 | spec: 6 | minAvailable: 10 7 | selector: 8 | matchLabels: 9 | app: test-pdb01 10 | -------------------------------------------------------------------------------- /testing/pdbs/test-pdb02.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: test-pdb02 5 | spec: 6 | minAvailable: "66%" 7 | selector: 8 | matchLabels: 9 | app: test-pdb02 10 | -------------------------------------------------------------------------------- /testing/pdbs/test-pdb03.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: test-pdb03 5 | spec: 6 | minAvailable: "67%" 7 | selector: 8 | matchLabels: 9 | app: test-pdb03 10 | -------------------------------------------------------------------------------- /testing/pdbs/test-pdb04.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: test-pdb04 5 | spec: 6 | maxUnavailable: 10 7 | selector: 8 | matchLabels: 9 | app: test-pdb04 10 | -------------------------------------------------------------------------------- /testing/pdbs/test-pdb05.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: test-pdb05 5 | spec: 6 | maxUnavailable: "33%" 7 | selector: 8 | matchLabels: 9 | app: test-pdb05 10 | 
-------------------------------------------------------------------------------- /testing/pdbs/test-pdb06.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: test-pdb06 5 | spec: 6 | maxUnavailable: "32%" 7 | selector: 8 | matchLabels: 9 | app: test-pdb06 10 | -------------------------------------------------------------------------------- /testing/pods/test-pod01-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] HIGH - hostPort is configured for container \"test-pod01\" (MT1008), [FAIL] LOW - Liveness Probe missing for container \"test-pod01\" (MT1001), [FAIL] LOW - Readiness Probe missing for container \"test-pod01\" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-pod01\" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-pod01\" (MT1004)" 6 | }, 7 | "uid": "573f0594-58cb-11ea-b876-005056a7db08" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /testing/pods/test-pod01.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Pod", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Pod", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\"}]}}\n" 18 | }, 19 | "creationTimestamp": "2020-02-20T18:30:06Z", 20 | "labels": { 21 | "run": "toolbox" 22 | }, 23 | "name": "test-pod01", 24 | "namespace": "test1", 25 | "uid": "043ff4fd-540f-11ea-85fd-005056a7b324" 26 | }, 27 | "spec": { 28 | "containers": [ 29 | { 30 | "command": [ 31 | "sleep", 32 | "360000" 33 | ], 34 | "image": "jmsearcy/twrtools", 35 | "imagePullPolicy": "Always", 36 | "name": "toolbox", 37 | "ports": [ 38 | { 39 | "containerPort": 8080, 40 | "hostPort": 8080, 41 | "protocol": "TCP" 42 | } 43 | ], 44 | "resources": {}, 45 | "terminationMessagePath": "/dev/termination-log", 46 | "terminationMessagePolicy": "File", 47 | "volumeMounts": [ 48 | { 49 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 50 | "name": "default-token-89cp6", 51 | "readOnly": true 52 | } 53 | ] 54 | } 55 | ], 56 | "dnsPolicy": "ClusterFirst", 57 | "enableServiceLinks": true, 58 | "priority": 0, 59 | "restartPolicy": "Always", 60 | "schedulerName": "default-scheduler", 61 | "securityContext": {}, 62 | "serviceAccount": "default", 63 | "serviceAccountName": "default", 64 | "terminationGracePeriodSeconds": 30, 65 | "tolerations": [ 66 | { 67 | "effect": "NoExecute", 68 | "key": "node.kubernetes.io/not-ready", 69 | "operator": "Exists", 70 | "tolerationSeconds": 300 71 | }, 72 | { 73 | "effect": "NoExecute", 74 | "key": "node.kubernetes.io/unreachable", 75 | "operator": "Exists", 76 | "tolerationSeconds": 300 77 | } 78 | ], 79 | "volumes": [ 80 | { 81 | "name": "default-token-89cp6", 82 | "secret": { 83 | "secretName": "default-token-89cp6" 84 | } 85 | } 86 | ] 87 | }, 88 | "status": { 89 | "phase": "Pending", 90 | "qosClass": "BestEffort" 91 | } 92 | }, 93 | 
"oldObject": null, 94 | "operation": "CREATE", 95 | "resource": { 96 | "group": "", 97 | "resource": "pods", 98 | "version": "v1" 99 | }, 100 | "uid": "043ffbbf-540f-11ea-85fd-005056a7b324", 101 | "userInfo": { 102 | "groups": [ 103 | "group1" 104 | ], 105 | "username": "user1" 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /testing/pods/test-pod01.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: toolbox 6 | name: test-pod01 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "360000" 12 | image: jmsearcy/twrtools 13 | imagePullPolicy: Always 14 | name: toolbox 15 | ports: 16 | - containerPort: 8080 17 | hostPort: 8080 18 | -------------------------------------------------------------------------------- /testing/pods/test-pod02-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] HIGH - hostPath is configured for volume \"default-token\" (MT1010), [FAIL] LOW - Liveness Probe missing for container \"test-pod02\" (MT1001), [FAIL] LOW - Readiness Probe missing for container \"test-pod02\" (MT1002), [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-pod02\" (MT1003), [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-pod02\" (MT1004)" 6 | }, 7 | "uid": "7a202044-58cd-11ea-85c2-005056a72258" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /testing/pods/test-pod02.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Pod", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": 
{ 13 | "apiVersion": "v1", 14 | "kind": "Pod", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\",\"ports\":[{\"containerPort\":8080,\"hostPort\":8080}]}],\"volumes\":[{\"hostPath\":{\"path\":\"/data\"},\"name\":\"default-token\"}]}}\n" 18 | }, 19 | "creationTimestamp": "2020-02-25T19:23:08Z", 20 | "labels": { 21 | "run": "toolbox" 22 | }, 23 | "name": "test-pod02", 24 | "namespace": "test1", 25 | "uid": "413e9d97-5804-11ea-b876-005056a7db08" 26 | }, 27 | "spec": { 28 | "containers": [ 29 | { 30 | "command": [ 31 | "sleep", 32 | "360000" 33 | ], 34 | "image": "jmsearcy/twrtools", 35 | "imagePullPolicy": "Always", 36 | "name": "toolbox", 37 | "resources": {}, 38 | "terminationMessagePath": "/dev/termination-log", 39 | "terminationMessagePolicy": "File", 40 | "volumeMounts": [ 41 | { 42 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 43 | "name": "default-token-q999w", 44 | "readOnly": true 45 | } 46 | ] 47 | } 48 | ], 49 | "dnsPolicy": "ClusterFirst", 50 | "enableServiceLinks": true, 51 | "priority": 0, 52 | "restartPolicy": "Always", 53 | "schedulerName": "default-scheduler", 54 | "securityContext": {}, 55 | "serviceAccount": "default", 56 | "serviceAccountName": "default", 57 | "terminationGracePeriodSeconds": 30, 58 | "tolerations": [ 59 | { 60 | "effect": "NoExecute", 61 | "key": "node.kubernetes.io/not-ready", 62 | "operator": "Exists", 63 | "tolerationSeconds": 300 64 | }, 65 | { 66 | "effect": "NoExecute", 67 | "key": "node.kubernetes.io/unreachable", 68 | "operator": "Exists", 69 | "tolerationSeconds": 300 70 | } 71 | ], 72 | "volumes": [ 73 | { 74 | "hostPath": { 75 | "path": "/data", 76 | "type": "" 
77 | }, 78 | "name": "default-token" 79 | }, 80 | { 81 | "name": "default-token-q999w", 82 | "secret": { 83 | "secretName": "default-token-q999w" 84 | } 85 | } 86 | ] 87 | }, 88 | "status": { 89 | "phase": "Pending", 90 | "qosClass": "BestEffort" 91 | } 92 | }, 93 | "oldObject": null, 94 | "operation": "CREATE", 95 | "resource": { 96 | "group": "", 97 | "resource": "pods", 98 | "version": "v1" 99 | }, 100 | "uid": "413ea31f-5804-11ea-b876-005056a7db08", 101 | "userInfo": { 102 | "groups": [ 103 | "group1" 104 | ], 105 | "username": "user1" 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /testing/pods/test-pod02.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: toolbox 6 | name: test-pod02 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "360000" 12 | image: jmsearcy/twrtools 13 | imagePullPolicy: Always 14 | name: toolbox 15 | volumes: 16 | - name: default-token 17 | hostPath: 18 | path: /data 19 | -------------------------------------------------------------------------------- /testing/pods/test-pod03.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Pod", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Pod", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": 
"{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\"},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\"}],\"volumes\":[{\"emptyDir\":{\"sizeLimit\":\"200M\"},\"name\":\"default-token\"}]}}\n" 18 | }, 19 | "creationTimestamp": "2020-02-27T21:31:44Z", 20 | "labels": { 21 | "run": "toolbox" 22 | }, 23 | "name": "test-pod03", 24 | "namespace": "test1", 25 | "uid": "8ceb0b14-59a8-11ea-b876-005056a7db08" 26 | }, 27 | "spec": { 28 | "containers": [ 29 | { 30 | "command": [ 31 | "sleep", 32 | "360000" 33 | ], 34 | "image": "jmsearcy/twrtools", 35 | "imagePullPolicy": "Always", 36 | "name": "toolbox", 37 | "resources": {}, 38 | "terminationMessagePath": "/dev/termination-log", 39 | "terminationMessagePolicy": "File", 40 | "volumeMounts": [ 41 | { 42 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 43 | "name": "default-token-q999w", 44 | "readOnly": true 45 | } 46 | ] 47 | } 48 | ], 49 | "dnsPolicy": "ClusterFirst", 50 | "enableServiceLinks": true, 51 | "priority": 0, 52 | "restartPolicy": "Always", 53 | "schedulerName": "default-scheduler", 54 | "securityContext": {}, 55 | "serviceAccount": "default", 56 | "serviceAccountName": "default", 57 | "terminationGracePeriodSeconds": 30, 58 | "tolerations": [ 59 | { 60 | "effect": "NoExecute", 61 | "key": "node.kubernetes.io/not-ready", 62 | "operator": "Exists", 63 | "tolerationSeconds": 300 64 | }, 65 | { 66 | "effect": "NoExecute", 67 | "key": "node.kubernetes.io/unreachable", 68 | "operator": "Exists", 69 | "tolerationSeconds": 300 70 | } 71 | ], 72 | "volumes": [ 73 | { 74 | "emptyDir": { 75 | "sizeLimit": "200M" 76 | }, 77 | "name": "default-token" 78 | }, 79 | { 80 | "name": "default-token-q999w", 81 | "secret": { 82 | "secretName": "default-token-q999w" 83 | } 84 | } 85 | ] 86 | }, 87 | "status": { 88 | "phase": 
"Pending", 89 | "qosClass": "BestEffort" 90 | } 91 | }, 92 | "oldObject": null, 93 | "operation": "CREATE", 94 | "resource": { 95 | "group": "", 96 | "resource": "pods", 97 | "version": "v1" 98 | }, 99 | "uid": "8ceb0f85-59a8-11ea-b876-005056a7db08", 100 | "userInfo": { 101 | "groups": [ 102 | "group1" 103 | ], 104 | "username": "user1" 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /testing/pods/test-pod03.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: toolbox 6 | name: test-pod03 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "360000" 12 | image: jmsearcy/twrtools 13 | imagePullPolicy: Always 14 | name: toolbox 15 | volumes: 16 | - name: default-token 17 | emptyDir: 18 | sizeLimit: 200M 19 | -------------------------------------------------------------------------------- /testing/pods/test-pod04.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Pod", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Pod", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"labels\":{\"run\":\"toolbox\"},\"name\":\"toolbox\",\"namespace\":\"test1\",\"ownerReferences\":[{\"apiVersion\":\"v1\",\"kind\":\"Replica\",\"name\":\"my-repset\",\"uid\":\"uidexa1\"}]},\"spec\":{\"containers\":[{\"command\":[\"sleep\",\"360000\"],\"image\":\"jmsearcy/twrtools\",\"imagePullPolicy\":\"Always\",\"name\":\"toolbox\"}]}}\n" 18 | }, 19 | "creationTimestamp": "2020-03-04T21:38:41Z", 20 | "labels": { 21 | "run": "toolbox" 22 | }, 23 | 
"name": "test-pod04", 24 | "namespace": "test1", 25 | "ownerReferences": [ 26 | { 27 | "apiVersion": "v1", 28 | "kind": "Replica", 29 | "name": "my-repset", 30 | "uid": "uidexa1" 31 | } 32 | ], 33 | "uid": "84007904-5e60-11ea-a33d-005056a72b7b" 34 | }, 35 | "spec": { 36 | "containers": [ 37 | { 38 | "command": [ 39 | "sleep", 40 | "360000" 41 | ], 42 | "image": "jmsearcy/twrtools", 43 | "imagePullPolicy": "Always", 44 | "name": "toolbox", 45 | "resources": {}, 46 | "terminationMessagePath": "/dev/termination-log", 47 | "terminationMessagePolicy": "File", 48 | "volumeMounts": [ 49 | { 50 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", 51 | "name": "default-token-q999w", 52 | "readOnly": true 53 | } 54 | ] 55 | } 56 | ], 57 | "dnsPolicy": "ClusterFirst", 58 | "enableServiceLinks": true, 59 | "priority": 0, 60 | "restartPolicy": "Always", 61 | "schedulerName": "default-scheduler", 62 | "securityContext": {}, 63 | "serviceAccount": "default", 64 | "serviceAccountName": "default", 65 | "terminationGracePeriodSeconds": 30, 66 | "tolerations": [ 67 | { 68 | "effect": "NoExecute", 69 | "key": "node.kubernetes.io/not-ready", 70 | "operator": "Exists", 71 | "tolerationSeconds": 300 72 | }, 73 | { 74 | "effect": "NoExecute", 75 | "key": "node.kubernetes.io/unreachable", 76 | "operator": "Exists", 77 | "tolerationSeconds": 300 78 | } 79 | ], 80 | "volumes": [ 81 | { 82 | "name": "default-token-q999w", 83 | "secret": { 84 | "secretName": "default-token-q999w" 85 | } 86 | } 87 | ] 88 | }, 89 | "status": { 90 | "phase": "Pending", 91 | "qosClass": "BestEffort" 92 | } 93 | }, 94 | "oldObject": null, 95 | "operation": "CREATE", 96 | "resource": { 97 | "group": "", 98 | "resource": "pods", 99 | "version": "v1" 100 | }, 101 | "uid": "84007d21-5e60-11ea-a33d-005056a72b7b", 102 | "userInfo": { 103 | "groups": [ 104 | "group1" 105 | ], 106 | "username": "user1" 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- 
/testing/pods/test-pod04.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: toolbox 6 | name: test-pod04 7 | ownerReferences: 8 | - name: my-repset 9 | kind: Replica 10 | uid: uidexa1 11 | apiVersion: v1 12 | spec: 13 | containers: 14 | - command: 15 | - sleep 16 | - "360000" 17 | image: jmsearcy/twrtools 18 | imagePullPolicy: Always 19 | name: toolbox 20 | -------------------------------------------------------------------------------- /testing/pods/test-pod05.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | run: toolbox 6 | name: test-pod05 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "360000" 12 | image: jmsearcy/twrtools 13 | imagePullPolicy: Always 14 | name: toolbox 15 | livenessProbe: 16 | httpGet: 17 | path: /healthz 18 | port: 8080 19 | httpHeaders: 20 | - name: X-Custom-Header 21 | value: Awesome 22 | initialDelaySeconds: 3 23 | periodSeconds: 3 24 | readinessProbe: 25 | httpGet: 26 | path: /healthz 27 | port: 8080 28 | httpHeaders: 29 | - name: X-Custom-Header 30 | value: Awesome 31 | initialDelaySeconds: 3 32 | periodSeconds: 3 33 | resources: 34 | limits: 35 | cpu: "50m" 36 | memory: "128Mi" 37 | requests: 38 | cpu: "50m" 39 | memory: "128Mi" 40 | volumes: 41 | - name: default-token 42 | emptyDir: 43 | sizeLimit: "50M" 44 | -------------------------------------------------------------------------------- /testing/pods/test-pod06-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": true 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /testing/pods/test-pod06.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | 
run: toolbox 6 | name: test-pod06 7 | ownerReferences: 8 | - name: my-repset 9 | kind: Replica 10 | uid: uidexa1 11 | apiVersion: v1 12 | spec: 13 | containers: 14 | - command: 15 | - sleep 16 | - "360000" 17 | image: jmsearcy/twrtools 18 | imagePullPolicy: Always 19 | name: toolbox 20 | livenessProbe: 21 | httpGet: 22 | path: /healthz 23 | port: 8080 24 | httpHeaders: 25 | - name: X-Custom-Header 26 | value: Awesome 27 | initialDelaySeconds: 3 28 | periodSeconds: 3 29 | readinessProbe: 30 | httpGet: 31 | path: /healthz 32 | port: 8080 33 | httpHeaders: 34 | - name: X-Custom-Header 35 | value: Awesome 36 | initialDelaySeconds: 3 37 | periodSeconds: 3 38 | resources: 39 | limits: 40 | cpu: "50m" 41 | memory: "128Mi" 42 | requests: 43 | cpu: "50m" 44 | memory: "128Mi" 45 | volumes: 46 | - name: default-token 47 | emptyDir: 48 | sizeLimit: "50M" 49 | -------------------------------------------------------------------------------- /testing/services/test-svc01.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Service", 9 | "version": "v1" 10 | }, 11 | "namespace": "default", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Service", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"labels\":{\"app\":\"test-svc\"},\"name\":\"test-svc\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"name\":\"http\",\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"app\":\"test-svc\"},\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n" 18 | }, 19 | "creationTimestamp": "2020-02-04T01:16:07Z", 20 | "labels": { 21 | "app": "test-svc" 22 | }, 23 | "name": "test-svc01", 24 | "namespace": "default", 25 | 
"uid": "ebaa71f7-46eb-11ea-85fd-005056a7b324" 26 | }, 27 | "spec": { 28 | "clusterIP": "198.19.241.208", 29 | "ports": [ 30 | { 31 | "name": "http", 32 | "port": 443, 33 | "protocol": "TCP", 34 | "targetPort": 443 35 | } 36 | ], 37 | "selector": { 38 | "app": "test-svc" 39 | }, 40 | "sessionAffinity": "None", 41 | "type": "ClusterIP" 42 | }, 43 | "status": { 44 | "loadBalancer": {} 45 | } 46 | }, 47 | "oldObject": null, 48 | "operation": "CREATE", 49 | "resource": { 50 | "group": "", 51 | "resource": "services", 52 | "version": "v1" 53 | }, 54 | "uid": "ebaa77b8-46eb-11ea-85fd-005056a7b324", 55 | "userInfo": { 56 | "groups": [ 57 | "group1", 58 | "group2" 59 | ], 60 | "username": "user2" 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /testing/services/test-svc01.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: test-svc 6 | name: test-svc01 7 | spec: 8 | ports: 9 | - name: http 10 | port: 443 11 | protocol: TCP 12 | targetPort: 443 13 | selector: 14 | app: test-svc 15 | type: ClusterIP 16 | -------------------------------------------------------------------------------- /testing/services/test-svc02-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": false, 4 | "status": { 5 | "message": "[FAIL] HIGH - nodePort out of defined range for Service \"test-svc01\" (MT2002)" 6 | }, 7 | "uid": "386b7c9d-6ae7-11ea-b876-005056a7db08" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /testing/services/test-svc02.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Service", 9 | 
"version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Service", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-svc\"},\"name\":\"test-svc\",\"namespace\":\"test1\"},\"spec\":{\"ports\":[{\"name\":\"test\",\"nodePort\":30400,\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"app\":\"test-svc\"},\"type\":\"NodePort\"}}\n" 18 | }, 19 | "creationTimestamp": "2020-03-20T17:51:09Z", 20 | "labels": { 21 | "app": "test-svc" 22 | }, 23 | "name": "test-svc02", 24 | "namespace": "test1", 25 | "uid": "618ae7bb-6ad3-11ea-b876-005056a7db08" 26 | }, 27 | "spec": { 28 | "clusterIP": "198.19.132.197", 29 | "externalTrafficPolicy": "Cluster", 30 | "ports": [ 31 | { 32 | "name": "test", 33 | "nodePort": 30400, 34 | "port": 443, 35 | "protocol": "TCP", 36 | "targetPort": 443 37 | } 38 | ], 39 | "selector": { 40 | "app": "test-svc" 41 | }, 42 | "sessionAffinity": "None", 43 | "type": "NodePort" 44 | }, 45 | "status": { 46 | "loadBalancer": {} 47 | } 48 | }, 49 | "oldObject": null, 50 | "operation": "CREATE", 51 | "resource": { 52 | "group": "", 53 | "resource": "services", 54 | "version": "v1" 55 | }, 56 | "uid": "618aebe1-6ad3-11ea-b876-005056a7db08", 57 | "userInfo": { 58 | "groups": [ 59 | "group1" 60 | ], 61 | "username": "user1" 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /testing/services/test-svc02.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: test-svc 6 | name: test-svc02 7 | spec: 8 | ports: 9 | - name: test 10 | port: 443 11 | nodePort: 30400 12 | protocol: TCP 13 | targetPort: 443 14 | selector: 15 | app: test-svc 16 | type: NodePort 17 | 
-------------------------------------------------------------------------------- /testing/services/test-svc03-response.json: -------------------------------------------------------------------------------- 1 | { 2 | "response": { 3 | "allowed": true 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /testing/services/test-svc03.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Service", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Service", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"test-svc\"},\"name\":\"test-svc\",\"namespace\":\"test1\"},\"spec\":{\"ports\":[{\"name\":\"test\",\"nodePort\":30101,\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"app\":\"test-svc\"},\"type\":\"NodePort\"}}\n" 18 | }, 19 | "creationTimestamp": "2020-03-20T20:15:08Z", 20 | "labels": { 21 | "app": "test-svc" 22 | }, 23 | "name": "test-svc03", 24 | "namespace": "test1", 25 | "uid": "7f07ccc4-6ae7-11ea-b876-005056a7db08" 26 | }, 27 | "spec": { 28 | "clusterIP": "198.19.124.248", 29 | "externalTrafficPolicy": "Cluster", 30 | "ports": [ 31 | { 32 | "name": "test", 33 | "nodePort": 30101, 34 | "port": 443, 35 | "protocol": "TCP", 36 | "targetPort": 443 37 | } 38 | ], 39 | "selector": { 40 | "app": "test-svc" 41 | }, 42 | "sessionAffinity": "None", 43 | "type": "NodePort" 44 | }, 45 | "status": { 46 | "loadBalancer": {} 47 | } 48 | }, 49 | "oldObject": null, 50 | "operation": "CREATE", 51 | "resource": { 52 | "group": "", 53 | "resource": "services", 54 | "version": "v1" 55 | }, 56 | "uid": 
"7f07d0e9-6ae7-11ea-b876-005056a7db08", 57 | "userInfo": { 58 | "groups": [ 59 | "group1" 60 | ], 61 | "username": "user1" 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /testing/services/test-svc03.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: test-svc 6 | name: test-svc03 7 | spec: 8 | ports: 9 | - name: test 10 | port: 443 11 | nodePort: 30101 12 | protocol: TCP 13 | targetPort: 443 14 | selector: 15 | app: test-svc 16 | type: NodePort 17 | -------------------------------------------------------------------------------- /testing/services/test-svc04.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admission.k8s.io/v1beta1", 3 | "kind": "AdmissionReview", 4 | "request": { 5 | "dryRun": false, 6 | "kind": { 7 | "group": "", 8 | "kind": "Service", 9 | "version": "v1" 10 | }, 11 | "namespace": "test1", 12 | "object": { 13 | "apiVersion": "v1", 14 | "kind": "Service", 15 | "metadata": { 16 | "annotations": { 17 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"labels\":{\"app\":\"my-svc\"},\"name\":\"my-svc\",\"namespace\":\"test1\"},\"spec\":{\"ports\":[{\"name\":\"5000-8080\",\"port\":5000,\"protocol\":\"TCP\",\"targetPort\":8080}],\"selector\":{\"app\":\"my-svc\"},\"type\":\"ClusterIP\"},\"status\":{\"loadBalancer\":{}}}\n" 18 | }, 19 | "creationTimestamp": "2020-03-25T17:38:31Z", 20 | "labels": { 21 | "app": "my-svc" 22 | }, 23 | "name": "test-svc04", 24 | "namespace": "test1", 25 | "uid": "7197a413-6ebf-11ea-b876-005056a7db08" 26 | }, 27 | "spec": { 28 | "clusterIP": "198.19.221.114", 29 | "ports": [ 30 | { 31 | "name": "5000-8080", 32 | "port": 5000, 33 | "protocol": "TCP", 34 | "targetPort": 8080 35 | } 36 | ], 37 | 
"selector": { 38 | "app": "my-svc" 39 | }, 40 | "sessionAffinity": "None", 41 | "type": "ClusterIP" 42 | }, 43 | "status": { 44 | "loadBalancer": {} 45 | } 46 | }, 47 | "oldObject": null, 48 | "operation": "CREATE", 49 | "resource": { 50 | "group": "", 51 | "resource": "services", 52 | "version": "v1" 53 | }, 54 | "uid": "7197a7d7-6ebf-11ea-b876-005056a7db08", 55 | "userInfo": { 56 | "groups": [ 57 | "group1" 58 | ], 59 | "username": "user1" 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /testing/services/test-svc04.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: my-svc 7 | name: test-svc04 8 | spec: 9 | ports: 10 | - name: 5000-8080 11 | port: 5000 12 | protocol: TCP 13 | targetPort: 8080 14 | selector: 15 | app: my-svc 16 | type: ClusterIP 17 | status: 18 | loadBalancer: {} 19 | -------------------------------------------------------------------------------- /testing/slack/slack-alert-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "attachments": [ 3 | { 4 | "color": "danger", 5 | "fallback": "MagTape detected failures for Deployment \"test-deploy11\" in namespace \"test1\" on cluster \"cluster1\"", 6 | "fields": [ 7 | { 8 | "short": "true", 9 | "title": "Cluster", 10 | "value": "cluster1" 11 | }, 12 | { 13 | "short": "true", 14 | "title": "Namespace", 15 | "value": "test1" 16 | }, 17 | { 18 | "short": "true", 19 | "title": "MagTape Deny Level", 20 | "value": "HIGH" 21 | }, 22 | { 23 | "short": "false", 24 | "title": "Workload", 25 | "value": "deployment/test-deploy11" 26 | }, 27 | { 28 | "short": "false", 29 | "title": "User", 30 | "value": "user1" 31 | }, 32 | { 33 | "short": "true", 34 | "title": "Customer Alert", 35 | "value": "False" 36 | } 37 | ], 38 | "pretext": "MagTape | Policy Denial Detected", 39 | 
"text": "[FAIL] LOW - Liveness Probe missing for container \"test-deploy11\" (MT1001)\n [FAIL] LOW - Liveness Probe missing for container \"test-deploy11-sidecar\" (MT1001)\n [FAIL] LOW - Readiness Probe missing for container \"test-deploy11\" (MT1002)\n [FAIL] LOW - Readiness Probe missing for container \"test-deploy11-sidecar\" (MT1002)\n [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy11\" (MT1003)\n [FAIL] LOW - Resource limits missing (CPU/MEM) for container \"test-deploy11-sidecar\" (MT1003)\n [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy11\" (MT1004)\n [FAIL] LOW - Resource requests missing (CPU/MEM) for container \"test-deploy11-sidecar\" (MT1004)" 40 | } 41 | ], 42 | "icon_emoji": ":magtape:", 43 | "username": "mtbot" 44 | } -------------------------------------------------------------------------------- /testing/statefulsets/test-sts1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: web 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx # has to match .spec.template.metadata.labels 9 | serviceName: "nginx" 10 | replicas: 3 # by default is 1 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx # has to match .spec.selector.matchLabels 15 | spec: 16 | terminationGracePeriodSeconds: 10 17 | containers: 18 | - name: nginx 19 | image: k8s.gcr.io/nginx-slim:0.8 20 | ports: 21 | - containerPort: 80 22 | name: web 23 | volumeMounts: 24 | - name: www 25 | mountPath: /usr/share/nginx/html 26 | securityContext: 27 | privileged: true 28 | volumeClaimTemplates: 29 | - metadata: 30 | name: www 31 | spec: 32 | accessModes: [ "ReadWriteOnce" ] 33 | storageClassName: "my-storage-class" 34 | resources: 35 | requests: 36 | storage: 1Gi 37 | --------------------------------------------------------------------------------