├── .flake8
├── .github
└── workflows
│ └── lint.yml
├── .gitignore
├── .isort.cfg
├── LICENSE.md
├── README.md
├── docker-compose.yml
├── docs
├── .gitkeep
├── ACCESS_POD.md
├── ACCESS_SECRET.md
├── AZURE_POD_IDENTITY_EXCEPTION.md
├── BOUND_TO.md
├── CAN_ACCESS_DANGEROUS_HOST_PATH.md
├── CAN_ACCESS_HOST_FD.md
├── CAN_CGROUP_BREAKOUT.md
├── CAN_EXEC_THROUGH_KUBELET.md
├── CAN_IMPERSONATE.md
├── CAN_LOAD_KERNEL_MODULES.md
├── CAN_NSENTER_HOST.md
├── CREATE_POD_WITH_SA.md
├── CREATE_PRIVILEGED_WORKLOAD.md
├── CREATE_SECRET_WITH_TOKEN.md
├── DEBUG_POD.md
├── EXEC_INTO.md
├── GENERATE_CLIENT_CERTIFICATE.md
├── GENERATE_TOKEN.md
├── GET_AUTHENTICATION_TOKEN_FOR.md
├── GRANTS_PERMISSION.md
├── IS_CLUSTER_ADMIN.md
├── IS_PRIVILEGED.md
├── MOUNTS_SECRET.md
├── PATCH_NAMESPACE_TO_BYPASS_PSA.md
├── RBAC_ESCALATE_TO.md
├── REPLACE_IMAGE.md
├── UPDATE_AWS_AUTH.md
├── UPDATE_WORKLOAD_WITH_SA.md
├── USES_ACCOUNT.md
└── logo.png
├── icekube
├── __init__.py
├── attack_paths.py
├── cli.py
├── config.py
├── icekube.py
├── kube.py
├── log_config.py
├── models
│ ├── __init__.py
│ ├── _helpers.py
│ ├── api_resource.py
│ ├── base.py
│ ├── cluster.py
│ ├── clusterrole.py
│ ├── clusterrolebinding.py
│ ├── group.py
│ ├── namespace.py
│ ├── node.py
│ ├── pod.py
│ ├── policyrule.py
│ ├── role.py
│ ├── rolebinding.py
│ ├── secret.py
│ ├── securitycontextconstraints.py
│ ├── serviceaccount.py
│ ├── signer.py
│ └── user.py
├── neo4j.py
├── relationships.py
└── utils.py
├── mypy.ini
├── poetry.lock
└── pyproject.toml
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | exclude =
3 | .git,
4 | .pybuild,
5 | .eggs,
6 | __pycache__,
7 | .venv
8 |
9 | max_line_length = 88
10 |
11 | ignore =
12 | #Don't require docstrings on magic methods.
13 | D105,
14 |
15 | #Don't require docstrings for __init__ methods.
16 | D107
17 |
18 | #Don't require imperative mood docstrings.
19 | D401,
20 |
21 | # Line break occurred before a binary operator
22 | W503,
23 |
24 | # Whitespace before ':'
25 | E203,
26 |
27 | # Don't error on TODOs
28 | T000,
29 |
30 | ################################################################################################
31 | ################################################################################################
32 | ################################################################################################
33 | # TODO - Remove these soon!!!
34 |
35 | # Missing Docstring in public module
36 | D100,
37 |
38 | # Missing docstring in public class
39 | D101,
40 |
41 | # Missing docstring in public method
42 | D102,
43 |
44 | # Missing docstring in public function
45 | D103,
46 |
47 | # Missing docstring in public package
48 | D104,
49 |
50 | # Missing docstring in public nested class
51 | D106,
52 |
53 | # isort found an import in the wrong position
54 | I001,
55 |
56 | # isort expected 1 blank line in imports, found 0
57 | I003,
58 |
59 | # isort found an unexpected blank line in imports
60 | I004,
61 |
62 | ################################################################################################
63 | ################################################################################################
64 | ################################################################################################
65 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | pull_request:
6 | branches:
7 | - main
8 |
9 | jobs:
10 | lint:
11 | runs-on: ubuntu-latest
12 |
13 | permissions:
14 | id-token: write
15 | contents: read
16 |
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | python-version: ["3.9", "3.10", "3.11"]
21 |
22 | steps:
23 | - uses: actions/checkout@v3
24 |
25 | - name: Set up Python ${{ matrix.python-version }}
26 | uses: actions/setup-python@v4
27 | with:
28 | python-version: ${{ matrix.python-version }}
29 |
30 | - name: Install Poetry Action
31 | uses: snok/install-poetry@v1
32 |
33 | - name: Install Dependencies
34 | run: poetry install
35 | if: steps.cache.outputs.cache-hit != 'true'
36 |
37 | - name: Cache Poetry virtualenv
38 | uses: actions/cache@v3
39 | id: cache
40 | with:
41 | path: ~/.virtualenvs
42 | key: poetry-${{ hashFiles('**/poetry.lock') }}
43 | restore-keys: |
44 | poetry-${{ hashFiles('**/poetry.lock') }}
45 |
46 | - name: Run black
47 | run: poetry run black --check .
48 |
49 | - name: Run isort
50 | run: poetry run isort --check --verbose --recursive icekube
51 |
52 | - name: Run flake8
53 | run: poetry run flake8 icekube
54 |
55 | - name: Run mypy
56 | run: poetry run mypy icekube
57 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | data/
2 |
3 | # Created by https://www.toptal.com/developers/gitignore/api/vim,python
4 | # Edit at https://www.toptal.com/developers/gitignore?templates=vim,python
5 |
6 | ### Python ###
7 | # Byte-compiled / optimized / DLL files
8 | __pycache__/
9 | *.py[cod]
10 | *$py.class
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | eggs/
22 | .eggs/
23 | lib/
24 | lib64/
25 | parts/
26 | sdist/
27 | var/
28 | wheels/
29 | pip-wheel-metadata/
30 | share/python-wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | MANIFEST
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .nox/
50 | .coverage
51 | .coverage.*
52 | .cache
53 | nosetests.xml
54 | coverage.xml
55 | *.cover
56 | *.py,cover
57 | .hypothesis/
58 | .pytest_cache/
59 |
60 | # Translations
61 | *.mo
62 | *.pot
63 |
64 | # Django stuff:
65 | *.log
66 | local_settings.py
67 | db.sqlite3
68 | db.sqlite3-journal
69 |
70 | # Flask stuff:
71 | instance/
72 | .webassets-cache
73 |
74 | # Scrapy stuff:
75 | .scrapy
76 |
77 | # Sphinx documentation
78 | docs/_build/
79 |
80 | # PyBuilder
81 | target/
82 |
83 | # Jupyter Notebook
84 | .ipynb_checkpoints
85 |
86 | # IPython
87 | profile_default/
88 | ipython_config.py
89 |
90 | # pyenv
91 | .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
101 | __pypackages__/
102 |
103 | # Celery stuff
104 | celerybeat-schedule
105 | celerybeat.pid
106 |
107 | # SageMath parsed files
108 | *.sage.py
109 |
110 | # Environments
111 | .env
112 | .venv
113 | env/
114 | venv/
115 | ENV/
116 | env.bak/
117 | venv.bak/
118 |
119 | # Spyder project settings
120 | .spyderproject
121 | .spyproject
122 |
123 | # Rope project settings
124 | .ropeproject
125 |
126 | # mkdocs documentation
127 | /site
128 |
129 | # mypy
130 | .mypy_cache/
131 | .dmypy.json
132 | dmypy.json
133 |
134 | # Pyre type checker
135 | .pyre/
136 |
137 | # pytype static type analyzer
138 | .pytype/
139 |
140 | ### Vim ###
141 | # Swap
142 | [._]*.s[a-v][a-z]
143 | # comment out the next line if you don't need vector files
143 | !*.svg
144 | [._]*.sw[a-p]
145 | [._]s[a-rt-v][a-z]
146 | [._]ss[a-gi-z]
147 | [._]sw[a-p]
148 |
149 | # Session
150 | Session.vim
151 | Sessionx.vim
152 |
153 | # Temporary
154 | .netrwhist
155 | *~
156 | # Auto-generated tag files
157 | tags
158 | # Persistent undo
159 | [._]*.un~
160 |
161 | # End of https://www.toptal.com/developers/gitignore/api/vim,python
162 |
--------------------------------------------------------------------------------
/.isort.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | atomic = true
3 | balanced_wrapping = true
4 | # vertical hanging indent style wrapping
5 | multi_line_output = 3
6 | include_trailing_comma = true
7 |
8 | known_first_party = icekube
9 | default_section = THIRDPARTY
10 | sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
11 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] WithSecure Oyj.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # IceKube
2 |
3 |
4 |
5 |
6 |
7 | IceKube is a tool to help find attack paths within a Kubernetes cluster from a low-privileged starting point to a preferred location, typically `cluster-admin`
8 |
9 | ## Setup
10 |
11 | * `docker-compose up -d` - Spins up neo4j, accessible at `http://localhost:7474/`
12 | * `poetry install --no-dev` (creates venv) *OR* `pip install --user .` (installs the CLI globally)
13 | * Make sure your `kubectl` current context is set to the target cluster, and has `cluster-admin` privileges
14 |
15 |
16 | * IceKube is also available directly through pip with - `pip install icekube`
17 | * Neo4j can be spun up directly with docker with - `docker run -d -p 7474:7474 -p 7687:7687 -e NEO4J_AUTH=none -v $PWD/data:/data neo4j:4.4`
18 |
19 | ## Permissions Required
20 |
21 | This requires elevated privileges within the target cluster to enumerate resources. This typically requires read-only access on all resources within the cluster including secrets. IceKube does not persist any secret data it retrieves from secrets if that is a concern.
22 |
23 | Resource types can also be filtered from IceKube, instructions can be found below in the `Filtering Resources` section.
24 |
25 | ## Usage
26 |
27 | * `icekube enumerate` - Will enumerate all resources, and saves them into `neo4j` with generic relationships generated (note: not attack path relationships)
28 | * `icekube attack-path` - Generates attack path relationships within `neo4j`, these are identified with relationships having the property `attack_path` which is set to `1`
29 | * `icekube run` - Does both `enumerate` and `attack-path`, this will be the main option for quickly running IceKube against a cluster
30 | * `icekube purge` - Removes everything from the `neo4j` database
31 | * Run cypher queries within `neo4j` to discover attack paths and roam around the data, attack relationships will have the property `attack_path: 1`
32 |
33 | **NOTE**: In the `neo4j` browser, make sure to disable `Connect result nodes` in the Settings tab on the bottom left. This will stop it rendering every possible relationship automatically between nodes, leaving just the path queried for
34 |
35 | The contents of the cluster can also be downloaded with `icekube download` or through [freezer](https://github.com/WithSecureLabs/freezer) (a rust implementation of `icekube download`) and then loaded in through `icekube load`.
36 |
37 | #### Filtering Resources
38 |
39 | It is possible to filter out specific resource types from enumeration. This can be done with the `--ignore` parameter to `enumerate` and `run` which takes the resource types comma-delimited. For example, if you wish to exclude events and componentstatuses, you could run `icekube run --ignore events,componentstatuses` (NOTE: this is the default)
40 |
41 | Sensitive data from secrets is not stored in IceKube; resources of the Secret type have their data fields deleted on ingestion. It is recommended to include secrets as part of the query if possible as IceKube can still analyse the secret type and relevant annotations to aid with attack path generation.
42 |
43 | ## Not sure where to start?
44 |
45 | Here is a quick introductory way on running IceKube for those new to the project:
46 |
47 | * `poetry install` - Installs dependencies using `poetry`
48 | * `poetry shell` - Create a shell within the python environment
49 | * `docker-compose up -d` - Creates the neo4j docker container with easy to use network and environment settings (give this a minute for `neo4j` to start up)
50 | * `icekube run` - Analyse a cluster using IceKube - this assumes your `kubectl` context is set appropriately to target a cluster
51 | * Open the neo4j browser at `http://localhost:7474/`
52 | * On the login form, simply click `Connect` - wait for the connection to be established
53 | * Click the cog wheel on the bottom left to open settings
54 | * Near the bottom of the new side-pane, de-select `Connect result nodes`
55 | * Enter the following query into the query bar at the top
56 | * `MATCH p = SHORTEST 1 (src)-[r {attack_path: 1}]->+(crb:ClusterRoleBinding)-[:GRANTS_PERMISSION {attack_path: 1}]->(cr:ClusterRole {name: "cluster-admin"}) RETURN p`
57 | * This will find routes to cluster administrator from service accounts, pods, users, or groups
58 | * In the new window created by the query, click the Fullscreen button
59 | * Roam around the graph generated, clicking on nodes or relationships to get more details on the right where wanted
60 |
61 | ## Example Cypher Queries
62 |
63 | The following query will find all resources that have `cluster-admin` permissions. This is enforced through a Cluster Role Binding to ensure the permissions are cluster-wide
64 |
65 | ```cypher
66 | MATCH p = SHORTEST 1 (src)-[r {attack_path: 1}]->+(crb:ClusterRoleBinding)-[:GRANTS_PERMISSION {attack_path: 1}]->(cr:ClusterRole {name: "cluster-admin"}) RETURN p
67 | ```
68 |
69 | This performs the same, but restricts the query to start at nodes of type Pod / ServiceAccount / User / Group
70 |
71 | ```cypher
72 | MATCH p = SHORTEST 1 (src)-[r {attack_path: 1}]->+(crb:ClusterRoleBinding)-[:GRANTS_PERMISSION {attack_path: 1}]->(cr:ClusterRole {name: "cluster-admin"}) WHERE (src:ServiceAccount OR src:Pod or src:User or src:Group) RETURN p
73 | ```
74 |
75 | Using the old `shortestPath` syntax:
76 |
77 | ```cypher
78 | MATCH (crb:ClusterRoleBinding)-[r:GRANTS_PERMISSION {attack_path: 1}]->(cr:ClusterRole {name: 'cluster-admin'})
79 | WITH crb, cr, r
80 | MATCH (src) WHERE src:ServiceAccount OR src:Pod OR src:User or src:Group
81 | WITH src, crb, cr, r
82 | UNWIND src as s
83 | MATCH p=shortestPath((s)-[*]->(crb))
84 | WHERE all(r in relationships(p) where r.attack_path is not null)
85 | RETURN p, cr, r
86 | ```
87 |
88 | ## Acknowledgements
89 |
90 | - [BloodHound](https://github.com/BloodHoundAD/BloodHound) - The original project showing the power of graph databases for security
91 | - [KubeHound](https://github.com/DataDog/KubeHound) - An excellent and similar tool by DataDog, clearly we had similar ideas!
92 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | services:
4 | neo4j:
5 | image: neo4j:5
6 | environment:
7 | NEO4J_AUTH: none
8 | ports:
9 | - 7474:7474
10 | - 7687:7687
11 | volumes:
12 | - ./data:/data
13 |
--------------------------------------------------------------------------------
/docs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ReversecLabs/IceKube/bfd74d83838ebfe7c728839c8dd03f503c9a4624/docs/.gitkeep
--------------------------------------------------------------------------------
/docs/ACCESS_POD.md:
--------------------------------------------------------------------------------
1 | # ACCESS_POD
2 |
3 | ### Overview
4 |
5 | This rule establishes an attack path between a node and pods hosted upon it. This allows IceKube to consider accessible pods should an attacker break out onto a node.
6 |
7 | ### Description
8 |
9 | An attacker with access to a node can access all pods running on the node.
10 |
11 | ### Defense
12 |
13 | N/A
14 |
15 | ### Cypher Deep-Dive
16 |
17 | ```cypher
18 | MATCH (src:Node)-[:HOSTS_POD]->(dest:Pod)
19 | ```
20 |
21 | The above query finds nodes (`src`) hosting pods (`dest`) through the `HOSTS_POD` relationship.
22 |
--------------------------------------------------------------------------------
/docs/ACCESS_SECRET.md:
--------------------------------------------------------------------------------
1 | # ACCESS_SECRET
2 |
3 | ### Overview
4 |
5 | This attack path locates subjects which can access a secret. An attacker could use this to gain access to sensitive information, such as credentials.
6 |
7 | ### Description
8 |
9 | Kubernetes secrets typically contain sensitive information, and are a prime target for attackers. This attack path identifies subjects which have the ability to read a secret.
10 |
11 | ### Defense
12 |
13 | RBAC permissions regarding reading secrets should be reviewed. Access should be restricted to required entities.
14 |
15 | ### Cypher Deep-Dive
16 |
17 | ```cypher
18 | MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(dest:Secret)
19 | ```
20 |
21 | The above query finds subjects (`src`) which have read permissions on a secret (`dest`).
22 |
--------------------------------------------------------------------------------
/docs/AZURE_POD_IDENTITY_EXCEPTION.md:
--------------------------------------------------------------------------------
1 | # AZURE_POD_IDENTITY_EXCEPTION
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can access the cluster's Azure managed identity, allowing them to retrieve cluster administrator credentials in cases where Kubernetes local accounts are enabled.
6 |
7 | ### Description
8 |
9 | `AzurePodIdentityException` creates exceptions for pods to remove IPTables filtering for their access to Instance Metadata Service (IMDS). If a pod is exempt from this filtering, it can communicate with IMDS to retrieve the cluster's Node Managed Identity (NMI) and authenticate as it. Once authenticated, this can be used to gain cluster administrator access in clusters where Kubernetes local accounts are enabled.
10 |
11 | An attacker has multiple avenues that could leverage `AzurePodIdentityException`. The first would be reviewing the pod labels from an existing `AzurePodIdentityException`, and creating or modifying workloads to meet those criteria within the same namespace. The resultant pods would have access to IMDS, and could contain malicious code, based on the pod configuration, that allows an attacker to gain a foothold within them to leverage the access.
12 |
13 | Another option would be to create a new `AzurePodIdentityException` within the same namespace of a compromised pod. This exception would need to specify the labels of the compromised workload. This would remove any filtering from the workload, allowing it to once again access IMDS.
14 |
15 | ### Defense
16 |
17 | RBAC permissions regarding `AzurePodIdentityExceptions` should be reviewed. Access should be restricted to required entities.
18 |
19 | ### Cypher Deep-Dive
20 |
21 |
22 | #### Create workload based on existing APIE
23 |
24 | ```cypher
25 | MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(azexc:AzurePodIdentityException)-[:WITHIN_NAMESPACE]->(ns:Namespace), (dest:ClusterRoleBinding)
26 | WHERE (dest.name = 'aks-cluster-admin-binding' OR dest.name = 'aks-cluster-admin-binding-aad') AND (EXISTS {
27 | MATCH (src)-[:GRANTS_REPLICATIONCONTROLLERS_CREATE|GRANTS_DAEMONSETS_CREATE|GRANTS_DEPLOYMENTS_CREATE|GRANTS_REPLICASETS_CREATE|GRANTS_STATEFULSETS_CREATE|GRANTS_CRONJOBS_CREATE|GRANTS_JOBS_CREATE|GRANTS_POD_CREATE]->(ns)
28 | } OR EXISTS {
29 | MATCH (src)-[:GRANTS_PATCH|GRANTS_UPDATE]->(workload)-[:WITHIN_NAMESPACE]->(ns)
30 | WHERE (workload:ReplicationController OR workload:DaemonSet OR workload:Deployment OR workload:ReplicaSet OR workload:StatefulSet OR workload:CronJob OR workload:Job)
31 | })
32 | ```
33 |
34 | The above query finds subjects (`src`) which can view the `AzurePodIdentityException` configuration. It then checks that the same subject can create or update workloads in the same namespace as the `AzurePodIdentityException`. The target is set as the default AKS cluster admin role bindings.
35 |
36 |
37 | #### Create APIE based on existing workload
38 |
39 | ```cypher
40 | MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(pod:Pod)-[:WITHIN_NAMESPACE]->(ns:Namespace), (src)-[r {
41 | attack_path: 1
42 | }]->(pod), (dest:ClusterRoleBinding)
43 | WHERE (dest.name='aks-cluster-admin-binding' OR dest.name='aks-cluster-admin-binding-aad') AND (EXISTS {
44 | (src)-[:GRANTS_AZUREPODIDENTITYEXCEPTIONS_CREATE]->(ns)
45 | } OR EXISTS {
46 | (src)-[:GRANTS_UPDATE|GRANTS_PATCH]->(:AzurePodIdentityException)-[:WITHIN_NAMESPACE]->(ns)
47 | })
48 | ```
49 |
50 | The above query finds subjects (`src`) which can get pods, and have an attack path to that pod. It then ensures the subject can create or update `AzurePodIdentityException` within the same namespace. The target is set as the default AKS cluster admin cluster role bindings.
51 |
--------------------------------------------------------------------------------
/docs/BOUND_TO.md:
--------------------------------------------------------------------------------
1 | # BOUND_TO
2 |
3 | ### Overview
4 |
5 | This rule establishes an attack path relationship between a role binding and its subjects. This allows IceKube to consider permissions associated with the role binding for required subjects.
6 |
7 | ### Description
8 |
9 | Role bindings bind a number of subjects with a role. The permissions granted to the subjects will be that of the bound role. If both the role binding and role are scoped cluster-wide, the permissions are also granted cluster-wide.
10 |
11 | ### Defense
12 |
13 | Review subjects in role bindings, and ensure subjects are only bound to roles that grant the minimal set of permissions required for use.
14 |
15 | ### Cypher Deep-Dive
16 |
17 | ```cypher
18 | MATCH (src)-[:BOUND_TO]->(dest)
19 | ```
20 |
21 | Finds all resources (`src`) that have a `BOUND_TO` relationship to other resources (`dest`). The `BOUND_TO` relationship is only between role binding and subject, thereby limiting `dest` to `RoleBinding` or `ClusterRoleBinding` and `src` to one of `Group`, `User`, `ServiceAccount`.
22 |
--------------------------------------------------------------------------------
/docs/CAN_ACCESS_DANGEROUS_HOST_PATH.md:
--------------------------------------------------------------------------------
1 | # CAN_ACCESS_DANGEROUS_HOST_PATH
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which have potentially dangerous paths from the underlying node's file system mounted. These could be used to gain a foothold on the underlying node or gain node credentials.
6 |
7 | ### Description
8 |
9 | Pods can mount paths from the underlying host. These are `hostPath` volume types. Access to certain paths on the host could be considered dangerous as it may grant access to sensitive resources on the host. This could include the kubelet credentials, root's home directory, the container socket, etc.
10 |
11 | An attacker with access to these resources could potentially gain access to the underlying host, or gain access to sensitive credentials.
12 |
13 | ### Defense
14 |
15 | Pod Security Admission (PSA) should be configured to enforce the `restricted` standard. Should this be too restrictive, `baseline` could be used instead.
16 |
17 | PSA can be limited in its flexibility, for example having a policy that slightly deviates from the `restricted` standard. Should further flexibility be required compared to what PSA can provide, custom admission webhooks should be used to enforce pod security.
18 |
19 | Should host path volumes be required, the volumes should be reviewed to ensure they do not expose sensitive files from the host.
20 |
21 | ### Cypher Deep-Dive
22 |
23 | ```cypher
24 | MATCH (src:Pod {dangerous_host_path: true})<-[:HOSTS_POD]-(dest:Node)
25 | ```
26 |
27 | The above query finds pods (`src`) with the `dangerous_host_path` property set to `true`. This property is set by IceKube if a `hostPath` volume matches a number of pre-configured dangerous paths. The node (`dest`) that hosts the pod is then targeted.
28 |
--------------------------------------------------------------------------------
/docs/CAN_ACCESS_HOST_FD.md:
--------------------------------------------------------------------------------
1 | # CAN_ACCESS_HOST_FD
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which have the `DAC_READ_SEARCH` capability which could allow accessing files on the host filesystem. An attacker could use this to break out onto the underlying node.
6 |
7 | ### Description
8 |
9 | The `DAC_READ_SEARCH` capability grants access to `open_by_handle_at` which allows opening file descriptors across mount namespaces. `DAC_READ_SEARCH` by itself simply grants read access to the files opened, however when combined with `DAC_OVERRIDE` (a default capability) can provide write permissions.
10 |
11 | An attacker could use this access to open sensitive files on the underlying host, and either retrieve credentials to gain access to the underlying host or the kubelet credentials. Should `DAC_OVERRIDE` be present, access could be used to write authentication material such as an SSH key to an `authorized_keys` file.
12 |
13 | ### Defense
14 |
15 | Pod Security Admission (PSA) should be configured to enforce the `restricted` standard. Should this be too restrictive, `baseline` could be used instead.
16 |
17 | PSA can be limited in its flexibility, for example having a policy that slightly deviates from the `restricted` standard. Should further flexibility be required compared to what PSA can provide, custom admission webhooks should be used to enforce pod security.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src:Pod)<-[:HOSTS_POD]-(dest:Node)
23 | WHERE "DAC_READ_SEARCH" in src.capabilities
24 | ```
25 |
26 | The above query finds pods (`src`) where `DAC_READ_SEARCH` is in its `capabilities` property. This property is populated by IceKube with the capabilities calculated from the pod spec. The node (`dest`) that hosts the pod is then targeted.
27 |
--------------------------------------------------------------------------------
/docs/CAN_CGROUP_BREAKOUT.md:
--------------------------------------------------------------------------------
1 | # CAN_CGROUP_BREAKOUT
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which have the `SYS_ADMIN` capability, and as such can breakout onto the underlying node.
6 |
7 | ### Description
8 |
9 | The `SYS_ADMIN` capability provides the ability to perform a wide range of administrator operations. One of these operations is the ability to configure a release agent for a cgroup. This agent is triggered once the last task of the cgroup exits. The release agent is run as root on the underlying host.
10 |
11 | An attacker with this capability could utilise cgroups to execute commands on the underlying node, thereby breaking out of the current container.
12 |
13 | ### Defense
14 |
15 | Pod Security Admission (PSA) should be configured to enforce the `restricted` standard. Should this be too restrictive, `baseline` could be used instead.
16 |
17 | PSA can be limited in its flexibility, for example having a policy that slightly deviates from the `restricted` standard. Should further flexibility be required compared to what PSA can provide, custom admission webhooks should be used to enforce pod security.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src:Pod)<-[:HOSTS_POD]-(dest:Node)
23 | WHERE "SYS_ADMIN" in src.capabilities
24 | ```
25 |
26 | The above query finds pods (`src`) where `SYS_ADMIN` is in its `capabilities` property. This property is populated by IceKube with the capabilities calculated from the pod spec. The node (`dest`) that hosts the pod is then targeted.
27 |
--------------------------------------------------------------------------------
/docs/CAN_EXEC_THROUGH_KUBELET.md:
--------------------------------------------------------------------------------
1 | # CAN_EXEC_THROUGH_KUBELET
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can execute commands on pods directly through the kubelet. This could allow an attacker to get a foothold on that pod.
6 |
7 | ### Description
8 |
9 | The kubelet runs its own API with its own set of access controls. Authorisation for these endpoints are based on allowed verbs to sub resources on the `nodes` resource. For example, a GET request to `/stats/*` requires the `get` verb on `nodes/stats`. Tables showing the required request verb and sub resource for a particular endpoint can be found in the [Kubernetes documentation](https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization)
10 | The `/exec` path allows for the execution of commands in containers. This path is authorised by the `nodes/proxy` sub resource and the required verb is create.
11 |
12 | An attacker with create on `nodes/proxy` for a particular node can execute commands on containers running on that node. Potentially gaining a foothold within those containers.
13 |
14 | ### Defense
15 |
16 | RBAC permissions regarding the `nodes/proxy` sub resource should be reviewed. Access should be restricted to required entities.
17 |
18 | ### Cypher Deep-Dive
19 |
20 | ```cypher
21 | MATCH (src)-[:GRANTS_PROXY_CREATE]->(:Node)-[:HOSTS_POD]->(dest:Pod)
22 | ```
23 |
24 | The above query finds subjects (`src`) with the create permission on the `nodes/proxy` sub resource. The target is set as pods (`dest`) running on that particular node determined through the `HOSTS_POD` relationship.
25 |
--------------------------------------------------------------------------------
/docs/CAN_IMPERSONATE.md:
--------------------------------------------------------------------------------
1 | # CAN_IMPERSONATE
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which have the impersonate permission, allowing them to impersonate other subjects.
6 |
7 | ### Description
8 |
9 | Should a subject have the `impersonate` verb on another subject, they can perform requests against the API server specifying the other subject as an impersonation target. The actions performed are then performed as if the original subject was the targeted subject. This could be done with the `--as` flag to `kubectl`.
10 |
11 | An attacker could use this to laterally move within the cluster to other subjects.
12 |
13 | ### Defense
14 |
15 | RBAC permissions regarding the impersonate verb should be reviewed. Access should be restricted to required entities.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | ```cypher
20 | MATCH (src)-[:GRANTS_IMPERSONATE]->(dest)
21 | ```
22 |
23 | The above query finds all resources (`src`) that have the impersonate verb on a target resource (`dest`)
24 |
--------------------------------------------------------------------------------
/docs/CAN_LOAD_KERNEL_MODULES.md:
--------------------------------------------------------------------------------
1 | # CAN_LOAD_KERNEL_MODULES
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which have the `SYS_MODULE` capability, and as such can breakout onto the underlying node.
6 |
7 | ### Description
8 |
9 | The `SYS_MODULE` capability allows management of Kernel modules. This includes loading additional modules.
10 |
11 | An attacker with this capability could load a custom module with malicious code. The code would then be executed by the kernel allowing for commands to be run outside of the container, effectively breaking out.
12 |
13 | ### Defense
14 |
15 | Pod Security Admission (PSA) should be configured to enforce the `restricted` standard. Should this be too restrictive, `baseline` could be used instead.
16 |
17 | PSA can be limited in its flexibility, for example having a policy that slightly deviates from the `restricted` standard. Should further flexibility be required compared to what PSA can provide, custom admission webhooks should be used to enforce pod security.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src:Pod)<-[:HOSTS_POD]-(dest:Node)
23 | WHERE "SYS_MODULE" in src.capabilities
24 | ```
25 |
26 | The above query finds pods (`src`) where `SYS_MODULE` is in its `capabilities` property. This property is populated by IceKube with the capabilities calculated from the pod spec. The node (`dest`) that hosts the pod is then targeted.
27 |
--------------------------------------------------------------------------------
/docs/CAN_NSENTER_HOST.md:
--------------------------------------------------------------------------------
1 | # CAN_NSENTER_HOST
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which have both the `SYS_ADMIN` and `SYS_PTRACE` capabilities and share the host's PID namespace, allowing them to break out onto the underlying node.
6 |
7 | ### Description
8 |
9 | An attacker with access to a pod which has both the `SYS_ADMIN` and `SYS_PTRACE` capabilities and shares the host's PID namespace could potentially break out of the pod using the `nsenter` utility.
10 |
11 | An example command could be `nsenter -t 1 -a`.
12 |
13 | ### Defense
14 |
15 | Pod Security Admission (PSA) should be configured to enforce the `restricted` standard. Should this be too restrictive, `baseline` could be used instead.
16 |
17 | PSA can be limited in its flexibility, for example having a policy that slightly deviates from the `restricted` standard. Should further flexibility be required compared to what PSA can provide, custom admission webhooks should be used to enforce pod security.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src:Pod {hostPID: true})<-[:HOSTS_POD]-(dest:Node)
23 | WHERE all(x in ["SYS_ADMIN", "SYS_PTRACE"] WHERE x in src.capabilities)
24 | ```
25 |
26 | The above query finds pods (`src`) configured with both the `SYS_ADMIN` and `SYS_PTRACE` capabilities that share the node's PID namespace. These parameters are configured by IceKube based on the pod spec. The target (`dest`) is set as the node upon which the pod is running.
27 |
--------------------------------------------------------------------------------
/docs/CREATE_POD_WITH_SA.md:
--------------------------------------------------------------------------------
1 | # CREATE_POD_WITH_SA
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can create pods in a namespace with the target service account. Upon successful exploitation, an attacker will gain the permissions of the target service account.
6 |
7 | ### Description
8 |
9 | An attacker with the ability to create a pod could configure the service account associated with the pod by setting the `serviceAccountName` field in the pod spec. Should the value specified match the name of a service account in the namespace the pod is deployed in, the token for that service account can be mounted into the pod.
10 |
11 | As the attacker has created the pod, they would also have control of the image and the command executed. This could be configured to exfiltrate the token to the attacker. This could be by outputting it to `stdout` if the attacker has `pods/logs` permissions, or exfiltrating the token over the network, or some other means.
12 |
13 | Once the attacker has acquired the token, they would be able to perform actions against the API server as the service account.
14 |
15 | ### Defense
16 |
17 | RBAC permissions to create pods and workloads should be reviewed. Access should be restricted to required entities.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src)-[:GRANTS_PODS_CREATE|GRANTS_REPLICATIONCONTROLLERS_CREATE|GRANTS_DAEMONSETS_CREATE|GRANTS_DEPLOYMENTS_CREATE|GRANTS_REPLICASETS_CREATE|GRANTS_STATEFULSETS_CREATE|GRANTS_CRONJOBS_CREATE|GRANTS_JOBS_CREATE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount)
23 | ```
24 |
25 | The above query finds all resources (`src`) that have the `CREATE` permission against workload resource types within a specified namespace. All `CREATE` verbs are against the namespace for a namespaced resource. The target node (`dest`) is a service account within the same namespace as where the workload creation is permitted.
26 |
27 | Workload creation is used as opposed to solely pods because various Kubernetes controllers create pods automatically from more abstract workload resources. Configuration of the workload resource also configures the created pod, thus it would allow an attacker to create the desired pod.
28 |
29 | Workload creation includes the following:
30 | - `pods`
31 | - `replicationcontrollers`
32 | - `daemonsets`
33 | - `deployments`
34 | - `replicasets`
35 | - `statefulsets`
36 | - `cronjobs`
37 | - `jobs`
38 |
--------------------------------------------------------------------------------
/docs/CREATE_PRIVILEGED_WORKLOAD.md:
--------------------------------------------------------------------------------
1 | # CREATE_PRIVILEGED_WORKLOAD
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects that can create workloads that can be considered `privileged`. These are workloads deliberately configured with known weaknesses in their pod specification that could allow for a container breakout. Upon successful execution, an attacker would gain potentially privileged access to nodes they can deploy workloads upon.
6 |
7 | ### Description
8 |
9 | An attacker with the ability to create a pod could configure the pod to have attributes that would align with the `privileged` Pod Security Standard (PSS). These include configurations that allow for a breakout, for example the `privileged` flag, or access to the host filesystem.
10 |
11 | As they are creating the pod themselves, they would also be able to configure the process to run as root and set a custom malicious command to allow code execution on the host as the root user gaining privileged access. This assumes user namespaces are not in effect.
12 |
13 | ### Defense
14 |
15 | PSS should be enforced within the cluster. This could be done through Pod Security Admission (PSA) through labels on the namespace. Should more granularity be required, a validating admission webhook could be used as an alternative.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | ```cypher
20 | MATCH (src)-[:GRANTS_PODS_CREATE|GRANTS_REPLICATIONCONTROLLERS_CREATE|GRANTS_DAEMONSETS_CREATE|GRANTS_DEPLOYMENTS_CREATE|GRANTS_REPLICASETS_CREATE|GRANTS_STATEFULSETS_CREATE|GRANTS_CRONJOBS_CREATE|GRANTS_JOBS_CREATE]->(ns:Namespace)-[:WITHIN_CLUSTER]->(cluster), (dest:Node) WHERE cluster.major_minor >= 1.25 AND (ns.psa_enforce <> 'restricted' AND ns.psa_enforce <> 'baseline')
21 | ```
22 |
23 | The above query finds all entities (`src`) that have the CREATE permission against workload resource types within a specified namespace. All CREATE verbs are against the namespace for a namespaced resource. The target node (`dest`) is a node within the cluster.
24 |
25 | The Cluster object is also retrieved to have access to the cluster's Kubernetes version. Filters are performed to ensure the cluster version is 1.25 or greater, where PSA moved to stable, and it is assumed that PSA is the sole enforcer of pod security within the cluster. The query then validates that the namespace does not enforce the `restricted` or `baseline` standards. This leaves `privileged` or blank, both of which do not specify any restrictions on a pod specification. IceKube retrieves this configuration from the `pod-security.kubernetes.io/enforce` label of the namespace.
26 |
27 | Workload creation is used as opposed to solely pods because various Kubernetes controllers create pods automatically from more abstract workload resources. Configuration of the workload resource also configures the created pod, thus it would allow an attacker to create the desired pod.
28 |
29 | Workload creation includes the following:
30 | - `pods`
31 | - `replicationcontrollers`
32 | - `daemonsets`
33 | - `deployments`
34 | - `replicasets`
35 | - `statefulsets`
36 | - `cronjobs`
37 | - `jobs`
38 |
--------------------------------------------------------------------------------
/docs/CREATE_SECRET_WITH_TOKEN.md:
--------------------------------------------------------------------------------
1 | # CREATE_SECRET_WITH_TOKEN
2 |
3 | ### Overview
4 |
5 | This attack aims to locate subjects which can gain access to a service account token by creating a secret which would be populated by the control plane with the relevant secret. The subject would also need a method to read the newly created secret.
6 |
7 | ### Description
8 |
9 | Secrets can be configured with the type `kubernetes.io/service-account-token` and annotated with `kubernetes.io/service-account.name` which will result in the secret being automatically populated with a token for said service account.
10 |
11 | An attacker could leverage this by creating such a secret and using it to request a token for a service account they wish to gain access to. Once created, an attacker would need to read the secret. This could be done in a few different ways such as reading the secret directly or mounting it into a pod and using that to exfiltrate the secret.
12 |
13 | ### Defense
14 |
15 | RBAC permissions regarding creating secrets should be reviewed. Access should be restricted where not needed.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | #### Listing secrets
20 |
21 | ```cypher
22 | MATCH (src)-[:GRANTS_SECRETS_CREATE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount) WHERE (src)-[:GRANTS_SECRETS_LIST]->(ns)
23 |
24 | ```
25 |
26 | This query identifies subjects (`src`) that can create secrets within a namespace. It also checks whether the same subject can list secrets within the same namespace. This would allow them to read the secret upon creation.
27 |
28 | #### Workload creation
29 |
30 | ```cypher
31 | MATCH (src)-[:GRANTS_SECRETS_CREATE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount) WHERE (src)-[:GRANTS_PODS_CREATE|GRANTS_REPLICATIONCONTROLLERS_CREATE|GRANTS_DAEMONSETS_CREATE|GRANTS_DEPLOYMENTS_CREATE|GRANTS_REPLICASETS_CREATE|GRANTS_STATEFULSETS_CREATE|GRANTS_CRONJOBS_CREATE|GRANTS_JOBS_CREATE]->(ns)
32 | ```
33 |
34 | This query identifies subjects (`src`) that can create secrets within a namespace. It also checks the subject can create a workload within the same namespace which could be configured to mount the newly created secret for exfiltration purposes.
35 |
36 | Workload creation includes the following:
37 | - `pods`
38 | - `replicationcontrollers`
39 | - `daemonsets`
40 | - `deployments`
41 | - `replicasets`
42 | - `statefulsets`
43 | - `cronjobs`
44 | - `jobs`
45 |
--------------------------------------------------------------------------------
/docs/DEBUG_POD.md:
--------------------------------------------------------------------------------
1 | # DEBUG_POD
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can create debug containers within a pod. An attacker could use this to gain a foothold within a pod.
6 |
7 | ### Description
8 |
9 | An attacker with permissions to debug a pod can create a new container in the pod. This could also be configured to share the process namespace of an existing container in the pod. An attacker could use this to gain access to the container's filesystem, including service account tokens, as well as its network stack.
10 |
11 | The ability to debug a pod requires the `patch` verb on `pods/ephemeral` for the targeted pod.
12 |
13 | ### Defense
14 |
15 | RBAC permissions regarding the `patch` permission on the `pods/ephemeral` sub resource should be reviewed. Access should be restricted to required entities.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | ```cypher
20 | MATCH (src)-[:GRANTS_EPHEMERAL_PATCH]->(dest:Pod)
21 | ```
22 | Finds all resources (`src`) that have a `GRANTS_EPHEMERAL_PATCH` relationship to pods (`dest`).
23 |
--------------------------------------------------------------------------------
/docs/EXEC_INTO.md:
--------------------------------------------------------------------------------
1 | # EXEC_INTO
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can execute into pods. An attacker could use this to gain a foothold in a running pod.
6 |
7 | ### Description
8 |
9 | An attacker with the ability to execute commands within a pod could gain access to the data within. This would include access to its processes, filesystem, network position, etc. This could be used as a foothold for further attacks within the cluster.
10 |
11 | Executing commands in a pod requires two permissions. The first is `create` or `get` on `pods/exec` and the second is `get` on `pods`. Both of those permissions should affect the target pod.
12 |
13 | The fact that read-only permissions can be used to exec into a pod is a side-effect of how websockets work, this is further discussed by [Rory McCune](https://raesene.github.io/blog/2024/11/11/When-Is-Read-Only-Not-Read-Only/).
14 |
15 | ### Defense
16 |
17 | RBAC permissions regarding the outlined permissions should be reviewed. Access should be restricted to required entities.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src)-[:GRANTS_EXEC_CREATE|GRANTS_EXEC_GET]->(dest:Pod)<-[:GRANTS_GET]-(src)
23 | ```
24 |
25 | The above query finds all resources (`src`) that have `GRANTS_EXEC_CREATE` or `GRANTS_EXEC_GET` and `GRANTS_GET` on a Pod (`dest`). The two relationships map to the two required permissions for executing commands within a pod.
26 |
--------------------------------------------------------------------------------
/docs/GENERATE_CLIENT_CERTIFICATE.md:
--------------------------------------------------------------------------------
1 | # GENERATE_CLIENT_CERTIFICATE
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can create a certificate signing request (CSR) _and_ approve its signing. An attacker can use this to generate credentials for another user, service account, or group.
6 |
7 | ### Description
8 |
9 | The CSR API allows the submission of Certificate Signing Requests (CSRs). Should the CSR be signed by the `kubernetes.io/kube-apiserver-client` signer, the signed certificate can be used as a client certificate for the purpose of authenticating to the cluster. The common name of the certificate specifies the user's username, and the organisations specify their groups.
10 |
11 | Should an attacker have the ability to create CSRs they could submit certificate requests for other subjects of the cluster. Should they also have the ability to approve the signing with the above signer, signed certificates for the specified subjects would be generated.
12 |
13 | An attacker could use these to escalate their privileges within the cluster.
14 |
15 | ### Defense
16 |
17 | RBAC permissions regarding the creation and approval of CSRs should be reviewed. Access should be restricted to required entities.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src)-[:GRANTS_CERTIFICATESIGNINGREQUESTS_CREATE]->(cluster:Cluster), (dest)
23 | WHERE (src)-[:HAS_CSR_APPROVAL]->(cluster) AND (src)-[:GRANTS_APPROVE]->(:Signer {
24 | name: "kubernetes.io/kube-apiserver-client"
25 | }) AND (dest:User OR dest:Group OR dest:ServiceAccount)
26 | ```
27 |
28 | The above query ensures a resource (`src`) has the following three permissions:
29 |
30 | - Ability to create CSRs through `GRANTS_CERTIFICATESIGNINGREQUESTS_CREATE`
31 | - Ability to approve CSRs through `HAS_CSR_APPROVAL`
32 | - Approved to use the `kubernetes.io/kube-apiserver-client` signer through `GRANTS_APPROVE`
33 |
34 | Should all three conditions be met, subjects (`dest`) are targeted if they are a `User`, `Group` or `ServiceAccount`
35 |
--------------------------------------------------------------------------------
/docs/GENERATE_TOKEN.md:
--------------------------------------------------------------------------------
1 | # GENERATE_TOKEN
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate resources which can generate a token for a given service account. Upon successful exploitation, an attacker will gain the permissions of the target service account.
6 |
7 | ### Description
8 |
9 | Short-lived service account tokens can be generated by the API server. This requires the `create` verb on `serviceaccounts/token` for the targeted service account.
10 |
11 | Upon generation of a token, it can be used to perform actions against the API server as the service account.
12 |
13 | ### Defense
14 |
15 | RBAC access to token creation should be reviewed and access restricted to required subjects.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | ```cypher
20 | MATCH (src)-[:GRANTS_TOKEN_CREATE]->(dest:ServiceAccount)
21 | ```
22 |
23 | The above query finds all resources (`src`) that have the permission to create a token for a given service account. The target (`dest`) is the targeted service account.
24 |
--------------------------------------------------------------------------------
/docs/GET_AUTHENTICATION_TOKEN_FOR.md:
--------------------------------------------------------------------------------
1 | # GET_AUTHENTICATION_TOKEN_FOR
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate resources which can get a long-lived service account token for a given service account. Upon successful exploitation, an attacker will gain the permissions of the target service account.
6 |
7 | ### Description
8 |
9 | Kubernetes secrets can contain long-lived tokens for service accounts. These are when the secret type is set to `kubernetes.io/service-account-token`. Should this be set, the `kubernetes.io/service-account.name` annotation determines the service account for which a Kubernetes controller automatically generates a token and populates the secret.
10 |
11 | An attacker with read access to this secret would be able to use the token to perform actions against the API server as the service account.
12 |
13 | ### Defense
14 |
15 | Long-lived service account tokens should be avoided in favour of short-lived tokens using the `TokenRequest` API. Should this not be possible, RBAC permissions should be reviewed to limit access to this permission to those required.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | ```cypher
20 | MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(secret:Secret)-[:AUTHENTICATION_TOKEN_FOR]->(dest:ServiceAccount)
21 | ```
22 |
23 | The above query finds all resources (`src`) that have either the GET, LIST or WATCH permissions on a secret containing a token. The service account that the token is for is the target (`dest`).
24 |
--------------------------------------------------------------------------------
/docs/GRANTS_PERMISSION.md:
--------------------------------------------------------------------------------
1 | # GRANTS_PERMISSION
2 |
3 | ### Overview
4 |
5 | This rule establishes an attack path relationship between a role binding and its role. This allows IceKube to consider role permissions for the associated role binding and, by extension, its subjects.
6 |
7 | ### Description
8 |
9 | Role bindings bind a number of subjects with a role. The permissions granted to the subjects will be that of the bound role. If both the role binding and role are scoped cluster-wide, the permissions are also granted cluster-wide.
10 |
11 | ### Defense
12 |
13 | Review associated roles for a role binding, and ensure roles that grant the minimal set of permissions required are attached.
14 |
15 | ### Cypher Deep-Dive
16 |
17 | ```cypher
18 | MATCH (src)-[:GRANTS_PERMISSION]->(dest)
19 | ```
20 |
21 | Finds all resources (`src`) that have a `GRANTS_PERMISSION` relationship to other resources (`dest`). The `GRANTS_PERMISSION` relationship is only between role binding and roles, thereby limiting `src` to `RoleBinding` or `ClusterRoleBinding` and `dest` to `Role` or `ClusterRole`.
22 |
--------------------------------------------------------------------------------
/docs/IS_CLUSTER_ADMIN.md:
--------------------------------------------------------------------------------
1 | # IS_CLUSTER_ADMIN
2 |
3 | ### Overview
4 |
5 | This attack path aims to provide a route for nodes that are expected to grant cluster administrator access just due to the nature of the resource.
6 |
7 | ### Description
8 |
9 | Compromise of certain resources within a cluster can be considered to grant cluster administrator due to the nature of the resource compromised.
10 |
11 | For example, compromise of a control plane node within a Kubernetes cluster that runs services such as the API server or etcd effectively grants cluster administrator access.
12 |
13 | ### Defense
14 |
15 | Security of resources that are effectively cluster administrator should be reviewed and hardened.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | ```cypher
20 | MATCH (src:Node), (dest:ClusterRoleBinding)-[:GRANTS_PERMISSION]->(:ClusterRole {name: "cluster-admin"}) WHERE any(x in ["master", "control-plane"] WHERE x in src.node_roles)
21 | ```
22 |
23 | The above query finds nodes `src` that have the `master` or `control-plane` role. The destination is set to a cluster role binding that binds to `cluster-admin`.
24 |
--------------------------------------------------------------------------------
/docs/IS_PRIVILEGED.md:
--------------------------------------------------------------------------------
1 | # IS_PRIVILEGED
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which are privileged, and as such could breakout onto the underlying node.
6 |
7 | ### Description
8 |
9 | Privileged pods run their containers without much of the segregation typical containers have. This makes it significantly easier for a container breakout to occur granting an attacker a foothold on the underlying node.
10 |
11 | A number of techniques are available to breakout of a privileged pod. For example, mounting the underlying drives from `/dev/` and accessing the hosts filesystem.
12 |
13 | ### Defense
14 |
15 | Pod Security Admission (PSA) should be configured to enforce the `restricted` standard. Should this be too restrictive, `baseline` could be used instead.
16 |
17 | PSA can be limited in its flexibility, for example having a policy that slightly deviates from the `restricted` standard. Should further flexibility be required compared to what PSA can provide, custom admission webhooks should be used to enforce pod security.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src:Pod {privileged: true})<-[:HOSTS_POD]-(dest:Node)
23 | ```
24 |
25 | The query above finds pods (`src`) that have the `privileged` property set to true. This property is configured by IceKube and is retrieved from the pod spec. The node (`dest`) that hosts the pod is then targeted.
26 |
--------------------------------------------------------------------------------
/docs/MOUNTS_SECRET.md:
--------------------------------------------------------------------------------
1 | # MOUNTS_SECRET
2 |
3 | ### Overview
4 |
5 | This attack path locates pods with mounted secrets. An attacker on a foothold on one of these pods would be able to access the values in the secret.
6 |
7 | ### Description
8 |
9 | Kubernetes secrets typically contain sensitive information, and are a prime target for attackers. This attack path identifies pods which have this data mounted.
10 |
11 | ### Defense
12 |
13 | Review which secrets are mounted into a pod, and ensure all secrets are required.
14 |
15 | ### Cypher Deep-Dive
16 |
17 | ```cypher
18 | MATCH (src:Pod)-[:MOUNTS_SECRET]->(dest:Secret)
19 | ```
20 |
21 | The above query finds pods (`src`) which have secrets (`dest`) mounted.
22 |
--------------------------------------------------------------------------------
/docs/PATCH_NAMESPACE_TO_BYPASS_PSA.md:
--------------------------------------------------------------------------------
1 | # PATCH_NAMESPACE_TO_BYPASS_PSA
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects that can both create workloads within a namespace that has Pod Security Admission (PSA) enforced, and the ability to modify the same namespace. This would allow the modification of the PSA policy, and therefore enable the deployment of insecure workloads as described in `CREATE_PRIVILEGED_WORKLOAD` gaining privileged access to nodes within the cluster.
6 |
7 | ### Description
8 |
9 | While Namespace resources are cluster-wide, they are also considered a namespaced resource for certain actions against that particular namespace. This includes `get`, `patch`, `update` and `delete`. An entity with permissions constrained solely to a namespace using a RoleBinding could have permissions against the Namespace resource itself. Should this include `patch` or `update`, an attacker can modify the Namespace.
10 |
11 | PSA is configured through labels on a namespace. These can be modified by an entity with `patch` or `update` permissions on the namespace. Therefore, an attacker with these permissions on the namespace could modify the PSA to allow all workloads to be deployed.
12 |
13 | After such a modification is made, a privileged workload can be created within the namespace as described in `CREATE_PRIVILEGED_WORKLOAD`.
14 |
15 | ### Defense
16 |
17 | Ensure permissions assigned to entities follow the principles of least privilege. Specifically, granting permissions on a namespace within a Role / RoleBinding should be avoided. Due care should be given when utilising wildcards in RBAC as this may inadvertently grant undesired permissions.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src)-[:GRANTS_PODS_CREATE|{create_workload_query()}]->(ns:Namespace)-[:WITHIN_CLUSTER]->(cluster), (dest:Node) WHERE (src)-[:GRANTS_PATCH|GRANTS_UPDATE]->(ns) AND cluster.major_minor >= 1.25
23 | ```
24 |
25 | The above query finds all entities (`src`) that have the CREATE permission against workload resource types within a specified namespace. All CREATE verbs are against the namespace for a namespaced resource. The target node (`dest`) is a node within the cluster.
26 |
27 | The Cluster object is also retrieved to have access to the clusters Kubernetes version. Filters are performed to ensure the cluster version is greater than 1.25, where PSA moved to stable, and it is assumed that PSA is the sole enforcer of pod security within the cluster.
28 |
29 | The query also validates that the entity has the ability to `patch` or `update` the namespace. Should they have this permission, the specifics of which PSA are currently enforced on the namespace are irrelevant as the attacker can simply change it to a more favourable value.
30 |
31 | Workload creation is used as opposed to solely pods because various Kubernetes controllers create pods automatically from more abstract workload resources. Configuration of the workload resource also configures the created pod, thus it would allow an attacker to create the desired pod.
32 |
33 | Workload creation includes the following:
34 | - `pods`
35 | - `replicationcontrollers`
36 | - `daemonsets`
37 | - `deployments`
38 | - `replicasets`
39 | - `statefulsets`
40 | - `cronjobs`
41 | - `jobs`
42 |
--------------------------------------------------------------------------------
/docs/RBAC_ESCALATE_TO.md:
--------------------------------------------------------------------------------
1 | # RBAC_ESCALATE_TO
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can escalate their privileges within the cluster by modifying bound roles.
6 |
7 | ### Description
8 |
9 | By default, a subject is unable to grant more permissions in RBAC than they originally have access to. The `escalate` verb is a special verb that bypasses this restriction. It permits the modification of roles to add more permissions than the editor may have.
10 |
11 | This could be used by an attacker to modify a role that is bound to them to include additional permissions, thereby escalating their privileges within the cluster.
12 |
13 | ### Defense
14 |
15 | RBAC permissions regarding the `escalate` permission on roles should be reviewed. Access should be restricted to required entities.
16 |
17 | ### Cypher Deep-Dive
18 |
19 | #### RoleBindings
20 |
21 | ```cypher
22 | MATCH (src:RoleBinding)-[:GRANTS_ESCALATE]->(role)-[:WITHIN_NAMESPACE]->(:Namespace)<-[:WITHIN_NAMESPACE]-(dest)
23 | WHERE (role:Role OR role:ClusterRole) AND (src)-[:GRANTS_PERMISSION]->(role)
24 | ```
25 |
26 | The above query finds role bindings (`src`) that have escalate permissions on a role. The role can either be a `Role` or a `ClusterRole`. The role binding must also be bound to the role through the `GRANTS_PERMISSION` relationship. Finally, the namespace for the role is retrieved, and all resources within that namespace are targeted (`dest`).
27 |
28 | #### ClusterRoleBindings
29 |
30 | ```cypher
31 | MATCH (src:ClusterRoleBinding)-[:GRANTS_ESCALATE]->(role:ClusterRole), (dest)
32 | WHERE (src)-[:GRANTS_PERMISSION]->(role)
33 | ```
34 |
35 | The above query finds cluster role bindings (`src`) that have escalate permissions on a cluster role. The role binding must also be bound to the role through the `GRANTS_PERMISSION` relationship. Finally, all resources within the database are targeted (`dest`).
36 |
--------------------------------------------------------------------------------
/docs/REPLACE_IMAGE.md:
--------------------------------------------------------------------------------
1 | # REPLACE_IMAGE
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which have the ability to modify pods. An attacker can use this to replace a pod image, which could be used to inject malicious code that could be used to gain a foothold within the pod.
6 |
7 | ### Description
8 |
9 | An attacker with permissions to patch a pod could replace the pod's image with a malicious one. This malicious image could include code that could aid an attacker in getting a foothold within the pod. For example, it may connect to an attacker-controlled server with a reverse shell.
10 |
11 | ### Defense
12 |
13 | RBAC permissions regarding the patch permission should be reviewed. Access should be restricted to required entities.
14 |
15 | ### Cypher Deep-Dive
16 |
17 | ```cypher
18 | MATCH (src)-[:GRANTS_PATCH]->(dest:Pod)
19 | ```
20 |
21 | Finds all resources (`src`) that have a `GRANTS_PATCH` relationship to pods (`dest`).
22 |
--------------------------------------------------------------------------------
/docs/UPDATE_AWS_AUTH.md:
--------------------------------------------------------------------------------
1 | # UPDATE_AWS_AUTH
2 |
3 | ### Overview
4 |
5 | This attack path is specific to AWS EKS, and aims to locate subjects which can make changes to the `aws-auth` ConfigMap. Upon successful exploitation, an attacker is considered to have reached the `system:masters` group.
6 |
7 | ### Description
8 |
9 | In AWS EKS, the `aws-auth` ConfigMap is used to map AWS IAM roles to Kubernetes RBAC users and groups. As such, it allows the API server to enforce authorisation on AWS entities when accessing the cluster. Once an IAM identity is added to the ConfigMap, it will be able to access the cluster using the Kubernetes API with its permissions depending on the mapping created.
10 |
11 | An attacker with privileges which allow them to modify the `aws-auth` ConfigMap could add their own IAM roles to this configuration, granting their own role permissions within the cluster, including membership of the `system:masters` group.
12 |
13 | An example addition to the ConfigMap can be seen below for the `mapRoles` section:
14 |
15 | ```yaml
16 | - groups:
17 | - system:masters
18 | rolearn: ATTACKER_CONTROLLED_ARN
19 | username: user
20 | ```
21 |
22 | An EKS token can then be manually generated or the kubeconfig file can be updated to automatically request a token for the configured role ARN which can be used to authenticate against the cluster.
23 |
24 | ### Defense
25 |
26 | RBAC write access to the `aws-auth` ConfigMap within the `kube-system` namespace should be reviewed. Access should be restricted to required entities.
27 |
28 | ### Cypher Deep-Dive
29 |
30 | ```cypher
31 | MATCH (src)-[:GRANTS_PATCH|GRANTS_UPDATE]->(:ConfigMap {
32 | name: 'aws-auth', namespace: 'kube-system'
33 | }), (dest:Group {
34 | name: 'system:masters'
35 | })
36 | ```
37 |
38 | The above query finds all resources (`src`) that have the `PATCH` or `UPDATE` permission against the `aws-auth` ConfigMap. Both the namespace and name are used to specify the exact ConfigMap in case another version is present in a different namespace.
39 |
40 | All queries must have a `src` and `dest` so IceKube knows the two sides of a relationship, and this is one of the rare instances where the original query doesn't include the destination resource. A secondary query is therefore added to query for the `system:masters` group and specify that as the `dest`.
41 |
--------------------------------------------------------------------------------
/docs/UPDATE_WORKLOAD_WITH_SA.md:
--------------------------------------------------------------------------------
1 | # UPDATE_WORKLOAD_WITH_SA
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate subjects which can update workloads in a namespace with the target service account. Upon successful exploitation, an attacker will gain the permissions of the target service account.
6 |
7 | ### Description
8 |
9 | An attacker with the ability to update workloads could configure the service account associated with the resultant pod by setting the `serviceAccountName` field in the pod spec. Should the value specified match the name of a service account in the namespace the pod is deployed in, the token for that service account can be mounted into the pod.
10 |
11 | As the attacker has configured the workload, they would also have control of the image and the command executed. This could be configured to exfiltrate the token to the attacker. This could be by outputting it to `stdout` if the attacker has `pods/logs` permissions, or exfiltrating the token over the network, or some other means.
12 |
13 | Once the attacker has acquired the token, they would be able to perform actions against the API server as the service account.
14 |
15 | ### Defense
16 |
17 | RBAC permissions to update workloads should be reviewed. Access should be restricted to required entities.
18 |
19 | ### Cypher Deep-Dive
20 |
21 | ```cypher
22 | MATCH (src)-[:GRANTS_UPDATE|GRANTS_PATCH]->(workload)-[:WITHIN_NAMESPACE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount)
23 | WHERE (workload:ReplicationController OR workload:DaemonSet OR workload:Deployment OR workload:ReplicaSet OR workload:StatefulSet OR workload:CronJob OR workload:Job)
24 | ```
25 |
26 | The above query finds all resources (`src`) that have the `PATCH` or `UPDATE` permission against workload resource types within a specified namespace. All `PATCH` or `UPDATE` verbs are against the namespace for a namespaced resource. The target node (`dest`) is a service account within the same namespace as where the workload modification is permitted.
27 |
28 | Workload modification is used because various Kubernetes controllers create pods automatically from more abstract workload resources. Configuration of the workload resource also configures the created pod, thus it would allow an attacker to create the desired pod.
29 |
30 | Workload modification includes the following:
31 | - `replicationcontrollers`
32 | - `daemonsets`
33 | - `deployments`
34 | - `replicasets`
35 | - `statefulsets`
36 | - `cronjobs`
37 | - `jobs`
38 |
--------------------------------------------------------------------------------
/docs/USES_ACCOUNT.md:
--------------------------------------------------------------------------------
1 | # USES_ACCOUNT
2 |
3 | ### Overview
4 |
5 | This attack path aims to locate pods which mount a service account token. Upon compromise of a pod, an attacker would gain access to the token allowing them to perform actions as the associated service account.
6 |
7 | ### Description
8 |
9 | An attacker which gained access to a pod which uses a service account could leverage the service account's permissions, thus furthering themselves within the cluster.
10 |
11 | Pods are associated with a service account. Should the service account token be mounted, it can typically be found at `/var/run/secrets/kubernetes.io/serviceaccount/token`. Upon compromise, an attacker can access the token and use it to communicate with the API server. This would allow them to perform actions as the service account.
12 |
13 | ### Defense
14 |
15 | Service account tokens should only be mounted into a pod should it be required. By default, the tokens are mounted in so this needs to be explicitly disabled. This can be done by setting `automountServiceAccountToken` to `false` in the pod spec, or within the service account. Examples for both can be seen below:
16 |
17 | ```yaml
18 | apiVersion: v1
19 | kind: ServiceAccount
20 | metadata:
21 | name: default
22 | automountServiceAccountToken: false
23 | ...
24 | ```
25 |
26 | ```yaml
27 | apiVersion: v1
28 | kind: Pod
29 | metadata:
30 | name: my-pod
31 | spec:
32 | automountServiceAccountToken: false
33 | ...
34 | ```
35 |
36 | ### Cypher Deep-Dive
37 |
38 | ```
39 | MATCH (src:Pod)-[:USES_ACCOUNT]->(dest:ServiceAccount)
40 | ```
41 |
42 | The above query finds all `Pod` resources (`src`) and finds the configured `ServiceAccount` node (`dest`) by means of the `USES_ACCOUNT` relationship.
43 |
--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ReversecLabs/IceKube/bfd74d83838ebfe7c728839c8dd03f503c9a4624/docs/logo.png
--------------------------------------------------------------------------------
/icekube/__init__.py:
--------------------------------------------------------------------------------
# Suppress urllib3's InsecureRequestWarning, which is emitted whenever HTTPS
# requests are made with certificate verification disabled (common when
# talking to clusters fronted by self-signed certificates).
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
4 |
--------------------------------------------------------------------------------
/icekube/attack_paths.py:
--------------------------------------------------------------------------------
1 | # flake8: noqa
2 |
3 | from typing import List
4 |
5 | from icekube.relationships import Relationship
6 |
# Workload kinds whose controllers create pods; used to build Cypher
# fragments that treat "can create/modify workload" as "can run a pod".
WORKLOAD_TYPES = [
    "ReplicationController",
    "DaemonSet",
    "Deployment",
    "ReplicaSet",
    "StatefulSet",
    "CronJob",
    "Job",
]


def create_workload_query(workloads: List[str] = WORKLOAD_TYPES) -> str:
    """Return a pipe-separated list of CREATE-grant relationship types.

    For each workload kind, produces ``GRANTS_<KIND>S_CREATE`` (e.g.
    ``GRANTS_DEPLOYMENTS_CREATE``) suitable for splicing into a Cypher
    relationship pattern such as ``-[:A|B|C]->``.
    """
    relationships = [f"GRANTS_{workload.upper()}S_CREATE" for workload in workloads]
    return "|".join(relationships)


def workload_query(
    workloads: List[str] = WORKLOAD_TYPES, name: str = "workload"
) -> str:
    """Return a Cypher label predicate matching any of the given kinds.

    Example: ``(workload:Deployment OR workload:Job)``.

    Bug fix: previously this iterated the module-level ``WORKLOAD_TYPES``
    regardless of the ``workloads`` argument, silently ignoring any
    caller-supplied subset. It now honours the parameter; behaviour with
    the default argument is unchanged.
    """
    joined = f" OR {name}:".join(workloads)
    return f"({name}:{joined})"
28 |
29 |
# Mapping of Relationship -> Cypher MATCH query (or list of queries) that
# identifies an attack path. Every query binds `src` (the attacker's
# position) and `dest` (the resource reached by exploiting the path).
# Queries may be f-strings that splice in helper-generated fragments.
attack_paths = {
    # Subject -> Role Bindings
    Relationship.BOUND_TO: "MATCH (src)-[:BOUND_TO]->(dest)",
    # Role Binding -> Role
    Relationship.GRANTS_PERMISSION: "MATCH (src)-[:GRANTS_PERMISSION]->(dest)",
    # Pod -> Service Account
    Relationship.USES_ACCOUNT: "MATCH (src:Pod)-[:USES_ACCOUNT]->(dest:ServiceAccount)",
    # Pod -> Secret
    Relationship.MOUNTS_SECRET: "MATCH (src:Pod)-[:MOUNTS_SECRET]->(dest:Secret)",
    # Subject has permission to create pod within namespace with target
    # Service Account
    Relationship.CREATE_POD_WITH_SA: f"""
    MATCH (src)-[:GRANTS_PODS_CREATE|{create_workload_query()}]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount)
    """,
    # Subject has permissions to create a workload that can allow breakout onto the underlying
    # node
    Relationship.CREATE_PRIVILEGED_WORKLOAD: [
        # Assume PSA is enabled for cluster versions >= 1.25; namespaces
        # enforcing 'restricted' or 'baseline' block privileged workloads.
        f"MATCH (src)-[:GRANTS_PODS_CREATE|{create_workload_query()}]->(ns:Namespace)-[:WITHIN_CLUSTER]->(cluster), (dest:Node) "
        "WHERE cluster.major_minor >= 1.25 AND (ns.psa_enforce <> 'restricted' AND ns.psa_enforce <> 'baseline')",
    ],
    # Patch namespace to remove PSA restrictions and create privileged workload
    Relationship.PATCH_NAMESPACE_TO_BYPASS_PSA: f"""
    MATCH (src)-[:GRANTS_PODS_CREATE|{create_workload_query()}]->(ns:Namespace)-[:WITHIN_CLUSTER]->(cluster), (dest:Node)
    WHERE (src)-[:GRANTS_PATCH|GRANTS_UPDATE]->(ns) AND cluster.major_minor >= 1.25
    """,
    # Subject has permission to update workload within namespace with target
    # Service Account
    Relationship.UPDATE_WORKLOAD_WITH_SA: f"""
    MATCH (src)-[:GRANTS_UPDATE|GRANTS_PATCH]->(workload)-[:WITHIN_NAMESPACE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount)
    WHERE {workload_query()}
    """,
    # Subject -> Pod (needs both exec and get on the pod)
    Relationship.EXEC_INTO: "MATCH (src)-[:GRANTS_EXEC_CREATE|GRANTS_EXEC_GET]->(dest:Pod)<-[:GRANTS_GET]-(src)",
    # Subject -> Pod
    Relationship.REPLACE_IMAGE: "MATCH (src)-[:GRANTS_PATCH]->(dest:Pod)",
    # Subject -> Pod (ephemeral debug containers)
    Relationship.DEBUG_POD: "MATCH (src)-[:GRANTS_EPHEMERAL_PATCH]->(dest:Pod)",
    # Subject has permission to read authentication token for Service Account
    Relationship.GET_AUTHENTICATION_TOKEN_FOR: """
    MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(secret:Secret)-[:AUTHENTICATION_TOKEN_FOR]->(dest:ServiceAccount)
    """,
    # Subject -> Secret
    Relationship.ACCESS_SECRET: "MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(dest:Secret)",
    # Generate service account token
    Relationship.GENERATE_TOKEN: "MATCH (src)-[:GRANTS_TOKEN_CREATE]->(dest:ServiceAccount)",
    # Create a long-lived secret for a service account; the attacker must
    # also have some way to read the secret back afterwards.
    Relationship.CREATE_SECRET_WITH_TOKEN: [
        # Uses a workload to read the generated secret
        f"""
    MATCH (src)-[:GRANTS_SECRETS_CREATE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount)
    WHERE (src)-[:GRANTS_PODS_CREATE|{create_workload_query()}]->(ns)
    """,
        # Uses secret list to read the generated secret
        """
    MATCH (src)-[:GRANTS_SECRETS_CREATE]->(ns:Namespace)<-[:WITHIN_NAMESPACE]-(dest:ServiceAccount)
    WHERE (src)-[:GRANTS_SECRETS_LIST]->(ns)
    """,
    ],
    # RBAC escalate verb to change a role to be more permissive
    Relationship.RBAC_ESCALATE_TO: [
        # RoleBindings
        """
    MATCH (src:RoleBinding)-[:GRANTS_ESCALATE]->(role)-[:WITHIN_NAMESPACE]->(:Namespace)<-[:WITHIN_NAMESPACE]-(dest)
    WHERE (role:Role OR role:ClusterRole) AND (src)-[:GRANTS_PERMISSION]->(role)
    """,
        # ClusterRoleBindings
        """
    MATCH (src:ClusterRoleBinding)-[:GRANTS_ESCALATE]->(role:ClusterRole), (dest)
    WHERE (src)-[:GRANTS_PERMISSION]->(role)
    """,
    ],
    # Subject -> User / Group / ServiceAccount
    Relationship.GENERATE_CLIENT_CERTIFICATE: """
    MATCH (src)-[:GRANTS_CERTIFICATESIGNINGREQUESTS_CREATE]->(cluster:Cluster), (dest)
    WHERE (src)-[:HAS_CSR_APPROVAL]->(cluster) AND (src)-[:GRANTS_APPROVE]->(:Signer {
        name: "kubernetes.io/kube-apiserver-client"
    }) AND (dest:User OR dest:Group OR dest:ServiceAccount)
    """,
    # Impersonate
    Relationship.CAN_IMPERSONATE: "MATCH (src)-[:GRANTS_IMPERSONATE]->(dest)",
    # Pod breakout routes onto the hosting node
    Relationship.IS_PRIVILEGED: "MATCH (src:Pod {privileged: true})<-[:HOSTS_POD]-(dest:Node)",
    Relationship.CAN_CGROUP_BREAKOUT: 'MATCH (src:Pod)<-[:HOSTS_POD]-(dest:Node) WHERE "SYS_ADMIN" in src.capabilities',
    Relationship.CAN_LOAD_KERNEL_MODULES: 'MATCH (src:Pod)<-[:HOSTS_POD]-(dest:Node) WHERE "SYS_MODULE" in src.capabilities',
    Relationship.CAN_ACCESS_DANGEROUS_HOST_PATH: "MATCH (src:Pod {dangerous_host_path: true})<-[:HOSTS_POD]-(dest:Node)",
    Relationship.CAN_NSENTER_HOST: 'MATCH (src:Pod {hostPID: true})<-[:HOSTS_POD]-(dest:Node) WHERE all(x in ["SYS_ADMIN", "SYS_PTRACE"] WHERE x in src.capabilities)',
    Relationship.CAN_ACCESS_HOST_FD: 'MATCH (src:Pod)<-[:HOSTS_POD]-(dest:Node) WHERE "DAC_READ_SEARCH" in src.capabilities',
    # Can jump to pods running on node
    Relationship.ACCESS_POD: "MATCH (src:Node)-[:HOSTS_POD]->(dest:Pod)",
    # Can exec into pods on a node
    Relationship.CAN_EXEC_THROUGH_KUBELET: "MATCH (src)-[:GRANTS_PROXY_CREATE]->(:Node)-[:HOSTS_POD]->(dest:Pod)",
    # Can update aws-auth ConfigMap (EKS) to map attacker IAM roles into RBAC
    Relationship.UPDATE_AWS_AUTH: """
    MATCH (src)-[:GRANTS_PATCH|GRANTS_UPDATE]->(:ConfigMap {
        name: 'aws-auth', namespace: 'kube-system'
    }), (dest:Group {
        name: 'system:masters'
    })
    """,
    Relationship.AZURE_POD_IDENTITY_EXCEPTION: [
        # Create workload based on existing APIE
        f"""
    MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(azexc:AzurePodIdentityException)-[:WITHIN_NAMESPACE]->(ns:Namespace), (dest:ClusterRoleBinding)
    WHERE (dest.name = 'aks-cluster-admin-binding' OR dest.name = 'aks-cluster-admin-binding-aad') AND (EXISTS {{
        MATCH (src)-[:{create_workload_query()}|GRANTS_PODS_CREATE]->(ns)
    }} OR EXISTS {{
        MATCH (src)-[:GRANTS_PATCH|GRANTS_UPDATE]->(workload)-[:WITHIN_NAMESPACE]->(ns)
        WHERE {workload_query()}
    }})
    """,
        # Create APIE based on existing workload
        """
    MATCH (src)-[:GRANTS_GET|GRANTS_LIST|GRANTS_WATCH]->(pod:Pod)-[:WITHIN_NAMESPACE]->(ns:Namespace), (src)-[r {
        attack_path: 1
    }]->(pod), (dest:ClusterRoleBinding)
    WHERE (dest.name='aks-cluster-admin-binding' OR dest.name='aks-cluster-admin-binding-aad') AND (EXISTS {
        (src)-[:GRANTS_AZUREPODIDENTITYEXCEPTIONS_CREATE]->(ns)
    } OR EXISTS {
        (src)-[:GRANTS_UPDATE|GRANTS_PATCH]->(:AzurePodIdentityException)-[:WITHIN_NAMESPACE]->(ns)
    })
    """,
    ],
    # Control-plane nodes are treated as effectively cluster-admin
    Relationship.IS_CLUSTER_ADMIN: """
    MATCH (src:Node), (dest:ClusterRoleBinding)-[:GRANTS_PERMISSION]->(:ClusterRole {name: "cluster-admin"})
    WHERE any(x in ["master", "control-plane"] WHERE x in src.node_roles)
    """,
}
158 |
--------------------------------------------------------------------------------
/icekube/cli.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | from pathlib import Path
4 | from typing import Iterator, List, Optional, cast
5 |
6 | import typer
7 | from icekube.config import config
8 | from icekube.icekube import (
9 | create_indices,
10 | enumerate_resource_kind,
11 | generate_relationships,
12 | purge_neo4j,
13 | remove_attack_paths,
14 | setup_attack_paths,
15 | )
16 | from icekube.kube import (
17 | APIResource,
18 | Resource,
19 | all_resources,
20 | metadata_download,
21 | )
22 | from icekube.log_config import build_logger
23 | from tqdm import tqdm
24 |
# Top-level Typer application; sub-commands register via @app.command().
app = typer.Typer()

# Resource kinds skipped during enumeration by default (high-churn kinds
# that are not useful for attack-path analysis).
IGNORE_DEFAULT = "events,componentstatuses"
28 |
29 |
@app.command()
def run(
    ignore: str = typer.Option(
        IGNORE_DEFAULT,
        help="Names of resource types to ignore",
    ),
):
    """Full pipeline: enumerate cluster resources, then compute attack paths.

    Order matters: attack paths are derived from the enumerated graph.
    """
    enumerate(ignore)
    attack_path()
39 |
40 |
@app.command()
def enumerate(
    ignore: str = typer.Option(
        IGNORE_DEFAULT,
        help="Names of resource types to ignore",
    ),
):
    """Enumerate cluster resources into Neo4j and generate relationships.

    NOTE: this shadows the ``enumerate`` builtin within this module; the
    name is kept because it defines the CLI sub-command name.
    """
    create_indices()
    # `ignore` arrives as a comma-separated string from the CLI option.
    enumerate_resource_kind(ignore.split(","))
    generate_relationships()
51 |
52 |
@app.command()
def relationships():
    """Re-generate relationships for resources already stored in Neo4j."""
    generate_relationships()
56 |
57 |
@app.command()
def attack_path():
    """Rebuild attack paths: drop existing ones, then recompute from scratch."""
    remove_attack_paths()
    setup_attack_paths()
62 |
63 |
@app.command()
def purge():
    """Delete all IceKube data from the Neo4j database."""
    purge_neo4j()
67 |
68 |
@app.command()
def download(output_dir: str):
    """Dump cluster metadata and all resources as JSON files in output_dir.

    Resources are written grouped by resource-definition name, one file per
    kind; cluster metadata goes to ``_metadata.json``.
    """
    out = Path(output_dir)
    out.mkdir(exist_ok=True)

    resources = all_resources()
    metadata = metadata_download()

    with open(out / "_metadata.json", "w") as fh:
        fh.write(json.dumps(metadata, indent=2, default=str))

    def flush(kind, items):
        # Persist one completed resource-kind group to its own file.
        with open(out / f"{kind}.json", "w") as fh:
            fh.write(json.dumps(items, indent=4, default=str))

    current_type = None
    batch = []

    # Resources arrive ordered by kind, so a change in definition name
    # signals the previous group is complete and can be written out.
    for resource in resources:
        kind = resource.resource_definition_name
        if current_type is None:
            current_type = kind
        elif kind != current_type:
            flush(current_type, batch)
            batch = []
            current_type = kind

        if resource.raw:
            batch.append(json.loads(resource.raw))

    # Write the trailing group, if any resources were seen at all.
    if current_type:
        flush(current_type, batch)
98 |
99 |
@app.command()
def load(input_dir: str, attack_paths: bool = True):
    """Load a previously downloaded cluster dump from disk into Neo4j.

    Monkey-patches the ``kube`` and ``icekube`` modules so metadata and
    resources come from the JSON files in ``input_dir`` instead of a live
    API server, then runs the normal pipeline (optionally with attack-path
    generation).
    """
    path = Path(input_dir)
    metadata = json.load(open(path / "_metadata.json"))

    from icekube import kube
    from icekube import icekube

    # Replace live-cluster lookups with the values captured in
    # _metadata.json at download time.
    kube.kube_version = lambda: cast(str, metadata["kube_version"])
    kube.context_name = lambda: cast(str, metadata["context_name"])
    kube.api_versions = lambda: cast(List[str], metadata["api_versions"])
    kube.preferred_versions = metadata["preferred_versions"]
    kube.api_resources = lambda: cast(
        List[APIResource],
        [APIResource(**x) for x in metadata["api_resources"]],
    )

    # icekube imported these names directly at module load, so its own
    # references must be patched as well.
    icekube.api_resources = kube.api_resources
    icekube.context_name = kube.context_name
    icekube.kube_version = kube.kube_version

    def all_resources(
        preferred_versions_only: bool = True,
        ignore: Optional[List[str]] = None,
    ) -> Iterator[Resource]:
        # Disk-backed replacement for kube.all_resources: streams Resource
        # objects parsed from every JSON file in the dump directory.
        print("Loading files from disk")

        for file in tqdm(path.glob("*")):
            if file.name == "_metadata.json":
                continue
            try:
                # If downloaded via kubectl get -A
                data = json.load(open(file))["items"]
            except TypeError:
                # If downloaded via icekube download
                data = json.load(open(file))

            for resource in data:
                yield Resource(
                    apiVersion=resource["apiVersion"],
                    kind=resource["kind"],
                    name=resource["metadata"]["name"],
                    namespace=resource["metadata"].get("namespace"),
                    # File name stem is the resource plural (e.g. pods.json).
                    plural=file.name.split(".")[0],
                    raw=json.dumps(resource, default=str),
                )
        print("")

    kube.all_resources = all_resources
    icekube.all_resources = all_resources

    if attack_paths:
        run(IGNORE_DEFAULT)
    else:
        enumerate(IGNORE_DEFAULT)
155 |
156 |
@app.callback()
def callback(
    neo4j_url: str = typer.Option("bolt://localhost:7687", show_default=True),
    neo4j_user: str = typer.Option("neo4j", show_default=True),
    neo4j_password: str = typer.Option("neo4j", show_default=True),
    neo4j_encrypted: bool = typer.Option(False, show_default=True),
    verbose: int = typer.Option(0, "--verbose", "-v", count=True),
):
    """Global CLI options: Neo4j connection settings and logging verbosity.

    Runs before any sub-command; stores the connection details in the
    shared ``config`` dict and configures the logger from the count of
    ``-v`` flags.
    """
    config["neo4j"]["url"] = neo4j_url
    config["neo4j"]["username"] = neo4j_user
    config["neo4j"]["password"] = neo4j_password
    config["neo4j"]["encrypted"] = neo4j_encrypted

    verbosity_levels = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }
    # Bug fix: clamp to DEBUG for counts above 3 — previously `-vvvv`
    # raised a KeyError instead of enabling maximum verbosity.
    build_logger(verbosity_levels.get(verbose, logging.DEBUG))
177 |
--------------------------------------------------------------------------------
/icekube/config.py:
--------------------------------------------------------------------------------
1 | from typing import TypedDict
2 |
3 |
class Neo4j(TypedDict):
    """Connection settings for the Neo4j database."""

    # bolt:// URL of the Neo4j server
    url: str
    username: str
    password: str
    # whether the bolt connection should use encryption
    encrypted: bool
9 |
10 |
class Config(TypedDict):
    """Top-level application configuration schema."""

    neo4j: Neo4j
13 |
14 |
# Global mutable configuration with local-development defaults; the CLI
# callback overwrites these values from command-line options at startup.
config: Config = {
    "neo4j": {
        "url": "bolt://localhost:7687",
        "username": "neo4j",
        "password": "neo4j",
        "encrypted": False,
    },
}
23 |
--------------------------------------------------------------------------------
/icekube/icekube.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from concurrent.futures import ThreadPoolExecutor
3 | from functools import partial
4 | from typing import List, Optional
5 |
6 | from icekube.attack_paths import attack_paths
7 | from icekube.kube import (
8 | all_resources,
9 | api_resources,
10 | context_name,
11 | kube_version,
12 | )
13 | from icekube.models import Cluster, Signer
14 | from icekube.models.base import Resource
15 | from icekube.neo4j import create, find, get, get_driver
16 | from neo4j import BoltDriver
17 | from tqdm import tqdm
18 |
19 | logger = logging.getLogger(__name__)
20 |
21 |
def create_indices():
    """Create a Neo4j index for every listable resource kind.

    Indexes cover ``name`` (plus ``namespace`` for namespaced kinds) so
    later lookups during enumeration and relationship generation are fast.
    """
    # Improvement: open a single session for all index creation instead of
    # opening and closing one per resource kind inside the loop.
    with get_driver().session() as session:
        for resource in api_resources():
            # Kinds that cannot be listed are never enumerated, so they
            # need no index.
            if "list" not in resource.verbs:
                continue

            kind = resource.kind
            namespace = resource.namespaced

            cmd = f"CREATE INDEX {kind.lower()} IF NOT EXISTS "
            cmd += f"FOR (n:{kind}) ON (n.name"
            if namespace:
                cmd += ", n.namespace"
            cmd += ")"

            session.run(cmd)
38 |
39 |
def enumerate_resource_kind(
    ignore: Optional[List[str]] = None,
):
    """Store the cluster, well-known signers, and all resources in neo4j.

    `ignore` lists plural resource names to skip during enumeration.
    """
    if ignore is None:
        ignore = []

    with get_driver().session() as session:
        # The cluster node itself.
        cluster = Cluster(apiVersion="N/A", name=context_name(), version=kube_version())
        cmd, kwargs = create(cluster)
        session.run(cmd, **kwargs)

        # The built-in certificate signers.
        for signer_name in (
            "kubernetes.io/kube-apiserver-client",
            "kubernetes.io/kube-apiserver-client-kubelet",
            "kubernetes.io/kubelet-serving",
            "kubernetes.io/legacy-unknown",
        ):
            cmd, kwargs = create(Signer(name=signer_name))
            session.run(cmd, **kwargs)

        # Every resource fetched from the cluster.
        for resource in all_resources(ignore=ignore):
            cmd, kwargs = create(resource)
            session.run(cmd, **kwargs)
65 |
66 |
def relationship_generator(
    driver: BoltDriver,
    initial: bool,
    resource: Resource,
):
    """Create graph edges for every relationship reported by `resource`.

    Each relationship triple is (source, relationship-type(s), target);
    an endpoint is either a Resource (matched by its unique identifiers
    via `get`) or a (query, params) tuple whose query contains a
    ``{prefix}`` placeholder and whose params are prefixed accordingly.
    """
    with driver.session() as session:
        logger.info(f"Generating relationships for {resource}")
        for source, relationship, target in resource.relationships(initial):
            logger.debug(
                f"Creating relationship: {source} -> {relationship} -> {target}"
            )
            if isinstance(source, Resource):
                src_cmd, src_kwargs = get(source, prefix="src")
            else:
                # Raw query endpoint: substitute the prefix and namespace
                # its parameters so they cannot clash with the dst side.
                src_cmd = source[0].format(prefix="src")
                src_kwargs = {f"src_{key}": value for key, value in source[1].items()}

            if isinstance(target, Resource):
                dst_cmd, dst_kwargs = get(target, prefix="dst")
            else:
                dst_cmd = target[0].format(prefix="dst")
                dst_kwargs = {f"dst_{key}": value for key, value in target[1].items()}

            # Chain both MATCH fragments, carrying src across with WITH.
            cmd = src_cmd + "WITH src " + dst_cmd

            # A relationship may be a single type or a list of types; one
            # MERGE per type.
            if isinstance(relationship, str):
                relationship = [relationship]
            cmd += "".join(f"MERGE (src)-[:{x}]->(dst) " for x in relationship)

            kwargs = {**src_kwargs, **dst_kwargs}
            logger.debug(f"Starting neo4j query: {cmd}, {kwargs}")
            session.run(cmd, kwargs)
99 |
100 |
def _run_relationship_pass(driver, resources, initial, threaded, description):
    """Run relationship generation over `resources` for a single pass."""
    generator = partial(relationship_generator, driver, initial)

    if threaded:
        with ThreadPoolExecutor() as exc:
            # Consume the map iterator so worker exceptions are surfaced
            # instead of being silently discarded.
            list(exc.map(generator, resources))
    else:
        print(description)
        for resource in tqdm(resources):
            generator(resource)
        print("")


def generate_relationships(threaded: bool = False) -> None:
    """Generate graph relationships for all stored resources.

    Two passes are made: the second pass picks up objects that were
    created as a side effect of relationships in the first pass.
    """
    logger.info("Generating relationships")
    logger.info("Fetching resources from neo4j")
    driver = get_driver()
    resources = find()
    logger.info("Fetched resources from neo4j")

    _run_relationship_pass(
        driver, resources, True, threaded, "First pass for relationships"
    )

    # Re-fetch to include objects created during the first pass.
    resources = find()
    _run_relationship_pass(
        driver, resources, False, threaded, "Second pass for relationships"
    )
132 |
133 |
def remove_attack_paths() -> None:
    """Delete every relationship that carries the `attack_path` marker.

    Attack-path edges are tagged with an `attack_path` property when
    created, so they can be regenerated without touching base edges.
    """
    with get_driver().session() as session:
        session.run("MATCH ()-[r]-() WHERE r.attack_path IS NOT NULL DELETE r")
137 |
138 |
def setup_attack_paths() -> None:
    """Create attack-path edges from every configured attack-path query.

    Each query's matched (src, dest) pairs get a relationship tagged with
    `attack_path: 1` so they can later be removed wholesale.
    """
    print("Generating attack paths")
    for relationship, query in tqdm(attack_paths.items()):
        # A mapping value may be one query string or a list of them.
        queries = [query] if isinstance(query, str) else query
        with get_driver().session() as session:
            for q in queries:
                session.run(
                    q + f" MERGE (src)-[:{relationship} {{ attack_path: 1 }}]->(dest)"
                )
    print("")
150 |
151 |
def purge_neo4j() -> None:
    """Delete every node and relationship from the neo4j database."""
    with get_driver().session() as session:
        # DETACH DELETE removes each node together with any attached
        # relationships, replacing the previous two-query delete.
        session.run("MATCH (n) DETACH DELETE n")
156 |
--------------------------------------------------------------------------------
/icekube/kube.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from collections.abc import Iterator
3 | from typing import Any, Dict, List, Optional, cast
4 |
5 | from icekube.models import APIResource, Resource
6 | from kubernetes import client, config
7 | from tqdm import tqdm
8 |
9 | logger = logging.getLogger(__name__)
10 |
11 | loaded_kube_config = False
12 | api_resources_cache: Optional[List[APIResource]] = None
13 | preferred_versions: Dict[str, str] = {}
14 |
15 |
def load_kube_config():
    """Load the user's kubeconfig, at most once per process."""
    global loaded_kube_config

    if loaded_kube_config:
        return

    config.load_kube_config()
    loaded_kube_config = True
22 |
23 |
def kube_version() -> str:
    """Return the Kubernetes API server's reported git version string."""
    load_kube_config()
    return cast(str, client.VersionApi().get_code().git_version)
27 |
28 |
def context_name() -> str:
    """Return the cluster name from the active kubeconfig context."""
    load_kube_config()
    # list_kube_config_contexts() returns (all_contexts, active_context);
    # [1] is the active one.
    return cast(str, config.list_kube_config_contexts()[1]["context"]["cluster"])
32 |
33 |
def api_versions() -> List[str]:
    """Return all API versions the server advertises, sorted.

    Core versions appear bare (e.g. the value itself); grouped APIs as
    "group/version". As a side effect, records each group's preferred
    version in the module-level `preferred_versions` mapping.
    """
    load_kube_config()

    # Core API versions.
    versions = [f"{v}" for v in client.CoreApi().get_api_versions().versions]

    # Grouped API versions, recording each group's preferred version.
    for api in client.ApisApi().get_api_versions().groups:
        preferred_versions[api.name] = api.preferred_version.version
        versions.extend(f"{api.name}/{v.version}" for v in api.versions)

    return sorted(versions)
47 |
48 |
def api_resources() -> List[APIResource]:
    """Discover every API resource type exposed by the cluster.

    Results are cached in the module-level `api_resources_cache`; a
    failure to reach the cluster caches (and returns) an empty list.
    Certain verbs IceKube cares about but the server does not advertise
    (bind/escalate/impersonate) are injected, and kinds the server never
    reports (users, groups, signers) are synthesised if absent.
    """
    global api_resources_cache
    load_kube_config()

    if api_resources_cache is not None:
        return api_resources_cache

    try:
        versions = api_versions()
    except Exception:
        logger.error("Failed to access Kubernetes cluster")
        api_resources_cache = []
        return api_resources_cache

    # Security-relevant verbs the discovery document omits; constant, so
    # built once instead of per discovered item.
    additional_verbs = {
        "roles": ["bind", "escalate"],
        "clusterroles": ["bind", "escalate"],
        "serviceaccounts": ["impersonate"],
        "users": ["impersonate"],
        "groups": ["impersonate"],
    }

    resources: List[APIResource] = []

    for version in versions:
        if "/" in version:
            group, vers = version.split("/")
            resp = client.CustomObjectsApi().list_cluster_custom_object(
                group,
                vers,
                "",
            )
            preferred = preferred_versions[group] == vers
        else:
            resp = client.CoreV1Api().get_api_resources()
            preferred = True
        resp = resp.to_dict()
        for item in resp["resources"]:
            if item["name"] in additional_verbs:
                item["verbs"] = list(
                    set(item["verbs"] + additional_verbs[item["name"]]),
                )

            resources.append(
                APIResource(
                    name=item["name"],
                    namespaced=item["namespaced"],
                    group=version,
                    kind=item["kind"],
                    preferred=preferred,
                    verbs=item["verbs"],
                ),
            )

    # Kinds that never appear in discovery but are needed for RBAC
    # modelling; added only when the cluster did not report them.
    fallbacks = [
        APIResource(
            name="users",
            namespaced=False,
            group="",
            kind="User",
            preferred=True,
            verbs=["impersonate"],
        ),
        APIResource(
            name="groups",
            namespaced=False,
            group="",
            kind="Group",
            preferred=True,
            verbs=["impersonate"],
        ),
        APIResource(
            name="signers",
            namespaced=False,
            group="certificates.k8s.io/v1",
            kind="Signer",
            preferred=True,
            verbs=["approve", "sign"],
        ),
    ]
    for fallback in fallbacks:
        if not any(x.name == fallback.name for x in resources):
            resources.append(fallback)

    api_resources_cache = resources
    return resources
146 |
147 |
def all_resources(
    preferred_versions_only: bool = True,
    ignore: Optional[List[str]] = None,
) -> Iterator[Resource]:
    """Yield every resource instance present in the cluster.

    Iterates all discovered kinds that support "list", skipping
    non-preferred group versions (when `preferred_versions_only`) and
    plural names in `ignore`. Namespaced kinds are listed one namespace
    at a time; a kind that fails with an ApiException is logged and
    skipped rather than aborting the enumeration.
    """
    load_kube_config()

    if ignore is None:
        ignore = []

    all_namespaces: List[str] = [
        x.metadata.name for x in client.CoreV1Api().list_namespace().items
    ]

    print("Enumerating Kubernetes resources")
    for resource_kind in tqdm(api_resources()):
        if "list" not in resource_kind.verbs:
            continue

        if preferred_versions_only and not resource_kind.preferred:
            continue

        if resource_kind.name in ignore:
            continue

        logger.info(f"Fetching {resource_kind.name} resources")
        try:
            # Dispatch to the Resource subclass for this kind so any
            # overridden list() implementation is used.
            resource_class = Resource.get_kind_class(
                resource_kind.group,
                resource_kind.kind,
            )
            if resource_kind.namespaced:
                for ns in all_namespaces:
                    yield from resource_class.list(
                        resource_kind.group,
                        resource_kind.kind,
                        resource_kind.name,
                        ns,
                    )
            else:
                yield from resource_class.list(
                    resource_kind.group,
                    resource_kind.kind,
                    resource_kind.name,
                )
        except client.exceptions.ApiException:
            logger.error(f"Failed to retrieve {resource_kind.name}")
    print("")
195 |
196 |
def metadata_download() -> Dict[str, Any]:
    """Collect cluster discovery metadata into a single serialisable dict."""
    return {
        "kube_version": kube_version(),
        "context_name": context_name(),
        "api_versions": api_versions(),
        "preferred_versions": preferred_versions,
        # NOTE(review): .dict() is pydantic-v1 style; v2 prefers
        # model_dump() — confirm pydantic version before changing.
        "api_resources": [x.dict() for x in api_resources()],
    }
205 |
--------------------------------------------------------------------------------
/icekube/log_config.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from tqdm.contrib.logging import _TqdmLoggingHandler, std_tqdm
4 |
5 |
def build_logger(debug_level=logging.DEBUG):
    """Configure the "icekube" logger to emit through tqdm.

    A plain StreamHandler is built only to supply the output stream and
    formatting; just the tqdm-aware handler is attached so log lines do
    not corrupt any active progress bars.
    """
    logger = logging.getLogger("icekube")
    logger.setLevel(debug_level)

    console = logging.StreamHandler()
    console.setLevel(debug_level)

    formatter = logging.Formatter("%(asctime)s|%(name)s|%(levelname)s|%(message)s")
    console.setFormatter(formatter)

    # Route records through tqdm so progress bars stay intact.
    tqdm_handler = _TqdmLoggingHandler(std_tqdm)
    tqdm_handler.setFormatter(formatter)
    tqdm_handler.stream = console.stream

    logger.addHandler(tqdm_handler)
24 |
--------------------------------------------------------------------------------
/icekube/models/__init__.py:
--------------------------------------------------------------------------------
1 | from icekube.models import (
2 | clusterrole,
3 | clusterrolebinding,
4 | group,
5 | namespace,
6 | pod,
7 | role,
8 | rolebinding,
9 | secret,
10 | securitycontextconstraints,
11 | serviceaccount,
12 | user,
13 | )
14 | from icekube.models.api_resource import APIResource
15 | from icekube.models.base import Resource
16 | from icekube.models.cluster import Cluster
17 | from icekube.models.signer import Signer
18 |
# Public API of the models package. The per-kind modules are imported
# above for their side effect of defining Resource subclasses, which
# Resource.get_kind_class discovers via __subclasses__().
__all__ = [
    "APIResource",
    "Cluster",
    "Signer",
    "Resource",
    "clusterrole",
    "clusterrolebinding",
    "group",
    "namespace",
    "pod",
    "role",
    "rolebinding",
    "secret",
    "securitycontextconstraints",
    "serviceaccount",
    "user",
]
36 |
--------------------------------------------------------------------------------
/icekube/models/_helpers.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | from kubernetes.client import ApiClient
4 |
5 |
def load(obj, key, default=None):
    """Read `key` from a dict, or the attribute `key` from any other object.

    Note: for non-dict objects a falsy attribute value (None, "", 0, ...)
    falls back to `default`, unlike dict.get; a missing attribute raises
    AttributeError.
    """
    if not isinstance(obj, dict):
        return getattr(obj, key) or default
    return obj.get(key, default)
11 |
12 |
def save(obj, key, value):
    """Write `value` under `key` on a dict or as an attribute, returning obj."""
    if not isinstance(obj, dict):
        setattr(obj, key, value)
    else:
        obj[key] = value

    return obj
20 |
21 |
def to_dict(resource) -> Dict[str, Any]:
    """Serialise a kubernetes client model into plain JSON-compatible types."""
    resp: Dict[str, Any] = ApiClient().sanitize_for_serialization(resource)
    return resp
25 |
--------------------------------------------------------------------------------
/icekube/models/api_resource.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from pydantic import BaseModel, field_validator
4 |
5 |
class APIResource(BaseModel):
    """Metadata for one API resource type discovered from the cluster."""

    # Plural resource name as used in API paths (e.g. "clusterroles").
    name: str
    namespaced: bool
    # The apiVersion string ("v1" or "group/version") this entry came from.
    group: str
    kind: str
    verbs: List[str]
    # True when this entry is from the group's preferred version.
    preferred: bool = False

    @field_validator("kind")
    @classmethod
    def kind_can_only_have_underscore(cls, v: str) -> str:
        # kind feeds into neo4j labels/index names, so squash every
        # non-alphanumeric character to an underscore.
        s = "".join([x if x.isalnum() else "_" for x in v])
        return s
19 |
--------------------------------------------------------------------------------
/icekube/models/base.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import logging
5 | import traceback
6 | from functools import cached_property
7 | from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast
8 |
9 | from icekube.models._helpers import load, save
10 | from icekube.relationships import Relationship
11 | from icekube.utils import to_camel_case
12 | from kubernetes import client
13 | from pydantic import (
14 | BaseModel,
15 | Field,
16 | computed_field,
17 | field_validator,
18 | model_validator,
19 | )
20 |
21 | logger = logging.getLogger(__name__)
22 |
23 |
def api_group(api_version: str) -> str:
    """Return the API group portion of an apiVersion string.

    "group/version" yields "group"; a core-API version with no slash
    (e.g. "v1") yields the empty string.
    """
    group, sep, _ = api_version.partition("/")
    return group if sep else ""
29 |
30 |
class Resource(BaseModel):
    """Base class for every Kubernetes object stored as a graph node.

    Instantiating Resource dispatches to the matching per-kind subclass
    (via __new__ / get_kind_class), so callers can always construct
    Resource(...) and get the specialised behaviour.
    """

    apiVersion: str = Field(default=...)
    kind: str = Field(default=...)
    name: str = Field(default=...)
    plural: str = Field(default=...)
    namespace: Optional[str] = Field(default=None)
    # Raw JSON of the object as retrieved from the API server.
    raw: Optional[str] = Field(default=None)
    # API groups a subclass handles; consulted by get_kind_class.
    supported_api_groups: List[str] = Field(default_factory=list)

    def __new__(cls, **kwargs):
        # Route construction to the subclass registered for the requested
        # apiVersion/kind; falls back to cls when none matches.
        kind_class = cls.get_kind_class(
            kwargs.get("apiVersion", ""),
            kwargs.get("kind", cls.__name__),
        )
        return super(Resource, kind_class).__new__(kind_class)

    def __repr__(self) -> str:
        if self.namespace:
            return f"{self.kind}(namespace='{self.namespace}', name='{self.name}')"
        else:
            return f"{self.kind}(name='{self.name}')"

    def __str__(self) -> str:
        return self.__repr__()

    def __eq__(self, other) -> bool:
        # Identity is the API coordinates only — raw content is ignored.
        comparison_points = ["apiVersion", "kind", "namespace", "name"]

        return all(getattr(self, x) == getattr(other, x) for x in comparison_points)

    @field_validator("kind")
    @classmethod
    def kind_can_only_have_underscore(cls, v: str) -> str:
        # kind feeds into neo4j labels/index names, so squash every
        # non-alphanumeric character to an underscore.
        s = "".join([x if x.isalnum() else "_" for x in v])
        return s

    @cached_property
    def data(self) -> Dict[str, Any]:
        """The raw JSON parsed into a dict ({} when raw is unset)."""
        return cast(Dict[str, Any], json.loads(self.raw or "{}"))

    @computed_field  # type: ignore
    @property
    def labels(self) -> Dict[str, str]:
        """metadata.labels of the object ({} when absent)."""
        return cast(Dict[str, str], self.data.get("metadata", {}).get("labels", {}))

    @model_validator(mode="before")
    def inject_missing_required_fields(cls, values):
        """Fill in apiVersion / kind / plural when the caller omitted them.

        The kind is looked up in the cluster's discovered API resources
        (skipping non-preferred group versions); when nothing matches,
        values fall back to field defaults, then "N/A" placeholders.
        """
        if not all(load(values, x) for x in ["apiVersion", "kind", "plural"]):
            # Imported lazily to avoid a circular import at module load.
            from icekube.kube import api_resources, preferred_versions

            test_kind = load(values, "kind", cls.__name__)  # type: ignore

            for x in api_resources():
                if x.kind == test_kind:
                    if "/" in x.group:
                        group, version = x.group.split("/")
                        # Only accept the group's preferred version.
                        if preferred_versions[group] != version:
                            continue
                    api_resource = x
                    break
            else:
                # Nothing found, setting them to blank
                def get_value(field):
                    # Caller-provided value wins (dict key or attribute).
                    if isinstance(values, dict) and field in values:
                        return values[field]
                    elif not isinstance(values, dict) and getattr(values, field):
                        return getattr(values, field)

                    # NOTE(review): __fields__ is pydantic-v1 style access;
                    # v2 prefers model_fields — confirm before changing.
                    if cls.__fields__[field].default:
                        return cls.__fields__[field].default

                    if field == "kind":
                        return test_kind

                    return "N/A"

                for t in ["apiVersion", "kind", "plural"]:
                    values = save(values, t, get_value(t))

                return values

            # A discovery match was found: fill only the fields that are
            # still missing from the discovered APIResource.
            for attr, val in [
                ("apiVersion", api_resource.group),
                ("kind", api_resource.kind),
                ("plural", api_resource.name),
            ]:
                if load(values, attr) is None:
                    values = save(values, attr, val)

        return values

    @classmethod
    def get_kind_class(cls, apiVersion: str, kind: str) -> Type[Resource]:
        """Return the subclass named `kind` that supports apiVersion's group.

        Falls back to cls when no subclass matches.
        """
        for subclass in cls.__subclasses__():
            if subclass.__name__ != kind:
                continue

            supported = subclass.model_fields["supported_api_groups"].default
            if not isinstance(supported, list):
                continue

            if api_group(apiVersion) not in supported:
                continue

            return subclass

        return cls

    @property
    def api_group(self) -> str:
        """The API group portion of this object's apiVersion."""
        return api_group(self.apiVersion)

    @property
    def resource_definition_name(self) -> str:
        """RBAC-style resource name: "<plural>.<group>", or bare plural
        for the core group."""
        if self.api_group:
            return f"{self.plural}.{self.api_group}"
        else:
            return self.plural

    @property
    def unique_identifiers(self) -> Dict[str, str]:
        """Fields that uniquely identify this object's node in the graph."""
        ident = {
            "apiGroup": self.api_group,
            "apiVersion": self.apiVersion,
            "kind": self.kind,
            "name": self.name,
        }
        if self.namespace:
            ident["namespace"] = self.namespace
        return ident

    @property
    def db_labels(self) -> Dict[str, Any]:
        """Properties written onto the neo4j node for this object."""
        return {
            **self.unique_identifiers,
            "plural": self.plural,
            "raw": self.raw,
        }

    @classmethod
    def list(
        cls: Type[Resource],
        apiVersion: str,
        kind: str,
        name: str,
        namespace: Optional[str] = None,
    ) -> List[Resource]:
        """List all objects of this kind from the cluster.

        Grouped APIs go through the dynamic CustomObjects client; core
        ("v1") kinds call the matching CoreV1Api list_* method by name.
        Items that fail to parse are logged and skipped.
        """
        try:
            group, version = apiVersion.split("/")
        except ValueError:
            # Core v1 API
            group = None
            version = apiVersion
        resources: List[Resource] = []
        if group:
            if namespace:
                resp = client.CustomObjectsApi().list_namespaced_custom_object(
                    group,
                    version,
                    namespace,
                    name,
                )
            else:
                resp = client.CustomObjectsApi().list_cluster_custom_object(
                    group,
                    version,
                    name,
                )
        else:
            if namespace:
                # e.g. kind "Pod" -> CoreV1Api().list_namespaced_pod
                func = f"list_namespaced_{to_camel_case(kind)}"
                resp = json.loads(
                    getattr(client.CoreV1Api(), func)(
                        namespace,
                        _preload_content=False,
                    ).data,
                )
            else:
                func = f"list_{to_camel_case(kind)}"
                resp = json.loads(
                    getattr(client.CoreV1Api(), func)(_preload_content=False).data,
                )

        for item in resp.get("items", []):
            # List responses omit apiVersion/kind on items; restore them.
            item["apiVersion"] = apiVersion
            item["kind"] = kind
            try:
                resources.append(
                    Resource(
                        apiVersion=apiVersion,
                        kind=kind,
                        name=item["metadata"]["name"],
                        namespace=item["metadata"]["namespace"] if namespace else None,
                        plural=name,
                        raw=json.dumps(item, default=str),
                    ),
                )
            except Exception:
                logger.error(
                    f"Error when processing {kind} - "
                    f"{item['metadata'].get('namespace', '')}:"
                    f"{item['metadata']['name']}",
                )
                traceback.print_exc()

        return resources

    def relationships(
        self,
        initial: bool = True,
    ) -> List[RELATIONSHIP]:
        """Return relationship triples for this object.

        The base implementation links namespaced objects to their
        Namespace; subclasses extend this with kind-specific edges.
        """
        logger.debug(
            f"Generating {'initial' if initial else 'second'} set of relationships",
        )
        relationships: List[RELATIONSHIP] = []

        if self.namespace is not None:
            ns = Resource(name=self.namespace, kind="Namespace")
            relationships += [
                (
                    self,
                    Relationship.WITHIN_NAMESPACE,
                    ns,
                ),
            ]

        return relationships
258 |
259 |
# A raw Cypher fragment plus its parameters, used when a relationship
# endpoint is matched by query instead of by a concrete Resource.
QUERY_RESOURCE = Tuple[str, Dict[str, str]]

# (source, relationship-type(s), target) triple yielded by
# Resource.relationships(); either endpoint may be a QUERY_RESOURCE.
RELATIONSHIP = Tuple[
    Union[Resource, QUERY_RESOURCE],
    Union[str, List[str]],
    Union[Resource, QUERY_RESOURCE],
]
267 |
--------------------------------------------------------------------------------
/icekube/models/cluster.py:
--------------------------------------------------------------------------------
1 | import re
2 | from typing import Any, Dict, List
3 |
4 | from icekube.models.base import RELATIONSHIP, Resource
5 | from pydantic import computed_field
6 |
7 |
class Cluster(Resource):
    """Node representing the Kubernetes cluster itself."""

    version: str
    kind: str = "Cluster"
    apiVersion: str = "N/A"
    plural: str = "clusters"
    # api_group("N/A") == "N", so "N" keeps get_kind_class resolving to
    # this class for the placeholder apiVersion above.
    supported_api_groups: List[str] = ["N"]

    def __repr__(self) -> str:
        return f"Cluster(name='{self.name}', version='{self.version}')"

    @computed_field  # type: ignore
    @property
    def major_minor_version(self) -> float:
        """The version as a float, e.g. "v1.28.3" -> 1.28.

        The pattern now also accepts end-of-string after the minor number,
        so bare versions like "v1.28" parse instead of falling through.
        """
        match = re.match(r"^v?(\d+\.\d+)(?:[^\d]|$)", self.version)
        # failed to retrieve, set to a super new version
        if not match:
            return 100.0
        return float(match.groups()[0])

    @property
    def db_labels(self) -> Dict[str, Any]:
        """Properties stored on the Cluster node in neo4j."""
        return {
            **self.unique_identifiers,
            "plural": self.plural,
            "version": self.version,
            "major_minor": self.major_minor_version,
        }

    def relationships(
        self,
        initial: bool = True,
    ) -> List[RELATIONSHIP]:
        """Link every node whose apiVersion is not 'N/A' to this cluster."""
        relationships = super().relationships()

        query = "MATCH (src) WHERE NOT src.apiVersion = 'N/A' "

        relationships += [((query, {}), "WITHIN_CLUSTER", self)]

        return relationships
47 |
--------------------------------------------------------------------------------
/icekube/models/clusterrole.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import List
5 |
6 | from icekube.models.base import Resource
7 | from icekube.models.policyrule import PolicyRule
8 | from pydantic import computed_field
9 |
10 |
class ClusterRole(Resource):
    """A cluster-scoped RBAC role."""

    supported_api_groups: List[str] = [
        "rbac.authorization.k8s.io",
        "authorization.openshift.io",
    ]

    @computed_field  # type: ignore
    @cached_property
    def rules(self) -> List[PolicyRule]:
        """PolicyRule objects parsed from the raw role definition."""
        return [PolicyRule(**rule) for rule in self.data.get("rules") or []]
27 |
--------------------------------------------------------------------------------
/icekube/models/clusterrolebinding.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import Any, Dict, List, Optional, Union
5 |
6 | from icekube.models.base import RELATIONSHIP, Resource
7 | from icekube.models.clusterrole import ClusterRole
8 | from icekube.models.group import Group
9 | from icekube.models.role import Role
10 | from icekube.models.serviceaccount import ServiceAccount
11 | from icekube.models.user import User
12 | from icekube.relationships import Relationship
13 | from pydantic import computed_field
14 |
15 |
def get_role(
    role_ref: Dict[str, Any],
    namespace: Optional[str] = None,
) -> Union[ClusterRole, Role]:
    """Resolve a binding's roleRef to a (possibly mocked) role object.

    The referenced role is looked up in neo4j and mocked when absent;
    a missing kind defaults to ClusterRole (and is written back into
    role_ref, as before).
    """
    from icekube.neo4j import find_or_mock

    kind = role_ref.setdefault("kind", "ClusterRole")

    if kind == "ClusterRole":
        return find_or_mock(ClusterRole, name=role_ref["name"])

    if kind == "Role":
        return find_or_mock(
            Role,
            name=role_ref["name"],
            namespace=role_ref.get("namespace", namespace),
        )

    raise Exception(f"Unknown RoleRef kind: {role_ref['kind']}")
33 |
34 |
def get_subjects(
    subjects: List[Dict[str, Any]],
    namespace: Optional[str] = None,
) -> List[Union[ServiceAccount, User, Group]]:
    """Convert a binding's subject entries into model objects.

    `namespace` is the fallback for ServiceAccount subjects that omit
    their own namespace; a None subject list yields an empty result.
    """
    results: List[Union[ServiceAccount, User, Group]] = []

    for subject in subjects or []:
        kind = subject["kind"]
        if kind in ("SystemUser", "User"):
            results.append(User(name=subject["name"]))
        elif kind in ("SystemGroup", "Group"):
            results.append(Group(name=subject["name"]))
        elif kind == "ServiceAccount":
            results.append(
                ServiceAccount(
                    name=subject["name"],
                    namespace=subject.get("namespace", namespace),
                ),
            )
        else:
            raise Exception(f"Unknown Subject Kind: {subject['kind']}")

    return results
60 |
61 |
class ClusterRoleBinding(Resource):
    """Binds a role's permissions to subjects cluster-wide."""

    supported_api_groups: List[str] = [
        "rbac.authorization.k8s.io",
        "authorization.openshift.io",
    ]

    @computed_field  # type: ignore
    @cached_property
    def role(self) -> Union[ClusterRole, Role]:
        """The bound role (a blank ClusterRole when roleRef is absent)."""
        role_ref = self.data.get("roleRef")
        return get_role(role_ref) if role_ref else ClusterRole(name="")

    @computed_field  # type: ignore
    @cached_property
    def subjects(self) -> List[Union[ServiceAccount, User, Group]]:
        """The subjects this binding applies to."""
        return get_subjects(self.data.get("subjects", []))

    def relationships(
        self,
        initial: bool = True,
    ) -> List[RELATIONSHIP]:
        """Edges for the binding: role grant, subject bindings, and (on
        the second pass) per-rule permission edges."""
        relationships = super().relationships()

        relationships.append((self, Relationship.GRANTS_PERMISSION, self.role))
        for subject in self.subjects:
            relationships.append((subject, Relationship.BOUND_TO, self))

        cluster_query = (
            "MATCH ({prefix}) WHERE {prefix}.kind =~ ${prefix}_kind ",
            {"apiVersion": "N/A", "kind": "Cluster"},
        )

        if not initial:
            for role_rule in self.role.rules:
                if role_rule.contains_csr_approval:
                    relationships.append(
                        (self, Relationship.HAS_CSR_APPROVAL, cluster_query),
                    )
                for relationship, resource in role_rule.affected_resource_query():
                    relationships.append((self, relationship, resource))

        return relationships
107 |
--------------------------------------------------------------------------------
/icekube/models/group.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import List
4 |
5 | from icekube.models.base import Resource
6 |
7 |
class Group(Resource):
    """A group subject (core or user.openshift.io API group)."""

    plural: str = "groups"
    supported_api_groups: List[str] = ["", "user.openshift.io"]
11 |
--------------------------------------------------------------------------------
/icekube/models/namespace.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import Any, Dict, List, cast
5 |
6 | from icekube.models.base import Resource
7 | from pydantic import computed_field
8 |
9 |
class Namespace(Resource):
    """A Kubernetes namespace."""

    supported_api_groups: List[str] = [""]

    @computed_field  # type: ignore
    @cached_property
    def psa_enforce(self) -> str:
        """Pod Security Admission enforce level from the namespace label;
        an unlabelled namespace is treated as "privileged"."""
        level = self.labels.get("pod-security.kubernetes.io/enforce", "privileged")
        return cast(str, level)

    @property
    def db_labels(self) -> Dict[str, Any]:
        labels = dict(super().db_labels)
        labels["psa_enforce"] = self.psa_enforce
        return labels
26 |
--------------------------------------------------------------------------------
/icekube/models/node.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Any, Dict, List
4 |
5 | from icekube.models.base import Resource
6 | from pydantic import computed_field
7 |
8 |
class Node(Resource):
    """A cluster node (core API group)."""

    supported_api_groups: List[str] = [""]

    @computed_field  # type: ignore
    @property
    def node_roles(self) -> List[str]:
        """Role names parsed from node-role.kubernetes.io/<role> labels."""
        roles: List[str] = []
        for label in self.labels:
            if not label.startswith("node-role.kubernetes.io/"):
                continue
            roles.append(label.split("/", 1)[1])
        return roles

    @property
    def db_labels(self) -> Dict[str, Any]:
        labels = dict(super().db_labels)
        labels["node_roles"] = self.node_roles
        return labels
27 |
--------------------------------------------------------------------------------
/icekube/models/pod.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from itertools import product
5 | from pathlib import Path
6 | from typing import Any, Dict, List, Optional, cast
7 |
8 | import jmespath
9 | from icekube.models.base import RELATIONSHIP, Resource
10 | from icekube.models.node import Node
11 | from icekube.models.secret import Secret
12 | from icekube.models.serviceaccount import ServiceAccount
13 | from icekube.relationships import Relationship
14 | from pydantic import computed_field
15 |
# Linux capability names, used to expand a container's "ALL" capability
# entry into the concrete list (see Pod.capabilities below).
CAPABILITIES = [
    "AUDIT_CONTROL",
    "AUDIT_READ",
    "AUDIT_WRITE",
    "BLOCK_SUSPEND",
    "BPF",
    "CHECKPOINT_RESTORE",
    "CHOWN",
    "DAC_OVERRIDE",
    "DAC_READ_SEARCH",
    "FOWNER",
    "FSETID",
    "IPC_LOCK",
    "IPC_OWNER",
    "KILL",
    "LEASE",
    "LINUX_IMMUTABLE",
    "MAC_ADMIN",
    "MAC_OVERRIDE",
    "MKNOD",
    "NET_ADMIN",
    "NET_BIND_SERVICE",
    "NET_BROADCAST",
    "NET_RAW",
    "PERFMON",
    "SETFCAP",
    "SETGID",
    "SETPCAP",
    "SETUID",
    "SYSLOG",
    "SYS_ADMIN",
    "SYS_BOOT",
    "SYS_CHROOT",
    "SYS_MODULE",
    "SYS_NICE",
    "SYS_PACCT",
    "SYS_PTRACE",
    "SYS_RAWIO",
    "SYS_RESOURCE",
    "SYS_TIME",
    "SYS_TTY_CONFIG",
    "WAKE_ALARM",
]
59 |
60 |
61 | class Pod(Resource):
62 | supported_api_groups: List[str] = [""]
63 |
64 | @computed_field # type: ignore
65 | @cached_property
66 | def service_account(self) -> Optional[ServiceAccount]:
67 | sa = jmespath.search("spec.serviceAccountName", self.data)
68 |
69 | if sa:
70 | return ServiceAccount(name=sa, namespace=self.namespace)
71 | else:
72 | return None
73 |
74 | @computed_field # type: ignore
75 | @cached_property
76 | def node(self) -> Optional[Node]:
77 | node = jmespath.search("spec.nodeName", self.data)
78 |
79 | if node:
80 | return Node(name=node)
81 | else:
82 | return None
83 |
84 | @computed_field # type: ignore
85 | @cached_property
86 | def containers(self) -> List[Dict[str, Any]]:
87 | return cast(
88 | List[Dict[str, Any]], jmespath.search("spec.containers[]", self.data) or []
89 | )
90 |
91 | @computed_field # type: ignore
92 | @cached_property
93 | def capabilities(self) -> List[str]:
94 | capabilities = set()
95 |
96 | for container in self.containers:
97 | addl = jmespath.search("securityContext.capabilities.add", container) or []
98 | addl = [x.upper() for x in addl]
99 | add = set(addl)
100 |
101 | if "ALL" in add:
102 | add.remove("ALL")
103 | add.update(set(CAPABILITIES))
104 |
105 | capabilities.update(add)
106 |
107 | return list(capabilities)
108 |
109 | @computed_field # type: ignore
110 | @cached_property
111 | def privileged(self) -> bool:
112 | privileged = (
113 | jmespath.search("spec.containers[].securityContext.privileged", self.data)
114 | or []
115 | )
116 | return any(privileged)
117 |
118 | @computed_field # type: ignore
119 | @cached_property
120 | def host_path_volumes(self) -> List[str]:
121 | return jmespath.search("spec.volumes[].hostPath.path", self.data) or []
122 |
123 | @computed_field # type: ignore
124 | @cached_property
125 | def hostPID(self) -> bool:
126 | return jmespath.search("spec.hostPID", self.data) or False
127 |
128 | @computed_field # type: ignore
129 | @cached_property
130 | def hostNetwork(self) -> bool:
131 | return jmespath.search("spec.hostNetwork", self.data) or False
132 |
133 | @property
134 | def dangerous_host_path(self) -> bool:
135 | # Dangerous paths to check for
136 | # Not all of these give direct node compromise, but will grant enough
137 | # permissions to maybe steal certificates to help with API server
138 | # as the node, or the like
139 | dangerous_paths = [
140 | "/etc/kubernetes/admin.conf",
141 | "/etc/kubernetes/kubeconfig",
142 | "/etc/shadow",
143 | "/proc/sys/kernel",
144 | "/root/.kube/config",
145 | "/root/.ssh/authorized_keys",
146 | "/run/containerd/containerd.sock",
147 | "/run/containerd/containerd.sock",
148 | "/run/crio/crio.sock",
149 | "/run/cri-dockerd.sock",
150 | "/run/docker.sock",
151 | "/run/dockershim.sock",
152 | "/var/lib/kubelet/pods/",
153 | "/var/lib/kubernetes/",
154 | "/var/lib/minikube/certs/apiserver.key",
155 | "/var/log",
156 | "/var/run/containerd/containerd.sock",
157 | "/var/run/containerd/containerd.sock",
158 | "/var/run/crio/crio.sock",
159 | "/var/run/cri-dockerd.sock",
160 | "/var/run/docker.sock",
161 | "/var/run/dockershim.sock",
162 | ]
163 | for volume, test_path in product(self.host_path_volumes, dangerous_paths):
164 | try:
165 | Path(test_path).relative_to(Path(volume))
166 | return True
167 | except ValueError:
168 | pass
169 | return False
170 |
171 | @property
172 | def mounted_secrets(self) -> List[str]:
173 | secrets = jmespath.search("spec.volumes[].secret.secretName", self.data) or []
174 | secrets += (
175 | jmespath.search(
176 | "spec.containers[].env[].valueFrom.secretKeyRef.name", self.data
177 | )
178 | or []
179 | )
180 |
181 | return secrets
182 |
183 | @property
184 | def db_labels(self) -> Dict[str, Any]:
185 | return {
186 | **super().db_labels,
187 | "capabilities": self.capabilities,
188 | "host_path_volumes": self.host_path_volumes,
189 | "dangerous_host_path": self.dangerous_host_path,
190 | "privileged": self.privileged,
191 | "hostPID": self.hostPID,
192 | "hostNetwork": self.hostNetwork,
193 | }
194 |
195 | def relationships(
196 | self,
197 | initial: bool = True,
198 | ) -> List[RELATIONSHIP]:
199 | relationships = super().relationships()
200 |
201 | if self.service_account:
202 | relationships += [(self, Relationship.USES_ACCOUNT, self.service_account)]
203 | if self.node:
204 | relationships += [(self.node, Relationship.HOSTS_POD, self)]
205 | for secret in self.mounted_secrets:
206 | relationships += [
207 | (
208 | self,
209 | Relationship.MOUNTS_SECRET,
210 | Secret(
211 | namespace=cast(str, self.namespace),
212 | name=secret,
213 | ),
214 | ),
215 | ]
216 |
217 | return relationships
218 |
--------------------------------------------------------------------------------
/icekube/models/policyrule.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from fnmatch import filter as fnfilter
3 | from fnmatch import fnmatch
4 | from typing import Dict, Iterator, List, Optional, Tuple, Union
5 |
6 | from icekube.relationships import Relationship
7 | from pydantic import BaseModel
8 | from pydantic.fields import Field
9 |
10 |
def generate_query(
    filters: Dict[str, Union[str, List[str]]],
) -> Tuple[str, Dict[str, str]]:
    """Build a parameterised Cypher MATCH template from attribute filters.

    Each key maps to a regex (``=~``) comparison on the matched node; a list
    value becomes an OR of one comparison per element. The returned query
    contains literal ``{prefix}`` placeholders which the caller substitutes
    with a node identifier before execution.

    Args:
        filters: attribute name -> regex (or list of regexes) to match.

    Returns:
        A ``(query, parameters)`` tuple; parameter keys omit the prefix
        (e.g. ``name_0``) to line up with the placeholders.
    """
    final_filters: Dict[str, str] = {}
    query_parts = []
    for key, value in filters.items():
        if isinstance(value, list):
            part = " OR ".join(
                f"{{prefix}}.{key} =~ ${{prefix}}_{key}_{idx}"
                for idx in range(len(value))
            )
            query_parts.append(f" ({part}) ")
            for idx, v in enumerate(value):
                final_filters[f"{key}_{idx}"] = v
        else:
            query_parts.append(f" {{prefix}}.{key} =~ ${{prefix}}_{key} ")
            final_filters[key] = value

    query = "MATCH ({prefix})"
    if query_parts:
        # Only emit a WHERE clause when there is something to filter on;
        # previously an empty filter dict produced a dangling "WHERE",
        # which is invalid Cypher.
        query += " WHERE" + "AND".join(query_parts)
    return query, final_filters
31 |
32 |
def remove_version(group: str) -> str:
    """Strip the version component from an apiVersion-style group string.

    ``"apps/v1"`` becomes ``"apps"``; a bare version such as ``"v1"`` (the
    core API group) becomes ``""``.

    Annotations added for consistency with the rest of the (mypy-checked)
    codebase.
    """
    if "/" in group:
        return group.split("/", 1)[0]
    # No "/" means the string is only a version, i.e. the core ("") group.
    return ""
38 |
39 |
class PolicyRule(BaseModel):
    """A single RBAC policy rule (mirrors the Kubernetes PolicyRule type).

    All fields hold fnmatch-style patterns, matching how Kubernetes RBAC
    treats ``*`` wildcards.
    """

    apiGroups: List[str] = Field(default_factory=list)
    nonResourceURLs: List[str] = Field(default_factory=list)
    resourceNames: List[str] = Field(default_factory=list)
    resources: List[str] = Field(default_factory=list)
    verbs: List[str] = Field(default_factory=list)

    @property
    def contains_csr_approval(self) -> bool:
        """True when this rule permits approving CertificateSigningRequests."""
        resource = any(
            fnmatch("certificatesigningrequests/approval", x) for x in self.resources
        )
        verb = any(fnmatch("update", x) for x in self.verbs)

        return resource and verb

    def api_resources(self):
        """Yield every known API resource matched by this rule's groups/resources."""
        from icekube.kube import api_resources

        for api_group, resource in itertools.product(self.apiGroups, self.resources):
            for res in api_resources():
                if fnmatch(remove_version(res.group), api_group) and fnmatch(
                    res.name,
                    resource,
                ):
                    yield res

    def affected_resource_query(
        self,
        namespace: Optional[str] = None,
    ) -> Iterator[Tuple[Union[str, List[str]], Tuple[str, Dict[str, str]]]]:
        """Yield (relationship tag(s), query) pairs for what this rule grants.

        Args:
            namespace: restrict matches to this namespace (RoleBindings);
                ``None`` means cluster-wide (ClusterRoleBindings).

        Yields:
            A single relationship tag or a list of tags, paired with a
            ``generate_query`` result selecting the target nodes.
        """
        for api_resource in self.api_resources():
            resource = api_resource.name
            sub_resource = None
            if "/" in resource:
                resource, sub_resource = resource.split("/")
                # Fix: the original discarded the result of replace(); the
                # normalisation is still harmless for tags since
                # generate_grant also replaces dashes.
                sub_resource = sub_resource.replace("-", "_")

            find_filter = {"apiVersion": api_resource.group, "plural": resource}
            if namespace:
                find_filter["namespace"] = namespace

            # Expand the rule's verb patterns against the verbs the API
            # resource actually supports.
            valid_verbs = set()
            for verb in self.verbs:
                valid_verbs.update(fnfilter(api_resource.verbs, verb.lower()))

            verbs_for_namespace = set("create list".split()).intersection(valid_verbs)

            # create/list on the plain resource are modelled as grants on the
            # enclosing namespace (or the cluster, for cluster-wide rules).
            if verbs_for_namespace and sub_resource is None:
                if namespace:
                    query_filter: Dict[str, Union[str, List[str]]] = {
                        "kind": "Namespace",
                        "name": namespace,
                    }
                else:
                    query_filter = {"apiVersion": "N/A", "kind": "Cluster"}
                for verb in verbs_for_namespace:
                    yield (
                        Relationship.generate_grant(verb.upper(), resource),
                        generate_query(query_filter),
                    )
                query_filter = {"kind": "Namespace"}
                for verb in verbs_for_namespace:
                    yield (
                        Relationship.generate_grant(verb.upper(), resource),
                        generate_query(query_filter),
                    )
                if "create" in verbs_for_namespace:
                    valid_verbs.remove("create")

            if not valid_verbs:
                continue

            tags = [
                Relationship.generate_grant(verb, sub_resource) for verb in valid_verbs
            ]

            if not self.resourceNames:
                yield (tags, generate_query(find_filter))
            else:
                names = [name.replace("*", ".*") for name in self.resourceNames]
                yield (tags, generate_query({**find_filter, "name": names}))

            # Special case for Namespace objects as they are both cluster-wide
            # and namespaced
            if namespace and resource == "namespaces":
                permitted_namespaced_verbs = set("get patch update delete".split())
                namespace_verbs = permitted_namespaced_verbs.intersection(valid_verbs)

                namespace_filter = {
                    k: v for k, v in find_filter.items() if k != "namespace"
                }

                tags = [
                    Relationship.generate_grant(verb, sub_resource)
                    for verb in namespace_verbs
                ]

                # Ensure that any resourceNames still allow the actual namespace name
                # NOTE(review): this matches the patterns against the literal
                # string "namespaces" (not the namespace name) and aborts the
                # whole generator with ``return`` rather than skipping this
                # resource; both look suspicious (fnmatch(namespace, x) and
                # ``continue``?) — left as-is pending confirmation.
                if self.resourceNames:
                    if not any(fnmatch("namespaces", x) for x in self.resourceNames):
                        return

                if tags:
                    yield (
                        tags,
                        generate_query({**namespace_filter, "name": [namespace]}),
                    )
148 |
--------------------------------------------------------------------------------
/icekube/models/role.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import List
5 |
6 | from icekube.models.base import Resource
7 | from icekube.models.policyrule import PolicyRule
8 | from pydantic import computed_field
9 |
10 |
class Role(Resource):
    """Namespaced RBAC role (rbac.authorization.k8s.io / OpenShift)."""

    supported_api_groups: List[str] = [
        "rbac.authorization.k8s.io",
        "authorization.openshift.io",
    ]

    @computed_field  # type: ignore
    @cached_property
    def rules(self) -> List[PolicyRule]:
        """Policy rules parsed from the role's raw ``rules`` list."""
        raw_rules = self.data.get("rules") or []
        return [PolicyRule(**rule) for rule in raw_rules]
27 |
--------------------------------------------------------------------------------
/icekube/models/rolebinding.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import List, Union
5 |
6 | from icekube.models.base import RELATIONSHIP, Resource
7 | from icekube.models.clusterrole import ClusterRole
8 | from icekube.models.clusterrolebinding import get_role, get_subjects
9 | from icekube.models.group import Group
10 | from icekube.models.role import Role
11 | from icekube.models.serviceaccount import ServiceAccount
12 | from icekube.models.user import User
13 | from icekube.relationships import Relationship
14 | from pydantic import computed_field
15 |
16 |
class RoleBinding(Resource):
    """Binds subjects to a Role or ClusterRole within a namespace."""

    supported_api_groups: List[str] = [
        "rbac.authorization.k8s.io",
        "authorization.openshift.io",
    ]

    @computed_field  # type: ignore
    @cached_property
    def role(self) -> Union[ClusterRole, Role]:
        """The bound role; an empty ClusterRole when roleRef is missing."""
        role_ref = self.data.get("roleRef")
        if not role_ref:
            return ClusterRole(name="")
        return get_role(role_ref, self.namespace)

    @computed_field  # type: ignore
    @cached_property
    def subjects(self) -> List[Union[ServiceAccount, User, Group]]:
        """Subjects (service accounts, users, groups) named by this binding."""
        return get_subjects(self.data.get("subjects", []), self.namespace)

    def relationships(
        self,
        initial: bool = True,
    ) -> List[RELATIONSHIP]:
        """Edges: binding grants the role's permissions, subjects bind to it.

        When ``initial`` is False, the role's rules are also expanded into
        per-resource grant edges scoped to this binding's namespace.
        """
        relationships = super().relationships()
        relationships.append((self, Relationship.GRANTS_PERMISSION, self.role))
        for subject in self.subjects:
            relationships.append((subject, Relationship.BOUND_TO, self))

        if not initial:
            for role_rule in self.role.rules:
                for relationship, resource in role_rule.affected_resource_query(
                    self.namespace,
                ):
                    relationships.append((self, relationship, resource))

        return relationships
56 |
--------------------------------------------------------------------------------
/icekube/models/secret.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from functools import cached_property
5 | from typing import Any, Dict, List, Optional, cast
6 |
7 | from icekube.models.base import RELATIONSHIP, Resource
8 | from icekube.relationships import Relationship
9 | from pydantic import computed_field, field_validator
10 |
11 |
class Secret(Resource):
    """Kubernetes Secret whose sensitive payload is stripped before storage."""

    supported_api_groups: List[str] = [""]

    @field_validator("raw")
    @classmethod
    def remove_secret_data(cls, v: Optional[str]) -> Optional[str]:
        """Drop secret values from the raw manifest.

        Removes the ``data`` field, and the ``data`` field embedded in the
        kubectl last-applied-configuration annotation, so secret material
        never reaches the graph database.
        """
        if not v:
            return v

        manifest = json.loads(v)
        manifest.pop("data", None)

        last_applied = (
            manifest.get("metadata", {})
            .get("annotations", {})
            .get("kubectl.kubernetes.io/last-applied-configuration")
        )
        if last_applied:
            parsed = json.loads(last_applied)
            parsed.pop("data", None)
            manifest["metadata"]["annotations"][
                "kubectl.kubernetes.io/last-applied-configuration"
            ] = json.dumps(parsed)

        return json.dumps(manifest)

    @computed_field  # type: ignore
    @cached_property
    def secret_type(self) -> str:
        """The secret's ``type`` field (empty string when unset)."""
        return cast(str, self.data.get("type", ""))

    @computed_field  # type: ignore
    @cached_property
    def annotations(self) -> Dict[str, Any]:
        """Metadata annotations ({} when absent)."""
        return self.data.get("metadata", {}).get("annotations") or {}

    def relationships(self, initial: bool = True) -> List[RELATIONSHIP]:
        """For service-account token secrets, link to the owning account."""
        relationships = super().relationships()

        if self.secret_type == "kubernetes.io/service-account-token":
            # Imported here to avoid a circular import with serviceaccount.
            from icekube.models.serviceaccount import ServiceAccount

            sa = self.annotations.get("kubernetes.io/service-account.name")
            if sa:
                account = ServiceAccount(
                    name=sa,
                    namespace=cast(str, self.namespace),
                )
                relationships.append(
                    (self, Relationship.AUTHENTICATION_TOKEN_FOR, account),
                )

        return relationships
72 |
--------------------------------------------------------------------------------
/icekube/models/securitycontextconstraints.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import List, Union
5 |
6 | from icekube.models.base import RELATIONSHIP, Resource
7 | from icekube.models.group import Group
8 | from icekube.models.serviceaccount import ServiceAccount
9 | from icekube.models.user import User
10 | from pydantic import computed_field
11 |
12 |
class SecurityContextConstraints(Resource):
    """OpenShift SCC: records which users and groups may use the constraint."""

    plural: str = "securitycontextconstraints"
    supported_api_groups: List[str] = ["security.openshift.io"]

    @computed_field  # type: ignore
    @cached_property
    def users(self) -> List[Union[User, ServiceAccount]]:
        """Users granted use of this SCC; SA-formatted names become ServiceAccounts."""
        subjects: List[Union[User, ServiceAccount]] = []

        for entry in self.data.get("users", []):
            if entry.startswith("system:serviceaccount:"):
                # Format: system:serviceaccount:<namespace>:<name>
                ns, name = entry.split(":")[2:]
                subjects.append(ServiceAccount(name=name, namespace=ns))
            else:
                subjects.append(User(name=entry))

        return subjects

    @computed_field  # type: ignore
    @cached_property
    def groups(self) -> List[Group]:
        """Groups granted use of this SCC."""
        return [Group(name=group) for group in self.data.get("groups", [])]

    def relationships(self, initial: bool = True) -> List[RELATIONSHIP]:
        """Edges from every user/group allowed to use this SCC."""
        relationships = super().relationships()
        for subject in self.users + self.groups:
            relationships.append((subject, "GRANTS_USE", self))
        return relationships
45 |
--------------------------------------------------------------------------------
/icekube/models/serviceaccount.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property
4 | from typing import List
5 |
6 | from icekube.models.base import RELATIONSHIP, Resource
7 | from icekube.models.secret import Secret
8 | from icekube.relationships import Relationship
9 | from pydantic import computed_field
10 |
11 |
class ServiceAccount(Resource):
    """Kubernetes ServiceAccount and its associated secrets."""

    supported_api_groups: List[str] = [""]

    @computed_field  # type: ignore
    @cached_property
    def secrets(self) -> List[Secret]:
        """Secrets listed on the account (e.g. token secrets)."""
        raw_secrets = self.data.get("secrets") or []
        return [
            Secret(name=entry.get("name", ""), namespace=self.namespace)
            for entry in raw_secrets
        ]

    def relationships(
        self,
        initial: bool = True,
    ) -> List[RELATIONSHIP]:
        """Each listed secret is an authentication token for this account."""
        relationships = super().relationships()
        for secret in self.secrets:
            relationships.append(
                (secret, Relationship.AUTHENTICATION_TOKEN_FOR, self),
            )
        return relationships
37 |
--------------------------------------------------------------------------------
/icekube/models/signer.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List
2 |
3 | from icekube.models.base import Resource
4 |
5 |
class Signer(Resource):
    """Pseudo-resource naming a certificate signer (CSR signerName)."""

    apiVersion: str = "certificates.k8s.io/v1"
    kind: str = "Signer"
    plural: str = "signers"
    supported_api_groups: List[str] = ["certificates.k8s.io"]

    def __repr__(self) -> str:
        return f"Signer(name={self.name})"

    @property
    def db_labels(self) -> Dict[str, str]:
        """Graph labels: the unique identifiers plus the plural name."""
        labels = dict(self.unique_identifiers)
        labels["plural"] = self.plural
        return labels
21 |
--------------------------------------------------------------------------------
/icekube/models/user.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import List
4 |
5 | from icekube.models.base import Resource
6 |
7 |
class User(Resource):
    """A user subject (core pseudo-resource, or OpenShift user.openshift.io)."""

    plural: str = "users"
    supported_api_groups: List[str] = ["", "user.openshift.io"]
11 |
--------------------------------------------------------------------------------
/icekube/neo4j.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import logging
4 | from typing import Any, Dict, Generator, List, Optional, Tuple, Type, TypeVar
5 |
6 | from icekube.config import config
7 | from icekube.models import Resource
8 | from neo4j import BoltDriver, GraphDatabase
9 | from neo4j.io import ServiceUnavailable
10 |
11 | T = TypeVar("T")
12 |
13 | logger = logging.getLogger(__name__)
14 |
15 |
16 | driver: Optional[BoltDriver] = None
17 |
18 |
def get_driver() -> BoltDriver:
    """Return the module-wide Bolt driver, creating it on first use."""
    global driver

    if driver is None:
        driver = init_connection()

    return driver
26 |
27 |
def init_connection(
    uri: str = "bolt://localhost:7687",
    auth: Tuple[str, str] = ("neo4j", "neo4j"),
    encrypted: bool = False,
) -> BoltDriver:
    """Create a Bolt driver, preferring values from the ``neo4j`` config block.

    The arguments act as fallbacks for any setting the config omits.
    """
    settings = config.get("neo4j", {})
    username = settings.get("username", auth[0])
    password = settings.get("password", auth[1])

    return GraphDatabase.driver(
        settings.get("url", uri),
        auth=(username, password),
        encrypted=settings.get("encrypted", encrypted),
    )
42 |
43 |
def create_index(kind: str, namespace: bool) -> None:
    """Create a neo4j index on name (and namespace, when namespaced) for a kind."""
    fields = ["n.name"]
    if namespace:
        fields.append("n.namespace")
    cmd = f"CREATE INDEX {kind.lower()} FOR (n:{kind}) ON ({', '.join(fields)})"

    with get_driver().session() as session:
        session.run(cmd)
54 |
55 |
def get(
    resource: Resource,
    identifier: str = "",
    prefix: str = "",
) -> Tuple[str, Dict[str, str]]:
    """Build a Cypher MERGE matching a resource by its unique identifiers.

    Args:
        resource: the resource whose identifiers form the MERGE pattern.
        identifier: Cypher variable to bind the node to (defaults to prefix).
        prefix: namespaces the query parameters so several resources can
            share one statement.

    Returns:
        The MERGE fragment and its parameter dict.
    """
    identifier = identifier or prefix
    param_prefix = f"{prefix}_" if prefix else ""

    kwargs: Dict[str, str] = {}
    labels: List[str] = []
    for key, value in resource.unique_identifiers.items():
        labels.append(f"{key}: ${param_prefix}{key}")
        kwargs[f"{param_prefix}{key}"] = value

    cmd = f"MERGE ({identifier}:{resource.kind} {{ {', '.join(labels)} }}) "
    return cmd, kwargs
75 |
76 |
def create(resource: Resource, prefix: str = "") -> Tuple[str, Dict[str, Any]]:
    """Build a Cypher MERGE + SET statement upserting a resource's labels."""
    cmd, kwargs = get(resource, "x", prefix)

    param_prefix = f"{prefix}_" if prefix else ""

    labels: List[str] = []
    for key, value in resource.db_labels.items():
        labels.append(f"{key}: ${param_prefix}{key}")
        kwargs[f"{param_prefix}{key}"] = value

    cmd += f"SET x += {{ {', '.join(labels)} }} "
    return cmd, kwargs
92 |
93 |
def find(
    resource: Optional[Type[Resource]] = None,
    raw: bool = False,
    **kwargs: str,
) -> Generator[Resource, None, None]:
    """Yield resources from neo4j matching the given attribute filters.

    Args:
        resource: restrict results to this resource class; ``None`` (or the
            base ``Resource``) matches nodes of any label.
        raw: only return nodes that have their raw manifest stored.
        **kwargs: exact-match attribute filters.
    """
    filters = ", ".join(f"{key}: ${key}" for key in kwargs)
    if resource is None or resource is Resource:
        cmd = f"MATCH (x {{ {filters} }}) "
    else:
        cmd = f"MATCH (x:{resource.__name__} {{ {filters} }}) "

    if raw:
        cmd += "WHERE EXISTS (x.raw) "

    cmd += "RETURN x"

    driver = get_driver()

    with driver.session() as session:
        logger.debug(f"Starting neo4j query: {cmd}, {kwargs}")

        for record in session.run(cmd, kwargs):
            props = record[0]._properties
            logger.debug(
                f"Loading resource: {props['kind']} "
                f"{props.get('namespace', '')} {props['name']}",
            )

            # Fall back to the generic Resource when no class was requested.
            if resource is None:
                yield Resource(**props)
            else:
                yield resource(**props)
130 |
131 |
def find_or_mock(resource: Type[T], **kwargs: str) -> T:
    """Return the first matching resource from neo4j, or a mocked instance.

    Falls back to constructing ``resource(**kwargs)`` when nothing matches
    or the database is unreachable.
    """
    try:
        matches = find(resource, **kwargs)
        return next(matches)  # type: ignore
    except (StopIteration, IndexError, ServiceUnavailable):
        return resource(**kwargs)
137 |
--------------------------------------------------------------------------------
/icekube/relationships.py:
--------------------------------------------------------------------------------
1 | from typing import ClassVar, Optional
2 |
3 |
class Relationship:
    """Consolidates the various relationship types into a single class.

    This allows for better tracking of where we assign each relationship
    across the codebase.

    Relationships in the order (ObjectOne, RELATIONSHIP, ObjectTwo) are
    in this direction in neo4j: (ObjectOne)-[:RELATIONSHIP]->(ObjectTwo)
    """

    IS_CLUSTER_ADMIN: ClassVar[str] = "IS_CLUSTER_ADMIN"

    HOSTS_POD: ClassVar[str] = "HOSTS_POD"

    AUTHENTICATION_TOKEN_FOR: ClassVar[str] = "AUTHENTICATION_TOKEN_FOR"
    GET_AUTHENTICATION_TOKEN_FOR: ClassVar[str] = "GET_AUTHENTICATION_TOKEN_FOR"
    CREATE_SECRET_WITH_TOKEN: ClassVar[str] = "CREATE_SECRET_WITH_TOKEN"

    WITHIN_NAMESPACE: ClassVar[str] = "WITHIN_NAMESPACE"

    # Workload-creation grants, one per creatable workload kind.
    GRANTS_PODS_CREATE: ClassVar[str] = "GRANTS_PODS_CREATE"
    GRANTS_REPLICATIONCONTROLLERS_CREATE: ClassVar[str] = (
        "GRANTS_REPLICATIONCONTROLLERS_CREATE"
    )
    GRANTS_DAEMONSETS_CREATE: ClassVar[str] = "GRANTS_DAEMONSETS_CREATE"
    GRANTS_DEPLOYMENTS_CREATE: ClassVar[str] = "GRANTS_DEPLOYMENTS_CREATE"
    GRANTS_REPLICASETS_CREATE: ClassVar[str] = "GRANTS_REPLICASETS_CREATE"
    GRANTS_STATEFULSETS_CREATE: ClassVar[str] = "GRANTS_STATEFULSETS_CREATE"
    GRANTS_CRONJOBS_CREATE: ClassVar[str] = "GRANTS_CRONJOBS_CREATE"
    GRANTS_JOBS_CREATE: ClassVar[str] = "GRANTS_JOBS_CREATE"

    GRANTS_AZUREPODIDENTITYEXCEPTIONS_CREATE: ClassVar[str] = (
        "GRANTS_AZUREPODIDENTITYEXCEPTIONS_CREATE"
    )
    GRANTS_CERTIFICATESIGNINGREQUESTS_CREATE: ClassVar[str] = (
        "GRANTS_CERTIFICATESIGNINGREQUESTS_CREATE"
    )
    GRANTS_PROXY_CREATE: ClassVar[str] = "GRANTS_PROXY_CREATE"

    # Generic verb grants on a resource.
    GRANTS_GET: ClassVar[str] = "GRANTS_GET"
    GRANTS_LIST: ClassVar[str] = "GRANTS_LIST"
    GRANTS_UPDATE: ClassVar[str] = "GRANTS_UPDATE"
    GRANTS_WATCH: ClassVar[str] = "GRANTS_WATCH"
    GRANTS_PATCH: ClassVar[str] = "GRANTS_PATCH"
    GRANTS_APPROVE: ClassVar[str] = "GRANTS_APPROVE"
    GRANTS_PERMISSION: ClassVar[str] = "GRANTS_PERMISSION"

    GRANTS_ESCALATE: ClassVar[str] = "GRANTS_ESCALATE"
    GRANTS_IMPERSONATE: ClassVar[str] = "GRANTS_IMPERSONATE"
    GRANTS_TOKEN_CREATE: ClassVar[str] = "GRANTS_TOKEN_CREATE"
    GRANTS_EPHEMERAL_PATCH: ClassVar[str] = "GRANTS_EPHEMERAL_PATCH"

    BOUND_TO: ClassVar[str] = "BOUND_TO"
    USES_ACCOUNT: ClassVar[str] = "USES_ACCOUNT"
    MOUNTS_SECRET: ClassVar[str] = "MOUNTS_SECRET"
    CREATE_POD_WITH_SA: ClassVar[str] = "CREATE_POD_WITH_SA"
    UPDATE_WORKLOAD_WITH_SA: ClassVar[str] = "UPDATE_WORKLOAD_WITH_SA"
    CREATE_PRIVILEGED_WORKLOAD: ClassVar[str] = "CREATE_PRIVILEGED_WORKLOAD"
    PATCH_NAMESPACE_TO_BYPASS_PSA: ClassVar[str] = "PATCH_NAMESPACE_TO_BYPASS_PSA"

    EXEC_INTO: ClassVar[str] = "EXEC_INTO"
    REPLACE_IMAGE: ClassVar[str] = "REPLACE_IMAGE"
    DEBUG_POD: ClassVar[str] = "DEBUG_POD"

    ACCESS_SECRET: ClassVar[str] = "ACCESS_SECRET"
    GENERATE_TOKEN: ClassVar[str] = "GENERATE_TOKEN"
    RBAC_ESCALATE_TO: ClassVar[str] = "RBAC_ESCALATE_TO"

    GENERATE_CLIENT_CERTIFICATE: ClassVar[str] = "GENERATE_CLIENT_CERTIFICATE"
    HAS_CSR_APPROVAL: ClassVar[str] = "HAS_CSR_APPROVAL"

    CAN_IMPERSONATE: ClassVar[str] = "CAN_IMPERSONATE"

    # Node/container breakout primitives.
    IS_PRIVILEGED: ClassVar[str] = "IS_PRIVILEGED"
    CAN_CGROUP_BREAKOUT: ClassVar[str] = "CAN_CGROUP_BREAKOUT"
    CAN_LOAD_KERNEL_MODULES: ClassVar[str] = "CAN_LOAD_KERNEL_MODULES"
    CAN_ACCESS_DANGEROUS_HOST_PATH: ClassVar[str] = "CAN_ACCESS_DANGEROUS_HOST_PATH"
    CAN_NSENTER_HOST: ClassVar[str] = "CAN_NSENTER_HOST"
    CAN_ACCESS_HOST_FD: ClassVar[str] = "CAN_ACCESS_HOST_FD"
    CAN_EXEC_THROUGH_KUBELET: ClassVar[str] = "CAN_EXEC_THROUGH_KUBELET"

    ACCESS_POD: ClassVar[str] = "ACCESS_POD"
    UPDATE_AWS_AUTH: ClassVar[str] = "UPDATE_AWS_AUTH"
    AZURE_POD_IDENTITY_EXCEPTION: ClassVar[str] = "AZURE_POD_IDENTITY_EXCEPTION"

    # Current resource defines the spec/creation of the subresource
    DEFINES: ClassVar[str] = "DEFINES"
    # Defines a reference to another object (e.g. Pod -> ServiceAccount)
    REFERENCES: ClassVar[str] = "REFERENCES"
    # Directly consumes a resource (e.g. PersistentVolumeClaim -> PersistentVolume)
    CONSUMES: ClassVar[str] = "CONSUMES"
    # Indirectly consumes a resource, without an exclusive relationship to the
    # referring node (e.g. PersistentVolume -> StorageClass)
    USES: ClassVar[str] = "USES"
    # Defines ownership of a resource (e.g. Deployment-[:OWNS]->ReplicaSet)
    OWNS: ClassVar[str] = "OWNS"

    @staticmethod
    def generate_grant(verb: str, sub_resource: Optional[str]) -> str:
        """Build a dynamic GRANTS_* relationship tag for a verb.

        With no sub-resource the tag is ``GRANTS_<VERB>``; with one it is
        ``GRANTS_<SUB_RESOURCE>_<VERB>``. Dashes are normalised to
        underscores so the result is a valid neo4j relationship name.
        """
        if sub_resource is None:
            return f"GRANTS_{verb.upper()}".replace("-", "_")

        return f"GRANTS_{sub_resource}_{verb}".upper().replace("-", "_")
107 |
--------------------------------------------------------------------------------
/icekube/utils.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 |
def to_camel_case(string: str) -> str:
    """Convert a CamelCase / kebab-case identifier to snake_case.

    NOTE(review): despite the name, this produces snake_case (e.g.
    ``"HTTPServer"`` -> ``"http_server"``); callers rely on that behaviour,
    so the name is kept for compatibility.
    """
    # Split a run of capitals from a following capitalised word (HTTPServer
    # -> HTTP_Server), then split lower/digit-to-upper boundaries.
    split_acronyms = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", string)
    split_words = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", split_acronyms)
    return split_words.replace("-", "_").lower()
9 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | mypy_path = stubs
3 |
4 | warn_unused_ignores = True
5 | warn_return_any = True
6 |
7 | strict_optional = True
8 | no_implicit_optional = True
9 |
10 | #disallow_any_unimported = True
11 | #disallow_any_expr = True
12 | #disallow_any_decorated = True
13 | #disallow_any_explicit = True
14 | disallow_subclassing_any = True
15 | disallow_any_generics = True
16 |
17 | # disallow_untyped_calls = True
18 | # disallow_untyped_defs = True
19 | # disallow_incomplete_defs = True
20 | disallow_untyped_decorators = True
21 |
22 | check_untyped_defs = True
23 |
24 | ignore_missing_imports = True
25 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "icekube"
3 | version = "1.1.0"
4 | description = ""
5 | authors = ["Mohit Gupta "]
6 |
7 | [tool.poetry.dependencies]
8 | python = ">= 3.8.1"
9 | kubernetes = "^28.1.0"
10 | neo4j = "^4.4.11"
11 | typer = "^0.9"
12 | pydantic = "^2.5.2"
13 | tqdm = "^4.66.1"
14 | jmespath = "^1.0.1"
15 |
16 | [tool.poetry.group.dev.dependencies]
17 | types-pyyaml = "*"
18 | ipython = "*"
19 | flake8 = "^6.1.0"
20 | flake8-comprehensions = "*"
21 | flake8-debugger = "*"
22 | flake8-docstrings = "*"
23 | flake8-isort = "*"
24 | flake8-mutable = "*"
25 | flake8-todo = "*"
26 | isort = {version = "^4.3.21", extras = ["pyproject"]}
27 | mypy = "*"
28 | pyflakes = "*"
29 | pytest = "*"
30 | pytest-cov = "*"
31 | rope = "*"
32 | pydocstyle = "*"
33 | black = "*"
34 | pdbpp = "*"
35 | pyrepl = {git = "https://github.com/pypy/pyrepl"}
36 | setuptools = "*"
37 |
38 | [tool.poetry.scripts]
39 | icekube = 'icekube.cli:app'
40 |
41 | [build-system]
42 | requires = ["poetry>=0.12"]
43 | build-backend = "poetry.masonry.api"
44 |
--------------------------------------------------------------------------------