├── .github
│   └── workflows
│       ├── built_test_on_pull.yml
│       └── goreleaser.yml
├── .gitignore
├── .goreleaser.yaml
├── LICENSE
├── Makefile
├── README.md
├── SUPPORT.md
├── cmd
│   ├── collect.go
│   ├── eval.go
│   ├── expand.go
│   └── root.go
├── docs
│   ├── collect.md
│   ├── eval.md
│   ├── example.png
│   ├── expand.md
│   ├── logo.png
│   ├── policies.md
│   └── prevent
│       ├── README.md
│       ├── suspicious_assignment_of_controller_service_accounts
│       │   ├── constraint.yaml
│       │   └── template.yaml
│       └── suspicious_self_subject_review
│           ├── constraint.yaml
│           └── template.yaml
├── go.mod
├── go.sum
├── lib
│   ├── approve_csrs.rego
│   ├── assign_sa.rego
│   ├── bind_roles.rego
│   ├── cluster_admin.rego
│   ├── control_webhooks.rego
│   ├── eks_modify_aws_auth.rego
│   ├── escalate_roles.rego
│   ├── impersonate.rego
│   ├── issue_token_secrets.rego
│   ├── list_secrets.rego
│   ├── modify_node_status.rego
│   ├── modify_pod_status.rego
│   ├── modify_pods.rego
│   ├── modify_service_status_cve_2020_8554.rego
│   ├── nodes_proxy.rego
│   ├── obtain_token_weak_ns.rego
│   ├── pods_ephemeral_ctrs.rego
│   ├── pods_exec.rego
│   ├── providerIAM.rego
│   ├── rce_weak_ns.rego
│   ├── retrieve_token_secrets.rego
│   ├── steal_pods.rego
│   ├── token_request.rego
│   └── utils
│       ├── builtins.rego
│       └── wrapper.rego
├── main.go
├── pkg
│   ├── collect
│   │   ├── cluster_db.go
│   │   ├── collect.go
│   │   ├── discover_protections.go
│   │   ├── offline.go
│   │   ├── rbac_db.go
│   │   └── types.go
│   ├── eval
│   │   ├── eval.go
│   │   ├── types.go
│   │   ├── utils.go
│   │   └── wrapper.go
│   ├── expand
│   │   ├── expand.go
│   │   └── types.go
│   └── utils
│       └── utils.go
└── utils
    ├── generate_policylib_docs.py
    ├── get_cluster_data.sh
    ├── update_policy_doc.sh
    └── update_policy_to_use_targets.py
/.github/workflows/built_test_on_pull.yml:
--------------------------------------------------------------------------------
1 | name: build_test_on_pull
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 |
8 | permissions:
9 | contents: read
10 |
11 | jobs:
12 | goreleaser:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v2
17 | with:
18 | fetch-depth: 0
19 |
20 | - name: Fetch all tags
21 | run: git fetch --force --tags
22 |
23 | - name: Set up Go
24 | uses: actions/setup-go@v2
25 | with:
26 | go-version: 1.19
27 |
28 | - name: Run GoReleaser
29 | uses: goreleaser/goreleaser-action@v3.1.0
30 | with:
31 | distribution: goreleaser
32 | version: latest
33 | args: release --skip-publish
34 |
--------------------------------------------------------------------------------
/.github/workflows/goreleaser.yml:
--------------------------------------------------------------------------------
1 | name: goreleaser
2 |
3 | on:
4 | push:
5 | tags:
6 | - '*'
7 |
8 | permissions:
9 | contents: write
10 |
11 | jobs:
12 | goreleaser:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v2
17 | with:
18 | fetch-depth: 0
19 |
20 | - name: Fetch all tags
21 | run: git fetch --force --tags
22 |
23 | - name: Set up Go
24 | uses: actions/setup-go@v2
25 | with:
26 | go-version: 1.19
27 |
28 | - name: Run GoReleaser
29 | uses: goreleaser/goreleaser-action@v3.1.0
30 | with:
31 | distribution: goreleaser
32 | version: latest
33 | args: release
34 | env:
35 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
36 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | results/
2 | results
3 | rbac-police
4 | lib/ignore
5 | .DS_Store
6 | .idea/
7 | local_*/
8 | local/
9 | custom_lib/
--------------------------------------------------------------------------------
/.goreleaser.yaml:
--------------------------------------------------------------------------------
1 | project_name: rbac-police
2 | before:
3 | hooks:
4 | - go mod tidy
5 | builds:
6 | - id: rbac-police
7 | env:
8 | - CGO_ENABLED=0
9 | goos:
10 | - linux
11 | - darwin
12 | goarch:
13 | - amd64
14 | - arm64
15 | archives:
16 | - builds:
17 | - rbac-police
18 | format: binary
19 | name_template: "{{ .ProjectName }}_{{ .Tag }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
20 | checksum:
21 | name_template: 'checksums.txt'
22 | algorithm: sha256
23 | snapshot:
24 | name_template: "{{ incpatch .Version }}-next"
25 | changelog:
26 | filters:
27 | exclude:
28 | - '[Tt]ypos?'
29 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright © 2022 Palo Alto Networks
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | go mod tidy
3 | CGO_ENABLED=0 go build -buildmode pie
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # rbac-police
2 | Retrieve the RBAC permissions of Kubernetes identities - service accounts, pods, nodes, users and groups - and evaluate them using policies written in Rego.
3 |
4 | 
5 |
6 | The [policy library](./lib) includes over 20 policies that detect identities possessing risky permissions, each alerting on a different attack path.
7 |
8 | ## Quick Start
9 |
10 | 1. Clone the repository:
11 |
12 | ```shell
13 | git clone https://github.com/PaloAltoNetworks/rbac-police && cd rbac-police
14 | ```
15 | 2. Either install `rbac-police` from a release:
16 |
17 | ```shell
18 | OS=linux # OS=darwin
19 | ARCH=amd64 # ARCH=arm64
20 | LATEST_TAG=$(curl -s https://api.github.com/repos/PaloAltoNetworks/rbac-police/releases/latest | jq -r '.tag_name')
21 | curl -L -o rbac-police "https://github.com/PaloAltoNetworks/rbac-police/releases/download/${LATEST_TAG}/rbac-police_${LATEST_TAG}_${OS}_${ARCH}" && chmod +x rbac-police
22 | ```
23 | Or build it with [Golang](https://go.dev/doc/install)>=1.16:
24 |
25 | ```shell
26 | go build
27 | ```
28 | 3. Connect `kubectl` to a Kubernetes cluster.
29 | 4. Evaluate RBAC permissions and identify privilege escalation paths in your cluster using the default policy library:
30 |
31 | ```
32 | ./rbac-police eval lib/
33 | ```
34 | 5. Inspect the permissions of violating principals and identify the Roles and ClusterRoles granting them risky privileges. See the Recommendations section [here](https://www.paloaltonetworks.com/resources/whitepapers/kubernetes-privilege-escalation-excessive-permissions-in-popular-platforms) for remediation advice.
35 | ```
36 | ./rbac-police expand -z sa=production-ns:violating-sa
37 | ```
38 |
39 | ## Usage
40 | ### Set severity threshold
41 | Only evaluate policies with a severity equal to or higher than a threshold.
42 | ```
43 | ./rbac-police eval lib/ -s High
44 | ```
45 | ### Inspect the permissions of specific identities
46 | ```
47 | ./rbac-police expand -z sa=kube-system:metrics-server
48 | ./rbac-police expand -z user=example@email.com
49 | ./rbac-police expand # all identities
50 | ```
51 | ### Discover protections
52 | Improve accuracy by considering feature gates and admission controllers that can protect against certain attacks. Note that [NodeRestriction](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction) is identified by impersonating a node and *dry-run creating a pod*, which may be logged by some systems. A conceptual sketch of this probe is shown after the command below.
53 | ```
54 | ./rbac-police eval lib/ -w
55 | ```
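For reference, the NodeRestriction probe described above is conceptually similar to the client-go sketch below. This is an illustration only, not the tool's actual code (see `pkg/collect/discover_protections.go`); the function name, namespace, and error handling here are assumptions.

```go
// Illustrative sketch: impersonate a node and dry-run create a pod to check
// whether the NodeRestriction admission controller would block the request.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func probeNodeRestriction(cfg *rest.Config, nodeName string) (bool, error) {
	// Impersonate the node's identity
	impersonated := rest.CopyConfig(cfg)
	impersonated.Impersonate = rest.ImpersonationConfig{
		UserName: "system:node:" + nodeName,
		Groups:   []string{"system:nodes", "system:authenticated"},
	}
	client, err := kubernetes.NewForConfig(impersonated)
	if err != nil {
		return false, err
	}
	// Dry-run create a pod that isn't bound to the impersonated node;
	// NodeRestriction rejects such requests
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "noderestriction-probe-"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "probe", Image: "registry.k8s.io/pause"}},
		},
	}
	_, err = client.CoreV1().Pods("default").Create(
		context.TODO(), pod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}},
	)
	if apierrors.IsForbidden(err) {
		return true, nil // request denied, NodeRestriction (or RBAC) blocked it
	}
	return false, err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	enabled, err := probeNodeRestriction(cfg, "some-node-name")
	if err != nil {
		panic(err)
	}
	fmt.Println("NodeRestriction likely enabled:", enabled)
}
```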
56 | ### Configure violation types
57 | Control which identities are evaluated for violations. The default is `sa,node,combined` (see [policies.md](docs/policies.md) for more information).
58 | ```
59 | ./rbac-police eval lib/ --violations sa,user
60 | ./rbac-police eval lib/ --violations all # sa,node,combined,user,group
61 | ```
62 | Note that by default, `rbac-police` only looks into service accounts assigned to a pod. Use `-a` to include all service accounts.
63 | ### Scope to a namespace
64 | Only look into service accounts and pods from a certain namespace.
65 | ```
66 | ./rbac-police eval lib/ -n production
67 | ```
68 | ### Only SAs that exist on all nodes
69 | Only alert on service accounts that exist on all nodes. Useful for identifying violating DaemonSets.
70 | ```
71 | ./rbac-police eval lib/ --only-sas-on-all-nodes
72 | ```
73 | ### Ignore control plane
74 | Ignore control plane pods and nodes in clusters that host the control plane.
75 | ```
76 | ./rbac-police eval lib/ --ignore-controlplane
77 | ```
78 | ### Collect once for multiple evaluations
79 | ```
80 | ./rbac-police collect -o rbacDb.json
81 |
82 | ./rbac-police eval lib/ rbacDb.json -s High
83 | ./rbac-police eval lib/ rbacDb.json -s Medium --only-sas-on-all-nodes
84 | ./rbac-police expand rbacDb.json -z sa=ns:violating-sa
85 | ```
86 |
87 | ## Documentation
88 | - [Policies](docs/policies.md)
89 | - [Eval command](docs/eval.md)
90 | - [Collect command](docs/collect.md)
91 | - [Expand command](docs/expand.md)
92 |
93 | ## Media Mentions
94 | Radiohead:
95 | > rbac-police, I've given all I can. It's not enough...
96 |
97 | N.W.A:
98 | > rbac-police comin' straight from the underground!
99 |
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | Community Supported
2 |
3 | The software and templates in the repo are released under an as-is, best effort,
4 | support policy. This software should be seen as community supported and Palo
5 | Alto Networks will contribute our expertise as and when possible. We do not
6 | provide technical support or help in using or troubleshooting the components of
7 | the project through our normal support options such as Palo Alto Networks
8 | support teams, or ASC (Authorized Support Centers) partners and backline support
9 | options. The underlying product (the VM-Series firewall) used by the scripts or
10 | templates is still supported, but the support is only for the product
11 | functionality and not for help in deploying or using the template or script
12 | itself. Unless explicitly tagged, all projects or work posted in our GitHub
13 | repository (at https://github.com/PaloAltoNetworks) or sites other than our
14 | official Downloads page on https://support.paloaltonetworks.com are provided
15 | under the best effort policy.
16 |
--------------------------------------------------------------------------------
/cmd/collect.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
5 | "github.com/spf13/cobra"
6 |
7 | log "github.com/sirupsen/logrus"
8 | )
9 |
10 | // collectCmd represents the collect command
11 | var (
12 | collectCmd = &cobra.Command{
13 | Use: "collect",
14 | Short: "Collects the RBAC permissions of Kubernetes identities",
15 | Run: runCollect,
16 | }
17 | )
18 |
19 | func runCollect(cmd *cobra.Command, args []string) {
20 | collectResult := collect.Collect(collectConfig)
21 | if collectResult == nil {
22 | return // error printed by Collect()
23 | }
24 |
25 | // Output collect results
26 | output, err := marshalResults(collectResult)
27 | if err != nil {
28 | log.Errorln("runCollect: failed to marshal collectResult with", err)
29 | return
30 | }
31 | outputResults(output)
32 | }
33 |
34 | func init() {
35 | rootCmd.AddCommand(collectCmd)
36 | }
37 |
--------------------------------------------------------------------------------
/cmd/eval.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 |
7 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
8 | "github.com/PaloAltoNetworks/rbac-police/pkg/eval"
9 | "github.com/PaloAltoNetworks/rbac-police/pkg/utils"
10 | "github.com/spf13/cobra"
11 |
12 | log "github.com/sirupsen/logrus"
13 | )
14 |
15 | // evalCmd represents the eval command
16 | var (
17 | evalCmd = &cobra.Command{
18 | Use: "eval [rbac-json]",
19 | Short: "Evaluates RBAC permissions of Kubernetes identities using Rego policies",
20 | Run: runEval,
21 | }
22 |
23 | evalConfig eval.EvalConfig
24 | shortMode bool
25 | violations []string
26 | )
27 |
28 | func runEval(cmd *cobra.Command, args []string) {
29 | var (
30 | collectResult collect.CollectResult
31 | output []byte
32 | err error
33 | )
34 |
35 | if len(args) < 1 {
36 | fmt.Println("[!] No policies specified")
37 | cmd.Help()
38 | return
39 | }
40 | policyPath := args[0]
41 |
42 | if len(violations) == 0 {
43 | fmt.Println("[!] Cannot disable all violation types")
44 | cmd.Help()
45 | return
46 | }
47 |
48 | // Set the violations user asked to search for
49 | for _, violationType := range violations {
50 | if violationType == "all" {
51 | evalConfig.SaViolations = true
52 | evalConfig.NodeViolations = true
53 | evalConfig.CombinedViolations = true
54 | evalConfig.UserViolations = true
55 | evalConfig.GroupViolations = true
56 | break
57 | }
58 | if violationType == "sa" || violationType == "sas" {
59 | evalConfig.SaViolations = true
60 | } else if violationType == "node" || violationType == "nodes" {
61 | evalConfig.NodeViolations = true
62 | } else if violationType == "combined" {
63 | evalConfig.CombinedViolations = true
64 | } else if violationType == "user" || violationType == "users" {
65 | evalConfig.UserViolations = true
66 | } else if violationType == "group" || violationType == "groups" {
67 | evalConfig.GroupViolations = true
68 | } else {
69 | fmt.Printf("[!] Unrecognized violation type '%s', supported types are 'sa', 'node', 'combined', 'user', 'group' or 'all'\n", violationType)
70 | cmd.Help()
71 | return
72 | }
73 | }
74 |
75 | // Get RBAC input
76 | if len(args) > 1 {
77 | // Read RBAC from file
78 | if collectionOptionsSet() {
79 | fmt.Println("[!] Can only set collection options when collecting")
80 | cmd.Help()
81 | return
82 | }
83 | collectResultBytes, err := utils.ReadFile(args[1])
84 | if err != nil {
85 | return
86 | }
87 | err = json.Unmarshal(collectResultBytes, &collectResult)
88 | if err != nil {
89 | log.Errorf("runEval: failed to unmarshal %v into a CollectResult object with %v\n", args[1], err)
90 | return
91 | }
92 | } else {
93 | // Collect RBAC from remote cluster
94 | collectResultPtr := collect.Collect(collectConfig)
95 | if collectResultPtr == nil {
96 | return // error printed by Collect()
97 | }
98 | collectResult = *collectResultPtr
99 | }
100 |
101 | policyResults := eval.Eval(policyPath, collectResult, evalConfig)
102 | if policyResults == nil {
103 | return // error printed by Eval()
104 | }
105 |
106 | if !shortMode {
107 | output, err = marshalResults(policyResults)
108 | if err != nil {
109 | log.Errorln("runEval: failed to marshal results with", err)
110 | return
111 | }
112 | } else {
113 | abbreviatedResults := eval.AbbreviateResults(policyResults)
114 | output, err = marshalResults(abbreviatedResults)
115 | if err != nil {
116 | log.Errorln("runEval: failed to marshal abbreviated results with", err)
117 | return
118 | }
119 | }
120 | outputResults(output)
121 | }
122 |
123 | func init() {
124 | evalCmd.Flags().BoolVar(&shortMode, "short", false, "abbreviate results")
125 | evalCmd.Flags().BoolVarP(&evalConfig.DebugMode, "debug", "d", false, "debug mode, prints debug info and stdout of policies")
126 | evalCmd.Flags().BoolVar(&evalConfig.OnlySasOnAllNodes, "only-sas-on-all-nodes", false, "only evaluate serviceAccounts that exist on all nodes")
127 | evalCmd.Flags().StringVarP(&evalConfig.SeverityThreshold, "severity-threshold", "s", "Low", "only evaluate policies with severity >= threshold")
128 | evalCmd.Flags().StringSliceVar(&evalConfig.IgnoredNamespaces, "ignored-namespaces", []string{}, "ignore serviceAccounts from certain namespaces during eval") // TODO: consider moving to collect and implement via field selectors
129 | evalCmd.Flags().StringSliceVar(&violations, "violations", []string{"sa", "node", "combined"}, "violations to search for; besides the defaults, also supports 'user', 'group' and 'all'")
130 |
131 | rootCmd.AddCommand(evalCmd)
132 | }
133 |
--------------------------------------------------------------------------------
/cmd/expand.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "strings"
7 |
8 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
9 | "github.com/PaloAltoNetworks/rbac-police/pkg/expand"
10 | "github.com/PaloAltoNetworks/rbac-police/pkg/utils"
11 | log "github.com/sirupsen/logrus"
12 | "github.com/spf13/cobra"
13 | )
14 |
15 | // expandCmd represents the expand command
16 | var (
17 | expandCmd = &cobra.Command{
18 | Use: "expand [rbac-json]",
19 | Short: "Presents the RBAC permissions of Kubernetes identities in a (more) human-readable format",
20 | Long: `Presents the RBAC permissions of Kubernetes identities in a (more) human-readable format for manual drill down.
21 | This is done by repeating the entire permissions of each role under each identity that has it.`,
22 | Run: runExpand,
23 | }
24 |
25 | zoomedIdentity string
26 | )
27 |
28 | func runExpand(cmd *cobra.Command, args []string) {
29 | var (
30 | collectResult collect.CollectResult
31 | output []byte
32 | err error
33 | zoomedType string
34 | zoomedName string
35 | zoomedNamespace string
36 | )
37 |
38 | // If zoomedIdentity is used, parse it
39 | if zoomedIdentity != "" {
40 | zoomedType, zoomedName, zoomedNamespace = parseZoomedIdentity(zoomedIdentity)
41 | if zoomedType == "" {
42 | cmd.Help()
43 | return
44 | }
45 | }
46 |
47 | // Get RBAC JSON
48 | if len(args) > 0 {
49 | if collectionOptionsSet() {
50 | fmt.Println("[!] Can only set collection options when collecting")
51 | cmd.Help()
52 | return
53 | }
54 | collectResultBytes, err := utils.ReadFile(args[0])
55 | if err != nil {
56 | return
57 | }
58 | err = json.Unmarshal(collectResultBytes, &collectResult)
59 | if err != nil {
60 | log.Errorf("runExpand: failed to unmarshal %v into a CollectResult object with %v\n", args[0], err)
61 | return
62 | }
63 | } else {
64 | collectResultPtr := collect.Collect(collectConfig)
65 | if collectResultPtr == nil {
66 | return // error printed by Collect()
67 | }
68 | collectResult = *collectResultPtr
69 | }
70 |
71 | // Expand collection results
72 | expandResult := expand.Expand(collectResult)
73 | if expandResult == nil {
74 | return // error printed by Expand()
75 | }
76 |
77 | // Marshal results
78 | if zoomedIdentity == "" {
79 | output, err = marshalResults(expandResult)
80 | } else {
81 | // Zoom on a specific identity // TODO: consider only collecting / expanding the zoomed identity
82 | if zoomedType == "sa" {
83 | for _, sa := range expandResult.ServiceAccounts {
84 | if sa.Name == zoomedName && sa.Namespace == zoomedNamespace {
85 | output, err = marshalResults(sa)
86 | break
87 | }
88 | }
89 | } else if zoomedType == "node" {
90 | for _, node := range expandResult.Nodes {
91 | if node.Name == zoomedName {
92 | output, err = marshalResults(node)
93 | break
94 | }
95 | }
96 | } else if zoomedType == "user" {
97 | for _, user := range expandResult.Users {
98 | if user.Name == zoomedName {
99 | output, err = marshalResults(user)
100 | break
101 | }
102 | }
103 | } else if zoomedType == "group" {
104 | for _, grp := range expandResult.Groups {
105 | if grp.Name == zoomedName {
106 | output, err = marshalResults(grp)
107 | break
108 | }
109 | }
110 | }
111 | if len(output) == 0 {
112 | fmt.Println("[!] Cannot find zoomed identity")
113 | return
114 | }
115 | }
116 |
117 | // Output expand results
118 | if err != nil {
119 | log.Errorln("runExpand: failed to marshal results with", err)
120 | return
121 | }
122 | outputResults(output)
123 | }
124 |
125 | func init() {
126 | expandCmd.Flags().StringVarP(&zoomedIdentity, "zoom", "z", "", "only show the permissions of the specified identity, format is 'type=identity', e.g. 'sa=kube-system:default', 'user=example@email.com'")
127 | rootCmd.AddCommand(expandCmd)
128 | }
129 |
130 | // Parses zoomedIdentity into a type, identity and namespace
131 | func parseZoomedIdentity(zoomedIdentity string) (string, string, string) {
132 | var zoomedNamespace string
133 |
134 | // Parse type & name
135 | separatorIndex := strings.Index(zoomedIdentity, "=")
136 | if separatorIndex < 0 {
137 | fmt.Println("[!] Cannot parse zoomed identity, format is 'type=identity'")
138 | return "", "", ""
139 | }
140 | zoomedType := zoomedIdentity[:separatorIndex]
141 | zoomedName := zoomedIdentity[separatorIndex+1:]
142 |
143 | // Parse namespace for service accounts
144 | if zoomedType == "sa" {
145 | separatorIndex = strings.Index(zoomedName, ":")
146 | if separatorIndex < 0 {
147 | fmt.Println("[!] Cannot parse zoomed SA, format is 'sa=namespace:name'")
148 | return "", "", ""
149 | }
150 | zoomedNamespace = zoomedName[:separatorIndex]
151 | zoomedName = zoomedName[separatorIndex+1:]
152 | } else if zoomedType != "node" && zoomedType != "user" && zoomedType != "group" {
153 | fmt.Printf("[!] Unsupported type for zoomed identity '%s', supported types are 'sa', 'node', 'user' and 'group'\n", zoomedType)
154 | return "", "", ""
155 | }
156 | return zoomedType, zoomedName, zoomedNamespace
157 | }
158 |
--------------------------------------------------------------------------------
/cmd/root.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "os"
7 | "strings"
8 |
9 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
10 | log "github.com/sirupsen/logrus"
11 | "github.com/spf13/cobra"
12 | )
13 |
14 | // rootCmd represents the base command when called without any subcommands
15 | var (
16 | outFile string
17 | loudMode bool
18 | jsonIndentLen uint
19 | collectConfig collect.CollectConfig
20 |
21 | rootCmd = &cobra.Command{
22 | Use: "rbac-police",
23 | Short: "See and evaluate RBAC permissions in Kubernetes clusters",
24 | Long: `Retrieves the RBAC permissions of Kubernetes identities and evaluates them using policies written in Rego.`,
25 | }
26 | )
27 |
28 | // Execute adds all child commands to the root command and sets flags appropriately.
29 | // This is called by main.main(). It only needs to happen once to the rootCmd.
30 | func Execute() {
31 | err := rootCmd.Execute()
32 | if err != nil {
33 | os.Exit(1)
34 | }
35 | }
36 |
37 | func init() {
38 | rootCmd.PersistentFlags().StringVarP(&outFile, "out-file", "o", "", "save results to file")
39 | rootCmd.PersistentFlags().BoolVarP(&loudMode, "loud", "l", false, "loud mode, print results regardless of -o")
40 | rootCmd.PersistentFlags().UintVarP(&jsonIndentLen, "json-indent", "j", 4, "json indent, 0 means compact mode")
41 | // Collect config
42 | rootCmd.PersistentFlags().BoolVarP(&collectConfig.AllServiceAccounts, "all-serviceaccounts", "a", false, "collect data on all serviceAccounts, not only those assigned to a pod")
43 | rootCmd.PersistentFlags().BoolVarP(&collectConfig.DiscoverProtections, "discover-protections", "w", false, "discover feature gates and admission controllers that protect against certain attacks, partly by emulating the attacks via impersonation & dry-run write operations")
44 | rootCmd.PersistentFlags().BoolVar(&collectConfig.IgnoreControlPlane, "ignore-controlplane", false, "don't collect data on control plane nodes and pods. Identified by either the 'node-role.kubernetes.io/control-plane' or 'node-role.kubernetes.io/master' labels. ServiceAccounts will not be linked to control plane components")
45 | rootCmd.PersistentFlags().StringSliceVar(&collectConfig.NodeGroups, "node-groups", []string{"system:nodes"}, "treat nodes as part of these groups")
46 | rootCmd.PersistentFlags().StringVar(&collectConfig.NodeUser, "node-user", "", "user assigned to all nodes, default behaviour assumes node users are compatible with the NodeAuthorizer")
47 | rootCmd.PersistentFlags().StringVarP(&collectConfig.Namespace, "namespace", "n", "", "scope collection on serviceAccounts to a namespace")
48 | rootCmd.PersistentFlags().StringVar(&collectConfig.OfflineDir, "local-dir", "", "offline mode, get cluster data from local files, see /utils/get_cluster_data.sh")
49 | }
50 |
51 | // Prints and / or saves output to file
52 | func outputResults(output []byte) {
53 | if outFile != "" {
54 | err := os.WriteFile(outFile, output, 0644)
55 | if err != nil {
56 | log.Errorf("outputResults: failed to write results to %v with %v\n", outFile, err)
57 | return
58 | }
59 | if !loudMode {
60 | return
61 | }
62 | }
63 | fmt.Println(string(output))
64 | }
65 |
66 | // Reports whether any collection-related option is set
67 | func collectionOptionsSet() bool {
68 | return collectConfig.IgnoreControlPlane || collectConfig.AllServiceAccounts ||
69 | collectConfig.Namespace != "" || collectConfig.NodeUser != "" ||
70 | (len(collectConfig.NodeGroups) != 1 || collectConfig.NodeGroups[0] != "system:nodes") ||
71 | collectConfig.DiscoverProtections
72 | }
73 |
74 | // Marshal results into a json byte slice, indented based on the global jsonIndentLen variable
75 | func marshalResults(results interface{}) ([]byte, error) {
76 | if jsonIndentLen > 0 {
77 | return json.MarshalIndent(results, "", getIndent(jsonIndentLen))
78 | } else {
79 | return json.Marshal(results) // compact json output
80 | }
81 | }
82 |
83 | // Create an indent string of length @jsonIndentLength, capped at 12 chars.
84 | func getIndent(jsonIndentLength uint) string {
85 | return strings.Repeat(" ", int(uintMin(jsonIndentLength, 12)))
86 | }
87 |
88 | // Return the minimum number
89 | func uintMin(a uint, b uint) uint {
90 | if a < b {
91 | return a
92 | }
93 | return b
94 | }
95 |
--------------------------------------------------------------------------------
/docs/collect.md:
--------------------------------------------------------------------------------
1 | # rbac-police collect
2 | Collects the RBAC permissions of Kubernetes identities. For clusters hosted on EKS and GKE, the `collect` command also identifies service account annotations that assign cloud provider IAM entities to Kubernetes service accounts.
3 |
4 | ## Help
5 | ```
6 | Usage:
7 | rbac-police collect [flags]
8 |
9 | Flags:
10 | -h, --help help for collect
11 |
12 | Global Flags:
13 | -a, --all-serviceaccounts collect data on all serviceAccounts, not only those assigned to a pod
14 | -w, --discover-protections discover feature gates and admission controllers that protect against certain attacks, partly by emulating the attacks via impersonation & dry-run write operations
15 | --ignore-controlplane don't collect data on control plane nodes and pods. Identified by either the 'node-role.kubernetes.io/control-plane' or 'node-role.kubernetes.io/master' labels. ServiceAccounts will not be linked to control plane components
16 | -j, --json-indent uint json indent, 0 means compact mode (default 4)
17 | --local-dir string offline mode, get cluster data from local files, see /utils/get_cluster_data.sh
18 | -l, --loud loud mode, print results regardless of -o
19 | -n, --namespace string scope collection on serviceAccounts to a namespace
20 | --node-groups strings treat nodes as part of these groups (default [system:nodes])
21 | --node-user string user assigned to all nodes, default behaviour assumes node users are compatible with the NodeAuthorizer
22 | -o, --out-file string save results to file
23 | ```
24 |
25 |
26 | ## Output Schema
27 | ```json
28 | {
29 | "metadata": {
30 | "cluster": "cluster name from the current kubectl context",
31 | "platform": "eks, gke or empty",
32 | "version": {
33 | "major": "1",
34 | "minor": "22",
35 | "gitVersion": "v1.22.10-gke.600"
36 | },
37 | "features": [
38 | "list of relevant feature gates and admission controllers,",
39 | "currently supports:",
40 | "LegacyTokenSecretsReducted",
41 | "NodeRestriction",
42 | "NodeRestriction1.17",
43 | ]
44 | },
45 | "serviceAccounts": [
46 | {
47 | "name": "serviceaccount name",
48 | "namespace": "serviceaccount namespace",
49 | "nodes": [
50 | {
51 | "name": "the node hosting the following pods",
52 | "pods": [
53 | "a pod assigned the service account"
54 | "a pod assigned the service account"
55 | ]
56 | },
57 | {
58 | "name": "the node hosting the following pods",
59 | "pods": [
60 | "a pod assigned the service account"
61 | ]
62 | }
63 | ],
64 | "providerIAM": { // omitempty
65 | "aws": "AWS role granted to this serviceaccount via the 'eks.amazonaws.com/role-arn' annotation, if exists",
66 | "gcp": "GCP service account binded to this serviceaccount via the 'iam.gke.io/gcp-service-account' annotation, if exists"
67 | },
68 | "roles": [
69 | {
70 | "name": "a role / clusterRole assigned to this serviceAccount",
71 | "namespace": "role's namespace", // omitempty
72 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect" // omitempty
73 | },
74 | ]
75 | },
76 | ],
77 | "nodes": [
78 | {
79 | "name": "node name",
80 | "roles": [
81 | {
82 | "name": "a role / clusterRole assigned to this node",
83 | "namespace": "role's namespace", // omitempty
84 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect" // omitempty
85 | },
86 | ],
87 | "serviceAccounts": [
88 | "serviceAccounts hosted on this node",
89 | "format is namespace:name",
90 | "kube-system:kube-dns",
91 | ]
92 | },
93 | ],
94 | "users": [
95 | {
96 | "name": "user-name",
97 | "roles": [
98 | {
99 | "name": "a role / clusterRole assigned to this user",
100 | "namespace": "role's namespace", // omitempty
101 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect" // omitempty
102 | }
103 | ]
104 | }
105 | ],
106 | "groups": [
107 | {
108 | "name": "group-name",
109 | "roles": [
110 | {
111 | "name": "a role / clusterRole assigned to this group",
112 | "namespace": "role's namespace", // omitempty
113 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect" // omitempty
114 | }
115 | ]
116 | }
117 | ],
118 | "roles": [
119 | {
120 | "name": "role or clusterrole referenced by an identity (SA, node, user or group)",
121 | "namespace": "role's namespace", // omitempty
122 | "rules": [] // k8s rule format
123 | },
124 | ]
125 | }
126 | ```
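
The output is plain JSON, so it can be post-processed outside of `rbac-police`. Below is a minimal, illustrative Go sketch that consumes a subset of this schema; the struct and field names are assumptions derived from the JSON keys above (the project's own types live in `pkg/collect/types.go` and may differ). It lists serviceAccounts annotated with cloud provider IAM identities.

```go
// Illustrative only: parse the collect output and list serviceAccounts
// that carry providerIAM annotations. Struct shapes are a minimal subset
// of the schema documented above.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type collectResult struct {
	ServiceAccounts []struct {
		Name        string            `json:"name"`
		Namespace   string            `json:"namespace"`
		ProviderIAM map[string]string `json:"providerIAM,omitempty"`
	} `json:"serviceAccounts"`
}

func main() {
	// e.g. produced by: rbac-police collect -o rbacDb.json
	data, err := os.ReadFile("rbacDb.json")
	if err != nil {
		panic(err)
	}
	var result collectResult
	if err := json.Unmarshal(data, &result); err != nil {
		panic(err)
	}
	for _, sa := range result.ServiceAccounts {
		if len(sa.ProviderIAM) > 0 {
			fmt.Printf("%s:%s -> %v\n", sa.Namespace, sa.Name, sa.ProviderIAM)
		}
	}
}
```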
127 |
--------------------------------------------------------------------------------
/docs/eval.md:
--------------------------------------------------------------------------------
1 | # rbac-police eval
2 | Evaluates the RBAC permissions of Kubernetes identities using policies written in Rego. If an RBAC permissions JSON file isn't provided as an argument, `eval` internally calls [`collect`](./collect.md).
3 |
4 | See [policies.md](./policies.md) for the list of built-in policies and for instructions on creating new ones. The built-in policy library aims to identify privilege escalation paths in a cluster.
5 |
6 |
7 | ## Help
8 | ```
9 | Usage:
10 | rbac-police eval [rbac-json] [flags]
11 |
12 | Flags:
13 | -d, --debug debug mode, prints debug info and stdout of policies
14 | -h, --help help for eval
15 | --ignored-namespaces strings ignore serviceAccounts from certain namespaces during eval
16 | --only-sas-on-all-nodes only evaluate serviceAccounts that exist on all nodes
17 | -s, --severity-threshold string only evaluate policies with severity >= threshold (default "Low")
18 | --short abbreviate results
19 | --violations strings violations to search for; besides the defaults, also supports 'user', 'group' and 'all' (default [sa,node,combined])
20 |
21 | Global Flags:
22 | -a, --all-serviceaccounts collect data on all serviceAccounts, not only those assigned to a pod
23 | -w, --discover-protections discover feature gates and admission controllers that protect against certain attacks, partly by emulating the attacks via impersonation & dry-run write operations
24 | --ignore-controlplane don't collect data on control plane nodes and pods. Identified by either the 'node-role.kubernetes.io/control-plane' or 'node-role.kubernetes.io/master' labels. ServiceAccounts will not be linked to control plane components
25 | -j, --json-indent uint json indent, 0 means compact mode (default 4)
26 | --local-dir string offline mode, get cluster data from local files, see /utils/get_cluster_data.sh
27 | -l, --loud loud mode, print results regardless of -o
28 | -n, --namespace string scope collection on serviceAccounts to a namespace
29 | --node-groups strings treat nodes as part of these groups (default [system:nodes])
30 | --node-user string user assigned to all nodes, default behaviour assumes node users are compatible with the NodeAuthorizer
31 | -o, --out-file string save results to file
32 | ```
33 |
34 | ## Output Schema
35 | ```json
36 | {
37 | "policyResults": [
38 | {
39 | "policy": "policy file that produced results",
40 | "severity": "policy's severity",
41 | "description": "policy's description",
42 | "violations": {
43 | "serviceAccounts": [ // omitempty
44 | {
45 | "name": "a serviceAccount who violated the policy",
46 | "namespace": "namespace",
47 | "nodes": [
48 | {
49 | "node-name": [
50 | "pod running on node-name assigned the violation serviceaccount",
51 | "mypod",
52 | ],
53 | },
54 | {
55 | "second-node": [
56 | "pod running on second-node assigned the violation serviceaccount",
57 | "anotherpod",
58 | ],
59 | }
60 | ],
61 | "providerIAM": { // omitempty
62 | "aws": "AWS role granted to this serviceaccount via the 'eks.amazonaws.com/role-arn' annotation, if exists",
63 | "gcp": "GCP service account binded to this serviceaccount via the 'iam.gke.io/gcp-service-account' annotation, if exists"
64 | },
65 | },
66 | ],
67 | "nodes": [ // omitempty
68 | "list of node names",
69 | "that violated the policy"
70 | ],
71 | "combined": [ // omitempty
72 | {
73 | "node": "node that alongside the serviceAccounts below, violated the policy",
74 | "serviceAccounts": [
75 | "serviceAccounts which with their permissions",
76 | "along with the node permissions",
77 | "resulted in violation of the policy",
78 | "namespace:name",
79 | "default:default"
80 | ]
81 | },
82 | ],
83 | "users": [ // omitempty
84 | "users-who-violated-the-policy",
85 | "system:kube-controller-manager",
86 | "john@email.com"
87 | ],
88 | "groups": [ // omitempty
89 | "groups-who-violated-the-policy",
90 | "system:nodes",
91 | "qa-group"
92 | ],
93 | }
94 | },
95 | ]
96 | }
97 | ```
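
Like the collect output, the eval results are plain JSON and easy to post-process. The following is a small, illustrative Go sketch that summarizes results per policy; the struct and field names are assumptions taken from the JSON keys above (the tool's own types live in `pkg/eval/types.go` and may differ).

```go
// Illustrative only: summarize eval output by printing each policy's severity
// and the number of violating identities, using a minimal subset of the schema.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type evalOutput struct {
	PolicyResults []struct {
		Policy     string `json:"policy"`
		Severity   string `json:"severity"`
		Violations struct {
			ServiceAccounts []struct {
				Name      string `json:"name"`
				Namespace string `json:"namespace"`
			} `json:"serviceAccounts"`
			Nodes  []string `json:"nodes"`
			Users  []string `json:"users"`
			Groups []string `json:"groups"`
		} `json:"violations"`
	} `json:"policyResults"`
}

func main() {
	// e.g. produced by: rbac-police eval lib/ -o results.json
	data, err := os.ReadFile("results.json")
	if err != nil {
		panic(err)
	}
	var out evalOutput
	if err := json.Unmarshal(data, &out); err != nil {
		panic(err)
	}
	for _, pr := range out.PolicyResults {
		v := pr.Violations
		total := len(v.ServiceAccounts) + len(v.Nodes) + len(v.Users) + len(v.Groups)
		fmt.Printf("[%s] %s: %d violating identities\n", pr.Severity, pr.Policy, total)
	}
}
```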
98 |
--------------------------------------------------------------------------------
/docs/example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaloAltoNetworks/rbac-police/1ec5d029f008e29869572157c9645ce6c21399c8/docs/example.png
--------------------------------------------------------------------------------
/docs/expand.md:
--------------------------------------------------------------------------------
1 | # rbac-police expand
2 | Presents the RBAC permissions of Kubernetes identities in a (more) human-readable format at the expense of storage. Each identity is listed alongside its permissions.
3 |
4 | ## Help
5 | ```
6 | Usage:
7 | rbac-police expand [rbac-json] [flags]
8 |
9 | Flags:
10 | -h, --help help for expand
11 | -z, --zoom string only show the permissions of the specified identity, format is 'type=identity', e.g. 'sa=kube-system:default', 'user=example@email.com'
12 |
13 | Global Flags:
14 | -a, --all-serviceaccounts collect data on all serviceAccounts, not only those assigned to a pod
15 | -w, --discover-protections discover feature gates and admission controllers that protect against certain attacks, partly by emulating the attacks via impersonation & dry-run write operations
16 | --ignore-controlplane don't collect data on control plane nodes and pods. Identified by either the 'node-role.kubernetes.io/control-plane' or 'node-role.kubernetes.io/master' labels. ServiceAccounts will not be linked to control plane components
17 | -j, --json-indent uint json indent, 0 means compact mode (default 4)
18 | --local-dir string offline mode, get cluster data from local files, see /utils/get_cluster_data.sh
19 | -l, --loud loud mode, print results regardless of -o
20 | -n, --namespace string scope collection on serviceAccounts to a namespace
21 | --node-groups strings treat nodes as part of these groups (default [system:nodes])
22 | --node-user string user assigned to all nodes, default behaviour assumes node users are compatible with the NodeAuthorizer
23 | -o, --out-file string save results to file
24 | ```
25 |
26 |
27 | ## Output Schema
28 | ```json
29 | {
30 | "metadata": {
31 | "cluster": "cluster name from the current kubectl context",
32 | "platform": "eks, gke or empty",
33 | "version": {
34 | "major": "1",
35 | "minor": "22",
36 | "gitVersion": "v1.22.10-gke.600"
37 | },
38 | "features": [
39 | "list of relevant feature gates and admission controllers,",
40 | "currently supports:",
41 | "LegacyTokenSecretsReducted",
42 | "NodeRestriction",
43 | "NodeRestriction1.17",
44 | ]
45 | },
46 | "serviceAccounts": [
47 | {
48 | "name": "serviceaccount name",
49 | "namespace": "serviceaccount namespace",
50 | "nodes": [
51 | {
52 | "name": "the node hosting the following pods",
53 | "pods": [
54 | "a pod assigned the service account"
55 | "a pod assigned the service account"
56 | ]
57 | },
58 | {
59 | "name": "the node hosting the following pods",
60 | "pods": [
61 | "a pod assigned the service account"
62 | ]
63 | }
64 | ],
65 | "providerIAM": { // omitempty
66 | "aws": "AWS role granted to this serviceaccount via the 'eks.amazonaws.com/role-arn' annotation, if exists",
67 | "gcp": "GCP service account binded to this serviceaccount via the 'iam.gke.io/gcp-service-account' annotation, if exists"
68 | },
69 | "roles": [
70 | {
71 | "name": "a role / clusterRole assigned to this serviceAccount",
72 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect", // omitempty
73 | "rules": [] // k8s rule format
74 | },
75 | ]
76 | },
77 | ],
78 | "nodes": [
79 | {
80 | "name": "node name",
81 | "roles": [
82 | {
83 | "name": "a role / clusterRole assigned to this node",
84 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect", // omitempty
85 | "rules": [] // k8s rule format
86 | },
87 | ],
88 | "serviceAccounts": [
89 | "serviceAccounts hosted on this node",
90 | "format is namespace:name",
91 | "kube-system:kube-dns",
92 | ]
93 | },
94 | ],
95 | "users": [
96 | {
97 | "name": "user-name",
98 | "roles": [
99 | {
100 | "name": "a role / clusterRole assigned to this user",
101 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect", // omitempty
102 | "rules": [] // k8s rule format
103 | }
104 | ]
105 | }
106 | ],
107 | "groups": [
108 | {
109 | "name": "group-name",
110 | "roles": [
111 | {
112 | "name": "a role / clusterRole assigned to this group",
113 | "effectiveNamespace": "if granted by a roleBinding, namespace where permissions are in effect", // omitempty
114 | "rules": [] // k8s rule format
115 | }
116 | ]
117 | }
118 | ],
119 | }
120 | ```
121 |
--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PaloAltoNetworks/rbac-police/1ec5d029f008e29869572157c9645ce6c21399c8/docs/logo.png
--------------------------------------------------------------------------------
/docs/policies.md:
--------------------------------------------------------------------------------
1 | # Policies
2 | Policies are [Rego](https://www.openpolicyagent.org/docs/latest/policy-language/) scripts that detect identities like service accounts possessing RBAC permissions that match certain rule definitions. Policies produce violations, which come in five types:
3 | - **ServiceAccounts**: Service accounts that violate the policy based on their permissions.
4 | - **Nodes**: Nodes that violate the policy based on their permissions.
5 | - **Users**: Users that violate the policy based on their permissions.
6 | - **Groups**: Groups that violate the policy based on their permissions.
7 | - **Combined**: Nodes that violate the policy based on the union of their permissions and those of the service account tokens they host.
8 |
9 | The [policy library](../lib) includes ~20 policies that alert on identities possessing risky permissions, each detecting a different attack path.
10 |
11 | ## Writing Custom Policies
12 | Policies are written in Rego, and receive input in the [schema](./collect.md#output-schema) produced by `rbac-police collect`. Policies should define a `describe` rule, at least one violation type they produce, and an evaluator. Below is the [nodes_proxy](../lib/nodes_proxy.rego) policy for example:
13 |
14 | ```rego
15 | package policy
16 | import data.police_builtins as pb
17 |
18 | describe[{"desc": desc, "severity": severity}] {
19 | desc := "Identities with access to the nodes/proxy subresource can execute code on pods via the Kubelet API"
20 | severity := "High"
21 | }
22 | targets := {"serviceAccounts", "nodes", "users", "groups"}
23 |
24 | evaluateRoles(roles, owner) {
25 | rule := roles[_].rules[_]
26 | pb.valueOrWildcard(rule.verbs, "create")
27 | pb.subresourceOrWildcard(rule.resources, "nodes/proxy")
28 | pb.valueOrWildcard(rule.apiGroups, "")
29 | }
30 | ```
31 |
32 | - A policy must start with `package policy`.
33 | - A policy can import a number of built-in utility functions from [builtins.rego](../lib/utils/builtins.rego) via `import data.police_builtins`.
34 | - The `describe` rule defines the description and severity of the policy.
35 | - The `targets` set configures which identities the policy evaluates and produces violations for.
36 | - The `evaluateRoles` function receives the `roles` of a serviceAccount, node, user, or group, and based on them determines whether it violates the policy.
37 | - Policies can define an `evaluateCombined` rule to produce combined violations. See [approve_csrs](../lib/approve_csrs.rego) for an example.
38 |
39 | The above options are implemented by a Rego [wrapper](../lib/utils/wrapper.rego). If full control over the execution is needed, a policy can be written to run independently, without the wrapper. See the [providerIAM](../lib/providerIAM.rego) policy for an example.
40 |
41 | ## Policy Library
42 | ### [approve_csrs](../lib/approve_csrs.rego)
43 | - Description: `Identities that can create and approve certificatesigningrequests can issue arbitrary certificates with cluster admin privileges`
44 | - Severity: `Critical`
45 | - Violation types: `serviceAccounts, nodes, combined, users, groups`
46 | ### [assign_sa](../lib/assign_sa.rego)
47 | - Description: `Identities that can create pods or create, update or patch pod controllers (e.g. DaemonSets, Deployments, Jobs) in privileged namespaces, may assign an admin-equivalent SA to a pod in their control`
48 | - Severity: `Critical`
49 | - Violation types: `serviceAccounts, nodes, users, groups`
50 | ### [bind_roles](../lib/bind_roles.rego)
51 | - Description: `Identities that can bind clusterrolebindings or bind rolebindings in privileged namespaces can grant admin-equivalent permissions to themselves`
52 | - Severity: `Critical`
53 | - Violation types: `serviceAccounts, nodes, users, groups`
54 | ### [cluster_admin](../lib/cluster_admin.rego)
55 | - Description: `Identities with cluster admin privileges pose a significant threat to the cluster if compromised`
56 | - Severity: `Critical`
57 | - Violation types: `serviceAccounts, nodes, users, groups`
58 | ### [control_webhooks](../lib/control_webhooks.rego)
59 | - Description: `Identities that can create, update or patch ValidatingWebhookConfigurations or MutatingWebhookConfigurations can read, and in the case of the latter also mutate, any object admitted to the cluster`
60 | - Severity: `High`
61 | - Violation types: `serviceAccounts, nodes, users, groups`
62 | ### [eks_modify_aws_auth](../lib/eks_modify_aws_auth.rego)
63 | - Description: `Identities that can modify configmaps in the kube-system namespace on EKS clusters can obtain cluster admin privileges by overwriting the aws-auth configmap`
64 | - Severity: `Critical`
65 | - Violation types: `serviceAccounts, nodes, users, groups`
66 | ### [escalate_roles](../lib/escalate_roles.rego)
67 | - Description: `Identities that can escalate clusterrole or roles in privileged namespaces are allowed to escalate privileges`
68 | - Severity: `Critical`
69 | - Violation types: `serviceAccounts, nodes, users, groups`
70 | ### [impersonate](../lib/impersonate.rego)
71 | - Description: `Identities that can impersonate users, groups or other serviceaccounts can escalate privileges by abusing the permissions of the impersonated identity`
72 | - Severity: `Critical`
73 | - Violation types: `serviceAccounts, nodes, users, groups`
74 | ### [issue_token_secrets](../lib/issue_token_secrets.rego)
75 | - Description: `Identities that can create or modify secrets in privileged namespaces can issue tokens for admin-equivalent SAs`
76 | - Severity: `Critical`
77 | - Violation types: `serviceAccounts, nodes, users, groups`
78 | ### [list_secrets](../lib/list_secrets.rego)
79 | - Description: `Identities that can list secrets cluster-wide may access confidential information, and in some cases serviceAccount tokens`
80 | - Severity: `Medium`
81 | - Violation types: `serviceAccounts, nodes, users, groups`
82 | ### [modify_node_status](../lib/modify_node_status.rego)
83 | - Description: `Identities that can modify nodes' status can set or remove labels to affect scheduling constraints enforced via nodeAffinity or nodeSelectors`
84 | - Severity: `Low`
85 | - Violation types: `serviceAccounts, nodes, users, groups`
86 | ### [modify_pod_status](../lib/modify_pod_status.rego)
87 | - Description: `Identities that can modify pods' status may match a pod's labels to services' selectors in order to intercept connections to services in the pod's namespace`
88 | - Severity: `Low`
89 | - Violation types: `serviceAccounts, nodes, users, groups`
90 | ### [modify_pods](../lib/modify_pods.rego)
91 | - Description: `Identities that can update or patch pods in privileged namespaces can gain code execution on pods that are likely to be powerful`
92 | - Severity: `High`
93 | - Violation types: `serviceAccounts, nodes, users, groups`
94 | ### [modify_service_status_cve_2020_8554](../lib/modify_service_status_cve_2020_8554.rego)
95 | - Description: `Identities that can modify services/status may set the status.loadBalancer.ingress.ip field to exploit the unfixed CVE-2020-8554 and launch MiTM attacks against the cluster. Most mitigations for CVE-2020-8554 only prevent ExternalIP services`
96 | - Severity: `Medium`
97 | - Violation types: `serviceAccounts, nodes, users, groups`
98 | ### [nodes_proxy](../lib/nodes_proxy.rego)
99 | - Description: `Identities with access to the nodes/proxy subresource can execute code on pods via the Kubelet API`
100 | - Severity: `High`
101 | - Violation types: `serviceAccounts, nodes, users, groups`
102 | ### [obtain_token_weak_ns](../lib/obtain_token_weak_ns.rego)
103 | - Description: `Identities that can retrieve or issue SA tokens in unprivileged namespaces could potentially obtain tokens with broader permissions over the cluster`
104 | - Severity: `Low`
105 | - Violation types: `serviceAccounts, nodes, users, groups`
106 | ### [pods_ephemeral_ctrs](../lib/pods_ephemeral_ctrs.rego)
107 | - Description: `Identities that can update or patch pods/ephemeralcontainers can gain code execution on other pods, and potentially break out to their node by adding an ephemeral container with a privileged securityContext`
108 | - Severity: `High`
109 | - Violation types: `serviceAccounts, nodes, users, groups`
110 | ### [pods_exec](../lib/pods_exec.rego)
111 | - Description: `Identities with the create pods/exec permission in privileged namespaces can execute code on pods that are likely to be powerful`
112 | - Severity: `High`
113 | - Violation types: `serviceAccounts, nodes, users, groups`
114 | ### [providerIAM](../lib/providerIAM.rego)
115 | - Description: `Kubernetes ServiceAccounts assigned cloud provider IAM roles may be abused to attack the underlying cloud account (depending on the permissions of the IAM role)`
116 | - Severity: `Low`
117 | - Violation types: `serviceAccounts`
118 | ### [rce_weak_ns](../lib/rce_weak_ns.rego)
119 | - Description: `Identities that can update or patch pods or create pods/exec in unprivileged namespaces can execute code on existing pods`
120 | - Severity: `Medium`
121 | - Violation types: `serviceAccounts, nodes, users, groups`
122 | ### [retrieve_token_secrets](../lib/retrieve_token_secrets.rego)
123 | - Description: `Identities that can retrieve secrets in privileged namespaces can obtain tokens of admin-equivalent SAs`
124 | - Severity: `Critical`
125 | - Violation types: `serviceAccounts, nodes, users, groups`
126 | ### [steal_pods](../lib/steal_pods.rego)
127 | - Description: `Identities that can delete or evict pods in privileged namespaces and also make other nodes unschedulable can steal powerful pods from other nodes onto a compromised one`
128 | - Severity: `High`
129 | - Violation types: `serviceAccounts, nodes, combined, users, groups`
130 | ### [token_request](../lib/token_request.rego)
131 | - Description: `Identities that can create TokenRequests (serviceaccounts/token) in privileged namespaces can issue tokens for admin-equivalent SAs`
132 | - Severity: `Critical`
133 | - Violation types: `serviceAccounts, nodes, users, groups`
134 |
--------------------------------------------------------------------------------
/docs/prevent/README.md:
--------------------------------------------------------------------------------
1 | # Prevent K8s PrivEsc Attacks With Admission Control
2 | Attacks that misuse powerful permissions often diverge from the credentials' common usage. K8s defenders can capitalize on that to identify compromised credentials and prevent attacks in real-time via admission control. This directory contains several example policies for OPA Gatekeeper.
3 |
4 | ## [Suspicious SelfSubjectReviews](./suspicious_self_subject_review)
5 | A common attack pattern following credential theft is querying the stolen credential's permissions. In Kubernetes, that is done via the SelfSubjectAccessReview or SelfSubjectRulesReview APIs. Non-human identities such as service accounts or nodes querying these APIs for their permissions are strong indicators of compromise.
6 |
7 | ## [Suspicious Assignment of Controller Service Accounts](./suspicious_assignment_of_controller_service_accounts)
8 | By default, the kube-system namespace hosts several admin-equivalent service accounts used by the controllers running as part of the kube-controller-manager. Attackers that can create pods or pod controllers in the kube-system namespace, or modify pod controllers in the kube-system namespace, can assign one of these admin-equivalent service accounts to a pod in their control to gain an admin-equivalent service account token and abuse it to take over the entire cluster.
9 |
10 | Controller service accounts aren't normally assigned to running pods. Defenders can capitalize on that to detect this privilege escalation attack with a policy that alerts on requests that attach a controller service account to an existing or new kube-system pod.
11 |
12 |
13 |
--------------------------------------------------------------------------------
/docs/prevent/suspicious_assignment_of_controller_service_accounts/constraint.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: constraints.gatekeeper.sh/v1beta1
2 | kind: PrivEscAssignPowerfulSA
3 | metadata:
4 | name: prevent-pods-with-powerful-serviceaccounts
5 | spec:
6 | enforcementAction: deny
7 | match:
8 | kinds:
9 | - apiGroups: ["", "apps", "batch"]
10 | kinds:
11 | - Pod
12 | - CronJob
13 | - ReplicaSet
14 | - ReplicationController
15 | - Deployment
16 | - StatefulSet
17 | - DaemonSet
18 | - Job
19 | namespaces: ["kube-system"] # should contain all namespaces specified under forbiddenSAs
20 | parameters:
21 | forbiddenSAs:
22 | # Not every controller SA is powerful, but there's no reason
23 | # for any of them to be assigned to a pod
24 | - name: attachdetach-controller
25 | namespace: kube-system
26 | - name: certificate-controller
27 | namespace: kube-system
28 | - name: clusterrole-aggregation-controller
29 | namespace: kube-system
30 | - name: cronjob-controller
31 | namespace: kube-system
32 | - name: daemon-set-controller
33 | namespace: kube-system
34 | - name: deployment-controller
35 | namespace: kube-system
36 | - name: disruption-controller
37 | namespace: kube-system
38 | - name: endpoint-controller
39 | namespace: kube-system
40 | - name: endpointslice-controller
41 | namespace: kube-system
42 | - name: endpointslicemirroring-controller
43 | namespace: kube-system
44 | - name: ephemeral-volume-controller
45 | namespace: kube-system
46 | - name: expand-controller
47 | namespace: kube-system
48 | - name: job-controller
49 | namespace: kube-system
50 | - name: namespace-controller
51 | namespace: kube-system
52 | - name: node-controller
53 | namespace: kube-system
54 | - name: pod-garbage-collector
55 | namespace: kube-system
56 | - name: pv-protection-controller
57 | namespace: kube-system
58 | - name: pvc-protection-controller
59 | namespace: kube-system
60 | - name: replicaset-controller
61 | namespace: kube-system
62 | - name: replication-controller
63 | namespace: kube-system
64 | - name: resourcequota-controller
65 | namespace: kube-system
66 | - name: service-account-controller
67 | namespace: kube-system
68 | - name: service-controller
69 | namespace: kube-system
70 | - name: statefulset-controller
71 | namespace: kube-system
72 | - name: ttl-after-finished-controller
73 | namespace: kube-system
74 | - name: ttl-controller
75 | namespace: kube-system
76 | - name: vpc-resource-controller
77 | namespace: kube-system
78 |
79 |
--------------------------------------------------------------------------------
/docs/prevent/suspicious_assignment_of_controller_service_accounts/template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: templates.gatekeeper.sh/v1beta1
2 | kind: ConstraintTemplate
3 | metadata:
4 | name: privescassignpowerfulsa
5 | spec:
6 | crd:
7 | spec:
8 | names:
9 | kind: PrivEscAssignPowerfulSA
10 | validation:
11 | # Schema for the `parameters` field
12 | openAPIV3Schema:
13 | properties:
14 | forbiddenSAs:
15 | type: array
16 | items:
17 | type: object
18 | properties:
19 | name:
20 | type: string
21 | namespace:
22 | type: string
23 | targets:
24 | - target: admission.k8s.gatekeeper.sh
25 | rego: |
26 | package privescassignpowerfulsa
27 | operations = {"CREATE", "UPDATE"}
28 | violation[{"msg": msg, "details": {}}] {
29 | operations[input.review.operation]
30 | podSpec := getPodSpec(input.review.object)
31 | sa := podSpec.serviceAccountName
32 | ns := input.review.object.metadata.namespace
33 | isForbiddenSA(sa, ns, input.parameters.forbiddenSAs)
34 | msg := sprintf("Forbidden: cannot assign the '%v:%v' SA to a pod", [ns, sa])
35 | }
36 |
37 | isForbiddenSA(sa, ns, forbiddenSAs) {
38 | forbiddenSA := forbiddenSAs[_]
39 | sa == forbiddenSA.name
40 | ns == forbiddenSA.namespace
41 | }
42 |
43 | getPodSpec(obj) = spec {
44 | obj.kind == "Pod"
45 | spec := obj.spec
46 | } {
47 | obj.kind == "CronJob"
48 | spec := obj.spec.jobTemplate.spec.template.spec
49 | } {
50 | obj.kind == "ReplicaSet"
51 | spec := obj.spec.template.spec
52 | } {
53 | obj.kind == "ReplicationController"
54 | spec := obj.spec.template.spec
55 | } {
56 | obj.kind == "Deployment"
57 | spec := obj.spec.template.spec
58 | } {
59 | obj.kind == "StatefulSet"
60 | spec := obj.spec.template.spec
61 | } {
62 | obj.kind == "DaemonSet"
63 | spec := obj.spec.template.spec
64 | } {
65 | obj.kind == "Job"
66 | spec := obj.spec.template.spec
67 | }
68 |
--------------------------------------------------------------------------------
/docs/prevent/suspicious_self_subject_review/constraint.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: constraints.gatekeeper.sh/v1beta1
2 | kind: SuspiciousSelfSubjectReview
3 | metadata:
4 | name: detect-automated-identities-querying-permissions
5 | spec:
6 | enforcementAction: deny
7 | match:
8 | kinds:
9 | - apiGroups: ["authorization.k8s.io"]
10 | kinds:
11 | - SelfSubjectRulesReview
12 | - SelfSubjectAccessReview
13 | parameters:
14 | # Allow requests from certain users, e.g. "system:serviceaccount:my-priv-ns:my-priv-sa" or "system:node:nodename"
15 | allowedUsers: []
16 | # Allow requests from users in certain groups, e.g. "system:nodes", "system:serviceaccounts:privileged-ns"
17 | allowedGroups: []
18 |
--------------------------------------------------------------------------------
/docs/prevent/suspicious_self_subject_review/template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: templates.gatekeeper.sh/v1beta1
2 | kind: ConstraintTemplate
3 | metadata:
4 | name: suspiciousselfsubjectreview
5 | spec:
6 | crd:
7 | spec:
8 | names:
9 | kind: SuspiciousSelfSubjectReview
10 | validation:
11 | # Schema for the `parameters` field
12 | openAPIV3Schema:
13 | properties:
14 | allowedUsers:
15 | description: Users that should be allowed to bypass the policy.
16 | type: array
17 | items:
18 | type: string
19 | allowedGroups:
20 | description: Groups that should be allowed to bypass the policy.
21 | type: array
22 | items:
23 | type: string
24 | targets:
25 | - target: admission.k8s.gatekeeper.sh
26 | rego: |
27 | package suspiciousselfsubjectreview
28 | violation[{"msg": msg}] {
29 | is_self_review(input.review.object.kind)
30 | params := object.get(input, "parameters", {})
31 | allowedUsers := object.get(params, "allowedUsers", [])
32 | allowedGroups := object.get(params, "allowedGroups", [])
33 | not privileged(input.review.userInfo, allowedUsers, allowedGroups)
34 | user_suspicious[{"msg": msg}]
35 | }
36 |
37 | user_suspicious[{"msg": msg}] {
38 | startswith(input.review.userInfo.username, "system:serviceaccount:")
39 | msg := sprintf("Service account '%v' issued a suspicious %v request, querying its permissions", [input.review.userInfo.username, input.review.object.kind])
40 | } {
41 | startswith(input.review.userInfo.username, "system:node:")
42 | msg := sprintf("Node '%v' issued a suspicious %v request, querying its permissions", [input.review.userInfo.username, input.review.object.kind])
43 | }
44 |
45 | is_self_review(kind) {
46 | kind == "SelfSubjectAccessReview"
47 | } {
48 | kind == "SelfSubjectRulesReview"
49 | }
50 |
51 | privileged(userInfo, allowedUsers, allowedGroups) {
52 | username := object.get(userInfo, "username", "")
53 | allowedUsers[_] == username
54 | } {
55 | userGroups := object.get(userInfo, "groups", [])
56 | groups := {g | g := userGroups[_]}
57 | allowed := {g | g := allowedGroups[_]}
58 | intersection := groups & allowed
59 | count(intersection) > 0
60 | }
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/PaloAltoNetworks/rbac-police
2 |
3 | go 1.16
4 |
5 | require (
6 | github.com/mitchellh/mapstructure v1.4.3
7 | github.com/open-policy-agent/opa v0.37.2
8 | github.com/sirupsen/logrus v1.8.1
9 | github.com/spf13/cobra v1.3.0
10 | k8s.io/api v0.23.4
11 | k8s.io/apimachinery v0.23.4
12 | k8s.io/client-go v0.23.4
13 | )
14 |
--------------------------------------------------------------------------------
/lib/approve_csrs.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can create and approve certificatesigningrequests can issue arbitrary certificates with cluster admin privileges"
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "combined", "users", "groups"}
10 |
11 | # https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/
12 | # To create a CSR
13 | # 1. Verbs: create, get, list, watch, group: certificates.k8s.io, resource: certificatesigningrequests
14 | # To approve a CSR
15 | # 2. Verbs: get, list, watch, group: certificates.k8s.io, resource: certificatesigningrequests
16 | # 3. Verbs: update, group: certificates.k8s.io, resource: certificatesigningrequests/approval
17 | # 4. Verbs: approve, group: certificates.k8s.io, resource: signers, resourceName: <signerNameDomain>/<signerNamePath> or <signerNameDomain>/*
18 | # Nodes already have permissions 1 & 2: https://github.com/kubernetes/kubernetes/blob/e847b849c4d170b872d6020bfc2263d02c05e369/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go#L150
19 |
20 | evaluateRoles(roles, owner) {
21 | rolesCanUpdateCsrsApproval(roles)
22 | rolesCanApproveSigners(roles)
23 | rolesCanCreateAndRetrieveCsrs(roles, owner)
24 | }
25 |
26 | rolesCanCreateAndRetrieveCsrs(roles, owner) {
27 | owner == "node"
28 | } {
29 | rolesCanRetrieveCsrs(roles)
30 | rolesCanCreateCsrs(roles)
31 | }
32 |
33 | evaluateCombined = combinedViolations {
34 | combinedViolations := { combinedViolation |
35 | some node in input.nodes
36 | sasOnNode := pb.sasOnNode(node)
37 |
38 | # Can the node or one of its SAs update CSR approvals?
39 | sasCanUpdateCsrApproval := { saFullName |
40 | some sa in sasOnNode
41 | saEffectiveRoles := pb.effectiveRoles(sa.roles)
42 | rolesCanUpdateCsrsApproval(saEffectiveRoles)
43 | saFullName := pb.saFullName(sa)
44 | }
45 | nodeCanUpdateCsrsApproval(node.roles, sasCanUpdateCsrApproval)
46 |
47 | # Can the node or one of its SAs approve signers?
48 | sasCanApproveSigners := { saFullName |
49 | some sa in sasOnNode
50 | saEffectiveRoles := pb.effectiveRoles(sa.roles)
51 | rolesCanApproveSigners(saEffectiveRoles)
52 | saFullName := pb.saFullName(sa)
53 | }
54 | nodeCanApproveSigners(node.roles, sasCanApproveSigners)
55 |
56 | combinedViolation := {
57 | "node": node.name,
58 | "serviceAccounts": sasCanUpdateCsrApproval | sasCanApproveSigners
59 | }
60 | }
61 | }
62 |
63 |
64 | nodeCanUpdateCsrsApproval(nodeRoles, sasCanUpdateCsrApproval) {
65 | count(sasCanUpdateCsrApproval) > 0
66 | } {
67 | nodeEffectiveRoles := pb.effectiveRoles(nodeRoles)
68 | rolesCanUpdateCsrsApproval(nodeEffectiveRoles)
69 | }
70 |
71 | nodeCanApproveSigners(nodeRoles, sasCanApproveSigners) {
72 | count(sasCanApproveSigners) > 0
73 | } {
74 | nodeEffectiveRoles := pb.effectiveRoles(nodeRoles)
75 | rolesCanApproveSigners(nodeEffectiveRoles)
76 | }
77 |
78 | rolesCanUpdateCsrsApproval(roles) {
79 | some role in roles
80 | pb.notNamespaced(role)
81 | some rule in role.rules
82 | pb.valueOrWildcard(rule.apiGroups, "certificates.k8s.io")
83 | pb.updateOrPatchOrWildcard(rule.verbs) # https://github.com/kubernetes/kubernetes/blob/442a69c3bdf6fe8e525b05887e57d89db1e2f3a5/plugin/pkg/admission/certificates/approval/admission.go#L77
84 | pb.subresourceOrWildcard(rule.resources, "certificatesigningrequests/approval")
85 | }
86 |
87 | rolesCanApproveSigners(roles) {
88 | some role in roles
89 | pb.notNamespaced(role)
90 | some rule in role.rules
91 | pb.valueOrWildcard(rule.apiGroups, "certificates.k8s.io")
92 | pb.valueOrWildcard(rule.verbs, "approve")
93 | pb.valueOrWildcard(rule.resources, "signers")
94 | }
95 |
96 | rolesCanCreateCsrs(roles) {
97 | some role in roles
98 | pb.notNamespaced(role)
99 | some rule in role.rules
100 | pb.valueOrWildcard(rule.apiGroups, "certificates.k8s.io")
101 | pb.valueOrWildcard(rule.verbs, "create")
102 | }
103 |
104 | rolesCanRetrieveCsrs(roles) {
105 | some role in roles
106 | pb.notNamespaced(role)
107 | some rule in role.rules
108 | pb.valueOrWildcard(rule.apiGroups, "certificates.k8s.io")
109 | getListWatchOrWildcard(rule.verbs)
110 | }
111 |
112 | getListWatchOrWildcard(verbs) {
113 | pb.getOrListOrWildcard(verbs)
114 | } {
115 | "watch" in verbs
116 | }
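117 |
118 | # Illustrative example (not from the original policy): a ClusterRole that bundles
119 | # every permission checked above, and would therefore be flagged:
120 | #
121 | #   apiVersion: rbac.authorization.k8s.io/v1
122 | #   kind: ClusterRole
123 | #   metadata:
124 | #     name: csr-issuer-and-approver   # hypothetical name
125 | #   rules:
126 | #   - apiGroups: ["certificates.k8s.io"]
127 | #     resources: ["certificatesigningrequests"]
128 | #     verbs: ["create", "get", "list", "watch"]
129 | #   - apiGroups: ["certificates.k8s.io"]
130 | #     resources: ["certificatesigningrequests/approval"]
131 | #     verbs: ["update"]
132 | #   - apiGroups: ["certificates.k8s.io"]
133 | #     resources: ["signers"]
134 | #     verbs: ["approve"]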
--------------------------------------------------------------------------------
/lib/assign_sa.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can create pods or create, update or patch pod controllers (e.g. DaemonSets, Deployments, Jobs) in privileged namespaces (%v), may assign an admin-equivalent SA to a pod in their control", [concat(", ", pb.privileged_namespaces)])
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | pb.affectsPrivNS(role)
14 | some rule in role.rules
15 | pb.ruleCanControlPodSa(rule, owner)
16 | }
17 |
--------------------------------------------------------------------------------
/lib/bind_roles.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can bind clusterrolebindings or bind rolebindings in privileged namespaces (%v) can grant admin-equivalent permissions to themselves", [concat(", ", pb.privileged_namespaces)])
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | pb.affectsPrivNS(role)
14 | some rule in role.rules
15 | rolebindingsOrClusterrolebindings(rule.resources)
16 | pb.valueOrWildcard(rule.verbs, "bind")
17 | pb.valueOrWildcard(rule.apiGroups, "rbac.authorization.k8s.io")
18 | }
19 |
20 | rolebindingsOrClusterrolebindings(resources) {
21 | "clusterrolebindings" in resources
22 | } {
23 | "rolebindings" in resources
24 | } {
25 | pb.hasWildcard(resources)
26 | }
27 |
--------------------------------------------------------------------------------
/lib/cluster_admin.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities with cluster admin privileges pose a significant threat to the cluster if compromised"
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | pb.notNamespaced(role)
14 | some rule in role.rules
15 | pb.hasWildcard(rule.verbs)
16 | pb.hasWildcard(rule.resources)
17 | pb.valueOrWildcard(rule.apiGroups, "")
18 | }
19 |
--------------------------------------------------------------------------------
/lib/control_webhooks.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can create, update or patch ValidatingWebhookConfigurations or MutatingWebhookConfigurations can read, and in the case of the latter also mutate, any object admitted to the cluster"
7 | severity := "High"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | rule := roles[_].rules[_]
13 | validatingwebhookOrMutatingwebhook(rule.resources)
14 | pb.createUpdatePatchOrWildcard(rule.verbs)
15 | pb.valueOrWildcard(rule.apiGroups, "admissionregistration.k8s.io")
16 | }
17 |
18 | validatingwebhookOrMutatingwebhook(resources) {
19 | "validatingwebhookconfigurations" in resources
20 | } {
21 | "mutatingwebhookconfigurations" in resources
22 | } {
23 | pb.hasWildcard(resources)
24 | }
25 |
--------------------------------------------------------------------------------
/lib/eks_modify_aws_auth.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can modify configmaps in the kube-system namespace on EKS clusters can obtain cluster admin privileges by overwriting the aws-auth configmap"
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | input.metadata.platform == "eks"
13 | some role in roles
14 | pb.notNamespacedOrNamespace(role, "kube-system")
15 | some rule in role.rules
16 | pb.valueOrWildcard(rule.resources, "configmaps")
17 | pb.updateOrPatchOrWildcard(rule.verbs)
18 | pb.valueOrWildcard(rule.apiGroups, "")
19 | noResourceNamesOrValue(rule, "aws-auth")
20 | }
21 |
22 | noResourceNamesOrValue(rule, value){
23 | not pb.hasKey(rule, "resourceNames")
24 | } {
25 | value in rule.resourceNames
26 | }
27 |
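28 | # Illustrative example (not from the original policy): when bound in kube-system
29 | # on an EKS cluster, a Role like the following would be flagged, since patching
30 | # the aws-auth ConfigMap can grant cluster admin to arbitrary IAM principals:
31 | #
32 | #   apiVersion: rbac.authorization.k8s.io/v1
33 | #   kind: Role
34 | #   metadata:
35 | #     name: aws-auth-editor           # hypothetical name
36 | #     namespace: kube-system
37 | #   rules:
38 | #   - apiGroups: [""]
39 | #     resources: ["configmaps"]
40 | #     verbs: ["patch"]
41 | #     resourceNames: ["aws-auth"]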
--------------------------------------------------------------------------------
/lib/escalate_roles.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can escalate clusterroles or roles in privileged namespaces (%v) are allowed to escalate privileges", [concat(", ", pb.privileged_namespaces)])
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | pb.affectsPrivNS(role)
14 | some rule in role.rules
15 | rolesOrClusterroles(rule.resources)
16 | pb.valueOrWildcard(rule.verbs, "escalate")
17 | pb.valueOrWildcard(rule.apiGroups, "rbac.authorization.k8s.io")
18 | }
19 |
20 | rolesOrClusterroles(resources) {
21 | "clusterroles" in resources
22 | } {
23 | "roles" in resources
24 | } {
25 | pb.hasWildcard(resources)
26 | }
27 |
--------------------------------------------------------------------------------
/lib/impersonate.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can impersonate users, groups or other serviceaccounts can escalate privileges by abusing the permissions of the impersonated identity"
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | rule := roles[_].rules[_]
13 | pb.valueOrWildcard(rule.verbs, "impersonate")
14 | impersonationResources(rule.apiGroups, rule.resources)
15 | }
16 |
17 | impersonationResources(apiGroups, resources) {
18 | pb.valueOrWildcard(apiGroups, "")
19 | usersGroupsSasOrWildcard(resources)
20 | } {
21 | pb.valueOrWildcard(apiGroups, "authentication.k8s.io")
22 | pb.valueOrWildcard(resources, "userextras")
23 | }
24 |
25 | usersGroupsSasOrWildcard(resources) {
26 | "users" in resources
27 | } {
28 | "groups" in resources
29 | } {
30 | "serviceaccounts" in resources
31 | } {
32 | pb.hasWildcard(resources)
33 | }
34 |
--------------------------------------------------------------------------------
/lib/issue_token_secrets.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can create or modify secrets in privileged namespaces (%v) can issue tokens for admin-equivalent SAs", [concat(", ", pb.privileged_namespaces)])
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | pb.affectsPrivNS(role)
14 | some rule in role.rules
15 | pb.valueOrWildcard(rule.resources, "secrets")
16 | pb.createUpdatePatchOrWildcard(rule.verbs)
17 | pb.valueOrWildcard(rule.apiGroups, "")
18 | # TODO: Improve accuracy, only alert when rules grant
19 | # the following perm bundles over privileged namespaces (port any improvements to obtain_token_weak_ns)
20 | # [*] create && get && no resource names
21 | # - Starting from ~1.26 'get' won't be enough as SA token secrets will be removed
22 | # - create alone isn't enough since you cannot retreive the secret
23 | # - with resource name you can't actually create the secret without having 'patch' as well
24 | # [*] create && patch (server side apply)
25 | # [*] update || patch && no resource names
26 | # - with resource names the secret most likely already exists
27 | # and isn't of type SA token
28 | }
29 |
--------------------------------------------------------------------------------
/lib/list_secrets.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can list secrets cluster-wide may access confidential information, and in some cases serviceAccount tokens"
7 | severity := "Medium"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | pb.notNamespaced(role)
14 | some rule in role.rules
15 | pb.valueOrWildcard(rule.resources, "secrets")
16 | pb.valueOrWildcard(rule.verbs, "list")
17 | pb.valueOrWildcard(rule.apiGroups, "")
18 | }
19 |
--------------------------------------------------------------------------------
/lib/modify_node_status.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 |
4 | describe[{"desc": desc, "severity": severity}] {
5 | desc := "Identities that can modify nodes' status can set or remove labels to affect scheduling constraints enforced via nodeAffinity or nodeSelectors"
6 | severity := "Low"
7 | }
8 | targets := {"serviceAccounts", "nodes", "users", "groups"}
9 |
10 | evaluateRoles(roles, owner) {
11 | not pb.nodeRestrictionEnabledAndIsNode(owner)
12 | rule := roles[_].rules[_]
13 | pb.subresourceOrWildcard(rule.resources, "nodes/status")
14 | pb.updateOrPatchOrWildcard(rule.verbs)
15 | pb.valueOrWildcard(rule.apiGroups, "")
16 | }
17 |
--------------------------------------------------------------------------------
/lib/modify_pod_status.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 |
4 | describe[{"desc": desc, "severity": severity}] {
5 | desc := "Identities that can modify pods' status may match a pod's labels to services' selectors in order to intercept connections to services in the pod's namespace"
6 | severity := "Low"
7 | }
8 | targets := {"serviceAccounts", "nodes", "users", "groups"}
9 |
10 | evaluateRoles(roles, owner) {
11 | not pb.nodeRestrictionV117EnabledAndIsNode(owner)
12 | rule := roles[_].rules[_]
13 | pb.subresourceOrWildcard(rule.resources, "pods/status")
14 | pb.updateOrPatchOrWildcard(rule.verbs)
15 | pb.valueOrWildcard(rule.apiGroups, "")
16 | }
17 |
--------------------------------------------------------------------------------
/lib/modify_pods.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can update or patch pods in privileged namespaces (%v) can gain code execution on pods that are likely to be powerful", [concat(", ", pb.privileged_namespaces)])
7 | severity := "High"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | not pb.nodeRestrictionEnabledAndIsNode(owner)
13 | some role in roles
14 | pb.affectsPrivNS(role)
15 | some rule in role.rules
16 | pb.valueOrWildcard(rule.apiGroups, "")
17 | pb.valueOrWildcard(rule.resources, "pods")
18 | pb.updateOrPatchOrWildcard(rule.verbs)
19 | not pb.hasKey(rule, "resourceNames")
20 | }
21 |
--------------------------------------------------------------------------------
/lib/modify_service_status_cve_2020_8554.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 |
4 | describe[{"desc": desc, "severity": severity}] {
5 | desc := "Identities that can modify services/status may set the status.loadBalancer.ingress.ip field to exploit the unfixed CVE-2020-8554 and launch MiTM attacks against the cluster. Most mitigations for CVE-2020-8554 only prevent ExternalIP services"
6 | severity := "Medium"
7 | }
8 | targets := {"serviceAccounts", "nodes", "users", "groups"}
9 |
10 | evaluateRoles(roles, owner) {
11 | rule := roles[_].rules[_]
12 | pb.subresourceOrWildcard(rule.resources, "services/status")
13 | pb.updateOrPatchOrWildcard(rule.verbs)
14 | pb.valueOrWildcard(rule.apiGroups, "")
15 | # Considered adding create endpoints || update services as another requirement (control over the endpoint that traffic is stolen to)
16 | # Dropped since an existing service may already point to a pod in the attacker's orbit, so it's not necessarily a requirement.
17 | }
18 |
--------------------------------------------------------------------------------
/lib/nodes_proxy.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 |
4 | describe[{"desc": desc, "severity": severity}] {
5 | desc := "Identities with access to the nodes/proxy subresource can execute code on pods via the Kubelet API"
6 | severity := "High"
7 | }
8 | targets := {"serviceAccounts", "nodes", "users", "groups"}
9 |
10 | evaluateRoles(roles, owner) {
11 | not pb.nodeRestrictionEnabledAndIsNode(owner)
12 | rule := roles[_].rules[_]
13 | pb.valueOrWildcard(rule.verbs, "create")
14 | pb.subresourceOrWildcard(rule.resources, "nodes/proxy")
15 | pb.valueOrWildcard(rule.apiGroups, "")
16 | }
17 |
--------------------------------------------------------------------------------
/lib/obtain_token_weak_ns.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can retrieve or issue SA tokens in unprivileged namespaces could potentially obtain tokens with broader permissions over the cluster"
7 | severity := "Low"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | some role in roles
13 | not pb.affectsPrivNS(role) # don't overlap with policy for token retrieval in privileged namespaces
14 | some rule in role.rules
15 | ruleCanObtainToken(rule, owner)
16 | }
17 |
18 | # This runs the retrieve_token_secrets, token_request, issue_token_secrets and assign_sa policies, but for unprivileged namespaces
19 | ruleCanObtainToken(rule, ruleOwner) {
20 | ruleCanAcquireToken(rule, ruleOwner)
21 | pb.valueOrWildcard(rule.apiGroups, "")
22 | } {
23 | pb.ruleCanControlPodSa(rule, ruleOwner)
24 | }
25 |
26 | ruleCanAcquireToken(rule, ruleOwner) {
27 | pb.valueOrWildcard(rule.resources, "secrets")
28 | canAbuseSecretsForToken(rule.verbs)
29 | } {
30 | not pb.nodeRestrictionEnabledAndIsNode(ruleOwner)
31 | pb.subresourceOrWildcard(rule.resources, "serviceaccounts/token")
32 | pb.valueOrWildcard(rule.verbs, "create")
33 | }
34 |
35 | # Get - brute force token secret name (retrieve_token_secrets)
36 | # List - retrieve secrets (retrieve_token_secrets)
37 | # Create - manually create a token secret (issue_token_secrets)
38 | # Update & Patch - modify secret (issue_token_secrets), TODO: probably not exploitable if resourceNames is present?
39 | canAbuseSecretsForToken(verbs) {
40 | not pb.legacyTokenSecretsReducted
41 | listOrGet(verbs)
42 | } {
43 | pb.createUpdatePatchOrWildcard(verbs)
44 | }
45 |
46 | listOrGet(verbs) {
47 | "list" in verbs
48 | } {
49 | "get" in verbs
50 | }
--------------------------------------------------------------------------------
/lib/pods_ephemeral_ctrs.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 |
4 | describe[{"desc": desc, "severity": severity}] {
5 | desc := "Identities that can update or patch pods/ephemeralcontainers can gain code execution on other pods, and potentially break out to their node by adding an ephemeral container with a privileged securityContext"
6 | severity := "High"
7 | }
8 | targets := {"serviceAccounts", "nodes", "users", "groups"}
9 |
10 | evaluateRoles(roles, owner) {
11 | not pb.nodeRestrictionEnabledAndIsNode(owner)
12 | rule := roles[_].rules[_]
13 | pb.valueOrWildcard(rule.apiGroups, "")
14 | pb.subresourceOrWildcard(rule.resources, "pods/ephemeralcontainers")
15 | pb.updateOrPatchOrWildcard(rule.verbs)
16 | }
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/lib/pods_exec.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities with the create pods/exec permission in privileged namespaces (%v) can execute code on pods that are likely to be powerful", [concat(", ", pb.privileged_namespaces)])
7 | severity := "High"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | not pb.nodeRestrictionEnabledAndIsNode(owner)
13 | some role in roles
14 | pb.affectsPrivNS(role)
15 | some rule in role.rules
16 | pb.subresourceOrWildcard(rule.resources, "pods/exec")
17 | pb.valueOrWildcard(rule.verbs, "create")
18 | pb.valueOrWildcard(rule.apiGroups, "")
19 | not pb.hasKey(rule, "resourceNames")
20 | }
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/lib/providerIAM.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import data.config
4 | import future.keywords.in
5 |
6 | describe[{"desc": desc, "severity": severity}] {
7 | desc := "Kubernetes ServiceAccounts assigned cloud provider IAM roles may be abused to attack the underlying cloud account (depending on the permissions of the IAM role)"
8 | severity := "Low"
9 | }
10 |
11 | main[{"violations": violation}] {
12 | config.evalSaViolations
13 | violation := {"serviceAccounts": saViolations}
14 | }
15 |
16 | saViolations = violations {
17 | violations := { violation |
18 | some sa in input.serviceAccounts
19 | sa.providerIAM
20 | violation := {
21 | "name": sa.name,
22 | "namespace": sa.namespace,
23 | "nodes": { shortedNode |
24 | some node in sa.nodes
25 | shortedNode := {node.name: node.pods}
26 | },
27 | "providerIAM": sa.providerIAM
28 | }
29 | }
30 | count(violations) > 0
31 | }
--------------------------------------------------------------------------------
/lib/rce_weak_ns.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := "Identities that can update or patch pods or create pods/exec in unprivileged namespaces can execute code on existing pods"
7 | severity := "Medium"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | # This runs modify_pods and pods_exec but for weak namespaces
12 | evaluateRoles(roles, owner) {
13 | not pb.nodeRestrictionEnabledAndIsNode(owner)
14 | some role in roles
15 | not pb.affectsPrivNS(role)
16 | some rule in role.rules
17 | pb.valueOrWildcard(rule.apiGroups, "")
18 | not pb.hasKey(rule, "resourceNames")
19 | ruleCanRCE(rule)
20 | }
21 |
22 | ruleCanRCE(rule) {
23 | pb.updateOrPatchOrWildcard(rule.verbs)
24 | pb.valueOrWildcard(rule.resources, "pods")
25 | } {
26 | pb.valueOrWildcard(rule.verbs, "create")
27 | pb.subresourceOrWildcard(rule.resources, "pods/exec")
28 | }
29 |
--------------------------------------------------------------------------------
/lib/retrieve_token_secrets.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can retrieve secrets in privileged namespaces (%v) can obtain tokens of admin-equivalent SAs", [concat(", ", pb.privileged_namespaces)])
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | not pb.legacyTokenSecretsReducted
13 | some role in roles
14 | pb.affectsPrivNS(role)
15 | some rule in role.rules
16 | pb.valueOrWildcard(rule.resources, "secrets")
17 | pb.getOrListOrWildcard(rule.verbs) # get -> bruteforcing token secrets names
18 | pb.valueOrWildcard(rule.apiGroups, "")
19 | not pb.hasKey(rule, "resourceNames")
20 | }
21 |
--------------------------------------------------------------------------------
/lib/steal_pods.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can delete or evict pods in privileged namespaces (%v) and also make other nodes unschedulable can steal powerful pods from other nodes onto a compromised one", [concat(", ", pb.privileged_namespaces)])
7 | severity := "High"
8 | }
9 | targets := {"serviceAccounts", "nodes", "combined", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | rolesCanRemovePodsInPrivNS(roles, owner)
13 | rolesCanMakeNodesUnschedulable(roles, owner)
14 | }
15 |
16 | evaluateCombined = combinedViolations {
17 | combinedViolations := { combinedViolation |
18 | some node in input.nodes
19 | sasOnNode := pb.sasOnNode(node)
20 |
21 | # Can the node or one of its SAs remove pods?
22 | sasCanRemovePods := { saFullName |
23 | some sa in sasOnNode
24 | saEffectiveRoles := pb.effectiveRoles(sa.roles)
25 | rolesCanRemovePodsInPrivNS(saEffectiveRoles, "serviceAccount")
26 | saFullName := pb.saFullName(sa)
27 | }
28 | nodeCanRemovePods(node.roles, sasCanRemovePods)
29 |
30 | # Can the node or one of its SAs make other nodes unschedulable?
31 | sasCanMakeNodesUnschedulable := { saFullName |
32 | some sa in sasOnNode
33 | saEffectiveRoles := pb.effectiveRoles(sa.roles)
34 | rolesCanMakeNodesUnschedulable(saEffectiveRoles, "serviceAccount")
35 | saFullName := pb.saFullName(sa)
36 | }
37 | nodeCanMakeNodesUnschedulable(node.roles, sasCanMakeNodesUnschedulable)
38 |
39 | combinedViolation := {
40 | "node": node.name,
41 | "serviceAccounts": sasCanRemovePods | sasCanMakeNodesUnschedulable
42 | }
43 | }
44 | }
45 |
46 | nodeCanRemovePods(nodeRoles, sasCanRemovePods) {
47 | count(sasCanRemovePods) > 0
48 | } {
49 | nodeEffectiveRoles := pb.effectiveRoles(nodeRoles)
50 | rolesCanRemovePodsInPrivNS(nodeEffectiveRoles, "node")
51 | }
52 |
53 | nodeCanMakeNodesUnschedulable(nodeRoles, sasCanMakeNodesUnschedulable) {
54 | count(sasCanMakeNodesUnschedulable) > 0
55 | } {
56 | nodeEffectiveRoles := pb.effectiveRoles(nodeRoles)
57 | rolesCanMakeNodesUnschedulable(nodeEffectiveRoles, "node")
58 | }
59 |
60 | rolesCanRemovePodsInPrivNS(roles, owner) {
61 | some role in roles
62 | pb.affectsPrivNS(role)
63 | roleCanRemovePods(role, owner)
64 | }
65 |
66 | rolesCanMakeNodesUnschedulable(roles, owner) {
67 | not pb.nodeRestrictionEnabledAndIsNode(owner)
68 | rule := roles[_].rules[_]
69 | nodeOrNodeStatus(rule.resources)
70 | pb.updateOrPatchOrWildcard(rule.verbs)
71 | pb.valueOrWildcard(rule.apiGroups, "")
72 | not pb.hasKey(rule, "resourceNames")
73 | }
74 |
75 | roleCanRemovePods(role, roleOwner) {
76 | some rule in role.rules
77 | pb.valueOrWildcard(rule.apiGroups, "")
78 | ruleCanRemovePods(rule, roleOwner)
79 | }
80 |
81 | # Permissions that would allow one to remove a pod
82 | ruleCanRemovePods(rule, ruleOwner) {
83 | # Check perms that allow removal but may be blocked by NodeRestriction
84 | not pb.nodeRestrictionEnabledAndIsNode(ruleOwner)
85 | ruleCanRemovePodsInner(rule)
86 | } {
87 | # Check perms that allow removal but may be blocked by NodeRestriction from v1.17
88 | not pb.nodeRestrictionV117EnabledAndIsNode(ruleOwner)
89 | pb.subresourceOrWildcard(rule.resources, "pods/status")
90 | pb.updateOrPatchOrWildcard(rule.verbs)
91 | }
92 |
93 | # update / patch pods: set a pod's labels to match a pod controller, triggering the removal of a real replica
94 | # delete pods: simply delete a pod
95 | # create pods/eviction: evict a pod
96 | # delete nodes: delete a node to evict all its pods
97 | # update nodes: taint a node with the NoExecute taint to evict its pods
98 | ruleCanRemovePodsInner(rule) {
99 | pb.valueOrWildcard(rule.resources, "pods")
100 | pb.updateOrPatchOrWildcard(rule.verbs)
101 | } {
102 | not pb.hasKey(rule, "resourceNames")
103 | ruleCanRemovePodsInner2(rule)
104 | }
105 |
106 | # These are most likely benign with resourceNames
107 | ruleCanRemovePodsInner2(rule) {
108 | pb.valueOrWildcard(rule.resources, "pods")
109 | pb.valueOrWildcard(rule.verbs, "delete")
110 | } {
111 | pb.subresourceOrWildcard(rule.resources, "pods/eviction")
112 | pb.valueOrWildcard(rule.verbs, "create")
113 | } {
114 | pb.valueOrWildcard(rule.resources, "nodes")
115 | pb.valueOrWildcard(rule.verbs, "delete")
116 | } {
117 | pb.valueOrWildcard(rule.resources, "nodes")
118 | pb.updateOrPatchOrWildcard(rule.verbs)
119 | }
120 |
121 | nodeOrNodeStatus(resources) {
122 | pb.valueOrWildcard(resources, "nodes")
123 | } {
124 | pb.subresourceOrWildcard(resources, "nodes/status")
125 | }
126 |
--------------------------------------------------------------------------------
/lib/token_request.rego:
--------------------------------------------------------------------------------
1 | package policy
2 | import data.police_builtins as pb
3 | import future.keywords.in
4 |
5 | describe[{"desc": desc, "severity": severity}] {
6 | desc := sprintf("Identities that can create TokenRequests (serviceaccounts/token) in privileged namespaces (%v) can issue tokens for admin-equivalent SAs", [concat(", ", pb.privileged_namespaces)])
7 | severity := "Critical"
8 | }
9 | targets := {"serviceAccounts", "nodes", "users", "groups"}
10 |
11 | evaluateRoles(roles, owner) {
12 | not pb.nodeRestrictionEnabledAndIsNode(owner)
13 | some role in roles
14 | pb.affectsPrivNS(role)
15 | some rule in role.rules
16 | pb.subresourceOrWildcard(rule.resources, "serviceaccounts/token")
17 | pb.valueOrWildcard(rule.verbs, "create")
18 | pb.valueOrWildcard(rule.apiGroups, "")
19 | }
20 |
--------------------------------------------------------------------------------
/lib/utils/builtins.rego:
--------------------------------------------------------------------------------
1 | package police_builtins
2 | import future.keywords.in
3 |
4 | privileged_namespaces := {"kube-system"}
5 |
6 | # True if @arr contains @value or a wildcard
7 | valueOrWildcard(arr, value) {
8 | value in arr
9 | } {
10 | hasWildcard(arr)
11 | }
12 |
13 | # True if @arr includes a wildcard
14 | hasWildcard(arr) {
15 | "*" in arr
16 | }
17 |
18 | # True if @obj has a key @k
19 | hasKey(obj, k) {
20 | _ = obj[k]
21 | }
22 |
23 | # True if @role isn't namespaced, or namespaced to a privileged namespace
24 | affectsPrivNS(role) {
25 | notNamespaced(role)
26 | } {
27 | role.effectiveNamespace in privileged_namespaces
28 | }
29 |
30 | # True if @role isn't namespaced, or namespaced to @ns
31 | notNamespacedOrNamespace(role, ns) {
32 | notNamespaced(role)
33 | } {
34 | role.effectiveNamespace == ns
35 | }
36 |
37 | # True if @role isn't namespaced
38 | notNamespaced(role) {
39 | not hasKey(role, "effectiveNamespace")
40 | }
41 |
42 | # Returns the full name of @sa
43 | saFullName(sa) = fullName {
44 | fullName := sprintf("%v:%v", [sa.namespace, sa.name])
45 | }
46 |
47 | # True if @arr includes @combinedResourceName or a wildcard that applies to it
48 | subresourceOrWildcard(arr, combinedResourceName) {
49 | combinedResourceName in arr
50 | } {
51 | subresource := split(combinedResourceName, "/")[1]
52 | wildcardSubresource := sprintf("*/%v", [subresource])
53 | wildcardSubresource in arr
54 | } {
55 | hasWildcard(arr)
56 | }
57 |
58 | # Returns the SAs from input.serviceAccounts that are hosted on @node
59 | sasOnNode(node) = serviceAccountsOnNode {
60 | serviceAccountsOnNode = { sa |
61 | some sa in input.serviceAccounts
62 | fullname := saFullName(sa)
63 | fullname in node.serviceAccounts
64 | }
65 | }
66 |
67 | # True if @verbs includes either 'update', 'patch' or a wildcard
68 | updateOrPatchOrWildcard(verbs) {
69 | "update" in verbs
70 | } {
71 | "patch" in verbs
72 | } {
73 | hasWildcard(verbs)
74 | }
75 |
76 | # True if @verbs includes either 'create', 'update', 'patch' or a wildcard
77 | createUpdatePatchOrWildcard(verbs) {
78 | "create" in verbs
79 | } {
80 | updateOrPatchOrWildcard(verbs)
81 | }
82 |
83 | # True if @verbs includes either 'get', 'list', or a wildcard
84 | getOrListOrWildcard(verbs) {
85 | "list" in verbs
86 | }{
87 | "get" in verbs
88 | } {
89 | hasWildcard(verbs)
90 | }
91 |
92 | # True if, by any means, @rule is permitted to overwrite the SA of a pod
93 | ruleCanControlPodSa(rule, ruleOwner) {
94 | not nodeRestrictionEnabledAndIsNode(ruleOwner)
95 | valueOrWildcard(rule.verbs, "create")
96 | valueOrWildcard(rule.resources, "pods")
97 | valueOrWildcard(rule.apiGroups, "")
98 | } {
99 | podControllerResource(rule.resources, rule.apiGroups)
100 | createUpdatePatchOrWildcard(rule.verbs)
101 | }
102 |
103 | # True if @resources contains a resource that can control pods or a wildcard
104 | podControllerResource(resources, apiGroups) {
105 | "cronjobs" in resources
106 | valueOrWildcard(apiGroups, "batch")
107 | } {
108 | "jobs" in resources
109 | valueOrWildcard(apiGroups, "batch")
110 | } {
111 | "daemonsets" in resources
112 | valueOrWildcard(apiGroups, "apps")
113 | } {
114 | "statefulsets" in resources
115 | valueOrWildcard(apiGroups, "apps")
116 | } {
117 | "deployments" in resources
118 | valueOrWildcard(apiGroups, "apps")
119 | } {
120 | "replicasets" in resources
121 | valueOrWildcard(apiGroups, "apps")
122 | } {
123 | "replicationcontrollers" in resources
124 | valueOrWildcard(apiGroups, "")
125 | } {
126 | podControllerApiGroup(apiGroups)
127 | hasWildcard(resources)
128 | }
129 |
130 |
131 | # True if @apiGroups contains a wildcard,
132 | # or an API group that includes a resource that can control pods
133 | podControllerApiGroup(apiGroups) {
134 | "" in apiGroups
135 | }{
136 | "apps" in apiGroups
137 | }{
138 | "batch" in apiGroups
139 | }{
140 | hasWildcard(apiGroups)
141 | }
142 |
143 | # Returns the roles referenced by @roleRefs
144 | effectiveRoles(roleRefs) = effectiveRoles {
145 | effectiveRoles := { effectiveRole |
146 | some roleObj in input.roles
147 | some roleRef in roleRefs
148 | roleRef.name == roleObj.name
149 | equalNamespaceIfExist(roleRef, roleObj)
150 | effectiveRole := buildRole(roleRef, roleObj)
151 | }
152 | }
153 |
154 | # Builds role from @roleRef and @roleObj
155 | buildRole(roleRef, roleObj) = role {
156 | not hasKey(roleRef, "effectiveNamespace")
157 | role := {
158 | "name": roleRef.name,
159 | "rules": roleObj.rules
160 | }
161 | } {
162 | hasKey(roleRef, "effectiveNamespace")
163 | role := {
164 | "name": roleRef.name,
165 | "effectiveNamespace": roleRef.effectiveNamespace,
166 | "rules": roleObj.rules
167 | }
168 | }
169 |
170 | # True if @obj and @other have the same namespace, or if neither has one
171 | equalNamespaceIfExist(obj, other) {
172 | obj.namespace == other.namespace
173 | } {
174 | not hasKey(obj, "namespace")
175 | not hasKey(other, "namespace")
176 | }
177 |
178 | # Checks for LegacyTokenSecretsReducted
179 | legacyTokenSecretsReducted := true {
180 | metadata := object.get(input, "metadata", {})
181 | features := object.get(metadata, "features", [])
182 | "LegacyTokenSecretsReducted" in features
183 | }
184 |
185 | # Checks for NodeRestriction
186 | NodeRestriction := true {
187 | metadata := object.get(input, "metadata", {})
188 | features := object.get(metadata, "features", [])
189 | "NodeRestriction" in features
190 | }
191 |
192 | # Checks for NodeRestriction1.17
193 | NodeRestrictionV117 := true {
194 | metadata := object.get(input, "metadata", {})
195 | features := object.get(metadata, "features", [])
196 | "NodeRestriction1.17" in features
197 | }
198 |
199 | # Permission owner is a node and NodeRestriction is enabled
200 | nodeRestrictionEnabledAndIsNode(permissionOwner) {
201 | NodeRestriction
202 | permissionOwner == "node"
203 | }
204 |
205 | # Permission owner is a node and NodeRestriction v1.17 is enabled
206 | nodeRestrictionV117EnabledAndIsNode(permissionOwner) {
207 | NodeRestrictionV117
208 | permissionOwner == "node"
209 | }
210 |
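211 | # Illustrative input shape assumed by effectiveRoles/buildRole above (inferred from
212 | # this file, not copied from an actual collector output):
213 | #
214 | #   roleRefs (on a serviceAccount/node/user/group):
215 | #     [{"name": "my-role", "namespace": "kube-system", "effectiveNamespace": "kube-system"}]
216 | #   input.roles:
217 | #     [{"name": "my-role", "namespace": "kube-system",
218 | #       "rules": [{"apiGroups": [""], "resources": ["pods"], "verbs": ["get"]}]}]
219 | #
220 | # A reference to a ClusterRole granted by a ClusterRoleBinding carries neither
221 | # "namespace" nor "effectiveNamespace", and the resulting role is treated as not namespaced.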
--------------------------------------------------------------------------------
/lib/utils/wrapper.rego:
--------------------------------------------------------------------------------
1 | package wrapper
2 |
3 | import data.policy as policy
4 | import data.police_builtins as pb
5 | import data.config
6 | import future.keywords.in
7 |
8 | main[{"violations": violation}] {
9 | config.evalSaViolations
10 | violation := {"serviceAccounts": saViolations}
11 | } {
12 | config.evalNodeViolations
13 | violation := {"nodes": nodeViolations}
14 | } {
15 | config.evalCombinedViolations
16 | violation := {"combined": combinedViolations}
17 | } {
18 | config.evalUserViolations
19 | violation := {"users": userViolations}
20 | } {
21 | config.evalGroupViolations
22 | violation := {"groups": groupViolations}
23 | }
24 |
25 |
26 | saViolations = violations {
27 | "serviceAccounts" in policy.targets
28 | violations := { violation |
29 | some sa in input.serviceAccounts
30 | saEffectiveRoles := pb.effectiveRoles(sa.roles)
31 | policy.evaluateRoles(saEffectiveRoles, "serviceAccount")
32 | violation := {
33 | "name": sa.name,
34 | "namespace": sa.namespace,
35 | "nodes": { shortedNode |
36 | some node in sa.nodes
37 | shortedNode := {node.name: node.pods}
38 | },
39 | }
40 | }
41 | count(violations) > 0
42 | }
43 |
44 | nodeViolations = violations {
45 | "nodes" in policy.targets
46 | violations := { violation |
47 | some node in input.nodes
48 | nodeEffectiveRoles := pb.effectiveRoles(node.roles)
49 | policy.evaluateRoles(nodeEffectiveRoles, "node")
50 | violation := node.name
51 | }
52 | count(violations) > 0
53 | }
54 |
55 | combinedViolations = violations {
56 | "combined" in policy.targets
57 | violations := policy.evaluateCombined
58 | count(violations) > 0
59 | }
60 |
61 | userViolations = violations {
62 | "users" in policy.targets
63 | violations := { violation |
64 | some user in input.users
65 | effectiveRoles := pb.effectiveRoles(user.roles)
66 | policy.evaluateRoles(effectiveRoles, "user")
67 | violation := user.name
68 | }
69 | count(violations) > 0
70 | }
71 |
72 | groupViolations = violations {
73 | "groups" in policy.targets
74 | violations := { violation |
75 | some group in input.groups
76 | effectiveRoles := pb.effectiveRoles(group.roles)
77 | policy.evaluateRoles(effectiveRoles, "group")
78 | violation := group.name
79 | }
80 | count(violations) > 0
81 | }
82 |
83 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import "github.com/PaloAltoNetworks/rbac-police/cmd"
4 |
5 | func main() {
6 | cmd.Execute()
7 | }
8 |
--------------------------------------------------------------------------------
/pkg/collect/cluster_db.go:
--------------------------------------------------------------------------------
1 | package collect
2 |
3 | import (
4 | "context"
5 |
6 | log "github.com/sirupsen/logrus"
7 | v1 "k8s.io/api/core/v1"
8 | rbac "k8s.io/api/rbac/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/client-go/kubernetes"
11 | )
12 |
13 | // buildClusterDb populates a ClusterDb object by querying a cluster
14 | func buildClusterDb(clientset *kubernetes.Clientset, ns string, ignoreControlPlane bool) *ClusterDb {
15 | var (
16 | clusterDb ClusterDb
17 | err error
18 | )
19 | clusterDb.RoleBindings, clusterDb.ClusterRoleBindings, err = getRoleBindingsAndClusterRoleBindings(clientset)
20 | if err != nil {
21 | return nil // error printed in getRoleBindingsAndClusterRoleBindings
22 | }
23 | clusterDb.Roles, clusterDb.ClusterRoles, err = getRolesAndClusterRoles(clientset)
24 | if err != nil {
25 | return nil // error printed in getRolesAndClusterRoles
26 | }
27 | clusterDb.ServiceAccounts, err = getServiceAccounts(clientset, ns)
28 | if err != nil {
29 | return nil // error printed in getServiceAccounts
30 | }
31 | clusterDb.Nodes, err = getNodes(clientset, ignoreControlPlane)
32 | if err != nil {
33 | return nil // error printed in getNodes
34 | }
35 | clusterDb.Pods, err = getPods(clientset, ns)
36 | if err != nil {
37 | return nil // error printed in getPods
38 | }
39 | if ignoreControlPlane {
40 | removePodsFromExcludedNodes(&clusterDb) // remove control plane pods if needed
41 | }
42 | return &clusterDb
43 | }
44 |
45 | // Get all serviceAccounts cluster-wide, or in a namespace if @ns is set
46 | func getServiceAccounts(clientset *kubernetes.Clientset, ns string) ([]v1.ServiceAccount, error) {
47 | serviceAccountList, err := clientset.CoreV1().ServiceAccounts(ns).List(context.Background(), metav1.ListOptions{})
48 | if err != nil {
49 | log.Errorln("getServiceAccounts: failed to retrieve serviceaccounts with", err)
50 | return nil, err
51 | }
52 | return serviceAccountList.Items, nil
53 | }
54 |
55 | // Get all pods cluster-wide, or in a namespace if @ns is set
56 | func getPods(clientset *kubernetes.Clientset, ns string) ([]v1.Pod, error) {
57 | podList, err := clientset.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{})
58 | if err != nil {
59 | log.Errorln("getPods: failed to retrieve pods with", err)
60 | return nil, err
61 | }
62 | return podList.Items, nil
63 | }
64 |
65 | // Get nodes, drop control plane nodes if @ignoreControlPlane is set
66 | func getNodes(clientset *kubernetes.Clientset, ignoreControlPlane bool) ([]v1.Node, error) {
67 | listOptions := metav1.ListOptions{}
68 | if ignoreControlPlane {
69 | listOptions.LabelSelector = "!node-role.kubernetes.io/master, !node-role.kubernetes.io/control-plane"
70 | }
71 | nodeList, err := clientset.CoreV1().Nodes().List(context.Background(), listOptions)
72 | if err != nil {
73 | log.Errorln("getNodes: failed to retrieve nodes with", err)
74 | return nil, err
75 | }
76 | return nodeList.Items, nil
77 | }
78 |
79 | // Retrieves roles and clusterRoles
80 | func getRolesAndClusterRoles(clientset *kubernetes.Clientset) ([]rbac.Role, []rbac.ClusterRole, error) {
81 | roleList, err := clientset.RbacV1().Roles("").List(context.Background(), metav1.ListOptions{})
82 | if err != nil {
83 | log.Errorln("getRolesAndClusterRoles: failed to retrieve roles with", err)
84 | return nil, nil, err
85 | }
86 | clusterRoleList, err := clientset.RbacV1().ClusterRoles().List(context.Background(), metav1.ListOptions{})
87 | if err != nil {
88 | log.Errorln("getRolesAndClusterRoles: failed to retrieve clusterRoles with", err)
89 | return nil, nil, err
90 | }
91 | return roleList.Items, clusterRoleList.Items, nil
92 | }
93 |
94 | // Retrieves roleBindings and clusterRoleBindings
95 | func getRoleBindingsAndClusterRoleBindings(clientset *kubernetes.Clientset) ([]rbac.RoleBinding, []rbac.ClusterRoleBinding, error) {
96 | roleBindingList, err := clientset.RbacV1().RoleBindings("").List(context.Background(), metav1.ListOptions{})
97 | if err != nil {
98 | log.Errorln("getRoleBindingsAndClusterRoleBindings: failed to retrieve roleBindings with", err)
99 | return nil, nil, err
100 | }
101 | clusterRoleBindingList, err := clientset.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{})
102 | if err != nil {
103 | log.Errorln("getRoleBindingsAndClusterRoleBindings: failed to retrieve ClusterroleBindings with", err)
104 | return nil, nil, err
105 | }
106 | return roleBindingList.Items, clusterRoleBindingList.Items, nil
107 | }
108 |
109 | // Removes pods that have a NodeName which is not in cDb.Nodes
110 | func removePodsFromExcludedNodes(cDb *ClusterDb) {
111 | var includedPods []v1.Pod
112 |
113 | for _, pod := range cDb.Pods {
114 | if pod.Spec.NodeName == "" {
115 | includedPods = append(includedPods, pod) // include non-scheduled pods
116 | continue
117 | }
118 | for _, node := range cDb.Nodes {
119 | if pod.Spec.NodeName == node.Name {
120 | // Pod hosted on included node
121 | includedPods = append(includedPods, pod)
122 | break
123 | }
124 | }
125 | }
126 | cDb.Pods = includedPods
127 | }
128 |
--------------------------------------------------------------------------------
/pkg/collect/collect.go:
--------------------------------------------------------------------------------
1 | package collect
2 |
3 | import (
4 | "strings"
5 |
6 | log "github.com/sirupsen/logrus"
7 | "k8s.io/client-go/kubernetes"
8 | _ "k8s.io/client-go/plugin/pkg/client/auth" // in order to connect to clusters via auth plugins
9 | "k8s.io/client-go/tools/clientcmd"
10 | )
11 |
12 | // Collect retrieves the RBAC settings in a k8s cluster
13 | func Collect(collectConfig CollectConfig) *CollectResult {
14 | var metadata *ClusterMetadata
15 | var clusterDb *ClusterDb
16 | var kubeConfig clientcmd.ClientConfig = nil
17 |
18 | if collectConfig.OfflineDir == "" {
19 | // Online mode, init Kubernetes client
20 | clientset, kConfigTmp, err := initKubeClient()
21 | kubeConfig = kConfigTmp
22 | if err != nil {
23 | return nil // error printed in initKubeClient
24 | }
25 | // Build metadata and clusterDb from remote cluster
26 | metadata = buildMetadata(clientset, kubeConfig)
27 | clusterDb = buildClusterDb(clientset, collectConfig.Namespace, collectConfig.IgnoreControlPlane)
28 | } else {
29 | // Offline mode, parse clusterDb and metadata from local files
30 | clusterDb, metadata = parseLocalCluster(collectConfig)
31 | }
32 | if clusterDb == nil {
33 | return nil // error printed in buildClusterDb or in parseLocalCluster
34 | }
35 |
36 | if collectConfig.DiscoverProtections {
37 | discoverRelevantControlPlaneFeatures(collectConfig, kubeConfig, clusterDb, metadata)
38 | }
39 |
40 | rbacDb := buildRbacDb(*clusterDb, collectConfig)
41 | if rbacDb == nil {
42 | return nil // error printed in buildRbacDb
43 | }
44 |
45 | return &CollectResult{
46 | Metadata: *metadata,
47 | ServiceAccounts: rbacDb.ServiceAccounts,
48 | Nodes: rbacDb.Nodes,
49 | Users: rbacDb.Users,
50 | Groups: rbacDb.Groups,
51 | Roles: rbacDb.Roles,
52 | }
53 | }
54 |
55 | // Initialize the Kubernetes client
56 | func initKubeClient() (*kubernetes.Clientset, clientcmd.ClientConfig, error) {
57 | loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
58 | kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
59 | config, err := kubeConfig.ClientConfig()
60 | if err != nil {
61 | log.Errorln("initKubeClient: failed creating ClientConfig with", err)
62 | return nil, nil, err
63 | }
64 | clientset, err := kubernetes.NewForConfig(config)
65 | if err != nil {
66 | log.Errorln("initKubeClient: failed creating Clientset with", err)
67 | return nil, nil, err
68 | }
69 | return clientset, kubeConfig, nil
70 | }
71 |
72 | // Get cluster metadata
73 | func buildMetadata(clientset *kubernetes.Clientset, kubeConfig clientcmd.ClientConfig) *ClusterMetadata {
74 | metadata := ClusterMetadata{
75 | Features: []string{},
76 | }
77 |
78 | rawConfig, err := kubeConfig.RawConfig()
79 | if err != nil {
80 | log.Warnln("buildMetadata: failed to get raw kubeconfig", err)
81 | } else {
82 | metadata.ClusterName = rawConfig.Contexts[rawConfig.CurrentContext].Cluster
83 | }
84 |
85 | versionInfo, err := clientset.Discovery().ServerVersion()
86 | if err != nil {
87 | log.Warnln("buildMetadata: failed to get server version with", err)
88 | } else {
89 | metadata.Version = ClusterVersion{
90 | Major: versionInfo.Major,
91 | Minor: versionInfo.Minor,
92 | GitVersion: versionInfo.GitVersion,
93 | }
94 | metadata.Platform = platformFromVersion(versionInfo.GitVersion)
95 | }
96 |
97 | return &metadata
98 | }
99 |
100 | // Identifies the underlying platform from a cluster's @version,
101 | // supports EKS and GKE
102 | func platformFromVersion(version string) string {
103 | if strings.Contains(version, "-eks-") {
104 | return "eks"
105 | }
106 | if strings.Contains(version, "-gke.") {
107 | return "gke"
108 | }
109 | return ""
110 | }
111 |
--------------------------------------------------------------------------------
/pkg/collect/discover_protections.go:
--------------------------------------------------------------------------------
1 | package collect
2 |
3 | import (
4 | "context"
5 | "strconv"
6 | "strings"
7 |
8 | v1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/client-go/kubernetes"
11 | "k8s.io/client-go/rest"
12 | "k8s.io/client-go/tools/clientcmd"
13 | )
14 |
15 | // Discover control plane feature gates and admission controllers that protect against certain attacks,
16 | // and populate the cluster's metadata with them for policies to consume.
17 | // NOTE: Uses impersonation and dry-run write operations, which won't affect the cluster, but may show up in audit logs.
18 | func discoverRelevantControlPlaneFeatures(collectConfig CollectConfig, kubeConfig clientcmd.ClientConfig, clusterDb *ClusterDb, metadata *ClusterMetadata) {
19 | if legacyTokenSecretsReducted(clusterDb, collectConfig.Namespace) {
20 | metadata.Features = append(metadata.Features, "LegacyTokenSecretsReducted")
21 | }
22 | // If NodeAuthorization is used, and we're not running in offline mode, check for NodeRestriction
23 | if collectConfig.NodeUser == "" && collectConfig.OfflineDir == "" {
24 | if NodeRestrictionEnabled(kubeConfig, clusterDb, metadata) {
25 | metadata.Features = append(metadata.Features, "NodeRestriction")
26 | // If the cluster's version >=1.17, populate NodeRestriction1.17
27 | major, err := strconv.Atoi(metadata.Version.Major)
28 | if err == nil {
29 | minor, err := strconv.Atoi(metadata.Version.Minor)
30 | if err == nil {
31 | if major > 1 || minor >= 17 {
32 | metadata.Features = append(metadata.Features, "NodeRestriction1.17")
33 | }
34 | }
35 | }
36 | }
37 | }
38 | }
39 |
40 | // Best-effort check for whether serviceAccount tokens are no longer auto-generated as secrets
41 | func legacyTokenSecretsReducted(clusterDb *ClusterDb, ns string) bool {
42 | // If collection is scoped to ns, use its default serviceAccount for testing,
43 | // Otherwise use kube-system:replicaset-controller
44 | saName := "default"
45 | if ns == "" {
46 | ns = "kube-system"
47 | saName = "replicaset-controller"
48 | }
49 |
50 | for _, serviceAccount := range clusterDb.ServiceAccounts {
51 | if serviceAccount.ObjectMeta.Namespace != ns {
52 | continue
53 | }
54 |
55 | if serviceAccount.ObjectMeta.Name != saName {
56 | continue
57 | }
58 | // Return true if there are no auto-generated secrets for the serviceAccount
59 | return len(serviceAccount.Secrets) == 0
60 | }
61 | return false
62 | }
63 |
64 | // Some variables for the NodeRestriction check
65 | var mirrorPodAnnotationErrMsg = "pod does not have \"kubernetes.io/config.mirror\" annotation"
66 | var dryRunName = "rbac-police-dry-run-test-pod"
67 | var testPodSpec = &v1.Pod{
68 | ObjectMeta: metav1.ObjectMeta{
69 | Name: dryRunName,
70 | },
71 | Spec: v1.PodSpec{
72 | Containers: []v1.Container{
73 | {
74 | Name: dryRunName,
75 | Image: dryRunName,
76 | },
77 | },
78 | },
79 | }
80 |
81 | // Check if NodeRestriction is enabled by impersonating a node and creating a non-mirror pod
82 | func NodeRestrictionEnabled(kubeConfig clientcmd.ClientConfig, clusterDb *ClusterDb, metadata *ClusterMetadata) bool {
83 | if len(clusterDb.Nodes) == 0 {
84 | return false
85 | }
86 |
87 | // Create client that impersonates a node
88 | config, err := kubeConfig.ClientConfig()
89 | if err != nil {
90 | return false
91 | }
92 | config.Impersonate = rest.ImpersonationConfig{
93 | UserName: "system:node:" + clusterDb.Nodes[0].ObjectMeta.Name,
94 | Groups: []string{"system:nodes", "system:authenticated"},
95 | }
96 | attackNodeClientSet, err := kubernetes.NewForConfig(config)
97 | if err != nil {
98 | return false
99 | }
100 | attackNodePodClient := attackNodeClientSet.CoreV1().Pods("default")
101 |
102 | // Dry run create the test pod
103 | dryRunCreate := metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}
104 | _, err = attackNodePodClient.Create(context.Background(), testPodSpec, dryRunCreate)
105 | if err == nil {
106 | return false
107 | }
108 | return strings.Contains(err.Error(), mirrorPodAnnotationErrMsg)
109 | }
110 |
--------------------------------------------------------------------------------
/pkg/collect/offline.go:
--------------------------------------------------------------------------------
1 | package collect
2 |
3 | import (
4 | "encoding/json"
5 | "github.com/PaloAltoNetworks/rbac-police/pkg/utils"
6 | "k8s.io/apimachinery/pkg/runtime"
7 | "k8s.io/apimachinery/pkg/runtime/serializer"
8 | "k8s.io/apimachinery/pkg/version"
9 | "os"
10 | "path"
11 | "reflect"
12 | "strings"
13 |
14 | log "github.com/sirupsen/logrus"
15 | v1 "k8s.io/api/core/v1"
16 | rbac "k8s.io/api/rbac/v1"
17 | )
18 |
19 | // parseLocalCluster parses k8s manifests from a local directory into ClusterDb and ClusterMetadata objects
20 | func parseLocalCluster(config CollectConfig) (*ClusterDb, *ClusterMetadata) {
21 | var versionInfo version.Info
22 | var inputFiles []string
23 | metadata := ClusterMetadata{
24 | Features: []string{},
25 | }
26 |
27 | // Read local dir
28 | files, err := os.ReadDir(config.OfflineDir)
29 | if err != nil {
30 | log.Errorf("parseLocalCluster: failed to read local dir %q with %v", config.OfflineDir, err)
31 | return nil, nil
32 | }
33 |
34 | // Iterate local dir
35 | for _, file := range files {
36 | if file.IsDir() {
37 | continue
38 | }
39 | if file.Name() == "cluster_name" {
40 | // Parse cluster_name file into metadata
41 | if nameBytes, err := os.ReadFile(path.Join(config.OfflineDir, file.Name())); err != nil {
42 | log.Warnf("parseLocalCluster: failed to read cluster name from local dir %v", err)
43 | } else {
44 | metadata.ClusterName = strings.TrimSuffix(string(nameBytes), "\n")
45 | }
46 | } else if file.Name() == "version.json" {
47 | // Parse version.json file into metadata
48 | if versionBytes, err := os.ReadFile(path.Join(config.OfflineDir, file.Name())); err != nil {
49 | log.Warnf("parseLocalCluster: failed to read version.json with %v\n", err)
50 | } else {
51 | err = json.Unmarshal(versionBytes, &versionInfo)
52 | if err != nil {
53 | log.Warnf("parseLocalCluster: failed to unmarshal %s into a version.Info obj with %v\n", file.Name(), err)
54 | } else {
55 | metadata.Version = ClusterVersion{
56 | Major: versionInfo.Major,
57 | Minor: versionInfo.Minor,
58 | GitVersion: versionInfo.GitVersion,
59 | }
60 | metadata.Platform = platformFromVersion(versionInfo.GitVersion)
61 | }
62 | }
63 | } else if strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".json") {
64 | // Add input files (e.g. 'pods.json', 'nodes.yaml', etc.)
65 | inputFiles = append(inputFiles, path.Join(config.OfflineDir, file.Name()))
66 | }
67 | }
68 | if len(inputFiles) != 7 {
69 | log.Errorf("parseLocalCluster: expected 7 input files, got %d\n", len(inputFiles))
70 | return nil, nil
71 | }
72 | clusterDb := clusterDbFromLocalFiles(inputFiles, config)
73 | return clusterDb, &metadata
74 | }
75 |
76 | // Creates a ClusterDb object from @inputFiles
77 | func clusterDbFromLocalFiles(inputFiles []string, config CollectConfig) *ClusterDb {
78 | var clusterDb ClusterDb
79 |
80 | // Prepare a scheme containing all the objects we need to decode
81 | scheme := returnScheme()
82 | if scheme == nil {
83 | return nil // err printed in returnScheme
84 | }
85 | decodeFunc := serializer.NewCodecFactory(scheme).UniversalDeserializer().Decode
86 |
87 | // Go over files
88 | for _, filePath := range inputFiles {
89 | // Read file
90 | inputBytes, err := utils.ReadFile(filePath)
91 | if err != nil {
92 | return nil
93 | }
94 | // Decode the file's contents into a *v1.List
95 | decodedBytes, _, err := decodeFunc(inputBytes, nil, nil)
96 | if err != nil {
97 | log.Errorf("clusterDbFromLocalFiles: error while decoding %s: %v\n", filePath, err)
98 | return nil
99 | }
100 | switch list := decodedBytes.(type) {
101 | case *v1.List:
102 | // Iterate list items and try to decode each into an expected input type
103 | for i, item := range list.Items {
104 | decodedObj, _, err := decodeFunc(item.Raw, nil, nil)
105 | if err != nil {
106 | log.Errorf("clusterDbFromLocalFiles: error while decoding %s items[%d].Raw: %v\n", filePath, i, err)
107 | return nil
108 | }
109 | switch item := decodedObj.(type) {
110 | case *v1.Pod:
111 | if config.Namespace != "" && item.ObjectMeta.Namespace != config.Namespace {
112 | continue // don't add pod if it's not in the ns the collection is scoped to
113 | }
114 | clusterDb.Pods = append(clusterDb.Pods, *item)
115 | case *v1.Node:
116 | 				if config.IgnoreControlPlane {
117 | 					_, isMaster := item.ObjectMeta.Labels["node-role.kubernetes.io/master"]
118 | 					_, isControlPlane := item.ObjectMeta.Labels["node-role.kubernetes.io/control-plane"]
119 | 					if isMaster || isControlPlane {
120 | 						continue // skip control plane nodes if asked to
121 | 					}
122 | 				}
123 | clusterDb.Nodes = append(clusterDb.Nodes, *item)
124 | case *v1.ServiceAccount:
125 | if config.Namespace != "" && item.ObjectMeta.Namespace != config.Namespace {
126 | continue // don't add SA if it's not in the ns the collection is scoped to
127 | }
128 | clusterDb.ServiceAccounts = append(clusterDb.ServiceAccounts, *item)
129 | case *rbac.ClusterRole:
130 | clusterDb.ClusterRoles = append(clusterDb.ClusterRoles, *item)
131 | case *rbac.Role:
132 | clusterDb.Roles = append(clusterDb.Roles, *item)
133 | case *rbac.ClusterRoleBinding:
134 | clusterDb.ClusterRoleBindings = append(clusterDb.ClusterRoleBindings, *item)
135 | case *rbac.RoleBinding:
136 | clusterDb.RoleBindings = append(clusterDb.RoleBindings, *item)
137 | default:
138 | log.Errorf("clusterDbFromLocalFiles: unexpected type while decoding %s items[%d], got %s\n", filePath, i, reflect.TypeOf(decodedObj))
139 | return nil
140 | }
141 | }
142 | default:
143 | log.Errorf("clusterDbFromLocalFiles: unexpected type decoding %s, expected *v1.List, got %s\n", filePath, reflect.TypeOf(decodedBytes))
144 | return nil
145 | }
146 | }
147 | if config.IgnoreControlPlane {
148 | removePodsFromExcludedNodes(&clusterDb) // remove control plane pods if needed
149 | }
150 | return &clusterDb
151 | }
152 |
153 | // Returns a scheme describing the objects we want to decode
154 | func returnScheme() *runtime.Scheme {
155 | schemes := runtime.NewScheme()
156 | if err := v1.AddToScheme(schemes); err != nil {
157 | log.Errorf("returnScheme: failed to add the core v1 scheme with %v\n", err)
158 | return nil
159 | }
160 | if err := rbac.AddToScheme(schemes); err != nil {
161 | log.Errorf("returnScheme: failed to add the rbac v1 scheme with %v\n", err)
162 | return nil
163 | }
164 | return schemes
165 | }
166 |
--------------------------------------------------------------------------------
/pkg/collect/rbac_db.go:
--------------------------------------------------------------------------------
1 | package collect
2 |
3 | import (
4 | "strings"
5 |
6 | "github.com/PaloAltoNetworks/rbac-police/pkg/utils"
7 | v1 "k8s.io/api/core/v1"
8 | rbac "k8s.io/api/rbac/v1"
9 | )
10 |
11 | // buildRbacDb populates a RbacDb object from a ClusterDb according to config
12 | func buildRbacDb(cDb ClusterDb, collectConfig CollectConfig) *RbacDb {
13 | var rbacDb RbacDb
14 |
15 | for _, node := range cDb.Nodes {
16 | rbacDb.Nodes = append(rbacDb.Nodes, NodeEntry{Name: node.Name, ServiceAccounts: []string{}})
17 | }
18 |
19 | for _, sa := range cDb.ServiceAccounts {
20 | saEntry := ServiceAccountEntry{
21 | Name: sa.Name,
22 | Namespace: sa.Namespace,
23 | }
24 | // Add pods that are assigned the SA
25 | for _, pod := range cDb.Pods {
26 | if saEntry.Equals(pod.Spec.ServiceAccountName, pod.ObjectMeta.Namespace) {
27 | newNodeForSA := true
28 | for i := range saEntry.Nodes {
29 | if saEntry.Nodes[i].Name == pod.Spec.NodeName {
30 | saEntry.Nodes[i].Pods = append(saEntry.Nodes[i].Pods, pod.ObjectMeta.Name)
31 | newNodeForSA = false
32 | break
33 | }
34 | }
35 | if newNodeForSA {
36 | saEntry.Nodes = append(saEntry.Nodes, NodeToPods{Name: pod.Spec.NodeName, Pods: []string{pod.ObjectMeta.Name}})
37 | for i := range rbacDb.Nodes {
38 | if rbacDb.Nodes[i].Name == pod.Spec.NodeName {
39 | rbacDb.Nodes[i].ServiceAccounts = append(rbacDb.Nodes[i].ServiceAccounts, utils.FullName(saEntry.Namespace, saEntry.Name))
40 | break
41 | }
42 | }
43 | }
44 | }
45 | }
46 | // Add SA if it's assigned to a pod or if we're configured to always collect
47 | if saEntry.Nodes != nil || collectConfig.AllServiceAccounts {
48 | saEntry.ProviderIAM = getProviderIAM(sa)
49 | rbacDb.ServiceAccounts = append(rbacDb.ServiceAccounts, saEntry)
50 | }
51 | }
52 |
53 | populateRoleBindingsPermissions(&rbacDb, cDb, collectConfig)
54 | populateClusterRoleBindingsPermissions(&rbacDb, cDb, collectConfig)
55 |
56 | return &rbacDb
57 | }
58 |
59 | // Incorporates the permissions granted by roleBindings into @rbacDb
60 | func populateRoleBindingsPermissions(rbacDb *RbacDb, cDb ClusterDb, collectConfig CollectConfig) {
61 | for _, rb := range cDb.RoleBindings {
62 | var roleEntry RoleEntry
63 | if rb.RoleRef.Kind == "ClusterRole" {
64 | roleEntry = findClusterRole(cDb.ClusterRoles, rb.RoleRef)
65 | } else if rb.RoleRef.Kind == "Role" {
66 | roleEntry = findRole(cDb.Roles, rb.RoleRef, rb.ObjectMeta.Namespace)
67 | }
68 | if roleEntry.Name == "" {
69 | 			continue // bound role doesn't exist
70 | }
71 |
72 | roleRef := RoleRef{ // short version of roleEntry for sa & nodes to point to
73 | Name: roleEntry.Name,
74 | Namespace: roleEntry.Namespace,
75 | EffectiveNamespace: rb.ObjectMeta.Namespace,
76 | }
77 | roleBindedToRelevantSubject := false
78 |
79 | // Check if rb grants role to a serviceAccount
80 | for i, sa := range rbacDb.ServiceAccounts {
81 | if isSAReferencedBySubjects(rb.Subjects, utils.FullName(sa.Namespace, sa.Name), rb.Namespace) {
82 | rbacDb.ServiceAccounts[i].Roles = append(rbacDb.ServiceAccounts[i].Roles, roleRef)
83 | roleBindedToRelevantSubject = true
84 | }
85 | }
86 | // Check if rb grants role to a node
87 | for i, node := range rbacDb.Nodes {
88 | if isNodeReferencedBySubjects(rb.Subjects, node.Name, collectConfig.NodeGroups, collectConfig.NodeUser) {
89 | rbacDb.Nodes[i].Roles = append(rbacDb.Nodes[i].Roles, roleRef)
90 | roleBindedToRelevantSubject = true
91 | }
92 | }
93 |
94 | // Check if rb grants role to a user or group
95 | for _, subject := range rb.Subjects {
96 | if subject.Kind == "User" {
97 | userAlreadyInDb := false
98 | roleBindedToRelevantSubject = true
99 | for i, user := range rbacDb.Users {
100 | if subject.Name == user.Name {
101 | rbacDb.Users[i].Roles = append(rbacDb.Users[i].Roles, roleRef)
102 | userAlreadyInDb = true
103 | break // found user, break
104 | }
105 | }
106 | if !userAlreadyInDb { // add user to RbacDb if encountered it for the first time
107 | rbacDb.Users = append(rbacDb.Users, NamedEntry{Name: subject.Name, Roles: []RoleRef{roleRef}})
108 | }
109 | } else if subject.Kind == "Group" {
110 | if subject.Name == "system:masters" {
111 | continue // ignore system:masters to reduce clutter
112 | }
113 | grpAlreadyInDb := false
114 | roleBindedToRelevantSubject = true
115 | for i, grp := range rbacDb.Groups {
116 | if subject.Name == grp.Name {
117 | rbacDb.Groups[i].Roles = append(rbacDb.Groups[i].Roles, roleRef)
118 | grpAlreadyInDb = true
119 | break // found group, break
120 | }
121 | }
122 | if !grpAlreadyInDb { // add grp to RbacDb if encountered it for the first time
123 | rbacDb.Groups = append(rbacDb.Groups, NamedEntry{Name: subject.Name, Roles: []RoleRef{roleRef}})
124 | }
125 | }
126 |
127 | }
128 | // Add role to rbacDb if it's granted to any SA or node
129 | if roleBindedToRelevantSubject {
130 | addRoleIfDoesntExists(rbacDb, roleEntry)
131 | }
132 | }
133 | }
134 |
135 | // Incorporates the permissions granted by clusterRoleBindings into @rbacDb
136 | func populateClusterRoleBindingsPermissions(rbacDb *RbacDb, cDb ClusterDb, collectConfig CollectConfig) {
137 | for _, crb := range cDb.ClusterRoleBindings {
138 | clusterRoleEntry := findClusterRole(cDb.ClusterRoles, crb.RoleRef)
139 | if clusterRoleEntry.Name == "" {
140 | 			continue // bound clusterRole doesn't exist
141 | }
142 | clusterRoleRef := RoleRef{ // short version of roleEntry for sa & nodes to point to
143 | Name: clusterRoleEntry.Name,
144 | }
145 | roleBindedToRelevantSubject := false
146 |
147 | // Check if the crb grants the cr to a serviceAccount
148 | for i, sa := range rbacDb.ServiceAccounts {
149 | if isSAReferencedBySubjects(crb.Subjects, utils.FullName(sa.Namespace, sa.Name), "") {
150 | rbacDb.ServiceAccounts[i].Roles = append(rbacDb.ServiceAccounts[i].Roles, clusterRoleRef)
151 | roleBindedToRelevantSubject = true
152 | }
153 | }
154 | // Check if the crb grants the cr to a node
155 | for i, node := range rbacDb.Nodes {
156 | if isNodeReferencedBySubjects(crb.Subjects, node.Name, collectConfig.NodeGroups, collectConfig.NodeUser) {
157 | rbacDb.Nodes[i].Roles = append(rbacDb.Nodes[i].Roles, clusterRoleRef)
158 | roleBindedToRelevantSubject = true
159 | }
160 | }
161 |
162 | // Check if crb grants ClusterRole to a user or group
163 | for _, subject := range crb.Subjects {
164 | if subject.Kind == "User" {
165 | userAlreadyInDb := false
166 | roleBindedToRelevantSubject = true
167 | for i, user := range rbacDb.Users {
168 | if subject.Name == user.Name {
169 | rbacDb.Users[i].Roles = append(rbacDb.Users[i].Roles, clusterRoleRef)
170 | userAlreadyInDb = true
171 | break
172 | }
173 | }
174 | if !userAlreadyInDb {
175 | rbacDb.Users = append(rbacDb.Users, NamedEntry{Name: subject.Name, Roles: []RoleRef{clusterRoleRef}})
176 | }
177 | } else if subject.Kind == "Group" {
178 | if subject.Name == "system:masters" {
179 | continue // ignore system:masters to reduce clutter
180 | }
181 | grpAlreadyInDb := false
182 | roleBindedToRelevantSubject = true
183 | for i, grp := range rbacDb.Groups {
184 | if subject.Name == grp.Name {
185 | rbacDb.Groups[i].Roles = append(rbacDb.Groups[i].Roles, clusterRoleRef)
186 | grpAlreadyInDb = true
187 | break
188 | }
189 | }
190 | if !grpAlreadyInDb {
191 | rbacDb.Groups = append(rbacDb.Groups, NamedEntry{Name: subject.Name, Roles: []RoleRef{clusterRoleRef}})
192 | }
193 | }
194 | }
195 |
196 | // Add clusterRole to rbacDb if it's granted to any SA or node
197 | if roleBindedToRelevantSubject {
198 | addRoleIfDoesntExists(rbacDb, clusterRoleEntry)
199 | }
200 | }
201 | }
202 |
203 | // Checks whether the serviceAccount denoted by @saFullname is referenced in @subjects
204 | func isSAReferencedBySubjects(subjects []rbac.Subject, saFullname string, rbNS string) bool {
205 | for _, subject := range subjects {
206 | if subject.Kind == "ServiceAccount" {
207 | if subject.Namespace == "" {
208 | subject.Namespace = rbNS
209 | }
210 | if saFullname == utils.FullName(subject.Namespace, subject.Name) {
211 | return true
212 | }
213 | } else if subject.Kind == "Group" {
214 | if subject.Name == "system:authenticated" {
215 | return true
216 | }
217 | if !strings.HasPrefix(subject.Name, "system:serviceaccounts") {
218 | return false // only handle sa groups
219 | }
220 | if subject.Name == "system:serviceaccounts" {
221 | return true
222 | }
223 | if subject.Name == "system:serviceaccounts:"+strings.Split(saFullname, ":")[0] {
224 | return true
225 | }
226 | }
227 | }
228 | return false
229 | }
230 |
231 | // Checks whether the node denoted by @nodeName is referenced in @subjects
232 | func isNodeReferencedBySubjects(subjects []rbac.Subject, nodeName string, nodeGroups []string, nodeUser string) bool {
233 | for _, subject := range subjects {
234 | if subject.Kind == "User" {
235 | if nodeUser != "" {
236 | if subject.Name == nodeUser {
237 | return true
238 | }
239 | } else {
240 | if subject.Name == "system:node:"+nodeName {
241 | return true
242 | }
243 | }
244 | } else if subject.Kind == "Group" {
245 | if subject.Name == "system:authenticated" {
246 | return true
247 | }
248 | for _, grp := range nodeGroups {
249 | if subject.Name == grp {
250 | return true
251 | }
252 | }
253 | }
254 | }
255 | return false
256 | }
257 |
258 | // Adds @roleEntry to @rbacDb if it's not already there
259 | func addRoleIfDoesntExists(rbacDb *RbacDb, roleEntry RoleEntry) {
260 | for _, role := range rbacDb.Roles {
261 | if roleEntry.Name == role.Name && roleEntry.Namespace == role.Namespace {
262 | return
263 | }
264 | }
265 | rbacDb.Roles = append(rbacDb.Roles, roleEntry)
266 | }
267 |
268 | // Identifies IAM roles granted to a @serviceAccount through annotations,
269 | // supports EKS and GKE annotations
270 | func getProviderIAM(serviceAccount v1.ServiceAccount) map[string]string {
271 | providerIAM := make(map[string]string)
272 | for key, value := range serviceAccount.ObjectMeta.Annotations {
273 | if key == "eks.amazonaws.com/role-arn" {
274 | providerIAM["aws"] = value
275 | } else if key == "iam.gke.io/gcp-service-account" {
276 | providerIAM["gcp"] = value
277 | }
278 | }
279 | return providerIAM
280 | }
281 |
282 | // Find clusterRole referenced by @ref
283 | func findClusterRole(clusterRoles []rbac.ClusterRole, ref rbac.RoleRef) RoleEntry {
284 | var clusterRoleEntry RoleEntry
285 | for _, cr := range clusterRoles {
286 | if cr.Name == ref.Name {
287 | clusterRoleEntry.Name = cr.ObjectMeta.Name
288 | clusterRoleEntry.Rules = cr.Rules
289 | break
290 | }
291 | }
292 | return clusterRoleEntry
293 | }
294 |
295 | // Find role in @ns referenced by @ref
296 | // Need @ns because rbac.RoleRef doesn't carry a namespace
297 | func findRole(roles []rbac.Role, ref rbac.RoleRef, ns string) RoleEntry {
298 | var roleEntry RoleEntry
299 | for _, role := range roles {
300 | if role.Name == ref.Name && role.Namespace == ns {
301 | roleEntry.Name = role.ObjectMeta.Name
302 | roleEntry.Namespace = ns
303 | roleEntry.Rules = role.Rules
304 | break
305 | }
306 | }
307 | return roleEntry
308 | }
309 |
--------------------------------------------------------------------------------
/pkg/collect/types.go:
--------------------------------------------------------------------------------
1 | package collect
2 |
3 | import (
4 | v1 "k8s.io/api/core/v1"
5 | rbac "k8s.io/api/rbac/v1"
6 | )
7 |
8 | // CollectConfig holds the options for Collect()
9 | type CollectConfig struct {
10 | AllServiceAccounts bool
11 | IgnoreControlPlane bool
12 | DiscoverProtections bool
13 | OfflineDir string
14 | NodeGroups []string
15 | NodeUser string
16 | Namespace string
17 | }
18 |
19 | // CollectResult is the output of Collect()
20 | // Includes the cluster metadata and the RBAC data (basically ClusterMetadata + RbacDb)
21 | type CollectResult struct {
22 | Metadata ClusterMetadata `json:"metadata"`
23 | ServiceAccounts []ServiceAccountEntry `json:"serviceAccounts"`
24 | Nodes []NodeEntry `json:"nodes"`
25 | Users []NamedEntry `json:"users"`
26 | Groups []NamedEntry `json:"groups"`
27 | Roles []RoleEntry `json:"roles"`
28 | }
29 |
30 | // ClusterDb holds cluster objects relevant to RBAC
31 | type ClusterDb struct {
32 | Pods []v1.Pod // TODO: only need name, namespace, serviceaccount, and node, not full object
33 | Nodes []v1.Node // TODO: only need name, not full object
34 | ServiceAccounts []v1.ServiceAccount // TODO: only need name, namespace, and annotations, not full object
35 | Roles []rbac.Role
36 | ClusterRoles []rbac.ClusterRole
37 | RoleBindings []rbac.RoleBinding
38 | ClusterRoleBindings []rbac.ClusterRoleBinding
39 | }
40 |
41 | // RbacDb is a database holding the RBAC permissions in the cluster
42 | type RbacDb struct {
43 | ServiceAccounts []ServiceAccountEntry
44 | Nodes []NodeEntry
45 | Users []NamedEntry
46 | Groups []NamedEntry
47 | Roles []RoleEntry
48 | }
49 |
50 | type ClusterMetadata struct {
51 | ClusterName string `json:"cluster"`
52 | Platform string `json:"platform"`
53 | Version ClusterVersion `json:"version"`
54 | Features []string `json:"features"`
55 | }
56 |
57 | type ClusterVersion struct {
58 | Major string `json:"major"`
59 | Minor string `json:"minor"`
60 | GitVersion string `json:"gitVersion"`
61 | }
62 |
63 | // ServiceAccountEntry holds the RBAC info of a serviceAccount
64 | type ServiceAccountEntry struct {
65 | Name string `json:"name"`
66 | Namespace string `json:"namespace"`
67 | Nodes []NodeToPods `json:"nodes,omitempty"`
68 | ProviderIAM map[string]string `json:"providerIAM,omitempty"`
69 | Roles []RoleRef `json:"roles"`
70 | }
71 |
72 | func (s *ServiceAccountEntry) Equals(name string, namespace string) bool {
73 | return s.Name == name && s.Namespace == namespace
74 | }
75 |
76 | // NodeEntry holds the RBAC info of a node
77 | type NodeEntry struct {
78 | Name string `json:"name"`
79 | Roles []RoleRef `json:"roles"`
80 | ServiceAccounts []string `json:"serviceAccounts"`
81 | }
82 |
83 | // NamedEntry marks an identity with roles denoted by only a name, like a user or a group
84 | type NamedEntry struct {
85 | Name string `json:"name"`
86 | Roles []RoleRef `json:"roles"`
87 | }
88 |
89 | // RoleEntry describes a Role or a ClusterRole
90 | type RoleEntry struct {
91 | Name string `json:"name"`
92 | Namespace string `json:"namespace,omitempty"`
93 | Rules []rbac.PolicyRule `json:"rules"`
94 | }
95 |
96 | // RoleRef denotes the outcome of a RoleBinding or a ClusterRoleBinding
97 | type RoleRef struct {
98 | Name string `json:"name"`
99 | Namespace string `json:"namespace,omitempty"`
100 | EffectiveNamespace string `json:"effectiveNamespace,omitempty"`
101 | }
102 |
103 | // NodeToPods lists the pods on a node
104 | type NodeToPods struct {
105 | Name string `json:"name"`
106 | Pods []string `json:"pods"`
107 | }
108 |
--------------------------------------------------------------------------------
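For reference, a minimal sketch of the input shape that the eval policies consume: it builds a tiny CollectResult by hand and marshals it to JSON. All names here (cluster, namespace, serviceAccount, role) are hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/PaloAltoNetworks/rbac-police/pkg/collect"
	rbac "k8s.io/api/rbac/v1"
)

func main() {
	// Hypothetical snapshot: one serviceAccount bound to one namespaced role.
	snapshot := collect.CollectResult{
		Metadata: collect.ClusterMetadata{
			ClusterName: "example-cluster",
			Version:     collect.ClusterVersion{Major: "1", Minor: "26", GitVersion: "v1.26.0"},
			Features:    []string{},
		},
		ServiceAccounts: []collect.ServiceAccountEntry{{
			Name:      "demo-sa",
			Namespace: "default",
			Nodes:     []collect.NodeToPods{{Name: "node-1", Pods: []string{"demo-pod"}}},
			Roles:     []collect.RoleRef{{Name: "demo-role", Namespace: "default", EffectiveNamespace: "default"}},
		}},
		Roles: []collect.RoleEntry{{
			Name:      "demo-role",
			Namespace: "default",
			Rules: []rbac.PolicyRule{{
				APIGroups: []string{""},
				Verbs:     []string{"list"},
				Resources: []string{"secrets"},
			}},
		}},
	}
	out, err := json.MarshalIndent(snapshot, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

Eval re-marshals this same structure and decodes it into a generic value before handing it to Rego as input, so the JSON printed here mirrors what policies see.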
/pkg/eval/eval.go:
--------------------------------------------------------------------------------
1 | package eval
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "errors"
8 | "fmt"
9 | "github.com/open-policy-agent/opa/storage"
10 | "github.com/open-policy-agent/opa/storage/inmem"
11 | "strings"
12 |
13 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
14 | "github.com/PaloAltoNetworks/rbac-police/pkg/utils"
15 | "github.com/mitchellh/mapstructure"
16 | "github.com/open-policy-agent/opa/rego"
17 | "github.com/open-policy-agent/opa/topdown"
18 | log "github.com/sirupsen/logrus"
19 | )
20 |
21 | var (
22 | // Hash set of dirs to ignore when looking for
23 | // policy files under a path
24 | ignoredDirs = map[string]struct{}{
25 | "ignore": {},
26 | "utils": {},
27 | }
28 | severityMap = map[string]int{"Low": 1, "Medium": 2, "High": 3, "Critical": 4, "": 5}
29 | builtinsLibPath = "lib/utils/builtins.rego" // TODO: move out of eval.go / make configurable / go-bindata
30 | )
31 |
32 | // Evaluates RBAC permissions using Rego policies
33 | func Eval(policyPath string, collectResult collect.CollectResult, evalConfig EvalConfig) *PolicyResults {
34 | // Set debug mode
35 | if evalConfig.DebugMode {
36 | log.SetLevel(log.DebugLevel)
37 | }
38 |
39 | // Remove identities that we're not going to evaluate per the `--violations` flag
40 | removedUnneededIdentities(&collectResult, evalConfig)
41 |
42 | // Enforce evalConfig.OnlySasOnAllNodes
43 | if evalConfig.OnlySasOnAllNodes {
44 | filterOnlySasOnAllNodes(&collectResult)
45 | }
46 |
47 | // Enforce evalConfig.IgnoredNamespaces
48 | if len(evalConfig.IgnoredNamespaces) > 0 {
49 | ignoreNamespaces(&collectResult, evalConfig.IgnoredNamespaces)
50 | }
51 |
52 | // Since the above functions might have removed some identities, we could have dangling roles that are no longer referenced
53 | purgeDanglingRoles(&collectResult)
54 |
55 | // Decode input json
56 | var rbacJson interface{}
57 | rbacBytes, err := json.Marshal(collectResult)
58 | if err != nil {
59 | log.Errorf("Eval: failed to marshal CollectResult object with %v\n", err)
60 | return nil
61 | }
62 | d := json.NewDecoder(bytes.NewBuffer(rbacBytes))
63 | if err := d.Decode(&rbacJson); err != nil {
64 | log.Errorln("eval: failed to decode rbac json with", err)
65 | return nil
66 | }
67 |
68 | // Get the list of policies to evaluate
69 | policyFiles, err := getPolicyFiles(policyPath, ignoredDirs)
70 | if err != nil {
71 | return nil
72 | }
73 | if len(policyFiles) == 0 {
74 | log.Errorln("eval: couldn't find policy files with '.rego' suffix under", policyPath)
75 | return nil
76 | }
77 |
78 | // Prepare configuration for policies
79 | policyConfig := fmt.Sprintf(`{
80 | "config": {
81 | "evalSaViolations": %t,
82 | "evalNodeViolations": %t,
83 | "evalCombinedViolations": %t,
84 | "evalUserViolations": %t,
85 | "evalGroupViolations": %t
86 | }
87 | }`, evalConfig.SaViolations, evalConfig.NodeViolations, evalConfig.CombinedViolations, evalConfig.UserViolations, evalConfig.GroupViolations)
88 |
89 | // Run policies against input json
90 | var policyResults PolicyResults
91 | failedPolicies, errorsCounter, belowThresholdPolicies := 0, 0, 0
92 | for _, policyFile := range policyFiles {
93 | log.Debugf("eval: running policy %v...\n", policyFile)
94 | currPolicyResult, err := runPolicy(policyFile, rbacJson, policyConfig, evalConfig)
95 | if err != nil {
96 | switch err.(type) {
97 | default:
98 | errorsCounter += 1
99 | case *belowThresholdErr:
100 | belowThresholdPolicies += 1 // unused
101 | }
102 | continue
103 | }
104 | if currPolicyResult != nil {
105 | failedPolicies += 1
106 | policyResults.PolicyResults = append(policyResults.PolicyResults, *currPolicyResult)
107 | }
108 | }
109 |
110 | // Summarize
111 | policyResults.Summary = Summary{
112 | Evaluated: len(policyFiles),
113 | Failed: failedPolicies,
114 | Passed: len(policyFiles) - failedPolicies - errorsCounter,
115 | Errors: errorsCounter,
116 | }
117 |
118 | return &policyResults
119 | }
120 |
121 | // Runs a Rego policy on @rbacJson
122 | func runPolicy(policyFile string, rbacJson interface{}, policyConfig string, evalConfig EvalConfig) (*PolicyResult, error) {
123 | policyResult := PolicyResult{PolicyFile: policyFile}
124 |
125 | // Get policy description & severity
126 | desc := describePolicy(policyFile)
127 | if desc != nil {
128 | policyResult.Severity = desc.Severity
129 | policyResult.Description = desc.Description
130 | }
131 |
132 | // Don't evaluate if severity is under threshold
133 | if severityMap[policyResult.Severity] < severityMap[evalConfig.SeverityThreshold] {
134 | return nil, &belowThresholdErr{}
135 | }
136 |
137 | // Evaluate policy
138 | violations, err := evaluatePolicy(policyFile, rbacJson, policyConfig, evalConfig)
139 | if violations == nil || err != nil {
140 | return nil, err
141 | }
142 |
143 | policyResult.Violations = *violations
144 | return &policyResult, nil
145 | }
146 |
147 | // Get policy's description and severity
148 | func describePolicy(policyFile string) *DescribeRegoResult {
149 | // Prepare query
150 | var desc DescribeRegoResult
151 | describeQuery, err := rego.New(
152 | rego.Query("data.policy.describe[_]"),
153 | rego.Load([]string{policyFile, builtinsLibPath}, nil),
154 | ).PrepareForEval(context.Background())
155 | if err != nil {
156 | log.Debugf("describePolicy: error preparing query for %v with %v\n", policyFile, err)
157 | return nil
158 | }
159 |
160 | // Run describe query
161 | rs, err := describeQuery.Eval(context.Background())
162 | if err != nil {
163 | log.Debugf("describePolicy: failed to evaluate query for %v with %v\n", policyFile, err)
164 | return nil
165 | }
166 | if len(rs) == 0 || len(rs[0].Expressions) == 0 {
167 | return nil // no results
168 | }
169 | log.Debugf("describePolicy: results for %v:\n", policyFile)
170 | logResults(rs)
171 |
172 | err = mapstructure.Decode(rs[0].Expressions[0].Value, &desc)
173 | if err != nil {
174 | log.Debugf("describePolicy: failed to decode results for %v with %v\n", policyFile, err)
175 | return nil
176 | }
177 | return &desc
178 | }
179 |
180 | // Evaluate policy on @input, return violations
181 | func evaluatePolicy(policyFile string, input interface{}, policyConfig string, evalConfig EvalConfig) (*Violations, error) {
182 | var (
183 | foundViolations = false
184 | violations Violations
185 | queryStr string
186 | regoFiles = []string{policyFile, builtinsLibPath}
187 | ctx = context.Background()
188 | )
189 |
190 | // Read policy file
191 | policyBytes, err := utils.ReadFile(policyFile)
192 | if err != nil {
193 | return nil, err
194 | }
195 | policy := string(policyBytes)
196 |
197 | // Wrap policy if needed
198 | if policyNeedsWrapping(policy) {
199 | regoFiles = append([]string{wrapperFile}, regoFiles...)
200 | queryStr = "data.wrapper.main[_]"
201 | } else {
202 | queryStr = "data.policy.main[_]"
203 | }
204 |
205 | // Manually create storage in-memory, write policyConfig into it, and set up a writable transaction for Load()
206 | store := inmem.NewFromReader(bytes.NewBufferString(policyConfig))
207 | txn, err := store.NewTransaction(ctx, storage.WriteParams)
208 | if err != nil {
209 | log.Errorf("evaluatePolicy: error preparing transaction for %v with %v\n", policyFile, err)
210 | return nil, err
211 | }
212 |
213 | // Prepare query
214 | var policyStdoutBuf bytes.Buffer // collect debug output
215 | query, err := rego.New(
216 | rego.Query(queryStr),
217 | rego.Store(store),
218 | rego.Transaction(txn),
219 | rego.Load(regoFiles, nil),
220 | rego.EnablePrintStatements(true),
221 | rego.PrintHook(topdown.NewPrintHook(&policyStdoutBuf)),
222 | ).PrepareForEval(ctx)
223 | if err != nil {
224 | log.Errorf("evaluatePolicy: error preparing query for %v with %v\n", policyFile, err)
225 | return nil, err
226 | }
227 |
228 | // Evaluate policy over input
229 | rs, err := query.Eval(ctx, rego.EvalInput(input))
230 | if policyStdoutBuf.Len() > 0 {
231 | log.Debugln("evaluatePolicy: output from", policyFile)
232 | 		log.Debug(policyStdoutBuf.String())
233 | }
234 | if err != nil {
235 | log.Errorf("evaluatePolicy: failed to evaluate query for %v with %v\n", policyFile, err)
236 | return nil, err
237 | }
238 | if len(rs) == 0 { // no results
239 | log.Debugln("evaluatePolicy: no results for", policyFile)
240 | return nil, err
241 | }
242 | log.Debugf("evaluatePolicy: results for %v:\n", policyFile)
243 | logResults(rs)
244 |
245 | // Parse results for violations
246 | for _, result := range rs {
247 | var (
248 | tmpInterface interface{}
249 | currViolations EvalRegoResult
250 | )
251 | // Our query contains one expression, main[_], so we only assess the first (and only) expression in the result
252 | tmpInterface, ok := result.Expressions[0].Value.(map[string]interface{})["violations"]
253 | if !ok {
254 | log.Errorln("evaluatePolicy: failed to get violation from", policyFile)
255 | return nil, errors.New("evaluatePolicy: failed to get violation from policy")
256 | }
257 | err = mapstructure.Decode(tmpInterface, &currViolations)
258 | if err != nil {
259 | log.Errorf("evaluatePolicy: failed to decode violation from %v with %v\n", policyFile, err)
260 | return nil, err
261 | }
262 | // Default policies only return 1 violation type per result,
263 | // and only 1 result for each violation type, but in case
264 | // custom ones don't follow this behaviour, we append instead of assign
265 | if currViolations.ServiceAccounts != nil && evalConfig.SaViolations {
266 | violations.ServiceAccounts = append(violations.ServiceAccounts, currViolations.ServiceAccounts...)
267 | foundViolations = true
268 | }
269 | if currViolations.Nodes != nil && evalConfig.NodeViolations {
270 | violations.Nodes = append(violations.Nodes, currViolations.Nodes...)
271 | foundViolations = true
272 | }
273 | if currViolations.Combined != nil && evalConfig.CombinedViolations {
274 | violations.Combined = append(violations.Combined, currViolations.Combined...)
275 | foundViolations = true
276 | }
277 | if currViolations.Users != nil && evalConfig.UserViolations {
278 | violations.Users = append(violations.Users, currViolations.Users...)
279 | foundViolations = true
280 | }
281 | if currViolations.Groups != nil && evalConfig.GroupViolations {
282 | violations.Groups = append(violations.Groups, currViolations.Groups...)
283 | foundViolations = true
284 | }
285 | }
286 | if !foundViolations {
287 | return nil, nil
288 | }
289 | return &violations, nil
290 | }
291 |
292 | // Remove identities that aren't going to be evaluated based on evalConfig
293 | func removedUnneededIdentities(collectResult *collect.CollectResult, evalConfig EvalConfig) {
294 | if !evalConfig.CombinedViolations {
295 | if !evalConfig.SaViolations {
296 | collectResult.ServiceAccounts = []collect.ServiceAccountEntry{}
297 | }
298 | if !evalConfig.NodeViolations {
299 | collectResult.Nodes = []collect.NodeEntry{}
300 | }
301 | }
302 | if !evalConfig.UserViolations {
303 | collectResult.Users = []collect.NamedEntry{}
304 | }
305 | if !evalConfig.GroupViolations {
306 | 		collectResult.Groups = []collect.NamedEntry{}
307 | }
308 | }
309 |
310 | // Filter out serviceAccounts that aren't on all nodes
311 | // from @collectResult
312 | func filterOnlySasOnAllNodes(collectResult *collect.CollectResult) {
313 | var sasOnAllNodes []collect.ServiceAccountEntry
314 | nodeCount := len(collectResult.Nodes)
315 |
316 | for _, saEntry := range collectResult.ServiceAccounts {
317 | // Check if SA is on all nodes
318 | saNodeCount := len(saEntry.Nodes)
319 | if saNodeCount >= nodeCount {
320 | for _, node := range saEntry.Nodes {
321 | if node.Name == "" {
322 | // Ignore the empty node, which holds info on unscheduled pods
323 | saNodeCount -= 1
324 | break
325 | }
326 | }
327 | if saNodeCount >= nodeCount {
328 | sasOnAllNodes = append(sasOnAllNodes, saEntry)
329 | }
330 | }
331 | }
332 | collectResult.ServiceAccounts = sasOnAllNodes
333 | }
334 |
335 | // Filter out serviceAccounts in @ignoredNamespaces from @collectResult
336 | func ignoreNamespaces(collectResult *collect.CollectResult, ignoredNamespaces []string) {
337 | var sasRelevantNamespaces []collect.ServiceAccountEntry
338 |
339 | // Remove serviceAccounts in ignored namespaces
340 | for _, saEntry := range collectResult.ServiceAccounts {
341 | ignoreSa := false
342 | for _, ignoredNs := range ignoredNamespaces {
343 | if saEntry.Namespace == ignoredNs {
344 | ignoreSa = true
345 | break
346 | }
347 | }
348 | if !ignoreSa {
349 | sasRelevantNamespaces = append(sasRelevantNamespaces, saEntry)
350 | }
351 | }
352 | collectResult.ServiceAccounts = sasRelevantNamespaces
353 |
354 | // Remove serviceAccounts in ignored namespaces from nodes
355 | 	for i, nodeEntry := range collectResult.Nodes {
356 | var relevantSasOnNode []string
357 | for _, saFullname := range nodeEntry.ServiceAccounts {
358 | ignoreSa := false
359 | for _, ignoredNs := range ignoredNamespaces {
360 | if strings.HasPrefix(saFullname, ignoredNs+":") {
361 | ignoreSa = true
362 | }
363 | }
364 | if !ignoreSa {
365 | relevantSasOnNode = append(relevantSasOnNode, saFullname)
366 | }
367 | }
368 | 		collectResult.Nodes[i].ServiceAccounts = relevantSasOnNode // write back via index; nodeEntry is a copy
369 | }
370 | }
371 |
372 | // Based on filters applied to collectResult, the identities that originally referenced certain roles
373 | // may have been removed. Purge unreferenced roles to improve policy perf.
374 | func purgeDanglingRoles(collectResult *collect.CollectResult) {
375 | var referencedRoles []collect.RoleEntry
376 | for _, role := range collectResult.Roles {
377 | if roleReferencedByAnIdentity(role, collectResult) {
378 | referencedRoles = append(referencedRoles, role)
379 | }
380 | }
381 | if len(referencedRoles) < len(collectResult.Roles) {
382 | collectResult.Roles = referencedRoles
383 | }
384 | }
385 |
386 | // Returns whether an identity in @collectResult references the @checkedRole
387 | func roleReferencedByAnIdentity(checkedRole collect.RoleEntry, collectResult *collect.CollectResult) bool {
388 | for _, sa := range collectResult.ServiceAccounts {
389 | for _, roleRef := range sa.Roles {
390 | if checkedRole.Name == roleRef.Name && checkedRole.Namespace == roleRef.Namespace {
391 | return true
392 | }
393 | }
394 | }
395 | for _, node := range collectResult.Nodes {
396 | for _, roleRef := range node.Roles {
397 | if checkedRole.Name == roleRef.Name && checkedRole.Namespace == roleRef.Namespace {
398 | return true
399 | }
400 | }
401 | }
402 | for _, user := range collectResult.Users {
403 | for _, roleRef := range user.Roles {
404 | if checkedRole.Name == roleRef.Name && checkedRole.Namespace == roleRef.Namespace {
405 | return true
406 | }
407 | }
408 | }
409 | for _, grp := range collectResult.Groups {
410 | for _, roleRef := range grp.Roles {
411 | if checkedRole.Name == roleRef.Name && checkedRole.Namespace == roleRef.Namespace {
412 | return true
413 | }
414 | }
415 | }
416 | return false
417 | }
418 |
419 | // Returns a shortened version of @policyResults
420 | func AbbreviateResults(policyResults *PolicyResults) AbbreviatedPolicyResults {
421 | abbreviatedPolicyResults := AbbreviatedPolicyResults{
422 | Summary: policyResults.Summary,
423 | }
424 | for _, policyResult := range policyResults.PolicyResults {
425 | currAbbreviatedPolicyResult := AbbreviatedPolicyResult{
426 | PolicyFile: policyResult.PolicyFile,
427 | Description: policyResult.Description,
428 | Severity: policyResult.Severity,
429 | }
430 | currAbbreviatedPolicyResult.Violations.Nodes = policyResult.Violations.Nodes
431 | currAbbreviatedPolicyResult.Violations.Combined = policyResult.Violations.Combined
432 | currAbbreviatedPolicyResult.Violations.Users = policyResult.Violations.Users
433 | currAbbreviatedPolicyResult.Violations.Groups = policyResult.Violations.Groups
434 |
435 | // Shorten service account violations
436 | for _, saViolation := range policyResult.Violations.ServiceAccounts {
437 | saFullName := utils.FullName(saViolation.Namespace, saViolation.Name)
438 | currAbbreviatedPolicyResult.Violations.ServiceAccounts = append(currAbbreviatedPolicyResult.Violations.ServiceAccounts, saFullName)
439 | }
440 |
441 | abbreviatedPolicyResults.PolicyResults = append(abbreviatedPolicyResults.PolicyResults, currAbbreviatedPolicyResult)
442 | }
443 |
444 | return abbreviatedPolicyResults
445 | }
446 |
--------------------------------------------------------------------------------
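For reference, a minimal sketch of driving Eval directly from Go instead of through the CLI. The rbac.json snapshot file is an assumption, and since builtinsLibPath and the policy paths are relative, the sketch assumes it runs from the repository root.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/PaloAltoNetworks/rbac-police/pkg/collect"
	"github.com/PaloAltoNetworks/rbac-police/pkg/eval"
)

func main() {
	// Load a previously collected RBAC snapshot (file name is an assumption).
	raw, err := os.ReadFile("rbac.json")
	if err != nil {
		panic(err)
	}
	var collectResult collect.CollectResult
	if err := json.Unmarshal(raw, &collectResult); err != nil {
		panic(err)
	}

	// Evaluate every policy under lib/ against serviceAccount identities only.
	results := eval.Eval("lib", collectResult, eval.EvalConfig{
		SeverityThreshold: "Low",
		SaViolations:      true,
	})
	if results == nil {
		panic("evaluation failed")
	}
	out, _ := json.MarshalIndent(eval.AbbreviateResults(results), "", "  ")
	fmt.Println(string(out))
}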
/pkg/eval/types.go:
--------------------------------------------------------------------------------
1 | package eval
2 |
3 | // Configuration for Eval()
4 | type EvalConfig struct {
5 | SeverityThreshold string
6 | OnlySasOnAllNodes bool
7 | IgnoredNamespaces []string
8 | DebugMode bool
9 | SaViolations bool
10 | NodeViolations bool
11 | CombinedViolations bool
12 | UserViolations bool
13 | GroupViolations bool
14 | }
15 |
16 | // Evaluation results for policies
17 | type PolicyResults struct {
18 | PolicyResults []PolicyResult `json:"policyResults"`
19 | Summary Summary `json:"summary"`
20 | }
21 |
22 | // Abbreviated results for policies
23 | type AbbreviatedPolicyResults struct {
24 | PolicyResults []AbbreviatedPolicyResult `json:"policyResults"`
25 | Summary Summary `json:"summary"`
26 | }
27 |
28 | // Result of policy evaluation
29 | type PolicyResult struct {
30 | PolicyFile string `json:"policy"`
31 | Severity string `json:"severity,omitempty"`
32 | Description string `json:"description,omitempty"`
33 | Violations Violations `json:"violations"`
34 | }
35 |
36 | // Result of policy evaluation, abbreviated
37 | type AbbreviatedPolicyResult struct {
38 | PolicyFile string `json:"policy"`
39 | Severity string `json:"severity,omitempty"`
40 | Description string `json:"description,omitempty"`
41 | Violations AbbreviatedViolations `json:"violations,omitempty"`
42 | }
43 |
44 | // Summary of results from all evaluated policies
45 | type Summary struct {
46 | Failed int `json:"failed"`
47 | Passed int `json:"passed"`
48 | Errors int `json:"errors"`
49 | Evaluated int `json:"evaluated"`
50 | }
51 |
52 | // Policy violations
53 | type Violations struct {
54 | ServiceAccounts []ServiceAccountViolation `json:"serviceAccounts,omitempty" mapstructure:"serviceAccounts"`
55 | Nodes []string `json:"nodes,omitempty"`
56 | Combined []CombinedViolation `json:"combined,omitempty"`
57 | Users []string `json:"users,omitempty"`
58 | Groups []string `json:"groups,omitempty"`
59 | }
60 |
61 | // Policy violations, abbreviated
62 | type AbbreviatedViolations struct {
63 | ServiceAccounts []string `json:"serviceAccounts,omitempty" mapstructure:"serviceAccounts"`
64 | Nodes []string `json:"nodes,omitempty"`
65 | Combined []CombinedViolation `json:"combined,omitempty"`
66 | Users []string `json:"users,omitempty"`
67 | Groups []string `json:"groups,omitempty"`
68 | }
69 |
70 | // Violation from a serviceAccount
71 | type ServiceAccountViolation struct {
72 | Name string `json:"name"`
73 | Namespace string `json:"namespace"`
74 | Nodes []map[string][]string `json:"nodes,omitempty"`
75 | ProviderIAM map[string]string `json:"providerIAM,omitempty" mapstructure:"providerIAM"`
76 | }
77 |
78 | // Violation from a node and its hosted serviceAccount
79 | type CombinedViolation struct {
80 | Node string `json:"node,omitempty"`
81 | ServiceAccounts []string `json:"serviceAccounts,omitempty" mapstructure:"serviceAccounts"`
82 | }
83 |
84 | // Output from the describe Rego rule
85 | type DescribeRegoResult struct {
86 | Severity string `json:"severity,omitempty"`
87 | Description string `json:"desc,omitempty" mapstructure:"desc"`
88 | }
89 |
90 | // Output from the main Rego rule
91 | type EvalRegoResult struct {
92 | ServiceAccounts []ServiceAccountViolation `json:"serviceAccounts,omitempty" mapstructure:"serviceAccounts"`
93 | Nodes []string `json:"nodes,omitempty"`
94 | Combined []CombinedViolation `json:"combined,omitempty"`
95 | Users []string `json:"users,omitempty"`
96 | Groups []string `json:"groups,omitempty"`
97 | }
98 |
99 | // Below severity threshold error
100 | type belowThresholdErr struct{}
101 |
102 | func (m *belowThresholdErr) Error() string {
103 | return "policy's severity is below the severity threshold"
104 | }
105 |
--------------------------------------------------------------------------------
/pkg/eval/utils.go:
--------------------------------------------------------------------------------
1 | package eval
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | "strings"
7 |
8 | "github.com/open-policy-agent/opa/rego"
9 | log "github.com/sirupsen/logrus"
10 | )
11 |
12 | // Get policy files with a .rego suffix under @path, ignoring directories in @ignoredDirs
13 | func getPolicyFiles(path string, ignoredDirs map[string]struct{}) ([]string, error) {
14 | fileInfo, err := os.Stat(path)
15 | if err != nil {
16 | log.Errorf("getPolicyFiles: failed to stat(%v) with %v\n", path, err)
17 | return nil, err
18 | }
19 |
20 | switch mode := fileInfo.Mode(); {
21 | case mode.IsRegular():
22 | return []string{path}, nil
23 | case mode.IsDir():
24 | var policyFiles []string
25 | err := filepath.Walk(path+"/", // if main path is symlink, make Walk follow it
26 | func(path string, info os.FileInfo, err error) error {
27 | if err != nil {
28 | return err
29 | }
30 | if info.Mode().IsRegular() && strings.HasSuffix(path, ".rego") {
31 | policyFiles = append(policyFiles, path)
32 | }
33 | if info.IsDir() {
34 | if _, ok := ignoredDirs[info.Name()]; ok {
35 | return filepath.SkipDir
36 | }
37 | }
38 | return nil
39 | })
40 | if err != nil {
41 | log.Errorf("getPolicyFiles: failed to walk '%v' with %v\n", path, err)
42 | return nil, err
43 | }
44 | return policyFiles, nil
45 | }
46 | return nil, nil
47 | }
48 |
49 | // Prints results given log level is set to debug
50 | func logResults(rs rego.ResultSet) {
51 | if len(rs) == 0 {
52 | return
53 | }
54 | for i, result := range rs {
55 | if len(result.Expressions) > 0 {
56 | log.Debugf("[+] rs[%v].Expressions:\n", i)
57 | for _, expression := range result.Expressions {
58 | log.Debugln(expression.Value)
59 | }
60 | }
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/pkg/eval/wrapper.go:
--------------------------------------------------------------------------------
1 | package eval
2 |
3 | import (
4 | "regexp"
5 | )
6 |
7 | const (
8 | wrapperFile = "lib/utils/wrapper.rego" // TODO: move elsewhere / make configurable / go-bindata
9 | )
10 |
11 | var (
12 | wrappedPattern = `(?m)^\s*main\s*\[\s*\{.*\}\s*\].*$`
13 | )
14 |
15 | // Checks if policy needs wrapping (doesn't define main rule)
16 | func policyNeedsWrapping(policy string) bool {
17 | isWrapped, _ := regexp.MatchString(wrappedPattern, policy)
18 | return !isWrapped // needs wrapping
19 | }
20 |
--------------------------------------------------------------------------------
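For reference, a minimal sketch of how wrappedPattern distinguishes policies that already define a main rule from ones that get wrapped with lib/utils/wrapper.rego. Both policy strings below are hypothetical and are not taken from lib/.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as in wrapper.go.
const wrappedPattern = `(?m)^\s*main\s*\[\s*\{.*\}\s*\].*$`

func main() {
	// Hypothetical policy that defines its own main rule, so it needs no wrapping.
	selfContained := "package policy\nmain[{\"violations\": violations}] { violations := {\"serviceAccounts\": []} }"
	// Hypothetical policy without a main rule, so it relies on the wrapper.
	needsWrapper := "package policy\nevaluateRoles(roles, owner) { true }"

	for _, policy := range []string{selfContained, needsWrapper} {
		isWrapped, err := regexp.MatchString(wrappedPattern, policy)
		if err != nil {
			panic(err)
		}
		fmt.Println("needs wrapping:", !isWrapped)
	}
}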
/pkg/expand/expand.go:
--------------------------------------------------------------------------------
1 | package expand
2 |
3 | import (
4 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
5 | )
6 |
7 | // Expands roleRefs in collectResult so that each serviceAccount or
8 | // node entry directly lists its permissions, for a more readable output
9 | func Expand(collectResult collect.CollectResult) *ExpandResult {
10 | expandResult := ExpandResult{
11 | Metadata: collectResult.Metadata,
12 | }
13 |
14 | // Add serviceaccounts
15 | for _, serviceAccount := range collectResult.ServiceAccounts {
16 | expandedSA := ExpandedServiceAccount{
17 | Name: serviceAccount.Name,
18 | Namespace: serviceAccount.Namespace,
19 | Nodes: serviceAccount.Nodes,
20 | ProviderIAM: serviceAccount.ProviderIAM,
21 | }
22 | expandedSA.Roles = expandRoleRefs(serviceAccount.Roles, collectResult.Roles)
23 | expandResult.ServiceAccounts = append(expandResult.ServiceAccounts, expandedSA)
24 | }
25 |
26 | // Add nodes
27 | for _, node := range collectResult.Nodes {
28 | expandedNode := ExpandedNode{
29 | Name: node.Name,
30 | ServiceAccounts: node.ServiceAccounts,
31 | }
32 | expandedNode.Roles = expandRoleRefs(node.Roles, collectResult.Roles)
33 | expandResult.Nodes = append(expandResult.Nodes, expandedNode)
34 | }
35 |
36 | // Add users
37 | for _, user := range collectResult.Users {
38 | expandedUser := ExpandedNamedEntry{
39 | Name: user.Name,
40 | Roles: expandRoleRefs(user.Roles, collectResult.Roles),
41 | }
42 | expandResult.Users = append(expandResult.Users, expandedUser)
43 | }
44 |
45 | // Add groups
46 | for _, group := range collectResult.Groups {
47 | expandedGroup := ExpandedNamedEntry{
48 | Name: group.Name,
49 | Roles: expandRoleRefs(group.Roles, collectResult.Roles),
50 | }
51 | expandResult.Groups = append(expandResult.Groups, expandedGroup)
52 | }
53 |
54 | return &expandResult
55 | }
56 |
57 | // Expands @roleRefs to their full roles from @roleObjs
58 | func expandRoleRefs(roleRefs []collect.RoleRef, roleObjs []collect.RoleEntry) []ExpandedRole {
59 | var expandedRoles []ExpandedRole
60 | for _, roleRef := range roleRefs {
61 | expandedRole := ExpandedRole{
62 | Name: roleRef.Name,
63 | EffectiveNamespace: roleRef.EffectiveNamespace,
64 | }
65 | for _, roleObj := range roleObjs {
66 | if roleObj.Name == roleRef.Name && roleObj.Namespace == roleRef.Namespace {
67 | expandedRole.Rules = roleObj.Rules
68 | break
69 | }
70 | }
71 | expandedRoles = append(expandedRoles, expandedRole)
72 | }
73 | return expandedRoles
74 | }
75 |
--------------------------------------------------------------------------------
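For reference, a minimal sketch of calling Expand on a previously collected snapshot and printing the expanded permissions; the rbac.json file name is an assumption.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/PaloAltoNetworks/rbac-police/pkg/collect"
	"github.com/PaloAltoNetworks/rbac-police/pkg/expand"
)

func main() {
	// Load a previously collected RBAC snapshot (file name is an assumption).
	raw, err := os.ReadFile("rbac.json")
	if err != nil {
		panic(err)
	}
	var collectResult collect.CollectResult
	if err := json.Unmarshal(raw, &collectResult); err != nil {
		panic(err)
	}

	// Inline each identity's roleRefs into full rules for easier reading.
	expanded := expand.Expand(collectResult)
	out, _ := json.MarshalIndent(expanded, "", "  ")
	fmt.Println(string(out))
}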
/pkg/expand/types.go:
--------------------------------------------------------------------------------
1 | package expand
2 |
3 | import (
4 | "github.com/PaloAltoNetworks/rbac-police/pkg/collect"
5 | rbac "k8s.io/api/rbac/v1"
6 | )
7 |
8 | // Expanded RBAC permissions in a cluster
9 | // Result of Expand()
10 | type ExpandResult struct {
11 | Metadata collect.ClusterMetadata `json:"metadata"`
12 | ServiceAccounts []ExpandedServiceAccount `json:"serviceAccounts"`
13 | Nodes []ExpandedNode `json:"nodes"`
14 | Users []ExpandedNamedEntry `json:"users"`
15 | Groups []ExpandedNamedEntry `json:"groups"`
16 | }
17 |
18 | // RBAC permissions of a serviceAccount
19 | type ExpandedServiceAccount struct {
20 | Name string `json:"name"`
21 | Namespace string `json:"namespace"`
22 | Nodes []collect.NodeToPods `json:"nodes"`
23 | ProviderIAM map[string]string `json:"providerIAM,omitempty"`
24 | Roles []ExpandedRole `json:"roles"`
25 | }
26 |
27 | // RBAC permissions of a node
28 | type ExpandedNode struct {
29 | Name string `json:"name"`
30 | Roles []ExpandedRole `json:"roles"`
31 | ServiceAccounts []string `json:"serviceAccounts"`
32 | }
33 |
34 | // RBAC permissions of an identity denoted by name, like a user or a group
35 | type ExpandedNamedEntry struct {
36 | Name string `json:"name"`
37 | Roles []ExpandedRole `json:"roles"`
38 | }
39 |
40 | // A role granted in @EffectiveNamespace
41 | type ExpandedRole struct {
42 | Name string `json:"name"`
43 | EffectiveNamespace string `json:"effectiveNamespace,omitempty"`
44 | Rules []rbac.PolicyRule `json:"rules"`
45 | }
46 |
--------------------------------------------------------------------------------
/pkg/utils/utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "os"
5 |
6 | log "github.com/sirupsen/logrus"
7 | )
8 |
9 | // Reads the file at @path
10 | func ReadFile(path string) ([]byte, error) {
11 | bytes, err := os.ReadFile(path)
12 | if err != nil {
13 | log.Errorf("ReadFile: failed reading '%v' with: %v\n", path, err)
14 | return nil, err
15 | }
16 | return bytes, nil
17 | }
18 |
19 | // Get the full name of a namespaced k8s object
20 | func FullName(namespace string, name string) string {
21 | return namespace + ":" + name
22 | }
23 |
--------------------------------------------------------------------------------
/utils/generate_policylib_docs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | from sys import argv
4 | import regex
5 |
6 | POLICY_DIR = "lib"
7 | EXCLUDED_DIRS = ["ignore", "utils"]
8 |
9 | # Prints documentation for the policies in POLICY_DIR
10 | def main():
11 | # If needed, chdir to rbac-police's root directory
12 | cwd = os.getcwd()
13 | if cwd.endswith("utils") and not os.path.isdir(POLICY_DIR):
14 | os.chdir("..")
15 |
16 | docs = "## Policy Library\n"
17 | policy_paths = []
18 |
19 | # Get paths to all policies
20 | for root, dirs, files in os.walk(POLICY_DIR, topdown=True):
21 | dirs[:] = [d for d in dirs if d not in EXCLUDED_DIRS]
22 | for file_name in files:
23 | if file_name.endswith(".rego"):
24 | policy_paths.append(os.path.join(root, file_name))
25 |
26 | # Generate documentation for each policy
27 | for policy_path in sorted(policy_paths):
28 | docs += generate_doc(policy_path)
29 |
30 | # Output results
31 | print(docs)
32 |
33 | """
34 | Returns the description, severity and violation types of the
35 | policy at @policy_path, in the following markdown format:
36 |
37 | ### [<policy name>](../lib/<policy name>.rego)
38 | - Description: `<description>`
39 | - Severity: `<severity>`
40 | - Violation types: `<violation types>`
41 | """
42 | def generate_doc(policy_path):
43 | policy_name = os.path.basename(policy_path)[:-5] # remove ".rego"
44 | policy_path_from_docs_dir = "../lib/" + policy_name
45 | doc = f"### [{policy_name}]({policy_path_from_docs_dir}.rego)\n"
46 |
47 | violation_types = []
48 | description, severity = "", ""
49 | with open(policy_path, "r") as policy_file:
50 | for line in policy_file.readlines():
51 | if "targets" in line:
52 | if defined_in_rego_set(line, "targets", "serviceAccounts"):
53 | violation_types.append("serviceAccounts")
54 | if defined_in_rego_set(line, "targets", "nodes"):
55 | violation_types.append("nodes")
56 | if defined_in_rego_set(line, "targets", "combined"):
57 | violation_types.append("combined")
58 | if defined_in_rego_set(line, "targets", "users"):
59 | violation_types.append("users")
60 | if defined_in_rego_set(line, "targets", "groups"):
61 | violation_types.append("groups")
62 | elif defined_in_rego_line(line, "desc"):
63 | if "concat(\", \"" in line:
64 | description = "".join(line.split("\"")[1:-3])
65 | description = description.replace("namespaces (%v)", "namespaces")
66 | else:
67 | description = "".join(line.split("\"")[1:-1])
68 | elif defined_in_rego_line(line, "severity"):
69 | severity = "".join(line.split("\"")[1:-1])
70 |
71 | if len(violation_types) == 0 and policy_name == "providerIAM":
72 | violation_types.append("serviceAccounts")
73 |
74 | doc += f"- Description: `{description}`\n"
75 | doc += f"- Severity: `{severity}`\n"
76 | doc += f"- Violation types: `{', '.join(violation_types)}`\n"
77 | return doc
78 |
79 | # Returns True if @variable is defined in @line
80 | def defined_in_rego_line(line, variable):
81 | return regex.match(f"\s*{variable}\s*:?=", line) != None
82 |
83 | # Returns True if @element is part of a set named @set_name defined in @line
84 | def defined_in_rego_set(line, set_name, element):
85 | return regex.match(f'\s*{set_name}\s*:?=\s*\{{.*"{element}".*\}}', line) != None
86 |
87 | if __name__ == "__main__":
88 | main()
89 |
--------------------------------------------------------------------------------
/utils/get_cluster_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # Generate dir name as "local_<context>_<date>_<random>"
5 | curr_context=$(kubectl config current-context)
6 | curr_context="${curr_context//[[:space:]]/}" # no whitespace
7 | suffix="local_${curr_context:0:15}_$(date +%Y%b%d)" # truncate curr_context at 15 chars
8 | # Find a non-existing dir name
9 | dir="${suffix}_$(LC_ALL=C tr -dc A-Za-z0-9 </dev/urandom | head -c 4)"
10 | while [ -d "$dir" ]; do
11 |   dir="${suffix}_$(LC_ALL=C tr -dc A-Za-z0-9 </dev/urandom | head -c 4)"
12 | done
13 | mkdir "$dir"
14 |
15 | # Dump RBAC-relevant objects from the current cluster
16 | echo "[+] Collecting cluster data into $dir"
17 | kubectl get pods -A -o json > "$dir/pods.json"
18 | kubectl get nodes -A -o json > "$dir/nodes.json"
19 | kubectl get serviceaccounts -A -o json > "$dir/serviceaccounts.json"
20 | kubectl get roles -A -o json > "$dir/roles.json"
21 | kubectl get rolebindings -A -o json > "$dir/rolebindings.json"
22 | kubectl get clusterroles -o json > "$dir/clusterroles.json"
23 | kubectl get clusterrolebindings -o json > "$dir/clusterrolebindings.json"
24 | # Optional:
25 | kubectl config view -o jsonpath='{.contexts[?(@.name == "'"${curr_context}"'")].context.cluster}' > "$dir/cluster_name"
26 | kubectl get --raw /version > "$dir/version.json"
27 |
28 | echo "[+] Cluster data at $dir"
29 |
30 |
--------------------------------------------------------------------------------
/utils/update_policy_doc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Updates the policy library doc at policies.md
3 | set -e
4 |
5 | policy_doc_file="docs/policies.md"
6 | generate_policy_doc_script="utils/generate_policylib_docs.py"
7 | if [ ! -f "$policy_doc_file" ]; then
8 | # Not in root dir, try from utils dir
9 | policy_doc_file="../docs/policies.md"
10 | generate_policy_doc_script="./generate_policylib_docs.py"
11 | if [ ! -f "$policy_doc_file" ]; then
12 | echo "[!] Please run this script from rbac-police's root directory"
13 | exit 1
14 | fi
15 | fi
16 |
17 | policy_lib_line="## Policy Library"
18 |
19 | # Get policy doc without the policy library part
20 | policy_doc_without_lib=$(grep -B 99999 "$policy_lib_line" $policy_doc_file | grep -v "$policy_lib_line")
21 | # Generate updated policy library
22 | new_policy_lib=$($generate_policy_doc_script)
23 |
24 | # Rebuild policies.md with updated policy library
25 | echo "$policy_doc_without_lib" > $policy_doc_file
26 | echo >> $policy_doc_file
27 | echo "$new_policy_lib" >> $policy_doc_file
28 |
29 |
--------------------------------------------------------------------------------
/utils/update_policy_to_use_targets.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Updates a policy to use the new 'targets' set introduced in v1.1.0 instead of the old 'checkXXX' variables
3 | # Usage: update_policy_to_use_targets.py <policy path> <output path>
4 | from sys import argv
5 | import regex
6 |
7 | def main(policy_path, output_path):
8 | updated_policy = []
9 | with open(policy_path, "r") as policy_file:
10 | targets = []
11 | line_to_insert_targets = -1
12 |
13 | # Iterate policy file
14 | for i, line in enumerate(policy_file):
15 | # Exit if policy is already in the new format
16 | if defined_as_rego_set(line, "targets"):
17 | print(f"[+] Policy '{policy_path}' already defines a 'targets' set, it's in the new format")
18 | return
19 | # Add serviceAccounts to targets if checkServiceAccounts is defined
20 | elif defined_as_rego_true(line, "checkServiceAccounts"):
21 | if line_to_insert_targets < 0:
22 | line_to_insert_targets = i
23 | targets.append("serviceAccounts")
24 | # Add nodes to targets if checkNodes is defined
25 | elif defined_as_rego_true(line, "checkNodes"):
26 | if line_to_insert_targets < 0:
27 | line_to_insert_targets = i
28 | targets.append("nodes")
29 | # Add combined to targets if checkCombined is defined
30 | elif defined_as_rego_true(line, "checkCombined"):
31 | if line_to_insert_targets < 0:
32 | line_to_insert_targets = i
33 | targets.append("combined")
34 | # Add all others lines to new policy
35 | else:
36 | updated_policy.append(line)
37 | # If couldn't find any checkXXX variable, exit
38 | if line_to_insert_targets == -1:
39 | print(f"[!] Policy '{policy_path}' doesn't seem like a wrapped rbac-police policy as it doesn't define a 'checkServiceAccounts', 'checkNodes' or 'checkCombined' variable")
40 | return
41 | # Inject the 'targets' set into the new policy
42 | targets_str = 'targets := {"' + '", "'.join(targets) + '"}\n'
43 | updated_policy.insert(line_to_insert_targets, targets_str)
44 |
45 | # Write updated policy to output_path
46 | with open(output_path, "w") as output_file:
47 | output_file.write("".join(updated_policy))
48 | print(f"[+] Done, new policy at {output_path}")
49 |
50 | # Returns true if @set_name is defined as a Rego set in @line
51 | def defined_as_rego_set(line, set_name):
52 | return regex.match(f'\s*{set_name}\s*:?=\s*\{{.*\}}', line) != None
53 |
54 | # Returns true if @variable is a Rego variable set to true in @line
55 | def defined_as_rego_true(line, variable):
56 | return regex.match(f'\s*{variable}\s*:?=\s*true', line) != None
57 |
58 | if __name__ == "__main__":
59 | if len(argv) < 3:
60 |         print(f"[+] Usage: {argv[0]} <policy path> <output path>")
61 | exit(1)
62 | policy_path = argv[1]
63 | output_path = argv[2]
64 |
65 | main(policy_path, output_path)
66 |
--------------------------------------------------------------------------------