├── sig-security-external-audit ├── security-audit-2019 │ ├── ancillary-data │ │ ├── dataflow │ │ │ ├── requirements.txt │ │ │ ├── process.sh │ │ │ ├── updated-dataflow.png │ │ │ ├── original dataflow.png │ │ │ ├── original dataflow.dot │ │ │ ├── tm.py │ │ │ └── updated-dataflow.dot │ │ └── rapid-risk-assessments │ │ │ ├── kcm-ccm-notes.md │ │ │ ├── template.md │ │ │ ├── container-runtime.md │ │ │ ├── kube-scheduler.md │ │ │ ├── kubelet.md │ │ │ ├── etcd.md │ │ │ ├── kube-apiserver.md │ │ │ └── kube-proxy.md │ ├── findings │ │ ├── Kubernetes Final Report.pdf │ │ ├── Kubernetes Threat Model.pdf │ │ ├── Kubernetes White Paper.pdf │ │ └── AtredisPartners_Attacking_Kubernetes-v1.0.pdf │ ├── Atredis and Trail of Bits Proposal.pdf │ ├── RFP_Decision.md │ └── RFP.md ├── OWNERS ├── adalogics-fuzzing-2022 │ └── kubernetes-fuzzing-report.pdf ├── security-audit-2025 │ ├── Shielder OSTIF Kubernetes PUBLIC-v1.1.pdf │ └── RFP_Decision.md ├── security-audit-2021-2022 │ ├── findings │ │ └── Kubernetes v1.24 Final Report.pdf │ ├── RFP_Decision.md │ └── RFP.md ├── external-audit-roadmap.md └── README.md ├── OWNERS ├── sig-security-tooling ├── cve-feed │ ├── hack │ │ ├── .gitignore │ │ ├── requirements.txt │ │ ├── cve_title_parser.py │ │ ├── fetch-cve-feed.sh │ │ ├── fetch-official-cve-feed.py │ │ └── test_cve_title_parser.py │ ├── OWNERS │ └── README.md ├── OWNERS ├── vulnerability-mgmt │ ├── OWNERS │ ├── README.md │ ├── container-images.md │ └── build-time-dependencies.md ├── README.md ├── scanning │ └── build-deps-and-release-images.sh └── learning-sessions.md ├── sig-security-docs ├── papers │ ├── policy │ │ ├── images │ │ │ ├── 4c.png │ │ │ ├── OSCAL-layers.png │ │ │ ├── XACML-architecture.png │ │ │ ├── Kubernetes-admission-controls.png │ │ │ └── Kubernetes-policy-architecture.png │ │ ├── kubernetes-policy-management.pdf │ │ ├── CNCF_Kubernetes_Policy_Management_WhitePaper_v1.pdf │ │ ├── Makefile │ │ └── README.md │ ├── shift-down │ │ ├── shift-down-security.pdf │ │ └── images │ 
│ │ ├── shift-down-teams.png │ │ │ ├── shift-down-banner.jpeg │ │ │ └── shift-down-shared-responsibility.png │ ├── policy_grc │ │ ├── images │ │ │ ├── policy-lifecycle.png │ │ │ ├── compliance-workflows.png │ │ │ └── kubernetes-policy-based-grc.png │ │ ├── Kubernetes_Policy_WG_Paper_v1_101123.pdf │ │ └── README.md │ └── admission-control │ │ ├── images │ │ └── attack-tree.png │ │ └── kubernetes-admission-control-threat-model.pdf └── OWNERS ├── code-of-conduct.md ├── sig-security-assessments ├── cluster-api │ ├── capi_2022_fuzzing.pdf │ └── images │ │ ├── mgmt-and-workload-cluster-relationship.png │ │ ├── excalidraw-data-flow-diagram-cluster-api.png │ │ └── data-flow-diagram-cluster-api-mermaid.md ├── OWNERS ├── README.md ├── vsphere-csi-driver │ └── self-assessment.md └── Documentation │ └── recipe-book.md ├── OWNERS_ALIASES ├── .gitignore ├── RELEASE.md ├── SECURITY_CONTACTS ├── .github └── ISSUE_TEMPLATE │ ├── audit-scope.md │ └── request-learning-session.md ├── SECURITY.md ├── CONTRIBUTING.md ├── README.md └── LICENSE /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/requirements.txt: -------------------------------------------------------------------------------- 1 | pytm==0.4 2 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | approvers: 4 | - sig-security-leads 5 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/hack/.gitignore: -------------------------------------------------------------------------------- 1 | #files generated by cve feed prow job 2 | cve-feed-hash 3 | official-cve-feed.json 4 | -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/images/4c.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/images/4c.png -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/images/OSCAL-layers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/images/OSCAL-layers.png -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Community Code of Conduct 2 | 3 | Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) 4 | -------------------------------------------------------------------------------- /sig-security-assessments/cluster-api/capi_2022_fuzzing.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-assessments/cluster-api/capi_2022_fuzzing.pdf -------------------------------------------------------------------------------- /sig-security-docs/papers/shift-down/shift-down-security.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/shift-down/shift-down-security.pdf -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/images/XACML-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/images/XACML-architecture.png -------------------------------------------------------------------------------- 
/sig-security-docs/papers/policy/kubernetes-policy-management.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/kubernetes-policy-management.pdf -------------------------------------------------------------------------------- /sig-security-docs/papers/policy_grc/images/policy-lifecycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy_grc/images/policy-lifecycle.png -------------------------------------------------------------------------------- /sig-security-docs/papers/shift-down/images/shift-down-teams.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/shift-down/images/shift-down-teams.png -------------------------------------------------------------------------------- /sig-security-docs/papers/admission-control/images/attack-tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/admission-control/images/attack-tree.png -------------------------------------------------------------------------------- /sig-security-docs/papers/policy_grc/images/compliance-workflows.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy_grc/images/compliance-workflows.png -------------------------------------------------------------------------------- /sig-security-docs/papers/shift-down/images/shift-down-banner.jpeg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/shift-down/images/shift-down-banner.jpeg -------------------------------------------------------------------------------- /OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners#owners_aliases 2 | 3 | aliases: 4 | sig-security-leads: 5 | - IanColdwater 6 | - tabbysable 7 | - cailyn-codes 8 | -------------------------------------------------------------------------------- /sig-security-external-audit/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | reviewers: 4 | - reylejano 5 | - smarticu5 6 | approvers: 7 | - reylejano 8 | - smarticu5 9 | -------------------------------------------------------------------------------- /sig-security-docs/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | reviewers: 4 | - savitharaghunathan 5 | - raesene 6 | approvers: 7 | - savitharaghunathan 8 | - raesene 9 | -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/images/Kubernetes-admission-controls.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/images/Kubernetes-admission-controls.png -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/images/Kubernetes-policy-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/images/Kubernetes-policy-architecture.png 
-------------------------------------------------------------------------------- /sig-security-docs/papers/policy_grc/images/kubernetes-policy-based-grc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy_grc/images/kubernetes-policy-based-grc.png -------------------------------------------------------------------------------- /sig-security-docs/papers/policy_grc/Kubernetes_Policy_WG_Paper_v1_101123.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy_grc/Kubernetes_Policy_WG_Paper_v1_101123.pdf -------------------------------------------------------------------------------- /sig-security-docs/papers/shift-down/images/shift-down-shared-responsibility.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/shift-down/images/shift-down-shared-responsibility.png -------------------------------------------------------------------------------- /sig-security-external-audit/adalogics-fuzzing-2022/kubernetes-fuzzing-report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/adalogics-fuzzing-2022/kubernetes-fuzzing-report.pdf -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/process.sh: -------------------------------------------------------------------------------- 1 | python3 tm.py --dfd > updated-dataflow.dot 2 | dot -Tpng < updated-dataflow.dot > updated-dataflow.png 3 | open updated-dataflow.png 4 | 
-------------------------------------------------------------------------------- /sig-security-docs/papers/policy/CNCF_Kubernetes_Policy_Management_WhitePaper_v1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/policy/CNCF_Kubernetes_Policy_Management_WhitePaper_v1.pdf -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/findings/Kubernetes Final Report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/findings/Kubernetes Final Report.pdf -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/findings/Kubernetes Threat Model.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/findings/Kubernetes Threat Model.pdf -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/findings/Kubernetes White Paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/findings/Kubernetes White Paper.pdf -------------------------------------------------------------------------------- /sig-security-assessments/cluster-api/images/mgmt-and-workload-cluster-relationship.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-assessments/cluster-api/images/mgmt-and-workload-cluster-relationship.png 
-------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/Atredis and Trail of Bits Proposal.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/Atredis and Trail of Bits Proposal.pdf -------------------------------------------------------------------------------- /sig-security-tooling/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | reviewers: 4 | - mtardy 5 | approvers: 6 | - mtardy 7 | - sig-security-leads 8 | 9 | emeritus_approvers: 10 | - pushkarj 11 | -------------------------------------------------------------------------------- /sig-security-assessments/cluster-api/images/excalidraw-data-flow-diagram-cluster-api.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-assessments/cluster-api/images/excalidraw-data-flow-diagram-cluster-api.png -------------------------------------------------------------------------------- /sig-security-docs/papers/admission-control/kubernetes-admission-control-threat-model.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-docs/papers/admission-control/kubernetes-admission-control-threat-model.pdf -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2025/Shielder OSTIF Kubernetes PUBLIC-v1.1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2025/Shielder OSTIF Kubernetes PUBLIC-v1.1.pdf 
-------------------------------------------------------------------------------- /sig-security-assessments/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | reviewers: 4 | - krol3 5 | 6 | approvers: 7 | - krol3 8 | - sig-security-leads 9 | 10 | emeritus_approvers: 11 | - aladewberry 12 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/updated-dataflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/updated-dataflow.png -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | reviewers: 4 | - mtardy 5 | approvers: 6 | - mtardy 7 | - sig-security-leads 8 | 9 | emeritus_approvers: 10 | - pushkarj 11 | -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/Makefile: -------------------------------------------------------------------------------- 1 | .phony: pdf 2 | 3 | pdf: kubernetes-policy-management.pdf 4 | pandoc --variable urlcolor=blue --variable geometry:margin=1in --from gfm -o kubernetes-policy-management.pdf kubernetes-policy-management.md -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/original dataflow.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/original dataflow.png -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2021-2022/findings/Kubernetes v1.24 Final Report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2021-2022/findings/Kubernetes v1.24 Final Report.pdf -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubernetes/sig-security/HEAD/sig-security-external-audit/security-audit-2019/findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf -------------------------------------------------------------------------------- /sig-security-tooling/vulnerability-mgmt/OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs at https://go.k8s.io/owners 2 | 3 | reviewers: 4 | - ericsmalling 5 | - mtardy 6 | approvers: 7 | - mtardy 8 | - sig-security-leads 9 | 10 | emeritus_approvers: 11 | - pushkarj 12 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/hack/requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements for Kubernetes CVE Feed Tool 2 | # Main dependency for HTTP requests to GitHub API 3 | requests==2.32.4 4 | 5 | # Dependencies automatically installed with requests 6 | certifi==2025.8.3 7 | charset-normalizer==3.4.3 8 | idna==3.10 9 | urllib3==2.5.0 10 | -------------------------------------------------------------------------------- 
/.gitignore: -------------------------------------------------------------------------------- 1 | # OSX leaves these everywhere on SMB shares 2 | ._* 3 | 4 | # OSX trash 5 | .DS_Store 6 | 7 | # Eclipse files 8 | .classpath 9 | .project 10 | .settings/** 11 | 12 | # Files generated by JetBrains IDEs, e.g. IntelliJ IDEA 13 | .idea/ 14 | *.iml 15 | 16 | # Vscode files 17 | .vscode 18 | 19 | # Emacs save files 20 | *~ 21 | \#*\# 22 | .\#* 23 | 24 | # Vim-related files 25 | [._]*.s[a-w][a-z] 26 | [._]s[a-w][a-z] 27 | *.un~ 28 | Session.vim 29 | .netrwhist 30 | 31 | # User cluster configs 32 | .kubeconfig 33 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release Process 2 | 3 | The Kubernetes Template Project is released on an as-needed basis. The process is as follows: 4 | 5 | 1. An issue is proposing a new release with a changelog since the last release 6 | 1. All [OWNERS](OWNERS) must LGTM this release 7 | 1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` 8 | 1. The release issue is closed 9 | 1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` 10 | -------------------------------------------------------------------------------- /SECURITY_CONTACTS: -------------------------------------------------------------------------------- 1 | # Defined below are the security contacts for this repo. 2 | # 3 | # They are the contact point for the Product Security Committee to reach out 4 | # to for triaging and handling of incoming issues. 5 | # 6 | # The below names agree to abide by the 7 | # [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) 8 | # and will be removed and replaced if they violate that agreement. 
9 | # 10 | # DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE 11 | # INSTRUCTIONS AT https://kubernetes.io/security/ 12 | 13 | IanColdwater 14 | tabbysable 15 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/audit-scope.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Third-Party Security Audit Add To Audit Roadmap Request 3 | about: Request to add a focus area to the third-party security audit roadmap 4 | title: 'REQUEST: Add to the third-party security audit roadmap' 5 | assignees: 6 | --- 7 | 8 | This issue is a request to add a focus area to the [Third-Party Security Audit Roadmap](https://github.com/kubernetes/sig-security/blob/main/sig-security-external-audit/external-audit-roadmap.md). 9 | 10 | 11 | **Name or title of focus area** 12 | 15 | 16 | **SIG or WG** 17 | 20 | 21 | **Maintainer or SME contact** 22 | 25 | 26 | **Link to code on GitHub** 27 | 30 | 31 | **Other** 32 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Security Announcements 4 | 5 | Join the [kubernetes-security-announce] group for security and vulnerability announcements. 6 | 7 | You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss]. 8 | 9 | ## Reporting a Vulnerability 10 | 11 | Instructions for reporting a vulnerability can be found on the 12 | [Kubernetes Security and Disclosure Information] page. 13 | 14 | ## Supported Versions 15 | 16 | Information about supported Kubernetes versions can be found on the 17 | [Kubernetes version and version skew support policy] page on the Kubernetes website. 
18 | 19 | [kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce 20 | [kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50 21 | [Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions 22 | [Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability 23 | -------------------------------------------------------------------------------- /sig-security-docs/papers/policy/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Policy Management Whitepaper 2 | 3 | ## About 4 | 5 | The Kubernetes Policy Management paper is a [Policy Working Group](https://github.com/kubernetes/community/tree/master/wg-policy) project to ensure the cloud native community shares a common view and have access to information about Kubernetes policy management and related topics. 6 | 7 | ## Updates 8 | 9 | The paper is intended to be a living document that is maintained for the community, by its members. The paper is housed in this Git repository. 10 | 11 | ### Format 12 | 13 | The paper is maintained in markdown format. A PDF format is generated for new versions. 14 | 15 | To create a PDF version install [pandoc](https://pandoc.org/installing.html) and run the following command: 16 | 17 | ```sh 18 | pandoc --variable urlcolor=blue --variable geometry:margin=1in --from gfm -o kubernetes-policy-management.pdf kubernetes-policy-management.md 19 | ``` 20 | 21 | ### Contributions 22 | 23 | Updates to the whitepaper, suggestions for updates, or discussion for updates should initiate with an issue submitted to the [Policy WG repo](https://github.com/kubernetes-sigs/wg-policy-prototypes) and labeled with "suggestion" and "Kubernetes Policy Panagement Paper". 
24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/kcm-ccm-notes.md: -------------------------------------------------------------------------------- 1 | # Meeting notes 2 | 3 | - CCM per cloud provider 4 | - same host as kube-apiserver 5 | - caches live in memory 6 | - refresh cache, but can be forced to by request 7 | - Controller manager attempts to use PoLA, but the service account controller has permission to write to it's own policies 8 | - Cloud controller (routes, IPAM, &c.) can talk to external resources 9 | - CCM/KCM have no notion of multi-tenant, and there are implications going forward 10 | - Deployments across namespace 11 | - cloud controller has access to cloud credentials (passed in by various means, as we saw in the code) 12 | - CCM is a reference implementation, meant to separate out other company's code 13 | - So Amazon doesn't need to have Red Hat's code running, &c. 
shared cache across all controllers
22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /sig-security-assessments/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Self-assessment 3 | 4 | A Self-assessment is a great starting point for Kubernetes subprojects to reach a security baseline. Our team 5 | will work with project maintainers create a threat model and assess the current security stance of all or a portion of 6 | the project. The resulting assessment will allow maintainers to determine gaps in their project's security and develop a 7 | strategy to work toward improving it. 8 | 9 | ## Assessments 10 | - [X] [Cluster API](https://github.com/kubernetes/sig-security/blob/1e6f14de2edae1662e2897a933071928515cc27c/sig-security-assessments/cluster-api/self-assessment.md) 11 | - [ ] [vsphere-csi-driver](https://github.com/kubernetes/sig-security/blob/main/sig-security-assessments/vsphere-csi-driver/self-assessment.md) 12 | 13 | ## Getting Started 14 | If you have a project for which you would like to complete a Self-assessment you can take a look at the 15 | [Self-assessment Recipe Book](https://github.com/kubernetes/sig-security/blob/main/sig-security-assessments/Documentation/recipe-book.md) 16 | for next steps. 17 | 18 | ## Getting Involved 19 | If you would like to participate in the SIG Security Self-assessment area of work, please join the SIG Security 20 | [mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-security) to be added to meeting invites. 21 | 22 | ## Leadership 23 | 24 | ### Subproject Lead 25 | The subproject lead manages the operations of the subproject. 
26 | 27 | ## Contact 28 | - Slack: [#sig-security-assessments](https://kubernetes.slack.com/messages/sig-security-assessments) 29 | 30 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/original dataflow.dot: -------------------------------------------------------------------------------- 1 | digraph K8S { 2 | subgraph cluster_apiserverinternal { 3 | node [style=filled]; 4 | color=green; 5 | etcd[label="etcd"]; 6 | label = "API Server Data Layer"; 7 | } 8 | 9 | subgraph cluster_apiserver { 10 | node [style=filled]; 11 | color=blue; 12 | kubeapiserver[label="kube-apiserver"]; 13 | kubeapiserver->etcd[label="HTTPS"] 14 | label = "API Server"; 15 | } 16 | 17 | subgraph cluster_mastercomponents { 18 | node [style=filled]; 19 | label = "Master Control Plane Components"; 20 | scheduler[label="Scheduler"]; 21 | controllers[label="Controllers"] 22 | scheduler->kubeapiserver[label="Callback/HTTPS"]; 23 | controllers->kubeapiserver[label="Callback/HTTPS"]; 24 | color=black; 25 | } 26 | 27 | subgraph cluster_worker { 28 | label="Worker" 29 | color="blue" 30 | kubelet->kubeapiserver[label="authenticated HTTPS"] 31 | kubeproxy[label="kube-proxy"] 32 | iptables->kubeproxy->iptables 33 | pods[label="pods with various containers"] 34 | pods->kubeproxy->pods 35 | } 36 | 37 | subgraph cluster_internet { 38 | label="Internet" 39 | authuser[label="Authorized User via kubebctl"] 40 | generaluser[label="General User"] 41 | authuser->kubeapiserver[label="Authenticated HTTPS"] 42 | generaluser->pods[label="application-specific connection protocol"] 43 | } 44 | kubeapiserver->kubelet[label="HTTPS"] 45 | kubeapiserver->pods[label="HTTP",color=red] 46 | } 47 | 48 | -------------------------------------------------------------------------------- /sig-security-assessments/cluster-api/images/data-flow-diagram-cluster-api-mermaid.md: 
-------------------------------------------------------------------------------- 1 | ```mermaid 2 | flowchart TB 3 | subgraph Bootstrap Node / Management Cluster 4 | kcp[Kubeadm Control Plane Controller]--https-->mgmtk8s 5 | kbc[Kubeadm Bootstrap Controller]--https-->mgmtk8s 6 | capi[Cluster API Controller]--https-->mgmtk8s 7 | capa[Cluster API AWS Controller]--https-->mgmtk8s 8 | mgmtk8s[Management Kubernetes API Server]--https-->mgmtetcd[etcd] 9 | end 10 | capa--https-->secrets 11 | capa--https-->EC2 12 | capa--https-->ELB 13 | kcp--https-->k8sapi 14 | capi--https-->k8sapi 15 | kbc--https-->k8sapi 16 | subgraph AWS Regional Services 17 | secrets[AWS Secrets Manager] 18 | EC2[Amazon EC2] 19 | ELB[Elastic Load Balancing] 20 | end 21 | subgraph VPC[Provisioned VPC] 22 | ELB--TCP Passthrough-->k8sapi 23 | IMDS[Instance Metadata Service] 24 | subgraph Workload EC2 Instance 25 | Kubelet 26 | Kubeadm 27 | cloud-init 28 | awscli[AWS CLI] 29 | cloud-init--executes-->awscli 30 | cloud-init--executes-->Kubeadm 31 | cloud-init--starts-->Kubelet 32 | end 33 | k8sapi--websocket-->Kubelet 34 | awscli--https-->secrets 35 | Kubeadm--https-->k8sapi 36 | Kubelet--http-->IMDS 37 | awscli--http-->IMDS 38 | Kubelet--https-->k8sapi 39 | subgraph Workload control plane 40 | k8sapi[Workload Kubernetes API server] 41 | end 42 | end 43 | 44 | classDef Amazon fill:#FF9900; 45 | classDef ThirdParty fill:#FFB6C1; 46 | classDef AmazonBoundary fill:#fff2e6; 47 | class EC2,secrets,EC2,ELB,IMDS,awscli Amazon 48 | class cloud-init ThirdParty 49 | class VPC AmazonBoundary 50 | ``` -------------------------------------------------------------------------------- /sig-security-tooling/README.md: -------------------------------------------------------------------------------- 1 | # Welcome to SIG security tooling 2 | 3 | Our mission is to enhance the security of Kubernetes through 4 | community-maintained code, often in collaboration with other Special Interest 5 | Groups (SIGs). 
6 | 7 | We hold meetings every two weeks on Fridays at 4pm UTC ([check your local 8 | time](https://mytime.io/4pm/UTC)), typically alternating between working 9 | sessions and learning sessions. If you’d like to request a learning 10 | session, please [click here](https://github.com/kubernetes/sig-security/issues/new?assignees=&labels=sig/security&projects=&template=request-learning-session.md&title=REQUEST:+Request+a+Learning+session). 11 | See the history of learning sessions in the 12 | [learning-sessions.md](https://github.com/kubernetes/sig-security/blob/main/sig-security-tooling/learning-sessions.md) file. 13 | 14 | ## Dashboards 15 | 16 | Dashboards for SIG Security are defined in 17 | [kubernetes/test-infra/config/testgrids/kubernetes/sig-security/config.yaml](https://github.com/kubernetes/test-infra/blob/master/config/testgrids/kubernetes/sig-security/config.yaml) and you can find the dashboard group at 18 | [testgrid.k8s.io/sig-security](https://testgrid.k8s.io/sig-security). 19 | 20 | For more information about each project, please refer to the dedicated 21 | subfolders within this directory. 22 | 23 | ## Contact us 24 | 25 | To join our meetings, subscribe to the [kubernetes-sig-security mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-security) 26 | which will send you calendar invites for the upcoming meetings. For 27 | asynchronous communication, you can reach us on the 28 | [Kubernetes sig-security-tooling Slack channel](https://kubernetes.slack.com/messages/sig-security-tooling). 29 | 30 | The sub-group owners are listed in the [OWNERS](https://github.com/kubernetes/sig-security/blob/main/sig-security-tooling/OWNERS) file. 
31 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2021-2022/RFP_Decision.md: -------------------------------------------------------------------------------- 1 | # Third-Party Security Audit Subproject - RFP Decision Process 2 | 3 | Kubernetes SIG Security's Third-Party Security Audit subproject (subproject, henceforth) runs the process of conducting a third-party security audit for the Kubernetes project. 4 | 5 | The process started with publishing a Request for Proposal (RFP) in 2021 which outlined the scope and goals of the third-party security audit. 6 | The RFP closed after receiving four (4) responses to the RFP. 7 | The subproject leads evaluated the RFP responses to determine which vendor to proceed with the audit. 8 | The subproject leads are experienced in the information security industry and are active contributors to the Kubernetes project. 9 | 10 | Due to the extension of the RFP until four responses were received and contract negotiations, the 2021 Third-Party Security Audit will continue into 2022. 11 | 12 | Similar to the 2019 Third-Party Audit RFP process, the RFP responses were evaluated and scored by the subproject leads on a scale of 1 to 5 on the following categories: 13 | 14 | - Personnel fit and talent 15 | - Relevant understanding and experience (orchestration systems, containers, hardening, etc.) 16 | - The individual work products requested in the RFP: 17 | - Threat model 18 | - Report 19 | - Code analysis 20 | - Reference architecture 21 | 22 | Based on the above criteria, the scores of the RFP responses were: 23 | 24 | | Vendor | Total Score| 25 | |--------|------------| 26 | | Vendor A | 124 | 27 | | Vendor B | 108 | 28 | | Vendor C | 165 | 29 | | Vendor D | 163 | 30 | 31 | The Kubernetes SIG Security's Third-Party Security Audit subproject selects NCC Group to complete the 2021/2022 third-party security audit for the Kubernetes project. 
32 | 33 | We look forward to working with NCC Group on the audit and to partner with various SIGs on this assessment. 34 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: 4 | 5 | _As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ 6 | 7 | ## Getting Started 8 | 9 | We have full documentation on how to get started contributing here: 10 | 11 | 14 | 15 | - [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests 16 | - [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing) 17 | - [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers 18 | 19 | ## Mentorship 20 | 21 | - [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
22 | 23 | 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/request-learning-session.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Request a Learning session 3 | about: Request a Learning session for SIG Security Tooling meeting 4 | title: 'REQUEST: Request a Learning session' 5 | labels: sig/security 6 | assignees: 7 | --- 8 | 9 | **Please tell us a bit more about the topic** 10 | 15 | 16 | **Please share speaker details** 17 | 18 | 27 | 28 | **If speaker is confirmed, please share speaker availability** 29 | 30 | 38 | 39 | 40 | **Explain the benefits to the community that this session provides** 41 | 42 | 50 | 51 | 57 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/hack/cve_title_parser.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright 2022 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | import re 18 | 19 | # CVE ID Format: CVE-YYYY-NNNN+ (NNNN+ at least 4 digits) 20 | CVE_ID_PATTERN = r"CVE-\d{4}-\d{4,}" 21 | 22 | # Match leading CVEs with optional separators (anchored to start, using ^) 23 | LEADING_CVE_BLOCK_PATTERN = rf"^(?:{CVE_ID_PATTERN}[\s,:-]*)+" 24 | 25 | def parse_cve_title(title: str): 26 | """Parse CVE title to extract CVE IDs and description. 27 | 28 | Args: 29 | title: The CVE title string to parse 30 | 31 | Returns: 32 | tuple: (cve_ids, description) where cve_ids is a list of CVE ID strings 33 | and description is the cleaned description text 34 | 35 | Raises: 36 | LookupError: If the title doesn't start with a valid CVE block 37 | """ 38 | match = re.match(LEADING_CVE_BLOCK_PATTERN, title) 39 | if not match: 40 | raise LookupError(f"Title does not start with CVE block: {title}") 41 | 42 | leading_cve_block = match.group(0) 43 | # Extract CVEs only from that leading block - handling issues which contain other CVE' references 44 | # ex: CVE-2019-11249: Incomplete fixes for CVE-2019-1002101 and CVE-2019-11246, kubectl [...] 45 | # ref: https://github.com/kubernetes/kubernetes/issues/80984 46 | cve_ids = re.findall(CVE_ID_PATTERN, leading_cve_block) 47 | 48 | # Remove the leading CVE block from the title to get the description 49 | description = re.sub(LEADING_CVE_BLOCK_PATTERN, "", title).strip() 50 | return cve_ids, description 51 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2025/RFP_Decision.md: -------------------------------------------------------------------------------- 1 | # Third-Party Security Audit Subproject - RFP Decision Process 2 | 3 | Kubernetes SIG Security's Third-Party Security Audit subproject (subproject, henceforth) runs the process of conducting a third-party security audit for the Kubernetes project. 
4 | 5 | The subproject is working with the [Open Source Technology Improvement Fund (OSTIF, henceforth)](https://ostif.org/) to create the RFP, evaluate responses to the RFP, and to coordinate with the vendor. Since 2022, the CNCF have been working with OSTIF to conduct security audits for incubating and graduated CNCF projects. 6 | For more background on CNCF’s partnership with OSTIF and security audits, checkout blogs from [2022](https://www.cncf.io/blog/2022/08/08/improving-cncf-security-posture-with-independent-security-audits/) and [2023](https://www.cncf.io/blog/2023/03/13/an-overview-of-the-cncf-and-ostif-impact-report-for-the-second-half-of-2022-and-early-2023/). 7 | 8 | The process started with creating the scope of the external security audit. The scope was identified by the subproject and Kubernetes contributors. 9 | OSTIF and the subproject created the [Request for Proposal (RFP) in 2024](https://github.com/kubernetes/sig-security/blob/main/sig-security-external-audit/security-audit-2024/RFP.md) which outlined the scope and goals of the security audit. The RFP closed after receiving seven (7) responses to the RFP. 10 | 11 | OSTIF evaluated the RFP responses to determine which vendor to proceed with the audit and reviewed results with the subproject. Scoring was based on the following criteria: 12 | - Openness 13 | - Scope 14 | - Expertise 15 | - Cost 16 | - Past performance 17 | - Documentation quality 18 | 19 | Out of a total score of 100, the scores of the RFP responses were: 20 | 21 | | Vendor | Total Score| 22 | |--------|------------| 23 | | Vendor A | 92 | 24 | | Vendor B | 73 | 25 | | Vendor C | 90 | 26 | | Vendor D | 100 | 27 | | Vendor E | 0 | 28 | | Vendor F | 86 | 29 | | Vendor G | 0 | 30 | 31 | OSTIF and the Kubernetes SIG Security's Third-Party Security Audit subproject selects Shielder to complete the 2025 third-party security audit for the Kubernetes project. 
32 | 33 | We look forward to working with OSTIF and Shielder on the audit and to partner with various SIGs on this assessment. 34 | -------------------------------------------------------------------------------- /sig-security-assessments/vsphere-csi-driver/self-assessment.md: -------------------------------------------------------------------------------- 1 | --- 2 | breaks: false 3 | --- 4 | 5 | 6 | 7 | # vSphere CSI Driver Security Self-Assessment 8 | 9 | ## Metadata 10 | 11 | * **Last Modified**: 2023-03-14 12 | * **Youtube 13 | Recordings**: 14 | * **Authors** (alphabetical order): 15 | * person 16 | * **Reviewers** (alphabetical order): 17 | * person 18 | 19 | ## Overview 20 | 21 | 22 | 23 | ### Impact 24 | 25 | 26 | 27 | ### Scope 28 | 29 | #### Process level 30 | 31 | 32 | 33 | #### Technical 34 | 35 | 36 | 37 | ### Not in Scope 38 | 39 | 40 | ## Communication Channels 41 | 42 | ### Slack channels in Kubernetes Workspace 43 | 44 | 45 | 46 | ### Mailing lists 47 | 48 | * list 1 49 | * list 2 50 | 51 | ### GitHub tracking 52 | 53 | 54 | 55 | ### Primary Community Contact 56 | 57 | 58 | ## Project Overview 59 | 60 | 61 | ### Project Goals 62 | 63 | * 64 | 65 | ### Project Non-goals 66 | 67 | * 68 | 69 | ### Intended Uses of the Project 70 | 71 | 72 | ### Personas 73 | 74 | #### Cloud Admin 75 | 76 | 77 | #### Platform Operator 78 | 79 | 80 | 81 | #### Workload Operator 82 | 83 | 84 | 85 | #### Application Developer 86 | 87 | 88 | 89 | ### Primary Components 90 | 91 | 92 | #### Component 1 93 | 94 | #### Component 2 95 | 96 | ### Data Stores and Data Flows 97 | 98 | #### Data Flow diagrams 99 | 100 | 101 | #### Common Flows 102 | 103 | ##### Flow 1 104 | 105 | ###### Important considerations 106 | 107 | 108 | #### Flow 2: 109 | 110 | 111 | 112 | 113 | ### 3rd Party Requirements (source, libraries, services, APIs) 114 | 115 | 116 | ### Secure development practices 117 | 118 | 119 | 120 | ### Development Workflow 121 | 122 | 123 | #### Identified gaps in 
development workflow 124 | 125 | ## Threat Modeling with STRIDE 126 | 127 | ### Spoofing 128 | 129 | 130 | 131 | ### Tampering 132 | 133 | 134 | 135 | ### Repudiation 136 | 137 | 138 | ### Information Disclosure 139 | 140 | 141 | ### Denial of Service 142 | 143 | 144 | 145 | ### Elevation of Privilege 146 | 147 | 148 | 149 | ### Security issue resolution 150 | 151 | ## References 152 | 153 | ### Docs 154 | 155 | 156 | ### Talks 157 | 158 | ### Links 159 | 160 | -------------------------------------------------------------------------------- /sig-security-tooling/vulnerability-mgmt/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerability Management 2 | 3 | Covers Kubernetes wide vulnerability management process, policies and workflows 4 | that tackle *known* vulnerabilities in Kubernetes artifacts that can be found 5 | via automation 6 | 7 | ## Goals 8 | 9 | 1. Identify known vulnerabilities in Kubernetes artifacts by scanning them 10 | periodically 11 | 2. Leverage the existing resolution and reporting process (i.e. Github issues, PRs) 12 | to document and resolve any vulnerabilities that impact Kubernetes 13 | 3. Create community driven awareness and documentation around known CVEs in 14 | Kubernetes related artifacts 15 | 16 | ### Build Time Dependencies 17 | 18 | A tool agnostic periodic scanning of build time dependencies 19 | (typically dependencies found in `go.mod` file) of Kubernetes. More details can 20 | be found [here](build-time-dependencies.md) 21 | 22 | This track is a partnership between SIG 23 | Architecture's [Code Organization](https://github.com/kubernetes/community/tree/master/sig-architecture#code-organization) 24 | sub-project and SIG Security's Tooling sub-project 25 | 26 | ### Container Images 27 | 28 | A tool agnostic periodic scanning for vulnerabilities in container images 29 | shipped as part of a Kubernetes Release. 
This effort is beginning to take form 30 | and is being tracked in 31 | [Issue #5920](https://github.com/kubernetes/community/issues/5920). More details can be found [here](container-images.md) 32 | 33 | This track is a partnership 34 | between [SIG Release](https://github.com/kubernetes/sig-release) 35 | and SIG Security's Tooling sub-project 36 | 37 | **Note**: Artifacts here refer to code, images and binaries 38 | 39 | ## Non-Goals 40 | 41 | 1. **Responsible disclosure of vulnerabilities**: This will continue to be the 42 | responsibility 43 | of [Security Response Committee](https://github.com/kubernetes/community/tree/master/committee-product-security/README.md) 44 | 2. **Runtime dependencies**: Triaging of vulnerabilities found in components 45 | that are runtime dependencies for an on-premises or *-as-a-service* 46 | Kubernetes deployment. Examples include but are not limited to container 47 | runtimes, container registries, Node OS 48 | 3. **Resolving license violations**: Allowed third party license policy can be 49 | found [here](https://github.com/cncf/foundation/blob/master/allowed-third-party-license-policy.md#approved-licenses-for-allowlist) 50 | 4. **Fixing unfixable vulnerabilities**: The focus of this effort is to apply fixes to 51 | vulnerabilities for which a patch is released by owners of the vulnerable dependency 52 | or base image. This scoping decision is made to maximize utilization of maintainer 53 | time and attention span on high impact work. 
54 | 55 | **Note**: If you have a topic that you think is missing, please hop on over to 56 | our 57 | [slack channel](https://kubernetes.slack.com/messages/sig-security-tooling) 58 | to discuss more :-) 59 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/README.md: -------------------------------------------------------------------------------- 1 | # Official CVE Feed 2 | 3 | The official CVE feed is separated into two main components: 4 | 1. The scripts, that update a cloud bucket containing the feed. 5 | 2. The website, rendering and serving the feed in various formats. 6 | 7 | ## Scripts 8 | 9 | A script in the [kubernetes/sig-security](https://github.com/kubernetes/sig-security) 10 | repository under the [sig-security-tooling/cve-feed/hack](https://github.com/kubernetes/sig-security/tree/main/sig-security-tooling/cve-feed/hack) 11 | folder. This script is 12 | a bash script named `fetch-cve-feed.sh` that: 13 | - sets up the python3 environment; 14 | - generates the CVE feed file with `fetch-official-cve-feed.py`; 15 | - compares the sha256 of the newly generated file with the existing one; 16 | - if the sha256 changed, uploads the newly generated CVE feed file to the bucket. 17 | 18 | The `fetch-official-cve-feed.py` file executed by the `fetch-cve-feed.sh` is a 19 | python3 script that: 20 | - queries the GitHub API to fetch all the issues with the `official-cve-feed` 21 | label in the [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes/issues?q=is%3Aissue%20label%3Aofficial-cve-feed%20) 22 | repository; 23 | - formats the result with the appropriate JSON schema to be JSON feed 24 | compliant; 25 | - prints the output to stdout. 26 | 27 | These scripts are run regularly as a CronJob on the k8s infrastructure. 
28 | 29 | In short, these scripts take the GitHub [kubernetes/kubernetes issues 30 | labeled with `official-cve-feed`](https://github.com/kubernetes/kubernetes/issues?q=is%3Aissue%20label%3Aofficial-cve-feed%20) 31 | as the input and generate a JSON feed file as an output in a cloud bucket. The 32 | output can be publicly fetched at [gs://k8s-cve-feed/](https://console.cloud.google.com/storage/browser/k8s-cve-feed) or [storage.googleapis.com/k8s-cve-feed](https://storage.googleapis.com/k8s-cve-feed/). 33 | 34 | ## Website 35 | 36 | The main output of the official CVE feed is the HTML website page available on 37 | [k8s.io/docs/reference/issues-security/official-cve-feed](https://kubernetes.io/docs/reference/issues-security/official-cve-feed/) 38 | where you can also find links to the JSON and RSS feed formats. 39 | 40 | The corresponding HTML page is generated from the [official-cve-feed.md](https://github.com/kubernetes/website/blob/main/content/en/docs/reference/issues-security/official-cve-feed.md?plain=1) 41 | file from the [kubernetes/website](https://github.com/kubernetes/website) 42 | repository. It mainly calls the `cve-feed` shortcode that is defined in 43 | [website/layouts/shortcodes/cve-feed.html](https://github.com/kubernetes/website/blob/main/layouts/shortcodes/cve-feed.html) 44 | which consumes the JSON format by fetching the URL from the 45 | [`.Site.Params.cveFeedBucket`](https://github.com/kubernetes/website/blob/75f19fc9675d07fdbc724d02953d905ef7ca8619/hugo.toml#L168) 46 | and translating it to an HTML table. 47 | 48 | This page is thus updated every time the website is built. 
49 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/RFP_Decision.md: -------------------------------------------------------------------------------- 1 | # Security Audit WG - RFP Decision Process 2 | 3 | The Security Audit Working Group was tasked with leading the process of having a third party security audit conducted for the Kubernetes project. Our first steps were to select the vendors to be included in the Request for Proposal (RFP) process and create the RFP document setting out the goals of the audit. We were then responsible for evaluating the submitted proposals in order to select the vendor best suited to complete a security assessment against Kubernetes, a very complex and widely scoped project. 4 | 5 | After publishing the initial RFP and distributing it to the eligible vendors, we had a period open for vendors to submit questions to better understand the project’s goals, which we made publicly available in the RFP document. While six (6) vendors were invited to participate, we ultimately received four (4) RFP responses, due to one vendor dropping out and two vendors partnering to submit a combined proposal. 6 | 7 | The next stage of this project was more difficult: evaluating the responses and determining which vendor to use for the audit. With the list of eligible vendors already limited to a small set of very strong and well-known firms, it came as no surprise to us that they each had extremely compelling proposals that made choosing one over the other very difficult. The working group leads have years of experience on both sides of the table: writing proposals and conducting audits, as well as working with these vendors and their teams to assess companies we’ve worked at. 
To help us combine objective evaluations with our individual past experiences and knowledge of each of the vendors’ work and relevant experience (conference talks, white papers, published research and reports), we came up with a set of criteria that each of us used to rank the proposals on a scale of 1 to 5: 8 | 9 | * Personnel fit and talent 10 | * Relevant understanding and experience (orchestration systems, containers, hardening, etc.) 11 | * The individual work products requested in the RFP: 12 | - Threat Model 13 | - Reference architecture 14 | - White paper 15 | - Assessment and report 16 | 17 | While budget constraints became a part of the final selection, we wanted to leave cost out of the process as much as possible and focus on ensuring the community received the best possible audit. Based on this criteria, the scoring overall was extremely close, with the total scores all within a few points of each other. 18 | 19 | | Vendor | Total Score | 20 | |--------|-------------| 21 | | Vendor A | 149 | 22 | | Vendor B | 149 | 23 | | Vendor C | 144 | 24 | | Vendor D | 135 | 25 | 26 | After narrowing it down to our top two choices and some further discussions with those vendors, we decided to select the partnership of Atredis and Trail of Bits to complete this audit. We felt very strongly that the combination of these two firms, both composed of very senior and well known staff in the information security industry, would provide the best possible results. We look forward to working with them to kick off the actual audit process soon and for other Kubernetes contributors from the various SIGs to help partner with the working group on this assessment. 27 | 28 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/hack/fetch-cve-feed.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2022 The Kubernetes Authors. 
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -o nounset 17 | set -o pipefail 18 | 19 | # name of the output file 20 | OUTPUT_FILE=official-cve-feed.json 21 | 22 | # value to return at end of script 23 | RETURN_VALUE=0 24 | 25 | # install python-pip3 26 | apt-get update -qq 27 | DEBIAN_FRONTEND=noninteractive apt-get install -y -qq python3-pip 28 | 29 | # install requests module 30 | pip3 install requests 31 | 32 | # python script to generate official-cve-feed.json 33 | python3 fetch-official-cve-feed.py > "${OUTPUT_FILE}" 34 | EXIT_CODE=$? 
35 | if [[ "${EXIT_CODE}" -ne 0 ]]; then 36 | RETURN_VALUE=${EXIT_CODE} 37 | fi 38 | 39 | # make the prow job logs always helpful 40 | cat "${OUTPUT_FILE}" 41 | 42 | # python returns 7 to indicate recoverable errors 43 | # Exit bash script now if unrecoverable python error 44 | if [[ "${EXIT_CODE}" -ne 0 ]] && [[ "${EXIT_CODE}" -ne 7 ]]; then 45 | exit "${RETURN_VALUE}" 46 | fi 47 | 48 | # function to calculate the hash value of official-cve-feed.json 49 | calculate_hash(){ 50 | if command -v shasum >/dev/null 2>&1; then 51 | cat "$@" | shasum -a 256 | cut -d' ' -f1 52 | elif command -v sha256sum >/dev/null 2>&1; then 53 | cat "$@" | sha256sum | cut -d' ' -f1 54 | else 55 | echo "missing shasum tool" 1>&2 56 | exit 1 57 | fi 58 | } 59 | 60 | # check if official-cve-feed.json blob exists in the bucket 61 | set -e 62 | EXIT_CODE=0 63 | gsutil ls "gs://k8s-cve-feed/${OUTPUT_FILE}" >/dev/null 2>&1 || EXIT_CODE=$? 64 | 65 | # fetch the hash value of existing official-cve-feed.json json, if differs then 66 | # upload the new cve feed data to the existing blob. 
67 | if [[ "${EXIT_CODE}" -eq 1 ]]; then 68 | gsutil cp "${OUTPUT_FILE}" gs://k8s-cve-feed 69 | calculate_hash "${OUTPUT_FILE}" > cve-feed-hash 70 | echo "$( cve-feed-hash 81 | 82 | if [[ "${hash}" == "${new_hash}" ]]; then 83 | echo "Both the hashes have identical contents" 84 | else 85 | echo "Both the hash value differ" 86 | echo "Uploading the new json feed and hash value to gcs bucket" 87 | gsutil cp "${OUTPUT_FILE}" gs://k8s-cve-feed 88 | gsutil cp cve-feed-hash gs://k8s-cve-feed/cve-feed-hash 89 | fi 90 | fi 91 | 92 | exit "${RETURN_VALUE}" 93 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Security Special Interest Group 2 | 3 | Covers horizontal security initiatives for the Kubernetes project, including regular security audits, the vulnerability management process, cross-cutting security documentation, and security community management. 4 | 5 | The [charter](https://github.com/kubernetes/community/blob/master/sig-security/charter.md) defines the scope and governance of the Security Special Interest Group. 6 | 7 | ## Meetings 8 | *Joining the [mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-security) for the group will typically add invites for the following meetings to your calendar.* 9 | * Regular SIG Meeting: [Fridays at 8:00 PT (Pacific Time)](https://zoom.us/j/9934z1184192?pwd=L25Tc0ZOL3FqU09KNERlTU12dFhTQT09) (biweekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=8:00&tz=PT%20%28Pacific%20Time%29). 10 | * [Meeting notes and Agenda](https://docs.google.com/document/d/1GgmmNYN88IZ2v2NBiO3gdU8Riomm0upge_XNVxEYXp0/edit?usp=sharing). 11 | * [Meeting recordings](https://www.youtube.com/playlist?list=PL69nYSiGNLP1mXOLAc9ti0oX8s_ookQCi). 12 | 13 | ## Leadership 14 | 15 | ### Chairs 16 | The Chairs of the SIG run operations and processes governing the SIG. 
17 | 18 | * Ian Coldwater (**[@IanColdwater](https://github.com/IanColdwater)**), Docker 19 | * Tabitha Sable (**[@tabbysable](https://github.com/tabbysable)**), Datadog 20 | * Cailyn Edwards (**[@cailyn-codes](https://github.com/cailyn-codes)**), Okta 21 | 22 | ## Contact 23 | - Slack: [#sig-security](https://kubernetes.slack.com/messages/sig-security) 24 | - [Mailing list](https://groups.google.com/forum/#!forum/kubernetes-sig-security) 25 | - [Open Community Issues/PRs](https://github.com/kubernetes/community/labels/sig%2Fsecurity) 26 | - [Sig Security Issues](https://github.com/kubernetes/sig-security/issues) 27 | - GitHub Teams: 28 | - [@kubernetes/sig-security-leads](https://github.com/orgs/kubernetes/teams/sig-security-leads) - SIG Security Leads 29 | - [@kubernetes/sig-security-pr-reviews](https://github.com/orgs/kubernetes/teams/sig-security-pr-reviews) - SIG Security PR review notifications 30 | - Steering Committee Liaison: Stephen Augustus (**[@justaugustus](https://github.com/justaugustus)**) 31 | 32 | ## Subprojects 33 | 34 | The following [subprojects][subproject-definition] are owned by sig-security: 35 | ### security-audit 36 | Third Party Security Audit 37 | - **Owners:** 38 | - [kubernetes/sig-security/sig-security-external-audit](https://github.com/kubernetes/sig-security/blob/main/sig-security-external-audit/OWNERS) 39 | ### security-docs 40 | Security Documents and Documentation 41 | - **Owners:** 42 | - [kubernetes/sig-security/sig-security-docs](https://github.com/kubernetes/sig-security/blob/main/sig-security-docs/OWNERS) 43 | - **Contact:** 44 | - Slack: [#sig-security-docs](https://kubernetes.slack.com/messages/sig-security-docs) 45 | ### security-tooling 46 | Development and Enhancements of Security Tooling 47 | - **Owners:** 48 | - [kubernetes/sig-security/sig-security-tooling](https://github.com/kubernetes/sig-security/blob/main/sig-security-tooling/OWNERS) 49 | - **Contact:** 50 | - Slack: 
[#sig-security-tooling](https://kubernetes.slack.com/messages/sig-security-tooling) 51 | ### sig-security 52 | SIG Security discussions, documents, processes and other artifacts 53 | - **Owners:** 54 | - [kubernetes/sig-security](https://github.com/kubernetes/sig-security/blob/master/OWNERS) 55 | - **Contact:** 56 | - Slack: [#sig-security](https://kubernetes.slack.com/messages/sig-security) 57 | 58 | [subproject-definition]: https://github.com/kubernetes/community/blob/master/governance.md#subprojects 59 | -------------------------------------------------------------------------------- /sig-security-tooling/scanning/build-deps-and-release-images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2022 The Kubernetes Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -euo pipefail 17 | apt update && apt -y install jq 18 | wget -q -O /usr/local/bin/snyk https://static.snyk.io/cli/latest/snyk-linux && chmod +x /usr/local/bin/snyk 19 | mkdir -p "${ARTIFACTS}" 20 | if [ -z "${SNYK_TOKEN}" ]; then 21 | echo "SNYK_TOKEN env var is not set, required for snyk scan" 22 | exit 1 23 | fi 24 | echo "Running snyk scan .." 25 | EXIT_CODE=0 26 | DEBUG_LOG_FILE=$(mktemp) 27 | RESULT_UNFILTERED=$(snyk test -d --json 2> "$DEBUG_LOG_FILE") || EXIT_CODE=$? 
28 | if [ $EXIT_CODE -gt 1 ]; then 29 | echo "Failed to run snyk scan with exit code $EXIT_CODE" 30 | cat "$DEBUG_LOG_FILE" 31 | exit 1 32 | fi 33 | rm -f "$DEBUG_LOG_FILE" 34 | 35 | RESULT=$(echo $RESULT_UNFILTERED | jq \ 36 | '{vulnerabilities: .vulnerabilities | map(select((.type != "license") and (.version != "0.0.0"))) | select(length > 0) }') 37 | if [[ ${RESULT} ]]; then 38 | CVE_IDs=$(echo $RESULT | jq '.vulnerabilities[].identifiers.CVE | unique[]' | sort -u) 39 | #convert string to array 40 | CVE_IDs_array=(`echo ${CVE_IDs}`) 41 | #TODO:Implement deduplication of CVE IDs in future 42 | for i in "${CVE_IDs_array[@]}" 43 | do 44 | if [[ "$i" == *"CVE"* ]]; then 45 | #Look for presence of GitHub Issues for detected CVEs. If no issues are present, this CVE needs triage 46 | #Once the job fails, CVE is triaged by SIG Security and a tracking issue is created. 47 | #This will allow in the next run for the job to pass again 48 | TOTAL_COUNT=$(curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/search/issues?q=repo:kubernetes/kubernetes+${i}" | jq .total_count) 49 | if [[ $TOTAL_COUNT -eq 0 ]]; then 50 | echo "Vulnerability filtering failed" 51 | exit 1 52 | fi 53 | fi 54 | done 55 | fi 56 | echo "Build time dependency scan completed" 57 | 58 | # container images scan 59 | echo "Fetch the list of k8s images" 60 | curl -Ls https://sbom.k8s.io/$(curl -Ls https://dl.k8s.io/release/latest.txt)/release | grep "SPDXID: SPDXRef-Package-registry.k8s.io" | grep -v sha256 | cut -d- -f3- | sed 's/-/\//' | sed 's/-v1/:v1/' > images 61 | while read image; do 62 | echo "Running container image scan for $image" 63 | EXIT_CODE=0 64 | DEBUG_LOG_FILE=$(mktemp) 65 | RESULT_UNFILTERED=$(snyk container test $image -d --json 2> "$DEBUG_LOG_FILE") || EXIT_CODE=$? 
66 | if [ $EXIT_CODE -gt 1 ]; then 67 | echo "Failed to run snyk scan with exit code $EXIT_CODE" 68 | cat "$DEBUG_LOG_FILE" 69 | exit 1 70 | fi 71 | rm -f "$DEBUG_LOG_FILE" 72 | 73 | RESULT=$(echo $RESULT_UNFILTERED | jq \ 74 | '{vulnerabilities: .vulnerabilities | map(select(.isUpgradable == true or .isPatchable == true)) | select(length > 0) }') 75 | if [[ ${RESULT} ]]; then 76 | echo "Vulnerability filtering failed" 77 | # exit 1 (To allow other images to be scanned even if one fails) 78 | else 79 | echo "Scan completed image $image" 80 | fi 81 | done < images -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/template.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: 4 | - Owner(s): 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging. The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work? 17 | 18 | ## Are there any subcomponents or shared boundaries? 19 | 20 | ## What communications protocols does it use? 21 | 22 | ## Where does it store data? 23 | 24 | ## What is the most sensitive data it stores? 25 | 26 | ## How is that data stored? 
27 | 28 | # Data Dictionary 29 | 30 | | Name | Classification/Sensitivity | Comments | 31 | | :--: | :--: | :--: | 32 | | Data | Goes | Here | 33 | 34 | # Control Families 35 | 36 | These are the areas of controls that we're interested in based on what the audit working group selected. 37 | 38 | When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI: 39 | 40 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 41 | 42 | For example, an system may have authorization requirements that say: 43 | 44 | - users must be registered with a central authority 45 | - all requests must be verified to be owned by the requesting user 46 | - each account must have attributes associated with it to uniquely identify the user 47 | 48 | and so on. 49 | 50 | For this assessment, we're looking at six basic control families: 51 | 52 | - Networking 53 | - Cryptography 54 | - Secrets Management 55 | - Authentication 56 | - Authorization (Access Control) 57 | - Multi-tenancy Isolation 58 | 59 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 60 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 61 | isn't a weakness, it's simply "not applicable." 62 | 63 | For each control family we want to ask: 64 | 65 | - What does the component do for this control? 66 | - What sorts of data passes through that control? 67 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 68 | - What can attacker do with access to this component? 69 | - What's the simplest attack against it? 70 | - Are there mitigations that we recommend (i.e. 
"Always use an interstitial firewall")? 71 | - What happens if the component stops working (via DoS or other means)? 72 | - Have there been similar vulnerabilities in the past? What were the mitigations? 73 | 74 | # Threat Scenarios 75 | 76 | - An External Attacker without access to the client application 77 | - An External Attacker with valid access to the client application 78 | - An Internal Attacker with access to cluster 79 | - A Malicious Internal User 80 | 81 | ## Networking 82 | 83 | ## Cryptography 84 | 85 | ## Secrets Management 86 | 87 | ## Authentication 88 | 89 | ## Authorization 90 | 91 | ## Multi-tenancy Isolation 92 | 93 | ## Summary 94 | 95 | # Recommendations 96 | -------------------------------------------------------------------------------- /sig-security-external-audit/external-audit-roadmap.md: -------------------------------------------------------------------------------- 1 | Past external security audits have not been comprehensive of the entire Kubernetes project. 2 | This roadmap lists previously audited focus areas and focus areas requested to be included in future audits. 3 | The Kubernetes community is invited to create issues and PRs to request additional components to be audited. 
4 | 5 | 6 | | **Kubernetes Focus Area** | **Audit Year**| **Links** | 7 | |---------------------------|---------------|-----------| 8 | | Networking | 2019 | | 9 | | Cryptography | 2019 | | 10 | | Authentication & Authorization (including Role Based Access Controls) | 2019 | | 11 | | Secrets Management | 2019 | | 12 | | Multi-tenancy isolation: Specifically soft (non-hostile co-tenants) | 2019 | | 13 | | kube-apiserver | 2023 | | 14 | | kube-scheduler | 2023 | | 15 | | etcd (in the context of Kubernetes use of etcd) | 2023 | | 16 | | kube-controller-manager | 2023 | | 17 | | cloud-controller-manager | 2023 | | 18 | | kubelet | 2023 | https://github.com/kubernetes/kubelet https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/kubelet | 19 | | kube-proxy | 2023 | https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/kube-proxy https://github.com/kubernetes/kube-proxy | 20 | | secrets-store-csi-driver | 2023 | https://github.com/kubernetes-sigs/secrets-store-csi-driver | 21 | | cluster API | TBD | https://github.com/kubernetes-sigs/cluster-api | 22 | | kubectl | TBD | https://github.com/kubernetes/kubectl | 23 | | kubeadm | TBD | https://github.com/kubernetes/kubeadm | 24 | | metrics server | TBD | https://github.com/kubernetes-sigs/metrics-server 25 | | nginx-ingress (in the context of a Kubernetes ingress controller) | TBD | https://github.com/kubernetes/ingress-nginx 26 | | kube-state-metrics | TBD | https://github.com/kubernetes/kube-state-metrics 27 | | node feature discovery | TBD | https://github.com/kubernetes-sigs/node-feature-discovery 28 | | hierarchical namespace | TBD | https://github.com/kubernetes-sigs/multi-tenancy/tree/master/incubator/hnc 29 | | pod security policy replacement | TBD | https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/2579-psp-replacement 30 | | CoreDNS (in the context of Kubernetes use of CoreDNS) | TBD | Concept: 
https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ Reference: https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/ | 31 | | cluster autoscaler | TBD | https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler | 32 | | kube rbac proxy | TBD | https://github.com/brancz/kube-rbac-proxy | 33 | | kms plugins | TBD | https://kubernetes.io/docs/tasks/administer-cluster/kms-provider/#implementing-a-kms-plugin | 34 | | cni plugins | TBD | https://github.com/containernetworking/cni | 35 | | csi plugins | TBD | https://github.com/kubernetes-csi | 36 | | aggregator layer | TBD | https://github.com/kubernetes/kube-aggregator | 37 | | windows | TBD | https://github.com/kubernetes/enhancements/tree/master/keps/sig-auth/2579-psp-replacement https://github.com/kubernetes/kubelet https://kubernetes.io/docs/concepts/workloads/pods/#pod-os https://github.com/kubernetes-csi/csi-proxy https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#windowssecuritycontextoptions-v1-core https://github.com/kubernetes/kubernetes/tree/master/pkg/proxy/winkernel | 38 | | konnectivity | TBD | https://github.com/kubernetes-sigs/apiserver-network-proxy/tree/master/konnectivity-client | 39 | | shared cloud provider library | TBD | https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cloud-provider/ | 40 | | credential provider plugin | TBD | https://github.com/kubernetes/kubernetes/tree/master/pkg/credentialprovider/plugin | 41 | | image builder | TBD | https://github.com/kubernetes-sigs/image-builder | -------------------------------------------------------------------------------- /sig-security-tooling/learning-sessions.md: -------------------------------------------------------------------------------- 1 | ## Learning Sessions 2 | 3 | - For the most up to date information, please visit the [Kubernetes Sig Security Tooling Sub-project Meeting 
Notes](https://docs.google.com/document/d/1_Rn7S8UPMtXdu6EDl4_L2Ogb433z7hlxPs7El8TfNNg/edit?usp=sharing). 4 | - Also, please check out the slack channel at [#sig-security-tooling](https://kubernetes.slack.com/archives/C01CUSVMHPY). 5 | 6 | Here is the list of learning sessions hosted by sig-security tooling community: 7 | | **Date** | **Topic** | **Speaker(s)** | **Link** | 8 | | ---------- | :--------------------------------------------------------------------: | ------------------------------------------------------------------------------------------------------- | :------------------------------------------: | 9 | | 09-12-2025 | [Kubesonde](https://github.com/kubesonde/kubesonde) | [Jacopo Bufalino](https://github.com/jackap) | https://www.youtube.com/watch?v=zyOD7oOLX5M | 10 | | 07-19-2023 | [Tetragon](https://github.com/cilium/tetragon) | [Mahé Tardy](https://github.com/mtardy) | https://www.youtube.com/watch?v=4ifEI1n4lY4 | 11 | | 03-29-2023 | [Copacetic](https://github.com/project-copacetic/copacetic) | [Xander Grzywinski](https://github.com/salaxander) | https://www.youtube.com/watch?v=6Be41Nf52ts | 12 | | 03-01-2023 | [Security-Guard](https://github.com/knative-sandbox/security-guard) | [David Hadas](https://github.com/davidhadas) | https://youtu.be/FNIdRBGwzOo | 13 | | 08-16-2022 | [KubeAudit](https://github.com/shopify/kubeaudit) | [Genevieve Luyt](https://github.com/genevieveluyt) & [Dani Santos](https://github.com/dani-santos-code) | https://youtu.be/m18AIFmfM00 | 14 | | 07-05-2022 | [Eraser](https://github.com/Azure/eraser) | [Xander Grzywinski](https://twitter.com/XanderGrzy) | https://youtu.be/c1yhWxxEkJI | 15 | | 04-19-2022 | [Stratus Red Team](https://github.com/DataDog/stratus-red-team) | [Christophe Tafani-Dereeper](https://twitter.com/christophetd) | https://youtu.be/qb59dvq4KYE | 16 | | 03-15-2022 | [SIG Security](https://github.com/kubernetes/sig-security) | [Pushkar Joglekar](https://twitter.com/PuDiJoglekar) | https://youtu.be/jqfDgaGqJX0 
| 17 | | 01-18-2022 | [kdigger](https://github.com/quarkslab/kdigger) | [Mahé Tardy](https://twitter.com/mtardy_) | https://www.youtube.com/watch?v=o-E6aoKmznY | 18 | | 11-16-2021 | [Kube Armor](https://github.com/kubearmor/KubeArmor) | [Rahul Jadhav](https://twitter.com/nyrahul) | https://www.youtube.com/watch?v=MWAb63gf3gs | 19 | | 09-21-2021 | [SBoM for K8s](https://github.com/kubernetes-sigs/bom) | [Adolfo García Veytia](https://twitter.com/puerco) | https://www.youtube.com/watch?v=zB1-7NLsfps | 20 | | 08-17-2021 | [go-vulncheck](https://pkg.go.dev/golang.org/x/exp/vulndb/govulncheck) | [Zvonimir Pavlinovic](https://wp.nyu.edu/zvonimir/) | https://www.youtube.com/watch?v=YUhiWK15yEc | 21 | | 07-20-2021 | [Images in k/k discussion](https://github.com/kubernetes/release) | [Stephen Augustus](https://twitter.com/stephenaugustus) | https://www.youtube.com/watch?v=oibVXk9AwO4 | 22 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/tm.py: -------------------------------------------------------------------------------- 1 | # !/usr/bin/env python3 2 | 3 | from pytm.pytm import TM, Server, Datastore, Dataflow, Boundary, Actor, Lambda, Process 4 | 5 | tm = TM("Kubernetes Threat Model") 6 | tm.description = "a deep-dive threat model of Kubernetes" 7 | 8 | # Boundaries 9 | 10 | inet = Boundary("Internet") 11 | mcdata = Boundary("Master Control Data") 12 | apisrv = Boundary("API Server") 13 | mcomps = Boundary("Master Control Components") 14 | worker = Boundary("Worker") 15 | contain = Boundary("Container") 16 | 17 | # Actors 18 | 19 | miu = Actor("Malicious Internal User") 20 | ia = Actor("Internal Attacker") 21 | ea = Actor("External Actor") 22 | admin = Actor("Administrator") 23 | dev = Actor("Developer") 24 | eu = Actor("End User") 25 | 26 | # Server & OS Components 27 | 28 | etcd = Datastore("N-ary etcd servers") 29 | apiserver = Server("kube-apiserver") 30 | 
kubelet = Server("kubelet") 31 | kubeproxy = Server("kube-proxy") 32 | scheduler = Server("kube-scheduler") 33 | controllers = Server("CCM/KCM") 34 | pods = Server("Pods") 35 | iptables = Process("iptables") 36 | 37 | # Component <> Boundary Relations 38 | etcd.inBoundary = mcdata 39 | mcdata.inBoundary = apisrv 40 | apiserver.inBoundary = apisrv 41 | kubelet.inBoundary = worker 42 | kubeproxy.inBoundary = worker 43 | pods.inBoundary = contain 44 | scheduler.inBoundary = mcomps 45 | controllers.inBoundary = mcomps 46 | pods.inBoundary = contain 47 | iptables.inBoundary = worker 48 | miu.inBoundary = apisrv 49 | ia.inBoundary = contain 50 | ea.inBoundary = inet 51 | admin.inBoundary = apisrv 52 | dev.inBoundary = inet 53 | eu.inBoundary = inet 54 | 55 | # Dataflows 56 | 57 | apiserver2etcd = Dataflow(apiserver, etcd, "All kube-apiserver data") 58 | apiserver2etcd.isEncrypted = True 59 | apiserver2etcd.protocol = "HTTPS" 60 | 61 | apiserver2kubelet = Dataflow(apiserver, kubelet, "kubelet Health, Status, &c.") 62 | apiserver2kubelet.isEncrypted = False 63 | apiserver2kubelet.protocol = "HTTP" 64 | 65 | apiserver2kubeproxy = Dataflow(apiserver, kubeproxy, "kube-proxy Health, Status, &c.") 66 | apiserver2kubeproxy.isEncrypted = False 67 | apiserver2kubeproxy.protocol = "HTTP" 68 | 69 | apiserver2scheduler = Dataflow(apiserver, scheduler, "kube-scheduler Health, Status, &c.") 70 | apiserver2scheduler.isEncrypted = False 71 | apiserver2scheduler.protocol = "HTTP" 72 | 73 | apiserver2controllers = Dataflow(apiserver, controllers, "{kube, cloud}-controller-manager Health, Status, &c.") 74 | apiserver2controllers.isEncrypted = False 75 | apiserver2controllers.protocol = "HTTP" 76 | 77 | kubelet2apiserver = Dataflow(kubelet, apiserver, "HTTP watch for resources on kube-apiserver") 78 | kubelet2apiserver.isEncrypted = True 79 | kubelet2apiserver.protocol = "HTTPS" 80 | 81 | kubeproxy2apiserver = Dataflow(kubeproxy, apiserver, "HTTP watch for resources on kube-apiserver") 82 | 
kubeproxy2apiserver.isEncrypted = True 83 | kubeproxy2apiserver.protocol = "HTTPS" 84 | 85 | controllers2apiserver = Dataflow(controllers, apiserver, "HTTP watch for resources on kube-apiserver") 86 | controllers2apiserver.isEncrypted = True 87 | controllers2apiserver.protocol = "HTTPS" 88 | 89 | scheduler2apiserver = Dataflow(scheduler, apiserver, "HTTP watch for resources on kube-apiserver") 90 | scheduler2apiserver.isEncrypted = True 91 | scheduler2apiserver.protocol = "HTTPS" 92 | 93 | kubelet2iptables = Dataflow(kubelet, iptables, "kubenet update of iptables (... ipvs, &c) to setup Host-level ports") 94 | kubelet2iptables.protocol = "IPC" 95 | 96 | kubeproxy2iptables = Dataflow(kubeproxy, iptables, "kube-prxy update of iptables (... ipvs, &c) to setup all pod networking") 97 | kubeproxy2iptables.protocol = "IPC" 98 | 99 | kubelet2pods = Dataflow(kubelet, pods, "kubelet to pod/CRI runtime, to spin up pods within a host") 100 | kubelet2pods.protocol = "IPC" 101 | 102 | eu2pods = Dataflow(eu, pods, "End-user access of Kubernetes-hosted applications") 103 | ea2pods = Dataflow(ea, pods, "External Attacker attempting to compromise a trust boundary") 104 | ia2cnts = Dataflow(ia, pods, "Internal Attacker with access to a compromised or malicious pod") 105 | 106 | tm.process() 107 | -------------------------------------------------------------------------------- /sig-security-assessments/Documentation/recipe-book.md: -------------------------------------------------------------------------------- 1 | # Recipe Book 👩‍🍳 2 | 🎉Yay! You have decided to do a Self-assessment and make your corner of Kubernetes more secure! We are so happy 3 | to have you here. Below is an outline of the journey to get to a successful Self-assessment. 4 | 5 | ## Preparation 6 | Set yourself up for success – build your team and get organized 7 | 1. Open a Security Assessment Request using the [issue template](https://github.com/kubernetes/sig-security/issues/new/choose). 8 | 1. 
Figure out who can do this Self-assessment with you. You will need a security champion and a counterpart who owns the 9 | project and is an expert in it. If you are requesting a Security Self-assessment you can reach out to the team in 10 | [#sig-security](https://kubernetes.slack.com/messages/sig-security), and [#sig-security-assessments](https://kubernetes.slack.com/archives/C0441E11REC) to see if there is someone interested in helping to 11 | champion the assessment. 12 | 1. Make sure that you and your counterpart each have a back-up person assigned who attends all the meetings and is 13 | involved in the assessment. Two heads are better than one, and redundancy means that if someone can’t make a meeting 14 | for whatever reason, that you can still make progress. 15 | 1. To get further participation, advertise in SIG meetings and channels for the project, as well as SIG Security. Aim for a minimum of 4 16 | people (a project expert, a security champion, and a backup for each), and a maximum of 8. This ensures that there is 17 | not overwhelming burden on any one individual but allows for a smooth decision-making process. 18 | 1. Once you have a dedicated group of individuals ready to start the Security Assessment you can open an issue to request 19 | a Slack channel for the project specific assessment - here's an [example](https://github.com/kubernetes/community/pull/7015). 20 | 1. Create a Google doc for meeting notes using the 21 | [Kubernetes community meeting notes template](https://github.com/kubernetes/community/blob/master/events/community-meeting.md) 22 | ## Meetings 23 | - Once you have your collateral and your team together, set up a kickoff meeting with everyone. It is likely that not 24 | everyone will show up. That’s ok! 25 | - Keep meeting notes using the doc you created during the preparation phase. 26 | - Create a recurring meeting series of about 6 meetings (this is flexible and can be extended if necessary). 
27 | - These sessions can be ad hoc and should be set for whatever timing works for the group. 28 | ## The Assessment 29 | ### Step One 30 | Draw a [data flow diagram](https://www.lucidchart.com/pages/data-flow-diagram) for the main workflows in the project. 31 | This step will show you the areas you can potentially assess, and allow the security champion to quickly gain context 32 | around how the project is structured - and where the biggest security concerns may be. 33 | [Excalidraw](https://excalidraw.com/) is a great tool for this! 34 | ### Step Two 35 | Decide what to assess - defining the scope of the assessment is essential to making it successful. This decision will 36 | keep you focused and ensure you make forward progress. Helpful questions when thinking about what to assess: 37 | - What workflows are used most? 38 | - What workflows does the community want a security assessment of? 39 | - What expertise does the team doing the assessment have? Does it match any of the potential flows? 40 | Try to get matching expertise for the scope! 41 | ### Step Three 42 | Now that you have scope defined, we can come up with threats that are reasonable and logical for the projects. 43 | - Use the [TAG Self-assessment template](https://github.com/cncf/tag-security/blob/main/assessments/guide/self-assessment.md) 44 | to write up the report. You can use Google Docs or HackMD – whatever the project team is most comfortable with. 45 | - Once the team is happy with the above doc, convert it into a markdown PR and assign reviewers. 46 | - Now is a good time to start writing a [blog post](https://kubernetes.io/docs/contribute/new-content/blogs-case-studies/) to 47 | share once the review is done. 48 | ### Step Four 49 | Once the reviews are complete, the Self-assessment can be merged! 50 | ### Step Five 51 | CELEBRATE! Tell people about having completed the Self-assessment and make sure to complete and publish your blog! 
52 | 53 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/hack/fetch-official-cve-feed.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright 2022 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import copy 18 | import json 19 | import requests 20 | import sys 21 | from datetime import datetime, timezone 22 | from cve_title_parser import parse_cve_title 23 | 24 | def getCVEStatus(state, state_reason): 25 | if state == "open": 26 | if state_reason == "reopened": 27 | return "unknown" 28 | return "open" 29 | 30 | if state == "closed": 31 | if state_reason == "not_planned": 32 | return "unfixed" 33 | if state_reason == "completed": 34 | return "fixed" 35 | 36 | url = 'https://api.github.com/search/issues?q=is:issue+label:official-cve-feed+\ 37 | repo:kubernetes/kubernetes&per_page=100' 38 | 39 | headers = {'Accept': 'application/vnd.github.v3+json'} 40 | res = requests.get(url, headers=headers) 41 | gh_items = res.json()['items'] 42 | # Use link header to iterate over pages 43 | # https://docs.github.com/en/rest/overview/resources-in-the-rest-api#pagination 44 | # https://datatracker.ietf.org/doc/html/rfc5988 45 | # Please note that if there is a great number of pages, this unauthenticated 46 | # request may be subject to rate limits and fail. 
47 | # https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting 48 | while 'next' in res.links: 49 | res = requests.get(res.links['next']['url'], headers=headers) 50 | gh_items.extend(res.json()['items']) 51 | 52 | feed_envelope = { 53 | 'version': 'https://jsonfeed.org/version/1.1', 54 | 'title': 'Kubernetes Vulnerability Announcements - CVE Feed', 55 | 'home_page_url': 'https://kubernetes.io', 56 | 'feed_url': 'https://kubernetes.io/docs/reference/issues-security/official-cve-feed/index.json', 57 | 'description': 'Auto-refreshing official CVE feed for Kubernetes repository', 58 | 'authors': [ 59 | { 60 | 'name': 'Kubernetes Community', 61 | 'url': 'https://www.kubernetes.dev' 62 | } 63 | ], 64 | '_kubernetes_io': None, 65 | 'items': None, 66 | } 67 | # format the timestamp the same way as GitHub RFC 3339 timestamps, with only seconds and not milli and microseconds. 68 | root_kubernetes_io = {'feed_refresh_job': 'https://testgrid.k8s.io/sig-security-cve-feed#auto-refreshing-official-cve-feed', 69 | 'updated_at': datetime.now(timezone.utc).replace(tzinfo=None).isoformat(sep='T', timespec='seconds') + 'Z'} 70 | feed_envelope['_kubernetes_io'] = root_kubernetes_io 71 | 72 | cve_list = [] 73 | non_parsable_cve_list = [] 74 | for item in gh_items: 75 | # These keys respects the item jsonfeed spec https://www.jsonfeed.org/version/1.1/ 76 | cve = {'content_text': None, 'date_published': None, 'external_url': None, 77 | 'id': None,'summary': None, 'url': None, '_kubernetes_io': None} 78 | # This is a custom extension 79 | item_kubernetes_io = {'google_group_url': None, 'issue_number': None} 80 | cve['_kubernetes_io'] = item_kubernetes_io 81 | 82 | cve['url'] = item['html_url'] 83 | cve['_kubernetes_io']['issue_number'] = item['number'] 84 | cve['content_text'] = item['body'] 85 | cve['date_published'] = item['created_at'] 86 | cve['status'] = getCVEStatus(item['state'], item['state_reason']) 87 | 88 | try: 89 | cve_ids, description = 
parse_cve_title(item['title']) 90 | cve['summary'] = description 91 | 92 | first_cve_id = cve_ids[0] 93 | cve['id'] = first_cve_id 94 | cve['external_url'] = f'https://www.cve.org/cverecord?id={first_cve_id}' 95 | cve['_kubernetes_io']['google_group_url'] = f'https://groups.google.com/g/kubernetes-announce/search?q={first_cve_id}' 96 | 97 | # Add additional entries for any remaining CVE IDs 98 | for additional_cve_id in cve_ids[1:]: 99 | additional_cve = copy.deepcopy(cve) 100 | additional_cve['id'] = additional_cve_id 101 | additional_cve['external_url'] = f'https://www.cve.org/cverecord?id={additional_cve_id}' 102 | additional_cve['_kubernetes_io']['google_group_url'] = f'https://groups.google.com/g/kubernetes-announce/search?q={additional_cve_id}' 103 | cve_list.append(additional_cve) 104 | 105 | cve_list.append(cve) 106 | except LookupError: 107 | non_parsable_cve_list.append((item['title'], item['html_url'])) 108 | 109 | feed_envelope['items'] = cve_list 110 | json_feed = json.dumps(feed_envelope, sort_keys=False, indent=4) 111 | print(json_feed) 112 | 113 | if len(non_parsable_cve_list) != 0: 114 | print("Failed to parse below CVE issues:", file=sys.stderr) 115 | for title, url in non_parsable_cve_list: 116 | print(f"{title}\n{url}", file=sys.stderr) 117 | exit(7) 118 | -------------------------------------------------------------------------------- /sig-security-external-audit/README.md: -------------------------------------------------------------------------------- 1 | # SIG Security External Audit Subproject 2 | 3 | ## Overview 4 | 5 | The SIG Security External Audit subproject (subproject, henceforth) is responsible for coordinating regular, 6 | comprehensive, third-party security audits. 
7 | The subproject publishes the deliverables of the audit after abiding to the 8 | [Security Release Process](https://github.com/kubernetes/committee-security-response/blob/main/security-release-process.md) and 9 | [embargo policy](https://github.com/kubernetes/committee-security-response/blob/main/private-distributors-list.md#embargo-policy). 10 | 11 | - [Request for Proposal (RFP)](#rfp) 12 | - [Security Audit Scope](#security-audit-scope) 13 | - [Vendor and Community Questions](#vendor-and-community-questions) 14 | - [Review of Proposals](#review-of-proposals) 15 | - [Vendor Selection](#vendor-selection) 16 | - [Deliverables](#deliverables) 17 | 18 | ## RFP 19 | 20 | The subproject produces a RFP for a third-party, comprehensive security audit. The subproject publishes the RFP in the 21 | `sig-security` folder in the `kubernetes/community` repository. The subproject defines the scope, schedule, 22 | methodology, selection criteria, and deliverables in the RFP. 23 | 24 | Previous RFPs: 25 | - [2019](https://github.com/kubernetes/sig-security/blob/main/sig-security-external-audit/security-audit-2019/RFP.md) 26 | - [2021](https://github.com/kubernetes/sig-security/blob/main/sig-security-external-audit/security-audit-2021-2022/RFP.md) 27 | 28 | As efforts begin for the year's security audit, create a tracking issue for the security audit in 29 | `kubernetes/community` with the `/sig security` label. 30 | 31 | ### Security Audit Scope 32 | 33 | The scope of an audit is the most recent release at commencement of audit of the core 34 | [Kubernetes project](https://github.com/kubernetes/kubernetes) and certain other code maintained by 35 | [Kubernetes SIGs](https://github.com/kubernetes-sigs/). 36 | 37 | Core Kubernetes components remain as focus areas of regular audits. Additional focus areas are finalized by the 38 | subproject. 
39 | 40 | ### Vendor and Community Questions 41 | 42 | Potential vendors and the community can submit questions regarding the RFP through a Google form. The Google form is 43 | linked in the RFP. 44 | [Example from the 2021 audit](https://docs.google.com/forms/d/e/1FAIpQLScjApMDAJ5o5pIBFKpJ3mUhdY9w5s9VYd_TffcMSvYH_O7-og/viewform). 45 | 46 | The subproject answers questions publicly on the RFP with pull requests to update the RFP. 47 | [Example from the 2021 audit](https://github.com/kubernetes/community/pull/5813). 48 | 49 | The question period is typically open between the RFP's opening date and closing date. 50 | 51 | ## Review of Proposals 52 | 53 | Proposals are reviewed by the subproject proposal reviewers after the RFP closing date. An understanding of security audits is required to be a proposal reviewer. 54 | 55 | All proposal reviewers must agree to abide by the 56 | **[Security Release Process](https://github.com/kubernetes/committee-security-response/blob/main/security-release-process.md)**, 57 | **[embargo policy](https://github.com/kubernetes/committee-security-response/blob/main/private-distributors-list.md#embargo-policy)**, 58 | and have no [conflict of interest](#conflict-of-interest) on the tracking issue. 59 | This is done by placing a comment on the issue associated with the security audit. 60 | e.g. `I agree to abide by the guidelines set forth in the Security Release Process, specifically the embargo on CVE 61 | communications and have no conflict of interest` 62 | 63 | Proposal reviewers are members of a private Google group and private Slack channel to exchange sensitive, confidential information and to share artifacts. 64 | 65 | ### Conflict of Interest 66 | 67 | There is a possibility of a conflict of interest between a proposal reviewer and a vendor. Proposal reviewers should not have a conflict of interest. 
Examples of conflict of interest: 68 | - Proposal reviewer is employed by a vendor who submitted a proposal 69 | - Proposal reviewer has financial interest directly tied to the audit 70 | 71 | Should a conflict arise during the proposal review, reviewers should notify the subproject owner and SIG Security chairs when they become aware of the conflict. 72 | 73 | > The _Conflict of Interest_ section is inspired by the 74 | [CNCF Security TAG security reviewer process](https://github.com/cncf/tag-security/blob/main/assessments/guide/security-reviewer.md#conflict-of-interest). 75 | 76 | ## Vendor Selection 77 | 78 | On the vendor selection date, the subproject will publish the selected vendor in the 'sig-security' folder in the `kubernetes/community` repository. 79 | [Example from the 2019 audit](https://github.com/kubernetes/sig-security/blob/main/sig-security-external-audit/security-audit-2019/RFP_Decision.md). 80 | 81 | ## Deliverables 82 | 83 | The deliverables of the audit are defined in the RFP e.g. findings report, threat model, white paper, audited reference architecture spec (with yaml manifests) and published in the 'sig-security' folder in the `kubernetes/community` repository. 84 | [Example from the 2019 audit](https://github.com/kubernetes/sig-security/tree/main/sig-security-external-audit/security-audit-2019/findings).
85 | 86 | **All information gathered and deliverables created as a part of the audit must not be shared outside the vendor or the subproject without the explicit consent of the subproject and SIG Security chairs.** 87 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/container-runtime.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: Container Runtime 4 | - Owner(s): [sig-node](https://github.com/kubernetes/community/blob/master/sig-node/README.md) 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: High 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging. The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work? 17 | 18 | - Container Runtimes expose an IPC endpoint such as a file system socket 19 | - kubelet retrieves pods to be executed from the kube-apiserver 20 | - The Container Runtime Interface then executes the necessary commands/requests from the actual container system (e.g. docker) to run the pod 21 | 22 | ## Are there any subcomponents or shared boundaries? 23 | 24 | Yes 25 | 26 | - The Container Runtime technically interfaces with kubelet, and runs on the same host 27 | - However, the Container Runtime is logically a separate Trust Zone within the node 28 | 29 | ## What communications protocols does it use?
30 | 31 | Various, depends on the IPC mechanism required by the Container Runtime 32 | 33 | ## Where does it store data? 34 | 35 | Most data should be provided by kubelet or the CRI in running the container 36 | 37 | ## What is the most sensitive data it stores? 38 | 39 | N/A 40 | 41 | ## How is that data stored? 42 | 43 | N/A 44 | 45 | # Meeting Notes 46 | 47 | 48 | # Data Dictionary 49 | 50 | | Name | Classification/Sensitivity | Comments | 51 | | :--: | :--: | :--: | 52 | | Data | Goes | Here | 53 | 54 | # Control Families 55 | 56 | These are the areas of controls that we're interested in based on what the audit working group selected. 57 | 58 | When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI: 59 | 60 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 61 | 62 | For example, an system may have authorization requirements that say: 63 | 64 | - users must be registered with a central authority 65 | - all requests must be verified to be owned by the requesting user 66 | - each account must have attributes associated with it to uniquely identify the user 67 | 68 | and so on. 69 | 70 | For this assessment, we're looking at six basic control families: 71 | 72 | - Networking 73 | - Cryptography 74 | - Secrets Management 75 | - Authentication 76 | - Authorization (Access Control) 77 | - Multi-tenancy Isolation 78 | 79 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 80 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 81 | isn't a weakness, it's simply "not applicable." 
82 | 83 | For each control family we want to ask: 84 | 85 | - What does the component do for this control? 86 | - What sorts of data passes through that control? 87 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 88 | - What can attacker do with access to this component? 89 | - What's the simplest attack against it? 90 | - Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")? 91 | - What happens if the component stops working (via DoS or other means)? 92 | - Have there been similar vulnerabilities in the past? What were the mitigations? 93 | 94 | # Threat Scenarios 95 | 96 | - An External Attacker without access to the client application 97 | - An External Attacker with valid access to the client application 98 | - An Internal Attacker with access to cluster 99 | - A Malicious Internal User 100 | 101 | ## Networking 102 | 103 | - CRI Runs an HTTP server 104 | - port forwarding, exec, attach 105 | - !FINDING TLS by default, but not mutual TLS, and self-signed 106 | - kubelet -> exec request to CRI over gRPC 107 | - Returns URL with single use Token 108 | - gRPC is Unix Domain by default 109 | - Kubelet proxies or responds w/ redirect to API server (locally hosted CRI only) 110 | - !FINDING(same HTTP finding for pull as kubectl) CRI actually pulls images, no egress filtering 111 | - image tag is SHA256, CRI checks that 112 | - Not sure how CNI, it might be exec 113 | - only responds to connections 114 | - CRI uses Standard Go HTTP 115 | 116 | ## Cryptography 117 | 118 | - Nothing beyond TLS 119 | 120 | ## Secrets Management 121 | 122 | - !FINDING auth'd container repos, passed in via podspec, fetched by kubelet, are passed via CLI 123 | - so anyone with access to the host running the container can see those secrets 124 | 125 | ## Authentication 126 | 127 | - Unix Domain Socket for gRPC, so Linux authN/authZ 128 | - !FINDING 8 character random
single use token with 1 minute lifetime (response to line 109) 129 | 130 | ## Authorization 131 | 132 | - no authZ 133 | 134 | ## Multi-tenancy Isolation 135 | 136 | - knows nothing about tenants or namespaces 137 | - low-level component, kubelet/api-server is the arbiter 138 | 139 | ## Summary 140 | 141 | # Recommendations 142 | -------------------------------------------------------------------------------- /sig-security-tooling/vulnerability-mgmt/container-images.md: -------------------------------------------------------------------------------- 1 | # Periodic scanning for vulnerabilities in container images 2 | 3 | Report vulnerabilities in container images 4 | of [Kubernetes](https://github.com/kubernetes/kubernetes) repository 5 | 6 | Tracker: [Issue #4](https://github.com/kubernetes/sig-security/issues/4) 7 | 8 | ## Background and Prior work 9 | 10 | The process described here is tooling agnostic i.e. the process can be 11 | implemented using any scanner with minimal or no changes. This is also _not_ an 12 | endorsement of any specific tool or scanner. In order to get a working solution 13 | in place, [snyk](https://snyk.io/) was chosen for following reasons: 14 | 15 | 1. Existing partnership between CNCF and Snyk helped procure an account that 16 | allowed us to scan `kubernetes/kubernetes` 17 | repo: https://github.com/kubernetes/steering/issues/206 18 | 2. Snyk has detected vulnerabilities in transitive dependencies of 19 | `kubernetes/kubernetes`: https://kubernetes.slack.com/archives/CHGFYJVAN/p1595258034095300 20 | 3.
Snyk has a programmable interface which made it easier to filter out 21 | unfixable or known false positive vulnerabilities 22 | 23 | ## Implementation with Snyk 24 | 25 | There are two ways to scan the Kubernetes release for vulnerabilities in its 26 | container images 27 | 28 | ### Running the scan locally 29 | 30 | #### Step 0: Install Snyk CLI 31 | 32 | Follow these instructions to get snyk cli installed on your 33 | machine: https://support.snyk.io/hc/en-us/articles/360003812538-Install-the-Snyk-CLI 34 | 35 | #### Step 1: Authenticate 36 | 37 | ##### Option A : 38 | 39 | Running command `snyk auth` takes you to snyk.io website, do signup/login/auth 40 | 41 | ``` 42 | snyk auth 43 | ``` 44 | 45 | ##### Option B: 46 | 47 | Get the API token from https://app.snyk.io/account and use it 48 | 49 | ``` 50 | snyk auth XXX-XXX-XXX-XXX-XXX 51 | Your account has been authenticated. Snyk is now ready to be used. 52 | ``` 53 | 54 | #### Step 2: Collect list of container images 55 | 56 | ```sh 57 | curl -Ls https://sbom.k8s.io/$(curl -Ls https://dl.k8s.io/release/latest.txt)/release | grep 'PackageName: k8s.gcr.io/' | awk '{print $2}' > images.txt 58 | ``` 59 | 60 | #### Step 3: Run test 61 | 62 | ```sh 63 | while read image; do snyk container test $image -d --json; done < images.txt 64 | ``` 65 | 66 | ### Running the scan as part of k/k testgrid 67 | 68 | Prow job that runs every 6 hours is located 69 | here: https://testgrid.k8s.io/sig-security-snyk-scan#ci-kubernetes-snyk-master 70 | 71 | #### Improvements to the raw scan results 72 | 73 | Raw scan results were useful, but needed some Kubernetes specific work 74 | 75 | ##### JSON output 76 | 77 | To store the json output in a file and let stdout use command line friendly 78 | output: 79 | 80 | ``` 81 | snyk container test --json-file-output=all-cves.json 82 | ``` 83 | 84 | ##### Fixable Vulnerabilities 85 | 86 | To output vulnerabilities able to be triaged, only upgradeable 87 | or patchable results can be parsed from the
output using this query: 88 | 89 | ``` 90 | cat all-cves.json | jq '{ vulnerabilities: .vulnerabilities | map(select(.isUpgradable == true or .isPatchable == true)) | select(length > 0) }' > fixable_cves.json 91 | ``` 92 | 93 | ### Example of filtered JSON scan result 94 | 95 | __Note__: Results of the filtered scan are not printed as part of the CI job. 96 | However, the following historical scan result is mentioned here for 97 | reference purposes only: 98 | 99 | 100 |
Click to view result 101 | 102 | 103 | ``` 104 | { 105 | "title": "Use After Free", 106 | "credit": [ 107 | "" 108 | ], 109 | "packageName": "glibc", 110 | "language": "linux", 111 | "packageManager": "debian:11", 112 | "description": "## NVD Description\n **Note:** \n Versions mentioned in the description apply to the upstream `glibc` package. \n\nThe mq_notify function in the GNU C Library (aka glibc) versions 2.32 and 2.33 has a use-after-free. It may use the notification thread attributes object (passed through its struct sigevent parameter) after it has been freed by the caller, leading to a denial of service (application crash) or possibly unspecified other impact.\n## Remediation\nThere is no fixed version for `Debian:11` `glibc`.\n## References\n- [ADVISORY](https://security-tracker.debian.org/tracker/CVE-2021-33574)\n- [CONFIRM](https://security.netapp.com/advisory/ntap-20210629-0005/)\n- [FEDORA](https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/RBUUWUGXVILQXVWEOU7N42ICHPJNAEUP/)\n- [GENTOO](https://security.gentoo.org/glsa/202107-07)\n- [MISC](https://sourceware.org/bugzilla/show_bug.cgi?id=27896)\n- [MISC](https://sourceware.org/bugzilla/show_bug.cgi?id=27896#c1)\n", 113 | "identifiers": { 114 | "ALTERNATIVE": [], 115 | "CVE": [ 116 | "CVE-2021-33574" 117 | ], 118 | "CWE": [ 119 | "CWE-416" 120 | ] 121 | }, 122 | "severity": "critical", 123 | "severityWithCritical": "critical", 124 | "socialTrendAlert": false, 125 | "cvssScore": 9.8, 126 | "CVSSv3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H", 127 | "patches": [], 128 | "references": [ 129 | { 130 | "title": "ADVISORY", 131 | "url": "https://security-tracker.debian.org/tracker/CVE-2021-33574" 132 | }, 133 | { 134 | "title": "CONFIRM", 135 | "url": "https://security.netapp.com/advisory/ntap-20210629-0005/" 136 | }, 137 | { 138 | "title": "FEDORA", 139 | "url": 
"https://lists.fedoraproject.org/archives/list/package-announce@lists.fedoraproject.org/message/RBUUWUGXVILQXVWEOU7N42ICHPJNAEUP/" 140 | }, 141 | { 142 | "title": "GENTOO", 143 | "url": "https://security.gentoo.org/glsa/202107-07" 144 | }, 145 | { 146 | "title": "MISC", 147 | "url": "https://sourceware.org/bugzilla/show_bug.cgi?id=27896" 148 | }, 149 | { 150 | "title": "MISC", 151 | "url": "https://sourceware.org/bugzilla/show_bug.cgi?id=27896%23c1" 152 | } 153 | ], 154 | "creationTime": "2021-05-26T15:11:38.773280Z", 155 | "modificationTime": "2021-12-04T14:20:17.969879Z", 156 | "publicationTime": "2021-05-26T15:11:38.561943Z", 157 | "disclosureTime": "2021-05-25T22:15:00Z", 158 | "id": "SNYK-DEBIAN11-GLIBC-1296898", 159 | "malicious": false, 160 | "nvdSeverity": "critical", 161 | "relativeImportance": "not yet assigned", 162 | "semver": { 163 | "vulnerable": [ 164 | "*" 165 | ] 166 | }, 167 | "exploit": "Not Defined", 168 | "from": [ 169 | "docker-image|k8s.gcr.io/conformance-ppc64le@v1.24.0-alpha.1", 170 | "glibc/libc6@2.31-13+deb11u2" 171 | ], 172 | "upgradePath": [], 173 | "isUpgradable": false, 174 | "isPatchable": false, 175 | "name": "glibc/libc6", 176 | "version": "2.31-13+deb11u2" 177 | } 178 | ``` 179 | 180 |
181 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/kube-scheduler.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: kube-scheduler 4 | - Owner(s): [sig-scheduling](https://github.com/kubernetes/community/tree/master/sig-scheduling) 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: Moderate (the scheduler adds pods to nodes, but will not remove pods, for the most part) 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging. The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work? 17 | 18 | - Similar to most other components: 19 | 1. Watches for unscheduled/new pods 20 | 1. Watches nodes and their resource constraints 21 | 1. Chooses a node, via various mechanisms, to allocate based on best fit of resource requirements 22 | 1. Updates the pod spec on the kube-apiserver 23 | 1.
that update is then retrieved by the node, which is also Watching components via the kube-apiserver 24 | - there may be multiple schedulers with various names, and parameters (such as pod-specific schedulers) 25 | 26 | - !NOTE schedulers are coöperative 27 | - !NOTE schedulers are *supposed* to honor the name, but need not 28 | - Interesting note, makes the huge list of schedulers DoS interesting 29 | - !NOTE idea there was to add a *huge* number of pods to be scheduled that are associated with a poorly named scheduler 30 | - !NOTE people shouldn't request specific schedulers in podspec, rather, there should be some webhook to process that 31 | - !NOTE team wasn't sure what would happen with large number of pods to be scheduled 32 | 33 | ## Are there any subcomponents or shared boundaries? 34 | 35 | Yes 36 | 37 | - there may be multiple schedulers on the same MCP host 38 | - schedulers may run on the same host as the API server 39 | 40 | ## What communications protocols does it use? 41 | 42 | - standard HTTPS + auth (chosen by the cluster) 43 | 44 | ## Where does it store data? 45 | 46 | - most should be stored in etcd (via kube-apiserver) 47 | - some data will be stored on command line (configuration options) or on the file system (certificate paths for authentication) 48 | 49 | ## What is the most sensitive data it stores? 50 | 51 | - No direct storage 52 | 53 | ## How is that data stored? 54 | 55 | - N/A 56 | 57 | # Data Dictionary 58 | 59 | | Name | Classification/Sensitivity | Comments | 60 | | :--: | :--: | :--: | 61 | | Data | Goes | Here | 62 | 63 | # Control Families 64 | 65 | These are the areas of controls that we're interested in based on what the audit working group selected. 66 | 67 | When we say "controls," we mean a logical section of an application or system that handles a security requirement.
Per CNSSI: 68 | 69 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 70 | 71 | For example, an system may have authorization requirements that say: 72 | 73 | - users must be registered with a central authority 74 | - all requests must be verified to be owned by the requesting user 75 | - each account must have attributes associated with it to uniquely identify the user 76 | 77 | and so on. 78 | 79 | For this assessment, we're looking at six basic control families: 80 | 81 | - Networking 82 | - Cryptography 83 | - Secrets Management 84 | - Authentication 85 | - Authorization (Access Control) 86 | - Multi-tenancy Isolation 87 | 88 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 89 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 90 | isn't a weakness, it's simply "not applicable." 91 | 92 | For each control family we want to ask: 93 | 94 | - What does the component do for this control? 95 | - What sorts of data passes through that control? 96 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 97 | - What can attacker do with access to this component? 98 | - What's the simplest attack against it? 99 | - Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")? 100 | - What happens if the component stops working (via DoS or other means)? 101 | - Have there been similar vulnerabilities in the past? What were the mitigations? 
102 | 103 | # Threat Scenarios 104 | 105 | - An External Attacker without access to the client application 106 | - An External Attacker with valid access to the client application 107 | - An Internal Attacker with access to cluster 108 | - A Malicious Internal User 109 | 110 | ## Networking 111 | 112 | - only talks to kube-apiserver 113 | - colocated on the same host generally as kube-apiserver, but needn't be 114 | - has a web server (HTTP) 115 | - !FINDING: same HTTP server finding as all other components 116 | - metrics endpoint: qps, scheduling latency, &c 117 | - healthz endpoint, which is just a 200 Ok response 118 | - by default doesn't verify cert (maybe) 119 | 120 | ## Cryptography 121 | 122 | - None 123 | 124 | ## Secrets Management 125 | 126 | - Logs is the only persistence mechanism 127 | - !FINDING (to be added to all the other "you expose secrets in env and CLI" finding locations) auth token/cred passed in via CLI 128 | 129 | ## Authentication 130 | 131 | - no authN really 132 | - pods, nodes, related objects; doesn't deal in authN 133 | - unaware of any service/user accounts 134 | 135 | ## Authorization 136 | 137 | - scheduling concepts protected by authZ 138 | - quotas 139 | - priority classes 140 | - &c 141 | - this authZ is not enforced by scheduler, however, enforced by kube-apiserver 142 | 143 | ## Multi-tenancy Isolation 144 | 145 | - tenant: different users of workloads that don't want to trust one another 146 | - namespaces are usually the boundaries 147 | - affinity/anti-affinity for namespace 148 | - scheduler doesn't have data plane access 149 | - can have noisy neighbor problem 150 | - is that the scheduler's issue?
151 | - not sure 152 | - namespace agnostic 153 | - can use priority classes which can be RBAC'd to a specific namespace, like kube-system 154 | - does not handle tenant fairness, handles priority class fairness 155 | - no visibility into network boundary or usage information 156 | - no cgroup for network counts 157 | - !FINDING anti-affinity can be abused: only I can have this one host, no one else, applicable from `kubectl` 158 | - !NOTE no backoff process for scheduler to reschedule a rejected pod by the kubelet; the replicaset controller can create a tightloop (RSC -> Scheduler -> Kubelet -> Reject -> RSC...) 159 | 160 | ## Summary 161 | 162 | # Recommendations 163 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/kubelet.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: kubelet 4 | - Owner(s): [sig-node](https://github.com/kubernetes/community/tree/master/sig-node) 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: High 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging. The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work?
17 | 18 | - `kubelet` issues a watch request on the `kube-apiserver` 19 | - `kubelet` watches for pod allocations assigned to the node the kubelet is currently running on 20 | - when a new pod has been allocated for the kubelet's host, it retrieves the pod spec, and interacts with the Container Runtime via local Interprocess Communication to run the container 21 | - Kubelet also handles: 22 | - answering log requests from the kube-apiserver 23 | - monitoring pod health for failures 24 | - working with the Container Runtime to deschedule pods when the pod has been deleted 25 | - updating the kube-apiserver with host status (for use by the scheduler) 26 | 27 | ## Are there any subcomponents or shared boundaries? 28 | 29 | Yes. 30 | 31 | - Technically, kubelet runs on the same host as the Container Runtime and kubeproxy 32 | - There is a Trust Zone boundary between the Container Runtime and the kubelet 33 | 34 | ## What communications protocols does it use? 35 | 36 | - HTTPS with certificate validation and some authentication mechanism for communication with the kube-apiserver as a client 37 | - HTTPS without certificate validation by default 38 | 39 | ## Where does it store data? 40 | 41 | - kubelet itself should not store much data 42 | - kubelet can be run in an "apiserver-less mode" that loads pod manifests from the file system 43 | - most data should be retrieved from the kube-apiserver via etcd 44 | - authentication credentials for the kube-apiserver may be stored on the file system or in memory (both in CLI parameter as well as actual program memory) for the duration of execution 45 | 46 | ## What is the most sensitive data it stores? 47 | 48 | - authentication credentials are stored in memory or are out of scope 49 | 50 | ## How is that data stored?
51 | 52 | N/A 53 | 54 | # Data Dictionary 55 | 56 | | Name | Classification/Sensitivity | Comments | 57 | | :--: | :--: | :--: | 58 | | Data | Goes | Here | 59 | 60 | # Control Families 61 | 62 | These are the areas of controls that we're interested in based on what the audit working group selected. 63 | 64 | When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI: 65 | 66 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 67 | 68 | For example, an system may have authorization requirements that say: 69 | 70 | - users must be registered with a central authority 71 | - all requests must be verified to be owned by the requesting user 72 | - each account must have attributes associated with it to uniquely identify the user 73 | 74 | and so on. 75 | 76 | For this assessment, we're looking at six basic control families: 77 | 78 | - Networking 79 | - Cryptography 80 | - Secrets Management 81 | - Authentication 82 | - Authorization (Access Control) 83 | - Multi-tenancy Isolation 84 | 85 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 86 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 87 | isn't a weakness, it's simply "not applicable." 88 | 89 | For each control family we want to ask: 90 | 91 | - What does the component do for this control? 92 | - What sorts of data passes through that control? 93 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 94 | - What can attacker do with access to this component? 95 | - What's the simplest attack against it? 
96 | - Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")? 97 | - What happens if the component stops working (via DoS or other means)? 98 | - Have there been similar vulnerabilities in the past? What were the mitigations? 99 | 100 | # Threat Scenarios 101 | 102 | - An External Attacker without access to the client application 103 | - An External Attacker with valid access to the client application 104 | - An Internal Attacker with access to cluster 105 | - A Malicious Internal User 106 | 107 | ## Networking 108 | 109 | - Port 10250: read/write, authenticated 110 | - Port 10255: read-only, unauthenticated 111 | - cadvisor uses this, going to be deprecated 112 | - 10248: healthz, unauth'd 113 | - static pod manifest directory 114 | - Static pod fetch via HTTP(S) 115 | 116 | ### Routes: 117 | 118 | - Auth filter on API, for 10250 119 | - delegated to apiserver, subject access review, HTTPS request 120 | - `/pods` podspec on node -> leaks data 121 | - `/healthz` 122 | - `/spec` 123 | - `/stats-{cpu, mem, &c}` 124 | - on 10250 only: 125 | - `/exec` 126 | - `/attach` 127 | - `portforward` 128 | - `/kube-auth` 129 | - `/debug-flags` 130 | - `/cri/{exec, attach, portforward}` 131 | 132 | ### Findings: 133 | 134 | - !FINDING: 10255 is unauthenticated and leaks secrets 135 | - !FINDING: 10255/10248 136 | - !FINDING: 10250 is self-signed TLS 137 | 138 | ## Cryptography 139 | 140 | - None 141 | 142 | ## Secrets Management 143 | 144 | - returned from kube-apiserver unencrypted 145 | - in memory cache 146 | - if pod mounts disk, written to tmpfs 147 | - !FINDING (already captured) ENV vars can expose secrets 148 | - configmaps are treated like secrets by kubelet 149 | - !FINDING keynames and secret names may be logged 150 | - maintains its own certs, secrets, bootstrap credential 151 | - bootstrap: initial cert used to issue CSR to kube-apiserver 152 | - !NOTE certs are written to disk unencrypted 153 | - !FINDING bootstrap cert may be long
lived, w/o a TTL 154 | 155 | ## Authentication 156 | 157 | - delegated to kube-apiserver, via HTTPS request, with subject access review 158 | - two-way TLS by default (we believe) 159 | - token auth 160 | - bearer token 161 | - passed to request to API server 162 | - "token review" 163 | - kube-apiserver responds w/ ident 164 | - response is boolean (yes/no is this a user) and username/uid/groups/arbitrary data as a tuple 165 | - no auditing on kubelet, but logged on kube-apiserver 166 | 167 | ## Authorization 168 | 169 | - delegated to kube-apiserver 170 | 171 | ## Multi-tenancy Isolation 172 | 173 | - kube-apiserver is the arbiter 174 | - kubelet doesn't know namespaces really 175 | - every pod is a separate tenant 176 | - pods are security boundaries 177 | 178 | ## Summary 179 | 180 | # Recommendations 181 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/etcd.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: etcd 4 | - Owner(s): Technically external to Kubernetes itself, but managed by [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery) 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: Critical (on a cluster with an API server, access to etcd is root access to the cluster) 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging.
The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work? 17 | 18 | - Distributed key-value store 19 | - uses RAFT for consensus 20 | - always need to deploy (N x M) + 1 members to avoid leader election issues 21 | - five is recommended for production usage 22 | - listens for requests from clients 23 | - clients are simple REST clients that interact via JSON or other mechanisms 24 | - in Kubernetes' case, data is stored under `/registry` 25 | 26 | ## Are there any subcomponents or shared boundaries? 27 | 28 | There shouldn't be; documentation specifically states: 29 | 30 | - should be in own cluster 31 | - limited to access by the API server(s) only 32 | - should use some sort of authentication (hopefully certificate auth) 33 | 34 | ## What communications protocols does it use? 35 | 36 | - HTTPS (with optional client-side or two-way TLS) 37 | - can also use basic auth 38 | - there's technically gRPC as well 39 | 40 | ## Where does it store data? 41 | 42 | - typical database-style: 43 | - data directory 44 | - snapshot directory 45 | - write-ahead log (WAL) directory 46 | - all three may be the same, depends on command line options 47 | - Consensus is then achieved across nodes via RAFT (leader election + log replication via distributed state machine) 48 | 49 | ## What is the most sensitive data it stores? 50 | 51 | - literally holds the keys to the kingdom: 52 | - pod specs 53 | - secrets 54 | - roles/attributes for {R, A}BAC 55 | - literally any data stored in Kubernetes via the kube-apiserver 56 | - [Access to etcd is equivalent to root permission in the cluster](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/#securing-etcd-clusters) 57 | 58 | ## How is that data stored? 
59 | 60 | - Outside the scope of this assessment per se, but not encrypted at rest 61 | - Kubernetes supports this itself with Encryption providers 62 | - the typical process of a WAL + data + snapshot is used 63 | - this is then replicated across the cluster with Raft 64 | 65 | # Meeting Notes 66 | 67 | - No authorization (from k8s perspective) 68 | - Authentication by local port access in current k8s 69 | - working towards mTLS for all connections 70 | - Raft consensus port, listener port 71 | - backups in etcd (system-level) not encrypted 72 | - metrics aren't encrypted at all either 73 | - multi-tenant: no multi-tenant controls at all 74 | - the kube-apiserver is the arbiter of namespaces 75 | - could add namespaces to the registry, but that is a large amount of work 76 | - no migration plan or test 77 | - watches (like kubelet watching for pod spec changes) would break 78 | - multi-single tenant is best route 79 | - RAFT port may be open by default, even in single etcd configurations 80 | - runs in a container within static Master kubelet, but is run as root 81 | - [CONTROL WEAKNESS] CA is passed on command line 82 | - Types of files: WAL, Snapshot, Data file (and maybe backup) 83 | - [FINDING] no checksums on WAL/Snapshot/Data 84 | - [RECOMMENDATION] checksum individual WAL entries, checksum the entire snapshot file 85 | - do this because it's fast enough for individual entries, and then the snapshot should never change 86 | - Crypto, really only TLS (std go) and checksums for backups (but not other files, as noted above) 87 | - No auditing, but that's less useful 88 | - kube-apiserver is the arbiter of what things are 89 | - kube-apiserver uses a single connection credential to etcd w/o impersonation, so harder to tell who did what 90 | - major events end up in the app log 91 | - debug mode allows you to see all events when they happen 92 | 93 | # Data Dictionary 94 | 95 | | Name | Classification/Sensitivity | Comments | 96 | | :--: | :--: | :--: | 97 | | Data 
| Goes | Here | 98 | 99 | # Control Families 100 | 101 | These are the areas of controls that we're interested in based on what the audit working group selected. 102 | 103 | When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI: 104 | 105 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 106 | 107 | For example, an system may have authorization requirements that say: 108 | 109 | - users must be registered with a central authority 110 | - all requests must be verified to be owned by the requesting user 111 | - each account must have attributes associated with it to uniquely identify the user 112 | 113 | and so on. 114 | 115 | For this assessment, we're looking at six basic control families: 116 | 117 | - Networking 118 | - Cryptography 119 | - Secrets Management 120 | - Authentication 121 | - Authorization (Access Control) 122 | - Multi-tenancy Isolation 123 | 124 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 125 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 126 | isn't a weakness, it's simply "not applicable." 127 | 128 | For each control family we want to ask: 129 | 130 | - What does the component do for this control? 131 | - What sorts of data passes through that control? 132 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 133 | - What can attacker do with access to this component? 134 | - What's the simplest attack against it? 135 | - Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")? 
136 | - What happens if the component stops working (via DoS or other means)? 137 | - Have there been similar vulnerabilities in the past? What were the mitigations? 138 | 139 | # Threat Scenarios 140 | 141 | - An External Attacker without access to the client application 142 | - An External Attacker with valid access to the client application 143 | - An Internal Attacker with access to cluster 144 | - A Malicious Internal User 145 | 146 | ## Networking 147 | 148 | ## Cryptography 149 | 150 | ## Secrets Management 151 | 152 | ## Authentication 153 | 154 | - by default Kubernetes doesn't use two-way TLS to the etcd cluster, which would be the most secure (combined with IP restrictions so that stolen creds can't be reused on new infrastructure) 155 | 156 | ## Authorization 157 | 158 | ## Multi-tenancy Isolation 159 | 160 | ## Summary 161 | 162 | # Recommendations 163 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/dataflow/updated-dataflow.dot: -------------------------------------------------------------------------------- 1 | digraph tm { 2 | graph [ 3 | fontname = Arial; 4 | fontsize = 14; 5 | ] 6 | node [ 7 | fontname = Arial; 8 | fontsize = 14; 9 | rankdir = lr; 10 | ] 11 | edge [ 12 | shape = none; 13 | fontname = Arial; 14 | fontsize = 12; 15 | ] 16 | labelloc = "t"; 17 | fontsize = 20; 18 | nodesep = 1; 19 | 20 | subgraph cluster_bfaefefcfbeeafeefac { 21 | graph [ 22 | fontsize = 10; 23 | fontcolor = firebrick2; 24 | style = dashed; 25 | color = firebrick2; 26 | label = <Internet>; 27 | ] 28 | 29 | bfbeacdafaceebdccfdffcdfcedfec [ 30 | shape = square; 31 | label = <
External Actor
>; 32 | ] 33 | abaadcacbbafdffbcffffbeedef [ 34 | shape = square; 35 | label = <
Developer
>; 36 | ] 37 | adafdaeaedeedcafe [ 38 | shape = square; 39 | label = <
End User
>; 40 | ] 41 | 42 | } 43 | 44 | subgraph cluster_bbfdadaacbdaedcebfec { 45 | graph [ 46 | fontsize = 10; 47 | fontcolor = firebrick2; 48 | style = dashed; 49 | color = firebrick2; 50 | label = <Master Control Data>; 51 | ] 52 | 53 | bfffcaeeeeedccabfaaeff [ 54 | shape = none; 55 | color = black; 56 | label = <
N-ary etcd servers
>; 57 | ] 58 | 59 | } 60 | 61 | subgraph cluster_afeffbbfdbeeefcabddacdba { 62 | graph [ 63 | fontsize = 10; 64 | fontcolor = firebrick2; 65 | style = dashed; 66 | color = firebrick2; 67 | label = <API Server>; 68 | ] 69 | 70 | bdfbefabdbefeacdfcabaac [ 71 | shape = square; 72 | label = <
Malicious Internal User
>; 73 | ] 74 | fabeebdadbcdffdcdec [ 75 | shape = square; 76 | label = <
Administrator
>; 77 | ] 78 | eadddadcfbabebaed [ 79 | shape = circle 80 | color = black 81 | label = <
kube-apiserver
>; 82 | ] 83 | 84 | } 85 | 86 | subgraph cluster_cebcbebffccbfedcaffbb { 87 | graph [ 88 | fontsize = 10; 89 | fontcolor = firebrick2; 90 | style = dashed; 91 | color = firebrick2; 92 | label = <Master Control Components>; 93 | ] 94 | 95 | ffceacecdbcacdddddffbfa [ 96 | shape = circle 97 | color = black 98 | label = <
kube-scheduler
>; 99 | ] 100 | adffdceecfcfbcfdaefca [ 101 | shape = circle 102 | color = black 103 | label = <
CCM/KCM
>; 104 | ] 105 | 106 | } 107 | 108 | subgraph cluster_baaffdafbdceebaaafaefeea { 109 | graph [ 110 | fontsize = 10; 111 | fontcolor = firebrick2; 112 | style = dashed; 113 | color = firebrick2; 114 | label = <Worker>; 115 | ] 116 | 117 | dbddcfaeaacebaecba [ 118 | shape = circle 119 | color = black 120 | label = <
kubelet
>; 121 | ] 122 | ddcaffdfdebdaeff [ 123 | shape = circle 124 | color = black 125 | label = <
kube-proxy
>; 126 | ] 127 | bcdcebabbdaadffeaeddcce [ 128 | shape = circle; 129 | color = black; 130 | 131 | label = <
iptables
>; 132 | ] 133 | 134 | } 135 | 136 | subgraph cluster_fdcecbcfbeadaccab { 137 | graph [ 138 | fontsize = 10; 139 | fontcolor = firebrick2; 140 | style = dashed; 141 | color = firebrick2; 142 | label = <Container>; 143 | ] 144 | 145 | bdfadfbeeaedceab [ 146 | shape = square; 147 | label = <
Internal Attacker
>; 148 | ] 149 | eefbffbeaaeecaceaaabe [ 150 | shape = circle 151 | color = black 152 | label = <
Pods
>; 153 | ] 154 | 155 | } 156 | 157 | eadddadcfbabebaed -> bfffcaeeeeedccabfaaeff [ 158 | color = black; 159 | label = <
All kube-apiserver data
>; 160 | ] 161 | eadddadcfbabebaed -> dbddcfaeaacebaecba [ 162 | color = black; 163 | label = <
kubelet Health, Status, &c.
>; 164 | ] 165 | eadddadcfbabebaed -> ddcaffdfdebdaeff [ 166 | color = black; 167 | label = <
kube-proxy Health, Status, &c.
>; 168 | ] 169 | eadddadcfbabebaed -> ffceacecdbcacdddddffbfa [ 170 | color = black; 171 | label = <
kube-scheduler Health, Status, &c.
>; 172 | ] 173 | eadddadcfbabebaed -> adffdceecfcfbcfdaefca [ 174 | color = black; 175 | label = <
{kube, cloud}-controller-manager Health, Status, &c.
>; 176 | ] 177 | dbddcfaeaacebaecba -> eadddadcfbabebaed [ 178 | color = black; 179 | label = <
HTTP watch for resources on kube-apiserver
>; 180 | ] 181 | ddcaffdfdebdaeff -> eadddadcfbabebaed [ 182 | color = black; 183 | label = <
HTTP watch for resources on kube-apiserver
>; 184 | ] 185 | adffdceecfcfbcfdaefca -> eadddadcfbabebaed [ 186 | color = black; 187 | label = <
HTTP watch for resources on kube-apiserver
>; 188 | ] 189 | ffceacecdbcacdddddffbfa -> eadddadcfbabebaed [ 190 | color = black; 191 | label = <
HTTP watch for resources on kube-apiserver
>; 192 | ] 193 | dbddcfaeaacebaecba -> bcdcebabbdaadffeaeddcce [ 194 | color = black; 195 | label = <
kubenet update of iptables (... ipvs, &c) to setup Host-level ports
>; 196 | ] 197 | ddcaffdfdebdaeff -> bcdcebabbdaadffeaeddcce [ 198 | color = black; 199 | label = <
kube-proxy update of iptables (... ipvs, &c) to setup all pod networking
>; 200 | ] 201 | dbddcfaeaacebaecba -> eefbffbeaaeecaceaaabe [ 202 | color = black; 203 | label = <
kubelet to pod/CRI runtime, to spin up pods within a host
>; 204 | ] 205 | adafdaeaedeedcafe -> eefbffbeaaeecaceaaabe [ 206 | color = black; 207 | label = <
End-user access of Kubernetes-hosted applications
>; 208 | ] 209 | bfbeacdafaceebdccfdffcdfcedfec -> eefbffbeaaeecaceaaabe [ 210 | color = black; 211 | label = <
External Attacker attempting to compromise a trust boundary
>; 212 | ] 213 | bdfadfbeeaedceab -> eefbffbeaaeecaceaaabe [ 214 | color = black; 215 | label = <
Internal Attacker with access to a compromised or malicious pod
>; 216 | ] 217 | } 218 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/kube-apiserver.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: kube-apiserver 4 | - Owner(s): [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery) 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: Critical (technically, it isn't needed, but most clusters will use it extensively) 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging. The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work? 17 | 18 | - RESTful API server 19 | - made up of multiple subcomponents: 20 | - authenticators 21 | - authorizers 22 | - admission controllers 23 | - resource validators 24 | - users issue a request, which is authenticated via one (or more) plugins 25 | - the requests is then authorized by one or more authorizers 26 | - it is then potentially modified and validated by an admission controller 27 | - resource validation that validates the object, stores it in etcd, and responds 28 | - clients issue HTTP requests (via TLS ala HTTPS) to "watch" resources and poll for changes from the server; for example: 29 | 1. a client updates a pod definition via `kubectl` and a `POST` request 30 | 1. 
the scheduler is "watching" for pod updates via an HTTP watch request to retrieve new pods 31 | 1. the scheduler then update the pod list via a `POST` to the kube-apiserver 32 | 1. a node's `kubelet` retrieves a list of pods assigned to it via an HTTP watch request 33 | 1. the node's `kubelet` then update the running pod list on the kube-apiserver 34 | 35 | ## Are there any subcomponents or shared boundaries? 36 | 37 | Yes 38 | 39 | - Controllers technically run on the kube-apiserver 40 | - the various subcomponents (authenticators, authorizers, and so on) run on the kube-apiserver 41 | 42 | additionally, depending on the configuration there may be any number of other Master Control Pane components running on the same phyical/logical host 43 | 44 | ## What communications protocols does it use? 45 | 46 | - Communcations to the kube-apiserver use HTTPS and various authentication mechanisms 47 | - Communications from the kube-apiserver to etcd use HTTPS, with optional client-side (two-way) TLS 48 | - Communications from the kube-apiserver to kubelets can use HTTP or HTTPS, the latter is without validation by default (find this again in the docs) 49 | 50 | ## Where does it store data? 51 | 52 | - Most data is stored in etcd, mainly under `/registry` 53 | - Some data is obviously stored on the local host, to bootstrap the connection to etcd 54 | 55 | ## What is the most sensitive data it stores? 56 | 57 | - Not much sensitive is directly stored on kube-apiserver 58 | - However, all sensitive data within the system (save for in MCP-less setups) is processed and transacted via the kube-apiserver 59 | 60 | ## How is that data stored? 
61 | 62 | - On etcd, with the level of protection requested by the user 63 | - looks like encryption [is a command line flag](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#configuration-and-determining-whether-encryption-at-rest-is-already-enabled) 64 | 65 | # Meeting notes 66 | 67 | - web hooks: kube-apiserver can call external resources 68 | - authorization webhook (for when you wish to auth a request without setting up a new authorizer) 69 | - images, other resources 70 | - [FINDING] supports HTTP 71 | - Aggregate API server // Aggregator 72 | - for adding extensibility resources 73 | - a type of CRD, basically 74 | - component status -> reaches out to every component on the cluster 75 | - Network proxy: restrict outbound connections from kube-apiserver (currently no restriction) 76 | - honestly a weakness: no egress filtering 77 | - Business logic in controllers, but kube-apiserver is info 78 | - cloud providers, auth, &c 79 | - sharding by group version kind, put all KVKs into the same etcd 80 | - listeners: insecure and secure 81 | - check if insecure is configured by default 82 | - would be a finding if so 83 | - Not comfortable doing true multi-tenant on k8s 84 | - multi-single tenants (as in, if Pepsi wants to have marketing & accounting that's fine, but not Coke & Pepsi on the same cluster) 85 | - Best way to restrict access to kube-apiserver 86 | - and working on a proxy as noted above 87 | - kube-apiserver is the root CA for *at least two* PKIs: 88 | - two CAs, but not on by default w/o flags (check what happens w/o two CAs...) 89 | - that would be a finding, if you can cross CAs really 90 | - TLS (multiple domains): 91 | - etcd -> kube-apiserver 92 | - the other is webhooks/kubelet/components... 93 | - check secrets: can you tell k8s to encrypt a secret but not provide the flag? what does it do? 
94 | - Alt route for secrets: volumes, write to a volume, then mount 95 | - Can't really do much about that, since it's opaque to the kube-apiserver 96 | - ConfigMap: people can stuff secrets into ConfigMaps 97 | - untyped data blob 98 | - cannot encrypt 99 | - recommend moving away from ConfigMaps 100 | - Logging to var log 101 | - resource names in logs (namespace, secret name, &c). Can be sensitive 102 | - [FINDING] no logs by default who did what 103 | - need to turn on auditing for that 104 | - look at metrics as well, similar to CRDs 105 | - Data Validation 106 | - can have admission controller, webhooks, &c. 107 | - everything goes through validation 108 | - Session 109 | - upgrade to HTTP/2, channel, or SPDY 110 | - JWT is long lived (we know) 111 | - Certain requests like proxy and logs require upgrade to channels 112 | - look at k8s enhancement ... kube-apiserver dot md 113 | 114 | # Data Dictionary 115 | 116 | | Name | Classification/Sensitivity | Comments | 117 | | :--: | :--: | :--: | 118 | | Data | Goes | Here | 119 | 120 | # Control Families 121 | 122 | These are the areas of controls that we're interested in based on what the audit working group selected. 123 | 124 | When we say "controls," we mean a logical section of an application or system that handles a security requirement. Per CNSSI: 125 | 126 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 127 | 128 | For example, an system may have authorization requirements that say: 129 | 130 | - users must be registered with a central authority 131 | - all requests must be verified to be owned by the requesting user 132 | - each account must have attributes associated with it to uniquely identify the user 133 | 134 | and so on. 
135 | 136 | For this assessment, we're looking at six basic control families: 137 | 138 | - Networking 139 | - Cryptography 140 | - Secrets Management 141 | - Authentication 142 | - Authorization (Access Control) 143 | - Multi-tenancy Isolation 144 | 145 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 146 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 147 | isn't a weakness, it's simply "not applicable." 148 | 149 | For each control family we want to ask: 150 | 151 | - What does the component do for this control? 152 | - What sorts of data passes through that control? 153 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 154 | - What can attacker do with access to this component? 155 | - What's the simplest attack against it? 156 | - Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")? 157 | - What happens if the component stops working (via DoS or other means)? 158 | - Have there been similar vulnerabilities in the past? What were the mitigations? 
159 | 160 | # Threat Scenarios 161 | 162 | - An External Attacker without access to the client application 163 | - An External Attacker with valid access to the client application 164 | - An Internal Attacker with access to cluster 165 | - A Malicious Internal User 166 | 167 | ## Networking 168 | 169 | - in the version of k8s we are testing, no outbound limits on external connections 170 | 171 | ## Cryptography 172 | 173 | - Not encrypting secrets in etcd by default 174 | - requiring [a command line flag](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#configuration-and-determining-whether-encryption-at-rest-is-already-enabled) 175 | - Supports HTTP for Webhooks and component status 176 | 177 | ## Secrets Management 178 | 179 | ## Authentication 180 | 181 | ## Authorization 182 | 183 | ## Multi-tenancy Isolation 184 | 185 | ## Summary 186 | 187 | # Recommendations 188 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/ancillary-data/rapid-risk-assessments/kube-proxy.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | - Component: kube-proxy 4 | - Owner(s): [sig-network](https://github.com/kubernetes/community/tree/master/sig-network) 5 | - SIG/WG(s) at meeting: 6 | - Service Data Classification: Medium 7 | - Highest Risk Impact: 8 | 9 | # Service Notes 10 | 11 | The portion should walk through the component and discuss connections, their relevant controls, and generally lay out how the component serves its relevant function. For example 12 | a component that accepts an HTTP connection may have relevant questions about channel security (TLS and Cryptography), authentication, authorization, non-repudiation/auditing, 13 | and logging. 
The questions aren't the *only* drivers as to what may be spoken about, the questions are meant to drive what we discuss and keep things on task for the duration 14 | of a meeting/call. 15 | 16 | ## How does the service work? 17 | 18 | - kubeproxy has several main modes of operation: 19 | - as a literal network proxy, handling networking between nodes 20 | - as a bridge between Container Network Interface (CNI) which handles the actual networking and the host operating system 21 | - `iptables` mode 22 | - `ipvs` mode 23 | - two Microsoft Windows-specific modes (not covered by the RRA) 24 | - in any of these modes, kubeproxy interfaces with the host's routing table so as to achieve a seamless, flat network across the kubernetes cluster 25 | 26 | ## Are there any subcomponents or shared boundaries? 27 | 28 | Yes. 29 | 30 | - Similar to kubelet, kube-proxy run's on the node, with an implicit trust boundary between Worker components and Container components (i.e. pods) 31 | 32 | ## What communications protocols does it use? 33 | 34 | - Direct IPC to `iptables` or `ipvs` 35 | - HTTPS to the kube-apiserver 36 | - HTTP Healthz port (which is a literal counter plus a `200 Ok` response) 37 | 38 | ## Where does it store data? 39 | 40 | Minimal data should be stored by kube-proxy itself, this should mainly be handled by kubelet and some file system configuration 41 | 42 | ## What is the most sensitive data it stores? 43 | 44 | N/A 45 | 46 | ## How is that data stored? 47 | 48 | N/A 49 | 50 | # Data Dictionary 51 | 52 | | Name | Classification/Sensitivity | Comments | 53 | | :--: | :--: | :--: | 54 | | Data | Goes | Here | 55 | 56 | # Control Families 57 | 58 | These are the areas of controls that we're interested in based on what the audit working group selected. 59 | 60 | When we say "controls," we mean a logical section of an application or system that handles a security requirement. 
Per CNSSI: 61 | 62 | > The management, operational, and technical controls (i.e., safeguards or countermeasures) prescribed for an information system to protect the confidentiality, integrity, and availability of the system and its information. 63 | 64 | For example, an system may have authorization requirements that say: 65 | 66 | - users must be registered with a central authority 67 | - all requests must be verified to be owned by the requesting user 68 | - each account must have attributes associated with it to uniquely identify the user 69 | 70 | and so on. 71 | 72 | For this assessment, we're looking at six basic control families: 73 | 74 | - Networking 75 | - Cryptography 76 | - Secrets Management 77 | - Authentication 78 | - Authorization (Access Control) 79 | - Multi-tenancy Isolation 80 | 81 | Obviously we can skip control families as "not applicable" in the event that the component does not require it. For example, 82 | something with the sole purpose of interacting with the local file system may have no meaningful Networking component; this 83 | isn't a weakness, it's simply "not applicable." 84 | 85 | For each control family we want to ask: 86 | 87 | - What does the component do for this control? 88 | - What sorts of data passes through that control? 89 | - for example, a component may have sensitive data (Secrets Management), but that data never leaves the component's storage via Networking 90 | - What can attacker do with access to this component? 91 | - What's the simplest attack against it? 92 | - Are there mitigations that we recommend (i.e. "Always use an interstitial firewall")? 93 | - What happens if the component stops working (via DoS or other means)? 94 | - Have there been similar vulnerabilities in the past? What were the mitigations? 
95 | 96 | # Threat Scenarios 97 | 98 | - An External Attacker without access to the client application 99 | - An External Attacker with valid access to the client application 100 | - An Internal Attacker with access to cluster 101 | - A Malicious Internal User 102 | 103 | ## Networking 104 | 105 | - kube-proxy is actually five programs 106 | - proxy: mostly deprecated, but a literal proxy, in that it intercepts requests and proxies them to backend services 107 | - IPVS/iptables: very similar modes, handle connecting virtual IPs (VIPs) and the like via low-level routing (the preferred mode) 108 | - two Windows-specific modes (out of scope for this discussion, but if there are details we can certainly add them) 109 | 110 | Node ports: 111 | 112 | - captures traffic from Host IP 113 | - shuffles to backend (used for building load balancers) 114 | 115 | - kube-proxy shells out to `iptables` or `ipvs` 116 | - Also uses a netlink socket for IPVS (netlink are similar to Unix Domain Sockets) 117 | - *Also* shells out to `ipset` under certain circumstances for IPVS (building sets of IPs and such) 118 | 119 | 120 | ### User space proxy 121 | 122 | Setup: 123 | 124 | 1. Connect to the kube-apiserver 125 | 1. Watch the API server for services/endpoints/&c 126 | 1. Build in-memory caching map: for services, for every port a service maps, open a port, write iptables rule for VIP & Virt Port 127 | 1. Watch for updates of services/endpoints/&c 128 | 129 | when a consumer connects to the port: 130 | 131 | 1. Service is running VIP:VPort 132 | 1. Root NS -> iptable -> kube-proxy port 133 | 1. look at the src/dst port, check the map, pick a service on that port at random (if that fails, try another until either success or a retry count has exceeded) 134 | 1. Shuffle bytes back and forth between backend service and client until termination or failure 135 | 136 | ### iptables 137 | 138 | 1. Same initial setup (sans opening a port directly) 139 | 1. iptables restore command set 140 | 1. 
giant string of services 141 | 1. User VIP -> Random Backend -> Rewrite packets (at the kernel level, so kube-proxy never sees the data) 142 | 1. At the end of the sync loop, write (write in batches to avoid iptables contentions) 143 | 1. no more routing table touches until service updates (from watching kube-apiserver or a time out, expanded below) 144 | 145 | **NOTE**: rate limited (bounded frequency) updates: 146 | - no later than 10 minutes by default 147 | - no sooner than 15s by default (if there are no service map updates) 148 | 149 | this point came out of the following question: is having access to kube-proxy *worse* than having root access to the host machine? 150 | 151 | ### ipvs 152 | 153 | 1. Same setup as iptables & proxy mode 154 | 1. `ipvsadm` and `ipset` commands instead of `iptables` 155 | 1. This does have some strange changes: 156 | - ip address needs a dummy adapter 157 | - !NOTE Any service bound to 0.0.0.0 are also bound to _all_ adapters 158 | - somewhat expected because 0.0.0.0, but can still lead to interesting behavior 159 | 160 | ### concern points within networking 161 | 162 | - !NOTE: ARP table attacks (such as if someone has `CAP_NET_RAW` in a container or host access) can impact kube-proxy 163 | - Endpoint selection is namespace & pod-based, so injection could overwrite (I don't think this is worth a finding/note because kube-apiserver is the arbiter of truth) 164 | - !FINDING (but low...): POD IP Reuse: (factor of 2 x max) cause a machine to churn thru IPS, you could cause a kube-proxy to forward ports to your pod if you win the race condition. 165 | - this would be limited to the window of routing updates 166 | - however, established connections would remain 167 | - kube-apiserver could be the arbiter of routing, but that may require more watch and connection to the central component 168 | - [editor] I think just noting this potential issue and maybe warning on it in kube-proxy logs would be enough 169 | 170 | ### with root access? 
171 | 172 | Access to kube-proxy is mostly the same as root access 173 | 174 | - set syscalls, route local, &c could gobble memory 175 | - Node/VIP level 176 | - Recommend `CAP_NET_BIND` (bind to low ports, don't need root for certain users) for containers/pods, alleviate concerns there 177 | - Can map low ports to high ports in kube-proxy as well, but mucks with anything that pretends to be a VIP 178 | - LB forwards packets to service without new connection (based on srcport) 179 | - 2-hop LB, can't do direct LB 180 | 181 | ## Cryptography 182 | 183 | - kube-proxy itself does not handle cryptography other than the TLS connection to kube-apiserver 184 | 185 | ## Secrets Management 186 | 187 | - kube-proxy itself does not handle secrets, but rather only consumes credentials from the command line (like all other k8s components) 188 | 189 | ## Authentication 190 | 191 | - kube-proxy does not handle any authentication other than credentials to the kube-apiserver 192 | 193 | ## Authorization 194 | 195 | - kube-proxy does not handle any authorization; the arbiters of authorization are kubelet and kube-proxy 196 | 197 | ## Multi-tenancy Isolation 198 | 199 | - kube-proxy does not currently segment clients from one another, as clients on the same pod/host must use the same iptables/ipvs configuration 200 | - kube-proxy does have conception of namespaces, but currently avoids enforcing much at that level 201 | - routes still must be added to iptables or the like 202 | - iptables contention could be problematic 203 | - much better to handle at higher-level components, namely kube-apiserver and kube-proxy 204 | 205 | ## Logging 206 | 207 | - stderr directed to a file 208 | - same as with kubelet 209 | - !FINDING (but same as all other components) logs namespaces, service names (same as every other service) 210 | 211 | # Additional Notes 212 | 213 | ## kubelet to iptables 214 | 215 | - per pod network management 216 | - pods can request a host port, docker style 217 | - 
kubenet and CNI plugins 218 | - kubenet uses CNI 219 | - setup kubenet iptable to map ports to a single pod 220 | - overly broad, should be appended to iptables list 221 | - all local IPs to the host 222 | 223 | !FINDING: don't use host ports, they can cause problems with services and such; we may recommend deprecating them 224 | 225 | ## Summary 226 | 227 | # Recommendations 228 | -------------------------------------------------------------------------------- /sig-security-tooling/cve-feed/hack/test_cve_title_parser.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright 2022 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | import unittest 18 | from cve_title_parser import parse_cve_title 19 | 20 | class TestParseCVETitle(unittest.TestCase): 21 | 22 | def test_single_cve_with_colon_separator(self): 23 | """Test parsing a title with a single CVE followed by colon.""" 24 | title = "CVE-2023-1234: Some vulnerability description" 25 | cve_ids, description = parse_cve_title(title) 26 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 27 | self.assertEqual(description, "Some vulnerability description") 28 | 29 | def test_single_cve_with_space_separator(self): 30 | """Test parsing a title with a single CVE followed by space.""" 31 | title = "CVE-2023-1234 Some vulnerability description" 32 | cve_ids, description = parse_cve_title(title) 33 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 34 | self.assertEqual(description, "Some vulnerability description") 35 | 36 | def test_single_cve_with_dash_separator(self): 37 | """Test parsing a title with a single CVE followed by dash.""" 38 | title = "CVE-2023-1234 - Some vulnerability description" 39 | cve_ids, description = parse_cve_title(title) 40 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 41 | self.assertEqual(description, "Some vulnerability description") 42 | 43 | def test_multiple_cves_comma_separated(self): 44 | """Test parsing a title with multiple CVEs separated by commas.""" 45 | title = "CVE-2023-1234, CVE-2023-5678: Multiple vulnerabilities found" 46 | cve_ids, description = parse_cve_title(title) 47 | self.assertEqual(cve_ids, ["CVE-2023-1234", "CVE-2023-5678"]) 48 | self.assertEqual(description, "Multiple vulnerabilities found") 49 | 50 | def test_multiple_cves_space_separated(self): 51 | """Test parsing a title with multiple CVEs separated by spaces.""" 52 | title = "CVE-2023-1234 CVE-2023-5678 Multiple vulnerabilities found" 53 | cve_ids, description = parse_cve_title(title) 54 | self.assertEqual(cve_ids, ["CVE-2023-1234", "CVE-2023-5678"]) 55 | self.assertEqual(description, "Multiple vulnerabilities found") 56 | 57 | def 
test_cve_with_more_than_four_digits(self): 58 | """Test parsing CVE with more than 4 digits in the sequence number.""" 59 | title = "CVE-2023-123456: Vulnerability with long sequence number" 60 | cve_ids, description = parse_cve_title(title) 61 | self.assertEqual(cve_ids, ["CVE-2023-123456"]) 62 | self.assertEqual(description, "Vulnerability with long sequence number") 63 | 64 | def test_mixed_separators(self): 65 | """Test parsing with mixed separators between CVEs.""" 66 | title = "CVE-2023-1234, CVE-2023-5678 - CVE-2023-9999: Mixed separators" 67 | cve_ids, description = parse_cve_title(title) 68 | self.assertEqual(cve_ids, ["CVE-2023-1234", "CVE-2023-5678", "CVE-2023-9999"]) 69 | self.assertEqual(description, "Mixed separators") 70 | 71 | def test_real_world_example_from_comment(self): 72 | """Test a real-world example.""" 73 | title = "CVE-2019-11249: Incomplete fixes for CVE-2019-1002101 and CVE-2019-11246, kubectl [...]" 74 | cve_ids, description = parse_cve_title(title) 75 | self.assertEqual(cve_ids, ["CVE-2019-11249"]) 76 | self.assertEqual(description, "Incomplete fixes for CVE-2019-1002101 and CVE-2019-11246, kubectl [...]") 77 | 78 | def test_cve_only_no_description(self): 79 | """Test parsing a title with only CVE and no description.""" 80 | title = "CVE-2023-1234" 81 | cve_ids, description = parse_cve_title(title) 82 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 83 | self.assertEqual(description, "") 84 | 85 | def test_multiple_cves_only_no_description(self): 86 | """Test parsing multiple CVEs with no description.""" 87 | title = "CVE-2023-1234, CVE-2023-5678" 88 | cve_ids, description = parse_cve_title(title) 89 | self.assertEqual(cve_ids, ["CVE-2023-1234", "CVE-2023-5678"]) 90 | self.assertEqual(description, "") 91 | 92 | def test_whitespace_handling(self): 93 | """Test that extra whitespace is properly handled.""" 94 | title = "CVE-2023-1234 : Description with extra spaces" 95 | cve_ids, description = parse_cve_title(title) 96 | 
self.assertEqual(cve_ids, ["CVE-2023-1234"]) 97 | self.assertEqual(description, "Description with extra spaces") 98 | 99 | def test_no_cve_at_start_raises_lookup_error(self): 100 | """Test that titles not starting with CVE raise LookupError.""" 101 | title = "Some title without CVE at start" 102 | with self.assertRaises(LookupError) as context: 103 | parse_cve_title(title) 104 | self.assertIn("Title does not start with CVE block", str(context.exception)) 105 | self.assertIn(title, str(context.exception)) 106 | 107 | def test_cve_in_middle_raises_lookup_error(self): 108 | """Test that titles with CVE in the middle (not at start) raise LookupError.""" 109 | title = "Some title with CVE-2023-1234 in the middle" 110 | with self.assertRaises(LookupError) as context: 111 | parse_cve_title(title) 112 | self.assertIn("Title does not start with CVE block", str(context.exception)) 113 | 114 | def test_invalid_cve_format_raises_lookup_error(self): 115 | """Test that invalid CVE formats raise LookupError.""" 116 | title = "CVE-23-1234: Invalid year format" 117 | with self.assertRaises(LookupError) as context: 118 | parse_cve_title(title) 119 | self.assertIn("Title does not start with CVE block", str(context.exception)) 120 | 121 | def test_cve_with_three_digits_raises_lookup_error(self): 122 | """Test that CVE with only 3 digits in sequence raises LookupError.""" 123 | title = "CVE-2023-123: Too few digits in sequence" 124 | with self.assertRaises(LookupError) as context: 125 | parse_cve_title(title) 126 | self.assertIn("Title does not start with CVE block", str(context.exception)) 127 | 128 | def test_empty_string_raises_lookup_error(self): 129 | """Test that empty string raises LookupError.""" 130 | title = "" 131 | with self.assertRaises(LookupError) as context: 132 | parse_cve_title(title) 133 | self.assertIn("Title does not start with CVE block", str(context.exception)) 134 | 135 | def test_complex_description_with_special_characters(self): 136 | """Test parsing with 
complex description containing special characters.""" 137 | title = "CVE-2023-1234: Complex description with (parentheses), [brackets], and other-chars!" 138 | cve_ids, description = parse_cve_title(title) 139 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 140 | self.assertEqual(description, "Complex description with (parentheses), [brackets], and other-chars!") 141 | 142 | def test_cve_with_trailing_separators(self): 143 | """Test CVE with trailing separators but no description.""" 144 | title = "CVE-2023-1234::: " 145 | cve_ids, description = parse_cve_title(title) 146 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 147 | self.assertEqual(description, "") 148 | 149 | def test_regression_trailing_space_in_description(self): 150 | """Regression test: Ensure trailing spaces are stripped from descriptions. 151 | 152 | This test prevents regression of the fix where trailing spaces were 153 | not properly stripped from CVE descriptions. 154 | Original issue: "Bypass of seccomp profile enforcement " (with trailing space) 155 | Fixed to: "Bypass of seccomp profile enforcement" (no trailing space) 156 | """ 157 | title = "CVE-2023-1234: Bypass of seccomp profile enforcement " 158 | cve_ids, description = parse_cve_title(title) 159 | self.assertEqual(cve_ids, ["CVE-2023-1234"]) 160 | self.assertEqual(description, "Bypass of seccomp profile enforcement") 161 | # Explicitly check that there's no trailing space 162 | self.assertFalse(description.endswith(" "), "Description should not have trailing space") 163 | 164 | def test_regression_kubectl_flag_syntax(self): 165 | """Regression test: Ensure kubectl command flags are correctly formatted. 166 | 167 | This test prevents regression of malformed kubectl flag syntax in descriptions. 
168 | Original issue: "`kubectl:-http-cache=`" (malformed colon) 169 | Fixed to: "`kubectl --http-cache=`" (correct double dash) 170 | """ 171 | title = "CVE-2023-5678: `kubectl --http-cache=` creates world-writeable cached schema files" 172 | cve_ids, description = parse_cve_title(title) 173 | self.assertEqual(cve_ids, ["CVE-2023-5678"]) 174 | self.assertEqual(description, "`kubectl --http-cache=` creates world-writeable cached schema files") 175 | # Explicitly check for correct flag syntax 176 | self.assertIn("--http-cache", description, "Should contain correct double-dash flag syntax") 177 | self.assertNotIn(":-http-cache", description, "Should not contain malformed colon-dash syntax") 178 | 179 | def test_regression_multiple_whitespace_normalization(self): 180 | """Regression test: Ensure multiple whitespace characters are properly normalized. 181 | 182 | This test ensures that various whitespace issues (multiple spaces, tabs, etc.) 183 | are consistently handled and normalized. 184 | """ 185 | title = "CVE-2023-9999: Description with multiple spaces " 186 | cve_ids, description = parse_cve_title(title) 187 | self.assertEqual(cve_ids, ["CVE-2023-9999"]) 188 | # Should normalize to single spaces and strip trailing whitespace 189 | self.assertEqual(description, "Description with multiple spaces") 190 | self.assertFalse(description.endswith(" "), "Should not have trailing whitespace") 191 | 192 | 193 | if __name__ == '__main__': 194 | unittest.main() 195 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2021-2022/RFP.md: -------------------------------------------------------------------------------- 1 | # Request for Proposal 2 | 3 | ## Kubernetes Third-Party Security Audit 4 | 5 | The Kubernetes SIG Security Third-Party Audit sub-project (working group, henceforth) is soliciting proposals from Information Security vendors for a comprehensive security audit of the Kubernetes Project. 6 | 7 | ### Background 8 | 9 | In August of 2019, the Kubernetes Security Audit working group, in concert with the CNCF, Trail of Bits, and Atredis Partners, completed the first comprehensive security audit of the Kubernetes project’s [codebase](https://github.com/kubernetes/kubernetes/), working from version 1.13. 10 | 11 | These findings, below, paint a broad picture of Kubernetes security, as of version 1.13, and highlight some areas that warrant further, deeper, research. 
12 | 13 | * [Kubernetes Security Review](../security-audit-2019/findings/Kubernetes%20Final%20Report.pdf) 14 | * [Attacking and Defending Kubernetes Installations](../security-audit-2019/findings/AtredisPartners_Attacking_Kubernetes-v1.0.pdf) 15 | * [Whitepaper](../security-audit-2019/findings/Kubernetes%20White%20Paper.pdf) 16 | * [Threat Model](../security-audit-2019/findings/Kubernetes%20Threat%20Model.pdf) 17 | 18 | ### Project Goals and Scope 19 | 20 | This subsequent audit is intended to be the second in a series of recurring audits, each focusing on a specific aspect of Kubernetes while maintaining coverage of all aspects that have changed since the previous audit ([1.13](../security-audit-2019/findings/)). 21 | 22 | The scope of this audit is the most recent release at commencement of audit of the core [Kubernetes project](https://github.com/kubernetes/kubernetes) and certain other code maintained by [Kubernetes SIGs](https://github.com/kubernetes-sigs/). 23 | 24 | This audit will focus on the following components of Kubernetes: 25 | 26 | * kube-apiserver 27 | * kube-scheduler 28 | * etcd, Kubernetes use of 29 | * kube-controller-manager 30 | * cloud-controller-manager 31 | * kubelet 32 | * kube-proxy 33 | * secrets-store-csi-driver 34 | 35 | Adjacent findings within the scope of the [bug bounty program](https://hackerone.com/kubernetes?type=team#scope) may be included, but are not the primary goal. 36 | 37 | This audit is intended to find vulnerabilities or weaknesses in Kubernetes. While Kubernetes relies upon container runtimes such as Docker and CRI-O, we aren't looking for (for example) container escapes that rely upon bugs in the container runtime (unless, for example, the escape is made possible by a defect in the way that Kubernetes sets up the container). 38 | 39 | The working group is specifically interested in the following aspects of the in-scope components. 
Proposals should indicate the specific proposed personnel’s level of expertise in these fields as it relates to Kubernetes. 40 | 41 | * Golang analysis and fuzzing 42 | * Networking 43 | * Cryptography 44 | * Evaluation of component privilege 45 | * Trust relationships and architecture evaluation 46 | * Authentication & Authorization (including Role Based Access Controls) 47 | * Secrets management 48 | * Multi-tenancy isolation: Specifically soft (non-hostile co-tenants) 49 | 50 | Personnel written into the proposal must serve on the engagement, unless explicit approvals for staff changes are made by the Security Audit Working Group. 51 | 52 | #### Out of Scope 53 | 54 | Findings specifically excluded from the [bug bounty program](https://hackerone.com/kubernetes?type=team#scope) scope are out of scope for this audit. 55 | 56 | ### Eligible Vendors 57 | 58 | This RFP is open to proposals from all vendors. 59 | 60 | #### Constraints 61 | 62 | If your proposal includes subcontractors, please include relevant details from their firms such as CVs, past works, etc. The selected vendor will be wholly responsible for fulfillment of the audit and subcontractors must be wholly managed by the selected vendor. 63 | 64 | ### Anticipated Selection Schedule 65 | 66 | This RFP will be open until 4 proposals have been received. 67 | The RFP closing date will be set 2 calendar weeks after the fourth proposal is received. 68 | The working group will announce the vendor selection after reviewing proposals. 69 | Upon receipt of the fourth proposal, the working group will update the RFP closure date and vendor selection date in this document. 70 | 71 | The working group will answer questions for the RFP period. 72 | 73 | Questions can be submitted [here](https://docs.google.com/forms/d/e/1FAIpQLScjApMDAJ5o5pIBFKpJ3mUhdY9w5s9VYd_TffcMSvYH_O7-og/viewform). All questions will be answered publicly in this document. 
74 | 75 | We understand scheduling can be complex but we prefer to have proposals include CVs, resumes, and/or example reports from staff that will be working on the project. 76 | 77 | Proposals should be submitted to kubernetes-security-audit-2021@googlegroups.com 78 | 79 | * 2021/02/08: RFP Open, Question period open 80 | * 2021/06/22: Fourth proposal received 81 | * 2021/07/06: RFP Closes, Question period closes 82 | * TBD: The working group will announce vendor selection when required agreements are fully executed 83 | 84 | ## Methodology 85 | 86 | The start and end dates will be negotiated after vendor selection. The timeline for this audit is flexible. 87 | 88 | The working group will establish a 60 minute kick-off meeting to answer any initial questions and discuss the Kubernetes architecture. 89 | 90 | This is a comprehensive audit, not a penetration test or red team exercise. The audit does not end with the first successful exploit or critical vulnerability. 91 | 92 | The vendor will document the Kubernetes configuration and architecture that they will audit and provide this to the working group. The cluster deployment assessed must not be specific to any public cloud. The working group must approve this configuration before the audit continues. This documented configuration will result in the "audited reference architecture specification" deliverable. 93 | 94 | The vendor will perform source code analysis on the Kubernetes code base, finding vulnerabilities and, where possible and making the most judicious use of time, providing proof of concept exploits that the Kubernetes project can use to investigate and fix defects. The vendor will discuss findings on a weekly basis and, at the vendor’s discretion, bring draft write-ups to status meetings. 95 | 96 | The working group will be available weekly to meet with the selected vendor and will provide subject matter experts as requested. 
97 | 98 | The vendor will develop and deliver a draft report, describing their methodology, how much attention the various components received (to inform future work), and the work’s findings. The working group will review and comment on the draft report, either requesting updates or declaring the draft final. This draft-review-comment-draft cycle may repeat several times. 99 | 100 | ## Expectations 101 | 102 | The vendor must report urgent security issues immediately to both the working group and security@kubernetes.io. 103 | 104 | ## Selection Criteria 105 | 106 | To help us combine objective evaluations with the working group members’ individual past experiences and knowledge of the vendors’ work and relevant experience, the vendors will be evaluated against the following criteria. Each member of the working group will measure the RFP against the criteria on a scale of 1 to 5: 107 | 108 | * Relevant understanding and experience in code audit, threat modeling, and related work 109 | * Relevant understanding and experience in Kubernetes, other orchestration systems, containers, Linux, hardening of distributed systems, and related work 110 | * Strength of the vendor’s proposal and examples of previous work product, redacted as necessary 111 | 112 | A writeup which details our process and results of the last RFP is available [here](../security-audit-2019/RFP_Decision.md). 113 | 114 | ## Confidentiality and Embargo 115 | 116 | All information gathered and deliverables created as a part of the audit must not be shared outside the vendor or the working group without the explicit consent of the working group. 117 | 118 | ## Deliverables 119 | 120 | The audit should result in the following deliverables, which will be made public after any sensitive security issues are mitigated. 121 | 122 | * Audited reference architecture specification. Should take the form of a summary and associated configuration YAML files. 123 | * Findings report including an executive summary. 
124 | * Where possible and, in the vendor’s opinion makes the most judicious use of time, proof of concept exploits that the Kubernetes project can use to investigate and fix defects. 125 | 126 | ## Questions Asked during RFP Response Process 127 | 128 | ### Do we need to use our own hardware and infrastructure or should we use a cloud? 129 | 130 | Strong preference would be for the vendor to provide their own infrastructure or use a public cloud provider, just NOT a managed offering like GKE or EKS. The reasoning is to prevent accidentally auditing a cloud provider's kubernetes service instead of kubernetes/kubernetes. Depending on the scope and approach, it may make sense to use a local cluster (e.g. kind) for API fuzzing and anything that doesn't impact the underlying OS, and is an easy to use repeatable setup (see Methodology above). 131 | 132 | ### What is the intellectual property ownership of the report and all work product? 133 | 134 | The report must be licensed under the Creative Commons Attribution 4.0 International Public License (CC BY 4.0) based on [section 11.(f) of the Cloud Native Computing Foundation (CNCF) Charter](https://github.com/cncf/foundation/blob/master/charter.md#11-ip-policy). 135 | Separately, any code released with or as part of the report needs to be under the Apache License, version 2.0. Please refer to [sections 11.(e) and (d) in the CNCF Charter](https://github.com/cncf/foundation/blob/master/charter.md#11-ip-policy). 136 | 137 | ### Must I use the report format from the previous audit? Can the SIG provide a report format template I can use? 138 | 139 | Vendors who wish to use either the previous report format, as allowed by CC BY 4.0, or a report format provided by the community may do so as long as it is also available under CC BY 4.0. 
Vendors who wish to publish 2 versions of the report, one tailored for the community under CC BY 4.0 and one that they host on their own site using their proprietary fonts, formats, branding, or other copyrights, under their own license may do so, in order to differentiate their commercial report format from this report. Vendors may also publish a synopsis and marketing materials regarding the report on their website as long as it links to the original report in this repository. In the community report, vendors can place links in the report to materials hosted on their commercial site. This does not imply that linked materials are themselves CC BY 4.0. 140 | 141 | ### Do you have any developer documentation or design documentation specifications that aren't available on the internet that you would be able to share? 142 | 143 | Kubernetes is an open source project, all documentation is available on https://kubernetes.io or on https://github.com/kubernetes. 144 | 145 | ### What are the most important publicly available pages detailing the design of the system and the data it receives. 
146 | 147 | - Overview of [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) 148 | - [kube-apiserver overview](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/) 149 | - [kube-scheduler overview](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/) 150 | - [Operating etcd clusters for Kubernetes](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) 151 | - [etcd clustering guide](https://etcd.io/docs/next/op-guide/clustering/) 152 | - [kube-controller-manager overview](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/) 153 | - [cloud-controller-manager overview](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) 154 | - [cloud-controller-manager administration](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/) 155 | - [kubelet overview](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) 156 | - [kube-proxy overview](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/) 157 | - [secrets-store-csi-driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver) 158 | 159 | ### How long does the Working Group envision the engagement lasting and what is the latest date you can receive the deliverables? 160 | 161 | The latest date to receive deliverables will be negotiated with the selected vendor. 162 | 163 | ### Which attack vectors are of most concern to the Working Group. 164 | 165 | 1. The attack vector most concerned about is unauthenticated access to a cluster resulting in compromise of the [components in-scope](#project-goals-and-scope) 166 | 2. Crossing namespace boundaries, an authenticated attacker being able to affect resources their credentials do not directly allow 167 | 3. 
Any other attack vector that exists against the components in scope 168 | 169 | ### Is there flexibility to wait for staff to be available to work on the audit? 170 | 171 | Yes, the timeline for the audit is flexible and the timeline will be further discussed and negotiated with the selected vendor. -------------------------------------------------------------------------------- /sig-security-external-audit/security-audit-2019/RFP.md: -------------------------------------------------------------------------------- 1 | # Request for Proposal 2 | 3 | ## Kubernetes Third Party Security Audit 4 | 5 | The Kubernetes Third-Party Audit Working Group (working group, henceforth) is soliciting proposals from select Information Security vendors for a comprehensive security audit of the Kubernetes Project. 6 | 7 | ### Eligible Vendors 8 | 9 | Only the following vendors will be permitted to submit proposals: 10 | 11 | - NCC Group 12 | - Trail of Bits 13 | - Cure53 14 | - Bishop Fox 15 | - Insomnia 16 | - Atredis Partners 17 | 18 | If your proposal includes sub-contractors, please include relevant details from their firm such as CVs, past works, etc. 19 | 20 | ### RFP Process 21 | 22 | This RFP will be open between 2018/10/29 and 2019/11/30. 23 | 24 | The working group will answer questions for the first two weeks of this period. 25 | 26 | Questions can be submitted [here](https://docs.google.com/forms/d/e/1FAIpQLSd5rXSDYQ0KMjzSEGxv0pkGxInkdW1NEQHvUJpxgX3y0o9IEw/viewform?usp=sf_link). All questions will be answered publicly in this document. 27 | 28 | Proposals must include CVs, resumes, and/or example reports from staff that will be working on the project. 
29 | 30 | - 2018/10/29: RFP Open, Question period open 31 | - 2018/11/12: Question period closes 32 | - 2018/11/30: RFP Closes 33 | - 2018/12/11: The working group will announce vendor selection 34 | 35 | ## Audit Scope 36 | 37 | The scope of the audit is the most recent release (1.12) of the core [Kubernetes project](https://github.com/kubernetes/kubernetes). 38 | 39 | - Findings within the [bug bounty program](https://github.com/kubernetes/community/blob/master/contributors/guide/bug-bounty.md) scope are in scope. 40 | 41 | We want the focus of the audit to be on bugs on Kubernetes. While Kubernetes relies upon a container runtimes such as Docker and CRI-O, we aren't looking for (for example) container escapes that rely upon bugs in the container runtime (unless, for example, the escape is made possible by a defect in the way that Kubernetes sets up the container). 42 | 43 | ### Focus Areas 44 | 45 | The Kubernetes Third-Party Audit Working Group is specifically interested in the following areas. Proposals should indicate their level of expertise in these fields as it relates to Kubernetes. 46 | 47 | - Networking 48 | - Cryptography 49 | - Authentication & Authorization (including Role Based Access Controls) 50 | - Secrets management 51 | - Multi-tenancy isolation: Specifically soft (non-hostile co-tenants) 52 | 53 | ### Out of Scope 54 | 55 | Findings specifically excluded from the [bug bounty program](https://github.com/kubernetes/community/blob/master/contributors/guide/bug-bounty.md) scope are out of scope. 56 | 57 | ## Methodology 58 | 59 | We are allowing 8 weeks for the audit, start date can be negioated after vendor selection. We recognize that November and December can be very high utilization periods for security vendors. 60 | 61 | The audit should not be treated as a penetration test, or red team exercise. It should be comprehensive and not end with the first successful exploit or critical vulnerability. 
62 | 63 | The vendor should perform both source code analysis as well as live evaluation of Kubernetes. 64 | 65 | The vendor should document the Kubernetes configuration and architecture that the audit was performed against for the creation of a "audited reference architecture" artifact. The working group must approve this configuration before the audit continues. 66 | 67 | The working group will establish a 60 minute kick-off meeting to answer any initial questions and explain Kubernetes architecture. 68 | 69 | The working group will be available weekly to meet with the selected vendor, will and provide subject matter experts for requested components. 70 | 71 | The vendor must report urgent security issues immediately to both the working group and security@kubernetes.io. 72 | 73 | ## Confidentiality and Embargo 74 | 75 | All information gathered and artifacts created as a part of the audit must not be shared outside the vendor or the working group without the explicit consent of the working group. 76 | 77 | ## Artifacts 78 | 79 | The audit should result in the following artifacts, which will be made public after any sensitive security issues are mitigated. 80 | 81 | - Findings report, including an executive summary 82 | 83 | - Audited reference architecture specification. Should take the form of a summary and associated configuration yaml files. 84 | 85 | - Formal threat model 86 | 87 | - Any proof of concept exploits that we can use to investigate and fix defects 88 | 89 | - Retrospective white paper(s) on important security considerations in Kubernetes 90 | 91 | *This artifact can be provided up to 3 weeks after deadline for the others.* 92 | 93 | - E.g. [NCC Group: Understanding hardening linux containers](https://www.nccgroup.trust/globalassets/our-research/us/whitepapers/2016/april/ncc_group_understanding_hardening_linux_containers-1-1.pdf) 94 | - E.g. 
[NCC Group: Abusing Privileged and Unprivileged Linux 95 | Containers](https://www.nccgroup.trust/globalassets/our-research/us/whitepapers/2016/june/container_whitepaper.pdf) 96 | 97 | ## Q & A 98 | 99 | | # | Question | Answer | 100 | |---|----------|--------| 101 | | 1 | The RFP says that any area included in the out of scope section of the k8s bug bounty programme is not in-scope of this review. There are some areas which are out of scope of the bug bounty which would appear to be relatively core to k8s, for example Kubernetes on Windows. Can we have 100% confirmation that these areas are out of scope? | Yes. If you encounter a vulnerability in Kubernetes' use of an out-of-scope element, like etcd or the container network interface (to Calico, Weave, Flannel, ...), that is in scope. If you encounter a direct vulnerability in a third-party component during the audit you should follow the embargo section of the RFP. | 102 | | 2 | On the subject of target Distribution and configuration option review:
The RFP mentions an "audited reference architecture".
- Is the expectation that this will be based on a specific k8s install mechanism (e.g. kubeadm)?
- On a related note is it expected that High Availability configurations (e.g. multiple control plane nodes) should be included.
- The assessment mentions Networking as a focus area. Should a specific set of network plugins (e.g. weave, calico, flannel) be considered as in-scope or are all areas outside of the core Kubernetes code for this out of scope.
- Where features of Kubernetes have been deprecated but not removed in 1.12, should they be considered in-scope or not? | 1. No, we are interested in the final topology -- the installation mechanism, as well as its default configuration, is tangental. The purpose is to contextualise the findings.
2. High-availability configurations should be included. For confinement of level of effort, vendor could create one single-master configuration and one high-availability configuration.
3. All plugins are out of scope per the bug bounty scope -- for clarification regarding the interface to plug-ins, please see the previous question.
4. Deprecated features should be considered out of scope | 103 | | 3 | On the subject of dependencies:
- Will any of the project dependencies be in scope for the assessment? (e.g. https://github.com/kubernetes/kubernetes/blob/v1.14.3/Godeps/Godeps.json) | Project dependencies are in scope in the sense that they are **allowed** to be tested, but they should not be considered a **required** testing area. We would be interested in cases where Kubernetes is exploitable due to a vulnerability in a project depdendency. Vulnerabilities found in third-party dependencies should follow the embargo section of the RFP.| 104 | | 4 | Is the 8 weeks mentioned in the scope intended to be a limit on effort applied to the review, or just the timeframe for the review to occur in? | This is only a restriction on time frame, but is not intended to convey level of effort. | 105 | | 5| Will the report be released in its entirety after the issues have been remediated? | Yes. | 106 | | 6| What goals must be met to make this project a success? | We have several goals in mind:
1) Document a full and complete understanding of Kubernetes’ dataflow.
2) Achieve a reasonable understanding of potential vulnerability vectors for subsequent research.
3) Creation of artifacts that help third parties make a practical assessment of Kubernetes’ security position.
4) Eliminate design and architecture-level vulnerabilities.
5) Discover the most significant vulnerabilities, in both number and severity. | 107 | | 7 | Would you be open to two firms partnering on the proposal? | Yes, however both firms should collaborate on the proposal and individual contributors should all provide C.V.s or past works.| 108 | | 8| From a deliverables perspective, will the final report (aside from the whitepaper) be made public? | Yes. | 109 | | 9| The bug bounty document states the following is in scope, "Community maintained stable cloud platform plugins", however will the scope of the assessment include review of the cloud providers' k8s implementation? Reference of cloud providers: https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/ | Cloud provider-specific issues are excluded from the scope. | 110 | | 10| The bug bounty doc lists supply chain attacks as in scope and also says, "excluding social engineering attacks against maintainers". We can assume phishing these individuals is out of scope, but does the exclusion of social engineering against maintainers include all attacks involving individuals? For example, if we were to discover that one of these developers accidentally committed their SSH keys to a git repo unassociated with k8s and we could use these keys to gain access to the k8s project. Is that in scope? | Attacks against individual developers, such as the example provided, are out of scope for this engagement. | 111 | | 11| While suppression of logs is explicitly in scope, is log injection also in scope? | Log injection is in scope for the purposes of this audit.| 112 | | 12| Are all the various networking implementations in scope for the assessment? Ref: https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-networking-model | Please refer to question 1. | 113 | | 13| What does the working group refer to with formal threat model? 
Would STRIDE be a formal threat model in that sense?| A formal threat model should include a comprehensive dataflow diagram which shows data moving between different trust levels and assesses threats to that data using a system like STRIDE as the data moves between each process/component. Many good examples are present in Threat Modeling: Designing for Security by Adam Shostack. | 114 | | 14| Does Kubernetes uses any GoLang non-standard signing libraries? | An initial investigation has not uncovered any, however with a code base as large as Kubernetes, it is possible. | 115 | | 15| Does Kubernetes implement any cryptographic primitives on its own, i.e. primitives which are not part of the standard libraries? | An initial investigation has not uncovered any, however with a code base as large as Kubernetes, it is possible. | 116 | | 16| Presuming that live testing is part of the project, how does the working group see the "audited reference architecture" being defined? Is there a representative deployment, or a document describing a "default installation" that you foresee the engagement team using to inform the buildout of a test environment?| The purpose of the reference architecture is to define and document the configuration against which live testing was preformed. It should be generated collaboratively with the working group at the beginning of the project. We will want it to represent at least a common configuration, as in practice Kubernetes itself has no default configuration. It should take the form of a document detailing the set-up and configuration steps the vendor took to create their environment, ensuring an easily repeatable reference implementation. | 117 | | 17| The RFP describes ""networking and multi-tenancy isolation"" as one of the focus areas.

Can you describe for us what these terms mean to you? Can you also help us understand how you define a soft non-hostile co-tenant? Is a _hostile_ co-tenant also in scope?| By networking we mean vulnerabilities related to communication within and to/from the cluster: container to container, pod to pod, pod to service, and external to internal communications as described in [the networking documentation](https://kubernetes.io/docs/concepts/cluster-administration/networking/).

The concept of soft multi-tenancy is that you have a single cluster being shared by applications or groups within the same company or organization, with less intended restrictions of a hard multi-tenant platform like a PaaS that hosts multiple distinct and potentially hostile competing customers on a single cluster which requires stricter security assumptions. These definitions may vary by group and use case, but the idea is that you can have a cluster with multiple groups with their own namespaces, isolated by networking/storage/RBAC roles."| 118 | | 18| In the Artifacts section, you describe a Formal Threat Model as one of the outputs of the engagement. Can you expound on what this means to you? Are there any representative public examples you could point us to?| Please refer to question 13.| 119 | -------------------------------------------------------------------------------- /sig-security-tooling/vulnerability-mgmt/build-time-dependencies.md: -------------------------------------------------------------------------------- 1 | # Periodic scanning for vulnerabilities in build time dependencies 2 | 3 | Report vulnerabilities in build time dependencies 4 | of [Kubernetes](https://github.com/kubernetes/kubernetes) repository 5 | 6 | Tracker: [Issue #101528](https://github.com/kubernetes/kubernetes/issues/101528) 7 | 8 | ## Background and Prior work 9 | 10 | The process described here is tooling agnostic i.e. the process can be 11 | implemented using any scanner with minimal or no changes. This is also _not_ an 12 | endorsement of any specific tool or scanner. In order to get a working solution 13 | in place, [snyk](https://snyk.io/) was chosen for following reasons: 14 | 15 | 1. Existing partnership between CNCF and Snyk helped procure an account that 16 | allowed us to scan `kubernetes/kubernetes` 17 | repo: https://github.com/kubernetes/steering/issues/206 18 | 2. 
Snyk has detected vulnerabilities in transient dependencies of 19 | `kubernetes/kubernetes`: https://kubernetes.slack.com/archives/CHGFYJVAN/p1595258034095300 20 | 3. Snyk has a programmable interface which made it easier to filter out 21 | licensing issues and known false positive vulnerabilities 22 | 23 | ## Implementation with Snyk 24 | 25 | There are two ways to scan the Kubernetes repo for vulnerabilities in 26 | dependencies at build time 27 | 28 | ### Running the scan locally 29 | 30 | #### Step 0: Install Snyk CLI 31 | 32 | Follow these instructions to snyk cli installed on your 33 | machine: https://support.snyk.io/hc/en-us/articles/360003812538-Install-the-Snyk-CL 34 | 35 | #### Step 1: Authenticate 36 | 37 | ##### Option A : 38 | 39 | Running command `snyk auth` takes you to snyk.io website, do signup/login/auth 40 | 41 | ``` 42 | snyk auth 43 | ``` 44 | 45 | ##### Option B: 46 | 47 | Get the API token from https://app.snyk.io/account and use it 48 | 49 | ``` 50 | snyk auth XXX-XXX-XXX-XXX-XXX 51 | Your account has been authenticated. Snyk is now ready to be used. 
52 | ``` 53 | 54 | #### Step 2: Run test 55 | 56 | ``` 57 | # in k/k repo 58 | snyk test 59 | ``` 60 | 61 | ### Running the scan as part of k/k testgrid 62 | 63 | Prow job that runs every 6 hours is located 64 | here: https://testgrid.k8s.io/sig-security-snyk-scan#ci-kubernetes-snyk-master 65 | 66 | #### Improvements to the raw scan results 67 | 68 | Raw scan results were useful, but needed some Kubernetes specific work 69 | 70 | ##### JSON output 71 | 72 | To store the json output in a file and let stdout use command line friendly 73 | output: 74 | 75 | ``` 76 | snyk test --json-file-output=licenses-cves.json 77 | ``` 78 | 79 | ##### Licenses 80 | 81 | Since detecting licensing violations is a non-goal, licenses related results, 82 | can be removed from the output using this query: 83 | 84 | ``` 85 | cat licenses-cves.json | jq '.vulnerabilities | .[] | select (.type=="license" | not)' > only_cves.json 86 | ``` 87 | 88 | ##### Removing False Positive CVEs identified with v0.0.0 89 | 90 | Since these are really pointing to the code at HEAD in git tracking, we can 91 | ignore the vulnerabilities that are generated when snyk detects v0.0.0 as 92 | Kubernetes version because of the way 93 | [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) 94 | directives are used. 95 | 96 | Easy way to remove licensing *and* CVEs like this: 97 | 98 | ``` 99 | cat licenses-cves.json | jq '.vulnerabilities | .[] | select ((.type=="license") or (.version=="0.0.0") | not)' > only_cves_wo000.json 100 | ``` 101 | 102 | ### Example of filtered JSON scan result 103 | 104 | __Note__: Results of the filtered scan are not printed as part of the CI job. 105 | However, the following historical scan result is mentioned here for 106 | reference purposes only: 107 | 108 | 109 |
Click to view result 110 | 111 | 112 | ``` 113 | { 114 | "CVSSv3": "CVSS:3.1/AV:N/AC:L/PR:H/UI:N/S:U/C:H/I:H/A:H", 115 | "alternativeIds": [], 116 | "creationTime": "2021-02-08T10:27:10.200417Z", 117 | "credit": [ 118 | "Unknown" 119 | ], 120 | "cvssScore": 7.2, 121 | "description": "## Overview\n\nAffected versions of this package are vulnerable to Directory Traversal. When specifying the plugin to load in the `type` field in the network configuration, it is possible to use special elements such as \"../\" separators to reference binaries elsewhere on the system. An attacker can use this to execute other existing binaries other than the cni plugins/types such as `reboot`.\n\n## Details\n\nA Directory Traversal attack (also known as path traversal) aims to access files and directories that are stored outside the intended folder. By manipulating files with \"dot-dot-slash (../)\" sequences and its variations, or by using absolute file paths, it may be possible to access arbitrary files and directories stored on file system, including application source code, configuration, and other critical system files.\n\nDirectory Traversal vulnerabilities can be generally divided into two types:\n\n- **Information Disclosure**: Allows the attacker to gain information about the folder structure or read the contents of sensitive files on the system.\n\n`st` is a module for serving static files on web pages, and contains a [vulnerability of this type](https://snyk.io/vuln/npm:st:20140206). In our example, we will serve files from the `public` route.\n\nIf an attacker requests the following URL from our server, it will in turn leak the sensitive private key of the root user.\n\n```\ncurl http://localhost:8080/public/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/root/.ssh/id_rsa\n```\n**Note** `%2e` is the URL encoded version of `.` (dot).\n\n- **Writing arbitrary files**: Allows the attacker to create or replace existing files. This type of vulnerability is also known as `Zip-Slip`. 
\n\nOne way to achieve this is by using a malicious `zip` archive that holds path traversal filenames. When each filename in the zip archive gets concatenated to the target extraction folder, without validation, the final path ends up outside of the target folder. If an executable or a configuration file is overwritten with a file containing malicious code, the problem can turn into an arbitrary code execution issue quite easily.\n\nThe following is an example of a `zip` archive with one benign file and one malicious file. Extracting the malicious file will result in traversing out of the target folder, ending up in `/root/.ssh/` overwriting the `authorized_keys` file:\n\n```\n2018-04-15 22:04:29 ..... 19 19 good.txt\n2018-04-15 22:04:42 ..... 20 20 ../../../../../../root/.ssh/authorized_keys\n```\n\n## Remediation\nUpgrade `github.com/containernetworking/cni/pkg/invoke` to version 0.8.1 or higher.\n## References\n- [GitHub PR](https://github.com/containernetworking/cni/pull/808)\n- [RedHat Bugzilla Bug](https://bugzilla.redhat.com/show_bug.cgi?id=1919391)\n", 122 | "disclosureTime": "2021-02-05T00:00:00Z", 123 | "exploit": "Not Defined", 124 | "fixedIn": [ 125 | "0.8.1" 126 | ], 127 | "functions": [], 128 | "functions_new": [], 129 | "id": "SNYK-GOLANG-GITHUBCOMCONTAINERNETWORKINGCNIPKGINVOKE-1070549", 130 | "identifiers": { 131 | "CVE": [ 132 | "CVE-2021-20206" 133 | ], 134 | "CWE": [ 135 | "CWE-22" 136 | ] 137 | }, 138 | "language": "golang", 139 | "modificationTime": "2021-02-08T14:14:51.744734Z", 140 | "moduleName": "github.com/containernetworking/cni/pkg/invoke", 141 | "packageManager": "golang", 142 | "packageName": "github.com/containernetworking/cni/pkg/invoke", 143 | "patches": [], 144 | "proprietary": false, 145 | "publicationTime": "2021-02-08T14:14:51.968123Z", 146 | "references": [ 147 | { 148 | "title": "GitHub PR", 149 | "url": "https://github.com/containernetworking/cni/pull/808" 150 | }, 151 | { 152 | "title": "RedHat Bugzilla Bug", 153 | "url": 
"https://bugzilla.redhat.com/show_bug.cgi?id=1919391" 154 | } 155 | ], 156 | "semver": { 157 | "hashesRange": [ 158 | " 164 | ] 165 | }, 166 | "severity": "high", 167 | "severityWithCritical": "high", 168 | "title": "Directory Traversal", 169 | "from": [ 170 | "k8s.io/kubernetes@0.0.0", 171 | "github.com/containernetworking/cni/libcni@0.8.0", 172 | "github.com/containernetworking/cni/pkg/invoke@0.8.0" 173 | ], 174 | "upgradePath": [], 175 | "isUpgradable": false, 176 | "isPatchable": false, 177 | "name": "github.com/containernetworking/cni/pkg/invoke", 178 | "version": "0.8.0" 179 | } 180 | { 181 | "CVSSv3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", 182 | "alternativeIds": [], 183 | "creationTime": "2020-07-30T13:33:31.283115Z", 184 | "credit": [ 185 | "christopher-wong" 186 | ], 187 | "cvssScore": 7.5, 188 | "description": "## Overview\n[github.com/dgrijalva/jwt-go](https://github.com/dgrijalva/jwt-go) is a go implementation of JSON Web Tokens.\n\nAffected versions of this package are vulnerable to Access Restriction Bypass if `m[\"aud\"]` happens to be `[]string{}`, as allowed by the spec, the type assertion fails and the value of `aud` is `\"\"`. 
This can cause audience verification to succeed even if the audiences being passed are incorrect if `required` is set to `false`.\n## Remediation\nUpgrade `github.com/dgrijalva/jwt-go` to version 4.0.0-preview1 or higher.\n## References\n- [GitHub Issue](https://github.com/dgrijalva/jwt-go/issues/422)\n- [GitHub PR](https://github.com/dgrijalva/jwt-go/pull/426)\n", 189 | "disclosureTime": "2020-07-30T13:22:28Z", 190 | "exploit": "Not Defined", 191 | "fixedIn": [ 192 | "4.0.0-preview1" 193 | ], 194 | "functions": [], 195 | "functions_new": [], 196 | "id": "SNYK-GOLANG-GITHUBCOMDGRIJALVAJWTGO-596515", 197 | "identifiers": { 198 | "CVE": [ 199 | "CVE-2020-26160" 200 | ], 201 | "CWE": [ 202 | "CWE-287" 203 | ] 204 | }, 205 | "language": "golang", 206 | "modificationTime": "2020-11-30T11:23:07.967004Z", 207 | "moduleName": "github.com/dgrijalva/jwt-go", 208 | "packageManager": "golang", 209 | "packageName": "github.com/dgrijalva/jwt-go", 210 | "patches": [], 211 | "proprietary": false, 212 | "publicationTime": "2020-09-13T15:53:35Z", 213 | "references": [ 214 | { 215 | "title": "GitHub Issue", 216 | "url": "https://github.com/dgrijalva/jwt-go/issues/422" 217 | }, 218 | { 219 | "title": "GitHub PR", 220 | "url": "https://github.com/dgrijalva/jwt-go/pull/426" 221 | } 222 | ], 223 | "semver": { 224 | "hashesRange": [ 225 | "v4.0.0-preview1" 226 | ], 227 | "vulnerable": [ 228 | "<4.0.0-preview1" 229 | ], 230 | "vulnerableHashes": null 231 | }, 232 | "severity": "high", 233 | "severityWithCritical": "high", 234 | "title": "Access Restriction Bypass", 235 | "from": [ 236 | "k8s.io/kubernetes@0.0.0", 237 | "github.com/heketi/heketi/client/api/go-client@10.2.0", 238 | "github.com/dgrijalva/jwt-go@3.2.0" 239 | ], 240 | "upgradePath": [], 241 | "isUpgradable": false, 242 | "isPatchable": false, 243 | "name": "github.com/dgrijalva/jwt-go", 244 | "version": "3.2.0" 245 | } 246 | { 247 | "CVSSv3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", 248 | "alternativeIds": [], 249 | 
"creationTime": "2020-07-30T13:33:31.283115Z", 250 | "credit": [ 251 | "christopher-wong" 252 | ], 253 | "cvssScore": 7.5, 254 | "description": "## Overview\n[github.com/dgrijalva/jwt-go](https://github.com/dgrijalva/jwt-go) is a go implementation of JSON Web Tokens.\n\nAffected versions of this package are vulnerable to Access Restriction Bypass if `m[\"aud\"]` happens to be `[]string{}`, as allowed by the spec, the type assertion fails and the value of `aud` is `\"\"`. This can cause audience verification to succeed even if the audiences being passed are incorrect if `required` is set to `false`.\n## Remediation\nUpgrade `github.com/dgrijalva/jwt-go` to version 4.0.0-preview1 or higher.\n## References\n- [GitHub Issue](https://github.com/dgrijalva/jwt-go/issues/422)\n- [GitHub PR](https://github.com/dgrijalva/jwt-go/pull/426)\n", 255 | "disclosureTime": "2020-07-30T13:22:28Z", 256 | "exploit": "Not Defined", 257 | "fixedIn": [ 258 | "4.0.0-preview1" 259 | ], 260 | "functions": [], 261 | "functions_new": [], 262 | "id": "SNYK-GOLANG-GITHUBCOMDGRIJALVAJWTGO-596515", 263 | "identifiers": { 264 | "CVE": [ 265 | "CVE-2020-26160" 266 | ], 267 | "CWE": [ 268 | "CWE-287" 269 | ] 270 | }, 271 | "language": "golang", 272 | "modificationTime": "2020-11-30T11:23:07.967004Z", 273 | "moduleName": "github.com/dgrijalva/jwt-go", 274 | "packageManager": "golang", 275 | "packageName": "github.com/dgrijalva/jwt-go", 276 | "patches": [], 277 | "proprietary": false, 278 | "publicationTime": "2020-09-13T15:53:35Z", 279 | "references": [ 280 | { 281 | "title": "GitHub Issue", 282 | "url": "https://github.com/dgrijalva/jwt-go/issues/422" 283 | }, 284 | { 285 | "title": "GitHub PR", 286 | "url": "https://github.com/dgrijalva/jwt-go/pull/426" 287 | } 288 | ], 289 | "semver": { 290 | "hashesRange": [ 291 | "v4.0.0-preview1" 292 | ], 293 | "vulnerable": [ 294 | "<4.0.0-preview1" 295 | ], 296 | "vulnerableHashes": null 297 | }, 298 | "severity": "high", 299 | "severityWithCritical": "high", 
300 | "title": "Access Restriction Bypass", 301 | "from": [ 302 | "k8s.io/kubernetes@0.0.0", 303 | "k8s.io/apiserver/pkg/storage/etcd3/testing@0.0.0", 304 | "go.etcd.io/etcd/integration@#dd1b699fc489", 305 | "go.etcd.io/etcd/etcdserver/api/v3rpc@#dd1b699fc489", 306 | "go.etcd.io/etcd/mvcc@#dd1b699fc489", 307 | "go.etcd.io/etcd/auth@#dd1b699fc489", 308 | "github.com/dgrijalva/jwt-go@3.2.0" 309 | ], 310 | "upgradePath": [], 311 | "isUpgradable": false, 312 | "isPatchable": false, 313 | "name": "github.com/dgrijalva/jwt-go", 314 | "version": "3.2.0" 315 | } 316 | 317 | ``` 318 | 319 |
320 | --------------------------------------------------------------------------------