├── config ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── samples │ ├── kustomization.yaml │ ├── test-resources.yaml │ ├── amtd_v1beta1_securityevent.yaml │ └── amtd_v1beta1_adaptivemovingtargetdefense.yaml ├── default │ ├── manager_config_patch.yaml │ ├── manager_auth_proxy_patch.yaml │ └── kustomization.yaml ├── manager │ ├── manager-image-pull-secret-patch.yaml │ ├── kustomization.yaml │ ├── registry-secret.yaml │ └── manager.yaml ├── crd │ ├── patches │ │ ├── cainjection_in_securityevents.yaml │ │ ├── cainjection_in_adaptivemovingtargetdefenses.yaml │ │ ├── webhook_in_securityevents.yaml │ │ └── webhook_in_adaptivemovingtargetdefenses.yaml │ ├── kustomizeconfig.yaml │ ├── kustomization.yaml │ └── bases │ │ └── amtd.r6security.com_securityevents.yaml └── rbac │ ├── service_account.yaml │ ├── auth_proxy_client_clusterrole.yaml │ ├── role_binding.yaml │ ├── auth_proxy_role_binding.yaml │ ├── leader_election_role_binding.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_service.yaml │ ├── securityevent_viewer_role.yaml │ ├── kustomization.yaml │ ├── securityevent_editor_role.yaml │ ├── adaptivemovingtargetdefense_viewer_role.yaml │ ├── adaptivemovingtargetdefense_editor_role.yaml │ ├── leader_election_role.yaml │ └── role.yaml ├── docs ├── img │ ├── architecture.jpg │ └── phoenix-logo.png ├── README.md ├── examples │ ├── timer-based-app-restart.md │ ├── falco-based-app-restart.md │ ├── falco-based-on-demand-app-quarantine.md │ └── kubearmor-based-app-restart.md ├── WHY.md ├── INSTALL.md ├── REFERENCE.md └── CONCEPTS.md ├── .dockerignore ├── .cr.yaml ├── deploy └── manifests │ ├── config-falco │ ├── falco-patch.yaml │ └── falco.yaml │ ├── falco-integrator-delete-demo-amtd.yaml │ ├── kubearmor-integrator-delete-demo-amtd.yaml │ ├── time-based-trigger-demo-amtd.yaml │ ├── falco-integrator-quarantine-demo-amtd.yaml │ ├── demo-page │ └── demo-page-deployment.yaml │ ├── deploy-time-based-trigger │ └── deploy.yaml │ ├── deploy-kubearmor-integrator │ └── deploy.yaml │ └── deploy-falco-integrator │ └── deploy.yaml ├── charts └── phoenix │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── crds │ │ └── securityevents_crd.yaml │ └── rbac.yaml │ ├── .helmignore │ ├── values.yaml │ └── Chart.yaml ├── .gitignore ├── .github ├── dependabot.yml └── workflows │ ├── release.yaml │ └── chart-releasing.yaml ├── SECURITY.md ├── hack └── boilerplate.go.txt ├── internal └── controller │ ├── amtd_manage_info.go │ ├── constants.go │ ├── suite_test.go │ ├── pod_controller.go │ ├── adaptivemovingtargetdefense_controller.go │ └── securityevent_controller.go ├── PROJECT ├── Dockerfile ├── api └── v1beta1 │ ├── groupversion_info.go │ ├── securityevent_types.go │ ├── zz_generated.deepcopy.go │ └── adaptivemovingtargetdefense_types.go ├── README.md ├── pkg └── controllers │ └── register.go ├── go.mod ├── cmd └── main.go └── Makefile /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /docs/img/architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/r6security/phoenix/HEAD/docs/img/architecture.jpg -------------------------------------------------------------------------------- /docs/img/phoenix-logo.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/r6security/phoenix/HEAD/docs/img/phoenix-logo.png -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 3 | bin/ 4 | testbin/ 5 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples of your project ## 2 | resources: 3 | - amtd_v1beta1_adaptivemovingtargetdefense.yaml 4 | - amtd_v1beta1_securityevent.yaml 5 | #+kubebuilder:scaffold:manifestskustomizesamples 6 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /config/manager/manager-image-pull-secret-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | imagePullSecrets: 10 | - name: registry-secret -------------------------------------------------------------------------------- /.cr.yaml: -------------------------------------------------------------------------------- 1 | owner: r6security 2 | git-repo: phoenix 3 | package-path: .cr-release-packages 4 | pages-branch: gh-pages 5 | skip-existing: true 6 | release-name-template: "{{ .Name }}-{{ .Version }}" 7 | charts-repo: https://r6security.github.io/phoenix 8 | generate-release-notes: true 9 | 10 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | - registry-secret.yaml 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | images: 7 | - name: controller 8 | newName: phoenixop/amtd-operator 9 | newTag: "0.2" 10 | patchesStrategicMerge: 11 | - manager-image-pull-secret-patch.yaml 12 | -------------------------------------------------------------------------------- /deploy/manifests/config-falco/falco-patch.yaml: -------------------------------------------------------------------------------- 1 | spec: 2 | template: 3 | spec: 4 | containers: 5 | - name: falco 6 | volumeMounts: 7 | - name: custom-rules 8 | mountPath: /etc/r6security 9 | volumes: 10 | - name: custom-rules 11 | configMap: 12 | name: falco-rules 13 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_securityevents.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME 7 | name: securityevents.amtd.r6security.com 8 | 
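A note on the CA-injection patch above: `CERTIFICATE_NAMESPACE/CERTIFICATE_NAME` is a placeholder that kustomize substitutes at build time (the kubebuilder scaffold typically wires it to the cert-manager Certificate defined under `config/default` when the [CERTMANAGER] sections are enabled; that kustomization is not shown in this dump). A sketch of the substituted result, assuming a hypothetical Certificate named `serving-cert` in a hypothetical `operator-system` namespace:

```yaml
# Sketch only: the namespace/name below are hypothetical and are normally
# filled in by kustomize variable substitution, not written by hand.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    # cert-manager's CA injector watches this annotation and copies the CA
    # bundle of the referenced Certificate into the CRD's conversion-webhook
    # clientConfig, so the API server can trust the webhook's serving cert.
    cert-manager.io/inject-ca-from: operator-system/serving-cert
  name: securityevents.amtd.r6security.com
```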
-------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_adaptivemovingtargetdefenses.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME 7 | name: adaptivemovingtargetdefenses.amtd.r6security.com 8 | -------------------------------------------------------------------------------- /charts/phoenix/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | R6 Security Phoenix AMTD Operator for Kubernetes 2 | 3 | Chart successfully installed. 4 | 5 | '{{ .Chart.Name }}' is your current release. 6 | To check out the installed resources: 7 | $ kubectl get pod -n {{ .Release.Namespace }} 8 | 9 | To clean up: 10 | helm uninstall {{ .Release.Name }} -n {{ .Release.Namespace }} 11 | kubectl delete namespace {{ .Release.Namespace }} -------------------------------------------------------------------------------- /config/samples/test-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: nginx 6 | name: nginx 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | mtdSecured: "true" 16 | app: nginx 17 | spec: 18 | containers: 19 | - image: nginx 20 | name: nginx 21 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager-sa 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: operator 9 | app.kubernetes.io/part-of: operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: controller-manager 12 | namespace: system -------------------------------------------------------------------------------- /config/manager/registry-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | .dockerconfigjson: eyJhdXRocyI6eyJyZWdpc3RyeS5naXRsYWIuY29tIjp7InVzZXJuYW1lIjoiZ2l0bGFiK2RlcGxveS10b2tlbi0yMjEzMjcyIiwicGFzc3dvcmQiOiJ2NldRaXVFMnhTR0NMNHZWTFdyYyIsImF1dGgiOiJaMmwwYkdGaUsyUmxjR3h2ZVMxMGIydGxiaTB5TWpFek1qY3lPblkyVjFGcGRVVXllRk5IUTB3MGRsWk1WM0pqIn19fQ== 4 | kind: Secret 5 | metadata: 6 | creationTimestamp: null 7 | name: registry-secret 8 | type: kubernetes.io/dockerconfigjson 9 | -------------------------------------------------------------------------------- /charts/phoenix/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | Dockerfile.cross 11 | 12 | # Test binary, build with `go test -c` 13 | *.test 14 | 15 | # Output of the go coverage tool, specifically when used with LiteIDE 16 | *.out 17 | 18 | # Kubernetes Generated files - skip generated files, except for vendored files 19 | 20 | !vendor/**/zz_generated.* 21 | 22 | # editor and IDE paraphernalia 23 | .idea 24 | *.swp 25 | *.swo 26 | *~ 27 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_securityevents.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: securityevents.amtd.r6security.com 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: metrics-reader 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: operator 9 | app.kubernetes.io/part-of: operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - "/metrics" 15 | verbs: 16 | - get 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_adaptivemovingtargetdefenses.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: adaptivemovingtargetdefenses.amtd.r6security.com 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /deploy/manifests/falco-integrator-delete-demo-amtd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: amtd.r6security.com/v1beta1 2 | kind: AdaptiveMovingTargetDefense 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: adaptivemovingtargetdefense 6 | app.kubernetes.io/instance: adaptivemovingtargetdefense-sample 7 | name: amtd-demo 8 | spec: 9 | podSelector: 10 | app: demo-page 11 | strategy: 12 | - rule: 13 | type: default 14 | action: 15 | disable: {} 16 | - rule: 17 | type: Terminal shell in container 18 | threatLevel: Notice 19 | source: FalcoIntegrator 20 | action: 21 | delete: {} -------------------------------------------------------------------------------- /deploy/manifests/kubearmor-integrator-delete-demo-amtd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: amtd.r6security.com/v1beta1 2 | kind: AdaptiveMovingTargetDefense 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: adaptivemovingtargetdefense 6 | app.kubernetes.io/instance: adaptivemovingtargetdefense-sample 7 | name: amtd-demo 8 | spec: 9 | podSelector: 10 | app: demo-page 11 | strategy: 12 | - rule: 13 | type: default 14 | action: 15 | disable: {} 16 | - rule: 17 | type: block-pkg-mgmt-tools-exec 18 | threatLevel: "1" 19 | source: KubeArmorIntegrator 20 | action: 21 | delete: {} -------------------------------------------------------------------------------- /deploy/manifests/time-based-trigger-demo-amtd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: amtd.r6security.com/v1beta1 2 | kind: AdaptiveMovingTargetDefense 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: adaptivemovingtargetdefense 6 | app.kubernetes.io/instance: adaptivemovingtargetdefense-sample 7 | name: amtd-demo 8 | namespace: demo-page 9 | spec: 10 | podSelector: 11 | app: demo-page 12 | strategy: 13 | - rule: 14 | type: default 15 | action: 16 | disable: {} 17 | - rule: 18 | type: timed 19 | threatLevel: info 20 | source: TimeBasedTrigger 21 | action: 22 | delete: {} -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: operator 9 | app.kubernetes.io/part-of: operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: manager-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: manager-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/samples/amtd_v1beta1_securityevent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: amtd.r6security.com/v1beta1 2 | kind: SecurityEvent 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: securityevent 6 | app.kubernetes.io/instance: securityevent-sample 7 | app.kubernetes.io/part-of: operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: operator 10 | name: securityevent-sample 11 | spec: 12 | targets: 13 | - default/nginx-789f54744c-qsjqb 14 | rule: 15 | type: filesystem-corruption 16 | threatLevel: medium 17 | source: falco 18 | description: "Falco: I saw an unauthorized edit in /etc/shadow." 19 | -------------------------------------------------------------------------------- /deploy/manifests/falco-integrator-quarantine-demo-amtd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: amtd.r6security.com/v1beta1 2 | kind: AdaptiveMovingTargetDefense 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: adaptivemovingtargetdefense 6 | app.kubernetes.io/instance: adaptivemovingtargetdefense-sample 7 | name: amtd-demo 8 | spec: 9 | podSelector: 10 | app: demo-page 11 | strategy: 12 | - rule: 13 | type: default 14 | action: 15 | disable: {} 16 | - rule: 17 | type: Terminal shell in container 18 | threatLevel: Notice 19 | source: FalcoIntegrator 20 | action: 21 | quarantine: {} 22 | 23 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: proxy-rolebinding 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: operator 9 | app.kubernetes.io/part-of: operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: proxy-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: operator 9 | app.kubernetes.io/part-of: operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: leader-election-rolebinding 12 | 
roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: leader-election-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: proxy-role 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: operator 9 | app.kubernetes.io/part-of: operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: controller-manager-metrics-service 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: operator 10 | app.kubernetes.io/part-of: operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: controller-manager-metrics-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: https 20 | selector: 21 | control-plane: controller-manager 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Use this section to tell people about which versions of your project are 6 | currently being supported with security updates. 7 | 8 | | Version | Supported | 9 | | ------- | ------------------ | 10 | | 5.1.x | :white_check_mark: | 11 | | 5.0.x | :x: | 12 | | 4.0.x | :white_check_mark: | 13 | | < 4.0 | :x: | 14 | 15 | ## Reporting a Vulnerability 16 | 17 | Use this section to tell people how to report a vulnerability. 18 | 19 | Tell them where to go, how often they can expect to get an update on a 20 | reported vulnerability, what to expect if the vulnerability is accepted or 21 | declined, etc. 22 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 
16 | */ -------------------------------------------------------------------------------- /config/rbac/securityevent_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view securityevents. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: securityevent-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: operator 10 | app.kubernetes.io/part-of: operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: securityevent-viewer-role 13 | rules: 14 | - apiGroups: 15 | - amtd.r6security.com 16 | resources: 17 | - securityevents 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - amtd.r6security.com 24 | resources: 25 | - securityevents/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/securityevent_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit securityevents. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: securityevent-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: operator 10 | app.kubernetes.io/part-of: operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: securityevent-editor-role 13 | rules: 14 | - apiGroups: 15 | - amtd.r6security.com 16 | resources: 17 | - securityevents 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - amtd.r6security.com 28 | resources: 29 | - securityevents/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /charts/phoenix/values.yaml: -------------------------------------------------------------------------------- 1 | amtd: 2 | image: 3 | repository: ghcr.io/r6security/phoenix/amtd-operator 4 | pullPolicy: IfNotPresent 5 | tag: "v0.2.2" 6 | resources: 7 | limits: 8 | cpu: 500m 9 | memory: 128Mi 10 | requests: 11 | cpu: 10m 12 | memory: 64Mi 13 | nodeSelector: {} 14 | tolerations: [] 15 | affinity: 16 | nodeAffinity: 17 | requiredDuringSchedulingIgnoredDuringExecution: 18 | nodeSelectorTerms: 19 | - matchExpressions: 20 | - key: kubernetes.io/arch 21 | operator: In 22 | values: 23 | - amd64 24 | - arm64 25 | - ppc64le 26 | - s390x 27 | - key: kubernetes.io/os 28 | operator: In 29 | values: 30 | - linux -------------------------------------------------------------------------------- /config/rbac/adaptivemovingtargetdefense_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view adaptivemovingtargetdefenses. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: adaptivemovingtargetdefense-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: operator 10 | app.kubernetes.io/part-of: operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: adaptivemovingtargetdefense-viewer-role 13 | rules: 14 | - apiGroups: 15 | - amtd.r6security.com 16 | resources: 17 | - adaptivemovingtargetdefenses 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - amtd.r6security.com 24 | resources: 25 | - adaptivemovingtargetdefenses/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | app.kubernetes.io/name: servicemonitor 9 | app.kubernetes.io/instance: controller-manager-metrics-monitor 10 | app.kubernetes.io/component: metrics 11 | app.kubernetes.io/created-by: operator 12 | app.kubernetes.io/part-of: operator 13 | app.kubernetes.io/managed-by: kustomize 14 | name: controller-manager-metrics-monitor 15 | namespace: system 16 | spec: 17 | endpoints: 18 | - path: /metrics 19 | port: https 20 | scheme: https 21 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | selector: 25 | matchLabels: 26 | control-plane: controller-manager 27 | -------------------------------------------------------------------------------- /config/rbac/adaptivemovingtargetdefense_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit adaptivemovingtargetdefenses. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: adaptivemovingtargetdefense-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: operator 10 | app.kubernetes.io/part-of: operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: adaptivemovingtargetdefense-editor-role 13 | rules: 14 | - apiGroups: 15 | - amtd.r6security.com 16 | resources: 17 | - adaptivemovingtargetdefenses 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - amtd.r6security.com 28 | resources: 29 | - adaptivemovingtargetdefenses/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /internal/controller/amtd_manage_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 
12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | package controller 19 | 20 | type AMTDManageInfo struct { 21 | ManagedSince string `json:"managed-since"` 22 | AMTDNamespace string `json:"amtd-namespace"` 23 | AMTDName string `json:"amtd-name"` 24 | } 25 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: operator 10 | app.kubernetes.io/part-of: operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: leader-election-role 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - configmaps 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: 27 | - coordination.k8s.io 28 | resources: 29 | - leases 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - events 42 | verbs: 43 | - create 44 | - patch 45 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins properly work. 4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: r6security.com 6 | layout: 7 | - go.kubebuilder.io/v4 8 | projectName: operator 9 | repo: github.com/r6security/phoenix 10 | resources: 11 | - api: 12 | crdVersion: v1 13 | namespaced: true 14 | controller: true 15 | domain: r6security.com 16 | group: amtd 17 | kind: AdaptiveMovingTargetDefense 18 | path: github.com/r6security/phoenix/api/v1beta1 19 | version: v1beta1 20 | - controller: true 21 | group: core 22 | kind: Pod 23 | path: k8s.io/api/core/v1 24 | version: v1 25 | - api: 26 | crdVersion: v1 27 | namespaced: true 28 | controller: true 29 | domain: r6security.com 30 | group: amtd 31 | kind: SecurityEvent 32 | path: github.com/r6security/phoenix/api/v1beta1 33 | version: v1beta1 34 | version: "3" 35 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Phoenix documentation 2 | 3 |
4 | ![Phoenix](img/phoenix-logo.png) 5 | 
6 | 7 | ## Overview 8 | 9 | * [Why Phoenix](WHY.md) 10 | * [Concepts](CONCEPTS.md) 11 | 12 | ## Getting started 13 | 14 | * [Installation](INSTALL.md) 15 | 16 | ## User guides 17 | 18 | * [CRD reference](REFERENCE.md) 19 | 20 | ## Tutorials 21 | 22 | ### Killercoda 23 | 24 | * [Self-paced demos on Killercoda interactive environments](https://killercoda.com/r6security1/scenario/demo) 25 | 26 | ### Timer based application restart 27 | 28 | * [Periodic pod restarts in a scheduled way](examples/timer-based-app-restart.md) 29 | 30 | ### Falco based on-demand application restart 31 | 32 | * [Restarting pod when a terminal is opened into it](examples/falco-based-app-restart.md) 33 | * [Putting pod into quarantine when terminal is opened into it](examples/falco-based-on-demand-app-quarantine.md) 34 | 35 | ### KubeArmor based on-demand application restart 36 | * [Restarting pod when a KubeArmor alert is created](examples/kubearmor-based-app-restart.md) 37 | -------------------------------------------------------------------------------- /config/samples/amtd_v1beta1_adaptivemovingtargetdefense.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: amtd.r6security.com/v1beta1 2 | kind: AdaptiveMovingTargetDefense 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: adaptivemovingtargetdefense 6 | app.kubernetes.io/instance: adaptivemovingtargetdefense-sample 7 | app.kubernetes.io/part-of: operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: operator 10 | name: adaptivemovingtargetdefense-sample 11 | spec: 12 | podSelector: 13 | mtdSecured: "true" 14 | strategy: 15 | - rule: 16 | type: default 17 | action: 18 | disable: {} 19 | - rule: 20 | type: test 21 | threatLevel: warning 22 | source: TimerBackend 23 | 24 | action: 25 | delete: {} 26 | - rule: 27 | type: network-attack 28 | action: 29 | quarantine: {} 30 | - rule: 31 | type: filesystem-corruption 32 | threatLevel: medium 33 | source: falco 34 | action: 35 | quarantine: {} 36 | - rule: 37 | threatLevel: medium 38 | action: 39 | quarantine: {} 40 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/amtd.r6security.com_adaptivemovingtargetdefenses.yaml 6 | - bases/amtd.r6security.com_securityevents.yaml 7 | #+kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patches: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- patches/webhook_in_adaptivemovingtargetdefenses.yaml 13 | #- patches/webhook_in_securityevents.yaml 14 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- patches/cainjection_in_adaptivemovingtargetdefenses.yaml 19 | #- patches/cainjection_in_securityevents.yaml 20 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
23 | configurations: 24 | - kustomizeconfig.yaml 25 | -------------------------------------------------------------------------------- /deploy/manifests/demo-page/demo-page-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | creationTimestamp: null 5 | name: demo-page 6 | spec: {} 7 | status: {} 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | creationTimestamp: null 13 | labels: 14 | app: demo-page 15 | name: demo-page 16 | namespace: demo-page 17 | spec: 18 | replicas: 1 19 | selector: 20 | matchLabels: 21 | app: demo-page 22 | strategy: {} 23 | template: 24 | metadata: 25 | creationTimestamp: null 26 | labels: 27 | app: demo-page 28 | spec: 29 | containers: 30 | - image: nginx:1.25.1 31 | name: nginx 32 | resources: {} 33 | ports: 34 | - name: web-server 35 | containerPort: 80 36 | status: {} 37 | --- 38 | apiVersion: v1 39 | kind: Service 40 | metadata: 41 | creationTimestamp: null 42 | labels: 43 | app: demo-page 44 | name: demo-page 45 | namespace: demo-page 46 | spec: 47 | ports: 48 | - name: web-server 49 | port: 80 50 | protocol: TCP 51 | targetPort: web-server 52 | selector: 53 | app: demo-page 54 | type: ClusterIP 55 | status: 56 | loadBalancer: {} -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch updates the manager container to serve metrics securely using built-in authentication 2 | # and authorization, replacing the deprecated kube-rbac-proxy sidecar. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | affinity: 12 | nodeAffinity: 13 | requiredDuringSchedulingIgnoredDuringExecution: 14 | nodeSelectorTerms: 15 | - matchExpressions: 16 | - key: kubernetes.io/arch 17 | operator: In 18 | values: 19 | - amd64 20 | - arm64 21 | - ppc64le 22 | - s390x 23 | - key: kubernetes.io/os 24 | operator: In 25 | values: 26 | - linux 27 | containers: 28 | - name: manager 29 | args: 30 | - "--health-probe-bind-address=:8081" 31 | - "--metrics-bind-address=:8080" 32 | - "--leader-elect" 33 | ports: 34 | - containerPort: 8080 35 | protocol: TCP 36 | name: https 37 | -------------------------------------------------------------------------------- /charts/phoenix/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: phoenix 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.4 19 | 20 | # This is the version number of the application being deployed. 
This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "v0.2.3" 25 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - amtd.r6security.com 9 | resources: 10 | - adaptivemovingtargetdefenses 11 | - securityevents 12 | verbs: 13 | - create 14 | - delete 15 | - get 16 | - list 17 | - patch 18 | - update 19 | - watch 20 | - apiGroups: 21 | - amtd.r6security.com 22 | resources: 23 | - adaptivemovingtargetdefenses/finalizers 24 | - securityevents/finalizers 25 | verbs: 26 | - update 27 | - apiGroups: 28 | - amtd.r6security.com 29 | resources: 30 | - adaptivemovingtargetdefenses/status 31 | - securityevents/status 32 | verbs: 33 | - get 34 | - patch 35 | - update 36 | - apiGroups: 37 | - "" 38 | resources: 39 | - pods 40 | verbs: 41 | - create 42 | - delete 43 | - get 44 | - list 45 | - patch 46 | - update 47 | - watch 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - pods/finalizers 52 | verbs: 53 | - update 54 | - apiGroups: 55 | - "" 56 | resources: 57 | - pods/status 58 | verbs: 59 | - get 60 | - patch 61 | - update 62 | - apiGroups: 63 | - networking.k8s.io 64 | resources: 65 | - networkpolicies 66 | verbs: 67 | - create 68 | - get 69 | - list 70 | - watch 71 | -------------------------------------------------------------------------------- /internal/controller/constants.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 
16 | */ 17 | 18 | package controller 19 | 20 | const ( 21 | AMTD_MANAGED_TIME string = "amtd.r6security.com/managed-time" 22 | AMTD_MANAGED_BY string = "amtd.r6security.com/managed-by" 23 | AMTD_STRATEGY_BASE string = "amtd.r6security.com/strategy-" 24 | AMTD_NETWORK_POLICY string = "amtd.r6security.com/network-policy" 25 | 26 | AMTD_APPLIED_SECURITY_EVENTS string = "amtd.r6security.com/applied-sec-events" 27 | R6_SECURITY_EVENT_RECEIVED string = "amtd.r6security.event.received" 28 | 29 | // R6Security label for AMTD-managed pods (GitHub issue #15) 30 | R6_SECURITY_MANAGED_LABEL string = "r6security.com/managed-by-amtd" 31 | ) 32 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release Docker Image 2 | 3 | on: 4 | 5 | push: 6 | tags: 7 | - 'v*' 8 | 9 | workflow_dispatch: 10 | 11 | jobs: 12 | docker-release: 13 | runs-on: ubuntu-latest 14 | permissions: 15 | contents: read 16 | packages: write 17 | steps: 18 | - name: Checkout repository 19 | uses: actions/checkout@v4 20 | 21 | - name: Log in to GitHub Container Registry 22 | uses: docker/login-action@v3 23 | with: 24 | registry: ghcr.io 25 | username: ${{ github.actor }} 26 | password: ${{ secrets.GITHUB_TOKEN }} 27 | 28 | - name: Set up Docker Buildx 29 | uses: docker/setup-buildx-action@v3 30 | 31 | - name: Docker meta 32 | id: meta 33 | uses: docker/metadata-action@v5 34 | with: 35 | images: ghcr.io/${{ github.repository }} 36 | tags: | 37 | type=semver,pattern={{version}} 38 | type=semver,pattern={{major}}.{{minor}} 39 | type=semver,pattern={{major}} 40 | type=sha 41 | 42 | - name: Build and push Docker image 43 | uses: docker/build-push-action@v6 44 | with: 45 | context: . 46 | platforms: linux/amd64,linux/arm64 47 | push: true 48 | tags: ${{ steps.meta.outputs.tags }} 49 | labels: ${{ steps.meta.outputs.labels }} -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.24 AS builder 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | WORKDIR /workspace 7 | # Copy the Go Modules manifests 8 | COPY go.mod go.mod 9 | COPY go.sum go.sum 10 | # cache deps before building and copying source so that we don't need to re-download as much 11 | # and so that source changes don't invalidate our downloaded layer 12 | RUN go mod download 13 | 14 | # Copy the go source 15 | COPY cmd/main.go cmd/main.go 16 | COPY api/ api/ 17 | COPY internal/controller/ internal/controller/ 18 | 19 | # Build 20 | # GOARCH has no default value, so the binary is built for the platform of the host where the 21 | # command was called. For example, running make docker-build on Apple Silicon (M1) sets docker's 22 | # BUILDPLATFORM arg to linux/arm64, while on Apple x86 it is linux/amd64. By leaving it 23 | # empty we ensure that the container and the binary shipped in it share the same platform. 24 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go 25 | 26 | # Use distroless as minimal base image to package the manager binary 27 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 28 | FROM gcr.io/distroless/static:nonroot 29 | WORKDIR / 30 | COPY --from=builder /workspace/manager . 
31 | USER 65532:65532 32 | 33 | ENTRYPOINT ["/manager"] 34 | -------------------------------------------------------------------------------- /.github/workflows/chart-releasing.yaml: -------------------------------------------------------------------------------- 1 | name: Release Charts 2 | 3 | on: 4 | push: 5 | # Pattern matched against refs/tags 6 | tags: 7 | - '*' 8 | 9 | jobs: 10 | release: 11 | # depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions 12 | # see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token 13 | permissions: 14 | contents: write 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | with: 20 | fetch-depth: 0 21 | 22 | - name: Configure Git 23 | run: | 24 | git config user.name "$GITHUB_ACTOR" 25 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 26 | 27 | - name: Install Helm 28 | uses: azure/setup-helm@v4 29 | env: 30 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 31 | 32 | # - name: Add dependency repositories 33 | # run: | 34 | # for dir in $(ls -d charts/*/); do 35 | # helm dependency list $dir 2> /dev/null | tail +2 | head -n -1 | awk '{ print "helm repo add " $1 " " $3 }' | while read cmd; do $cmd; done 36 | # done 37 | 38 | - name: Run chart-releaser 39 | uses: helm/chart-releaser-action@v1.6.0 40 | env: 41 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" -------------------------------------------------------------------------------- /api/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | // Package v1beta1 contains API Schema definitions for the amtd v1beta1 API group 19 | // +kubebuilder:object:generate=true 20 | // +groupName=amtd.r6security.com 21 | package v1beta1 22 | 23 | import ( 24 | "k8s.io/apimachinery/pkg/runtime/schema" 25 | "sigs.k8s.io/controller-runtime/pkg/scheme" 26 | ) 27 | 28 | var ( 29 | // GroupVersion is group version used to register these objects 30 | GroupVersion = schema.GroupVersion{Group: "amtd.r6security.com", Version: "v1beta1"} 31 | 32 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 33 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 34 | 35 | // AddToScheme adds the types in this group-version to the given scheme. 
36 | AddToScheme = SchemeBuilder.AddToScheme 37 | ) 38 | -------------------------------------------------------------------------------- /deploy/manifests/deploy-time-based-trigger/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | creationTimestamp: null 5 | name: time-based-trigger 6 | spec: {} 7 | status: {} 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | creationTimestamp: null 13 | labels: 14 | app.kubernetes.io/name: time-based-trigger 15 | name: time-based-trigger 16 | namespace: time-based-trigger 17 | spec: 18 | replicas: 1 19 | selector: 20 | matchLabels: 21 | app.kubernetes.io/name: time-based-trigger 22 | strategy: {} 23 | template: 24 | metadata: 25 | creationTimestamp: null 26 | labels: 27 | app.kubernetes.io/name: time-based-trigger 28 | spec: 29 | containers: 30 | - image: phoenixop/time-based-trigger:0.0.1 31 | name: time-based-trigger 32 | resources: {} 33 | imagePullPolicy: Always 34 | status: {} 35 | --- 36 | apiVersion: rbac.authorization.k8s.io/v1 37 | kind: ClusterRole 38 | metadata: 39 | name: time-based-trigger 40 | rules: 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - pods 45 | verbs: 46 | - get 47 | - list 48 | - watch 49 | - apiGroups: 50 | - "" 51 | resources: 52 | - pods/status 53 | verbs: 54 | - get 55 | - patch 56 | - update 57 | - apiGroups: 58 | - amtd.r6security.com 59 | resources: 60 | - securityevents 61 | verbs: 62 | - get 63 | - list 64 | - create 65 | --- 66 | apiVersion: rbac.authorization.k8s.io/v1 67 | kind: ClusterRoleBinding 68 | metadata: 69 | name: time-based-trigger 70 | roleRef: 71 | apiGroup: rbac.authorization.k8s.io 72 | kind: ClusterRole 73 | name: time-based-trigger 74 | subjects: 75 | - kind: ServiceAccount 76 | name: default 77 | namespace: time-based-trigger -------------------------------------------------------------------------------- /docs/examples/timer-based-app-restart.md: -------------------------------------------------------------------------------- 1 | # Phoenix demo: Timer based application restart 2 | 3 | This tutorial shows how to use Phoenix to periodically restart specific pods relying on triggers (SecurityEvents) that are created with the Time-based Trigger. 
4 | 5 | In this tutorial you will learn how to: 6 | 7 | - install Phoenix 8 | - install Time-based Trigger 9 | - configure Phoenix 10 | - configure Time-based Trigger 11 | 12 | ## Phoenix installation 13 | 14 | kubectl apply -n moving-target-defense -f deploy/manifests/deploy-phoenix 15 | 16 | ## Time-based Trigger installation 17 | 18 | kubectl apply -n time-based-trigger -f deploy/manifests/deploy-time-based-trigger 19 | 20 | ## Create a demo application to have something to illustrate the scenario with 21 | 22 | kubectl apply -n demo-page -f deploy/manifests/demo-page/demo-page-deployment.yaml 23 | 24 | ## Configure Phoenix 25 | 26 | kubectl apply -n demo-page -f deploy/manifests/time-based-trigger-demo-amtd.yaml 27 | 28 | ## Configure Time-based Trigger 29 | 30 | - Set the timer to 30s: 31 | ``` 32 | kubectl patch -n demo-page deployments.apps demo-page -p '{"spec": {"template": {"metadata": {"annotations": {"time-based-trigger.amtd.r6security.com/schedule": "30s"}}}}}' 33 | ``` 34 | - Enable time-based-trigger for the pod: 35 | ``` 36 | kubectl patch -n demo-page deployments.apps demo-page -p '{"spec": {"template": {"metadata": {"annotations": {"time-based-trigger.amtd.r6security.com/enabled": "true"}}}}}' 37 | ``` 38 | Watch the pods to see the restarts every 30 seconds: 39 | 40 | watch kubectl -n demo-page get pods 41 | 42 | ## Clean up 43 | 44 | kubectl -n moving-target-defense delete -f deploy/manifests/deploy-phoenix 45 | kubectl -n time-based-trigger delete -f deploy/manifests/deploy-time-based-trigger 46 | kubectl delete namespace demo-page 47 | -------------------------------------------------------------------------------- /deploy/manifests/deploy-kubearmor-integrator/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | creationTimestamp: null 5 | name: kubearmor-integrator 6 | spec: {} 7 | status: {} 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | creationTimestamp: null 13 | labels: 14 | app.kubernetes.io/name: kubearmor-integrator 15 | name: kubearmor-integrator 16 | namespace: kubearmor-integrator 17 | spec: 18 | replicas: 1 19 | selector: 20 | matchLabels: 21 | app.kubernetes.io/name: kubearmor-integrator 22 | strategy: {} 23 | template: 24 | metadata: 25 | creationTimestamp: null 26 | labels: 27 | app.kubernetes.io/name: kubearmor-integrator 28 | spec: 29 | containers: 30 | - image: phoenixop/kubearmor-integrator:0.0.1 31 | name: kubearmor-integrator 32 | env: 33 | - name: KUBEARMOR_SERVICE 34 | value: "kubearmor.kubearmor.svc.cluster.local:32767" 35 | status: {} 36 | --- 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: ClusterRole 39 | metadata: 40 | name: kubearmor-integrator 41 | rules: 42 | - apiGroups: 43 | - amtd.r6security.com 44 | resources: 45 | - securityevents 46 | verbs: 47 | - get 48 | - list 49 | - create 50 | - apiGroups: 51 | - '' 52 | resources: 53 | - pods 54 | verbs: 55 | - get 56 | - list 57 | - create 58 | - apiGroups: 59 | - '' 60 | resources: 61 | - pods/portforward 62 | verbs: 63 | - get 64 | - list 65 | - create 66 | --- 67 | apiVersion: rbac.authorization.k8s.io/v1 68 | kind: ClusterRoleBinding 69 | metadata: 70 | name: kubearmor-integrator 71 | roleRef: 72 | apiGroup: rbac.authorization.k8s.io 73 | kind: ClusterRole 74 | name: kubearmor-integrator 75 | subjects: 76 | - kind: ServiceAccount 77 | name: default 78 | namespace: kubearmor-integrator 
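A note on the Time-based Trigger tutorial above: the trigger's job is to periodically create SecurityEvent resources that match the `timed` rule in `deploy/manifests/time-based-trigger-demo-amtd.yaml`. Based on the SecurityEvent schema shown in `config/samples/amtd_v1beta1_securityevent.yaml`, an event it produces should look roughly like the sketch below; the metadata and pod names are hypothetical.

```yaml
# Sketch of a trigger-generated SecurityEvent; the trigger creates these
# automatically, and the names below are hypothetical.
apiVersion: amtd.r6security.com/v1beta1
kind: SecurityEvent
metadata:
  name: demo-page-timed-event              # hypothetical name
spec:
  targets:
    - demo-page/demo-page-6f7c9d8b5-abcde  # <namespace>/<pod>, hypothetical pod name
  rule:
    type: timed                            # matches the 'timed' rule in the demo AMTD
    threatLevel: info
    source: TimeBasedTrigger
  description: "Scheduled trigger fired for pod restart."
```

Phoenix matches the event's rule (type, threatLevel, source) against each strategy entry of the AMTD resource and, in this demo, executes the `delete` action on the targeted pod, which its Deployment then recreates.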
-------------------------------------------------------------------------------- /deploy/manifests/deploy-falco-integrator/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | creationTimestamp: null 5 | name: falco-integrator 6 | spec: {} 7 | status: {} 8 | 9 | --- 10 | 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | creationTimestamp: null 15 | labels: 16 | app.kubernetes.io/name: falco-integrator 17 | name: falco-integrator 18 | namespace: falco-integrator 19 | spec: 20 | replicas: 1 21 | selector: 22 | matchLabels: 23 | app.kubernetes.io/name: falco-integrator 24 | strategy: {} 25 | template: 26 | metadata: 27 | creationTimestamp: null 28 | labels: 29 | app.kubernetes.io/name: falco-integrator 30 | spec: 31 | containers: 32 | - image: phoenixop/falco-integrator:0.0.1 33 | name: falco-integrator 34 | resources: {} 35 | imagePullPolicy: Always 36 | env: 37 | - name: SERVER_PORT 38 | value: "11111" 39 | ports: 40 | - name: web 41 | containerPort: 11111 42 | status: {} 43 | 44 | --- 45 | 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: falco-integrator 50 | namespace: falco-integrator 51 | spec: 52 | selector: 53 | app.kubernetes.io/name: falco-integrator 54 | ports: 55 | - protocol: TCP 56 | port: 80 57 | targetPort: web 58 | name: web 59 | --- 60 | apiVersion: rbac.authorization.k8s.io/v1 61 | kind: ClusterRole 62 | metadata: 63 | name: falco-integrator 64 | rules: 65 | - apiGroups: 66 | - amtd.r6security.com 67 | resources: 68 | - securityevents 69 | verbs: 70 | - get 71 | - list 72 | - create 73 | --- 74 | apiVersion: rbac.authorization.k8s.io/v1 75 | kind: ClusterRoleBinding 76 | metadata: 77 | name: falco-integrator 78 | roleRef: 79 | apiGroup: rbac.authorization.k8s.io 80 | kind: ClusterRole 81 | name: falco-integrator 82 | subjects: 83 | - kind: ServiceAccount 84 | name: default 85 | namespace: falco-integrator 86 | -------------------------------------------------------------------------------- /charts/phoenix/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "phoenix.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "phoenix.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "phoenix.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "phoenix.labels" -}} 37 | helm.sh/chart: {{ include "phoenix.chart" . }} 38 | {{ include "phoenix.selectorLabels" . 
}} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "phoenix.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "phoenix.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "phoenix.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "phoenix.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /charts/phoenix/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | control-plane: controller-manager 7 | name: operator-controller-manager 8 | namespace: {{ .Release.Namespace }} 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | control-plane: controller-manager 14 | template: 15 | metadata: 16 | annotations: 17 | kubectl.kubernetes.io/default-container: manager 18 | labels: 19 | control-plane: controller-manager 20 | spec: 21 | containers: 22 | - args: 23 | - --health-probe-bind-address=:8081 24 | - --metrics-bind-address=127.0.0.1:8080 25 | - --leader-elect 26 | command: 27 | - /manager 28 | image: {{ .Values.amtd.image.repository }}:{{ .Values.amtd.image.tag }} 29 | livenessProbe: 30 | httpGet: 31 | path: /healthz 32 | port: 8081 33 | initialDelaySeconds: 15 34 | periodSeconds: 20 35 | name: manager 36 | readinessProbe: 37 | httpGet: 38 | path: /readyz 39 | port: 8081 40 | initialDelaySeconds: 5 41 | periodSeconds: 10 42 | resources: 43 | {{- toYaml .Values.amtd.resources | nindent 12 }} 44 | securityContext: 45 | allowPrivilegeEscalation: false 46 | capabilities: 47 | drop: 48 | - ALL 49 | {{- with .Values.amtd.affinity }} 50 | affinity: 51 | {{- toYaml . | nindent 8 }} 52 | {{- end }} 53 | {{- with .Values.amtd.nodeSelector }} 54 | nodeSelector: 55 | {{- toYaml . | nindent 8 }} 56 | {{- end }} 57 | {{- with .Values.amtd.tolerations }} 58 | tolerations: 59 | {{- toYaml . | nindent 8 }} 60 | {{- end }} 61 | securityContext: 62 | runAsNonRoot: true 63 | serviceAccountName: operator-controller-manager 64 | terminationGracePeriodSeconds: 10 65 | --- -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | Phoenix
3 |
4 | 5 | # Phoenix: Automated Moving Target Defense for Kubernetes 6 | 7 | Protect your Kubernetes environments with ease. 8 | Phoenix leverages Automated Moving Target Defense (AMTD) to deliver dynamic, scalable, and intelligent security. It integrates seamlessly with your existing DevOps workflows, providing robust protection against evolving threats without slowing down your pipelines. 9 | > Warning: This project is in active development; consider this before deploying it in a production environment. All APIs, SDKs, and packages are subject to change. 10 | 11 | ## Features 12 | 🔄 Dynamic Container Refresh 13 | 14 | Automatically rotates containers, nodes, and resources. 15 | Disrupts attack patterns while ensuring 100% uptime. 16 | 17 | 📜 Real-Time Policy Adaptation 18 | 19 | Adjusts security policies dynamically using Prometheus telemetry. 20 | Reduces false positives and eliminates manual configurations. 21 | 22 | 🔄 Automated Rollbacks 23 | 24 | Restores environments to known-good states after misconfigurations or breaches. 25 | 26 | 🛠️ Self-Healing Infrastructure 27 | 28 | Node Infrastructure Modules (NIMs) autonomously detect and recover from anomalies. 29 | 30 | 📈 Seamless Observability 31 | 32 | Full integration with Prometheus and Grafana for actionable insights into AMTD activities. 33 | 34 | ## Documentation 35 | 36 | For more details, please check the [documentation](docs/README.md). 37 | 38 | ### Installation 39 | 40 | For more details, please check the [install guide](docs/INSTALL.md#deploy-with-helm). 41 | 42 | ## Caveats 43 | 44 | * The project is in an early stage where the current focus is to provide a proof-of-concept implementation that a wider range of potential users can try out. We welcome all feedback and ideas as we continuously improve the project and introduce new features. 45 | 46 | ## License 47 | 48 | Copyright 2021-2025 by [R6 Security](https://www.r6security.com), Inc. Some rights reserved. 49 | 50 | Server Side Public License - see [LICENSE](/LICENSE) for full text.
51 | -------------------------------------------------------------------------------- /deploy/manifests/config-falco/falco.yaml: -------------------------------------------------------------------------------- 1 | base_syscalls: 2 | custom_set: [] 3 | repair: false 4 | buffered_outputs: false 5 | file_output: 6 | enabled: false 7 | filename: ./events.txt 8 | keep_alive: false 9 | grpc: 10 | bind_address: unix:///run/falco/falco.sock 11 | enabled: false 12 | threadiness: 0 13 | grpc_output: 14 | enabled: false 15 | http_output: 16 | ca_bundle: "" 17 | ca_cert: "" 18 | ca_path: /etc/ssl/certs 19 | enabled: true 20 | insecure: true 21 | url: "falco-integrator.falco-integrator" 22 | user_agent: falcosecurity/falco 23 | json_include_output_property: true 24 | json_include_tags_property: true 25 | json_output: true 26 | libs_logger: 27 | enabled: false 28 | severity: debug 29 | load_plugins: [] 30 | log_level: info 31 | log_stderr: true 32 | log_syslog: true 33 | metadata_download: 34 | chunk_wait_us: 1000 35 | max_mb: 100 36 | watch_freq_sec: 1 37 | metrics: 38 | convert_memory_to_mb: true 39 | enabled: false 40 | include_empty_values: false 41 | interval: 1h 42 | kernel_event_counters_enabled: true 43 | libbpf_stats_enabled: true 44 | output_rule: true 45 | resource_utilization_enabled: true 46 | modern_bpf: 47 | cpus_for_each_syscall_buffer: 2 48 | output_timeout: 2000 49 | plugins: 50 | - init_config: null 51 | library_path: libk8saudit.so 52 | name: k8saudit 53 | open_params: http://:9765/k8s-audit 54 | - library_path: libcloudtrail.so 55 | name: cloudtrail 56 | - init_config: "" 57 | library_path: libjson.so 58 | name: json 59 | priority: debug 60 | program_output: 61 | enabled: false 62 | keep_alive: false 63 | program: 'jq ''{text: .output}'' | curl -d @- -X POST https://hooks.slack.com/services/XXX' 64 | rules_file: 65 | - /etc/r6security/falco-rules.yaml 66 | - /etc/falco/falco_rules.local.yaml 67 | - /etc/falco/rules.d 68 | stdout_output: 69 | enabled: true 70 | syscall_buf_size_preset: 4 71 | syscall_drop_failed_exit: false 72 | syscall_event_drops: 73 | actions: 74 | - log 75 | - alert 76 | max_burst: 1 77 | rate: 0.03333 78 | simulate_drops: false 79 | threshold: 0.1 80 | syscall_event_timeouts: 81 | max_consecutives: 1000 82 | syslog_output: 83 | enabled: true 84 | time_format_iso_8601: false 85 | watch_config_files: true 86 | webserver: 87 | enabled: true 88 | k8s_healthz_endpoint: /healthz 89 | listen_port: 8765 90 | ssl_certificate: /etc/falco/falco.pem 91 | ssl_enabled: false 92 | threadiness: 0 93 | -------------------------------------------------------------------------------- /pkg/controllers/register.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 
16 | */ 17 | 18 | package controllers 19 | 20 | import ( 21 | ctrl "sigs.k8s.io/controller-runtime" 22 | 23 | internalcontroller "github.com/r6security/phoenix/internal/controller" 24 | ) 25 | 26 | // RegisterCoreControllers registers all core Phoenix controllers with the manager. 27 | // This wrapper keeps controller implementations internal while exposing a public entrypoint. 28 | func RegisterCoreControllers(mgr ctrl.Manager) error { 29 | if err := (&internalcontroller.AdaptiveMovingTargetDefenseReconciler{ 30 | Client: mgr.GetClient(), 31 | Scheme: mgr.GetScheme(), 32 | }).SetupWithManager(mgr); err != nil { 33 | return err 34 | } 35 | 36 | if err := (&internalcontroller.PodReconciler{ 37 | Client: mgr.GetClient(), 38 | Scheme: mgr.GetScheme(), 39 | }).SetupWithManager(mgr); err != nil { 40 | return err 41 | } 42 | 43 | if err := (&internalcontroller.SecurityEventReconciler{ 44 | Client: mgr.GetClient(), 45 | Scheme: mgr.GetScheme(), 46 | }).SetupWithManager(mgr); err != nil { 47 | return err 48 | } 49 | 50 | return nil 51 | } 52 | 53 | // RegisterAMTDAndPodControllers registers only AMTD and Pod controllers. 54 | func RegisterAMTDAndPodControllers(mgr ctrl.Manager) error { 55 | if err := (&internalcontroller.AdaptiveMovingTargetDefenseReconciler{ 56 | Client: mgr.GetClient(), 57 | Scheme: mgr.GetScheme(), 58 | }).SetupWithManager(mgr); err != nil { 59 | return err 60 | } 61 | 62 | if err := (&internalcontroller.PodReconciler{ 63 | Client: mgr.GetClient(), 64 | Scheme: mgr.GetScheme(), 65 | }).SetupWithManager(mgr); err != nil { 66 | return err 67 | } 68 | return nil 69 | } 70 | 71 | 72 | -------------------------------------------------------------------------------- /internal/controller/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | package controller 19 | 20 | import ( 21 | "path/filepath" 22 | "testing" 23 | 24 | . "github.com/onsi/ginkgo/v2" 25 | . "github.com/onsi/gomega" 26 | 27 | "k8s.io/client-go/kubernetes/scheme" 28 | "k8s.io/client-go/rest" 29 | "sigs.k8s.io/controller-runtime/pkg/client" 30 | "sigs.k8s.io/controller-runtime/pkg/envtest" 31 | logf "sigs.k8s.io/controller-runtime/pkg/log" 32 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 33 | 34 | amtdv1beta1 "github.com/r6security/phoenix/api/v1beta1" 35 | //+kubebuilder:scaffold:imports 36 | ) 37 | 38 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 39 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
40 | 41 | var cfg *rest.Config 42 | var k8sClient client.Client 43 | var testEnv *envtest.Environment 44 | 45 | func TestAPIs(t *testing.T) { 46 | RegisterFailHandler(Fail) 47 | 48 | RunSpecs(t, "Controller Suite") 49 | } 50 | 51 | var _ = BeforeSuite(func() { 52 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 53 | 54 | By("bootstrapping test environment") 55 | testEnv = &envtest.Environment{ 56 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 57 | ErrorIfCRDPathMissing: true, 58 | } 59 | 60 | var err error 61 | // cfg is defined in this file globally. 62 | cfg, err = testEnv.Start() 63 | Expect(err).NotTo(HaveOccurred()) 64 | Expect(cfg).NotTo(BeNil()) 65 | 66 | err = amtdv1beta1.AddToScheme(scheme.Scheme) 67 | Expect(err).NotTo(HaveOccurred()) 68 | 69 | //+kubebuilder:scaffold:scheme 70 | 71 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 72 | Expect(err).NotTo(HaveOccurred()) 73 | Expect(k8sClient).NotTo(BeNil()) 74 | 75 | }) 76 | 77 | var _ = AfterSuite(func() { 78 | By("tearing down the test environment") 79 | err := testEnv.Stop() 80 | Expect(err).NotTo(HaveOccurred()) 81 | }) 82 | -------------------------------------------------------------------------------- /docs/examples/falco-based-app-restart.md: -------------------------------------------------------------------------------- 1 | # Phoenix demo: Falco based on-demand application restart 2 | 3 | This tutorial shows how to use Phoenix to restart a specific pod when a terminal is opened into it. For this, Phoenix relies on triggers (SecurityEvents) created by the Falco-integrator, which translates Falco events towards Phoenix. 4 | 5 | In this tutorial you will learn how to: 6 | 7 | - install Phoenix 8 | - install Falco 9 | - configure Falco 10 | - install Falco-integrator to be able to translate Falco notifications to SecurityEvents 11 | - configure Phoenix 12 | 13 | ## Phoenix installation 14 | 15 | kubectl -n moving-target-defense apply -f deploy/manifests/deploy-phoenix 16 | 17 | ## Falco installation 18 | 19 | helm repo add falcosecurity https://falcosecurity.github.io/charts 20 | helm repo update 21 | helm install falco falcosecurity/falco --namespace falco --create-namespace 22 | 23 | ## Configure Falco 24 | 25 | Load a configuration into Falco that fits this scenario: 26 | 27 | kubectl delete configmap -n falco falco 28 | kubectl create configmap -n falco falco --from-file deploy/manifests/config-falco/falco.yaml 29 | kubectl create configmap -n falco falco-rules --from-file deploy/manifests/config-falco/falco-rules.yaml 30 | kubectl patch -n falco daemonsets.apps falco --patch-file deploy/manifests/config-falco/falco-patch.yaml 31 | kubectl delete pods -n falco -l app.kubernetes.io/name=falco 32 | kubectl -n falco get pods 33 | 34 | ## Falco-integrator installation 35 | 36 | kubectl -n falco-integrator apply -f deploy/manifests/deploy-falco-integrator 37 | 38 | ## Phoenix configuration 39 | 40 | Before triggering the operator, let's install a demo application and check that we can open a terminal into the pod (this will be denied later, when we activate the configuration for the MTD operator to consider such a thing a security threat and terminate the pod immediately): 41 | 42 | ### Deploy demo-page application 43 | 44 | kubectl -n demo-page apply -f deploy/manifests/demo-page/demo-page-deployment.yaml 45 | 46 | Confirm that a terminal can be opened inside the pod: 47 | 48 | kubectl exec -it -n demo-page deployments/demo-page -c
nginx -- sh 49 | 50 | We can see that we are in the terminal, so let's exit from the pod: 51 | 52 | exit 53 | 54 | Now we can activate the MTD configuration to take action in case of terminal opening events: 55 | 56 | kubectl -n demo-page apply -f deploy/manifests/falco-integrator-delete-demo-amtd.yaml 57 | 58 | ### Trigger the operator 59 | 60 | To trigger a Falco event, let's open a terminal into a pod that is considered a security threat according to the current Falco configuration: 61 | 62 | kubectl exec -it -n demo-page deployments/demo-page -c nginx -- sh 63 | 64 | Watch the pods to see that the pod where we opened the terminal was deleted (which restarted the application); this is why the terminal was closed automatically: 65 | 66 | watch kubectl -n demo-page get pods 67 | -------------------------------------------------------------------------------- /api/v1beta1/securityevent_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * <http://www.mongodb.com/licensing/server-side-public-license>. 16 | */ 17 | 18 | package v1beta1 19 | 20 | import ( 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 25 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
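// For illustration, this is what a SecurityEvent manifest matching the schema
// below can look like (a hand-written sketch; all values are hypothetical and
// not taken from the samples in this repository):
//
//	apiVersion: amtd.r6security.com/v1beta1
//	kind: SecurityEvent
//	metadata:
//	  name: example-terminal-alert
//	spec:
//	  targets:
//	    - demo-page/demo-page-abc123
//	  rule:
//	    type: alert
//	    threatLevel: medium
//	    source: falco
//	  description: Terminal was opened inside the container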
26 | 27 | // SecurityEventSpec defines the desired state of SecurityEvent 28 | type SecurityEventSpec struct { 29 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 30 | // Important: Run "make" to regenerate code after modifying this file 31 | 32 | // +kubebuilder:validation:Required 33 | // Targets contains the list of affected pods, each item in the form of "namespace/name" or "/name" 34 | Targets []string `json:"targets"` 35 | 36 | // +kubebuilder:validation:Required 37 | Rule Rule `json:"rule"` 38 | 39 | // +kubebuilder:validation:Required 40 | // Description of the security threat 41 | Description string `json:"description"` 42 | } 43 | 44 | // SecurityEventStatus defines the observed state of SecurityEvent 45 | type SecurityEventStatus struct { 46 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 47 | // Important: Run "make" to regenerate code after modifying this file 48 | } 49 | 50 | // +kubebuilder:object:root=true 51 | // +kubebuilder:subresource:status 52 | // +kubebuilder:resource:scope=Cluster 53 | // +kubebuilder:printcolumn:name="Target",type=string,JSONPath=`.spec.targets[*]` 54 | // +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.rule.source` 55 | // +kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.rule.type` 56 | // +kubebuilder:printcolumn:name="Level",type=string,JSONPath=`.spec.rule.threatLevel` 57 | // +kubebuilder:printcolumn:name="Description",type=string,JSONPath=`.spec.description` 58 | // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 59 | // SecurityEvent is the Schema for the securityevents API 60 | type SecurityEvent struct { 61 | metav1.TypeMeta `json:",inline"` 62 | metav1.ObjectMeta `json:"metadata,omitempty"` 63 | 64 | Spec SecurityEventSpec `json:"spec,omitempty"` 65 | Status SecurityEventStatus `json:"status,omitempty"` 66 | } 67 | 68 | //+kubebuilder:object:root=true 69 | 70 | // SecurityEventList contains a list of SecurityEvent 71 | type SecurityEventList struct { 72 | metav1.TypeMeta `json:",inline"` 73 | metav1.ListMeta `json:"metadata,omitempty"` 74 | Items []SecurityEvent `json:"items"` 75 | } 76 | 77 | func init() { 78 | SchemeBuilder.Register(&SecurityEvent{}, &SecurityEventList{}) 79 | } 80 | -------------------------------------------------------------------------------- /docs/examples/falco-based-on-demand-app-quarantine.md: -------------------------------------------------------------------------------- 1 | # Phoenix demo: Falco based on-demand application quarantine 2 | 3 | This tutorial shows how to use Phoenix to put a specific pod into quarantine when a terminal is opened into it. For this, Phoenix relies on triggers (SecurityEvents) created by the Falco-integrator, which translates Falco events towards Phoenix. 4 | 5 | In this tutorial you will learn how to: 6 | 7 | - install Phoenix 8 | - install Falco 9 | - configure Falco 10 | - install Falco-integrator to be able to translate Falco notifications to SecurityEvents 11 | - configure Phoenix 12 | 13 | ## Prerequisite 14 | 15 | Since quarantine is enforced via NetworkPolicy resources, the CNI plugin of the Kubernetes cluster must support them. In the case of a managed K8s cluster, also make sure that network policies are enabled.
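Since the operator's RBAC (see `charts/phoenix/templates/rbac.yaml` in this repository) includes `create` on `networkpolicies`, a quarantine can be pictured as a deny-all policy selecting the affected pod. The snippet below is a minimal hand-written sketch of such an object, not the operator's actual implementation; the policy name and pod labels are hypothetical.

```go
// Hypothetical sketch of a deny-all quarantine NetworkPolicy created through
// controller-runtime. Listing both policy types with no ingress/egress rules
// isolates the selected pod in both directions.
package main

import (
	"context"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func main() {
	// The default scheme already knows the built-in networking/v1 types.
	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}

	quarantine := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "quarantine-demo-page", // hypothetical name
			Namespace: "demo-page",
		},
		Spec: networkingv1.NetworkPolicySpec{
			// Hypothetical label; in practice the operator targets the pod
			// named in the SecurityEvent.
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "demo-page"},
			},
			PolicyTypes: []networkingv1.PolicyType{
				networkingv1.PolicyTypeIngress,
				networkingv1.PolicyTypeEgress,
			},
		},
	}
	if err := c.Create(context.Background(), quarantine); err != nil {
		panic(err)
	}
}
```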
16 | 17 | ## Phoenix installation 18 | 19 | kubectl -n moving-target-defense apply -f deploy/manifests/deploy-phoenix 20 | 21 | ## Falco installation 22 | 23 | helm repo add falcosecurity https://falcosecurity.github.io/charts 24 | helm repo update 25 | helm install falco falcosecurity/falco --namespace falco --create-namespace 26 | 27 | ## Configure Falco 28 | 29 | Load a configuration into Falco that fits this scenario: 30 | 31 | kubectl delete configmap -n falco falco 32 | kubectl create configmap -n falco falco --from-file deploy/manifests/config-falco/falco.yaml 33 | kubectl create configmap -n falco falco-rules --from-file deploy/manifests/config-falco/falco-rules.yaml 34 | kubectl patch -n falco daemonsets.apps falco --patch-file deploy/manifests/config-falco/falco-patch.yaml 35 | kubectl delete pods -n falco -l app.kubernetes.io/name=falco 36 | kubectl -n falco get pods 37 | 38 | ## Falco-integrator installation 39 | 40 | kubectl -n falco-integrator apply -f deploy/manifests/deploy-falco-integrator 41 | 42 | ## Phoenix configuration 43 | 44 | Before triggering the operator, let's install a demo application and check that the network connection of the demo-page is working fine (this will be suspended later, when we define a rule so that opening a terminal results in putting the application pod into quarantine): 45 | 46 | ### Deploy demo-page application 47 | 48 | kubectl apply -n demo-page -f deploy/manifests/demo-page/demo-page-deployment.yaml 49 | 50 | Confirm that a terminal can be opened inside the pod: 51 | 52 | kubectl exec -it -n demo-page deployments/demo-page -c nginx -- sh 53 | 54 | We can see that google.com is reachable from the pod: 55 | 56 | curl google.com 57 | 58 | Let's exit: 59 | 60 | exit 61 | 62 | Now we can activate the MTD configuration to take action in case of terminal opening events: 63 | 64 | kubectl apply -n demo-page -f deploy/manifests/falco-integrator-quarantine-demo-amtd.yaml 65 | 66 | ### Trigger the operator 67 | 68 | To trigger a Falco event, let's open a terminal into a pod that is considered a security threat according to the current Falco configuration: 69 | 70 | kubectl exec -it -n demo-page deployments/demo-page -c nginx -- sh 71 | 72 | Notice that in this case the pod does not terminate the connection; however, there will be no access to any external resources, e.g. curl google.com, because of the quarantine. 73 | 74 | Try curl on google.com again (we should see no connection): 75 | 76 | curl google.com 77 | 78 | Let's exit from the pod and check the list of pods: 79 | 80 | exit 81 | -------------------------------------------------------------------------------- /docs/WHY.md: -------------------------------------------------------------------------------- 1 | ## What is Phoenix? 2 | 3 | Phoenix is a Kubernetes Operator that helps protect your applications running inside Kubernetes, based on the principles of Moving Target Defense, and actually takes it one step further to implement Automated Moving Target Defense (AMTD). 4 | 5 | ## What is Automated Moving Target Defense (AMTD)? 6 | 7 | In order to understand AMTD, let's first take a look at Moving Target Defense (MTD). MTD, as one of the game-changing themes, provides a new idea for improving cyberspace security. 8 | 9 | The concept of moving target defense was first proposed at the U.S. National Cyber Leap Year Summit in 2009.
In 2012, a report of the White House National Security Council explained the meaning of "moving target": systems that move in multiple dimensions to counter attacks and increase system resiliency. In 2014, the moving target defense concept was defined as follows by the Federal Cybersecurity Research and Development Program: 10 | 11 | Moving target defense enables us to create, analyze, evaluate, and deploy mechanisms and strategies that are diverse and that continually shift and change over time to increase complexity and cost for attackers, limit the exposure of vulnerabilities and opportunities for attack, and increase system resiliency. 12 | 13 | At its core, MTD incorporates four main elements: 14 | 15 | * Proactive cyber defense mechanisms 16 | * Automation to orchestrate movement or change in the attack surface 17 | * The use of deception technologies 18 | * The ability to execute intelligent (preplanned) change decisions 19 | 20 | Overall, MTD has the effect of reducing exposed attack surfaces by introducing strategic change, and it increases the cost of reconnaissance and malicious exploitation for the attacker. In other 21 | words, it’s about moving, changing, obfuscating or morphing various aspects of attack surfaces to thwart attacker activities and disrupt the cyber kill chain. 22 | 23 | You can find further details [here](https://www.hindawi.com/journals/scn/2018/3759626/). 24 | 25 | Although multiple markets and security domains are already using moving target defense (MTD) techniques, automation is the new, and disruptive, frontier. AMTD effectively mitigates many known threats and is likely to mitigate most zero-day exploits within a decade, rotating risks further to humans and business processes. 26 | 27 | ## Why care about Automated Moving Target Defense in your Kubernetes based operation platforms? 28 | 29 | After you have carefully tested your application source code with static code analyzers, created minimal container images, set up networking security measures, and configured user authentication and authorization, it can still happen that an attacker finds a way into your system. 30 | In the context of Kubernetes, AMTD provides a new dimension that you can add to your existing defense vectors. 31 | 32 | Phoenix brings the AMTD principles inside your cluster, watches for threats that 3rd party security analytics tools like Falco, KubeArmor, etc. notice, and reacts to these threats with an extendable list of actions. 33 | You can construct your very own strategy on: 34 | * what applications to watch 35 | * how to react to specific kinds of threats 36 | 37 | Due to the loosely coupled, extendable architecture of Phoenix you have great flexibility in designing the strategies you need (a rough sketch of how a strategy is matched to a security event follows at the end of this page). 38 | 39 | * [Getting Started](README.md#getting-started): Get started with Phoenix! 40 | * [Tutorials](README.md#tutorials): Check out the tutorials!
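To make the strategy idea above concrete, here is a small, hand-written illustration of how a strategy can be matched to a security event, modeled on the `getAction` helper in `internal/controller/pod_controller.go`. The comma-separated `key=value` annotation format and the concrete keys shown are assumptions based on that controller's comments, not a documented API.

```go
// Hand-written illustration of strategy matching: a pod carries strategy
// annotations whose comma-separated key=value pairs (minus "action") are
// compared against the incoming security event.
package main

import (
	"fmt"
	"strings"
)

// stringToMap parses "k1=v1,k2=v2" into a map, mirroring what the
// controller's helper of the same name is assumed to do.
func stringToMap(s string) map[string]string {
	m := map[string]string{}
	for _, kv := range strings.Split(s, ",") {
		if k, v, ok := strings.Cut(kv, "="); ok {
			m[k] = v
		}
	}
	return m
}

func main() {
	securityEvent := "type=terminal-access,threatLevel=high,source=falco" // hypothetical event
	strategy := "type=terminal-access,threatLevel=high,source=falco,action=destroy"

	strategyMap := stringToMap(strategy)
	action := strategyMap["action"]
	delete(strategyMap, "action") // compare only the matcher keys

	eventMap := stringToMap(securityEvent)
	match := len(eventMap) == len(strategyMap)
	for k, v := range eventMap {
		if strategyMap[k] != v {
			match = false
		}
	}
	if match {
		fmt.Println("matched strategy, action:", action) // -> destroy
	}
}
```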
41 | 42 | -------------------------------------------------------------------------------- /config/crd/bases/amtd.r6security.com_securityevents.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.16.0 7 | name: securityevents.amtd.r6security.com 8 | spec: 9 | group: amtd.r6security.com 10 | names: 11 | kind: SecurityEvent 12 | listKind: SecurityEventList 13 | plural: securityevents 14 | singular: securityevent 15 | scope: Cluster 16 | versions: 17 | - additionalPrinterColumns: 18 | - jsonPath: .spec.targets[*] 19 | name: Target 20 | type: string 21 | - jsonPath: .spec.rule.source 22 | name: Source 23 | type: string 24 | - jsonPath: .spec.rule.type 25 | name: Type 26 | type: string 27 | - jsonPath: .spec.rule.threatLevel 28 | name: Level 29 | type: string 30 | - jsonPath: .spec.description 31 | name: Description 32 | type: string 33 | - jsonPath: .metadata.creationTimestamp 34 | name: Age 35 | type: date 36 | name: v1beta1 37 | schema: 38 | openAPIV3Schema: 39 | description: SecurityEvent is the Schema for the securityevents API 40 | properties: 41 | apiVersion: 42 | description: |- 43 | APIVersion defines the versioned schema of this representation of an object. 44 | Servers should convert recognized schemas to the latest internal value, and 45 | may reject unrecognized values. 46 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 47 | type: string 48 | kind: 49 | description: |- 50 | Kind is a string value representing the REST resource this object represents. 51 | Servers may infer this from the endpoint the client submits requests to. 52 | Cannot be updated. 53 | In CamelCase. 54 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 55 | type: string 56 | metadata: 57 | type: object 58 | spec: 59 | description: SecurityEventSpec defines the desired state of SecurityEvent 60 | properties: 61 | description: 62 | description: Description of the security threat 63 | type: string 64 | rule: 65 | properties: 66 | source: 67 | description: Source field value of the SecurityEvent that arrives 68 | type: string 69 | threatLevel: 70 | description: ThreatLevel field value of the SecurityEvent that 71 | arrives 72 | type: string 73 | type: 74 | description: Type field value of the SecurityEvent that arrives 75 | type: string 76 | type: object 77 | targets: 78 | description: Targets contains the list of affected pods, each item 79 | in the form of "namespace/name" or "/name" 80 | items: 81 | type: string 82 | type: array 83 | required: 84 | - description 85 | - rule 86 | - targets 87 | type: object 88 | status: 89 | description: SecurityEventStatus defines the observed state of SecurityEvent 90 | type: object 91 | type: object 92 | served: true 93 | storage: true 94 | subresources: 95 | status: {} 96 | -------------------------------------------------------------------------------- /docs/INSTALL.md: -------------------------------------------------------------------------------- 1 | ## Deploy with Helm 2 | 3 | Using [Helm], the AMTD operator can be deployed with just a few commands. 
4 | 5 | ``` 6 | helm repo add r6security https://r6security.github.io/phoenix/ 7 | # with the default values 8 | helm install phoenix r6security/phoenix 9 | ``` 10 | 11 | ## Prerequisites 12 | 13 | In itself, Phoenix does not have any specific dependency regarding its core installation ([AMTD Operator](CONCEPTS.md#architecture) and [Time-based Trigger](CONCEPTS.md#timer-based-trigger-integration)). However, if you want to integrate it with 3rd party tools, e.g. [Falco](https://falco.org/), [KubeArmor](https://kubearmor.io/), etc., you need to have these tools installed and configured as well. 14 | For example, to provide timer-based pod restarts with Phoenix, no 3rd party tool is necessary. However, to provide on-demand pod restarts in reaction to specific security threats that such 3rd party tools can detect - like noticing that someone opened a terminal and modified a file - you need: 15 | * the specific 3rd party tool installed in your environment, configured to communicate the threats towards a specific Phoenix integration backend (this is basically just setting up a webhook) 16 | * the specific Phoenix integration backend that receives the threat information and translates it to a unified format for Phoenix 17 | 18 | By design, a specific backend exists for each 3rd party tool. 19 | 20 | For more details see the following pages: 21 | 22 | * [Falco integration](examples/falco-based-app-restart.md) 23 | * [KubeArmor integration (coming soon)](examples/kubearmor-based-app-restart.md) 24 | 25 | ## Installation 26 | 27 | You can deploy Phoenix by executing the following commands: 28 | 29 | ### Deploy Phoenix 30 | ``` 31 | kubectl apply -n moving-target-defense -f deploy/manifests/deploy-phoenix 32 | ``` 33 | 34 | ### Deploy Time-based Trigger 35 | ``` 36 | kubectl apply -n time-based-trigger -f deploy/manifests/deploy-time-based-trigger 37 | ``` 38 | 39 | ### Check that all pods are in a running state 40 | ``` 41 | kubectl -n time-based-trigger get pods 42 | kubectl -n moving-target-defense get pods 43 | ``` 44 | 45 | ## Set up a scheduled restart with the Time-based Trigger ("Hello World" example) 46 | 47 | Let's start a demo-page application that is restarted by Phoenix on a scheduled basis as a security measure. To do this, we use the Time-based Trigger, which creates security events that are handled by Phoenix. This setup consists of the following steps: 48 | 49 | 1. Deploy the demo-page application: 50 | 51 | ``` 52 | kubectl apply -n demo-page -f deploy/manifests/demo-page/demo-page-deployment.yaml 53 | kubectl -n demo-page wait --for=condition=ready pod --all 54 | ``` 55 | 56 | 2. Deploy an MTD configuration: 57 | 58 | ``` 59 | kubectl apply -n demo-page -f deploy/manifests/time-based-trigger-demo-amtd.yaml 60 | kubectl -n moving-target-defense get AdaptiveMovingTargetDefense 61 | ``` 62 | 63 | 3. Enable the time-based trigger backend for the demo-page deployment and schedule the restart every 30s: 64 | 65 | ``` 66 | kubectl patch -n demo-page deployments.apps demo-page -p '{"spec": {"template": {"metadata": {"annotations": {"time-based-trigger.amtd.r6security.com/schedule": "30s"}}}}}' 67 | kubectl patch -n demo-page deployments.apps demo-page -p '{"spec": {"template": {"metadata": {"annotations": {"time-based-trigger.amtd.r6security.com/enabled": "true"}}}}}' 68 | ``` 69 | 70 | 4. Watch the pods to see the restarts every 30 seconds: 71 | 72 | ``` 73 | watch kubectl -n demo-page get pods 74 | ``` 75 | 76 | ## Try it out!
77 | 78 | You can try Phoenix out at [Killercoda](https://killercoda.com/phoenix/scenario/test-demo) in a self paced tutorial where you can try out the following scenarios: 79 | * Scheduled restart with Time-based Trigger 80 | * On-demand restart with Falco-integrator 81 | * On-demand quarantine with Falco-integrator 82 | -------------------------------------------------------------------------------- /charts/phoenix/templates/crds/securityevents_crd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.16.0 7 | name: securityevents.amtd.r6security.com 8 | spec: 9 | group: amtd.r6security.com 10 | names: 11 | kind: SecurityEvent 12 | listKind: SecurityEventList 13 | plural: securityevents 14 | singular: securityevent 15 | scope: Cluster 16 | versions: 17 | - additionalPrinterColumns: 18 | - jsonPath: .spec.targets[*] 19 | name: Target 20 | type: string 21 | - jsonPath: .spec.rule.source 22 | name: Source 23 | type: string 24 | - jsonPath: .spec.rule.type 25 | name: Type 26 | type: string 27 | - jsonPath: .spec.rule.threatLevel 28 | name: Level 29 | type: string 30 | - jsonPath: .spec.description 31 | name: Description 32 | type: string 33 | - jsonPath: .metadata.creationTimestamp 34 | name: Age 35 | type: date 36 | name: v1beta1 37 | schema: 38 | openAPIV3Schema: 39 | description: SecurityEvent is the Schema for the securityevents API 40 | properties: 41 | apiVersion: 42 | description: |- 43 | APIVersion defines the versioned schema of this representation of an object. 44 | Servers should convert recognized schemas to the latest internal value, and 45 | may reject unrecognized values. 46 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 47 | type: string 48 | kind: 49 | description: |- 50 | Kind is a string value representing the REST resource this object represents. 51 | Servers may infer this from the endpoint the client submits requests to. 52 | Cannot be updated. 53 | In CamelCase. 
54 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 55 | type: string 56 | metadata: 57 | type: object 58 | spec: 59 | description: SecurityEventSpec defines the desired state of SecurityEvent 60 | properties: 61 | description: 62 | description: Description of the security threat 63 | type: string 64 | rule: 65 | properties: 66 | source: 67 | description: Source field value of the SecurityEvent that arrives 68 | type: string 69 | threatLevel: 70 | description: ThreatLevel field value of the SecurityEvent that 71 | arrives 72 | type: string 73 | type: 74 | description: Type field value of the SecurityEvent that arrives 75 | type: string 76 | type: object 77 | targets: 78 | description: Targets contains the list of affected pods, each item 79 | in the form of "namespace/name" or "/name" 80 | items: 81 | type: string 82 | type: array 83 | required: 84 | - description 85 | - rule 86 | - targets 87 | type: object 88 | status: 89 | description: SecurityEventStatus defines the observed state of SecurityEvent 90 | type: object 91 | type: object 92 | served: true 93 | storage: true 94 | subresources: 95 | status: {} 96 | --- -------------------------------------------------------------------------------- /docs/examples/kubearmor-based-app-restart.md: -------------------------------------------------------------------------------- 1 | # Phoenix demo: KubeArmor based on-demand application restart 2 | 3 | This tutorial shows how to use Phoenix to restart a specific pod when a corresponding KubeArmor alert is created. Since KubeArmor is capable of doing security enforcement on its own, this scenario would like to show an example of how Phoenix can be used to serve as another layer of security measures. In this simple example, KubeArmor will be configured to block any usage of package manager inside the pod, and in response to the alerts that KuberArmor generates, Phoenix immediately restarts the pod (the specific action is configurable). For this, Phoenix relies on triggers (SecurityEvents) that are created by the KubeArmor-integrator which translates KubeArmor alerts towards Phoenix. 4 | 5 | In this tutorial you will learn how to: 6 | 7 | - install Phoenix 8 | - install KubeArmor 9 | - create an example application and create a KubeArmor policy for it 10 | - install KubeArmor-integrator to be able translate KubeArmor notifications to SecurityEvents 11 | - configure Phoenix 12 | 13 | ## Phoenix installation 14 | 15 | kubectl -n moving-target-defense apply -f deploy/manifests/deploy-phoenix 16 | 17 | ## KubeArmor installation 18 | 19 | helm repo add kubearmor https://kubearmor.github.io/charts 20 | helm repo update kubearmor 21 | helm upgrade --install kubearmor-operator kubearmor/kubearmor-operator -n kubearmor --create-namespace 22 | kubectl apply -f https://raw.githubusercontent.com/kubearmor/KubeArmor/main/pkg/KubeArmorOperator/config/samples/sample-config.yml 23 | 24 | ### Deploy demo-page application and configure KubeArmor to deny execution of package management tools (apt/apt-get) by creating a policy 25 | 26 | Create the test application: 27 | 28 | kubectl -n demo-page apply -f deploy/manifests/demo-page/demo-page-deployment.yaml 29 | 30 | Create the policy: 31 | 32 | ``` 33 | cat </`. | Yes | 86 | | `rule` | `object` | Object with the following keys: `type`, `threatLevel`, `source`, where at least one key is mandatory. 
| Yes | 87 | | `description` | `string` | Description helps describe a SecurityEvent with more details | Yes | 88 | 89 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/r6security/phoenix 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | github.com/go-logr/logr v1.4.3 7 | github.com/onsi/ginkgo/v2 v2.25.2 8 | github.com/onsi/gomega v1.38.2 9 | k8s.io/api v0.34.0 10 | k8s.io/apimachinery v0.34.0 11 | k8s.io/client-go v0.34.0 12 | k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 13 | sigs.k8s.io/controller-runtime v0.21.0 14 | ) 15 | 16 | require ( 17 | cel.dev/expr v0.19.1 // indirect 18 | github.com/Masterminds/semver/v3 v3.4.0 // indirect 19 | github.com/antlr4-go/antlr/v4 v4.13.0 // indirect 20 | github.com/beorn7/perks v1.0.1 // indirect 21 | github.com/blang/semver/v4 v4.0.0 // indirect 22 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect 23 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 24 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 25 | github.com/emicklei/go-restful/v3 v3.12.2 // indirect 26 | github.com/evanphx/json-patch/v5 v5.9.11 // indirect 27 | github.com/felixge/httpsnoop v1.0.4 // indirect 28 | github.com/fsnotify/fsnotify v1.7.0 // indirect 29 | github.com/fxamacker/cbor/v2 v2.9.0 // indirect 30 | github.com/go-logr/stdr v1.2.2 // indirect 31 | github.com/go-logr/zapr v1.3.0 // indirect 32 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 33 | github.com/go-openapi/jsonreference v0.20.2 // indirect 34 | github.com/go-openapi/swag v0.23.0 // indirect 35 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 36 | github.com/gogo/protobuf v1.3.2 // indirect 37 | github.com/google/btree v1.1.3 // indirect 38 | github.com/google/cel-go v0.23.2 // indirect 39 | github.com/google/gnostic-models v0.7.0 // indirect 40 | github.com/google/go-cmp v0.7.0 // indirect 41 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 42 | github.com/google/uuid v1.6.0 // indirect 43 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect 44 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 45 | github.com/josharian/intern v1.0.0 // indirect 46 | github.com/json-iterator/go v1.1.12 // indirect 47 | github.com/mailru/easyjson v0.7.7 // indirect 48 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 49 | github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect 50 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 51 | github.com/pkg/errors v0.9.1 // indirect 52 | github.com/pmezard/go-difflib v1.0.0 // indirect 53 | github.com/prometheus/client_golang v1.22.0 // indirect 54 | github.com/prometheus/client_model v0.6.1 // indirect 55 | github.com/prometheus/common v0.62.0 // indirect 56 | github.com/prometheus/procfs v0.15.1 // indirect 57 | github.com/spf13/cobra v1.8.1 // indirect 58 | github.com/spf13/pflag v1.0.6 // indirect 59 | github.com/stoewer/go-strcase v1.3.0 // indirect 60 | github.com/x448/float16 v0.8.4 // indirect 61 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 62 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect 63 | go.opentelemetry.io/otel v1.33.0 // indirect 64 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect 65 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect 66 | go.opentelemetry.io/otel/metric 
v1.33.0 // indirect 67 | go.opentelemetry.io/otel/sdk v1.33.0 // indirect 68 | go.opentelemetry.io/otel/trace v1.33.0 // indirect 69 | go.opentelemetry.io/proto/otlp v1.4.0 // indirect 70 | go.uber.org/automaxprocs v1.6.0 // indirect 71 | go.uber.org/multierr v1.11.0 // indirect 72 | go.uber.org/zap v1.27.0 // indirect 73 | go.yaml.in/yaml/v2 v2.4.2 // indirect 74 | go.yaml.in/yaml/v3 v3.0.4 // indirect 75 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect 76 | golang.org/x/net v0.43.0 // indirect 77 | golang.org/x/oauth2 v0.27.0 // indirect 78 | golang.org/x/sync v0.16.0 // indirect 79 | golang.org/x/sys v0.35.0 // indirect 80 | golang.org/x/term v0.34.0 // indirect 81 | golang.org/x/text v0.28.0 // indirect 82 | golang.org/x/time v0.9.0 // indirect 83 | golang.org/x/tools v0.36.0 // indirect 84 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 85 | google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect 86 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect 87 | google.golang.org/grpc v1.68.1 // indirect 88 | google.golang.org/protobuf v1.36.7 // indirect 89 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 90 | gopkg.in/inf.v0 v0.9.1 // indirect 91 | gopkg.in/yaml.v3 v3.0.1 // indirect 92 | k8s.io/apiextensions-apiserver v0.33.0 // indirect 93 | k8s.io/apiserver v0.33.2 // indirect 94 | k8s.io/component-base v0.33.2 // indirect 95 | k8s.io/klog/v2 v2.130.1 // indirect 96 | k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect 97 | sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect 98 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect 99 | sigs.k8s.io/randfill v1.0.0 // indirect 100 | sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect 101 | sigs.k8s.io/yaml v1.6.0 // indirect 102 | ) 103 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #labels: 13 | #- includeSelectors: true 14 | # pairs: 15 | # someName: someValue 16 | 17 | resources: 18 | - ../crd 19 | - ../rbac 20 | - ../manager 21 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 22 | # crd/kustomization.yaml 23 | #- ../webhook 24 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 25 | #- ../certmanager 26 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 27 | #- ../prometheus 28 | 29 | patchesStrategicMerge: 30 | # Protect the /metrics endpoint by putting it behind auth. 31 | # If you want your controller-manager to expose the /metrics 32 | # endpoint w/o any authn/z, please comment the following line. 
33 | - manager_auth_proxy_patch.yaml 34 | 35 | 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 47 | # Uncomment the following replacements to add the cert-manager CA injection annotations 48 | #replacements: 49 | # - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs 50 | # kind: Certificate 51 | # group: cert-manager.io 52 | # version: v1 53 | # name: serving-cert # this name should match the one in certificate.yaml 54 | # fieldPath: .metadata.namespace # namespace of the certificate CR 55 | # targets: 56 | # - select: 57 | # kind: ValidatingWebhookConfiguration 58 | # fieldPaths: 59 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 60 | # options: 61 | # delimiter: '/' 62 | # index: 0 63 | # create: true 64 | # - select: 65 | # kind: MutatingWebhookConfiguration 66 | # fieldPaths: 67 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 68 | # options: 69 | # delimiter: '/' 70 | # index: 0 71 | # create: true 72 | # - select: 73 | # kind: CustomResourceDefinition 74 | # fieldPaths: 75 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 76 | # options: 77 | # delimiter: '/' 78 | # index: 0 79 | # create: true 80 | # - source: 81 | # kind: Certificate 82 | # group: cert-manager.io 83 | # version: v1 84 | # name: serving-cert # this name should match the one in certificate.yaml 85 | # fieldPath: .metadata.name 86 | # targets: 87 | # - select: 88 | # kind: ValidatingWebhookConfiguration 89 | # fieldPaths: 90 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 91 | # options: 92 | # delimiter: '/' 93 | # index: 1 94 | # create: true 95 | # - select: 96 | # kind: MutatingWebhookConfiguration 97 | # fieldPaths: 98 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 99 | # options: 100 | # delimiter: '/' 101 | # index: 1 102 | # create: true 103 | # - select: 104 | # kind: CustomResourceDefinition 105 | # fieldPaths: 106 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 107 | # options: 108 | # delimiter: '/' 109 | # index: 1 110 | # create: true 111 | # - source: # Add cert-manager annotation to the webhook Service 112 | # kind: Service 113 | # version: v1 114 | # name: webhook-service 115 | # fieldPath: .metadata.name # namespace of the service 116 | # targets: 117 | # - select: 118 | # kind: Certificate 119 | # group: cert-manager.io 120 | # version: v1 121 | # fieldPaths: 122 | # - .spec.dnsNames.0 123 | # - .spec.dnsNames.1 124 | # options: 125 | # delimiter: '.' 126 | # index: 0 127 | # create: true 128 | # - source: 129 | # kind: Service 130 | # version: v1 131 | # name: webhook-service 132 | # fieldPath: .metadata.namespace # namespace of the service 133 | # targets: 134 | # - select: 135 | # kind: Certificate 136 | # group: cert-manager.io 137 | # version: v1 138 | # fieldPaths: 139 | # - .spec.dnsNames.0 140 | # - .spec.dnsNames.1 141 | # options: 142 | # delimiter: '.' 
143 | # index: 1 144 | # create: true 145 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | package main 19 | 20 | import ( 21 | "flag" 22 | "os" 23 | 24 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 25 | // to ensure that exec-entrypoint and run can make use of them. 26 | _ "k8s.io/client-go/plugin/pkg/client/auth" 27 | 28 | "k8s.io/apimachinery/pkg/runtime" 29 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 30 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 31 | ctrl "sigs.k8s.io/controller-runtime" 32 | "sigs.k8s.io/controller-runtime/pkg/healthz" 33 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 34 | "sigs.k8s.io/controller-runtime/pkg/webhook" 35 | "sigs.k8s.io/controller-runtime/pkg/metrics/filters" 36 | 37 | amtdv1beta1 "github.com/r6security/phoenix/api/v1beta1" 38 | "github.com/r6security/phoenix/internal/controller" 39 | //+kubebuilder:scaffold:imports 40 | 41 | "sigs.k8s.io/controller-runtime/pkg/metrics/server" 42 | ) 43 | 44 | var ( 45 | scheme = runtime.NewScheme() 46 | setupLog = ctrl.Log.WithName("setup") 47 | ) 48 | 49 | func init() { 50 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 51 | 52 | utilruntime.Must(amtdv1beta1.AddToScheme(scheme)) 53 | //+kubebuilder:scaffold:scheme 54 | } 55 | 56 | func main() { 57 | var metricsAddr string 58 | var enableLeaderElection bool 59 | var probeAddr string 60 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 61 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 62 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 63 | "Enable leader election for controller manager. "+ 64 | "Enabling this will ensure there is only one active controller manager.") 65 | opts := zap.Options{ 66 | Development: true, 67 | } 68 | opts.BindFlags(flag.CommandLine) 69 | flag.Parse() 70 | 71 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 72 | 73 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 74 | Scheme: scheme, 75 | Metrics: server.Options{ 76 | BindAddress: metricsAddr, 77 | SecureServing: true, 78 | FilterProvider: filters.WithAuthenticationAndAuthorization, 79 | }, 80 | WebhookServer: webhook.NewServer(webhook.Options{Port: 9443}), 81 | HealthProbeBindAddress: probeAddr, 82 | LeaderElection: enableLeaderElection, 83 | LeaderElectionID: "548c5e7b.r6security.com", 84 | LeaderElectionReleaseOnCancel: true, // Enable to allow fast leader transitions 85 | // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily 86 | // when the Manager ends. This requires the binary to immediately end when the 87 | // Manager is stopped, otherwise, this setting is unsafe. 
Setting this significantly 88 | // speeds up voluntary leader transitions as the new leader don't have to wait 89 | // LeaseDuration time first. 90 | // 91 | // In the default scaffold provided, the program ends immediately after 92 | // the manager stops, so would be fine to enable this option. However, 93 | // if you are doing or is intended to do any operation such as perform cleanups 94 | // after the manager stops then its usage might be unsafe. 95 | // LeaderElectionReleaseOnCancel: true, 96 | }) 97 | if err != nil { 98 | setupLog.Error(err, "unable to start manager") 99 | os.Exit(1) 100 | } 101 | 102 | if err = (&controller.AdaptiveMovingTargetDefenseReconciler{ 103 | Client: mgr.GetClient(), 104 | Scheme: mgr.GetScheme(), 105 | }).SetupWithManager(mgr); err != nil { 106 | setupLog.Error(err, "unable to create controller", "controller", "AdaptiveMovingTargetDefense") 107 | os.Exit(1) 108 | } 109 | if err = (&controller.PodReconciler{ 110 | Client: mgr.GetClient(), 111 | Scheme: mgr.GetScheme(), 112 | }).SetupWithManager(mgr); err != nil { 113 | setupLog.Error(err, "unable to create controller", "controller", "Pod") 114 | os.Exit(1) 115 | } 116 | if err = (&controller.SecurityEventReconciler{ 117 | Client: mgr.GetClient(), 118 | Scheme: mgr.GetScheme(), 119 | }).SetupWithManager(mgr); err != nil { 120 | setupLog.Error(err, "unable to create controller", "controller", "SecurityEvent") 121 | os.Exit(1) 122 | } 123 | //+kubebuilder:scaffold:builder 124 | 125 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 126 | setupLog.Error(err, "unable to set up health check") 127 | os.Exit(1) 128 | } 129 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 130 | setupLog.Error(err, "unable to set up ready check") 131 | os.Exit(1) 132 | } 133 | 134 | setupLog.Info("starting manager") 135 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 136 | setupLog.Error(err, "problem running manager") 137 | os.Exit(1) 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /charts/phoenix/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: rbac 7 | app.kubernetes.io/created-by: operator 8 | app.kubernetes.io/instance: controller-manager-sa 9 | app.kubernetes.io/managed-by: kustomize 10 | app.kubernetes.io/name: serviceaccount 11 | app.kubernetes.io/part-of: operator 12 | name: operator-controller-manager 13 | namespace: {{ .Release.Namespace }} 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: Role 17 | metadata: 18 | labels: 19 | app.kubernetes.io/component: rbac 20 | app.kubernetes.io/created-by: operator 21 | app.kubernetes.io/instance: leader-election-role 22 | app.kubernetes.io/managed-by: kustomize 23 | app.kubernetes.io/name: role 24 | app.kubernetes.io/part-of: operator 25 | name: operator-leader-election-role 26 | namespace: {{ .Release.Namespace }} 27 | rules: 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | verbs: 33 | - get 34 | - list 35 | - watch 36 | - create 37 | - update 38 | - patch 39 | - delete 40 | - apiGroups: 41 | - coordination.k8s.io 42 | resources: 43 | - leases 44 | verbs: 45 | - get 46 | - list 47 | - watch 48 | - create 49 | - update 50 | - patch 51 | - delete 52 | - apiGroups: 53 | - "" 54 | resources: 55 | - events 56 | verbs: 57 | - create 58 | - patch 59 | --- 60 | apiVersion: 
rbac.authorization.k8s.io/v1 61 | kind: ClusterRole 62 | metadata: 63 | name: operator-manager-role 64 | rules: 65 | - apiGroups: 66 | - amtd.r6security.com 67 | resources: 68 | - adaptivemovingtargetdefenses 69 | - securityevents 70 | verbs: 71 | - create 72 | - delete 73 | - get 74 | - list 75 | - patch 76 | - update 77 | - watch 78 | - apiGroups: 79 | - amtd.r6security.com 80 | resources: 81 | - adaptivemovingtargetdefenses/finalizers 82 | - securityevents/finalizers 83 | verbs: 84 | - update 85 | - apiGroups: 86 | - amtd.r6security.com 87 | resources: 88 | - adaptivemovingtargetdefenses/status 89 | - securityevents/status 90 | verbs: 91 | - get 92 | - patch 93 | - update 94 | - apiGroups: 95 | - "" 96 | resources: 97 | - pods 98 | verbs: 99 | - create 100 | - delete 101 | - get 102 | - list 103 | - patch 104 | - update 105 | - watch 106 | - apiGroups: 107 | - "" 108 | resources: 109 | - pods/finalizers 110 | verbs: 111 | - update 112 | - apiGroups: 113 | - "" 114 | resources: 115 | - pods/status 116 | verbs: 117 | - get 118 | - patch 119 | - update 120 | - apiGroups: 121 | - networking.k8s.io 122 | resources: 123 | - networkpolicies 124 | verbs: 125 | - create 126 | - get 127 | - list 128 | - watch 129 | --- 130 | apiVersion: rbac.authorization.k8s.io/v1 131 | kind: ClusterRole 132 | metadata: 133 | labels: 134 | app.kubernetes.io/component: kube-rbac-proxy 135 | app.kubernetes.io/created-by: operator 136 | app.kubernetes.io/instance: metrics-reader 137 | app.kubernetes.io/managed-by: kustomize 138 | app.kubernetes.io/name: clusterrole 139 | app.kubernetes.io/part-of: operator 140 | name: operator-metrics-reader 141 | rules: 142 | - nonResourceURLs: 143 | - /metrics 144 | verbs: 145 | - get 146 | --- 147 | apiVersion: rbac.authorization.k8s.io/v1 148 | kind: ClusterRole 149 | metadata: 150 | labels: 151 | app.kubernetes.io/component: kube-rbac-proxy 152 | app.kubernetes.io/created-by: operator 153 | app.kubernetes.io/instance: proxy-role 154 | app.kubernetes.io/managed-by: kustomize 155 | app.kubernetes.io/name: clusterrole 156 | app.kubernetes.io/part-of: operator 157 | name: operator-proxy-role 158 | rules: 159 | - apiGroups: 160 | - authentication.k8s.io 161 | resources: 162 | - tokenreviews 163 | verbs: 164 | - create 165 | - apiGroups: 166 | - authorization.k8s.io 167 | resources: 168 | - subjectaccessreviews 169 | verbs: 170 | - create 171 | --- 172 | apiVersion: rbac.authorization.k8s.io/v1 173 | kind: RoleBinding 174 | metadata: 175 | labels: 176 | app.kubernetes.io/component: rbac 177 | app.kubernetes.io/created-by: operator 178 | app.kubernetes.io/instance: leader-election-rolebinding 179 | app.kubernetes.io/managed-by: kustomize 180 | app.kubernetes.io/name: rolebinding 181 | app.kubernetes.io/part-of: operator 182 | name: operator-leader-election-rolebinding 183 | namespace: {{ .Release.Namespace }} 184 | roleRef: 185 | apiGroup: rbac.authorization.k8s.io 186 | kind: Role 187 | name: operator-leader-election-role 188 | subjects: 189 | - kind: ServiceAccount 190 | name: operator-controller-manager 191 | namespace: {{ .Release.Namespace }} 192 | --- 193 | apiVersion: rbac.authorization.k8s.io/v1 194 | kind: ClusterRoleBinding 195 | metadata: 196 | labels: 197 | app.kubernetes.io/component: rbac 198 | app.kubernetes.io/created-by: operator 199 | app.kubernetes.io/instance: manager-rolebinding 200 | app.kubernetes.io/managed-by: kustomize 201 | app.kubernetes.io/name: clusterrolebinding 202 | app.kubernetes.io/part-of: operator 203 | name: operator-manager-rolebinding 204 | 
roleRef: 205 | apiGroup: rbac.authorization.k8s.io 206 | kind: ClusterRole 207 | name: operator-manager-role 208 | subjects: 209 | - kind: ServiceAccount 210 | name: operator-controller-manager 211 | namespace: {{ .Release.Namespace }} 212 | --- 213 | apiVersion: rbac.authorization.k8s.io/v1 214 | kind: ClusterRoleBinding 215 | metadata: 216 | labels: 217 | app.kubernetes.io/component: kube-rbac-proxy 218 | app.kubernetes.io/created-by: operator 219 | app.kubernetes.io/instance: proxy-rolebinding 220 | app.kubernetes.io/managed-by: kustomize 221 | app.kubernetes.io/name: clusterrolebinding 222 | app.kubernetes.io/part-of: operator 223 | name: operator-proxy-rolebinding 224 | roleRef: 225 | apiGroup: rbac.authorization.k8s.io 226 | kind: ClusterRole 227 | name: operator-proxy-role 228 | subjects: 229 | - kind: ServiceAccount 230 | name: operator-controller-manager 231 | namespace: {{ .Release.Namespace }} 232 | --- -------------------------------------------------------------------------------- /internal/controller/pod_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | package controller 19 | 20 | import ( 21 | "context" 22 | "reflect" 23 | "strings" 24 | 25 | //corev1 "k8s.io/api/core/v1" 26 | corev1 "k8s.io/api/core/v1" 27 | "k8s.io/apimachinery/pkg/runtime" 28 | ctrl "sigs.k8s.io/controller-runtime" 29 | "sigs.k8s.io/controller-runtime/pkg/client" 30 | ) 31 | 32 | // PodReconciler reconciles a Pod object 33 | type PodReconciler struct { 34 | client.Client 35 | Scheme *runtime.Scheme 36 | } 37 | 38 | //+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete 39 | //+kubebuilder:rbac:groups=core,resources=pods/status,verbs=get;update;patch 40 | //+kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=update 41 | 42 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 43 | // move the current state of the cluster closer to the desired state. 44 | // TODO(user): Modify the Reconcile function to compare the state specified by 45 | // the Pod object against the actual cluster state, and then 46 | // perform operations to make the cluster state reflect the state specified by 47 | // the user. 48 | // 49 | // For more details, check Reconcile and its Result here: 50 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile 51 | func (r *PodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 52 | //log := log.FromContext(ctx) 53 | /* 54 | // TODO(user): your logic here 55 | // Start by declaring the custom resource to be type "Pod" 56 | pod := &corev1.Pod{} 57 | 58 | // Then retrieve from the cluster the resource that triggered this reconciliation. 59 | // Store these contents into an object used throughout reconciliation. 
60 | err := r.Client.Get(ctx, req.NamespacedName, pod) 61 | if err != nil { 62 | if errors.IsNotFound(err) { 63 | // If the resource is not found, that is OK. It just means the desired state is to 64 | // not have any resources for this Pod but no delete is required. 65 | log.Info(fmt.Sprintf(`Pod "%s" does not exist (it was probably deleted); that's OK, no action is required`, req.Name)) 66 | return ctrl.Result{}, nil 67 | } else { 68 | // some other error happened 69 | log.Error(err, fmt.Sprintf(`Failed to retrieve pod resource "%s": %s`, req.Name, err.Error())) 70 | return ctrl.Result{}, err 71 | } 72 | } 73 | 74 | if pod.ObjectMeta.Annotations == nil { 75 | // no scheduled return is needed here, since whenever anything modifies a pod (with annotations) Reconcile will catch it 76 | return ctrl.Result{}, nil 77 | } 78 | 79 | // Does the pod have a SecurityEvent on it? 80 | if securityEvent, found := pod.ObjectMeta.Annotations[AMTD_SECURITY_EVENT]; found { 81 | 82 | // Filter annotations to find the best action for the security event based on strategies 83 | action := getAction(pod.ObjectMeta.Annotations, securityEvent) 84 | 85 | switch action { 86 | case "destroy": 87 | // Try to delete pod 88 | podErr := r.Client.Delete(ctx, pod) 89 | 90 | // Success for this delete is either: 91 | // 1. the delete is successful without error 92 | // 2. the resource already doesn't exist so delete can't take action 93 | if podErr != nil && !errors.IsNotFound(podErr) { 94 | // If any other error occurs, log it 95 | log.Error(podErr, fmt.Sprintf(`Failed to delete pod "%s"`, req.Name)) 96 | } 97 | log.Info(fmt.Sprintf(`Pod: "%s" was successfully deleted with ACTION: destroy`, pod.Name)) 98 | default: 99 | log.Info(fmt.Sprintf(`ACTION: %s -> POD: %s - NOT IMPLEMENTED YET`, action, pod.Name)) 100 | } 101 | } 102 | 103 | // no scheduled return is needed here, since whenever anything modifies a pod (with annotations) Reconcile will catch it 104 | // return ctrl.Result{RequeueAfter: 10 * time.Second}, nil 105 | // TODO: shall we schedule it again if something went wrong? (Should it be handled above?) 106 | */ 107 | return ctrl.Result{}, nil 108 | } 109 | 110 | // SetupWithManager sets up the controller with the Manager. 111 | func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error { 112 | return ctrl.NewControllerManagedBy(mgr). 113 | For(&corev1.Pod{}). 114 | Complete(r) 115 | } 116 | 117 | func getAction(podAnnotations map[string]string, securityEvent string) string { 118 | // Check for the proper strategy to match and action to execute 119 | securityEventMap := stringToMap(securityEvent) 120 | 121 | action := "" 122 | for key, value := range podAnnotations { 123 | // strategy annotation is found 124 | if strings.HasPrefix(key, AMTD_STRATEGY_BASE) { 125 | 126 | // prepare strategy and action for comparison with the SecurityEvent 127 | strategyMap := stringToMap(value) 128 | actionCandidate := strategyMap["action"] 129 | // remove action to be easily comparable with the SecurityEvent (it has no "action" key) 130 | delete(strategyMap, "action") 131 | 132 | // check for a custom strategy (e.g. not default type and action) 133 | if reflect.DeepEqual(securityEventMap, strategyMap) { 134 | action = actionCandidate 135 | return action 136 | } 137 | 138 | // check for a default strategy (type=...,action=...)
139 | if strategyMap["type"] == "default" { 140 | action = actionCandidate 141 | } 142 | } 143 | } 144 | 145 | return action 146 | } 147 | 148 | func isSecurityEventMatchStrategy(securityEvent string, strategy string) (bool, string) { 149 | securityEventMap := stringToMap(securityEvent) 150 | strategyMap := stringToMap(strategy) 151 | 152 | if reflect.DeepEqual(securityEventMap, strategyMap) { 153 | return true, strategyMap["action"] 154 | } 155 | 156 | return false, "" 157 | } 158 | 159 | func stringToMap(text string) map[string]string { 160 | entries := strings.Split(text, ",") 161 | 162 | m := make(map[string]string) 163 | for _, e := range entries { 164 | parts := strings.SplitN(e, "=", 2) 165 | // skip malformed entries that contain no "=" to avoid an index-out-of-range panic 166 | if len(parts) != 2 { 167 | continue 168 | } 169 | m[parts[0]] = parts[1] 170 | } 171 | 172 | return m 173 | } 174 | -------------------------------------------------------------------------------- /docs/CONCEPTS.md: -------------------------------------------------------------------------------- 1 | ## Architecture 2 | 3 | The diagram below illustrates the distinct components of Phoenix. 4 | 5 | ![Phoenix Architecture](img/architecture.jpg) 6 | 7 | Phoenix is implemented as a Kubernetes Operator with its own specific Custom Resource Definitions (SecurityEvent and AdaptiveMovingTargetDefense). In general, Phoenix can be configured via AdaptiveMovingTargetDefense resources to watch specific resources in the cluster (as of now only pods) and, in the case of security threats, which are reported via SecurityEvent resources, execute specific actions to prevent attackers from exploiting vulnerabilities. 8 | 9 | Each SecurityEvent contains a collection of key-value pairs (e.g., threatLevel=warning, source=KSOC) that describe a specific threat and the identifier(s) of the threatened resource(s). 10 | Phoenix can be configured with rule-action pairs where the rule part is a collection of key-value pairs (e.g., threatLevel=warning, source=KSOC) that are compared against the SecurityEvent key-value pairs. Phoenix executes the action that belongs to the best-matching rule-action pair (i.e., the most exact key-value match). 11 | The collection of rule-action pairs is called a `strategy` in AdaptiveMovingTargetDefense; this way, each AdaptiveMovingTargetDefense resource defines a set of resources to watch and a strategy based on which Phoenix should react. 12 | 13 | The SecurityEvents typically originate from 3rd party security analytics tools (e.g., Falco, KubeArmor, KSOC) via Integration Backends. Since the 3rd party security analytics tools are independent of Phoenix, they know nothing about SecurityEvents, nor do they necessarily run on Kubernetes; hence creating SecurityEvents is the responsibility of the Integration Backends. 14 | An Integration Backend is (as of now) a webserver that is prepared to receive notifications from a 3rd party security analytics tool via a webhook and to translate the threat information towards Phoenix by creating a SecurityEvent resource inside the cluster. 15 | Each Integration Backend is specifically created for a concrete tool (e.g., Falco-integrator, KSOC-integrator) and runs inside the Kubernetes cluster, so integrating Phoenix with these tools requires no modification on their side; only the proper notification method and the Integration Backend - as the receiving endpoint - need to be configured, making the process transparent from their viewpoint.
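To make the translation step concrete, the following minimal sketch shows how an Integration Backend might create a SecurityEvent with the controller-runtime client. It is a sketch under stated assumptions: the `Rule` field names (`Type`, `ThreatLevel`, `Source`), the target format, and the namespaced scope of SecurityEvent are illustrative guesses, not the authoritative schema; see `api/v1beta1/securityevent_types.go` and `config/samples/amtd_v1beta1_securityevent.yaml` for the real definitions.

```go
// Hypothetical sketch: emitting a SecurityEvent from an Integration Backend.
// The Rule field names below are assumptions for illustration only.
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	amtdv1beta1 "github.com/r6security/phoenix/api/v1beta1"
)

func main() {
	// Register the Phoenix API types so the client can serialize them.
	scheme := runtime.NewScheme()
	if err := amtdv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// Translate a 3rd party alert into a SecurityEvent resource.
	ev := &amtdv1beta1.SecurityEvent{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "falco-demo-alert",
			Namespace: "default", // assumed to be namespaced
		},
		Spec: amtdv1beta1.SecurityEventSpec{
			// Identifier(s) of the threatened pod(s) (hypothetical value).
			Targets: []string{"default/demo-page-58b7f8f6c9-abcde"},
			// Key-value pairs describing the threat (illustrative values).
			Rule: amtdv1beta1.Rule{
				Type:        "falco",
				ThreatLevel: "warning",
				Source:      "Falco",
			},
		},
	}
	if err := c.Create(context.Background(), ev); err != nil {
		panic(err)
	}
}
```

Phoenix then compares the event's key-value pairs against the rule part of each strategy entry and executes the action of the best-matching pair.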
16 | 17 | Every time one would like to introduce a new 3rd party security analytics tool integration for Phoenix, the only task is to implement a new Integration Backend that is capable of translating the threat notifications of that tool to SecurityEvents. 18 | 19 | Phoenix itself is written in Go using the [Kubebuilder](https://book.kubebuilder.io/) framework and consists of multiple controllers (AMTD, ActionExecutor) that ensure that, for any relevant object, the actual state of the world matches the desired state in the object. 20 | 21 | ## Integration 22 | 23 | ### Configuring available 3rd party security analytics tools 24 | 25 | If an Integration Backend already exists for a specific tool, two steps are needed to set it up: i) deploy the specific backend and ii) configure the tool. 26 | 27 | #### Falco integration 28 | 29 | ##### 1. Deploying the Falco Integration Backend: 30 | 31 | See the detailed documentation [here](https://github.com/r6security/phoenix/blob/main/docs/examples/falco-based-app-restart.md#falco-integrator-installation) 32 | 33 | ##### 2. Configuring Falco: 34 | 35 | See the detailed documentation [here](https://github.com/falcosecurity/falco) 36 | 37 | #### KubeArmor integration 38 | 39 | ##### 1. Deploying the KubeArmor Integration Backend: 40 | 41 | See the detailed documentation [here](https://github.com/r6security/phoenix/blob/main/docs/examples/kubearmor-based-app-restart.md#kubearmor-integrator-installation) 42 | 43 | ##### 2. Configuring KubeArmor: 44 | 45 | See the detailed documentation [here](https://docs.kubearmor.io/kubearmor/documentation/security_policy_specification) 46 | 47 | #### Timer-based Trigger integration 48 | 49 | The Timer-based Trigger is a special part of the architecture as its main purpose is to provide periodic or scheduled triggers for Phoenix in the form of SecurityEvent CRDs. This feature can be used for multiple purposes: 50 | - Execute actions with the phoenix operator periodically or in a scheduled manner, e.g., restart a service hourly to disrupt any undetected attack. 51 | - Update the SecurityEvent content and mimic events from another tool 52 | - Use a periodic trigger and an alerting system to check that the phoenix operator is working 53 | - Debug/test the phoenix operator or new alert rules. 54 | 55 | Architecture-wise, the special aspect of the Timer-based Trigger is that it can create SecurityEvents without an Integration Backend. The reason for this is that the Timer-based Trigger is not an independent, standalone tool: without Phoenix it has no purpose; however, for simplicity it made sense to give it its own code base. 56 | 57 | ##### Deployment and usage: 58 | 59 | See the detailed documentation [here](https://github.com/r6security/time-based-trigger#usage) 60 | 61 | ### Extending Phoenix 62 | 63 | The main architectural design principle for Phoenix is extendability. At the heart of Phoenix are the Actions that are executed in response to incoming threat detections, i.e., SecurityEvents. Consequently, when one would like to add new capabilities, there are two approaches: 64 | 65 | #### 1. Extending the available set of 3rd party security analytics tools 66 | 67 | To introduce the threat detection capabilities of a new 3rd party security analytics tool, a new Integration Backend is needed that is capable of translating the notifications of the specific tool and of creating SecurityEvents inside the Kubernetes cluster.
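As a rough starting point, such an Integration Backend can be as small as an HTTP server that accepts the tool's webhook and performs the translation. The skeleton below is a hypothetical sketch: the payload shape (`rule`, `priority`, `pod_name`) is an assumption, since every tool defines its own webhook format, and the SecurityEvent creation step would follow the client sketch shown earlier in this document.

```go
// Hypothetical skeleton of a new Integration Backend: an HTTP webhook
// receiver that translates a (simplified, assumed) alert payload into
// a SecurityEvent.
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// alert is an assumed, minimal payload shape; a real tool (Falco,
// KubeArmor, ...) defines its own webhook format.
type alert struct {
	Rule     string `json:"rule"`
	Priority string `json:"priority"`
	PodName  string `json:"pod_name"`
}

func main() {
	http.HandleFunc("/webhook", func(w http.ResponseWriter, r *http.Request) {
		var a alert
		if err := json.NewDecoder(r.Body).Decode(&a); err != nil {
			http.Error(w, "bad payload", http.StatusBadRequest)
			return
		}
		// Map the tool-specific alert to a SecurityEvent and create it in
		// the cluster (see the earlier client sketch).
		log.Printf("received alert %q (priority %s) for pod %s", a.Rule, a.Priority, a.PodName)
		w.WriteHeader(http.StatusAccepted)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Deploying such a receiver inside the cluster and pointing the tool's webhook at it is all that is required on the tool's side.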
68 | 69 | Since the main integration point - or interface, so to speak - between the world and Phoenix is the SecurityEvent, it is crucial that the SecurityEvent CRDs of an Integration Backend be compatible with Phoenix; otherwise the integration breaks. 70 | 71 | For this reason our goal is to keep the SecurityEvent CRD backward compatible across consecutive releases and to change it very carefully, thus enabling a loose coupling between the Integration Backend implementations and Phoenix. 72 | 73 | #### 2. Extending the available set of Actions 74 | 75 | To introduce new behaviors in threat handling, new Actions can be created in Phoenix. Once the Action is available in a release, it can simply be assigned to any threat described in the `strategy` field of an AdaptiveMovingTargetDefense resource. 76 | 77 | Currently, the only way to add an Action is by contributing to the source code of Phoenix via a pull request. 78 | 79 | ## Actions 80 | 81 | As of now the following Actions are available in Phoenix: 82 | 83 | ### Delete 84 | 85 | **Description:** Delete the Pod(s) listed in the `target` field of a SecurityEvent. 86 | 87 | **Scope:** Pod 88 | 89 | ### Quarantine 90 | 91 | **Description:** Block all ingress and egress traffic of the Pod(s) listed in the `target` field of a SecurityEvent. 92 | 93 | **Scope:** Pod 94 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Image URL to use for all building/pushing image targets 2 | IMG ?= phoenixop/amtd-operator:0.2 3 | # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 4 | ENVTEST_K8S_VERSION = 1.30.4 5 | 6 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 7 | ifeq (,$(shell go env GOBIN)) 8 | GOBIN=$(shell go env GOPATH)/bin 9 | else 10 | GOBIN=$(shell go env GOBIN) 11 | endif 12 | 13 | # Setting SHELL to bash allows bash commands to be executed by recipes. 14 | # Options are set to exit when a recipe line exits non-zero or a piped command fails. 15 | SHELL = /usr/bin/env bash -o pipefail 16 | .SHELLFLAGS = -ec 17 | 18 | .PHONY: all 19 | all: build 20 | 21 | ##@ General 22 | 23 | # The help target prints out all targets with their descriptions organized 24 | # beneath their categories. The categories are represented by '##@' and the 25 | # target descriptions by '##'. The awk command is responsible for reading the 26 | # entire set of makefiles included in this invocation, looking for lines of the 27 | # file as xyz: ## something, and then pretty-formats the target and help. Then, 28 | # if there's a line with ##@ something, that gets pretty-printed as a category. 29 | # More info on the usage of ANSI control characters for terminal formatting: 30 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 31 | # More info on the awk command: 32 | # http://linuxcommand.org/lc3_adv_awk.php 33 | 34 | .PHONY: help 35 | help: ## Display this help. 36 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 37 | 38 | ##@ Development 39 | 40 | .PHONY: manifests 41 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 42 | $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..."
output:crd:artifacts:config=config/crd/bases 43 | 44 | .PHONY: generate 45 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 46 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 47 | 48 | .PHONY: fmt 49 | fmt: ## Run go fmt against code. 50 | go fmt ./... 51 | 52 | .PHONY: vet 53 | vet: ## Run go vet against code. 54 | go vet ./... 55 | 56 | .PHONY: test 57 | test: manifests generate fmt vet envtest ## Run tests. 58 | KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out 59 | 60 | ##@ Build 61 | 62 | .PHONY: build 63 | build: manifests generate fmt vet ## Build manager binary. 64 | go build -o bin/manager cmd/main.go 65 | 66 | .PHONY: run 67 | run: manifests generate fmt vet ## Run a controller from your host. 68 | go run ./cmd/main.go 69 | 70 | # If you wish to build the manager image targeting other platforms, you can use the --platform flag. 71 | # (i.e. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it. 72 | # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 73 | .PHONY: docker-build 74 | docker-build: test ## Build docker image with the manager. 75 | docker build -t ${IMG} . 76 | 77 | .PHONY: docker-push 78 | docker-push: ## Push docker image with the manager. 79 | docker push ${IMG} 80 | 81 | # PLATFORMS defines the target platforms for the manager image to be built for, providing support for multiple 82 | # architectures. (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to: 83 | # - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ 84 | # - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 85 | # - be able to push the image to your registry (i.e. if you do not provide a valid value via IMG then the export will fail) 86 | # To properly provide solutions that support more than one platform, you should use this option. 87 | PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le 88 | .PHONY: docker-buildx 89 | docker-buildx: test ## Build and push docker image for the manager for cross-platform support 90 | # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile 91 | sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross 92 | - docker buildx create --name project-v3-builder 93 | docker buildx use project-v3-builder 94 | - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . 95 | - docker buildx rm project-v3-builder 96 | rm Dockerfile.cross 97 | 98 | ##@ Deployment 99 | 100 | ifndef ignore-not-found 101 | ignore-not-found = false 102 | endif 103 | 104 | .PHONY: install 105 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 106 | $(KUSTOMIZE) build config/crd | kubectl apply -f - 107 | 108 | .PHONY: uninstall 109 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
110 | $(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f - 111 | 112 | .PHONY: deploy 113 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 114 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 115 | $(KUSTOMIZE) build config/default | kubectl apply -f - 116 | 117 | .PHONY: helm 118 | helm: manifests kustomize ## Generate a combined manifest (operator-helm-gen.yaml) from the default kustomize config. 119 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 120 | $(KUSTOMIZE) build config/default > operator-helm-gen.yaml 121 | 122 | .PHONY: undeploy 123 | undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 124 | $(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f - 125 | 126 | ##@ Build Dependencies 127 | 128 | ## Location to install dependencies to 129 | LOCALBIN ?= $(shell pwd)/bin 130 | $(LOCALBIN): 131 | mkdir -p $(LOCALBIN) 132 | 133 | ## Tool Binaries 134 | KUSTOMIZE ?= $(LOCALBIN)/kustomize 135 | CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen 136 | ENVTEST ?= $(LOCALBIN)/setup-envtest 137 | 138 | ## Tool Versions 139 | KUSTOMIZE_VERSION ?= v5.0.0 140 | CONTROLLER_TOOLS_VERSION ?= v0.16.0 141 | 142 | KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" 143 | .PHONY: kustomize 144 | kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. 145 | $(KUSTOMIZE): $(LOCALBIN) 146 | @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ 147 | echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ 148 | rm -rf $(LOCALBIN)/kustomize; \ 149 | fi 150 | test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) --output install_kustomize.sh && bash install_kustomize.sh $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); rm install_kustomize.sh; } 151 | 152 | .PHONY: controller-gen 153 | controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. 154 | $(CONTROLLER_GEN): $(LOCALBIN) 155 | test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ 156 | GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) 157 | 158 | .PHONY: envtest 159 | envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. 160 | $(ENVTEST): $(LOCALBIN) 161 | test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest -------------------------------------------------------------------------------- /internal/controller/adaptivemovingtargetdefense_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc.
7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | package controller 19 | 20 | import ( 21 | "context" 22 | "encoding/json" 23 | "fmt" 24 | "reflect" 25 | "strconv" 26 | "time" 27 | 28 | "github.com/go-logr/logr" 29 | corev1 "k8s.io/api/core/v1" 30 | "k8s.io/apimachinery/pkg/api/errors" 31 | "k8s.io/apimachinery/pkg/labels" 32 | "k8s.io/apimachinery/pkg/runtime" 33 | "k8s.io/apimachinery/pkg/types" 34 | ctrl "sigs.k8s.io/controller-runtime" 35 | "sigs.k8s.io/controller-runtime/pkg/client" 36 | "sigs.k8s.io/controller-runtime/pkg/log" 37 | 38 | amtdv1beta1 "github.com/r6security/phoenix/api/v1beta1" 39 | ) 40 | 41 | // AdaptiveMovingTargetDefenseReconciler reconciles a AdaptiveMovingTargetDefense object 42 | type AdaptiveMovingTargetDefenseReconciler struct { 43 | client.Client 44 | Scheme *runtime.Scheme 45 | } 46 | 47 | //+kubebuilder:rbac:groups=amtd.r6security.com,resources=adaptivemovingtargetdefenses,verbs=get;list;watch;create;update;patch;delete 48 | //+kubebuilder:rbac:groups=amtd.r6security.com,resources=adaptivemovingtargetdefenses/status,verbs=get;update;patch 49 | //+kubebuilder:rbac:groups=amtd.r6security.com,resources=adaptivemovingtargetdefenses/finalizers,verbs=update 50 | 51 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 52 | // move the current state of the cluster closer to the desired state. 53 | // TODO(user): Modify the Reconcile function to compare the state specified by 54 | // the AdaptiveMovingTargetDefense object against the actual cluster state, and then 55 | // perform operations to make the cluster state reflect the state specified by 56 | // the user. 57 | // 58 | // For more details, check Reconcile and its Result here: 59 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile 60 | func (r *AdaptiveMovingTargetDefenseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 61 | log := log.FromContext(ctx) 62 | 63 | // Start by declaring the custom resource to be type "AdaptiveMovingTargetDefense" 64 | AMTD := &amtdv1beta1.AdaptiveMovingTargetDefense{} 65 | 66 | // Then retrieve from the cluster the resource that triggered this reconciliation. 67 | // Store these contents into an object used throughout reconciliation. 
68 | err := r.Client.Get(ctx, req.NamespacedName, AMTD) 69 | if err != nil { 70 | if errors.IsNotFound(err) { 71 | log.Info(fmt.Sprintf(`Custom resource for AdaptiveMovingTargetDefense "%s" does not exist, removing annotations from pods`, req.Namespace+"/"+req.Name)) 72 | // Get pods based on PodSelector 73 | podList := &corev1.PodList{} 74 | listOptions := &client.ListOptions{Namespace: req.Namespace} 75 | if err = r.List(ctx, podList, listOptions); err != nil { 76 | log.Error(err, fmt.Sprintf(`Failed to retrieve pods: "%s"`, err.Error())) 77 | return ctrl.Result{}, err 78 | } 79 | 80 | for _, pod := range podList.Items { 81 | // Check whether the pod is already an AMTD "member"; if so AMTD_MANAGED_TIME is not changed 82 | if _, found := pod.ObjectMeta.Annotations[AMTD_MANAGED_BY]; found { 83 | 84 | removeAMTDAnnotationFromPod(pod, log, req.Namespace, req.Name) 85 | 86 | // Try to apply this patch; if it fails, return the failure 87 | err = r.Client.Update(ctx, &pod) 88 | if err != nil { 89 | log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error())) 90 | // this makes the controller log the error and ignore this AMTD resource in the future (at least until it changes) 91 | return ctrl.Result{}, err 92 | } 93 | 94 | log.Info(fmt.Sprintf(`Pod: "%s" was successfully updated with annotations`, pod.Name)) 95 | } 96 | } 97 | return ctrl.Result{}, nil 98 | } else { 99 | // some other error happened 100 | log.Error(err, fmt.Sprintf(`Failed to retrieve custom resource "%s": %s`, req.Name, err.Error())) 101 | return ctrl.Result{}, err 102 | } 103 | } 104 | 105 | // Get pods based on PodSelector 106 | podSelectorSet := labels.SelectorFromSet(AMTD.Spec.PodSelector) 107 | podList := &corev1.PodList{} 108 | listOptions := &client.ListOptions{Namespace: AMTD.Namespace, LabelSelector: podSelectorSet} 109 | if err = r.List(ctx, podList, listOptions); err != nil { 110 | log.Error(err, fmt.Sprintf(`Failed to retrieve pods: "%s"`, err.Error())) 111 | return ctrl.Result{}, err 112 | } 113 | 114 | // Annotate pods 115 | for _, pod := range podList.Items { 116 | 117 | var amtdManageInfoList []AMTDManageInfo 118 | 119 | // Check whether the pod is already an AMTD "member"; if so AMTD_MANAGED_TIME is not changed 120 | if _, found := pod.ObjectMeta.Annotations[AMTD_MANAGED_BY]; !found { 121 | // if no annotations are set so far, initialize Annotations first 122 | if pod.ObjectMeta.Annotations == nil { 123 | pod.ObjectMeta.Annotations = map[string]string{} 124 | } 125 | } else { 126 | // Already an AMTD member 127 | json.Unmarshal([]byte(pod.ObjectMeta.Annotations[AMTD_MANAGED_BY]), &amtdManageInfoList) 128 | } 129 | 130 | // Check whether there is a collision with other AMTD resources 131 | for _, manageInfo := range amtdManageInfoList { 132 | // Do not want to test collision with self 133 | if !(AMTD.Namespace == manageInfo.AMTDNamespace && AMTD.Name == manageInfo.AMTDName) { 134 | AMTDOther := &amtdv1beta1.AdaptiveMovingTargetDefense{} 135 | err := r.Client.Get(ctx, types.NamespacedName{Namespace: manageInfo.AMTDNamespace, Name: manageInfo.AMTDName}, AMTDOther) 136 | if err != nil { 137 | if errors.IsNotFound(err) { 138 | // If the resource is not found, that is OK. It just means the desired state is to 139 | // not have any resources for this AdaptiveMovingTargetDefense but no delete is required.
140 | // TODO: remove the annotations 141 | log.Info(fmt.Sprintf(`Custom resource for AdaptiveMovingTargetDefense "%s" does not exist, removing annotations from pods`, req.Namespace+"/"+req.Name)) 142 | } else { 143 | // some other error happened 144 | log.Error(err, fmt.Sprintf(`Failed to retrieve custom resource "%s": %s`, manageInfo.AMTDName, err.Error())) 145 | return ctrl.Result{}, err 146 | } 147 | } 148 | 149 | for _, strategyOther := range AMTDOther.Spec.Strategy { 150 | for _, strategy := range AMTD.Spec.Strategy { 151 | if reflect.DeepEqual(strategyOther.Rule, strategy.Rule) { 152 | log.Error(nil, fmt.Sprintf(`AMTD RuleIDs collision "%s" <> "%s"`, AMTD.Name, AMTDOther.Name)) 153 | return ctrl.Result{}, fmt.Errorf(`AMTD RuleIDs collision "%s" <> "%s"`, AMTD.Name, AMTDOther.Name) 154 | } 155 | } 156 | } 157 | } 158 | } 159 | 160 | // Add AMTD manage info if necessary 161 | amtdManagedInfoExist := false 162 | for _, amtdManageInfo := range amtdManageInfoList { 163 | if amtdManageInfo.AMTDNamespace == AMTD.Namespace && amtdManageInfo.AMTDName == AMTD.Name { 164 | amtdManagedInfoExist = true 165 | break 166 | } 167 | } 168 | if !amtdManagedInfoExist { 169 | initialTime := strconv.FormatInt(time.Now().Unix(), 10) 170 | amtdManageInfoList = append(amtdManageInfoList, AMTDManageInfo{initialTime, AMTD.Namespace, AMTD.Name}) 171 | } 172 | amtdManagedInfoListEncoded, err := json.Marshal(amtdManageInfoList) 173 | if err != nil { 174 | log.Error(err, fmt.Sprintf(`amtdManageInfoList json encoding failed: %s`, err.Error())) 175 | } 176 | pod.ObjectMeta.Annotations[AMTD_MANAGED_BY] = string(amtdManagedInfoListEncoded) 177 | 178 | // Add r6security label for AMTD-managed pods (GitHub issue #15) 179 | if pod.ObjectMeta.Labels == nil { 180 | pod.ObjectMeta.Labels = map[string]string{} 181 | } 182 | pod.ObjectMeta.Labels[R6_SECURITY_MANAGED_LABEL] = "true" 183 | 184 | // Try to apply this patch; if it fails, return the failure 185 | // TODO: before update we could check whether any changes would happen 186 | err = r.Client.Update(ctx, &pod) 187 | if err != nil { 188 | log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error())) 189 | // this makes the controller log the error and ignore this AMTD resource in the future (at least until it changes) 190 | return ctrl.Result{}, err 191 | } 192 | 193 | log.Info(fmt.Sprintf(`Pod: "%s" was successfully updated with annotations`, pod.Name)) 194 | } 195 | 196 | return ctrl.Result{RequeueAfter: 10 * time.Second}, nil 197 | } 198 | 199 | func removeAMTDAnnotationFromPod(pod corev1.Pod, log logr.Logger, reqNamespace string, reqName string) { 200 | // Already an AMTD member 201 | var amtdManageInfoList []AMTDManageInfo 202 | json.Unmarshal([]byte(pod.ObjectMeta.Annotations[AMTD_MANAGED_BY]), &amtdManageInfoList) 203 | for index, amtdManagedInfo := range amtdManageInfoList { 204 | if amtdManagedInfo.AMTDNamespace == reqNamespace && amtdManagedInfo.AMTDName == reqName { 205 | // Remove the element at this index from the slice 206 | amtdManageInfoList[index] = amtdManageInfoList[len(amtdManageInfoList)-1] // Copy last element to index i. 207 | amtdManageInfoList[len(amtdManageInfoList)-1] = AMTDManageInfo{"", "", ""} // Erase last element (write zero value). 208 | amtdManageInfoList = amtdManageInfoList[:len(amtdManageInfoList)-1] // Truncate slice.
209 | break 210 | } 211 | } 212 | 213 | if len(amtdManageInfoList) == 0 { 214 | delete(pod.ObjectMeta.Annotations, AMTD_MANAGED_BY) 215 | // Remove r6security label when pod is no longer AMTD-managed (GitHub issue #15) 216 | if pod.ObjectMeta.Labels != nil { 217 | delete(pod.ObjectMeta.Labels, R6_SECURITY_MANAGED_LABEL) 218 | } 219 | } else { 220 | amtdManagedInfoListEncoded, err := json.Marshal(amtdManageInfoList) 221 | if err != nil { 222 | log.Error(err, fmt.Sprintf(`amtdManageInfoList json encoding failed: %s`, err.Error())) 223 | } 224 | pod.ObjectMeta.Annotations[AMTD_MANAGED_BY] = string(amtdManagedInfoListEncoded) 225 | } 226 | } 227 | 228 | // SetupWithManager sets up the controller with the Manager. 229 | func (r *AdaptiveMovingTargetDefenseReconciler) SetupWithManager(mgr ctrl.Manager) error { 230 | return ctrl.NewControllerManagedBy(mgr). 231 | For(&amtdv1beta1.AdaptiveMovingTargetDefense{}). 232 | Complete(r) 233 | } 234 | -------------------------------------------------------------------------------- /api/v1beta1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | 3 | /* 4 | * Copyright (C) 2023 R6 Security, Inc. 5 | * 6 | * This program is free software: you can redistribute it and/or modify 7 | * it under the terms of the Server Side Public License, version 1, 8 | * as published by MongoDB, Inc. 9 | * 10 | * This program is distributed in the hope that it will be useful, 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | * Server Side Public License for more details. 14 | * 15 | * You should have received a copy of the Server Side Public License 16 | * along with this program. If not, see 17 | * . 18 | */ 19 | 20 | // Code generated by controller-gen. DO NOT EDIT. 21 | 22 | package v1beta1 23 | 24 | import ( 25 | "k8s.io/api/core/v1" 26 | runtime "k8s.io/apimachinery/pkg/runtime" 27 | ) 28 | 29 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 30 | func (in *AMTDAction) DeepCopyInto(out *AMTDAction) { 31 | *out = *in 32 | if in.Disable != nil { 33 | in, out := &in.Disable, &out.Disable 34 | *out = new(DisableAction) 35 | **out = **in 36 | } 37 | if in.Delete != nil { 38 | in, out := &in.Delete, &out.Delete 39 | *out = new(DeleteAction) 40 | **out = **in 41 | } 42 | if in.Quarantine != nil { 43 | in, out := &in.Quarantine, &out.Quarantine 44 | *out = new(QuarantineAction) 45 | **out = **in 46 | } 47 | if in.Debugger != nil { 48 | in, out := &in.Debugger, &out.Debugger 49 | *out = new(Debugger) 50 | **out = **in 51 | } 52 | if in.CustomAction != nil { 53 | in, out := &in.CustomAction, &out.CustomAction 54 | *out = new(CustomAction) 55 | (*in).DeepCopyInto(*out) 56 | } 57 | } 58 | 59 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AMTDAction. 60 | func (in *AMTDAction) DeepCopy() *AMTDAction { 61 | if in == nil { 62 | return nil 63 | } 64 | out := new(AMTDAction) 65 | in.DeepCopyInto(out) 66 | return out 67 | } 68 | 69 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
70 | func (in *AdaptiveMovingTargetDefense) DeepCopyInto(out *AdaptiveMovingTargetDefense) { 71 | *out = *in 72 | out.TypeMeta = in.TypeMeta 73 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 74 | in.Spec.DeepCopyInto(&out.Spec) 75 | out.Status = in.Status 76 | } 77 | 78 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveMovingTargetDefense. 79 | func (in *AdaptiveMovingTargetDefense) DeepCopy() *AdaptiveMovingTargetDefense { 80 | if in == nil { 81 | return nil 82 | } 83 | out := new(AdaptiveMovingTargetDefense) 84 | in.DeepCopyInto(out) 85 | return out 86 | } 87 | 88 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 89 | func (in *AdaptiveMovingTargetDefense) DeepCopyObject() runtime.Object { 90 | if c := in.DeepCopy(); c != nil { 91 | return c 92 | } 93 | return nil 94 | } 95 | 96 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 97 | func (in *AdaptiveMovingTargetDefenseList) DeepCopyInto(out *AdaptiveMovingTargetDefenseList) { 98 | *out = *in 99 | out.TypeMeta = in.TypeMeta 100 | in.ListMeta.DeepCopyInto(&out.ListMeta) 101 | if in.Items != nil { 102 | in, out := &in.Items, &out.Items 103 | *out = make([]AdaptiveMovingTargetDefense, len(*in)) 104 | for i := range *in { 105 | (*in)[i].DeepCopyInto(&(*out)[i]) 106 | } 107 | } 108 | } 109 | 110 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveMovingTargetDefenseList. 111 | func (in *AdaptiveMovingTargetDefenseList) DeepCopy() *AdaptiveMovingTargetDefenseList { 112 | if in == nil { 113 | return nil 114 | } 115 | out := new(AdaptiveMovingTargetDefenseList) 116 | in.DeepCopyInto(out) 117 | return out 118 | } 119 | 120 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 121 | func (in *AdaptiveMovingTargetDefenseList) DeepCopyObject() runtime.Object { 122 | if c := in.DeepCopy(); c != nil { 123 | return c 124 | } 125 | return nil 126 | } 127 | 128 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 129 | func (in *AdaptiveMovingTargetDefenseSpec) DeepCopyInto(out *AdaptiveMovingTargetDefenseSpec) { 130 | *out = *in 131 | if in.PodSelector != nil { 132 | in, out := &in.PodSelector, &out.PodSelector 133 | *out = make(map[string]string, len(*in)) 134 | for key, val := range *in { 135 | (*out)[key] = val 136 | } 137 | } 138 | if in.Strategy != nil { 139 | in, out := &in.Strategy, &out.Strategy 140 | *out = make([]ResponseStrategy, len(*in)) 141 | for i := range *in { 142 | (*in)[i].DeepCopyInto(&(*out)[i]) 143 | } 144 | } 145 | } 146 | 147 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveMovingTargetDefenseSpec. 148 | func (in *AdaptiveMovingTargetDefenseSpec) DeepCopy() *AdaptiveMovingTargetDefenseSpec { 149 | if in == nil { 150 | return nil 151 | } 152 | out := new(AdaptiveMovingTargetDefenseSpec) 153 | in.DeepCopyInto(out) 154 | return out 155 | } 156 | 157 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 158 | func (in *AdaptiveMovingTargetDefenseStatus) DeepCopyInto(out *AdaptiveMovingTargetDefenseStatus) { 159 | *out = *in 160 | } 161 | 162 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdaptiveMovingTargetDefenseStatus. 
163 | func (in *AdaptiveMovingTargetDefenseStatus) DeepCopy() *AdaptiveMovingTargetDefenseStatus { 164 | if in == nil { 165 | return nil 166 | } 167 | out := new(AdaptiveMovingTargetDefenseStatus) 168 | in.DeepCopyInto(out) 169 | return out 170 | } 171 | 172 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 173 | func (in *CustomAction) DeepCopyInto(out *CustomAction) { 174 | *out = *in 175 | if in.Command != nil { 176 | in, out := &in.Command, &out.Command 177 | *out = make([]string, len(*in)) 178 | copy(*out, *in) 179 | } 180 | if in.Args != nil { 181 | in, out := &in.Args, &out.Args 182 | *out = make([]string, len(*in)) 183 | copy(*out, *in) 184 | } 185 | if in.EnvFrom != nil { 186 | in, out := &in.EnvFrom, &out.EnvFrom 187 | *out = make([]v1.EnvFromSource, len(*in)) 188 | for i := range *in { 189 | (*in)[i].DeepCopyInto(&(*out)[i]) 190 | } 191 | } 192 | if in.Env != nil { 193 | in, out := &in.Env, &out.Env 194 | *out = make([]v1.EnvVar, len(*in)) 195 | for i := range *in { 196 | (*in)[i].DeepCopyInto(&(*out)[i]) 197 | } 198 | } 199 | in.Resources.DeepCopyInto(&out.Resources) 200 | if in.ResizePolicy != nil { 201 | in, out := &in.ResizePolicy, &out.ResizePolicy 202 | *out = make([]v1.ContainerResizePolicy, len(*in)) 203 | copy(*out, *in) 204 | } 205 | if in.RestartPolicy != nil { 206 | in, out := &in.RestartPolicy, &out.RestartPolicy 207 | *out = new(v1.ContainerRestartPolicy) 208 | **out = **in 209 | } 210 | if in.VolumeMounts != nil { 211 | in, out := &in.VolumeMounts, &out.VolumeMounts 212 | *out = make([]v1.VolumeMount, len(*in)) 213 | for i := range *in { 214 | (*in)[i].DeepCopyInto(&(*out)[i]) 215 | } 216 | } 217 | if in.VolumeDevices != nil { 218 | in, out := &in.VolumeDevices, &out.VolumeDevices 219 | *out = make([]v1.VolumeDevice, len(*in)) 220 | copy(*out, *in) 221 | } 222 | if in.LivenessProbe != nil { 223 | in, out := &in.LivenessProbe, &out.LivenessProbe 224 | *out = new(v1.Probe) 225 | (*in).DeepCopyInto(*out) 226 | } 227 | if in.ReadinessProbe != nil { 228 | in, out := &in.ReadinessProbe, &out.ReadinessProbe 229 | *out = new(v1.Probe) 230 | (*in).DeepCopyInto(*out) 231 | } 232 | if in.StartupProbe != nil { 233 | in, out := &in.StartupProbe, &out.StartupProbe 234 | *out = new(v1.Probe) 235 | (*in).DeepCopyInto(*out) 236 | } 237 | if in.Lifecycle != nil { 238 | in, out := &in.Lifecycle, &out.Lifecycle 239 | *out = new(v1.Lifecycle) 240 | (*in).DeepCopyInto(*out) 241 | } 242 | if in.SecurityContext != nil { 243 | in, out := &in.SecurityContext, &out.SecurityContext 244 | *out = new(v1.SecurityContext) 245 | (*in).DeepCopyInto(*out) 246 | } 247 | if in.Ports != nil { 248 | in, out := &in.Ports, &out.Ports 249 | *out = make([]CustomContainerPort, len(*in)) 250 | for i := range *in { 251 | (*in)[i].DeepCopyInto(&(*out)[i]) 252 | } 253 | } 254 | } 255 | 256 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomAction. 257 | func (in *CustomAction) DeepCopy() *CustomAction { 258 | if in == nil { 259 | return nil 260 | } 261 | out := new(CustomAction) 262 | in.DeepCopyInto(out) 263 | return out 264 | } 265 | 266 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
267 | func (in *CustomContainerPort) DeepCopyInto(out *CustomContainerPort) { 268 | *out = *in 269 | if in.HostPort != nil { 270 | in, out := &in.HostPort, &out.HostPort 271 | *out = new(int32) 272 | **out = **in 273 | } 274 | } 275 | 276 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomContainerPort. 277 | func (in *CustomContainerPort) DeepCopy() *CustomContainerPort { 278 | if in == nil { 279 | return nil 280 | } 281 | out := new(CustomContainerPort) 282 | in.DeepCopyInto(out) 283 | return out 284 | } 285 | 286 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 287 | func (in *Debugger) DeepCopyInto(out *Debugger) { 288 | *out = *in 289 | } 290 | 291 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Debugger. 292 | func (in *Debugger) DeepCopy() *Debugger { 293 | if in == nil { 294 | return nil 295 | } 296 | out := new(Debugger) 297 | in.DeepCopyInto(out) 298 | return out 299 | } 300 | 301 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 302 | func (in *DeleteAction) DeepCopyInto(out *DeleteAction) { 303 | *out = *in 304 | } 305 | 306 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteAction. 307 | func (in *DeleteAction) DeepCopy() *DeleteAction { 308 | if in == nil { 309 | return nil 310 | } 311 | out := new(DeleteAction) 312 | in.DeepCopyInto(out) 313 | return out 314 | } 315 | 316 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 317 | func (in *DisableAction) DeepCopyInto(out *DisableAction) { 318 | *out = *in 319 | } 320 | 321 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DisableAction. 322 | func (in *DisableAction) DeepCopy() *DisableAction { 323 | if in == nil { 324 | return nil 325 | } 326 | out := new(DisableAction) 327 | in.DeepCopyInto(out) 328 | return out 329 | } 330 | 331 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 332 | func (in *QuarantineAction) DeepCopyInto(out *QuarantineAction) { 333 | *out = *in 334 | } 335 | 336 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuarantineAction. 337 | func (in *QuarantineAction) DeepCopy() *QuarantineAction { 338 | if in == nil { 339 | return nil 340 | } 341 | out := new(QuarantineAction) 342 | in.DeepCopyInto(out) 343 | return out 344 | } 345 | 346 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 347 | func (in *ResponseStrategy) DeepCopyInto(out *ResponseStrategy) { 348 | *out = *in 349 | out.Rule = in.Rule 350 | in.Action.DeepCopyInto(&out.Action) 351 | } 352 | 353 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponseStrategy. 354 | func (in *ResponseStrategy) DeepCopy() *ResponseStrategy { 355 | if in == nil { 356 | return nil 357 | } 358 | out := new(ResponseStrategy) 359 | in.DeepCopyInto(out) 360 | return out 361 | } 362 | 363 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 364 | func (in *Rule) DeepCopyInto(out *Rule) { 365 | *out = *in 366 | } 367 | 368 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. 
369 | func (in *Rule) DeepCopy() *Rule { 370 | if in == nil { 371 | return nil 372 | } 373 | out := new(Rule) 374 | in.DeepCopyInto(out) 375 | return out 376 | } 377 | 378 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 379 | func (in *SecurityEvent) DeepCopyInto(out *SecurityEvent) { 380 | *out = *in 381 | out.TypeMeta = in.TypeMeta 382 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 383 | in.Spec.DeepCopyInto(&out.Spec) 384 | out.Status = in.Status 385 | } 386 | 387 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityEvent. 388 | func (in *SecurityEvent) DeepCopy() *SecurityEvent { 389 | if in == nil { 390 | return nil 391 | } 392 | out := new(SecurityEvent) 393 | in.DeepCopyInto(out) 394 | return out 395 | } 396 | 397 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 398 | func (in *SecurityEvent) DeepCopyObject() runtime.Object { 399 | if c := in.DeepCopy(); c != nil { 400 | return c 401 | } 402 | return nil 403 | } 404 | 405 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 406 | func (in *SecurityEventList) DeepCopyInto(out *SecurityEventList) { 407 | *out = *in 408 | out.TypeMeta = in.TypeMeta 409 | in.ListMeta.DeepCopyInto(&out.ListMeta) 410 | if in.Items != nil { 411 | in, out := &in.Items, &out.Items 412 | *out = make([]SecurityEvent, len(*in)) 413 | for i := range *in { 414 | (*in)[i].DeepCopyInto(&(*out)[i]) 415 | } 416 | } 417 | } 418 | 419 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityEventList. 420 | func (in *SecurityEventList) DeepCopy() *SecurityEventList { 421 | if in == nil { 422 | return nil 423 | } 424 | out := new(SecurityEventList) 425 | in.DeepCopyInto(out) 426 | return out 427 | } 428 | 429 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 430 | func (in *SecurityEventList) DeepCopyObject() runtime.Object { 431 | if c := in.DeepCopy(); c != nil { 432 | return c 433 | } 434 | return nil 435 | } 436 | 437 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 438 | func (in *SecurityEventSpec) DeepCopyInto(out *SecurityEventSpec) { 439 | *out = *in 440 | if in.Targets != nil { 441 | in, out := &in.Targets, &out.Targets 442 | *out = make([]string, len(*in)) 443 | copy(*out, *in) 444 | } 445 | out.Rule = in.Rule 446 | } 447 | 448 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityEventSpec. 449 | func (in *SecurityEventSpec) DeepCopy() *SecurityEventSpec { 450 | if in == nil { 451 | return nil 452 | } 453 | out := new(SecurityEventSpec) 454 | in.DeepCopyInto(out) 455 | return out 456 | } 457 | 458 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 459 | func (in *SecurityEventStatus) DeepCopyInto(out *SecurityEventStatus) { 460 | *out = *in 461 | } 462 | 463 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityEventStatus. 
464 | func (in *SecurityEventStatus) DeepCopy() *SecurityEventStatus { 465 | if in == nil { 466 | return nil 467 | } 468 | out := new(SecurityEventStatus) 469 | in.DeepCopyInto(out) 470 | return out 471 | } 472 | -------------------------------------------------------------------------------- /api/v1beta1/adaptivemovingtargetdefense_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023 R6 Security, Inc. 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the Server Side Public License, version 1, 6 | * as published by MongoDB, Inc. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 | * Server Side Public License for more details. 12 | * 13 | * You should have received a copy of the Server Side Public License 14 | * along with this program. If not, see 15 | * . 16 | */ 17 | 18 | package v1beta1 19 | 20 | import ( 21 | corev1 "k8s.io/api/core/v1" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 26 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 27 | 28 | // AdaptiveMovingTargetDefenseSpec defines the desired state of AdaptiveMovingTargetDefense 29 | type AdaptiveMovingTargetDefenseSpec struct { 30 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 31 | // Important: Run "make" to regenerate code after modifying this file 32 | 33 | // +kubebuilder:validation:Required 34 | // PodSelector is the selector of the Kubernetes Pods on which the user desires to enable moving target defense 35 | PodSelector map[string]string `json:"podSelector"` 36 | 37 | // +kubebuilder:validation:Required 38 | // +kubebuilder:validation:MinItems=1 39 | // Define strategy that maps actions to security events (based on the security event fields) 40 | Strategy []ResponseStrategy `json:"strategy"` 41 | } 42 | 43 | type DisableAction struct{} 44 | type DeleteAction struct{} 45 | type QuarantineAction struct{} 46 | type Debugger struct { 47 | 48 | // +kubebuilder:validation:Optional 49 | Name string `json:"name,omitempty"` 50 | 51 | // +kubebuilder:validation:Required 52 | Image string `json:"image,omitempty"` 53 | 54 | // +kubebuilder:validation:Optional 55 | Terminal bool `json:"terminal,omitempty"` 56 | } 57 | 58 | // CustomContainerPort represents a network port in a single EphemeralContainer. 59 | type CustomContainerPort struct { 60 | // Number of port to expose on the pod's IP address. 61 | // This must be a valid port number, 0 < x < 65536. 62 | ContainerPort int32 `json:"containerPort"` 63 | 64 | // What host IP to bind the external port to. 65 | HostIP string `json:"hostIP,omitempty"` 66 | 67 | // Number of port to expose on the host. 68 | // If specified, this must be a valid port number, 0 < x < 65536. 69 | HostPort *int32 `json:"hostPort,omitempty"` 70 | 71 | // If specified, this must be an IANA_SVC_NAME and unique within the pod. 72 | // Name for the port that can be referred to by services. 73 | Name string `json:"name,omitempty"` 74 | 75 | // Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
76 | // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP" 77 | // +kubebuilder:default:="TCP" 78 | Protocol string `json:"protocol,omitempty"` 79 | } 80 | 81 | // CustomAction defines the custom action with an overridden Ports field. 82 | // controller-gen v0.19 generates, for EphemeralContainers, a protocol schema with an allOf field containing two defaults, which throws an error under Kubernetes 1.30.4. 83 | type CustomAction struct { 84 | // EphemeralContainer fields, except Ports, for a correct JSON representation. 85 | // Ports are forbidden in EphemeralContainers; in this scenario Ports use the CustomContainerPort type. 86 | 87 | // Name of the ephemeral container specified as a DNS_LABEL. 88 | // This name must be unique among all containers, init containers and ephemeral containers. 89 | Name string `json:"name" protobuf:"bytes,1,opt,name=name"` 90 | // Container image name. 91 | // More info: https://kubernetes.io/docs/concepts/containers/images 92 | Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` 93 | // Entrypoint array. Not executed within a shell. 94 | // The image's ENTRYPOINT is used if this is not provided. 95 | // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable 96 | // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced 97 | // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will 98 | // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless 99 | // of whether the variable exists or not. Cannot be updated. 100 | // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell 101 | // +optional 102 | // +listType=atomic 103 | Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` 104 | // Arguments to the entrypoint. 105 | // The image's CMD is used if this is not provided. 106 | // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable 107 | // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced 108 | // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will 109 | // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless 110 | // of whether the variable exists or not. Cannot be updated. 111 | // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell 112 | // +optional 113 | // +listType=atomic 114 | Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` 115 | // Container's working directory. 116 | // If not specified, the container runtime's default will be used, which 117 | // might be configured in the container image. 118 | // Cannot be updated. 119 | // +optional 120 | WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` 121 | // List of sources to populate environment variables in the container. 122 | // The keys defined within a source must be a C_IDENTIFIER. All invalid keys 123 | // will be reported as an event when the container is starting. When a key exists in multiple 124 | // sources, the value associated with the last source will take precedence. 125 | // Values defined by an Env with a duplicate key will take precedence. 126 | // Cannot be updated.
127 | 	// +optional
128 | 	// +listType=atomic
129 | 	EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
130 | 	// List of environment variables to set in the container.
131 | 	// Cannot be updated.
132 | 	// +optional
133 | 	// +patchMergeKey=name
134 | 	// +patchStrategy=merge
135 | 	// +listType=map
136 | 	// +listMapKey=name
137 | 	Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
138 | 	// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
139 | 	// already allocated to the pod.
140 | 	// +optional
141 | 	Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
142 | 	// Resources resize policy for the container.
143 | 	// +featureGate=InPlacePodVerticalScaling
144 | 	// +optional
145 | 	// +listType=atomic
146 | 	ResizePolicy []corev1.ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
147 | 	// Restart policy for the container to manage the restart behavior of each
148 | 	// container within a pod.
149 | 	// This may only be set for init containers. You cannot set this field on
150 | 	// ephemeral containers.
151 | 	// +featureGate=SidecarContainers
152 | 	// +optional
153 | 	RestartPolicy *corev1.ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
154 | 	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
155 | 	// Cannot be updated.
156 | 	// +optional
157 | 	// +patchMergeKey=mountPath
158 | 	// +patchStrategy=merge
159 | 	// +listType=map
160 | 	// +listMapKey=mountPath
161 | 	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
162 | 	// volumeDevices is the list of block devices to be used by the container.
163 | 	// +patchMergeKey=devicePath
164 | 	// +patchStrategy=merge
165 | 	// +listType=map
166 | 	// +listMapKey=devicePath
167 | 	// +optional
168 | 	VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
169 | 	// Probes are not allowed for ephemeral containers.
170 | 	// +optional
171 | 	LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
172 | 	// Probes are not allowed for ephemeral containers.
173 | 	// +optional
174 | 	ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
175 | 	// Probes are not allowed for ephemeral containers.
176 | 	// +optional
177 | 	StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
178 | 	// Lifecycle is not allowed for ephemeral containers.
179 | 	// +optional
180 | 	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
181 | 	// Optional: Path at which the file to which the container's termination message
182 | 	// will be written is mounted into the container's filesystem.
183 | 	// Message written is intended to be brief final status, such as an assertion failure message.
184 | 	// Will be truncated by the node if greater than 4096 bytes. The total message length across
185 | 	// all containers will be limited to 12kb.
186 | 	// Defaults to /dev/termination-log.
187 | 	// Cannot be updated.
188 | 	// +optional
189 | 	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
190 | 	// Indicate how the termination message should be populated. File will use the contents of
191 | 	// terminationMessagePath to populate the container status message on both success and failure.
192 | 	// FallbackToLogsOnError will use the last chunk of container log output if the termination
193 | 	// message file is empty and the container exited with an error.
194 | 	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
195 | 	// Defaults to File.
196 | 	// Cannot be updated.
197 | 	// +optional
198 | 	TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
199 | 	// Image pull policy.
200 | 	// One of Always, Never, IfNotPresent.
201 | 	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
202 | 	// Cannot be updated.
203 | 	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
204 | 	// +optional
205 | 	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
206 | 	// Optional: SecurityContext defines the security options the ephemeral container should be run with.
207 | 	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
208 | 	// +optional
209 | 	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
210 | 
211 | 	// Ports are normally not allowed for ephemeral containers; this override uses the CustomContainerPort type instead.
212 | 	// +optional
213 | 	// +patchMergeKey=containerPort
214 | 	// +patchStrategy=merge
215 | 	// +listType=map
216 | 	// +listMapKey=containerPort
217 | 	// +listMapKey=protocol
218 | 	Ports []CustomContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
219 | 
220 | 	// Whether this container should allocate a buffer for stdin in the container runtime. If this
221 | 	// is not set, reads from stdin in the container will always result in EOF.
222 | 	// Default is false.
223 | 	// +optional
224 | 	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
225 | 	// Whether the container runtime should close the stdin channel after it has been opened by
226 | 	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
227 | 	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
228 | 	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
229 | 	// at which time stdin is closed and remains closed until the container is restarted. If this
230 | 	// flag is false, a container process that reads from stdin will never receive an EOF.
231 | 	// Default is false.
232 | 	// +optional
233 | 	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
234 | 	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
235 | 	// Default is false.
236 | 	// +optional
237 | 	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
238 | 	// If set, the name of the container from PodSpec that this ephemeral container targets.
239 | 	// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
240 | 	// If not set then the ephemeral container uses the namespaces configured in the Pod spec.
241 | 	//
242 | 	// The container runtime must implement support for this feature. If the runtime does not
243 | 	// support namespace targeting then the result of setting this field is undefined.
244 | 	// +optional
245 | 	TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"`
246 | }
247 | 
248 | // +kubebuilder:validation:MaxProperties=1
249 | type AMTDAction struct {
250 | 	Disable      *DisableAction    `json:"disable,omitempty"`
251 | 	Delete       *DeleteAction     `json:"delete,omitempty"`
252 | 	Quarantine   *QuarantineAction `json:"quarantine,omitempty"`
253 | 	Debugger     *Debugger         `json:"debugger,omitempty"`
254 | 	CustomAction *CustomAction     `json:"customAction,omitempty"`
255 | }
256 | 
257 | // ResponseStrategy is the substructure for strategy definitions
258 | type ResponseStrategy struct {
259 | 	// TODO: use an enum for the specific values of these fields
260 | 
261 | 	// +kubebuilder:validation:Required
262 | 	Rule Rule `json:"rule"`
263 | 
264 | 	// +kubebuilder:validation:Required
265 | 	// Action field value of the SecurityEvent that arrives
266 | 	Action AMTDAction `json:"action"`
267 | }
268 | 
269 | type Rule struct {
270 | 	// +kubebuilder:validation:Optional
271 | 	// Type field value of the SecurityEvent that arrives
272 | 	Type string `json:"type"`
273 | 
274 | 	// +kubebuilder:validation:Optional
275 | 	// ThreatLevel field value of the SecurityEvent that arrives
276 | 	ThreatLevel string `json:"threatLevel"`
277 | 
278 | 	// +kubebuilder:validation:Optional
279 | 	// Source field value of the SecurityEvent that arrives
280 | 	Source string `json:"source"`
281 | }
282 | 
283 | // AdaptiveMovingTargetDefenseStatus defines the observed state of AdaptiveMovingTargetDefense
284 | type AdaptiveMovingTargetDefenseStatus struct {
285 | 	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
286 | 	// Important: Run "make" to regenerate code after modifying this file
287 | }
288 | 
289 | // +kubebuilder:object:root=true
290 | // +kubebuilder:subresource:status
291 | // AdaptiveMovingTargetDefense is the Schema for the adaptivemovingtargetdefenses API
292 | type AdaptiveMovingTargetDefense struct {
293 | 	metav1.TypeMeta   `json:",inline"`
294 | 	metav1.ObjectMeta `json:"metadata,omitempty"`
295 | 
296 | 	Spec   AdaptiveMovingTargetDefenseSpec   `json:"spec,omitempty"`
297 | 	Status AdaptiveMovingTargetDefenseStatus `json:"status,omitempty"`
298 | }
299 | 
300 | //+kubebuilder:object:root=true
301 | 
302 | // AdaptiveMovingTargetDefenseList contains a list of AdaptiveMovingTargetDefense
303 | type AdaptiveMovingTargetDefenseList struct {
304 | 	metav1.TypeMeta `json:",inline"`
305 | 	metav1.ListMeta `json:"metadata,omitempty"`
306 | 	Items           []AdaptiveMovingTargetDefense `json:"items"`
307 | }
308 | 
309 | func init() {
310 | 	SchemeBuilder.Register(&AdaptiveMovingTargetDefense{}, &AdaptiveMovingTargetDefenseList{})
311 | }
312 | 
--------------------------------------------------------------------------------
/internal/controller/securityevent_controller.go:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2023 R6 Security, Inc.
3 |  *
4 |  * This program is free software: you can redistribute it and/or modify
5 |  * it under the terms of the Server Side Public License, version 1,
6 |  * as published by MongoDB, Inc.
7 |  *
8 |  * This program is distributed in the hope that it will be useful,
9 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 |  * Server Side Public License for more details.
12 |  *
13 |  * You should have received a copy of the Server Side Public License
14 |  * along with this program. If not, see
15 |  * <http://www.mongodb.com/licensing/server-side-public-license>.
16 |  */
17 | 
18 | package controller
19 | 
20 | import (
21 | 	"context"
22 | 	"encoding/json"
23 | 	"fmt"
24 | 	"reflect"
25 | 	"strings"
26 | 	"time"
27 | 
28 | 	corev1 "k8s.io/api/core/v1"
29 | 	v1 "k8s.io/api/networking/v1"
30 | 	"k8s.io/apimachinery/pkg/api/errors"
31 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
32 | 	"k8s.io/apimachinery/pkg/runtime"
33 | 	"k8s.io/apimachinery/pkg/types"
34 | 	"k8s.io/utils/strings/slices"
35 | 	ctrl "sigs.k8s.io/controller-runtime"
36 | 	"sigs.k8s.io/controller-runtime/pkg/client"
37 | 	"sigs.k8s.io/controller-runtime/pkg/log"
38 | 
39 | 	amtdv1beta1 "github.com/r6security/phoenix/api/v1beta1"
40 | )
41 | 
42 | // SecurityEventReconciler reconciles a SecurityEvent object
43 | type SecurityEventReconciler struct {
44 | 	client.Client
45 | 	Scheme *runtime.Scheme
46 | }
47 | 
48 | //+kubebuilder:rbac:groups=amtd.r6security.com,resources=securityevents,verbs=get;list;watch;create;update;patch;delete
49 | //+kubebuilder:rbac:groups=amtd.r6security.com,resources=securityevents/status,verbs=get;update;patch
50 | //+kubebuilder:rbac:groups=amtd.r6security.com,resources=securityevents/finalizers,verbs=update
51 | //+kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create
52 | 
53 | // Reconcile is part of the main kubernetes reconciliation loop which aims to
54 | // move the current state of the cluster closer to the desired state.
55 | // TODO(user): Modify the Reconcile function to compare the state specified by
56 | // the SecurityEvent object against the actual cluster state, and then
57 | // perform operations to make the cluster state reflect the state specified by
58 | // the user.
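   | //
   | // As a rough sketch of the matching performed below (the manifests here are
   | // illustrative and not copied from the repository's samples): a SecurityEvent
   | // names its target pods and carries a rule, e.g.
   | //
   | //	apiVersion: amtd.r6security.com/v1beta1
   | //	kind: SecurityEvent
   | //	metadata:
   | //	  name: demo-event
   | //	spec:
   | //	  targets:
   | //	    - default/demo-pod
   | //	  rule:
   | //	    type: IDS
   | //	    threatLevel: high
   | //	    source: falco
   | //
   | // and the rule is compared (via reflect.DeepEqual) with the rules in the
   | // strategy list of every AdaptiveMovingTargetDefense that manages the pod,
   | // for example:
   | //
   | //	strategy:
   | //	  - rule:
   | //	      type: IDS
   | //	      threatLevel: high
   | //	      source: falco
   | //	    action:
   | //	      delete: {}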
59 | //
60 | // For more details, check Reconcile and its Result here:
61 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile
62 | func (r *SecurityEventReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
63 | 	log := log.FromContext(ctx)
64 | 
65 | 	securityEvent := &amtdv1beta1.SecurityEvent{}
66 | 
67 | 	// Retrieve the SecurityEvent that triggered reconciliation from the cluster
68 | 	err := r.Client.Get(context.Background(), req.NamespacedName, securityEvent)
69 | 	if err != nil {
70 | 		if errors.IsNotFound(err) {
71 | 			log.Info(fmt.Sprintf(`SecurityEvent "%s" removed, no action is needed`, req.Name))
72 | 			return ctrl.Result{}, nil
73 | 		} else {
74 | 			log.Error(err, fmt.Sprintf(`Failed to retrieve SecurityEvent "%s": %s`, req.Name, err.Error()))
75 | 			return ctrl.Result{}, err
76 | 		}
77 | 	}
78 | 
79 | 	log.Info(fmt.Sprintf(`SecurityEvent found: "%s", targets: %s`, securityEvent.Name, securityEvent.Spec.Targets))
80 | 
81 | 	// ---------------------------------------------------
82 | 	// Process pods in the target list of the SecurityEvent
83 | 	// ---------------------------------------------------
84 | 	var AMTD *amtdv1beta1.AdaptiveMovingTargetDefense
85 | 	for _, target := range securityEvent.Spec.Targets {
86 | 		namespace := strings.Split(target, "/")[0]
87 | 		name := strings.Split(target, "/")[1]
88 | 
89 | 		// ---------------------------------------------------
90 | 		// Check that the resource exists and whether to deal with it
91 | 		// ---------------------------------------------------
92 | 		pod := &corev1.Pod{}
93 | 		namespacedName := types.NamespacedName{Namespace: namespace, Name: name}
94 | 		err = r.Client.Get(context.Background(), namespacedName, pod)
95 | 
96 | 		if err != nil {
97 | 			if errors.IsNotFound(err) {
98 | 				log.Info(fmt.Sprintf(`Pod "%s/%s" does not exist`, namespace, name))
99 | 				return ctrl.Result{}, nil
100 | 			} else {
101 | 				// some other error happened
102 | 				log.Error(err, fmt.Sprintf(`Failed to retrieve pod "%s"`, pod.Name))
103 | 				return ctrl.Result{}, err
104 | 			}
105 | 		}
106 | 
107 | 		// Does the pod have the annotation AMTD_MANAGED_BY?
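   | 		// (Illustrative note: the annotation value is a JSON-encoded list of
   | 		// AMTDManageInfo entries naming the managing AMTD resources; the exact
   | 		// JSON key names depend on the struct tags in amtd_manage_info.go, so a
   | 		// value like [{"amtdNamespace":"default","amtdName":"demo"}] is only a
   | 		// sketch, not the guaranteed wire format.)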
108 | 		if pod.ObjectMeta.Annotations == nil {
109 | 			log.Error(err, fmt.Sprintf(`Pod "%s" is a SecurityEvent target but not AMTD managed`, pod.Name))
110 | 			return ctrl.Result{}, err
111 | 		} else if _, found := pod.ObjectMeta.Annotations[AMTD_MANAGED_BY]; !found {
112 | 			log.Error(err, fmt.Sprintf(`Pod "%s" is a SecurityEvent target but not AMTD managed`, pod.Name))
113 | 			return ctrl.Result{}, err
114 | 		}
115 | 
116 | 		// ---------------------------------------------------
117 | 		// Look for the proper action for the SecurityEvent in AMTDs that manage the pod
118 | 		// ---------------------------------------------------
119 | 		var action amtdv1beta1.AMTDAction
120 | 		var AMTDManageInfoList []AMTDManageInfo
121 | 		json.Unmarshal([]byte(pod.ObjectMeta.Annotations[AMTD_MANAGED_BY]), &AMTDManageInfoList)
122 | 		for _, AMTDManageInfo := range AMTDManageInfoList {
123 | 
124 | 			// Get the AMTD resource referenced in AMTDManageInfo, i.e. one that manages the pod
125 | 			AMTD = &amtdv1beta1.AdaptiveMovingTargetDefense{}
126 | 			err := r.Client.Get(context.Background(), types.NamespacedName{Namespace: AMTDManageInfo.AMTDNamespace, Name: AMTDManageInfo.AMTDName}, AMTD)
127 | 			if err != nil {
128 | 				if errors.IsNotFound(err) {
129 | 					log.Info(fmt.Sprintf(`AdaptiveMovingTargetDefense "%s" does not exist but is referenced in a pod annotation`, AMTDManageInfo.AMTDNamespace+"/"+AMTDManageInfo.AMTDName))
130 | 					break
131 | 				} else {
132 | 					log.Error(err, fmt.Sprintf(`Failed to retrieve AdaptiveMovingTargetDefense "%s": %s`, AMTDManageInfo.AMTDName, err.Error()))
133 | 					return ctrl.Result{}, err
134 | 				}
135 | 			}
136 | 
137 | 			// Check whether there is a specific action for the security event
138 | 			for _, strategy := range AMTD.Spec.Strategy {
139 | 				if reflect.DeepEqual(strategy.Rule, securityEvent.Spec.Rule) {
140 | 					// we found the matching strategy, no need to look further
141 | 					action = strategy.Action
142 | 					break
143 | 				}
144 | 			}
145 | 			// we found the matching strategy, no need to look further
146 | 			if action != (amtdv1beta1.AMTDAction{}) {
147 | 				break
148 | 			}
149 | 		}
150 | 
151 | 		// ---------------------------------------------------
152 | 		// Add the SecurityEvent spec to the annotations of the pod
153 | 		// ---------------------------------------------------
154 | 		var appliedSecurityEvents []amtdv1beta1.SecurityEvent
155 | 		if _, found := pod.ObjectMeta.Annotations[AMTD_APPLIED_SECURITY_EVENTS]; !found {
156 | 			appliedSecurityEvents = append(appliedSecurityEvents, *securityEvent)
157 | 		} else {
158 | 			// Already an AMTD member
159 | 			json.Unmarshal([]byte(pod.ObjectMeta.Annotations[AMTD_APPLIED_SECURITY_EVENTS]), &appliedSecurityEvents)
160 | 
161 | 			securityEventExist := false
162 | 			for _, appliedSecurityEvent := range appliedSecurityEvents {
163 | 				if appliedSecurityEvent.Name == securityEvent.Name {
164 | 					securityEventExist = true
165 | 					break
166 | 				}
167 | 			}
168 | 
169 | 			if !securityEventExist {
170 | 				appliedSecurityEvents = append(appliedSecurityEvents, *securityEvent)
171 | 			} else {
172 | 				log.Info(fmt.Sprintf(`This SecurityEvent ("%s") was already processed - ignoring it`, securityEvent.Name))
173 | 				//return ctrl.Result{}, nil
174 | 			}
175 | 		}
176 | 
177 | 		if appliedSecurityEvents != nil {
178 | 			appliedSecurityEventsEncoded, err := json.Marshal(appliedSecurityEvents)
179 | 			if err != nil {
180 | 				log.Error(err, fmt.Sprintf(`Failed to JSON encode appliedSecurityEvents: %s`, err.Error()))
181 | 			}
182 | 			pod.ObjectMeta.Annotations[AMTD_APPLIED_SECURITY_EVENTS] = string(appliedSecurityEventsEncoded)
183 | 
184 | 			err = r.Client.Update(ctx, pod)
185 | 			if err != nil {
186 | 				log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error()))
187 | 				return ctrl.Result{}, err
188 | 			}
189 | 
190 | 			log.Info("SecurityEvent was successfully applied to the pod")
191 | 		}
192 | 
193 | 		// ---------------------------------------------------
194 | 		// Execute the proper action
195 | 		// TODO: replace with Action interface and function call
196 | 		// ---------------------------------------------------
197 | 		if action.Delete != nil {
198 | 			podErr := r.Client.Delete(ctx, pod)
199 | 
200 | 			if podErr != nil && !errors.IsNotFound(podErr) {
201 | 				log.Error(podErr, fmt.Sprintf(`Failed to delete pod "%s"`, req.Name))
202 | 			}
203 | 			log.Info(fmt.Sprintf(`Pod: "%s" was successfully deleted with ACTION: delete`, pod.Name))
204 | 		} else if action.Debugger != nil {
205 | 			if len(pod.Spec.Containers) == 0 {
206 | 				log.Info("There is no container in the pod to which the ephemeral container could be attached")
207 | 				break
208 | 			}
209 | 
210 | 			if action.Debugger.Name == "" {
211 | 				action.Debugger.Name = "amtd-debug-container"
212 | 			}
213 | 
214 | 			ephemeralContainers := make([]string, 0, len(pod.Spec.EphemeralContainers))
215 | 			for _, c := range pod.Spec.EphemeralContainers {
216 | 				ephemeralContainers = append(ephemeralContainers, c.Name)
217 | 			}
218 | 
219 | 			if slices.Contains(ephemeralContainers, action.Debugger.Name) {
220 | 				log.Info("Cannot attach a debug container because it already exists")
221 | 				break
222 | 			}
223 | 
224 | 			ec := corev1.EphemeralContainer{
225 | 				EphemeralContainerCommon: corev1.EphemeralContainerCommon{
226 | 					Name:  action.Debugger.Name,
227 | 					Image: action.Debugger.Image,
228 | 					Stdin: action.Debugger.Terminal,
229 | 					TTY:   action.Debugger.Terminal,
230 | 				},
231 | 				TargetContainerName: pod.Spec.Containers[0].Name,
232 | 			}
233 | 			pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, ec)
234 | 
235 | 			err = r.Client.SubResource("ephemeralcontainers").Update(ctx, pod)
236 | 
237 | 			if err != nil {
238 | 				log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error()))
239 | 				break
240 | 			}
241 | 			log.Info("Successfully attached debug container", "containerName", action.Debugger.Name)
242 | 		} else if action.CustomAction != nil {
243 | 
244 | 			if len(pod.Spec.Containers) == 0 {
245 | 				log.Info("There is no container in the pod to which the ephemeral container could be attached")
246 | 				break
247 | 			}
248 | 
249 | 			if action.CustomAction.Name == "" {
250 | 				action.CustomAction.Name = "amtd-debug-container"
251 | 			}
252 | 
253 | 			if action.CustomAction.TargetContainerName == "" {
254 | 				action.CustomAction.TargetContainerName = pod.Spec.Containers[0].Name
255 | 			}
256 | 
257 | 			ephemeralContainers := make([]string, 0, len(pod.Spec.EphemeralContainers))
258 | 			for _, c := range pod.Spec.EphemeralContainers {
259 | 				ephemeralContainers = append(ephemeralContainers, c.Name)
260 | 			}
261 | 
262 | 			if slices.Contains(ephemeralContainers, action.CustomAction.Name) {
263 | 				log.Info("Cannot attach a custom action container because it already exists")
264 | 				break
265 | 			}
266 | 
267 | 			ec := corev1.EphemeralContainer{
268 | 				EphemeralContainerCommon: corev1.EphemeralContainerCommon{
269 | 					Name:  action.CustomAction.Name,
270 | 					Image: action.CustomAction.Image,
271 | 					Stdin: action.CustomAction.Stdin,
272 | 					TTY:   action.CustomAction.TTY,
273 | 				},
274 | 				TargetContainerName: action.CustomAction.TargetContainerName,
275 | 			}
276 | 			pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, ec)
277 | 
278 | 			err = r.Client.SubResource("ephemeralcontainers").Update(ctx, pod)
279 | 
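   | 			// Note: ephemeral containers cannot be changed through a regular pod
   | 			// update; the API server only accepts them through the pod's
   | 			// "ephemeralcontainers" subresource, hence SubResource(...).Update here.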
280 | 			if err != nil {
281 | 				log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error()))
282 | 				break
283 | 			}
284 | 			log.Info("Successfully attached custom action container", "containerName", action.CustomAction.Name)
285 | 		} else if action.Quarantine != nil {
286 | 			networkPolicyName := fmt.Sprintf("%s-%s-%s", pod.Namespace, pod.Name, "policy")
287 | 
288 | 			networkPolicy := &v1.NetworkPolicy{}
289 | 			err = r.Client.Get(context.Background(), types.NamespacedName{
290 | 				Namespace: pod.Namespace,
291 | 				Name:      networkPolicyName,
292 | 			}, networkPolicy)
293 | 
294 | 			if err != nil && errors.IsNotFound(err) {
295 | 				networkPolicy := &v1.NetworkPolicy{
296 | 					ObjectMeta: metav1.ObjectMeta{
297 | 						Name:      networkPolicyName,
298 | 						Namespace: pod.Namespace,
299 | 					},
300 | 					Spec: v1.NetworkPolicySpec{
301 | 						PodSelector: metav1.LabelSelector{
302 | 							MatchLabels: map[string]string{AMTD_NETWORK_POLICY: networkPolicyName},
303 | 						},
304 | 						Ingress: []v1.NetworkPolicyIngressRule{},
305 | 						Egress:  []v1.NetworkPolicyEgressRule{},
306 | 						PolicyTypes: []v1.PolicyType{
307 | 							v1.PolicyTypeIngress,
308 | 							v1.PolicyTypeEgress,
309 | 						},
310 | 					},
311 | 				}
312 | 
313 | 				// Set the AMTD instance as the owner and controller for the NetworkPolicy
314 | 				err := ctrl.SetControllerReference(AMTD, networkPolicy, r.Scheme)
315 | 				if err != nil {
316 | 					log.Error(err, "Failed to set AMTD as owner and controller reference on NetworkPolicy",
317 | 						"AMTD", AMTD.ObjectMeta.Name,
318 | 						"NetworkPolicy", networkPolicy.Name,
319 | 						"Namespace", networkPolicy.Namespace,
320 | 					)
321 | 				}
322 | 
323 | 				err = r.Create(ctx, networkPolicy)
324 | 				if err != nil {
325 | 					log.Error(err, "Failed to create NetworkPolicy in the cluster",
326 | 						"NetworkPolicy", networkPolicy.Name,
327 | 						"Namespace", networkPolicy.Namespace)
328 | 				}
329 | 
330 | 				// Relabel the pod:
331 | 				// i) move labels under annotations to preserve them - except those that belong to AMTD management,
332 | 				for key, value := range pod.ObjectMeta.Labels {
333 | 					if !isSetContain(AMTD.Spec.PodSelector, map[string]string{key: value}) {
334 | 						pod.ObjectMeta.Annotations[key] = value
335 | 						delete(pod.ObjectMeta.Labels, key)
336 | 					}
337 | 				}
338 | 				// ii) add a new label that matches the networkPolicy podSelector
339 | 				pod.ObjectMeta.Labels[AMTD_NETWORK_POLICY] = networkPolicyName
340 | 
341 | 				err = r.Client.Update(ctx, pod)
342 | 				if err != nil {
343 | 					log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error()))
344 | 					return ctrl.Result{}, err
345 | 				}
346 | 
347 | 				log.Info(fmt.Sprintf(`Pod %s was put in quarantine`, pod.Name))
348 | 			}
349 | 
350 | 			// Set the AMTD instance as the owner and controller for the Pod - this
351 | 			// step cannot be combined with the relabel into a single update, because
352 | 			// until the relabel happens another owner exists that cannot be updated.
353 | 			// Since the OwnerReference is not removed immediately by the ReplicaSet
354 | 			// or similar, we need to reschedule and check it later.
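   | 			// ctrl.SetControllerReference refuses to overwrite an existing
   | 			// controller owner reference and returns an AlreadyOwnedError in that
   | 			// case, so the requeue below simply retries until the old owner
   | 			// reference has been released.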
355 | 			err = ctrl.SetControllerReference(AMTD, pod, r.Scheme)
356 | 			if err != nil {
357 | 				log.Error(err, "Failed to set AMTD as owner and controller reference on Pod - rescheduling and trying later",
358 | 					"AMTD", AMTD.ObjectMeta.Name,
359 | 					"Pod", pod.Name,
360 | 					"Namespace", pod.Namespace,
361 | 				)
362 | 				return ctrl.Result{RequeueAfter: 2 * time.Second}, nil
363 | 			}
364 | 
365 | 			err = r.Client.Update(ctx, pod)
366 | 			if err != nil {
367 | 				log.Error(err, fmt.Sprintf(`Failed to update pod: "%s": %s`, pod.Name, err.Error()))
368 | 				return ctrl.Result{}, err
369 | 			}
370 | 		} else {
371 | 			log.Info(fmt.Sprintf(`ACTION: %v -> POD: %s - NOT IMPLEMENTED YET`, action, pod.Name))
372 | 		}
373 | 	}
374 | 
375 | 	return ctrl.Result{}, nil
376 | }
377 | 
378 | // SetupWithManager sets up the controller with the Manager.
379 | func (r *SecurityEventReconciler) SetupWithManager(mgr ctrl.Manager) error {
380 | 	return ctrl.NewControllerManagedBy(mgr).
381 | 		For(&amtdv1beta1.SecurityEvent{}).
382 | 		Complete(r)
383 | }
384 | 
385 | // isSetContain reports whether every key/value pair in subset is also present in set.
386 | func isSetContain(set map[string]string, subset map[string]string) bool {
387 | 	if len(subset) == 0 {
388 | 		return false
389 | 	}
390 | 	for key, value := range subset {
391 | 		if value2, ok := set[key]; ok {
392 | 			if value != value2 {
393 | 				return false
394 | 			}
395 | 		} else {
396 | 			return false
397 | 		}
398 | 	}
399 | 	return true
400 | }
--------------------------------------------------------------------------------