├── .github ├── dependabot.yml └── workflows │ └── validate.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── OWNERS ├── README.md ├── SECURITY.md ├── blogs └── README.md ├── build └── validate-policies.sh ├── community ├── AC-Access-Control │ ├── policy-configure-appworkloads-rbac-sample.yaml │ ├── policy-configure-clusterlevel-rbac.yaml │ ├── policy-gatekeeper-disallow-anonymous.yaml │ ├── policy-gatekeeper-limitclusteradmin.yaml │ ├── policy-rbac-adminiterpolicies-sample.yaml │ ├── policy-roles-no-wildcards.yaml │ └── policy-standalone-hubtemplate-config.yaml ├── AU-Audit-and-Accountability │ └── policy-openshift-audit-logs-sample.yaml ├── CA-Security-Assessment-and-Authorization │ ├── policy-check-fips.yaml │ └── policy-compliance-operator-install-upstream.yaml ├── CM-Configuration-Management │ ├── acm-app-pv-backup │ │ ├── README.md │ │ ├── input │ │ │ ├── pv-snap │ │ │ │ └── hdr-app-configmap.txt │ │ │ └── restic │ │ │ │ └── hdr-app-configmap.txt │ │ └── resources │ │ │ ├── policies │ │ │ ├── oadp-hdr-app-backup.yaml │ │ │ ├── oadp-hdr-app-install.yaml │ │ │ └── oadp-hdr-app-restore.yaml │ │ │ └── policy-sets │ │ │ ├── acm-app-backup-policy-set.yaml │ │ │ └── acm-app-restore-policy-set.yaml │ ├── acm-hub-pvc-backup │ │ ├── README.md │ │ ├── acm-hub-pvc-backup-config.yaml │ │ ├── acm-hub-pvc-backup-destination.yaml │ │ ├── acm-hub-pvc-backup-policyset.yaml │ │ ├── acm-hub-pvc-backup-source.yaml │ │ ├── acm-hub-pvc-placement.yaml │ │ └── images │ │ │ ├── backup_dest_policy.png │ │ │ ├── backup_dest_policy_1.png │ │ │ ├── backup_source_policy.png │ │ │ ├── backup_source_policy_1.png │ │ │ ├── config_policy.png │ │ │ ├── policies.png │ │ │ ├── policyset.png │ │ │ ├── restore_dest_policy.png │ │ │ ├── restore_dest_policy_1.png │ │ │ ├── restore_dest_pvc.png │ │ │ ├── restore_dest_rd.png │ │ │ └── restore_source_policy.png │ ├── policy-acs-operator-central.yaml │ ├── policy-acs-operator-secured-clusters.yaml │ ├── 
policy-ansible-awx-operator.yaml │ ├── policy-argocd-kubernetes.yaml │ ├── policy-automation-operator.yaml │ ├── policy-autoscaler-templatized.yaml │ ├── policy-autoscaler.yaml │ ├── policy-aws-machine-sets.yaml │ ├── policy-cert-manager-operator.yaml │ ├── policy-check-policyreports.yaml │ ├── policy-check-reports.yaml │ ├── policy-cluster-dns-sample.yaml │ ├── policy-cluster-logforwarder-templatized.yaml │ ├── policy-cluster-network-sample.yaml │ ├── policy-cluster-proxy-sample.yaml │ ├── policy-compliance-operator-high-scan.yaml │ ├── policy-compliance-operator-hypershift-hosted-cluster.yaml │ ├── policy-compliance-operator-moderate-scan.yaml │ ├── policy-compliance-operator-nerc-cip-scan.yaml │ ├── policy-compliance-operator-pci-dss-scan.yaml │ ├── policy-configure-logforwarding.yaml │ ├── policy-configure-subscription-admin-hub.yaml │ ├── policy-continuous-restore-backup-triliovault-for-kubernetes.yaml │ ├── policy-continuous-restore-event-target-triliovault-for-kubernetes.yaml │ ├── policy-continuous-restore-triliovault-for-kubernetes.yaml │ ├── policy-create-helm-backup-triliovault-for-kubernetes.yaml │ ├── policy-create-label-backup-triliovault-for-kubernetes.yaml │ ├── policy-create-license-triliovault-for-kubernetes-upstream.yaml │ ├── policy-create-ns-backup-triliovault-for-kubernetes-templatized.yaml │ ├── policy-create-ns-backup-triliovault-for-kubernetes.yaml │ ├── policy-create-operator-backup-triliovault-for-kubernetes.yaml │ ├── policy-custom-catalog.yaml │ ├── policy-egress-firewall-sample.yaml │ ├── policy-enable-if-etcd-encrypted-templatized.yaml │ ├── policy-enable-if-ns-exists-templatized.yaml │ ├── policy-engineering-configmap.yaml │ ├── policy-engineering-pod-disruption-budget.yaml │ ├── policy-etcd-backup.yaml │ ├── policy-file-integrity-operator.yaml │ ├── policy-gatekeeper-allowed-external-ips.yaml │ ├── policy-gatekeeper-annotation-owner.yaml │ ├── policy-gatekeeper-config-exclude-namespaces.yaml │ ├── 
policy-gatekeeper-container-image-latest.yaml │ ├── policy-gatekeeper-container-livenessprobenotset.yaml │ ├── policy-gatekeeper-container-readinessprobenotset.yaml │ ├── policy-gatekeeper-container-tgps.yaml │ ├── policy-gatekeeper-image-pull-policy.yaml │ ├── policy-gatekeeper-operator.yaml │ ├── policy-gatekeeper-sample.yaml │ ├── policy-github-oauth-sample.yaml │ ├── policy-idp-operator.yaml │ ├── policy-idp-sample-github.yaml │ ├── policy-image-policy-sample.yaml │ ├── policy-ingress-controller.yaml │ ├── policy-install-external-secrets.yaml │ ├── policy-install-kyverno.yaml │ ├── policy-install-triliovault-for-kubernetes-using-helm.yaml │ ├── policy-install-triliovault-for-kubernetes.yaml │ ├── policy-integrity-shield-events.yaml │ ├── policy-integrity-shield-observer.yaml │ ├── policy-integrity-shield.yaml │ ├── policy-kernel-devel.yaml │ ├── policy-kyverno-add-network-policy.yaml │ ├── policy-kyverno-add-quota.yaml │ ├── policy-kyverno-config-exclude-resources.yaml │ ├── policy-kyverno-container-tgps.yaml │ ├── policy-kyverno-image-pull-policy.yaml │ ├── policy-kyverno-operator.yaml │ ├── policy-kyverno-sample.yaml │ ├── policy-kyverno-sync-secrets.yaml │ ├── policy-label-cluster.yaml │ ├── policy-label-worker-nodes.yaml │ ├── policy-local-storage-operator.yaml │ ├── policy-lvm-operator.yaml │ ├── policy-machineconfig-chrony.yaml │ ├── policy-managedclusterinfo-templatized.yaml │ ├── policy-managedclustersetbinding.yaml │ ├── policy-mce-hcp-autoimport.yaml │ ├── policy-network-policy-samples.yaml │ ├── policy-nginx-deployment-templatized.yaml │ ├── policy-nginx-deployment.yaml │ ├── policy-oadp.yaml │ ├── policy-oauth-config.yaml │ ├── policy-oauth-htpasswd.yaml │ ├── policy-oauth-ldapsync.yaml │ ├── policy-oc-client-cronjob.yaml │ ├── policy-odf.yaml │ ├── policy-opa-sample.yaml │ ├── policy-openshift-gitops-policygenerator.yaml │ ├── policy-openshift-gitops.yaml │ ├── policy-openshift-serverless.yaml │ ├── policy-openshift-servicemesh.yaml │ ├── 
policy-pao-operator.yaml │ ├── policy-persistent-data-management.yaml │ ├── policy-pod-disruption-budget-templatized.yaml │ ├── policy-pod-placement.yaml │ ├── policy-proxy-protocol.yaml │ ├── policy-ptp-operator.yaml │ ├── policy-resiliency-image-pruner.yaml │ ├── policy-rhoda-operator.yaml │ ├── policy-rhsso-configure-mc-hubresources.yaml │ ├── policy-rhsso-configure-mc-spokeresources.yaml │ ├── policy-rhsso-install-operator.yaml │ ├── policy-rhsso-operator.yaml │ ├── policy-rhsso-setup-for-acm.yaml │ ├── policy-rosa-autoimport.yaml │ ├── policy-scheduler.yaml │ ├── policy-sriov-operator.yaml │ ├── policy-sriovnetwork-templatized.yaml │ ├── policy-trusted-container.yaml │ ├── policy-trusted-node.yaml │ ├── policy-update-service-openshift-cluster.yaml │ ├── policy-upgrade-openshift-cluster.yaml │ ├── policy-vsphere-machine-set.yaml │ ├── policy-web-terminal-operator.yaml │ ├── policy-ztp-node-add-static.yaml │ ├── policy-ztp-node-add.yaml │ ├── policy-zts-cmc-deployment.yaml │ ├── policy-zts-xcrypt-deployment.yaml │ ├── policy-zts-xcrypt-rbac.yaml │ └── terminating-configpolicies.yaml ├── Operator-Management │ ├── README.md │ └── policy-cnv-mtv-operators │ │ ├── README.md │ │ ├── placement-cnv.yaml │ │ ├── policy-cnv.yaml │ │ └── policy-mtv.yaml ├── README.md ├── SC-System-and-Communications-Protection │ ├── policy-checkclusteroperator.yaml │ ├── policy-checknamespaces-terminating.yaml │ ├── policy-disable-self-provisioner.yaml │ ├── policy-ocp4-certs.yaml │ └── policy-remove-kubeadmin.yaml └── SI-System-and-Information-Integrity │ ├── policy-blackduck.yaml │ ├── policy-crowdstrike-falcon-rhmp.yaml │ ├── policy-falco-auditing.yaml │ ├── policy-falco-helm.yaml │ ├── policy-falco.yaml │ └── policy-sysdig.yaml ├── deploy ├── README.md ├── application.yaml ├── application_template.json ├── argoDeploy.sh ├── channel.yaml ├── channel_template.json ├── deploy.sh ├── kustomization_template.yaml ├── placement.yaml ├── placement_template.json ├── remove.sh ├── 
subscription.yaml ├── subscription_placement_template.json └── subscription_template.json ├── policygenerator ├── README.md ├── kustomize │ ├── kustomization.yml │ ├── policy1_deployment │ │ ├── deployment.yaml │ │ ├── namespace.yaml │ │ ├── route.yaml │ │ └── service.yaml │ ├── policy2_gatekeeper │ │ └── gatekeeper.yaml │ ├── policy3_kyverno │ │ └── kyverno.yaml │ └── policyGenerator.yaml ├── policy-sets │ ├── README.md │ ├── community │ │ ├── README.md │ │ ├── acs-secure │ │ │ ├── README.md │ │ │ ├── input-sensor │ │ │ │ ├── acs-check-certificates.yaml │ │ │ │ ├── policy-acs-sync-resources.yaml │ │ │ │ ├── policy-advanced-managed-cluster-security.yaml │ │ │ │ └── policy-advanced-managed-cluster-status.yaml │ │ │ ├── kustomization.yml │ │ │ └── policyGenerator.yaml │ │ ├── gatekeeper │ │ │ ├── README.md │ │ │ ├── images │ │ │ │ ├── policies-installed.png │ │ │ │ ├── policies.png │ │ │ │ ├── policysets.png │ │ │ │ ├── subscription.png │ │ │ │ └── topology.png │ │ │ ├── input │ │ │ │ ├── any-warn-deprecated-api-versions │ │ │ │ │ ├── src.rego │ │ │ │ │ └── src_test.rego │ │ │ │ ├── container-deny-added-caps │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── container-deny-escalation │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── container-deny-latest-tag │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── container-deny-no-resource-constraints │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── container-deny-privileged-if-tenant │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── container-deny-privileged │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── lib │ │ │ │ │ ├── core.rego │ │ │ │ │ ├── pods.rego │ │ │ │ │ ├── psp.rego │ │ │ │ │ ├── psp_test.rego │ │ │ │ │ ├── rbac.rego │ │ │ │ │ └── security.rego │ │ │ │ ├── pod-deny-host-alias │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── pod-deny-host-ipc │ │ │ │ │ ├── 
constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── pod-deny-host-network │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── pod-deny-host-pid │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── pod-deny-without-runasnonroot │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ ├── suspicious_assignment_of_controller_service_accounts │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ │ └── suspicious_self_subject_review │ │ │ │ │ ├── constraint.yaml │ │ │ │ │ └── template.yaml │ │ │ ├── kustomization.yml │ │ │ ├── placement.yaml │ │ │ └── policyGenerator.yaml │ │ ├── kyverno │ │ │ ├── README.md │ │ │ ├── best-practises-for-apps │ │ │ │ ├── README.md │ │ │ │ ├── input │ │ │ │ │ ├── affinity │ │ │ │ │ │ ├── add_node_affinity.yaml │ │ │ │ │ │ ├── create_pod_antiaffinity.yaml │ │ │ │ │ │ └── spread_pods_across_topology.yaml │ │ │ │ │ ├── base_images │ │ │ │ │ │ ├── allowed-base-images.yaml │ │ │ │ │ │ └── annotate-base-images.yaml │ │ │ │ │ ├── deployments │ │ │ │ │ │ ├── add_volume_deployment.yaml │ │ │ │ │ │ ├── mutate-large-termination-gps.yaml │ │ │ │ │ │ ├── restart_deployment_on_secret_change.yaml │ │ │ │ │ │ └── scale_deployment_zero.yaml │ │ │ │ │ ├── limitsrequests │ │ │ │ │ │ └── require_requests_limits.yaml │ │ │ │ │ ├── poddisruptionbudget │ │ │ │ │ │ └── create_default_pdb.yaml │ │ │ │ │ ├── require_deployments_have_multiple_replicas │ │ │ │ │ │ ├── deployment-musthaverolling-strategy.yaml │ │ │ │ │ │ └── require_deployments_have_multiple_replicas.yaml │ │ │ │ │ ├── require_probes │ │ │ │ │ │ ├── ensure_probes_different.yaml │ │ │ │ │ │ └── require_probes.yaml │ │ │ │ │ ├── resource-exhaustion │ │ │ │ │ │ └── disallow-self-provisioner │ │ │ │ │ │ │ └── disallow-self-provisioner.yaml │ │ │ │ │ ├── restrict_controlplane_scheduling │ │ │ │ │ │ └── restrict_controlplane_scheduling.yaml │ │ │ │ │ ├── routes │ │ │ │ │ │ └── check-routes.yaml │ │ │ │ │ └── validate_git │ │ │ │ │ │ └── 
validategit.yaml │ │ │ │ ├── kustomization.yml │ │ │ │ ├── placement.yaml │ │ │ │ └── policyGenerator.yaml │ │ │ ├── images │ │ │ │ ├── applications.png │ │ │ │ ├── policies-installed.png │ │ │ │ ├── policies.png │ │ │ │ ├── policysets.png │ │ │ │ ├── subscription.png │ │ │ │ └── topology.png │ │ │ ├── multitenancy │ │ │ │ ├── README.md │ │ │ │ ├── input │ │ │ │ │ ├── addlabelstotenant │ │ │ │ │ │ ├── add-labels-to-bluetenant.yaml │ │ │ │ │ │ └── add-labels-to-redtenant.yaml │ │ │ │ │ ├── disallowplacementrules │ │ │ │ │ │ └── disallow-placementRules.yaml │ │ │ │ │ ├── generateManagedClusterSetBinding │ │ │ │ │ │ ├── generateManagedClusterSetBindingblueteam-hub.yaml │ │ │ │ │ │ └── generateManagedClusterSetBindingredteam-hub.yaml │ │ │ │ │ ├── generatePlacementRules │ │ │ │ │ │ ├── generatePlacementblueteam-hub.yaml │ │ │ │ │ │ └── generatePlacementredteam-hub.yaml │ │ │ │ │ ├── generateall │ │ │ │ │ │ ├── generate-all-blueteam-spoke.yml │ │ │ │ │ │ └── generate-all-redteam-spoke.yml │ │ │ │ │ ├── generateargocdpersmissions │ │ │ │ │ │ ├── generate-argocd-permissions-blueteam-spoke.yaml │ │ │ │ │ │ └── generate-argocd-permissions-redteam-spoke.yaml │ │ │ │ │ ├── other │ │ │ │ │ │ └── add-ttl-to-dangling-job.yaml │ │ │ │ │ ├── preventupdatesappproject │ │ │ │ │ │ └── application-prevent-updates-project-all.yaml │ │ │ │ │ ├── restrictions │ │ │ │ │ │ ├── restrict-blueteam-destination-spoke.yaml │ │ │ │ │ │ ├── restrict-blueteam-to-its-appproject-all.yaml │ │ │ │ │ │ ├── restrict-blueteam-to-its-placement-hub.yaml │ │ │ │ │ │ ├── restrict-redteam-destination-spoke.yaml │ │ │ │ │ │ ├── restrict-redteam-to-its-appproject-hub.yaml │ │ │ │ │ │ └── restrict-redteam-to-its-placement-hub.yaml │ │ │ │ │ ├── sharedresources │ │ │ │ │ │ └── add-managedclustersetbinding-shared-sre-group.yaml │ │ │ │ │ ├── validatens │ │ │ │ │ │ ├── validate-ns-bluesre-spoke.yaml │ │ │ │ │ │ └── validate-ns-redsre-spoke.yaml │ │ │ │ │ └── validateplacement │ │ │ │ │ │ ├── 
preventupdates-appproject-all.yaml │ │ │ │ │ │ ├── validateplacementblueteam-hub.yml │ │ │ │ │ │ └── validateplacementredteam-hub.yml │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── placement.yaml │ │ │ │ └── policyGenerator.yaml │ │ │ └── security │ │ │ │ ├── README.md │ │ │ │ ├── input │ │ │ │ ├── authorization │ │ │ │ │ ├── disallow-scc-runasany.yaml │ │ │ │ │ ├── host-namespaces │ │ │ │ │ │ ├── disallow-host-ipc.yaml │ │ │ │ │ │ ├── disallow-host-network.yaml │ │ │ │ │ │ ├── disallow-host-pid.yaml │ │ │ │ │ │ └── disallow-host-ports.yaml │ │ │ │ │ └── protect-default-scc │ │ │ │ │ │ └── protect-default-scc.yaml │ │ │ │ ├── disallow-host-namespaces │ │ │ │ │ └── disallow-host-namespaces.yaml │ │ │ │ ├── disallow_latest_tag │ │ │ │ │ └── disallow_latest_tag.yaml │ │ │ │ ├── httpsonly │ │ │ │ │ └── httpsonly.yaml │ │ │ │ ├── networking │ │ │ │ │ └── block-nodeport-services │ │ │ │ │ │ └── block-nodeport-services.yaml │ │ │ │ ├── require-run-as-non-root-user │ │ │ │ │ ├── require-run-as-non-root-user.yaml │ │ │ │ │ └── restrictions │ │ │ │ │ │ ├── restrict-blueteam-destination-spoke.yaml │ │ │ │ │ │ ├── restrict-blueteam-to-its-appproject-all.yaml │ │ │ │ │ │ ├── restrict-blueteam-to-its-placement-hub.yaml │ │ │ │ │ │ ├── restrict-redteam-destination-spoke.yaml │ │ │ │ │ │ ├── restrict-redteam-to-its-appproject-hub.yaml │ │ │ │ │ │ └── restrict-redteam-to-its-placement-hub.yaml │ │ │ │ ├── restrict-binding-clusteradmin │ │ │ │ │ ├── block-stale-images │ │ │ │ │ │ └── block-stale-images.yaml │ │ │ │ │ └── restrict-binding-clusteradmin.yaml │ │ │ │ ├── restrict-clusterrole-nodesproxy │ │ │ │ │ └── restrict-clusterrole-nodesproxy.yaml │ │ │ │ ├── restrict-escalation-verbs-roles │ │ │ │ │ └── restrict-escalation-verbs-roles.yaml │ │ │ │ ├── restrict-ingress-wildcard │ │ │ │ │ └── restrict-ingress-wildcard.yaml │ │ │ │ ├── restrict-secret-role-verbs │ │ │ │ │ └── restrict-secret-role-verbs.yaml │ │ │ │ ├── restrict-service-account │ │ │ │ │ └── restrict_service_account.yaml 
│ │ │ │ ├── restrict-wildcard-resources │ │ │ │ │ └── restrict-wildcard-resources.yaml │ │ │ │ ├── restrict-wildcard-verbs │ │ │ │ │ └── restrict-wildcard-verbs.yaml │ │ │ │ ├── restrict_annotations │ │ │ │ │ └── restrict_annotations.yaml │ │ │ │ ├── restrict_automount_sa_token │ │ │ │ │ └── restrict_automount_sa_token.yaml │ │ │ │ ├── restrict_ingress_classes │ │ │ │ │ └── restrict_ingress_classes.yaml │ │ │ │ ├── restrict_ingress_defaultbackend │ │ │ │ │ └── restrict_ingress_defaultbackend.yaml │ │ │ │ ├── restrict_ingress_host │ │ │ │ │ └── restrict_ingress_host.yaml │ │ │ │ ├── restrict_loadbalancer │ │ │ │ │ └── restrict_loadbalancer.yaml │ │ │ │ ├── restrict_node_selection │ │ │ │ │ └── restrict_node_selection.yaml │ │ │ │ ├── restrict_secrets_by_label │ │ │ │ │ └── restrict-secrets-by-label.yaml │ │ │ │ ├── restrict_secrets_by_name │ │ │ │ │ └── restrict-secrets-by-name.yaml │ │ │ │ ├── restrict_service_port_range │ │ │ │ │ └── restrict-service-port-range.yaml │ │ │ │ ├── restrict_usergroup_fsgroup_id │ │ │ │ │ └── restrict_usergroup_fsgroup_id.yaml │ │ │ │ └── security-context-contraint │ │ │ │ │ └── disallow-security-context-constraint-anyuid.yaml │ │ │ │ ├── kustomization.yml │ │ │ │ ├── placement.yaml │ │ │ │ └── policyGenerator.yaml │ │ ├── ocp-best-practices │ │ │ ├── input-admin │ │ │ │ └── policy-remove-kubeadmin.yaml │ │ │ ├── input-audit │ │ │ │ └── policy-config-audit.yaml │ │ │ ├── input-certs │ │ │ │ └── policy-ocp4-certs.yaml │ │ │ ├── input-compliance │ │ │ │ ├── policy-compliance-operator-install.yaml │ │ │ │ ├── policy-compliance-operator-moderate-scan.yaml │ │ │ │ └── policy-compliance-operator-scan-results.yaml │ │ │ ├── input-etcd │ │ │ │ ├── policy-etcd-backup.yaml │ │ │ │ └── policy-etcdencryption.yaml │ │ │ ├── input-files │ │ │ │ ├── policy-file-integrity-node-status.yaml │ │ │ │ └── policy-file-integrity-operator.yaml │ │ │ ├── input-fips │ │ │ │ └── policy-check-fips.yaml │ │ │ ├── input-operators │ │ │ │ └── 
policy-checkclusteroperator.yaml │ │ │ ├── input-scc │ │ │ │ └── policy-scc-restricted.yaml │ │ │ ├── input │ │ │ │ └── placement.yaml │ │ │ ├── kustomization.yml │ │ │ └── policyGenerator.yaml │ │ ├── openshift-gitops │ │ │ ├── README.md │ │ │ ├── kustomization.yml │ │ │ ├── placement.yaml │ │ │ ├── policy-openshift-gitops-grc.yaml │ │ │ ├── policy-openshift-gitops.yaml │ │ │ └── policyGenerator.yaml │ │ ├── openshift-plus-setup │ │ │ ├── README.md │ │ │ ├── kustomization.yml │ │ │ ├── machine-sets.yaml │ │ │ ├── managedclustersetbinding.yaml │ │ │ ├── namespace.yaml │ │ │ ├── opp-settings.yaml │ │ │ ├── placement.yaml │ │ │ ├── policyGenerator.yaml │ │ │ └── test │ │ │ │ ├── README.md │ │ │ │ ├── aws │ │ │ │ ├── cluster-claim.yaml │ │ │ │ ├── example-drtt7-workerocs-us-east-2a.yaml │ │ │ │ ├── example-drtt7-workerocs-us-east-2b.yaml │ │ │ │ ├── example-drtt7-workerocs-us-east-2c.yaml │ │ │ │ ├── infrastructure.yaml │ │ │ │ └── opp-settings.yaml │ │ │ │ ├── mappings.yaml │ │ │ │ └── vsphere │ │ │ │ ├── ci-op-ldb7hpt4-d53f7-srxtz-storage.yaml │ │ │ │ ├── cluster-claim.yaml │ │ │ │ ├── infrastructure.yaml │ │ │ │ └── opp-settings.yaml │ │ ├── policygenerator-download │ │ │ ├── consoleclidownload.yml │ │ │ ├── deployment.yml │ │ │ ├── generator.yml │ │ │ ├── kustomization.yaml │ │ │ ├── namespace.yml │ │ │ ├── route.yml │ │ │ └── service.yml │ │ └── zts-xcrypt │ │ │ ├── README.md │ │ │ ├── input │ │ │ ├── operator-dh-v1-local.yaml │ │ │ ├── operator-executor.yaml │ │ │ ├── placementrule.yaml │ │ │ ├── xcrypt.zettaset.com_xcrypts.yaml │ │ │ └── zts-rbac.yaml │ │ │ ├── policy-zts-xcrypt-version-1.yaml │ │ │ └── policyGenerator.yaml │ └── stable │ │ ├── README.md │ │ ├── acm-hardening │ │ ├── README.md │ │ ├── input-backup │ │ │ └── policy-backup.yaml │ │ ├── input-managedcluster │ │ │ └── policy-managedclusteraddon-available.yaml │ │ ├── input-policyreport │ │ │ └── policy-check-policyreports.yaml │ │ ├── input-subscriptions │ │ │ └── policy-subscriptions.yaml │ │ ├── 
input │ │ │ └── placement.yaml │ │ ├── kustomization.yml │ │ └── policyGenerator.yaml │ │ └── openshift-plus │ │ ├── README.md │ │ ├── input-acm-observability │ │ ├── operator.yaml │ │ └── storage.yaml │ │ ├── input-acs-central │ │ ├── policy-acs-central-status.yaml │ │ └── policy-acs-operator-central.yaml │ │ ├── input-compliance │ │ └── policy-compliance-operator-install.yaml │ │ ├── input-odf │ │ ├── policy-object-storage.yaml │ │ ├── policy-odf-cluster.yaml │ │ ├── policy-odf-status.yaml │ │ └── policy-odf.yaml │ │ ├── input-quay │ │ ├── policy-config-quay.yaml │ │ ├── policy-hub-quay-bridge.yaml │ │ ├── policy-install-quay.yaml │ │ ├── policy-quay-bridge.yaml │ │ └── policy-quay-status.yaml │ │ ├── input-sensor │ │ ├── acs-check-certificates.yaml │ │ ├── policy-acs-central-ca-bundle-expired.yaml │ │ ├── policy-acs-central-ca-bundle-v1.yaml │ │ ├── policy-acs-central-ca-bundle-v2.yaml │ │ ├── policy-acs-central-ca-bundle.yaml │ │ ├── policy-acs-sync-resources.yaml │ │ ├── policy-advanced-managed-cluster-security.yaml │ │ └── policy-advanced-managed-cluster-status.yaml │ │ ├── input │ │ ├── clusters-placement.yaml │ │ └── hub-placement.yaml │ │ ├── kustomization.yml │ │ └── policyGenerator.yaml └── subscription.yaml └── stable ├── AC-Access-Control ├── README.md ├── policy-role.yaml └── policy-rolebinding.yaml ├── CA-Security-Assessment-and-Authorization ├── README.md └── policy-compliance-operator-install.yaml ├── CM-Configuration-Management ├── README.md ├── argocd-policy-healthchecks.yaml ├── policy-compliance-operator-cis-scan.yaml ├── policy-compliance-operator-e8-scan.yaml ├── policy-gatekeeper-operator-downstream.yaml ├── policy-namespace.yaml ├── policy-pod.yaml └── policy-zts-cmc.yaml ├── README.md ├── SC-System-and-Communications-Protection ├── README.md ├── policy-certificate.yaml ├── policy-etcdencryption.yaml ├── policy-limitmemory.yaml ├── policy-psp.yaml └── policy-scc.yaml └── SI-System-and-Information-Integrity ├── README.md └── 
policy-imagemanifestvuln.yaml /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: github-actions 4 | directory: / 5 | schedule: 6 | interval: weekly 7 | groups: 8 | github-actions: 9 | patterns: 10 | - "*" 11 | -------------------------------------------------------------------------------- /.github/workflows/validate.yml: -------------------------------------------------------------------------------- 1 | name: Validation tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | defaults: 12 | run: 13 | shell: bash 14 | working-directory: policy-collection 15 | 16 | jobs: 17 | validation-tests: 18 | runs-on: ubuntu-latest 19 | name: Validation tests 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | with: 24 | path: policy-collection 25 | 26 | - name: Verify content 27 | run: | 28 | ./build/validate-policies.sh 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | deploy/manifests.yaml 2 | .idea 3 | bin 4 | schemas 5 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Refer to our [Open Cluster Management Community Code of Conduct](https://github.com/open-cluster-management-io/community/blob/main/CODE_OF_CONDUCT.md) -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - dhaiducek 3 | - gparvin 4 | - JustinKuli 5 | - yiraeChristineKim 6 | reviewers: 7 | - dhaiducek 8 | - gparvin 9 | - JustinKuli 10 | - yiraeChristineKim 11 | 
-------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Response 2 | 3 | If you've found a security issue that you'd like to disclose confidentially please contact Red Hat's Product Security team. 4 | Details at https://access.redhat.com/security/team/contact 5 | -------------------------------------------------------------------------------- /community/AC-Access-Control/policy-roles-no-wildcards.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-disallowed-roles 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: AC Access Control 8 | policy.open-cluster-management.io/controls: AC-6 Least Privilege 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-disallowed-roles-sample-role 18 | spec: 19 | remediationAction: inform # will be overridden by remediationAction in parent policy 20 | severity: high 21 | namespaceSelector: 22 | include: ["default"] 23 | object-templates: 24 | - complianceType: mustnothave 25 | objectDefinition: 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | kind: Role 28 | rules: 29 | - apiGroups: 30 | - '*' 31 | resources: 32 | - '*' 33 | verbs: 34 | - '*' 35 | -------------------------------------------------------------------------------- /community/AU-Audit-and-Accountability/policy-openshift-audit-logs-sample.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-config-audit 5 | annotations: 6 
| policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: AU Audit and Accountability 8 | policy.open-cluster-management.io/controls: AU-3 Content of Audit Records 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-config-audit 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: config.openshift.io/v1 25 | kind: APIServer 26 | metadata: 27 | name: cluster 28 | spec: 29 | audit: 30 | customRules: 31 | - group: system:authenticated:oauth 32 | profile: WriteRequestBodies 33 | - group: system:authenticated 34 | profile: AllRequestBodies 35 | profile: Default 36 | -------------------------------------------------------------------------------- /community/CA-Security-Assessment-and-Authorization/policy-check-fips.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: checkfipscompliance 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST-CSF 7 | policy.open-cluster-management.io/categories: PR.IP Information Protection Processes and Procedures 8 | policy.open-cluster-management.io/controls: PR.IP-1 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: 'checkfipscompliance' 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: machineconfiguration.openshift.io/v1 25 | kind: MachineConfig 26 | metadata: 27 | labels: 
28 | machineconfiguration.openshift.io/role: worker 29 | name: 99-worker-fips 30 | spec: 31 | fips: true 32 | - complianceType: musthave 33 | objectDefinition: 34 | apiVersion: machineconfiguration.openshift.io/v1 35 | kind: MachineConfig 36 | metadata: 37 | labels: 38 | machineconfiguration.openshift.io/role: master 39 | name: 99-master-fips 40 | spec: 41 | fips: true 42 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/acm-hub-pvc-backup-policyset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1beta1 2 | kind: PolicySet 3 | metadata: 4 | name: acm-hub-pvc-backup-policyset 5 | namespace: open-cluster-management-backup 6 | spec: 7 | description: Hub PVC backup and restore using volsync with the restic mover feature. This leverages CSI volume snapshot. 8 | policies: 9 | - acm-hub-pvc-backup-config 10 | - acm-hub-pvc-backup-source 11 | - acm-hub-pvc-backup-destination 12 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/acm-hub-pvc-placement.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cluster.open-cluster-management.io/v1beta1 3 | kind: Placement 4 | metadata: 5 | name: acm-hub-pvc-placement 6 | namespace: open-cluster-management-backup 7 | spec: 8 | predicates: 9 | - requiredClusterSelector: 10 | labelSelector: 11 | matchExpressions: 12 | - key: local-cluster 13 | operator: In 14 | values: 15 | - 'true' 16 | - requiredClusterSelector: 17 | labelSelector: 18 | matchExpressions: 19 | - key: is-hub 20 | operator: In 21 | values: 22 | - 'true' 23 | --- 24 | apiVersion: policy.open-cluster-management.io/v1 25 | kind: PlacementBinding 26 | metadata: 27 | name: acm-hub-pvc-placement 28 | namespace: open-cluster-management-backup 29 | 
placementRef: 30 | name: acm-hub-pvc-placement 31 | apiGroup: cluster.open-cluster-management.io 32 | kind: Placement 33 | subjects: 34 | - name: acm-hub-pvc-backup-policyset 35 | apiGroup: policy.open-cluster-management.io 36 | kind: PolicySet 37 | 38 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_dest_policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_dest_policy.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_dest_policy_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_dest_policy_1.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_source_policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_source_policy.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_source_policy_1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/backup_source_policy_1.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/config_policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/config_policy.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/policies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/policies.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/policyset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/policyset.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_policy.png -------------------------------------------------------------------------------- 
/community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_policy_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_policy_1.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_pvc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_pvc.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_rd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_dest_rd.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_source_policy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/community/CM-Configuration-Management/acm-hub-pvc-backup/images/restore_source_policy.png -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-cert-manager-operator.yaml: -------------------------------------------------------------------------------- 1 | # This policy installs cert-manager from the jetstack cert-manager 
community 2 | # operator. Cert-manager is installed in the openshift-operators namespace when 3 | # this policy is enforced. 4 | apiVersion: policy.open-cluster-management.io/v1 5 | kind: Policy 6 | metadata: 7 | name: policy-cert-manager-operator 8 | annotations: 9 | policy.open-cluster-management.io/standards: NIST SP 800-53 10 | policy.open-cluster-management.io/categories: CM Configuration Management 11 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 12 | spec: 13 | remediationAction: enforce 14 | disabled: false 15 | policy-templates: 16 | - objectDefinition: 17 | apiVersion: policy.open-cluster-management.io/v1 18 | kind: ConfigurationPolicy 19 | metadata: 20 | name: cert-manager-operator-subscription 21 | spec: 22 | remediationAction: inform 23 | severity: high 24 | object-templates: 25 | - complianceType: musthave 26 | objectDefinition: 27 | apiVersion: operators.coreos.com/v1alpha1 28 | kind: Subscription 29 | metadata: 30 | name: cert-manager 31 | namespace: openshift-operators 32 | spec: 33 | channel: stable 34 | name: cert-manager 35 | source: community-operators 36 | sourceNamespace: openshift-marketplace 37 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-cluster-dns-sample.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-config-dns 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-config-dns 18 | spec: 19 | 
remediationAction: inform # will be overridden by remediationAction in parent policy 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: config.openshift.io/v1 25 | kind: DNS 26 | metadata: 27 | name: cluster 28 | spec: 29 | baseDomain: 30 | privateZone: 31 | tags: 32 | Name: -int 33 | kubernetes.io/cluster/-wfpg4: owned 34 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-custom-catalog.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-acm-catalogsource 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST 800-53, NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management, PR.IP Information Protection Processes and Procedures 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration, PR.IP-1 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-acm-catalogsource 18 | spec: 19 | remediationAction: inform 20 | severity: medium 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: operators.coreos.com/v1alpha1 25 | kind: CatalogSource 26 | metadata: 27 | name: acm-custom-registry 28 | namespace: open-cluster-management 29 | spec: 30 | address: acm-custom-registry.open-cluster-management.svc:50051 31 | sourceType: grpc 32 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-gatekeeper-annotation-owner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-gatekeeper-annotation-owner 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: enforce 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-gatekeeper-annotation-owner 18 | spec: 19 | remediationAction: inform # will be overridden by remediationAction in parent policy 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: mutations.gatekeeper.sh/v1alpha1 25 | kind: AssignMetadata 26 | metadata: 27 | name: pod-annotation-owner 28 | spec: 29 | match: 30 | scope: Namespaced 31 | kinds: 32 | - apiGroups: ["*"] 33 | kinds: ["Pod"] 34 | location: "metadata.annotations.owner" 35 | parameters: 36 | assign: 37 | value: "admin" 38 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-image-policy-sample.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: image-policy 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: image-policy-example 18 | spec: 19 | remediationAction: enforce 20 | 
severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: config.openshift.io/v1 25 | kind: Image 26 | metadata: 27 | name: cluster 28 | spec: 29 | registrySources: 30 | allowedRegistries: 31 | - myregistry.com 32 | - registry.redhat.io 33 | - registry.access.redhat.com 34 | - quay.io 35 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-ingress-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-ingress-controller 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST 800-53, NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management, PR.IP Information Protection Processes and Procedures 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration, PR.IP-1 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-ingress-controller 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: operator.openshift.io/v1 25 | kind: IngressController 26 | metadata: 27 | name: default 28 | namespace: openshift-ingress-operator 29 | spec: 30 | nodePlacement: 31 | nodeSelector: 32 | matchLabels: 33 | node-role.kubernetes.io/infra: "" 34 | tolerations: 35 | - effect: NoSchedule 36 | operator: Exists 37 | - effect: NoExecute 38 | operator: Exists 39 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-integrity-shield-events.yaml: 
-------------------------------------------------------------------------------- 1 | # This policy checks if Integrity Shield has denied one or 2 | # more requests on the managed clusters. 3 | # 4 | # "inform" This policy will show "Not compliant" on UI when something was denied. 5 | 6 | apiVersion: policy.open-cluster-management.io/v1 7 | kind: Policy 8 | metadata: 9 | name: policy-integrity-shield-events 10 | annotations: 11 | policy.open-cluster-management.io/standards: NIST SP 800-53 12 | policy.open-cluster-management.io/categories: CM Configuration Management 13 | policy.open-cluster-management.io/controls: CM-5 Access Restrictions for Change 14 | spec: 15 | remediationAction: inform 16 | disabled: false 17 | policy-templates: 18 | - objectDefinition: 19 | apiVersion: policy.open-cluster-management.io/v1 20 | kind: ConfigurationPolicy 21 | metadata: 22 | name: policy-integrity-shield-admission 23 | spec: 24 | remediationAction: inform # will be overridden by remediationAction in parent policy 25 | severity: low 26 | namespaceSelector: 27 | include: ["*"] 28 | exclude: ["kube-*", "openshift-*"] 29 | object-templates: 30 | - complianceType: mustnothave 31 | objectDefinition: 32 | apiVersion: v1 33 | kind: Event 34 | metadata: 35 | annotations: 36 | integrityshield.io/eventType: verify-result 37 | integrityshield.io/eventResult: deny 38 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-integrity-shield-observer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | annotations: 5 | policy.open-cluster-management.io/categories: CM Configuration Management 6 | policy.open-cluster-management.io/controls: CM-5 Access Restrictions for Change 7 | policy.open-cluster-management.io/standards: NIST SP 800-53 8 | name: policy-integrity-shield-observer 9 | spec: 10 | 
remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-integrity-shield-observer 18 | spec: 19 | namespaceSelector: 20 | include: 21 | - integrity-shield-operator-system 22 | object-templates: 23 | - complianceType: mustnothave 24 | objectDefinition: 25 | apiVersion: apis.integrityshield.io/v1 26 | kind: ManifestIntegrityState 27 | metadata: 28 | labels: 29 | integrityshield.io/verifyResourceIgnored: "false" 30 | integrityshield.io/verifyResourceViolation: "true" 31 | remediationAction: inform 32 | severity: low 33 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-kyverno-sample.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-kyverno-sample 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-kyverno-sample 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: kyverno.io/v1 25 | kind: ClusterPolicy 26 | metadata: 27 | name: require-labels 28 | spec: 29 | validationFailureAction: enforce 30 | rules: 31 | - name: check-for-labels 32 | match: 33 | resources: 34 | kinds: 35 | - Pod 36 | validate: 37 | message: "label `app.kubernetes.io/name` is required" 38 | pattern: 39 | metadata: 40 | labels: 
41 | app.kubernetes.io/name: "?*" 42 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-label-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-label-cluster 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: enforce 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-label-cluster 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: cluster.open-cluster-management.io/v1 25 | kind: ManagedCluster 26 | metadata: 27 | labels: 28 | profile: demo 29 | name: demo-cluster-label 30 | spec: 31 | hubAcceptsClient: true 32 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-oauth-config.yaml: -------------------------------------------------------------------------------- 1 | # This policy applies configuration to OpenShift OAuth custom resources. 
2 | # For more information on configuring the OAuth server, consult the OpenShift documentation: 3 | # https://docs.openshift.com/container-platform/4.7/authentication/configuring-oauth-clients.html 4 | apiVersion: policy.open-cluster-management.io/v1 5 | kind: Policy 6 | metadata: 7 | name: policy-oauth-config 8 | annotations: 9 | policy.open-cluster-management.io/standards: NIST SP 800-53 10 | policy.open-cluster-management.io/categories: CM Configuration Management 11 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 12 | spec: 13 | remediationAction: inform 14 | disabled: false 15 | policy-templates: 16 | - objectDefinition: 17 | apiVersion: policy.open-cluster-management.io/v1 18 | kind: ConfigurationPolicy 19 | metadata: 20 | name: oauth-config 21 | spec: 22 | remediationAction: inform 23 | severity: low 24 | object-templates: 25 | - complianceType: musthave 26 | objectDefinition: 27 | apiVersion: config.openshift.io/v1 28 | kind: OAuth 29 | metadata: 30 | name: cluster 31 | spec: 32 | tokenConfig: 33 | accessTokenInactivityTimeout: 10m0s 34 | accessTokenMaxAgeSeconds: 28800 35 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-opa-sample.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-opa-sample 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: enforce 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-opa-configmap 18 | spec: 19 | remediationAction: enforce 20 | severity: 
high 21 | namespaceSelector: 22 | include: ["default"] 23 | object-templates: 24 | - complianceType: musthave 25 | objectDefinition: 26 | apiVersion: v1 27 | data: 28 | no_pod.rego: |- 29 | package kubernetes.admission 30 | deny[msg] { 31 | input.request.kind.kind == "Pod" 32 | input.request.namespace == "opa" 33 | image := input.request.object.spec.containers[_].image 34 | not startswith(image, "hooli.com") 35 | msg := sprintf("image fails to come from trusted registry: %v", [image]) 36 | } 37 | kind: ConfigMap 38 | metadata: 39 | name: nopod 40 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-openshift-gitops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: openshift-gitops-installed 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: enforce 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: openshift-gitops-installed 18 | spec: 19 | remediationAction: enforce 20 | severity: medium 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: operators.coreos.com/v1alpha1 25 | kind: Subscription 26 | metadata: 27 | name: openshift-gitops-operator 28 | namespace: openshift-operators 29 | labels: 30 | operators.coreos.com/openshift-gitops-operator.openshift-operators: '' 31 | spec: 32 | channel: latest 33 | installPlanApproval: Automatic 34 | name: openshift-gitops-operator 35 | source: redhat-operators 36 | sourceNamespace: openshift-marketplace 37 | 
-------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-proxy-protocol.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-proxy-protocol 5 | annotations: 6 | policy.open-cluster-management.io/categories: CM Configuration Management 7 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 8 | policy.open-cluster-management.io/standards: NIST SP 800-53 9 | spec: 10 | disabled: false 11 | policy-templates: 12 | - objectDefinition: 13 | apiVersion: policy.open-cluster-management.io/v1 14 | kind: ConfigurationPolicy 15 | metadata: 16 | name: policy-proxy-protocol 17 | spec: 18 | object-templates: 19 | - complianceType: musthave 20 | objectDefinition: 21 | apiVersion: operator.openshift.io/v1 22 | kind: IngressController 23 | metadata: 24 | finalizers: 25 | - ingresscontroller.operator.openshift.io/finalizer-ingresscontroller 26 | name: default 27 | namespace: openshift-ingress-operator 28 | spec: 29 | endpointPublishingStrategy: 30 | hostNetwork: 31 | protocol: PROXY 32 | type: HostNetwork 33 | remediationAction: inform 34 | severity: low 35 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-resiliency-image-pruner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-resiliency-image-pruner 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management, PR.IP Information Protection Processes and Procedures 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration, PR.IP-1 Baseline Configuration 9 | spec: 10 | remediationAction: 
inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-resiliency-image-pruner 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: imageregistry.operator.openshift.io/v1 25 | kind: ImagePruner 26 | metadata: 27 | spec: 28 | schedule: 0 0 * * * 29 | suspend: false 30 | keepTagRevisions: 3 31 | keepYoungerThan: 60m 32 | resources: {} 33 | affinity: {} 34 | nodeSelector: {} 35 | tolerations: {} 36 | startingDeadlineSeconds: 60 37 | successfulJobsHistoryLimit: 3 38 | failedJobsHistoryLimit: 3 39 | logLevel: Normal 40 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-scheduler 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-config-scheduler 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | apiVersion: config.openshift.io/v1 25 | kind: Scheduler 26 | metadata: 27 | name: cluster 28 | spec: 29 | mastersSchedulable: false 30 | policy: 31 | name: '' 32 | -------------------------------------------------------------------------------- 
/community/CM-Configuration-Management/policy-trusted-container.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-trusted-container 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policies.ibm.com/v1alpha1 15 | kind: TrustedContainerPolicy 16 | metadata: 17 | name: example-trustedcontainerpolicy 18 | spec: 19 | severity: low 20 | namespaceSelector: 21 | include: ["default"] 22 | remediationAction: inform 23 | imageRegistry: quay.io 24 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-trusted-node.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-trusted-node 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policies.ibm.com/v1alpha1 15 | kind: TrustedNodePolicy 16 | metadata: 17 | name: example-trustednodepolicy 18 | spec: 19 | severity: low 20 | namespaceSelector: 21 | include: ["default"] 22 | remediationAction: inform 23 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/policy-web-terminal-operator.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-web-terminal-operator 5 | annotations: 6 | policy.open-cluster-management.io/categories: CM Configuration Management 7 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 8 | policy.open-cluster-management.io/standards: NIST 800-53 9 | spec: 10 | disabled: false 11 | policy-templates: 12 | - objectDefinition: 13 | apiVersion: policy.open-cluster-management.io/v1 14 | kind: ConfigurationPolicy 15 | metadata: 16 | name: policy-web-terminal-operator 17 | spec: 18 | object-templates: 19 | - complianceType: musthave 20 | objectDefinition: 21 | apiVersion: v1 22 | kind: Namespace 23 | metadata: 24 | name: openshift-terminal 25 | - complianceType: musthave 26 | objectDefinition: 27 | apiVersion: operators.coreos.com/v1alpha1 28 | kind: Subscription 29 | metadata: 30 | name: web-terminal 31 | namespace: openshift-operators 32 | spec: 33 | name: web-terminal 34 | channel: fast 35 | installPlanApproval: Automatic 36 | source: redhat-operators 37 | sourceNamespace: openshift-marketplace 38 | remediationAction: inform 39 | severity: medium 40 | remediationAction: inform 41 | -------------------------------------------------------------------------------- /community/CM-Configuration-Management/terminating-configpolicies.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: terminating-configpolicies 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: 
policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: terminating-configpolicies 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | namespaceSelector: 22 | include: 23 | - "{{hub .ManagedClusterName hub}}" 24 | object-templates: 25 | - complianceType: mustnothave 26 | objectDefinition: 27 | apiVersion: policy.open-cluster-management.io/v1 28 | kind: ConfigurationPolicy 29 | status: 30 | compliant: Terminating 31 | -------------------------------------------------------------------------------- /community/Operator-Management/README.md: -------------------------------------------------------------------------------- 1 | # Operator Management 2 | 3 | ### policy-cnv-mtv-operators 4 | [Enable Container Native Virtualization in a fleet managed by Advanced Cluster Management for Kubernetes](./policy-cnv-mtv-operators/README.md) 5 | * Roll out the OpenShift Virtualization operator to the fleet 6 | * Roll out the Migration Toolkit for Virtualization operator to the hub 7 | 8 | -------------------------------------------------------------------------------- /community/SC-System-and-Communications-Protection/policy-checknamespaces-terminating.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-checknamespaces-terminating 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST-CSF 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-namespaces 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: 
mustnothave 23 | objectDefinition: 24 | apiVersion: v1 25 | kind: Namespace 26 | status: 27 | phase: Terminating 28 | -------------------------------------------------------------------------------- /community/SC-System-and-Communications-Protection/policy-disable-self-provisioner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-remove-self-provisioner 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: SC System and Communications Protection 8 | policy.open-cluster-management.io/controls: SC-1 SYSTEM AND COMMUNICATIONS PROTECTION POLICY AND PROCEDURES 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-remove-self-provisioner 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: mustonlyhave 23 | objectDefinition: 24 | kind: ClusterRoleBinding 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | metadata: 27 | name: self-provisioners 28 | annotations: 29 | rbac.authorization.kubernetes.io/autoupdate: 'false' 30 | subjects: [] 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: self-provisioner 35 | -------------------------------------------------------------------------------- /community/SC-System-and-Communications-Protection/policy-remove-kubeadmin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-remove-kubeadmin 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: SC System 
and Communications Protection 8 | policy.open-cluster-management.io/controls: SC-1 SYSTEM AND COMMUNICATIONS PROTECTION POLICY AND PROCEDURES 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-remove-kubeadmin 18 | spec: 19 | remediationAction: inform 20 | severity: low 21 | object-templates: 22 | - complianceType: mustnothave 23 | objectDefinition: 24 | kind: Secret 25 | apiVersion: v1 26 | metadata: 27 | name: kubeadmin 28 | namespace: kube-system 29 | type: Opaque 30 | -------------------------------------------------------------------------------- /deploy/application.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: app.k8s.io/v1beta1 2 | kind: Application 3 | metadata: 4 | name: demo-stable-policies-app 5 | namespace: policies 6 | spec: 7 | componentKinds: 8 | - group: apps.open-cluster-management.io 9 | kind: Subscription 10 | descriptor: {} 11 | selector: 12 | matchExpressions: 13 | - key: app 14 | operator: In 15 | values: 16 | - demo-stable-policies 17 | -------------------------------------------------------------------------------- /deploy/application_template.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "replace", 4 | "path": "/metadata/name", 5 | "value": "##NAME##-app" 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/spec/selector/matchExpressions/0/values/0", 10 | "value": "##NAME##" 11 | } 12 | ] 13 | -------------------------------------------------------------------------------- /deploy/channel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Channel 3 | metadata: 4 | name: demo-stable-policies-chan 5 | annotations: 6 | 
apps.open-cluster-management.io/reconcile-rate: medium 7 | spec: 8 | type: GitHub 9 | pathname: https://github.com/stolostron/policy-collection.git 10 | -------------------------------------------------------------------------------- /deploy/channel_template.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "replace", 4 | "path": "/metadata/name", 5 | "value": "##NAME##-chan" 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/spec/pathname", 10 | "value": "##GH_URL##" 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/metadata/annotations/apps.open-cluster-management.io~1reconcile-rate", 15 | "value": "##RATE##" 16 | } 17 | ] 18 | -------------------------------------------------------------------------------- /deploy/kustomization_template.yaml: -------------------------------------------------------------------------------- 1 | # kustomization.yaml 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | # namespace to deploy all Resources to 6 | namespace: ##NAMESPACE## 7 | commonLabels: 8 | app: ##NAME## 9 | 10 | generatorOptions: 11 | disableNameSuffixHash: true 12 | 13 | # list of Resource Config to be Applied 14 | resources: 15 | - channel.yaml 16 | - subscription.yaml 17 | ## - application.yaml 18 | ## - placement.yaml 19 | 20 | patchesJson6902: 21 | ##- path: application_patch.json 22 | ## target: 23 | ## group: app.k8s.io 24 | ## version: v1beta1 25 | ## kind: Application 26 | ## name: demo-stable-policies-app 27 | ##- path: placement_patch.json 28 | ## target: 29 | ## group: apps.open-cluster-management.io 30 | ## version: v1 31 | ## kind: PlacementRule 32 | ## name: demo-stable-policies-placement 33 | ##- path: subscription_placement_patch.json 34 | ## target: 35 | ## group: apps.open-cluster-management.io 36 | ## version: v1 37 | ## kind: Subscription 38 | ## name: demo-stable-policies-sub 39 | - path: subscription_patch.json 40 | target: 41 | group: 
apps.open-cluster-management.io 42 | version: v1 43 | kind: Subscription 44 | name: demo-stable-policies-sub 45 | - path: channel_patch.json 46 | target: 47 | group: apps.open-cluster-management.io 48 | version: v1 49 | kind: Channel 50 | name: demo-stable-policies-chan 51 | -------------------------------------------------------------------------------- /deploy/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: demo-stable-policies-placement 5 | spec: 6 | clusterSelector: 7 | matchExpressions: 8 | - key: local-cluster 9 | operator: In 10 | values: 11 | - "true" 12 | -------------------------------------------------------------------------------- /deploy/placement_template.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "replace", 4 | "path": "/metadata/name", 5 | "value": "##NAME##-placement" 6 | } 7 | ] 8 | -------------------------------------------------------------------------------- /deploy/subscription.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | annotations: 5 | apps.open-cluster-management.io/git-branch: main 6 | apps.open-cluster-management.io/git-path: stable 7 | name: demo-stable-policies-sub 8 | spec: 9 | channel: policies/demo-stable-policies-chan 10 | placement: 11 | local: true 12 | -------------------------------------------------------------------------------- /deploy/subscription_placement_template.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "replace", 4 | "path": "/spec/placement", 5 | "value": { 6 | "placementRef": { 7 | "kind": "PlacementRule", 8 | "name": "##NAME##-placement" 9 | } 10 | } 11 | } 12 | ] 13 | 
-------------------------------------------------------------------------------- /deploy/subscription_template.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "replace", 4 | "path": "/metadata/name", 5 | "value": "##NAME##-sub" 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/metadata/annotations/apps.open-cluster-management.io~1git-branch", 10 | "value": "##GH_BRANCH##" 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/metadata/annotations/apps.open-cluster-management.io~1git-path", 15 | "value": "##GH_PATH##" 16 | }, 17 | { 18 | "op": "replace", 19 | "path": "/spec/channel", 20 | "value": "##NAMESPACE##/##NAME##-chan" 21 | } 22 | ] 23 | -------------------------------------------------------------------------------- /policygenerator/kustomize/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - policyGenerator.yaml 3 | commonLabels: 4 | env: prod 5 | -------------------------------------------------------------------------------- /policygenerator/kustomize/policy1_deployment/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: gitops-demo 5 | namespace: gitops-demo 6 | labels: 7 | app: gitops-demo 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: gitops-demo 13 | template: 14 | metadata: 15 | labels: 16 | app: gitops-demo 17 | spec: 18 | containers: 19 | - name: gitops-demo 20 | image: quay.io/centos7/httpd-24-centos7:2021-08-06T13_04_45_02_00-905d024 21 | ports: 22 | - containerPort: 8080 23 | triggers: 24 | - type: ConfigChange 25 | -------------------------------------------------------------------------------- /policygenerator/kustomize/policy1_deployment/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | 
metadata: 4 | name: gitops-demo 5 | -------------------------------------------------------------------------------- /policygenerator/kustomize/policy1_deployment/route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: gitops-demo 5 | namespace: gitops-demo 6 | labels: 7 | app: gitops-demo 8 | spec: 9 | port: 10 | targetPort: http 11 | tls: 12 | insecureEdgeTerminationPolicy: Redirect 13 | termination: edge 14 | to: 15 | kind: Service 16 | name: gitops-demo 17 | -------------------------------------------------------------------------------- /policygenerator/kustomize/policy1_deployment/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gitops-demo 5 | namespace: gitops-demo 6 | labels: 7 | app: gitops-demo 8 | spec: 9 | ports: 10 | - name: http 11 | port: 80 12 | protocol: TCP 13 | targetPort: 8080 14 | selector: 15 | app: gitops-demo 16 | -------------------------------------------------------------------------------- /policygenerator/kustomize/policy3_kyverno/kyverno.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: require-labels 5 | spec: 6 | validationFailureAction: audit 7 | rules: 8 | - name: check-for-labels 9 | match: 10 | resources: 11 | kinds: 12 | - Namespace 13 | validate: 14 | message: "The label `purpose` is required." 
15 | pattern: 16 | metadata: 17 | labels: 18 | purpose: "?*" 19 | -------------------------------------------------------------------------------- /policygenerator/kustomize/policyGenerator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: PolicyGenerator 3 | metadata: 4 | name: demo-policy-generator 5 | placementBindingDefaults: 6 | name: demo-placement-binding 7 | policyDefaults: 8 | namespace: policies 9 | placement: 10 | name: demo-placement-rule 11 | labelSelector: 12 | matchExpressions: 13 | - {key: "local-cluster", operator: In, values: ["true"]} 14 | remediationAction: inform 15 | severity: medium 16 | policies: 17 | - name: policy-app 18 | manifests: 19 | - path: policy1_deployment 20 | patches: 21 | - apiVersion: apps/v1 22 | kind: Deployment 23 | metadata: 24 | name: gitops-demo 25 | namespace: gitops-demo 26 | spec: 27 | template: 28 | spec: 29 | containers: 30 | - name: gitops-demo 31 | image: quay.io/centos7/httpd-24-centos7:2.4 32 | - name: policy-gatekeeper-require-labels 33 | consolidateManifests: false 34 | manifests: 35 | - path: policy2_gatekeeper/gatekeeper.yaml 36 | - name: policy-kyverno-require-labels 37 | manifests: 38 | - path: policy3_kyverno/kyverno.yaml 39 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/README.md: -------------------------------------------------------------------------------- 1 | # What are `PolicySets` 2 | 3 | A `PolicySet` is a collection of policies that can be placed together instead of having to manage 4 | their placement individually. The intent of a particular `PolicySet` can be specified in the 5 | `description` field and the `status` of the `PolicySet` reflects the status of the policies that it 6 | contains. 7 | 8 | # Stable `PolicySets` 9 | 10 | A stable `PolicySet` is a `PolicySet` that is tested and supported by Red Hat. 
These `PolicySets` 11 | work with little or no modifications. 12 | 13 | # Community `PolicySets` 14 | 15 | A community `PolicySet` is a `PolicySet` that has been contributed by the community. Community 16 | contributions provide value but could be specific to certain environments and may need modifications 17 | to apply the solution to your environment. Some community `PolicySets` provide samples showing how a 18 | problem can be solved for one user of the policy framework, which may or may not apply to other 19 | users. 20 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/acs-secure/input-sensor/acs-check-certificates.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: CertificatePolicy 3 | metadata: 4 | name: acs-bundle-certificates 5 | spec: 6 | namespaceSelector: 7 | include: ["policies"] 8 | remediationAction: inform 9 | severity: high 10 | minimumDuration: 720h 11 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/acs-secure/input-sensor/policy-acs-sync-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: '{{ copySecretData "stackrox" "admission-control-tls" }}' 3 | kind: Secret 4 | metadata: 5 | labels: 6 | certificate_key_name: admission-control-cert.pem 7 | name: admission-control-tls 8 | namespace: policies 9 | type: Opaque 10 | --- 11 | apiVersion: v1 12 | data: '{{ copySecretData "stackrox" "collector-tls" }}' 13 | kind: Secret 14 | metadata: 15 | labels: 16 | certificate_key_name: collector-cert.pem 17 | name: collector-tls 18 | namespace: policies 19 | type: Opaque 20 | --- 21 | apiVersion: v1 22 | data: '{{ copySecretData "stackrox" "sensor-tls" }}' 23 | kind: Secret 24 | metadata: 25 | labels: 26 | certificate_key_name: sensor-cert.pem 27 | 
name: sensor-tls 28 | namespace: policies 29 | type: Opaque 30 | --- 31 | apiVersion: v1 32 | data: 33 | acs-host: '{{ (lookup "route.openshift.io/v1" "Route" "stackrox" "central").spec.host }}:443' 34 | kind: ConfigMap 35 | metadata: 36 | name: acs-config 37 | namespace: policies 38 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/acs-secure/input-sensor/policy-advanced-managed-cluster-status.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: sensor 5 | namespace: stackrox 6 | status: 7 | conditions: 8 | - status: "True" 9 | type: Available 10 | --- 11 | apiVersion: apps/v1 12 | kind: DaemonSet 13 | metadata: 14 | name: collector 15 | namespace: stackrox 16 | status: 17 | numberMisscheduled: 0 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/acs-secure/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - ./policyGenerator.yaml 3 | commonLabels: 4 | open-cluster-management.io/policy-set: acs-sensors 5 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/images/policies-installed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/gatekeeper/images/policies-installed.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/images/policies.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/gatekeeper/images/policies.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/images/policysets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/gatekeeper/images/policysets.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/images/subscription.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/gatekeeper/images/subscription.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/images/topology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/gatekeeper/images/topology.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/any-warn-deprecated-api-versions/src.rego: -------------------------------------------------------------------------------- 1 | # METADATA 2 | # title: Deprecated Deployment and DaemonSet API 3 | # description: |- 4 | # The `extensions/v1beta1 API` has been deprecated in favor of `apps/v1`. 
Later versions of Kubernetes 5 | # remove this API so to ensure that the Deployment or DaemonSet can be successfully deployed to the cluster, 6 | # the version for both of these resources must be `apps/v1`. 7 | # custom: 8 | # matchers: 9 | # kinds: 10 | # - apiGroups: 11 | # - apps 12 | # kinds: 13 | # - DaemonSet 14 | # - Deployment 15 | package any_warn_deprecated_api_versions 16 | 17 | policyID := "P0001" 18 | 19 | import data.lib.core 20 | 21 | warn[msg] { 22 | resources := ["DaemonSet", "Deployment"] 23 | core.apiVersion == "extensions/v1beta1" 24 | core.kind == resources[_] 25 | 26 | msg := core.format_with_id(sprintf("API extensions/v1beta1 for %s has been deprecated, use apps/v1 instead.", [core.kind]), policyID) 27 | } 28 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/any-warn-deprecated-api-versions/src_test.rego: -------------------------------------------------------------------------------- 1 | package any_warn_deprecated_api_versions 2 | 3 | test_matching { 4 | input := { 5 | "kind": "Deployment", 6 | "metadata": {"name": "test"}, 7 | "apiVersion": "extensions/v1beta1", 8 | } 9 | 10 | warns := warn with input as input 11 | count(warns) == 1 12 | } 13 | 14 | test_different_kind { 15 | input := { 16 | "kind": "test", 17 | "metadata": {"name": "test"}, 18 | "apiVersion": "extensions/v1beta1", 19 | } 20 | 21 | warns := warn with input as input 22 | count(warns) == 0 23 | } 24 | 25 | test_different_apiversion { 26 | input := { 27 | "kind": "Deployment", 28 | "metadata": {"name": "test"}, 29 | "apiVersion": "test", 30 | } 31 | 32 | warns := warn with input as input 33 | count(warns) == 0 34 | } 35 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/container-deny-added-caps/constraint.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: ContainerDenyAddedCaps 3 | metadata: 4 | name: containerdenyaddedcaps 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/container-deny-escalation/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: ContainerDenyEscalation 3 | metadata: 4 | name: containerdenyescalation 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/container-deny-latest-tag/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: ContainerDenyLatestTag 3 | metadata: 4 | name: containerdenylatesttag 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/container-deny-no-resource-constraints/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: ContainerDenyWithoutResourceConstraints 3 | metadata: 4 | name: containerdenywithoutresourceconstraints 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - 
apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/container-deny-privileged-if-tenant/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: ContainerDenyPrivilegedIfTenant 3 | metadata: 4 | name: containerdenyprivilegediftenant 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | labelSelector: 19 | matchLabels: 20 | is-tenant: "true" 21 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/container-deny-privileged/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: ContainerDenyPrivileged 3 | metadata: 4 | name: containerdenyprivileged 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/lib/core.rego: -------------------------------------------------------------------------------- 1 | package lib.core 2 | 3 | default is_gatekeeper = false 4 | 5 | is_gatekeeper { 6 | has_field(input, "review") 7 | has_field(input.review, "object") 8 | } 9 | 10 | resource = input.review.object { 11 | is_gatekeeper 12 | } 13 | 14 | resource = input { 15 | not is_gatekeeper 16 | } 17 | 18 | format(msg) = {"msg": msg} 19 | 20 | format_with_id(msg, id) = msg_fmt { 21 | msg_fmt := { 22 | "msg": sprintf("%s: %s", [id, msg]), 23 | "details": 
{"policyID": id}, 24 | } 25 | } 26 | 27 | apiVersion = resource.apiVersion 28 | 29 | name = resource.metadata.name 30 | 31 | kind = resource.kind 32 | 33 | labels = resource.metadata.labels 34 | 35 | annotations = resource.metadata.annotations 36 | 37 | gv := split(apiVersion, "/") 38 | 39 | group = gv[0] { 40 | contains(apiVersion, "/") 41 | } 42 | 43 | group = "core" { 44 | not contains(apiVersion, "/") 45 | } 46 | 47 | version := gv[count(gv) - 1] 48 | 49 | has_field(obj, field) { 50 | not object.get(obj, field, "N_DEFINED") == "N_DEFINED" 51 | } 52 | 53 | missing_field(obj, field) { 54 | obj[field] == "" 55 | } 56 | 57 | missing_field(obj, field) { 58 | not has_field(obj, field) 59 | } 60 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/lib/pods.rego: -------------------------------------------------------------------------------- 1 | package lib.pods 2 | 3 | import data.lib.core 4 | 5 | default pod = false 6 | 7 | pod = core.resource.spec.template { 8 | pod_templates := ["daemonset", "deployment", "job", "replicaset", "replicationcontroller", "statefulset"] 9 | lower(core.kind) == pod_templates[_] 10 | } 11 | 12 | pod = core.resource { 13 | lower(core.kind) == "pod" 14 | } 15 | 16 | pod = core.resource.spec.jobTemplate.spec.template { 17 | lower(core.kind) == "cronjob" 18 | } 19 | 20 | containers[container] { 21 | keys = {"containers", "initContainers"} 22 | all_containers = [c | keys[k]; c = pod.spec[k][_]] 23 | container = all_containers[_] 24 | } 25 | 26 | volumes[volume] { 27 | volume = pod.spec.volumes[_] 28 | } 29 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/lib/psp.rego: -------------------------------------------------------------------------------- 1 | package lib.psps 2 | 3 | import data.lib.core 4 | 5 | # PodSecurityPolicies are not namespace scoped, so the default 
PSPs included 6 | # in managed Kubernetes offerings cannot be excluded using the normal 7 | # methods in Gatekeeper. 8 | is_exception { 9 | exceptions := { 10 | "gce.privileged", # GKE 11 | "gce.persistent-volume-binder", # GKE 12 | "gce.event-exporter", # GKE 13 | "gce.gke-metrics-agent", # GKE 14 | "gce.unprivileged-addon", # GKE 15 | "gce.fluentd-gke", # GKE 16 | "gce.fluentd-gcp", # GKE 17 | } 18 | 19 | core.name == exceptions[_] 20 | } 21 | 22 | psps[psp] { 23 | lower(core.kind) = "podsecuritypolicy" 24 | not is_exception 25 | psp = core.resource 26 | } 27 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/lib/psp_test.rego: -------------------------------------------------------------------------------- 1 | package lib.psps 2 | 3 | test_exception_pos { 4 | input := {"metadata": {"name": "gce.privileged"}} 5 | is_exception with input as input 6 | } 7 | 8 | test_exception_neg { 9 | input := {"metadata": {"name": "test"}} 10 | not is_exception with input as input 11 | } 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/lib/rbac.rego: -------------------------------------------------------------------------------- 1 | package lib.rbac 2 | 3 | import data.lib.core 4 | 5 | rule_has_verb(rule, verb) { 6 | verbs := ["*", lower(verb)] 7 | verbs[_] == lower(rule.verbs[_]) 8 | } 9 | 10 | rule_has_resource_type(rule, type) { 11 | types := ["*", lower(type)] 12 | types[_] == lower(rule.resources[_]) 13 | } 14 | 15 | rule_has_resource_name(rule, name) { 16 | name == rule.resourceNames[_] 17 | } 18 | 19 | rule_has_resource_name(rule, name) { 20 | core.missing_field(rule, "resourceNames") 21 | } 22 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/lib/security.rego: 
-------------------------------------------------------------------------------- 1 | package lib.security 2 | 3 | dropped_capability(container, cap) { 4 | lower(container.securityContext.capabilities.drop[_]) == lower(cap) 5 | } 6 | 7 | dropped_capability(psp, cap) { 8 | lower(psp.spec.requiredDropCapabilities[_]) == lower(cap) 9 | } 10 | 11 | added_capability(container, cap) { 12 | lower(container.securityContext.capabilities.add[_]) == lower(cap) 13 | } 14 | 15 | added_capability(psp, cap) { 16 | lower(psp.spec.allowedCapabilities[_]) == lower(cap) 17 | } 18 | 19 | added_capability(psp, cap) { 20 | lower(psp.spec.defaultAddCapabilities[_]) == lower(cap) 21 | } 22 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/pod-deny-host-alias/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: PodDenyHostAlias 3 | metadata: 4 | name: poddenyhostalias 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/pod-deny-host-ipc/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: PodDenyHostIpc 3 | metadata: 4 | name: poddenyhostipc 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/pod-deny-host-network/constraint.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: PodDenyHostNetwork 3 | metadata: 4 | name: poddenyhostnetwork 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/pod-deny-host-pid/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: PodDenyHostPid 3 | metadata: 4 | name: poddenyhostpid 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/pod-deny-without-runasnonroot/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: PodDenyWithoutRunasnonroot 3 | metadata: 4 | name: poddenywithoutrunasnonroot 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: 9 | - "" 10 | kinds: 11 | - Pod 12 | - apiGroups: 13 | - apps 14 | kinds: 15 | - DaemonSet 16 | - Deployment 17 | - StatefulSet 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/input/suspicious_self_subject_review/constraint.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: SuspiciousSelfSubjectReview 3 | metadata: 4 | name: detect-automated-identities-querying-permissions 5 | spec: 6 | enforcementAction: deny 7 | match: 8 | 
kinds: 9 | - apiGroups: ["authorization.k8s.io"] 10 | kinds: 11 | - SelfSubjectRulesReview 12 | - SelfSubjectAccessReview 13 | parameters: 14 | # Allow requests from certain users, e.g. "system:serviceaccount:my-priv-ns:my-priv-sa" or "system:node:nodename" 15 | allowedUsers: [] 16 | # Allow requests from users in certain groups, e.g. "system:nodes", "system:serviceaccounts:privileged-ns" 17 | allowedGroups: [] 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - policyGenerator.yaml 3 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/gatekeeper/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: policies-gatekeeper 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/affinity/add_node_affinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-node-affinity 5 | annotations: 6 | policies.kyverno.io/title: Add Node Affinity 7 | policies.kyverno.io/category: Other 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Deployment 10 | kyverno.io/kyverno-version: 1.5.1 11 | kyverno.io/kubernetes-version: "1.21" 12 | policies.kyverno.io/description: >- 13 | Node affinity, similar to node selection, is a way to specify which 
node(s) on which Pods will be scheduled 14 | but based on more complex conditions. This policy will add node affinity to a Deployment and if one already 15 | exists an expression will be added to it. 16 | spec: 17 | background: false 18 | rules: 19 | - name: add-node-affinity-deployment 20 | match: 21 | resources: 22 | kinds: 23 | - Deployment 24 | mutate: 25 | patchesJson6902: |- 26 | - path: "/spec/template/spec/affinity/nodeAffinity/requiredDuringSchedulingIgnoredDuringExecution/nodeSelectorTerms/-1/matchExpressions/-1" 27 | op: add 28 | value: {"key":"zone_weight","operator":"Lt","values":["400"]} 29 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/affinity/spread_pods_across_topology.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: spread-pods 5 | annotations: 6 | policies.kyverno.io/title: Spread Pods Across Nodes 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/subject: Deployment, Pod 9 | policies.kyverno.io/description: >- 10 | Deployments to a Kubernetes cluster with multiple availability zones often need to 11 | distribute those replicas to align with those zones to ensure site-level failures 12 | do not impact availability. This policy matches Deployments with the label 13 | `distributed=required` and mutates them to spread Pods across zones. 14 | spec: 15 | rules: 16 | - name: spread-pods-across-nodes 17 | # Matches any Deployment with the label `distributed=required` 18 | match: 19 | resources: 20 | kinds: 21 | - Deployment 22 | selector: 23 | matchLabels: 24 | distributed: required 25 | # Mutates the incoming Deployment. 26 | mutate: 27 | patchStrategicMerge: 28 | spec: 29 | template: 30 | spec: 31 | # Adds the topologySpreadConstraints field if non-existent in the request. 
32 | +(topologySpreadConstraints): 33 | - maxSkew: 1 34 | topologyKey: zone 35 | whenUnsatisfiable: DoNotSchedule 36 | labelSelector: 37 | matchLabels: 38 | distributed: required -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/deployments/add_volume_deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-volume 5 | annotations: 6 | policies.kyverno.io/title: Add Volume to Deployment 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/subject: Deployment, Volume 9 | policies.kyverno.io/description: >- 10 | Some Kubernetes applications like HashiCorp Vault must perform some modifications 11 | to resources in order to invoke their specific functionality. Often times, that functionality 12 | is controlled by the presence of a label or specific annotation. This policy, based on HashiCorp 13 | Vault, adds a volume and volumeMount to a Deployment if there is an annotation called 14 | "vault.k8s.corp.net/inject=enabled" present. 
15 | spec: 16 | rules: 17 | - name: add-volume 18 | match: 19 | resources: 20 | kinds: 21 | - Deployment 22 | preconditions: 23 | any: 24 | - key: "{{request.object.spec.template.metadata.annotations.\"vault.k8s.corp.net/inject\"}}" 25 | operator: Equals 26 | value: "enabled" 27 | mutate: 28 | patchesJson6902: |- 29 | - op: add 30 | path: /spec/template/spec/volumes 31 | value: [{"name": "vault-secret","emptyDir": {"medium": "Memory"}}] 32 | - op: add 33 | path: /spec/template/spec/containers/0/volumeMounts 34 | value: [{"mountPath": "/secret","name": "vault-secret"}] -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/deployments/mutate-large-termination-gps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: mutate-termination-grace-period-seconds 5 | annotations: 6 | policies.kyverno.io/title: Mutate termination Grace Periods Seconds 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.2 10 | kyverno.io/kubernetes-version: "1.23" 11 | policies.kyverno.io/subject: Pod 12 | policies.kyverno.io/description: >- 13 | Pods with large terminationGracePeriodSeconds (tGPS) might prevent cluster nodes 14 | from getting drained, ultimately making the whole cluster unstable. This policy 15 | mutates all incoming Pods to set their tGPS under 50s. If the user creates a pod 16 | without specifying tGPS, then the Kubernetes default of 30s is maintained. 
17 | spec: 18 | background: false 19 | rules: 20 | - name: mutate-termination-grace-period-seconds 21 | match: 22 | resources: 23 | kinds: 24 | - Pod 25 | preconditions: 26 | all: 27 | - key: "{{request.object.spec.terminationGracePeriodSeconds || `0` }}" 28 | operator: GreaterThan 29 | value: 50 # maximum tGPS allowed by cluster admin 30 | mutate: 31 | patchStrategicMerge: 32 | spec: 33 | terminationGracePeriodSeconds: 50 34 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/limitsrequests/require_requests_limits.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: require-requests-limits 5 | annotations: 6 | policies.kyverno.io/title: Require Limits and Requests 7 | policies.kyverno.io/category: Best Practices 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Pod 10 | policies.kyverno.io/description: >- 11 | As application workloads share cluster resources, it is important to limit resources 12 | requested and consumed by each Pod. It is recommended to require resource requests and 13 | limits per Pod, especially for memory and CPU. If a Namespace level request or limit is specified, 14 | defaults will automatically be applied to each Pod based on the LimitRange configuration. 15 | This policy validates that all containers have something specified for memory and CPU 16 | requests and memory limits. 17 | spec: 18 | validationFailureAction: audit 19 | background: true 20 | rules: 21 | - name: validate-resources 22 | match: 23 | resources: 24 | kinds: 25 | - Pod 26 | validate: 27 | message: "CPU and memory resource requests and limits are required." 
28 | pattern: 29 | spec: 30 | containers: 31 | - resources: 32 | requests: 33 | memory: "?*" 34 | cpu: "?*" 35 | limits: 36 | memory: "?*" 37 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/poddisruptionbudget/create_default_pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: create-default-pdb 5 | annotations: 6 | policies.kyverno.io/title: Add Pod Disruption Budget 7 | policies.kyverno.io/category: Sample 8 | kyverno.io/kyverno-version: 1.6.2 9 | policies.kyverno.io/minversion: 1.6.0 10 | policies.kyverno.io/subject: Deployment 11 | policies.kyverno.io/description: >- 12 | A PodDisruptionBudget limits the number of Pods of a replicated application that 13 | are down simultaneously from voluntary disruptions. For example, a quorum-based 14 | application would like to ensure that the number of replicas running is never brought 15 | below the number needed for a quorum. As an application owner, you can create a PodDisruptionBudget (PDB) 16 | for each application. This policy will create a PDB resource whenever a new Deployment is created. 
17 | spec: 18 | rules: 19 | - name: create-default-pdb 20 | match: 21 | any: 22 | - resources: 23 | kinds: 24 | - Deployment 25 | generate: 26 | apiVersion: policy/v1 27 | kind: PodDisruptionBudget 28 | name: "{{request.object.metadata.name}}-default-pdb" 29 | namespace: "{{request.object.metadata.namespace}}" 30 | data: 31 | spec: 32 | minAvailable: 1 33 | selector: 34 | matchLabels: 35 | "{{request.object.metadata.labels}}" -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/require_deployments_have_multiple_replicas/deployment-musthaverolling-strategy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: deployment-must-have-rolling 5 | annotations: 6 | policies.kyverno.io/title: Require Multiple Replicas 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Deployment 10 | policies.kyverno.io/description: >- 11 | Deployments with a single replica cannot be highly available and thus the application 12 | may suffer downtime if that one replica goes down. This policy validates that Deployments 13 | have more than one replica. 
14 | spec: 15 | validationFailureAction: audit 16 | background: true 17 | rules: 18 | - name: deployment-has-multiple-replicas 19 | match: 20 | resources: 21 | kinds: 22 | - Deployment 23 | validate: 24 | message: "Deployments should have RollingUpdate strategy" 25 | pattern: 26 | spec: 27 | strategy: 28 | type: RollingUpdate 29 | rollingUpdate: 30 | maxSurge: 1 31 | maxUnavailable: 0 32 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/require_deployments_have_multiple_replicas/require_deployments_have_multiple_replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: deployment-has-multiple-replicas 5 | annotations: 6 | policies.kyverno.io/title: Require Multiple Replicas 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Deployment 10 | policies.kyverno.io/description: >- 11 | Deployments with a single replica cannot be highly available and thus the application 12 | may suffer downtime if that one replica goes down. This policy validates that Deployments 13 | have more than one replica. 14 | spec: 15 | validationFailureAction: audit 16 | background: true 17 | rules: 18 | - name: deployment-has-multiple-replicas 19 | match: 20 | resources: 21 | kinds: 22 | - Deployment 23 | validate: 24 | message: "Deployments should have more than one replica to ensure availability." 
25 | pattern: 26 | spec: 27 | replicas: ">1" -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/require_probes/ensure_probes_different.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: validate-probes 5 | annotations: 6 | pod-policies.kyverno.io/autogen-controllers: none 7 | policies.kyverno.io/title: Validate Probes 8 | policies.kyverno.io/category: Sample 9 | policies.kyverno.io/severity: medium 10 | policies.kyverno.io/minversion: 1.3.6 11 | policies.kyverno.io/subject: Pod 12 | policies.kyverno.io/description: >- 13 | Liveness and readiness probes accomplish different goals, and setting both to the same 14 | is an anti-pattern and often results in app problems in the future. This policy 15 | checks that liveness and readiness probes are not equal. 16 | spec: 17 | validationFailureAction: audit 18 | background: false 19 | rules: 20 | - name: validate-probes 21 | match: 22 | resources: 23 | kinds: 24 | - Deployment 25 | - DaemonSet 26 | - StatefulSet 27 | validate: 28 | message: "Liveness and readiness probes cannot be the same." 
29 | deny: 30 | conditions: 31 | any: 32 | - key: "{{ request.object.spec.template.spec.containers[?readinessProbe==livenessProbe] | length(@) }}" 33 | operator: GreaterThan 34 | value: "0" -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/require_probes/require_probes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: require-pod-probes 5 | annotations: 6 | pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet 7 | policies.kyverno.io/title: Require Pod Probes 8 | policies.kyverno.io/category: Best Practices 9 | policies.kyverno.io/severity: medium 10 | policies.kyverno.io/subject: Pod 11 | policies.kyverno.io/description: >- 12 | Liveness and readiness probes need to be configured to correctly manage a Pod's 13 | lifecycle during deployments, restarts, and upgrades. For each Pod, a periodic 14 | `livenessProbe` is performed by the kubelet to determine if the Pod's containers 15 | are running or need to be restarted. A `readinessProbe` is used by Services 16 | and Deployments to determine if the Pod is ready to receive network traffic. 17 | This policy validates that all containers have liveness and readiness probes by 18 | ensuring the `periodSeconds` field is greater than zero. 19 | spec: 20 | validationFailureAction: audit 21 | background: true 22 | rules: 23 | - name: validate-livenessProbe-readinessProbe 24 | match: 25 | resources: 26 | kinds: 27 | - Pod 28 | validate: 29 | message: "Liveness and readiness probes are required." 
30 | pattern: 31 | spec: 32 | containers: 33 | - livenessProbe: 34 | periodSeconds: ">0" 35 | readinessProbe: 36 | periodSeconds: ">0" -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/resource-exhaustion/disallow-self-provisioner/disallow-self-provisioner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-self-provisioners 5 | annotations: 6 | policies.kyverno.io/description: >- 7 | The policy does not allow ClusterRoleBinding resources to associate to the self-provisioner ClusterRole. 8 | ClusterRoleBindings with a reference to the self-provisioner ClusterRole must be deleted. Thereby, 9 | disallowing regular users to provision their own namespaces. 10 | spec: 11 | background: false 12 | validationFailureAction: audit 13 | rules: 14 | - name: Disallow self-provisioner ClusterRole 15 | match: 16 | resources: 17 | kinds: 18 | - ClusterRoleBinding 19 | validate: 20 | message: "The self-provisioner ClusterRole can not be referenced in a ClusterRoleBinding!" 
21 | deny: 22 | conditions: 23 | - key: "{{ request.object.roleRef.name }}" 24 | operator: Equals 25 | value: "self-provisioner" 26 | - key: "{{ request.operation }}" 27 | operator: In 28 | value: 29 | - CREATE 30 | - UPDATE 31 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/routes/check-routes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: check-routes 5 | annotations: 6 | policies.kyverno.io/title: Require TLS routes in OpenShift 7 | policies.kyverno.io/category: OpenShift 8 | policies.kyverno.io/severity: high 9 | kyverno.io/kyverno-version: 1.6.0 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.20" 12 | policies.kyverno.io/subject: Route 13 | policies.kyverno.io/description: |- 14 | HTTP traffic is not encrypted and hence insecure. This policy prevents configuration of OpenShift HTTP routes. 15 | spec: 16 | validationFailureAction: enforce 17 | background: true 18 | rules: 19 | - name: require-tls-routes 20 | match: 21 | any: 22 | - resources: 23 | kinds: 24 | - route.openshift.io/v1/Route 25 | preconditions: 26 | all: 27 | - key: "{{ request.operation }}" 28 | operator: NotEquals 29 | value: ["DELETE"] 30 | validate: 31 | message: >- 32 | HTTP routes are not allowed. Configure TLS for secure routes. 
33 | deny: 34 | conditions: 35 | all: 36 | - key: "{{ keys(request.object.spec) | contains(@, 'tls') }}" 37 | operator: Equals 38 | value: false 39 | 40 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/input/validate_git/validategit.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: verify-git-sources 5 | annotations: 6 | policies.kyverno.io/title: Verify Git Sources 7 | policies.kyverno.io/category: Git 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.2 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: GitRepository, Bucket, HelmRepository, ImageRepository 13 | policies.kyverno.io/description: >- 14 | In a production environment, 15 | it may be desired to restrict these to only known sources to prevent 16 | accessing outside sources. This policy verifies that each of the Git 17 | sources comes from a trusted location. 18 | spec: 19 | validationFailureAction: audit 20 | rules: 21 | - name: validate-github-repositories 22 | match: 23 | any: 24 | - resources: 25 | kinds: 26 | - argoproj.io/v1alpha1/Application 27 | - argoproj.io/v1alpha1/ApplicationSet 28 | validate: 29 | message: ".spec.url must be from a repository within the myorg organization." 
30 | pattern: 31 | spec: 32 | url: "https://github.com/ch-stark/?* | ssh://git@github.com:ch-stark/?*" 33 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - policyGenerator.yaml 3 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/best-practises-for-apps/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: kyverno-app-placement 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/images/applications.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/kyverno/images/applications.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/images/policies-installed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/kyverno/images/policies-installed.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/images/policies.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/kyverno/images/policies.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/images/policysets.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/kyverno/images/policysets.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/images/subscription.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/kyverno/images/subscription.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/images/topology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stolostron/policy-collection/9f34a292e86fd5d8fab133bed3e9e8e1d45764ee/policygenerator/policy-sets/community/kyverno/images/topology.png -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/addlabelstotenant/add-labels-to-bluetenant.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-labelsnamespace-blueteam 5 | annotations: 6 | policies.kyverno.io/title: Add labels to blueteam 7 | policies.kyverno.io/category: Multi-Tenancy 8 | policies.kyverno.io/subject: 
ClusterRoleBinding 9 | policies.kyverno.io/severity: medium 10 | kyverno.io/kyverno-version: 1.6.2 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | Add labels to blueteam. see for detailled description https://cloud.redhat.com/blog/managing-applications-via-a-gitops-control-plane 15 | This demonstrates the ability of a development team to manage OCP objects in multiple namespaces from one Git repo. 16 | spec: 17 | background: false 18 | rules: 19 | - name: add-labelsnamespace-blueteam 20 | match: 21 | any: 22 | - resources: 23 | kinds: 24 | - Namespace 25 | subjects: 26 | - kind: Group 27 | name: blue-sre-group 28 | preconditions: 29 | any: 30 | - key: "{{request.operation}}" 31 | operator: In 32 | value: 33 | - CREATE 34 | - UPDATE 35 | mutate: 36 | patchStrategicMerge: 37 | metadata: 38 | labels: 39 | argocd.argoproj.io/managed-by: blueteam 40 | 41 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/addlabelstotenant/add-labels-to-redtenant.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-labelsnamespace-redteam 5 | annotations: 6 | policies.kyverno.io/title: Add labels to redteam 7 | policies.kyverno.io/category: Multi-Tenancy 8 | policies.kyverno.io/subject: ClusterRoleBinding 9 | policies.kyverno.io/severity: medium 10 | kyverno.io/kyverno-version: 1.6.2 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | Add labels to redteam 15 | spec: 16 | background: false 17 | rules: 18 | - name: add-labelsnamespace-redteam 19 | match: 20 | any: 21 | - resources: 22 | kinds: 23 | - Namespace 24 | subjects: 25 | - kind: Group 26 | name: red-sre-group 27 | preconditions: 28 | any: 29 | - key: 
"{{request.operation}}" 30 | operator: In 31 | value: 32 | - CREATE 33 | - UPDATE 34 | mutate: 35 | patchStrategicMerge: 36 | metadata: 37 | labels: 38 | argocd.argoproj.io/managed-by: redteam 39 | 40 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/disallowplacementrules/disallow-placementRules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-placementrules 5 | spec: 6 | validationFailureAction: audit 7 | background: false 8 | rules: 9 | - name: disallow-placementrules 10 | match: 11 | resources: 12 | kinds: 13 | - apps.open-cluster-management.io/v1/PlacementRule 14 | validate: 15 | message: "Using Placement Rules not allowed" 16 | deny: {} 17 | 18 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/generateManagedClusterSetBinding/generateManagedClusterSetBindingblueteam-hub.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: add-managedclustersetbinding-blue-sre-group 6 | annotations: 7 | policies.kyverno.io/title: Add ArgoCD Rolebinding 8 | policies.kyverno.io/category: Multi-Tenancy 9 | policies.kyverno.io/subject: ClusterRoleBinding 10 | policies.kyverno.io/description: >- 11 | When a user from Team1 creates a NS a ClusterRoleBinding gets created so 12 | it can access ArgoCD. 
13 | spec: 14 | background: false 15 | rules: 16 | - name: managedclustersetbinding-blue-sre-group 17 | match: 18 | any: 19 | - resources: 20 | kinds: 21 | - Namespace 22 | subjects: 23 | - kind: Group 24 | name: "blue-sre-group" 25 | preconditions: 26 | any: 27 | - key: "{{request.operation}}" 28 | operator: In 29 | value: 30 | - CREATE 31 | - UPDATE 32 | generate: 33 | apiVersion: cluster.open-cluster-management.io/v1beta2 34 | kind: ManagedClusterSetBinding 35 | name: blueteam 36 | namespace: "{{request.object.metadata.name}}" 37 | synchronize: true 38 | data: 39 | spec: 40 | clusterSet: blueteam 41 | 42 | 43 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/generateManagedClusterSetBinding/generateManagedClusterSetBindingredteam-hub.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: add-managedclustersetbinding-red-sre-group 6 | annotations: 7 | policies.kyverno.io/title: Add ArgoCD Rolebinding 8 | policies.kyverno.io/category: Multi-Tenancy 9 | policies.kyverno.io/subject: ClusterRoleBinding 10 | policies.kyverno.io/description: >- 11 | When a user from Team1 creates a NS a ClusterRoleBinding gets created so 12 | it can access ArgoCD. 
13 | spec: 14 | background: false 15 | rules: 16 | - name: generate-managedclustersetbinding-red 17 | match: 18 | any: 19 | - resources: 20 | kinds: 21 | - Namespace 22 | subjects: 23 | - kind: Group 24 | name: "red-sre-group" 25 | preconditions: 26 | any: 27 | - key: "{{request.operation}}" 28 | operator: In 29 | value: 30 | - CREATE 31 | generate: 32 | apiVersion: cluster.open-cluster-management.io/v1beta2 33 | kind: ManagedClusterSetBinding 34 | name: redteam 35 | namespace: "{{request.object.metadata.name}}" 36 | synchronize: true 37 | data: 38 | spec: 39 | clusterSet: redteam 40 | 41 | 42 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/generatePlacementRules/generatePlacementblueteam-hub.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: add-placement-blue-sre-group 6 | annotations: 7 | policies.kyverno.io/title: Add ArgoCD Rolebinding 8 | policies.kyverno.io/category: Multi-Tenancy 9 | policies.kyverno.io/subject: ClusterRoleBinding 10 | policies.kyverno.io/description: >- 11 | When a user from Team1 creates a NS a ClusterRoleBinding gets created so 12 | it can access ArgoCD. 
13 | spec: 14 | background: false 15 | rules: 16 | - name: generate-placement-blue 17 | match: 18 | any: 19 | - resources: 20 | kinds: 21 | - Namespace 22 | subjects: 23 | - kind: Group 24 | name: "blue-sre-group" 25 | preconditions: 26 | any: 27 | - key: "{{request.operation}}" 28 | operator: In 29 | value: 30 | - CREATE 31 | generate: 32 | apiVersion: cluster.open-cluster-management.io/v1beta1 33 | kind: Placement 34 | name: blueteam 35 | namespace: "{{request.object.metadata.name}}" 36 | synchronize: true 37 | data: 38 | spec: 39 | clusterSets: 40 | - blueteam 41 | predicates: 42 | - requiredClusterSelector: 43 | labelSelector: 44 | matchLabels: 45 | cluster.open-cluster-management.io/placement: blueplacement 46 | 47 | 48 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/generatePlacementRules/generatePlacementredteam-hub.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: add-placement-red-sre-group 6 | annotations: 7 | policies.kyverno.io/title: Add ArgoCD Rolebinding 8 | policies.kyverno.io/category: Multi-Tenancy 9 | policies.kyverno.io/subject: ClusterRoleBinding 10 | policies.kyverno.io/description: >- 11 | When a user from Team1 creates a NS a ClusterRoleBinding gets created so 12 | it can access ArgoCD. 
13 | spec: 14 | background: false 15 | rules: 16 | - name: generate-placement-red 17 | match: 18 | any: 19 | - resources: 20 | kinds: 21 | - Namespace 22 | subjects: 23 | - kind: Group 24 | name: "red-sre-group" 25 | preconditions: 26 | any: 27 | - key: "{{request.operation}}" 28 | operator: In 29 | value: 30 | - CREATE 31 | generate: 32 | apiVersion: cluster.open-cluster-management.io/v1beta1 33 | kind: Placement 34 | name: redteam 35 | namespace: "{{request.object.metadata.name}}" 36 | synchronize: true 37 | data: 38 | spec: 39 | clusterSets: 40 | - redteam 41 | 42 | 43 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/generateargocdpersmissions/generate-argocd-permissions-blueteam-spoke.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: add-argocd-clusterrolebinding-blue 6 | annotations: 7 | policies.kyverno.io/title: Add ArgoCD Rolebinding 8 | policies.kyverno.io/category: Multi-Tenancy 9 | policies.kyverno.io/subject: ClusterRoleBinding 10 | policies.kyverno.io/description: >- 11 | When a user from Team1 creates a NS a ClusterRoleBinding gets created so 12 | it can access ArgoCD. 
13 | spec: 14 | background: false 15 | rules: 16 | - name: generate-clusterrolebinding-blue 17 | match: 18 | any: 19 | - resources: 20 | kinds: 21 | - Namespace 22 | subjects: 23 | - kind: Group 24 | name: "blue-sre-group" 25 | preconditions: 26 | any: 27 | - key: "{{request.operation}}" 28 | operator: In 29 | value: 30 | - CREATE 31 | generate: 32 | kind: RoleBinding 33 | name: argocdaccess-blueteam 34 | namespace: "{{request.object.metadata.name}}" 35 | synchronize: true 36 | data: 37 | roleRef: 38 | apiGroup: rbac.authorization.k8s.io 39 | kind: ClusterRole 40 | name: admin 41 | subjects: 42 | - apiGroup: rbac.authorization.k8s.io 43 | kind: Group 44 | name: blue-sre-group 45 | 46 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/generateargocdpersmissions/generate-argocd-permissions-redteam-spoke.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-argocd-clusterrolebinding-red 5 | annotations: 6 | policies.kyverno.io/title: Add ArgoCD Rolebinding 7 | policies.kyverno.io/category: Multi-Tenancy 8 | policies.kyverno.io/subject: ClusterRoleBinding 9 | policies.kyverno.io/description: test 10 | spec: 11 | background: false 12 | rules: 13 | - name: generate-clusterrolebinding-red 14 | match: 15 | any: 16 | - resources: 17 | kinds: 18 | - Namespace 19 | subjects: 20 | - kind: Group 21 | name: "red-sre-group" 22 | preconditions: 23 | any: 24 | - key: "{{request.operation}}" 25 | operator: In 26 | value: 27 | - CREATE 28 | generate: 29 | kind: RoleBinding 30 | name: redteamargocdaccess 31 | namespace: "{{request.object.metadata.name}}" 32 | synchronize: true 33 | data: 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: admin 38 | subjects: 39 | - apiGroup: rbac.authorization.k8s.io 40 | kind: Group 41 | name: 
red-sre-group 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/preventupdatesappproject/application-prevent-updates-project-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: application-prevent-updates-project-all 5 | annotations: 6 | policies.kyverno.io/title: Prevent Updates to Project 7 | policies.kyverno.io/category: Argo 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.2 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Application 13 | policies.kyverno.io/description: >- 14 | This policy prevents updates to the project field after an Application is created. 15 | spec: 16 | validationFailureAction: audit 17 | background: false 18 | rules: 19 | - name: project-updates 20 | match: 21 | any: 22 | - resources: 23 | kinds: 24 | - argoproj.io/v1alpha1/Application 25 | preconditions: 26 | any: 27 | - key: "{{request.operation}}" 28 | operator: In 29 | value: 30 | - CREATE 31 | - UPDATE 32 | validate: 33 | message: "The spec.project cannot be changed once the Application is created." 
34 | deny: 35 | conditions: 36 | any: 37 | - key: "{{request.object.spec.project}}" 38 | operator: NotEquals 39 | value: "{{request.oldObject.spec.project}}" 40 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/restrictions/restrict-blueteam-destination-spoke.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-blueteam-destination 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-destination-blueteam 10 | match: 11 | resources: 12 | kinds: 13 | - argoproj.io/v1alpha1/ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `blue-sre-group` member, creation of ArgoCD Application must be against the `blueteam` namespace pattern" 26 | pattern: 27 | spec: 28 | template: 29 | spec: 30 | destination: 31 | namespace: blueteam* 32 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/restrictions/restrict-blueteam-to-its-appproject-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-blueteam-to-its-appproject 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - argoproj.io/v1alpha1/ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: 
"For any `blue-sre-group` member, creation of ArgoCD Application must be against the `blueproject` AppProject." 26 | pattern: 27 | spec: 28 | template: 29 | spec: 30 | project: blueteam 31 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/restrictions/restrict-blueteam-to-its-placement-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-blueteam-to-its-placement 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - argoproj.io/v1alpha1/ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `blue-sre-group` member, creation of ArgoCD Application must be against blue-placement" 26 | pattern: 27 | spec: 28 | predicates: 29 | requiredClusterSelector: 30 | labelSelector: 31 | matchLabels: 32 | cluster.open-cluster-management.io/placement: blue-placement 33 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/restrictions/restrict-redteam-destination-spoke.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-redteam-destination 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - argoproj.io/v1alpha1/ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | 
operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `red-sre-group` member, creation of ArgoCD Application must be against the `redteam` namespace pattern" 26 | pattern: 27 | spec: 28 | template: 29 | spec: 30 | destination: 31 | namespace: redteam* 32 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/restrictions/restrict-redteam-to-its-appproject-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-redteam-to-its-appproject 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - argoproj.io/v1alpha1/ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | validate: 18 | message: "For any `red-sre-group` member, creation of ArgoCD Application must be against the `redteam` AppProject." 
19 | pattern: 20 | spec: 21 | template: 22 | spec: 23 | project: redteam 24 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/restrictions/restrict-redteam-to-its-placement-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-redteam-to-its-placement 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - argoproj.io/v1alpha1/ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `red-sre-group` member, creation of ArgoCD ApplicationSet must point to red-placement" 26 | pattern: 27 | spec: 28 | generators: 29 | - clusterDecisionResource: 30 | configMapRef: acm-placement 31 | labelSelector: 32 | matchLabels: 33 | cluster.open-cluster-management.io/placement: red-placement 34 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/validatens/validate-ns-bluesre-spoke.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: team-validate-blue-ns-schema 5 | annotations: 6 | policies.kyverno.io/title: Validate Team Namespace Schema 7 | policies.kyverno.io/category: Multitenancy, Multicluster-Management 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.7.0 10 | policies.kyverno.io/minversion: 1.7.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Group 13 | policies.kyverno.io/description: >- 14 | Denies the creation of 
a namespace is the name of the namespace does 15 | not follow a specific naming defined by the cluster admins. 16 | spec: 17 | validationFailureAction: enforce 18 | background: false 19 | rules: 20 | - name: namespace-name 21 | match: 22 | any: 23 | - resources: 24 | kinds: 25 | - Namespace 26 | - ProjectRequest 27 | - Project 28 | subjects: 29 | - kind: Group 30 | name: "blue-sre-group" 31 | validate: 32 | message: The only names approved for your namespaces are the ones starting by blueteam* 33 | foreach: 34 | - list: "request.userInfo.groups[?contains(@,':') == `false`][].{name:@}" 35 | elementScope: true 36 | deny: 37 | conditions: 38 | any: 39 | - key: "{{request.object.metadata.name}}" 40 | operator: AnyNotIn 41 | value: blueteam* 42 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/validateplacement/preventupdates-appproject-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: application-prevent-updates-project 5 | annotations: 6 | policies.kyverno.io/title: Prevent Updates to Project 7 | policies.kyverno.io/category: Argo 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.2 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Application 13 | policies.kyverno.io/description: >- 14 | This policy prevents updates to the project field after an Application is created. 15 | spec: 16 | validationFailureAction: audit 17 | background: false 18 | rules: 19 | - name: project-updates 20 | match: 21 | any: 22 | - resources: 23 | kinds: 24 | - Application 25 | preconditions: 26 | all: 27 | - key: "{{ request.operation }}" 28 | operator: Equals 29 | value: UPDATE 30 | validate: 31 | message: "The spec.project cannot be changed once the Application is created." 
32 | deny: 33 | conditions: 34 | any: 35 | - key: "{{request.object.spec.project}}" 36 | operator: NotEquals 37 | value: "{{request.oldObject.spec.project}}" 38 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/validateplacement/validateplacementblueteam-hub.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-placement-blueteam 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - Placement 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | validate: 18 | message: "For any `blue-sre-group` member, creation of Placement must be bound to its clusterset" 19 | pattern: 20 | spec: 21 | clusterSets: 22 | - blueteam -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/input/validateplacement/validateplacementredteam-hub.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-placement-redteam 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - Placement 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | validate: 18 | message: "For any `red-sre-group` member, creation of Placement must be bound to its clusterset" 19 | pattern: 20 | spec: 21 | clusterSets: 22 | - redteam -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/kustomization.yaml: -------------------------------------------------------------------------------- 1 | 
generators: 2 | - policyGenerator.yaml 3 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/multitenancy/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: kyverno-tenancy-placement 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/authorization/host-namespaces/disallow-host-ipc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-host-ipc 5 | annotations: 6 | policies.kyverno.io/description: Sharing the host's IPC namespace allows 7 | the container process to communicate with processes on the host. To avoid pod container from 8 | having visibility to host process space, validate that 'hostIPC' is set to 'false'. 
9 | spec: 10 | background: false 11 | validationFailureAction: audit 12 | rules: 13 | - name: validate-hostIPC-scc 14 | match: 15 | resources: 16 | kinds: 17 | - SecurityContextConstraints 18 | preconditions: 19 | - key: "{{ request.object.metadata.name }}" 20 | operator: NotIn 21 | value: ["privileged", "hostaccess"] 22 | validate: 23 | message: "Creating SCC that allows host IPC namespaces in custom created scc is forbidden" 24 | pattern: 25 | =(allowHostIPC): "false" 26 | - name: validate-hostIPC-pod 27 | match: 28 | resources: 29 | kinds: 30 | - Pod 31 | exclude: 32 | resources: 33 | namespaces: 34 | - "openshift-*" 35 | - "openshift*" 36 | - "kube-*" 37 | - "kube*" 38 | validate: 39 | message: "Use of host IPC namespaces is not allowed in pods" 40 | pattern: 41 | spec: 42 | =(hostIPC): "false" 43 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/authorization/host-namespaces/disallow-host-pid.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-host-pid 5 | annotations: 6 | policies.kyverno.io/description: Sharing the host's PID namespace allows process visibility 7 | on the host, potentially exposing process information. To avoid pod container from 8 | having visibility to host process space, validate that 'hostPID' is set to 'false'. 
9 | spec: 10 | background: false 11 | validationFailureAction: audit 12 | rules: 13 | - name: validate-hostPID-scc 14 | match: 15 | resources: 16 | kinds: 17 | - SecurityContextConstraints 18 | preconditions: 19 | - key: "{{ request.object.metadata.name }}" 20 | operator: NotIn 21 | value: ["privileged", "hostaccess", "node-exporter"] 22 | validate: 23 | message: "Creating SCC that allows host PID namespaces in custom created scc is forbidden" 24 | pattern: 25 | =(allowHostPID): "false" 26 | - name: validate-hostIPC-pod 27 | match: 28 | resources: 29 | kinds: 30 | - Pod 31 | exclude: 32 | resources: 33 | namespaces: 34 | - "openshift-*" 35 | - "openshift*" 36 | - "kube-*" 37 | - "kube*" 38 | validate: 39 | message: "Use of host PID namespaces is not allowed in pods" 40 | pattern: 41 | spec: 42 | =(hostPID): "false" 43 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/authorization/protect-default-scc/protect-default-scc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: protect-default-scc 5 | spec: 6 | background: false 7 | validationFailureAction: audit 8 | rules: 9 | - name: Prevent users from modifying or deleting default scc 10 | match: 11 | resources: 12 | kinds: 13 | - SecurityContextConstraints 14 | exclude: 15 | resources: 16 | kinds: 17 | - SecurityContextConstraints 18 | name: "custom-scc-*" 19 | validate: 20 | message: "Modifying or deleting default scc is forbidden. You can create custom scc. 
The custom scc name syntax should begin with 'custom-scc-'" 21 | deny: {} 22 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/disallow-host-namespaces/disallow-host-namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-host-namespaces 5 | annotations: 6 | policies.kyverno.io/title: Disallow Host Namespaces 7 | policies.kyverno.io/category: Pod Security Standards (Baseline) 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.0 10 | kyverno.io/kubernetes-version: "1.22-1.23" 11 | policies.kyverno.io/subject: Pod 12 | policies.kyverno.io/description: >- 13 | Host namespaces (Process ID namespace, Inter-Process Communication namespace, and 14 | network namespace) allow access to shared information and can be used to elevate 15 | privileges. Pods should not be allowed access to host namespaces. This policy ensures 16 | fields which make use of these host namespaces are unset or set to `false`. 17 | spec: 18 | validationFailureAction: audit 19 | background: true 20 | rules: 21 | - name: host-namespaces 22 | match: 23 | any: 24 | - resources: 25 | kinds: 26 | - Pod 27 | validate: 28 | message: >- 29 | Sharing the host namespaces is disallowed. The fields spec.hostNetwork, 30 | spec.hostIPC, and spec.hostPID must be unset or set to `false`. 
31 | pattern: 32 | spec: 33 | =(hostPID): "false" 34 | =(hostIPC): "false" 35 | =(hostNetwork): "false" 36 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/disallow_latest_tag/disallow_latest_tag.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-latest-tag 5 | annotations: 6 | policies.kyverno.io/title: Disallow Latest Tag 7 | policies.kyverno.io/category: Best Practices 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Pod 10 | policies.kyverno.io/description: >- 11 | The ':latest' tag is mutable and can lead to unexpected errors if the 12 | image changes. A best practice is to use an immutable tag that maps to 13 | a specific version of an application Pod. This policy validates that the image 14 | specifies a tag and that it is not called `latest`. 15 | spec: 16 | validationFailureAction: audit 17 | background: true 18 | rules: 19 | - name: require-image-tag 20 | match: 21 | resources: 22 | kinds: 23 | - Pod 24 | validate: 25 | message: "An image tag is required." 26 | pattern: 27 | spec: 28 | containers: 29 | - image: "*:*" 30 | - name: validate-image-tag 31 | match: 32 | resources: 33 | kinds: 34 | - Pod 35 | validate: 36 | message: "Using a mutable image tag e.g. 'latest' is not allowed." 
37 | pattern: 38 | spec: 39 | containers: 40 | - image: "!*:latest" -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/httpsonly/httpsonly.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: disallow-http-routes 5 | spec: 6 | background: true 7 | validationFailureAction: audit 8 | rules: 9 | - name: Prevent users from deploying Routes with no https 10 | match: 11 | resources: 12 | kinds: 13 | - Route 14 | validate: 15 | pattern: 16 | spec: 17 | tls: 18 | termination: "?*" 19 | message: "Routes must be configured configured with TLS!" 20 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/networking/block-nodeport-services/block-nodeport-services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | annotations: 5 | policies.kyverno.io/description: >- 6 | The policy does not allow creating NodePort services. 7 | name: block-nodeport-services 8 | spec: 9 | background: true 10 | validationFailureAction: audit 11 | rules: 12 | - name: Prevent users from creating NodePort services 13 | match: 14 | resources: 15 | kinds: 16 | - Service 17 | validate: 18 | pattern: 19 | spec: 20 | type: "!NodePort" 21 | message: "Services with the NodePort type are not allowed!" 
22 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/require-run-as-non-root-user/restrictions/restrict-blueteam-destination-spoke.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-blueteam-destination 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-destination-blueteam 10 | match: 11 | resources: 12 | kinds: 13 | - ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `red-sre-group` member, creation of ArgoCD Application must be against the `blueteam` namespace pattern" 26 | pattern: 27 | spec: 28 | template: 29 | spec: 30 | destination: 31 | namespace: blueteam* 32 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/require-run-as-non-root-user/restrictions/restrict-blueteam-to-its-appproject-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-blueteam-to-its-appproject 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `blue-sre-group` member, creation of ArgoCD Application must be against the `blueproject` AppProject." 
26 | pattern: 27 | spec: 28 | template: 29 | spec: 30 | project: blueteam 31 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/require-run-as-non-root-user/restrictions/restrict-blueteam-to-its-placement-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-blueteam-to-its-placement 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: blue-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `blue-sre-group` member, creation of ArgoCD Application must be against blue-placement" 26 | pattern: 27 | spec: 28 | predicates: 29 | requiredClusterSelector: 30 | labelSelector: 31 | matchLabels: 32 | cluster.open-cluster-management.io/placement: blue-placement 33 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/require-run-as-non-root-user/restrictions/restrict-redteam-destination-spoke.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-redteam-destination 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any 
`red-sre-group` member, creation of ArgoCD Application must be against the `redteam` namespace pattern" 26 | pattern: 27 | spec: 28 | template: 29 | spec: 30 | destination: 31 | namespace: redteam* 32 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/require-run-as-non-root-user/restrictions/restrict-redteam-to-its-appproject-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-redteam-to-its-appproject 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | validate: 18 | message: "For any `red-sre-group` member, creation of ArgoCD Application must be against the `redteam` AppProject." 19 | pattern: 20 | spec: 21 | template: 22 | spec: 23 | project: redteam 24 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/require-run-as-non-root-user/restrictions/restrict-redteam-to-its-placement-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-redteam-to-its-placement 5 | spec: 6 | validationFailureAction: enforce 7 | background: false 8 | rules: 9 | - name: restrict-to-argoproject 10 | match: 11 | resources: 12 | kinds: 13 | - ApplicationSet 14 | subjects: 15 | - kind: Group 16 | name: red-sre-group 17 | preconditions: 18 | any: 19 | - key: "{{request.operation}}" 20 | operator: In 21 | value: 22 | - CREATE 23 | - UPDATE 24 | validate: 25 | message: "For any `red-sre-group` member, creation of ArgoCD ApplicationSet must point to red-placement" 26 | pattern: 
27 | spec: 28 | generators: 29 | - clusterDecisionResource: 30 | configMapRef: acm-placement 31 | labelSelector: 32 | matchLabels: 33 | cluster.open-cluster-management.io/placement: red-placement 34 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-binding-clusteradmin/block-stale-images/block-stale-images.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: block-stale-images 5 | annotations: 6 | policies.kyverno.io/title: Block Stale Images 7 | policies.kyverno.io/category: Other 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.0 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Pod 13 | policies.kyverno.io/description: >- 14 | Images that are old usually have some open security vulnerabilities which are not patched. 15 | This policy checks the contents of every container image and inspects them for the create time. 16 | If it finds any image which was built more than 6 months ago this policy blocks the deployment. 17 | spec: 18 | validationFailureAction: audit 19 | rules: 20 | - name: block-stale-images 21 | match: 22 | any: 23 | - resources: 24 | kinds: 25 | - Pod 26 | validate: 27 | message: "Images built more than 6 months ago are prohibited." 
28 | foreach: 29 | - list: "request.object.spec.containers" 30 | context: 31 | - name: imageData 32 | imageRegistry: 33 | reference: "{{ element.image }}" 34 | deny: 35 | conditions: 36 | all: 37 | - key: "{{ time_since('', '{{ imageData.configData.created }}', '') }}" 38 | operator: GreaterThan 39 | value: 4380h 40 | 41 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-binding-clusteradmin/restrict-binding-clusteradmin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-binding-clusteradmin 5 | annotations: 6 | policies.kyverno.io/title: Restrict Binding to Cluster-Admin 7 | policies.kyverno.io/category: Security 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: RoleBinding, ClusterRoleBinding, RBAC 10 | kyverno.io/kyverno-version: 1.6.2 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | The cluster-admin ClusterRole allows any action to be performed on any resource 15 | in the cluster and its granting should be heavily restricted. This 16 | policy prevents binding to the cluster-admin ClusterRole in 17 | RoleBinding or ClusterRoleBinding resources. 18 | spec: 19 | validationFailureAction: audit 20 | background: false 21 | rules: 22 | - name: clusteradmin-bindings 23 | match: 24 | any: 25 | - resources: 26 | kinds: 27 | - RoleBinding 28 | - ClusterRoleBinding 29 | validate: 30 | message: "Binding to cluster-admin is not allowed." 
31 | pattern: 32 | roleRef: "!cluster-admin" 33 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-clusterrole-nodesproxy/restrict-clusterrole-nodesproxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-clusterrole-nodesproxy 5 | annotations: 6 | policies.kyverno.io/title: Restrict ClusterRole with Nodes Proxy 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: ClusterRole, RBAC 10 | kyverno.io/kyverno-version: 1.7.0 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | A ClusterRole with nodes/proxy resource access allows a user to 15 | perform anything the kubelet API allows. It also allows users to bypass 16 | the API server and talk directly to the kubelet potentially circumventing 17 | audits and admission controllers. See https://blog.aquasec.com/privilege-escalation-kubernetes-rbac 18 | for more info. This policy prevents the creation 19 | of a ClusterRole if it contains the nodes/proxy resource. 20 | spec: 21 | validationFailureAction: audit 22 | background: false 23 | rules: 24 | - name: clusterrole-nodesproxy 25 | match: 26 | any: 27 | - resources: 28 | kinds: 29 | - ClusterRole 30 | validate: 31 | message: "A ClusterRole containing the nodes/proxy resource is not allowed." 
32 | deny: 33 | conditions: 34 | any: 35 | - key: nodes/proxy 36 | operator: AnyIn 37 | value: "{{ request.object.rules[].resources[] }}" 38 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-escalation-verbs-roles/restrict-escalation-verbs-roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-escalation-verbs-roles 5 | annotations: 6 | policies.kyverno.io/title: Restrict Escalation Verbs in Roles 7 | policies.kyverno.io/category: Security 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Role, ClusterRole, RBAC 10 | kyverno.io/kyverno-version: 1.6.2 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | The verbs `impersonate`, `bind`, and `escalate` may all potentially lead to 15 | privilege escalation and should be tightly controlled. This policy prevents 16 | use of these verbs in Role or ClusterRole resources. In order to 17 | fully implement this control, it is recommended to pair this policy with another which 18 | also prevents use of the wildcard ('*') in the verbs list. 19 | spec: 20 | validationFailureAction: audit 21 | background: false 22 | rules: 23 | - name: escalate 24 | match: 25 | any: 26 | - resources: 27 | kinds: 28 | - Role 29 | - ClusterRole 30 | validate: 31 | message: "Use of verbs `escalate`, `bind`, and `impersonate` are forbidden." 
32 | deny: 33 | conditions: 34 | any: 35 | - key: ["escalate","bind","impersonate"] 36 | operator: AnyIn 37 | value: "{{ request.object.rules[].verbs[] }}" 38 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-ingress-wildcard/restrict-ingress-wildcard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-ingress-wildcard 5 | annotations: 6 | policies.kyverno.io/title: Restrict Ingress Host with Wildcards 7 | policies.kyverno.io/category: Other 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.2 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Ingress 13 | policies.kyverno.io/description: >- 14 | Ingress hosts optionally accept a wildcard as an alternative 15 | to precise matching. In some cases, this may be too permissive as it 16 | would direct unintended traffic to the given Ingress resource. This 17 | policy enforces that any Ingress host does not contain a wildcard 18 | character. 19 | spec: 20 | validationFailureAction: audit 21 | background: false 22 | rules: 23 | - name: block-ingress-wildcard 24 | match: 25 | any: 26 | - resources: 27 | kinds: 28 | - Ingress 29 | preconditions: 30 | all: 31 | - key: "{{ request.operation }}" 32 | operator: AnyIn 33 | value: ["CREATE", "UPDATE"] 34 | validate: 35 | message: "Wildcards are not permitted as hosts." 
36 | foreach: 37 | - list: "request.object.spec.rules" 38 | deny: 39 | conditions: 40 | any: 41 | - key: "{{ contains(element.host, '*') }}" 42 | operator: Equals 43 | value: true 44 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-wildcard-resources/restrict-wildcard-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-wildcard-resources 5 | annotations: 6 | policies.kyverno.io/title: Restrict Wildcards in Resources 7 | policies.kyverno.io/category: Security 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: ClusterRole, Role, RBAC 10 | kyverno.io/kyverno-version: 1.7.0 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | Wildcards ('*') in resources grants access to all of the resources referenced by 15 | the given API group and does not follow the principal of least privilege. As much as possible, 16 | avoid such open resources unless scoped to perhaps a custom API group. 17 | This policy blocks any Role or ClusterRole that contains a wildcard entry in 18 | the resources list found in any rule. 19 | spec: 20 | validationFailureAction: audit 21 | background: false 22 | rules: 23 | - name: wildcard-resources 24 | match: 25 | any: 26 | - resources: 27 | kinds: 28 | - Role 29 | - ClusterRole 30 | validate: 31 | message: "Use of a wildcard ('*') in any resources is forbidden." 
32 | deny: 33 | conditions: 34 | any: 35 | - key: "{{ contains(request.object.rules[].resources[], '*') }}" 36 | operator: Equals 37 | value: true 38 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict-wildcard-verbs/restrict-wildcard-verbs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-wildcard-verbs 5 | annotations: 6 | policies.kyverno.io/title: Restrict Wildcard in Verbs 7 | policies.kyverno.io/category: Security 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Role, ClusterRole, RBAC 10 | kyverno.io/kyverno-version: 1.6.2 11 | policies.kyverno.io/minversion: 1.6.0 12 | kyverno.io/kubernetes-version: "1.23" 13 | policies.kyverno.io/description: >- 14 | Wildcards ('*') in verbs grants all access to the resources referenced by it and 15 | does not follow the principal of least privilege. As much as possible, 16 | avoid such open verbs unless scoped to perhaps a custom API group. 17 | This policy blocks any Role or ClusterRole that contains a wildcard entry in 18 | the verbs list found in any rule. 19 | spec: 20 | validationFailureAction: audit 21 | background: false 22 | rules: 23 | - name: wildcard-verbs 24 | match: 25 | any: 26 | - resources: 27 | kinds: 28 | - Role 29 | - ClusterRole 30 | validate: 31 | message: "Use of a wildcard ('*') in any verbs is forbidden." 
32 | deny: 33 | conditions: 34 | any: 35 | - key: "{{ contains(request.object.rules[].verbs[], '*') }}" 36 | operator: Equals 37 | value: true 38 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_annotations/restrict_annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-annotations 5 | annotations: 6 | policies.kyverno.io/title: Restrict Annotations 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/minversion: 1.3.0 9 | policies.kyverno.io/subject: Pod, Annotation 10 | policies.kyverno.io/description: >- 11 | Some annotations control functionality driven by other cluster-wide tools and are not 12 | normally set by some class of users. This policy prevents the use of an annotation beginning 13 | with `fluxcd.io/`. This can be useful to ensure users either 14 | don't set reserved annotations or to force them to use a newer version of an annotation. 15 | pod-policies.kyverno.io/autogen-controllers: None 16 | spec: 17 | validationFailureAction: audit 18 | background: true 19 | rules: 20 | - name: block-flux-v1 21 | match: 22 | resources: 23 | kinds: 24 | - Deployment 25 | - CronJob 26 | - Job 27 | - StatefulSet 28 | - DaemonSet 29 | - Pod 30 | validate: 31 | message: Cannot use Flux v1 annotation. 32 | pattern: 33 | metadata: 34 | =(annotations): 35 | X(fluxcd.io/*): "*?" 
-------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_automount_sa_token/restrict_automount_sa_token.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-automount-sa-token 5 | annotations: 6 | policies.kyverno.io/title: Restrict Auto-Mount of Service Account Tokens 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Pod,ServiceAccount 10 | policies.kyverno.io/description: >- 11 | Kubernetes automatically mounts ServiceAccount credentials in each Pod. 12 | The ServiceAccount may be assigned roles allowing Pods to access API resources. 13 | Blocking this ability is an extension of the least privilege best practice and should 14 | be followed if Pods do not need to speak to the API server to function. 15 | This policy ensures that mounting of these ServiceAccount tokens is blocked. 16 | spec: 17 | validationFailureAction: audit 18 | background: true 19 | rules: 20 | - name: validate-automountServiceAccountToken 21 | match: 22 | resources: 23 | kinds: 24 | - Pod 25 | preconditions: 26 | all: 27 | - key: "{{ request.\"object\".metadata.labels.\"app.kubernetes.io/part-of\" || '' }}" 28 | operator: NotEquals 29 | value: policy-reporter 30 | validate: 31 | message: "Auto-mounting of Service Account tokens is not allowed." 
32 | pattern: 33 | spec: 34 | automountServiceAccountToken: "false" 35 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_ingress_classes/restrict_ingress_classes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-ingress-classes 5 | annotations: 6 | policies.kyverno.io/title: Restrict Ingress Classes 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Ingress 10 | policies.kyverno.io/description: >- 11 | Ingress classes should only be allowed which match up to deployed Ingress controllers 12 | in the cluster. Allowing users to define classes which cannot be satisfied by a deployed 13 | Ingress controller can result in either no or undesired functionality. This policy checks 14 | Ingress resources and only allows those which define `HAProxy` or `nginx` in the respective 15 | annotation. This annotation has largely been replaced as of Kubernetes 1.18 with the IngressClass 16 | resource. 17 | spec: 18 | validationFailureAction: audit 19 | background: false 20 | rules: 21 | - name: validate-ingress 22 | match: 23 | resources: 24 | kinds: 25 | - Ingress 26 | validate: 27 | message: "Unknown ingress class." 
28 | pattern: 29 | metadata: 30 | annotations: 31 | kubernetes.io/ingress.class: "HAProxy | nginx" 32 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_ingress_defaultbackend/restrict_ingress_defaultbackend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-ingress-defaultbackend 5 | annotations: 6 | policies.kyverno.io/title: Restrict Ingress defaultBackend 7 | policies.kyverno.io/category: Best Practices 8 | policies.kyverno.io/severity: high 9 | kyverno.io/kyverno-version: 1.6.2 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Ingress 13 | policies.kyverno.io/description: >- 14 | An Ingress with no rules sends all traffic to a single default backend. The defaultBackend 15 | is conventionally a configuration option of the Ingress controller and is not specified in 16 | your Ingress resources. If none of the hosts or paths match the HTTP request in the Ingress 17 | objects, the traffic is routed to your default backend. In a multi-tenant environment, you 18 | want users to use explicit hosts, they should not be able to overwrite the global default backend 19 | service. This policy prohibits the use of the defaultBackend field. 20 | spec: 21 | validationFailureAction: audit 22 | background: false 23 | rules: 24 | - name: restrict-ingress-defaultbackend 25 | match: 26 | any: 27 | - resources: 28 | kinds: 29 | - Ingress 30 | validate: 31 | message: Setting the defaultBackend field is prohibited. 
32 | pattern: 33 | spec: 34 | X(defaultBackend): "null" 35 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_loadbalancer/restrict_loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: no-loadbalancer-service 5 | annotations: 6 | policies.kyverno.io/title: Disallow Service Type LoadBalancer 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/severity: medium 9 | policies.kyverno.io/subject: Service 10 | policies.kyverno.io/description: >- 11 | Especially in cloud provider environments, a Service having type LoadBalancer will cause the 12 | provider to respond by creating a load balancer somewhere in the customer account. This adds 13 | cost and complexity to a deployment. Without restricting this ability, users may easily 14 | overrun established budgets and security practices set by the organization. This policy restricts 15 | use of the Service type LoadBalancer. 16 | spec: 17 | validationFailureAction: audit 18 | background: false 19 | rules: 20 | - name: no-LoadBalancer 21 | match: 22 | resources: 23 | kinds: 24 | - Service 25 | validate: 26 | message: "Service of type LoadBalancer is not allowed." 
27 | pattern: 28 | spec: 29 | type: "!LoadBalancer" 30 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_node_selection/restrict_node_selection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-node-selection 5 | annotations: 6 | policies.kyverno.io/title: Restrict node selection 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/subject: Pod 9 | policies.kyverno.io/description: >- 10 | The Kubernetes scheduler uses complex logic to determine the optimal placement 11 | for new Pods. Users who have access to set certain fields in a Pod spec 12 | may sidestep this logic which in many cases is undesirable. This policy 13 | prevents users from targeting specific Nodes for scheduling of Pods by 14 | prohibiting the use of the `nodeSelector` and `nodeName` fields. 15 | spec: 16 | validationFailureAction: audit 17 | background: true 18 | rules: 19 | - name: restrict-nodeselector 20 | match: 21 | resources: 22 | kinds: 23 | - Pod 24 | validate: 25 | message: Setting the nodeSelector field is prohibited. 26 | pattern: 27 | spec: 28 | X(nodeSelector): "null" 29 | - name: restrict-nodename 30 | match: 31 | resources: 32 | kinds: 33 | - Pod 34 | validate: 35 | message: Setting the nodeName field is prohibited. 
36 | pattern: 37 | spec: 38 | X(nodeName): "null" 39 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/input/restrict_service_port_range/restrict-service-port-range.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: restrict-service-port-range 5 | annotations: 6 | policies.kyverno.io/title: Restrict Service Port Range 7 | policies.kyverno.io/category: Other 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.0 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Service 13 | policies.kyverno.io/description: >- 14 | Services which are allowed to expose any port number may be able 15 | to impact other applications running on the Node which require them, 16 | or may make specifying security policy externally more challenging. 17 | This policy enforces that only the port range 32000 to 33000 may 18 | be used for Service resources. 
19 | spec: 20 | validationFailureAction: audit 21 | rules: 22 | - name: restrict-port-range 23 | match: 24 | any: 25 | - resources: 26 | kinds: 27 | - Service 28 | validate: 29 | message: Ports must be between 32000-33000 30 | pattern: 31 | spec: 32 | ports: 33 | - port: 32000-33000 -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - policyGenerator.yaml 3 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/kyverno/security/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: kyverno-security-placement 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-admin/policy-remove-kubeadmin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: kubeadmin 5 | namespace: kube-system 6 | type: Opaque 7 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-audit/policy-config-audit.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.openshift.io/v1 2 | kind: APIServer 3 | metadata: 4 | name: cluster 5 | spec: 6 | audit: 7 | customRules: 8 | - group: system:authenticated:oauth 9 | profile: WriteRequestBodies 10 | - group: system:authenticated 11 
| profile: AllRequestBodies 12 | profile: Default 13 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-compliance/policy-compliance-operator-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: openshift-compliance 5 | --- 6 | apiVersion: operators.coreos.com/v1 7 | kind: OperatorGroup 8 | metadata: 9 | name: compliance-operator 10 | namespace: openshift-compliance 11 | spec: 12 | targetNamespaces: 13 | - openshift-compliance 14 | --- 15 | apiVersion: operators.coreos.com/v1alpha1 16 | kind: Subscription 17 | metadata: 18 | name: compliance-operator 19 | namespace: openshift-compliance 20 | spec: 21 | installPlanApproval: Automatic 22 | name: compliance-operator 23 | source: redhat-operators 24 | sourceNamespace: openshift-marketplace 25 | --- 26 | apiVersion: operators.coreos.com/v1alpha1 27 | kind: ClusterServiceVersion 28 | metadata: 29 | namespace: openshift-compliance 30 | spec: 31 | displayName: Compliance Operator 32 | status: 33 | phase: Succeeded # check the csv status to determine if operator is running or not 34 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-compliance/policy-compliance-operator-moderate-scan.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: compliance.openshift.io/v1alpha1 2 | kind: ScanSettingBinding 3 | metadata: 4 | name: moderate 5 | namespace: openshift-compliance 6 | profiles: 7 | - apiGroup: compliance.openshift.io/v1alpha1 8 | kind: Profile 9 | name: ocp4-moderate 10 | - apiGroup: compliance.openshift.io/v1alpha1 11 | kind: Profile 12 | name: ocp4-moderate-node 13 | settingsRef: 14 | apiGroup: compliance.openshift.io/v1alpha1 15 | kind: ScanSetting 16 | name: default 17 | --- 18 | 
apiVersion: compliance.openshift.io/v1alpha1 19 | kind: ComplianceSuite 20 | metadata: 21 | name: moderate 22 | namespace: openshift-compliance 23 | status: 24 | phase: DONE 25 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-compliance/policy-compliance-operator-scan-results.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: compliance.openshift.io/v1alpha1 2 | kind: ComplianceCheckResult 3 | metadata: 4 | namespace: openshift-compliance 5 | labels: 6 | compliance.openshift.io/check-status: FAIL 7 | compliance.openshift.io/suite: moderate 8 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-etcd/policy-etcdencryption.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.openshift.io/v1 2 | kind: APIServer 3 | metadata: 4 | name: cluster 5 | spec: 6 | encryption: 7 | type: aescbc 8 | --- 9 | apiVersion: operator.openshift.io/v1 10 | kind: KubeAPIServer 11 | metadata: 12 | name: cluster 13 | status: 14 | conditions: 15 | - message: 'All resources encrypted: secrets, configmaps' 16 | reason: EncryptionCompleted 17 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-files/policy-file-integrity-node-status.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fileintegrity.openshift.io/v1alpha1 2 | kind: FileIntegrityNodeStatus 3 | metadata: 4 | namespace: openshift-file-integrity 5 | results: 6 | - condition: Failed 7 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-files/policy-file-integrity-operator.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: openshift-file-integrity 5 | --- 6 | apiVersion: operators.coreos.com/v1 7 | kind: OperatorGroup 8 | metadata: 9 | name: openshift-file-integrity-og 10 | namespace: openshift-file-integrity 11 | spec: 12 | targetNamespaces: 13 | - openshift-file-integrity 14 | --- 15 | apiVersion: operators.coreos.com/v1alpha1 16 | kind: Subscription 17 | metadata: 18 | name: file-integrity-operator 19 | namespace: openshift-file-integrity 20 | spec: 21 | channel: release-0.1 22 | installPlanApproval: Automatic 23 | name: file-integrity-operator 24 | source: redhat-operators 25 | sourceNamespace: openshift-marketplace 26 | --- 27 | apiVersion: fileintegrity.openshift.io/v1alpha1 28 | kind: FileIntegrity 29 | metadata: 30 | name: example-fileintegrity 31 | namespace: openshift-file-integrity 32 | spec: 33 | config: 34 | gracePeriod: 900 35 | nodeSelector: 36 | node-role.kubernetes.io/worker: "" 37 | tolerations: 38 | - effect: NoSchedule 39 | key: node-role.kubernetes.io/master 40 | operator: Exists 41 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-fips/policy-check-fips.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: machineconfiguration.openshift.io/v1 2 | kind: MachineConfig 3 | metadata: 4 | labels: 5 | machineconfiguration.openshift.io/role: worker 6 | name: 99-worker-fips 7 | spec: 8 | fips: true 9 | --- 10 | apiVersion: machineconfiguration.openshift.io/v1 11 | kind: MachineConfig 12 | metadata: 13 | labels: 14 | machineconfiguration.openshift.io/role: master 15 | name: 99-master-fips 16 | spec: 17 | fips: true 18 | -------------------------------------------------------------------------------- 
/policygenerator/policy-sets/community/ocp-best-practices/input-operators/policy-checkclusteroperator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.openshift.io/v1 2 | kind: ClusterOperator 3 | status: 4 | conditions: 5 | - status: 'False' 6 | type: Available 7 | --- 8 | apiVersion: config.openshift.io/v1 9 | kind: ClusterOperator 10 | status: 11 | conditions: 12 | - status: 'True' 13 | type: Degraded 14 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input-scc/policy-scc-restricted.yaml: -------------------------------------------------------------------------------- 1 | allowHostDirVolumePlugin: false 2 | allowHostIPC: false 3 | allowHostNetwork: false 4 | allowHostPID: false 5 | allowHostPorts: false 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: false 8 | allowedCapabilities: null 9 | apiVersion: security.openshift.io/v1 10 | defaultAddCapabilities: null 11 | fsGroup: 12 | type: MustRunAs 13 | groups: 14 | - system:authenticated 15 | kind: SecurityContextConstraints 16 | metadata: 17 | annotations: 18 | include.release.openshift.io/ibm-cloud-managed: "true" 19 | include.release.openshift.io/self-managed-high-availability: "true" 20 | include.release.openshift.io/single-node-developer: "true" 21 | kubernetes.io/description: restricted denies access to all host features and requires 22 | pods to be run with a UID, and SELinux context that are allocated to the namespace. This 23 | is the most restrictive SCC and it is used by default for authenticated users. 
24 | release.openshift.io/create-only: "true" 25 | name: restricted 26 | priority: null 27 | readOnlyRootFilesystem: false 28 | requiredDropCapabilities: 29 | - KILL 30 | - MKNOD 31 | - SETUID 32 | - SETGID 33 | runAsUser: 34 | type: MustRunAsRange 35 | seLinuxContext: 36 | type: MustRunAs 37 | supplementalGroups: 38 | type: RunAsAny 39 | users: [] 40 | volumes: 41 | - configMap 42 | - downwardAPI 43 | - emptyDir 44 | - persistentVolumeClaim 45 | - projected 46 | - secret 47 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/input/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: placement-openshift-best-practices 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: vendor, operator: In, values: ["OpenShift"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/ocp-best-practices/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - ./policyGenerator.yaml 3 | commonLabels: 4 | open-cluster-management.io/policy-set: openshift-hardening 5 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-gitops/README.md: -------------------------------------------------------------------------------- 1 | # Setup OpenShift Gitops 2 | 3 | ## Installation 4 | 5 | This PolicySet only deploys OpenShift Gitops to the hub cluster. 
In addition to deploying 6 | OpenShift Gitops, configuration sets up the following: 7 | - The plugin for the policy generator is configured 8 | - Health scripts for Policy resources are configured 9 | 10 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-gitops/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - ./policyGenerator.yaml 3 | commonLabels: 4 | open-cluster-management.io/policy-set: openshift-gitops 5 | commonAnnotations: 6 | argocd.argoproj.io/compare-options: IgnoreExtraneous 7 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-gitops/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: placement-openshift-gitops 5 | namespace: open-cluster-management-global-set 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-gitops/policy-openshift-gitops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: Subscription 3 | metadata: 4 | name: openshift-gitops-operator 5 | namespace: openshift-operators 6 | labels: 7 | operators.coreos.com/openshift-gitops-operator.openshift-operators: '' 8 | spec: 9 | channel: latest 10 | installPlanApproval: Automatic 11 | name: openshift-gitops-operator 12 | source: redhat-operators 13 | sourceNamespace: openshift-marketplace 14 | -------------------------------------------------------------------------------- 
/policygenerator/policy-sets/community/openshift-gitops/policyGenerator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: PolicyGenerator 3 | metadata: 4 | name: policy-opp-prereqs 5 | placementBindingDefaults: 6 | name: binding-policy-openshift-gitops 7 | policyDefaults: 8 | categories: 9 | - CM Configuration Management 10 | controls: 11 | - CM-2 Baseline Configuration 12 | namespace: open-cluster-management-global-set 13 | policySets: 14 | - openshift-gitops 15 | remediationAction: enforce 16 | severity: medium 17 | standards: 18 | - NIST SP 800-53 19 | policies: 20 | - name: openshift-gitops 21 | manifests: 22 | - path: policy-openshift-gitops.yaml 23 | - name: openshift-gitops-grc 24 | dependencies: 25 | - name: openshift-gitops 26 | manifests: 27 | - path: policy-openshift-gitops-grc.yaml 28 | policySets: 29 | - description: Applies the OpenShift Gitops subscription and configures 30 | OpenShift Gitops to work with the Governance APIs 31 | name: openshift-gitops 32 | placement: 33 | placementPath: placement.yaml 34 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - ./policyGenerator.yaml 3 | commonLabels: 4 | open-cluster-management.io/policy-set: openshift-plus-setup 5 | commonAnnotations: 6 | argocd.argoproj.io/compare-options: IgnoreExtraneous 7 | 8 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/managedclustersetbinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta2 2 | kind: ManagedClusterSetBinding 3 | metadata: 4 | namespace: policies 5 | name: default 6 
| spec: 7 | clusterSet: default 8 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: policies 5 | spec: {} 6 | --- 7 | apiVersion: v1 8 | kind: Namespace 9 | metadata: 10 | labels: 11 | openshift.io/cluster-monitoring: "true" 12 | name: openshift-storage 13 | spec: {} 14 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/opp-settings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: opp-settings 5 | namespace: policies 6 | data: 7 | vmwareReplicas: "6" 8 | vmwareCPU: "8" 9 | vmwareMemory: "32768" 10 | awsReplicas: "2" 11 | awsInstanceType: m6a.2xlarge 12 | us-east-1-4.12: ami-0c321aac14de997e3 13 | us-east-2-4.12: ami-0fce6015e3592d4a5 14 | us-east-1-4.13: ami-03efc0188afd5f5b9 15 | us-east-2-4.13: ami-031d6e5e3d4f2f192 16 | us-east-1-4.14: ami-058af1563befa5c0e 17 | us-east-2-4.14: ami-0dd810c1f47c5c233 18 | us-east-1-4.15: ami-0d653d86d4113326a 19 | us-east-2-4.15: ami-0d6c4efce8daf7d2d 20 | us-east-1-4.16: ami-03ca8605aa130b597 21 | us-east-2-4.16: ami-09ab4b62c2f0a4555 22 | us-east-1-4.17: ami-0eddfa7634d2beba0 23 | us-east-2-4.17: ami-022fbb77a3226215f 24 | us-east-1-4.18: ami-08f1807771f4e468b 25 | us-east-2-4.18: ami-078e26f293629fe91 26 | us-east-1-4.19: ami-0b6b825641a2ea530 27 | us-east-2-4.19: ami-0f13d2cbfbca6203b 28 | us-east-1-4.20: ami-0b6b825641a2ea530 29 | us-east-2-4.20: ami-0f13d2cbfbca6203b 30 | zone1: a 31 | zone2: b 32 | zone3: c 33 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/placement.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: placement-openshift-plus-hub 5 | namespace: open-cluster-management-global-set 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/policyGenerator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: PolicyGenerator 3 | metadata: 4 | name: policy-opp-prereqs 5 | placementBindingDefaults: 6 | name: binding-policy-openshift-plus-prereqs 7 | policyDefaults: 8 | categories: 9 | - CM Configuration Management 10 | controls: 11 | - CM-2 Baseline Configuration 12 | namespace: open-cluster-management-global-set 13 | policySets: 14 | - openshift-plus-prereqs 15 | remediationAction: enforce 16 | severity: medium 17 | standards: 18 | - NIST SP 800-53 19 | policies: 20 | - name: policy-opp-prereq-ns 21 | manifests: 22 | - path: namespace.yaml 23 | - name: policy-opp-prereq-binding 24 | dependencies: 25 | - name: policy-opp-prereq-ns 26 | manifests: 27 | - path: managedclustersetbinding.yaml 28 | - name: policy-opp-settings 29 | dependencies: 30 | - name: policy-opp-prereq-ns 31 | manifests: 32 | - path: opp-settings.yaml 33 | - name: policy-opp-prereq-machines 34 | dependencies: 35 | - name: policy-opp-settings 36 | manifests: 37 | - path: machine-sets.yaml 38 | policySets: 39 | - description: Applies the OpenShift Plus prerequisites to the ACM hub cluster 40 | name: openshift-plus-prereqs 41 | placement: 42 | placementPath: placement.yaml 43 | -------------------------------------------------------------------------------- 
/policygenerator/policy-sets/community/openshift-plus-setup/test/aws/cluster-claim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1alpha1 2 | kind: ClusterClaim 3 | metadata: 4 | labels: 5 | open-cluster-management.io/hub-managed: "" 6 | velero.io/exclude-from-backup: "true" 7 | name: version.openshift.io 8 | spec: 9 | value: 4.18.8-0.nightly-2025-03-30-191359 10 | --- 11 | apiVersion: cluster.open-cluster-management.io/v1alpha1 12 | kind: ClusterClaim 13 | metadata: 14 | labels: 15 | open-cluster-management.io/hub-managed: "" 16 | open-cluster-management.io/spoke-only: "" 17 | velero.io/exclude-from-backup: "true" 18 | name: openshiftversion-major-minor 19 | spec: 20 | value: "4.18" 21 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/test/aws/infrastructure.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: config.openshift.io/v1 2 | kind: Infrastructure 3 | metadata: 4 | name: cluster 5 | spec: 6 | cloudConfig: 7 | key: config 8 | name: cloud-provider-config 9 | platformSpec: 10 | aws: {} 11 | type: AWS 12 | status: 13 | apiServerInternalURI: https://api-int.example.dev08.red-chesterfield.com:6443 14 | apiServerURL: https://api.example.dev08.red-chesterfield.com:6443 15 | controlPlaneTopology: HighlyAvailable 16 | cpuPartitioning: None 17 | etcdDiscoveryDomain: "" 18 | infrastructureName: example-drtt7 19 | infrastructureTopology: HighlyAvailable 20 | platform: AWS 21 | platformStatus: 22 | aws: 23 | region: us-east-2 24 | type: AWS 25 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/test/aws/opp-settings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | 
metadata: 4 | name: opp-settings 5 | namespace: policies 6 | data: 7 | vmwareReplicas: "6" 8 | vmwareCPU: "8" 9 | vmwareMemory: "32768" 10 | awsReplicas: "2" 11 | awsInstanceType: m6a.2xlarge 12 | us-east-1-4.12: ami-0c321aac14de997e3 13 | us-east-2-4.12: ami-0fce6015e3592d4a5 14 | us-east-1-4.13: ami-03efc0188afd5f5b9 15 | us-east-2-4.13: ami-031d6e5e3d4f2f192 16 | us-east-1-4.14: ami-058af1563befa5c0e 17 | us-east-2-4.14: ami-0dd810c1f47c5c233 18 | us-east-1-4.15: ami-0d653d86d4113326a 19 | us-east-2-4.15: ami-0d6c4efce8daf7d2d 20 | us-east-1-4.16: ami-075cc98266f9df501 21 | us-east-2-4.16: ami-08bb6907b96d2a024 22 | us-east-1-4.17: ami-0e79bb8acc37d2696 23 | us-east-2-4.17: ami-08997afda521c28fa 24 | us-east-1-4.18: ami-012d486b4a2bd1c08 25 | us-east-2-4.18: ami-0197c5c22c44c04f1 26 | zone1: a 27 | zone2: b 28 | zone3: c 29 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/test/vsphere/cluster-claim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1alpha1 2 | kind: ClusterClaim 3 | metadata: 4 | labels: 5 | open-cluster-management.io/hub-managed: "" 6 | velero.io/exclude-from-backup: "true" 7 | name: version.openshift.io 8 | spec: 9 | value: 4.19.0-0.nightly-2025-03-30-191359 10 | --- 11 | apiVersion: cluster.open-cluster-management.io/v1alpha1 12 | kind: ClusterClaim 13 | metadata: 14 | labels: 15 | open-cluster-management.io/hub-managed: "" 16 | open-cluster-management.io/spoke-only: "" 17 | velero.io/exclude-from-backup: "true" 18 | name: openshiftversion-major-minor 19 | spec: 20 | value: "4.19" 21 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/openshift-plus-setup/test/vsphere/opp-settings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
ConfigMap 3 | metadata: 4 | name: opp-settings 5 | namespace: policies 6 | data: 7 | vmwareReplicas: "6" 8 | vmwareCPU: "8" 9 | vmwareMemory: "32768" 10 | awsReplicas: "2" 11 | awsInstanceType: m6a.2xlarge 12 | us-east-1-4.12: ami-0c321aac14de997e3 13 | us-east-2-4.12: ami-0fce6015e3592d4a5 14 | us-east-1-4.13: ami-03efc0188afd5f5b9 15 | us-east-2-4.13: ami-031d6e5e3d4f2f192 16 | us-east-1-4.14: ami-058af1563befa5c0e 17 | us-east-2-4.14: ami-0dd810c1f47c5c233 18 | us-east-1-4.15: ami-0d653d86d4113326a 19 | us-east-2-4.15: ami-0d6c4efce8daf7d2d 20 | us-east-1-4.16: ami-075cc98266f9df501 21 | us-east-2-4.16: ami-08bb6907b96d2a024 22 | us-east-1-4.17: ami-0e79bb8acc37d2696 23 | us-east-2-4.17: ami-08997afda521c28fa 24 | us-east-1-4.18: ami-012d486b4a2bd1c08 25 | us-east-2-4.18: ami-0197c5c22c44c04f1 26 | zone1: a 27 | zone2: b 28 | zone3: c 29 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/policygenerator-download/consoleclidownload.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: console.openshift.io/v1 3 | kind: ConsoleCLIDownload 4 | metadata: 5 | name: policygenerator 6 | spec: 7 | description: The Policy Generator is a part of the Red Hat Advanced Cluster Management for Kubernetes application lifecycle subscription GitOps workflow that generates Red Hat Advanced Cluster Management policies using Kustomize. The Policy Generator builds Red Hat Advanced Cluster Management policies from Kubernetes manifest YAML files, which are provided through a PolicyGenerator manifest YAML file that is used to configure it. The Policy Generator is implemented as a Kustomize generator plug-in. 
8 | displayName: 'Advanced Cluster Management - v{{ (lookup "operator.open-cluster-management.io/v1" "MultiClusterHub" "open-cluster-management" "multiclusterhub").status.currentVersion }}' 9 | links: 10 | - href: 'https://policygen-downloader.{{ (lookup "config.openshift.io/v1" "Ingress" "" "cluster").spec.domain }}/PolicyGenerator' 11 | text: Download PolicyGenerator for Linux for x86_64 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/policygenerator-download/generator.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: policy.open-cluster-management.io/v1 3 | kind: PolicyGenerator 4 | metadata: 5 | name: policygenerator-download 6 | policyDefaults: 7 | namespace: policies 8 | remediationAction: inform ## Change to enforce to create the downloader objects on the cluster 9 | ## set the default compliance annotations 10 | categories: 11 | - CM Configuration Management 12 | controls: 13 | - CM-2 Baseline Configuration 14 | standards: 15 | - NIST SP 800-53 16 | severity: low 17 | placement: 18 | labelSelector: 19 | matchLabels: 20 | local-cluster: 'true' 21 | 22 | policies: 23 | - name: acm-policygenerator-downloader 24 | manifests: 25 | - path: namespace.yml 26 | - path: deployment.yml 27 | - path: service.yml 28 | - path: route.yml 29 | - path: consoleclidownload.yml 30 | complianceType: mustonlyhave 31 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/policygenerator-download/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | 5 | generators: 6 | - generator.yml 7 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/policygenerator-download/namespace.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: open-cluster-management-downloader 6 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/policygenerator-download/route.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Route 3 | apiVersion: route.openshift.io/v1 4 | metadata: 5 | name: policygen-downloader 6 | namespace: open-cluster-management-downloader 7 | labels: 8 | app: policygen-downloader 9 | spec: 10 | host: 'policygen-downloader.{{ (lookup "config.openshift.io/v1" "Ingress" "" "cluster").spec.domain }}' 11 | to: 12 | kind: Service 13 | name: policygen-downloader 14 | weight: 100 15 | port: 16 | targetPort: 8080-tcp 17 | tls: 18 | termination: edge 19 | insecureEdgeTerminationPolicy: Redirect 20 | wildcardPolicy: None 21 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/policygenerator-download/service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: policygen-downloader 6 | namespace: open-cluster-management-downloader 7 | labels: 8 | app: policygen-downloader 9 | spec: 10 | ipFamilies: 11 | - IPv4 12 | ports: 13 | - name: 8080-tcp 14 | protocol: TCP 15 | port: 8080 16 | targetPort: 8080 17 | internalTrafficPolicy: Cluster 18 | type: ClusterIP 19 | sessionAffinity: None 20 | selector: 21 | app: policygen-downloader 22 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/zts-xcrypt/input/operator-executor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: xcrypt.zettaset.com/v1 2 | kind: XCrypt 3 | metadata: 4 | name: xcrypt-launcher 5 | 
namespace: zts-xcrypt 6 | spec: 7 | replicas: 1 -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/zts-xcrypt/input/placementrule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | labels: 5 | custom: myApp 6 | name: placement-dev 7 | namespace: zts-xcrypt 8 | spec: 9 | clusterSelector: 10 | matchExpressions: 11 | - {key: environment, operator: In, values: ["dev"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/community/zts-xcrypt/policyGenerator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: PolicyGenerator 3 | metadata: 4 | name: policy-xcrypt 5 | placementBindingDefaults: 6 | name: binding-xcrypt 7 | policyDefaults: 8 | controls: 9 | - CM-2 Baseline Configuration 10 | namespace: zts-xcrypt 11 | placement: 12 | placementRulePath: input/placementrule.yaml 13 | remediationAction: enforce 14 | severity: medium 15 | standards: 16 | - NIST SP 800-53 17 | policySets: 18 | - policyset-xcrypt 19 | policies: 20 | - name: policy-zts-rbac 21 | disabled: false 22 | manifests: 23 | - path: input/zts-rbac.yaml 24 | remediationAction: enforce 25 | - name: policy-zts-install-xcrypts-crd-v1 26 | disabled: false 27 | manifests: 28 | - path: input/xcrypt.zettaset.com_xcrypts.yaml 29 | remediationAction: enforce 30 | - name: policy-zts-xcrypts-deployment 31 | disabled: false 32 | manifests: 33 | - path: input/operator-dh-v1-local.yaml 34 | remediationAction: enforce 35 | - name: policy-zts-xcrypt-operator-executor-deployment 36 | disabled: false 37 | manifests: 38 | - path: input/operator-executor.yaml 39 | remediationAction: enforce 40 | policySets: 41 | - name: policyset-xcrypt 42 | description: Zettaset Xcrypt product 
is deployed using an Operator Deployment 43 | on an OpenShift cluster. This policy set is focused on the components that 44 | install on every managed cluster. 45 | placement: 46 | placementRulePath: input/placementrule.yaml 47 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/README.md: -------------------------------------------------------------------------------- 1 | # PolicySets -- Stable 2 | PolicySets in this folder are organized by name. Each `PolicySet` requires the policy 3 | generator for deployment. See the 4 | [Policy Generator](https://github.com/stolostron/policy-generator-plugin) 5 | Kustomize plugin for more information on using the policy generator. 6 | 7 | ## PolicySet details 8 | 9 | Policy | Description | Prerequisites 10 | ------- | ----------- | ------------- 11 | [Advanced Cluster Management Hub Best Practices](./acm-hardening) | Applies best practice policies for your Advanced Cluster Management hub installation. | Needs to be deployed only to the Advanced Cluster Management hub cluster. The `PolicySet` uses cluster `Placement` and not the `PlacementRule` placement mechanism. 12 | [OpenShift Platform Plus](./openshift-plus) | The OpenShift Platform Plus policy set applies several policies that install the OpenShift Platform Plus products using best practices that allow them to work well together. | The OpenShift Platform Plus policy set works with OpenShift managed clusters and installs many components to the hub cluster. See the policy set [README.md](./openshift-plus/README.md) for more information on prerequisites. The `PolicySet` uses cluster `Placement` and not the `PlacementRule` placement mechanism. Red Hat Advanced Cluster Management version 2.7 is required to install this PolicySet. 
13 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/acm-hardening/README.md: -------------------------------------------------------------------------------- 1 | # PolicySets -- Hub Best Practices 2 | 3 | See the [Policy Generator](https://github.com/stolostron/policy-generator-plugin) 4 | Kustomize plugin for more information on using the policy generator. 5 | 6 | ## PolicySet details 7 | 8 | The Advanced Cluster Management Hub Best Practices `PolicySet` applies best practices for how to govern your Advanced Cluster Management hub installation. 9 | This `PolicySet` needs to be deployed only to the Advanced Cluster Management hub cluster. 10 | 11 | **Note**: The `PolicySet` uses cluster `Placement` and not the `PlacementRule` placement mechanism, so the namespace of 12 | the `Placement` must also be bound to a `ManagedClusterSet` using a `ManagedClusterSetBinding`. See the 13 | [Placement](https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#placement-overview) 14 | and 15 | [ManagedClusterSetBinding](https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.8/html/clusters/cluster_mce_overview#creating-managedclustersetbinding) 16 | documentation. 
17 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/acm-hardening/input-backup/policy-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: BackupSchedule 3 | metadata: 4 | namespace: open-cluster-management-backup 5 | status: 6 | phase: Enabled 7 | --- 8 | apiVersion: v1 9 | kind: Pod 10 | metadata: 11 | labels: 12 | app: cluster-backup-chart 13 | namespace: open-cluster-management-backup 14 | status: 15 | phase: Running 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | annotations: 21 | repository: https://github.com/openshift/oadp-operator 22 | namespace: open-cluster-management-backup 23 | status: 24 | phase: Running 25 | --- 26 | apiVersion: v1 27 | kind: Pod 28 | metadata: 29 | labels: 30 | app.kubernetes.io/name: velero 31 | namespace: open-cluster-management-backup 32 | status: 33 | phase: Running 34 | --- 35 | apiVersion: velero.io/v1 36 | kind: BackupStorageLocation 37 | metadata: 38 | namespace: open-cluster-management-backup 39 | status: 40 | phase: Available 41 | --- 42 | apiVersion: velero.io/v1 43 | kind: Backup 44 | metadata: 45 | labels: 46 | velero.io/schedule-name: acm-validation-policy-schedule 47 | namespace: open-cluster-management-backup 48 | --- 49 | apiVersion: velero.io/v1 50 | kind: Backup 51 | metadata: 52 | labels: 53 | velero.io/schedule-name: acm-managed-clusters-schedule 54 | namespace: open-cluster-management-backup 55 | status: 56 | phase: Completed 57 | --- 58 | apiVersion: velero.io/v1 59 | kind: Backup 60 | metadata: 61 | labels: 62 | velero.io/schedule-name: acm-resources-schedule 63 | namespace: open-cluster-management-backup 64 | status: 65 | phase: Completed 66 | -------------------------------------------------------------------------------- 
/policygenerator/policy-sets/stable/acm-hardening/input-managedcluster/policy-managedclusteraddon-available.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: addon.open-cluster-management.io/v1alpha1 2 | kind: ManagedClusterAddOn 3 | status: 4 | conditions: 5 | - type: Available 6 | status: "False" 7 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/acm-hardening/input-policyreport/policy-check-policyreports.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: wgpolicyk8s.io/v1alpha2 2 | kind: PolicyReport 3 | results: 4 | - result: fail 5 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/acm-hardening/input-subscriptions/policy-subscriptions.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | status: 4 | phase: Failed 5 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/acm-hardening/input/placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: placement-acm-hardening 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/acm-hardening/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - ./policyGenerator.yaml 3 | commonLabels: 4 | open-cluster-management.io/policy-set: acm-hardening 5 | 
-------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-acm-observability/storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: objectbucket.io/v1alpha1 2 | kind: ObjectBucketClaim 3 | metadata: 4 | name: obc-observability 5 | namespace: openshift-storage 6 | spec: 7 | generateBucketName: obc-observability-bucket 8 | storageClassName: openshift-storage.noobaa.io 9 | status: 10 | phase: Bound 11 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-acs-central/policy-acs-central-status.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: central 6 | namespace: stackrox 7 | status: 8 | conditions: 9 | - status: "True" 10 | type: Available 11 | --- 12 | apiVersion: apps/v1 13 | kind: Deployment 14 | metadata: 15 | name: central-db 16 | namespace: stackrox 17 | status: 18 | conditions: 19 | - status: "True" 20 | type: Available 21 | --- 22 | apiVersion: apps/v1 23 | kind: Deployment 24 | metadata: 25 | name: scanner-db 26 | namespace: stackrox 27 | status: 28 | conditions: 29 | - status: "True" 30 | type: Available 31 | --- 32 | apiVersion: apps/v1 33 | kind: Deployment 34 | metadata: 35 | name: scanner 36 | namespace: stackrox 37 | status: 38 | conditions: 39 | - status: "True" 40 | type: Available 41 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-acs-central/policy-acs-operator-central.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: stackrox 5 | --- 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: rhacs-operator 10 | --- 11 | 
apiVersion: policy.open-cluster-management.io/v1beta1 12 | kind: OperatorPolicy 13 | metadata: 14 | name: operatorpolicy-rhacs-operator 15 | spec: 16 | remediationAction: enforce 17 | severity: high 18 | complianceType: musthave 19 | upgradeApproval: Automatic 20 | operatorGroup: 21 | name: rhacs-operator-group 22 | namespace: rhacs-operator 23 | subscription: 24 | channel: stable 25 | name: rhacs-operator 26 | namespace: rhacs-operator 27 | source: redhat-operators 28 | sourceNamespace: openshift-marketplace 29 | --- 30 | apiVersion: platform.stackrox.io/v1alpha1 31 | kind: Central 32 | metadata: 33 | namespace: stackrox 34 | name: stackrox-central-services 35 | spec: 36 | central: 37 | exposure: 38 | loadBalancer: 39 | enabled: false 40 | port: 443 41 | nodePort: 42 | enabled: false 43 | route: 44 | enabled: true 45 | persistence: 46 | persistentVolumeClaim: 47 | claimName: stackrox-db 48 | egress: 49 | connectivityPolicy: Online 50 | scanner: 51 | analyzer: 52 | scaling: 53 | autoScaling: Enabled 54 | maxReplicas: 5 55 | minReplicas: 2 56 | replicas: 2 57 | scannerComponent: Enabled 58 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-compliance/policy-compliance-operator-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: openshift-compliance 5 | --- 6 | apiVersion: policy.open-cluster-management.io/v1beta1 7 | kind: OperatorPolicy 8 | metadata: 9 | name: operatorpolicy-comp-operator 10 | spec: 11 | remediationAction: enforce 12 | severity: high 13 | complianceType: musthave 14 | upgradeApproval: Automatic 15 | operatorGroup: 16 | name: compliance-operator 17 | namespace: openshift-compliance 18 | targetNamespaces: 19 | - openshift-compliance 20 | subscription: 21 | channel: stable 22 | name: compliance-operator 23 | namespace: openshift-compliance 24 | source: 
redhat-operators 25 | sourceNamespace: openshift-marketplace 26 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-odf/policy-object-storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: objectbucket.io/v1alpha1 2 | kind: ObjectBucketClaim 3 | metadata: 4 | name: obc-observability 5 | namespace: openshift-storage 6 | spec: 7 | generateBucketName: obc-observability-bucket 8 | storageClassName: openshift-storage.noobaa.io 9 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-odf/policy-odf-status.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: noobaa-operator 5 | namespace: openshift-storage 6 | status: 7 | conditions: 8 | - status: "True" 9 | type: Available 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: ocs-operator 15 | namespace: openshift-storage 16 | status: 17 | conditions: 18 | - status: "True" 19 | type: Available 20 | --- 21 | apiVersion: ocs.openshift.io/v1 22 | kind: StorageCluster 23 | metadata: 24 | name: ocs-storagecluster 25 | namespace: openshift-storage 26 | status: 27 | phase: Ready 28 | --- 29 | apiVersion: noobaa.io/v1alpha1 30 | kind: NooBaa 31 | metadata: 32 | name: noobaa 33 | namespace: openshift-storage 34 | status: 35 | phase: Ready 36 | --- 37 | apiVersion: noobaa.io/v1alpha1 38 | kind: BackingStore 39 | metadata: 40 | name: noobaa-default-backing-store 41 | namespace: openshift-storage 42 | status: 43 | phase: Ready 44 | --- 45 | apiVersion: noobaa.io/v1alpha1 46 | kind: BucketClass 47 | metadata: 48 | name: noobaa-default-bucket-class 49 | namespace: openshift-storage 50 | status: 51 | phase: Ready 52 | 
-------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-odf/policy-odf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | openshift.io/cluster-monitoring: "true" 6 | name: openshift-storage 7 | --- 8 | apiVersion: policy.open-cluster-management.io/v1beta1 9 | kind: OperatorPolicy 10 | metadata: 11 | name: operatorpolicy-odf-operator 12 | spec: 13 | remediationAction: enforce 14 | severity: high 15 | complianceType: musthave 16 | upgradeApproval: Automatic 17 | operatorGroup: 18 | name: openshift-storage-operatorgroup 19 | namespace: openshift-storage 20 | targetNamespaces: 21 | - openshift-storage 22 | subscription: 23 | name: odf-operator 24 | namespace: openshift-storage 25 | source: redhat-operators 26 | sourceNamespace: openshift-marketplace 27 | --- 28 | apiVersion: odf.openshift.io/v1alpha1 29 | kind: StorageSystem 30 | metadata: 31 | name: ocs-storagecluster-storagesystem 32 | namespace: openshift-storage 33 | spec: 34 | kind: storagecluster.ocs.openshift.io/v1 35 | name: ocs-storagecluster 36 | namespace: openshift-storage 37 | --- 38 | apiVersion: operator.openshift.io/v1 39 | kind: Console 40 | metadata: 41 | name: cluster 42 | spec: 43 | plugins: 44 | - odf-console 45 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-quay/policy-quay-status.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: registry-quay-app 5 | namespace: local-quay 6 | status: 7 | conditions: 8 | - status: "True" 9 | type: Available 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: registry-quay-database 15 | namespace: local-quay 16 | status: 17 | conditions: 18 | - status: 
"True" 19 | type: Available 20 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-sensor/acs-check-certificates.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: CertificatePolicy 3 | metadata: 4 | name: acs-bundle-certificates 5 | spec: 6 | namespaceSelector: 7 | include: ["policies"] 8 | remediationAction: inform 9 | severity: high 10 | minimumDuration: 720h 11 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-sensor/policy-acs-central-ca-bundle-expired.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: sensor-tls 6 | namespace: stackrox 7 | --- 8 | apiVersion: v1 9 | kind: Secret 10 | metadata: 11 | name: collector-tls 12 | namespace: stackrox 13 | --- 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: admission-control-tls 18 | namespace: stackrox -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-sensor/policy-acs-central-ca-bundle-v1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: create-cluster-init-bundle 6 | namespace: stackrox -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-sensor/policy-acs-central-ca-bundle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: stackrox 5 | --- 6 | apiVersion: v1 7 | kind: ServiceAccount 8 | metadata: 9 | annotations: 10 | argocd.argoproj.io/sync-options: 
SkipDryRunOnMissingResource=true 11 | name: create-cluster-init 12 | namespace: stackrox 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: Role 16 | metadata: 17 | annotations: 18 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 19 | argocd.argoproj.io/sync-wave: "1" 20 | name: create-cluster-init 21 | namespace: stackrox 22 | rules: 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - secrets 27 | verbs: 28 | - get 29 | - list 30 | - create 31 | - patch 32 | - update 33 | - apiGroups: 34 | - platform.stackrox.io 35 | resources: 36 | - securedclusters 37 | verbs: 38 | - get 39 | - list 40 | - patch 41 | - update 42 | - apiGroups: 43 | - route.openshift.io 44 | resources: 45 | - routes 46 | verbs: 47 | - get 48 | - list 49 | --- 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | kind: RoleBinding 52 | metadata: 53 | annotations: 54 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 55 | name: create-cluster-init 56 | namespace: stackrox 57 | roleRef: 58 | apiGroup: rbac.authorization.k8s.io 59 | kind: Role 60 | name: create-cluster-init 61 | subjects: 62 | - kind: ServiceAccount 63 | name: create-cluster-init 64 | namespace: stackrox 65 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input-sensor/policy-advanced-managed-cluster-status.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: sensor 6 | namespace: stackrox 7 | status: 8 | conditions: 9 | - status: "True" 10 | type: Available 11 | --- 12 | apiVersion: apps/v1 13 | kind: DaemonSet 14 | metadata: 15 | name: collector 16 | namespace: stackrox 17 | status: 18 | numberMisscheduled: 0 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: admission-control 24 | namespace: stackrox 25 | status: 26 | conditions: 27 | - status: "True" 28 | type: 
Available 29 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input/clusters-placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: placement-openshift-plus-clusters 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: vendor, operator: In, values: ["OpenShift"]} 12 | - {key: "local-cluster", operator: NotIn, values: ["true"]} 13 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/input/hub-placement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.open-cluster-management.io/v1beta1 2 | kind: Placement 3 | metadata: 4 | name: placement-openshift-plus-hub 5 | namespace: policies 6 | spec: 7 | predicates: 8 | - requiredClusterSelector: 9 | labelSelector: 10 | matchExpressions: 11 | - {key: "local-cluster", operator: In, values: ["true"]} 12 | -------------------------------------------------------------------------------- /policygenerator/policy-sets/stable/openshift-plus/kustomization.yml: -------------------------------------------------------------------------------- 1 | generators: 2 | - ./policyGenerator.yaml 3 | commonLabels: 4 | open-cluster-management.io/policy-set: openshift-plus 5 | commonAnnotations: 6 | argocd.argoproj.io/compare-options: IgnoreExtraneous 7 | -------------------------------------------------------------------------------- /stable/AC-Access-Control/README.md: -------------------------------------------------------------------------------- 1 | # Access Control 2 | 3 | See [NIST Special Publication 800-53 (Rev. 
4)](https://nvd.nist.gov/800-53/Rev4/control/AC-1) for a description of the Access Control family. View the policies that define user actions for your cluster and map to the Access Control catalog. View the following table list of the stable policies that are supported by [Red Hat Advanced Cluster Management for Kubernetes](https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html-single/governance/index#kubernetes-configuration-policy-controller): 4 | 5 | Policy | Description | Prerequisites 6 | ------- | ----------- | ------------- 7 | [policy-role](../AC-Access-Control/policy-role.yaml) | Ensures that a role exists with permissions as specified. | 8 | [policy-rolebinding](../AC-Access-Control/policy-rolebinding.yaml) | Ensures that an entity is bound to a particular role. | 9 | 10 | 11 | You can contribute more policies that map to the Access Control catalog. See [Contributing policies](https://github.com/stolostron/policy-collection/blob/main/docs/CONTRIBUTING.md) for more details. 12 | -------------------------------------------------------------------------------- /stable/AC-Access-Control/policy-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-role 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: AC Access Control 8 | policy.open-cluster-management.io/controls: AC-3 Access Enforcement 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-role-example 18 | spec: 19 | remediationAction: inform # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction. 
20 | severity: high 21 | namespaceSelector: 22 | include: ["default"] 23 | object-templates: 24 | - complianceType: mustonlyhave # role definition should exact match 25 | objectDefinition: 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | kind: Role 28 | metadata: 29 | name: sample-role 30 | rules: 31 | - apiGroups: ["extensions", "apps"] 32 | resources: ["deployments"] 33 | verbs: ["get", "list", "watch", "delete", "patch"] 34 | -------------------------------------------------------------------------------- /stable/CA-Security-Assessment-and-Authorization/README.md: -------------------------------------------------------------------------------- 1 | # Security Assessment and Authorization 2 | 3 | See [NIST Special Publication 800-53 (Rev. 4)](https://nvd.nist.gov/800-53/Rev4/control/CA-1) for a description of the Security Assessment and Authorization security control. View the following table list of the stable policy that is supported by [Red Hat Advanced Cluster Management for Kubernetes](https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html-single/governance/index#kubernetes-configuration-policy-controller): 4 | 5 | Policy | Description | Prerequisites 6 | ------- | ----------- | ------------- 7 | [Install Red Hat Compliance Operator policy](../CA-Security-Assessment-and-Authorization/policy-compliance-operator-install.yaml) | Use the official and supported compliance operator installation, `policy-comp-operator` policy, to enable continuous compliance monitoring for your cluster. After you install this operator, you must select what benchmark you want to comply to, and create the appropriate objects for the scans to be run. | See [Compliance Operator](https://docs.openshift.com/container-platform/4.6/security/compliance_operator/compliance-operator-understanding.html#compliance-operator-understanding) for more details. 
8 | 9 | **Note:** This policy is not supported for Red Hat Advanced Cluster Management 2.1.0, but it can be applied. 10 | 11 | You can contribute more policies that map to the Security Assessment and Authorization control family. See [Contributing policies](https://github.com/stolostron/policy-collection/blob/main/docs/CONTRIBUTING.md) for more details. 12 | 13 | -------------------------------------------------------------------------------- /stable/CM-Configuration-Management/policy-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-namespace 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-namespace-example 18 | spec: 19 | remediationAction: inform # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction. 
20 | severity: low 21 | object-templates: 22 | - complianceType: musthave 23 | objectDefinition: 24 | kind: Namespace # must have namespace 'prod' 25 | apiVersion: v1 26 | metadata: 27 | name: prod 28 | -------------------------------------------------------------------------------- /stable/CM-Configuration-Management/policy-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-pod 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: CM Configuration Management 8 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-pod-example 18 | spec: 19 | remediationAction: inform # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction. 
20 | severity: low 21 | namespaceSelector: 22 | include: ["default"] 23 | object-templates: 24 | - complianceType: musthave 25 | objectDefinition: 26 | apiVersion: v1 27 | kind: Pod # nginx pod must exist 28 | metadata: 29 | name: sample-nginx-pod 30 | spec: 31 | containers: 32 | - image: nginx:1.18.0 33 | name: nginx 34 | ports: 35 | - containerPort: 80 36 | -------------------------------------------------------------------------------- /stable/SC-System-and-Communications-Protection/policy-certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-certificate 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: SC System and Communications Protection 8 | policy.open-cluster-management.io/controls: SC-8 Transmission Confidentiality and Integrity 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: CertificatePolicy # cert management expiration 16 | metadata: 17 | name: policy-certificate-example 18 | spec: 19 | namespaceSelector: 20 | include: ["default"] 21 | remediationAction: inform # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction. 
22 | severity: low 23 | minimumDuration: 300h 24 | -------------------------------------------------------------------------------- /stable/SC-System-and-Communications-Protection/policy-limitmemory.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy.open-cluster-management.io/v1 2 | kind: Policy 3 | metadata: 4 | name: policy-limitmemory 5 | annotations: 6 | policy.open-cluster-management.io/standards: NIST SP 800-53 7 | policy.open-cluster-management.io/categories: SC System and Communications Protection 8 | policy.open-cluster-management.io/controls: SC-6 Resource Availability 9 | spec: 10 | remediationAction: inform 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: policy-limitrange-example 18 | spec: 19 | remediationAction: inform # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction. 20 | severity: medium 21 | namespaceSelector: 22 | include: ["default"] 23 | object-templates: 24 | - complianceType: mustonlyhave 25 | objectDefinition: 26 | apiVersion: v1 27 | kind: LimitRange # limit memory usage 28 | metadata: 29 | name: mem-limit-range 30 | spec: 31 | limits: 32 | - default: 33 | memory: 512Mi 34 | defaultRequest: 35 | memory: 256Mi 36 | type: Container 37 | -------------------------------------------------------------------------------- /stable/SI-System-and-Information-Integrity/README.md: -------------------------------------------------------------------------------- 1 | # System and Information Integrity 2 | 3 | See [NIST Special Publication 800-53 (Rev. 4)](https://nvd.nist.gov/800-53/Rev4/control/SI-1) for a description of the System and Information Integrity security control. 
View the following table list of the stable policies that are supported by [Red Hat Advanced Cluster Management for Kubernetes](https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.6/html-single/governance/index#kubernetes-configuration-policy-controller): 4 | 5 | Policy | Description | Prerequisites 6 | ------- | ----------- | ------------- 7 | [policy-imagemanifestvuln](../SI-System-and-Information-Integrity/policy-imagemanifestvuln.yaml) | Detect vulnerabilities in container images. Leverages the [Container Security Operator](https://github.com/quay/container-security-operator) and installs it on the managed cluster if it does not exist. | 8 | 9 | You can contribute more policies that map to the System and Information Integrity control family. See [Contributing policies](https://github.com/stolostron/policy-collection/blob/main/docs/CONTRIBUTING.md) for more details. 10 | --------------------------------------------------------------------------------