├── collections
│   └── requirements.yml
├── 07.Ansible-Tower-Integration
│   ├── ansible-playbooks
│   │   ├── logger-playbook.yml
│   │   ├── namespace-playbook.yml
│   │   ├── certificate-playbook.yml
│   │   └── roles
│   │       ├── k8s-namespace
│   │       │   ├── defaults
│   │       │   │   └── main.yml
│   │       │   └── tasks
│   │       │       └── main.yml
│   │       ├── k8s-rotate-certificate
│   │       │   ├── defaults
│   │       │   │   └── main.yml
│   │       │   ├── files
│   │       │   │   └── openssl.cnf
│   │       │   ├── templates
│   │       │   │   └── openssl-ingress.cnf.j2
│   │       │   └── tasks
│   │       │       └── main.yml
│   │       └── logger
│   │           ├── defaults
│   │           │   └── main.yml
│   │           └── tasks
│   │               └── main.yml
│   ├── images
│   │   ├── tower-result.png
│   │   ├── application-ansible.png
│   │   ├── k8s-namespace-tower.png
│   │   ├── forbidden-namespace-violation.png
│   │   └── forbidden-namespace-no-violation.png
│   ├── demo-application
│   │   ├── mariadb-resources
│   │   │   ├── mariadb-secret.yml
│   │   │   ├── prehook
│   │   │   │   └── pre_log.yaml
│   │   │   ├── posthook
│   │   │   │   └── post_log.yaml
│   │   │   └── mariadb.yml
│   │   └── rhacm-resources
│   │       └── application.yml
│   ├── demo-policy
│   │   └── rhacm-resources
│   │       ├── policyautomation.yaml
│   │       └── policy.yaml
│   ├── INSTRUCTOR.md
│   └── README.md
├── 03.Observability
│   ├── images
│   │   ├── panel-1.png
│   │   └── panel-2.png
│   ├── exercise
│   │   └── multiclusterobservability.yaml
│   └── README.md
├── 05.Governance-Risk-Compliance
│   ├── demo
│   │   ├── namespace.yaml
│   │   ├── demo-application
│   │   │   ├── application-resources
│   │   │   │   ├── service.yaml
│   │   │   │   ├── route.yaml
│   │   │   │   └── deployment.yaml
│   │   │   └── rhacm-resources
│   │   │       └── application.yaml
│   │   ├── placementrule.yaml
│   │   └── demo-policies
│   │       ├── policy-deletekubeadmin.yaml
│   │       ├── policy-configmap-ocp-version.yaml
│   │       ├── policy-image.yaml
│   │       └── policy-rbac.yaml
│   ├── exercise
│   │   ├── namespace.yaml
│   │   ├── exercise-application
│   │   │   ├── application-resources
│   │   │   │   ├── mariadb-secret.yaml
│   │   │   │   ├── mariadb-service.yaml
│   │   │   │   ├── exporter-service.yaml
│   │   │   │   ├── exporter-route.yaml
│   │   │   │   ├── exporter-deployment.yaml
│   │   │   │   └── mariadb-deployment.yaml
│   │   │   └── rhacm-resources
│   │   │       └── application.yaml
│   │   ├── placementrule.yaml
│   │   ├── exercise-policies
│   │   │   ├── placementrule.yaml
│   │   │   ├── limitrange-policy.yaml
│   │   │   └── networkpolicy-policy.yaml
│   │   └── exercise-templates
│   │       └── metrics-configmap.yaml
│   ├── images
│   │   ├── policies-overview.png
│   │   └── networkpolicy-status.png
│   └── README.md
├── 06.Advanced-Policy-Management
│   ├── demo-policy-generator
│   │   ├── policy-generator-example
│   │   │   ├── kustomization.yaml
│   │   │   ├── networkpolicies
│   │   │   │   ├── netpol-application-1.yaml
│   │   │   │   ├── netpol-application-3.yaml
│   │   │   │   ├── netpol-application-4.yaml
│   │   │   │   └── netpol-application-2.yaml
│   │   │   └── networkpolicy-policy.yaml
│   │   └── complex-policy.yaml
│   ├── demo-compliance-operator
│   │   ├── policy-compliance-operator.yaml
│   │   └── policy-moderate-scan.yaml
│   ├── exercise-compliance-operator
│   │   ├── policy-compliance-operator.yaml
│   │   └── policy-moderate-scan.yaml
│   ├── demo-gatekeeper
│   │   ├── policy-gatekeeper-operator.yaml
│   │   └── policy-gatekeeper-disable-nodeport.yaml
│   └── README.md
├── 04.Application-Lifecycle
│   ├── demo-argocd
│   │   ├── mariadb-resources
│   │   │   ├── cluster-a
│   │   │   │   ├── namespace.yaml
│   │   │   │   ├── mariadb-secret.yaml
│   │   │   │   ├── service.yaml
│   │   │   │   └── mariadb.yaml
│   │   │   └── local-cluster
│   │   │       ├── namespace.yaml
│   │   │       ├── mariadb-secret.yaml
│   │   │       ├── service.yaml
│   │   │       └── mariadb.yaml
│   │   └── argocd-resources
│   │       ├── appproject.yaml
│   │       ├── application.yaml
│   │       └── applicationset.yaml
│   ├── images
│   │   ├── argocd-cluster.png
│   │   ├── application-dev.png
│   │   ├── application-prod.png
│   │   ├── argocd-applications.png
│   │   ├── rhacm-argocd-app-details.png
│   │   └── argocd-rhacm-applications.png
│   ├── exercise-argocd
│   │   ├── application-resources
│   │   │   ├── templates
│   │   │   │   ├── namespace.yaml
│   │   │   │   ├── configmap.yaml
│   │   │   │   ├── service.yaml
│   │   │   │   ├── route.yaml
│   │   │   │   └── deployment.yaml
│   │   │   ├── values
│   │   │   │   ├── webserver-production
│   │   │   │   │   └── values.yaml
│   │   │   │   └── webserver-development
│   │   │   │       └── values.yaml
│   │   │   ├── .helmignore
│   │   │   └── Chart.yaml
│   │   └── argocd-resources
│   │       ├── appproject.yaml
│   │       └── applicationset.yaml
│   ├── exercise-application
│   │   ├── application-resources
│   │   │   ├── configmap.yaml
│   │   │   ├── service.yaml
│   │   │   ├── route.yaml
│   │   │   └── deployment.yaml
│   │   └── rhacm-resources
│   │       └── application.yaml
│   ├── demo-application
│   │   ├── mariadb-resources
│   │   │   ├── mariadb-secret.yaml
│   │   │   ├── service.yaml
│   │   │   └── mariadb.yaml
│   │   └── rhacm-resources
│   │       └── application.yaml
│   └── README.md
├── 01.RHACM-Installation
│   └── README.md
├── README.md
└── 02.Cluster-Management
    └── README.md
/collections/requirements.yml:
--------------------------------------------------------------------------------
1 | collections:
2 | - community.kubernetes
3 | - community.okd
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/logger-playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - logger
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/namespace-playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - k8s-namespace
--------------------------------------------------------------------------------
/03.Observability/images/panel-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/03.Observability/images/panel-1.png
--------------------------------------------------------------------------------
/03.Observability/images/panel-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/03.Observability/images/panel-2.png
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: rhacm-policies
6 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: rhacm-policies
6 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/policy-generator-example/kustomization.yaml:
--------------------------------------------------------------------------------
1 | generators:
2 | - networkpolicy-policy.yaml
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/certificate-playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - k8s-rotate-certificate
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/cluster-a/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: mariadb
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/local-cluster/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: mariadb
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/k8s-namespace/defaults/main.yml:
--------------------------------------------------------------------------------
1 | k8s_namespace_state: "absent"
2 | k8s_namespace_name: "forbidden-namespace"
--------------------------------------------------------------------------------
/04.Application-Lifecycle/images/argocd-cluster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/04.Application-Lifecycle/images/argocd-cluster.png
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/templates/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: {{ .Values.namespace }}
--------------------------------------------------------------------------------
/04.Application-Lifecycle/images/application-dev.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/04.Application-Lifecycle/images/application-dev.png
--------------------------------------------------------------------------------
/04.Application-Lifecycle/images/application-prod.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/04.Application-Lifecycle/images/application-prod.png
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/images/tower-result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/07.Ansible-Tower-Integration/images/tower-result.png
--------------------------------------------------------------------------------
/04.Application-Lifecycle/images/argocd-applications.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/04.Application-Lifecycle/images/argocd-applications.png
--------------------------------------------------------------------------------
/04.Application-Lifecycle/images/rhacm-argocd-app-details.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/04.Application-Lifecycle/images/rhacm-argocd-app-details.png
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/images/policies-overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/05.Governance-Risk-Compliance/images/policies-overview.png
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/images/application-ansible.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/07.Ansible-Tower-Integration/images/application-ansible.png
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/images/k8s-namespace-tower.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/07.Ansible-Tower-Integration/images/k8s-namespace-tower.png
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/values/webserver-production/values.yaml:
--------------------------------------------------------------------------------
1 | application:
2 | content: "Production Application"
3 | namespace: webserver-prod
--------------------------------------------------------------------------------
/04.Application-Lifecycle/images/argocd-rhacm-applications.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/04.Application-Lifecycle/images/argocd-rhacm-applications.png
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/images/networkpolicy-status.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/05.Governance-Risk-Compliance/images/networkpolicy-status.png
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/values/webserver-development/values.yaml:
--------------------------------------------------------------------------------
1 | application:
2 | content: "Development Application"
3 | namespace: webserver-dev
4 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/images/forbidden-namespace-violation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/07.Ansible-Tower-Integration/images/forbidden-namespace-violation.png
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/images/forbidden-namespace-no-violation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/HEAD/07.Ansible-Tower-Integration/images/forbidden-namespace-no-violation.png
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/k8s-rotate-certificate/defaults/main.yml:
--------------------------------------------------------------------------------
1 | base_dir: "/tmp"
2 | certificate_path: "{{ base_dir }}/{{ cluster_domain }}"
3 | custom_ca_name: "custom-ca"
4 | old_certificate: "test"
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-application/application-resources/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | application.html: |
4 | Production Application
5 | kind: ConfigMap
6 | metadata:
7 | name: application-prod
8 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/logger/defaults/main.yml:
--------------------------------------------------------------------------------
1 | log_file_name: "rhacm.log"
2 | log_file_path: "/var/www/html/logs"
3 | date: "{{ lookup('pipe', 'date') }}"
4 | trigger_name: "null"
5 | hook_type: "undefined"
6 | target_clusters: "undefined"
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | application.html: |
4 | {{ .Values.application.content }}
5 | kind: ConfigMap
6 | metadata:
7 | name: application-prod
8 | namespace: {{ .Values.namespace }}
9 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/cluster-a/mariadb-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | MYSQL_ROOT_PASSWORD: cmVkaGF0
4 | kind: Secret
5 | metadata:
6 | namespace: mariadb
7 | name: mariadb
8 | labels:
9 | app: mariadb
10 | type: database
11 | type: Opaque
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/local-cluster/mariadb-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | MYSQL_ROOT_PASSWORD: cmVkaGF0
4 | kind: Secret
5 | metadata:
6 | namespace: mariadb
7 | name: mariadb
8 | labels:
9 | app: mariadb
10 | type: database
11 | type: Opaque
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-application/mariadb-resources/mariadb-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | MYSQL_ROOT_PASSWORD: cmVkaGF0
4 | kind: Secret
5 | metadata:
6 | name: mariadb
7 | labels:
8 | app: mariadb
9 | type: database
10 | type: Opaque
11 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-application/mariadb-resources/mariadb-secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | MYSQL_ROOT_PASSWORD: cmVkaGF0
4 | kind: Secret
5 | metadata:
6 | name: mariadb
7 | labels:
8 | app: mariadb
9 | type: database
10 | type: Opaque
11 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/k8s-rotate-certificate/files/openssl.cnf:
--------------------------------------------------------------------------------
1 | [req]
2 | distinguished_name = req_distinguished_name
3 | x509_extensions = v3_req
4 | prompt = no
5 | default_md = sha224
6 | [req_distinguished_name]
7 | CN = localhost.ssl
8 | [v3_req]
9 | keyUsage = keyEncipherment, dataEncipherment
10 | extendedKeyUsage = serverAuth
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/application-resources/mariadb-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | MYSQL_ROOT_PASSWORD: cmVkaGF0
4 | kind: Secret
5 | metadata:
6 | name: mariadb
7 | labels:
8 | app: mariadb
9 | type: database
10 | type: Opaque
11 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/policy-generator-example/networkpolicies/netpol-application-1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: stackrox-generated-webserver
5 | namespace: application-1
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: webserver
10 | policyTypes:
11 | - Ingress
12 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/policy-generator-example/networkpolicies/netpol-application-3.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: stackrox-generated-webserver
5 | namespace: application-3
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: webserver
10 | policyTypes:
11 | - Ingress
12 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/policy-generator-example/networkpolicies/netpol-application-4.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: stackrox-generated-webserver
5 | namespace: application-4
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: webserver
10 | policyTypes:
11 | - Ingress
12 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-application/mariadb-resources/prehook/pre_log.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: tower.ansible.com/v1alpha1
3 | kind: AnsibleJob
4 | metadata:
5 | name: prejob
6 | spec:
7 | tower_auth_secret: ansible-tower
8 | job_template_name: Logger
9 | extra_vars:
10 | trigger_name: mariadb
11 | hook_type: prehook
12 | log_file_name: rhacm.log
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-application/mariadb-resources/posthook/post_log.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: tower.ansible.com/v1alpha1
3 | kind: AnsibleJob
4 | metadata:
5 | name: postjob
6 | spec:
7 | tower_auth_secret: ansible-tower
8 | job_template_name: Logger
9 | extra_vars:
10 | trigger_name: mariadb
11 | hook_type: posthook
12 | log_file_name: rhacm.log
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-application/application-resources/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: webserver
6 | name: webserver
7 | spec:
8 | ports:
9 | - name: 8080-tcp
10 | port: 8080
11 | protocol: TCP
12 | targetPort: 8080
13 | selector:
14 | app: webserver
15 | sessionAffinity: None
16 | type: ClusterIP
17 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-application/application-resources/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: webserver
6 | name: webserver
7 | spec:
8 | ports:
9 | - name: 8080-tcp
10 | port: 8080
11 | protocol: TCP
12 | targetPort: 8080
13 | selector:
14 | app: webserver
15 | sessionAffinity: None
16 | type: ClusterIP
17 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-application/mariadb-resources/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: mariadb
6 | name: mariadb
7 | namespace: mariadb
8 | spec:
9 | ports:
10 | - name: 3306-tcp
11 | port: 3306
12 | protocol: TCP
13 | targetPort: 3306
14 | selector:
15 | app: mariadb
16 | sessionAffinity: None
17 | type: ClusterIP
18 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/argocd-resources/appproject.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: AppProject
3 | metadata:
4 | name: demo-project-argocd
5 | namespace: openshift-gitops
6 | spec:
7 | clusterResourceWhitelist:
8 | - group: '*'
9 | kind: '*'
10 | destinations:
11 | - namespace: '*'
12 | name: local-cluster
13 | server: '*'
14 | sourceRepos:
15 | - '*'
16 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/argocd-resources/appproject.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: AppProject
3 | metadata:
4 | name: demo-project-argocd
5 | namespace: openshift-gitops
6 | spec:
7 | clusterResourceWhitelist:
8 | - group: '*'
9 | kind: '*'
10 | destinations:
11 | - namespace: '*'
12 | name: local-cluster
13 | server: '*'
14 | sourceRepos:
15 | - '*'
16 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/application-resources/mariadb-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: mariadb
6 | name: mariadb
7 | spec:
8 | ports:
9 | - name: 3306-tcp
10 | port: 3306
11 | protocol: TCP
12 | targetPort: 3306
13 | selector:
14 | app: mariadb
15 | sessionAffinity: None
16 | type: ClusterIP
17 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/cluster-a/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: mariadb
6 | name: mariadb
7 | namespace: mariadb
8 | spec:
9 | ports:
10 | - name: 3306-tcp
11 | port: 3306
12 | protocol: TCP
13 | targetPort: 3306
14 | selector:
15 | deploymentconfig: mariadb
16 | sessionAffinity: None
17 | type: ClusterIP
18 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/local-cluster/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: mariadb
6 | name: mariadb
7 | namespace: mariadb
8 | spec:
9 | ports:
10 | - name: 3306-tcp
11 | port: 3306
12 | protocol: TCP
13 | targetPort: 3306
14 | selector:
15 | deploymentconfig: mariadb
16 | sessionAffinity: None
17 | type: ClusterIP
18 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/application-resources/exporter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: mysqld-exporter
6 | name: mysqld-exporter
7 | spec:
8 | ports:
9 | - name: 9104-tcp
10 | port: 9104
11 | protocol: TCP
12 | targetPort: 9104
13 | selector:
14 | app: mysqld-exporter
15 | sessionAffinity: None
16 | type: ClusterIP
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-application/application-resources/route.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: route.openshift.io/v1
2 | kind: Route
3 | metadata:
4 | labels:
5 | app: webserver
6 | name: webserver
7 | spec:
8 | path: "/application.html"
9 | port:
10 | targetPort: 8080-tcp
11 | tls:
12 | termination: edge
13 | to:
14 | kind: Service
15 | name: webserver
16 | weight: 100
17 | wildcardPolicy: None
18 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/application-resources/exporter-route.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: route.openshift.io/v1
2 | kind: Route
3 | metadata:
4 | labels:
5 | app: mysqld-exporter
6 | name: mysqld-exporter
7 | spec:
8 | port:
9 | targetPort: 9104-tcp
10 | tls:
11 | termination: edge
12 | to:
13 | kind: Service
14 | name: mysqld-exporter
15 | weight: 100
16 | wildcardPolicy: None
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-application/application-resources/route.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: route.openshift.io/v1
2 | kind: Route
3 | metadata:
4 | labels:
5 | app: webserver
6 | name: webserver
7 | spec:
8 | path: "/application.html"
9 | port:
10 | targetPort: 8080-tcp
11 | tls:
12 | termination: edge
13 | to:
14 | kind: Service
15 | name: webserver
16 | weight: 100
17 | wildcardPolicy: None
18 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: webserver
6 | name: webserver
7 | namespace: {{ .Values.namespace }}
8 | spec:
9 | ports:
10 | - name: 8080-tcp
11 | port: 8080
12 | protocol: TCP
13 | targetPort: 8080
14 | selector:
15 | app: webserver
16 | sessionAffinity: None
17 | type: ClusterIP
18 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/placementrule.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps.open-cluster-management.io/v1
3 | kind: PlacementRule
4 | metadata:
5 | name: dev-clusters
6 | namespace: rhacm-policies
7 | spec:
8 | clusterConditions:
9 | - type: ManagedClusterConditionAvailable
10 | status: "True"
11 | clusterSelector:
12 | matchExpressions:
13 | - key: environment
14 | operator: In
15 | values:
16 | - "dev"
17 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/placementrule.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps.open-cluster-management.io/v1
3 | kind: PlacementRule
4 | metadata:
5 | name: prod-policies-clusters
6 | namespace: rhacm-policies
7 | spec:
8 | clusterConditions:
9 | - type: ManagedClusterConditionAvailable
10 | status: "True"
11 | clusterSelector:
12 | matchExpressions:
13 | - key: environment
14 | operator: In
15 | values:
16 | - "production"
17 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/templates/route.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: route.openshift.io/v1
2 | kind: Route
3 | metadata:
4 | annotations:
5 | labels:
6 | app: webserver
7 | name: webserver
8 | namespace: {{ .Values.namespace }}
9 | spec:
10 | path: "/application.html"
11 | port:
12 | targetPort: 8080-tcp
13 | tls:
14 | termination: edge
15 | to:
16 | kind: Service
17 | name: webserver
18 | weight: 100
19 | wildcardPolicy: None
20 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-policies/placementrule.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps.open-cluster-management.io/v1
3 | kind: PlacementRule
4 | metadata:
5 | name: prod-policies-clusters
6 | namespace: rhacm-policies
7 | spec:
8 | clusterConditions:
9 | - type: ManagedClusterConditionAvailable
10 | status: "True"
11 | clusterSelector:
12 | matchExpressions:
13 | - key: environment
14 | operator: In
15 | values:
16 | - "production"
17 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-policy/rhacm-resources/policyautomation.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1beta1
2 | kind: PolicyAutomation
3 | metadata:
4 | name: dangerous-namespace-policy-automation
5 | namespace: rhacm-policies
6 | spec:
7 | automationDef:
8 | extra_vars:
9 | trigger_name: dangerous-namespace-policy
10 | hook_type: policyautomation
11 | name: Logger
12 | secret: grc-ansible-tower
13 | type: AnsibleJob
14 | mode: once
15 | policyRef: policy-remove-dangerous-namespace
16 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/policy-generator-example/networkpolicy-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1
2 | kind: PolicyGenerator
3 | metadata:
4 | name: policy-network-management
5 | policyDefaults:
6 | namespace: default
7 | policies:
8 | - name: network-policy-management
9 | manifests:
10 | - path: networkpolicies
11 | remediationAction: enforce
12 | placement:
13 | clusterSelector:
14 | matchExpressions:
15 | - {key: environment, operator: In, values: ["dev"]}
16 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/argocd-resources/application.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: Application
3 | metadata:
4 | name: mariadb-argocd
5 | namespace: openshift-gitops
6 | spec:
7 | destination:
8 | name: local-cluster
9 | project: demo-project-argocd
10 | source:
11 | path: 04.Application-Lifecycle/demo-argocd/mariadb-resources/local-cluster
12 | repoURL: https://github.com/michaelkotelnikov/rhacm-workshop
13 | targetRevision: master
14 | syncPolicy:
15 | automated:
16 | prune: false
17 | selfHeal: true
18 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/k8s-rotate-certificate/templates/openssl-ingress.cnf.j2:
--------------------------------------------------------------------------------
1 | [req]
2 | default_bits = 2048
3 | prompt = no
4 | default_md = sha256
5 | req_extensions = req_ext
6 | distinguished_name = dn
7 |
8 | [ dn ]
9 | C=UA
10 | ST=Dnepropetrovskaya
11 | L=Kamyanske
12 | O=DMK
13 | OU=OASUP
14 | emailAddress=webmaster@localhost
15 | CN = api.{{ cluster_domain }}
16 |
17 | [ req_ext ]
18 | basicConstraints = CA:FALSE
19 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment
20 | subjectAltName = @alt_names
21 |
22 | [ alt_names ]
23 | DNS.0 = api.{{ cluster_domain }}
24 | DNS.1 = *.apps.{{ cluster_domain }}
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/policy-generator-example/networkpolicies/netpol-application-2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: stackrox-generated-webserver
5 | namespace: application-2
6 | spec:
7 | ingress:
8 | - from:
9 | - namespaceSelector:
10 | matchLabels:
11 | kubernetes.io/metadata.name: application-1
12 | podSelector:
13 | matchLabels:
14 | app: webserver
15 | ports:
16 | - port: 8080
17 | protocol: TCP
18 | podSelector:
19 | matchLabels:
20 | app: webserver
21 | policyTypes:
22 | - Ingress
23 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/logger/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create log file directory
3 | file:
4 | path: "{{ log_file_path }}"
5 | state: directory
6 |
7 | - name: Validate the log file
8 | stat:
9 | path: "{{ log_file_path }}/{{ log_file_name }}"
10 | register: log_state
11 |
12 | - name: Create the log file
13 | file:
14 | path: "{{ log_file_path }}/{{ log_file_name }}"
15 | state: touch
16 | when: not log_state.stat.exists
17 |
18 | - name: Add log message to file
19 | lineinfile:
20 | path: "{{ log_file_path }}/{{ log_file_name }}"
21 | line: "{{ date }} Ansible Job was triggered by {{ trigger_name }} as {{ hook_type }} in clusters {{ target_clusters }}."
--------------------------------------------------------------------------------
/01.RHACM-Installation/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 1 - Advanced Cluster Management Installation
2 |
3 | In this exercise you will install the Advanced Cluster Management for Kubernetes operator. To stay consistent with the rest of the workshop, install Red Hat Advanced Cluster Management for Kubernetes 2.5 - when choosing the update channel during the installation, select **release-2.5**.
4 |
5 | To install Advanced Cluster Management, follow the steps presented in the **Installation** section of the workshop's presentation - [https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing](https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing). If you prefer working from the CLI, a sketch of the equivalent installation resources is shown below.
6 |
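7 | The snippet below is a minimal sketch of the OLM resources behind an OperatorHub installation. The namespace, package, and catalog names are the common defaults from the ACM documentation and are assumptions here - verify them against the catalogs available in your cluster before applying.
8 |
9 | ```yaml
10 | # Sketch only - assumes the default redhat-operators catalog source and the
11 | # open-cluster-management target namespace; adjust both to your environment.
12 | apiVersion: v1
13 | kind: Namespace
14 | metadata:
15 |   name: open-cluster-management
16 | ---
17 | apiVersion: operators.coreos.com/v1
18 | kind: OperatorGroup
19 | metadata:
20 |   name: open-cluster-management
21 |   namespace: open-cluster-management
22 | spec:
23 |   targetNamespaces:
24 |     - open-cluster-management
25 | ---
26 | apiVersion: operators.coreos.com/v1alpha1
27 | kind: Subscription
28 | metadata:
29 |   name: advanced-cluster-management
30 |   namespace: open-cluster-management
31 | spec:
32 |   channel: release-2.5          # the update channel requested above
33 |   installPlanApproval: Automatic
34 |   name: advanced-cluster-management
35 |   source: redhat-operators
36 |   sourceNamespace: openshift-marketplace
37 | ```
38 |
39 | Once the operator reports a `Succeeded` status, creating a `MultiClusterHub` resource in the `open-cluster-management` namespace (for example, from the console form or the sample in the ACM documentation) completes the installation.
40 |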
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-application/mariadb-resources/mariadb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.openshift.io/v1
2 | kind: DeploymentConfig
3 | metadata:
4 | name: mariadb
5 | labels:
6 | app: mariadb
7 | type: database
8 | spec:
9 | replicas: 1
10 | template:
11 | metadata:
12 | labels:
13 | app: mariadb
14 | type: database
15 | spec:
16 | containers:
17 | - name: mariadb
18 | image: docker.io/bitnami/mariadb:10.3
19 | ports:
20 | - containerPort: 3306
21 | name: db-port
22 | env:
23 | - name: MARIADB_ROOT_PASSWORD
24 | valueFrom:
25 | secretKeyRef:
26 | name: mariadb
27 | key: MYSQL_ROOT_PASSWORD
28 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/k8s-namespace/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install prerequisites
3 | pip:
4 | name:
5 | - openshift
6 | - pyyaml
7 | - kubernetes
8 |
9 | - name: Log in (obtain access token)
10 | community.okd.openshift_auth:
11 | username: "{{ k8s_username }}"
12 | password: "{{ k8s_password }}"
13 | host: "{{ k8s_api_url }}"
14 | validate_certs: no
15 | register: k8s_auth_results
16 |
17 | - name: Ensure the Namespace is in the desired state
18 | k8s:
19 | name: "{{ k8s_namespace_name }}"
20 | api_version: v1
21 | kind: Namespace
22 | state: "{{ k8s_namespace_state }}"
23 | host: "{{ k8s_api_url }}"
24 | api_key: "{{ k8s_auth_results.k8s_auth.api_key }}"
25 | validate_certs: no
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/cluster-a/mariadb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.openshift.io/v1
2 | kind: DeploymentConfig
3 | metadata:
4 | name: mariadb
5 | namespace: mariadb
6 | labels:
7 | app: mariadb
8 | type: database
9 | spec:
10 | replicas: 2
11 | template:
12 | metadata:
13 | labels:
14 | app: mariadb
15 | type: database
16 | spec:
17 | containers:
18 | - name: mariadb
19 | image: docker.io/bitnami/mariadb:10.3
20 | ports:
21 | - containerPort: 3306
22 | name: db-port
23 | env:
24 | - name: MARIADB_ROOT_PASSWORD
25 | valueFrom:
26 | secretKeyRef:
27 | name: mariadb
28 | key: MYSQL_ROOT_PASSWORD
29 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/mariadb-resources/local-cluster/mariadb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps.openshift.io/v1
2 | kind: DeploymentConfig
3 | metadata:
4 | name: mariadb
5 | namespace: mariadb
6 | labels:
7 | app: mariadb
8 | type: database
9 | spec:
10 | replicas: 2
11 | template:
12 | metadata:
13 | labels:
14 | app: mariadb
15 | type: database
16 | spec:
17 | containers:
18 | - name: mariadb
19 | image: docker.io/bitnami/mariadb:10.3
20 | ports:
21 | - containerPort: 3306
22 | name: db-port
23 | env:
24 | - name: MARIADB_ROOT_PASSWORD
25 | valueFrom:
26 | secretKeyRef:
27 | name: mariadb
28 | key: MYSQL_ROOT_PASSWORD
29 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-argocd/argocd-resources/applicationset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: ApplicationSet
3 | metadata:
4 | name: multicluster-mariadb
5 | namespace: openshift-gitops
6 | spec:
7 | generators:
8 | - git:
9 | repoURL: https://github.com/michaelkotelnikov/rhacm-workshop.git
10 | revision: master
11 | directories:
12 | - path: 04.Application-Lifecycle/demo-argocd/mariadb-resources/*
13 | template:
14 | metadata:
15 | name: '{{path.basename}}-mariadb'
16 | spec:
17 | project: default
18 | source:
19 | repoURL: https://github.com/michaelkotelnikov/rhacm-workshop.git
20 | targetRevision: master
21 | path: '{{path}}'
22 | destination:
23 | name: '{{path.basename}}'
24 | syncPolicy:
25 | automated:
26 | prune: false
27 | selfHeal: true
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/application-resources/exporter-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: mysqld-exporter
6 | name: mysqld-exporter
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: mysqld-exporter
12 | template:
13 | metadata:
14 | labels:
15 | app: mysqld-exporter
16 | spec:
17 | containers:
18 | - env:
19 | - name: DATA_SOURCE_NAME
20 | value: root:cmVkaGF0@(mariadb.mariadb-metrics.svc.cluster.local:3306)/
21 | image: docker.io/prom/mysqld-exporter:v0.13.0
22 | imagePullPolicy: Always
23 | name: mysqld-exporter
24 | ports:
25 | - containerPort: 9104
26 | protocol: TCP
27 | resources:
28 | requests:
29 | memory: "128Mi"
30 | cpu: "10m"
31 | limits:
32 | memory: "512Mi"
33 | cpu: "500m"
34 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-application/mariadb-resources/mariadb.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: mariadb
5 | labels:
6 | app: mariadb
7 | type: database
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: mariadb
12 | replicas: 1
13 | template:
14 | metadata:
15 | labels:
16 | app: mariadb
17 | type: database
18 | spec:
19 | containers:
20 | - name: mariadb
21 | image: docker.io/bitnami/mariadb:10.3
22 | ports:
23 | - containerPort: 3306
24 | name: db-port
25 | env:
26 | - name: MARIADB_ROOT_PASSWORD
27 | valueFrom:
28 | secretKeyRef:
29 | name: mariadb
30 | key: MYSQL_ROOT_PASSWORD
31 | resources:
32 | requests:
33 | memory: "128Mi"
34 | cpu: "250m"
35 | limits:
36 | memory: "512Mi"
37 | cpu: "500m"
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: webserver
5 | labels:
6 | app: webserver
7 | namespace: {{ .Values.namespace }}
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: webserver
12 | replicas: 1
13 | template:
14 | metadata:
15 | labels:
16 | app: webserver
17 | spec:
18 | containers:
19 | - name: apache
20 | image: docker.io/bitnami/apache:2.4.46
21 | ports:
22 | - containerPort: 8080
23 | name: webserver
24 | volumeMounts:
25 | - mountPath: /app/application.html
26 | name: application-prod
27 | subPath: application.html
28 | volumes:
29 | - configMap:
30 | defaultMode: 420
31 | items:
32 | - key: application.html
33 | path: application.html
34 | name: application-prod
35 | name: application-prod
36 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/argocd-resources/applicationset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: argoproj.io/v1alpha1
2 | kind: ApplicationSet
3 | metadata:
4 | name: webserver
5 | namespace: openshift-gitops
6 | spec:
7 | generators:
8 | - git:
9 | repoURL: https://github.com/michaelkotelnikov/rhacm-workshop.git
10 | revision: master
11 | directories:
12 | - path: 04.Application-Lifecycle/exercise-argocd/application-resources/values/*
13 | template:
14 | metadata:
15 | name: '{{path.basename}}'
16 | spec:
17 | project: demo-project-argocd
18 | source:
19 | repoURL: https://github.com/michaelkotelnikov/rhacm-workshop.git
20 | targetRevision: master
21 | path: 04.Application-Lifecycle/exercise-argocd/application-resources/
22 | helm:
23 | valueFiles:
24 | - 'values/{{path.basename}}/values.yaml'
25 | destination:
26 | name: 'local-cluster'
27 | syncPolicy:
28 | automated:
29 | prune: false
30 | selfHeal: true
31 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/application-resources/mariadb-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: mariadb
5 | labels:
6 | app: mariadb
7 | type: database
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: mariadb
12 | replicas: 1
13 | template:
14 | metadata:
15 | labels:
16 | app: mariadb
17 | type: database
18 | spec:
19 | containers:
20 | - name: mariadb
21 | image: docker.io/bitnami/mariadb:10.3
22 | ports:
23 | - containerPort: 3306
24 | name: db-port
25 | env:
26 | - name: MARIADB_ROOT_PASSWORD
27 | valueFrom:
28 | secretKeyRef:
29 | name: mariadb
30 | key: MYSQL_ROOT_PASSWORD
31 | resources:
32 | requests:
33 | memory: "128Mi"
34 | cpu: "250m"
35 | limits:
36 | memory: "512Mi"
37 | cpu: "500m"
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-application/application-resources/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: webserver
5 | labels:
6 | app: webserver
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: webserver
11 | replicas: 1
12 | template:
13 | metadata:
14 | labels:
15 | app: webserver
16 | spec:
17 | containers:
18 | - name: apache
19 | image: docker.io/bitnami/apache:2.4.46
20 | ports:
21 | - containerPort: 8080
22 | name: webserver
23 | volumeMounts:
24 | - mountPath: /app/application.html
25 | name: application-prod
26 | subPath: application.html
27 | resources:
28 | requests:
29 | memory: "128Mi"
30 | cpu: "250m"
31 | limits:
32 | memory: "512Mi"
33 | cpu: "500m"
34 | volumes:
35 | - configMap:
36 | defaultMode: 420
37 | items:
38 | - key: application.html
39 | path: application.html
40 | name: application-prod
41 | name: application-prod
42 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-application/application-resources/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: webserver
5 | labels:
6 | app: webserver
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: webserver
11 | replicas: 1
12 | template:
13 | metadata:
14 | labels:
15 | app: webserver
16 | spec:
17 | containers:
18 | - name: apache
19 | image: docker.io/bitnami/apache:2.4.46
20 | ports:
21 | - containerPort: 8080
22 | name: webserver
23 | volumeMounts:
24 | - mountPath: /app/application.html
25 | name: application-prod
26 | subPath: application.html
27 | resources:
28 | requests:
29 | memory: "128Mi"
30 | cpu: "250m"
31 | limits:
32 | memory: "512Mi"
33 | cpu: "500m"
34 | volumes:
35 | - configMap:
36 | defaultMode: 420
37 | items:
38 | - key: application.html
39 | path: application.html
40 | name: application-prod
41 | name: application-prod
42 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-argocd/application-resources/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: application-resources
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.1.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.0"
25 |
--------------------------------------------------------------------------------
/03.Observability/exercise/multiclusterobservability.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: observability.open-cluster-management.io/v1beta2
2 | kind: MultiClusterObservability
3 | metadata:
4 | name: observability #Your customized name of MulticlusterObservability CR
5 | spec:
6 | availabilityConfig: High # Available values are High or Basic
7 | enableDownSampling: false # The default value is false. Disabling downsampling is not recommended, since querying long time ranges without downsampled data is inefficient.
8 | imagePullPolicy: Always
9 | observabilityAddonSpec: # Defines the global settings for all managed clusters that have the observability add-on enabled
10 | enableMetrics: true # Indicates whether the observability add-on pushes metrics to the hub server
11 | interval: 30 # Interval at which the observability add-on pushes metrics to the hub server
12 | retentionResolution1h: 30d # How long to retain samples downsampled to 1-hour resolution in the bucket
13 | retentionResolution5m: 14d
14 | retentionResolutionRaw: 5d
15 | storageConfig: # Specifies the storage to be used by Observability
16 | metricObjectStorage:
17 | name: thanos-object-storage
18 | key: thanos.yaml
19 | statefulSetSize: 10Gi # The amount of storage applied to each of the Observability StatefulSets (e.g. the Thanos store, rule, compact, and receive components)
20 | statefulSetStorageClass: gp2
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Advanced Cluster Management Workshop
2 |
3 | This repository contains the exercises and demonstrations for the Advanced Cluster Management workshop. The workshop is presented in the accompanying slide deck - [https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing](https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing)
4 |
5 | Participants in the workshop must have -
6 | * A running OpenShift cluster.
7 | * The `oc` CLI tool installed.
8 | * The `kubectl` CLI tool installed.
9 | * The `git` CLI tool installed.
10 | * A GitHub account.
11 |
12 | The repository is separated into 7 sections. Each section represents a stage in the workshop.
13 | * [RHACM Installation](./01.RHACM-Installation)
14 | * [Cluster Management](./02.Cluster-Management)
15 | * [Observability](./03.Observability)
16 | * [Application Lifecycle](./04.Application-Lifecycle)
17 | * [Governance Risk and Compliance](./05.Governance-Risk-Compliance)
18 | * [Advanced Policy Management](./06.Advanced-Policy-Management)
19 | * [Ansible Tower Integration](./07.Ansible-Tower-Integration)
20 |
21 |
22 | Each section contains a `README.md` file with exercises that summarize the topic. When participants finish the relevant section in the [workshop](https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing), they may start working on the associated exercise.
23 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-application/rhacm-resources/application.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps.open-cluster-management.io/v1
3 | kind: Channel
4 | metadata:
5 | name: mariadb-app
6 | namespace: mariadb
7 | spec:
8 | type: Git
9 | pathname: https://github.com/michaelkotelnikov/rhacm-workshop.git
10 | ---
11 | apiVersion: apps.open-cluster-management.io/v1
12 | kind: PlacementRule
13 | metadata:
14 | name: prod-clusters
15 | namespace: mariadb
16 | spec:
17 | clusterConditions:
18 | - type: ManagedClusterConditionAvailable
19 | status: "True"
20 | clusterSelector:
21 | matchLabels:
22 | environment: production
23 | ---
24 | apiVersion: apps.open-cluster-management.io/v1
25 | kind: Subscription
26 | metadata:
27 | name: mariadb-app
28 | namespace: mariadb
29 | labels:
30 | app: mariadb-app
31 | annotations:
32 | apps.open-cluster-management.io/github-path: 07.Ansible-Tower-Integration/demo-application/mariadb-resources
33 | apps.open-cluster-management.io/git-branch: master
34 | spec:
35 | channel: mariadb/mariadb-app
36 | placement:
37 | placementRef:
38 | kind: PlacementRule
39 | name: prod-clusters
40 | ---
41 | apiVersion: app.k8s.io/v1beta1
42 | kind: Application
43 | metadata:
44 | name: mariadb-app
45 | namespace: mariadb
46 | spec:
47 | componentKinds:
48 | - group: apps.open-cluster-management.io
49 | kind: Subscription
50 | descriptor: {}
51 | selector:
52 | matchExpressions:
53 | - key: app
54 | operator: In
55 | values:
56 | - mariadb-app
57 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/demo-application/rhacm-resources/application.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: mariadb
6 | ---
7 | apiVersion: apps.open-cluster-management.io/v1
8 | kind: Channel
9 | metadata:
10 | name: mariadb-app
11 | namespace: mariadb
12 | spec:
13 | type: Git
14 | pathname: https://github.com/michaelkotelnikov/rhacm-workshop.git
15 | ---
16 | apiVersion: apps.open-cluster-management.io/v1
17 | kind: PlacementRule
18 | metadata:
19 | name: dev-clusters
20 | namespace: mariadb
21 | spec:
22 | clusterConditions:
23 | - type: ManagedClusterConditionAvailable
24 | status: "True"
25 | clusterSelector:
26 | matchLabels:
27 | environment: dev
28 | ---
29 | apiVersion: apps.open-cluster-management.io/v1
30 | kind: Subscription
31 | metadata:
32 | name: mariadb-app
33 | namespace: mariadb
34 | labels:
35 | app: mariadb-app
36 | annotations:
37 | apps.open-cluster-management.io/github-path: 04.Application-Lifecycle/demo-application/mariadb-resources
38 | spec:
39 | channel: mariadb/mariadb-app
40 | placement:
41 | placementRef:
42 | kind: PlacementRule
43 | name: dev-clusters
44 | ---
45 | apiVersion: app.k8s.io/v1beta1
46 | kind: Application
47 | metadata:
48 | name: mariadb-app
49 | namespace: mariadb
50 | spec:
51 | componentKinds:
52 | - group: apps.open-cluster-management.io
53 | kind: Subscription
54 | descriptor: {}
55 | selector:
56 | matchExpressions:
57 | - key: app
58 | operator: In
59 | values:
60 | - mariadb-app
61 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-policies/policy-deletekubeadmin.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-remove-kubeadmin
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: AC Access Control
10 | policy.open-cluster-management.io/controls: AC-2 Account Management
11 | spec:
12 | remediationAction: inform
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-remove-kubeadmin
20 | spec:
21 | remediationAction: inform
22 | severity: low
23 | namespaceSelector:
24 | include:
25 | - kube-system
26 | object-templates:
27 | - complianceType: mustnothave
28 | objectDefinition:
29 | kind: Secret
30 | metadata:
31 | name: kubeadmin
32 | type: Opaque
33 | ---
34 | apiVersion: policy.open-cluster-management.io/v1
35 | kind: PlacementBinding
36 | metadata:
37 | name: remove-kubeadmin-binding
38 | namespace: rhacm-policies
39 | placementRef:
40 | name: dev-clusters
41 | kind: PlacementRule
42 | apiGroup: apps.open-cluster-management.io
43 | subjects:
44 | - name: policy-remove-kubeadmin
45 | kind: Policy
46 | apiGroup: policy.open-cluster-management.io
47 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-application/rhacm-resources/application.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: mariadb-metrics
6 | ---
7 | apiVersion: apps.open-cluster-management.io/v1
8 | kind: Channel
9 | metadata:
10 | name: mariadb-metrics
11 | namespace: mariadb-metrics
12 | spec:
13 | type: Git
14 | pathname: https://github.com/michaelkotelnikov/rhacm-workshop.git
15 | ---
16 | apiVersion: apps.open-cluster-management.io/v1
17 | kind: PlacementRule
18 | metadata:
19 | name: prod-clusters
20 | namespace: mariadb-metrics
21 | spec:
22 | clusterConditions:
23 | - type: ManagedClusterConditionAvailable
24 | status: "True"
25 | clusterSelector:
26 | matchLabels:
27 | environment: production
28 | ---
29 | apiVersion: apps.open-cluster-management.io/v1
30 | kind: Subscription
31 | metadata:
32 | name: mariadb-metrics
33 | namespace: mariadb-metrics
34 | labels:
35 | app: mariadb-metrics
36 | annotations:
37 | apps.open-cluster-management.io/github-path: 05.Governance-Risk-Compliance/exercise/exercise-application/application-resources
38 | apps.open-cluster-management.io/git-branch: master
39 | spec:
40 | channel: mariadb-metrics/mariadb-metrics
41 | placement:
42 | placementRef:
43 | kind: PlacementRule
44 | name: prod-clusters
45 | ---
46 | apiVersion: app.k8s.io/v1beta1
47 | kind: Application
48 | metadata:
49 | name: mariadb-metrics
50 | namespace: mariadb-metrics
51 | spec:
52 | componentKinds:
53 | - group: apps.open-cluster-management.io
54 | kind: Subscription
55 | descriptor: {}
56 | selector:
57 | matchExpressions:
58 | - key: app
59 | operator: In
60 | values:
61 | - mariadb-metrics
62 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-application/rhacm-resources/application.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: webserver-ocp-version
6 | ---
7 | apiVersion: apps.open-cluster-management.io/v1
8 | kind: Channel
9 | metadata:
10 | name: webserver-ocp-version
11 | namespace: webserver-ocp-version
12 | spec:
13 | type: Git
14 | pathname: https://github.com/michaelkotelnikov/rhacm-workshop.git
15 | ---
16 | apiVersion: apps.open-cluster-management.io/v1
17 | kind: PlacementRule
18 | metadata:
19 | name: dev-clusters
20 | namespace: webserver-ocp-version
21 | spec:
22 | clusterConditions:
23 | - type: ManagedClusterConditionAvailable
24 | status: "True"
25 | clusterSelector:
26 | matchLabels:
27 | environment: dev
28 | ---
29 | apiVersion: apps.open-cluster-management.io/v1
30 | kind: Subscription
31 | metadata:
32 | name: webserver-ocp-version
33 | namespace: webserver-ocp-version
34 | labels:
35 | app: webserver-ocp-version
36 | annotations:
37 | apps.open-cluster-management.io/github-path: 05.Governance-Risk-Compliance/demo/demo-application/application-resources
38 | apps.open-cluster-management.io/git-branch: master
39 | spec:
40 | channel: webserver-ocp-version/webserver-ocp-version
41 | placement:
42 | placementRef:
43 | kind: PlacementRule
44 | name: dev-clusters
45 | ---
46 | apiVersion: app.k8s.io/v1beta1
47 | kind: Application
48 | metadata:
49 | name: webserver-ocp-version
50 | namespace: webserver-ocp-version
51 | spec:
52 | componentKinds:
53 | - group: apps.open-cluster-management.io
54 | kind: Subscription
55 | descriptor: {}
56 | selector:
57 | matchExpressions:
58 | - key: app
59 | operator: In
60 | values:
61 | - webserver-ocp-version
62 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-policies/policy-configmap-ocp-version.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-clusterclaims
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: CM Configuration Management
10 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-clusterclaims
20 | spec:
21 | namespaceSelector:
22 | exclude:
23 | - kube-*
24 | include:
25 | - default
26 | object-templates:
27 | - complianceType: musthave
28 | objectDefinition:
29 | kind: ConfigMap
30 | apiVersion: v1
31 | metadata:
32 | name: application-prod
33 | namespace: webserver-ocp-version
34 | data:
35 | application.html: |
36 | Underlying OpenShift version is '{{ fromClusterClaim "version.openshift.io" }}'
37 | remediationAction: enforce
38 | severity: low
39 | ---
40 | apiVersion: policy.open-cluster-management.io/v1
41 | kind: PlacementBinding
42 | metadata:
43 | name: clusterclaims-binding
44 | namespace: rhacm-policies
45 | placementRef:
46 | name: dev-clusters
47 | kind: PlacementRule
48 | apiGroup: apps.open-cluster-management.io
49 | subjects:
50 | - name: policy-clusterclaims
51 | kind: Policy
52 | apiGroup: policy.open-cluster-management.io
53 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-templates/metrics-configmap.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-mariadb-metrics
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: CM Configuration Management
10 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-mariadb-metrics
20 | spec:
21 | namespaceSelector:
22 | exclude:
23 | - kube-*
24 | include:
25 | - default
26 | object-templates:
27 | - complianceType: musthave
28 | objectDefinition:
29 | kind: ConfigMap
30 | apiVersion: v1
31 | metadata:
32 | name: metrics-connection-string
33 | namespace: mariadb-metrics
34 | data:
35 | connection_string: 'root:{{ ( fromSecret "mariadb-metrics" "mariadb" "MYSQL_ROOT_PASSWORD" ) | base64dec }}@({{ (lookup "v1" "Service" "mariadb-metrics" "mariadb").spec.clusterIP }}:3306)/'
36 | remediationAction: enforce
37 | severity: low
38 | ---
39 | apiVersion: policy.open-cluster-management.io/v1
40 | kind: PlacementBinding
41 | metadata:
42 | name: mariadb-metrics-binding
43 | namespace: rhacm-policies
44 | placementRef:
45 | name: prod-policies-clusters
46 | kind: PlacementRule
47 | apiGroup: apps.open-cluster-management.io
48 | subjects:
49 | - name: policy-mariadb-metrics
50 | kind: Policy
51 | apiGroup: policy.open-cluster-management.io
52 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-policies/limitrange-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1
2 | kind: Policy
3 | metadata:
4 | name: policy-limitrange
5 | namespace: rhacm-policies
6 | annotations:
7 | policy.open-cluster-management.io/standards: NIST SP 800-53
8 | policy.open-cluster-management.io/categories: SC System and Communications Protection
9 | policy.open-cluster-management.io/controls: SC-6 Resource Availability
10 | spec:
11 | remediationAction: enforce
12 | disabled: false
13 | policy-templates:
14 | - objectDefinition:
15 | apiVersion: policy.open-cluster-management.io/v1
16 | kind: ConfigurationPolicy
17 | metadata:
18 | name: policy-limitrange-example
19 | spec:
20 | remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
21 | severity: medium
22 | namespaceSelector:
23 | include: ["webserver-acm"]
24 | object-templates:
25 | - complianceType: mustonlyhave
26 | objectDefinition:
27 | apiVersion: v1
28 | kind: LimitRange # limit memory usage
29 | metadata:
30 | name: webserver-limit-range
31 | spec:
32 | limits:
33 | - default:
34 | memory: 512Mi
35 | defaultRequest:
36 | memory: 256Mi
37 | type: Container
38 | ---
39 | apiVersion: policy.open-cluster-management.io/v1
40 | kind: PlacementBinding
41 | metadata:
42 | name: binding-policy-limitrange
43 | namespace: rhacm-policies
44 | placementRef:
45 | name: prod-policies-clusters
46 | kind: PlacementRule
47 | apiGroup: apps.open-cluster-management.io
48 | subjects:
49 | - name: policy-limitrange
50 | kind: Policy
51 | apiGroup: policy.open-cluster-management.io
52 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/demo-policy/rhacm-resources/policy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-remove-dangerous-namespace
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: CM Configuration Management
10 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
11 | spec:
12 | remediationAction: inform
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-remove-dangerous-namespace
20 | spec:
21 | remediationAction: inform
22 | severity: low
23 | namespaceSelector:
24 | include:
25 | - default
26 | object-templates:
27 | - complianceType: mustnothave
28 | objectDefinition:
29 | kind: Namespace
30 | metadata:
31 | name: forbidden-namespace
32 | ---
33 | apiVersion: policy.open-cluster-management.io/v1
34 | kind: PlacementBinding
35 | metadata:
36 | name: remove-dangerous-namespace-binding
37 | namespace: rhacm-policies
38 | placementRef:
39 | name: placement-remove-dangerous-namespace
40 | kind: PlacementRule
41 | apiGroup: apps.open-cluster-management.io
42 | subjects:
43 | - name: policy-remove-dangerous-namespace
44 | kind: Policy
45 | apiGroup: policy.open-cluster-management.io
46 | ---
47 | apiVersion: apps.open-cluster-management.io/v1
48 | kind: PlacementRule
49 | metadata:
50 | name: placement-remove-dangerous-namespace
51 | namespace: rhacm-policies
52 | spec:
53 | clusterConditions:
54 | - status: 'True'
55 | type: ManagedClusterConditionAvailable
56 | clusterSelector:
57 | matchExpressions:
58 | - key: environment
59 | operator: In
60 | values:
61 | - production
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-policies/policy-image.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-imagemanifestvuln
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: SI System and Information Integrity
10 | policy.open-cluster-management.io/controls: SI-4 Information System Monitoring
11 | spec:
12 | disabled: false
13 | policy-templates:
14 | - objectDefinition:
15 | apiVersion: policy.open-cluster-management.io/v1
16 | kind: ConfigurationPolicy
17 | metadata:
18 | name: policy-imagemanifestvuln-example-sub
19 | spec:
20 | remediationAction: enforce
21 | severity: high
22 | object-templates:
23 | - complianceType: musthave
24 | objectDefinition:
25 | apiVersion: operators.coreos.com/v1alpha1
26 | kind: Subscription
27 | metadata:
28 | name: container-security-operator
29 | namespace: openshift-operators
30 | spec:
31 | installPlanApproval: Automatic
32 | name: container-security-operator
33 | source: redhat-operators
34 | sourceNamespace: openshift-marketplace
35 | - objectDefinition:
36 | apiVersion: policy.open-cluster-management.io/v1
37 | kind: ConfigurationPolicy
38 | metadata:
39 | name: policy-imagemanifestvuln-example-imv
40 | spec:
41 | remediationAction: inform
42 | severity: high
43 | namespaceSelector:
44 | exclude: ["openshift-*"]
45 | include: ["*"]
46 | object-templates:
47 | - complianceType: mustnothave # mustnothave any ImageManifestVuln object
48 | objectDefinition:
49 | apiVersion: secscan.quay.redhat.com/v1alpha1
50 | kind: ImageManifestVuln # checking for a kind
51 | ---
52 | apiVersion: policy.open-cluster-management.io/v1
53 | kind: PlacementBinding
54 | metadata:
55 | name: image-placement-binding
56 | namespace: rhacm-policies
57 | placementRef:
58 | name: dev-clusters
59 | kind: PlacementRule
60 | apiGroup: apps.open-cluster-management.io
61 | subjects:
62 | - name: policy-imagemanifestvuln
63 | kind: Policy
64 | apiGroup: policy.open-cluster-management.io
65 |
--------------------------------------------------------------------------------
/04.Application-Lifecycle/exercise-application/rhacm-resources/application.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: webserver-acm
6 | ---
7 | apiVersion: apps.open-cluster-management.io/v1
8 | kind: Channel
9 | metadata:
10 | name: webserver-app
11 | namespace: webserver-acm
12 | spec:
13 | type: Git
14 | pathname: https://github.com/michaelkotelnikov/rhacm-workshop.git
15 | ---
16 | apiVersion: apps.open-cluster-management.io/v1
17 | kind: PlacementRule
18 | metadata:
19 | name: dev-clusters
20 | namespace: webserver-acm
21 | spec:
22 | clusterConditions:
23 | - type: ManagedClusterConditionAvailable
24 | status: "True"
25 | clusterSelector:
26 | matchLabels:
27 | environment: dev
28 | ---
29 | apiVersion: apps.open-cluster-management.io/v1
30 | kind: PlacementRule
31 | metadata:
32 | name: prod-clusters
33 | namespace: webserver-acm
34 | spec:
35 | clusterConditions:
36 | - type: ManagedClusterConditionAvailable
37 | status: "True"
38 | clusterSelector:
39 | matchLabels:
40 | environment: production
41 | ---
42 | apiVersion: apps.open-cluster-management.io/v1
43 | kind: Subscription
44 | metadata:
45 | name: webserver-app-prod
46 | namespace: webserver-acm
47 | labels:
48 | app: webserver-app
49 | annotations:
50 | apps.open-cluster-management.io/github-path: 04.Application-Lifecycle/exercise-application/application-resources
51 | spec:
52 | channel: webserver-acm/webserver-app
53 | placement:
54 | placementRef:
55 | kind: PlacementRule
56 | name: prod-clusters
57 | ---
58 | apiVersion: apps.open-cluster-management.io/v1
59 | kind: Subscription
60 | metadata:
61 | name: webserver-app-dev
62 | namespace: webserver-acm
63 | labels:
64 | app: webserver-app
65 | annotations:
66 | apps.open-cluster-management.io/github-path: 04.Application-Lifecycle/exercise-application/application-resources
67 | apps.open-cluster-management.io/git-branch: dev
68 | spec:
69 | channel: webserver-acm/webserver-app
70 | placement:
71 | placementRef:
72 | kind: PlacementRule
73 | name: dev-clusters
74 | ---
75 | apiVersion: app.k8s.io/v1beta1
76 | kind: Application
77 | metadata:
78 | name: webserver-app
79 | namespace: webserver-acm
80 | spec:
81 | componentKinds:
82 | - group: apps.open-cluster-management.io
83 | kind: Subscription
84 | descriptor: {}
85 | selector:
86 | matchExpressions:
87 | - key: app
88 | operator: In
89 | values:
90 | - webserver-app
91 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/ansible-playbooks/roles/k8s-rotate-certificate/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create certificate file directory
2 | file:
3 | path: "{{ certificate_path }}"
4 | state: directory
5 |
6 | - name: Create CA private key
7 | shell: "openssl genrsa -out {{ certificate_path }}/rootCA.key 2048"
8 |
9 | - name: Copy openssl configurations for CA certificate
10 | copy:
11 | src: "openssl.cnf"
12 | dest: "{{ certificate_path }}/openssl.cnf"
13 |
14 | - name: Create CA certificate
15 | shell: openssl req -x509 -new -nodes -key {{ certificate_path }}/rootCA.key -days 1024 -out {{ certificate_path }}/rootCA.pem -config {{ certificate_path }}/openssl.cnf
16 |
17 | - name: Create server private key
18 | shell: "openssl genrsa -out {{ certificate_path }}/{{ cluster_domain }}.key 2048"
19 |
20 | - name: Copy openssl configurations for server certificate
21 | template:
22 | src: "openssl-ingress.cnf.j2"
23 | dest: "{{ certificate_path }}/openssl-ingress.cnf"
24 |
25 | - name: Create CSR for server certificate
26 | shell: "openssl req -new -key {{ certificate_path }}/{{ cluster_domain }}.key -out {{ certificate_path }}/{{ cluster_domain }}.csr -config {{ certificate_path }}/openssl-ingress.cnf"
27 |
28 | - name: Create server certificate
29 | shell: "openssl x509 -req -days 365 -in {{ certificate_path }}/{{ cluster_domain }}.csr -CA {{ certificate_path }}/rootCA.pem -CAkey {{ certificate_path }}/rootCA.key -CAcreateserial -out {{ certificate_path }}/{{ cluster_domain }}.crt -extensions req_ext -extfile {{ certificate_path }}/openssl-ingress.cnf -sha256"
30 |
31 | - name: Login into Kubernetes cluster
32 | shell: "oc login -u {{ k8s_username }} -p {{ k8s_password }} {{ k8s_api_url }}"
33 |
34 | - name: Create configmap for CA
35 | shell: "oc create configmap custom-ca --from-file=ca-bundle.crt={{ certificate_path }}/rootCA.pem -n openshift-config"
36 |
37 | - name: Update cluster proxy CA
38 | shell: oc patch proxy/cluster --type=merge --patch='{"spec":{"trustedCA":{"name":"custom-ca"}}}'
39 |
40 | - name: Create secret for server certificate
41 | shell: "oc create secret tls ingress-cert --cert={{ certificate_path }}/{{ cluster_domain }}.crt --key={{ certificate_path }}/{{ cluster_domain }}.key -n openshift-ingress"
42 |
43 | - name: Update cluster ingress certificate
44 | shell: oc patch ingresscontroller.operator default --type=merge -p '{"spec":{"defaultCertificate":{"name":"ingress-cert"}}}' -n openshift-ingress-operator
45 |
46 | - name: Delete old certificate
47 | shell: "oc delete secret/{{ old_certificate }}"
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/INSTRUCTOR.md:
--------------------------------------------------------------------------------
1 | # Instructor Guide
2 |
3 | This section helps an instructor set up an Ansible Automation Platform environment for the workshop participants to use.
4 |
5 | ## Prerequisites
6 | * Install an Ansible Automation Platform instance on an external VM (Red Hat employees can use RHPDS).
7 |
8 | ## Preparations for exercise 1 (Integration with Applications Lifecycle) -
9 | 1. Log into the Ansible Automation Platform web interface and create an Ansible Automation Platform application at _Administration_ -> _Applications_.
10 | 2. Create a token for the admin user, associated with the application, at _Users_ -> _admin_ -> _Tokens_ -> _Add_. Select the created application and the _Write_ scope. Copy the token to a local machine.
11 | 3. To host the participants' log files, install an httpd server on the Ansible Automation Platform server by running - `yum install -y httpd` (steps 3-6 are consolidated in the shell sketch after this list).
12 | 4. To allow the httpd server to serve on port 80, remove all `port 80` listeners from `/etc/nginx/nginx.conf` on the Ansible Automation Platform server (the Nginx instance bundled with Ansible Automation Platform listens on port 80 by default).
13 | 5. Restart the Nginx server to apply the configuration change - `systemctl restart nginx`.
14 | 6. Create a log directory for the participants - `mkdir /var/www/html/logs`.
15 | 7. Create an inventory that references the local Ansible Automation Platform server.
16 | 8. Create a `Machine Credential` for the root user of the Automation Platform server.
17 | 9. In the Ansible Automation Platform web interface, create a project, and point it to the workshop's git repository (https://github.com/michaelkotelnikov/rhacm-workshop.git).
18 | 10. Create an Ansible Automation Platform job template and name it Logger. Make sure to allow `prompt vars` and `prompt inventories` by ticking the boxes next to those fields. Associate the job template with the `07.Ansible-Tower-Integration/ansible-playbooks/logger-playbook.yml` playbook, as well as with the created inventory and credentials.
19 | 11. Provide the participants with the token you created in `step 2` alongside the web URL for the Ansible Automation Platform web server. Also, provide participants with a username / password to log in to the web portal in order to troubleshoot the exercise.
20 |
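A consolidated sketch of steps 3-6 on the Ansible Automation Platform host; the `systemctl enable --now httpd` line is an assumption, since the steps above do not state how httpd is started:

```
# Step 3 - install httpd to host the participants' log files
yum install -y httpd

# Step 4 - manually edit /etc/nginx/nginx.conf and remove the port 80 listener blocks,
# so that httpd can bind to port 80
# Step 5 - restart Nginx to apply the change
systemctl restart nginx

# Assumption (not stated in the steps above): start and enable httpd
systemctl enable --now httpd

# Step 6 - create the shared log directory for the participants
mkdir /var/www/html/logs
```
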
21 | ## Preparations for exercise 2 (Integration with Governance Policies) -
22 | 1. Create a job template named K8S-Namespace and associate it with the project, credential and inventory created in the previous exercise. Make sure to associate the job template with the `07.Ansible-Tower-Integration/ansible-playbooks/namespace-playbook.yml` playbook.
23 | 2. Provide the participants with the token you created in `step 2` of the previous exercise alongside the web URL for the Ansible Automation Platform web server. Also, provide participants with a username / password to log in to the web portal in order to troubleshoot the exercise.
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/exercise/exercise-policies/networkpolicy-policy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-networkpolicy-webserver
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: SC System and Communications Protection
10 | policy.open-cluster-management.io/controls: SC-7 Boundary Protection
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-networkpolicy-denyall-webserver
20 | spec:
21 | remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
22 | severity: medium
23 | namespaceSelector:
24 | include: ["webserver-acm"]
25 | object-templates:
26 | - complianceType: musthave
27 | objectDefinition:
28 | kind: NetworkPolicy
29 | apiVersion: networking.k8s.io/v1
30 | metadata:
31 | name: deny-by-default
32 | spec:
33 | podSelector:
34 | ingress: []
35 | - objectDefinition:
36 | apiVersion: policy.open-cluster-management.io/v1
37 | kind: ConfigurationPolicy
38 | metadata:
39 | name: policy-networkpolicy-allow-ingress-webserver
40 | spec:
41 | remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
42 | severity: medium
43 | namespaceSelector:
44 | include: ["webserver-acm"]
45 | object-templates:
46 | - complianceType: musthave
47 | objectDefinition:
48 | kind: NetworkPolicy
49 | apiVersion: networking.k8s.io/v1
50 | metadata:
51 | name: allow-ingress-8080
52 | spec:
53 | ingress:
54 | - ports:
55 | - protocol: TCP
56 | port: 8080
57 | - from:
58 | - namespaceSelector:
59 | matchLabels:
60 | network.openshift.io/policy-group: ingress
61 | podSelector: {}
62 | policyTypes:
63 | - Ingress
64 | ---
65 | apiVersion: policy.open-cluster-management.io/v1
66 | kind: PlacementBinding
67 | metadata:
68 | name: binding-policy-networkpolicy-webserver
69 | namespace: rhacm-policies
70 | placementRef:
71 | name: prod-policies-clusters
72 | kind: PlacementRule
73 | apiGroup: apps.open-cluster-management.io
74 | subjects:
75 | - name: policy-networkpolicy-webserver
76 | kind: Policy
77 | apiGroup: policy.open-cluster-management.io
78 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-compliance-operator/policy-compliance-operator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1
2 | kind: Policy
3 | metadata:
4 | name: policy-compliance-operator
5 | namespace: rhacm-policies
6 | annotations:
7 | policy.open-cluster-management.io/standards: NIST SP 800-53
8 | policy.open-cluster-management.io/categories: CA Security Assessment and Authorization
9 | policy.open-cluster-management.io/controls: CA-2 Security Assessments
10 | spec:
11 | remediationAction: enforce
12 | disabled: false
13 | policy-templates:
14 | - objectDefinition:
15 | apiVersion: policy.open-cluster-management.io/v1
16 | kind: ConfigurationPolicy
17 | metadata:
18 | name: comp-operator-ns
19 | spec:
20 | remediationAction: enforce
21 | severity: high
22 | object-templates:
23 | - complianceType: musthave
24 | objectDefinition:
25 | apiVersion: v1
26 | kind: Namespace
27 | metadata:
28 | name: openshift-compliance
29 | - objectDefinition:
30 | apiVersion: policy.open-cluster-management.io/v1
31 | kind: ConfigurationPolicy
32 | metadata:
33 | name: comp-operator-operator-group
34 | spec:
35 | remediationAction: enforce
36 | severity: high
37 | object-templates:
38 | - complianceType: musthave
39 | objectDefinition:
40 | apiVersion: operators.coreos.com/v1
41 | kind: OperatorGroup
42 | metadata:
43 | name: compliance-operator
44 | namespace: openshift-compliance
45 | spec:
46 | targetNamespaces:
47 | - openshift-compliance
48 | - objectDefinition:
49 | apiVersion: policy.open-cluster-management.io/v1
50 | kind: ConfigurationPolicy
51 | metadata:
52 | name: comp-operator-subscription
53 | spec:
54 | remediationAction: enforce
55 | severity: high
56 | object-templates:
57 | - complianceType: musthave
58 | objectDefinition:
59 | apiVersion: operators.coreos.com/v1alpha1
60 | kind: Subscription
61 | metadata:
62 | name: compliance-operator
63 | namespace: openshift-compliance
64 | spec:
65 | installPlanApproval: Automatic
66 | name: compliance-operator
67 | source: redhat-operators
68 | sourceNamespace: openshift-marketplace
69 | ---
70 | apiVersion: policy.open-cluster-management.io/v1
71 | kind: PlacementBinding
72 | metadata:
73 | name: policy-comp-operator-binding
74 | namespace: rhacm-policies
75 | placementRef:
76 | name: dev-clusters
77 | kind: PlacementRule
78 | apiGroup: apps.open-cluster-management.io
79 | subjects:
80 | - name: policy-compliance-operator
81 | kind: Policy
82 | apiGroup: policy.open-cluster-management.io
83 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/exercise-compliance-operator/policy-compliance-operator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1
2 | kind: Policy
3 | metadata:
4 | name: policy-compliance-operator
5 | namespace: rhacm-policies
6 | annotations:
7 | policy.open-cluster-management.io/standards: NIST SP 800-53
8 | policy.open-cluster-management.io/categories: CA Security Assessment and Authorization
9 | policy.open-cluster-management.io/controls: CA-2 Security Assessments
10 | spec:
11 | remediationAction: enforce
12 | disabled: false
13 | policy-templates:
14 | - objectDefinition:
15 | apiVersion: policy.open-cluster-management.io/v1
16 | kind: ConfigurationPolicy
17 | metadata:
18 | name: comp-operator-ns
19 | spec:
20 | remediationAction: enforce
21 | severity: high
22 | object-templates:
23 | - complianceType: musthave
24 | objectDefinition:
25 | apiVersion: v1
26 | kind: Namespace
27 | metadata:
28 | name: openshift-compliance
29 | - objectDefinition:
30 | apiVersion: policy.open-cluster-management.io/v1
31 | kind: ConfigurationPolicy
32 | metadata:
33 | name: comp-operator-operator-group
34 | spec:
35 | remediationAction: enforce
36 | severity: high
37 | object-templates:
38 | - complianceType: musthave
39 | objectDefinition:
40 | apiVersion: operators.coreos.com/v1
41 | kind: OperatorGroup
42 | metadata:
43 | name: compliance-operator
44 | namespace: openshift-compliance
45 | spec:
46 | targetNamespaces:
47 | - openshift-compliance
48 | - objectDefinition:
49 | apiVersion: policy.open-cluster-management.io/v1
50 | kind: ConfigurationPolicy
51 | metadata:
52 | name: comp-operator-subscription
53 | spec:
54 | remediationAction: enforce
55 | severity: high
56 | object-templates:
57 | - complianceType: musthave
58 | objectDefinition:
59 | apiVersion: operators.coreos.com/v1alpha1
60 | kind: Subscription
61 | metadata:
62 | name: compliance-operator
63 | namespace: openshift-compliance
64 | spec:
65 | installPlanApproval: Automatic
66 | name: compliance-operator
67 | source: redhat-operators
68 | sourceNamespace: openshift-marketplace
69 | ---
70 | apiVersion: policy.open-cluster-management.io/v1
71 | kind: PlacementBinding
72 | metadata:
73 | name: policy-comp-operator-binding
74 | namespace: rhacm-policies
75 | placementRef:
76 | name: prod-policies-clusters
77 | kind: PlacementRule
78 | apiGroup: apps.open-cluster-management.io
79 | subjects:
80 | - name: policy-compliance-operator
81 | kind: Policy
82 | apiGroup: policy.open-cluster-management.io
83 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-gatekeeper/policy-gatekeeper-operator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-gatekeeper-operator
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: CM Configuration Management
10 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: gatekeeper-operator-product-sub
20 | spec:
21 | remediationAction: enforce
22 | severity: high
23 | object-templates:
24 | - complianceType: musthave
25 | objectDefinition:
26 | apiVersion: operators.coreos.com/v1alpha1
27 | kind: Subscription
28 | metadata:
29 | name: gatekeeper-operator-product
30 | namespace: openshift-operators
31 | spec:
32 | channel: stable
33 | installPlanApproval: Automatic
34 | name: gatekeeper-operator-product
35 | source: redhat-operators
36 | sourceNamespace: openshift-marketplace
37 | - objectDefinition:
38 | apiVersion: policy.open-cluster-management.io/v1
39 | kind: ConfigurationPolicy
40 | metadata:
41 | name: gatekeeper
42 | spec:
43 | remediationAction: enforce
44 | severity: high
45 | object-templates:
46 | - complianceType: musthave
47 | objectDefinition:
48 | apiVersion: operator.gatekeeper.sh/v1alpha1
49 | kind: Gatekeeper
50 | metadata:
51 | name: gatekeeper
52 | spec:
53 | audit:
54 | logLevel: INFO
55 | replicas: 1
56 | image:
57 | image: 'registry.redhat.io/rhacm2/gatekeeper-rhel8:v3.3.0'
58 | validatingWebhook: Enabled
59 | mutatingWebhook: Disabled
60 | webhook:
61 | emitAdmissionEvents: Enabled
62 | logLevel: INFO
63 | replicas: 2
64 | ---
65 | apiVersion: policy.open-cluster-management.io/v1
66 | kind: PlacementBinding
67 | metadata:
68 | name: binding-policy-gatekeeper-operator
69 | placementRef:
70 | name: placement-policy-gatekeeper-operator
71 | kind: PlacementRule
72 | apiGroup: apps.open-cluster-management.io
73 | subjects:
74 | - name: policy-gatekeeper-operator
75 | kind: Policy
76 | apiGroup: policy.open-cluster-management.io
77 | ---
78 | apiVersion: apps.open-cluster-management.io/v1
79 | kind: PlacementRule
80 | metadata:
81 | name: placement-policy-gatekeeper-operator
82 | spec:
83 | clusterConditions:
84 | - status: "True"
85 | type: ManagedClusterConditionAvailable
86 | clusterSelector:
87 | matchExpressions:
88 | - { key: environment, operator: In, values: ["dev"] }
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-compliance-operator/policy-moderate-scan.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1
2 | kind: Policy
3 | metadata:
4 | name: policy-moderate-scan
5 | namespace: rhacm-policies
6 | annotations:
7 | policy.open-cluster-management.io/standards: NIST SP 800-53
8 | policy.open-cluster-management.io/categories: CM Configuration Management
9 | policy.open-cluster-management.io/controls: CM-6 Configuration Settings
10 | spec:
11 | remediationAction: enforce
12 | disabled: false
13 | policy-templates:
14 | - objectDefinition:
15 | apiVersion: policy.open-cluster-management.io/v1
16 | kind: ConfigurationPolicy
17 | metadata:
18 | name: compliance-moderate-scan
19 | spec:
20 | remediationAction: enforce
21 | severity: high
22 | object-templates:
23 | - complianceType: musthave # this template creates ScanSettingBinding:moderate
24 | objectDefinition:
25 | apiVersion: compliance.openshift.io/v1alpha1
26 | kind: ScanSettingBinding
27 | metadata:
28 | name: moderate
29 | namespace: openshift-compliance
30 | profiles:
31 | - apiGroup: compliance.openshift.io/v1alpha1
32 | kind: Profile
33 | name: ocp4-moderate
34 | - apiGroup: compliance.openshift.io/v1alpha1
35 | kind: Profile
36 | name: rhcos4-moderate
37 | settingsRef:
38 | apiGroup: compliance.openshift.io/v1alpha1
39 | kind: ScanSetting
40 | name: default
41 | - objectDefinition:
42 | apiVersion: policy.open-cluster-management.io/v1
43 | kind: ConfigurationPolicy
44 | metadata:
45 | name: compliance-suite-moderate
46 | spec:
47 | remediationAction: inform
48 | severity: high
49 | object-templates:
50 | - complianceType: musthave # this template checks if scan has completed by checking the status field
51 | objectDefinition:
52 | apiVersion: compliance.openshift.io/v1alpha1
53 | kind: ComplianceSuite
54 | metadata:
55 | name: moderate
56 | namespace: openshift-compliance
57 | status:
58 | phase: DONE
59 | - objectDefinition:
60 | apiVersion: policy.open-cluster-management.io/v1
61 | kind: ConfigurationPolicy
62 | metadata:
63 | name: compliance-suite-moderate-results
64 | spec:
65 | remediationAction: inform
66 | severity: high
67 | object-templates:
68 | - complianceType: mustnothave # this template reports the results for scan suite: moderate by looking at ComplianceCheckResult CRs
69 | objectDefinition:
70 | apiVersion: compliance.openshift.io/v1alpha1
71 | kind: ComplianceCheckResult
72 | metadata:
73 | namespace: openshift-compliance
74 | labels:
75 | compliance.openshift.io/check-status: FAIL
76 | compliance.openshift.io/suite: moderate
77 | ---
78 | apiVersion: policy.open-cluster-management.io/v1
79 | kind: PlacementBinding
80 | metadata:
81 | name: policy-moderate-scan-binding
82 | namespace: rhacm-policies
83 | placementRef:
84 | name: dev-clusters
85 | kind: PlacementRule
86 | apiGroup: apps.open-cluster-management.io
87 | subjects:
88 | - name: policy-moderate-scan
89 | kind: Policy
90 | apiGroup: policy.open-cluster-management.io
91 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/exercise-compliance-operator/policy-moderate-scan.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy.open-cluster-management.io/v1
2 | kind: Policy
3 | metadata:
4 | name: policy-moderate-scan
5 | namespace: rhacm-policies
6 | annotations:
7 | policy.open-cluster-management.io/standards: NIST SP 800-53
8 | policy.open-cluster-management.io/categories: CM Configuration Management
9 | policy.open-cluster-management.io/controls: CM-6 Configuration Settings
10 | spec:
11 | remediationAction: enforce
12 | disabled: false
13 | policy-templates:
14 | - objectDefinition:
15 | apiVersion: policy.open-cluster-management.io/v1
16 | kind: ConfigurationPolicy
17 | metadata:
18 | name: compliance-moderate-scan
19 | spec:
20 | remediationAction: enforce
21 | severity: high
22 | object-templates:
23 | - complianceType: musthave # this template creates ScanSettingBinding:moderate
24 | objectDefinition:
25 | apiVersion: compliance.openshift.io/v1alpha1
26 | kind: ScanSettingBinding
27 | metadata:
28 | name: moderate
29 | namespace: openshift-compliance
30 | profiles:
31 | - apiGroup: compliance.openshift.io/v1alpha1
32 | kind: Profile
33 | name: ocp4-moderate
34 | - apiGroup: compliance.openshift.io/v1alpha1
35 | kind: Profile
36 | name: rhcos4-moderate
37 | settingsRef:
38 | apiGroup: compliance.openshift.io/v1alpha1
39 | kind: ScanSetting
40 | name: default
41 | - objectDefinition:
42 | apiVersion: policy.open-cluster-management.io/v1
43 | kind: ConfigurationPolicy
44 | metadata:
45 | name: compliance-suite-moderate
46 | spec:
47 | remediationAction: inform
48 | severity: high
49 | object-templates:
50 | - complianceType: musthave # this template checks if scan has completed by checking the status field
51 | objectDefinition:
52 | apiVersion: compliance.openshift.io/v1alpha1
53 | kind: ComplianceSuite
54 | metadata:
55 | name: moderate
56 | namespace: openshift-compliance
57 | status:
58 | phase: DONE
59 | - objectDefinition:
60 | apiVersion: policy.open-cluster-management.io/v1
61 | kind: ConfigurationPolicy
62 | metadata:
63 | name: compliance-suite-moderate-results
64 | spec:
65 | remediationAction: inform
66 | severity: high
67 | object-templates:
68 | - complianceType: mustnothave # this template reports the results for scan suite: moderate by looking at ComplianceCheckResult CRs
69 | objectDefinition:
70 | apiVersion: compliance.openshift.io/v1alpha1
71 | kind: ComplianceCheckResult
72 | metadata:
73 | namespace: openshift-compliance
74 | labels:
75 | compliance.openshift.io/check-status: FAIL
76 | compliance.openshift.io/suite: moderate
77 | ---
78 | apiVersion: policy.open-cluster-management.io/v1
79 | kind: PlacementBinding
80 | metadata:
81 | name: policy-moderate-scan-binding
82 | namespace: rhacm-policies
83 | placementRef:
84 | name: prod-policies-clusters
85 | kind: PlacementRule
86 | apiGroup: apps.open-cluster-management.io
87 | subjects:
88 | - name: policy-moderate-scan
89 | kind: Policy
90 | apiGroup: policy.open-cluster-management.io
91 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/demo/demo-policies/policy-rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-rbac-mariadb
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: AC Access Control
10 | policy.open-cluster-management.io/controls: AC-3 Access Enforcement
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-role-mariadb-rollout
20 | spec:
21 | remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
22 | severity: high
23 | namespaceSelector:
24 | include: ["mariadb"]
25 | object-templates:
26 | - complianceType: mustonlyhave # role definition should exact match
27 | objectDefinition:
28 | apiVersion: rbac.authorization.k8s.io/v1
29 | kind: Role
30 | metadata:
31 | name: dc-rollout-role
32 | rules:
33 | - apiGroups: ["apps.openshift.io", ""]
34 | resources: ["deploymentconfigs"]
35 | verbs: ["get", "list", "watch", "patch", "update"]
36 | - apiGroups: [""]
37 | resources: ["replicationcontrollers"]
38 | verbs: ["update", "patch", "list", "get"]
39 | - apiGroups: [""]
40 | resources: ["pods"]
41 | verbs: ["get", "list", "watch"]
42 | - objectDefinition:
43 | apiVersion: policy.open-cluster-management.io/v1
44 | kind: ConfigurationPolicy
45 | metadata:
46 | name: mariadb-operators-group-config-policy
47 | spec:
48 | remediationAction: enforce
49 | severity: high
50 | object-templates:
51 | - complianceType: mustonlyhave
52 | objectDefinition:
53 | apiVersion: user.openshift.io/v1
54 | kind: Group
55 | metadata:
56 | name: mariadb-operators
57 | users:
58 | - user1
59 | - objectDefinition:
60 | apiVersion: policy.open-cluster-management.io/v1
61 | kind: ConfigurationPolicy
62 | metadata:
63 | name: mariadb-dc-rollout-rolebinding
64 | spec:
65 | remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
66 | severity: high
67 | namespaceSelector:
68 | include: ["mariadb"]
69 | object-templates:
70 | - complianceType: musthave
71 | objectDefinition:
72 | kind: RoleBinding # role binding must exist
73 | apiVersion: rbac.authorization.k8s.io/v1
74 | metadata:
75 | name: dc-rollout-rolebinding
76 | subjects:
77 | - kind: Group
78 | name: mariadb-operators # Name is case sensitive
79 | apiGroup: rbac.authorization.k8s.io
80 | roleRef:
81 | kind: Role #this must be Role or ClusterRole
82 | name: dc-rollout-role # this must match the name of the Role or ClusterRole you wish to bind to
83 | apiGroup: rbac.authorization.k8s.io
84 | ---
85 | apiVersion: policy.open-cluster-management.io/v1
86 | kind: PlacementBinding
87 | metadata:
88 | name: role-placement-binding
89 | namespace: rhacm-policies
90 | placementRef:
91 | name: dev-clusters
92 | kind: PlacementRule
93 | apiGroup: apps.open-cluster-management.io
94 | subjects:
95 | - name: policy-rbac-mariadb
96 | kind: Policy
97 | apiGroup: policy.open-cluster-management.io
98 |
--------------------------------------------------------------------------------
/02.Cluster-Management/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 2 - Managing an existing cluster using Advanced Cluster Management
2 |
3 | In this exercise you will manage the existing cluster in the Red Hat Advanced Cluster Management stack - `local-cluster`. You will attach labels to the cluster, visualize its resources, and perform updates to the OpenShift platform.
4 |
5 |
6 | ## 2.1 Import an existing cluster
7 |
8 | 1. Modify the attributes of the managed cluster in Red Hat Advanced Cluster Management -
9 | * **Name**: local-cluster
10 | * **labels**:
11 | * environment=dev
12 | * owner=<your-name>
13 |
14 | In order to associate the labels with local-cluster, follow these steps (you may use the presentation for guidance; a CLI alternative is sketched after the list) -
15 |
16 | * Navigate to **Clusters** -> **local-cluster** -> **Actions** -> **Edit labels**.
17 | * Add the labels in the `key=value` format.
18 |
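Alternatively, the same labels can be added from the CLI by labeling the `ManagedCluster` resource on the hub cluster (a minimal sketch, assuming you are logged in to the hub as an administrator):

```
$ oc label managedcluster local-cluster environment=dev owner=<your-name>
$ oc get managedcluster local-cluster --show-labels
```
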
19 | 2. Log in to the cluster using the **oc** CLI tool.
20 |
21 | ```
22 | $ oc login -u <username> -p <password> https://api.cluster.2222.sandbox.opentlc.com:6443
23 | ```
24 |
25 | 3. Make sure that all of the agent pods are up and running on the cluster.
26 |
27 | ```
28 | $ oc get pods -n open-cluster-management-agent
29 | NAME READY STATUS RESTARTS AGE
30 | klusterlet-645d98d7d5-hnn2z 1/1 Running 0 46m
31 | klusterlet-registration-agent-66fdc479cf-ltlx6 1/1 Running 0 46m
32 | klusterlet-registration-agent-66fdc479cf-qnhzj 1/1 Running 0 46m
33 | klusterlet-registration-agent-66fdc479cf-t8x5n 1/1 Running 0 46m
34 | klusterlet-work-agent-6b8b99b899-27ht9 1/1 Running 0 46m
35 | klusterlet-work-agent-6b8b99b899-95dkr 1/1 Running 1 46m
36 | klusterlet-work-agent-6b8b99b899-vdp9r 1/1 Running 0 46m
37 |
38 | $ oc get pods -n open-cluster-management-agent-addon
39 | NAME READY STATUS RESTARTS AGE
40 | application-manager-7c8879d57f-4x7ft 1/1 Running 0 24m
41 | cert-policy-controller-7584887cdf-2vkv5 1/1 Running 0 24m
42 | config-policy-controller-56d8d84c8c-p8z72 1/1 Running 0 24m
43 | governance-policy-framework-65c46c46c8-xtgfq 2/2 Running 0 24m
44 | iam-policy-controller-56b5bf6486-795wd 1/1 Running 0 24m
45 | klusterlet-addon-workmgr-55bc5d4fd-2jp55 1/1 Running 0 24m
46 | ```
47 |
48 | ## 2.2 Analyzing the managed cluster
49 |
50 | In this exercise you will be using the Red Hat Advanced Cluster Management portal to analyze the managed cluster’s resources. You may use the workshop presentation for examples and guidance; a few rough `oc` equivalents are sketched after the list below.
51 |
52 | 1. Using Red Hat Advanced Cluster Management, find out which cloud provider hosts the managed cluster.
53 | 2. Using Red Hat Advanced Cluster Management, find out the number of nodes that make up the managed cluster. How many CPUs does each node have?
54 | 3. Using Red Hat Advanced Cluster Management, check whether all users can provision new projects on local-cluster (check if the **self-provisioners** ClusterRoleBinding has the system:authenticated:oauth group associated with it).
55 | 4. Using Red Hat Advanced Cluster Management, check which **channel version** is associated with local-cluster (stable / candidate / fast) - (search for the **kind:ClusterVersion** CR).
56 | 5. Using Red Hat Advanced Cluster Management -
57 |     * Check the port number that the **alertmanager-main-0** pod listens on in local-cluster (it can be found in the pod logs and the pod resource definition).
58 |     * Check the full path of the **alertmanager-main-0** pod configuration file (it can be found in the pod logs and the pod resource definition).
59 |
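For reference, a few of the checks above have rough `oc` equivalents when run directly against local-cluster (a sketch, assuming cluster-admin access):

```
$ oc get nodes                                                        # node count, roles and capacity
$ oc get clusterrolebinding self-provisioners -o yaml                 # look for the system:authenticated:oauth group
$ oc get clusterversion version -o jsonpath='{.spec.channel}{"\n"}'   # channel version
```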
60 |
61 | ## 2.3 Upgrade the cluster using Advanced Cluster Management
62 |
63 | **NOTE**: Do this exercise towards the end of the day. The upgrade process may take up to an hour to complete.
64 |
65 | 1. Change the **channel version** on the local-cluster from stable-**4.x** to stable-**4.x+1**.
66 | 2. Upgrade the cluster using Red Hat Advanced Cluster Management (the equivalent CLI operations are sketched below for reference).
67 |
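For reference, the channel change and upgrade that Red Hat Advanced Cluster Management triggers correspond roughly to the following CLI operations on the managed cluster (a sketch only - `stable-4.9` is an illustrative target channel, not a value taken from this workshop):

```
$ oc patch clusterversion version --type merge -p '{"spec":{"channel":"stable-4.9"}}'
$ oc adm upgrade --to-latest=true
```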
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-gatekeeper/policy-gatekeeper-disable-nodeport.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-gatekeeper-disallow-nodeport
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: CM Configuration Management
10 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-gatekeeper-disallow-nodeport
20 | spec:
21 | remediationAction: enforce
22 | severity: low
23 | object-templates:
24 | - complianceType: musthave
25 | objectDefinition:
26 | apiVersion: templates.gatekeeper.sh/v1beta1
27 | kind: ConstraintTemplate
28 | metadata:
29 | name: k8sblocknodeport
30 | annotations:
31 | description: Disallows all Services with type NodePort.
32 | spec:
33 | crd:
34 | spec:
35 | names:
36 | kind: K8sBlockNodePort
37 | targets:
38 | - target: admission.k8s.gatekeeper.sh
39 | rego: |
40 | package k8sblocknodeport
41 | violation[{"msg": msg}] {
42 | input.review.kind.kind == "Service"
43 | input.review.object.spec.type == "NodePort"
44 | msg := "User is not allowed to create service of type NodePort"
45 | }
46 | - complianceType: musthave
47 | objectDefinition:
48 | apiVersion: constraints.gatekeeper.sh/v1beta1
49 | kind: K8sBlockNodePort
50 | metadata:
51 | name: block-node-port
52 | spec:
53 | match:
54 | kinds:
55 | - apiGroups: [""]
56 | kinds: ["Service"]
57 | - objectDefinition:
58 | apiVersion: policy.open-cluster-management.io/v1
59 | kind: ConfigurationPolicy
60 | metadata:
61 | name: policy-gatekeeper-audit-disallow-nodeport
62 | spec:
63 | remediationAction: inform # will be overridden by remediationAction in parent policy
64 | severity: low
65 | object-templates:
66 | - complianceType: musthave
67 | objectDefinition:
68 | apiVersion: constraints.gatekeeper.sh/v1beta1
69 | kind: K8sBlockNodePort
70 | metadata:
71 | name: block-node-port
72 | status:
73 | totalViolations: 0
74 | - objectDefinition:
75 | apiVersion: policy.open-cluster-management.io/v1
76 | kind: ConfigurationPolicy
77 | metadata:
78 | name: policy-gatekeeper-admission-disallow-nodeport
79 | spec:
80 | remediationAction: inform # will be overridden by remediationAction in parent policy
81 | severity: low
82 | object-templates:
83 | - complianceType: mustnothave
84 | objectDefinition:
85 | apiVersion: v1
86 | kind: Event
87 | metadata:
88 | namespace: openshift-gatekeeper-system # set it to the actual namespace where gatekeeper is running if different
89 | annotations:
90 | constraint_action: deny
91 | constraint_kind: K8sBlockNodePort
92 | constraint_name: block-node-port
93 | event_type: violation
94 | ---
95 | apiVersion: policy.open-cluster-management.io/v1
96 | kind: PlacementBinding
97 | metadata:
98 | name: binding-policy-gatekeeper-disallow-nodeport
99 | namespace: rhacm-policies
100 | placementRef:
101 | name: placement-policy-gatekeeper-disallow-nodeport
102 | kind: PlacementRule
103 | apiGroup: apps.open-cluster-management.io
104 | subjects:
105 | - name: policy-gatekeeper-disallow-nodeport
106 | kind: Policy
107 | apiGroup: policy.open-cluster-management.io
108 | ---
109 | apiVersion: apps.open-cluster-management.io/v1
110 | kind: PlacementRule
111 | metadata:
112 | name: placement-policy-gatekeeper-disallow-nodeport
113 | namespace: rhacm-policies
114 | spec:
115 | clusterConditions:
116 | - status: "True"
117 | type: ManagedClusterConditionAvailable
118 | clusterSelector:
119 | matchExpressions:
120 | - { key: environment, operator: In, values: ["dev"] }
121 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/demo-policy-generator/complex-policy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: policy.open-cluster-management.io/v1
3 | kind: Policy
4 | metadata:
5 | name: policy-networkpolicy
6 | namespace: rhacm-policies
7 | annotations:
8 | policy.open-cluster-management.io/standards: NIST SP 800-53
9 | policy.open-cluster-management.io/categories: SC System and Communications Protection
10 | policy.open-cluster-management.io/controls: SC-7 Boundary Protection
11 | spec:
12 | remediationAction: enforce
13 | disabled: false
14 | policy-templates:
15 | - objectDefinition:
16 | apiVersion: policy.open-cluster-management.io/v1
17 | kind: ConfigurationPolicy
18 | metadata:
19 | name: policy-networkpolicy-application-1
20 | spec:
21 | remediationAction: enforce
22 | severity: medium
23 | namespaceSelector:
24 | include: ["default"]
25 | object-templates:
26 | - complianceType: musthave
27 | objectDefinition:
28 | apiVersion: networking.k8s.io/v1
29 | kind: NetworkPolicy
30 | metadata:
31 | labels:
32 | network-policy-generator.stackrox.io/generated: "true"
33 | name: stackrox-generated-webserver
34 | namespace: application-1
35 | spec:
36 | podSelector:
37 | matchLabels:
38 | app: webserver
39 | policyTypes:
40 | - Ingress
41 | - objectDefinition:
42 | apiVersion: policy.open-cluster-management.io/v1
43 | kind: ConfigurationPolicy
44 | metadata:
45 | name: policy-networkpolicy-application-2
46 | spec:
47 | remediationAction: enforce
48 | severity: medium
49 | namespaceSelector:
50 | include: ["default"]
51 | object-templates:
52 | - complianceType: musthave
53 | objectDefinition:
54 | apiVersion: networking.k8s.io/v1
55 | kind: NetworkPolicy
56 | metadata:
57 | labels:
58 | network-policy-generator.stackrox.io/generated: "true"
59 | name: stackrox-generated-webserver
60 | namespace: application-2
61 | spec:
62 | ingress:
63 | - from:
64 | - namespaceSelector:
65 | matchLabels:
66 | kubernetes.io/metadata.name: application-1
67 | podSelector:
68 | matchLabels:
69 | app: webserver
70 | ports:
71 | - port: 8080
72 | protocol: TCP
73 | podSelector:
74 | matchLabels:
75 | app: webserver
76 | policyTypes:
77 | - Ingress
78 | - objectDefinition:
79 | apiVersion: policy.open-cluster-management.io/v1
80 | kind: ConfigurationPolicy
81 | metadata:
82 | name: policy-networkpolicy-application-3
83 | spec:
84 | remediationAction: enforce
85 | severity: medium
86 | namespaceSelector:
87 | include: ["default"]
88 | object-templates:
89 | - complianceType: musthave
90 | objectDefinition:
91 | apiVersion: networking.k8s.io/v1
92 | kind: NetworkPolicy
93 | metadata:
94 | labels:
95 | network-policy-generator.stackrox.io/generated: "true"
96 | name: stackrox-generated-webserver
97 | namespace: application-3
98 | spec:
99 | podSelector:
100 | matchLabels:
101 | app: webserver
102 | policyTypes:
103 | - Ingress
104 | - objectDefinition:
105 | apiVersion: policy.open-cluster-management.io/v1
106 | kind: ConfigurationPolicy
107 | metadata:
108 | name: policy-networkpolicy-application-4
109 | spec:
110 | remediationAction: enforce
111 | severity: medium
112 | namespaceSelector:
113 | include: ["default"]
114 | object-templates:
115 | - complianceType: musthave
116 | objectDefinition:
117 | apiVersion: networking.k8s.io/v1
118 | kind: NetworkPolicy
119 | metadata:
120 | labels:
121 | network-policy-generator.stackrox.io/generated: "true"
122 | name: stackrox-generated-webserver
123 | namespace: application-4
124 | spec:
125 | podSelector:
126 | matchLabels:
127 | app: webserver
128 | policyTypes:
129 | - Ingress
130 | ---
131 | apiVersion: policy.open-cluster-management.io/v1
132 | kind: PlacementBinding
133 | metadata:
134 | name: policy-networkpolicy-binding
135 | namespace: rhacm-policies
136 | placementRef:
137 | name: placement-policy-networkpolicy
138 | kind: PlacementRule
139 | apiGroup: apps.open-cluster-management.io
140 | subjects:
141 | - name: policy-networkpolicy
142 | kind: Policy
143 | apiGroup: policy.open-cluster-management.io
144 | ---
145 | apiVersion: apps.open-cluster-management.io/v1
146 | kind: PlacementRule
147 | metadata:
148 | name: placement-policy-networkpolicy
149 | namespace: rhacm-policies
150 | spec:
151 | clusterConditions:
152 | - status: "True"
153 | type: ManagedClusterConditionAvailable
154 | clusterSelector:
155 | matchExpressions:
156 | - {key: environment, operator: In, values: ["dev"]}
157 |
--------------------------------------------------------------------------------
/06.Advanced-Policy-Management/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 6 - Open Policy Agent Gatekeeper
2 |
3 | In this exercise you will go through the compliance features that come with Open Policy Agent Gatekeeper and the Compliance Operator. You will apply a number of policies to the cluster in order to comply with global security and management standards.
4 |
5 | ## Gatekeeper
6 |
7 | In this section you create and manage Gatekeeper policies. The policies are written in the Rego policy language.
8 |
9 | Apply the following policy to the hub cluster. The policy installs the Gatekeeper operator on managed clusters labeled `environment=production`.
10 |
11 | ```
12 | $ cat >> policy-gatekeeper-operator.yaml << EOF
13 | ---
14 | apiVersion: policy.open-cluster-management.io/v1
15 | kind: Policy
16 | metadata:
17 | name: policy-gatekeeper-operator
18 | namespace: rhacm-policies
19 | annotations:
20 | policy.open-cluster-management.io/standards: NIST SP 800-53
21 | policy.open-cluster-management.io/categories: CM Configuration Management
22 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
23 | spec:
24 | remediationAction: enforce
25 | disabled: false
26 | policy-templates:
27 | - objectDefinition:
28 | apiVersion: policy.open-cluster-management.io/v1
29 | kind: ConfigurationPolicy
30 | metadata:
31 | name: gatekeeper-operator-product-sub
32 | spec:
33 | remediationAction: enforce
34 | severity: high
35 | object-templates:
36 | - complianceType: musthave
37 | objectDefinition:
38 | apiVersion: operators.coreos.com/v1alpha1
39 | kind: Subscription
40 | metadata:
41 | name: gatekeeper-operator-product
42 | namespace: openshift-operators
43 | spec:
44 | channel: stable
45 | installPlanApproval: Automatic
46 | name: gatekeeper-operator-product
47 | source: redhat-operators
48 | sourceNamespace: openshift-marketplace
49 | - objectDefinition:
50 | apiVersion: policy.open-cluster-management.io/v1
51 | kind: ConfigurationPolicy
52 | metadata:
53 | name: gatekeeper
54 | spec:
55 | remediationAction: enforce
56 | severity: high
57 | object-templates:
58 | - complianceType: musthave
59 | objectDefinition:
60 | apiVersion: operator.gatekeeper.sh/v1alpha1
61 | kind: Gatekeeper
62 | metadata:
63 | name: gatekeeper
64 | spec:
65 | audit:
66 | logLevel: INFO
67 | replicas: 1
68 | image:
69 | image: 'registry.redhat.io/rhacm2/gatekeeper-rhel8:v3.3.0'
70 | validatingWebhook: Enabled
71 | mutatingWebhook: Disabled
72 | webhook:
73 | emitAdmissionEvents: Enabled
74 | logLevel: INFO
75 | replicas: 2
76 | ---
77 | apiVersion: policy.open-cluster-management.io/v1
78 | kind: PlacementBinding
79 | metadata:
80 | name: binding-policy-gatekeeper-operator
81 | namespace: rhacm-policies
82 | placementRef:
83 | name: placement-policy-gatekeeper-operator
84 | kind: PlacementRule
85 | apiGroup: apps.open-cluster-management.io
86 | subjects:
87 | - name: policy-gatekeeper-operator
88 | kind: Policy
89 | apiGroup: policy.open-cluster-management.io
90 | ---
91 | apiVersion: apps.open-cluster-management.io/v1
92 | kind: PlacementRule
93 | metadata:
94 | name: placement-policy-gatekeeper-operator
95 | namespace: rhacm-policies
96 | spec:
97 | clusterConditions:
98 | - status: "True"
99 | type: ManagedClusterConditionAvailable
100 | clusterSelector:
101 | matchExpressions:
102 | - { key: environment, operator: In, values: ["production"] }
103 | EOF
104 |
105 | $ oc apply -f policy-gatekeeper-operator.yaml
106 | ```
107 |
108 | ### Policy #1 - Disallow unencrypted routes
109 |
110 | Apply the next policy to the hub cluster in order to deny the creation of HTTP (unencrypted) routes on the managed clusters -
111 |
112 | ```
113 | $ cat >> policy-gatekeeper-httpsonly.yaml << EOF
114 | ---
115 | apiVersion: policy.open-cluster-management.io/v1
116 | kind: Policy
117 | metadata:
118 | name: policy-gatekeeper-route-httpsonly
119 | namespace: rhacm-policies
120 | annotations:
121 | policy.open-cluster-management.io/standards: NIST SP 800-53
122 | policy.open-cluster-management.io/categories: CM Configuration Management
123 | policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
124 | spec:
125 | remediationAction: enforce
126 | disabled: false
127 | policy-templates:
128 | - objectDefinition:
129 | apiVersion: policy.open-cluster-management.io/v1
130 | kind: ConfigurationPolicy
131 | metadata:
132 | name: policy-gatekeeper-route-httpsonly
133 | spec:
134 | remediationAction: enforce
135 | severity: low
136 | object-templates:
137 | - complianceType: musthave
138 | objectDefinition:
139 | apiVersion: templates.gatekeeper.sh/v1beta1
140 | kind: ConstraintTemplate
141 | metadata:
142 | name: k8shttpsonly
143 | annotations:
144 | description: Requires Route resources to be HTTPS only.
145 | spec:
146 | crd:
147 | spec:
148 | names:
149 | kind: K8sHttpsOnly
150 | targets:
151 | - target: admission.k8s.gatekeeper.sh
152 | rego: |
153 | package k8shttpsonly
154 | violation[{"msg": msg}] {
155 | input.review.object.kind == "Route"
156 | re_match("^(route.openshift.io)/", input.review.object.apiVersion)
157 | route := input.review.object
158 | not https_complete(route)
159 | msg := sprintf("Route should be https. tls configuration is required for %v", [route.metadata.name])
160 | }
161 | https_complete(route) = true {
162 | route.spec["tls"]
163 | count(route.spec.tls) > 0
164 | }
165 | - complianceType: musthave
166 | objectDefinition:
167 | apiVersion: constraints.gatekeeper.sh/v1beta1
168 | kind: K8sHttpsOnly
169 | metadata:
170 | name: route-https-only
171 | spec:
172 | match:
173 | kinds:
174 | - apiGroups: ["route.openshift.io"]
175 | kinds: ["Route"]
176 | - objectDefinition:
177 | apiVersion: policy.open-cluster-management.io/v1
178 | kind: ConfigurationPolicy
179 | metadata:
180 | name: policy-gatekeeper-audit-httpsonly
181 | spec:
182 | remediationAction: inform # will be overridden by remediationAction in parent policy
183 | severity: low
184 | object-templates:
185 | - complianceType: musthave
186 | objectDefinition:
187 | apiVersion: constraints.gatekeeper.sh/v1beta1
188 | kind: K8sHttpsOnly
189 | metadata:
190 | name: route-https-only
191 | status:
192 | totalViolations: 0
193 | - objectDefinition:
194 | apiVersion: policy.open-cluster-management.io/v1
195 | kind: ConfigurationPolicy
196 | metadata:
197 | name: policy-gatekeeper-admission-httpsonly
198 | spec:
199 | remediationAction: inform # will be overridden by remediationAction in parent policy
200 | severity: low
201 | object-templates:
202 | - complianceType: mustnothave
203 | objectDefinition:
204 | apiVersion: v1
205 | kind: Event
206 | metadata:
207 | namespace: openshift-gatekeeper-system # set it to the actual namespace where gatekeeper is running if different
208 | annotations:
209 | constraint_action: deny
210 | constraint_kind: K8sHttpsOnly
211 | constraint_name: route-https-only
212 | event_type: violation
213 | ---
214 | apiVersion: policy.open-cluster-management.io/v1
215 | kind: PlacementBinding
216 | metadata:
217 | name: binding-policy-gatekeeper-route-httpsonly
218 | namespace: rhacm-policies
219 | placementRef:
220 | name: placement-policy-gatekeeper-route-httpsonly
221 | kind: PlacementRule
222 | apiGroup: apps.open-cluster-management.io
223 | subjects:
224 | - name: policy-gatekeeper-route-httpsonly
225 | kind: Policy
226 | apiGroup: policy.open-cluster-management.io
227 | ---
228 | apiVersion: apps.open-cluster-management.io/v1
229 | kind: PlacementRule
230 | metadata:
231 | name: placement-policy-gatekeeper-route-httpsonly
232 | namespace: rhacm-policies
233 | spec:
234 | clusterConditions:
235 | - status: "True"
236 | type: ManagedClusterConditionAvailable
237 | clusterSelector:
238 | matchExpressions:
239 | - { key: environment, operator: In, values: ["production"] }
240 | EOF
241 |
242 | $ oc apply -f policy-gatekeeper-httpsonly.yaml
243 | ```
244 |
245 | Wait until both policies are in a compliant state before you move forward with the exercise.
246 |
247 | Log in to the managed cluster and try creating a web server using the next commands -
248 |
249 | ```
250 | $ oc new-project httpd-test
251 |
252 | $ oc new-app httpd
253 | ```
254 |
255 | Try exposing the web server using an insecure route. The Gatekeeper constraint should deny the request -
256 |
257 | ```
258 | $ oc expose svc/httpd
259 | ```
260 |
261 | Try exposing the web server using a secure (TLS edge-terminated) route. This request should succeed -
262 |
263 | ```
264 | $ oc create route edge --service=httpd
265 | ```
266 |
267 | ### Policy #2 - Namespace Management
268 |
269 | In this section you will create a Gatekeeper-based policy that disallows namespaces carrying the `state: dangerous` label. If a namespace has this label, its creation will be denied. Make sure the constraint returns a message that indicates the reason for the denial.
270 |
271 | An example of a disallowed namespace:
272 |
273 | ```
274 | {
275 | "apiVersion": "v1",
276 | "kind": "Namespace",
277 | "metadata": {
278 | "labels": {
279 | "state": "dangerous"
280 | },
281 | "name": "michael"
282 | }
283 | }
284 | ```
285 |
286 | You may use the presentation and the previously created policies as a reference for this policy. Use the [Rego playground](https://play.openpolicyagent.org/) to check the validity of your Rego policy.
287 |
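As a hedged starting point (the package name and message below are illustrative, not the required solution), the Rego logic could look roughly like this; wrap it in a `ConstraintTemplate` and a matching constraint the same way the HTTPS-only policy does:

```
package k8sdenydangerousnamespace

violation[{"msg": msg}] {
  # match Namespace admission requests that carry the forbidden label
  input.review.object.kind == "Namespace"
  input.review.object.metadata.labels.state == "dangerous"
  msg := sprintf("namespace %v is labeled state=dangerous and may not be created", [input.review.object.metadata.name])
}
```
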
288 | Check the validity of your policy by creating a violating namespace. The creation of the namespace should be disallowed -
289 |
290 | ```
291 | $ cat >> gatekeeper-disallowed-namespace.yaml << EOF
292 | apiVersion: v1
293 | kind: Namespace
294 | metadata:
295 | labels:
296 | state: dangerous
297 | name: michael
298 | EOF
299 |
300 | $ oc apply -f gatekeeper-disallowed-namespace.yaml
301 | ```
302 |
303 | ## Compliance Operator Integration
304 |
305 | In this section you will perform an integration between Red Hat Advanced Cluster Management and the OpenSCAP Compliance Operator. You will create an RHACM policy that deploys the Compliance Operator. Afterwards, you will create an RHACM policy that initiates a compliance scan and monitors the results.
306 |
307 | Run the next command to deploy the Compliance Operator using an RHACM policy -
308 |
309 | ```
310 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/06.Advanced-Policy-Management/exercise-compliance-operator/policy-compliance-operator.yaml
311 | ```
312 |
313 | Make sure that the policy has been deployed successfully in RHACM's Governance dashboard - The policy status needs to be **compliant**. The Compliance Operator is deployed in the `openshift-compliance` namespace on the managed cluster.
314 |
315 | ```
316 | $ oc get pods -n openshift-compliance
317 | NAME READY STATUS RESTARTS AGE
318 | compliance-operator-8c9bc7466-8h4js 1/1 Running 1 7m27s
319 | ocp4-openshift-compliance-pp-6d7c7db4bd-wb5vf 1/1 Running 0 4m51s
320 | rhcos4-openshift-compliance-pp-c7b548bd-8pbhq 1/1 Running 0 4m51s
321 | ```
322 |
323 | Now that the Compliance Operator is deployed, initiate a compliance scan using an RHACM policy. To initiate a compliance scan, run the next command -
324 |
325 | ```
326 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/06.Advanced-Policy-Management/exercise-compliance-operator/policy-moderate-scan.yaml
327 | ```
328 |
329 | After running the command, a compliance scan is initiated. The scan will take about 5 minutes to complete. Run the next command on the managed cluster to check the status of the scan -
330 |
331 | ```
332 | $ oc get compliancescan -n openshift-compliance
333 | NAME PHASE RESULT
334 | ocp4-moderate RUNNING NOT-AVAILABLE
335 | rhcos4-moderate-master RUNNING NOT-AVAILABLE
336 | rhcos4-moderate-worker RUNNING NOT-AVAILABLE
337 | ```
338 |
339 | When the scan completes, the `PHASE` field will change to `DONE`.
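
Instead of re-running the command manually, newer `oc` clients can wait on the phase field; a hedged example (requires jsonpath support in `oc wait`):

```
$ oc wait compliancescan --all -n openshift-compliance \
    --for=jsonpath='{.status.phase}'=DONE --timeout=900s
```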
340 |
341 | After the scan completes, navigate to the RHACM governance dashboard. Note that the newly created policy is in a non-compliant state. Click on the policy name and navigate to **Status**. The `compliance-suite-moderate-results` ConfigurationPolicy is non-compliant because multiple ComplianceCheckResult objects indicate a `FAIL` check-status. To investigate the failing rules, press on _View details_ next to the `compliance-suite-moderate-results` ConfigurationPolicy.
342 |
343 | Scroll down and you will notice all failing compliance check results. To understand why these rules failed the scan, press on `View yaml` next to the failing rule name (a CLI alternative is shown after the list below).
344 |
345 | - Investigate the `ocp4-moderate-banner-or-login-template-set` ComplianceCheckResult. See what you can do to remediate the issue.
346 | - Investigate the `ocp4-moderate-configure-network-policies-namespaces` ComplianceCheckResult. See what you can do to remediate the issue.
347 | - Investigate the `rhcos4-moderate-master-no-empty-passwords` ComplianceCheckResult. See what you can do to remediate the issue.
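
Alternatively, you can list the failing checks directly from the CLI on the managed cluster. A hedged example - the `ComplianceCheckResult` objects are created by the Compliance Operator in the `openshift-compliance` namespace:

```
$ oc get compliancecheckresult -n openshift-compliance | grep FAIL
```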
--------------------------------------------------------------------------------
/03.Observability/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 3 - Observability
2 |
3 | In this exercise you enable and use the `Observability` function in Red Hat Advanced Cluster Management. You will configure observability, explore the built-in dashboards, enable custom alerts using Thanos Ruler and design custom dashboards for your own organizational needs.
4 |
5 | ### 3.1 - Deploying Observability
6 |
7 | This part focuses on deploying the `Observability` add-on. In order to deploy the functionality, you must provide an object storage provider. For this deployment you will use [minio](https://min.io/). A PVC is attached to the minio pod, which creates a bucket and exposes an S3-compatible object storage endpoint. The endpoint information and credentials are exposed in a secret named `thanos-object-storage`.
8 |
9 | To create a namespace for the `MCO operator` to run in, run the next commands on the hub cluster -
10 |
11 | ```
12 | $ oc login -u <username> -p <password> https://api.cluster.2222.sandbox.opentlc.com:6443
13 |
14 | $ oc new-project open-cluster-management-observability
15 | ```
16 |
17 | To create the `minio` deployment run the next commands on the hub cluster -
18 |
19 | ```
20 | $ git clone https://github.com/open-cluster-management/multicluster-observability-operator.git
21 |
22 | $ oc apply -k multicluster-observability-operator/examples/minio/ -n open-cluster-management-observability
23 | ```
24 |
25 | After running the command, a `minio` deployment will be available. The S3 endpoint is now exported in the `thanos-object-storage` secret.
26 |
27 | ```
28 | $ oc extract secret/thanos-object-storage --to=- -n open-cluster-management-observability
29 |
30 | # thanos.yaml
31 | type: s3
32 | config:
33 | bucket: "thanos"
34 | endpoint: "minio:9000"
35 | insecure: true
36 | access_key: "minio"
37 | secret_key: "minio123"
38 | ```
39 |
40 | To create an instance of `MultiClusterObservability`, apply the next object to the `open-cluster-management-observability` namespace on the hub cluster.
41 |
42 | **NOTE** If you're not using an OpenShift cluster that's deployed on AWS, make sure to modify the StorageClass definition in the below YAML.
43 |
44 | ```
45 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/03.Observability/exercise/multiclusterobservability.yaml -n open-cluster-management-observability
46 | ```
47 |
48 | Make sure that both `multicluster-observability-operator` and `endpoint-operator` are deployed (all pods must be in `Running` state).
49 |
50 | ```
51 | $ oc get pods -n open-cluster-management-observability
52 |
53 | NAME READY STATUS RESTARTS AGE
54 | grafana-dev-5f9585d797-qnpms 2/2 Running 0 27h
55 | minio-79c7ff488d-wbzrm 1/1 Running 0 2d1h
56 | observability-alertmanager-0 3/3 Running 0 2d1h
57 | observability-alertmanager-1 3/3 Running 0 2d1h
58 | observability-alertmanager-2 3/3 Running 0 2d1h
59 | observability-grafana-6556b6d979-n72hv 2/2 Running 0 2d1h
60 | observability-grafana-6556b6d979-rjf27 2/2 Running 0 2d1h
61 | observability-observatorium-api-84fd8849b7-f9bkv 1/1 Running 0 2d1h
62 | observability-observatorium-api-84fd8849b7-z5b9r 1/1 Running 0 2d1h
63 | observability-observatorium-operator-74975fc6fb-gl4pr 1/1 Running 0 2d1h
64 | observability-rbac-query-proxy-66c4944d4d-9ptzp 2/2 Running 0 2d1h
65 | observability-rbac-query-proxy-66c4944d4d-rkl9g 2/2 Running 0 2d1h
66 | observability-thanos-compact-0 1/1 Running 0 2d1h
67 | observability-thanos-query-5bf8459f67-bgpzw 1/1 Running 0 2d1h
68 | observability-thanos-query-5bf8459f67-rzwss 1/1 Running 0 2d1h
69 | observability-thanos-query-frontend-d6bd84889-9n9kw 1/1 Running 0 2d1h
70 | observability-thanos-query-frontend-d6bd84889-bvn9f 1/1 Running 0 2d1h
71 | observability-thanos-query-frontend-memcached-0 2/2 Running 0 2d1h
72 | observability-thanos-query-frontend-memcached-1 2/2 Running 0 2d1h
73 | observability-thanos-query-frontend-memcached-2 2/2 Running 0 2d1h
74 | observability-thanos-receive-controller-7c775bcdff-7b5gb 1/1 Running 0 2d1h
75 | observability-thanos-receive-default-0 1/1 Running 0 2d1h
76 | observability-thanos-receive-default-1 1/1 Running 0 2d1h
77 | observability-thanos-receive-default-2 1/1 Running 0 2d1h
78 | observability-thanos-rule-0 2/2 Running 0 27h
79 | observability-thanos-rule-1 2/2 Running 0 27h
80 | observability-thanos-rule-2 2/2 Running 0 27h
81 | observability-thanos-store-memcached-0 2/2 Running 0 2d1h
82 | observability-thanos-store-memcached-1 2/2 Running 0 2d1h
83 | observability-thanos-store-memcached-2 2/2 Running 0 2d1h
84 | observability-thanos-store-shard-0-0 1/1 Running 2 2d1h
85 | observability-thanos-store-shard-1-0 1/1 Running 2 2d1h
86 | observability-thanos-store-shard-2-0 1/1 Running 2 2d1h
87 |
88 | $ oc get pods -n open-cluster-management-addon-observability
89 |
90 | NAME READY STATUS RESTARTS AGE
91 | endpoint-observability-operator-764b6c666-9s7nz 1/1 Running 0 2d1h
92 | metrics-collector-deployment-765946868-hmk5d 1/1 Running 0 2d1h
93 | ```
94 |
95 | Now that all pods are running, log into RHACM's dashboard and navigate to **Clusters** -> **Grafana (top right side)**. Make sure that the dashboards are available and graphs are present.
96 |
97 | ### 3.2 - Explore the default Grafana dashboards
98 |
99 | This part focuses on the default Grafana dashboards that come with RHACM. Each dashboard has its own characteristics and provides valuable information to a system administrator in the organization. This section contains multiple tasks that require you to look for certain values in the default dashboards that come with `MCO`.
100 |
101 | - Find the maximum latency value for the `local-cluster` API server.
102 | - Find the percentage of `local-cluster`'s memory that is utilized.
103 | - Find the size of the etcd database in `local-cluster`.
104 | - Find the namespace that consumes the most CPU in `local-cluster`.
105 | - Find the node in `local-cluster` that consumes the highest percentage of memory.
106 | - Find the CPU utilization and quota of the `apiserver` pod (in the `openshift-apiserver` namespace).
107 |
108 | ### 3.3 - Creating a custom alert
109 |
110 | In this part you will configure custom alerts to monitor your environment. By configuring an alert, you will receive a notification whenever a rule that you have defined is violated by one of the managed clusters in RHACM.
111 |
112 | #### 3.3.1 - Alert #1
113 |
114 | The first alert you configure in this exercise fires a notification when a cluster's memory utilization exceeds 20%. In order to create the alert, create the next ConfigMap in the `open-cluster-management-observability` namespace (make sure to read through the alert definition before applying it!).
115 |
116 | ```
117 | apiVersion: v1
118 | data:
119 | custom_rules.yaml: |
120 | groups:
121 | - name: cluster-health
122 | rules:
123 | - alert: ClusterMemoryHighUsage
124 | annotations:
125 | summary: Notify when memory utilization on a cluster is greater than the defined utilization limit - 20%
126 | description: "The cluster has a high memory usage: {{ $value }} for {{ $labels.cluster }}."
127 | expr: |
128 | 1 - sum(:node_memory_MemAvailable_bytes:sum) by (cluster) / sum(kube_node_status_allocatable{resource="memory"}) by (cluster) > 0.2
129 | for: 5s
130 | labels:
131 | cluster: "{{ $labels.cluster }}"
132 | severity: critical
133 | kind: ConfigMap
134 | metadata:
135 | name: thanos-ruler-custom-rules
136 | namespace: open-cluster-management-observability
137 | ```
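
For example, assuming you saved the definition above to a file named `thanos-ruler-custom-rules.yaml` (the file name is arbitrary; the namespace is already set in the metadata), you can apply it with:

```
$ oc apply -f thanos-ruler-custom-rules.yaml
```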
138 |
139 | Now that the alert is configured, check whether the alert is initiated or not. To check the alert, navigate to the Grafana instance you've deployed in the previous task. In the Grafana instance, go to the 'Explore' dashboard (compass icon on the left sidebar). Before checking whether the alert is initiated or not, run the alert's query to check the memory utilization in the `local-cluster` cluster. Copy the next expression to the `query` tab, and press `SHIFT + ENTER` to run the query.
140 |
141 | ```
142 | 1 - sum(:node_memory_MemAvailable_bytes:sum) by (cluster) / sum(kube_node_status_allocatable{resource="memory"}) by (cluster)
143 | ```
144 |
145 | The result is a number that identifies the % of memory utilization of a cluster. For example, if the result is `0.1`, the memory utilization of a cluster is `10%`.
146 |
147 | Try running the next query -
148 |
149 | ```
150 | 1 - sum(:node_memory_MemAvailable_bytes:sum) by (cluster) / sum(kube_node_status_allocatable{resource="memory"}) by (cluster) > 0.2
151 | ```
152 |
153 | The query checks whether the result of the previous query is greater than `0.2` (20%). If it is, the query returns all clusters that utilize more than 20% of their memory - in your case, only `local-cluster`.
154 |
155 | Now that you understand the mechanism behind alerting, try running a query that displays the active alerts in your environment. The query should display the alert that you've configured in the previous steps. Copy the next expression to the `query` tab, and press `SHIFT + ENTER` to run it.
156 |
157 | ```
158 | ALERTS{alertname="ClusterMemoryHighUsage"}
159 | ```
160 |
161 | The initiated alert should now appear.
162 |
163 | #### 3.3.2 - Alert #2
164 |
165 | The second alert will monitor the etcd database size. An alert will be initiated if the etcd database size in `local-cluster` reaches more than 100MiB. This time, you will create the alert expression by yourself (HINT: you can use the ACM - Clusters Overview dashboard for help).
166 |
167 | In order to deploy the second alert to `MCO`, add the new alert definition to the `ConfigMap` you created for the previous alert. The ConfigMap should look like -
168 |
169 | ```
170 | apiVersion: v1
171 | data:
172 | custom_rules.yaml: |
173 | groups:
174 | - name: cluster-health
175 | rules:
176 | - alert: ClusterMemoryHighUsage
177 | annotations:
178 | summary: Notify when memory utilization on a cluster is greater than the defined utilization limit - 20%
179 | description: "The cluster has a high memory usage: {{ $value }} for {{ $labels.cluster }}."
180 | expr: |
181 | 1 - sum(:node_memory_MemAvailable_bytes:sum) by (cluster) / sum(kube_node_status_allocatable{resource="memory"}) by (cluster) > 0.2
182 | for: 5s
183 | labels:
184 | cluster: "{{ $labels.cluster }}"
185 | severity: critical
186 | - alert: ExampleSecondAlert
187 | annotations:
188 | summary: Example Summary
189 | description: "Example description"
190 | expr: |
191 | ...
192 | kind: ConfigMap
193 | metadata:
194 | name: thanos-ruler-custom-rules
195 | namespace: open-cluster-management-observability
196 | ```
197 |
198 | Make sure that the alert works as expected.
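
If you get stuck, one possible expression is sketched below. It assumes the etcd database size is collected under the `etcd_mvcc_db_total_size_in_bytes` metric; the exact metric name may differ between OpenShift / MCO versions, so verify it in the Explore view first:

```
sum(etcd_mvcc_db_total_size_in_bytes{cluster="local-cluster"}) by (cluster) > 100 * 1024 * 1024
```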
199 |
200 | ### 3.4 - Creating a custom dashboard
201 |
202 | In this section you will add your own dashboard to the default dashboards that come with MCO.
203 |
204 | Before you can create a custom dashboard, you need to spin up an instance of a "Development Grafana" in which you'll design your dashboard. Follow the steps described in slides 85 and 86 in the [workshop's presentation](https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing) to create the development instance of Grafana.
205 |
206 | **NOTE** Make sure to log into the Grafana dev instance with the desired `admin` user before you run the `./switch-to-grafana-admin.sh` script!
207 |
208 |
209 | #### 3.4.1 - Panel #1 - Available memory per node
210 |
211 | The dashboard you design in this part will present a graph that aggregates all available nodes in all clusters and show their available memory over a defined time period. In order to configure that dashboard, follow the next steps -
212 |
213 | - Log into the development instance.
214 | - Press on the large `+` on the left sidebar, select `Dashboard`.
215 | - A panel will appear in the new dashboard. Press on `Add an empty panel` in order to create a custom graph.
216 | - Enter the next query in the `Metrics browser` tab - `node_memory_MemAvailable_bytes{cluster="local-cluster"}`.
217 | - Enter the next label into the `Legend` field - `{{ instance }}`.
218 | - In the right menu, scroll down to the `Standard options` section. In the `Unit` section, select `Data` -> `bytes (IEC)`.
219 | - In the same menu, add `0` to the `Min` key.
220 | - In the top of the right menu, provide your panel with a name at - `Panel title`.
221 | - Press on `Apply` at the top right end of the screen.
222 | - You have created your first panel!
223 |
224 | 
225 |
226 | #### 3.4.2 - Panel #2 - Available CPU per node
227 |
228 | For this panel, you will create a graph similar to the one in the previous section, but this time you will monitor each node's available CPU. While creating the panel, make sure that you use the correct `Units`.
229 |
230 | 
231 |
232 | Make sure that you get the correct values by running the next command on the hub cluster -
233 |
234 | ```
235 | $ oc adm top node
236 | NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
237 | ip-10-0-138-131.us-east-2.compute.internal 2064m 27% 10496Mi 34%
238 | ip-10-0-148-108.us-east-2.compute.internal 3259m 21% 11826Mi 19%
239 | ip-10-0-166-190.us-east-2.compute.internal 7359m 47% 16188Mi 26%
240 | ip-10-0-186-66.us-east-2.compute.internal 1786m 23% 8773Mi 28%
241 | ip-10-0-202-11.us-east-2.compute.internal 1754m 23% 8781Mi 28%
242 | ```
243 |
244 | #### 3.4.3 - Export the dashboard to the main Grafana instance
245 |
246 | Until now, you have worked on the "Development" Grafana instance. It's time to export the dashboard you've created to the main "Production" Grafana instance. Before you begin the export process, make sure to save your dashboard by pressing `CTRL + S`. Provide the dashboard with a simple, declarative name.
247 |
248 | To export the dashboard to the "Production" instance, follow the steps described in slides 87,88 in the [workshop's presentation](https://docs.google.com/presentation/d/1LCPvIT_nF5hwnrfYdlD0Zie4zdDxc0kxZtW3Io5jfFk/edit?usp=sharing).
249 |
250 | Make sure that the dashboard is available in the Production Grafana instance in the 'Custom' directory.
251 |
--------------------------------------------------------------------------------
/07.Ansible-Tower-Integration/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 7 - Ansible Tower Integration
2 |
3 | In this exercise you will go through Ansible Tower integration with Red Hat Advanced Cluster Management for Kubernetes. You will associate AnsibleJob hooks to applications and integrate AnsibleJobs with policy violations. Ansible Tower has already been configured for your use by the instructor. You will only configure Red Hat Advanced Cluster Management for Kubernetes.
4 |
5 | The instructor will provide you with -
6 |
7 | * Ansible Tower URL
8 | * Ansible Tower web UI username / password
9 | * Ansible Tower Access Token for API requests
10 |
11 | ## Before You Begin
12 |
13 | In this section you will create the basic integration between RHACM and Ansible Tower. The integration is based on `Ansible Automation Platform Operator`. Make sure to install the operator before you begin the next exercises.
14 |
15 | Installing the operator can be done by running the next commands on the hub cluster -
16 |
17 | ```
18 | $ oc create namespace ansible-automation-platform
19 |
20 | $ cat >> ansible-operatorgroup.yaml << EOF
21 | apiVersion: operators.coreos.com/v1
22 | kind: OperatorGroup
23 | metadata:
24 | name: ansible-automation-operator-gp
25 | namespace: ansible-automation-platform
26 | EOF
27 |
28 | $ oc apply -f ansible-operatorgroup.yaml
29 |
30 | $ cat >> ansible-operator.yaml << EOF
31 | apiVersion: operators.coreos.com/v1alpha1
32 | kind: Subscription
33 | metadata:
34 | name: ansible-automation-operator
35 | namespace: ansible-automation-platform
36 | spec:
37 | channel: stable-2.1-cluster-scoped
38 | installPlanApproval: Automatic
39 | name: ansible-automation-platform-operator
40 | source: redhat-operators
41 | sourceNamespace: openshift-marketplace
42 | EOF
43 |
44 | $ oc apply -f ansible-operator.yaml
45 | ```
46 |
47 | The operator will now begin the installation process.
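
Before moving on, you can optionally verify that the operator finished installing; for example, check that its ClusterServiceVersion reports `Succeeded` (output varies by version):

```
$ oc get csv -n ansible-automation-platform
```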
48 |
49 | ## Ansible Tower Application Integration
50 |
51 | In this section, you will configure Ansible Tower Jobs to run as your RHACM Application deploys. The first job will run as a _prehook_ while the second job will run as a _posthook_. The _prehook_ runs before the application resources start the deployment process while the _posthook_ job runs as soon as the resources are deployed.
52 |
53 | Both Ansible Job hooks initiate the same Job Template on Ansible Tower called _Logger_. The _Logger_ Job Template creates a log in a dedicated file for each initiation of the Job Template. Afterwards, the _Logger_ Job Template exposes the log file on a local web server on Ansible Tower.
54 |
55 | Participants can view all log files on the Ansible Tower server by navigating to the URL provided by the instructor on **port 80**.
56 |
57 | The _Logger_ Ansible Role can be found at [logger role](ansible-playbooks/roles/logger) directory.
58 |
59 | ### Setting up Authentication
60 |
61 | In order to allow RHACM to access Ansible Tower, you must set up a namespace-scoped secret for RHACM to use. RHACM uses the secret to authenticate against the Ansible Tower instance. The secret contains the Ansible Tower URL and access token.
62 |
63 | Before creating the secret itself, make sure the namespace that populates the secret exists by running the next command on the **hub** cluster -
64 |
65 | ```
66 | $ oc create namespace mariadb
67 | ```
68 |
69 | To create the secret, navigate to **Credentials** -> **Add credentials** -> **Red Hat Ansible Automation Platform** in the RHACM UI and fill the next fields -
70 |
71 | - Credentials name: **ansible-tower**
72 | - Namespace: **mariadb**
73 |
74 | Press **Next**.
75 |
76 | At the next screen, specify the **Ansible Tower host** and **Ansible Tower token** provided by the instructor.
77 |
78 | Press **Next**. Review the information, and press on **Add**.
79 |
80 | ### Setting up the Application
81 |
82 | Before you continue, create a fork of the next GitHub repository - [https://github.com/michaelkotelnikov/rhacm-workshop](https://github.com/michaelkotelnikov/rhacm-workshop). As a result, you will have your own version of the repository - [https://github.com/<your-username>/rhacm-workshop](https://github.com/michaelkotelnikov/rhacm-workshop).
83 |
84 | Change the `log_file_name` variable value from `rhacm.log` to `<your-name>.log` (e.g. `michael.log`) in the [prehook](demo-application/mariadb-resources/prehook/pre_log.yaml) and [posthook](demo-application/mariadb-resources/posthook/post_log.yaml) definitions in **your fork** of the repository.
85 |
86 | Change the `pathname` definition in the Channel resource in the [application.yml](demo-application/rhacm-resources/application.yml) file in **your fork** of the repository. Change the `pathname` value from `https://github.com/michaelkotelnikov/rhacm-workshop.git` to `https://github.com/<your-username>/rhacm-workshop.git` (**make sure to use your real GitHub username**).
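
One possible way to make both changes locally, assuming you work from a clone of your fork (the commit message is arbitrary):

```
$ git clone https://github.com/<your-username>/rhacm-workshop.git
$ cd rhacm-workshop

# edit the prehook / posthook log_file_name values and the Channel pathname,
# then commit and push the changes to your fork
$ git commit -am "Customize logger file name and channel pathname"
$ git push
```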
87 |
88 | Apply the application resources from **your fork** -
89 |
90 | ```
91 | $ oc apply -f https://raw.githubusercontent.com/<your-username>/rhacm-workshop/master/07.Ansible-Tower-Integration/demo-application/rhacm-resources/application.yml
92 | ```
93 |
94 | Navigate to **Applications** -> **mariadb-app** in RHACM's UI. Note that the application has been deployed successfully alongside its pre and post hooks.
95 |
96 | 
97 |
98 | If you navigate to `http://<ansible-tower-url>/logs/<your-name>.log` you will notice the output of the Logger Ansible Job Template.
99 |
100 | ```
101 | Wed Sep 29 16:20:27 UTC 2021 Ansible Job was triggered by mariadb as prehook in clusters ['local-cluster'].
102 | Wed Sep 29 16:21:19 UTC 2021 Ansible Job was triggered by mariadb as posthook in clusters ['local-cluster'].
103 | ```
104 |
105 | Note that the posthook executed ~1 min after the prehook.
106 |
107 | Run the next commands to see more information about the executed AnsibleJobs. Each AnsibleJob instance has valuable information for troubleshooting and diagnostics -
108 |
109 | ```
110 | $ oc get ansiblejob -n mariadb
111 |
112 | NAME AGE
113 | postjob-1-4be802 13m
114 | prejob-1-4be802 15m
115 |
116 | $ oc describe ansiblejob prejob-1-4be802 -n mariadb
117 |
118 | Name: prejob-1-4be802
119 | Namespace: mariadb
120 | Labels: tower_job_id=13
121 | Annotations: apps.open-cluster-management.io/hook-type: prehook
122 | apps.open-cluster-management.io/hosting-subscription: mariadb/mariadb-app
123 | API Version: tower.ansible.com/v1alpha1
124 | Kind: AnsibleJob
125 | Metadata:
126 | Manager: OpenAPI-Generator
127 | Operation: Update
128 | Time: 2021-09-29T16:20:30Z
129 | Owner References:
130 | API Version: apps.open-cluster-management.io/v1
131 | Kind: Subscription
132 | Name: mariadb-app
133 | UID: d5207886-dc95-4668-a96c-d7cc6468e079
134 | Resource Version: 513833
135 | UID: 0314f132-3495-4328-b84c-2d6815d25f5e
136 | Spec:
137 | extra_vars:
138 | hook_type: prehook
139 | log_file_name: michael.log
140 | target_clusters:
141 | local-cluster
142 | trigger_name: mariadb
143 | job_template_name: Logger
144 | tower_auth_secret: ansible-tower
145 | Status:
146 | Ansible Job Result:
147 | Changed: true
148 | Elapsed: 6.197
149 | Failed: false
150 | Finished: 2021-09-29T16:20:28.465789Z
151 | Started: 2021-09-29T16:20:22.268481Z
152 | Status: successful
153 | URL: https://student1.a32d.example.opentlc.com/#/jobs/playbook/13
154 | Conditions:
155 | Ansible Result:
156 | Changed: 0
157 | Completion: 2021-09-29T16:20:37.920281
158 | Failures: 0
159 | Ok: 3
160 | Skipped: 0
161 | Last Transition Time: 2021-09-29T16:19:08Z
162 | Message: Awaiting next reconciliation
163 | Reason: Successful
164 | Status: True
165 | Type: Running
166 | k8sJob:
167 | Created: true
168 | Env:
169 | Secret Namespaced Name: mariadb/ansible-tower
170 | Template Name: Logger
171 | Verify SSL: false
172 | Message: Monitor the job.batch status for more details with the following commands:
173 | 'kubectl -n mariadb get job.batch/prejob-1-4be802'
174 | 'kubectl -n mariadb describe job.batch/prejob-1-4be802'
175 | 'kubectl -n mariadb logs -f job.batch/prejob-1-4be802'
176 | Namespaced Name: mariadb/prejob-1-4be802
177 | Message: This job instance is already running or has reached its end state.
178 | Events:
179 | ```
180 |
181 | More information can be found in the Ansible Tower UI. Log into the Ansible Tower UI using the URL and credentials provided by the instructor.
182 |
183 | At the main dashboard, take a look at the **Recent Job Runs** tab. Press on the `Logger` Job Run that matches your timestamp.
184 |
185 | 
186 |
187 | ### Updating an Application
188 |
189 | Now that you have seen how Ansible Tower integrates with RHACM Applications, let's add another resource to the application. Adding a resource to the application demonstrates how AnsibleJobs are affected by changes in the application structure.
190 |
191 | In **your fork** of this repository, add a file called `service.yaml` under the [mariadb-resources](demo-application/mariadb-resources) directory. Paste the next resource into the `service.yaml` file. Make sure to commit the changes to GitHub.
192 |
193 | ```
194 | apiVersion: v1
195 | kind: Service
196 | metadata:
197 | labels:
198 | app: mariadb
199 | name: mariadb
200 | namespace: mariadb
201 | spec:
202 | ports:
203 | - name: 3306-tcp
204 | port: 3306
205 | protocol: TCP
206 | targetPort: 3306
207 | selector:
208 | app: mariadb
209 | sessionAffinity: None
210 | type: ClusterIP
211 | ```
212 |
213 | Note that after applying the resource, more AnsibleJob resources have been created in the `mariadb` namespace.
214 |
215 | ```
216 | $ oc get ansiblejob
217 |
218 | NAME AGE
219 | postjob-1-d53c95 3m18s
220 | postjob-1-e18776 28m
221 | prejob-1-d53c95 3m59s
222 | prejob-1-e18776 28m
223 | ```
224 |
225 | The first pair of AnsibleJobs ran during the initial application deployment, while the second pair ran after the new Service resource was applied.
226 |
227 | If you take a look at the log file created by Ansible Tower at `http://<ansible-tower-url>/logs/<your-name>.log` you'll notice the **new** logs that the Logger Job Template has created. Note the timestamp of the latest logs in the file.
228 |
229 | ```
230 | Wed Sep 29 16:20:27 UTC 2021 Ansible Job was triggered by mariadb as prehook in clusters ['local-cluster'].
231 | Wed Sep 29 16:21:19 UTC 2021 Ansible Job was triggered by mariadb as posthook in clusters ['local-cluster'].
232 | Wed Sep 29 17:20:57 UTC 2021 Ansible Job was triggered by mariadb as prehook in clusters ['local-cluster'].
233 | Wed Sep 29 17:21:49 UTC 2021 Ansible Job was triggered by mariadb as posthook in clusters ['local-cluster'].
234 | ```
235 |
236 | ## Ansible Tower Governance Integration
237 |
238 | In this section, you will configure Ansible Tower Jobs to run when a violation is initiated in one of your policies.
239 |
240 | ### Setting up Authentication
241 |
242 | In order to allow RHACM to access Ansible Tower you must set up a **Namespace scoped** secret for RHACM to use. A secret must be created for each namespace that interacts with Ansible Tower. Therefore, you must create the secret in the namespace that contains the policies as well.
243 |
244 | Before creating the secret itself, make sure the namespace that populates the secret exists by running the next command -
245 |
246 | ```
247 | $ oc create namespace rhacm-policies
248 | ```
249 |
250 | To create the secret, navigate to **Credentials** -> **Red Hat Ansible Automation Platform** in the RHACM UI and fill the next fields -
251 |
252 | - Credentials name: **ansible-tower**
253 | - Namespace: **rhacm-policies**
254 |
255 | Press **Next**.
256 |
257 | At the next screen, specify the **Ansible Tower host** and **Ansible Tower token** provided by the instructor.
258 |
259 | Press **Next**. Review the information, and press on **Add**.
260 |
261 | ### Policy Automation - #1 - Delete Namespace if violation initiates
262 |
263 | In this example you will create a policy that monitors whether a _forbidden namespace_ exists. If the namespace exists, a violation is initiated. Once the violation is initiated, an Ansible Job Template is triggered. The Ansible Job Template remediates the violation using an Ansible role. A role has already been configured for this scenario at - [ansible-playbooks/roles/k8s-namespace](ansible-playbooks/roles/k8s-namespace).
264 |
265 | #### Configuring the Policy
266 |
267 | The next Policy will initiate an alert if a namespace with the name `forbidden-namespace` is present in the cluster. Apply the policy to the hub cluster -
268 |
269 | ```
270 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/07.Ansible-Tower-Integration/demo-policy/rhacm-resources/policy.yaml
271 | ```
272 |
273 | After creating the policy, make sure that the policy works as expected. Create a namespace with the name `forbidden-namespace` on the managed cluster.
274 |
275 | ```
276 | $ oc create namespace forbidden-namespace
277 | ```
278 |
279 | Make sure that a violation is initiated.
280 |
281 | 
282 |
283 | #### Configuring PolicyAutomation
284 |
285 | Now that a policy is configured, create a PolicyAutomation object that will initiate an Ansible Job that will remediate the violation. Copy the next PolicyAutomation object definition to your local workstation.
286 |
287 | ```
288 | apiVersion: policy.open-cluster-management.io/v1beta1
289 | kind: PolicyAutomation
290 | metadata:
291 | name: namespace-policy-automation
292 | namespace: rhacm-policies
293 | spec:
294 | automationDef:
295 | extra_vars:
296 | k8s_api_url:
297 | k8s_password:
298 | k8s_username:
299 | name: K8S-Namespace
300 | secret: ansible-tower
301 | type: AnsibleJob
302 | mode: once
303 | policyRef: policy-remove-dangerous-namespace
304 | ```
305 |
306 | Modify the PolicyAutomation object with parameters relevant to your cluster.
307 |
308 | - k8s_api_url refers to the API URL of your **managed** OpenShift / K8S cluster. e.g - 'https://api.cluster.sandbox.opentlc.com:6443'
309 | - k8s_password refers to the password you're going to use to authenticate to the **managed** OpenShift / K8S cluster.
310 | - k8s_username refers to the username you're going to use to authenticate to the **managed** OpenShift / K8S cluster.
311 |
312 | After modifying the parameters, create the PolicyAutomation object on the hub cluster in the `rhacm-policies` namespace.
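
For example, assuming you saved the definition to a file named `policyautomation.yaml` (the file name is arbitrary; the namespace is already set in the metadata):

```
$ oc apply -f policyautomation.yaml
```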
313 |
314 | Note that as soon as you create the PolicyAutomation object, an AnsibleJob object is created in the `rhacm-policies` namespace. The AnsibleJob indicates that the Ansible Job Template on Ansible Tower has been initiated.
315 |
316 | ```
317 | $ oc get ansiblejob -n rhacm-policies
318 |
319 | NAME AGE
320 | namespace-policy-automation-once-2bgv8 46s
321 | ```
322 |
323 | If you log into the Ansible Tower web interface, you'll notice that the K8S-Namespace Job Template has been initiated. The Job indicates that the forbidden namespace has been removed.
324 |
325 | 
326 |
327 | Now, take a look at the Governance dashboard in RHACM. Note that the violation is no longer present in the policy you have created. The forbidden namespace is no longer present.
328 |
329 | 
--------------------------------------------------------------------------------
/04.Application-Lifecycle/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 4 - Application Lifecycle
2 |
3 | In this exercise you will deploy a demo application onto the cluster using Red Hat Advanced Cluster Management for Kubernetes. You will manage the application versions and use cluster labels to configure placement mechanisms.
4 |
5 | In this exercise you will deploy an application that has two versions -
6 |
7 | * Development - [https://github.com/michaelkotelnikov/rhacm-workshop/tree/dev/04.Application-Lifecycle/exercise-application/application-resources](https://github.com/michaelkotelnikov/rhacm-workshop/tree/dev/04.Application-Lifecycle/exercise-application/application-resources)
8 | * Production - [https://github.com/michaelkotelnikov/rhacm-workshop/tree/master/04.Application-Lifecycle/exercise-application/application-resources](https://github.com/michaelkotelnikov/rhacm-workshop/tree/master/04.Application-Lifecycle/exercise-application/application-resources)
9 |
10 | Both versions of the application are stored in the same Git repository, while the **production** version is stored in the **master** branch, and the **development** version is stored in the **dev** branch.
11 |
12 | The application is a simple web server that serves a different web page in each version - the **development** version serves one page, while the **production** version serves another.
13 |
14 | Each version of the application will run on a cluster with the relevant tag. The application with the **development** version will run on clusters with the **environment=dev** label, while the application with the **production** version will run on clusters with the **environment=production** label.
15 |
16 | To deploy a functioning application, create the next resources -
17 |
18 | * **Namespace** - Create a namespace in which the custom resources are going to be deployed on the hub.
19 |
20 | ```
21 | $ cat >> namespace.yaml << EOF
22 | ---
23 | apiVersion: v1
24 | kind: Namespace
25 | metadata:
26 | name: webserver-acm
27 | EOF
28 |
29 | $ oc apply -f namespace.yaml
30 | ```
31 |
32 |
33 | * **Channel** - Create a channel that refers to the GitHub repository in which the application’s resources are placed. The GitHub repository is at - [https://github.com/michaelkotelnikov/rhacm-workshop.git](https://github.com/michaelkotelnikov/rhacm-workshop.git)
34 |
35 | ```
36 | $ cat >> channel.yaml << EOF
37 | ---
38 | apiVersion: apps.open-cluster-management.io/v1
39 | kind: Channel
40 | metadata:
41 | name: webserver-app
42 | namespace: webserver-acm
43 | spec:
44 | type: Git
45 | pathname: https://github.com/michaelkotelnikov/rhacm-workshop.git
46 | EOF
47 |
48 | $ oc apply -f channel.yaml
49 | ```
50 |
51 |
52 | * **PlacementRule** - Create a PlacementRule that aggregates all clusters with the **environment=dev** label. This PlacementRule will be used to group all clusters that will run the development version of the application.
53 |
54 | ```
55 | $ cat >> placementrule-dev.yaml << EOF
56 | ---
57 | apiVersion: apps.open-cluster-management.io/v1
58 | kind: PlacementRule
59 | metadata:
60 | name: dev-clusters
61 | namespace: webserver-acm
62 | spec:
63 | clusterConditions:
64 | - type: ManagedClusterConditionAvailable
65 | status: "True"
66 | clusterSelector:
67 | matchLabels:
68 | environment: dev
69 | EOF
70 |
71 | $ oc apply -f placementrule-dev.yaml
72 | ```
73 |
74 | * **Subscription** - Create a subscription that binds between the defined above **PlacementRule** and **Channel** resources. The subscription will point to the relevant path on which the application resources are present - _04.Application-Lifecycle/exercise-application/application-resources_. Furthermore, the Subscription will point to the **dev** branch, in order to deploy the development version of the application.
75 |
76 | ```
77 | $ cat >> subscription-dev.yaml << EOF
78 | ---
79 | apiVersion: apps.open-cluster-management.io/v1
80 | kind: Subscription
81 | metadata:
82 | name: webserver-app-dev
83 | namespace: webserver-acm
84 | labels:
85 | app: webserver-app
86 | annotations:
87 | apps.open-cluster-management.io/github-path: 04.Application-Lifecycle/exercise-application/application-resources
88 | apps.open-cluster-management.io/git-branch: dev
89 | spec:
90 | channel: webserver-acm/webserver-app
91 | placement:
92 | placementRef:
93 | kind: PlacementRule
94 | name: dev-clusters
95 | EOF
96 |
97 | $ oc apply -f subscription-dev.yaml
98 | ```
99 |
100 | * **Application** - Create an Application resource to aggregate Subscription resources. The Application resource aggregates the Subscription resources by using labels. In this case, you will be using the label - **app: webserver-app**.
101 |
102 | ```
103 | $ cat >> application.yaml << EOF
104 | ---
105 | apiVersion: app.k8s.io/v1beta1
106 | kind: Application
107 | metadata:
108 | name: webserver-app
109 | namespace: webserver-acm
110 | spec:
111 | componentKinds:
112 | - group: apps.open-cluster-management.io
113 | kind: Subscription
114 | descriptor: {}
115 | selector:
116 | matchExpressions:
117 | - key: app
118 | operator: In
119 | values:
120 | - webserver-app
121 | EOF
122 |
123 | $ oc apply -f application.yaml
124 | ```
125 |
126 | After the resources are created, navigate in the RHACM portal to **Applications** -> **<application name>**. Make sure that the resources are created successfully.
127 |
128 | Run the next command on the managed cluster -
129 |
130 | ```
131 | $ oc get route -n webserver-acm
132 | ```
133 |
134 | Navigate to the application's frontend at **https://<route-url>/application.html**
135 |
136 | Make sure that the application is running the **development version** on the cluster. Validate that the application is deployed by running the next command -
137 |
138 | ```
139 | $ oc get pods -n webserver-acm
140 | ```
141 |
142 | Now that you have the **Development** version of the application running, it’s time to deploy the **Production** version alongside the **Development** version. Create the next resources -
143 |
144 | * **PlacementRule** - Create a PlacementRule that aggregates the **production** clusters using the **environment=production** label.
145 |
146 | ```
147 | $ cat >> placementrule-production.yaml << EOF
148 | ---
149 | apiVersion: apps.open-cluster-management.io/v1
150 | kind: PlacementRule
151 | metadata:
152 | name: prod-clusters
153 | namespace: webserver-acm
154 | spec:
155 | clusterConditions:
156 | - type: ManagedClusterConditionAvailable
157 | status: "True"
158 | clusterSelector:
159 | matchLabels:
160 | environment: production
161 | EOF
162 |
163 | $ oc apply -f placementrule-production.yaml
164 | ```
165 |
166 | * **Subscription** - Create a Subscription that maps the newly created **PlacementRule** to the previously created **Channel**. The subscription uses the **master** branch in the **Channel** in order to run the **production** version of the application.
167 |
168 | ```
169 | $ cat >> subscription-production.yaml << EOF
170 | ---
171 | apiVersion: apps.open-cluster-management.io/v1
172 | kind: Subscription
173 | metadata:
174 | name: webserver-app-prod
175 | namespace: webserver-acm
176 | labels:
177 | app: webserver-app
178 | annotations:
179 | apps.open-cluster-management.io/github-path: 04.Application-Lifecycle/exercise-application/application-resources
180 | spec:
181 | channel: webserver-acm/webserver-app
182 | placement:
183 | placementRef:
184 | kind: PlacementRule
185 | name: prod-clusters
186 | EOF
187 |
188 | $ oc apply -f subscription-production.yaml
189 | ```
190 |
191 | After creating the resources, navigate to **Applications** -> **webserver-app**. On the left, at the `Subscription` sidebar, choose `All Subscriptions`. Note that the newly created Subscription does not deploy any resource on any of the clusters since there are no clusters with the **environment=production** label.
192 |
193 | 
194 |
195 | In order to deploy the production application on **local-cluster** -
196 |
197 | * Navigate to **Clusters** -> **local-cluster** -> **Actions** -> **Edit labels**.
198 | * Remove the **environment=dev** label.
199 | * Add the **environment=production** label.
200 |
201 | Wait for about 2 minutes for the application to redeploy on **local-cluster**, and navigate to **Applications** -> **webserver-app**.
202 |
203 | Note that the application is now deployed at its “production” version on **local-cluster**.
204 |
205 | 
206 |
207 | Click on the application’s route resource, and navigate to **https://<route-url>/application.html**. The application now serves a different webpage, indicating that the application is in a production state.
208 |
209 | **NOTE:** All of the resources you have configured in this exercise are present in the [git repository](https://github.com/michaelkotelnikov/rhacm-workshop.git). The resources can be created by running the next command -
210 |
211 | ```
212 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/04.Application-Lifecycle/exercise-application/rhacm-resources/application.yaml
213 | ```
214 |
215 | # ArgoCD Integration
216 |
217 | This section discusses the process of deploying an application using ArgoCD in a Red Hat Advanced Cluster Management for Kubernetes environment. The section walks through ArgoCD installation, integration with RHACM, and application deployment.
218 |
219 | Before you begin, make sure to delete all of the resources you created in the previous exercise -
220 |
221 | ```
222 | $ oc delete project webserver-acm
223 | ```
224 |
225 | ## ArgoCD Installation
226 |
227 | As described in the workshop, an ArgoCD / OpenShift GitOps instance has to be installed in order to begin the integration with RHACM. Install the `openshift-gitops` operator by applying the next resource to the hub cluster -
228 |
229 | ```
230 | $ cat >> openshift-gitops-operator.yaml << EOF
231 | ---
232 | apiVersion: operators.coreos.com/v1alpha1
233 | kind: Subscription
234 | metadata:
235 | name: openshift-gitops-operator
236 | namespace: openshift-operators
237 | spec:
238 | channel: stable
239 | installPlanApproval: Automatic
240 | name: openshift-gitops-operator
241 | source: redhat-operators
242 | sourceNamespace: openshift-marketplace
243 | EOF
244 |
245 | $ oc apply -f openshift-gitops-operator.yaml
246 | ```
247 |
248 | After installing the operator on the hub cluster, create the ArgoCD custom resource. The ArgoCD CR spins up an instance of ArgoCD using the `openshift-gitops` operator.
249 |
250 | ```
251 | $ cat >> argocd.yaml << EOF
252 | ---
253 | apiVersion: argoproj.io/v1alpha1
254 | kind: ArgoCD
255 | metadata:
256 | finalizers:
257 | - argoproj.io/finalizer
258 | name: openshift-gitops
259 | namespace: openshift-gitops
260 | spec:
261 | server:
262 | autoscale:
263 | enabled: false
264 | grpc:
265 | ingress:
266 | enabled: false
267 | ingress:
268 | enabled: false
269 | resources:
270 | limits:
271 | cpu: 500m
272 | memory: 256Mi
273 | requests:
274 | cpu: 125m
275 | memory: 128Mi
276 | route:
277 | enabled: true
278 | service:
279 | type: ''
280 | grafana:
281 | enabled: false
282 | EOF
283 |
284 | $ oc apply -f argocd.yaml
285 | ```
286 |
287 | Make sure that the ArgoCD instance is running by navigating to ArgoCD's web UI. The URL can be found by running the next command -
288 |
289 | ```
290 | $ oc get route -n openshift-gitops
291 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
292 | ...
293 | openshift-gitops-server openshift-gitops-server-openshift-gitops. openshift-gitops-server https passthrough/Redirect None
294 | ```
295 |
296 | Log into the ArgoCD instance by pressing on `Log In via OpenShift`.
297 |
298 | Now that you have a running instance of ArgoCD, let's integrate it with RHACM!
299 |
300 | ## Preparing RHACM for ArgoCD Integration
301 |
302 | In this part you will create the resources to import `local-cluster` into ArgoCD's managed clusters.
303 |
304 | Create the next ManagedClusterSet resource. The ManagedClusterSet resource will include the `local-cluster` cluster, and it will later be bound to the `openshift-gitops` namespace.
305 |
306 | ```
307 | $ cat >> managedclusterset.yaml << EOF
308 | ---
309 | apiVersion: cluster.open-cluster-management.io/v1beta2
310 | kind: ManagedClusterSet
311 | metadata:
312 | name: all-clusters
313 | EOF
314 |
315 | $ oc apply -f managedclusterset.yaml
316 | ```
317 |
318 | Now, import `local-cluster` into the ManagedClusterSet resource. This is done by adding the `cluster.open-cluster-management.io/clusterset: all-clusters` label to the `local-cluster` ManagedCluster resource -
319 |
320 | ```
321 | $ oc edit managedcluster local-cluster
322 | ...
323 | labels:
324 | ...
325 | cloud: Amazon
326 | cluster.open-cluster-management.io/clusterset: all-clusters
327 | ...
328 | ```
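
If you prefer a non-interactive command, the same label can be added in one step; a hedged equivalent, assuming nothing else manages the clusterset label on this cluster:

```
$ oc label managedcluster local-cluster \
    cluster.open-cluster-management.io/clusterset=all-clusters --overwrite
```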
329 |
330 | Create the ManagedClusterSetBinding resource to bind the `all-clusters` ManagedClusterSet to the `openshift-gitops` namespace. Creating the ManagedClusterSetBinding resource allows ArgoCD to access `local-cluster` information and import it into its management stack.
331 |
332 | ```
333 | $ cat >> managedclustersetbinding.yaml << EOF
334 | ---
335 | apiVersion: cluster.open-cluster-management.io/v1beta2
336 | kind: ManagedClusterSetBinding
337 | metadata:
338 | name: all-clusters
339 | namespace: openshift-gitops
340 | spec:
341 | clusterSet: all-clusters
342 | EOF
343 |
344 | $ oc apply -f managedclustersetbinding.yaml
345 | ```
346 |
347 | Create the Placement resource and bind it to `all-clusters` ManagedClusterSet. Note that you will not be using any special filters in this exercise.
348 |
349 | ```
350 | $ cat >> placement.yaml << EOF
351 | ---
352 | apiVersion: cluster.open-cluster-management.io/v1beta1
353 | kind: Placement
354 | metadata:
355 | name: all-clusters
356 | namespace: openshift-gitops
357 | spec:
358 | clusterSets:
359 | - all-clusters
360 | EOF
361 |
362 | $ oc apply -f placement.yaml
363 | ```
364 |
365 | Create the GitOpsCluster resource to indicate the location of ArgoCD and the Placement resource -
366 |
367 | ```
368 | $ cat >> gitopsserver.yaml << EOF
369 | ---
370 | apiVersion: apps.open-cluster-management.io/v1beta1
371 | kind: GitOpsCluster
372 | metadata:
373 | name: gitops-cluster
374 | namespace: openshift-gitops
375 | spec:
376 | argoServer:
377 | cluster: local-cluster
378 | argoNamespace: openshift-gitops
379 | placementRef:
380 | kind: Placement
381 | apiVersion: cluster.open-cluster-management.io/v1alpha1
382 | name: all-clusters
383 | EOF
384 |
385 | $ oc apply -f gitopsserver.yaml
386 | ```
387 |
388 | Make sure that `local-cluster` is imported into ArgoCD. In ArgoCD's web UI, on the left menu bar, navigate to **Manage your repositories, projects, settings** -> **Clusters**. You should see `local-cluster` in the cluster list.
389 |
390 | 
391 |
392 | ## Deploying an ApplicationSet using ArgoCD
393 |
394 | Now that you have integrated ArgoCD with RHACM, let's deploy an ApplicationSet resource using ArgoCD. The applications you're going to create in this part are based on the same applications you created at the beginning of the exercise - one web server application for a development environment and one for a production environment.
395 |
396 | The applications are based on one [helm](https://helm.sh/) chart. Each application in the set is identified by its own unique `values.yaml` file. The applications use the same baseline Kubernetes resources at [exercise-argocd/application-resources/templates](exercise-argocd/application-resources/templates), but different `values` files at [exercise-argocd/application-resources/values](exercise-argocd/application-resources/values). The ApplicationSet resource iterates over the directories in the [exercise-argocd/application-resources/values](exercise-argocd/application-resources/values) directory and creates an application instance for each directory name.
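
For reference, a simplified sketch of the Git directory generator pattern such an ApplicationSet uses is shown below. It is illustrative only - the actual `applicationset.yaml` in the repository may differ in names, paths, and sync settings:

```
apiVersion: argoproj.io/v1alpha1
kind: ApplicationSet
metadata:
  name: webserver
  namespace: openshift-gitops
spec:
  generators:
  - git:
      repoURL: https://github.com/michaelkotelnikov/rhacm-workshop.git
      revision: master
      directories:
      # one application is generated per directory under values/
      - path: 04.Application-Lifecycle/exercise-argocd/application-resources/values/*
  template:
    metadata:
      name: 'webserver-{{path.basename}}'
    spec:
      project: default
      source:
        repoURL: https://github.com/michaelkotelnikov/rhacm-workshop.git
        targetRevision: master
        path: 04.Application-Lifecycle/exercise-argocd/application-resources
        helm:
          valueFiles:
          # pick the values file that matches the generated directory name
          - 'values/{{path.basename}}/values.yaml'
      destination:
        server: https://kubernetes.default.svc
        namespace: 'webserver-{{path.basename}}'
```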
397 |
398 | To create the ApplicationSet resource, run the next commands -
399 |
400 | ```
401 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/04.Application-Lifecycle/exercise-argocd/argocd-resources/appproject.yaml
402 |
403 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/04.Application-Lifecycle/exercise-argocd/argocd-resources/applicationset.yaml
404 | ```
405 |
406 | Note that two application instances have been created in the ArgoCD UI -
407 |
408 | 
409 |
410 | After viewing the applications and their resources in the ArgoCD dashboard, log into RHACM's web console, and navigate to **Applications**. Note that RHACM identifies the deployed ApplicationSet and provides an entry for both applications -
411 |
412 | 
413 |
414 | The deployed application resources can be seen in the ApplicationSet instance in RHACM -
415 |
416 | 
417 |
418 | Make sure that the application is available by navigating to its Route resource.
419 |
420 | ```
421 | $ oc get route -n webserver-prod
422 |
423 | NAME        HOST/PORT                        PATH                SERVICES    PORT       TERMINATION   WILDCARD
424 | webserver   webserver-webserver-prod.apps.   /application.html   webserver   8080-tcp   edge          None
425 | ```
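
As a quick check, requesting the page with curl should return the application page (replace the host with the value from the `oc get route` output; `-k` skips certificate verification on the edge-terminated route) -

```
$ curl -k https://<webserver-prod-route-host>/application.html
```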
426 |
--------------------------------------------------------------------------------
/05.Governance-Risk-Compliance/README.md:
--------------------------------------------------------------------------------
1 | # Exercise 5 - Governance Risk and Compliance
2 |
3 | In this exercise you will go through the Compliance features that come with Red Hat Advanced Cluster Management for Kubernetes. You will apply a number of policies to the cluster in order to comply with global security and management standards.
4 |
5 | **NOTE!** The exercise depends on the ACM application deployed in the previous exercise (NOT the application deployed using ArgoCD). If the application is not available in your environment, run the next command to deploy it -
6 |
7 | ```
8 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/04.Application-Lifecycle/exercise-application/rhacm-resources/application.yaml
9 | ```
10 |
11 | **NOTE!** Make sure that the `environment=production` label is associated with the managed cluster!
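
You can verify the label from the hub cluster, and add it if it is missing (replace `<managed-cluster-name>` with the name of your managed cluster) -

```
$ oc get managedclusters --show-labels

$ oc label managedcluster <managed-cluster-name> environment=production
```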
12 |
13 | Before you start creating the policies, make sure to create a namespace to hold the custom resources associated with the RHACM policies.
14 |
15 | ```
16 | $ cat >> policies-namespace.yaml << EOF
17 | ---
18 | apiVersion: v1
19 | kind: Namespace
20 | metadata:
21 |   name: rhacm-policies
22 | EOF
23 |
24 | $ oc apply -f policies-namespace.yaml
25 | ```
26 |
27 | After the namespace is created, create a PlacementRule resource. The PlacementRule will associate the policies below with all clusters that carry the `environment=production` label.
28 |
29 | ```
30 | $ cat >> placementrule-policies.yaml << EOF
31 | ---
32 | apiVersion: apps.open-cluster-management.io/v1
33 | kind: PlacementRule
34 | metadata:
35 |   name: prod-policies-clusters
36 |   namespace: rhacm-policies
37 | spec:
38 |   clusterConditions:
39 |   - type: ManagedClusterConditionAvailable
40 |     status: "True"
41 |   clusterSelector:
42 |     matchLabels:
43 |       environment: production
44 | EOF
45 |
46 | $ oc apply -f placementrule-policies.yaml
47 | ```
48 |
49 | ## Policy #1 - Network Security
50 |
51 | In this section you will apply a NetworkPolicy object onto the cluster in order to limit access to the application you have created in the previous exercise. You will only allow traffic that comes from OpenShift’s Ingress Controller on port 8080. All other traffic will be dropped.
52 |
53 | The policy you’ll create in this section will use the _enforce_ remediation action in order to create the NetworkPolicy objects if they do not exist.
54 |
55 | We will configure the policy definition in two stages -
56 |
57 |
58 | ### Stage 1 - Deny all traffic to the application namespace
59 |
60 | The policy you will configure in this section enforces a _deny all_ NetworkPolicy in the webserver-acm namespace on the managed cluster. An example _deny all_ NetworkPolicy object -
61 |
62 | ```
63 | kind: NetworkPolicy
64 | apiVersion: networking.k8s.io/v1
65 | metadata:
66 |   name: deny-by-default
67 | spec:
68 |   podSelector:
69 |   ingress: []
70 | ```
71 |
72 | In order to create the _deny all_ NetworkPolicy object on the managed cluster using Red Hat Advanced Cluster Management for Kubernetes, run the next commands on the hub cluster -
73 |
74 | ```
75 | $ cat >> denyall-networkpolicy-policy.yaml << EOF
76 | ---
77 | apiVersion: policy.open-cluster-management.io/v1
78 | kind: Policy
79 | metadata:
80 |   name: policy-networkpolicy-webserver
81 |   namespace: rhacm-policies
82 |   annotations:
83 |     policy.open-cluster-management.io/standards: NIST SP 800-53
84 |     policy.open-cluster-management.io/categories: SC System and Communications Protection
85 |     policy.open-cluster-management.io/controls: SC-7 Boundary Protection
86 | spec:
87 |   remediationAction: enforce
88 |   disabled: false
89 |   policy-templates:
90 |     - objectDefinition:
91 |         apiVersion: policy.open-cluster-management.io/v1
92 |         kind: ConfigurationPolicy
93 |         metadata:
94 |           name: policy-networkpolicy-denyall-webserver
95 |         spec:
96 |           remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
97 |           severity: medium
98 |           namespaceSelector:
99 |             include: ["webserver-acm"]
100 |           object-templates:
101 |             - complianceType: musthave
102 |               objectDefinition:
103 |                 kind: NetworkPolicy
104 |                 apiVersion: networking.k8s.io/v1
105 |                 metadata:
106 |                   name: deny-by-default
107 |                 spec:
108 |                   podSelector:
109 |                   ingress: []
110 | ---
111 | apiVersion: policy.open-cluster-management.io/v1
112 | kind: PlacementBinding
113 | metadata:
114 |   name: binding-policy-networkpolicy-webserver
115 |   namespace: rhacm-policies
116 | placementRef:
117 |   name: prod-policies-clusters
118 |   kind: PlacementRule
119 |   apiGroup: apps.open-cluster-management.io
120 | subjects:
121 | - name: policy-networkpolicy-webserver
122 |   kind: Policy
123 |   apiGroup: policy.open-cluster-management.io
124 | EOF
125 |
126 | $ oc apply -f denyall-networkpolicy-policy.yaml
127 | ```
128 |
129 | The above command creates two objects - a _Policy_ and a _PlacementBinding_.
130 |
131 | * The _Policy_ object defines the NetworkPolicy that will be deployed on the managed cluster. It associates the NetworkPolicy with the webserver-acm namespace and enforces it.
132 | * The _PlacementBinding_ resource associates the _Policy_ object with the _PlacementRule_ resource that was created at the beginning of the exercise, thereby allowing the Policy to apply to all clusters with the _environment=production_ label.
133 |
134 | After the creation of the objects, navigate to **Governance** -> **Policies** in the Red Hat Advanced Cluster Management for Kubernetes console. Note that the policy is configured, and the managed cluster is compliant.
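
The compliance state can also be checked from the CLI on the hub cluster (in recent RHACM versions the output includes a compliance column) -

```
$ oc get policies.policy.open-cluster-management.io -n rhacm-policies
```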
135 |
136 | Make sure that the policy is effective by trying to navigate to the application once again - **https://<webserver application route>/application.html**. (The application should not be accessible).
137 |
138 | In order to understand the difference between the various _complianceType_ values you can consult [https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.4/html-single/governance/index#configuration-policy-yaml-table](https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/2.4/html-single/governance/index#configuration-policy-yaml-table); a short illustrative snippet follows the list:
139 | * `musthave` will enforce that the object exists and that the fields defined in the policy are present (as a subset of the full object definition)
140 | * `mustonlyhave` will enforce that the object exists with exactly the specified name and fields
141 | * `mustnothave` will enforce that no object with the same name or labels exists
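
For example, a hypothetical object-template that uses `mustnothave` to flag (or remove, when the policy is enforced) a namespace named `forbidden-namespace` could look like this -

```
object-templates:
  - complianceType: mustnothave
    objectDefinition:
      kind: Namespace
      apiVersion: v1
      metadata:
        name: forbidden-namespace
```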
142 |
143 | ### Stage 2 - Allow traffic from the Ingress Controller
144 |
145 | In this section, you will modify the policy you have created in the previous section. You will add another objectDefinition entry to the policy. The objectDefinition will apply a second NetworkPolicy object onto the webserver-acm namespace in the managed cluster. The NetworkPolicy object will allow traffic from the Ingress Controller to reach the webserver application on port 8080. An example definition of the NetworkPolicy object -
146 |
147 | ```
148 | apiVersion: networking.k8s.io/v1
149 | kind: NetworkPolicy
150 | metadata:
151 | name: allow-from-openshift-ingress
152 | spec:
153 |   ingress:
154 |   - ports:
155 |     - protocol: TCP
156 |       port: 8080
157 |     from:
158 |     - namespaceSelector:
159 |         matchLabels:
160 |           network.openshift.io/policy-group: ingress
161 |   podSelector: {}
162 |   policyTypes:
163 |   - Ingress
164 | ```
165 |
166 | Adding the NetworkPolicy to the existing policy can be done by running the next command -
167 |
168 | ```
169 | $ cat >> networkpolicy-policy.yaml << EOF
170 | ---
171 | apiVersion: policy.open-cluster-management.io/v1
172 | kind: Policy
173 | metadata:
174 |   name: policy-networkpolicy-webserver
175 |   namespace: rhacm-policies
176 |   annotations:
177 |     policy.open-cluster-management.io/standards: NIST SP 800-53
178 |     policy.open-cluster-management.io/categories: SC System and Communications Protection
179 |     policy.open-cluster-management.io/controls: SC-7 Boundary Protection
180 | spec:
181 |   remediationAction: enforce
182 |   disabled: false
183 |   policy-templates:
184 |     - objectDefinition:
185 |         apiVersion: policy.open-cluster-management.io/v1
186 |         kind: ConfigurationPolicy
187 |         metadata:
188 |           name: policy-networkpolicy-denyall-webserver
189 |         spec:
190 |           remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
191 |           severity: medium
192 |           namespaceSelector:
193 |             include: ["webserver-acm"]
194 |           object-templates:
195 |             - complianceType: musthave
196 |               objectDefinition:
197 |                 kind: NetworkPolicy
198 |                 apiVersion: networking.k8s.io/v1
199 |                 metadata:
200 |                   name: deny-by-default
201 |                 spec:
202 |                   podSelector:
203 |                   ingress: []
204 |     - objectDefinition:
205 |         apiVersion: policy.open-cluster-management.io/v1
206 |         kind: ConfigurationPolicy
207 |         metadata:
208 |           name: policy-networkpolicy-allow-ingress-webserver
209 |         spec:
210 |           remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
211 |           severity: medium
212 |           namespaceSelector:
213 |             include: ["webserver-acm"]
214 |           object-templates:
215 |             - complianceType: musthave
216 |               objectDefinition:
217 |                 kind: NetworkPolicy
218 |                 apiVersion: networking.k8s.io/v1
219 |                 metadata:
220 |                   name: allow-ingress-8080
221 |                 spec:
222 |                   ingress:
223 |                   - ports:
224 |                     - protocol: TCP
225 |                       port: 8080
226 |                     from:
227 |                     - namespaceSelector:
228 |                         matchLabels:
229 |                           network.openshift.io/policy-group: ingress
230 |                   podSelector: {}
231 |                   policyTypes:
232 |                   - Ingress
233 | ---
234 | apiVersion: policy.open-cluster-management.io/v1
235 | kind: PlacementBinding
236 | metadata:
237 |   name: binding-policy-networkpolicy-webserver
238 |   namespace: rhacm-policies
239 | placementRef:
240 |   name: prod-policies-clusters
241 |   kind: PlacementRule
242 |   apiGroup: apps.open-cluster-management.io
243 | subjects:
244 | - name: policy-networkpolicy-webserver
245 |   kind: Policy
246 |   apiGroup: policy.open-cluster-management.io
247 | EOF
248 |
249 | $ oc apply -f networkpolicy-policy.yaml
250 | ```
251 |
252 | After applying the above policy, the application will be reachable from OpenShift’s ingress controller only. Any other traffic will be dropped.
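
To optionally verify that in-cluster traffic is blocked, you can start a temporary pod in another namespace on the managed cluster and try to reach the webserver Service. The Service name below is an assumption - adjust it to the Service defined in the webserver-acm namespace. The request should time out -

```
$ oc run netpol-test --image=registry.access.redhat.com/ubi8/ubi --rm -it --restart=Never -n default -- curl -m 5 http://webserver.webserver-acm.svc:8080/application.html
```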
253 |
254 | Make sure that the managed cluster is compliant with the policy by navigating to **Governance** -> **Policies** in the Red Hat Advanced Cluster Management for Kubernetes console.
255 |
256 | 
257 |
258 | Make sure that the application is now accessible at - **https://<webserver application route>/application.html**.
259 |
260 | ## Policy #2 - Quota Management
261 |
262 | In this section you will apply a LimitRange object onto the cluster in order to limit the application’s resource consumption. You will configure a LimitRange object that limits the application’s container memory to 512Mi.
263 |
264 | The policy you will create defines the next LimitRange object in the webserver-acm namespace -
265 |
266 | ```
267 | apiVersion: v1
268 | kind: LimitRange # limit memory usage
269 | metadata:
270 |   name: webserver-limit-range
271 | spec:
272 |   limits:
273 |   - default:
274 |       memory: 512Mi
275 |     defaultRequest:
276 |       memory: 256Mi
277 |     type: Container
278 | ```
279 |
280 | In order to apply the LimitRange object to the managed cluster using Red Hat Advanced Cluster Management for Kubernetes, run the next commands -
281 |
282 | ```
283 | $ cat >> limitrange-policy.yaml << EOF
284 | apiVersion: policy.open-cluster-management.io/v1
285 | kind: Policy
286 | metadata:
287 |   name: policy-limitrange
288 |   namespace: rhacm-policies
289 |   annotations:
290 |     policy.open-cluster-management.io/standards: NIST SP 800-53
291 |     policy.open-cluster-management.io/categories: SC System and Communications Protection
292 |     policy.open-cluster-management.io/controls: SC-6 Resource Availability
293 | spec:
294 |   remediationAction: enforce
295 |   disabled: false
296 |   policy-templates:
297 |     - objectDefinition:
298 |         apiVersion: policy.open-cluster-management.io/v1
299 |         kind: ConfigurationPolicy
300 |         metadata:
301 |           name: policy-limitrange-example
302 |         spec:
303 |           remediationAction: enforce # the policy-template spec.remediationAction is overridden by the preceding parameter value for spec.remediationAction.
304 |           severity: medium
305 |           namespaceSelector:
306 |             include: ["webserver-acm"]
307 |           object-templates:
308 |             - complianceType: mustonlyhave
309 |               objectDefinition:
310 |                 apiVersion: v1
311 |                 kind: LimitRange # limit memory usage
312 |                 metadata:
313 |                   name: webserver-limit-range
314 |                 spec:
315 |                   limits:
316 |                   - default:
317 |                       memory: 512Mi
318 |                     defaultRequest:
319 |                       memory: 256Mi
320 |                     type: Container
321 | ---
322 | apiVersion: policy.open-cluster-management.io/v1
323 | kind: PlacementBinding
324 | metadata:
325 |   name: binding-policy-limitrange
326 |   namespace: rhacm-policies
327 | placementRef:
328 |   name: prod-policies-clusters
329 |   kind: PlacementRule
330 |   apiGroup: apps.open-cluster-management.io
331 | subjects:
332 | - name: policy-limitrange
333 |   kind: Policy
334 |   apiGroup: policy.open-cluster-management.io
335 | EOF
336 |
337 | $ oc apply -f limitrange-policy.yaml
338 | ```
339 |
340 | Make sure that the managed cluster is compliant with the policy by navigating to **Governance** -> **Policies** in the Red Hat Advanced Cluster Management for Kubernetes console.
341 |
342 | Validate that the LimitRange object was created in the webserver-acm namespace on your managed cluster -
345 |
346 | ```
347 | $ oc get limitrange webserver-limit-range -o yaml -n webserver-acm
348 | ```
349 |
350 | As the admin user on the managed cluster, try to modify the values of the LimitRange resource (change the memory limit from 512Mi to 1024Mi) -
351 |
352 | ```
353 | $ oc whoami
354 | admin
355 |
356 | $ oc edit limitrange/webserver-limit-range -n webserver-acm
357 | ```
358 |
359 | Notice that if you list the LimitRange resource again, the value of the memory limit is back to 512Mi. The 1024Mi value was overridden by Red Hat Advanced Cluster Management’s policy controller. Changing the LimitRange’s values is only possible by editing the Policy object on the hub cluster.
360 |
361 | ```
362 | $ oc get limitrange webserver-limit-range -o yaml -n webserver-acm
363 | ...
364 |   limits:
365 |   - default:
366 |       memory: 512Mi
367 | ...
368 | ```
369 |
370 | ## Policy #3 - Namespace Management
371 |
372 | In this section, you will create a policy that `informs` if a namespace with the name `rhacm-dangerous-policy-namespace` is present. Make sure to create the policy in the `rhacm-policies` namespace. You may use the workshop presentation and the policies you've created in this exercise as a reference for creating this policy.
373 |
374 | After deploying the policy, make sure that it is in a `compliant` state.
375 |
376 | Create a namespace with the name `rhacm-dangerous-policy-namespace` on the managed cluster. Make sure that a violation is raised.
377 |
378 | ```
379 | $ oc create namespace rhacm-dangerous-policy-namespace
380 | ```
381 |
382 | Change the remediationAction in your policy to `enforce`. The violation should be remediated.
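
The remediation action can be changed either by editing the Policy resource in the RHACM console or from the CLI on the hub cluster; for example (replace `<your-policy-name>` with the name you gave your policy) -

```
$ oc patch policies.policy.open-cluster-management.io <your-policy-name> -n rhacm-policies --type=merge -p '{"spec":{"remediationAction":"enforce"}}'
```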
383 |
384 | ## Using GitOps
385 |
386 | In this section you will use RHACM’s built-in GitOps mechanism to manage your policies. You will deploy the above policies, and manage them in a GitOps-friendly way.
387 |
388 | Before you start this section of the exercise, make sure you delete the namespace containing the policies you used in the previous section.
389 |
390 | ```
391 | $ oc delete project rhacm-policies
392 | ```
393 |
394 | 1. For this exercise, create a fork of the next GitHub repository - [https://github.com/michaelkotelnikov/rhacm-workshop](https://github.com/michaelkotelnikov/rhacm-workshop)
395 |
396 | As a result, you will have your own version of the repository - [https://github.com/<your-username>/rhacm-workshop](https://github.com/michaelkotelnikov/rhacm-workshop)
397 |
398 | 2. Afterwards, create the namespace in which you will deploy the RHACM resources (use the namespace.yaml file from the forked repository) -
399 |
400 | ```
401 | $ oc apply -f https://raw.githubusercontent.com/<your-username>/rhacm-workshop/master/05.Governance-Risk-Compliance/exercise/namespace.yaml
402 | ```
403 |
404 | 3. Now, clone the official policy-collection GitHub repository to your machine. The repository contains a script named **deploy.sh**. The script is used to associate policies in a GitHub repository with a running Red Hat Advanced Cluster Management for Kubernetes cluster.
405 |
406 | ```
407 | $ git clone https://github.com/open-cluster-management/policy-collection.git
408 |
409 | $ cd policy-collection/deploy/
410 | ```
411 |
412 | 4.a. If you are using the kubeadmin user, create an identity provider by running the next commands (It is not possible to create policies via GitOps using the kubeadmin user). The identity provider will create the `workshop-admin` user -
413 |
414 | ```
415 | $ htpasswd -c -B -b htpasswd workshop-admin redhat
416 |
417 | $ oc create secret generic localusers --from-file htpasswd=htpasswd -n openshift-config
418 |
419 | $ oc adm policy add-cluster-role-to-user cluster-admin workshop-admin
420 |
421 | $ oc get -o yaml oauth cluster > oauth.yaml
422 | ```
423 |
424 | 4.b. Edit the `oauth.yaml` file. The result should look like -
425 |
426 | ```
427 | apiVersion: config.openshift.io/v1
428 | kind: OAuth
429 | ...output omitted...
430 | spec:
431 |   identityProviders:
432 |   - htpasswd:
433 |       fileData:
434 |         name: localusers
435 |     mappingMethod: claim
436 |     name: local-users
437 |     type: HTPasswd
438 | ```
439 |
440 | 4.c. Replace the cluster's identity provider by running the next command -
441 |
442 | ```
443 | $ oc replace -f oauth.yaml
444 | ```
445 |
446 | 4.d. Log in with the created user -
447 |
448 | ```
449 | $ oc login -u workshop-admin -p redhat
450 | ```
451 |
452 | 5. Run the next command to allow your user to deploy policies via Git (if you're not using the `workshop-admin` user to run the command, make sure to edit the command in order to associate your user with the `open-cluster-management:subscription-admin` ClusterRoleBinding. Make sure to run the command even if you are using an administrative user!) -
453 |
454 | ```
455 | $ oc patch clusterrolebinding.rbac open-cluster-management:subscription-admin -p '{"subjects": [{"apiGroup":"rbac.authorization.k8s.io", "kind":"User", "name":"workshop-admin"}]}'
456 | ```
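
If you are logged in as a different user, a variant like the following substitutes your current username into the same patch (assuming `oc whoami` returns the username as RHACM sees it) -

```
$ oc patch clusterrolebinding.rbac open-cluster-management:subscription-admin -p "{\"subjects\": [{\"apiGroup\":\"rbac.authorization.k8s.io\", \"kind\":\"User\", \"name\":\"$(oc whoami)\"}]}"
```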
457 |
458 | 6. You can now deploy the policies from your forked repository to Advanced Cluster Management.
459 |
460 | ```
461 | $ ./deploy.sh --url https://github.com/<your-username>/rhacm-workshop.git --branch master --path 05.Governance-Risk-Compliance/exercise/exercise-policies --namespace rhacm-policies
462 | ```
463 |
464 | 7. Make sure that the policies are deployed in the **Governance** -> **Policies** tab in the Advanced Cluster Management for Kubernetes console.
465 |
466 | 
467 |
468 |
469 | 8. Edit the LimitRange policy in [https://github.com/<your-username>/rhacm-workshop/blob/master/05.Governance-Risk-Compliance/exercise/exercise-policies/limitrange-policy.yaml](https://github.com/michaelkotelnikov/rhacm-workshop/blob/master/05.Governance-Risk-Compliance/exercise/exercise-policies/limitrange-policy.yaml). Change the default container memory limit from 512Mi to 1024Mi.
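
After the change, the relevant object-template in the policy should look roughly like this (only the `default` memory value changes) -

```
object-templates:
  - complianceType: mustonlyhave
    objectDefinition:
      apiVersion: v1
      kind: LimitRange
      metadata:
        name: webserver-limit-range
      spec:
        limits:
        - default:
            memory: 1024Mi
          defaultRequest:
            memory: 256Mi
          type: Container
```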
470 |
471 | 9. Make sure that you commit and push the change to your fork.
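
From a local clone of your fork, the change can be committed and pushed like this (the file path and branch name assume the repository layout used in this workshop) -

```
$ git add 05.Governance-Risk-Compliance/exercise/exercise-policies/limitrange-policy.yaml

$ git commit -m "Increase default container memory limit to 1024Mi"

$ git push origin master
```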
472 |
473 | 10. Log into the managed cluster. Make sure that the change in GitHub was applied to the LimitRange resource.
474 |
475 | ```
476 | $ oc get limitrange webserver-limit-range -o yaml -n webserver-acm
477 | ...
478 |   limits:
479 |   - default:
480 |       memory: 1Gi
481 | ...
482 | ```
483 |
484 | ## Templating Policies
485 |
486 | In this section you will use RHACM's templating mechanism for governance policies. In this scenario, you will create an RHACM application. The application deploys a mariadb database and a Prometheus exporter ([mysqld-exporter](https://github.com/prometheus/mysqld_exporter)) that connects to the database and exports metrics.
487 |
488 | The mysqld-exporter requires mariadb's connection information in order to connect to the database and export the metrics. Since secrets like _database passwords_ can be automatically generated in production environments, it might be required to use a dynamic template that passes such information to the exporter's configuration.
489 |
490 | In this scenario, you will pass two templated variables to the mysqld-exporter deployment using a dedicated ConfigMap resource. The variables are merged into a single *connection string* that the exporter uses to connect to the mariadb database.
491 |
492 | - _mariadb Service endpoint_ - The ConfigMap will populate the mariadb Service resource ClusterIP dynamically. The service endpoint might differ between managed clusters, so using a template in this scenario helps keep the configuration correct on each cluster. The `lookup` function is used to identify the service's ClusterIP - `{{ (lookup "v1" "Service" "mariadb-metrics" "mariadb").spec.clusterIP }}`.
493 | - _mariadb Root password_ - The ConfigMap will provide the connection password dynamically. The password can be different for database instances in multi-cluster environments, so using a template in this scenario can solve inconsistencies between clusters. The `fromSecret` function is used to pull the password from mariadb's secret - `{{ fromSecret "mariadb-metrics" "mariadb" "MYSQL_ROOT_PASSWORD"}}`.
494 |
495 | To further understand the structure of the application, go over the [application resources](exercise/exercise-application). All of the application resources are present in this directory except for the ConfigMap resource, which is created using a templated policy.
496 |
497 | The next [templated policy](exercise/exercise-templates/metrics-configmap.yaml) is used to create the ConfigMap resource that the exporter uses as a connection string -
498 |
499 | ```
500 | kind: ConfigMap
501 | apiVersion: v1
502 | metadata:
503 |   name: metrics-connection-string
504 |   namespace: mariadb-metrics
505 | data:
506 |   connection_string: 'root:{{ fromSecret "mariadb-metrics" "mariadb" "MYSQL_ROOT_PASSWORD"}}@({{ (lookup "v1" "Service" "mariadb-metrics" "mariadb").spec.clusterIP }}:3306)/'
507 | ```
508 |
509 | Deploy the templated policy by running the next command on the hub cluster -
510 |
511 | ```
512 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/05.Governance-Risk-Compliance/exercise/exercise-templates/metrics-configmap.yaml
513 | ```
514 |
515 | The policy will appear in the Governance dashboard in a non-compliant state. The policy depends on the `mariadb` Secret resource and the `mariadb` Service resource. Since you have not created them yet, the policy is not able to create the desired ConfigMap resource.
516 |
517 | Deploy the mariadb-metrics application in order to create the mariadb and exporter instances. Deploy the application by running the next command -
518 |
519 | ```
520 | $ oc apply -f https://raw.githubusercontent.com/michaelkotelnikov/rhacm-workshop/master/05.Governance-Risk-Compliance/exercise/exercise-application/rhacm-resources/application.yaml
521 | ```
522 |
523 | Wait until the application is available, then make sure that the policy you have deployed is compliant in the Governance dashboard. Verify that the template worked by running the next command on the managed cluster.
524 |
525 | ```
526 | $ oc get configmap metrics-connection-string -o yaml -n mariadb-metrics
527 | apiVersion: v1
528 | data:
529 |   connection_string: root:cmVkaGF0@(172.30.14.60:3306)/
530 | kind: ConfigMap
531 | metadata:
532 |   name: metrics-connection-string
533 |   namespace: mariadb-metrics
534 | ```
535 |
536 | Navigate to the URL exposed by the `Route` resource in the `mariadb-metrics` namespace. The `Route` exposes the mariadb metrics from the exporter instance.
537 |
538 | ```
539 | $ oc get route -n mariadb-metrics
540 | NAME              HOST/PORT                                                                        PATH   SERVICES          PORT       TERMINATION   WILDCARD
541 | mysqld-exporter   mysqld-exporter-mariadb-metrics.apps.cluster-6f0a.6f0a.sandbox664.opentlc.com          mysqld-exporter   9104-tcp   edge          None
542 | ```
543 |
544 | The mariadb metrics can be viewed by running the next command -
545 |
546 | ```
547 | $ curl https://<mysqld-exporter route>/metrics -k
548 | ...
549 | # HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
550 | # TYPE go_memstats_heap_inuse_bytes gauge
551 | go_memstats_heap_inuse_bytes 3.80928e+06
552 | # HELP go_memstats_heap_objects Number of allocated objects.
553 | # TYPE go_memstats_heap_objects gauge
554 | go_memstats_heap_objects 7487
555 | # HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
556 | # TYPE go_memstats_heap_released_bytes gauge
557 | go_memstats_heap_released_bytes 6.270976e+07
558 | # HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
559 | # TYPE go_memstats_heap_sys_bytes gauge
560 | go_memstats_heap_sys_bytes 6.668288e+07
561 | # HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
562 | # TYPE go_memstats_last_gc_time_seconds gauge
563 | go_memstats_last_gc_time_seconds 0
564 | # HELP go_memstats_lookups_total Total number of pointer lookups.
565 | # TYPE go_memstats_lookups_total counter
566 | go_memstats_lookups_total 0
567 | # HELP go_memstats_mallocs_total Total number of mallocs.
568 | # TYPE go_memstats_mallocs_total counter
569 | go_memstats_mallocs_total 8093
570 | # HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
571 | # TYPE go_memstats_mcache_inuse_bytes gauge
572 | go_memstats_mcache_inuse_bytes 19200
573 | # HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
574 | # TYPE go_memstats_mcache_sys_bytes gauge
575 | go_memstats_mcache_sys_bytes 32768
576 | # HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
577 | # TYPE go_memstats_mspan_inuse_bytes gauge
578 | go_memstats_mspan_inuse_bytes 63376
579 | # HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
580 | # TYPE go_memstats_mspan_sys_bytes gauge
581 | go_memstats_mspan_sys_bytes 65536
582 | # HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
583 | # TYPE go_memstats_next_gc_bytes gauge
584 | go_memstats_next_gc_bytes 4.473924e+06
585 | ...
586 | ```
587 |
--------------------------------------------------------------------------------