├── 1.md
├── 2.md
├── 3.md
├── 4.md
├── 5.md
├── 6.md
├── 7.md
├── 8.md
├── 9.md
├── README.md
├── assets
│   ├── demo-arch.png
│   ├── federated-mongo-arch.png
│   ├── gitops.png
│   ├── lab-4-topology.png
│   ├── lab-5-topology.png
│   ├── lab-6-topology.png
│   ├── lab-7-topology.png
│   ├── lab-env-overview.png
│   ├── ocp-router-1.gif
│   ├── tip-icon.png
│   └── warning-icon.png
├── haproxy-yaml
│   ├── haproxy
│   ├── haproxy-clusterip-service.yaml
│   ├── haproxy-deployment.yaml
│   ├── haproxy.tmpl
│   └── old
│       ├── haproxy.backup
│       ├── haproxy.denemeler
│       ├── haproxy.last
│       ├── haproxy.yedek
│       ├── haproxy2
│       ├── hcacert.pem
│       ├── heisenbug.pem
│       ├── mongo-secret.yaml
│       ├── pemdeneme.pem
│       └── sslhaproxy-deployment.yaml
├── lab-4-acm
│   ├── 01_namespace.yaml
│   ├── 02_channel.yaml
│   ├── 03_application.yaml
│   ├── 04_placementrule_cluster1only.yaml
│   └── 05_subscription.yaml
├── lab-4-assets
│   ├── deployment.yaml
│   ├── namespace.yaml
│   └── service.yaml
├── lab-5-acm
│   ├── 01_namespace.yaml
│   ├── 02_channel.yaml
│   ├── 03_application_webapp.yaml
│   ├── 04_placement_cluster1.yaml
│   ├── 04_placement_cluster2.yaml
│   ├── 04_placement_cluster3.yaml
│   ├── 05_subscription_cluster1.yaml
│   ├── 05_subscription_cluster2.yaml
│   └── 05_subscription_cluster3.yaml
├── lab-5-assets
│   ├── base
│   │   ├── configmap.yaml
│   │   ├── deployment.yaml
│   │   ├── kustomization.yaml
│   │   ├── namespace.yaml
│   │   ├── route.yaml
│   │   ├── route.yaml.backup
│   │   └── service.yaml
│   └── overlays
│       ├── cluster1
│       │   ├── configmap.yaml
│       │   ├── deployment.yaml
│       │   ├── kustomization.yaml
│       │   ├── route.yaml
│       │   └── route.yaml.backup
│       ├── cluster2
│       │   ├── configmap.yaml
│       │   ├── deployment.yaml
│       │   ├── kustomization.yaml
│       │   ├── route.yaml
│       │   └── route.yaml.backup
│       └── cluster3
│           ├── configmap.yaml
│           ├── deployment.yaml
│           ├── kustomization.yaml
│           ├── route.yaml
│           └── route.yaml.backup
├── lab-6-acm
│   ├── 01_namespace.yaml
│   ├── 02_channel.yaml
│   ├── 03_application_mongo.yaml
│   ├── 04_placement_cluster1.yaml
│   ├── 04_placement_cluster2.yaml
│   ├── 04_placement_cluster3.yaml
│   ├── 05_subscription_cluster1.yaml
│   ├── 05_subscription_cluster2.yaml
│   └── 05_subscription_cluster3.yaml
├── lab-6-assets
│   ├── .gitignore
│   ├── base
│   │   ├── kustomization.yaml
│   │   ├── mongo-pvc.yaml
│   │   ├── mongo-route.yaml
│   │   ├── mongo-rs-deployment.yaml
│   │   ├── mongo-rs-deployment.yaml.backup
│   │   ├── mongo-secret.yaml
│   │   ├── mongo-secret.yaml.backup
│   │   ├── mongo-service.yaml
│   │   └── namespace.yaml
│   ├── ca-key.pem
│   ├── ca.csr
│   ├── ca.pem
│   ├── exportvariables
│   ├── mongodb.csr
│   └── overlays
│       ├── cluster1
│       │   ├── kustomization.yaml
│       │   ├── mongo-route.yaml
│       │   └── mongo-route.yaml.backup
│       ├── cluster2
│       │   ├── kustomization.yaml
│       │   ├── mongo-route.yaml
│       │   └── mongo-route.yaml.backup
│       └── cluster3
│           ├── kustomization.yaml
│           ├── mongo-route.yaml
│           └── mongo-route.yaml.backup
├── lab-7-acm
│   ├── 01_namespace.yaml
│   ├── 02_channel.yaml
│   ├── 03_application_pacman.yaml
│   ├── 04_placement_cluster1.yaml
│   ├── 04_placement_cluster2.yaml
│   ├── 04_placement_cluster3.yaml
│   ├── 05_subscription_cluster1.yaml
│   ├── 05_subscription_cluster2.yaml
│   └── 05_subscription_cluster3.yaml
└── lab-7-assets
    ├── base
    │   ├── kustomization.yaml
    │   ├── pacman-cluster-role-binding.yaml
    │   ├── pacman-cluster-role.yaml
    │   ├── pacman-deployment.yaml
    │   ├── pacman-deployment.yaml.backup
    │   ├── pacman-namespace.yaml
    │   ├── pacman-route.yaml
    │   ├── pacman-route.yaml.backup
    │   ├── pacman-secret.yaml
    │   ├── pacman-service-account.yaml
    │   └── pacman-service.yaml
    └── overlays
        ├── cluster1
        │   ├── kustomization.yaml
        │   └── pacman-deployment.yaml
        ├── cluster2
        │   ├── kustomization.yaml
        │   └── pacman-deployment.yaml
        └── cluster3
            ├── kustomization.yaml
            └── pacman-deployment.yaml
/1.md:
--------------------------------------------------------------------------------
# Introduction and Prerequisites
This lab assumes that you want to try the RHACM GitOps functions and that you have one hub cluster and three managed clusters.
The managed clusters have already been added to the hub cluster.


At this point you should be logged in to the BastionVM and you should have the information for all three environments defined by the GuidGrabber.

During this lab we will use RHACM to define the clusters and repositories that the RHACM deployment will manage.

We will use the [cfssl tooling](https://cfssl.org/) when dealing with TLS certificates.

## Verifications

Verify that RHACM is working:

~~~sh
oc config use-context hubcluster
oc get multiclusterhub -A

NAMESPACE                 NAME              AGE
open-cluster-management   multiclusterhub   4d19h
~~~

Verify that [cfssl](https://github.com/cloudflare/cfssl/blob/master/README.md#using-the-command-line-tool) is working:

~~~sh
cfssl version

Version: 1.2.0
Revision: dev
Runtime: go1.6
~~~

Verify that [cfssljson](https://github.com/cloudflare/cfssl/blob/master/README.md#using-the-command-line-tool) is working:

~~~sh
cfssljson -help

Usage of cfssljson:
~~~

***Please note:*** Be sure to clone this repo under your GitHub account. You'll need to edit the lab resources and make them available on your GitHub repo during the lab.

[Home](./README.md)
--------------------------------------------------------------------------------
/2.md:
--------------------------------------------------------------------------------

## Configure OpenShift client context for cluster admin access

You should have three OCP managed clusters and one OCP hub cluster.



~~~sh
# Login into the hub cluster
oc login -u admin -p XXXX --insecure-skip-tls-verify https://api.YOURHUBCLUSTER.DOMAIN:6443
# Set the name of the context
oc config rename-context $(oc config current-context) hubcluster
# Login into the 1st cluster (A environment)
oc login -u admin -p XXXX --insecure-skip-tls-verify https://api.YOURCLUSTER1.DOMAIN:6443
# Set the name of the context
oc config rename-context $(oc config current-context) cluster1
# Login into the 2nd cluster (B environment)
oc login -u admin -p XXXX --insecure-skip-tls-verify https://api.YOURCLUSTER2.DOMAIN:6443
# Set the name of the context
oc config rename-context $(oc config current-context) cluster2
# Login into the 3rd cluster (C environment)
oc login -u admin -p XXXX --insecure-skip-tls-verify https://api.YOURCLUSTER3.DOMAIN:6443
# Set the name of the context
oc config rename-context $(oc config current-context) cluster3
~~~
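
If you want a quick overview of all the contexts that now exist in your kubeconfig, `oc config get-contexts` lists them by name (a convenience check; the output below is illustrative):

~~~sh
# List all configured context names
oc config get-contexts -o name

cluster1
cluster2
cluster3
hubcluster
~~~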

Confirm the functionality of the newly configured contexts. You will see, as you switch contexts and list nodes, that you are indeed switching to different clusters. Close out this validation by switching back to the `cluster1` context.

~~~sh
# Switch to hub
oc config use-context hubcluster
# Switch to cluster1
oc config use-context cluster1
# List the nodes in cluster1
oc get nodes
# Switch to cluster2
oc config use-context cluster2
# List the nodes in cluster2
oc get nodes
# Switch to cluster3
oc config use-context cluster3
# List the nodes in cluster3
oc get nodes
# Switch back to cluster1
oc config use-context cluster1
~~~

After this, our current client context is `admin` in `cluster1`.

~~~sh
# List the context currently being used and the user
oc config current-context && oc whoami

cluster1
admin
~~~


--------------------------------------------------------------------------------
/3.md:
--------------------------------------------------------------------------------
# Git Ops Introduction
RHACM also has GitOps capabilities for deploying and managing Kubernetes services. The term GitOps is applied to applications or tools that are used to deploy YAML files that are stored within a git repository. This repository can be a private repository on GitHub or GitLab, hosted on premises, or a public git repository. GitOps tools can be triggered to deploy or update Kubernetes objects either manually, through a webhook, or automatically. Some GitOps tools even offer the ability to automatically prune or remove resources that are not currently defined within the git repository.

* Configuration is “pulled” into an environment, similar to running a `git pull` on your system
* Some tools require a component to be installed on all clusters. These tools will pull the objects into the cluster.
* Some tools do not require any remote resources to be running. Permissions are created on the remote clusters and configuration is pushed to the cluster.
* Templating is available with [Helm](https://helm.sh/) or [Kustomize](https://kustomize.io/)
* Quite simply, “kubectl apply -f $path” runs over and over
* No Kubernetes object type creation limitations

![GitOps](assets/gitops.png)

## RHACM

## Installing Red Hat Advanced Cluster Management for Kubernetes

RHACM can be installed through an Operator via the Red Hat Operators Catalog.
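
For orientation, an Operator-based install boils down to a Namespace, an OperatorGroup, and a Subscription, plus a `MultiClusterHub` resource once the operator is running. The sketch below is illustrative only (the channel name depends on the RHACM version); follow the official documentation linked below for the authoritative steps:

~~~yaml
apiVersion: v1
kind: Namespace
metadata:
  name: open-cluster-management
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: acm-operator-group
  namespace: open-cluster-management
spec:
  targetNamespaces:
    - open-cluster-management
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: acm-operator-subscription
  namespace: open-cluster-management
spec:
  # Channel name is an assumption; pick the one matching your RHACM release
  channel: release-2.0
  name: advanced-cluster-management
  source: redhat-operators
  sourceNamespace: openshift-marketplace
~~~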

You can follow the public documentation available here: https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/1.0/html/install/

## Accessing the Web UI

You can get the Web UI URL for your freshly installed RHACM with this command:

~~~sh
oc --context hubcluster -n open-cluster-management get route multicloud-console -o jsonpath="{.status.ingress[*].host}{\"\n\"}"
~~~

## Adding Clusters

After accessing the RHACM Web UI you can import your clusters following the official documentation: https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/1.0/html/manage_cluster/index

## List the available clusters

After completing the import you should be able to see the newly imported clusters:

~~~sh
oc get managedclusters -A --show-labels

as4test     as4test     api.as4test.lp.int:6443           Ready   1d
heisenbug   heisenbug   api.heisenbug.lplab.online:6443   Ready   1d
jelly       jelly       api.jelly.lplab.online:6443       Ready   1d
~~~

## Labeling the imported (or created) clusters

To achieve the right placement in the next labs, you'll need to set a label identifying your newly imported (or created) clusters.

***Please note: pay attention to assign the right label to the respective cluster.***

~~~sh
oc label managedcluster -n YOUR_ACM_CLUSTER1_NAME YOUR_ACM_CLUSTER1_NAME clusterid=cluster1
oc label managedcluster -n YOUR_ACM_CLUSTER2_NAME YOUR_ACM_CLUSTER2_NAME clusterid=cluster2
oc label managedcluster -n YOUR_ACM_CLUSTER3_NAME YOUR_ACM_CLUSTER3_NAME clusterid=cluster3
~~~

You can check the correct labeling by executing this command:

~~~sh
oc get managedclusters -A --show-labels
~~~


[Home](./README.md)
--------------------------------------------------------------------------------
/4.md:
--------------------------------------------------------------------------------
# Deploying and Managing a Project with GitOps

Now that we have the clusters defined, let's deploy a project which includes a service, namespace, and deployment.

This lab will use cluster1 only. The first step is to ensure that no resources currently exist in the *simple-app* namespace.
~~~sh
# Use the `oc` command to list deployments
oc --context cluster1 -n simple-app get deployment

No resources found.
~~~

## Deploy the application

The simple application we are using is an Apache web server serving a default index page.

The simple application includes the following resources:

- A [Namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) for the deployment and service.
- A [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) of an Apache web server.
- A [Service](https://kubernetes.io/docs/concepts/services-networking/service/).

The [lab-4-assets](./lab-4-assets) directory contains the definitions to deploy these resources.
For example: the [simple Apache deployment template](./lab-4-assets/deployment.yaml)
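
Under the hood, the RHACM pieces you are about to create wire together as follows: a `Channel` points at the git repository, an `Application` groups the deployed resources, and a `Subscription` ties the channel to a `PlacementRule` that selects the target clusters. A minimal sketch of the channel and subscription (names and paths here are illustrative; the real definitions live in [lab-4-acm](./lab-4-acm)):

~~~yaml
apiVersion: apps.open-cluster-management.io/v1
kind: Channel
metadata:
  name: simple-app-channel
  namespace: simple-app-ns
spec:
  type: GitHub
  pathname: https://github.com/YOUR_GITHUB_USERNAME/rhacmgitopslab.git
---
apiVersion: apps.open-cluster-management.io/v1
kind: Subscription
metadata:
  name: simple-app-subscription
  namespace: simple-app-ns
  annotations:
    # Only the manifests under this repo path are deployed
    apps.open-cluster-management.io/github-path: lab-4-assets
spec:
  channel: simple-app-ns/simple-app-channel
  placement:
    placementRef:
      kind: PlacementRule
      name: placement-cluster1
~~~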


1- Change directory to `lab-4-acm`
~~~sh
cd rhacmgitopslab/lab-4-acm/
~~~

2- Make sure the "hubcluster" context is loaded
~~~sh
oc config use-context hubcluster
~~~

3- Create the namespace
~~~sh
oc create -f 01_namespace.yaml
~~~

4- Create the channel
~~~sh
oc create -f 02_channel.yaml
~~~

5- Create the application
~~~sh
oc create -f 03_application.yaml
~~~

6- Create the placement rule that places the application only on OpenShift cluster1
~~~sh
oc create -f 04_placementrule_cluster1only.yaml
~~~

7- Create the subscription
~~~sh
oc create -f 05_subscription.yaml
~~~


## Verify that the application is running

Verify that the various resources have been deployed.

~~~sh
# The command below will display objects in the simple-app namespace
oc --context cluster1 -n simple-app get deployments,services,pods
~~~

Expose a route to allow for external ingress and verify the application can be queried.

~~~sh
# Expose the route for the httpd service
oc --context=cluster1 -n simple-app expose service httpd
# Get the Route hostname
url="http://$(oc --context=cluster1 -n simple-app get route httpd -o jsonpath='{.spec.host}')"
# We will wait 5 seconds to allow for proper propagation
sleep 5
# Access the route
curl -s $url | grep "This page"
~~~

## State Recovery

We are going to delete the deployment and watch as RHACM notices that the status is *unhealthy* and redeploys it.

~~~sh
# Use the `oc` command to delete the httpd deployment object
oc --context cluster1 -n simple-app delete deployment httpd
~~~

As you can see, the deployment is missing.
~~~sh
# Use the `oc` command to list deployments in the simple-app namespace
oc --context cluster1 -n simple-app get deployments
~~~

The deployment is missing, so let's check that RHACM is redeploying it.

~~~sh
# Use this `oc` command to check the application status
oc describe deployable -n simple-app httpd-deployable

...
Statuses:
  /:
    Packages:
      Federation - Deployment - Httpd:
        Last Update Time:  2020-06-03T08:18:48Z
        Phase:             Subscribed
        Resource Status:
          Available Replicas:  1
          Conditions:
            Last Transition Time:  2020-06-03T08:17:15Z
            Last Update Time:      2020-06-03T08:17:15Z
            Message:               Deployment has minimum availability.
            Reason:                MinimumReplicasAvailable
            Status:                True
            Type:                  Available
            Last Transition Time:  2020-06-03T08:16:39Z
            Last Update Time:      2020-06-03T08:17:15Z
            Message:               ReplicaSet "httpd-54b9dfb679" has successfully progressed.
            Reason:                NewReplicaSetAvailable
            Status:                True
            Type:                  Progressing
          Observed Generation:     1
          Ready Replicas:          1
          Replicas:                1
          Updated Replicas:        1
~~~

Verify the deployment is once again present after the synchronization.

***Please note: you may have to wait a few seconds before the resources are correctly re-created.***
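
If you prefer not to re-run the check by hand, a small polling loop works too (a convenience sketch):

~~~sh
# Poll until RHACM has re-created the httpd deployment
until oc --context cluster1 -n simple-app get deployment httpd &> /dev/null; do
  echo "Waiting for RHACM to re-create the deployment..."
  sleep 5
done
~~~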

~~~sh
# Use the `oc` command to list deployments in the simple-app namespace
oc --context cluster1 -n simple-app get deployments
~~~

Connecting to the RHACM Web UI you should see a topology like this:

![Lab 4 Topology](./assets/lab-4-topology.png)

[Home](./README.md)
--------------------------------------------------------------------------------
/5.md:
--------------------------------------------------------------------------------
# Customizing deployments
Certain situations exist, especially in multicluster scenarios and development workflows, where an application requires specific values to be unique for each environment.
*Kustomize* was created to handle that specific situation, and as of Kubernetes 1.14 the functionality is integrated into `kubectl`.


## Creating a Kustomized App
This lab is going to walk through the details of deploying a project and resources that use the same YAML files but are unique per cluster, based on values provided by *Kustomize*.

The [lab-5-assets](./lab-5-assets/base) directory contains the definitions to deploy these resources.

The assets will be loaded into RHACM to be deployed and managed, but each cluster will have unique values used by the configmap, deployment, and service.


## Kustomize Overlays
*Kustomize* offers the ability to create [overlays](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#overlay). These are directories with subdirectories below them that contain specific changes.

We will use *Kustomize* to deploy the same resources in our 3 clusters, but the configmap will be different per cluster. We will also specify different amounts of `replicas` to be deployed per cluster.
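
If you want to preview the fully merged manifests for a given cluster before anything is applied, you can render an overlay locally (a convenience sketch; it assumes your `oc`/`kubectl` version ships the embedded Kustomize support, otherwise the standalone `kustomize` binary works the same way):

~~~sh
# Render the merged base + overlay manifests for cluster1 without applying them
oc kustomize rhacmgitopslab/lab-5-assets/overlays/cluster1
~~~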
First, we need to modify the Route hostnames that will be used by our clusters.

Change directory to `lab-5-assets` to see the assets to be created.
~~~sh
cd rhacmgitopslab/lab-5-assets
~~~

Modify the route hostname for the application
~~~sh
cp overlays/cluster1/route.yaml.backup overlays/cluster1/route.yaml
cp overlays/cluster2/route.yaml.backup overlays/cluster2/route.yaml
cp overlays/cluster3/route.yaml.backup overlays/cluster3/route.yaml

# Define the variable `ROUTE_CLUSTER1`
ROUTE_CLUSTER1=web-app.$(oc --context=cluster1 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `ROUTE_CLUSTER2`
ROUTE_CLUSTER2=web-app.$(oc --context=cluster2 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `ROUTE_CLUSTER3`
ROUTE_CLUSTER3=web-app.$(oc --context=cluster3 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Replace the value changeme with `ROUTE_CLUSTER1` in the file `route.yaml`
sed -i "s/changeme/${ROUTE_CLUSTER1}/" overlays/cluster1/route.yaml
# Replace the value changeme with `ROUTE_CLUSTER2` in the file `route.yaml`
sed -i "s/changeme/${ROUTE_CLUSTER2}/" overlays/cluster2/route.yaml
# Replace the value changeme with `ROUTE_CLUSTER3` in the file `route.yaml`
sed -i "s/changeme/${ROUTE_CLUSTER3}/" overlays/cluster3/route.yaml
~~~

Before committing our changes, we also need to update the file describing the "Channel" resource so that it points to your own GitHub repository:
~~~sh
sed -i 's/ansonmez/YOUR_GITHUB_USERNAME/g' ~/rhacmgitopslab/lab-5-acm/02_channel.yaml
~~~

We are going to commit our changes, because we'll need them for our GitOps deployment model.

~~~sh
# Commit your changes to the local git repository
git commit -am 'Route Hostname configured for all three clusters'
# Push your commits to the git repository
git push origin master
~~~

Within the [overlays](./lab-5-assets/overlays) directory you will find three directories, one per cluster. These directories contain a unique configmap, deployment, and route file with the specific modifications for each cluster. You will use these files to define any customization.

For example, cluster1 will have 1 replica while cluster3 will have 3 replicas. Below you will see the customizations for cluster3.

The ConfigMap will tell you which cluster the application is running in.

~~~yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: the-map
data:
  altGreeting: "The app is running on cluster3"
~~~

The deployment file will override the amount of replicas in the original `deployment.yaml` with the value defined within the overlay directory.

~~~yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: the-deployment
spec:
  replicas: 3
~~~
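
For reference, the glue that applies those per-cluster files on top of the shared base is each overlay's `kustomization.yaml`. A sketch of roughly what it looks like (illustrative; see the actual files under [lab-5-assets/overlays](./lab-5-assets/overlays)):

~~~yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Pull in the shared manifests
  - ../../base
patchesStrategicMerge:
  # Cluster-specific greeting, replica count, and route hostname
  - configmap.yaml
  - deployment.yaml
  - route.yaml
~~~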
If your organization also wanted to do canary deployments, the image could be changed within the *kustomization.yaml* file; an example can be found [here](https://github.com/kubernetes-sigs/kustomize/tree/master/examples/transformerconfigs#images-transformer)


1- Change directory to `lab-5-acm`
~~~sh
cd rhacmgitopslab/lab-5-acm/
~~~

2- Make sure the "hubcluster" context is loaded
~~~sh
oc config use-context hubcluster
~~~

3- Create the namespace
~~~sh
oc create -f 01_namespace.yaml
~~~

4- Create the channel
~~~sh
oc create -f 02_channel.yaml
~~~

5- Create the application
~~~sh
oc create -f 03_application_webapp.yaml
~~~

6- Create a placement rule for each cluster.
Be sure your clusters are labeled in RHACM with `clusterid: cluster1`, `clusterid: cluster2`, and `clusterid: cluster3`:
~~~sh
oc describe clusters -A
...
Name:         jelly
Namespace:    jelly
Labels:       city=ankara
              cloud=Other
              clusterid=cluster2
              name=jelly
              vendor=OpenShift
~~~
Create the placement rules
~~~sh
oc create -f 04_placement_cluster1.yaml
oc create -f 04_placement_cluster2.yaml
oc create -f 04_placement_cluster3.yaml
~~~

7- Create the subscriptions
~~~sh
oc create -f 05_subscription_cluster1.yaml
oc create -f 05_subscription_cluster2.yaml
oc create -f 05_subscription_cluster3.yaml
~~~


Verify the deployments have been created on all the clusters.

~~~sh
# The for loop below will show the status of the deployment on the three clusters
for cluster in cluster1 cluster2 cluster3; do
  echo ------------ ${cluster} deployments ------------
  oc --context ${cluster} -n web-app get deployments
done
~~~

~~~sh
# The for loop below will get the `OpenShift Route` and then curl the application
for cluster in cluster1 cluster2 cluster3; do
  echo ------------ ${cluster} app ------------
  url=$(oc --context ${cluster} -n web-app get route the-route -o jsonpath='{.spec.host}')
  curl http://$url
done
~~~

Connecting to the RHACM Web UI you should see a topology like this:

![Lab 5 Topology](./assets/lab-5-topology.png)

[Home](./README.md)
--------------------------------------------------------------------------------
/6.md:
--------------------------------------------------------------------------------
# Deploying MongoDB

The files within the [lab-6-assets](./lab-6-assets) directory are used to deploy MongoDB. We will define all 3 clusters that are used within RHACM.

## Architecture

Shown below is the architecture definition for our MongoDB Cluster.

![MongoDB Cluster Architecture](./assets/federated-mongo-arch.png)

* There is a MongoDB pod running on each OpenShift Cluster; each pod has its own storage (provisioned by a StorageClass)
* The MongoDB pods are configured as a MongoDB ReplicaSet and communicate using TLS
* The OCP routes are configured as Passthrough; we need the connection to remain plain TLS (no HTTP headers involved)
* The MongoDB pods interact with each other using the OCP Routes (the nodes where these pods are running must be able to connect to the OCP routes)
* The apps consuming the MongoDB ReplicaSet have to use the proper MongoDB connection string URI

**How is the MongoDB ReplicaSet configured?**

Each OpenShift cluster has a MongoDB _pod_ running. There is a _service_ which routes the traffic received on port 27017/TCP to the MongoDB pod port 27017/TCP.

There is an _OpenShift Route_ created on each cluster; the _route_ has been configured to be passthrough (the HAProxy Router won't add HTTP headers to the connections). Each _route_ listens on port 443/TCP and reverse proxies the traffic received to the mongo _service_ on port 27017/TCP.

The MongoDB pods have been configured to use TLS, so all connections will be made using TLS (a TLS certificate with the routes and services DNS names configured as SANs is used by the MongoDB pods).

Once the three pods are up and running, the MongoDB ReplicaSet is configured using the route hostnames as MongoDB endpoints. e.g:

Primary Replica: mongo-cluster1.apps.cluster1.example.com:443
Secondary Replicas: mongo-cluster2.apps.cluster2.example.com:443, mongo-cluster3.apps.cluster3.example.com:443

If a MongoDB pod fails or is stopped, the ReplicaSet will reconfigure itself, and once the failed/stopped pod is back, the ReplicaSet will include it again. (The MongoDB quorum algorithm will decide whether the ReplicaSet is read-only or read-write based on the quorum.)
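
For reference, a MongoDB connection string URI built from those endpoints looks roughly like this (illustrative only; the credentials, database name, and replica set name are assumptions):

~~~sh
# Illustrative MongoDB connection string URI (not a command)
mongodb://admin:PASSWORD@mongo-cluster1.apps.cluster1.example.com:443,mongo-cluster2.apps.cluster2.example.com:443,mongo-cluster3.apps.cluster3.example.com:443/pacman?replicaSet=rs0&ssl=true
~~~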

**NOTE:** This configuration doesn't require the different cluster networks to be aware of each other. We could get the same functionality using:

A) LoadBalancer Services (pods would use the service IP instead of the OpenShift Route)
B) A site-to-site VPN-like solution (pods would connect to ClusterIP / NodePort services directly)

## Prerequisites

### Creating Certificates
This demonstration uses MongoDB with TLS enabled. The example below will create a generic CA, key, and certificate.

Follow the steps below to create the required files in the `lab-6-assets` directory:

1. Change directory to `lab-6-assets`

~~~sh
cd rhacmgitopslab/lab-6-assets
~~~

2. Create the file `ca-config.json` (a standard cfssl signing configuration):

~~~sh
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
~~~

3. Create the file `ca-csr.json` (the CA signing request):

~~~sh
cat > ca-csr.json <<EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF
~~~

4. Generate the CA certificate and private key:

~~~sh
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
~~~

5. Create the file `mongodb-csr.json` (the MongoDB certificate signing request):

~~~sh
cat > mongodb-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "ST": "Oregon"
    }
  ]
}
EOF
~~~

6. Define the SANs for the certificate; they must include the MongoDB route hostnames and the service DNS names:

~~~sh
# Define the variable `ROUTE_CLUSTER1`
ROUTE_CLUSTER1=mongo-cluster1.$(oc --context=cluster1 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `ROUTE_CLUSTER2`
ROUTE_CLUSTER2=mongo-cluster2.$(oc --context=cluster2 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `ROUTE_CLUSTER3`
ROUTE_CLUSTER3=mongo-cluster3.$(oc --context=cluster3 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the SANs used for the certificate
SANS="localhost,localhost.localdomain,127.0.0.1,${ROUTE_CLUSTER1},${ROUTE_CLUSTER2},${ROUTE_CLUSTER3},mongo,mongo.mongo,mongo.mongo.svc.cluster.local"
~~~

7. Generate the MongoDB certificate and key:

~~~sh
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=${SANS} -profile=kubernetes mongodb-csr.json | cfssljson -bare mongodb
~~~

8. Combine the key and certificate into a single PEM file:

~~~sh
cat mongodb-key.pem mongodb.pem > mongo.pem
~~~

### Update MongoDB yaml files

The lab content provides the required files for deploying the MongoDB cluster members in YAML format. We will modify specific values, commit the files to our git repo, and use RHACM to deploy them.

Before deploying MongoDB, the yaml files need to be updated to define the certificates that were created and the routing endpoints that will be used.

1. Configure `MongoDB's PEM`

~~~sh
cp base/mongo-secret.yaml.backup base/mongo-secret.yaml
# Place the value of the `mongodb.pem` into the `mongo-secret.yaml`
sed -i "s/mongodb.pem: .*$/mongodb.pem: $(openssl base64 -A < mongo.pem)/" base/mongo-secret.yaml
# Place the value of the `ca.pem` into the `mongo-secret.yaml`
sed -i "s/ca.pem: .*$/ca.pem: $(openssl base64 -A < ca.pem)/" base/mongo-secret.yaml
~~~
2. Configure `MongoDB's Endpoints`

~~~sh
# Place the value of `ROUTE_CLUSTER1` in the `mongo-rs-deployment.yaml` file
cp base/mongo-rs-deployment.yaml.backup base/mongo-rs-deployment.yaml
sed -i "s/primarynodehere/${ROUTE_CLUSTER1}:443/" base/mongo-rs-deployment.yaml
# Place the values of `ROUTE_CLUSTER1`, `ROUTE_CLUSTER2`, and `ROUTE_CLUSTER3` in the `mongo-rs-deployment.yaml` file
sed -i "s/replicamembershere/${ROUTE_CLUSTER1}:443,${ROUTE_CLUSTER2}:443,${ROUTE_CLUSTER3}:443/" base/mongo-rs-deployment.yaml
~~~

3. Configure `MongoDB OpenShift Route Names`

~~~sh
cp overlays/cluster1/mongo-route.yaml.backup overlays/cluster1/mongo-route.yaml
cp overlays/cluster2/mongo-route.yaml.backup overlays/cluster2/mongo-route.yaml
cp overlays/cluster3/mongo-route.yaml.backup overlays/cluster3/mongo-route.yaml
# Replace the value mongocluster1route with `ROUTE_CLUSTER1` in the file `mongo-route.yaml`
sed -i "s/mongocluster1route/${ROUTE_CLUSTER1}/" overlays/cluster1/mongo-route.yaml
# Replace the value mongocluster2route with `ROUTE_CLUSTER2` in the file `mongo-route.yaml`
sed -i "s/mongocluster2route/${ROUTE_CLUSTER2}/" overlays/cluster2/mongo-route.yaml
# Replace the value mongocluster3route with `ROUTE_CLUSTER3` in the file `mongo-route.yaml`
sed -i "s/mongocluster3route/${ROUTE_CLUSTER3}/" overlays/cluster3/mongo-route.yaml
~~~

4. Before committing our changes, we also need to update the file describing the "Channel" resource so that it points to your own GitHub repository:

~~~sh
sed -i 's/ansonmez/YOUR_GITHUB_USERNAME/g' ~/rhacmgitopslab/lab-6-acm/02_channel.yaml
~~~
5. Commit the changes

~~~sh
# Commit your changes to the local git repository
git commit -am 'MongoDB Certificates and MongoDB Routes'
# Push your commits to the git repository
git push origin master
~~~

## Deploying the MongoDB Cluster
Similar to the previous labs, we need to define the app with RHACM.
Be sure that your clusters are labeled in RHACM with `clusterid: cluster1`, `clusterid: cluster2`, and `clusterid: cluster3`.


Run these commands against the RHACM hub cluster:
~~~sh
cd rhacmgitopslab/lab-6-acm
oc config use-context hubcluster
~~~

1- Create the namespace
~~~sh
oc create -f 01_namespace.yaml
~~~

2- Create the channel
~~~sh
oc create -f 02_channel.yaml
~~~

3- Create the application
~~~sh
oc create -f 03_application_mongo.yaml
~~~

4- Create a placement rule for each cluster
~~~sh
oc create -f 04_placement_cluster1.yaml
oc create -f 04_placement_cluster2.yaml
oc create -f 04_placement_cluster3.yaml
~~~

5- Create a subscription for each deployment
~~~sh
oc create -f 05_subscription_cluster1.yaml
oc create -f 05_subscription_cluster2.yaml
oc create -f 05_subscription_cluster3.yaml
~~~


Validate the namespace exists in the three clusters.
~~~sh
# The for loop below will show the status of the namespace on the three clusters
for cluster in cluster1 cluster2 cluster3; do oc --context $cluster get namespace mongo; done

NAME    STATUS   AGE
mongo   Active   6s
NAME    STATUS   AGE
mongo   Active   5s
NAME    STATUS   AGE
mongo   Active   5s
~~~

Verify the `OpenShift Routes` creation.

~~~sh
# The for loop below will display the route on the three clusters
for cluster in cluster1 cluster2 cluster3; do oc --context $cluster -n mongo get route mongo; done
~~~

## Configuring MongoDB ReplicaSet

At this point we should have 3 independent MongoDB instances running, one on each cluster. Let's verify all replicas are up and running.

> ![TIP](assets/tip-icon.png) **NOTE:** This may take a minute or two

~~~sh
# Check for the pods to be in the Ready state
for cluster in cluster1 cluster2 cluster3; do oc --context $cluster -n mongo get pods ; done

NAME                     READY   STATUS    RESTARTS   AGE
mongo-56d576cb44-gbz8k   1/1     Running   0          3m26s
NAME                     READY   STATUS    RESTARTS   AGE
mongo-d5f96bb56-5rnkx    1/1     Running   0          3m34s
NAME                     READY   STATUS    RESTARTS   AGE
mongo-85f4ff8f46-h4m29   1/1     Running   0          2m16s
~~~

Now that all replicas are up and running, we are going to configure a MongoDB ReplicaSet so all three replicas work as a cluster.
This procedure has been automated; you only need to add a label to the MongoDB pod you want to act as the primary replica. We are going to use the pod running on `cluster1` as the primary replica.

~~~sh
# Select the primary MongoDB pod
MONGO_POD=$(oc --context=cluster1 -n mongo get pod --selector="name=mongo" --output=jsonpath='{.items..metadata.name}')
# Label the primary pod
oc --context=cluster1 -n mongo label pod $MONGO_POD replicaset=primary
~~~

The MongoDB ReplicaSet is being configured now. Let's wait for it to be configured, then check the ReplicaSet status to ensure it has been properly configured.
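
A quick way to check each member's state is to query `rs.status()` for just the member names and states (a convenience sketch reusing the same connection parameters as the command below):

~~~sh
# Select the primary MongoDB pod
MONGO_POD=$(oc --context=cluster1 -n mongo get pod --selector="name=mongo" --output=jsonpath='{.items..metadata.name}')
# Print each replica set member and its state (expect one PRIMARY and two SECONDARY)
oc --context=cluster1 -n mongo exec $MONGO_POD \
  -- bash -c 'mongo --norc --quiet --username=admin --password=$MONGODB_ADMIN_PASSWORD --host localhost admin --tls --tlsCAFile /opt/mongo-ssl/ca.pem --eval "rs.status().members.forEach(function(m){print(m.name + \" -> \" + m.stateStr)})"'
~~~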

If you want to get a more detailed view of the configuration, you can run the following command and you will get a huge json output with the status of the ReplicaSet:

~~~sh
# Select the primary MongoDB pod
MONGO_POD=$(oc --context=cluster1 -n mongo get pod --selector="name=mongo" --output=jsonpath='{.items..metadata.name}')
# Get the replicaset status
oc --context=cluster1 -n mongo exec $MONGO_POD \
  -- bash -c 'mongo --norc --quiet --username=admin --password=$MONGODB_ADMIN_PASSWORD --host localhost admin --tls --tlsCAFile /opt/mongo-ssl/ca.pem --eval "rs.status()"'
~~~

Connecting to the RHACM Web UI you should see a topology like this:

![Lab 6 Topology](./assets/lab-6-topology.png)

[Home](./README.md)
--------------------------------------------------------------------------------
/7.md:
--------------------------------------------------------------------------------
# Deploying Pacman

The files within the [lab-7-assets](./lab-7-assets) directory are used by RHACM to deploy Pacman on multiple OpenShift clusters.

## Architecture

Below is the architecture definition for our Pacman Application.

![Pacman Application Architecture](./assets/demo-arch.png)

* There is a Pacman pod running on each OpenShift Cluster
* There is an HAProxy Load Balancer which load balances the traffic on `pacman.example.com` across the three Pacman replicas
* Pacman saves highscores into MongoDB; the connection string to the database includes all three replica hostnames

**How a user accesses the Pacman application**

1. The user points their browser to `pacman.example.com`; the DNS server will return the IP address of the HAProxy
2. The browser will send a request to the HAProxy asking for the host `pacman.example.com`
3. The HAProxy will look up the available backend servers for the frontend configuration `pacman.example.com`
4. The HAProxy will reverse proxy to one of the available backend servers (pacman pods) using Round Robin as the load balancing mechanism
5. The user will be connected to the backend server and presented with the Pacman app

## Prerequisites

### Deploying HAProxy

We are going to deploy the HAProxy server on the hub cluster; the HAProxy could be hosted externally as well.

1. Change directory to `haproxy-yaml`

```sh
cd ~/rhacmgitopslab/haproxy-yaml
```
2. Create the namespace where the HAProxy LB will be deployed

```sh
oc --context hubcluster create ns haproxy-lb
```
3. Create the HAProxy Route for external access

> ![TIP](assets/tip-icon.png) **NOTE:** HAPROXY_LB_ROUTE is what the diagram shows as `pacman.example.com`

```sh
# Define the variable `HAPROXY_LB_ROUTE`
HAPROXY_LB_ROUTE=pacman-multicluster.$(oc --context=hubcluster get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')

# Use the value of `HAPROXY_LB_ROUTE` to create a route with the `oc` command
oc --context hubcluster -n haproxy-lb create route edge haproxy-lb \
  --service=haproxy-lb-service --port=8080 --insecure-policy=Allow \
  --hostname=${HAPROXY_LB_ROUTE}
```
> ![TIP](assets/tip-icon.png) **NOTE:** The HAProxy Route will be used as the Pacman application entry point.
> Since we want to have HA across clusters, we will access the Pacman application using our HAProxy rather than the OpenShift Routes.
4. Create the configmap with the HAProxy configuration file

```sh
# Define the variable `PACMAN_INGRESS`
PACMAN_INGRESS=pacman-ingress.$(oc --context=hubcluster get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `PACMAN_CLUSTER1`
PACMAN_CLUSTER1=pacman.$(oc --context=cluster1 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `PACMAN_CLUSTER2`
PACMAN_CLUSTER2=pacman.$(oc --context=cluster2 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Define the variable `PACMAN_CLUSTER3`
PACMAN_CLUSTER3=pacman.$(oc --context=cluster3 get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Copy the sample configmap
cp haproxy.tmpl haproxy
# Update the HAProxy configuration
sed -i "/option httpchk GET/a \ \ \ \ http-request set-header Host ${PACMAN_INGRESS}" haproxy
# Replace the <pacman_frontend_hostname> placeholder with the value of `PACMAN_INGRESS`
sed -i "s/<pacman_frontend_hostname>/${PACMAN_INGRESS}/g" haproxy
# Replace the first server placeholder with the value of `PACMAN_CLUSTER1`
sed -i "s/<server1> <host1>:<port1>/cluster1 ${PACMAN_CLUSTER1}:80/g" haproxy
# Replace the second server placeholder with the value of `PACMAN_CLUSTER2`
sed -i "s/<server2> <host2>:<port2>/cluster2 ${PACMAN_CLUSTER2}:80/g" haproxy
# Replace the third server placeholder with the value of `PACMAN_CLUSTER3`
sed -i "s/<server3> <host3>:<port3>/cluster3 ${PACMAN_CLUSTER3}:80/g" haproxy
# Create the configmap
oc --context hubcluster -n haproxy-lb create configmap haproxy --from-file=haproxy
```
> ![TIP](assets/tip-icon.png) **NOTE:** If you are curious about the HAProxy configuration, you can have a look at the `haproxy` file and review the frontend and backend sections
5. Create the HAProxy Service referenced in the HAProxy Route

```sh
oc --context hubcluster -n haproxy-lb create -f haproxy-clusterip-service.yaml
```
6. Create the HAProxy Deployment

```sh
oc --context hubcluster -n haproxy-lb create -f haproxy-deployment.yaml
```
7. Verify HAProxy is working

7.1 Check that the HAProxy deployment is ready

```sh
oc --context hubcluster -n haproxy-lb get pods

NAME                          READY   STATUS    RESTARTS   AGE
haproxy-lb-6f69d59979-z2hll   1/1     Running   0          25s
```
7.2 Try to access HAProxy

> ![WARNING](assets/warning-icon.png) **NOTE:** 503 Service Unavailable means that no backend servers are available to handle HAProxy forwarded requests, but HAProxy itself is working fine.

```sh
# Get the HAProxy LB Route
HAPROXY_LB_ROUTE=$(oc --context hubcluster -n haproxy-lb get route haproxy-lb -o jsonpath='{.status.ingress[*].host}')
# Access HAProxy
curl -k https://${HAPROXY_LB_ROUTE}

<html><body><h1>503 Service Unavailable</h1>
No server is available to handle this request.
</body></html>
```

## Deploying the Pacman Application

Now that the Mongo cluster has been configured, it is time to deploy the *pacman* application.

First, change to the directory of the Pacman demo folder.
~~~sh
cd ~/rhacmgitopslab/lab-7-assets
~~~

For the *pacman* application, the file `pacman-deployment.yaml` needs to reflect the MongoDB endpoint. The MongoDB endpoint is used to save scores from the game.
Provide the value of the MongoDB server(s) to be used for the scores to be recorded for the *pacman* game.

~~~sh
# Define the variable `MONGO_CLUSTER1`
MONGO_CLUSTER1=$(oc --context=cluster1 -n mongo get route mongo -o jsonpath='{.status.ingress[*].host}')
# Define the variable `MONGO_CLUSTER2`
MONGO_CLUSTER2=$(oc --context=cluster2 -n mongo get route mongo -o jsonpath='{.status.ingress[*].host}')
# Define the variable `MONGO_CLUSTER3`
MONGO_CLUSTER3=$(oc --context=cluster3 -n mongo get route mongo -o jsonpath='{.status.ingress[*].host}')
# Use the variables to define the replica members
cp base/pacman-deployment.yaml.backup base/pacman-deployment.yaml
sed -i "s/replicamembershere/${MONGO_CLUSTER1},${MONGO_CLUSTER2},${MONGO_CLUSTER3}/g" base/pacman-deployment.yaml
~~~

A value must be provided to be the publicly accessible address for the *pacman* application.
~~~sh
# Define the variable `PACMAN_INGRESS`
PACMAN_INGRESS=pacman-ingress.$(oc --context=hubcluster get ingresses.config.openshift.io cluster -o jsonpath='{ .spec.domain }')
# Replace the generic value with the variable `PACMAN_INGRESS`
cp base/pacman-route.yaml.backup base/pacman-route.yaml
sed -i "s/pacmanhosthere/${PACMAN_INGRESS}/g" base/pacman-route.yaml
~~~

Before committing our changes, we also need to update the file describing the "Channel" resource so that it points to your own GitHub repository:

~~~sh
sed -i 's/ansonmez/YOUR_GITHUB_USERNAME/g' ~/rhacmgitopslab/lab-7-acm/02_channel.yaml
~~~


1. Commit the files to the git repo

~~~sh
# Commit your changes to the local git repository
git commit -am 'Values for MongoDB Replicas and Ingress Hostname'
# Push your commits to the git repository
git push origin master
~~~

2. We will deploy it on cluster1, cluster2, and cluster3 via RHACM. We will use Kustomize and overlays to create the objects, as the overlay values will be used to modify the replica count and the image.

~~~sh
cd ~/rhacmgitopslab/lab-7-acm
~~~
1- Create the namespace
~~~sh
oc create -f 01_namespace.yaml
~~~

2- Create the channel
~~~sh
oc create -f 02_channel.yaml
~~~

3- Create the application
~~~sh
oc create -f 03_application_pacman.yaml
~~~

4- Create a placement rule for each cluster
~~~sh
oc create -f 04_placement_cluster1.yaml
oc create -f 04_placement_cluster2.yaml
oc create -f 04_placement_cluster3.yaml
~~~

5- Create a subscription for each deployment
~~~sh
oc create -f 05_subscription_cluster1.yaml
oc create -f 05_subscription_cluster2.yaml
oc create -f 05_subscription_cluster3.yaml
~~~



3. Validate the namespace exists in the three clusters.
~~~sh
# The for loop will verify the `namespace` is on the three clusters
for i in cluster1 cluster2 cluster3; do oc get namespace pacman --context $i; done

NAME     STATUS   AGE
pacman   Active   8s
NAME     STATUS   AGE
pacman   Active   4s
NAME     STATUS   AGE
pacman   Active   6s
~~~


4. Wait for the deployment to become ready
~~~sh
for i in cluster1 cluster2 cluster3; do oc get -n pacman deployments --context $i; done

NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   3/3     3            3           35s
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           10s
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           16s
~~~

## Play the Game
The game should now be available at the publicly accessible address. Make sure to save the high score at the end of the game. This shows the data being persisted back to the database.

You can go ahead and open the URL returned by the following command in your browser:

~~~sh
oc --context=hubcluster -n haproxy-lb get route haproxy-lb -o jsonpath="{.status.ingress[*].host}{\"\n\"}"

e.g: pacman-multicluster.apps.cluster-b5b7.b5b7.sandbox362.opentlc.com
~~~

Connecting to the RHACM Web UI you should see a topology like this:

![Lab 7 Topology](./assets/lab-7-topology.png)

[Home](./README.md)
--------------------------------------------------------------------------------
/8.md:
--------------------------------------------------------------------------------
# Application Portability
On occasion it may be required to move an application off of a cluster, or to ensure that no traffic is routed to a cluster. To do this, we will modify the deployment values within the deployment overlays that were used when we created the pacman application in the last lab.

Before we begin, we will validate that every cluster is running the pacman application.
~~~sh
# The for loop will check for the Deployment objects in each cluster
for cluster in cluster1 cluster2 cluster3;do echo "*** $cluster ***"; oc get deployment --context $cluster -n pacman;done

*** cluster1 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     0            0           6m21s
*** cluster2 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           6m21s
*** cluster3 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     0            0           6m21s
~~~

We would first like to ensure that cluster1 does not have any replicas running at all. To do this, we will modify the overlay file for the pacman deployment. We will use the directory from the previous labs to make modifications to the application.

~~~sh
# Browse to the directory
cd ~/rhacmgitopslab/lab-7-assets
# Modify the replica count in cluster1
sed -i 's/replicas: [0-9]/replicas: 0/g' overlays/cluster1/pacman-deployment.yaml
# Commit your changes to the local git repository
git commit -am 'Cluster1 replicas scaled to 0'
# Push your commits to the git repository
git push origin master
~~~

> ![TIP](assets/tip-icon.png) **NOTE:** By default, a GitHub channel subscription clones the GitHub repository specified in the channel every minute and applies changes when the commit ID has changed. Alternatively, you can configure your subscription to apply changes only when the GitHub repository sends repo PUSH and PULL webhook event notifications.
>
> https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/1.0/html-single/manage_applications/index#channel-samples
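
A sketch of what the webhook variant looks like on the channel (the annotation follows the linked documentation; the names are illustrative):

~~~yaml
apiVersion: apps.open-cluster-management.io/v1
kind: Channel
metadata:
  name: pacman-channel
  namespace: pacman-ns
  annotations:
    # Apply changes on webhook events instead of polling every minute
    apps.open-cluster-management.io/webhook-enabled: "true"
spec:
  type: GitHub
  pathname: https://github.com/YOUR_GITHUB_USERNAME/rhacmgitopslab.git
~~~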

RHACM will update the deployment settings. Let's check the application status.
Now we should see no replicas running on cluster1:

~~~sh
# The for loop will output the `deployment` in the three clusters
for cluster in cluster1 cluster2 cluster3;do echo "*** $cluster ***"; oc get deployment --context $cluster -n pacman;done
~~~

> ![TIP](assets/tip-icon.png) **NOTE:** You can now go ahead and play Pacman; you should see that Pacman application requests are load balanced across the two remaining clusters.

We can also remove cluster2, which would leave only cluster3 to run the game.

~~~sh
# Modify the replica count in cluster2
sed -i 's/replicas: [0-9]/replicas: 0/g' overlays/cluster2/pacman-deployment.yaml
# Commit your changes to the local git repository
git commit -am 'Cluster2 replicas scaled to 0'
# Push your commits to the git repository
git push origin master
~~~

Again, RHACM will take care of updating the deployment.

~~~sh
# The for loop will output the `deployment` in the three clusters
for cluster in cluster1 cluster2 cluster3;do echo "*** $cluster ***"; oc get deployment --context $cluster -n pacman;done

*** cluster1 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   0/0     0            0           7m51s
*** cluster2 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   0/0     0            0           7m32s
*** cluster3 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           7m31s
~~~

> ![TIP](assets/tip-icon.png) **NOTE:** You can now go ahead and play Pacman; you should see that Pacman application requests are sent to the one remaining cluster.

To bring the application back to all clusters, modify the replica count to be 1 for both cluster1 and cluster2.

~~~sh
# Modify the replica count in cluster1
sed -i 's/replicas: [0-9]/replicas: 1/g' overlays/cluster1/pacman-deployment.yaml
# Modify the replica count in cluster2
sed -i 's/replicas: [0-9]/replicas: 1/g' overlays/cluster2/pacman-deployment.yaml
# Commit your changes to the local git repository
git commit -am 'Scale back cluster1 and cluster2 to 1'
# Push your commits to the git repository
git push origin master
~~~

In a few seconds the deployments will be synced, with pacman pods in all three clusters.

Verify the deployments have the required replicas again in all three clusters.

~~~sh
# The for loop will output the `deployment` in the three clusters
for cluster in cluster1 cluster2 cluster3;do echo "*** $cluster ***"; oc get deployment --context $cluster -n pacman;done

*** cluster1 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           24s
*** cluster2 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           9m24s
*** cluster3 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           25s
~~~

The most important thing to note while changing which clusters run the *pacman* application is that the scores persist regardless of which cluster the application is running on, and HAProxy always ensures the application is available.
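
If you want to watch this from the command line instead of the browser, a quick loop against the HAProxy route shows requests being answered throughout the scaling exercise (a convenience sketch):

~~~sh
# Get the HAProxy LB Route
HAPROXY_LB_ROUTE=$(oc --context hubcluster -n haproxy-lb get route haproxy-lb -o jsonpath='{.status.ingress[*].host}')
# Expect HTTP 200 responses while at least one cluster runs pacman
for i in $(seq 1 5); do curl -s -o /dev/null -w "%{http_code}\n" -k https://${HAPROXY_LB_ROUTE}; done
~~~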


[Home](./README.md)
--------------------------------------------------------------------------------
/9.md:
--------------------------------------------------------------------------------
# Canary Deployments
A common practice is to deploy a new version of an application to a small subset of the available clusters. Using *Kustomize* again, we will modify the pacman image and then promote that image to be running on all clusters.

Ensure that the pacman application is still running in all 3 clusters.
~~~sh
# The for loop will output the `deployment` in the three clusters
for cluster in cluster1 cluster2 cluster3;do echo "*** $cluster ***"; oc get deployment --context $cluster -n pacman;done

*** cluster1 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           6m21s
*** cluster2 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           6m21s
*** cluster3 ***
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
pacman   1/1     1            1           6m21s
~~~

We would first like to ensure that cluster3 has the new container image configured. To do this, we will modify the file `~/rhacmgitopslab/lab-7-assets/overlays/cluster3/pacman-deployment.yaml`. This lab will use the same directories that were used in lab 7.

> ![TIP](assets/tip-icon.png) **NOTE:** We are changing the image used by the `pacman` deployment

~~~sh
# Change the directory
cd ~/rhacmgitopslab/lab-7-assets/overlays/cluster3
# Modify the `pacman-deployment.yaml`
cat > pacman-deployment.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pacman
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: pacman
          image: quay.io/mavazque/pacman:latest
EOF
~~~

Commit and push the change so that RHACM picks it up:

~~~sh
# Commit your changes to the local git repository
git commit -am 'Pacman canary image on cluster3'
# Push your commits to the git repository
git push origin master
~~~

> ![TIP](assets/tip-icon.png) **NOTE:** By default, a GitHub channel subscription clones the GitHub repository specified in the channel every minute and applies changes when the commit ID has changed. Alternatively, you can configure your subscription to apply changes only when the GitHub repository sends repo PUSH and PULL webhook event notifications.
>
> https://access.redhat.com/documentation/en-us/red_hat_advanced_cluster_management_for_kubernetes/1.0/html-single/manage_applications/index#channel-samples

Validate that the image successfully deployed. You should see the output `quay.io/mavazque/pacman:latest`.

~~~sh
oc get deployment --context cluster3 -n pacman pacman -o=jsonpath="{$.spec.template.spec.containers[:1].image}{\"\n\"}"
~~~

Because the application deploys and passes its startup procedures successfully, we know we can supply the image to the other clusters.

~~~sh
# Change directories
cd ~/rhacmgitopslab/lab-7-assets/base
# Modify the image used by the `pacman-deployment.yaml`
sed -i 's~quay.io/ifont/pacman-nodejs-app:latest~quay.io/mavazque/pacman:latest~g' pacman-deployment.yaml
~~~

Before committing the code to the git repository and deploying it, we need to set cluster3's application deployment back to the original values, since the image will now be defined in the base directory.

~~~sh
# Change directories
cd ~/rhacmgitopslab/lab-7-assets/
# Remove the cluster3 `pacman-deployment.yaml`
rm -rf overlays/cluster3/pacman-deployment.yaml
# Copy the `pacman-deployment.yaml` from cluster1 to cluster3
cp overlays/cluster1/pacman-deployment.yaml overlays/cluster3/pacman-deployment.yaml
~~~

Now we will submit the changes to our git repo, which should deploy the new image to all clusters.
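
Before pushing, it can be worth double-checking exactly what will land in the repo (a convenience sketch):

~~~sh
# Review the pending differences before committing
git status
git diff
~~~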

~~~sh
# Commit your changes to the local git repository
git commit -am 'new image to all clusters'
# Push your commits to the git repository
git push origin master
~~~

RHACM will push the new pacman image to all three clusters.


Verify the image deployed is our newest pacman image.

~~~sh
# The for loop will output the image being used by each cluster for the pacman application
for cluster in cluster1 cluster2 cluster3;do echo "$cluster "; oc get deployment --context $cluster -n pacman pacman -o=jsonpath='{$.spec.template.spec.containers[:1].image}'; echo "";done

cluster1
quay.io/mavazque/pacman:latest
cluster2
quay.io/mavazque/pacman:latest
cluster3
quay.io/mavazque/pacman:latest
~~~

Of course, now point to your *pacman* URL and check that everything is working!

~~~sh
oc --context=hubcluster -n haproxy-lb get route haproxy-lb -o jsonpath="{.status.ingress[*].host}{\"\n\"}"

e.g: pacman-multicluster.apps.cluster-b5b7.b5b7.sandbox362.opentlc.com
~~~


[Home](./README.md)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Hands on with Red Hat Advanced Cluster Management: Application Portability

During the labs you will be deploying workloads on three OpenShift 4 clusters.

One cluster will be the hub, and the other three clusters will be the spoke (managed) clusters.

We assume the clusters are ready with Red Hat ACM installed and that you have access to them; in any case, we link the proper documentation during the lab so you can proceed with the needed steps.

***Please note:*** Because we're going to use a GitOps-based deployment model, we need to leverage GitHub, so the first prerequisite before starting the lab is to *clone* this repo.

* [Lab - Prerequisites](./1.md)
* [Lab - Login into OpenShift Clusters and Configure Context](./2.md)
* [Lab - Git Ops Introduction](./3.md)
* [Lab - Deploying and Managing a Project with GitOps](./4.md)
* [Lab - Customizing Deployments](./5.md)
* [Lab - Deploying MongoDB](./6.md)
* [Lab - Deploying Pacman](./7.md)
* [Lab - Application Portability](./8.md)
* [Lab - Canary Deployments](./9.md)

-----
Disclaimer
This lab is the RHACM-translated version of https://github.com/openshift/federation-dev .
Thanks to the contributors of the referenced lab.
-----
--------------------------------------------------------------------------------
/assets/demo-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/demo-arch.png
--------------------------------------------------------------------------------
/assets/federated-mongo-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/federated-mongo-arch.png
--------------------------------------------------------------------------------
/assets/gitops.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/gitops.png
--------------------------------------------------------------------------------
/assets/lab-4-topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/lab-4-topology.png
--------------------------------------------------------------------------------
/assets/lab-5-topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/lab-5-topology.png
--------------------------------------------------------------------------------
/assets/lab-6-topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/lab-6-topology.png
--------------------------------------------------------------------------------
/assets/lab-7-topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/lab-7-topology.png
--------------------------------------------------------------------------------
/assets/lab-env-overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/lab-env-overview.png
--------------------------------------------------------------------------------
/assets/ocp-router-1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/ocp-router-1.gif
--------------------------------------------------------------------------------
/assets/tip-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/tip-icon.png
--------------------------------------------------------------------------------
/assets/warning-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/assets/warning-icon.png -------------------------------------------------------------------------------- /haproxy-yaml/haproxy: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file. A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unnecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | 37 | #--------------------------------------------------------------------- 38 | # common defaults that all the 'listen' and 'backend' sections will 39 | # use if not designated in their block 40 | #--------------------------------------------------------------------- 41 | defaults 42 | mode http 43 | log global 44 | option httplog 45 | option dontlognull 46 | option http-server-close 47 | option forwardfor except 127.0.0.0/8 48 | option redispatch 49 | retries 2 50 | timeout http-request 5s 51 | timeout queue 1m 52 | timeout connect 5000 53 | timeout client 30000 54 | timeout server 30000 55 | timeout http-keep-alive 10s 56 | timeout check 5000 57 | maxconn 3000 58 | default-server init-addr last,libc,none 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxies to the backends 62 | #--------------------------------------------------------------------- 63 | frontend main 64 | bind *:8080 65 | 66 | default_backend app 67 | 68 | #--------------------------------------------------------------------- 69 | # round robin balancing between the various backends 70 | #--------------------------------------------------------------------- 71 | backend app 72 | balance roundrobin 73 | option httpchk GET / HTTP/1.1\r\nHost:\ 74 | mode http 75 | server : check 76 | server : check 77 | server : check 78 | 79 | -------------------------------------------------------------------------------- /haproxy-yaml/haproxy-clusterip-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: haproxy-lb 6 | name: haproxy-lb-service 7 | spec: 8 | ports: 9 | - port: 8080 10 | protocol: TCP 11 | targetPort: 8080 12 | selector: 13 | app: haproxy-lb 14 | sessionAffinity: None 15 | type: ClusterIP 16 | -------------------------------------------------------------------------------- /haproxy-yaml/haproxy-deployment.yaml:
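The Deployment below consumes the config above through a ConfigMap named `haproxy`, whose `haproxy` key is projected into the pod as `/etc/haproxy/haproxy.cfg`. A minimal sketch of wiring the pieces together, assuming the commands run from the `haproxy-yaml` directory inside the project that should host the load balancer, and that the `server : check` placeholders have already been filled with the three cluster routes:

~~~sh
# Load the edited haproxy file as the ConfigMap the Deployment mounts
# (key 'haproxy' ends up as /etc/haproxy/haproxy.cfg in the container).
oc create configmap haproxy --from-file=haproxy=haproxy
# Create the load balancer pod and its ClusterIP service.
oc apply -f haproxy-deployment.yaml
oc apply -f haproxy-clusterip-service.yaml
~~~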
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: haproxy-lb 5 | labels: 6 | app: haproxy-lb 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: haproxy-lb 12 | template: 13 | metadata: 14 | labels: 15 | app: haproxy-lb 16 | spec: 17 | containers: 18 | - image: haproxytech/haproxy-alpine:2.0.4 19 | imagePullPolicy: Always 20 | name: haproxy 21 | ports: 22 | - containerPort: 8443 23 | protocol: TCP 24 | - containerPort: 8080 25 | protocol: TCP 26 | volumeMounts: 27 | - mountPath: /etc/haproxy 28 | name: config 29 | volumes: 30 | - configMap: 31 | defaultMode: 420 32 | items: 33 | - key: haproxy 34 | path: haproxy.cfg 35 | name: haproxy 36 | name: config 37 | -------------------------------------------------------------------------------- /haproxy-yaml/haproxy.tmpl: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file. 
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unnecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | 37 | #--------------------------------------------------------------------- 38 | # common defaults that all the 'listen' and 'backend' sections will 39 | # use if not designated in their block 40 | #--------------------------------------------------------------------- 41 | defaults 42 | mode http 43 | log global 44 | option httplog 45 | option dontlognull 46 | option http-server-close 47 | option forwardfor except 127.0.0.0/8 48 | option redispatch 49 | retries 2 50 | timeout http-request 5s 51 | timeout queue 1m 52 | timeout connect 5000 53 | timeout client 30000 54 | timeout server 30000 55 | timeout http-keep-alive 10s 56 | timeout check 5000 57 | maxconn 3000 58 | default-server init-addr last,libc,none 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxies to the backends 62 | #--------------------------------------------------------------------- 63 | frontend main 64 | bind *:8080 65 | 66 | default_backend app 67 | 68 | #--------------------------------------------------------------------- 69 | # round robin balancing between the various backends 70 | #--------------------------------------------------------------------- 71 | backend app 72 | balance roundrobin 73 | option httpchk GET / HTTP/1.1\r\nHost:\ 74 | mode http 75 | server : check 76 | server : check 77 | server : check 78 | 79 | -------------------------------------------------------------------------------- /haproxy-yaml/old/haproxy.backup: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file.
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | 37 | #--------------------------------------------------------------------- 38 | # common defaults that all the 'listen' and 'backend' sections will 39 | # use if not designated in their block 40 | #--------------------------------------------------------------------- 41 | defaults 42 | mode http 43 | log global 44 | option httplog 45 | option dontlognull 46 | option http-server-close 47 | option forwardfor except 127.0.0.0/8 48 | option redispatch 49 | retries 2 50 | timeout http-request 5s 51 | timeout queue 1m 52 | timeout connect 5000 53 | timeout client 30000 54 | timeout server 30000 55 | timeout http-keep-alive 10s 56 | timeout check 5000 57 | maxconn 3000 58 | default-server init-addr last,libc,none 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxys to the backends 62 | #--------------------------------------------------------------------- 63 | frontend main 64 | bind *:8080 65 | default_backend app 66 | 67 | #--------------------------------------------------------------------- 68 | # round robin balancing between the various backends 69 | #--------------------------------------------------------------------- 70 | backend app 71 | balance roundrobin 72 | # option httpchk GET / HTTP/1.1\r\nHost:\ pacman-ingress.apps.as4xy.lp.int 73 | mode http 74 | server istanbul pacman-pacman.apps.heisenbug.lplab.online:443 check 75 | server ankara pacman-pacman.apps.jelly.lplab.online:443 check 76 | -------------------------------------------------------------------------------- /haproxy-yaml/old/haproxy.denemeler: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file. 
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | # tune.maxrewrite 1638400 37 | # tune.bufsize 1638400 38 | # tune.http.maxhdr 10000 39 | # tune.ssl.default-dh-param 2048 40 | 41 | #--------------------------------------------------------------------- 42 | # common defaults that all the 'listen' and 'backend' sections will 43 | # use if not designated in their block 44 | #--------------------------------------------------------------------- 45 | defaults 46 | mode http 47 | log global 48 | stats enable 49 | stats hide-version 50 | stats refresh 30s 51 | stats show-node 52 | stats auth admin:password 53 | stats uri /haproxy?stats 54 | # option httplog 55 | option tcplog 56 | option dontlognull 57 | # option http-server-close 58 | option forwardfor except 127.0.0.0/8 59 | option redispatch 60 | retries 2 61 | timeout http-request 5s 62 | timeout queue 1m 63 | timeout connect 5000 64 | timeout client 30000 65 | timeout server 30000 66 | timeout http-keep-alive 10s 67 | timeout check 5000 68 | maxconn 3000 69 | default-server init-addr last,libc,none 70 | 71 | #--------------------------------------------------------------------- 72 | # main frontend which proxys to the backends 73 | #--------------------------------------------------------------------- 74 | frontend main 75 | bind *:8443 76 | # bind *:8443 ssl crt /opt/haproxy-ssl/haproxy.pem 77 | mode http 78 | # http-request set-header X-Forwarded-For %[src] 79 | # reqadd X-Forwarded-Proto:\ https 80 | # option http-server-close 81 | default_backend app 82 | 83 | #--------------------------------------------------------------------- 84 | # round robin balancing between the various backends 85 | #--------------------------------------------------------------------- 86 | backend app 87 | mode http 88 | # option httpchk GET / HTTP/1.1\r\nHost:\ pacman-multi.apps.as4xy.lp.int 89 | http-request set-header Host pacman-multi.apps.as4xy.lp.int 90 | balance roundrobin 91 | server istanbul pacman.apps.heisenbug.lplab.online:80 92 | server ankara pacman.apps.jelly.lplab.online:80 93 | -------------------------------------------------------------------------------- /haproxy-yaml/old/haproxy.last: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file. 
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | 37 | #--------------------------------------------------------------------- 38 | # common defaults that all the 'listen' and 'backend' sections will 39 | # use if not designated in their block 40 | #--------------------------------------------------------------------- 41 | defaults 42 | mode http 43 | log global 44 | option httplog 45 | option dontlognull 46 | option http-server-close 47 | option forwardfor except 127.0.0.0/8 48 | option redispatch 49 | retries 2 50 | timeout http-request 5s 51 | timeout queue 1m 52 | timeout connect 5000 53 | timeout client 30000 54 | timeout server 30000 55 | timeout http-keep-alive 10s 56 | timeout check 5000 57 | maxconn 3000 58 | default-server init-addr last,libc,none 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxys to the backends 62 | #--------------------------------------------------------------------- 63 | frontend main 64 | bind *:8080 65 | 66 | default_backend app 67 | 68 | #--------------------------------------------------------------------- 69 | # round robin balancing between the various backends 70 | #--------------------------------------------------------------------- 71 | backend app 72 | balance roundrobin 73 | ###option httpchk GET / HTTP/1.1\r\nHost:\ pacman-ingress.apps.as4xy.lp.int 74 | http-request set-header Host pacman-ingress.apps.as4xy.lp.int 75 | mode http 76 | server jelly1 192.168.191.64:80 check 77 | server jelly2 192.168.191.62 check 78 | # server heisenbug1 79 | # server istanbul pacman.apps.heisenbug.lplab.online:80 80 | # server istanbul 192.168.191.180:80 check 81 | # server ankara2 192.168.191.60:80 check 82 | # server ankara pacman.apps.jelly.lplab.online:80 83 | -------------------------------------------------------------------------------- /haproxy-yaml/old/haproxy.yedek: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file. 
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | 37 | #--------------------------------------------------------------------- 38 | # common defaults that all the 'listen' and 'backend' sections will 39 | # use if not designated in their block 40 | #--------------------------------------------------------------------- 41 | defaults 42 | mode http 43 | log global 44 | option httplog 45 | option dontlognull 46 | option http-server-close 47 | # option forwardfor except 127.0.0.0/8 48 | option redispatch 49 | retries 2 50 | timeout http-request 5s 51 | timeout queue 1m 52 | timeout connect 5000 53 | timeout client 30000 54 | timeout server 30000 55 | timeout http-keep-alive 10s 56 | timeout check 5000 57 | maxconn 3000 58 | default-server init-addr last,libc,none 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxys to the backends 62 | #--------------------------------------------------------------------- 63 | frontend main 64 | bind *:8080 65 | default_backend app 66 | 67 | #--------------------------------------------------------------------- 68 | # round robin balancing between the various backends 69 | #--------------------------------------------------------------------- 70 | backend app 71 | balance roundrobin 72 | # option httpchk GET / HTTP/1.1\r\nHost:\ pacman-ingress.apps.as4xy.lp.int 73 | mode tcp 74 | server istanbul pacman-pacman.apps.heisenbug.lplab.online:443 check 75 | server ankara pacman-pacman.apps.jelly.lplab.online:443 check 76 | -------------------------------------------------------------------------------- /haproxy-yaml/old/haproxy2: -------------------------------------------------------------------------------- 1 | #--------------------------------------------------------------------- 2 | # Example configuration for a possible web application. See the 3 | # full configuration options online. 4 | # 5 | # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt 6 | # 7 | #--------------------------------------------------------------------- 8 | 9 | #--------------------------------------------------------------------- 10 | # Global settings 11 | #--------------------------------------------------------------------- 12 | global 13 | # to have these messages end up in /var/log/haproxy.log you will 14 | # need to: 15 | # 16 | # 1) configure syslog to accept network log events. This is done 17 | # by adding the '-r' option to the SYSLOGD_OPTIONS in 18 | # /etc/sysconfig/syslog 19 | # 20 | # 2) configure local2 events to go to the /var/log/haproxy.log 21 | # file. 
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | # unecessary since already in a container and runs w/ a non-root user 29 | # chroot /var/lib/haproxy 30 | # user haproxy 31 | # group haproxy 32 | # daemon 33 | 34 | pidfile /var/lib/haproxy/haproxy.pid 35 | maxconn 4000 36 | 37 | #--------------------------------------------------------------------- 38 | # common defaults that all the 'listen' and 'backend' sections will 39 | # use if not designated in their block 40 | #--------------------------------------------------------------------- 41 | defaults 42 | mode http 43 | log global 44 | option httplog 45 | option dontlognull 46 | option http-server-close 47 | option forwardfor except 127.0.0.0/8 48 | option redispatch 49 | retries 2 50 | timeout http-request 5s 51 | timeout queue 1m 52 | timeout connect 5000 53 | timeout client 30000 54 | timeout server 30000 55 | timeout http-keep-alive 10s 56 | timeout check 5000 57 | maxconn 3000 58 | default-server init-addr last,libc,none 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxys to the backends 62 | #--------------------------------------------------------------------- 63 | frontend main 64 | bind *:8080 65 | 66 | default_backend app 67 | 68 | #--------------------------------------------------------------------- 69 | # round robin balancing between the various backends 70 | #--------------------------------------------------------------------- 71 | backend app 72 | balance roundrobin 73 | option httpchk GET / HTTP/1.1\r\nHost:\ pacman-multi.apps.as4xy.lp.int 74 | http-request set-header Host pacman-multi.apps.as4xy.lp.int 75 | mode http 76 | server : check 77 | server : check 78 | server : check 79 | 80 | -------------------------------------------------------------------------------- /haproxy-yaml/old/hcacert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDLzCCAhegAwIBAgIIXz68+kCvRFwwDQYJKoZIhvcNAQELBQAwJjEkMCIGA1UE 3 | AwwbaW5ncmVzcy1vcGVyYXRvckAxNTg4ODg1OTQ3MB4XDTIwMDUwNzIxMTIyOFoX 4 | DTIyMDUwNzIxMTIyOVowKDEmMCQGA1UEAwwdKi5hcHBzLmhlaXNlbmJ1Zy5scGxh 5 | Yi5vbmxpbmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGhc5Czwds 6 | 3eBJhpc11obMm39TNwxNQkn+vhWr6swt/0qO8tIkRRm2DIiV0XmmAAck34+yJXnZ 7 | Y4sZD3N4kWXjjywA+c3XXjLqwv7HiBoNfzXdQsAqTVMiVJ++Zwe2qgzLXWmvrD4C 8 | Sdx1jm74/CU32O2XR7fXHfygzy7JZbY/a/BLeiySYjFhVLRj1mfh93rjjCZbVqgz 9 | eKrmB/kaYj6lz4VAXgPTJE0VYl/5LcM07SssUp/Qbol7zQyM22kkdDoxiUUNMZIg 10 | XNnrdrlx3YLVAkObL4NCWJztVuATlslZPSWKL1fDQkNidmmH5a9unQJxdA805ueI 11 | 3ZRABHcWsVB7AgMBAAGjXzBdMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr 12 | BgEFBQcDATAMBgNVHRMBAf8EAjAAMCgGA1UdEQQhMB+CHSouYXBwcy5oZWlzZW5i 13 | dWcubHBsYWIub25saW5lMA0GCSqGSIb3DQEBCwUAA4IBAQCBFP8WbTKKdq2ORve5 14 | 0UTopZNQb0X1TA69IRi6VLVKGgSbHE8QIllX83hoTln2Pi+Pfm54momg+W8yTv4U 15 | CKV43HN4WfuLJTDbptOuFPqfsmqWReC6IQTRv4YPB7T5WAiysGBuHv3m7gM9Pvvj 16 | Al9UAdE1LIObU844TdH5DRvO5lsdStay22mQwAtJ9Y464mcN/y5gRxXW772Zf3Wv 17 | t21QfUK684Uo5w9LOu8QRRAmS3ICAgvYGwjeGYJ6LYS+9z7Y2ggvHC5L6Ue9m7Hu 18 | JAc6GeBRpbZFj0HhrDvSAppBYOFK5og2sUzQqrFlyBUdyu38cJIOl3ZweLU1Bvg6 19 | zkiO 20 | -----END CERTIFICATE----- 21 | -----BEGIN CERTIFICATE----- 22 | MIIC7TCCAdWgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtpbmdy 23 | ZXNzLW9wZXJhdG9yQDE1ODg4ODU5NDcwHhcNMjAwNTA3MjExMjI3WhcNMjIwNTA3 24 | MjExMjI4WjAmMSQwIgYDVQQDDBtpbmdyZXNzLW9wZXJhdG9yQDE1ODg4ODU5NDcw 25 | 
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIw6K6whtdMfH42vyoLsNp 26 | V/xz7XcSONkmgTUmipkb//WIawvD2k2bELKsz91Fag4xG44XaqlF0NYXaXqJ8CQh 27 | SmLmb7YEnI6alMqsXzXHOkTl2ZTsUCETND8sK5dtQqEBZeBN3/ywVfL42SufFgxw 28 | 3RO4XwhiCW4sg6UshoYn7DoQ55PfrWphjX4OHHdKI5kKzH3DNQ2leJBh6WHmbvWN 29 | 4zHsfm1x26nWkyoOEaeuedzdYf251Uw1WgUcHogbVaSnSlggG5YGs1wMG15z69ia 30 | 3S/l7vM2wFhshEFZavCkHwJm3znTDclmr9+jyxYfDk5ePF8fVVgfF+smcRHHrcWR 31 | AgMBAAGjJjAkMA4GA1UdDwEB/wQEAwICpDASBgNVHRMBAf8ECDAGAQH/AgEAMA0G 32 | CSqGSIb3DQEBCwUAA4IBAQCVut3BTmzWoSqm7QPGxbXpOFNI1CEZ8FAufW2Qh2/b 33 | Kz3JdnGPBDOkxAy+ZO3w4jr5n/H26oat1337e1JJeaX+W2t29CufCKAk6aTxCmB6 34 | 0NDyBmXRSJ082yAF1oZSiJdHOSRFvthOFIOF5I7IsGHXiJobX3eHF+yyTk9gRdUQ 35 | yGbEA5Tt2Z9FS508bNNW4X+fxB/QQRAgRuxOSM6eb8L3didOYKR+jdxnPcPyAsTz 36 | fmk1rn1ZL+ZQtH+k4/fu/tdVfSbZRf++S6a5YpsEgKp78ES5/nhuNEtAP2nQnAvt 37 | KSVWqSbohAFKX7QbbIDZkwsumB5l3rBknbEpuhqqY1x/ 38 | -----END CERTIFICATE----- 39 | -------------------------------------------------------------------------------- /haproxy-yaml/old/heisenbug.pem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansonmez/rhacmgitopslab/cc3091a314d4f41ef11c37207855e1b470fb1d40/haproxy-yaml/old/heisenbug.pem -------------------------------------------------------------------------------- /haproxy-yaml/old/mongo-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: haproxy-ssl 5 | namespace: haproxy-lb 6 | type: Opaque 7 | data: 8 | haproxy.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBc0xEditMUEUvMm5WRUZuQ2JnWlZscjV1TDJKaFF2K213S0ZKdE4zaG9jN2NxVktDCmRHSVBIeGFhSVkvZWsydU5RdG50YzZpNGt4VHoxRHVnQ3FBTG5mMVQvdkdoQkhDMitZWmF0WjFqcDB6eHRoVVcKcXU3Zm5HdS9IdlhpR0lUYWJ3d2FjRGt1ZjRTeGoraU5Gb3Q4Vi9KUFp2M04yR09pSll2eGNkL25YbGt2WXVFegpEcWpxbFFDK2pCcjM0SWNOMWFCOGRWMXFQdGNLdUxuVUczZ0tRVTV1VmNxZXMzc2dkRG9HeFRod0pVZGQxaW0wCmtGL0k1T09KNHRsNkRTczhjV0ljNE1mOFBUWThSby9Ma3lub0lhREk0QkdZMHJta2JXWlVzZiszSVFYWm5RZSsKVDJCa0p4MFEzUTJINDhVc3lmc05MMUhwV1ozRVFCdlcvYmlVandJREFRQUJBb0lCQUFVSDVLMUdJUlBNcFo1agpqOSttR29YQ1FrempGalhjSlpsU2ZsRENDSXljNFRlSSsrSG9jTm5Id1BlM3F2U3ptVUFRQ0d6RW53ekJDZmcvCkFsMkxGUW1ZanUyeFg0NUt6MHJuZ21tdFk0cjk2OXZ0V2NuZ0owbkMzNXc0RUFuMnJtbC9CTG14Unh1bVlZL2gKWlRNQkliYzNWUDl2dG82MHQ0Y3VKdC9CS2t0TFhxSWdhcm5FaVJhbVFVTmo3eWRpODkxN2d0aVhDMEpVNDZ3UApuTEpJeVFkNnFyTW40U1g1NFhGUGYyVm13QWVmWmNuZEFUbm9zb0JXWlRpMVp4VVhyZFJ5V25ad2p3eHBINHN2CnF0YlJZUEVocy9HK2FnWnF6aEZEZnp2S1RjazAwUTdmbElZOWg0L0hXQWM5Nll6VjBsOE1QOEFRTStrNzdXU3cKMEZEV3MyRUNnWUVBNWV2OVJLTXphaS9McTF6NUNRKzc1SnJJcFVwbzg1ZUxxeHo0dWFMR2JhUEJ6SElpZ2ZMbApONFczeFRHd3FGTHlKYXhGeWlhRVNhYjhBbWNNNGh5cEVZLzlxN0V5T0xiYUVEYjNMcGN5a3cyeTRlOGI1VHc4Ck5BTFpza3Y0c2pUQlFSVndMdkNmbXcvZy9rRlZQQ0xLMGRlNm9RM0F3RWp6bXNCWERLOUVuemNDZ1lFQXhMdFgKbXJVWVhqV04vTHU2SkNPa2sxdE9WRVhjRUdUZmlrZ3FYT0x6anR3bWlYNVVZakR4RmNSbUxBMFA4L0k5YmNVVgpKOEFHUHA2aml1WXNxQTlvekROb2J1SExWcnhqM0hKN2htc1B5Uko0Q3p4ZHVZS2UyUEp0MWM1YlVDdlhvUlA5CktpV3NnaDdMRFpiS0JhNzBiWHd1ZWxzeHJhdHdkSmh2VWFBVWNXa0NnWUVBcXJkWG5XQWtLK01ETWVEZGxFblgKZEQwdFgvVnBDZjFyR2dsdjhMNFk0WTJtdHZ2RC9ZTHhpUkZEU3lyREJhaFkzWG9WYzZ5M3FzeE84Q1c5M2lIegpzdk9jSWFCM0xyOHVHTDRIT05QUkhIa2VhajJWRTlkdXlCdVpzcFpRT1ZtTWFKdlhhVE1HL3lhQ0k2YzA5MnJKCjk1ZGJubnZNczdrM0Q1OWl3eXRjSTI4Q2dZQWlCRkxOaVdyQlAzbGhyZ1F0bzR3ZGZyZzRraXZQR2VJZFF3VXcKTW4zeXdkWllCK1FIWm5jSlVJQnFOWWdBb201RXdNTTdzRlJrRjRJbnprMENja2VvcDJyellVK0xtOStZaXFNWgpSd0hmdnJYTEc1RUpOckRJeW9KN2FjY1lRYnh6T2NXWXVCTHdIT3IzV285c0ZDaG5sZ1ZqV3Nsb1FqQ2J1SS9uCkNVci9vUUtCZ0dGdUIy
eWhvRHN4VE1JYkxFTlNaQVluTXNmTVlRTHBTOU9VclhqQldTM0JaZDB6MU5FM1B1NkMKb1ppV3FGQkZhaEhWdVBTcXVxR243emtxbXovaDVpeHVQWjF6SjhPd1JvaEU4eUM2SHYvYkxTRHlwWGQ4Q1podQpldlI3aFgySE1RM253RzMzTkU2WFdMYWxKZGM4T3BkK0tLUnRiWkpwNzJmcmZ5c09iV3dzCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlFWERDQ0EwU2dBd0lCQWdJVUtCUkNkSktDNktuSloyR3l4alErWVFkSlgza3dEUVlKS29aSWh2Y05BUUVMCkJRQXdTREVQTUEwR0ExVUVCaE1HVkhWeWEyVjVNUkV3RHdZRFZRUUhFd2hKYzNSaGJtSjFiREVOTUFzR0ExVUUKQ2hNRVVraFRRVEVUTUJFR0ExVUVBeE1LUzNWaVpYSnVaWFJsY3pBZUZ3MHlNREExTWpNeE56VTRNREJhRncweQpNVEExTWpNeE56VTRNREJhTUVneER6QU5CZ05WQkFZVEJsUjFjbXRsZVRFUk1BOEdBMVVFQnhNSVNYTjBZVzVpCmRXd3hEVEFMQmdOVkJBb1RCRkpJVTBFeEV6QVJCZ05WQkFNVENtdDFZbVZ5Ym1WMFpYTXdnZ0VpTUEwR0NTcUcKU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQ3dzTy80czhUL2FkVVFXY0p1QmxXV3ZtNHZZbUZDLzZiQQpvVW0wM2VHaHp0eXBVb0owWWc4ZkZwb2hqOTZUYTQxQzJlMXpxTGlURlBQVU82QUtvQXVkL1ZQKzhhRUVjTGI1CmhscTFuV09uVFBHMkZSYXE3dCtjYTc4ZTllSVloTnB2REJwd09TNS9oTEdQNkkwV2kzeFg4azltL2MzWVk2SWwKaS9GeDMrZGVXUzlpNFRNT3FPcVZBTDZNR3ZmZ2h3M1ZvSHgxWFdvKzF3cTR1ZFFiZUFwQlRtNVZ5cDZ6ZXlCMApPZ2JGT0hBbFIxM1dLYlNRWDhqazQ0bmkyWG9OS3p4eFloemd4L3c5Tmp4R2o4dVRLZWdob01qZ0VaalN1YVJ0ClpsU3gvN2NoQmRtZEI3NVBZR1FuSFJEZERZZmp4U3pKK3cwdlVlbFpuY1JBRzliOXVKU1BBZ01CQUFHamdnRTgKTUlJQk9EQU9CZ05WSFE4QkFmOEVCQU1DQmFBd0hRWURWUjBsQkJZd0ZBWUlLd1lCQlFVSEF3RUdDQ3NHQVFVRgpCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdIUVlEVlIwT0JCWUVGTDV4SUptRkFMSFN5QnA0bW9ZZzRrQzEzb3VuCk1COEdBMVVkSXdRWU1CYUFGSktwaWI0TjJxemIyWDZqKy9ReWJXSC9wN0F3TUlHNEJnTlZIUkVFZ2JBd2dhMkMKQ1d4dlkyRnNhRzl6ZElJVmJHOWpZV3hvYjNOMExteHZZMkZzWkc5dFlXbHVnaXB0YjI1bmJ5MXBjM1JoYm1KMQpiQzVoY0hCekxtaGxhWE5sYm1KMVp5NXNjR3hoWWk1dmJteHBibVdDSkcxdmJtZHZMV0Z1YTJGeVlTNWhjSEJ6CkxtcGxiR3g1TG14d2JHRmlMbTl1YkdsdVpZSUZiVzl1WjIrQ0MyMXZibWR2TG0xdmJtZHZnaDF0YjI1bmJ5NXQKYjI1bmJ5NXpkbU11WTJ4MWMzUmxjaTVzYjJOaGJJY0Vmd0FBQVRBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQQp1bGpMZDNoblpPY0VLVWlUSVl0cSt6UEoyU1hPOE5SQWhQVGpob1B3VklONDRmRWZPdSt6UUJ6ZXBjaHFIQko2CmNGYTdOSFUrR1Jubzk3dktxZFZMdU1SOGVGWlJJMlpQc2pVVlJ4b1IyaE5GNGxGcXVYMmJjS0tIak0zelFsVDUKcGExSUE4ZHpWVVY5VWlVcnRjaURFS3F6Sll4SUJLVkQ1dy9pcU9tMTBleDN6bWdLM2hQYURNaUVNeUVDR0duUAppQnZoWGdVQ01xdW1yVVc5MkRMUFJkS0lkMldmeWthUjhXNWlPOStITVBiNWJMVUZwSnhEa0QzS3RqdjFScWtZCjFrckpzSWpoRjhDMTkzUUNCbk5Eb0dXV3AzRjBCODZPTXR0UXd3NFpvWnJhdFB1bTcrMFhON3VIY3hBMGdEa3YKa0VuSVU2RFdOeWc3c0MvQTVpY1F3UT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 9 | ca.pem: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURoRENDQW15Z0F3SUJBZ0lVVGhMMm9BdEZRai9kRWlvdisrQ2s1SmJxVCtBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1NERVBNQTBHQTFVRUJoTUdWSFZ5YTJWNU1SRXdEd1lEVlFRSEV3aEpjM1JoYm1KMWJERU5NQXNHQTFVRQpDaE1FVWtoVFFURVRNQkVHQTFVRUF4TUtTM1ZpWlhKdVpYUmxjekFlRncweU1EQTFNak14TnpRNE1EQmFGdzB5Ck5UQTFNakl4TnpRNE1EQmFNRWd4RHpBTkJnTlZCQVlUQmxSMWNtdGxlVEVSTUE4R0ExVUVCeE1JU1hOMFlXNWkKZFd3eERUQUxCZ05WQkFvVEJGSklVMEV4RXpBUkJnTlZCQU1UQ2t0MVltVnlibVYwWlhNd2dnRWlNQTBHQ1NxRwpTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEbnY4cU5qZXoxZktTVTZDK0xwYk9oejFxWXZ4YVBNSU1aCjlMM3FBcExXdHB6YTV4NzV6ZG03M1Z2YjRNSVNBUDJCU3FoRjQrMnltMlhwTzVrdmg1VTlvSkNTYm5LS0xQK2kKckhCa0NwRC9EMXQ0R2I1QVhUb2ZPaHJSMW8xOWxLUVE3N3dXbUljSE5lK3lLRGdSYjFFTFVwOE85eW5aZFl0cQpiL1VGa3pvVzR0NXlYL1B0U3IvbXNrVStmZFRYRFZkM3JDaDljQi9tK00wZjR6U1dYV1hBWlpKSzB5YlM1UTdGCkw3TUdPZlYzMlJHYWNPQXhSclAzMVNjN0tHaEsvdlpRTnc1ZGp2TXgrMzRMZFc0cFp6QlZ5S2hBTUJvTTJVaS8KRXNGK1NsNE9BbStRTU1IVlMxZFhGcXE1dW9sV1NEckE1OGVEME1FTGJUYTVJU0R0L3NrbEFnTUJBQUdqWmpCawpNQTRHQTFVZER3RUIvd1FFQXdJQkJqQVNCZ05WSFJNQkFmOEVDREFHQVFIL0FnRUNNQjBHQTFVZERnUVdCQlNTCnFZbStEZHFzMjlsK28vdjBNbTFoLzZld01EQWZCZ05WSFNNRUdEQVdnQlNTcVltK0RkcXMyOWwrby92ME1tMWgKLzZld01EQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFjSHVsS1JYeFlVQXJBSUcyLzdDYUMzODBFaXJhNHdPSwpTR0NVUHJDM0lVVnZlQkZWRVZKV1FGc2ZJbDRpeWhON1hZMjF0dGtYNUNVY29ZTkVWeVVrYjBiOGdYeTU2a2o0Cm8xam5NVW1yeHkvcXRtc1QzYVk2L2g1Wm01RTNDd3VjZ0ZiTTlVNFcxVzJNQzdTOU93clBra2pmckNZNkFhTEkKeGo2dlR2TGNST29ndERGVXl5bUpPSi8vS2wxWXJxOWhFMWxSZVN0R0dXRm9XaEZUZENjQnNXSkdaekpSamlPegpqNitwV1p1a0JkMGNjTlNHT1p0cEdneVJYeHhSaVQwLzliZkkrRjZuVE9velFDRlRrS3JieXFvY3lWZ2hMMTk1CkdVK2FLNmZnUmorY3Nra3M3T3RjRXJ2VDdscEV2eWk2RnQ1T1lPcGxQaEtndWZFZlRnelBsdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 10 | -------------------------------------------------------------------------------- /haproxy-yaml/old/pemdeneme.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAsLDv+LPE/2nVEFnCbgZVlr5uL2JhQv+mwKFJtN3hoc7cqVKC 3 | dGIPHxaaIY/ek2uNQtntc6i4kxTz1DugCqALnf1T/vGhBHC2+YZatZ1jp0zxthUW 4 | qu7fnGu/HvXiGITabwwacDkuf4Sxj+iNFot8V/JPZv3N2GOiJYvxcd/nXlkvYuEz 5 | DqjqlQC+jBr34IcN1aB8dV1qPtcKuLnUG3gKQU5uVcqes3sgdDoGxThwJUdd1im0 6 | kF/I5OOJ4tl6DSs8cWIc4Mf8PTY8Ro/LkynoIaDI4BGY0rmkbWZUsf+3IQXZnQe+ 7 | T2BkJx0Q3Q2H48UsyfsNL1HpWZ3EQBvW/biUjwIDAQABAoIBAAUH5K1GIRPMpZ5j 8 | j9+mGoXCQkzjFjXcJZlSflDCCIyc4TeI++HocNnHwPe3qvSzmUAQCGzEnwzBCfg/ 9 | Al2LFQmYju2xX45Kz0rngmmtY4r969vtWcngJ0nC35w4EAn2rml/BLmxRxumYY/h 10 | ZTMBIbc3VP9vto60t4cuJt/BKktLXqIgarnEiRamQUNj7ydi8917gtiXC0JU46wP 11 | nLJIyQd6qrMn4SX54XFPf2VmwAefZcndATnosoBWZTi1ZxUXrdRyWnZwjwxpH4sv 12 | qtbRYPEhs/G+agZqzhFDfzvKTck00Q7flIY9h4/HWAc96YzV0l8MP8AQM+k77WSw 13 | 0FDWs2ECgYEA5ev9RKMzai/Lq1z5CQ+75JrIpUpo85eLqxz4uaLGbaPBzHIigfLl 14 | N4W3xTGwqFLyJaxFyiaESab8AmcM4hypEY/9q7EyOLbaEDb3Lpcykw2y4e8b5Tw8 15 | NALZskv4sjTBQRVwLvCfmw/g/kFVPCLK0de6oQ3AwEjzmsBXDK9EnzcCgYEAxLtX 16 | mrUYXjWN/Lu6JCOkk1tOVEXcEGTfikgqXOLzjtwmiX5UYjDxFcRmLA0P8/I9bcUV 17 | J8AGPp6jiuYsqA9ozDNobuHLVrxj3HJ7hmsPyRJ4CzxduYKe2PJt1c5bUCvXoRP9 18 | KiWsgh7LDZbKBa70bXwuelsxratwdJhvUaAUcWkCgYEAqrdXnWAkK+MDMeDdlEnX 19 | dD0tX/VpCf1rGglv8L4Y4Y2mtvvD/YLxiRFDSyrDBahY3XoVc6y3qsxO8CW93iHz 20 | svOcIaB3Lr8uGL4HONPRHHkeaj2VE9duyBuZspZQOVmMaJvXaTMG/yaCI6c092rJ 21 | 95dbnnvMs7k3D59iwytcI28CgYAiBFLNiWrBP3lhrgQto4wdfrg4kivPGeIdQwUw 22 | Mn3ywdZYB+QHZncJUIBqNYgAom5EwMM7sFRkF4Inzk0Cckeop2rzYU+Lm9+YiqMZ 23 | RwHfvrXLG5EJNrDIyoJ7accYQbxzOcWYuBLwHOr3Wo9sFChnlgVjWsloQjCbuI/n 24 | CUr/oQKBgGFuB2yhoDsxTMIbLENSZAYnMsfMYQLpS9OUrXjBWS3BZd0z1NE3Pu6C 25 | 
oZiWqFBFahHVuPSquqGn7zkqmz/h5ixuPZ1zJ8OwRohE8yC6Hv/bLSDypXd8CZhu 26 | evR7hX2HMQ3nwG33NE6XWLalJdc8Opd+KKRtbZJp72frfysObWws 27 | -----END RSA PRIVATE KEY----- 28 | -----BEGIN CERTIFICATE----- 29 | MIIEXDCCA0SgAwIBAgIUKBRCdJKC6KnJZ2GyxjQ+YQdJX3kwDQYJKoZIhvcNAQEL 30 | BQAwSDEPMA0GA1UEBhMGVHVya2V5MREwDwYDVQQHEwhJc3RhbmJ1bDENMAsGA1UE 31 | ChMEUkhTQTETMBEGA1UEAxMKS3ViZXJuZXRlczAeFw0yMDA1MjMxNzU4MDBaFw0y 32 | MTA1MjMxNzU4MDBaMEgxDzANBgNVBAYTBlR1cmtleTERMA8GA1UEBxMISXN0YW5i 33 | dWwxDTALBgNVBAoTBFJIU0ExEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqG 34 | SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwsO/4s8T/adUQWcJuBlWWvm4vYmFC/6bA 35 | oUm03eGhztypUoJ0Yg8fFpohj96Ta41C2e1zqLiTFPPUO6AKoAud/VP+8aEEcLb5 36 | hlq1nWOnTPG2FRaq7t+ca78e9eIYhNpvDBpwOS5/hLGP6I0Wi3xX8k9m/c3YY6Il 37 | i/Fx3+deWS9i4TMOqOqVAL6MGvfghw3VoHx1XWo+1wq4udQbeApBTm5Vyp6zeyB0 38 | OgbFOHAlR13WKbSQX8jk44ni2XoNKzxxYhzgx/w9NjxGj8uTKeghoMjgEZjSuaRt 39 | ZlSx/7chBdmdB75PYGQnHRDdDYfjxSzJ+w0vUelZncRAG9b9uJSPAgMBAAGjggE8 40 | MIIBODAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUF 41 | BwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFL5xIJmFALHSyBp4moYg4kC13oun 42 | MB8GA1UdIwQYMBaAFJKpib4N2qzb2X6j+/QybWH/p7AwMIG4BgNVHREEgbAwga2C 43 | CWxvY2FsaG9zdIIVbG9jYWxob3N0LmxvY2FsZG9tYWlugiptb25nby1pc3RhbmJ1 44 | bC5hcHBzLmhlaXNlbmJ1Zy5scGxhYi5vbmxpbmWCJG1vbmdvLWFua2FyYS5hcHBz 45 | LmplbGx5LmxwbGFiLm9ubGluZYIFbW9uZ2+CC21vbmdvLm1vbmdvgh1tb25nby5t 46 | b25nby5zdmMuY2x1c3Rlci5sb2NhbIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA 47 | uljLd3hnZOcEKUiTIYtq+zPJ2SXO8NRAhPTjhoPwVIN44fEfOu+zQBzepchqHBJ6 48 | cFa7NHU+GRno97vKqdVLuMR8eFZRI2ZPsjUVRxoR2hNF4lFquX2bcKKHjM3zQlT5 49 | pa1IA8dzVUV9UiUrtciDEKqzJYxIBKVD5w/iqOm10ex3zmgK3hPaDMiEMyECGGnP 50 | iBvhXgUCMqumrUW92DLPRdKId2WfykaR8W5iO9+HMPb5bLUFpJxDkD3Ktjv1RqkY 51 | 1krJsIjhF8C193QCBnNDoGWWp3F0B86OMttQww4ZoZratPum7+0XN7uHcxA0gDkv 52 | kEnIU6DWNyg7sC/A5icQwQ== 53 | -----END CERTIFICATE----- 54 | -------------------------------------------------------------------------------- /haproxy-yaml/old/sslhaproxy-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: haproxy-lb 5 | labels: 6 | app: haproxy-lb 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: haproxy-lb 12 | template: 13 | metadata: 14 | labels: 15 | app: haproxy-lb 16 | spec: 17 | containers: 18 | - image: haproxytech/haproxy-alpine:2.0.4 19 | imagePullPolicy: Always 20 | name: haproxy 21 | ports: 22 | - containerPort: 8443 23 | protocol: TCP 24 | - containerPort: 8080 25 | protocol: TCP 26 | volumeMounts: 27 | - mountPath: /etc/haproxy 28 | name: config 29 | - mountPath: /opt/haproxy-ssl/ 30 | name: haproxy-ssl 31 | volumes: 32 | - name: haproxy-ssl 33 | secret: 34 | secretName: haproxy-ssl 35 | - configMap: 36 | defaultMode: 420 37 | items: 38 | - key: haproxy 39 | path: haproxy.cfg 40 | name: haproxy 41 | name: config 42 | -------------------------------------------------------------------------------- /lab-4-acm/01_namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: simple-app 5 | -------------------------------------------------------------------------------- /lab-4-acm/02_channel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Channel 3 | metadata: 4 | name: federation 5 | namespace: simple-app 6 | spec: 7 | pathname: 
'https://github.com/ansonmez/rhacmgitopslab.git' 8 | type: GitHub 9 | -------------------------------------------------------------------------------- /lab-4-acm/03_application.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: app.k8s.io/v1beta1 2 | kind: Application 3 | metadata: 4 | name: httpd 5 | namespace: simple-app 6 | spec: 7 | selector: 8 | matchExpressions: 9 | - key: app 10 | operator: In 11 | values: 12 | - httpd 13 | componentKinds: 14 | - group: app.ibm.com/v1alpha1 15 | kind: Subscription 16 | -------------------------------------------------------------------------------- /lab-4-acm/04_placementrule_cluster1only.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | labels: 5 | app: httpd 6 | name: cluster1only 7 | namespace: simple-app 8 | spec: 9 | clusterSelector: 10 | matchLabels: 11 | clusterid: cluster1 12 | -------------------------------------------------------------------------------- /lab-4-acm/05_subscription.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: httpd 5 | namespace: simple-app 6 | labels: 7 | app: httpd 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-4-assets 11 | spec: 12 | channel: simple-app/federation 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster1only 17 | -------------------------------------------------------------------------------- /lab-4-assets/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: httpd 6 | name: httpd 7 | namespace: simple-app 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: httpd 13 | template: 14 | metadata: 15 | labels: 16 | app: httpd 17 | spec: 18 | containers: 19 | - image: rhscl/httpd-24-rhel7 20 | name: httpd 21 | ports: 22 | - containerPort: 8080 23 | -------------------------------------------------------------------------------- /lab-4-assets/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: simple-app 5 | -------------------------------------------------------------------------------- /lab-4-assets/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: httpd 7 | name: httpd 8 | namespace: simple-app 9 | spec: 10 | ports: 11 | - port: 8080 12 | protocol: TCP 13 | targetPort: 8080 14 | selector: 15 | app: httpd 16 | sessionAffinity: None 17 | type: ClusterIP 18 | -------------------------------------------------------------------------------- /lab-5-acm/01_namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: web-app 5 | -------------------------------------------------------------------------------- /lab-5-acm/02_channel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Channel 3 | metadata: 4 | name: 
web-app-channel 5 | namespace: web-app 6 | spec: 7 | pathname: 'https://github.com/ansonmez/rhacmgitopslab.git' 8 | type: GitHub 9 | -------------------------------------------------------------------------------- /lab-5-acm/03_application_webapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: app.k8s.io/v1beta1 2 | kind: Application 3 | metadata: 4 | name: web-app 5 | namespace: web-app 6 | spec: 7 | selector: 8 | matchExpressions: 9 | - key: deployment 10 | operator: In 11 | values: 12 | - hello 13 | componentKinds: 14 | - group: app.ibm.com/v1alpha1 15 | kind: Subscription 16 | -------------------------------------------------------------------------------- /lab-5-acm/04_placement_cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: cluster1 5 | namespace: web-app 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster1 10 | -------------------------------------------------------------------------------- /lab-5-acm/04_placement_cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: cluster2 5 | namespace: web-app 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster2 10 | -------------------------------------------------------------------------------- /lab-5-acm/04_placement_cluster3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: cluster3 5 | namespace: web-app 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster3 10 | -------------------------------------------------------------------------------- /lab-5-acm/05_subscription_cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: web-app-cluster1 5 | namespace: web-app 6 | labels: 7 | deployment: hello 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-5-assets/overlays/cluster1 11 | spec: 12 | channel: web-app/web-app-channel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster1 17 | -------------------------------------------------------------------------------- /lab-5-acm/05_subscription_cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: web-app-cluster2 5 | namespace: web-app 6 | labels: 7 | deployment: hello 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-5-assets/overlays/cluster2 11 | spec: 12 | channel: web-app/web-app-channel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster2 17 | -------------------------------------------------------------------------------- /lab-5-acm/05_subscription_cluster3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: web-app-cluster3 5 | namespace: web-app 6 | 
labels: 7 | deployment: hello 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-5-assets/overlays/cluster3 11 | spec: 12 | channel: web-app/web-app-channel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster3 17 | -------------------------------------------------------------------------------- /lab-5-assets/base/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: the-map 5 | data: 6 | altGreeting: "Good Morning!" 7 | enableRisky: "false" 8 | -------------------------------------------------------------------------------- /lab-5-assets/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: the-deployment 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | deployment: hello 10 | template: 11 | metadata: 12 | labels: 13 | deployment: hello 14 | spec: 15 | containers: 16 | - name: the-container 17 | image: monopole/hello:1 18 | command: ["/hello", 19 | "--port=8080", 20 | "--enableRiskyFeature=$(ENABLE_RISKY)"] 21 | ports: 22 | - containerPort: 8080 23 | env: 24 | - name: ALT_GREETING 25 | valueFrom: 26 | configMapKeyRef: 27 | name: the-map 28 | key: altGreeting 29 | - name: ENABLE_RISKY 30 | valueFrom: 31 | configMapKeyRef: 32 | name: the-map 33 | key: enableRisky 34 | -------------------------------------------------------------------------------- /lab-5-assets/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - deployment.yaml 3 | - service.yaml 4 | - configmap.yaml 5 | - route.yaml 6 | - namespace.yaml 7 | -------------------------------------------------------------------------------- /lab-5-assets/base/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: web-app 5 | -------------------------------------------------------------------------------- /lab-5-assets/base/route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: changeme 7 | port: 8 | targetPort: 8080 9 | to: 10 | kind: Service 11 | name: the-service 12 | weight: 100 13 | status: 14 | ingress: [] 15 | -------------------------------------------------------------------------------- /lab-5-assets/base/route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: changeme 7 | port: 8 | targetPort: 8080 9 | to: 10 | kind: Service 11 | name: the-service 12 | weight: 100 13 | status: 14 | ingress: [] 15 | -------------------------------------------------------------------------------- /lab-5-assets/base/service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: the-service 5 | spec: 6 | selector: 7 | deployment: hello 8 | type: ClusterIP 9 | ports: 10 | - protocol: TCP 11 | port: 8080 12 | targetPort: 8080 13 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster1/configmap.yaml: 
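The cluster overlays that follow are thin kustomize patches over `lab-5-assets/base`: each one layers its own configmap, deployment, and route changes on top of the shared base. A quick way to inspect or apply the merged output for one cluster, assuming a recent `oc` with built-in kustomize support:

~~~sh
# Render the merged manifests that the ACM subscription would deploy
# for cluster1 (base resources plus the overlay's patches), read-only.
oc kustomize lab-5-assets/overlays/cluster1
# Or apply the same output directly to the current context for a test.
oc apply -k lab-5-assets/overlays/cluster1
~~~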
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: the-map 5 | data: 6 | altGreeting: "The app is running on cluster1" 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster1/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: the-deployment 5 | spec: 6 | replicas: 3 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | org: RHTE 3 | commonAnnotations: 4 | note: Hello, I am at Istanbul 5 | bases: 6 | - ../../base 7 | patchesStrategicMerge: 8 | - configmap.yaml 9 | - deployment.yaml 10 | - route.yaml 11 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster1/route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: web-app.apps.dev.a4e4.sandbox1545.opentlc.com 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster1/route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: changeme 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster2/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: the-map 5 | data: 6 | altGreeting: "The app is running on cluster2" 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster2/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: the-deployment 5 | spec: 6 | replicas: 5 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster2/kustomization.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | org: RHTE 3 | commonAnnotations: 4 | note: Hello, I am Ankara cluster! 
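# Everything in ../../base is pulled in through 'bases'; the files listed
# under patchesStrategicMerge override only the fields they set (greeting,
# replica count, route host), so each cluster serves its own variant of
# the same application.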
5 | bases: 6 | - ../../base 7 | patchesStrategicMerge: 8 | - configmap.yaml 9 | - deployment.yaml 10 | - route.yaml 11 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster2/route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: web-app.apps.cluster-df04.gcp.testdrive.openshift.com 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster2/route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: changeme 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster3/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: the-map 5 | data: 6 | altGreeting: "The app is running on cluster3" 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster3/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: the-deployment 5 | spec: 6 | replicas: 3 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster3/kustomization.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | org: RHTE 3 | commonAnnotations: 4 | note: Hello, I am cluster3! 
5 | bases: 6 | - ../../base 7 | patchesStrategicMerge: 8 | - configmap.yaml 9 | - deployment.yaml 10 | - route.yaml 11 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster3/route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: web-app.apps.cluster-ec17.ec17.sandbox28.opentlc.com 7 | -------------------------------------------------------------------------------- /lab-5-assets/overlays/cluster3/route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: the-route 5 | spec: 6 | host: changeme 7 | -------------------------------------------------------------------------------- /lab-6-acm/01_namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: mongo 5 | -------------------------------------------------------------------------------- /lab-6-acm/02_channel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Channel 3 | metadata: 4 | name: mongochannel 5 | namespace: mongo 6 | spec: 7 | pathname: 'https://github.com/ansonmez/rhacmgitopslab.git' 8 | type: GitHub 9 | -------------------------------------------------------------------------------- /lab-6-acm/03_application_mongo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: app.k8s.io/v1beta1 2 | kind: Application 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | selector: 8 | matchExpressions: 9 | - key: name 10 | operator: In 11 | values: 12 | - mongo 13 | componentKinds: 14 | - group: app.ibm.com/v1alpha1 15 | kind: Subscription 16 | -------------------------------------------------------------------------------- /lab-6-acm/04_placement_cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: cluster1 5 | namespace: mongo 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster1 10 | -------------------------------------------------------------------------------- /lab-6-acm/04_placement_cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: cluster2 5 | namespace: mongo 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster2 10 | -------------------------------------------------------------------------------- /lab-6-acm/04_placement_cluster3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: cluster3 5 | namespace: mongo 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster3 10 | -------------------------------------------------------------------------------- /lab-6-acm/05_subscription_cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: mongo-cluster1 5 | namespace: 
mongo 6 | labels: 7 | name: mongo 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-6-assets/overlays/cluster1 11 | spec: 12 | channel: mongo/mongochannel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster1 17 | -------------------------------------------------------------------------------- /lab-6-acm/05_subscription_cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: mongo-cluster2 5 | namespace: mongo 6 | labels: 7 | name: mongo 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-6-assets/overlays/cluster2 11 | spec: 12 | channel: mongo/mongochannel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster2 17 | -------------------------------------------------------------------------------- /lab-6-acm/05_subscription_cluster3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: mongo-cluster3 5 | namespace: mongo 6 | labels: 7 | name: mongo 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-6-assets/overlays/cluster3 11 | spec: 12 | channel: mongo/mongochannel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: cluster3 17 | -------------------------------------------------------------------------------- /lab-6-assets/.gitignore: -------------------------------------------------------------------------------- 1 | ca-config.json 2 | ca-csr.json 3 | mongodb-csr.json 4 | mongodb-key.pem 5 | mongodb.pem 6 | mongo.pem 7 | -------------------------------------------------------------------------------- /lab-6-assets/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - mongo-pvc.yaml 3 | - mongo-rs-deployment.yaml 4 | - mongo-secret.yaml 5 | - mongo-service.yaml 6 | - mongo-route.yaml 7 | - namespace.yaml 8 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | name: mongo 6 | name: mongo 7 | namespace: mongo 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 10Gi 14 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: placeholder 8 | port: 9 | targetPort: 27017 10 | tls: 11 | insecureEdgeTerminationPolicy: Redirect 12 | termination: passthrough 13 | to: 14 | kind: Service 15 | name: mongo 16 | weight: 100 17 | status: 18 | ingress: [] 19 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-rs-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: 
mongo 6 | name: mongo 7 | namespace: mongo 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: mongo 13 | template: 14 | metadata: 15 | labels: 16 | name: mongo 17 | spec: 18 | containers: 19 | - image: quay.io/mavazque/mongodb:autors2 20 | name: mongo 21 | ports: 22 | - containerPort: 27017 23 | env: 24 | - name: MONGODB_USER 25 | valueFrom: 26 | secretKeyRef: 27 | key: database-user 28 | name: mongodb-secret 29 | - name: MONGODB_PASSWORD 30 | valueFrom: 31 | secretKeyRef: 32 | key: database-password 33 | name: mongodb-secret 34 | - name: MONGODB_ADMIN_PASSWORD 35 | valueFrom: 36 | secretKeyRef: 37 | key: database-admin-password 38 | name: mongodb-secret 39 | - name: MONGODB_DATABASE 40 | valueFrom: 41 | secretKeyRef: 42 | key: database-name 43 | name: mongodb-secret 44 | - name: MONGODB_KEYFILE_VALUE 45 | valueFrom: 46 | secretKeyRef: 47 | key: keyfile-value 48 | name: mongodb-secret 49 | - name: MONGODB_REPLICA_NAME 50 | value: "rs0" 51 | - name: PRIMARY_NODE 52 | value: "primarynodehere" 53 | - name: REPLICA_MEMBERS 54 | value: "replicamembershere" 55 | volumeMounts: 56 | - mountPath: /data/db 57 | name: mongodb-data 58 | - mountPath: /opt/mongo-ssl/ 59 | name: mongodb-ssl 60 | - mountPath: /var/tmp/podinfo 61 | name: pod-info 62 | volumes: 63 | - name: mongodb-data 64 | persistentVolumeClaim: 65 | claimName: mongo 66 | - name: mongodb-ssl 67 | secret: 68 | secretName: mongodb-ssl 69 | - name: pod-info 70 | downwardAPI: 71 | items: 72 | - path: "labels" 73 | fieldRef: 74 | fieldPath: metadata.labels 75 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-rs-deployment.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: mongo 6 | name: mongo 7 | namespace: mongo 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: mongo 13 | template: 14 | metadata: 15 | labels: 16 | name: mongo 17 | spec: 18 | containers: 19 | - image: quay.io/mavazque/mongodb:autors2 20 | name: mongo 21 | ports: 22 | - containerPort: 27017 23 | env: 24 | - name: MONGODB_USER 25 | valueFrom: 26 | secretKeyRef: 27 | key: database-user 28 | name: mongodb-secret 29 | - name: MONGODB_PASSWORD 30 | valueFrom: 31 | secretKeyRef: 32 | key: database-password 33 | name: mongodb-secret 34 | - name: MONGODB_ADMIN_PASSWORD 35 | valueFrom: 36 | secretKeyRef: 37 | key: database-admin-password 38 | name: mongodb-secret 39 | - name: MONGODB_DATABASE 40 | valueFrom: 41 | secretKeyRef: 42 | key: database-name 43 | name: mongodb-secret 44 | - name: MONGODB_KEYFILE_VALUE 45 | valueFrom: 46 | secretKeyRef: 47 | key: keyfile-value 48 | name: mongodb-secret 49 | - name: MONGODB_REPLICA_NAME 50 | value: "rs0" 51 | - name: PRIMARY_NODE 52 | value: "primarynodehere" 53 | - name: REPLICA_MEMBERS 54 | value: "replicamembershere" 55 | volumeMounts: 56 | - mountPath: /data/db 57 | name: mongodb-data 58 | - mountPath: /opt/mongo-ssl/ 59 | name: mongodb-ssl 60 | - mountPath: /var/tmp/podinfo 61 | name: pod-info 62 | volumes: 63 | - name: mongodb-data 64 | persistentVolumeClaim: 65 | claimName: mongo 66 | - name: mongodb-ssl 67 | secret: 68 | secretName: mongodb-ssl 69 | - name: pod-info 70 | downwardAPI: 71 | items: 72 | - path: "labels" 73 | fieldRef: 74 | fieldPath: metadata.labels 75 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-secret.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: List 3 | metadata: 4 | name: mongodb-secret-list 5 | items: 6 | - apiVersion: v1 7 | kind: Secret 8 | metadata: 9 | name: mongodb-secret 10 | namespace: mongo 11 | type: Opaque 12 | data: 13 | database-admin-password: Y2x5ZGU= 14 | database-name: cGFjbWFu 15 | database-password: cGlua3k= 16 | database-user: Ymxpbmt5 17 | keyfile-value: cjNkaDR0Cg== 18 | - apiVersion: v1 19 | kind: Secret 20 | metadata: 21 | name: mongodb-ssl 22 | namespace: mongo 23 | type: Opaque 24 | data: 25 | mongodb.pem: DUMMY_PEM 26 | ca.pem: DUMMY_PEM 27 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-secret.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: List 3 | metadata: 4 | name: mongodb-secret-list 5 | items: 6 | - apiVersion: v1 7 | kind: Secret 8 | metadata: 9 | name: mongodb-secret 10 | namespace: mongo 11 | type: Opaque 12 | data: 13 | database-admin-password: Y2x5ZGU= 14 | database-name: cGFjbWFu 15 | database-password: cGlua3k= 16 | database-user: Ymxpbmt5 17 | keyfile-value: cjNkaDR0Cg== 18 | - apiVersion: v1 19 | kind: Secret 20 | metadata: 21 | name: mongodb-ssl 22 | namespace: mongo 23 | type: Opaque 24 | data: 25 | mongodb.pem: DUMMY_PEM 26 | ca.pem: DUMMY_PEM 27 | -------------------------------------------------------------------------------- /lab-6-assets/base/mongo-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: mongo 6 | name: mongo 7 | namespace: mongo 8 | spec: 9 | ports: 10 | - port: 27017 11 | protocol: TCP 12 | targetPort: 27017 13 | selector: 14 | name: mongo 15 | type: ClusterIP 16 | -------------------------------------------------------------------------------- /lab-6-assets/base/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: mongo 5 | -------------------------------------------------------------------------------- /lab-6-assets/ca-key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpQIBAAKCAQEAu7IM9nS8aXoIEiwEry9ZI40KWc8ktrYsPXReZK5oDfoey+EJ 3 | 90nd7UTdBYKNknVse9GWKTw7njKdgsdde/Xg56GQJPcVpuS5Mox6ZJTMFQnycwGt 4 | 3k2CO3hcrd9qK6r7LrAOmhpk1xgPg66SdtZ4rjcBxmmMRW45XcBRwLp5Ys/OFpRO 5 | aDu+uJ8WAy9hlrVFLuSMr+IbUYBxx/qP9+/07bPPiz+VRVHKofWWkjGxGYxXtkcv 6 | 93gc8BUScejoCU5FZZx7peF+ICjp1vdHbEpfHfleFTyqyMVGrZEJn9TmVo696wDr 7 | uibYDqzzwNARfi5iLnTMTW6iWTZv2fjgjrCfjQIDAQABAoIBAG1EEdfHzYd5ywkJ 8 | JxD092eCy+vMcwsKsmZ0QB9MiLbaTONTdoHHNrLRy/lRLyl7wlOyh0AqGGrX/GoN 9 | tZZcZTlzDH74eFpX/Jg4v2eeV9BHoiThbV5ksNP+FbYCwcP6HY/oXhfIFbxB/YYj 10 | MpCL+mUxEI95Ene42xyAwRb5d4DViVnBFvmgpSuJSmRrJQZwcBbYR9S1KEJ2UEbZ 11 | 1qOLs/UqCZHCtY4CBV3UyOk8Mx/V3TDOARUoZMvEXNMIOXaeWbsTqTovmdbFFsmJ 12 | 1iM8o1QqP4X3QcpXtUguvaf9J04CsSNtDf8pk38UN/ZcPGtG8YOmU03I8/lLxMNV 13 | DQgQNeECgYEA3xQQr6S/rrwZsouMzGyNLf80eZ+hCtEYYu6+GDGm4k7ez9o5AQI7 14 | 2zLgW2gZa1LhY6/yLl+t/92VoQ9rzKZ7uvMxPrfMvZdgwdQiP2nIjG7PpqWQ2tn9 15 | 3B0UU6RxYkx8UXjoIMNqnGif2AadZqLv3kl1tI2PTBUfYFn4QvTIC+cCgYEA12U3 16 | HwpmAg4jb7jD9kLdzqtdM49jfmMbVLSO/auJffmC0rxgjSvmFXC7ik1UCvgndQbY 17 | 8GTkFOKNxYFf3/hSkLfKfbx5pL0uRabSBwJvUCEI1vHoNDI+pGDmUF/OIQg7w1AF 18 | rFaA2FYR5Gtr0WblpIO6tiGz2CgSIo/8pA/qamsCgYEAzp/IQNiJj3C9IKqvlJ2r 19 
| OwKaeVkJEnrQarqWKtS6rf291apciHmoNYVvWFsYyFbxW6OZ36hSA6Abux4MjHdc 20 | PbKKV3xpUObJAV/bBP+XebQd9E6A2KV3xQzOShcTyqfrso8Z+1bmBc/G1pJx8qJT 21 | KYLDWqSQKTmbYWSUVnhiXtsCgYEAs90bSFpgkDrKMJLzYIK5wcirTVDhXjrYKcWU 22 | 2hMR/xYJvnv7jCcqPKEkmfq0wWwzVq/fG+7D3wTIKjM8okQ0fhBbkN+AHKa1KwiA 23 | vcW0ug672PGsW1nRoWYhLOCzujWr4g7CjgS2FvDYhAuHvxbTkKtHkKtYCJp9eksO 24 | Rtnbf/0CgYEAqx4Zijo+7u+/tOQTBXvFjoWJPGrAxZ6wDpgF2g97gBbB9bakOQx5 25 | CeOV6p9O1CaNxUbixvkESqJnJ5yw2DLkRS8gwAdrJr4dBpDQSO7mT48IFVhR+j0h 26 | Fncpg5mj7fXFmmAR2BFHLIJiRq7m0IGwk7u/o5HVm8xHMhD8KV2txVE= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /lab-6-assets/ca.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIICqjCCAZICAQAwZTELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYD 3 | VQQHEwZBdXN0aW4xEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAlRYMRMw 4 | EQYDVQQDEwpLdWJlcm5ldGVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 5 | AQEAu7IM9nS8aXoIEiwEry9ZI40KWc8ktrYsPXReZK5oDfoey+EJ90nd7UTdBYKN 6 | knVse9GWKTw7njKdgsdde/Xg56GQJPcVpuS5Mox6ZJTMFQnycwGt3k2CO3hcrd9q 7 | K6r7LrAOmhpk1xgPg66SdtZ4rjcBxmmMRW45XcBRwLp5Ys/OFpROaDu+uJ8WAy9h 8 | lrVFLuSMr+IbUYBxx/qP9+/07bPPiz+VRVHKofWWkjGxGYxXtkcv93gc8BUScejo 9 | CU5FZZx7peF+ICjp1vdHbEpfHfleFTyqyMVGrZEJn9TmVo696wDruibYDqzzwNAR 10 | fi5iLnTMTW6iWTZv2fjgjrCfjQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAFD3 11 | viubnTBtU6zpu3KL5qhSd1SWOd2IbO7BGalVPHAwnU/Jq699b/gexxvU24XjW3yq 12 | FXHM1ZnssQ2oFGN8wzlT6Do4lArXLhmhFSIokg6qFo1RfoJU654nU0VoOvta1xXm 13 | F6wuWc4CMJhYnlibILx9mpsymtYZ3Q/0oxP1FHw74J8dpbGQxnarWfzXC8VDvJ4n 14 | S11ulueWPrllqxtEH98tWnvMzUGmgODyxsNj5yyatxkW7kwSyLHed+CeqLSMgWkk 15 | ZpBF3I82+ZHI75IJT2JZapiEo0ZTAPC+/n3FEoppKSFNwybCYgVptpxSno60QS/h 16 | UwFXMuKjvqll7JgBqX0= 17 | -----END CERTIFICATE REQUEST----- 18 | -------------------------------------------------------------------------------- /lab-6-assets/ca.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDmjCCAoKgAwIBAgIUR9Ev0p0LQzp8adCz3r345vRDLlQwDQYJKoZIhvcNAQEL 3 | BQAwZTELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYDVQQHEwZBdXN0 4 | aW4xEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAlRYMRMwEQYDVQQDEwpL 5 | dWJlcm5ldGVzMB4XDTIwMDYxMDE0NTcwMFoXDTI1MDYwOTE0NTcwMFowZTELMAkG 6 | A1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYDVQQHEwZBdXN0aW4xEzARBgNV 7 | BAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAlRYMRMwEQYDVQQDEwpLdWJlcm5ldGVz 8 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu7IM9nS8aXoIEiwEry9Z 9 | I40KWc8ktrYsPXReZK5oDfoey+EJ90nd7UTdBYKNknVse9GWKTw7njKdgsdde/Xg 10 | 56GQJPcVpuS5Mox6ZJTMFQnycwGt3k2CO3hcrd9qK6r7LrAOmhpk1xgPg66SdtZ4 11 | rjcBxmmMRW45XcBRwLp5Ys/OFpROaDu+uJ8WAy9hlrVFLuSMr+IbUYBxx/qP9+/0 12 | 7bPPiz+VRVHKofWWkjGxGYxXtkcv93gc8BUScejoCU5FZZx7peF+ICjp1vdHbEpf 13 | HfleFTyqyMVGrZEJn9TmVo696wDruibYDqzzwNARfi5iLnTMTW6iWTZv2fjgjrCf 14 | jQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV 15 | HQ4EFgQUb1GgGoYP8aDf4ZXSBnTvpOeewHUwDQYJKoZIhvcNAQELBQADggEBALfF 16 | GAHn6TRZ/BwGmR9LaKr9uW6RgKJ+pXJgJkS4OpzP0dvN/iuFvNJAO4zIeloUJv4i 17 | 1MwJGpiczPF4qvoXnq194dgX2G+C6Wgm0zwU4P70heZy2SbyALrxagMvj0oXgs3V 18 | ezvL2KJy1mBA6xcAvCU2JXOPMmnbBlXVMyXaKwcEUeTsJJrXeXdMi8BvXTs2+d0/ 19 | fpHn+vcVn4MXcyE9WIQR2NBVqvxn48WWfXMs831WKK5gxMGvY5i1eToJ2C6AVdB4 20 | Mnpwm9bInfp5+mX9+LgtGtzLT21A53wAvxkpIpRW/l/aL9IRAHj5OYPHz3ST+GfJ 21 | XHgOW8xtBdbdAmHN1Wo= 22 | -----END CERTIFICATE----- 23 | -------------------------------------------------------------------------------- 
/lab-6-assets/exportvariables: -------------------------------------------------------------------------------- 1 | # Define the `NAMESPACE` variable 2 | NAMESPACE=mongo 3 | # Define the `SERVICE_NAME` variable 4 | SERVICE_NAME=mongo 5 | # Define the `ROUTE_CLUSTER1` variable 6 | ROUTE_CLUSTER1=mongo-istanbul.apps.heisenbug.lplab.online 7 | # Define the `ROUTE_CLUSTER2` variable 8 | ROUTE_CLUSTER2=mongo-ankara.apps.jelly.lplab.online 9 | SANS="localhost,localhost.localdomain,127.0.0.1,${ROUTE_CLUSTER1},${ROUTE_CLUSTER2},${SERVICE_NAME},${SERVICE_NAME}.${NAMESPACE},${SERVICE_NAME}.${NAMESPACE}.svc.cluster.local" 10 | -------------------------------------------------------------------------------- /lab-6-assets/mongodb.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIDzTCCArUCAQAwZTELMAkGA1UEBhMCVVMxDjAMBgNVBAgTBVRleGFzMQ8wDQYD 3 | VQQHEwZBdXN0aW4xEzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAlRYMRMw 4 | EQYDVQQDEwprdWJlcm5ldGVzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC 5 | AQEAvAO7zbDwKDjiKURzsyNrj5YqVXrSjcHDTyuRrfTLy4JbzlfBgo2KbJmLFO/1 6 | UCn0qgRlTeR+CcxdNnuNC4E048dUsqlOmDyztcBnnGMhD1w/YeTS3wAK1JMPTcun 7 | Y5wQmdibTqbWYjr6keVZZvdwdXjNxi5Jc5ix+z3bQQI4jxs7InkBH6Ux2fTAIV4e 8 | SzDSGY7gGQWO4rJG4rxKlw28GoX4+WSgbAe3ayoeoOahImEYUTr5N6Hy3TJ0s5CZ 9 | SFcr6VxkGBEVKENzMmHY1JruxZpr86UZqY49gOgKURCBEkRwiH0MmhzRN0pVHv4J 10 | VRl4moI3S+r/Mbq8JlI6n3bu9wIDAQABoIIBITCCAR0GCSqGSIb3DQEJDjGCAQ4w 11 | ggEKMIIBBgYDVR0RBIH+MIH7gglsb2NhbGhvc3SCFWxvY2FsaG9zdC5sb2NhbGRv 12 | bWFpboI0bW9uZ28tY2x1c3RlcjEuYXBwcy5sYWItMzY2OS5zYW5kYm94MTIzNy5v 13 | cGVudGxjLmNvbYIzbW9uZ28tY2x1c3RlcjIuYXBwcy5sYWItYmJmYi5zYW5kYm94 14 | MjgzLm9wZW50bGMuY29tgjNtb25nby1jbHVzdGVyMy5hcHBzLmxhYi1kNGFkLnNh 15 | bmRib3gzMjQub3BlbnRsYy5jb22CBW1vbmdvggttb25nby5tb25nb4IdbW9uZ28u 16 | bW9uZ28uc3ZjLmNsdXN0ZXIubG9jYWyHBH8AAAEwDQYJKoZIhvcNAQELBQADggEB 17 | ALFkMODhw19HSN3NFcgAAQEYBc1LDUwYD0X2gcU5WlUFaNn6g+KoH6DEEhvkCtK1 18 | fXLBE3ywIwv3+SnuDGAqOmPP+8uw7GBhhUkkdVvBlxU4Ybv6d98UOEPPy5vfkj/M 19 | h66rZmEaUX/UrnmwpzqlP0KHGT4MxzcNLVmWeHfnK+5t3o6WmsvJfrMnriz102x7 20 | X1uyovqd83efvtzk1Hm7Zon1RQ2lE/lL61f2luIxbjQUK4i7jmKF31ZC6sl4sZfU 21 | NSR7LtNksfYg+y9RfstfbHt9pZBxe1I0q5XVxfSWa8RD9dIVOYf0fOb3hF+50Vlj 22 | c0UsuKzMUZUToE+KCZBgXc4= 23 | -----END CERTIFICATE REQUEST----- 24 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | org: RHTr 3 | commonAnnotations: 4 | cluster: istanbul 5 | bases: 6 | - ../../base 7 | patchesStrategicMerge: 8 | - mongo-route.yaml 9 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster1/mongo-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: mongocluster1route 8 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster1/mongo-route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: mongocluster1route 8 | --------------------------------------------------------------------------------
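***Please note:*** The `exportvariables` file above only assembles the SAN list; the certificate itself is minted with the cfssl tooling introduced at the start of the lab. Below is a minimal sketch of that step, not the lab's exact invocation: the `ca-config.json` and `mongodb-csr.json` inputs are inferred from the repo's `.gitignore`, and the `kubernetes` profile name is an assumption. A `ROUTE_CLUSTER3` entry would be added to the SAN list when a third cluster serves MongoDB, as the SANs in the checked-in `mongodb.csr` suggest.

~~~sh
# Load NAMESPACE, SERVICE_NAME, ROUTE_CLUSTER* and SANS from the file above
source lab-6-assets/exportvariables

# Sign a MongoDB server certificate whose SANs cover the external routes and
# the in-cluster service names. The JSON file names and the profile name are
# assumptions (see the lead-in above), not confirmed by the repo.
cfssl gencert \
  -ca=lab-6-assets/ca.pem \
  -ca-key=lab-6-assets/ca-key.pem \
  -config=ca-config.json \
  -hostname="${SANS}" \
  -profile=kubernetes \
  mongodb-csr.json | cfssljson -bare mongodb

# cfssljson -bare writes mongodb.pem, mongodb-key.pem and mongodb.csr; the
# resulting PEMs would replace the DUMMY_PEM placeholders in mongo-secret.yaml
# (base64-encoded) before the secret is committed.
~~~

--------------------------------------------------------------------------------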
/lab-6-assets/overlays/cluster2/kustomization.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | org: RHTr 3 | commonAnnotations: 4 | cluster: ankara 5 | bases: 6 | - ../../base 7 | patchesStrategicMerge: 8 | - mongo-route.yaml 9 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster2/mongo-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: mongocluster2route 8 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster2/mongo-route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: mongocluster2route 8 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster3/kustomization.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | org: RHTE 3 | commonAnnotations: 4 | cluster: cluster3 5 | bases: 6 | - ../../base 7 | patchesStrategicMerge: 8 | - mongo-route.yaml 9 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster3/mongo-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: mongocluster3route 8 | -------------------------------------------------------------------------------- /lab-6-assets/overlays/cluster3/mongo-route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: mongo 5 | namespace: mongo 6 | spec: 7 | host: mongocluster3route 8 | -------------------------------------------------------------------------------- /lab-7-acm/01_namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: pacman 5 | -------------------------------------------------------------------------------- /lab-7-acm/02_channel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Channel 3 | metadata: 4 | name: pacmanchannel 5 | namespace: pacman 6 | spec: 7 | pathname: 'https://github.com/ansonmez/rhacmgitopslab.git' 8 | type: GitHub 9 | -------------------------------------------------------------------------------- /lab-7-acm/03_application_pacman.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: app.k8s.io/v1beta1 2 | kind: Application 3 | metadata: 4 | name: pacman 5 | namespace: pacman 6 | spec: 7 | selector: 8 | matchExpressions: 9 | - key: name 10 | operator: In 11 | values: 12 | - pacman 13 | componentKinds: 14 | - group: app.ibm.com/v1alpha1 15 | kind: Subscription 16 | -------------------------------------------------------------------------------- /lab-7-acm/04_placement_cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | 
kind: PlacementRule 3 | metadata: 4 | name: pacmancluster1 5 | namespace: pacman 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster1 10 | -------------------------------------------------------------------------------- /lab-7-acm/04_placement_cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: pacmancluster2 5 | namespace: pacman 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster2 10 | -------------------------------------------------------------------------------- /lab-7-acm/04_placement_cluster3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: PlacementRule 3 | metadata: 4 | name: pacmancluster3 5 | namespace: pacman 6 | spec: 7 | clusterSelector: 8 | matchLabels: 9 | clusterid: cluster3 10 | -------------------------------------------------------------------------------- /lab-7-acm/05_subscription_cluster1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: pacman-cluster1 5 | namespace: pacman 6 | labels: 7 | name: pacman 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-7-assets/overlays/cluster1 11 | spec: 12 | channel: pacman/pacmanchannel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: pacmancluster1 17 | -------------------------------------------------------------------------------- /lab-7-acm/05_subscription_cluster2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: pacman-cluster2 5 | namespace: pacman 6 | labels: 7 | name: pacman 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-7-assets/overlays/cluster2 11 | spec: 12 | channel: pacman/pacmanchannel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: pacmancluster2 17 | -------------------------------------------------------------------------------- /lab-7-acm/05_subscription_cluster3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps.open-cluster-management.io/v1 2 | kind: Subscription 3 | metadata: 4 | name: pacman-cluster3 5 | namespace: pacman 6 | labels: 7 | name: pacman 8 | annotations: 9 | apps.open-cluster-management.io/github-branch: master 10 | apps.open-cluster-management.io/github-path: lab-7-assets/overlays/cluster3 11 | spec: 12 | channel: pacman/pacmanchannel 13 | placement: 14 | placementRef: 15 | kind: PlacementRule 16 | name: pacmancluster3 17 | -------------------------------------------------------------------------------- /lab-7-assets/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - pacman-secret.yaml 3 | - pacman-service.yaml 4 | - pacman-service-account.yaml 5 | - pacman-cluster-role.yaml 6 | - pacman-cluster-role-binding.yaml 7 | - pacman-deployment.yaml 8 | - pacman-namespace.yaml 9 | - pacman-route.yaml 10 | -------------------------------------------------------------------------------- 
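The numbered `lab-7-acm` manifests above follow the same hub-side pattern as lab 6: namespace, channel, application, then per-cluster placement rules and subscriptions. A minimal sketch of rolling them out, assuming the `hubcluster` context configured earlier in the lab:

~~~sh
# Create the Pac-Man ACM resources on the hub cluster; oc applies directory
# contents in lexical order, so the numeric file prefixes keep the sequence.
oc config use-context hubcluster
oc apply -f lab-7-acm/

# Confirm the three per-cluster subscriptions were created
oc get subscriptions.apps.open-cluster-management.io -n pacman
~~~

--------------------------------------------------------------------------------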
/lab-7-assets/base/pacman-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: pacman 5 | namespace: pacman 6 | subjects: 7 | - kind: ServiceAccount 8 | name: pacman 9 | namespace: pacman 10 | roleRef: 11 | kind: ClusterRole 12 | name: pacman 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: pacman 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods","nodes"] 8 | verbs: ["get", "watch", "list"] 9 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: pacman 6 | name: pacman 7 | namespace: pacman 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: pacman 13 | template: 14 | metadata: 15 | labels: 16 | name: pacman 17 | spec: 18 | serviceAccount: pacman 19 | containers: 20 | - image: quay.io/ifont/pacman-nodejs-app 21 | name: pacman 22 | ports: 23 | - containerPort: 8080 24 | env: 25 | - name: MONGO_SERVICE_HOST 26 | # Single member MongoDB 27 | # value: primarymongohere 28 | # Replicaset (federated) MongoDB 29 | value: replicamembershere 30 | # Comment out MONGO_REPLICA_SET for the single-member Mongo 31 | - name: MONGO_REPLICA_SET 32 | value: rs0 33 | - name: MONGO_AUTH_USER 34 | valueFrom: 35 | secretKeyRef: 36 | key: database-user 37 | name: mongodb-users-secret 38 | - name: MONGO_AUTH_PWD 39 | valueFrom: 40 | secretKeyRef: 41 | key: database-password 42 | name: mongodb-users-secret 43 | - name: MONGO_DATABASE 44 | value: pacman 45 | - name: MY_MONGO_PORT 46 | value: "443" 47 | - name: MONGO_USE_SSL 48 | value: "true" 49 | - name: MONGO_VALIDATE_SSL 50 | value: "false" 51 | - name: MY_NODE_NAME 52 | valueFrom: 53 | fieldRef: 54 | fieldPath: spec.nodeName 55 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-deployment.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: pacman 6 | name: pacman 7 | namespace: pacman 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: pacman 13 | template: 14 | metadata: 15 | labels: 16 | name: pacman 17 | spec: 18 | serviceAccount: pacman 19 | containers: 20 | - image: quay.io/ifont/pacman-nodejs-app 21 | name: pacman 22 | ports: 23 | - containerPort: 8080 24 | env: 25 | - name: MONGO_SERVICE_HOST 26 | # Single member MongoDB 27 | # value: primarymongohere 28 | # Replicaset (federated) MongoDB 29 | value: replicamembershere 30 | # Comment out MONGO_REPLICA_SET for the single-member Mongo 31 | - name: MONGO_REPLICA_SET 32 | value: rs0 33 | - name: MONGO_AUTH_USER 34 | valueFrom: 35 | secretKeyRef: 36 | key: database-user 37 | name: mongodb-users-secret 38 | - name: MONGO_AUTH_PWD 39 | valueFrom: 40 | secretKeyRef: 41 | key: database-password 42 | name: mongodb-users-secret 43 | - name: MONGO_DATABASE 44 | value: pacman 45 | - name: MY_MONGO_PORT 46 | value: "443" 47 | - 
name: MONGO_USE_SSL 48 | value: "true" 49 | - name: MONGO_VALIDATE_SSL 50 | value: "false" 51 | - name: MY_NODE_NAME 52 | valueFrom: 53 | fieldRef: 54 | fieldPath: spec.nodeName 55 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: pacman 5 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: pacman 5 | namespace: pacman 6 | spec: 7 | host: pacmanhosthere 8 | port: 9 | targetPort: 8080 10 | to: 11 | kind: Service 12 | name: pacman 13 | weight: 100 14 | status: 15 | ingress: 16 | - conditions: 17 | - status: "True" 18 | type: Admitted 19 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-route.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: pacman 5 | namespace: pacman 6 | spec: 7 | host: pacmanhosthere 8 | port: 9 | targetPort: 8080 10 | to: 11 | kind: Service 12 | name: pacman 13 | weight: 100 14 | status: 15 | ingress: 16 | - conditions: 17 | - status: "True" 18 | type: Admitted 19 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mongodb-users-secret 5 | namespace: pacman 6 | type: Opaque 7 | data: 8 | database-admin-password: Y2x5ZGU= 9 | database-name: cGFjbWFu 10 | database-password: cGlua3k= 11 | database-user: Ymxpbmt5 12 | keyfile-value: cjNkaDR0Cg== 13 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: pacman 5 | namespace: pacman 6 | -------------------------------------------------------------------------------- /lab-7-assets/base/pacman-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: pacman 6 | name: pacman 7 | namespace: pacman 8 | spec: 9 | ports: 10 | - port: 8080 11 | protocol: TCP 12 | targetPort: 8080 13 | selector: 14 | name: pacman 15 | sessionAffinity: None 16 | type: ClusterIP 17 | -------------------------------------------------------------------------------- /lab-7-assets/overlays/cluster1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../base 3 | patchesStrategicMerge: 4 | - pacman-deployment.yaml 5 | -------------------------------------------------------------------------------- /lab-7-assets/overlays/cluster1/pacman-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: pacman 6 | name: pacman 7 | namespace: pacman 8 | spec: 9 | replicas: 1 10 | 
-------------------------------------------------------------------------------- /lab-7-assets/overlays/cluster2/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../base 3 | patchesStrategicMerge: 4 | - pacman-deployment.yaml 5 | -------------------------------------------------------------------------------- /lab-7-assets/overlays/cluster2/pacman-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: pacman 6 | name: pacman 7 | namespace: pacman 8 | spec: 9 | replicas: 1 10 | -------------------------------------------------------------------------------- /lab-7-assets/overlays/cluster3/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../base 3 | patchesStrategicMerge: 4 | - pacman-deployment.yaml 5 | 6 | -------------------------------------------------------------------------------- /lab-7-assets/overlays/cluster3/pacman-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | name: pacman 6 | name: pacman 7 | namespace: pacman 8 | spec: 9 | replicas: 1 10 | --------------------------------------------------------------------------------
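Each `lab-7-assets` overlay is a thin strategic-merge patch over the shared base, so the rendered output can be inspected before ACM ever touches a cluster. A quick sketch, assuming an `oc` build that bundles kustomize (the standalone `kustomize` binary works the same way):

~~~sh
# Render what the cluster1 subscription will deploy, without applying anything
oc kustomize lab-7-assets/overlays/cluster1

# Equivalent with the standalone binary
kustomize build lab-7-assets/overlays/cluster1
~~~

Note that `bases:` and `patchesStrategicMerge:` are deprecated in newer kustomize releases in favor of `resources:` and `patches:`; these overlays still render, but recent tooling may print a deprecation warning.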