├── .gitignore ├── 3.x ├── .gitignore ├── README.md ├── SSH-GUIDE.md ├── create_ocp3_workshop.sh ├── delete_ocp3_workshop.sh ├── my_vars.yml.sample └── ocp3_vars.yml ├── 4.x ├── .gitignore ├── README.md ├── create_ocp4_workshop.sh ├── delete_ocp4_workshop.sh ├── my_vars.yml.sample └── ocp4_vars.yml ├── LICENSE ├── README.md ├── archive_deleted.yml ├── constraints.txt ├── cors ├── README.md └── cors.yaml ├── demos ├── 2019_07_12 │ ├── README.md │ └── with-pv.yaml ├── 2019_07_Hackfest │ ├── Debugging.md │ ├── Migrate.md │ ├── README.md │ ├── Setup.md │ ├── files │ │ └── mssql-scc.yaml │ └── screenshots │ │ ├── 1.png │ │ ├── 10.png │ │ ├── 11.png │ │ ├── 12.png │ │ ├── 13.png │ │ ├── 14.png │ │ ├── 15.png │ │ ├── 16.png │ │ ├── 2.png │ │ ├── 3-a.png │ │ ├── 3.png │ │ ├── 4.png │ │ ├── 5-a.png │ │ ├── 5.5.png │ │ ├── 5.png │ │ ├── 6.png │ │ ├── 7.png │ │ ├── 8.png │ │ ├── 9.png │ │ ├── Screenshot from 2019-07-26 07-09-43.png │ │ ├── Screenshot from 2019-07-26 07-48-42.png │ │ ├── dest-app.png │ │ ├── dest-project.png │ │ ├── dest-route.png │ │ ├── dest.png │ │ ├── mssql3.png │ │ ├── mw3.png │ │ ├── mw4.png │ │ ├── pv1.png │ │ └── pv2.png ├── 2019_RHTE │ ├── keynote │ │ ├── .gitignore │ │ ├── 3.x │ │ │ ├── .gitignore │ │ │ ├── my_vars.yml.sample │ │ │ └── ocp3_vars.yml │ │ ├── 4.x │ │ │ ├── .gitignore │ │ │ ├── my_vars.yml.sample │ │ │ └── ocp4_vars.yml │ │ ├── README.md │ │ ├── create_rhte_env.sh │ │ ├── delete_rhte_env.sh │ │ ├── post-install │ │ │ ├── cors.yml │ │ │ ├── include │ │ │ │ ├── common.yml │ │ │ │ ├── connection.yml │ │ │ │ ├── migcluster.yaml.j2 │ │ │ │ ├── migstorage.yml.j2 │ │ │ │ └── mssql-scc.yaml.j2 │ │ │ ├── migcluster.yml │ │ │ └── minio.yml │ │ └── secret.yml.sample │ └── labs │ │ ├── 1.md │ │ ├── 2.md │ │ ├── 3.md │ │ ├── 4.md │ │ ├── 5.md │ │ ├── 6.md │ │ ├── 7.md │ │ ├── 8.md │ │ ├── 9.md │ │ ├── README.md │ │ ├── files │ │ ├── cpma.yaml │ │ ├── mssql-scc.yaml │ │ └── sock-shop-scc.yaml │ │ ├── noobaa.md │ │ ├── screenshots │ │ ├── lab1 │ │ │ ├── lab-env-overview.png │ │ │ └── request-env-gg.png │ │ ├── lab2 │ │ │ ├── minio-bucket-creation.png │ │ │ ├── minio-mybucket.png │ │ │ ├── minio_login.png │ │ │ └── ssh-details-gg.png │ │ ├── lab3 │ │ │ └── cpma-report-html.png │ │ ├── lab4 │ │ │ ├── cam-main-screen.png │ │ │ ├── copypv.png │ │ │ ├── mig-process.png │ │ │ ├── migtooling.png │ │ │ ├── movepv.png │ │ │ ├── stage-migrate.png │ │ │ └── velero.png │ │ ├── lab5 │ │ │ ├── .DS_Store │ │ │ ├── cam-add-cluster-success.png │ │ │ ├── cam-add-cluster.png │ │ │ ├── cam-add-repo-success.png │ │ │ ├── cam-add-repo.png │ │ │ ├── cam-clusters-added.png │ │ │ ├── cam-main-screen.png │ │ │ ├── cam-mig-plan-1.png │ │ │ ├── cam-mig-plan-2.png │ │ │ ├── cam-mig-plan-3.png │ │ │ ├── cam-mig-plan-4.png │ │ │ ├── cam-mig-plan-5.png │ │ │ ├── cam-mig-plan-added.png │ │ │ ├── cam-migration-complete.png │ │ │ ├── cam-progress-bar.png │ │ │ ├── cam-quiesce.png │ │ │ ├── cam-repo-added.png │ │ │ ├── cpma-mssql-report.png │ │ │ ├── mssql-add-product.png │ │ │ ├── mssql-added-product.png │ │ │ ├── mssql-app-route.png │ │ │ ├── mssql-namespace-detail.png │ │ │ ├── mssql-persistent-app-ocp4.png │ │ │ ├── mssql-product-catalog.png │ │ │ ├── mssql-pv-yaml.png │ │ │ ├── mssql-pv.png │ │ │ ├── mssql-pvc.png │ │ │ ├── mssql-pvcs-cpma.png │ │ │ ├── mssql-sccs-cpma.png │ │ │ └── ocp-4-console.png │ │ ├── lab6 │ │ │ ├── ocp4-sock-shop-pv-yaml.png │ │ │ ├── ocp4-sock-shop-pvcs.png │ │ │ ├── ocp4-sock-shop.png │ │ │ ├── sock-shop-arch.png │ │ │ ├── sock-shop-main.png │ │ │ ├── sock-shop-mig-plan-2.png │ │ 
│ ├── sock-shop-mig-plan-3.png │ │ │ ├── sock-shop-mig-plan-4.png │ │ │ ├── sock-shop-mig-plan-5.png │ │ │ ├── sock-shop-mig-plan-complete.png │ │ │ ├── sock-shop-mig-plan-quiesce.png │ │ │ ├── sock-shop-mig-plan-view.png │ │ │ ├── sock-shop-mig-plan.png │ │ │ ├── sock-shop-progress.png │ │ │ ├── sock-shop-pvc-cpma.png │ │ │ ├── sock-shop-register.png │ │ │ ├── sock-shop-scc-cpma.png │ │ │ └── success.png │ │ ├── lab7 │ │ │ ├── mig-custom-resources.png │ │ │ └── mig-plan-failed.png │ │ └── noobaa │ │ │ ├── .DS_Store │ │ │ ├── noobaa-bucket-created.png │ │ │ ├── noobaa-buckets-screen.png │ │ │ ├── noobaa-create-bucket-screen.png │ │ │ └── noobaa-login-screen.png │ │ └── scripts │ │ ├── README.md │ │ ├── cors.yaml │ │ └── lab8 │ │ ├── deploy.sh │ │ ├── destroy.sh │ │ └── probe.sh └── 2020_Summit │ ├── labs │ ├── 1.md │ ├── 2.md │ ├── 3.md │ ├── 4.md │ ├── 5.md │ ├── 6.md │ ├── 7.md │ ├── 8.md │ ├── 9.md │ ├── README.md │ ├── files │ │ ├── cpma.yaml │ │ ├── mssql-scc.yaml │ │ └── sock-shop-scc.yaml │ ├── noobaa.md │ ├── screenshots │ │ ├── lab1 │ │ │ ├── lab-env-overview.png │ │ │ └── request-env-gg.png │ │ ├── lab2 │ │ │ ├── minio-bucket-creation.png │ │ │ ├── minio-mybucket.png │ │ │ ├── minio_login.png │ │ │ └── ssh-details-gg.png │ │ ├── lab3 │ │ │ └── cpma-report-html.png │ │ ├── lab4 │ │ │ ├── cam-main-screen.png │ │ │ ├── copypv.png │ │ │ ├── mig-process.png │ │ │ ├── migtooling.png │ │ │ ├── movepv.png │ │ │ ├── stage-migrate.png │ │ │ └── velero.png │ │ ├── lab5 │ │ │ ├── .DS_Store │ │ │ ├── cam-add-cluster-success.png │ │ │ ├── cam-add-cluster.png │ │ │ ├── cam-add-repo-success.png │ │ │ ├── cam-add-repo.png │ │ │ ├── cam-clusters-added.png │ │ │ ├── cam-main-screen.png │ │ │ ├── cam-mig-plan-1.png │ │ │ ├── cam-mig-plan-2.png │ │ │ ├── cam-mig-plan-3.png │ │ │ ├── cam-mig-plan-4.png │ │ │ ├── cam-mig-plan-5.png │ │ │ ├── cam-mig-plan-added.png │ │ │ ├── cam-migration-complete.png │ │ │ ├── cam-progress-bar.png │ │ │ ├── cam-quiesce.png │ │ │ ├── cam-repo-added.png │ │ │ ├── cpma-mssql-report.png │ │ │ ├── mssql-add-product.png │ │ │ ├── mssql-added-product.png │ │ │ ├── mssql-app-route.png │ │ │ ├── mssql-namespace-detail.png │ │ │ ├── mssql-persistent-app-ocp4.png │ │ │ ├── mssql-product-catalog.png │ │ │ ├── mssql-pv-yaml.png │ │ │ ├── mssql-pv.png │ │ │ ├── mssql-pvc.png │ │ │ ├── mssql-pvcs-cpma.png │ │ │ ├── mssql-sccs-cpma.png │ │ │ └── ocp-4-console.png │ │ ├── lab6 │ │ │ ├── ocp4-sock-shop-pv-yaml.png │ │ │ ├── ocp4-sock-shop-pvcs.png │ │ │ ├── ocp4-sock-shop.png │ │ │ ├── sock-shop-arch.png │ │ │ ├── sock-shop-main.png │ │ │ ├── sock-shop-mig-plan-2.png │ │ │ ├── sock-shop-mig-plan-3.png │ │ │ ├── sock-shop-mig-plan-4.png │ │ │ ├── sock-shop-mig-plan-5.png │ │ │ ├── sock-shop-mig-plan-complete.png │ │ │ ├── sock-shop-mig-plan-quiesce.png │ │ │ ├── sock-shop-mig-plan-view.png │ │ │ ├── sock-shop-mig-plan.png │ │ │ ├── sock-shop-progress.png │ │ │ ├── sock-shop-pvc-cpma.png │ │ │ ├── sock-shop-register.png │ │ │ ├── sock-shop-scc-cpma.png │ │ │ └── success.png │ │ ├── lab7 │ │ │ ├── mig-custom-resources.png │ │ │ └── mig-plan-failed.png │ │ └── noobaa │ │ │ ├── .DS_Store │ │ │ ├── noobaa-bucket-created.png │ │ │ ├── noobaa-buckets-screen.png │ │ │ ├── noobaa-create-bucket-screen.png │ │ │ └── noobaa-login-screen.png │ └── scripts │ │ ├── README.md │ │ ├── bookbag.yml │ │ ├── cors.yaml │ │ └── lab8 │ │ ├── deploy.sh │ │ ├── destroy.sh │ │ └── probe.sh │ └── tests │ ├── 3.x │ ├── .gitignore │ ├── README.md │ ├── create_ocp3_workshop.sh │ ├── delete_ocp3_workshop.sh │ ├── 
my_vars.yml.sample │ └── ocp3_vars.yml │ ├── 4.x │ ├── .gitignore │ ├── create_ocp4_workshop.sh │ ├── delete_ocp4_workshop.sh │ ├── my_vars.yml.sample │ └── ocp4_vars.yml │ ├── README.md │ └── post-install.yaml ├── files ├── README.md ├── bookbag-oadp.yml ├── bookbag.yml ├── lab8 │ ├── deploy.sh │ ├── destroy.sh │ └── probe.sh └── prepare_station.sh ├── requirements.txt ├── secret.ocp3.yml.sample ├── secret.ocp4.yml.sample ├── secret.yml.sample └── workloads ├── README.md ├── ansible.cfg ├── deploy_workload.sh ├── workload.yml └── workload_vars ├── ceph.yml ├── mediawiki.yml ├── migration.yml ├── minio.yml ├── mssql.yml ├── noobaa.yml ├── ocs-poc.yml ├── parks-app.yml ├── robot-shop.yml ├── rocket-chat.yml └── sock-shop.yml /.gitignore: -------------------------------------------------------------------------------- 1 | env 2 | my_vars.yml 3 | secret.yml 4 | secret.ocp3.yml 5 | secret.ocp4.yml 6 | -------------------------------------------------------------------------------- /3.x/.gitignore: -------------------------------------------------------------------------------- 1 | my_vars.yml 2 | secret.yml 3 | 4 | -------------------------------------------------------------------------------- /3.x/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | The scripts here will deploy an OpenShift 3 cluster with a Bastion host; all SSH traffic to the cluster needs to use the bastion as a proxy. Provisioning will likely take on the order of ~70-90 minutes to complete. 3 | 4 | # Usage 5 | ## Create Cluster 6 | 1. Ensure that you have a `../secret.yml` in the parent directory 7 | 1. Ensure that you have `cp my_vars.yml.sample my_vars.yml` and you have edited 'my_vars.yml' 8 | 1. Ensure that `AGNOSTICD_HOME` environment variable is set 9 | 1. Run: `create_ocp3_workshop.sh` 10 | 1. Wait ... ~70 - 90 minutes 11 | 12 | ## Destroy Cluster 13 | 1. Ensure that you have a `../secret.yml` in the parent directory 14 | 1. Ensure that you have `cp my_vars.yml.sample my_vars.yml` and you have edited 'my_vars.yml' 15 | 1. Ensure that `AGNOSTICD_HOME` environment variable is set 16 | 1. Run: `delete_ocp3_workshop.sh` 17 | 1. Wait ... ~5 minutes 18 | * If something goes wrong and you need to do a manual deletion, you can clean up the AWS resources by finding the relevant CloudFormation template and deleting it via the CloudFormation service in the AWS Management Console, in the correct region. 19 | 20 | # Tips 21 | 22 | ## Example: oc login 23 | 24 | $ oc login https://master.jmatthewsagn1.mg.dog8code.com:443 -u opentlc-mgr -p r3dh4t1! 25 | The server uses a certificate signed by an unknown authority. 26 | You can bypass the certificate check, but any data you send to the server could be intercepted by others. 27 | Use insecure connections? (y/n): yes 28 | 29 | # or, alternatively, create a new kubeconfig file to reference later 30 | export KUBECONFIG=~/.agnosticd/jmatthewsagn1/kubeconfig 31 | $ oc login https://master.jmatthewsagn1.mg.dog8code.com -u opentlc-mgr -p r3dh4t1! --config ${KUBECONFIG} 32 | 33 | 34 | 35 | ## Example: log into console 36 | 1. Look for the console info in stdout 37 | 38 | skipping: [localhost] => (item=user.info: Openshift Master Console: https://master.jmatthewsagn1.mg.dog8code.com/console) => {"item": "user.info: Openshift Master Console: https://master.jmatthewsagn1.mg.dog8code.com/console"} 39 | 40 | * Visit: https://master.jmatthewsagn1.mg.dog8code.com 41 | 42 | * Username: opentlc-mgr 43 | * Password: r3dh4t1!
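* Optional: a quick reachability check of the console URL from a terminal before opening a browser (the hostname below is the sample GUID/domain used in this README; substitute your own):

      $ curl -k -I https://master.jmatthewsagn1.mg.dog8code.com/console
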
44 | * You can change the admin user name with 45 | 46 | * -e 'admin_user=*some_name*' 47 | 48 | 49 | ## Example: SSH into nodes 50 | 1. Find the output_dir, defined in `my_vars.yml` 51 | 1. Use the generated `*_ssh_conf` in the output directory to leverage the bastion as a proxy 52 | 53 | * Example: 54 | 55 | $ ssh -F /tmp/agnostic_jmatthewsagn1/ocp-workshop_jmatthewsagn1_ssh_conf master1 56 | 57 | 58 | You can also link the SSH config in your default SSH configuration as described in this [doc](./SSH-GUIDE.md) 59 | 60 | # Troubleshooting 61 | 62 | ## Playbook hangs on a shell command 63 | The overall installation should take ~ 50 minutes to complete. 64 | If your playbook hangs for an unusually long time, find out which task it is hung on. Log in to the remote host and pull the Ansible logs from /root/ansible.log 65 | It will most likely be the case that the task is already completed on the remote host but your local playbook is waiting for completion. In that case, you can add a tag to the task on which the playbook hung and run the playbook again by skipping those tasks using `--skip-tags <tag>`. (only applicable when the task is completed without errors) 66 | More info at https://github.com/redhat-cop/agnosticd/issues/464 67 | 68 | ## Reloading master configuration 69 | From OKD 3.10 onwards, master processes are no longer managed by systemd on the host. They run as pods on the master node. Run /usr/local/bin/master-restart api and /usr/local/bin/master-restart controller to restart the master api and controller after updating master-config. 70 | 71 | ## Playbook exits with error `some required packages are available at higher version than requested` (atomic-openshift package in particular) 72 | Make sure that the variable osrelease is set to 3.11.104. 73 | Versions < 3.11.98 are expected to throw this error. 74 | 75 | ## `oc command not found` error 76 | This is because OCP did not get installed at all. 77 | Make sure you have set the software_to_deploy variable to openshift 78 | -------------------------------------------------------------------------------- /3.x/SSH-GUIDE.md: -------------------------------------------------------------------------------- 1 | # SSH configuration for 3.x clusters 2 | 3 | AgnosticD exports a set of SSH configurations after cluster installation. It lets users SSH into master, worker and support nodes directly from their computer without having to SSH into the bastion host. Another advantage of using the auto-generated SSH config is that users can use the internal DNS addresses of the nodes in order to SSH into them. The internal DNS addresses can be easily found in the output of the `oc get nodes` command. 4 | 5 | ## How to set it up 6 | 7 | The auto-generated SSH config can be found in the `output_dir` specified in the `my_vars.yml` file [here](https://github.com/konveyor/mig-agnosticd/blob/cb744dda550a74a6b23c57a08733597f2aa69bb9/3.x/my_vars.yml.sample#L14). 8 | 9 | In the output directory, there are two SSH configs, `ocp-workshop_${YOUR_GUID}_ssh_conf` and `ssh-config-ocp-workshop-${YOUR_GUID}`. Just include these files at the very top of your `~/.ssh/config` file: 10 | 11 | ```sh 12 | Include ${your_output_dir}/ocp-workshop_${YOUR_GUID}_ssh_conf 13 | Include ${your_output_dir}/ssh-config-ocp-workshop-${YOUR_GUID} 14 | ... 15 | ... 16 | ``` 17 | 18 | Once set up, try SSHing into your internal nodes directly from your computer.
See example below : 19 | 20 | ```sh 21 | # Find the internal DNS addresses of nodes 22 | [user@localhost DataGenerator]$ oc get nodes 23 | NAME STATUS ROLES AGE VERSION 24 | infranode1.guid.internal Ready infra 36d v1.11.0+d4cacc0 25 | master1.guid.internal Ready master 36d v1.11.0+d4cacc0 26 | node1.guid.internal Ready compute 36d v1.11.0+d4cacc0 27 | node2.guid.internal Ready compute 36d v1.11.0+d4cacc0 28 | support1.guid.internal Ready compute 36d v1.11.0+d4cacc0 29 | support2.guid.internal Ready compute 36d v1.11.0+d4cacc0 30 | support3.guid.internal Ready compute 36d v1.11.0+d4cacc0 31 | 32 | # use the internal address to ssh 33 | [user@localhost DataGenerator]$ ssh node1.guid.internal 34 | Last login: Thu Jul 9 21:30:59 2020 from 192.168.0.8 35 | [ec2-user@node1 ~]$ 36 | ``` 37 | 38 | If SSH complains about bad permissions on the config files, simply run : 39 | 40 | ```sh 41 | chmod 600 ${your_output_dir}/ocp-workshop_${YOUR_GUID}_ssh_conf 42 | chmod 600 ${your_output_dir}/ssh-config-ocp-workshop-${YOUR_GUID} 43 | ``` 44 | 45 | If your SSH client doesn't allow including config files, you can just copy the contents of both the files directly in your main `~/.ssh/config` file. 46 | -------------------------------------------------------------------------------- /3.x/create_ocp3_workshop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OUR_DIR=`pwd` 4 | 5 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 6 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 7 | exit 8 | fi 9 | 10 | pushd . 11 | cd ${AGNOSTICD_HOME} 12 | ansible-playbook ${AGNOSTICD_HOME}/ansible/main.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/ocp3_vars.yml -e @${OUR_DIR}/../secret.yml -e @${OUR_DIR}/../secret.ocp3.yml "$@" 13 | rc=$? 14 | popd 15 | exit ${rc} 16 | -------------------------------------------------------------------------------- /3.x/delete_ocp3_workshop.sh: -------------------------------------------------------------------------------- 1 | OUR_DIR=`pwd` 2 | 3 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 4 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 5 | exit 6 | fi 7 | 8 | pushd . 9 | cd ${AGNOSTICD_HOME} 10 | ansible-playbook ${AGNOSTICD_HOME}/ansible/configs/ocp-workshop/destroy_env.yml ${OUR_DIR}/../archive_deleted.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/ocp3_vars.yml -e @${OUR_DIR}/../secret.yml "$@" 11 | popd 12 | -------------------------------------------------------------------------------- /3.x/my_vars.yml.sample: -------------------------------------------------------------------------------- 1 | ## Must Change 2 | email: "jmatthew@redhat.com" 3 | 4 | # The 'guid' will be used to construct a unique URL for your cluster 5 | # it will also be used to generate the output directory for all of the 6 | # files created for this cluster, ssh_conf, kubeconfig, etc. 
7 | # 8 | guid: jwm0701ocp3a 9 | 10 | # Avoid using /tmp if files are automatically purged after some time 11 | # For example on MacOS, /tmp is purged after a week or so 12 | # If you lose the files for output_dir of agnosticd it can make it hard 13 | # to ssh into the provisioned hosts or clean up 14 | output_dir: "/home/REPLACE_USERNAME/.agnosticd/{{ guid }}" 15 | 16 | subdomain_base_suffix: .mg.dog8code.com 17 | HostedZoneId: Z2GE8CSGW2ZA8W 18 | 19 | 20 | key_name: libra # your private key [ must be present at ~/.ssh/.pem ] 21 | 22 | cloud_provider: ec2 23 | aws_region: us-west-2 24 | 25 | 26 | cloud_tags: # list of custom tags to add to your aws resources 27 | - owner: "{{ email }}" 28 | 29 | # if you want to install OCS with your OCP 3 30 | # environment, then set this var to true. 31 | install_glusterfs: false 32 | 33 | node_instance_count: 2 34 | -------------------------------------------------------------------------------- /3.x/ocp3_vars.yml: -------------------------------------------------------------------------------- 1 | env_type: "ocp-workshop" 2 | # Uncomment the following lines for other 3.x releases 3 | # repo_version: "3.7" 4 | # osrelease: "3.7.119" 5 | # repo_version: "3.9" 6 | # osrelease: "3.9.99" 7 | # repo_version: "3.10" 8 | # osrelease: "3.10.181" 9 | repo_version: "3.11" 10 | osrelease: "3.11.272" 11 | software_to_deploy: "openshift" 12 | course_name: "ocp-workshop" 13 | platform: "aws" 14 | install_k8s_modules: true 15 | 16 | # multi-user configuration 17 | install_idm: htpasswd 18 | user_count: 20 19 | user_password: r3dh4t1! 20 | remove_self_provisioners: false 21 | 22 | bastion_instance_type: "t2.large" 23 | master_instance_type: "m4.large" 24 | infranode_instance_type: "m4.2xlarge" 25 | node_instance_type: "m4.xlarge" 26 | support_instance_type: "m4.large" 27 | 28 | support_instance_public_dns: true 29 | 30 | nfs_exports_config: "*(insecure,rw,no_root_squash,no_wdelay,sync)" 31 | nfs_server_address: "support1.{{ guid }}{{ subdomain_base_suffix }}" 32 | 33 | # You can set the following variable to 'true' to enable 34 | # Let's Encrypt certificates for your cluster. However, 35 | # due to weekly limits, it is discouraged to do so. 36 | # install_lets_encrypt_certificates: true 37 | # 38 | 39 | # Archive content to this location and clean output_dir to prevent re-use 40 | # of a deleted OCP cluster's files 41 | archive_dir: "{{ output_dir | dirname }}/archive" 42 | 43 | uuid: "{{ guid }}" 44 | -------------------------------------------------------------------------------- /4.x/.gitignore: -------------------------------------------------------------------------------- 1 | my_vars.yml 2 | secret.yml 3 | 4 | -------------------------------------------------------------------------------- /4.x/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | The scripts here will deploy an OpenShift 4 cluster with a Bastion host. Provisioning will likely take on the order of ~60 minutes to complete. 3 | 4 | # Usage 5 | ## Create Cluster 6 | 1. Ensure that you have a `../secret.yml` in the parent directory 7 | 1. Ensure that you have `cp my_vars.yml.sample my_vars.yml` and you have edited 'my_vars.yml' 8 | 1. Ensure that `AGNOSTICD_HOME` environment variable is set 9 | 1. Run: `create_ocp4_workshop.sh` 10 | 1. Wait ... ~60 minutes 11 | 12 | ## Destroy Cluster 13 | 1. Ensure that you have a `../secret.yml` in the parent directory 14 | 1.
Ensure that you have `cp my_vars.yml.sample my_vars.yml` and you have edited 'my_vars.yml' 15 | 1. Ensure that `AGNOSTICD_HOME` environment variable is set 16 | 1. Run: `delete_ocp4_workshop.sh` 17 | 1. Wait ... ~5-10 minutes 18 | * If something goes wrong, manual cleanup is difficult. The rough steps are later in this document. 19 | 20 | 21 | 22 | 23 | 24 | # Tips 25 | 26 | ## Example: Running `oc` or `kubectl` commands with the provided 'KUBECONFIG' (you need to `export KUBECONFIG`) 27 | You will find a kubeconfig file has been copied to your host in the `output_dir` specified in `my_vars.yml` 28 | 29 | * Example, my output_dir is `/tmp/agnostic_jwm0702ocp4a` and in there is a file `ocp4-workshop_jwm0702ocp4a_kubeconfig` 30 | 31 | $ export KUBECONFIG=/tmp/agnostic_jwm0702ocp4a/ocp4-workshop_jwm0702ocp4a_kubeconfig 32 | $ oc whoami 33 | system:admin 34 | 35 | ## Obtaining kubeadmin password 36 | 37 | Look in the directory configured by `output_dir` specified in `my_vars.yml`; in that directory will be a file `*_kubeadmin-password` that contains the password for 'kubeadmin' 38 | 39 | $ cat /tmp/agnostic_jwm0702ocp4a/ocp4-workshop_jwm0702ocp4a_kubeadmin-password 40 | d34yS-vY429C-7fj2-CmEa9 41 | 42 | 43 | 44 | ## Log into the web console 45 | The console address is computed as: 46 | `https://console-openshift-console.apps.cluster-${guid}.${guid}${subdomain_base_suffix}/` 47 | 48 | * For example in `my_vars.yml` I have 49 | 50 | guid: jwm0702ocp4a 51 | subdomain_base_suffix: .mg.dog8code.com 52 | 53 | * So my console is at: https://console-openshift-console.apps.cluster-jwm0702ocp4a.jwm0702ocp4a.mg.dog8code.com/ 54 | 55 | * I can also determine this by looking at the routes in the `openshift-console` namespace on the cluster 56 | 57 | 58 | $ export KUBECONFIG=/tmp/agnostic_jwm0702ocp4a/ocp4-workshop_jwm0702ocp4a_kubeconfig 59 | $ oc get routes -n openshift-console 60 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 61 | console console-openshift-console.apps.cluster-jwm0702ocp4a.jwm0702ocp4a.mg.dog8code.com console https reencrypt/Redirect None 62 | downloads downloads-openshift-console.apps.cluster-jwm0702ocp4a.jwm0702ocp4a.mg.dog8code.com downloads http edge None 63 | 64 | * I log in with credentials of: 65 | 66 | * Username: 'kubeadmin' 67 | * Password: From `${output_dir}/ocp4-workshop_${guid}_kubeadmin-password` 68 | 69 | 70 | 71 | 72 | ## SSH 73 | OCP 4 environments are not typically ones you will SSH into, but in this case the install is happening on the 'clientvm', so you may find it useful to SSH to the clientvm 74 | 75 | * To find the 'clientvm', look at your `output_dir` from `my_vars.yml`, then find the file ending in `*_ssh_conf` in that directory.
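* A quick way to list it from a shell (treating `${output_dir}` here as a placeholder for the value you set in `my_vars.yml`):

      $ ls ${output_dir}/*_ssh_conf
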
76 | 77 | * Example, mine is `/tmp/agnostic_jwm0702ocp4a/ocp4-workshop_jwm0702ocp4a_ssh_conf` 78 | 79 | $ cat /tmp/agnostic_jwm0702ocp4a/ocp4-workshop_jwm0702ocp4a_ssh_conf 80 | ##### BEGIN ADDED BASTION PROXY HOST clientvm.jwm0702ocp4a.internal ocp4-workshop-jwm0702ocp4a ###### 81 | Host clientvm.jwm0702ocp4a.internal clientvm 82 | Hostname ec2-34-209-31-72.us-west-2.compute.amazonaws.com 83 | IdentityFile ~/.ssh/libra.pem 84 | IdentitiesOnly yes 85 | User ec2-user 86 | ControlMaster auto 87 | ControlPath /tmp/jwm0702ocp4a-%r-%h-%p 88 | ControlPersist 5m 89 | StrictHostKeyChecking no 90 | ConnectTimeout 60 91 | ConnectionAttempts 10 92 | UserKnownHostsFile /tmp/agnostic_jwm0702ocp4a/ocp4-workshop_jwm0702ocp4a_ssh_known_hosts 93 | ##### END ADDED BASTION PROXY HOST clientvm.jwm0702ocp4a.internal ocp4-workshop-jwm0702ocp4a ###### 94 | * SSH into the clientvm 95 | 96 | $ ssh -i ~/.ssh/libra.pem ec2-user@ec2-34-209-31-72.us-west-2.compute.amazonaws.com 97 | [ec2-user@clientvm 0 ~]$ cd cluster-jwm0702ocp4a/ 98 | [ec2-user@clientvm 0 ~/cluster-jwm0702ocp4a]$ ls 99 | auth/ metadata.json terraform.aws.auto.tfvars terraform.tfstate terraform.tfvars tls/ 100 | $ openshift-install destroy cluster 101 | 102 | 103 | # Troubleshooting 104 | 105 | ## Manual cleanup if something goes wrong from `delete_ocp4_workshop.sh` 106 | 1. Attempt to run "openshift-install destroy cluster" from the clientvm that deployed the cluster 107 | 108 | * Follow steps above in 'Tips' to ssh into your clientvm 109 | 110 | $ ssh -i ~/.ssh/libra.pem ec2-user@ec2-34-209-31-72.us-west-2.compute.amazonaws.com 111 | 112 | [ec2-user@clientvm 0 ~]$ cd cluster-jwm0702ocp4a/ 113 | 114 | [ec2-user@clientvm 0 ~/cluster-jwm0702ocp4a]$ ls 115 | auth/ metadata.json terraform.aws.auto.tfvars terraform.tfstate terraform.tfvars tls/ 116 | 117 | $ openshift-install destroy cluster 118 | 119 | 1. After you have cleaned up the terraform-provisioned resources, and only after, you can tear down the clientvm by deleting the associated CloudFormation template 120 | 121 | * Remember...the terraform state of the ocp 4 cluster is stored locally on the clientvm .... don't delete the clientvm until you have attempted to run `openshift-install destroy cluster` 122 | 123 | 124 | ## Error converting YAML to JSON. Could not find expected key ‘Install Config’ 125 | This is most likely because of a bad custom configuration file generated for cluster installation. 126 | In my case, it was because I did not put ocp4_pull_secret (pull-secret) within single quotes. 127 | 128 | ## Playbook hangs on `openshift-installer create` phase 129 | Make sure that the cluster creation is complete. To verify, log in to the clientvm host. There will be a directory created at /home/ec2-user/cluster-<guid> for your cluster installation. Tail the .openshift_installer.log file in that directory and check whether the cluster creation step succeeded. If not, wait till it completes. You can also check whether the openshift-installer process has exited. If all of this looks okay to you, then your playbook just hung on shell completion or a bad network connection. You can follow steps in this issue. 130 | 131 | ## `htpasswd` replaces the default kubeadmin user 132 | Use opentlc-mgr as user and r3dh4t1!
as password 133 | -------------------------------------------------------------------------------- /4.x/create_ocp4_workshop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | OUR_DIR=`pwd` 3 | 4 | OS="$(uname)" 5 | if [ "$OS" = "Darwin" ]; then 6 | # Required for MacOS with virtualenv 7 | # as per https://github.com/konveyor/mig-agnosticd/issues/182 8 | export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES 9 | fi 10 | 11 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 12 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 13 | exit 14 | fi 15 | 16 | pushd . 17 | cd ${AGNOSTICD_HOME} 18 | ansible-playbook ${AGNOSTICD_HOME}/ansible/main.yml -e ACTION="create" -e @${OUR_DIR}/ocp4_vars.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/../secret.yml -e @${OUR_DIR}/../secret.ocp4.yml "$@" 19 | rc=$? 20 | popd 21 | exit ${rc} 22 | -------------------------------------------------------------------------------- /4.x/delete_ocp4_workshop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | OUR_DIR=`pwd` 3 | 4 | OS="$(uname)" 5 | if [ "$OS" = "Darwin" ]; then 6 | # Required for MacOS with virtualenv 7 | # as per https://github.com/konveyor/mig-agnosticd/issues/182 8 | export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES 9 | fi 10 | 11 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 12 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 13 | exit 14 | fi 15 | 16 | pushd . 17 | cd ${AGNOSTICD_HOME} 18 | ansible-playbook ${AGNOSTICD_HOME}/ansible/destroy.yml ${OUR_DIR}/../archive_deleted.yml -e ACTION="destroy" -e @${OUR_DIR}/ocp4_vars.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/../secret.yml -e @${OUR_DIR}/../secret.ocp4.yml "$@" 19 | rc=$? 20 | popd 21 | exit ${rc} 22 | -------------------------------------------------------------------------------- /4.x/my_vars.yml.sample: -------------------------------------------------------------------------------- 1 | ## Must Change 2 | email: "jmatthew@redhat.com" 3 | 4 | # The 'guid' will be used to construct a unique URL for your cluster 5 | # it will also be used to generate the output directory for all of the 6 | # files created for this cluster, ssh_conf, kubeconfig, etc. 
7 | # 8 | guid: jwm0701ocp4b 9 | 10 | # Avoid using /tmp if files are automatically purged after some time 11 | # For example on MacOS, /tmp is purged after a week or so 12 | # If you lose the files for output_dir of agnosticd it can make it hard 13 | # to ssh into the provisioned hosts or clean up 14 | output_dir: "/home/REPLACE_USERNAME/.agnosticd/{{ guid }}" 15 | 16 | subdomain_base_suffix: .mg.dog8code.com 17 | HostedZoneId: Z2GE8CSGW2ZA8W 18 | 19 | 20 | key_name: libra # your private key [ must be present at ~/.ssh/.pem ] 21 | cloud_provider: ec2 22 | aws_region: us-west-2 23 | 24 | 25 | cloud_tags: # list of custom tags to add to your aws resources 26 | - owner: "{{ email }}" 27 | 28 | 29 | # https://access.redhat.com/solutions/7007136 30 | # Need at least 4.11.36 or 4.12.12 31 | # ocp4_installer_version: "4.13.0" 32 | 33 | -------------------------------------------------------------------------------- /4.x/ocp4_vars.yml: -------------------------------------------------------------------------------- 1 | cloudformation_retries: 0 2 | env_type: ocp4-cluster 3 | software_to_deploy: openshift4 4 | 5 | # 6 | # Note that there was an issue in earlier 4.11 and 4.12 clusters with AWS: 7 | # https://access.redhat.com/solutions/7007136 8 | # Need to have at least: 4.11.36 or 4.12.12 9 | ocp4_installer_version: "4.12.12" 10 | osrelease: "4.6.0" 11 | 12 | update_packages: true 13 | install_ocp4: true 14 | # note konveyor projects will be expected to ship w/ FIPS compliance 15 | # https://docs.openshift.com/container-platform/4.9/installing/installing-fips.html 16 | # https://github.com/redhat-cop/agnosticd/blob/development/ansible/roles/host-ocp4-installer/templates/install-config.yaml.j2#L6 17 | # ocp4_fips_enable: true 18 | 19 | # Next settings are for Dev Preview releases of OpenShift 20 | # These will override the installer version with the direct downloads 21 | # Set ocp4_installer_use_dev_preview=True to enable the installer and client URLs 22 | # ocp4_installer_use_dev_preview: False 23 | # ocp4_installer_url: https://mirror.openshift.com/pub/openshift-v4/clients/ocp-dev-preview/4.2.0-0.nightly-2019-08-27-072819/openshift-install-linux-4.2.0-0.nightly-2019-08-27-072819.tar.gz 24 | # ocp4_client_url: https://mirror.openshift.com/pub/openshift-v4/clients/ocp-dev-preview/4.2.0-0.nightly-2019-08-27-072819/openshift-client-linux-4.2.0-0.nightly-2019-08-27-072819.tar.gz 25 | 26 | install_opentlc_integration: false 27 | install_idm: htpasswd 28 | install_ipa_client: false 29 | install_ftl: false 30 | smoke_tests: false 31 | 32 | # Set authentication passwords in secret file 33 | ocp4_workload_authentication_htpasswd_user_count: 0 34 | 35 | # We are intentionally setting the below to empty to reduce setup time 36 | # and skip deploying workloads we are not interested in. 37 | # TODO: In future we will likely place the roles we want to install for OCP Migration here. 38 | default_workloads: [] 39 | infra_workloads: [] 40 | student_workloads: [] 41 | 42 | # You can enable following workload to allow your cluster 43 | # to use "Let's Encrypt" certificates instead of self-signed 44 | # certificates. However, due to weekly rate limits, it is 45 | # discouraged to do so. 
46 | # default_workloads: [ocp4-workload-enable-lets-encrypt-certificates] 47 | 48 | clientvm_instance_type: "t2.medium" 49 | clientvm_instance_count: 1 50 | master_instance_type: "m4.xlarge" 51 | master_instance_count: 3 52 | worker_instance_type: "m4.xlarge" 53 | worker_instance_count: 3 54 | 55 | # Archive content to this location and clean output_dir to prevent re-use 56 | # of a deleted OCP cluster's files 57 | archive_dir: "{{ output_dir | dirname }}/archive" 58 | 59 | uuid: "{{ guid }}" 60 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OCP Migration Environments 2 | 3 | This repository contains configuration files and scripts intended to deploy OCP 3.x and 4.x environments to aid in developing and testing the Cluster Application Migration Tool. All workflows are leveraging https://github.com/redhat-cop/agnosticd. 4 | 5 | This repository is focused on providing the exact scripts/config files we are using so we may all work from a common configuration. The intent is that all future contributions continue to be done upstream in https://github.com/redhat-cop/agnosticd and this repo serves as the specific bash and yaml files to kick off agnosticd in the exact manner we require. 6 | 7 | Note we are only provisioning to AWS with the provided configurations. 8 | 9 | # What is offered 10 | ## AWS OCP 3.x Environment (Intended to be our Source Cluster) 11 | An OpenShift Ansible provisioned AWS multi-node environment leveraging CloudFormation for AWS infrastructure. 12 | 13 | The https://github.com/konveyor/mig-agnosticd/tree/master/3.x directory provides a means of deploying a '3.11' cluster. 14 | Additionally we will install: 15 | - Velero (our Fork): https://github.com/konveyor/velero 16 | - Velero Plugins: 17 | 18 | - https://github.com/konveyor/openshift-migration-plugin 19 | - https://github.com/konveyor/openshift-velero-plugin 20 | - Restic (our Fork): https://github.com/konveyor/restic 21 | 22 | ## AWS OCP 4.x Environment (Intended to be our Destination Cluster) 23 | An OpenShift Installer provisioned (IPI) AWS multi-node environment leveraging Terraform via the installer. 24 | 25 | The https://github.com/konveyor/mig-agnosticd/tree/master/4.x directory provides a means of deploying a '4.1' cluster. 26 | Additionally we will install: 27 | - Velero (our Fork): https://github.com/konveyor/velero 28 | - Velero Plugins: 29 | 30 | - https://github.com/konveyor/openshift-migration-plugin 31 | - https://github.com/konveyor/openshift-velero-plugin 32 | - Restic (our Fork): https://github.com/konveyor/restic 33 | - Migration CRDs/Controllers: https://github.com/konveyor/mig-controller 34 | - Migration UI: https://github.com/konveyor/mig-ui 35 | 36 | 37 | 38 | # How are the environments provisioned 39 | This repository is simply configuration files to drive https://github.com/redhat-cop/agnosticd; 'agnosticd' is a collection of Ansible configs/roles we are leveraging to deploy our OCP environments. 40 | 41 | The installation of the Velero and Migration bits is handled via roles in agnosticd which are leveraging the below operators: 42 | - https://github.com/konveyor/velero-operator 43 | - https://github.com/konveyor/mig-operator 44 | 45 | At this point in time, the operators are _not_ integrated with OLM.
The intent is that agnosticd is installing our operators via regular Deployments and then creating the needed CRs to instruct the Operators to install their applications. 46 | 47 | # Pre-requisites 48 | 49 | 1. Follow the [Software Requirements on workstation](https://github.com/redhat-cop/agnosticd/blob/development/docs/Preparing_your_workstation.adoc#software-required-for-deployment) from agnosticd docs 50 | 51 | 52 | * https://github.com/redhat-cop/agnosticd/blob/development/docs/Preparing_your_workstation.adoc#software-required-for-deployment 53 | 54 | 1. AWS Access: you will need AWS access according to the needs of OCP 3.x and 4.x deployments. 55 | 56 | - Admin Access is currently required for OCP 4.x 57 | - Access to a HostedZone in AWS Route53 is required, meaning you need a domain name managed by Route53 which can serve as the subdomain for your clusters 58 | 1. Checkout of https://github.com/redhat-cop/agnosticd 59 | 1. The 'AGNOSTICD_HOME' environment variable set, pointing to your agnosticd checkout 60 | 1. Creation of secret files in the base directory of this repo; see https://github.com/konveyor/mig-agnosticd/blob/master/secret.yml.sample 61 | 62 | The intent is that you will do something like: 63 | 64 | - `cp secret.yml.sample secret.yml` 65 | - `vim secret.yml` # and update the variables as comments instruct 66 | - Additionally, based on the environment (OCP4 or OCP3) you're provisioning, you will also need to configure an additional secret specific to the OCP version: 67 | - For OCP 3, you will copy the `secret.ocp3.yml.sample` to `secret.ocp3.yml` and update the file 68 | - For OCP 4, you will copy the `secret.ocp4.yml.sample` to `secret.ocp4.yml` and update the file 69 | 70 | # Pre-provisioning Steps 71 | ``` 72 | # Clone 'agnosticd' repo, which 'mig-agnosticd' (this repo) will call into for provisioning 73 | git clone https://github.com/redhat-cop/agnosticd.git 74 | cd agnosticd 75 | export AGNOSTICD_HOME=`pwd` # Consider exporting 'AGNOSTICD_HOME' in ~/.bashrc to the full repo path for future use. 76 | cd .. 77 | 78 | # Clone 'mig-agnosticd' repo (this repo) 79 | git clone https://github.com/konveyor/mig-agnosticd.git 80 | cd mig-agnosticd 81 | cp secret.yml.sample secret.yml 82 | vim secret.yml # Update based on comments in file 83 | 84 | # Fill out required vars for provisioning OpenShift 3.x 85 | cd 3.x 86 | cp my_vars.yml.sample my_vars.yml 87 | vim my_vars.yml # Update based on comments in file 88 | cd .. 89 | 90 | # Fill out required vars for provisioning OpenShift 4.x 91 | cd 4.x 92 | cp my_vars.yml.sample my_vars.yml 93 | vim my_vars.yml # Update based on comments in file 94 | cd .. 95 | ``` 96 | 97 | # FIPS - note a fips option has been added to ocp4_vars.yml 98 | FIPS is disabled by default. 99 | ``` 100 | # ocp4_fips_enable: true 101 | ``` 102 | 103 | ## Virtualenv (optional) 104 | * Installing Virtualenv 105 | ``` 106 | python3 -m venv env 107 | ``` 108 | 109 | * Activate Virtualenv and install requirements 110 | ``` 111 | source env/bin/activate 112 | PIP_CONSTRAINT=constraints.txt pip3 install -r requirements.txt 113 | ansible-galaxy collection install amazon.aws:2.2.0 community.aws:2.1.0 114 | ``` 115 | * See [PyYAML 5.4.1: AttributeError: cython_sources #207](https://github.com/migtools/mig-agnosticd/issues/207) for why the PIP_CONSTRAINT file is being used.
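* Optionally, sanity-check that the virtualenv picked up the pinned tooling before provisioning (a minimal sketch; the package and collection names below are assumptions based on the install steps above):
```
ansible --version
pip3 show pyyaml
ansible-galaxy collection list | grep -E 'amazon\.aws|community\.aws'
```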
116 | 117 | * To update any requirements 118 | ``` 119 | pip3 freeze > requirements.txt 120 | ``` 121 | 122 | 123 | ## Running AgnosticD to provision OpenShift 3 + 4 Clusters 124 | 125 | Before provisioning, ensure you have populated all necessary vars in: 126 | - `./secret.yml` 127 | - `./3.x/my_vars.yml` 128 | - `./4.x/my_vars.yml` 129 | 130 | **To provision an OpenShift Cluster with AgnosticD:** 131 | - 3.x cluster, see [./3.x/README.md](https://github.com/konveyor/mig-agnosticd/blob/master/3.x/README.md). 132 | - 4.x cluster, see [./4.x/README.md](https://github.com/konveyor/mig-agnosticd/blob/master/4.x/README.md). 133 | 134 | -------------------------------------------------------------------------------- /archive_deleted.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Archive files for {{ guid }}" 3 | hosts: localhost 4 | connection: local 5 | gather_facts: False 6 | become: no 7 | tasks: 8 | - name: Check for existing archive directory 9 | stat: 10 | path: "{{ archive_dir }}" 11 | register: archive_dir_details 12 | - name: Create archive directory 13 | file: 14 | path: "{{ archive_dir }}" 15 | state: directory 16 | when: not archive_dir_details.stat.exists 17 | - name: "Process {{ guid }} archive" 18 | block: 19 | - name: "Generate archive name for {{ guid }}" 20 | set_fact: 21 | guid_archive: "{{ guid }}.{{ lookup('pipe','date +%s') }}-{{ 9999 | random }}" 22 | run_once: yes 23 | - name: "Archive {{ guid }} to {{ guid_archive }}" 24 | archive: 25 | path: "{{ output_dir }}" 26 | dest: "{{ archive_dir }}/{{ guid_archive }}.tgz" 27 | format: gz 28 | remove: true 29 | rescue: 30 | - debug: 31 | msg: "Something went awry with {{ guid }} archival" 32 | -------------------------------------------------------------------------------- /constraints.txt: -------------------------------------------------------------------------------- 1 | cython < 3.0 2 | 3 | -------------------------------------------------------------------------------- /cors/README.md: -------------------------------------------------------------------------------- 1 | ## Openshift 3.x Cross Origin Resource Sharing (CORS) Settings for Mig UI 2 | 3 | This is a standalone script to apply Mig UI's [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) settings on Openshift 3.x clusters. 4 | 5 | Before running this script, please make sure that you deployed Mig UI on your 4.x cluster. It is assumed that you launched clusters with `mig-agnosticd`. 6 | 7 | This script uses the same `my_vars.yml` file you used when launching a new cluster. Make sure those files are present for both the clusters. 8 | 9 | ``` 10 | ../3.x/my_vars.yml 11 | ../4.x/my_vars.yml 12 | ``` 13 | 14 | ### Usage 15 | 16 | Run the script 17 | 18 | ``` 19 | ansible-playbook cors.yaml 20 | ``` 21 | 22 | There is no need to set any variables, the script reads `my_vars.yml` files and puts the right settings. 23 | 24 | ### Debugging CORS 25 | One method for debugging CORS is to simulate requests to the server with curl and look at the returned header. 26 | The below is an example of a script you can run to simulate a request with curl so you can see if the `access-control-allow-origin` header is accepting the mig-ui route. 
27 | 28 | This script assumes you change the variables for `OCP3_SERVER` and `UI_ROUTE` before running 29 | 30 | # OCP 3 Cluster 31 | # Change this value to your OCP 3 url 32 | OCP3_SERVER=https://master.jwm0710ocp3a.mg.example.com.com 33 | 34 | # The route of mig-ui, most likely on OCP 4 cluster 35 | # Change this value to your mig-ui route 36 | UI_ROUTE=https://migration-openshift-migration.apps.cluster-jwm0710ocp4a.jwm0710ocp4a.mg.example.com.com 37 | 38 | # The namespace you have CAM installed to 39 | CAM_NAMESPACE=openshift-migration 40 | 41 | curl -v -k -X OPTIONS \ 42 | "${OCP3_SERVER}/apis/migration.openshift.io/v1alpha1/namespaces/${CAM_NAMESPACE}/migclusters" \ 43 | -H "Access-Control-Request-Method: GET" \ 44 | -H "Access-Control-Request-Headers: authorization" \ 45 | -H "Origin: ${UI_ROUTE}" 46 | 47 | After running the above you want to see output similar to the below, you are looking for the path of the mig-ui route being in `access-control-allow-origin` 48 | 49 | < HTTP/2 204 50 | < access-control-allow-credentials: true 51 | < access-control-allow-headers: Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, X-Requested-With, If-Modified-Since 52 | < access-control-allow-methods: POST, GET, OPTIONS, PUT, DELETE, PATCH 53 | < access-control-allow-origin: https://migration-openshift-migration.apps.cluster-jwm0710ocp4a.jwm0710ocp4a.mg.example.com.com 54 | < access-control-expose-headers: Date 55 | < cache-control: no-store 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /cors/cors.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - name: "Reading v4 variables" 6 | include_vars: "../4.x/my_vars.yml" 7 | 8 | - name: "Setting facts" 9 | set_fact: 10 | guid_v4: "{{ guid }}" 11 | subdomain_v4: "{{ subdomain_base_suffix }}" 12 | 13 | - name: "Reading v3 variables" 14 | include_vars: "../3.x/my_vars.yml" 15 | 16 | - name: "Setting facts" 17 | set_fact: 18 | guid_v3: "{{ guid }}" 19 | subdomain_v3: "{{ subdomain_base_suffix }}" 20 | output_dir_v3: "{{ output_dir }}" 21 | 22 | - name: "Registering host" 23 | add_host: 24 | hostname: "master.{{ guid_v3 }}{{ subdomain_v3 }}" 25 | groups: "remote" 26 | 27 | - hosts: remote 28 | vars_files: 29 | - "../3.x/my_vars.yml" 30 | vars: 31 | ansible_ssh_private_key_file: "{{ output_dir }}/{{ guid }}key" 32 | ansible_user: ec2-user 33 | tasks: 34 | - name: "Including v4 variables" 35 | include_vars: "../4.x/my_vars.yml" 36 | delegate_to: localhost 37 | 38 | - name: "Adding new CORS rules" 39 | lineinfile: 40 | insertafter: "corsAllowedOrigins:" 41 | line: "- (?i)//migration-openshift-migration\\.apps\\.cluster-{{ guid }}\\.{{ guid }}\\{{ subdomain_base_suffix }}" 42 | path: /etc/origin/master/master-config.yaml 43 | become: yes 44 | 45 | - name: "Checking if atomic-openshift services exist [1]" 46 | shell: "systemctl status atomic-openshift-master-api" 47 | register: status 48 | become: yes 49 | ignore_errors: yes 50 | 51 | - name: "Applying new configuration [atomic-openshift services]" 52 | service: 53 | name: "{{ item }}" 54 | state: restarted 55 | loop: 56 | - atomic-openshift-master-api 57 | - atomic-openshift-master-controllers 58 | become: yes 59 | when: status.rc == 0 60 | 61 | - name: "Applying new configuration [master-restart]" 62 | shell: "/usr/local/bin/master-restart {{ item }}" 63 | loop: 64 | - api 65 | - controllers 66 | when: status.rc != 0 67 | 
become: yes 68 | -------------------------------------------------------------------------------- /demos/2019_07_12/with-pv.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 the Heptio Ark contributors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | --- 16 | apiVersion: v1 17 | kind: Namespace 18 | metadata: 19 | name: nginx-example 20 | labels: 21 | app: nginx 22 | 23 | --- 24 | kind: PersistentVolumeClaim 25 | apiVersion: v1 26 | metadata: 27 | name: nginx-logs 28 | namespace: nginx-example 29 | labels: 30 | app: nginx 31 | spec: 32 | accessModes: 33 | - ReadWriteOnce 34 | resources: 35 | requests: 36 | storage: 50Mi 37 | 38 | --- 39 | apiVersion: apps/v1beta1 40 | kind: Deployment 41 | metadata: 42 | name: nginx-deployment 43 | namespace: nginx-example 44 | spec: 45 | replicas: 1 46 | template: 47 | metadata: 48 | labels: 49 | app: nginx 50 | spec: 51 | volumes: 52 | - name: nginx-logs 53 | persistentVolumeClaim: 54 | claimName: nginx-logs 55 | containers: 56 | - image: docker.io/twalter/openshift-nginx 57 | name: nginx 58 | ports: 59 | - containerPort: 8081 60 | volumeMounts: 61 | - mountPath: "/var/log/nginx" 62 | name: nginx-logs 63 | readOnly: false 64 | 65 | --- 66 | apiVersion: v1 67 | kind: Service 68 | metadata: 69 | labels: 70 | app: nginx 71 | name: my-nginx 72 | namespace: nginx-example 73 | spec: 74 | ports: 75 | - port: 8081 76 | targetPort: 8081 77 | selector: 78 | app: nginx 79 | type: LoadBalancer 80 | 81 | --- 82 | apiVersion: route.openshift.io/v1 83 | kind: Route 84 | metadata: 85 | name: my-nginx 86 | namespace: nginx-example 87 | labels: 88 | app: nginx 89 | service: my-nginx 90 | spec: 91 | to: 92 | kind: Service 93 | name: my-nginx 94 | port: 95 | targetPort: 8081 96 | 97 | -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/Debugging.md: -------------------------------------------------------------------------------- 1 | # Debugging Failed Migrations 2 | 3 | __PLACEHOLDER__ 4 | -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/Migrate.md: -------------------------------------------------------------------------------- 1 | # Migration Tutorial 2 | 3 | In this tutorial, we are walked through how to successfully migrate an 4 | application workload (selected via namespace) from OCP 3.x to OCP 4.x using the 5 | Application Migration Tool. 6 | 7 | First, open up the migration UI. To get the route, run the following command on 8 | the destination cluster: 9 | ```bash 10 | $ oc get routes migration -n mig -o jsonpath='{.spec.host}' 11 | migration-mig.apps.cluster-dymurray-ocp4.dymurray-ocp4.mg.example.com 12 | ``` 13 | 14 | The screen should look something like: 15 | 16 | ![1](./screenshots/1.png?raw=true "1") 17 | 18 | ## Add a cluster 19 | 20 | First thing we want to do is add the source OCP cluster we wish to migrate the 21 | application from. 
Click `Add cluster`: 22 | 23 | ![1](./screenshots/1.png?raw=true "1") 24 | 25 | Fill out the necessary information. Be sure to see the [Setup 26 | Document](./Setup.md#source-cluster) to get the service account token and URL. 27 | 28 | ![3-a](./screenshots/3-a.png?raw=true "3a") 29 | 30 | When done, click `Check connection`. You should see a `Success!` message. Click 31 | `Add`. 32 | 33 | ![3](./screenshots/3.png?raw=true "2") 34 | 35 | ## Set up an AWS S3 bucket as a replication repository 36 | 37 | Next we want to add a replication repository. Click `Add Repository`: 38 | 39 | ![4](./screenshots/4.png?raw=true "4") 40 | 41 | Fill out the AWS S3 bucket credential information. 42 | 43 | ![5](./screenshots/5.png?raw=true "5") 44 | 45 | Click `Check connection` and you should see a `Success!` message appear if 46 | everything looks good. 47 | 48 | ![5.a](./screenshots/5-a.png?raw=true "5a") 49 | 50 | Click `Add` and view your repository on the main menu. Now that we have a 51 | replication repository specified and both the source and destination clusters 52 | defined, we can create a migration plan. Click `Add Plan`: 53 | 54 | ![6](./screenshots/6.png?raw=true "6") 55 | 56 | ## (Optional) View the application you wish to migrate 57 | 58 | * Mediawiki 59 | ![mw3](./screenshots/mw3.png?raw=true "mw3") 60 | 61 | * MSSQL 62 | ![mssql3](./screenshots/mssql3.png?raw=true "mssql3") 63 | 64 | ## Create a migration plan 65 | 66 | Now that we have a replication repository specified and both the source and 67 | destination clusters defined, we can create a migration plan. Click `Add Plan`: 68 | 69 | ![7](./screenshots/7.png?raw=true "7") 70 | 71 | Fill out a plan name: 72 | 73 | ![8](./screenshots/8.png?raw=true "8") 74 | 75 | Select the source cluster name: 76 | 77 | ![9](./screenshots/9.png?raw=true "9") 78 | 79 | Select the namespace(s) you wish to migrate over. This will be either 80 | `mssql-persistent` or `mediawiki`: 81 | 82 | ![10](./screenshots/10.png?raw=true "10") 83 | 84 | Now we are shown a list of persistent volumes associated with our 85 | application workload. Select which type of action you would like to perform on 86 | the PV. For this example, let's select `copy`: 87 | 88 | ![11](./screenshots/11.png?raw=true "11") 89 | 90 | Select your migration targets. Select the previously created replication 91 | repository and leave the target cluster field as `host`. Optionally change the 92 | name of the namespace you wish to have on the destination cluster. 93 | 94 | ![12](./screenshots/12.png?raw=true "12") 95 | 96 | After validating the migration plan, you will see a `Ready` message and you can 97 | click `Close`: 98 | 99 | ![13](./screenshots/13.png?raw=true "13") 100 | 101 | ## Migrate the application workload 102 | 103 | Now we can select `Migrate` or `Stage` on the application. Since we don't care 104 | about downtime for this example, let's select `Migrate`: 105 | 106 | ![14](./screenshots/14.png?raw=true "14") 107 | 108 | Optionally choose to *not* terminate the application on the source cluster. 109 | Leave it unchecked and select `Migrate`.
110 | 111 | ![15](./screenshots/15.png?raw=true "15") 112 | 113 | Once done, you should see `Migration Succeeded` on the migration plan: 114 | 115 | ![16](./screenshots/16.png?raw=true "16") 116 | 117 | 118 | ## Verify application is functioning on destination cluster 119 | 120 | Let's first open the OCP 4.1 web console: 121 | 122 | ![console](./screenshots/dest.png?raw=true "console") 123 | 124 | Click on the `mssql-persistent` or `mediawiki` namespace: 125 | 126 | ![ns](./screenshots/dest-project.png?raw=true "ns") 127 | 128 | Click on the `mssql-app-deployment` or `mediawiki` deployment object to 129 | retrieve the route: 130 | 131 | ![route](./screenshots/dest-route.png?raw=true "route") 132 | 133 | Open the route and verify the application is functional: 134 | 135 | * MSSQL 136 | ![app](./screenshots/dest-app.png?raw=true "app") 137 | 138 | * Mediawiki 139 | ![mw4](./screenshots/mw4.png?raw=true "mw4") 140 | 141 | ## Bonus: Check out copied PV 142 | 143 | To verify the application actually copied the PV data over to a new volume, 144 | let's confirm we are no longer using an NFS volume. If everything worked as 145 | expected, our OCP 4.1 cluster will use its default storage class (gp2) to 146 | provision an AWS EBS volume. 147 | 148 | Click on the `Storage` tab in the web console: 149 | 150 | ![pv](./screenshots/pv1.png?raw=true "pv") 151 | 152 | Click on the persistent volume and verify that it is using Amazon Elastic Block 153 | Storage as the provisioner: 154 | 155 | ![pv2](./screenshots/pv2.png?raw=true "pv2") 156 | 157 | -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/README.md: -------------------------------------------------------------------------------- 1 | # App Migration Tool Tutorial 2 | 3 | This tutorial will show you how to use the App Migration Tool to migrate 4 | application workloads from an OCP 3.x cluster to OCP 4.x. 5 | 6 | The overall steps this tutorial will walk you through are: 7 | 8 | * [Setup](Setup.md) 9 | 1. Provision OCP 3.11 multi-node environment in AWS via 10 | [mig-agnosticd](https://github.com/konveyor/mig-agnosticd/3.x/) to be used as 11 | the source cluster 12 | 1. Provision OCP 4.1 multi-node environment in AWS via 13 | [mig-agnosticd](https://github.com/konveyor/mig-agnosticd/4.x/) to be used as 14 | the destination cluster 15 | 1. Deploy [migration 16 | workloads](https://github.com/konveyor/mig-agnosticd/tree/master/workloads) 17 | onto both clusters 18 | 1. Deploy [application 19 | workloads](https://github.com/konveyor/mig-agnosticd/tree/master/workloads) 20 | onto the source cluster to be migrated 21 | 22 | * [Migrate with the Web UI](Migrate.md) 23 | 1. Add the OCP 3.11 cluster as a migration source 24 | 1. Add an AWS S3 bucket replication repository 25 | 1. Create a migration plan to migrate an application workload to OCP 4.1 26 | 1. Migrate the application workload 27 | 1.
Verify the application is functional on OCP 4.1 28 | -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/files/mssql-scc.yaml: -------------------------------------------------------------------------------- 1 | allowHostDirVolumePlugin: false 2 | allowHostIPC: false 3 | allowHostNetwork: false 4 | allowHostPID: false 5 | allowHostPorts: false 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: true 8 | allowedCapabilities: null 9 | apiVersion: security.openshift.io/v1 10 | defaultAddCapabilities: null 11 | fsGroup: 12 | type: RunAsAny 13 | groups: [] 14 | kind: SecurityContextConstraints 15 | metadata: 16 | creationTimestamp: null 17 | name: mssql-persistent-scc 18 | selfLink: /apis/security.openshift.io/v1/securitycontextconstraints/mssql-persistent-scc 19 | priority: null 20 | readOnlyRootFilesystem: false 21 | requiredDropCapabilities: null 22 | runAsUser: 23 | type: RunAsAny 24 | seLinuxContext: 25 | type: RunAsAny 26 | supplementalGroups: 27 | type: RunAsAny 28 | users: 29 | - system:admin 30 | - system:serviceaccount:mssql-persistent:mssql-persistent-sa 31 | volumes: 32 | - awsElasticBlockStore 33 | - azureDisk 34 | - azureFile 35 | - cephFS 36 | - cinder 37 | - configMap 38 | - downwardAPI 39 | - emptyDir 40 | - fc 41 | - flexVolume 42 | - flocker 43 | - gcePersistentDisk 44 | - gitRepo 45 | - glusterfs 46 | - iscsi 47 | - nfs 48 | - persistentVolumeClaim 49 | - photonPersistentDisk 50 | - portworxVolume 51 | - projected 52 | - quobyte 53 | - rbd 54 | - scaleIO 55 | - secret 56 | - storageOS 57 | - vsphere 58 | -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/1.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/10.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/11.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/12.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/13.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/13.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/14.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/14.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/15.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/15.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/16.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/2.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/3-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/3-a.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/3.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/4.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/5-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/5-a.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/5.5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/5.5.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/5.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/6.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/6.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/7.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/8.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/9.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/Screenshot from 2019-07-26 07-09-43.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/Screenshot from 2019-07-26 07-09-43.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/Screenshot from 2019-07-26 07-48-42.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/Screenshot from 2019-07-26 07-48-42.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/dest-app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/dest-app.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/dest-project.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/dest-project.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/dest-route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/dest-route.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/dest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/dest.png -------------------------------------------------------------------------------- 
/demos/2019_07_Hackfest/screenshots/mssql3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/mssql3.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/mw3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/mw3.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/mw4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/mw4.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/pv1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/pv1.png -------------------------------------------------------------------------------- /demos/2019_07_Hackfest/screenshots/pv2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_07_Hackfest/screenshots/pv2.png -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/.gitignore: -------------------------------------------------------------------------------- 1 | *.log 2 | my_vars.yml 3 | secret.yml 4 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/3.x/.gitignore: -------------------------------------------------------------------------------- 1 | my_vars.yml 2 | secret.yml 3 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/3.x/my_vars.yml.sample: -------------------------------------------------------------------------------- 1 | ## Must Change 2 | email: "" 3 | 4 | # The 'guid' will be used to construct a unique URL for your cluster 5 | # it will also be used to generate the output directory for all of the 6 | # files created for this cluster, ssh_conf, kubeconfig, etc. 7 | guid: "" 8 | 9 | # output_dir stores deployment logs and other important information related to the deployment 10 | # Avoid using /tmp if files are automatically purged after some time 11 | # For example on MacOS, /tmp is purged after a week or so 12 | # If you lose the files for output_dir of agnosticd it can make it hard 13 | # to ssh into the provisioned hosts or clean up 14 | output_dir: "" 15 | 16 | # subdomain to use for your cluster 17 | subdomain_base_suffix: .mg.dog8code.com 18 | # Route 53 Zone ID for your subdomain 19 | HostedZoneId: Z2GE8CSGW2ZA8W 20 | 21 | # you'll be able to connect to the bastion host of your cluster 22 | # using this key. 
must be present at ~/.ssh/.pem 23 | key_name: libra 24 | 25 | cloud_provider: ec2 26 | # change to your preferred region 27 | aws_region: us-east-1 28 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/3.x/ocp3_vars.yml: -------------------------------------------------------------------------------- 1 | env_type: "ocp-workshop" 2 | repo_version: 3.11 3 | osrelease: 3.11.104 4 | software_to_deploy: "openshift" 5 | course_name: "ocp-workshop" 6 | platform: "aws" 7 | install_k8s_modules: true 8 | bastion_instance_type: "t2.large" 9 | master_instance_type: "m4.large" 10 | infranode_instance_type: "m4.large" 11 | node_instance_type: "m4.xlarge" 12 | support_instance_type: "m4.large" 13 | support_instance_public_dns: true 14 | nfs_exports_config: "*(insecure,rw,no_root_squash,no_wdelay,sync)" 15 | nfs_server_address: "support1.{{ guid }}{{ subdomain_base_suffix }}" 16 | cloud_tags: 17 | - owner: "{{ email }}" 18 | node_instance_count: 2 19 | mssql_private_img: true 20 | mig_operator_deploy_ui: false 21 | mig_operator_deploy_controller: false 22 | mig_operator_repo_branch: rhte2019 23 | enable_workshops_catalog: false 24 | install_ipa_client: false 25 | install_openshiftapb: false 26 | install_student_user: true 27 | lets_encrypt_production: false 28 | ovs_plugin: networkpolicy 29 | run_ocp_diagnostics: false 30 | infra_workloads: 'ocp-workload-migration,ocp-workload-mssql,ocp-workload-sock-shop,ocp-workload-robot-shop' 31 | num_users: '1' 32 | platform: 'SPP' 33 | student_name: 'lab-user' 34 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/4.x/.gitignore: -------------------------------------------------------------------------------- 1 | my_vars.yml 2 | secret.yml 3 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/4.x/my_vars.yml.sample: -------------------------------------------------------------------------------- 1 | ## Must Change 2 | email: "" 3 | 4 | # The 'guid' will be used to construct a unique URL for your cluster 5 | # it will also be used to generate the output directory for all of the 6 | # files created for this cluster, ssh_conf, kubeconfig, etc. 7 | guid: "" 8 | 9 | # output_dir stores deployment logs and other important information related to the deployment 10 | # Avoid using /tmp if files are automatically purged after some time 11 | # For example on MacOS, /tmp is purged after a week or so 12 | # If you lose the files for output_dir of agnosticd it can make it hard 13 | # to ssh into the provisioned hosts or clean up 14 | output_dir: "" 15 | 16 | # subdomain to use for your cluster 17 | subdomain_base_suffix: .mg.dog8code.com 18 | # Route 53 Zone ID for your subdomain 19 | HostedZoneId: Z2GE8CSGW2ZA8W 20 | 21 | # you'll be able to connect to the bastion host of your cluster 22 | # using this key. 
must be present at ~/.ssh/.pem 23 | key_name: libra 24 | 25 | cloud_provider: ec2 26 | # change to your preferred region 27 | aws_region: us-east-1 28 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/4.x/ocp4_vars.yml: -------------------------------------------------------------------------------- 1 | cloudformation_retries: 0 2 | env_type: ocp4-workshop 3 | software_to_deploy: none 4 | 5 | ocp4_installer_version: "4.1.0" 6 | osrelease: 4.1.0 7 | repo_version: '4.1' 8 | bastion_instance_type: t2.medium 9 | install_ocp4: true 10 | install_opentlc_integration: false 11 | install_idm: false 12 | install_ipa_client: false 13 | default_workloads: ["ocp-workload-migration","ocp4-workload-minio"] 14 | _minio_access_key: minio 15 | _minio_secret_key: minio123 16 | infra_workloads: [] 17 | student_workloads: [] 18 | mig_operator_repo_branch: rhte2019 19 | clientvm_instance_type: "t2.medium" 20 | clientvm_instance_count: 1 21 | master_instance_type: "m4.xlarge" 22 | master_instance_count: 3 23 | worker_instance_type: "m4.xlarge" 24 | worker_instance_count: 3 25 | _infra_node_instance_type: "m4.large" 26 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/README.md: -------------------------------------------------------------------------------- 1 | # RHTE Keynote 2 | 3 | This is a guide to deploy OCP3 and OCP4 clusters on AWS for RHTE Keynote scenes. 4 | 5 | # Before you proceed 6 | 7 | We'd need : 8 | - access to an AWS account which has permissions to create AWS resources required by both the clusters. 9 | - secret token to deploy OpenShift 4 cluster. 10 | - domain for your clusters with HostedZone created in Route 53 11 | - a private content repository. 12 | 13 | ## Requirements 14 | 15 | You need following packages installed on your host machine from where you'll launch this deployment. 16 | 17 | - Python 2.7.x (Python 3.x may not work) 18 | - Ansible 2.7.6+ 19 | - Git 20 | - awscli 21 | - python-boto3 22 | - python2-boto 23 | 24 | # Installation 25 | 26 | ## Clone AgnosticD 27 | 28 | Clone AgnosticD repository to your preferred location : 29 | 30 | ```bash 31 | git clone https://github.com/redhat-cop/agnosticd ~/ 32 | ``` 33 | 34 | Set the environment variable `AGNOSTICD_HOME` to the location of the AgnosticD repo : 35 | 36 | ```bash 37 | export AGNOSTICD_HOME=~/agnosticd/ 38 | ``` 39 | 40 | ## Prepare your secret file 41 | 42 | ### Secret Variables 43 | 44 | Copy `secret.yml.sample` to `secret.yml` : 45 | 46 | ```bash 47 | cp secret.yml.sample secret.yml 48 | ``` 49 | 50 | Fill `secret.yml` with your secret values. Make sure all variables are filled. 51 | 52 | ### OCP Cluster Variables 53 | 54 | Copy `3.x/my_vars.yml.sample` to `3.x/my_vars.yml` : 55 | 56 | ```bash 57 | cp 3.x/my_vars.yml.sample 3.x/my_vars.yml 58 | ``` 59 | 60 | Copy `4.x/my_vars.yml.sample` to `4.x/my_vars.yml`: 61 | 62 | ```bash 63 | cp 4.x/my_vars.yml.sample 4.x/my_vars.yml 64 | ``` 65 | 66 | Fill all `3.x/my_vars.yml` and `4.x/my_vars.yml` files with your values. Follow comments above each variable for more information. 
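For illustration only, a filled-in `3.x/my_vars.yml` might look something like the sketch below. The `email`, `guid`, and `output_dir` values are hypothetical placeholders; the remaining values are the defaults already present in `my_vars.yml.sample`, so substitute your own subdomain, Route 53 zone, region, and SSH key name as appropriate.

```yaml
## Must Change
email: "jane@example.com"                    # used to tag the AWS resources you create
guid: "demo1"                                # short unique id; embedded in cluster hostnames
output_dir: "/home/jane/agnosticd-output"    # keep this out of /tmp so ssh_conf, kubeconfig, etc. are not purged

# subdomain and Route 53 zone for your cluster
subdomain_base_suffix: .mg.dog8code.com
HostedZoneId: Z2GE8CSGW2ZA8W

# SSH key used to reach the bastion host; ~/.ssh/libra.pem must exist locally
key_name: libra

cloud_provider: ec2
aws_region: us-east-1
```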
67 | 68 | ## Launch the environments 69 | 70 | Once the variable files are ready, to launch a new keynote environment : 71 | 72 | ```bash 73 | ./create_rhte_env.sh 74 | ``` 75 | 76 | To destroy the keynote environment : 77 | 78 | ```bash 79 | ./delete_rhte_env.sh 80 | ``` 81 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/create_rhte_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | INFO='\033[0;33m' 4 | ERROR='\033[0;31m' 5 | SUCCESS='\033[0;32m' 6 | NC='\033[0m' 7 | 8 | info() { 9 | echo -e "${INFO}${1}${NC}" 10 | } 11 | 12 | error() { 13 | echo -e "${ERROR}${1}${NC}" 14 | } 15 | 16 | success() { 17 | echo -e "${SUCCESS}${1}${NC}" 18 | } 19 | 20 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 21 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 22 | exit 23 | fi 24 | 25 | OUR_DIR=`pwd` 26 | pushd ${OUR_DIR} &> /dev/null 27 | cd ${AGNOSTICD_HOME} 28 | 29 | echo "Creating OCP3 env..." 30 | ansible-playbook ${AGNOSTICD_HOME}/ansible/main.yml -e @${OUR_DIR}/3.x/my_vars.yml -e @${OUR_DIR}/3.x/ocp3_vars.yml -e @${OUR_DIR}/secret.yml &> ${OUR_DIR}/ocp3.log & 31 | pid_v3=$! 32 | info "Run 'tail -f ocp3.log' for deployment logs" 33 | 34 | echo "Creating OCP4 env..." 35 | ansible-playbook ${AGNOSTICD_HOME}/ansible/main.yml -e @${OUR_DIR}/4.x/my_vars.yml -e @${OUR_DIR}/4.x/ocp4_vars.yml -e @${OUR_DIR}/secret.yml &> ${OUR_DIR}/ocp4.log & 36 | pid_v4=$! 37 | info "Run 'tail -f ocp4.log' for deployment logs" 38 | popd &> /dev/null 39 | 40 | failed=false 41 | 42 | echo "Waiting for OCP deployments to complete..." 43 | if ! wait $pid_v3; then 44 | error "OCP3 deployment failed. See deployment logs in 'ocp3.log'..." 45 | info "Attempting rollback..." 46 | ansible-playbook ${AGNOSTICD_HOME}/ansible/configs/ocp-workshop/destroy_env.yml -e @${OUR_DIR}/3.x/my_vars.yml -e @${OUR_DIR}/3.x/ocp3_vars.yml -e @${OUR_DIR}/secret.yml &> ocp3.delete.log 47 | info "Rollback complete..." 48 | failed=true 49 | fi 50 | 51 | if ! wait $pid_v4; then 52 | error "OCP4 deployment failed. See deployment logs in 'ocp4.log'..." 53 | info "Attempting rollback..." 54 | ansible-playbook ${AGNOSTICD_HOME}/ansible/configs/ocp4-workshop/destroy_env.yml -e @${OUR_DIR}/4.x/my_vars.yml -e @${OUR_DIR}/4.x/ocp4_vars.yml -e @${OUR_DIR}/secret.yml &> ocp4.delete.log 55 | info "Rollback complete..." 56 | failed=true 57 | fi 58 | 59 | if [ "$failed" = true ]; then 60 | exit 1 61 | fi 62 | 63 | success "Cluster deployments succeded..." 64 | 65 | echo "Creating Minio mig storage on destination..." 66 | ansible-playbook post-install/minio.yml 67 | 68 | 69 | echo "Creating mig clusters on destination..." 70 | ansible-playbook post-install/migcluster.yml 71 | 72 | echo "Adding CORS settings on source cluster..." 73 | ansible-playbook post-install/cors.yml 74 | 75 | success "Success..." 76 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/delete_rhte_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OUR_DIR=`pwd` 4 | 5 | INFO='\033[0;32m' 6 | NC='\033[0m' 7 | 8 | info() { 9 | echo -e "${INFO}${1}${NC}" 10 | } 11 | 12 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 13 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 14 | exit 15 | fi 16 | 17 | pushd ${OUR_DIR} 18 | cd ${AGNOSTICD_HOME} 19 | 20 | option="$1" 21 | 22 | echo "Deleting OCP3 environment..." 
23 | ansible-playbook ${AGNOSTICD_HOME}/ansible/configs/ocp-workshop/destroy_env.yml -e @${OUR_DIR}/3.x/my_vars.yml -e @${OUR_DIR}/3.x/ocp3_vars.yml -e @${OUR_DIR}/secret.yml &> ${OUR_DIR}/ocp3.delete.log & 24 | pid_v3=$! 25 | info "Run 'tail -f ${OUR_DIR}/ocp3.delete.log' for deletion logs" 26 | 27 | echo "Deleting OCP4 environment..." 28 | ansible-playbook ${AGNOSTICD_HOME}/ansible/configs/ocp4-workshop/destroy_env.yml -e @${OUR_DIR}/4.x/my_vars.yml -e @${OUR_DIR}/4.x/ocp4_vars.yml -e @${OUR_DIR}/secret.yml &> ${OUR_DIR}/ocp4.delete.log & 29 | pid_v4=$! 30 | info "Run 'tail -f ${OUR_DIR}/ocp4.delete.log' for deletion logs" 31 | popd 32 | 33 | failed=false 34 | 35 | echo "Waiting for deletion to complete..." 36 | if ! wait $pid_v3; then 37 | echo "OCP3 deletion failed. Please try again..." 38 | failed=true 39 | fi 40 | 41 | if ! wait $pid_v4; then 42 | echo "OCP4 deletion failed. Please try again..." 43 | failed=true 44 | fi 45 | 46 | if [ "$failed" = true ]; then 47 | exit 1 48 | fi 49 | 50 | echo "Success..." 51 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/cors.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - name: "Reading v4 variables" 6 | include_vars: "../4.x/my_vars.yml" 7 | 8 | - name: "Setting facts" 9 | set_fact: 10 | guid_v4: "{{ guid }}" 11 | subdomain_v4: "{{ subdomain_base_suffix }}" 12 | 13 | - name: "Reading v3 variables" 14 | include_vars: "../3.x/my_vars.yml" 15 | 16 | - name: "Setting facts" 17 | set_fact: 18 | guid_v3: "{{ guid }}" 19 | subdomain_v3: "{{ subdomain_base_suffix }}" 20 | output_dir_v3: "{{ output_dir }}" 21 | 22 | - name: "Registering host" 23 | add_host: 24 | hostname: "master.{{ guid_v3 }}{{ subdomain_v3 }}" 25 | groups: "remote" 26 | 27 | - hosts: remote 28 | vars_files: 29 | - "../3.x/my_vars.yml" 30 | vars: 31 | ansible_ssh_private_key_file: "{{ output_dir }}/{{ guid }}key" 32 | ansible_user: ec2-user 33 | tasks: 34 | - name: "Including v4 variables" 35 | include_vars: "../4.x/my_vars.yml" 36 | delegate_to: localhost 37 | 38 | - name: "Adding new CORS rules" 39 | lineinfile: 40 | insertafter: "corsAllowedOrigins:" 41 | line: "- (?i)//migration-mig\\.apps\\.cluster-{{ guid }}\\.{{ guid }}\\{{ subdomain_base_suffix }}" 42 | path: /etc/origin/master/master-config.yaml 43 | become: yes 44 | 45 | - name: "Checking if atomic-openshift services exist [1]" 46 | shell: "systemctl status atomic-openshift-master-api" 47 | register: status 48 | become: yes 49 | ignore_errors: yes 50 | 51 | - name: "Applying new configuration [atomic-openshift services]" 52 | service: 53 | name: "{{ item }}" 54 | state: restarted 55 | loop: 56 | - atomic-openshift-master-api 57 | - atomic-openshift-master-controllers 58 | become: yes 59 | when: status.rc == 0 60 | 61 | - name: "Applying new configuration [master-restart]" 62 | shell: "/usr/local/bin/master-restart {{ item }}" 63 | loop: 64 | - api 65 | - controller 66 | when: status.rc != 0 67 | become: yes 68 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/include/common.yml: -------------------------------------------------------------------------------- 1 | # common tasks to read cluster variables 2 | - name: "Reading cluster v{{ ocp_version }} variables..." 
3 | include_vars: "{{ item }}" 4 | loop: 5 | - "../{{ ocp_version }}.x/my_vars.yml" 6 | - "../{{ ocp_version }}.x/ocp{{ ocp_version }}_vars.yml" 7 | 8 | - name: "Registering host..." 9 | add_host: 10 | groups: "{{ remote_group|d('remote') }}" 11 | hostname: "bastion.{{ guid }}{{ subdomain_base_suffix }}" 12 | 13 | - name: "Getting ssh key" 14 | set_fact: 15 | ssh_key: "~/.ssh/{{ key_name }}.pem" 16 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/include/connection.yml: -------------------------------------------------------------------------------- 1 | # set of tasks to ensure oc is logged in on bastion node 2 | - block: 3 | - name: "Checking if oc is logged in" 4 | shell: "oc status" 5 | register: oc_status 6 | ignore_errors: yes 7 | - name: "Copying local kubeconfig to bastion" 8 | copy: 9 | src: "{{ output_dir }}/{{ env_type }}_{{ guid }}_kubeconfig" 10 | dest: "/home/{{ ansible_user }}/.kube/config" 11 | when: oc_status.rc != 0 and ocp_version == '4' 12 | - name: "Trying to login to cluster" 13 | shell: "oc login -u {{ admin_user | default('opentlc-mgr') }} -p {{ admin_password | default('r3dh4t1!') }} https://master.{{ guid }}{{ subdomain_base_suffix }} --config /home/ec2-user/.kube/config --insecure-skip-tls-verify=true" 14 | when: oc_status.rc != 0 and ocp_version == '3' 15 | 16 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/include/migcluster.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Cluster 3 | apiVersion: clusterregistry.k8s.io/v1alpha1 4 | metadata: 5 | namespace: mig 6 | name: remote-cluster 7 | spec: 8 | kubernetesApiEndpoints: 9 | serverEndpoints: 10 | - clientCIDR: "0.0.0.0" 11 | # [!] 
Change serverAddress to point at your remote cluster login endpoint 12 | serverAddress: "https://master1.{{ guid_v3 }}{{ subdomain_v3 }}:443" 13 | --- 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: sa-token-remote 18 | namespace: mig 19 | type: Opaque 20 | data: 21 | saToken: {{ mig_sa_token|b64encode }} 22 | --- 23 | apiVersion: migration.openshift.io/v1alpha1 24 | kind: MigCluster 25 | metadata: 26 | labels: 27 | controller-tools.k8s.io: "1.0" 28 | name: migcluster-remote 29 | namespace: mig 30 | spec: 31 | isHostCluster: false 32 | clusterRef: 33 | name: remote-cluster 34 | namespace: mig 35 | serviceAccountSecretRef: 36 | name: sa-token-remote 37 | namespace: mig 38 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/include/migstorage.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | namespace: mig 6 | name: migstorage-creds 7 | type: Opaque 8 | data: 9 | aws-access-key-id: {{ _minio_access_key|b64encode }} 10 | aws-secret-access-key: {{ _minio_secret_key|b64encode }} 11 | --- 12 | apiVersion: migration.openshift.io/v1alpha1 13 | kind: MigStorage 14 | metadata: 15 | labels: 16 | controller-tools.k8s.io: "1.0" 17 | name: migstorage-sample 18 | namespace: mig 19 | spec: 20 | backupStorageProvider: aws 21 | volumeSnapshotProvider: aws 22 | 23 | backupStorageConfig: 24 | awsBucketName: {{ minio_bucket }} 25 | awsS3ForcePathStyle: true 26 | awsS3Url: {{ minio_url }} 27 | credsSecretRef: 28 | namespace: mig 29 | name: migstorage-creds 30 | 31 | volumeSnapshotConfig: 32 | awsRegion: {{ region }} 33 | credsSecretRef: 34 | namespace: mig 35 | name: migstorage-creds 36 | 37 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/include/mssql-scc.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: SecurityContextConstraints 3 | apiVersion: v1 4 | metadata: 5 | name: mssql-persistent-scc 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: true 8 | runAsUser: 9 | type: RunAsAny 10 | seLinuxContext: 11 | type: RunAsAny 12 | fsGroup: 13 | type: RunAsAny 14 | supplementalGroups: 15 | type: RunAsAny 16 | volumes: 17 | - '*' 18 | users: 19 | - system:admin 20 | - system:serviceaccount:mssql-persistent:mssql-persistent-sa 21 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/migcluster.yml: -------------------------------------------------------------------------------- 1 | # creates migcluster on destination 2 | - hosts: localhost 3 | connection: local 4 | vars: 5 | - ocp_version: "3" 6 | tasks: 7 | - include_tasks: "./include/common.yml" 8 | 9 | - hosts: remote 10 | vars_files: 11 | - "../3.x/my_vars.yml" 12 | - "../3.x/ocp3_vars.yml" 13 | vars: 14 | - ansible_ssh_private_key_file: "~/.ssh/{{ key_name }}.pem" 15 | - ansible_user: "{{ remote_user|d('ec2-user') }}" 16 | - ocp_version: "3" 17 | tasks: 18 | - include_tasks: "./include/connection.yml" 19 | 20 | - name: "Reading Mig SA token" 21 | shell: "oc sa get-token -n mig mig" 22 | register: mig_token 23 | 24 | - name: "Registering vars" 25 | set_fact: 26 | sa_token: "{{ mig_token.stdout }}" 27 | guid_v3: "{{ guid }}" 28 | subdomain_v3: "{{ subdomain_base_suffix }}" 29 | delegate_to: localhost 30 | delegate_facts: true 31 | run_once: true 32 | 33 | - hosts: localhost 
34 | connection: local 35 | vars: 36 | - ocp_version: "4" 37 | - remote_group: ocp4 38 | tasks: 39 | - include_tasks: "./include/common.yml" 40 | 41 | - hosts: ocp4 42 | vars: 43 | - migcluster_name: "ocp3-cluster" 44 | - ocp_version: "4" 45 | tasks: 46 | - include_tasks: "./include/connection.yml" 47 | 48 | - tempfile: 49 | state: directory 50 | register: temp_dir 51 | 52 | - debug: 53 | msg: "{{ hostvars['localhost'] }}" 54 | 55 | - name: "Generating migcluster definition" 56 | template: 57 | src: "./include/{{ item.src }}" 58 | dest: "{{ temp_dir.path }}/{{ item.dest }}" 59 | loop: 60 | - src: migcluster.yaml.j2 61 | dest: migcluster.yaml 62 | - src: mssql-scc.yaml.j2 63 | dest: mssql-scc.yaml 64 | vars: 65 | - mig_sa_token: "{{ hostvars['localhost']['sa_token'] }}" 66 | - guid_v3: "{{ hostvars['localhost']['guid_v3'] }}" 67 | - subdomain_v3: "{{ hostvars['localhost']['subdomain_v3'] }}" 68 | 69 | - name: "Attempting to create migcluster" 70 | shell: "oc apply -f {{ item }} -n mig" 71 | loop: 72 | - "{{ temp_dir.path }}/migcluster.yaml" 73 | - "{{ temp_dir.path }}/mssql-scc.yaml" 74 | 75 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/post-install/minio.yml: -------------------------------------------------------------------------------- 1 | # creates minio bucket on destination cluster 2 | - hosts: localhost 3 | connection: local 4 | vars: 5 | - ocp_version: "4" 6 | tasks: 7 | - include_tasks: "./include/common.yml" 8 | 9 | - hosts: remote 10 | vars_files: 11 | - "../4.x/my_vars.yml" 12 | - "../4.x/ocp4_vars.yml" 13 | vars: 14 | - ansible_ssh_private_key_file: "~/.ssh/{{ key_name }}.pem" 15 | - ansible_user: "{{ remote_user|d('ec2-user') }}" 16 | - minio_bucket: "mig-bucket" 17 | tasks: 18 | - include_tasks: "./include/connection.yml" 19 | 20 | - name: "Reading minio route" 21 | shell: "oc get route minio -n gpte-minio -o go-template='{{ '{{' }} .spec.host {{ '}}' }}{{ '{{' }} println {{ '}}' }}'" 22 | register: minio_route 23 | 24 | - block: 25 | - name: "Checking if S3 bucket exists" 26 | shell: "aws s3 ls --endpoint http://{{ minio_route.stdout }} | awk '{ print $3 }'" 27 | register: s3_buckets 28 | 29 | - name: "Creating S3 bucket" 30 | shell: "aws s3 mb s3://{{ minio_bucket }} --endpoint http://{{ minio_route.stdout }}" 31 | when: minio_bucket not in s3_buckets.stdout_lines 32 | register: new_bucket 33 | retries: 5 34 | until: new_bucket.rc == 0 35 | delay: 2 36 | environment: 37 | AWS_SECRET_ACCESS_KEY: "{{ _minio_secret_key }}" 38 | AWS_ACCESS_KEY_ID: "{{ _minio_access_key }}" 39 | 40 | - name: "Creating temp dir" 41 | tempfile: 42 | state: directory 43 | register: temp_dir 44 | 45 | - name: "Generating migstorage definition" 46 | template: 47 | src: "./include/migstorage.yml.j2" 48 | dest: "{{ temp_dir.path }}/migstorage.yml" 49 | vars: 50 | - region: "{{ aws_region }}" 51 | - minio_url: "http://{{ minio_route.stdout }}" 52 | 53 | - name: "Creating migstorage" 54 | shell: "oc apply -f {{ temp_dir.path }}/migstorage.yml" 55 | -------------------------------------------------------------------------------- /demos/2019_RHTE/keynote/secret.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | aws_access_key_id: "REPLACE_WITH_ACCESS_KEY_ID" 3 | aws_secret_access_key: "REPLACE_WITH_SECRET_ACCESS_KEY" 4 | 5 | # User credentials for access.redhat.com 6 | # Used to fetch images from registry.redhat.io 7 | redhat_registry_user: "replace_with_portal_credentials" 8 | 
redhat_registry_password: "portal_password" 9 | 10 | # Obtain the token by going to try.openshift.com 11 | # Be sure that when you add your token in you use ' ' 12 | # The token will contain " inside of it, you need to wrap with ' to escape 13 | # Make sure your token looks like this : 14 | # ocp4_token: '{"auths":{"cloud.openshift.com":{"auth":"","email":"john@doe.com"},"quay.io":{"auth":"","email":"john@doe.com"},"registry.connect.redhat.com":{"auth":"","email":"john@doe.com"},"registry.redhat.io":{"auth":"","email":"john@doe.com"}}}' 15 | ocp4_token: '' 16 | 17 | # Below is if you have a mirror of content you want to use 18 | # Reach out to one of the developers if you need access to an internal mirror we have 19 | own_repo_path: "http://REPLACE_ME/repos/ocp/{{ osrelease }}/" 20 | 21 | 22 | # If not using, 'own_repo_path', you need to supply credentials for 23 | # subscription manager to register for yum content 24 | # Uncomment the below if you are not using 'own_repo_path' and enter credentials for subscription manager 25 | #rhel_subscription_user: "replace_with_username" 26 | #rhel_subscription_pass: "replace_with_password" 27 | #repo_method="rhn" 28 | #rhn_pool_id_string="Employee SKU" 29 | 30 | # Following variables hold private values required to launch MsSQL workloads 31 | mssql_private_img_registry: "" 32 | mssql_private_img_registry_user: "" 33 | mssql_private_img_registry_pass: "" 34 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/1.md: -------------------------------------------------------------------------------- 1 | 2 | # 1.0 Accessing the Lab Environment 3 | 4 | ## 1.1 Getting your dedicated lab environments using Guid Grabber 5 | 6 | On your laptop, open **TWO** browser tabs to [*Lab GUID Assignment page*](https://www.opentlc.com/gg/gg.cgi?profile=generic_rhte). 7 | 8 | From this page, you will be assigned your unique GUID, which you will use to access your unique lab environment and systems. 9 | 10 | For this lab you will need two different environments (v3.11 and v4.1), pay attention while selecting the *Lab Code* from the drop down list. 11 | 12 | **You are not required to login anywhere at this point, in this intro lab you will only get the information related to your assigned lab environments.** 13 | 14 | ### 1.1.1 Getting 3.11 Cluster Environment - Browser Tab 1 15 | 16 | From the drop down list: 17 | 18 | 1. Choose the lab `A0005-A - OpenShift Cluster Application Migration (CAM) Lab` 19 | 2. Enter the activation key shared by the instructors 20 | 3. Enter your email address 21 | 4. Click submit 22 | 5. Save the information on screen 23 | 24 | e.g: 25 | 26 | ![Request Env GuidGrabber](screenshots/lab1/request-env-gg.png) 27 | 28 | The resulting *Lab Information page* will display your lab's GUID and other useful information about your lab environment. 29 | Take note of your assigned GUID. 30 | 31 | You will use this GUID to access your lab's environment and systems. 32 | Your unique GUID will also be embedded in your lab environment's host names. 33 | 34 | **Be sure to note that this GUID is for your v3.11 environment.** 35 | 36 | ### 1.1.2 Getting 4.1 Cluster Environment - Browser Tab 2 37 | 38 | From the drop down list: 39 | 40 | 1. Choose the lab `A0005-B - OpenShift Cluster Application Migration (CAM) Lab` 41 | 2. Enter the activation key shared by the instructors 42 | 3. Enter your email address 43 | 4. Click submit 44 | 5. 
Save the information on screen 45 | 46 | **Be sure to note that this GUID is for your v4.1 environment.** 47 | 48 | ## 1.2 Lab Environment Overview 49 | 50 | ![Lab Environment Overview](screenshots/lab1/lab-env-overview.png) 51 | 52 | ### 1.2.1 SSH Hosts 53 | 54 | | Hostname | User | Password | Description | 55 | | ---- | ---- | ---- | ---- | 56 | |`bastion..domain` |lab-user | r3dh4t1!| bastion host for 3.11 environment | 57 | |`bastion..domain` |lab-user | r3dh4t1! | client-vm for 4.1 environment | 58 | 59 | Next Lab: [Lab 2 - Prerequisites and Setup](./2.md)
60 | [Home](./README.md) 61 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/4.md: -------------------------------------------------------------------------------- 1 | # 4.0 Cluster Application Migration Tool (CAM) 2 | 3 | CAM is designed to migrate Application Workloads between OpenShift clusters. Specifically, CAM handles migration of k8s resource objects, persistent volumes, and internal images. CAM is designed to provide a migration experience while focusing on minimizing application downtime through the process. 4 | 5 | ## 4.1 Upstream Projects 6 | 7 | CAM leverages two upstream projects: [Velero](https://github.com/heptio/velero) and [Restic](https://restic.net/). Velero (formerly Heptio Ark) gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Restic is a backup program that is fast, efficient and secure. 8 | 9 | ![Velero Logo](./screenshots/lab4/velero.png) 10 | 11 | ## 4.2 Architecture 12 | 13 | CAM is implemented as a native k8s API extension through a custom resource definition. CAM orchestrates usage of Velero for performing backup/restores. OpenShift specific functionality is implemented in a series of Velero plugins. CAM is also equipped with a React/Patternfly 4 web UI to provide simple interactive experience. 14 | 15 | #### IMPORTANT 16 | 17 | > * **Migration is at scope of a Namespace**. Future versions will allow selecting resources inside of a Namespace 18 | 19 | >* **Cluster Scoped Resources are not handled**. Cluster Role Bindings, SCCs, etc are not handled with migration. 20 | 21 | > * **‘cluster-admin’ role required for initial release targeting OCP 4.2**. 22 | 23 | ## 4.3 Persistent Volume Handling 24 | 25 | CAM provides two methods for migrating persistent volumes: (1) **Move** and (2) **Copy**. 26 | 27 | 1. Move or "swinging" the PV recreates the PVC/PV definitions from source cluster to destination cluster. This option is highly desirable for environments with shared storage between the source and target clusters (i.e. NFS). 28 | 29 | ![Move PV Diagram](./screenshots/lab4/movepv.png) 30 | 31 | 2. Copy creates a copy of the data from source cluster into the destination cluster. This option involves creating a PVC on destination and allowing cluster to find a PV to bind to the claim. We then copy data into the PV. 32 | 33 | ![Copy PV Diagram](./screenshots/lab4/copypv.png) 34 | 35 | ## 4.4 Actions (Stage and Migrate) 36 | 37 | CAM introduces two actions on a Migration Plan: 38 | 39 | * **Stage** - Seeds data while leaving application up and running. 40 | 41 | * **Migrate** - Quiesces the application and migrates deltas from stage runs. 42 | 43 | ![Migration Actions](./screenshots/lab4/stage-migrate.png) 44 | 45 | #### IMPORTANT 46 | 47 | > * Stage can be run multiple times on a Migration Plan 48 | 49 | > * Migrate can only be run once. 50 | 51 | ## 4.5 Migration Process 52 | 53 | ![Migration Process](./screenshots/lab4/mig-process.png) 54 | 55 | There are 3 steps to the migration process within the CAM tool: 56 | 57 | 1. **Plan** 58 | * Select source cluster 59 | * Select namespaces 60 | * Choose Copy or Move for each Persistent volume 61 | * Specify intermediate object storage 62 | * Select destination cluster 63 | 64 | 65 | 2. **Stage** 66 | * Stages the data from source to destination cluster 67 | * May be run multiple times 68 | * No downtime during this step for source applications 69 | 70 | 71 | 3. 
**Migrate** 72 | * Quiesce the application 73 | * Migrate any delta state since last stage run 74 | 75 | ## 4.6 WebUI 76 | 77 | Let's bring up the webUI in preparation for our first Application Migration in Lab 5. 78 | 79 | If you don't remember the route from Lab 2, let's grab it again from our 4.1 environment. 80 | 81 | 1. Log into the 4.1 environment 82 | ```bash 83 | $ oc login https://api.cluster-GUID.GUID.DOMAIN:6443 -u admin -p r3dh4t1! 84 | The server uses a certificate signed by an unknown authority. 85 | You can bypass the certificate check, but any data you send to the server could be intercepted by others. 86 | Use insecure connections? (y/n): y 87 | ``` 88 | 2. Determine the route of `mig-ui`, we will use this later to enable a CORS header on the OCP 3.x side. 89 | ```bash 90 | $ oc get routes migration -n mig -o jsonpath='{.spec.host}' 91 | migration-mig.apps.cluster-a21d.a21d.sandbox67.opentlc.com 92 | ``` 93 | 94 | 3. Open Browser to https://migration-mig.apps.cluster-GUID.GUID.DOMAIN 95 | 96 | ![CAM Main Screen](./screenshots/lab4/cam-main-screen.png) 97 | 98 | We are now ready to perform our first Application Migration. 99 | 100 | Next Lab: [Lab 5 - Migrate MSSQL Application](./5.md)
101 | Previous Lab: [Lab 3 - CPMA Overview](./3.md)
102 | [Home](./README.md) 103 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/6.md: -------------------------------------------------------------------------------- 1 | # 6.0 Migrate Sock-Shop Application 2 | 3 | In this lab we are going to migrate Weaveworks' Production Internet Store, the Sock Shop. This store has lots of traffic daily, and Weaveworks is very concerned about minimizing any downtime/unavailability of the store. The Sock Shop is running on your 3.11 cluster, and Weaveworks is very interested in many of the new features of the OpenShift 4.x platform, and wants to migrate. 4 | 5 | ## 6.1 Application Architecture 6 | 7 | The architecture of the Sock Shop application was intentionally designed to provide as many micro-services as possible. As seen in the image below, the micro-services are roughly defined by the function in an ECommerce site. Networks are specified, but due to technology limitations may not be implemented in some deployments. All services communicate using REST over HTTP. 8 | 9 | ![Sock Shop Architecture](./screenshots/lab6/sock-shop-arch.png) 10 | 11 | If we login to our 3.11 cluster, we can see the app running: 12 | 13 | ```bash 14 | $ oc get pods -n sock-shop 15 | NAME READY STATUS RESTARTS AGE 16 | carts-77555f7648-sdx48 1/1 Running 0 3h 17 | carts-db-74db84c448-lv8sk 1/1 Running 0 3h 18 | catalogue-b5fc87544-cvhb8 1/1 Running 0 3h 19 | catalogue-db-1-c2f4w 1/1 Running 0 3h 20 | front-end-5c49687b5c-qgzkj 1/1 Running 0 3h 21 | orders-56b86d7dd7-gsnxh 1/1 Running 0 3h 22 | orders-db-7645cb4d78-pmg8s 1/1 Running 0 3h 23 | payment-685fdbcf67-4kgzn 1/1 Running 0 3h 24 | queue-master-58bcb789cd-thq9v 1/1 Running 0 3h 25 | rabbitmq-798d7b5976-7mgdl 2/2 Running 0 3h 26 | session-db-7cc8ddc4cc-pxvmw 1/1 Running 0 3h 27 | shipping-5ccdd4b459-dsvxf 1/1 Running 0 3h 28 | user-5648777687-2zkgs 1/1 Running 0 3h 29 | user-db-b655656b7-48qzs 1/1 Running 0 3h 30 | ``` 31 | 32 | Let's get the route to the application, and bring up the webUI. 33 | 34 | ```bash 35 | $ oc get route -n sock-shop 36 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 37 | front-end front-end-sock-shop.apps.da30.events.opentlc.com / front-end 8079 None 38 | ``` 39 | 40 | ![Sock Shop Main Page](./screenshots/lab6/sock-shop-main.png) 41 | 42 | ## 6.2 Migration planning 43 | 44 | Again, the first step is to check out the CPMA generated report that we created and downloaded in Lab 3. 45 | 46 | 1. Point your browser to `File:///tmp/cpma/report.html`. 47 | 48 | 2. Open the `Cluster report` section of the report and click on `Namespaces`. You'll see this section contains lots of detailed information by section. Let's focus our attention in two areas: PVCs and SCCs. 49 | 50 | 3. Click to open the `PVCs` section. You'll see that the Sock Shop application has 4 PVCs defined. Each of these are 10Gig volumes backed by NFS. We will need to account for all 4 of these in our Migration Plan. 51 | 52 | ![Sock Shop PVC CPMA](./screenshots/lab6/sock-shop-pvc-cpma.png) 53 | 54 | 4. Click to open the `SCCs` section. You'll see that the Sock Shop application makes use of a custom Security Context Constraint. Again, we will need to apply this to our 4.1 cluster in preparation for migration. Let's do this now. 
55 | 56 | ![Sock Shop SCC CPMA](./screenshots/lab6/sock-shop-scc-cpma.png) 57 | 58 | ### 6.2.1 Create Sock Shop Security Context Constraint 59 | 60 | The custom SCC yaml is available [here](./files/sock-shop-scc.yaml) (right-click Raw, Save Link As if using a local machine). This file is also located on the bastion if you prefer to create the SCC from there. 61 | 62 | 1. Run the following to recreate Sock Shop's `scc` on the destination 4.1 cluster: 63 | ```bash 64 | $ oc create -f sock-shop-scc.yaml 65 | securitycontextconstraints.security.openshift.io/sock-shop created 66 | ``` 67 | 68 | ## 6.3 Using CAM 69 | 70 | Next, let's open up the migration UI. Again, to get the route, run the following command on the destination 4.1 cluster: 71 | ```bash 72 | $ oc get routes migration -n mig -o jsonpath='{.spec.host}' 73 | migration-mig.apps.cluster-a21d.a21d.sandbox67.opentlc.com 74 | ``` 75 | 76 | Since we already have our source cluster, target cluster, and replication repository defined, we can move right to creating a migration plan. Click `Add Plan`: 77 | 78 | ![Sock Shop Mig Plan](./screenshots/lab6/sock-shop-mig-plan.png) 79 | 80 | Fill out a plan name. Click Next. 81 | 82 | ![Sock Shop Plan 2](./screenshots/lab6/sock-shop-mig-plan-2.png) 83 | 84 | Select the source and target cluster, the replication repository, and the `sock-shop` namespace (which we want to migrate over). Click Next. 85 | 86 | ![Sock Shop Mig Plan 3](./screenshots/lab6/sock-shop-mig-plan-3.png) 87 | 88 | Now we are shown the list of persistent volumes associated with our application workload. We should see the four volumes listed in the CPMA report. Select which type of action you would like to perform on each PV. Since minimizing downtime is very important in this example, and both of our clusters have access to the NFS shared storage, let's select `move` for each PV. ***Move will re-map the PVs from the source cluster to the target cluster, so it's the fastest option for handling state.*** Click Next. 89 | 90 | | WARNING: If you don't see any of the PVs listed in the CPMA report, try refreshing the browser tab! | 91 | | --- | 92 | 93 | ![Sock Shop Mig Plan 4](./screenshots/lab6/sock-shop-mig-plan-4.png) 94 | 95 | Since all our PVs are being `moved`, no destination storage classes need to be specified. Click Next. 96 | 97 | ![Sock Shop Mig Plan 5](./screenshots/lab6/sock-shop-mig-plan-5.png) 98 | 99 | After validating the migration plan, you will see a `Ready` message and you can click `Close`. 100 | 101 | 102 | 103 | ### 6.3.1 Migrate the Application Workload 104 | 105 | Now we can select `Migrate` or `Stage` on the application. Since we have chosen to `move` our four PVs, we will click on `Migrate`. *Stage will skip any PVs not using copy.* 106 | 107 | ![Sock Shop Mig Plan View](./screenshots/lab6/sock-shop-mig-plan-view.png) 108 | 109 | Optionally choose to *not* terminate the application on the source cluster. 110 | Leave it unchecked and select `Migrate`. 111 | 112 | ![Sock Shop Quiesce](./screenshots/lab6/sock-shop-mig-plan-quiesce.png) 113 | 114 | The migration will proceed, with a progress bar showing each step in the process. 115 | 116 | ![Sock Shop Progress Bar](./screenshots/lab6/sock-shop-progress.png) 117 | 118 | Once done, you should see `Migration Succeeded` on the migration plan.
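If you would rather confirm the result from the command line than from the UI, a rough check on the destination 4.1 cluster is sketched below. It assumes the migration custom resources live in the `mig` namespace (as in this lab) and that the migrated workload lands in the `sock-shop` namespace; the MigMigration name is generated when you click `Migrate`, so yours will differ.

```bash
# List the migrations the UI has created and inspect the most recent one
oc get migmigration -n mig
oc describe migmigration <generated-name> -n mig   # look for a Succeeded condition

# Confirm the application pods are starting on the destination cluster
oc get pods -n sock-shop
```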
119 | 120 | ![Sock Shop Migration Complete](./screenshots/lab6/sock-shop-mig-plan-complete.png) 121 | 122 | 123 | ### 6.3.2 Verify application is functioning on Destination Cluster 124 | 125 | Let's first open the OCP 4.1 web console and navigate to the `sock-shop` namespace. 126 | 127 | ![console](./screenshots/lab6/ocp4-sock-shop.png) 128 | 129 | Click on Storage > PVCs to see the persistent volume claims for the application. 130 | 131 | ![route](./screenshots/lab6/ocp4-sock-shop-pvcs.png) 132 | 133 | Drilling down into one of the PVs, we can verify that the underlying storage is the NFS server instance running on our OCP3 cluster. This confirms that our PVs were indeed remapped via `move`. 134 | 135 | ![app](./screenshots/lab6/ocp4-sock-shop-pv-yaml.png) 136 | 137 | ![Success](./screenshots/lab6/success.png) 138 | **Great job!** You have now successfully migrated two applications to your target cluster! 139 | 140 | However, what happens when things don't go as planned? In the next lab, we will examine some techniques for debugging failed migrations. 141 | 142 | Next Lab: [Lab 7 - Debugging Failed Migrations](./7.md)
143 | Previous Lab: [Lab 5 - Migrate MSSQL Application](./5.md)
144 | [Home](./README.md) 145 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/9.md: -------------------------------------------------------------------------------- 1 | # 9.0 Wrap Up 2 | 3 | **Congratulations** and **Thank You** for participating in the OpenShift Application Migration Lab. We hope you have found it to be both informative and thorough. By now you have completed the following activities: 4 | 5 | * Familiarized yourself with the CAM WebUI 6 | * Leveraged CPMA to generate a cluster report 7 | * Migrated multiple applications (stateless and stateful) from OpenShift 3 to OpenShift 4 8 | * Performed both Copy and Move operations for handling Persistent Volumes 9 | * Leveraged the API to migrate multiple namespaces in a single Migration Plan 10 | 11 | We've included a couple of additional applications in the 3.11 cluster deployment (Parks App & Robot Shop). Feel Free to perform migrations of these applications too (time permitting). 12 | 13 | ## 9.1 More Information 14 | 15 | | Type | Link | 16 | | ---- | ---- | 17 | |Source Code| https://github.com/konveyor/mig-operator | 18 | |Source Code | https://github.com/konveyor/mig-ui | 19 | |Source Code | https://github.com/konveyor/mig-controller | 20 | |Source Code | https://github.com/konveyor/mig-demo-apps | 21 | | YouTube Channel | https://www.youtube.com/channel/UCBDU5UK5Okg3mlIMygpkbNA?view_as=subscriber | 22 | 23 | 24 | Previous Lab: [Lab 8 - Migration at Scale via API (optional)](./8.md)
25 | [Home](./README.md) 26 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/README.md: -------------------------------------------------------------------------------- 1 | # RHTE 2019: OpenShift Cluster Application Migration Lab 2 | 3 | This guide will provide you with hands-on exposure to the new OpenShift Migration Tooling through a series of labs migrating application workloads (both stateful and stateless) between OpenShift Clusters. 4 | 5 | ### Goals 6 | 7 | * Migrate several application workloads (both stateful and stateless) from an OpenShift 3.11 cluster to an OpenShift 4.1 cluster using the Cluster Application Migration Tool (CAM) 8 | * Utilize the Control Plane Migration Assistance Tool (CPMA) to generate a detailed report identifying areas of custom configuration that need to be considered as part of application migration planning. 9 | 10 | ### Labs 11 | 12 | [Lab 1 - Introduction](./1.md)
13 | [Lab 2 - Prerequisites and Setup](./2.md)
14 | [Lab 3 - CPMA Overview](./3.md)
15 | [Lab 4 - CAM Overview](./4.md)
16 | [Lab 5 - Migrate MSSQL Application](./5.md)
17 | [Lab 6 - Migrate Sock Shop Application](./6.md)
18 | [Lab 7 - Debugging Failed Migrations](./7.md)
19 | [Lab 8 - Migration at Scale via API (optional)](./8.md)
20 | [Lab 9 - Wrap Up](./9.md)
21 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/files/cpma.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - name: "Reading v4 variables" 6 | include_vars: "../4.x/my_vars.yml" 7 | 8 | - name: "Setting facts" 9 | set_fact: 10 | guid_v4: "{{ guid }}" 11 | subdomain_v4: "{{ subdomain_base_suffix }}" 12 | 13 | - name: "Reading v3 variables" 14 | include_vars: "../3.x/my_vars.yml" 15 | 16 | - name: "Setting facts" 17 | set_fact: 18 | guid_v3: "{{ guid }}" 19 | subdomain_v3: "{{ subdomain_base_suffix }}" 20 | output_dir_v3: "{{ output_dir }}" 21 | 22 | - name: "Registering host" 23 | add_host: 24 | hostname: "bastion.{{ guid_v3 }}{{ subdomain_v3 }}" 25 | groups: "remote" 26 | 27 | - hosts: remote 28 | vars_files: 29 | - "../3.x/my_vars.yml" 30 | vars: 31 | ansible_ssh_private_key_file: "{{ output_dir }}/{{ guid }}key" 32 | ansible_user: ec2-user 33 | tasks: 34 | - name: "Including v4 variables" 35 | include_vars: "../4.x/my_vars.yml" 36 | delegate_to: localhost 37 | 38 | - name: Copy ssh key to bastion host 39 | copy: 40 | src: ~/.ssh/libra.pem 41 | dest: /home/ec2-user/.ssh/libra.pem 42 | owner: ec2-user 43 | group: ec2-user 44 | mode: '0600' 45 | 46 | - name: Create directory if it does not exist 47 | file: 48 | path: ~/cpma/bin 49 | state: directory 50 | mode: '0755' 51 | 52 | - name: Download cpma binary 53 | get_url: 54 | url: https://cpma.s3.us-east-2.amazonaws.com/cpma 55 | dest: ~/cpma/bin/cpma 56 | mode: 'u+rwx' 57 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/files/mssql-scc.yaml: -------------------------------------------------------------------------------- 1 | allowHostDirVolumePlugin: false 2 | allowHostIPC: false 3 | allowHostNetwork: false 4 | allowHostPID: false 5 | allowHostPorts: false 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: true 8 | allowedCapabilities: null 9 | apiVersion: security.openshift.io/v1 10 | defaultAddCapabilities: null 11 | fsGroup: 12 | type: RunAsAny 13 | groups: [] 14 | kind: SecurityContextConstraints 15 | metadata: 16 | creationTimestamp: null 17 | name: mssql-persistent-scc 18 | selfLink: /apis/security.openshift.io/v1/securitycontextconstraints/mssql-persistent-scc 19 | priority: null 20 | readOnlyRootFilesystem: false 21 | requiredDropCapabilities: null 22 | runAsUser: 23 | type: RunAsAny 24 | seLinuxContext: 25 | type: RunAsAny 26 | supplementalGroups: 27 | type: RunAsAny 28 | users: 29 | - system:admin 30 | - system:serviceaccount:mssql-persistent:mssql-persistent-sa 31 | volumes: 32 | - awsElasticBlockStore 33 | - azureDisk 34 | - azureFile 35 | - cephFS 36 | - cinder 37 | - configMap 38 | - downwardAPI 39 | - emptyDir 40 | - fc 41 | - flexVolume 42 | - flocker 43 | - gcePersistentDisk 44 | - gitRepo 45 | - glusterfs 46 | - iscsi 47 | - nfs 48 | - persistentVolumeClaim 49 | - photonPersistentDisk 50 | - portworxVolume 51 | - projected 52 | - quobyte 53 | - rbd 54 | - scaleIO 55 | - secret 56 | - storageOS 57 | - vsphere 58 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/files/sock-shop-scc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: SecurityContextConstraints 3 | apiVersion: security.openshift.io/v1 4 | metadata: 5 | name: sock-shop 6 | readOnlyRootFilesystem: true 7 | 
allowPrivilegedContainer: true 8 | runAsUser: 9 | type: RunAsAny 10 | seLinuxContext: 11 | type: RunAsAny 12 | supplementalGroups: 13 | type: RunAsAny 14 | volumes: 15 | - '*' 16 | users: 17 | - system:admin 18 | - system:serviceaccount:sock-shop:sock-shop 19 | requiredDropCapabilities: 20 | - all 21 | defaultAddCapabilities: 22 | - CHOWN 23 | - SETGID 24 | - SETUID 25 | - NET_BIND_SERVICE 26 | - DAC_OVERRIDE 27 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab1/lab-env-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab1/lab-env-overview.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab1/request-env-gg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab1/request-env-gg.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab2/minio-bucket-creation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab2/minio-bucket-creation.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab2/minio-mybucket.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab2/minio-mybucket.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab2/minio_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab2/minio_login.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab2/ssh-details-gg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab2/ssh-details-gg.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab3/cpma-report-html.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab3/cpma-report-html.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab4/cam-main-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/cam-main-screen.png -------------------------------------------------------------------------------- 
/demos/2019_RHTE/labs/screenshots/lab4/copypv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/copypv.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab4/mig-process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/mig-process.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab4/migtooling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/migtooling.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab4/movepv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/movepv.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab4/stage-migrate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/stage-migrate.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab4/velero.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab4/velero.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/.DS_Store -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-add-cluster-success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-add-cluster-success.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-add-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-add-cluster.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-add-repo-success.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-add-repo-success.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-add-repo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-add-repo.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-clusters-added.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-clusters-added.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-main-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-main-screen.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-1.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-2.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-3.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-4.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-5.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-added.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-mig-plan-added.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-migration-complete.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-migration-complete.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-progress-bar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-progress-bar.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-quiesce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-quiesce.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cam-repo-added.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cam-repo-added.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/cpma-mssql-report.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/cpma-mssql-report.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-add-product.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-add-product.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-added-product.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-added-product.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-app-route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-app-route.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-namespace-detail.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-namespace-detail.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-persistent-app-ocp4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-persistent-app-ocp4.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-product-catalog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-product-catalog.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-pv-yaml.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-pv-yaml.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-pv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-pv.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-pvc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-pvc.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-pvcs-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-pvcs-cpma.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/mssql-sccs-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/mssql-sccs-cpma.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab5/ocp-4-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab5/ocp-4-console.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/ocp4-sock-shop-pv-yaml.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/ocp4-sock-shop-pv-yaml.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/ocp4-sock-shop-pvcs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/ocp4-sock-shop-pvcs.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/ocp4-sock-shop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/ocp4-sock-shop.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-arch.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-main.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-2.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-3.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-4.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-5.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-complete.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-complete.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-quiesce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-quiesce.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan-view.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-mig-plan.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-progress.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-progress.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-pvc-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-pvc-cpma.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-register.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-register.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/sock-shop-scc-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/sock-shop-scc-cpma.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab6/success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab6/success.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab7/mig-custom-resources.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab7/mig-custom-resources.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/lab7/mig-plan-failed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/lab7/mig-plan-failed.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/noobaa/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/noobaa/.DS_Store -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/noobaa/noobaa-bucket-created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/noobaa/noobaa-bucket-created.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/noobaa/noobaa-buckets-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/noobaa/noobaa-buckets-screen.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/noobaa/noobaa-create-bucket-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/noobaa/noobaa-create-bucket-screen.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/screenshots/noobaa/noobaa-login-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2019_RHTE/labs/screenshots/noobaa/noobaa-login-screen.png -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/scripts/README.md: -------------------------------------------------------------------------------- 1 | # Scripts for Migration Demos in RHTE 2019 Labs 2 | 3 | ## Applying CORS Settings 4 | 5 | Login to the bastion host using the information available in your lab console : 6 | 7 | ```bash 8 | ssh @bastion.. 9 | ``` 10 | 11 | The `cors.yaml` playbook is available in the home directory on bastion host. 
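For reference, `cors.yaml` appends a `corsAllowedOrigins` entry for the CAM UI route to `/etc/origin/master/master-config.yaml` on each OCP 3 master and then restarts the master services. After you have run it (next step), a quick spot-check might look like the sketch below; the master hostname is a placeholder for whatever your inventory uses:

```bash
# From the bastion host, after the playbook has run
# (master1.example.internal is a placeholder hostname)
ssh ec2-user@master1.example.internal \
  "sudo grep -A 3 corsAllowedOrigins /etc/origin/master/master-config.yaml"

# The master API should come back up after the service restart
oc get nodes
```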
12 | 13 | To apply CORS settings on your OCP3 cluster : 14 | 15 | ```bash 16 | GUID_4= DOMAIN= ansible-playbook cors.yaml 17 | ``` 18 | 19 | Example usage : 20 | 21 | ```bash 22 | GUID_4=09kt DOMAIN=events.opentlc.com ansible-playbook cors.yaml 23 | ``` 24 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/scripts/cors.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - name: "Fixing permissions of the key file" 6 | file: 7 | path: "/home/{{ lookup('env', 'USER') }}/.ssh/openshift_key" 8 | owner: "{{ lookup('env', 'USER') }}" 9 | mode: "0600" 10 | become: yes 11 | 12 | - hosts: masters 13 | vars: 14 | ansible_user: ec2-user 15 | ansible_ssh_private_key_file: "/home/{{ lookup('env', 'USER') }}/.ssh/openshift_key" 16 | guid_v4: "{{ lookup('env', 'GUID_4') }}" 17 | subdomain: "{{ lookup('env', 'DOMAIN') |d('events.opentlc.com') }}" 18 | tasks: 19 | - block: 20 | - name: "Adding new CORS rules" 21 | lineinfile: 22 | insertafter: "corsAllowedOrigins:" 23 | line: "- (?i)//migration-mig\\.apps\\.cluster-{{ guid_v4 }}\\.{{ guid_v4 }}\\.{{ subdomain }}" 24 | path: /etc/origin/master/master-config.yaml 25 | become: yes 26 | 27 | - name: "Checking if atomic-openshift services exist" 28 | shell: "systemctl status atomic-openshift-master-api" 29 | register: status 30 | become: yes 31 | ignore_errors: yes 32 | 33 | - name: "Applying new configuration [atomic-openshift services]" 34 | service: 35 | name: "{{ item }}" 36 | state: restarted 37 | loop: 38 | - atomic-openshift-master-api 39 | - atomic-openshift-master-controllers 40 | become: yes 41 | when: status.rc == 0 42 | 43 | - name: "Applying new configuration [master-restart]" 44 | shell: "/usr/local/bin/master-restart {{ item }}" 45 | loop: 46 | - api 47 | - controller 48 | when: status.rc != 0 49 | become: yes 50 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/scripts/lab8/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # deploys "Hello, OpenShift" app to random X number of namespaces 4 | 5 | echo "Number of namespaces? "; read x 6 | 7 | echo $x > .ns 8 | 9 | ns_prefix="hello-openshift-" 10 | app_manifest=https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json 11 | 12 | for i in $(seq 1 $x); do 13 | oc create namespace "$ns_prefix""$i" 14 | oc apply -f $app_manifest -n "$ns_prefix""$i" 15 | oc expose pod hello-openshift -n "$ns_prefix""$i" 16 | oc expose svc hello-openshift -n "$ns_prefix""$i" 17 | done 18 | 19 | echo "Finding routes..." 20 | 21 | for i in $(seq 1 $x); do 22 | oc get route hello-openshift -n "$ns_prefix""$i" -o go-template='{{ .spec.host }}{{ println }}' 23 | done 24 | 25 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/scripts/lab8/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # deletes "Hello, OpenShift" app previously deployed to X namespaces using deploy.sh 4 | 5 | x=$(cat .ns) 6 | 7 | ns_prefix="hello-openshift-" 8 | app_manifest=https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json 9 | 10 | for i in $(seq 1 $x); do 11 | oc delete project "$ns_prefix""$i" 12 | done 13 | 14 | echo "Done..." 
15 | -------------------------------------------------------------------------------- /demos/2019_RHTE/labs/scripts/lab8/probe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # wgets routes created in previously deployed "Hello,OpenShift" app 4 | 5 | GREEN='\033[0;33m' 6 | NC='\033[0m' 7 | 8 | x=$(cat .ns) 9 | 10 | ns_prefix="hello-openshift-" 11 | 12 | for i in $(seq 1 $x); do 13 | echo -e "${GREEN}Probing app in namespace ""$ns_prefix""$i""${NC}" 14 | route=$(oc get route hello-openshift -n "$ns_prefix""$i" -o go-template='{{ .spec.host }}{{ println }}') 15 | curl http://${route} 16 | done 17 | 18 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/1.md: -------------------------------------------------------------------------------- 1 | 2 | # 1.0 Accessing the Lab Environment 3 | 4 | ## 1.1 Getting your dedicated lab environments using Guid Grabber 5 | 6 | On your laptop, open **TWO** browser tabs to [*Lab GUID Assignment page*](https://www.opentlc.com/gg/gg.cgi?profile=generic_tester). 7 | 8 | From this page, you will be assigned your unique GUID, which you will use to access your unique lab environment and systems. 9 | 10 | For this lab you will need two different environments (v3.11 and v4.1), pay attention while selecting the *Lab Code* from the drop down list. 11 | 12 | **You are not required to login anywhere at this point, in this intro lab you will only get the information related to your assigned lab environments.** 13 | 14 | ### 1.1.1 Getting 3.11 Cluster Environment - Browser Tab 1 15 | 16 | From the drop down list: 17 | 18 | 1. Choose the lab `A0005-A - OpenShift Cluster Application Migration (CAM) Lab` 19 | 2. Enter the activation key shared by the instructors 20 | 3. Enter your email address 21 | 4. Click submit 22 | 5. Save the information on screen 23 | 24 | e.g: 25 | 26 | ![Request Env GuidGrabber](screenshots/lab1/request-env-gg.png) 27 | 28 | The resulting *Lab Information page* will display your lab's GUID and other useful information about your lab environment. 29 | Take note of your assigned GUID. 30 | 31 | You will use this GUID to access your lab's environment and systems. 32 | Your unique GUID will also be embedded in your lab environment's host names. 33 | 34 | **Be sure to note that this GUID is for your v3.11 environment.** 35 | 36 | ### 1.1.2 Getting 4.1 Cluster Environment - Browser Tab 2 37 | 38 | From the drop down list: 39 | 40 | 1. Choose the lab `A0005-B - OpenShift Cluster Application Migration (CAM) Lab` 41 | 2. Enter the activation key shared by the instructors 42 | 3. Enter your email address 43 | 4. Click submit 44 | 5. Save the information on screen 45 | 46 | **Be sure to note that this GUID is for your v4.1 environment.** 47 | 48 | ## 1.2 Lab Environment Overview 49 | 50 | ![Lab Environment Overview](screenshots/lab1/lab-env-overview.png) 51 | 52 | ### 1.2.1 SSH Hosts 53 | 54 | | Hostname | User | Password | Description | 55 | | ---- | ---- | ---- | ---- | 56 | |`bastion..domain` |lab-user | r3dh4t1!| bastion host for 3.11 environment | 57 | |`bastion..domain` |lab-user | r3dh4t1! | client-vm for 4.1 environment | 58 | 59 | Next Lab: [Lab 2 - Prerequisites and Setup](./2.md)
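As a quick reference, logging in to either environment from your laptop looks like the sketch below; the GUIDs and base domain are placeholders, so substitute the values shown on your own *Lab Information page*:

```bash
# Bastion host for the 3.11 environment (user: lab-user, password: r3dh4t1!)
ssh lab-user@bastion.GUID3.example.opentlc.com

# Client VM for the 4.1 environment (user: lab-user, password: r3dh4t1!)
ssh lab-user@bastion.GUID4.example.opentlc.com
```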
60 | [Home](./README.md) 61 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/4.md: -------------------------------------------------------------------------------- 1 | # 4.0 Cluster Application Migration Tool (CAM) 2 | 3 | CAM is designed to migrate Application Workloads between OpenShift clusters. Specifically, CAM handles migration of k8s resource objects, persistent volumes, and internal images. CAM is designed to provide a migration experience while focusing on minimizing application downtime through the process. 4 | 5 | ## 4.1 Upstream Projects 6 | 7 | CAM leverages two upstream projects: [Velero](https://github.com/heptio/velero) and [Restic](https://restic.net/). Velero (formerly Heptio Ark) gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Restic is a backup program that is fast, efficient and secure. 8 | 9 | ![Velero Logo](./screenshots/lab4/velero.png) 10 | 11 | ## 4.2 Architecture 12 | 13 | CAM is implemented as a native k8s API extension through a custom resource definition. CAM orchestrates usage of Velero for performing backup/restores. OpenShift specific functionality is implemented in a series of Velero plugins. CAM is also equipped with a React/Patternfly 4 web UI to provide simple interactive experience. 14 | 15 | #### IMPORTANT 16 | 17 | > * **Migration is at scope of a Namespace**. Future versions will allow selecting resources inside of a Namespace 18 | 19 | >* **Cluster Scoped Resources are not handled**. Cluster Role Bindings, SCCs, etc are not handled with migration. 20 | 21 | > * **‘cluster-admin’ role required for initial release targeting OCP 4.2**. 22 | 23 | ## 4.3 Persistent Volume Handling 24 | 25 | CAM provides two methods for migrating persistent volumes: (1) **Move** and (2) **Copy**. 26 | 27 | 1. Move or "swinging" the PV recreates the PVC/PV definitions from source cluster to destination cluster. This option is highly desirable for environments with shared storage between the source and target clusters (i.e. NFS). 28 | 29 | ![Move PV Diagram](./screenshots/lab4/movepv.png) 30 | 31 | 2. Copy creates a copy of the data from source cluster into the destination cluster. This option involves creating a PVC on destination and allowing cluster to find a PV to bind to the claim. We then copy data into the PV. 32 | 33 | ![Copy PV Diagram](./screenshots/lab4/copypv.png) 34 | 35 | ## 4.4 Actions (Stage and Migrate) 36 | 37 | CAM introduces two actions on a Migration Plan: 38 | 39 | * **Stage** - Seeds data while leaving application up and running. 40 | 41 | * **Migrate** - Quiesces the application and migrates deltas from stage runs. 42 | 43 | ![Migration Actions](./screenshots/lab4/stage-migrate.png) 44 | 45 | #### IMPORTANT 46 | 47 | > * Stage can be run multiple times on a Migration Plan 48 | 49 | > * Migrate can only be run once. 50 | 51 | ## 4.5 Migration Process 52 | 53 | ![Migration Process](./screenshots/lab4/mig-process.png) 54 | 55 | There are 3 steps to the migration process within the CAM tool: 56 | 57 | 1. **Plan** 58 | * Select source cluster 59 | * Select namespaces 60 | * Choose Copy or Move for each Persistent volume 61 | * Specify intermediate object storage 62 | * Select destination cluster 63 | 64 | 65 | 2. **Stage** 66 | * Stages the data from source to destination cluster 67 | * May be run multiple times 68 | * No downtime during this step for source applications 69 | 70 | 71 | 3. 
**Migrate** 72 | * Quiesce the application 73 | * Migrate any delta state since last stage run 74 | 75 | ## 4.6 WebUI 76 | 77 | Let's bring up the webUI in preparation for our first Application Migration in Lab 5. 78 | 79 | If you don't remember the route from Lab 2, let's grab it again from our 4.1 environment. 80 | 81 | 1. Log into the 4.1 environment 82 | ```bash 83 | $ oc login https://api.cluster-GUID.GUID.DOMAIN:6443 -u admin -p r3dh4t1! 84 | The server uses a certificate signed by an unknown authority. 85 | You can bypass the certificate check, but any data you send to the server could be intercepted by others. 86 | Use insecure connections? (y/n): y 87 | ``` 88 | 2. Determine the route of `mig-ui`; we will use this later to enable a CORS header on the OCP 3.x side. 89 | ```bash 90 | $ oc get routes migration -n mig -o jsonpath='{.spec.host}' 91 | migration-mig.apps.cluster-a21d.a21d.sandbox67.opentlc.com 92 | ``` 93 | 94 | 3. Open a browser to https://migration-mig.apps.cluster-GUID.GUID.DOMAIN 95 | 96 | ![CAM Main Screen](./screenshots/lab4/cam-main-screen.png) 97 | 98 | We are now ready to perform our first Application Migration. 99 | 100 | Next Lab: [Lab 5 - Migrate MSSQL Application](./5.md)
101 | Previous Lab: [Lab 3 - CPMA Overview](./3.md)
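Because CAM is driven entirely by custom resources, you can also follow along from the CLI on the 4.1 cluster while you work through the UI. A minimal sketch, assuming the CAM operator is installed in the `mig` namespace as in this lab (resource names can vary between CAM releases):

```bash
# List the migration CRDs the CAM operator installed
oc api-resources --api-group=migration.openshift.io

# Inspect the objects the web UI creates as you work through the upcoming labs
oc get migcluster,migstorage,migplan,migmigration -n mig
```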
102 | [Home](./README.md) 103 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/6.md: -------------------------------------------------------------------------------- 1 | # 6.0 Migrate Sock-Shop Application 2 | 3 | In this lab we are going to migrate Weaveworks' Production Internet Store, the Sock Shop. This store has lots of traffic daily, and Weaveworks is very concerned about minimizing any downtime/unavailability of the store. The Sock Shop is running on your 3.11 cluster, and Weaveworks, very interested in many of the new features of the OpenShift 4.x platform, wants to migrate. 4 | 5 | ## 6.1 Application Architecture 6 | 7 | The architecture of the Sock Shop application was intentionally designed to provide as many micro-services as possible. As seen in the image below, the micro-services are roughly defined by their function in an e-commerce site. Networks are specified, but due to technology limitations may not be implemented in some deployments. All services communicate using REST over HTTP. 8 | 9 | ![Sock Shop Architecture](./screenshots/lab6/sock-shop-arch.png) 10 | 11 | If we log in to our 3.11 cluster, we can see the app running: 12 | 13 | ```bash 14 | $ oc get pods -n sock-shop 15 | NAME READY STATUS RESTARTS AGE 16 | carts-77555f7648-sdx48 1/1 Running 0 3h 17 | carts-db-74db84c448-lv8sk 1/1 Running 0 3h 18 | catalogue-b5fc87544-cvhb8 1/1 Running 0 3h 19 | catalogue-db-1-c2f4w 1/1 Running 0 3h 20 | front-end-5c49687b5c-qgzkj 1/1 Running 0 3h 21 | orders-56b86d7dd7-gsnxh 1/1 Running 0 3h 22 | orders-db-7645cb4d78-pmg8s 1/1 Running 0 3h 23 | payment-685fdbcf67-4kgzn 1/1 Running 0 3h 24 | queue-master-58bcb789cd-thq9v 1/1 Running 0 3h 25 | rabbitmq-798d7b5976-7mgdl 2/2 Running 0 3h 26 | session-db-7cc8ddc4cc-pxvmw 1/1 Running 0 3h 27 | shipping-5ccdd4b459-dsvxf 1/1 Running 0 3h 28 | user-5648777687-2zkgs 1/1 Running 0 3h 29 | user-db-b655656b7-48qzs 1/1 Running 0 3h 30 | ``` 31 | 32 | Let's get the route to the application and bring up the webUI. 33 | 34 | ```bash 35 | $ oc get route -n sock-shop 36 | NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD 37 | front-end front-end-sock-shop.apps.da30.events.opentlc.com / front-end 8079 None 38 | ``` 39 | 40 | ![Sock Shop Main Page](./screenshots/lab6/sock-shop-main.png) 41 | 42 | ## 6.2 Migration planning 43 | 44 | Again, the first step is to check out the CPMA-generated report that we created and downloaded in Lab 3. 45 | 46 | 1. Point your browser to `file:///tmp/cpma/report.html`. 47 | 48 | 2. Open the `Cluster report` section of the report and click on `Namespaces`. You'll see this section contains lots of detailed information by section. Let's focus our attention on two areas: PVCs and SCCs. 49 | 50 | 3. Click to open the `PVCs` section. You'll see that the Sock Shop application has 4 PVCs defined. Each of these is a 10Gig volume backed by NFS. We will need to account for all 4 of these in our Migration Plan. 51 | 52 | ![Sock Shop PVC CPMA](./screenshots/lab6/sock-shop-pvc-cpma.png) 53 | 54 | 4. Click to open the `SCCs` section. You'll see that the Sock Shop application makes use of a custom Security Context Constraint. Again, we will need to apply this to our 4.1 cluster in preparation for migration. Let's do this now. 
55 | 56 | ![Sock Shop SCC CPMA](./screenshots/lab6/sock-shop-scc-cpma.png) 57 | 58 | ### 6.2.1 Create Sock Shop Security Context Constraint 59 | 60 | The custom SCC YAML is available [here](./files/sock-shop-scc.yaml) (right-click Raw, then Save Link As if using your local machine). This file is also located on the bastion host if you prefer to create the SCC from there. 61 | 62 | 1. Run the following to recreate Sock Shop's `scc` on the destination 4.1 cluster: 63 | ```bash 64 | $ oc create -f sock-shop-scc.yaml 65 | securitycontextconstraints.security.openshift.io/sock-shop created 66 | ``` 67 | 68 | ## 6.3 Using CAM 69 | 70 | Next, let's open up the migration UI. Again, to get the route, run the following command on the destination 4.1 cluster: 71 | ```bash 72 | $ oc get routes migration -n mig -o jsonpath='{.spec.host}' 73 | migration-mig.apps.cluster-a21d.a21d.sandbox67.opentlc.com 74 | ``` 75 | 76 | Since we already have our source cluster, target cluster, and replication repository defined, we can move right to creating a migration plan. Click `Add Plan`: 77 | 78 | ![Sock Shop Mig Plan](./screenshots/lab6/sock-shop-mig-plan.png) 79 | 80 | Fill out a plan name. Click Next. 81 | 82 | ![Sock Shop Plan 2](./screenshots/lab6/sock-shop-mig-plan-2.png) 83 | 84 | Select the source and target cluster, the replication repository, and the `sock-shop` namespace (which we want to migrate over). Click Next. 85 | 86 | ![Sock Shop Mig Plan 3](./screenshots/lab6/sock-shop-mig-plan-3.png) 87 | 88 | Now we are shown the list of persistent volumes associated with our application workload. We should see the four volumes as listed in the CPMA report. Select which type of action you would like to perform on each PV. Since minimizing downtime is very important in this example, and both of our clusters have access to the NFS shared storage, let's select `move` for each PV. ***Move will re-map the PVs from the source cluster to the target cluster, so it's the fastest option for handling state.*** Click Next. 89 | 90 | ![Sock Shop Mig Plan 4](./screenshots/lab6/sock-shop-mig-plan-4.png) 91 | 92 | Since all our PVs are being `moved`, no destination storage classes need to be specified. Click Next. 93 | 94 | ![Sock Shop Mig Plan 5](./screenshots/lab6/sock-shop-mig-plan-5.png) 95 | 96 | After validating the migration plan, you will see a `Ready` message and you can click `Close`. 97 | 98 | 99 | 100 | ### 6.3.1 Migrate the Application Workload 101 | 102 | Now we can select `Migrate` or `Stage` on the application. Since we have chosen to `move` our four PVs, we will click on `Migrate`. *Stage will skip any PVs not using copy.* 103 | 104 | ![Sock Shop Mig Plan View](./screenshots/lab6/sock-shop-mig-plan-view.png) 105 | 106 | Optionally choose to *not* terminate the application on the source cluster. 107 | Leave it unchecked and select `Migrate`. 108 | 109 | ![Sock Shop Quiesce](./screenshots/lab6/sock-shop-mig-plan-quiesce.png) 110 | 111 | The migration will progress with a progress bar showing each step in the process. 112 | 113 | ![Sock Shop Progress Bar](./screenshots/lab6/sock-shop-progress.png) 114 | 115 | Once done, you should see `Migration Succeeded` on the migration plan. 116 | 117 | ![Sock Shop Migration Complete](./screenshots/lab6/sock-shop-mig-plan-complete.png) 118 | 119 | 120 | ### 6.3.2 Verify application is functioning on Destination Cluster 121 | 122 | Let's first open the OCP 4.1 web console and open the `sock-shop` namespace. 
123 | 124 | ![console](./screenshots/lab6/ocp4-sock-shop.png) 125 | 126 | Click on the Storage > PVCs to see the persistent volume claims for the application. 127 | 128 | ![route](./screenshots/lab6/ocp4-sock-shop-pvcs.png) 129 | 130 | Drilling down into one of the PVs, we can verify that the underlying storage is the NFS server instance running on our OCP3 cluster. This verifies that our PVs were indeed remapped via `move`. 131 | 132 | ![app](./screenshots/lab6/ocp4-sock-shop-pv-yaml.png) 133 | 134 | ![Success](./screenshots/lab6/success.png) 135 | **Great job**, you have now successfully migrated two applications to your target cluster! 136 | 137 | However, what happens when things don't go as planned? In the next lab, we will examine some techniques for debugging failed migrations. 138 | 139 | Next Lab: [Lab 7 - Debugging Failed Migrations](./7.md)
140 | Previous Lab: [Lab 5 - Migrate MSSQL Application](./5.md)
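If you prefer the CLI over the console for the verification in 6.3.2, you can read the backing NFS server straight out of each bound PV on the 4.1 cluster; a minimal sketch, assuming the PVs are NFS-backed as the CPMA report indicated:

```bash
# On the destination 4.1 cluster: list the claims that came over with the app
oc get pvc -n sock-shop

# Print each bound PV with its NFS server and path; these should still point
# at the storage used by the 3.11 cluster, confirming the PVs were moved, not copied
for pv in $(oc get pvc -n sock-shop -o jsonpath='{.items[*].spec.volumeName}'); do
  oc get pv "$pv" -o jsonpath='{.metadata.name}{"\t"}{.spec.nfs.server}{"\t"}{.spec.nfs.path}{"\n"}'
done
```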
141 | [Home](./README.md) 142 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/9.md: -------------------------------------------------------------------------------- 1 | # 9.0 Wrap Up 2 | 3 | **Congratulations** and **Thank You** for participating in the OpenShift Application Migration Lab. We hope you have found it to be both informative and thorough. By now you have completed the following activities: 4 | 5 | * Familiarized yourself with the CAM WebUI 6 | * Leveraged CPMA to generate a cluster report 7 | * Migrated multiple applications (stateless and stateful) from OpenShift 3 to OpenShift 4 8 | * Performed both Copy and Move operations for handling Persistent Volumes 9 | * Leveraged the API to migrate multiple namespaces in a single Migration Plan 10 | 11 | We've included a couple of additional applications in the 3.11 cluster deployment (Parks App & Robot Shop). Feel Free to perform migrations of these applications too (time permitting). 12 | 13 | ## 9.1 More Information 14 | 15 | | Type | Link | 16 | | ---- | ---- | 17 | |Source Code| https://github.com/konveyor/mig-operator | 18 | |Source Code | https://github.com/konveyor/mig-ui | 19 | |Source Code | https://github.com/konveyor/mig-controller | 20 | |Source Code | https://github.com/konveyor/mig-demo-apps | 21 | | YouTube Channel | https://www.youtube.com/channel/UCBDU5UK5Okg3mlIMygpkbNA?view_as=subscriber | 22 | 23 | 24 | Previous Lab: [Lab 8 - Migration at Scale via API (optional)](./8.md)
25 | [Home](./README.md) 26 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/README.md: -------------------------------------------------------------------------------- 1 | # Red Hat Summit 2020: OpenShift Cluster Application Migration Lab 2 | 3 | This guide will provide you with hands-on exposure to the new OpenShift Migration Tooling through a series of labs migrating application workloads (both stateful and stateless) between OpenShift Clusters. 4 | 5 | ### Goals 6 | 7 | * Migrate several application workloads (both stateful and stateless) from an OpenShift 3.11 cluster to an OpenShift 4.1 cluster using the Cluster Application Migration Tool (CAM) 8 | * Utilize the Control Plane Migration Assistance Tool (CPMA) to generate a detailed report identifying areas of custom configuration that need to be considered as part of application migration planning. 9 | 10 | ### Labs 11 | 12 | [Lab 1 - Introduction](./1.md)
13 | [Lab 2 - Prerequisites and Setup](./2.md)
14 | [Lab 3 - CPMA Overview](./3.md)
15 | [Lab 4 - CAM Overview](./4.md)
16 | [Lab 5 - Migrate MSSQL Application](./5.md)
17 | [Lab 6 - Migrate Sock Shop Application](./6.md)
18 | [Lab 7 - Debugging Failed Migrations](./7.md)
19 | [Lab 8 - Migration at Scale via API (optional)](./8.md)
20 | [Lab 9 - Wrap Up](./9.md)
21 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/files/cpma.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - name: "Reading v4 variables" 6 | include_vars: "../4.x/my_vars.yml" 7 | 8 | - name: "Setting facts" 9 | set_fact: 10 | guid_v4: "{{ guid }}" 11 | subdomain_v4: "{{ subdomain_base_suffix }}" 12 | 13 | - name: "Reading v3 variables" 14 | include_vars: "../3.x/my_vars.yml" 15 | 16 | - name: "Setting facts" 17 | set_fact: 18 | guid_v3: "{{ guid }}" 19 | subdomain_v3: "{{ subdomain_base_suffix }}" 20 | output_dir_v3: "{{ output_dir }}" 21 | 22 | - name: "Registering host" 23 | add_host: 24 | hostname: "bastion.{{ guid_v3 }}{{ subdomain_v3 }}" 25 | groups: "remote" 26 | 27 | - hosts: remote 28 | vars_files: 29 | - "../3.x/my_vars.yml" 30 | vars: 31 | ansible_ssh_private_key_file: "{{ output_dir }}/{{ guid }}key" 32 | ansible_user: ec2-user 33 | tasks: 34 | - name: "Including v4 variables" 35 | include_vars: "../4.x/my_vars.yml" 36 | delegate_to: localhost 37 | 38 | - name: Copy ssh key to bastion host 39 | copy: 40 | src: ~/.ssh/libra.pem 41 | dest: /home/ec2-user/.ssh/libra.pem 42 | owner: ec2-user 43 | group: ec2-user 44 | mode: '0600' 45 | 46 | - name: Create directory if it does not exist 47 | file: 48 | path: ~/cpma/bin 49 | state: directory 50 | mode: '0755' 51 | 52 | - name: Download cpma binary 53 | get_url: 54 | url: https://cpma.s3.us-east-2.amazonaws.com/cpma 55 | dest: ~/cpma/bin/cpma 56 | mode: 'u+rwx' 57 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/files/mssql-scc.yaml: -------------------------------------------------------------------------------- 1 | allowHostDirVolumePlugin: false 2 | allowHostIPC: false 3 | allowHostNetwork: false 4 | allowHostPID: false 5 | allowHostPorts: false 6 | allowPrivilegeEscalation: true 7 | allowPrivilegedContainer: true 8 | allowedCapabilities: null 9 | apiVersion: security.openshift.io/v1 10 | defaultAddCapabilities: null 11 | fsGroup: 12 | type: RunAsAny 13 | groups: [] 14 | kind: SecurityContextConstraints 15 | metadata: 16 | creationTimestamp: null 17 | name: mssql-persistent-scc 18 | selfLink: /apis/security.openshift.io/v1/securitycontextconstraints/mssql-persistent-scc 19 | priority: null 20 | readOnlyRootFilesystem: false 21 | requiredDropCapabilities: null 22 | runAsUser: 23 | type: RunAsAny 24 | seLinuxContext: 25 | type: RunAsAny 26 | supplementalGroups: 27 | type: RunAsAny 28 | users: 29 | - system:admin 30 | - system:serviceaccount:mssql-persistent:mssql-persistent-sa 31 | volumes: 32 | - awsElasticBlockStore 33 | - azureDisk 34 | - azureFile 35 | - cephFS 36 | - cinder 37 | - configMap 38 | - downwardAPI 39 | - emptyDir 40 | - fc 41 | - flexVolume 42 | - flocker 43 | - gcePersistentDisk 44 | - gitRepo 45 | - glusterfs 46 | - iscsi 47 | - nfs 48 | - persistentVolumeClaim 49 | - photonPersistentDisk 50 | - portworxVolume 51 | - projected 52 | - quobyte 53 | - rbd 54 | - scaleIO 55 | - secret 56 | - storageOS 57 | - vsphere 58 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/files/sock-shop-scc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: SecurityContextConstraints 3 | apiVersion: v1 4 | metadata: 5 | name: sock-shop 6 | readOnlyRootFilesystem: true 7 | allowPrivilegedContainer: true 8 | 
runAsUser: 9 | type: RunAsAny 10 | seLinuxContext: 11 | type: RunAsAny 12 | supplementalGroups: 13 | type: RunAsAny 14 | volumes: 15 | - '*' 16 | users: 17 | - system:admin 18 | - system:serviceaccount:sock-shop:sock-shop 19 | requiredDropCapabilities: 20 | - all 21 | defaultAddCapabilities: 22 | - CHOWN 23 | - SETGID 24 | - SETUID 25 | - NET_BIND_SERVICE 26 | - DAC_OVERRIDE -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab1/lab-env-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab1/lab-env-overview.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab1/request-env-gg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab1/request-env-gg.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab2/minio-bucket-creation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab2/minio-bucket-creation.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab2/minio-mybucket.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab2/minio-mybucket.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab2/minio_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab2/minio_login.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab2/ssh-details-gg.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab2/ssh-details-gg.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab3/cpma-report-html.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab3/cpma-report-html.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab4/cam-main-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/cam-main-screen.png -------------------------------------------------------------------------------- 
/demos/2020_Summit/labs/screenshots/lab4/copypv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/copypv.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab4/mig-process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/mig-process.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab4/migtooling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/migtooling.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab4/movepv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/movepv.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab4/stage-migrate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/stage-migrate.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab4/velero.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab4/velero.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/.DS_Store -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-add-cluster-success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-add-cluster-success.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-add-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-add-cluster.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-add-repo-success.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-add-repo-success.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-add-repo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-add-repo.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-clusters-added.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-clusters-added.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-main-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-main-screen.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-1.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-2.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-3.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-4.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-5.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-added.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-mig-plan-added.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-migration-complete.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-migration-complete.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-progress-bar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-progress-bar.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-quiesce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-quiesce.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cam-repo-added.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cam-repo-added.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/cpma-mssql-report.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/cpma-mssql-report.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-add-product.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-add-product.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-added-product.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-added-product.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-app-route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-app-route.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-namespace-detail.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-namespace-detail.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-persistent-app-ocp4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-persistent-app-ocp4.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-product-catalog.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-product-catalog.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-pv-yaml.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-pv-yaml.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-pv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-pv.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-pvc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-pvc.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-pvcs-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-pvcs-cpma.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/mssql-sccs-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/mssql-sccs-cpma.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab5/ocp-4-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab5/ocp-4-console.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/ocp4-sock-shop-pv-yaml.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/ocp4-sock-shop-pv-yaml.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/ocp4-sock-shop-pvcs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/ocp4-sock-shop-pvcs.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/ocp4-sock-shop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/ocp4-sock-shop.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-arch.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-main.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-2.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-3.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-4.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-5.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-complete.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-complete.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-quiesce.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-quiesce.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan-view.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-mig-plan.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-progress.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-progress.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-pvc-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-pvc-cpma.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-register.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-register.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/sock-shop-scc-cpma.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/sock-shop-scc-cpma.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab6/success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab6/success.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab7/mig-custom-resources.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab7/mig-custom-resources.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/lab7/mig-plan-failed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/lab7/mig-plan-failed.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/noobaa/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/noobaa/.DS_Store -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/noobaa/noobaa-bucket-created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/noobaa/noobaa-bucket-created.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/noobaa/noobaa-buckets-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/noobaa/noobaa-buckets-screen.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/noobaa/noobaa-create-bucket-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/noobaa/noobaa-create-bucket-screen.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/screenshots/noobaa/noobaa-login-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migtools/mig-agnosticd/af3a0cb96935d2ff18da3523363c745465f87e3b/demos/2020_Summit/labs/screenshots/noobaa/noobaa-login-screen.png -------------------------------------------------------------------------------- /demos/2020_Summit/labs/scripts/README.md: -------------------------------------------------------------------------------- 1 | # Scripts for Migration Demos in RHTE 2019 Labs 2 | 3 | ## Applying CORS Settings 4 | 5 | Login to the bastion host using the information available in your lab console : 6 | 7 | ```bash 8 | ssh @bastion.. 9 | ``` 10 | 11 | The `cors.yaml` playbook is available in the home directory on bastion host. 
12 | 13 | To apply CORS settings on your OCP3 cluster : 14 | 15 | ```bash 16 | GUID_4= DOMAIN= ansible-playbook cors.yaml 17 | ``` 18 | 19 | Example usage : 20 | 21 | ```bash 22 | GUID_4=09kt DOMAIN=events.opentlc.com ansible-playbook cors.yaml 23 | ``` 24 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/scripts/bookbag.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - when: ocp3_password is not defined or ocp4_password is not defined 6 | fail: 7 | msg: | 8 | Variables 'ocp3_password' and 'ocp4_password' are required: 9 | * ocp3_password : SSH password to login to OCP3 cluster 10 | * ocp4_password : SSH password to login to OCP4 cluster 11 | 12 | - name: "Reading cluster information file" 13 | set_fact: 14 | ocp3_info: 15 | ocp3_guid: "{{ lookup('ini', 'guid section=OCP3 file=cluster.info') }}" 16 | ocp3_domain: "{{ lookup('ini', 'domain section=OCP3 file=cluster.info') | regex_replace('^\\.', '') }}" 17 | ocp3_ssh_user: "{{ lookup('ini', 'student_name section=OCP3 file=cluster.info') }}" 18 | ocp3_password: "{{ ocp3_password }}" 19 | ocp4_info: 20 | ocp4_guid: "{{ lookup('ini', 'guid section=OCP4 file=cluster.info') }}" 21 | ocp4_domain: "{{ lookup('ini', 'domain section=OCP4 file=cluster.info') | regex_replace('^\\.', '') }}" 22 | ocp4_ssh_user: "{{ lookup('ini', 'student_name section=OCP4 file=cluster.info') }}" 23 | ocp4_password: "{{ ocp4_password }}" 24 | 25 | - name: "Creating bookbag project" 26 | shell: "oc create ns lab-instructions" 27 | register: output 28 | failed_when: output.stderr and not 'AlreadyExists' in output.stderr 29 | 30 | - set_fact: 31 | bookbag_repo: "https://github.com/konveyor/labs.git" 32 | bookbag_dir: "/home/{{ ansible_user }}/lab-instructions" 33 | bookbag_build_dir: "mtc/bookbag" 34 | 35 | - name: "Fetching bookbag repo" 36 | git: 37 | repo: "{{ bookbag_repo }}" 38 | dest: "{{ bookbag_dir }}" 39 | update: yes 40 | 41 | - name: "Building bookbag image" 42 | shell: "{{ item }}" 43 | args: 44 | chdir: "{{ bookbag_dir }}/{{ bookbag_build_dir }}" 45 | loop: 46 | - "oc project lab-instructions" 47 | - "oc process -f build-template.yaml -p GIT_REPO='{{ bookbag_repo }}' | oc apply -f -" 48 | - "oc start-build bookbag --follow --from-dir=." 
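# The loop above switches to the lab-instructions project, renders build-template.yaml, and runs a binary build of the bookbag image from the local checkout.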
49 | 50 | - name: "Deploying bookbag image" 51 | shell: "oc process -f deploy-template.yaml -p WORKSHOP_VARS='{{ ocp3_info | combine(ocp4_info, recursive=true) | to_json }}' | oc apply -f -" 52 | args: 53 | chdir: "{{ bookbag_dir }}/{{ bookbag_build_dir }}" 54 | 55 | - name: "Read bookbag route" 56 | shell: "oc get route -n lab-instructions bookbag -o go-template='{{ '{{' }} .spec.host {{ '}}' }}{{ '{{' }} println {{ '}}' }}'" 57 | register: output 58 | 59 | - debug: 60 | msg: "Route to bookbag : {{ output.stdout }}" 61 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/scripts/cors.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - name: "Fixing permissions of the key file" 6 | file: 7 | path: "/home/{{ lookup('env', 'USER') }}/.ssh/openshift_key" 8 | owner: "{{ lookup('env', 'USER') }}" 9 | mode: "0600" 10 | become: yes 11 | 12 | - hosts: masters 13 | vars: 14 | ansible_user: ec2-user 15 | ansible_ssh_private_key_file: "/home/{{ lookup('env', 'USER') }}/.ssh/openshift_key" 16 | guid_v4: "{{ lookup('env', 'GUID_4') }}" 17 | subdomain: "{{ lookup('env', 'DOMAIN') |d('events.opentlc.com') }}" 18 | tasks: 19 | - block: 20 | - name: "Adding new CORS rules" 21 | lineinfile: 22 | insertafter: "corsAllowedOrigins:" 23 | line: "- (?i)//migration-mig\\.apps\\.cluster-{{ guid_v4 }}\\.{{ guid_v4 }}\\.{{ subdomain }}" 24 | path: /etc/origin/master/master-config.yaml 25 | become: yes 26 | 27 | - name: "Checking if atomic-openshift services exist" 28 | shell: "systemctl status atomic-openshift-master-api" 29 | register: status 30 | become: yes 31 | ignore_errors: yes 32 | 33 | - name: "Applying new configuration [atomic-openshift services]" 34 | service: 35 | name: "{{ item }}" 36 | state: restarted 37 | loop: 38 | - atomic-openshift-master-api 39 | - atomic-openshift-master-controllers 40 | become: yes 41 | when: status.rc == 0 42 | 43 | - name: "Applying new configuration [master-restart]" 44 | shell: "/usr/local/bin/master-restart {{ item }}" 45 | loop: 46 | - api 47 | - controller 48 | when: status.rc != 0 49 | become: yes 50 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/scripts/lab8/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # deploys "Hello, OpenShift" app to random X number of namespaces 4 | 5 | echo "Number of namespaces? "; read x 6 | 7 | echo $x > .ns 8 | 9 | ns_prefix="hello-openshift-" 10 | app_manifest=https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json 11 | 12 | for i in $(seq 1 $x); do 13 | oc create namespace "$ns_prefix""$i" 14 | oc apply -f $app_manifest -n "$ns_prefix""$i" 15 | oc expose pod hello-openshift -n "$ns_prefix""$i" 16 | oc expose svc hello-openshift -n "$ns_prefix""$i" 17 | done 18 | 19 | echo "Finding routes..." 
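# The loop below prints each namespace's route hostname via a go-template so the deployed apps can be reached.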
20 | 21 | for i in $(seq 1 $x); do 22 | oc get route hello-openshift -n "$ns_prefix""$i" -o go-template='{{ .spec.host }}{{ println }}' 23 | done 24 | 25 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/scripts/lab8/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # deletes "Hello, OpenShift" app previously deployed to X namespaces using deploy.sh 4 | 5 | x=$(cat .ns) 6 | 7 | ns_prefix="hello-openshift-" 8 | app_manifest=https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json 9 | 10 | for i in $(seq 1 $x); do 11 | oc delete project "$ns_prefix""$i" 12 | done 13 | 14 | echo "Done..." 15 | -------------------------------------------------------------------------------- /demos/2020_Summit/labs/scripts/lab8/probe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # wgets routes created in previously deployed "Hello,OpenShift" app 4 | 5 | GREEN='\033[0;33m' 6 | NC='\033[0m' 7 | 8 | x=$(cat .ns) 9 | 10 | ns_prefix="hello-openshift-" 11 | 12 | for i in $(seq 1 $x); do 13 | echo -e "${GREEN}Probing app in namespace ""$ns_prefix""$i""${NC}" 14 | route=$(oc get route hello-openshift -n "$ns_prefix""$i" -o go-template='{{ .spec.host }}{{ println }}') 15 | curl http://${route} 16 | done 17 | 18 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/3.x/.gitignore: -------------------------------------------------------------------------------- 1 | my_vars.yml 2 | secret.yml 3 | 4 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/3.x/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | The scripts here will deploy an OpenShift 3.11 cluster with the same variables as intended for Summit 2020 Labs for OpenShift Migrations. 3 | Provisioning will likely take on order of ~70-90 minutes to complete. 4 | 5 | # Usage 6 | ## Create Cluster 7 | 1. Ensure that you have a `../../../../secret.yml` in the top level directory of this repo 8 | 1. Ensure that you have `cp my_vars.yml.sample my_vars.yml` and you have edited 'my_vars.yml' 9 | 1. Ensure that `AGNOSTICD_HOME` environment variable is set 10 | 1. Run: `create_ocp3_workshop.sh` 11 | 1. Wait ... ~70 - 90 minutes 12 | 13 | ## Destroy Cluster 14 | 1. Ensure that you have a `../../../../secret.yml` in the parent directory 15 | 1. Ensure that you have `cp my_vars.sample.yml my_vars.yml` and you have edited 'my_vars.yml' 16 | 1. Ensure that `AGNOSTICD_HOME` environment variable is set 17 | 1. Run: `delete_ocp3_workshop.sh` 18 | 1. Wait ... ~5-10 minutes 19 | * If something goes wrong and you need to do a manual deletion, you can clean up the AWS resources by finding the relevant CloudFormation template and deleting it via the AWS Management Console looking at the CloudFormation service in the correct region. 20 | 21 | # Tips 22 | 23 | ## Example: oc login 24 | 25 | $ oc login https://master.jmatthewsagn1.mg.dog8code.com:443 -u admin -p r3dh4t1! 26 | The server uses a certificate signed by an unknown authority. 27 | You can bypass the certificate check, but any data you send to the server could be intercepted by others. 28 | Use insecure connections? 
(y/n): yes 29 | 30 | # or alternative to create a new kubeconfig file to reference later 31 | export KUBECONFIG=~/.agnosticd/jmatthewsagn1/kubeconfig 32 | $ oc login https://master.jmatthewsagn1.mg.dog8code.com -u admin -p r3dh4t1! --config ${KUBECONFIG} 33 | 34 | 35 | 36 | ## Example: log into console 37 | 1. Look for info of the console in stdout 38 | 39 | skipping: [localhost] => (item=user.info: Openshift Master Console: https://master.jmatthewsagn1.mg.dog8code.com/console) => {"item": "user.info: Openshift Master Console: https://master.jmatthewsagn1.mg.dog8code.com/console"} 40 | 41 | * Visit: https://master.jmatthewsagn1.mg.dog8code.com 42 | 43 | * Username: admin 44 | * Password: r3dh4t1! 45 | * Can change admin user name with 46 | 47 | * -e 'admin_user=*some_name* 48 | 49 | 50 | ## Example: SSH into nodes 51 | 1. Find the output_dir, defined in `my_vars.yml` 52 | 1. Use the generated `*_ssh_conf` in the output directory to leverage the bastion as a proxy 53 | 54 | * Example: 55 | 56 | $ ssh -F /tmp/agnostic_jmatthewsagn1/ocp-workshop_jmatthewsagn1_ssh_conf master1 57 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/3.x/create_ocp3_workshop.sh: -------------------------------------------------------------------------------- 1 | OUR_DIR=`pwd` 2 | 3 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 4 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 5 | exit 6 | fi 7 | 8 | pushd . 9 | cd ${AGNOSTICD_HOME} 10 | ansible-playbook ${AGNOSTICD_HOME}/ansible/main.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/ocp3_vars.yml -e @${OUR_DIR}/../../../../secret.yml 11 | popd 12 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/3.x/delete_ocp3_workshop.sh: -------------------------------------------------------------------------------- 1 | OUR_DIR=`pwd` 2 | 3 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 4 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 5 | exit 6 | fi 7 | 8 | pushd . 9 | cd ${AGNOSTICD_HOME} 10 | ansible-playbook ${AGNOSTICD_HOME}/ansible/configs/ocp-workshop/destroy_env.yml ${OUR_DIR}/../../../../archive_deleted.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/ocp3_vars.yml -e @${OUR_DIR}/../../../../secret.yml 11 | popd 12 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/3.x/my_vars.yml.sample: -------------------------------------------------------------------------------- 1 | email: "" 2 | guid: ocp3-labs 3 | 4 | output_dir: "/home//.agnosticd/{{ guid }}" 5 | 6 | subdomain_base_suffix: .mg.dog8code.com 7 | HostedZoneId: Z2GE8CSGW2ZA8W 8 | 9 | key_name: libra # your private key [ must be present at ~/.ssh/.pem ] 10 | 11 | cloud_provider: ec2 12 | aws_region: us-west-2 13 | 14 | cloud_tags: # list of custom tags to add to your aws resources 15 | - owner: "{{ email }}" 16 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/3.x/ocp3_vars.yml: -------------------------------------------------------------------------------- 1 | platform: "aws" 2 | env_type: "ocp-workshop" 3 | software_to_deploy: "openshift" 4 | #install_idm: false 5 | #install_openshiftapb: false 6 | #install_lets_encrypt_certificates: false 7 | #enable_workshops_catalog: false 8 | remove_self_provisioners: true 9 | install_student_user: true 10 | student_name: lab-user 11 | student_password: r3dh4t1! 
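# admin_user below is the cluster administrator account used in the 'oc login -u admin' examples in README.md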
12 | admin_user: admin 13 | cloudformation_retries: 0 14 | repo_version: "3.11" 15 | osrelease: "3.11.161" 16 | install_k8s_modules: true 17 | 18 | bastion_instance_type: t2.large 19 | master_instance_type: m4.2xlarge 20 | infranode_instance_type: m4.2xlarge 21 | node_instance_type: m4.2xlarge 22 | support_instance_type: m4.large 23 | node_instance_count: 2 24 | 25 | support_instance_public_dns: true 26 | install_glusterfs: true 27 | 28 | mssql_cpu_requests: "0.5" 29 | mssql_memory_requests: "2Gi" 30 | infra_workloads: "ocp-workload-migration,ocp-workload-parks-app,ocp-workload-sock-shop,ocp-workload-mssql,ocp-workload-robot-shop,ocp-workload-file-uploader,ocp-workload-rocket-chat,ocp-workload-mig-verification" 31 | nfs_exports_config: "*(insecure,rw,no_root_squash,no_wdelay,sync)" 32 | nfs_server_address: "support1.{{ guid }}{{ subdomain_base_suffix }}" 33 | 34 | archive_dir: "{{ output_dir | dirname }}/archive" 35 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/4.x/.gitignore: -------------------------------------------------------------------------------- 1 | my_vars.yml 2 | secret.yml 3 | 4 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/4.x/create_ocp4_workshop.sh: -------------------------------------------------------------------------------- 1 | OUR_DIR=`pwd` 2 | 3 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 4 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 5 | exit 6 | fi 7 | 8 | pushd . 9 | cd ${AGNOSTICD_HOME} 10 | ansible-playbook ${AGNOSTICD_HOME}/ansible/main.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/ocp4_vars.yml -e @${OUR_DIR}/../../../../secret.yml 11 | popd 12 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/4.x/delete_ocp4_workshop.sh: -------------------------------------------------------------------------------- 1 | OUR_DIR=`pwd` 2 | 3 | if [[ -z "${AGNOSTICD_HOME}" ]]; then 4 | echo "Please ensure that 'AGNOSTICD_HOME' is set before running." 5 | exit 6 | fi 7 | 8 | pushd . 
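# Save the current directory, run the destroy playbook from inside AGNOSTICD_HOME, then popd back when done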
9 | cd ${AGNOSTICD_HOME} 10 | ansible-playbook ./ansible/configs/ocp4-workshop/destroy_env.yml ${OUR_DIR}/../../../../archive_deleted.yml -e @${OUR_DIR}/my_vars.yml -e @${OUR_DIR}/ocp4_vars.yml -e @${OUR_DIR}/../../../../secret.yml -vv 11 | popd 12 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/4.x/my_vars.yml.sample: -------------------------------------------------------------------------------- 1 | email: "" 2 | 3 | guid: "" 4 | 5 | output_dir: "/home//.agnosticd/{{ guid }}" 6 | 7 | subdomain_base_suffix: .mg.dog8code.com 8 | HostedZoneId: Z2GE8CSGW2ZA8W 9 | 10 | 11 | key_name: libra # your private key [ must be present at ~/.ssh/.pem ] 12 | cloud_provider: ec2 13 | aws_region: us-west-2 14 | 15 | 16 | cloud_tags: # list of custom tags to add to your aws resources 17 | - owner: "{{ email }}" 18 | 19 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/4.x/ocp4_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: "aws" 3 | env_type: ocp4-workshop 4 | cloudformation_retries: 0 5 | software_to_deploy: none 6 | 7 | ocp4_installer_version: "4.5.9" 8 | osrelease: 4.1.0 9 | bastion_instance_type: t2.medium 10 | install_ocp4: true 11 | 12 | install_opentlc_integration: false 13 | install_idm: htpasswd 14 | install_ipa_client: false 15 | install_ftl: false 16 | install_student_user: true 17 | student_name: lab-user 18 | student_password: r3dh4t1! 19 | default_workloads: ["ocp4-workload-project-request-template", "ocp4-workload-ocs-poc", "ocp4-workload-migration", "ocp4-workload-mig-verification"] 20 | infra_workloads: [] 21 | student_workloads: [] 22 | 23 | clientvm_instance_type: "t2.medium" 24 | clientvm_instance_count: 1 25 | master_instance_type: "m4.2xlarge" 26 | master_instance_count: 3 27 | worker_instance_type: "m4.2xlarge" 28 | worker_instance_count: 3 29 | 30 | ocs_namespace: openshift-storage 31 | ocs_mcg_core_cpu: 0.1 32 | ocs_mcg_db_cpu: 0.1 33 | ocs_mcg_core_mem: 1Gi 34 | ocs_mcg_pv_pool_bucket_name: migstorage 35 | ocs_migstorage: true 36 | ocs_migstorage_namespace: openshift-storage 37 | ocs_operator_workload_destroy: false 38 | archive_dir: "{{ output_dir | dirname }}/archive" 39 | 40 | # _infra_node_instance_type: "m4.large" 41 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/README.md: -------------------------------------------------------------------------------- 1 | ## Lab Tests 2 | 3 | Misc. scripts to deploy 2020 Summit lab-like environments. 4 | 5 | ## Instructions 6 | 7 | Prepare your `my_vars.yml` files in both [3.x](./3.x/) and [4.x](./4.x/) directories. 8 | 9 | Make sure you have secret.yml file created [here](../../../../secret.yml). 10 | 11 | To deploy OCP 3 Lab environment, run : 12 | 13 | ``` 14 | ./create_ocp3_workshop.sh 15 | ``` 16 | 17 | This deploys OCP3 with Migration workload, sample apps, Gluster and NFS. 18 | 19 | To delete, run : 20 | 21 | ``` 22 | ./delete_ocp3_workshop.sh 23 | ``` 24 | 25 | To deploy OCP 4 Lab environment, run : 26 | 27 | ``` 28 | ./create_ocp4_workshop.sh 29 | ``` 30 | 31 | This deploys OCP4 with Migration workload, and OCS Operator. 
32 | 33 | To delete, run : 34 | 35 | ``` 36 | ./delete_ocp4_workshop.sh 37 | ``` 38 | -------------------------------------------------------------------------------- /demos/2020_Summit/tests/post-install.yaml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | connection: local 3 | tasks: 4 | - block: 5 | - name: "Generating agnosticd_user_info [1]..." 6 | include_vars: 7 | file: "{{ item }}_vars.yml" 8 | loop: 9 | - './3.x/my' 10 | - './3.x/ocp3' 11 | - name: "Generating agnosticd_user_info [2]..." 12 | set_fact: 13 | ocp3_info: 14 | data: 15 | ocp3_guid: "{{ guid }}" 16 | ocp3_domain: "{{ guid }}{{ subdomain_base_suffix }}" 17 | ocp3_ssh_user: "{{ student_name }}" 18 | ocp3_password: "{{ student_password }}" 19 | - name: "Generating agnosticd_user_info [3]..." 20 | include_vars: 21 | file: "{{ item }}_vars.yml" 22 | loop: 23 | - './4.x/my' 24 | - './4.x/ocp4' 25 | - name: "Generating agnosticd_user_info [4]..." 26 | set_fact: 27 | ocp4_info: 28 | data: 29 | ocp4_guid: "{{ guid }}" 30 | ocp4_domain: "{{ guid }}{{ subdomain_base_suffix }}" 31 | ocp4_ssh_user: "{{ student_name }}" 32 | ocp4_password: "{{ student_password }}" 33 | - name: "Generating agnosticd_user_info [5]..." 34 | set_fact: 35 | agnosticd_user_info: "{{ ocp3_info | combine(ocp4_info, recursive=True) }}" 36 | 37 | - block: 38 | - name: "Logging in 4.x cluster..." 39 | copy: 40 | src: "/home/ec2-user/.kube/" 41 | dest: "/home/{{ agnosticd_user_info.data.ocp4_ssh_user }}/.kube" 42 | remote_src: true 43 | mode: "0777" 44 | owner: "{{ agnosticd_user_info.data.ocp4_ssh_user }}" 45 | become: yes 46 | 47 | - name: "Creating bookbag project" 48 | shell: "oc create ns lab-instructions" 49 | register: output 50 | failed_when: output.stderr and not 'AlreadyExists' in output.stderr 51 | 52 | - set_fact: 53 | bookbag_repo: "https://gitlab.com/2020-summit-labs/openshift-migration-lab-bookbag.git" 54 | bookbag_dir: "/home/{{ ansible_user }}/lab-instructions" 55 | - name: "Fetching bookbag repo" 56 | git: 57 | repo: "{{ bookbag_repo }}" 58 | dest: "{{ bookbag_dir }}" 59 | update: yes 60 | 61 | - name: "Building bookbag image" 62 | shell: "{{ item }}" 63 | args: 64 | chdir: "{{ bookbag_dir }}" 65 | loop: 66 | - "oc project lab-instructions" 67 | - "oc process -f build-template.yaml -p GIT_REPO='{{ bookbag_repo }}' | oc apply -f -" 68 | - "oc start-build bookbag --follow" 69 | 70 | - name: "Deploying bookbag image" 71 | shell: "oc process -f deploy-template.yaml -p WORKSHOP_VARS='{{ agnosticd_user_info.data | to_json }}' | oc apply -f -" 72 | args: 73 | chdir: "{{ bookbag_dir }}" 74 | - name: "Read bookbag route" 75 | shell: "oc get route -n lab-instructions bookbag -o go-template='{{ '{{' }} .spec.host {{ '}}' }}{{ '{{' }} println {{ '}}' }}'" 76 | register: output 77 | - debug: 78 | msg: "Route to bookbag : {{ output.stdout }}" 79 | vars: 80 | ansible_ssh_private_key_file: "~/.ssh/{{ key_name }}.pem" 81 | ansible_user: "ec2-user" 82 | agnosticd_user_info: "{{ agnosticd_user_info }}" 83 | delegate_to: "bastion.{{ agnosticd_user_info.data.ocp4_domain }}" 84 | -------------------------------------------------------------------------------- /files/README.md: -------------------------------------------------------------------------------- 1 | # Files that can be shared accross environments 2 | 3 | This directory contains files which can be pulled in and copied to bastion hosts during deployment of the labs. 
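The bookbag playbooks below read cluster connection details from an INI-style `cluster.info` file in the working directory, using Ansible `ini` lookups against `[OCP3]`/`[OCP4]` sections with `guid`, `domain`, `student_name`, and `bastion` keys (a leading dot on `domain` is stripped by the playbooks). A minimal sketch of that file, with placeholder GUIDs and hostnames, might look like:

```bash
# Sketch only: section and key names come from the ini lookups in bookbag.yml
# and bookbag-oadp.yml; every value below is a placeholder.
cat > cluster.info <<'EOF'
[OCP3]
guid = abcd3
domain = .abcd3.mg.example.com
student_name = lab-user
bastion = bastion.abcd3.mg.example.com

[OCP4]
guid = abcd4
domain = .abcd4.mg.example.com
student_name = lab-user
bastion = bastion.abcd4.mg.example.com
EOF
```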
4 | 5 | -------------------------------------------------------------------------------- /files/bookbag-oadp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | 6 | - name: "Reading cluster information file" 7 | set_fact: 8 | ocp4_info: 9 | ocp4_guid: "{{ lookup('ini', 'guid section=OCP4 file=cluster.info') }}" 10 | ocp4_domain: "{{ lookup('ini', 'domain section=OCP4 file=cluster.info') | regex_replace('^\\.', '') }}" 11 | ocp4_ssh_user: "{{ lookup('ini', 'student_name section=OCP4 file=cluster.info') }}" 12 | ocp4_bastion: "{{ lookup('ini', 'bastion section=OCP4 file=cluster.info') }}" 13 | ocp4_password: "{{ ocp4_password }}" 14 | 15 | - set_fact: 16 | bookbag_repo: "https://github.com/konveyor/labs.git" 17 | bookbag_dir: "/home/{{ ansible_user }}/lab-instructions" 18 | bookbag_build_dir: "oadp/bookbag" 19 | 20 | - name: "Building bookbag image" 21 | shell: "{{ item }}" 22 | args: 23 | chdir: "{{ bookbag_dir }}/{{ bookbag_build_dir }}" 24 | loop: 25 | - "oc project lab-instructions" 26 | - "oc process -f build-template.yaml -p GIT_REPO='{{ bookbag_repo }}' | oc apply -f -" 27 | - "oc start-build bookbag --follow --from-dir={{ bookbag_dir }}" 28 | 29 | - name: "Deploying bookbag image" 30 | shell: "oc process -f deploy-template.yaml -p WORKSHOP_VARS='{{ ocp4_info | to_json }}' | oc apply -f -" 31 | args: 32 | chdir: "{{ bookbag_dir }}/{{ bookbag_build_dir }}" 33 | 34 | - name: "Read bookbag route" 35 | shell: "oc get route -n lab-instructions bookbag -o go-template='{{ '{{' }} .spec.host {{ '}}' }}{{ '{{' }} println {{ '}}' }}'" 36 | register: output 37 | 38 | - debug: 39 | msg: "Route to bookbag : {{ output.stdout }}" 40 | -------------------------------------------------------------------------------- /files/bookbag.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | tasks: 5 | - when: ocp3_password is not defined or ocp4_password is not defined 6 | fail: 7 | msg: | 8 | Variables 'ocp3_password' and 'ocp4_password' are required: 9 | * ocp3_password : SSH password to login to OCP3 cluster 10 | * ocp4_password : SSH password to login to OCP4 cluster 11 | 12 | - name: "Reading cluster information file" 13 | set_fact: 14 | ocp3_info: 15 | ocp3_guid: "{{ lookup('ini', 'guid section=OCP3 file=cluster.info') }}" 16 | ocp3_domain: "{{ lookup('ini', 'domain section=OCP3 file=cluster.info') | regex_replace('^\\.', '') }}" 17 | ocp3_ssh_user: "{{ lookup('ini', 'student_name section=OCP3 file=cluster.info') }}" 18 | ocp3_password: "{{ ocp3_password }}" 19 | ocp3_bastion: "{{ lookup('ini', 'bastion section=OCP3 file=cluster.info') }}" 20 | ocp4_info: 21 | ocp4_guid: "{{ lookup('ini', 'guid section=OCP4 file=cluster.info') }}" 22 | ocp4_domain: "{{ lookup('ini', 'domain section=OCP4 file=cluster.info') | regex_replace('^\\.', '') }}" 23 | ocp4_ssh_user: "{{ lookup('ini', 'student_name section=OCP4 file=cluster.info') }}" 24 | ocp4_bastion: "{{ lookup('ini', 'bastion section=OCP4 file=cluster.info') }}" 25 | ocp4_password: "{{ ocp4_password }}" 26 | 27 | - name: "Creating bookbag project" 28 | shell: "oc create ns lab-instructions" 29 | register: output 30 | failed_when: output.stderr and not 'AlreadyExists' in output.stderr 31 | 32 | - set_fact: 33 | bookbag_repo: "https://github.com/konveyor/labs.git" 34 | bookbag_dir: "/home/{{ ansible_user }}/lab-instructions" 35 | bookbag_build_dir: "mtc/bookbag" 36 | 37 | 
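# The remaining tasks clone the konveyor/labs repo, build the bookbag image from its mtc/bookbag directory, deploy it with the merged OCP3/OCP4 cluster details, and print the resulting route.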
- name: "Fetching bookbag repo" 38 | git: 39 | repo: "{{ bookbag_repo }}" 40 | dest: "{{ bookbag_dir }}" 41 | update: yes 42 | 43 | - name: "Building bookbag image" 44 | shell: "{{ item }}" 45 | args: 46 | chdir: "{{ bookbag_dir }}/{{ bookbag_build_dir }}" 47 | loop: 48 | - "oc project lab-instructions" 49 | - "oc process -f build-template.yaml -p GIT_REPO='{{ bookbag_repo }}' | oc apply -f -" 50 | - "oc start-build bookbag --follow --from-dir={{ bookbag_dir }}" 51 | 52 | - name: "Deploying bookbag image" 53 | shell: "oc process -f deploy-template.yaml -p WORKSHOP_VARS='{{ ocp3_info | combine(ocp4_info, recursive=true) | to_json }}' | oc apply -f -" 54 | args: 55 | chdir: "{{ bookbag_dir }}/{{ bookbag_build_dir }}" 56 | 57 | - name: "Read bookbag route" 58 | shell: "oc get route -n lab-instructions bookbag -o go-template='{{ '{{' }} .spec.host {{ '}}' }}{{ '{{' }} println {{ '}}' }}'" 59 | register: output 60 | 61 | - debug: 62 | msg: "Route to bookbag : {{ output.stdout }}" 63 | -------------------------------------------------------------------------------- /files/lab8/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # deploys "Hello, OpenShift" app to random X number of namespaces 4 | 5 | printf "Number of namespaces? " 6 | read x 7 | 8 | echo $x > .ns 9 | 10 | ns_prefix="hello-openshift-" 11 | app_manifest=https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json 12 | 13 | for i in $(seq 1 $x); do 14 | oc create namespace "$ns_prefix""$i" 15 | oc apply -f $app_manifest -n "$ns_prefix""$i" 16 | oc expose pod hello-openshift -n "$ns_prefix""$i" 17 | oc expose svc hello-openshift -n "$ns_prefix""$i" 18 | done 19 | 20 | echo "Finding routes..." 21 | 22 | for i in $(seq 1 $x); do 23 | oc get route hello-openshift -n "$ns_prefix""$i" -o go-template='{{ .spec.host }}{{ println }}' 24 | done 25 | 26 | -------------------------------------------------------------------------------- /files/lab8/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # deletes "Hello, OpenShift" app previously deployed to X namespaces using deploy.sh 4 | 5 | x=$(cat .ns) 6 | 7 | ns_prefix="hello-openshift-" 8 | app_manifest=https://raw.githubusercontent.com/openshift/origin/master/examples/hello-openshift/hello-pod.json 9 | 10 | for i in $(seq 1 $x); do 11 | oc delete project "$ns_prefix""$i" 12 | done 13 | 14 | echo "Done..." 
15 | -------------------------------------------------------------------------------- /files/lab8/probe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # wgets routes created in previously deployed "Hello,OpenShift" app 4 | 5 | GREEN='\033[0;33m' 6 | NC='\033[0m' 7 | 8 | if [ -z "$1" ] 9 | then 10 | x=$(cat .ns) 11 | else 12 | x=$1 13 | fi 14 | 15 | ns_prefix="hello-openshift-" 16 | 17 | for i in $(seq 1 $x); do 18 | echo -e "${GREEN}Probing app in namespace ""$ns_prefix""$i""${NC}" 19 | route=$(oc get route hello-openshift -n "$ns_prefix""$i" -o go-template='{{ .spec.host }}{{ println }}') 20 | curl http://${route} 21 | done 22 | 23 | -------------------------------------------------------------------------------- /files/prepare_station.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script aims to automate Bookbag setup for MTC Lab 4 | # Since there is currently no easy way to guess the OCP3 5 | # enviromnent, we need to ask a user for GUID. 6 | # Based on the given GUID we can set the rest of the Bookbag instructions up 7 | 8 | # These should be overwritten by Ansible after deployment. 9 | # BEGIN ANSIBLE MANAGED BLOCK 10 | STUDENT=STUDENT 11 | PASSWORD=PASSWORD 12 | API_LOGIN=API_LOGIN 13 | API_PASS=API_PASS 14 | API_ADDRESS=API_ADDRESS 15 | LOCAL_GUID=GUID 16 | # END ANSIBLE MANAGED BLOCK 17 | 18 | HOME="/home/$STUDENT" 19 | 20 | main(){ 21 | 22 | # Print a welcome message and ask user for input 23 | welcome_message 24 | check_guid 25 | 26 | # Try to reach the OCP 3 cluster, and copy the cluster.info over 27 | get_cluster_info 28 | 29 | # Run the bookbag playbook 30 | deploy_bookbag 31 | 32 | # Modify bashrc and move the script away to 'startup' after completion 33 | cleanup 34 | 35 | } 36 | 37 | 38 | 39 | 40 | 41 | 42 | # Functions go here 43 | # Function which welcomes the user and asks for OCP3 hostname 44 | 45 | welcome_message() { 46 | clear 47 | cat << EOF 48 | ██╗ ██╗███████╗██╗ ██████╗ ██████╗ ███╗ ███╗███████╗ 49 | ██║ ██║██╔════╝██║ ██╔════╝██╔═══██╗████╗ ████║██╔════╝ 50 | ██║ █╗ ██║█████╗ ██║ ██║ ██║ ██║██╔████╔██║█████╗ 51 | ██║███╗██║██╔══╝ ██║ ██║ ██║ ██║██║╚██╔╝██║██╔══╝ 52 | ╚███╔███╔╝███████╗███████╗╚██████╗╚██████╔╝██║ ╚═╝ ██║███████╗ 53 | ╚══╝╚══╝ ╚══════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝ 54 | 55 | ████████╗ ██████╗ 56 | ╚══██╔══╝██╔═══██╗ 57 | ██║ ██║ ██║ 58 | ██║ ██║ ██║ 59 | ██║ ╚██████╔╝ 60 | ╚═╝ ╚═════╝ 61 | 62 | ███╗ ███╗████████╗ ██████╗ ██╗ █████╗ ██████╗ 63 | ████╗ ████║╚══██╔══╝██╔════╝ ██║ ██╔══██╗██╔══██╗ 64 | ██╔████╔██║ ██║ ██║ ██║ ███████║██████╔╝ 65 | ██║╚██╔╝██║ ██║ ██║ ██║ ██╔══██║██╔══██╗ 66 | ██║ ╚═╝ ██║ ██║ ╚██████╗ ███████╗██║ ██║██████╔╝ 67 | ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝╚═════╝ 68 | 69 | ================================================================= 70 | EOF 71 | printf "\nPlease enter your OCP3 bastion hostname. \nThat is the one you received FOR YOUR OCP3 environment: " 72 | check_hostname 73 | } 74 | 75 | check_guid(){ 76 | # Check if the GUID is from the LOCAL system 77 | while [ $GUID = $LOCAL_GUID ] 78 | do 79 | printf "\nPlease enter the OCP3 hostname, NOT the OCP4 (local) system hostname: " 80 | check_hostname 81 | done 82 | printf "Your OCP3 GUID is $GUID. 
\nWorking...\n" 83 | } 84 | 85 | check_hostname(){ 86 | read HOSTNAME 87 | if [[ "$HOSTNAME" =~ "@" ]] 88 | then 89 | # Someone pasted the whole thing, with the username, strip it 90 | HOSTNAME=$(echo $HOSTNAME|cut -d @ -f 2) 91 | fi 92 | GUID=$(echo $HOSTNAME|cut -d . -f 2) 93 | # printf "GUID: $GUID\n" 94 | } 95 | 96 | get_cluster_info(){ 97 | printf "Checking cluster connectivity\n" 98 | check_host=(sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no $STUDENT@$HOSTNAME ls cluster.info) 99 | until "${check_host[@]}" 100 | do 101 | printf "Host still not reachable. Waiting 15s and trying again\n" 102 | sleep 15 103 | done 104 | # Getting and merging the cluster.info files. 105 | if sshpass -p "$PASSWORD" scp $STUDENT@$HOSTNAME:./cluster.info cluster.ocp3 106 | then 107 | printf "Grabbing cluster info from OCP3 cluster\n" 108 | if [ -f cluster.orig ]; then 109 | cp cluster.orig cluster.info 110 | fi 111 | cp cluster.info cluster.orig 112 | cat cluster.ocp3 >> cluster.info 113 | else 114 | printf "Couldn't copy the cluster.info file from OCP3\n" 115 | exit 1 116 | fi 117 | } 118 | 119 | deploy_bookbag(){ 120 | # We have to oc login to be able to make changes to the cluster 121 | printf "Logging into OCP4 cluster\n" 122 | oc login -u $API_LOGIN -p $API_PASS --insecure-skip-tls-verify=true $API_ADDRESS 123 | 124 | # Now run the ansible-playbook to deploy Bookbag 125 | printf "Running bookbag installation script\n" 126 | ansible-playbook -e ansible_user=$STUDENT -e ocp3_password=$PASSWORD -e ocp4_password=$PASSWORD bookbag.yml > >(tee -a bookbag.log) 2> >(tee -a bookbag_err.log >&2) 127 | BOOKBAG_URL=$(sed -n 's/.*\(bookbag-.*\)".*/\1/p' bookbag.log) 128 | printf "\nWaiting for Bookbag to become available" 129 | until [[ $(curl -k -s https://$BOOKBAG_URL) =~ "Redirecting" ]] 130 | do 131 | printf "." 132 | sleep 10 133 | done 134 | printf "\n\n\t\tYour Bookbag is up and running. \n\t\t You can reach it via:\n" 135 | printf "\n\t https://$BOOKBAG_URL\n\n" 136 | printf "\n\t\t\tHappy Migrating!\n\n" 137 | } 138 | 139 | enable_nooba_admin(){ 140 | # This is done now via ansible on deployment. Left here for posterity. 
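# Creates a cluster-admins group, binds the cluster-admin role to it, and adds the 'admin' user to the group.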
141 | 142 | oc adm groups new cluster-admins 143 | oc adm policy add-cluster-role-to-group cluster-admin cluster-admins 144 | oc adm groups add-users cluster-admins admin 145 | } 146 | 147 | cleanup(){ 148 | mkdir startup 149 | mv prepare_station.sh startup 150 | sed -i '/prepare_station.sh/d' $HOME/.bashrc 151 | } 152 | 153 | main "$@" 154 | exit 155 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==6.5.0 2 | ansible-core==2.13.5 3 | awscli==1.21.0 4 | boto==2.49.0 5 | boto3==1.19.0 6 | botocore==1.22.0 7 | cffi==1.15.0 8 | colorama==0.4.3 9 | cryptography==35.0.0 10 | distro==1.8.0 11 | docutils==0.15.2 12 | Jinja2==3.0.2 13 | jmespath==0.10.0 14 | MarkupSafe==2.0.1 15 | packaging==21.3 16 | passlib==1.7.4 17 | pyasn1==0.4.8 18 | pycparser==2.20 19 | pyparsing==3.0.9 20 | python-dateutil==2.8.2 21 | PyYAML==5.4.1 22 | resolvelib==0.8.1 23 | rsa==4.7.2 24 | s3transfer==0.5.0 25 | selinux==0.2.1 26 | six==1.16.0 27 | urllib3==1.26.7 28 | -------------------------------------------------------------------------------- /secret.ocp3.yml.sample: -------------------------------------------------------------------------------- 1 | # 2 | # Below is if you have a mirror of content you want to use 3 | # Reach out to one of the developers if you need access to an internal mirror we have 4 | # 5 | # own_repo_path: "http://REPLACE_ME/repos/ocp/{{ osrelease }}/" 6 | 7 | # If not using, 'own_repo_path', you need to supply credentials for 8 | # subscription manager to register for yum content 9 | # 10 | # Uncomment the below if you are not using 11 | # 'own_repo_path' and enter credentials for subscription manager 12 | # 13 | #rhel_subscription_user: "replace_with_username" 14 | #rhel_subscription_pass: "replace_with_password" 15 | #repo_method="rhn" 16 | #rhn_pool_id_string="Employee SKU" 17 | 18 | -------------------------------------------------------------------------------- /secret.ocp4.yml.sample: -------------------------------------------------------------------------------- 1 | # If you are using satellite as a repository, set the following: 2 | # Variables to be set are: 3 | repo_method: satellite 4 | satellite_url: your.satellite.host 5 | set_repositories_satellite_url: "{{ satellite_url }}" 6 | satellite_org: your_org 7 | set_repositories_satellite_org: "{{ satellite_org }}" 8 | satellite_activationkey: activation_key 9 | set_repositories_satellite_activationkey: "{{ satellite_activationkey }}" 10 | 11 | -------------------------------------------------------------------------------- /secret.yml.sample: -------------------------------------------------------------------------------- 1 | --- 2 | aws_access_key_id: "REPLACE_WITH_ACCESS_KEY_ID" 3 | aws_secret_access_key: "REPLACE_WITH_SECRET_ACCESS_KEY" 4 | 5 | # User credentials for access.redhat.com 6 | # Used to fetch images from registry.redhat.io 7 | redhat_registry_user: "replace_with_portal_credentials" 8 | redhat_registry_password: "portal_password" 9 | 10 | -------------------------------------------------------------------------------- /workloads/README.md: -------------------------------------------------------------------------------- 1 | ## Deploying AgnosticD workloads 2 | 3 | This is a collection of scripts that help you deploy workloads on AgnosticD OpenShift clusters. 
4 | 5 | These AgnosticD workloads assist in automated deployment of: 6 | - The OpenShift 3->4 Application Migration Tool 7 | - Various sample apps for use when testing App Migration capabilities from OpenShift 3->4 8 | 9 | The workloads are available in the AgnosticD repo. Make sure you have cloned the repo and set the AGNOSTICD_HOME environment variable to the location of the repo. 10 | 11 | ```bash 12 | git clone https://github.com/redhat-cop/agnosticd 13 | ``` 14 | 15 | ### Deploying a workload 16 | 17 | #### Usage: `./deploy_workload.sh ` 18 | |Flag|Purpose|Accepted Values| 19 | |---|---|---| 20 | |`-a`|Workload Action|[`create`, `remove`]| 21 | |`-w`|Workload Name|[`migration`, `mssql`, ...]| 22 | |`-v`|AgnosticD OpenShift Version|[`3`, `4`]| 23 | 24 | To create a new workload: 25 | 26 | ```bash 27 | ./deploy_workload.sh -a create -w  -v  28 | ``` 29 | 30 | `workload_name` is the name of the workload to deploy. 31 | 32 | `ocp_version` is the version of the AgnosticD OpenShift cluster to deploy the workload on (`3` or `4`). 33 | 34 | To remove the workload: 35 | 36 | ```bash 37 | ./deploy_workload.sh -a remove -w  -v  38 | ``` 39 | 40 | To print help: 41 | 42 | ```bash 43 | ./deploy_workload.sh -h 44 | ``` 45 | 46 | #### Deploying migration workloads 47 | 48 | Available migration workloads - 49 | 50 | * migration : Mig Operator workload to deploy UI, Controller and Velero 51 | * mssql : MsSQL server with a sample frontend app 52 | * minio : Minio server to provide S3 API endpoint 53 | * noobaa : NooBaa operator workload on OCP 4 54 | * ocs-poc : OpenShift Container Storage for OpenShift 4.x 55 | * parks-app : Demo application 56 | * sock-shop : Demo application 57 | * robot-shop : Demo application 58 | 59 | ```bash 60 | # Deploy Migration components to OpenShift 3 (velero) 61 | ./deploy_workload.sh -a create -w migration -v 3 62 | 63 | # Deploy Migration components to OpenShift 4 (velero, mig-controller, mig-ui) 64 | ./deploy_workload.sh -a create -w migration -v 4 65 | 66 | # Deploy mssql sample app to OpenShift 3 67 | ./deploy_workload.sh -a create -w mssql -v 3 68 | 69 | # Deploy OCS cluster to OpenShift 4 70 | ./deploy_workload.sh -a create -w ocs-poc -v 4 71 | 72 | # Deploy minio to OpenShift 4 73 | ./deploy_workload.sh -a create -w minio -v 4 74 | # Access key: minio; Secret key: minio123 75 | ``` 76 | 77 | ### About Workload Configuration 78 | 79 | Migration workloads have default variables set in `workload_vars/.yml`. 80 | 81 | You may change these variables for your use-case, but the defaults will allow for Migration of apps from OpenShift 3->4 with the Migration Controller + UI located on OpenShift 4. 82 | 83 | 84 | ### Potential Errors 85 | 86 | #### Failed to connect to the host via ssh 87 | 88 | ``` 89 | TASK [Gathering Facts] ******************************************************************************************** 90 | fatal: [bastion.jwm0819ocp4d.mg.example.com]: UNREACHABLE! => {"changed": false, "msg": "Failed to connect to the host via ssh: Warning: Permanently added 'bastion.jwm0819ocp4d.mg.example.com,18.189.40.236' (ECDSA) to the list of known hosts.\r\nec2-user@bastion.jwm0819ocp4d.mg.example.com: Permission denied (publickey,gssapi-keyex,gssapi-with-mic).", "unreachable": true} 91 | ``` 92 | 93 | The above may be related to having an issue sshing into the bastion node. 94 | We typically configure our local machines where we are running mig-agnosticd with a '~/.ssh/config' that will choose the correct ssh key to use.
95 | The ssh key to use corresponds to what is set with the entry 'key_name:' in your 'my_vars.yml' 96 | 97 | ``` 98 | # Looking at ~/.ssh/config 99 | Host *.mg.example.com 100 | User ec2-user 101 | IdentityFile /home/myusername/.ssh/mysshkey.pem 102 | ``` 103 | 104 | -------------------------------------------------------------------------------- /workloads/ansible.cfg: -------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null 3 | -------------------------------------------------------------------------------- /workloads/deploy_workload.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script deploys a workload on specified OCP cluster 4 | # It assumes that the cluster is launched with mig-agnosticd 5 | 6 | USAGE="$(basename "$0") [-h] [-w WORKLOAD] [-e OCP_VERSION (3 or 4)] [-a ACTION] 7 | 8 | Options : 9 | -h Show this help 10 | -w Name of the workload to deploy 11 | -v OCP Version (3 or 4) 12 | -a Action ('create' or 'remove') 13 | -m my_vars.yml directory (optional) 14 | 15 | Note : 16 | Make sure your cluster was launched using mig-agnosticd 17 | 18 | Example Usage : 19 | $(basename "$0") -w migration -v 3 -a remove 20 | $(basename "$0") -w mssql -v 4 -a create 21 | " 22 | 23 | WORKLOAD="" 24 | OCP="" 25 | OPTARG="" 26 | MY_VARS_DIR="" 27 | 28 | while getopts ':hw:v:a:m:' option; do 29 | case "$option" in 30 | h) echo "$USAGE" 31 | exit 32 | ;; 33 | w) WORKLOAD=$OPTARG 34 | ;; 35 | v) OCP=$OPTARG 36 | MY_VARS_DIR="../${OCP}.x" 37 | ;; 38 | a) ACTION=$OPTARG 39 | ;; 40 | m) MY_VARS_DIR="$OPTARG" 41 | ;; 42 | :) printf "missing argument for -%s\n" "$OPTARG" >&2 43 | echo "$USAGE" >&2 44 | exit 1 45 | ;; 46 | \?) printf "illegal option: -%s\n" "$OPTARG" >&2 47 | echo "$USAGE" >&2 48 | exit 1 49 | ;; 50 | esac 51 | done 52 | if ((OPTIND < 6)) 53 | then 54 | echo -e "Missing required options...\n" 55 | echo "$USAGE" 56 | exit 1 57 | fi 58 | shift $((OPTIND -1)) 59 | 60 | 61 | if [ -z ${AGNOSTICD_HOME} ]; then 62 | echo "Please set AGNOSTICD_HOME env variable..." 63 | exit 1 64 | fi 65 | 66 | if [ ${ACTION} != "create" ] && [ ${ACTION} != "remove" ]; then 67 | echo -e "Invalid action...\n" 68 | echo "$USAGE" 69 | exit 1 70 | fi 71 | 72 | export ANSIBLE_ROLES_PATH=${AGNOSTICD_HOME}/ansible/roles 73 | ANSIBLE_LIBRARY=${AGNOSTICD_HOME}/ansible/library ansible-playbook ./workload.yml \ 74 | -e"action=${ACTION}" \ 75 | -e"workload=${WORKLOAD}" \ 76 | -e"agnosticd_home=${AGNOSTICD_HOME}" \ 77 | -e "my_vars_dir=${MY_VARS_DIR}" \ 78 | -e"ocp_version=${OCP}" 79 | rc=$? 
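# Capture the playbook's exit status before cleanup so 'exit ${rc}' below can propagate it to the caller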
--------------------------------------------------------------------------------
/workloads/ansible.cfg:
--------------------------------------------------------------------------------
[ssh_connection]
ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
--------------------------------------------------------------------------------
/workloads/deploy_workload.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# This script deploys a workload on the specified OCP cluster
# It assumes that the cluster is launched with mig-agnosticd

USAGE="$(basename "$0") [-h] [-w WORKLOAD] [-v OCP_VERSION (3 or 4)] [-a ACTION]

Options :
  -h  Show this help
  -w  Name of the workload to deploy
  -v  OCP Version (3 or 4)
  -a  Action ('create' or 'remove')
  -m  my_vars.yml directory (optional)

Note :
  Make sure your cluster was launched using mig-agnosticd

Example Usage :
  $(basename "$0") -w migration -v 3 -a remove
  $(basename "$0") -w mssql -v 4 -a create
"

WORKLOAD=""
OCP=""
ACTION=""
OPTARG=""
MY_VARS_DIR=""

while getopts ':hw:v:a:m:' option; do
  case "$option" in
    h) echo "$USAGE"
       exit
       ;;
    w) WORKLOAD=$OPTARG
       ;;
    v) OCP=$OPTARG
       MY_VARS_DIR="../${OCP}.x"
       ;;
    a) ACTION=$OPTARG
       ;;
    m) MY_VARS_DIR="$OPTARG"
       ;;
    :) printf "missing argument for -%s\n" "$OPTARG" >&2
       echo "$USAGE" >&2
       exit 1
       ;;
    \?) printf "illegal option: -%s\n" "$OPTARG" >&2
        echo "$USAGE" >&2
        exit 1
        ;;
  esac
done

# Rough check that the required options (-a, -w, -v) were all supplied with arguments
if ((OPTIND < 6))
then
  echo -e "Missing required options...\n"
  echo "$USAGE"
  exit 1
fi
shift $((OPTIND -1))


if [ -z "${AGNOSTICD_HOME}" ]; then
  echo "Please set AGNOSTICD_HOME env variable..."
  exit 1
fi

if [ "${ACTION}" != "create" ] && [ "${ACTION}" != "remove" ]; then
  echo -e "Invalid action...\n"
  echo "$USAGE"
  exit 1
fi

export ANSIBLE_ROLES_PATH=${AGNOSTICD_HOME}/ansible/roles
ANSIBLE_LIBRARY=${AGNOSTICD_HOME}/ansible/library ansible-playbook ./workload.yml \
  -e "action=${ACTION}" \
  -e "workload=${WORKLOAD}" \
  -e "agnosticd_home=${AGNOSTICD_HOME}" \
  -e "my_vars_dir=${MY_VARS_DIR}" \
  -e "ocp_version=${OCP}"
rc=$?
unset ANSIBLE_ROLES_PATH
exit ${rc}
--------------------------------------------------------------------------------
/workloads/workload.yml:
--------------------------------------------------------------------------------
---
- name: "{{ 'Deploying' if action=='create' else 'Removing' }} Workload"
  hosts: localhost
  vars_files:
    - "{{ my_vars_dir | mandatory }}/my_vars.yml"
  connection: local
  tasks:
    - name: "Registering bastion host"
      add_host:
        groups: remote
        hostname: "bastion.{{ guid }}{{ subdomain_base_suffix }}"

- hosts: remote
  remote_user: "{{ remote_user | d('ec2-user') }}"
  vars:
    ansible_ssh_private_key_file: "{{ output_dir }}/ssh_provision_{{ guid }}"
    ansible_user: ec2-user
  vars_files:
    - "{{ my_vars_dir | mandatory }}/my_vars.yml"
    - "{{ my_vars_dir | mandatory }}/ocp{{ ocp_version }}_vars.yml"
  tasks:
    # Make sure a usable kubeconfig exists on the bastion before running the workload role
    - block:
        - name: "Checking connection with cluster"
          shell: "oc status"
          register: oc_status
          ignore_errors: yes
        - name: "Copying local Kubeconfig to bastion [4.x]"
          copy:
            src: "{{ output_dir }}/{{ env_type }}_{{ guid }}_kubeconfig"
            dest: "/home/ec2-user/.kube/config"
          when: ocp_version == '4' and oc_status.rc != 0
        - name: "Creating kubeconfig file [3.x]"
          file:
            path: "{{ item.name }}"
            mode: 0777
            owner: ec2-user
            state: "{{ item.state }}"
          loop:
            - name: "/home/ec2-user/.kube"
              state: directory
            - name: "/home/ec2-user/.kube/config"
              state: touch
          become: yes
          when: ocp_version == '3' and oc_status.rc != 0
        - name: "Logging in [3.x]"
          shell: "oc login -u {{ admin_user | default('opentlc-mgr') }} -p {{ admin_password | default('r3dh4t1!') }} https://master.{{ guid }}{{ subdomain_base_suffix }} --insecure-skip-tls-verify=true --config /home/ec2-user/.kube/config"
          when: ocp_version == '3' and oc_status.rc != 0

    - name: "Including secrets and workload variables..."
      include_vars: "{{ item }}"
      loop:
        - "../secret.yml"
        - "./workload_vars/{{ workload }}.yml"

    # Resolve the AgnosticD role name for the requested workload
    - when: ocp_version == '3'
      delegate_to: localhost
      block:
        - name: "Checking if workload exists [3.x]..."
          stat:
            path: "{{ agnosticd_home }}/ansible/roles/ocp-workload-{{ workload }}"
          register: workload_exists
        - set_fact:
            workload_name: "ocp-workload-{{ workload }}"
          when: workload_exists.stat.exists
        - fail:
            msg: "Workload does not exist for specified OCP version."
          when: not workload_exists.stat.exists

    - when: ocp_version == '4'
      delegate_to: localhost
      block:
        - name: "Checking if workload exists [4.x]..."
          stat:
            path: "{{ agnosticd_home }}/ansible/roles/ocp4-workload-{{ workload }}"
          register: workload_exists
        - name: "Checking if shared workload exists [4.x]..."
          stat:
            path: "{{ agnosticd_home }}/ansible/roles/ocp-workload-{{ workload }}"
          register: shared_workload_exists
          when: not workload_exists.stat.exists
        - set_fact:
            workload_name: "ocp4-workload-{{ workload }}"
          when: workload_exists.stat.exists
        - set_fact:
            workload_name: "ocp-workload-{{ workload }}"
          when: not workload_exists.stat.exists and shared_workload_exists.stat.exists
        - fail:
            msg: "Workload does not exist for specified OCP version."
          when: not workload_exists.stat.exists and not shared_workload_exists.stat.exists

    - include_role:
        name: "{{ workload_name }}"
      vars:
        ACTION: "{{ action }}"
        ansible_ssh_private_key_file: "{{ output_dir }}/ssh_provision_{{ guid }}"
        ansible_user: ec2-user
        playbook_dir: "{{ agnosticd_home }}"
        hosts: remote
        ansible_python_interpreter: "{{ '/usr/bin/python' if ocp_version == '3' else '/opt/virtualenvs/k8s/bin/python' }}"
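workload.yml resolves the role to include by first looking for `ocp4-workload-<workload>` (on 4.x) and then falling back to the shared `ocp-workload-<workload>` role. A quick way to see which workload roles your AgnosticD checkout actually ships (a convenience check, not part of the tooling):

```bash
# List the AgnosticD workload roles the playbook can resolve
ls "${AGNOSTICD_HOME}/ansible/roles" | grep -E '^ocp4?-workload-'
```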
--------------------------------------------------------------------------------
/workloads/workload_vars/ceph.yml:
--------------------------------------------------------------------------------
---
ceph_osd:
  resources:
    requests:
      cpu: '0.1'
      memory: 2Gi
ceph_mon:
  resources:
    requests:
      cpu: '0.2'
      memory: 3Gi
ceph_mgr:
  resources:
    requests:
      cpu: '0.2'
      memory: 3Gi
ceph_mds:
  resources:
    requests:
      cpu: '0.1'
      memory: 2Gi

ocs_operator_channel: stable-4.2
ocs_source_namespace: openshift-marketplace
ocs_source: redhat-operators
--------------------------------------------------------------------------------
/workloads/workload_vars/mediawiki.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/workloads/workload_vars/migration.yml:
--------------------------------------------------------------------------------
---
mig_operator_repo_branch: release-1.0
mig_ocp_version: '{{ ocp_version }}'
--------------------------------------------------------------------------------
/workloads/workload_vars/minio.yml:
--------------------------------------------------------------------------------
---
TARGET_HOST: "bastion.{{ guid }}{{ subdomain_base_suffix }}"
_minio_access_key: "minio"
_minio_secret_key: "minio123"
--------------------------------------------------------------------------------
/workloads/workload_vars/mssql.yml:
--------------------------------------------------------------------------------
---
mssql_pv_provider: default
--------------------------------------------------------------------------------
/workloads/workload_vars/noobaa.yml:
--------------------------------------------------------------------------------
---
noobaa_namespace: noobaa
--------------------------------------------------------------------------------
/workloads/workload_vars/ocs-poc.yml:
--------------------------------------------------------------------------------
---
ocs_mcg_core_cpu: 200m
ocs_mcg_core_mem: 256Mi
ocs_mcg_db_cpu: 200m
ocs_mcg_db_mem: 256Mi
ocs_ceph_mds_cpu: 256m
ocs_ceph_mds_mem: 512Mi
ocs_ceph_mon_cpu: 256m
ocs_ceph_mon_mem: 512Mi
ocs_ceph_mgr_cpu: 256m
ocs_ceph_mgr_mem: 512Mi
ocs_ceph_osd_cpu: 256m
ocs_ceph_osd_mem: 512Mi
ocs_channel: stable-4.7
ocs_operator_source: redhat-operators
ocs_operator_source_namespace: openshift-marketplace
--------------------------------------------------------------------------------
/workloads/workload_vars/parks-app.yml:
--------------------------------------------------------------------------------
---
parks_app_manifest: https://raw.githubusercontent.com/konveyor/mig-demo-apps/master/apps/parks-app/manifest.yaml
--------------------------------------------------------------------------------
/workloads/workload_vars/robot-shop.yml:
--------------------------------------------------------------------------------
---
robot_shop_manifest: https://raw.githubusercontent.com/konveyor/mig-demo-apps/master/apps/robot-shop/manifest.yaml
--------------------------------------------------------------------------------
/workloads/workload_vars/rocket-chat.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/workloads/workload_vars/sock-shop.yml:
--------------------------------------------------------------------------------
---
sock_shop_manifest: https://raw.githubusercontent.com/konveyor/mig-demo-apps/master/apps/sock-shop/manifest.yaml
--------------------------------------------------------------------------------
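The vars files above suggest the pattern for wiring in an additional demo application: each workload only needs a small vars file whose name matches an existing AgnosticD workload role. A hypothetical sketch (the `example-app` workload name and manifest URL are made up; a role such as `ocp-workload-example-app` would have to exist in your AgnosticD checkout for this to work):

```bash
# Hypothetical: add a vars file for a new workload, then deploy it like any other.
cat > workload_vars/example-app.yml <<'EOF'
---
example_app_manifest: https://example.com/apps/example-app/manifest.yaml
EOF

./deploy_workload.sh -a create -w example-app -v 4
```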