├── .gitignore ├── roles ├── ocp4-etcd │ └── files │ │ ├── requirements │ │ └── who-is-ocp4-etcd-leader.py ├── ocp4-etcd-restore │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── ocp4-etcd-backup │ ├── defaults │ │ └── main.yml │ ├── README.md │ └── tasks │ │ └── main.yml ├── ocp4-machineautoscaler │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── machineautoscaler.yml.j2 │ └── tasks │ │ └── main.yml ├── ocp4-etcd-healthcheck │ ├── tasks │ │ └── main.yml │ └── files │ │ └── check_etcd.sh ├── deadman │ ├── templates │ │ └── hosts.conf.j2 │ └── tasks │ │ └── main.yml ├── ocp4-htpasswd-providers │ ├── templates │ │ └── oauth-template.yml.j2 │ └── tasks │ │ └── main.yml ├── ocp4-machinehealthcheck │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── machinehealthcheck.yml.j2 │ └── tasks │ │ └── main.yml ├── ocp4-clusterautoscaler │ ├── defaults │ │ └── main.yml │ ├── templates │ │ └── clusterautoscaler.yml.j2 │ └── tasks │ │ └── main.yml ├── ocp4-cluster-status │ └── tasks │ │ └── main.yml ├── ocp4-chronyd │ ├── templates │ │ ├── mc-chrony-configuration.yaml.j2 │ │ └── chrony.conf.j2 │ └── tasks │ │ └── main.yml ├── ocp4-secret │ └── tasks │ │ └── main.yml ├── htpasswd-secret │ └── tasks │ │ └── main.yml └── ocp4-vsphere-machineset │ ├── tasks │ └── main.yml │ └── templates │ └── ocp4-machineset.yml.j2 ├── .work-in-progress ├── metering │ ├── bucket │ │ └── .minio.sys │ │ │ ├── backend-encrypted │ │ │ ├── format.json │ │ │ ├── buckets │ │ │ └── .tracker.bin │ │ │ └── config │ │ │ ├── config.json │ │ │ └── iam │ │ │ └── format.json │ └── ns.yml ├── efk │ ├── 5-install-efk.sh │ ├── 2-view-operator.sh │ ├── 3-subscribe-efk.sh │ ├── cluster-logging-subscription.yaml │ ├── elasticsearch-operator-subscription.yaml │ ├── 4-check-subscribe.sh │ ├── 1-create-project.sh │ ├── efk-poc-install-empty.yml │ ├── efk-poc-install.yml │ └── efk-poc-install-gp2.yml ├── kubernetes-horizontal-white.png ├── project-requrest-template │ ├── run.sh │ └── project-requrest-template.yml ├── custom-branding.yml ├── create_userroot.sh ├── custom-branding.sh └── add-self-signed-ssl-certificate.yml ├── requirements.txt ├── pingpong.yml ├── healthcheck-etcd.yml ├── force-node-scaling-event.sh ├── monitoring-host-status.yml ├── 0-get-ocp-cluster-info.yml ├── add-machineautoscaler.yml ├── backup-etcd.yml ├── shell.sh ├── assets └── fake-heavy-job.yml ├── add-machinehealthcheck.yml ├── config-time-service.yml ├── shell-prompt.yml ├── ansible.cfg ├── check-disk-performance-etcd.yml ├── restore-etcd.yml ├── hosts ├── add-clusterautoscaler.yml ├── graceful-ocp4-shutdown.yml ├── check-system-time.yml ├── graceful-ocp4-reboot.yml ├── add-vsphere-machineset.yml ├── ionice-etcd.yml ├── remove-kubeadmin.yml ├── archive-container-images.yml ├── add-ocp4-account.yml └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode -------------------------------------------------------------------------------- /roles/ocp4-etcd/files/requirements: -------------------------------------------------------------------------------- 1 | openshift-client 2 | -------------------------------------------------------------------------------- /.work-in-progress/metering/bucket/.minio.sys/backend-encrypted: -------------------------------------------------------------------------------- 1 | encrypted -------------------------------------------------------------------------------- /.work-in-progress/efk/5-install-efk.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | oc create -f efk-poc-install.yml 5 | -------------------------------------------------------------------------------- /roles/ocp4-etcd-restore/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | bastion_backup_dir: "/tmp" 3 | bastion_etcd_dirname: "" 4 | masters_backup_dir: "/home/core" 5 | -------------------------------------------------------------------------------- /.work-in-progress/efk/2-view-operator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | oc get packagemanifests {cluster-logging,elasticsearch-operator} -n openshift-marketplace 3 | -------------------------------------------------------------------------------- /.work-in-progress/metering/bucket/.minio.sys/format.json: -------------------------------------------------------------------------------- 1 | {"version":"1","format":"fs","id":"d8c2b1d7-0a2d-47fc-b3bb-722803c4a593","fs":{"version":"2"}} -------------------------------------------------------------------------------- /.work-in-progress/kubernetes-horizontal-white.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pichuang/openshift4-toolbox/HEAD/.work-in-progress/kubernetes-horizontal-white.png -------------------------------------------------------------------------------- /.work-in-progress/efk/3-subscribe-efk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | oc apply -f elasticsearch-operator-subscription.yaml 4 | oc apply -f cluster-logging-subscription.yaml 5 | -------------------------------------------------------------------------------- /.work-in-progress/metering/bucket/.minio.sys/buckets/.tracker.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pichuang/openshift4-toolbox/HEAD/.work-in-progress/metering/bucket/.minio.sys/buckets/.tracker.bin -------------------------------------------------------------------------------- /.work-in-progress/metering/bucket/.minio.sys/config/config.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pichuang/openshift4-toolbox/HEAD/.work-in-progress/metering/bucket/.minio.sys/config/config.json -------------------------------------------------------------------------------- /.work-in-progress/metering/bucket/.minio.sys/config/iam/format.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pichuang/openshift4-toolbox/HEAD/.work-in-progress/metering/bucket/.minio.sys/config/iam/format.json -------------------------------------------------------------------------------- /roles/ocp4-etcd-backup/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # backup_dst_file_latest: "{{ backup_bastion_rootdir }}/latest-snapshot.db" 3 | 4 | bastion_backup_dir: "/tmp" 5 | etcd_backup_dir: "/home/core" 6 | -------------------------------------------------------------------------------- /roles/ocp4-etcd-backup/README.md: -------------------------------------------------------------------------------- 1 | ocp4-etcd-backup 2 | ========= 3 | 4 | Fork from https://github.com/acidonper/ocp42-etcd-backup-restore-ansible 5 | 6 | OCP 4.6+ 
ETCD Backup role performs a ETCD cluster data backup 7 | 8 | 9 | -------------------------------------------------------------------------------- /.work-in-progress/metering/ns.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: openshift-metering 5 | annotations: 6 | openshift.io/node-selector: "" 7 | labels: 8 | openshift.io/cluster-monitoring: "true" 9 | -------------------------------------------------------------------------------- /roles/ocp4-machineautoscaler/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | machineautoscaler_name: "" 3 | machineautoscaler_filename: "" 4 | infrastructure_id: "" 5 | 6 | role: "worker" 7 | yaml_directory_name: "/tmp/machineautoscaler" 8 | minReplicas: 1 9 | maxReplicas: 2 -------------------------------------------------------------------------------- /roles/ocp4-etcd-healthcheck/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for roles/ocp4-etcd-healthcheck 3 | 4 | - name: Run check_etcd.sh script 5 | script: 6 | cmd: check_etcd.sh 7 | register: result 8 | 9 | - debug: 10 | msg: "{{ result.stdout }}" 11 | -------------------------------------------------------------------------------- /.work-in-progress/project-requrest-template/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # https://docs.openshift.com/container-platform/4.6/applications/quotas/quotas-setting-per-project.html#quotas-scopes_quotas-setting-per-project 4 | oc apply -f project-requrest-template.yml -n openshift-config 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | bcrypt==4.1.2 2 | cffi==1.16.0 3 | cryptography==42.0.1 4 | numpy==1.26.3 5 | openshift-client==1.0.20 6 | pandas==2.2.0 7 | paramiko==3.4.0 8 | pycparser==2.21 9 | PyNaCl==1.5.0 10 | python-dateutil==2.8.2 11 | pytz==2023.3.post1 12 | PyYAML==6.0.1 13 | six==1.16.0 14 | tzdata==2023.4 15 | -------------------------------------------------------------------------------- /pingpong.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook -i hosts pingpong.yaml 3 | # 4 | 5 | --- 6 | - hosts: openshift4 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | 12 | tasks: 13 | - name: Test Connectivity 14 | ping: 15 | 16 | -------------------------------------------------------------------------------- /.work-in-progress/custom-branding.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operator.openshift.io/v1 3 | kind: Console 4 | metadata: 5 | name: cluster 6 | spec: 7 | customization: 8 | customProductName: WhiteProduct 9 | customLogoFile: 10 | name: console-custom-logo 11 | key: kubernetes-horizontal-white.png 12 | -------------------------------------------------------------------------------- /.work-in-progress/efk/cluster-logging-subscription.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: cluster-logging 6 | namespace: openshift-logging 7 | spec: 8 | channel: "4.6" 9 | name: 
cluster-logging 10 | source: redhat-operators 11 | sourceNamespace: openshift-marketplace 12 | -------------------------------------------------------------------------------- /healthcheck-etcd.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook -i hosts healthcheck-etcd.yml 3 | # 4 | 5 | --- 6 | - hosts: master 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | 12 | tasks: 13 | - name: Check ETCD Status 14 | import_role: 15 | name: ocp4-etcd-healthcheck 16 | -------------------------------------------------------------------------------- /force-node-scaling-event.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PROJECT_NAME="test-clusterautoscale" 4 | 5 | oc new-project ${PROJECT_NAME} 6 | oc project ${PROJECT_NAME} 7 | oc create -n ${PROJECT_NAME} -f assets/fake-heavy-job.yml 8 | 9 | sleep 10 10 | 11 | oc get pods -n ${PROJECT_NAME} -w 12 | 13 | echo 14 | echo "watch -n10 'oc get machines -n openshift-machine-api'" 15 | echo -------------------------------------------------------------------------------- /monitoring-host-status.yml: -------------------------------------------------------------------------------- 1 | # 2 | # deadman is an observation software for host status using ping. 3 | # https://github.com/upa/deadman 4 | # 5 | # ansible-playbook monitoring-host-status.yml 6 | # 7 | --- 8 | - hosts: bastion 9 | gather_facts: true 10 | vars: 11 | repo_location: "/tmp/deadman" 12 | tasks: 13 | - import_role: 14 | name: deadman 15 | -------------------------------------------------------------------------------- /.work-in-progress/efk/elasticsearch-operator-subscription.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: elasticsearch-operator 6 | namespace: openshift-operators-redhat 7 | spec: 8 | channel: "4.6" 9 | name: elasticsearch-operator 10 | source: redhat-operators 11 | sourceNamespace: openshift-marketplace 12 | -------------------------------------------------------------------------------- /roles/deadman/templates/hosts.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # OpenShift 4 Cluster 3 | # 4 | {% for host in groups['openshift4'] %} 5 | {{host}} {{ hostvars[host].ansible_host }} 6 | {% endfor %} 7 | 8 | # 9 | # Bastion Information 10 | # 11 | gateway {{ ansible_default_ipv4.gateway }} 12 | {% for host_dns in ansible_dns['nameservers'] %} 13 | bastion-dns {{ host_dns }} 14 | {% endfor %} 15 | -------------------------------------------------------------------------------- /.work-in-progress/efk/4-check-subscribe.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | oc get csv -n openshift-operators-redhat 5 | oc get csv -n openshift-logging 6 | 7 | echo 8 | 9 | oc api-resources --api-group=logging.openshift.io 10 | 11 | echo 12 | 13 | oc get crd -l operators.coreos.com/elasticsearch-operator.openshift-operators-redhat 14 | oc get crd -l operators.coreos.com/cluster-logging.openshift-logging 15 | -------------------------------------------------------------------------------- /0-get-ocp-cluster-info.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: bastion 3 | gather_facts: false 4 | 
collections: 5 | - kubernetes.core 6 | 7 | tasks: 8 | - name: Do not invalidate cache before getting information 9 | k8s_cluster_info: 10 | invalidate_cache: True 11 | register: api_status 12 | 13 | - name: Output API status 14 | debug: 15 | msg: "{{ api_status.version }}" -------------------------------------------------------------------------------- /add-machineautoscaler.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook add-machineautoscaler.yml 3 | # 4 | 5 | --- 6 | - hosts: localhost 7 | gather_facts: false 8 | vars: 9 | role: "worker" 10 | yaml_directory_name: "/tmp/machineautoscaler" 11 | minReplicas: 1 12 | maxReplicas: 3 13 | 14 | tasks: 15 | - name: Create MachineAutoscaler 16 | import_role: 17 | name: ocp4-machineautoscaler 18 | -------------------------------------------------------------------------------- /roles/ocp4-htpasswd-providers/templates/oauth-template.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: config.openshift.io/v1 3 | kind: OAuth 4 | metadata: 5 | name: cluster 6 | spec: 7 | identityProviders: 8 | - name: {{ identity_provider_name }} 9 | challenge: true 10 | login: true 11 | mappingMethod: claim 12 | type: HTPasswd 13 | htpasswd: 14 | fileData: 15 | name: {{ htpasswd_filename }} 16 | -------------------------------------------------------------------------------- /roles/ocp4-machinehealthcheck/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | machinehealcheck_name: "" 3 | machinehealcheck_filename: "" 4 | infrastructure_id: "" 5 | 6 | role: "worker" 7 | yaml_directory_name: "/tmp/machinehealthcheck" 8 | unhealthyConditions: 9 | - { type: "Ready", timeout: "300s", status: "False" } 10 | - { type: "Ready", timeout: "300s", status: "Unknown" } 11 | maxUnhealthy: "40%" 12 | nodeStartupTimeout: "10m" -------------------------------------------------------------------------------- /backup-etcd.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook -i hosts backup-etcd.yml 3 | # 4 | 5 | --- 6 | - hosts: master 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | 12 | tasks: 13 | - name: Perform ETCD Backup 14 | import_role: 15 | name: ocp4-etcd-backup 16 | vars: 17 | bastion_backup_dir: "/tmp" 18 | run_once: true 19 | -------------------------------------------------------------------------------- /roles/ocp4-etcd-healthcheck/files/check_etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | id=$(sudo crictl ps --name etcdctl | awk 'FNR==2{ print $1}') 3 | sudo crictl exec -it $id /bin/bash -c "etcdctl member list -w table" 4 | echo 5 | sudo crictl exec -it $id /bin/bash -c "etcdctl endpoint health --cluster" 6 | echo 7 | sudo crictl exec -it $id /bin/bash -c "etcdctl endpoint health -w table" 8 | echo 9 | sudo crictl exec -it $id /bin/bash -c "etcdctl endpoint status -w table --cluster" -------------------------------------------------------------------------------- /shell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # shell.sh "timedatectl" 5 | # shell.sh "timedatectl | grep -i "Local time"" 6 | # 7 | 8 | display_usage() { 9 | echo -e "\nUsage: $0 [command] \n" 10 | echo -e "Example: shell.sh \"timedatectl\"" 11 | echo -e 
"Example: shell.sh \"timedatectl | grep -i \"Local time\"\" \n" 12 | } 13 | 14 | 15 | if [ $# -lt 1 ] 16 | then 17 | display_usage 18 | exit 1 19 | fi 20 | 21 | ansible openshift4 -m shell -a "$1" 22 | -------------------------------------------------------------------------------- /roles/deadman/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Pull upa/deadman repos 3 | git: 4 | repo: https://github.com/upa/deadman 5 | dest: "{{ repo_location }}" 6 | # update: yes 7 | # version: master 8 | 9 | - name: Create hostfile 10 | template: 11 | src: hosts.conf.j2 12 | dest: "{{ repo_location }}/openshift4.conf" 13 | 14 | - name: Please use command manually 15 | debug: 16 | msg: "{{ repo_location }}/deadman {{ repo_location }}/openshift4.conf" -------------------------------------------------------------------------------- /assets/fake-heavy-job.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | generateName: fake-heavy-job- 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: fake-work 11 | image: busybox 12 | command: ["sleep", "300"] 13 | resources: 14 | requests: 15 | memory: 500Mi 16 | cpu: 500m 17 | restartPolicy: Never 18 | backoffLimit: 4 19 | completions: 50 20 | parallelism: 50 -------------------------------------------------------------------------------- /roles/ocp4-machineautoscaler/templates/machineautoscaler.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling.openshift.io/v1beta1 3 | kind: MachineAutoscaler 4 | metadata: 5 | name: ma-{{ infrastructure_id }}-{{ role }} 6 | namespace: openshift-machine-api 7 | spec: 8 | minReplicas: {{ minReplicas }} 9 | maxReplicas: {{ maxReplicas }} 10 | scaleTargetRef: 11 | apiVersion: machine.openshift.io/v1beta1 12 | kind: MachineSet 13 | name: {{ infrastructure_id }}-{{ role }} 14 | -------------------------------------------------------------------------------- /.work-in-progress/create_userroot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | 5 | USERROOT="useroot" 6 | DC_NAME="nginx-deployment" 7 | 8 | # 1. Create a new SA - userroot 9 | oc create serviceaccount ${USERROOT} 10 | 11 | # 2. Add the SA into anyuid SCC list 12 | oc adm policy add-scc-to-user anyuid -z userroot 13 | 14 | # 3. 
Patch specific DeploymentConfig to userroot 15 | oc patch deployment ${DC_NAME} --patch {"spec":{"template":{"spec":{"serviceAccountName":"${USERROOT}"}}}} 16 | 17 | -------------------------------------------------------------------------------- /roles/ocp4-clusterautoscaler/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | infrastructure_id: "" 3 | clusterautoscaler_name: "" 4 | clusterautoscaler_filename: "" 5 | 6 | yaml_directory_name: "/tmp/clusterautoscaler" 7 | balanceSimilarNodeGroups: true 8 | podPriorityThreshold: -10 9 | maxNodesTotal: 8 10 | cores: 11 | mins: 8 12 | maxs: 128 13 | memory: 14 | min: 4 15 | max: 256 16 | scaleDown: 17 | enabled: true 18 | delayAfterAdd: 10m 19 | delayAfterDelete: 5m 20 | delayAfterFailure: 30s 21 | unneededTime: 60s -------------------------------------------------------------------------------- /.work-in-progress/custom-branding.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # https://www.openshift.com/blog/openshift-4-pro-tip-custom-branding 4 | 5 | # Artwork 6 | # https://github.com/cncf/artwork/blob/master/examples/graduated.md#kubernetes-logos 7 | 8 | # Add a Custom Logo and Product Name 9 | oc create configmap console-custom-logo \ 10 | --from-file=kubernetes-horizontal-white.png \ 11 | -n openshift-config 12 | 13 | # Edit the web console's operator configuration 14 | 15 | oc apply -f custom-branding.yml 16 | 17 | -------------------------------------------------------------------------------- /add-machinehealthcheck.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | vars: 5 | role: "worker" 6 | yaml_directory_name: "/tmp/machinehealthcheck" 7 | unhealthyConditions: 8 | - { type: "Ready", timeout: "300s", status: "False" } 9 | - { type: "Ready", timeout: "300s", status: "Unknown" } 10 | maxUnhealthy: "40%" 11 | nodeStartupTimeout: "10m" 12 | 13 | tasks: 14 | - name: Create MachineHealthCheck 15 | import_role: 16 | name: ocp4-machinehealthcheck 17 | -------------------------------------------------------------------------------- /.work-in-progress/efk/1-create-project.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat << EOF | oc create -f - 4 | --- 5 | apiVersion: v1 6 | kind: Namespace 7 | metadata: 8 | name: openshift-operators-redhat 9 | annotations: 10 | openshift.io/node-selector: "" 11 | labels: 12 | openshift.io/cluster-monitoring: "true" 13 | EOF 14 | 15 | cat << EOF | oc create -f - 16 | --- 17 | apiVersion: v1 18 | kind: Namespace 19 | metadata: 20 | name: openshift-logging 21 | annotations: 22 | openshift.io/node-selector: "" 23 | labels: 24 | openshift.io/cluster-monitoring: "true" 25 | EOF 26 | -------------------------------------------------------------------------------- /config-time-service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: openshift4 3 | gather_facts: no 4 | vars: 5 | chrony_location: "/tmp/chronyd/" 6 | timezone: "Asia/Taipei" 7 | ntp_server_list: 8 | - ntp.pichuang.local 9 | - time.google.com 10 | mcp: 11 | - worker 12 | - master 13 | 14 | tasks: 15 | - import_role: 16 | name: ocp4-chronyd 17 | run_once: true 18 | delegate_to: localhost 19 | 20 | 21 | - name: Setup "{{ timezone }}" 22 | command: 23 | cmd: "timedatectl set-timezone {{ timezone }}" 24 | become: true 25 | 
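Quick manual check to pair with config-time-service.yml above: the repo's check-system-time.yml playbook already verifies chrony via Ansible, and the lines below are only a hedged oc-based sketch of the same idea. It assumes the 99-<role>-chrony-configuration MachineConfig names rendered by the ocp4-chronyd role; "<node-name>" is a placeholder, not a value from the repo.

# Hedged sketch: confirm the chrony MachineConfigs exist and have rolled out
oc get machineconfig | grep chrony-configuration
oc get machineconfigpool          # wait until UPDATED=True for the master and worker pools
# Spot-check one node after the pools settle (placeholder node name)
oc debug node/<node-name> -- chroot /host chronyc sources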
-------------------------------------------------------------------------------- /shell-prompt.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook -i hosts shell_prompt.yaml 3 | # 4 | 5 | --- 6 | - hosts: openshift4 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | 12 | vars_prompt: 13 | - name: command 14 | prompt: > 15 | "Type Command# " 16 | default: 'whoami' 17 | private: no 18 | 19 | tasks: 20 | - name: Type Command 21 | shell: 22 | cmd: "{{ command }}" 23 | register: result 24 | 25 | - debug: 26 | msg: "{{ result.stdout }}" 27 | 28 | -------------------------------------------------------------------------------- /roles/ocp4-cluster-status/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reboot all OCP4 nodes 3 | reboot: 4 | msg: "Reboot manually using ansible-playbook" 5 | reboot_timeout: 1800 6 | test_command: uptime 7 | connect_timeout: 5 8 | pre_reboot_delay: 0 9 | post_reboot_delay: 30 10 | become: true 11 | register: result 12 | when: state == "reboot" 13 | 14 | - name: Shutdown all OCP4 nodes 15 | command: 16 | cmd: "shutdown -h 1" 17 | become: true 18 | register: result 19 | when: state == "shutdown" 20 | 21 | - pause: 22 | minutes: 1 23 | 24 | - debug: 25 | msg: "{{ result }}" 26 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | forks = 20 3 | host_key_checking = False 4 | roles_path = roles/ 5 | gathering = smart 6 | fact_caching = jsonfile 7 | fact_caching_connection = /tmp/ansible/facts 8 | fact_caching_timeout = 600 9 | log_path = /tmp/ansible.log 10 | nocows = 1 11 | callbacks_enabled = profile_tasks, timer, skippy 12 | inventory = ./hosts 13 | retry_files_enabled = False 14 | #ask_vault_pass = true 15 | stdout_callback = debug 16 | deprecation_warnings = False 17 | 18 | [privilege_escalation] 19 | become = False 20 | 21 | [ssh_connection] 22 | ssh_args = -o ControlMaster=auto -o ControlPersist=600s -o ServerAliveInterval=60 23 | pipelining = True 24 | timeout = 10 -------------------------------------------------------------------------------- /check-disk-performance-etcd.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook check-disk-performance-etcd.yml 3 | # 4 | 5 | --- 6 | - hosts: master 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | vars: 12 | etcd_image: "quay.io/openshift-scale/etcd-perf" 13 | 14 | tasks: 15 | - name: Use 'fio' to Check Etcd Disk Performance 16 | shell: 17 | cmd: "podman run --volume /var/lib/etcd:/var/lib/etcd:Z {{ etcd_image }}" 18 | become: true 19 | register: result 20 | 21 | - set_fact: 22 | result_regex: "99th" 23 | 24 | - debug: 25 | msg: "{{ result.stdout }}" 26 | -------------------------------------------------------------------------------- /restore-etcd.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook restore-etcd.yml 3 | # 4 | 5 | --- 6 | - hosts: all 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | 12 | vars_prompt: 13 | - name: bastion_backup_dir 14 | prompt: > 15 | "Type Backup path [/tmp] #" 16 | default: '/tmp/' 17 | 
private: no 18 | 19 | - name: bastion_etcd_dirname 20 | prompt: > 21 | "Type Backup directory name [etcd-backup]#" 22 | default: 'etcd-backup' 23 | private: no 24 | 25 | tasks: 26 | - name: Perform ETCD Restore 27 | import_role: 28 | name: ocp4-etcd-restore 29 | 30 | -------------------------------------------------------------------------------- /hosts: -------------------------------------------------------------------------------- 1 | [master] 2 | master0 ansible_host=192.168.76.1 3 | master1 ansible_host=192.168.76.2 4 | master2 ansible_host=192.168.76.3 5 | 6 | [worker] 7 | worker0 ansible_host=192.168.76.11 8 | worker1 ansible_host=192.168.76.12 9 | worker2 ansible_host=192.168.76.13 10 | 11 | [bastion] 12 | localhost ansible_host=localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python3" 13 | 14 | [openshift4:children] 15 | master 16 | worker 17 | 18 | [openshift4:vars] 19 | ansible_ssh_user=core 20 | ansible_ssh_private_key_file=~/.ssh/dmz-ocp4-rsa 21 | ansible_python_interpreter=/usr/libexec/platform-python 22 | http_proxy="" 23 | https_proxy="" 24 | 25 | [bastion:vars] 26 | location="japaneast" -------------------------------------------------------------------------------- /add-clusterautoscaler.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook add-machineautoscaler.yml 3 | # 4 | 5 | --- 6 | - hosts: localhost 7 | gather_facts: false 8 | vars: 9 | yaml_directory_name: "/tmp/clusterautoscaler" 10 | balanceSimilarNodeGroups: true 11 | podPriorityThreshold: -10 12 | maxNodesTotal: 20 13 | cores: 14 | mins: 8 15 | maxs: 128 16 | memory: 17 | min: 4 18 | max: 256 19 | scaleDown: 20 | enabled: true 21 | delayAfterAdd: 10m 22 | delayAfterDelete: 5m 23 | delayAfterFailure: 5m 24 | unneededTime: 5m 25 | 26 | tasks: 27 | - name: Create ClusterAutoscaler 28 | import_role: 29 | name: ocp4-clusterautoscaler 30 | -------------------------------------------------------------------------------- /roles/ocp4-chronyd/templates/mc-chrony-configuration.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: machineconfiguration.openshift.io/v1 3 | kind: MachineConfig 4 | metadata: 5 | labels: 6 | machineconfiguration.openshift.io/role: "{{ item }}" 7 | name: "99-{{ item }}-chrony-configuration" 8 | spec: 9 | config: 10 | ignition: 11 | config: {} 12 | security: 13 | tls: {} 14 | timeouts: {} 15 | version: 2.2.0 16 | networkd: {} 17 | passwd: {} 18 | storage: 19 | files: 20 | - contents: 21 | source: "data:text/plain;charset=utf-8;base64,{{ chrony_base64 }}" 22 | verification: {} 23 | filesystem: root 24 | mode: 420 25 | path: /etc/chrony.conf 26 | osImageURL: "" -------------------------------------------------------------------------------- /graceful-ocp4-shutdown.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook graceful-ocp4-reboot.yml 3 | # 4 | 5 | --- 6 | - hosts: openshift4 7 | gather_facts: false 8 | serial: 1 9 | max_fail_percentage: 0 10 | vars_prompt: 11 | - name: read_this 12 | prompt: > 13 | "Are you sure you wanna SHUTDOWN the whole OpenShift 4 cluster? [No/yes]" 14 | default: 'no' 15 | private: no 16 | 17 | pre_tasks: 18 | - name: Exit playbook, if you dont know anything 19 | fail: 20 | msg: > 21 | "Thanks god you use this playbook and avoid the disaster!" 
22 | when: read_this != 'yes' 23 | 24 | tasks: 25 | - name: "Shutdown ocp4 nodes" 26 | import_role: 27 | name: ocp4-cluster-status 28 | vars: 29 | state: shutdown 30 | 31 | -------------------------------------------------------------------------------- /check-system-time.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook check-timedatectl.yml 3 | # 4 | 5 | --- 6 | - hosts: openshift4 7 | gather_facts: no 8 | 9 | tasks: 10 | - name: Check Timedatectl 11 | command: 12 | cmd: "timedatectl" 13 | become: true 14 | register: timedatectl_result 15 | 16 | - debug: 17 | msg: "{{ timedatectl_result.stdout }}" 18 | 19 | - name: Check Chronyd status 20 | command: 21 | cmd: "chronyc -4 -n sources" 22 | register: chronyc_result 23 | 24 | - debug: 25 | msg: "{{ chronyc_result.stdout }}" 26 | 27 | - name: Check Chrony source 28 | command: "chronyc sourcestats" 29 | register: sourcestats_result 30 | 31 | - debug: 32 | msg: "{{ sourcestats_result.stdout }}" 33 | 34 | -------------------------------------------------------------------------------- /graceful-ocp4-reboot.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook graceful-ocp4-reboot.yml 3 | # 4 | 5 | --- 6 | - hosts: openshift4 7 | gather_facts: false 8 | serial: 1 9 | max_fail_percentage: 0 10 | vars_prompt: 11 | - name: read_this 12 | prompt: > 13 | "Are you sure you wanna reboot the whole OpenShift 4 cluster? [No/yes]" 14 | default: 'no' 15 | private: no 16 | 17 | pre_tasks: 18 | - name: Exit playbook, if you dont know anything 19 | fail: 20 | msg: > 21 | "Thanks god you use this playbook and avoid the disaster!" 22 | when: read_this != 'yes' 23 | 24 | tasks: 25 | 26 | - name: "Reboot ocp4 nodes" 27 | import_role: 28 | name: ocp4-cluster-status 29 | vars: 30 | state: reboot 31 | 32 | -------------------------------------------------------------------------------- /roles/ocp4-secret/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Handle Secret - {{ filename }} 3 | block: 4 | - name: Create Secret 5 | command: 6 | cmd: "oc create secret generic {{ filename }} --from-file htpasswd={{ directory_name }}/{{ filename }} -n openshift-config" 7 | ignore_errors: True 8 | 9 | rescue: 10 | - name: Update Secret 11 | shell: 12 | cmd: oc create secret generic {{ filename }} --from-file htpasswd={{ directory_name }}/{{ filename }} -n openshift-config --dry-run=client -o yaml | oc apply -f - 13 | 14 | - name: Describe Secret - {{ filename }} 15 | command: 16 | cmd: "oc describe secret {{ filename }} -n openshift-config" 17 | register: result 18 | 19 | - name: oc describe secret {{ filename }} 20 | debug: 21 | msg: "{{ result.stdout_lines }}" 22 | -------------------------------------------------------------------------------- /roles/ocp4-clusterautoscaler/templates/clusterautoscaler.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: "autoscaling.openshift.io/v1" 2 | kind: ClusterAutoscaler 3 | metadata: 4 | name: default 5 | spec: 6 | balanceSimilarNodeGroups: {{ balanceSimilarNodeGroups }} 7 | podPriorityThreshold: {{ podPriorityThreshold }} 8 | resourceLimits: 9 | maxNodesTotal: {{ maxNodesTotal }} 10 | cores: 11 | min: {{ cores.mins }} 12 | max: {{ cores.maxs }} 13 | memory: 14 | min: {{ memory.min }} 15 | max: {{ memory.max }} 16 | scaleDown: 17 | enabled: {{ scaleDown.enabled }} 18 | delayAfterAdd: {{ 
scaleDown.delayAfterAdd }} 19 | delayAfterDelete: {{ scaleDown.delayAfterDelete }} 20 | delayAfterFailure: {{ scaleDown.delayAfterFailure }} 21 | unneededTime: {{ scaleDown.unneededTime }} 22 | -------------------------------------------------------------------------------- /roles/ocp4-htpasswd-providers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create Htpasswd Provider YAML 3 | template: 4 | src: oauth-template.yml.j2 5 | dest: "{{ directory_name }}/{{ identity_provider_name }}" 6 | 7 | - name: Backup Current Identity Provider 8 | shell: 9 | cmd: 'oc get oauth -o yaml > "$(date "+%Y%m%d%H%M%S")"-identity-provider-backup' 10 | chdir: "{{ directory_name }}" 11 | 12 | - name: Apply Identity Provider - {{ identity_provider_name }} 13 | command: 14 | cmd: "oc apply -f {{ identity_provider_name }}" 15 | chdir: "{{ directory_name }}" 16 | 17 | - name: Describe OAuth 18 | command: 19 | cmd: "oc describe oauth cluster" 20 | register: result 21 | 22 | - name: oc describe oauth {{ identity_provider_name }} cluster 23 | debug: 24 | msg: "{{ result.stdout_lines }}" -------------------------------------------------------------------------------- /add-vsphere-machineset.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook add-vsphere-machineset.yml 3 | # 4 | 5 | --- 6 | - hosts: localhost 7 | gather_facts: false 8 | vars: 9 | infrastructure_id: "" 10 | role: "worker" 11 | vm_cpu: 4 12 | vm_corepersocket: 1 13 | vm_ram: 16384 14 | vm_disk: 40 15 | vm_network_name: DPG-Vlan76-ocp4 16 | vm_template_name: rhcos-vmware.x86_64 17 | vcenter_datacenter_name: HomeCloud-DC 18 | vcenter_datastore_name: esxi-1.ocp 19 | vcenter_vm_folder_path: /HomeCloud-DC/vm/dmz-ocp4-76 20 | vsphere_resource_pool: ocp4-worker-pool 21 | vcenter_server_ip: vcenter.pichuang.local 22 | yaml_directory_name: "/tmp/machineset" 23 | 24 | tasks: 25 | - name: Create vSphere MachinesSet 26 | import_role: 27 | name: ocp4-vsphere-machineset 28 | -------------------------------------------------------------------------------- /roles/htpasswd-secret/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install the latest version of http-tools 3 | command: "yum install httpd-tools -y" 4 | 5 | - name: Add or Update User into htpasswd 6 | block: 7 | - name: Add/Update User - {{ username }} 8 | command: "htpasswd -b {{ filename }} {{ username }} {{ password }}" 9 | args: 10 | chdir: "{{ directory_name }}" 11 | 12 | rescue: 13 | - name: Add User - {{ username }} and Create {{ filename }} 14 | command: "htpasswd -b -c {{ filename }} {{ username }} {{ password }}" 15 | args: 16 | chdir: "{{ directory_name }}" 17 | 18 | - name: Cat {{ filename }} 19 | command: /usr/bin/cat {{ filename }} 20 | args: 21 | chdir: "{{ directory_name }}" 22 | register: result 23 | 24 | - name: Print {{ filename }} 25 | debug: 26 | msg: "{{ result.stdout_lines }}" -------------------------------------------------------------------------------- /roles/ocp4-machinehealthcheck/templates/machinehealthcheck.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: machine.openshift.io/v1beta1 3 | kind: MachineHealthCheck 4 | metadata: 5 | name: {{ machinehealcheck_name }} 6 | namespace: openshift-machine-api 7 | annotations: 8 | machine.openshift.io/remediation-strategy: "" 9 | spec: 10 | selector: 11 | matchLabels: 12 | 
machine.openshift.io/cluster-api-cluster: {{ infrastructure_id }} 13 | machine.openshift.io/cluster-api-machine-role: {{ role }} 14 | machine.openshift.io/cluster-api-machine-type: {{ role }} 15 | machine.openshift.io/cluster-api-machineset: {{ infrastructure_id }}-{{ role }} 16 | unhealthyConditions: 17 | {% for item in unhealthyConditions %} 18 | - type: {{ item.type }} 19 | timeout: {{ item.timeout }} 20 | status: "{{ item.status }}" 21 | {% endfor %} 22 | maxUnhealthy: {{ maxUnhealthy }} 23 | nodeStartupTimeout: {{ nodeStartupTimeout }} -------------------------------------------------------------------------------- /ionice-etcd.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook -i hosts ionice-etcd.yaml 3 | # 4 | 5 | --- 6 | - hosts: master 7 | gather_facts: false 8 | environment: 9 | http_proxy: "{{ http_proxy }}" 10 | https_proxy: "{{ https_proxy }}" 11 | 12 | tasks: 13 | - name: Query etcd process 14 | command: 15 | cmd: "pgrep etcd" 16 | register: result 17 | 18 | - set_fact: 19 | etcd_process: "{{ result.stdout_lines }}" 20 | 21 | - name: Show current priority and class of etcd process 22 | shell: 23 | cmd: "ionice -p {{ item }}" 24 | with_items: 25 | - "{{ result.stdout_lines }}" 26 | register: gg 27 | 28 | - debug: 29 | msg: "{{ item }}" 30 | with_items: 31 | - "{{ gg.results }}" 32 | 33 | - name: Setup priority 0 and class 2 for etcd process 34 | shell: 35 | cmd: "ionice -c2 -n0 -p {{ item }}" 36 | become: true 37 | with_items: 38 | - "{{ result.stdout_lines }}" -------------------------------------------------------------------------------- /roles/ocp4-chronyd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create "{{ chrony_location }}" directory 3 | file: 4 | path: "{{ chrony_location }}" 5 | state: directory 6 | 7 | - name: Create the chrony.conf file 8 | template: 9 | src: chrony.conf.j2 10 | dest: "{{ chrony_location }}/chrony.conf" 11 | mode: '0664' 12 | 13 | - name: Save base64 of config file 14 | set_fact: 15 | chrony_base64: "{{ (lookup('file', '{{ chrony_location }}/chrony.conf') + '\n') | b64encode }}" 16 | 17 | - name: Generate the machineconfigs for chrony 18 | template: 19 | src: mc-chrony-configuration.yaml.j2 20 | dest: "{{ chrony_location }}/99-{{ item }}-chrony-configuration.yaml" 21 | mode: '0664' 22 | with_items: 23 | - "{{ mcp }}" 24 | 25 | - name: Apply machineconfig-chrony-configuration.yml 26 | command: 27 | cmd: "oc apply -f {{ chrony_location }}/99-{{ item }}-chrony-configuration.yaml" 28 | with_items: 29 | - "{{ mcp }}" 30 | -------------------------------------------------------------------------------- /.work-in-progress/efk/efk-poc-install-empty.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "logging.openshift.io/v1" 3 | kind: "ClusterLogging" 4 | metadata: 5 | name: "instance" 6 | namespace: "openshift-logging" 7 | spec: 8 | managementState: "Managed" 9 | logStore: 10 | type: "elasticsearch" 11 | retentionPolicy: 12 | application: 13 | maxAge: 7d 14 | infra: 15 | maxAge: 7d 16 | audit: 17 | maxAge: 7d 18 | elasticsearch: 19 | nodeCount: 3 20 | storage: 21 | storage: {} 22 | resources: 23 | requests: 24 | memory: "8Gi" 25 | proxy: 26 | resources: 27 | limits: 28 | memory: 256Mi 29 | requests: 30 | memory: 256Mi 31 | redundancyPolicy: "SingleRedundancy" 32 | visualization: 33 | type: "kibana" 34 | kibana: 35 | replicas: 1 36 | curation: 37 | type: 
"curator" 38 | curator: 39 | schedule: "30 3 * * *" 40 | collection: 41 | logs: 42 | type: "fluentd" 43 | fluentd: {} 44 | -------------------------------------------------------------------------------- /.work-in-progress/efk/efk-poc-install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "logging.openshift.io/v1" 3 | kind: "ClusterLogging" 4 | metadata: 5 | name: "instance" 6 | namespace: "openshift-logging" 7 | spec: 8 | managementState: "Managed" 9 | logStore: 10 | type: "elasticsearch" 11 | retentionPolicy: 12 | application: 13 | maxAge: 7d 14 | infra: 15 | maxAge: 7d 16 | audit: 17 | maxAge: 7d 18 | elasticsearch: 19 | nodeCount: 3 20 | storage: 21 | storageClassName: "gp2" 22 | size: 200G 23 | resources: 24 | requests: 25 | memory: "8Gi" 26 | proxy: 27 | resources: 28 | limits: 29 | memory: 256Mi 30 | requests: 31 | memory: 256Mi 32 | redundancyPolicy: "SingleRedundancy" 33 | visualization: 34 | type: "kibana" 35 | kibana: 36 | replicas: 1 37 | curation: 38 | type: "curator" 39 | curator: 40 | schedule: "30 3 * * *" 41 | collection: 42 | logs: 43 | type: "fluentd" 44 | fluentd: {} 45 | -------------------------------------------------------------------------------- /.work-in-progress/efk/efk-poc-install-gp2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "logging.openshift.io/v1" 3 | kind: "ClusterLogging" 4 | metadata: 5 | name: "instance" 6 | namespace: "openshift-logging" 7 | spec: 8 | managementState: "Managed" 9 | logStore: 10 | type: "elasticsearch" 11 | retentionPolicy: 12 | application: 13 | maxAge: 7d 14 | infra: 15 | maxAge: 7d 16 | audit: 17 | maxAge: 7d 18 | elasticsearch: 19 | nodeCount: 3 20 | storage: 21 | storageClassName: "gp2" 22 | size: 200G 23 | resources: 24 | requests: 25 | memory: "8Gi" 26 | proxy: 27 | resources: 28 | limits: 29 | memory: 256Mi 30 | requests: 31 | memory: 256Mi 32 | redundancyPolicy: "SingleRedundancy" 33 | visualization: 34 | type: "kibana" 35 | kibana: 36 | replicas: 1 37 | curation: 38 | type: "curator" 39 | curator: 40 | schedule: "30 3 * * *" 41 | collection: 42 | logs: 43 | type: "fluentd" 44 | fluentd: {} 45 | -------------------------------------------------------------------------------- /roles/ocp4-chronyd/templates/chrony.conf.j2: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 3 | {% for server in ntp_server_list %} 4 | pool {{ server }} iburst 5 | {% endfor %} 6 | 7 | # Record the rate at which the system clock gains/losses time. 8 | driftfile /var/lib/chrony/drift 9 | 10 | # Allow the system clock to be stepped in the first three updates 11 | # if its offset is larger than 1 second. 12 | makestep 1.0 3 13 | 14 | # Enable kernel synchronization of the real-time clock (RTC). 15 | rtcsync 16 | 17 | # Enable hardware timestamping on all interfaces that support it. 18 | #hwtimestamp * 19 | 20 | # Increase the minimum number of selectable sources required to adjust 21 | # the system clock. 22 | #minsources 2 23 | 24 | # Allow NTP client access from local network. 25 | #allow 192.168.0.0/16 26 | 27 | # Serve time even if not synchronized to a time source. 28 | #local stratum 10 29 | 30 | # Specify file containing keys for NTP authentication. 31 | keyfile /etc/chrony.keys 32 | 33 | # Get TAI-UTC offset and leap seconds from the system tz database. 
34 | leapsectz right/UTC 35 | 36 | # Specify directory for log files. 37 | logdir /var/log/chrony 38 | 39 | # Select which information is logged. 40 | #log measurements statistics tracking -------------------------------------------------------------------------------- /roles/ocp4-vsphere-machineset/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Obtain the infrastructure ID by running 3 | command: 'oc get -o jsonpath="{.status.infrastructureName}{\"\n\"}" infrastructure cluster' 4 | register: infra_id 5 | 6 | - debug: 7 | msg: "{{ infra_id.stdout }}" 8 | 9 | - set_fact: 10 | infrastructure_id: "{{ infra_id.stdout }}" 11 | machineset_filename: "{{ infra_id.stdout }}-{{ role }}-machineset.yml" 12 | 13 | - name: Create "{{ yaml_directory_name }}" 14 | file: 15 | path: "{{ yaml_directory_name }}" 16 | state: directory 17 | 18 | - name: Create machineset YAML 19 | template: 20 | src: ocp4-machineset.yml.j2 21 | dest: "{{ yaml_directory_name }}/{{ machineset_filename }}" 22 | 23 | - name: Before apply machineset 24 | command: 25 | cmd: "oc get machineset -n openshift-machine-api" 26 | register: result 27 | 28 | - debug: 29 | msg: "{{ result.stdout }}" 30 | 31 | - name: Apply {{ yaml_directory_name }}/{{ machineset_filename }} 32 | command: 33 | cmd: "oc apply -f {{ yaml_directory_name }}/{{ machineset_filename }}" 34 | register: result 35 | 36 | - debug: 37 | msg: "Stdout: {{ result.stdout }}\nStderr: {{ result.stderr }}" 38 | 39 | - name: After apply machineset 40 | command: 41 | cmd: "oc get machineset -n openshift-machine-api" 42 | register: result 43 | 44 | - debug: 45 | msg: "{{ result.stdout }}" -------------------------------------------------------------------------------- /remove-kubeadmin.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook remove-kubeadmin.yml 3 | # 4 | 5 | --- 6 | - hosts: localhost 7 | gather_facts: false 8 | 9 | vars_prompt: 10 | - name: read_this 11 | prompt: > 12 | "READ THIS FIRSTIf you follow this procedure before another user is a cluster-admin, then OpenShift Container Platform **MUST BE** reinstalled. It is not possible to undo this command. Agree? 
[No/yes]" 13 | default: "no" 14 | private: no 15 | 16 | - name: cluster_admin_username 17 | prompt: > 18 | "Type cluster_admin role Username" 19 | default: "" 20 | private: no 21 | 22 | - name: cluster_admin_password 23 | prompt: > 24 | "Type cluster_admin role Password" 25 | default: "" 26 | private: yes 27 | 28 | tasks: 29 | - name: Exit playbook, if you dont know anything 30 | fail: 31 | msg: > 32 | "Please read this first https://docs.openshift.com/container-platform/4.6/authentication/remove-kubeadmin.html" 33 | when: read_this != 'yes' 34 | 35 | - name: OpenShift CLI login 36 | command: "oc login --insecure-skip-tls-verify=true -u {{ cluster_admin_username }} -p {{ cluster_admin_password }}" 37 | no_log: true 38 | 39 | - name: Removing the kubeadmin user 40 | command: 41 | cmd: "oc delete secrets kubeadmin -n kube-system" 42 | register: result 43 | when: read_this == "yes" 44 | 45 | - debug: 46 | msg: "{{ result }}" -------------------------------------------------------------------------------- /roles/ocp4-clusterautoscaler/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Obtain the infrastructure ID by running 3 | command: 'oc get -o jsonpath="{.status.infrastructureName}{\"\n\"}" infrastructure cluster' 4 | register: infra_id 5 | 6 | - debug: 7 | msg: "{{ infra_id.stdout }}" 8 | 9 | - set_fact: 10 | infrastructure_id: "{{ infra_id.stdout }}" 11 | 12 | clusterautoscaler_name: "ca-{{ infra_id.stdout }}-default" 13 | clusterautoscaler_filename: "ca-{{ infra_id.stdout }}-default.yml" 14 | 15 | - name: Create "{{ yaml_directory_name }}" 16 | file: 17 | path: "{{ yaml_directory_name }}" 18 | state: directory 19 | 20 | - name: Create ClusterAutoScaler File - "{{ yaml_directory_name }}/{{ clusterautoscaler_filename }}" 21 | template: 22 | src: clusterautoscaler.yml.j2 23 | dest: "{{ yaml_directory_name }}/{{ clusterautoscaler_filename }}" 24 | 25 | - name: Before apply ClusterAutoScaler 26 | command: 27 | cmd: "oc get clusterautoscaler -n openshift-machine-api" 28 | register: result 29 | 30 | - debug: 31 | msg: "{{ result.stdout }}" 32 | 33 | - name: Apply {{ yaml_directory_name }}/{{ clusterautoscaler_filename }} 34 | command: 35 | cmd: "oc apply -f {{ yaml_directory_name }}/{{ clusterautoscaler_filename }}" 36 | register: result 37 | 38 | - debug: 39 | msg: "Stdout: {{ result.stdout }}\nStderr: {{ result.stderr }}" 40 | 41 | - name: After apply clusterautoscaler 42 | command: 43 | cmd: "oc get clusterautoscaler -n openshift-machine-api" 44 | register: result 45 | 46 | - debug: 47 | msg: "{{ result.stdout }}" -------------------------------------------------------------------------------- /roles/ocp4-machineautoscaler/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Obtain the infrastructure ID by running 3 | command: 'oc get -o jsonpath="{.status.infrastructureName}{\"\n\"}" infrastructure cluster' 4 | register: infra_id 5 | 6 | - debug: 7 | msg: "{{ infra_id.stdout }}" 8 | 9 | - set_fact: 10 | infrastructure_id: "{{ infra_id.stdout }}" 11 | machineautoscaler_name: "ma-{{ infra_id.stdout }}-{{ role }}" 12 | machineautoscaler_filename: "ma-{{ infra_id.stdout }}-{{ role }}.yml" 13 | 14 | - name: Create "{{ yaml_directory_name }}" 15 | file: 16 | path: "{{ yaml_directory_name }}" 17 | state: directory 18 | 19 | - name: Create MachineAutoScaler File - "{{ yaml_directory_name }}/{{ machineautoscaler_filename }}" 20 | template: 21 | src: 
machineautoscaler.yml.j2 22 | dest: "{{ yaml_directory_name }}/{{ machineautoscaler_filename }}" 23 | 24 | - name: Before apply MachineAutoScaler 25 | command: 26 | cmd: "oc get machineautoscaler -n openshift-machine-api" 27 | register: result 28 | 29 | - debug: 30 | msg: "{{ result.stdout }}" 31 | 32 | - name: Apply {{ yaml_directory_name }}/{{ machineautoscaler_filename }} 33 | command: 34 | cmd: "oc apply -f {{ yaml_directory_name }}/{{ machineautoscaler_filename }}" 35 | register: result 36 | 37 | - debug: 38 | msg: "Stdout: {{ result.stdout }}\nStderr: {{ result.stderr }}" 39 | 40 | - name: After apply machineautoscaler 41 | command: 42 | cmd: "oc get machineautoscaler -n openshift-machine-api" 43 | register: result 44 | 45 | - debug: 46 | msg: "{{ result.stdout }}" 47 | -------------------------------------------------------------------------------- /roles/ocp4-machinehealthcheck/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Obtain the infrastructure ID by running 3 | command: 'oc get -o jsonpath="{.status.infrastructureName}{\"\n\"}" infrastructure cluster' 4 | register: infra_id 5 | 6 | - debug: 7 | msg: "{{ infra_id.stdout }}" 8 | 9 | - set_fact: 10 | infrastructure_id: "{{ infra_id.stdout }}" 11 | machinehealcheck_name: "{{ infra_id.stdout }}-{{ role }}-machinehealthcheck" 12 | machinehealcheck_filename: "mhc-{{ infra_id.stdout }}-{{ role }}.yml" 13 | 14 | - name: Create "{{ yaml_directory_name }}" 15 | file: 16 | path: "{{ yaml_directory_name }}" 17 | state: directory 18 | 19 | - name: Create MachineHealthCheck File - "{{ yaml_directory_name }}/{{ machinehealcheck_filename }}" 20 | template: 21 | src: machinehealthcheck.yml.j2 22 | dest: "{{ yaml_directory_name }}/{{ machinehealcheck_filename }}" 23 | 24 | - name: Before apply MachineHealthCheck 25 | command: 26 | cmd: "oc get machinehealthcheck -n openshift-machine-api" 27 | register: result 28 | 29 | - debug: 30 | msg: "{{ result.stdout }}" 31 | 32 | - name: Apply {{ yaml_directory_name }}/{{ machinehealcheck_filename }} 33 | command: 34 | cmd: "oc apply -f {{ yaml_directory_name }}/{{ machinehealcheck_filename }}" 35 | register: result 36 | 37 | - debug: 38 | msg: "Stdout: {{ result.stdout }}\nStderr: {{ result.stderr }}" 39 | 40 | - name: After apply machinehealthcheck 41 | command: 42 | cmd: "oc get machinehealthcheck -n openshift-machine-api" 43 | register: result 44 | 45 | - debug: 46 | msg: "{{ result.stdout }}" -------------------------------------------------------------------------------- /.work-in-progress/project-requrest-template/project-requrest-template.yml: -------------------------------------------------------------------------------- 1 | apiVersion: template.openshift.io/v1 2 | kind: Template 3 | metadata: 4 | creationTimestamp: null 5 | name: project-request 6 | objects: 7 | - apiVersion: project.openshift.io/v1 8 | kind: Project 9 | metadata: 10 | annotations: 11 | openshift.io/description: ${PROJECT_DESCRIPTION} 12 | openshift.io/display-name: ${PROJECT_DISPLAYNAME} 13 | openshift.io/requester: ${PROJECT_REQUESTING_USER} 14 | creationTimestamp: null 15 | name: ${PROJECT_NAME} 16 | spec: 17 | projectRequestMessage: "You requested a new project from homecloud" 18 | status: {} 19 | - kind: NetworkPolicy 20 | apiVersion: networking.k8s.io/v1 21 | metadata: 22 | name: default-deny-from-other-namespaces 23 | spec: 24 | podSelector: 25 | matchLabels: 26 | ingress: 27 | - from: 28 | - podSelector: {} 29 | - apiVersion: v1 30 | kind: 
ResourceQuota 31 | metadata: 32 | name: default-quota 33 | spec: 34 | hard: 35 | pods: '5' 36 | # requests.cpu: '10' 37 | # requests.memory: 10Gi 38 | # limits.cpu: '20' 39 | # limits.memory: 20Gi 40 | - apiVersion: rbac.authorization.k8s.io/v1 41 | kind: RoleBinding 42 | metadata: 43 | creationTimestamp: null 44 | name: admin 45 | namespace: ${PROJECT_NAME} 46 | roleRef: 47 | apiGroup: rbac.authorization.k8s.io 48 | kind: ClusterRole 49 | name: admin 50 | subjects: 51 | - apiGroup: rbac.authorization.k8s.io 52 | kind: User 53 | name: ${PROJECT_ADMIN_USER} 54 | parameters: 55 | - name: PROJECT_NAME 56 | - name: PROJECT_DISPLAYNAME 57 | - name: PROJECT_DESCRIPTION 58 | - name: PROJECT_ADMIN_USER 59 | - name: PROJECT_REQUESTING_USER 60 | -------------------------------------------------------------------------------- /roles/ocp4-etcd-restore/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Define backup dir name 3 | set_fact: 4 | etcd_tarball: "{{ bastion_etcd_dirname }}.tar.gz" 5 | 6 | - name: Debug Source Machine (Bastion) - Backup Path 7 | debug: 8 | msg: "Source Machine (Bastion) Backup Direcotory: {{ bastion_backup_dir }}/{{ bastion_etcd_dirname }}" 9 | 10 | - name: Debug Destination Machine (Masters) - Backup Path 11 | debug: 12 | msg: "Destination Machine (Masters) Backup Direcotory: {{ masters_backup_dir }}/{{ bastion_etcd_dirname }}" 13 | 14 | - name: Tar {{ backup_src_dir }} in Bastion node 15 | archive: 16 | path: "{{ bastion_backup_dir }}" 17 | dest: "{{ etcd_tarball }}" 18 | format: gz 19 | become: true 20 | 21 | - name: Perform ETCD restore 22 | shell: /usr/local/bin/cluster-backup.sh "{{ backup_src_dir }}" 23 | become: true 24 | 25 | - name: Tar {{ backup_src_dir }} in ETCD node 26 | archive: 27 | path: "{{ backup_src_dir }}" 28 | dest: "{{ etcd_tarball }}" 29 | format: gz 30 | become: true 31 | 32 | - name: Fetch ETCD backup dir 33 | fetch: 34 | src: "{{ etcd_tarball }}" 35 | dest: "{{ bastion_backup_dir }}/" 36 | flat: yes 37 | 38 | - name: Untar ETCD tarball in bastion node 39 | unarchive: 40 | src: "{{ bastion_backup_dir }}/{{ etcd_tarball }}" 41 | dest: "{{ bastion_backup_dir }}" 42 | connection: local 43 | 44 | # - name: Create a symbolic link to latest backup 45 | # file: 46 | # src: "{{ backup_dst_dir }}" 47 | # dest: "{{ backup_dst_dir_latest }}" 48 | # state: link 49 | # delegate_to: localhost 50 | 51 | - name: Clean backup file on etcd node 52 | file: 53 | path: "{{ item }}" 54 | state: absent 55 | with_items: 56 | - "{{ backup_src_dir }}" 57 | - "{{ etcd_backup_dir }}/{{ etcd_tarball }}" 58 | become: true 59 | 60 | - name: Clean backup file on master node 61 | file: 62 | path: "{{ item }}" 63 | state: absent 64 | with_items: 65 | - "{{ bastion_backup_dir }}/{{ etcd_tarball }}" 66 | connection: local -------------------------------------------------------------------------------- /archive-container-images.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook save-container-images.yml 3 | # 4 | 5 | --- 6 | - hosts: localhost 7 | become: True 8 | vars: 9 | archive_path: /tmp/containers 10 | containers: 11 | - name: docker.io/nicolaka/netshoot 12 | tag: latest 13 | id: 6ae5a524ab39 14 | - name: registry.redhat.io/rhel8/support-tools 15 | tag: 8.3 16 | id: 9b0af9ae39b1 17 | 18 | tasks: 19 | - name: Check directory {{ archive_path }} 20 | file: 21 | path: "{{ archive_path }}" 22 | state: directory 23 | 24 | - name: Mirror from online 25 | 
podman_image: 26 | name: "{{ item.name }}" 27 | tag: "{{ item.tag }}" 28 | pull: True 29 | tags: 30 | - online 31 | - mirror 32 | loop: "{{ containers }}" 33 | 34 | - name: Write to disk 35 | command: > 36 | skopeo copy 37 | containers-storage:{{ item.name }}:{{ item.tag }} 38 | docker-archive:{{ archive_path }}/{{ item.name | regex_replace('/', '+') }}__{{ item.tag }}__{{ item.id }}.tar 39 | args: 40 | creates: "{{ archive_path }}/{{ item.name | regex_replace('/', '+') }}__{{ item.tag }}__{{ item.id }}.tar" 41 | tags: 42 | - online 43 | - mirror 44 | loop: "{{ containers }}" 45 | 46 | - name: Gather info for all images 47 | podman_image_info: 48 | name: "{{ item.id }}" 49 | register: images 50 | tags: 51 | - offline 52 | loop: "{{ containers }}" 53 | 54 | - name: Populate from disk 55 | command: > 56 | skopeo copy 57 | docker-archive:{{ archive_path }}/{{ item.item.name | regex_replace('/', '+') }}__{{ item.item.tag }}__{{ item.id }}.tar 58 | containers-storage:{{ item.item.name }}:{{ item.item.tag }} 59 | when: item.images | length == 0 60 | tags: 61 | - offline 62 | - never 63 | loop: "{{ images.results }}" 64 | loop_control: 65 | label: "{{ item.item.name }}" -------------------------------------------------------------------------------- /roles/ocp4-vsphere-machineset/templates/ocp4-machineset.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: machine.openshift.io/v1beta1 3 | kind: MachineSet 4 | metadata: 5 | creationTimestamp: null 6 | labels: 7 | machine.openshift.io/cluster-api-cluster: {{ infrastructure_id }} 8 | name: {{ infrastructure_id }}-{{ role }} 9 | namespace: openshift-machine-api 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | machine.openshift.io/cluster-api-cluster: {{ infrastructure_id }} 15 | machine.openshift.io/cluster-api-machineset: {{ infrastructure_id }}-{{ role }} 16 | template: 17 | metadata: 18 | creationTimestamp: null 19 | labels: 20 | machine.openshift.io/cluster-api-cluster: {{ infrastructure_id }} 21 | machine.openshift.io/cluster-api-machine-role: {{ role }} 22 | machine.openshift.io/cluster-api-machine-type: {{ role }} 23 | machine.openshift.io/cluster-api-machineset: {{ infrastructure_id }}-{{ role }} 24 | spec: 25 | metadata: 26 | creationTimestamp: null 27 | labels: 28 | node-role.kubernetes.io/{{ role }}: "" 29 | providerSpec: 30 | value: 31 | apiVersion: vsphereprovider.openshift.io/v1beta1 32 | credentialsSecret: 33 | name: vsphere-cloud-credentials 34 | diskGiB: {{ vm_disk }} 35 | kind: VSphereMachineProviderSpec 36 | memoryMiB: {{ vm_ram }} 37 | metadata: 38 | creationTimestamp: null 39 | network: 40 | devices: 41 | - networkName: "{{ vm_network_name }}" 42 | numCPUs: {{ vm_cpu }} 43 | numCoresPerSocket: {{ vm_corepersocket }} 44 | snapshot: "" 45 | template: {{ vm_template_name }} 46 | userDataSecret: 47 | name: worker-user-data 48 | workspace: 49 | datacenter: {{ vcenter_datacenter_name }} 50 | datastore: {{ vcenter_datastore_name }} 51 | folder: {{ vcenter_vm_folder_path }} 52 | resourcepool: {{ vsphere_resource_pool }} 53 | server: {{ vcenter_server_ip }} -------------------------------------------------------------------------------- /add-ocp4-account.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook add-cluster-admin-account.yml 3 | # 4 | 5 | --- 6 | - hosts: localhost 7 | gather_facts: false 8 | 9 | vars_prompt: 10 | - name: username 11 | prompt: > 12 | "(Cluster Admin) Username #" 13 | 
default: 'ocproot' 14 | private: no 15 | 16 | - name: password 17 | prompt: > 18 | "(Cluster Admin) Password #" 19 | default: 'ocproot' 20 | private: yes 21 | 22 | - name: cluster_role 23 | prompt: > 24 | "Which cluster_role in OCP4? [cluster-admin/cluster-status/admin/edit/self-provisioner/view/basic-user]" 25 | default: 'cluster-admin' 26 | private: no 27 | 28 | - name: directory_name 29 | prompt: > 30 | "Root Directory Name #" 31 | default: '/tmp/local-password' 32 | private: no 33 | 34 | - name: htpasswd_filename 35 | prompt: > 36 | "htpasswd Filename #" 37 | default: 'localuser-htpasswd' 38 | private: no 39 | 40 | - name: identity_provider_name 41 | prompt: > 42 | "Identity Provider Name #" 43 | default: homecloud-htpasswd-provider 44 | private: no 45 | 46 | tasks: 47 | - name: Create local directory to save YAML files 48 | file: 49 | path: "{{ directory_name }}" 50 | state: directory 51 | 52 | - name: Create htpasswd file 53 | import_role: 54 | name: htpasswd-secret 55 | vars: 56 | filename: "{{ htpasswd_filename }}" 57 | 58 | - name: Import "{{ htpasswd_filename }}" into OpenShift 4 59 | import_role: 60 | name: ocp4-secret 61 | 62 | - name: Create Identity Provider - "{{ identity_provider_name }}" 63 | import_role: 64 | name: ocp4-htpasswd-providers 65 | 66 | - name: Assign Cluster Role 67 | command: 68 | cmd: "oc adm policy add-cluster-role-to-user {{ cluster_role }} {{ username }}" 69 | register: result 70 | 71 | - debug: 72 | msg: "{{ result.stdout_lines }}" 73 | 74 | - name: Test Login 75 | command: 76 | cmd: "oc login -u {{ username }} -p {{ password }}" 77 | no_log: True 78 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /roles/ocp4-etcd-backup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for roles/ocp4-etcd-backup 3 | 4 | - name: "Fail when mandatory variables are not defined" 5 | fail: 6 | msg: "Required variables are not defined" 7 | when: etcd_backup_dir is not defined 8 | or bastion_backup_dir is not defined 9 | 10 | - name: Generate snapshot dir name 11 | shell: 'echo "$(date "+%Y%m%d%H%M%S")-etcd-backup"' 12 | register: tmp_backup_dir 13 | changed_when: false 14 | 15 | - name: Define backup dir name 16 | set_fact: 17 | backup_src_dir: "{{ etcd_backup_dir }}/{{ tmp_backup_dir.stdout }}" 18 | backup_dst_dir: "{{ bastion_backup_dir }}/{{ tmp_backup_dir.stdout }}" 19 | etcd_tarball: "{{ tmp_backup_dir.stdout }}.tar.gz" 20 | 21 | - name: Debug Source Machine (Master) - Backup Path 22 | debug: 23 | msg: "Source Machine (Master) Backup Directory: {{ backup_src_dir }}" 24 | #verbosity: 1 25 | 26 | - name: Debug Destination Machine (Bastion) - Backup Path 27 | debug: 28 | msg: "Destination Machine (Bastion) Backup Directory: {{ backup_dst_dir }}" 29 | #verbosity: 1 30 | 31 | - name: Perform ETCD backup 32 | shell: /usr/local/bin/cluster-backup.sh "{{ backup_src_dir }}" 33 | become: true 34 | 35 | - name: Tar {{ backup_src_dir }} in ETCD node 36 | archive: 37 | path: "{{ backup_src_dir }}" 38 | dest: "{{ etcd_tarball }}" 39 | format: gz 40 | become: true 41 | 42 | - name: Fetch ETCD backup dir 43 | fetch: 44 | src: "{{ etcd_tarball }}" 45 | dest: "{{ bastion_backup_dir }}/" 46 | flat: yes 47 | 48 | - name: Untar ETCD tarball in bastion node 49 | unarchive: 50 | src: "{{ bastion_backup_dir }}/{{ etcd_tarball }}" 51 | dest: "{{ bastion_backup_dir }}" 52 | connection: local 53 | 54 | # - name: Create a symbolic link to latest backup 55 | # file: 56 | 
# src: "{{ backup_dst_dir }}" 57 | # dest: "{{ backup_dst_dir_latest }}" 58 | # state: link 59 | # delegate_to: localhost 60 | 61 | - name: Clean backup file on etcd node 62 | file: 63 | path: "{{ item }}" 64 | state: absent 65 | with_items: 66 | - "{{ backup_src_dir }}" 67 | - "{{ etcd_backup_dir }}/{{ etcd_tarball }}" 68 | become: true 69 | 70 | - name: Clean backup file on master node 71 | file: 72 | path: "{{ item }}" 73 | state: absent 74 | with_items: 75 | - "{{ bastion_backup_dir }}/{{ etcd_tarball }}" 76 | connection: local -------------------------------------------------------------------------------- /.work-in-progress/add-self-signed-ssl-certificate.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: no 4 | vars: 5 | ssl_directory: "/tmp/openssl" 6 | ssl_name: "api.dmz.ocp4.local" 7 | country_name: "TW" 8 | organization_name: "HomeCloud" 9 | email_address: "phil.huang@redhat.com" 10 | common_name: "api.dmz.ocp4.local" 11 | subject_alt_name: "DNS:api.dmz.ocp4.local" 12 | 13 | tasks: 14 | - name: Create SSL directories - {{ ssl_directory }} 15 | file: 16 | path: "{{ item }}" 17 | state: directory 18 | loop: 19 | - "{{ ssl_directory }}/crt" 20 | - "{{ ssl_directory }}/csr" 21 | - "{{ ssl_directory }}/private" 22 | 23 | - name: Generate an OpenSSL private key {{ ssl_name }} with the default values (4096 bits, RSA) 24 | openssl_privatekey: 25 | path: "{{ ssl_directory }}/private/{{ ssl_name }}.pem" 26 | type: RSA 27 | 28 | - name: Generate an OpenSSL Certificate Signing Request 29 | openssl_csr: 30 | path: "{{ ssl_directory }}/csr/{{ ssl_name }}.csr" 31 | privatekey_path: "{{ ssl_directory }}/private/{{ ssl_name }}.pem" 32 | country_name: "{{ country_name }}" 33 | organization_name: "{{ organization_name }}" 34 | email_address: "{{ email_address }}" 35 | common_name: "{{ common_name }}" 36 | subject_alt_name: "{{ subject_alt_name }}" 37 | 38 | - name: Generate a Self Signed OpenSSL certificate 39 | openssl_certificate: 40 | path: "{{ ssl_directory }}/crt/{{ ssl_name }}.crt" 41 | privatekey_path: "{{ ssl_directory }}/private/{{ ssl_name }}.pem" 42 | csr_path: "{{ ssl_directory }}/csr/{{ ssl_name }}.csr" 43 | provider: selfsigned 44 | 45 | # - name: Get information on the CSR 46 | # openssl_csr_info: 47 | # path: "{{ ssl_directory }}/csr/{{ ssl_name }}.csr" 48 | # register: result 49 | 50 | # - name: Dump CSR information 51 | # debug: 52 | # var: result 53 | 54 | - name: Get information on generated certificate 55 | openssl_certificate_info: 56 | path: "{{ ssl_directory }}/crt/{{ ssl_name }}.crt" 57 | register: result 58 | 59 | - name: Dump CRT information 60 | debug: 61 | var: result 62 | 63 | - name: Get information on private key 64 | openssl_privatekey_info: 65 | path: "{{ ssl_directory }}/private/{{ ssl_name }}.pem" 66 | register: result 67 | 68 | - name: Dump private key information 69 | debug: 70 | var: result 71 | -------------------------------------------------------------------------------- /roles/ocp4-etcd/files/who-is-ocp4-etcd-leader.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Usage: ./who-is-ocp4-etcd-leader.py -h 5 | Return ETCD Leader ID: ./who-is-ocp4-etcd-leader.py id 6 | Return ETCD LEADER NAME: ./who-is-ocp4-etcd-leader.py name 7 | Return ETCD Leader ENDPOINT: ./who-is-ocp4-etcd-leader.py endpoint 8 | """ 9 | 10 | import openshift as oc 11 | import json 12 | import argparse 13 | 14 | # 15 | # Check 
OpenShift OC Command can work well 16 | # 17 | 18 | # print('OpenShift client version: {}'.format(oc.get_client_version())) 19 | # print('OpenShift server version: {}'.format(oc.get_server_version())) 20 | 21 | # 22 | # Obtain etcd information from OpenShfit OC commmand 23 | # 24 | 25 | with oc.project('openshift-etcd'), oc.timeout(10*60): 26 | 27 | etcd_selector = oc.selector('pods', labels={ 'app': 'etcd' }).qnames() 28 | # print(etcd_selector) 29 | # Output: ['pod/etcd-master0', 'pod/etcd-master1', 'pod/etcd-master2'] 30 | 31 | etcdctl_endpoint_status = "etcdctl endpoint status --cluster -w json" 32 | etcdctl_endpoint_status_result = oc.selector(etcd_selector[0]).object().execute(['/bin/bash'], 33 | container_name='etcdctl', stdin=etcdctl_endpoint_status).out() 34 | 35 | etcdctl_member_list = "etcdctl member list -w json" 36 | etcdctl_member_list_result = oc.selector(etcd_selector[0]).object().execute(['/bin/bash'], 37 | container_name='etcdctl', stdin=etcdctl_member_list).out() 38 | 39 | # 40 | # Find out etcd leader 41 | # NAME / ID / ENDPOINT 42 | # 43 | 44 | LEADER_ID = "" 45 | LEADER_ENDPOINT = "" 46 | LEADER_NAME = "" 47 | 48 | etcdctl_endpoint_status_json = json.loads(etcdctl_endpoint_status_result) 49 | for etcdctl_endpoint in etcdctl_endpoint_status_json: 50 | if etcdctl_endpoint['Status']['header']['member_id'] == etcdctl_endpoint['Status']['leader']: 51 | LEADER_ID = etcdctl_endpoint['Status']['leader'] 52 | LEADER_ENDPOINT = etcdctl_endpoint['Endpoint'] 53 | # print(LEADER_ID) # Return etcd endpoint 54 | # print(ENDPOINT) # Return etcd endpoint 55 | 56 | etcdctl_member_list_result_json = json.loads(etcdctl_member_list_result) 57 | for etcdctl_member in etcdctl_member_list_result_json['members']: 58 | # print(etcdctl_member['clientURLs']) 59 | if etcdctl_member['clientURLs'][0] == LEADER_ENDPOINT: 60 | LEADER_NAME = etcdctl_member['name'] 61 | # print(etcdctl_member['name']) # Return etcd leader name 62 | 63 | def id(args): 64 | print(LEADER_ID) 65 | 66 | def name(args): 67 | print(LEADER_NAME) 68 | 69 | def endpoint(args): 70 | print(LEADER_ENDPOINT) 71 | 72 | parser = argparse.ArgumentParser() 73 | subparsers = parser.add_subparsers() 74 | 75 | id_parser = subparsers.add_parser('id') 76 | id_parser.set_defaults(func=id) 77 | name_parser = subparsers.add_parser('name') 78 | name_parser.set_defaults(func=name) 79 | leader_parser = subparsers.add_parser('endpoint') 80 | leader_parser.set_defaults(func=endpoint) 81 | 82 | if __name__ == '__main__': 83 | args = parser.parse_args() 84 | args.func(args) # call the default function -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenShift 4 Toolbox 2 | 3 | > Speed delivery with Red Hat Ansible and OpenShift 4 4 | 5 | ``` 6 | \\\\\\\ 7 | \\\\\\\\\\\\ 8 | \\\\\\\\\\\\\\\ 9 | -----------,-| |C> // )\\\\| 10 | ,','| / || ,'/////| 11 | ---------,',' | (, || ///// 12 | || | \\ ||||//''''| 13 | Red Hat || | ||||||| _| 14 | OpenShift|| |______ `````\____/ \ 15 | 4 || | ,| _/_____/ \ 16 | || ,' ,' | / | 17 | ||,' ,' | | YOU \ | 18 | _________|/ ,' | / | | 19 | _____________,' ,',_____| | | | 20 | | ,',' | | | | 21 | | ,',' ____|_____/ / | 22 | | ,',' __/ | / | 23 | _____________|',' ///_/-------------/ | 24 | |===========,' 25 | ``` 26 | 27 | ## Current Environments 28 | 29 | - Red Hat OpenShift 4.6.17 using [vSphere UPI][1] 30 | - VMware vSphere 7.0.1 build: 17005016 31 | - Bastion 32 | - Red Hat Enterprise 
Linux 7.9 33 | - Red Hat Ansible 2.9.15 34 | - Python 3.6.8 35 | 36 | ## Toolbox 37 | 38 | ### OpenShift 4 ETCD 39 | 40 | - [x] ETCD Backup - `ansible-playbook backup-etcd.yml` 41 | - YouTube: https://youtu.be/hVijextRADs 42 | - [x] ETCD Health Check Report - `ansible-playbook healthcheck-etcd.yml` 43 | - YouTube: https://youtu.be/FGwCmCuQNrg 44 | - [x] ETCD Disk Performance Report - `ansible-playbook check-disk-performance-etcd.yml` 45 | - YouTube: https://youtu.be/6qjsh9J3ndM 46 | - [x] Use `ionice` to set a high I/O priority for the etcd process - `ansible-playbook ionice-etcd.yml` 47 | 48 | ### Execute commands on specific nodes 49 | 50 | - [x] Run commands on multiple nodes with a single command - `./shell.sh 'timedatectl | grep -i "Local time"'` 51 | - [x] Run commands on multiple nodes in prompt mode - `ansible-playbook shell-prompt.yml` 52 | 53 | ### OpenShift 4 Machine Management 54 | 55 | - [x] Add MachineSet on vSphere - `ansible-playbook add-vsphere-machineset.yml` 56 | - [x] Add MachineHealthCheck - `ansible-playbook add-machinehealthcheck.yml` 57 | - YouTube: https://youtu.be/ZT1IWEiw-EY 58 | - [x] Add MachineAutoScaler - `ansible-playbook add-machineautoscaler.yml` 59 | - YouTube: https://youtu.be/vWrJ-NCO2oc 60 | - [x] Add ClusterAutoScaler - `ansible-playbook add-clusterautoscaler.yml` 61 | - [x] Cause a scaling event for testing purposes - `./force-node-scaling-event.sh` 62 | 63 | ### OpenShift 4 Power Control 64 | 65 | - [x] Reboot OpenShift cluster gracefully - `ansible-playbook graceful-ocp4-reboot.yml` 66 | - YouTube: https://youtu.be/G7XTY7TXltE 67 | - [x] Shut down the cluster gracefully - `ansible-playbook graceful-ocp4-shutdown.yml` 68 | - YouTube: https://youtu.be/Q6rv2bLXoNA 69 | 70 | ### OpenShift 4 Authentication 71 | 72 | - [x] Add new account and identity provider - `ansible-playbook add-ocp4-account.yml` 73 | - [x] Disable default account `kubeadmin` - `ansible-playbook remove-kubeadmin.yml` 74 | 75 | ### OpenShift 4 Security 76 | 77 | - [ ] Pull Audit Log 78 | 79 | ### OpenShift 4 Time 80 | 81 | - [x] Check System Time - `ansible-playbook check-system-time.yml` 82 | 83 | - [x] Change Timezone - `ansible-playbook config-time-service.yml` 84 | 85 | ### OpenShift 4 Certificates 86 | 87 | - [ ] Add API server certificates 88 | 89 | ### NFS 90 | 91 | - [ ] Install [NFS Subdir External Provisioner][6] 92 | 93 | ### Service Mesh 94 | 95 | - [x] Install [Red Hat Service Mesh][4] 96 | - [ ] [OpenShift Service Mesh Workshop][9] 97 | 98 | ### ACM 99 | 100 | - [x] Install [Red Hat Advanced Cluster Management for Kubernetes][5] 101 | 102 | ### Misc 103 | 104 | - [x] Save container images to tar archive - `ansible-playbook archive-container-images.yml` 105 | - [x] deadman monitors host status using ping - `ansible-playbook monitoring-host-status.yml` 106 | - [ ] Kubeeye 107 | 108 | ## Prerequisites 109 | 110 | 1. Edit `hosts` and `ansible.cfg` to match your own environment first (an illustrative inventory sketch is included at the end of this README) 111 | 2. Use `ansible-playbook pingpong.yml` to connect to the hosts and verify a usable Python interpreter 112 | 3. (Optional) `pip3 install -r requirements.txt` 113 | 4. 
Do anything you want to do 114 | 115 | ## Test Records 116 | 117 | | Date | Status | OpenShift Version | Ansible Version | Bastion OS Version | 118 | |:--------:|:------:|:-----------------:|:---------------:|:------------------:| 119 | | 20240126 | OK | 4.12.27 | 2.16.2 | RHEL 9.3 | 120 | | 20210222 | OK | 4.6.1 | 2.9.15 | RHEL 7.9 | 121 | | 20210220 | OK | 4.6.17 | 2.9.15 | RHEL 7.9 | 122 | | 20210220 | OK | 4.6.16 | 2.4.2.0 | RHEL 7.9 | 123 | | 20210220 | OK | 4.5.31 | 2.4.2.0 | RHEL 7.9 | 124 | 125 | ## Development Environment 126 | 127 | ```bash 128 | python3 -m venv .venv 129 | ansible-galaxy collection install kubernetes.core:3.0.0 --force 130 | pip install -r ~/.ansible/collections/ansible_collections/kubernetes/core/requirements.txt 131 | ``` 132 | 133 | ## Contributions are welcome! 134 | 135 | - [OpenSource Contribution Guidelines][3] 136 | 137 | ## References 138 | 139 | - [RedHatOfficial/ocp4-vsphere-upi-automation][1] 140 | - [openshift/training][2] 141 | - [openshift-tools Best Practices Guide][7] 142 | - [openshift-tools Style Guide][8] 143 | - [RedHatGov/service-mesh-workshop-dashboard][9] 144 | - [ahmetb/kubernetes-network-policy-recipes][10] 145 | - [Welcome to the Network Policy Editor!][11] 146 | 147 | [1]: https://github.com/RedHatOfficial/ocp4-vsphere-upi-automation 148 | [2]: https://github.com/openshift/training 149 | [3]: https://redhat-cop.github.io/contrib/ 150 | [4]: https://github.com/pichuang/redhat-service-mesh-demo 151 | [5]: https://github.com/pichuang/redhat-acm-demo 152 | [6]: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner 153 | [7]: https://github.com/openshift/openshift-tools/blob/prod/docs/best_practices_guide.adoc 154 | [8]: https://github.com/openshift/openshift-tools/blob/prod/docs/style_guide.adoc 155 | [9]: https://github.com/RedHatGov/service-mesh-workshop-dashboard/ 156 | [10]: https://github.com/ahmetb/kubernetes-network-policy-recipes 157 | [11]: https://editor.cilium.io/ --------------------------------------------------------------------------------
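## Example `hosts` Inventory (illustrative)

The prerequisite steps above ask you to edit `hosts` and `ansible.cfg` before running any playbook. As a rough, non-authoritative sketch only — every group name, host name, and variable below is a placeholder and may not match what the playbooks in this repository actually expect — an INI-style inventory for a vSphere UPI cluster driven from a bastion could look like this:

```ini
; Hypothetical example inventory; adjust group names, host names,
; and variables to your own environment and to the groups these
; playbooks reference.
[bastion]
bastion.example.com ansible_connection=local

[masters]
master0.example.com
master1.example.com
master2.example.com

[workers]
worker0.example.com
worker1.example.com

[masters:vars]
ansible_user=core
ansible_python_interpreter=/usr/bin/python3
```

After editing the inventory, verify connectivity with `ansible-playbook pingpong.yml` as described in the prerequisites.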