├── .github
├── ISSUE_TEMPLATE.md
└── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── .travis.yml
├── .yamllint
├── CONTRIBUTING.md
├── LICENSE.md
├── README.md
├── bin
├── README.md
└── cluster
├── ci
├── fetch-master.sh
├── install-tox.sh
├── openstack
│ ├── install-openshift.sh
│ ├── install.sh
│ ├── provision.sh
│ ├── teardown.sh
│ ├── validate.sh
│ └── vars.sh
└── run-tox.sh
├── inventory
└── aws
│ ├── ec2.ini
│ └── ec2.py
├── lookup_plugins
└── os_cinder.py
├── meta
└── main.yml
├── misc
└── gce-federation
│ ├── README.md
│ ├── ansible.cfg
│ ├── deploy-app.yaml
│ ├── federate.yaml
│ ├── files
│ ├── default_storage_class.yml
│ ├── mongo-deployment-rs.yaml
│ ├── mongo-pvc.yaml
│ ├── mongo-rs.yaml
│ ├── mongo-service.yaml
│ ├── pacman-deployment-rs.yaml
│ ├── pacman-namespace.yaml
│ ├── pacman-rs.yaml
│ └── pacman-service.yaml
│ ├── filter_plugins
│ └── gce_federation_filters.py
│ ├── init.yaml
│ ├── install.yaml
│ ├── inventory
│ ├── gce.py
│ ├── group_vars
│ │ └── federation
│ └── hosts
│ ├── library
│ └── gce.py
│ ├── push_images.sh
│ ├── teardown.yaml
│ └── templates
│ └── inventory.yaml.j2
├── molecule_common
├── base
│ └── Dockerfile
├── create.yml
├── destroy.yml
└── mock_aws
│ └── Dockerfile
├── playbooks
├── add-node-prerequisite.yaml
├── aws-prerequisite.yaml
├── aws
│ ├── README.md
│ └── openshift-cluster
│ │ ├── add_nodes.yml
│ │ ├── cluster_hosts.yml
│ │ ├── config.yml
│ │ ├── filter_plugins
│ │ ├── launch.yml
│ │ ├── library
│ │ └── ec2_ami_find.py
│ │ ├── list.yml
│ │ ├── lookup_plugins
│ │ ├── roles
│ │ ├── scaleup.yml
│ │ ├── service.yml
│ │ ├── tasks
│ │ └── launch_instances.yml
│ │ ├── templates
│ │ └── user_data.j2
│ │ ├── terminate.yml
│ │ ├── update.yml
│ │ └── vars.yml
├── deploy-host.yaml
├── empty-dir-quota.yaml
├── gce
│ ├── README.md
│ └── openshift-cluster
│ │ ├── add_nodes.yml
│ │ ├── cluster_hosts.yml
│ │ ├── config.yml
│ │ ├── filter_plugins
│ │ ├── launch.yml
│ │ ├── list.yml
│ │ ├── lookup_plugins
│ │ ├── roles
│ │ ├── service.yml
│ │ ├── tasks
│ │ └── launch_instances.yml
│ │ ├── terminate.yml
│ │ ├── update.yml
│ │ └── vars.yml
├── library
│ └── rpm_q.py
├── libvirt
│ ├── README.md
│ └── openshift-cluster
│ │ ├── cluster_hosts.yml
│ │ ├── config.yml
│ │ ├── filter_plugins
│ │ ├── launch.yml
│ │ ├── list.yml
│ │ ├── lookup_plugins
│ │ ├── roles
│ │ ├── service.yml
│ │ ├── tasks
│ │ ├── configure_libvirt.yml
│ │ ├── configure_libvirt_network.yml
│ │ ├── configure_libvirt_storage_pool.yml
│ │ └── launch_instances.yml
│ │ ├── templates
│ │ ├── domain.xml
│ │ ├── meta-data
│ │ ├── network.xml
│ │ ├── storage-pool.xml
│ │ └── user-data
│ │ ├── terminate.yml
│ │ ├── update.yml
│ │ └── vars.yml
├── openshift-prometheus
│ ├── README.md
│ ├── rules.yml
│ └── vars.yml
├── openshift-storage.yaml
├── openstack
│ ├── README.md
│ └── openshift-cluster
│ │ ├── cluster_hosts.yml
│ │ ├── config.yml
│ │ ├── files
│ │ ├── heat_stack.yaml
│ │ └── heat_stack_server.yaml
│ │ ├── filter_plugins
│ │ ├── launch.yml
│ │ ├── list.yml
│ │ ├── lookup_plugins
│ │ ├── roles
│ │ ├── terminate.yml
│ │ ├── update.yml
│ │ └── vars.yml
├── post-validation.yaml
├── prerequisite.yaml
├── provisioning
│ └── openstack
│ │ ├── README.md
│ │ ├── advanced-configuration.md
│ │ ├── ansible.cfg
│ │ ├── custom-actions
│ │ ├── add-cas.yml
│ │ ├── add-docker-registry.yml
│ │ ├── add-rhn-pools.yml
│ │ └── add-yum-repos.yml
│ │ ├── custom_flavor_check.yaml
│ │ ├── custom_image_check.yaml
│ │ ├── galaxy-requirements.yaml
│ │ ├── net_vars_check.yaml
│ │ ├── post-install.yml
│ │ ├── post-provision-openstack.yml
│ │ ├── pre-install.yml
│ │ ├── pre_tasks.yml
│ │ ├── prepare-and-format-cinder-volume.yaml
│ │ ├── prerequisites.yml
│ │ ├── provision-openstack.yml
│ │ ├── provision.yaml
│ │ ├── roles
│ │ ├── sample-inventory
│ │ ├── group_vars
│ │ │ ├── OSEv3.yml
│ │ │ └── all.yml
│ │ └── inventory.py
│ │ ├── scale-up.yaml
│ │ └── stack_params.yaml
├── roles
├── unregister.yaml
└── update.yaml
├── reference-architecture
├── 3.9
│ ├── README.md
│ └── playbooks
│ │ ├── deploy_aws.yaml
│ │ ├── deploy_aws_cns.yaml
│ │ ├── lookup_plugins
│ │ ├── ec2_zones_by_region.py
│ │ ├── iam_identity.py
│ │ └── route53_namservers.py
│ │ ├── roles
│ │ └── aws
│ │ │ ├── tasks
│ │ │ ├── configfiles.yaml
│ │ │ ├── configfiles_cns.yaml
│ │ │ ├── configfilesdata.yaml
│ │ │ ├── configfilesdata_cns.yaml
│ │ │ ├── ec2.yaml
│ │ │ ├── ec2_cns.yaml
│ │ │ ├── ec2elb.yaml
│ │ │ ├── ec2keypair.yaml
│ │ │ ├── eip.yaml
│ │ │ ├── elb.yaml
│ │ │ ├── gather_facts.yaml
│ │ │ ├── getazs.yaml
│ │ │ ├── getcreds.yaml
│ │ │ ├── getec2ami.yaml
│ │ │ ├── iam.yaml
│ │ │ ├── igw.yaml
│ │ │ ├── natgw.yaml
│ │ │ ├── route53.yaml
│ │ │ ├── route53record.yaml
│ │ │ ├── routetable.yaml
│ │ │ ├── routetablerule.yaml
│ │ │ ├── s3.yaml
│ │ │ ├── s3policy.yaml
│ │ │ ├── securitygroup.yaml
│ │ │ ├── securitygroup_cns.yaml
│ │ │ ├── securitygrouprule.yaml
│ │ │ ├── securitygrouprule_cns.yaml
│ │ │ ├── sshkeys.yaml
│ │ │ ├── subnet.yaml
│ │ │ ├── tag.yaml
│ │ │ ├── vpc.yaml
│ │ │ └── vpcdhcpopts.yaml
│ │ │ └── templates
│ │ │ ├── domaindelegation.j2
│ │ │ ├── ec2_userdata.sh.j2
│ │ │ ├── iam_policy_cpkuser.json.j2
│ │ │ ├── iam_policy_s3user.json.j2
│ │ │ ├── outputcpk.j2
│ │ │ ├── outputdomaindeleg.j2
│ │ │ ├── outputhosts.j2
│ │ │ ├── outputhostscns.j2
│ │ │ ├── outputhostsgfs.j2
│ │ │ ├── outputs3.j2
│ │ │ ├── outputurls.j2
│ │ │ ├── s3_bucket_policy_registry.json
│ │ │ └── ssh_config.j2
│ │ ├── undeploy_aws.yaml
│ │ └── vars
│ │ └── main.yaml
├── README.md
├── ansible-tower-integration
│ ├── Overview_Diagram.png
│ ├── README.md
│ ├── create_httpd_file
│ │ ├── create_httpd_file.yaml
│ │ └── create_httpd_file
│ │ │ ├── meta
│ │ │ └── main.yaml
│ │ │ └── tasks
│ │ │ └── main.yaml
│ ├── tower_config_aws
│ │ ├── schema.yml
│ │ ├── tower_config_aws.yaml
│ │ ├── tower_config_aws
│ │ │ ├── meta
│ │ │ │ └── main.yaml
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── tower_unconfig_aws
│ │ │ ├── meta
│ │ │ │ └── main.yaml
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ └── workflow-ocp-aws-install-extravars.yaml
│ ├── tower_config_azure
│ │ ├── schema-deploy.yaml
│ │ ├── schema-destroy.yaml
│ │ ├── tower-group-extravars.yaml
│ │ ├── tower_config_azure.yaml
│ │ ├── tower_config_azure
│ │ │ ├── meta
│ │ │ │ └── main.yaml
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ └── workflow-ocp-azure-extravars.yaml
│ ├── tower_unconfig_aws
│ │ ├── tower_unconfig_aws.yaml
│ │ └── tower_unconfig_aws
│ │ │ ├── meta
│ │ │ └── main.yaml
│ │ │ └── tasks
│ │ │ └── main.yaml
│ └── tower_unconfig_azure
│ │ ├── tower_unconfig_azure.yaml
│ │ └── tower_unconfig_azure
│ │ ├── meta
│ │ └── main.yaml
│ │ └── tasks
│ │ └── main.yaml
├── aws-ansible
│ ├── README.md
│ ├── add-cns-storage.py
│ ├── add-crs-storage.py
│ ├── add-node.py
│ ├── ansible.cfg
│ ├── images
│ │ └── arch.jpg
│ ├── inventory
│ │ └── aws
│ │ │ └── hosts
│ │ │ ├── ec2.ini
│ │ │ └── ec2.py
│ ├── ose-on-aws.py
│ └── playbooks
│ │ ├── add-crs.yaml
│ │ ├── add-node.yaml
│ │ ├── create-inventory-file.yaml
│ │ ├── infrastructure.yaml
│ │ ├── library
│ │ ├── cloudformation_facts.py
│ │ ├── ec2_vol_facts.py
│ │ ├── redhat_subscription.py
│ │ └── rpm_q.py
│ │ ├── minor-update.yaml
│ │ ├── node-setup.yaml
│ │ ├── openshift-install.yaml
│ │ ├── openshift-minor-upgrade.yaml
│ │ ├── openshift-setup.yaml
│ │ ├── roles
│ │ ├── cfn-outputs
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── cloudformation-infra
│ │ │ ├── files
│ │ │ │ ├── add-cns-storage-iops.json
│ │ │ │ ├── add-cns-storage.json
│ │ │ │ ├── add-crs-storage-iops.json
│ │ │ │ ├── add-crs-storage.json
│ │ │ │ ├── add-infra-node.json
│ │ │ │ ├── add-node.json
│ │ │ │ ├── brownfield-byo-bastion.json.j2
│ │ │ │ ├── brownfield.json.j2
│ │ │ │ ├── greenfield.json.j2
│ │ │ │ ├── user_data_bastion.yml
│ │ │ │ ├── user_data_gluster.yml
│ │ │ │ ├── user_data_master.yml
│ │ │ │ └── user_data_node.yml
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── gluster-instance-groups
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── host-up
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── instance-groups
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── inventory-file-creation
│ │ │ ├── files
│ │ │ │ └── inventory
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── non-atomic-docker-storage-setup
│ │ │ ├── defaults
│ │ │ │ └── main.yaml
│ │ │ ├── library
│ │ │ │ └── openshift_facts.py
│ │ │ ├── tasks
│ │ │ │ └── main.yaml
│ │ │ └── templates
│ │ │ │ ├── docker-storage-setup-dm.j2
│ │ │ │ └── docker-storage-setup.j2
│ │ ├── openshift-versions
│ │ │ ├── defaults
│ │ │ │ └── main.yaml
│ │ │ ├── library
│ │ │ │ └── openshift_facts.py
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── pre-install-check
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ ├── ssh-key
│ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ └── terminate-all
│ │ │ └── tasks
│ │ │ └── main.yaml
│ │ ├── teardown.yaml
│ │ ├── validation.yaml
│ │ └── vars
│ │ └── main.yaml
├── azure-ansible
│ ├── 3.5
│ │ ├── add_host.sh
│ │ ├── allinone.json
│ │ ├── allinone.sh
│ │ ├── ansibledeployocp
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── ansible.cfg
│ │ │ ├── hosts
│ │ │ ├── playbooks
│ │ │ │ ├── deploy.yaml
│ │ │ │ ├── prepare.yaml
│ │ │ │ ├── roles
│ │ │ │ │ ├── azure-delete
│ │ │ │ │ │ ├── defaults
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ │ └── tasks
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ ├── azure-deploy
│ │ │ │ │ │ ├── defaults
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ │ └── tasks
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ └── prepare
│ │ │ │ │ │ ├── defaults
│ │ │ │ │ │ └── main.yaml
│ │ │ │ │ │ └── tasks
│ │ │ │ │ │ └── main.yaml
│ │ │ │ └── test.yaml.example
│ │ │ └── vars.yaml.example
│ │ ├── azuredeploy.json
│ │ ├── azuredeploy.parameters.json
│ │ ├── bastion.json
│ │ ├── bastion.sh
│ │ ├── create_service_principle.sh
│ │ ├── infranode.json
│ │ ├── master.json
│ │ ├── master.sh
│ │ ├── node.json
│ │ ├── node.sh
│ │ └── testcases
│ │ │ └── test_pv.sh
│ ├── 3.6
│ │ ├── allinone.json
│ │ ├── allinone.sh
│ │ ├── ansibledeployocp
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── ansible.cfg
│ │ │ ├── hosts
│ │ │ ├── playbooks
│ │ │ │ ├── deploy.yaml
│ │ │ │ ├── destroy.yaml
│ │ │ │ ├── prepare.yaml
│ │ │ │ ├── roles
│ │ │ │ │ ├── azure-delete
│ │ │ │ │ │ ├── defaults
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ │ └── tasks
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ ├── azure-deploy
│ │ │ │ │ │ ├── defaults
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ │ └── tasks
│ │ │ │ │ │ │ └── main.yaml
│ │ │ │ │ └── prepare
│ │ │ │ │ │ ├── defaults
│ │ │ │ │ │ └── main.yaml
│ │ │ │ │ │ └── tasks
│ │ │ │ │ │ └── main.yaml
│ │ │ │ └── test.yaml.example
│ │ │ └── vars.yaml.example
│ │ ├── azuredeploy.json
│ │ ├── azuredeploy.parameters.json
│ │ ├── bastion.json
│ │ ├── bastion.sh
│ │ ├── bastion_cns.sh
│ │ ├── bastioncns.json
│ │ ├── create_service_principle.sh
│ │ ├── infranode.json
│ │ ├── master.json
│ │ ├── master.sh
│ │ ├── node.json
│ │ ├── node.sh
│ │ ├── nodecns.json
│ │ ├── ocpcns.json
│ │ └── testcases
│ │ │ └── test_pv.sh
│ ├── 3.7
│ │ ├── allinone.json
│ │ ├── allinone.sh
│ │ ├── azuredeploy.json
│ │ ├── azuredeploy.parameters.json
│ │ ├── bastion.json
│ │ ├── bastion.sh
│ │ ├── create_service_principle.sh
│ │ ├── infranode.json
│ │ ├── master.json
│ │ ├── master.sh
│ │ ├── node.json
│ │ ├── node.sh
│ │ └── testcases
│ │ │ └── test_pv.sh
│ ├── BUGS.md
│ ├── README.md
│ ├── images
│ │ ├── puttygen.png
│ │ └── terminal.png
│ ├── ssh_linux.md
│ ├── ssh_mac.md
│ └── ssh_windows.md
├── day2ops
│ ├── README.md
│ ├── images
│ │ └── README.md
│ ├── playbooks
│ │ ├── README.md
│ │ ├── controller_notification.yaml
│ │ └── openshift-etcd-disaster-recovery
│ │ │ ├── README.md
│ │ │ ├── ansible.cfg
│ │ │ ├── inventories
│ │ │ └── demo_lab01
│ │ │ │ └── ansible_hosts
│ │ │ ├── playbooks
│ │ │ ├── ocp-etc-dr-fallback.yml
│ │ │ ├── ocp-etc-dr-recover.yml
│ │ │ └── ocp-etc-dr-simulate.yml
│ │ │ └── roles
│ │ │ └── etcd_fallback
│ │ │ ├── files
│ │ │ └── etcd_rejoining_mode
│ │ │ ├── tasks
│ │ │ └── main.yml
│ │ │ └── templates
│ │ │ └── etcd-recovery
│ ├── roles
│ │ └── README.md
│ └── scripts
│ │ ├── README.md
│ │ ├── backup_master_node.sh
│ │ ├── ocp36-sat6.py
│ │ ├── project_export.sh
│ │ └── project_import.sh
├── gcp
│ ├── .gitignore
│ ├── 3.9
│ │ ├── README.md
│ │ ├── bastion.sh
│ │ ├── bastion.vars
│ │ ├── create_infrastructure.sh
│ │ ├── delete_infrastructure.sh
│ │ └── infrastructure.vars
│ ├── README.md
│ ├── ansible
│ │ ├── ansible.cfg
│ │ ├── inventory
│ │ │ ├── gce
│ │ │ │ └── hosts
│ │ │ │ │ └── gce.py
│ │ │ ├── group_vars
│ │ │ │ └── all
│ │ │ └── inventory
│ │ └── playbooks
│ │ │ ├── core-infra.yaml
│ │ │ ├── create-inventory-file.yaml
│ │ │ ├── gold-image-include.yaml
│ │ │ ├── gold-image.yaml
│ │ │ ├── library
│ │ │ └── redhat_subscription.py
│ │ │ ├── main.yaml
│ │ │ ├── openshift-install.yaml
│ │ │ ├── openshift-installer-common-vars.yaml
│ │ │ ├── openshift-minor-upgrade.yaml
│ │ │ ├── openshift-post.yaml
│ │ │ ├── openshift-scaleup.yaml
│ │ │ ├── prereq.yaml
│ │ │ ├── roles
│ │ │ ├── ansible-gcp
│ │ │ │ ├── tasks
│ │ │ │ │ └── main.yaml
│ │ │ │ └── templates
│ │ │ │ │ └── gce.ini.j2
│ │ │ ├── deployment-create
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── deployment-delete
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── dns-records-delete
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── dns-records
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── dns-zone
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── empty-image-delete
│ │ │ │ ├── defaults
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── empty-image
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── gcp-ssh-key
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── gold-image-instance
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── gold-image
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── instance-groups
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── inventory-file-creation
│ │ │ │ ├── tasks
│ │ │ │ │ └── main.yaml
│ │ │ │ └── templates
│ │ │ │ │ └── inventory.j2
│ │ │ ├── master-http-proxy
│ │ │ │ ├── handlers
│ │ │ │ │ └── main.yaml
│ │ │ │ ├── tasks
│ │ │ │ │ └── main.yaml
│ │ │ │ └── templates
│ │ │ │ │ └── haproxy.cfg.j2
│ │ │ ├── openshift-ansible-installer
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── pre-flight-validation
│ │ │ │ └── tasks
│ │ │ │ │ ├── check-package.yaml
│ │ │ │ │ └── main.yaml
│ │ │ ├── registry-bucket-delete
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── restrict-gce-metadata
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── rhel-image-delete
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── rhel-image
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── ssh-config-tmp-instance-delete
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── ssh-config-tmp-instance
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── ssh-proxy-delete
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── ssh-proxy
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── ssl-certificate-delete
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── ssl-certificate
│ │ │ │ ├── defaults
│ │ │ │ │ └── main.yaml
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── temp-instance-disk-delete
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ ├── wait-for-instance-group
│ │ │ │ └── tasks
│ │ │ │ │ └── main.yaml
│ │ │ └── wait-for-instance
│ │ │ │ └── tasks
│ │ │ │ └── main.yaml
│ │ │ ├── soft-teardown.yaml
│ │ │ ├── teardown.yaml
│ │ │ ├── unregister.yaml
│ │ │ └── validation.yaml
│ ├── config.yaml.example
│ ├── deployment-manager
│ │ ├── .gitignore
│ │ ├── core-config.yaml.j2
│ │ ├── core.jinja
│ │ ├── gold-image-config.yaml.j2
│ │ ├── gold-image.jinja
│ │ ├── network-config.yaml.j2
│ │ ├── network.jinja
│ │ ├── tmp-instance-config.yaml.j2
│ │ └── tmp-instance.jinja
│ ├── images
│ │ └── arch.png
│ └── ocp-on-gcp.sh
├── images
│ └── OSE-on-VMware-Architecture.jpg
├── osp-cli
│ ├── .gitignore
│ ├── OSEv3.yml.template
│ ├── README.adoc
│ ├── ansible.cfg
│ ├── bastion_host.sh
│ ├── ch5.3_control_network.sh
│ ├── ch5.3_tenant_network.sh
│ ├── ch5.4.2_bastion_security_group.sh
│ ├── ch5.4.3_master_security_group.sh
│ ├── ch5.4.5_infra_node_security_group.sh
│ ├── ch5.4.6_app_node_security_group.sh
│ ├── ch5.5.1_user_data.sh
│ ├── ch5.5.2_boot_bastion.sh
│ ├── ch5.5.3_boot_masters.sh
│ ├── ch5.5.4_cinder_volumes.sh
│ ├── ch5.5.5_boot_app_nodes.sh
│ ├── ch5.5.5_boot_infra_nodes.sh
│ ├── ch5.5.6_disable_port_security.sh
│ ├── ch5.5.7_create_floating_ip_addresses.sh
│ ├── ch5.8.1.1_register.sh
│ ├── ch5.8.1.2_enable_repos.sh
│ ├── ch5.8.1.3_install_openshift-ansible-playbooks.sh
│ ├── ch5.8.3_disable_peerdns_eth0_all_ansible.sh
│ ├── ch5.8.3_enable_eth1_all_ansible.sh
│ ├── ch5.8.3_enable_ocp_repo_all_ansible.sh
│ ├── ch5.8.3_enable_osp_repos_all_ansible.sh
│ ├── ch5.8.3_enable_server_repos_all_ansible.sh
│ ├── ch5.8.3_install_cloud_config_all_ansible.sh
│ ├── ch5.8.3_rhn_subscribe_all_ansible.sh
│ ├── ch5.8.4.1_install_base_packages.sh
│ ├── ch5.8.4.1_register_all_instances.sh
│ ├── ch5.8.4.1_rhn_subscribe_all.sh
│ ├── ch5.8.4_enable_lvmetad_nodes_ansible.sh
│ ├── ch5.8.5_configure_docker_storage_ansible.sh
│ ├── ch5.9_allow_docker_flannel.sh
│ ├── ch5.9_deploy_openshift.sh
│ ├── clean-all.sh
│ ├── clean-dns.sh
│ ├── generate_ansible_config.sh
│ ├── generate_dns_updates.sh
│ ├── generate_inventory.sh
│ ├── infrastructure.sh
│ ├── install_openshift.sh
│ ├── instance_hosts_ansible.sh
│ ├── prepare_bastion.sh
│ ├── prepare_osp_instances_ansible.sh
│ └── validation.yaml
├── osp-dns
│ ├── README.adoc
│ ├── ansible
│ │ ├── bind-server.yml
│ │ └── templates
│ │ │ ├── named.conf.j2
│ │ │ ├── update.key.j2
│ │ │ ├── zone.db.j2
│ │ │ ├── zones.conf-master.j2
│ │ │ └── zones.conf-slave.j2
│ ├── bin
│ │ └── add_a_record.py
│ ├── deploy-dns.yaml
│ ├── heat
│ │ ├── all_slaves.yaml
│ │ ├── dns_service.yaml
│ │ ├── fragments
│ │ │ ├── install_complete.sh
│ │ │ ├── install_python.sh
│ │ │ ├── notify.sh
│ │ │ └── rhn-register.sh
│ │ ├── hosts.yaml
│ │ ├── master.yaml
│ │ ├── network.yaml
│ │ └── one_slave.yaml
│ ├── library
│ │ ├── os_stack.py
│ │ └── redhat_subscription.py
│ └── vars.sample.yaml
├── rhv-ansible
│ ├── README.md
│ ├── ansible.cfg
│ ├── example
│ │ ├── docker-image-pull.yaml
│ │ ├── inventory
│ │ ├── inventory.yaml
│ │ ├── nsupdate-clean.txt
│ │ ├── ocp-vars.yaml
│ │ ├── ocp-vars.yaml.35
│ │ ├── ocp-vars.yaml.36
│ │ ├── ocp-vars.yaml.37
│ │ ├── ocp-vars.yaml.atomic
│ │ ├── ocp-vars.yaml.beta
│ │ ├── ocp-vars.yaml.cen39
│ │ ├── ocp-vars.yaml.centos
│ │ ├── onevm-uninstall.yaml
│ │ ├── ovirt-37-infra.yaml
│ │ ├── ovirt-39-infra.yaml
│ │ ├── ovirt-atomic-infra.yaml
│ │ ├── ovirt-cen39-infra.yaml
│ │ ├── ovirt-centos-infra.yaml
│ │ ├── ovirt-image-only.yaml
│ │ ├── ovirt-vm-infra.yaml
│ │ ├── ovirt-vm-uninstall.yaml
│ │ ├── redeploy.sh
│ │ ├── rhsm-subscription.yaml
│ │ ├── test-docker-storage.yaml
│ │ ├── test-instance-groups.yaml
│ │ ├── uninstall.yaml
│ │ └── vars
│ │ │ ├── ovirt-37-vars.yaml
│ │ │ ├── ovirt-39-vars.yaml
│ │ │ ├── ovirt-atomic-vars.yaml
│ │ │ ├── ovirt-cen39-vars.yaml
│ │ │ ├── ovirt-centos-vars.yaml
│ │ │ ├── ovirt-infra-vars.yaml
│ │ │ └── ovirt-vm-vars.yaml
│ ├── inventory
│ │ ├── ovirt.ini.example
│ │ └── ovirt4.py
│ ├── ovirt-infra-vars.yaml
│ ├── playbooks
│ │ ├── output-dns.yaml
│ │ ├── ovirt-vm-infra.yaml
│ │ └── ovirt-vm-uninstall.yaml
│ ├── requirements.txt
│ └── vault.yaml
└── vmware-ansible
│ ├── README.md
│ ├── ansible.cfg
│ ├── images
│ └── OCP-on-VMware-Architecture.jpg
│ ├── inventory
│ ├── inventory39
│ └── vsphere
│ │ └── vms
│ │ └── vmware_inventory.py
│ └── playbooks
│ ├── add-node-prerequisite.yaml
│ ├── add-node.yaml
│ ├── clean.yaml
│ ├── cleanup-cns.yaml
│ ├── cleanup-crs.yaml
│ ├── cns-node-setup.yaml
│ ├── cns-storage.yaml
│ ├── crs-node-setup.yaml
│ ├── crs-storage.yaml
│ ├── haproxy.yaml
│ ├── heketi-ocp.yaml
│ ├── heketi-setup.yaml
│ ├── infrastructure.yaml
│ ├── library
│ ├── rpm_q.py
│ ├── vmware_folder.py
│ └── vmware_resource_pool.py
│ ├── minor-update.yaml
│ ├── nfs.yaml
│ ├── node-setup.yaml
│ ├── ocp-configure.yaml
│ ├── ocp-demo.yaml
│ ├── ocp-install.yaml
│ ├── ocp-upgrade.yaml
│ ├── ocp39.yaml
│ ├── openshift-validate.yaml
│ ├── prerequisite.yaml
│ ├── prod-ose-cns.yaml
│ ├── prod-ose-crs.yaml
│ ├── prod.yaml
│ ├── roles
│ ├── cloud-provider-setup
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── vars
│ │ │ └── main.yaml
│ ├── create-vm-add-prod-ose
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── create-vm-cns-prod-ose
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── create-vm-crs-prod-ose
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── create-vm-haproxy
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── create-vm-nfs
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── create-vm-prod-ose
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── docker-storage-setup
│ │ ├── defaults
│ │ │ └── main.yaml
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ ├── docker-storage-setup-dm.j2
│ │ │ └── docker-storage-setup-overlayfs.j2
│ ├── etcd-storage
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── haproxy-server-config
│ │ ├── defaults
│ │ │ └── main.yaml
│ │ ├── handlers
│ │ │ └── main.yaml
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ └── haproxy.cfg.j2
│ ├── haproxy-server
│ │ ├── defaults
│ │ │ └── main.yaml
│ │ ├── handlers
│ │ │ └── main.yaml
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ └── haproxy.cfg.j2
│ ├── heketi-configure
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ ├── heketi-secret.yaml.j2
│ │ │ └── storage-crs.json.j2
│ ├── heketi-install
│ │ ├── handlers
│ │ │ └── main.yaml
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ └── heketi.json.j2
│ ├── heketi-ocp-clean
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ ├── heketi-secret.yaml.j2
│ │ │ └── storage-crs.json.j2
│ ├── heketi-ocp
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ ├── heketi-secret.yaml.j2
│ │ │ └── storage-crs.json.j2
│ ├── instance-groups
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── keepalived_haproxy
│ │ ├── defaults
│ │ │ └── main.yaml
│ │ ├── handlers
│ │ │ └── main.yaml
│ │ ├── tasks
│ │ │ └── main.yaml
│ │ └── templates
│ │ │ ├── firewall.sh.j2
│ │ │ └── keepalived.conf.j2
│ ├── nfs-server
│ │ ├── files
│ │ │ └── etc-sysconfig-nfs
│ │ ├── handlers
│ │ │ └── main.yaml
│ │ └── tasks
│ │ │ └── main.yaml
│ └── vmware-guest-setup
│ │ ├── handlers
│ │ └── main.yaml
│ │ ├── tasks
│ │ └── main.yaml
│ │ ├── templates
│ │ └── chrony.conf.j2
│ │ └── vars
│ │ └── main.yaml
│ └── setup.yaml
├── requirements.txt
├── roles
├── atomic-update
│ └── tasks
│ │ └── main.yml
├── common
│ └── defaults
│ │ └── main.yml
├── crs-subscription
│ └── tasks
│ │ └── main.yml
├── deploy-host-nonpriv
│ └── tasks
│ │ └── main.yaml
├── deploy-host
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── dns-records
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── dns-server-detect
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── dns-views
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-storage-setup
│ ├── defaults
│ │ └── main.yaml
│ ├── tasks
│ │ └── main.yaml
│ └── templates
│ │ ├── docker-storage-setup-dm.j2
│ │ └── docker-storage-setup-overlayfs.j2
├── docker
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── docker-storage-setup.j2
├── git-server
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ ├── handlers
│ │ └── main.yaml
│ ├── tasks
│ │ └── main.yaml
│ └── templates
│ │ └── git.conf.j2
├── gluster-crs-prerequisites
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── gluster-ports
│ ├── defaults
│ │ └── main.yaml
│ ├── handlers
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── gluster-rhsm-repos
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── hostnames
│ ├── tasks
│ │ └── main.yaml
│ ├── test
│ │ ├── inv
│ │ ├── roles
│ │ ├── test.retry
│ │ └── test.yaml
│ └── vars
│ │ ├── main.yaml
│ │ └── records.yaml
├── master-prerequisites
│ └── tasks
│ │ └── main.yaml
├── node-network-manager
│ └── tasks
│ │ └── main.yml
├── openshift-emptydir-quota
│ ├── defaults
│ │ └── main.yaml
│ ├── handlers
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── openshift-prep
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ └── prerequisites.yml
├── openshift-pv-cleanup
│ ├── README.md
│ ├── tasks
│ │ └── main.yml
│ └── test
│ │ ├── main.yml
│ │ └── roles
├── openshift-volume-quota
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── openstack-create-cinder-registry
│ └── tasks
│ │ └── main.yaml
├── openstack-stack
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── generate-templates.yml
│ │ ├── main.yml
│ │ └── subnet_update_dns_servers.yaml
│ ├── templates
│ │ ├── heat_stack.yaml.j2
│ │ ├── heat_stack_server.yaml.j2
│ │ └── user_data.j2
│ └── test
│ │ ├── roles
│ │ └── stack-create-test.yml
├── prerequisites
│ ├── defaults
│ │ └── main.yaml
│ ├── library
│ │ └── openshift_facts.py
│ └── tasks
│ │ └── main.yaml
├── registry-scaleup
│ ├── library
│ │ └── openshift_facts.py
│ └── tasks
│ │ └── main.yaml
├── rhsm-repos
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── rhsm-subscription
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── rhsm-timeout
│ ├── library
│ │ └── openshift_facts.py
│ └── tasks
│ │ └── main.yml
├── rhsm-unregister
│ └── tasks
│ │ └── main.yaml
├── rhsm
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── router-scaleup
│ ├── library
│ │ └── openshift_facts.py
│ └── tasks
│ │ └── main.yaml
├── seed-git-server
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ └── main.yaml
├── static_inventory
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── checkpoint.yml
│ │ ├── filter_out_new_app_nodes.yaml
│ │ ├── main.yml
│ │ ├── openstack.yml
│ │ ├── sshconfig.yml
│ │ └── sshtun.yml
│ └── templates
│ │ ├── inventory.j2
│ │ ├── openstack_ssh_config.j2
│ │ └── ssh-tunnel.service.j2
├── subscription-manager
│ ├── README.md
│ ├── pre_tasks
│ │ └── pre_tasks.yml
│ └── tasks
│ │ └── main.yml
├── update-instances
│ ├── library
│ │ └── openshift_facts.py
│ └── tasks
│ │ └── main.yaml
├── validate-app
│ ├── defaults
│ │ └── main.yaml
│ ├── library
│ │ └── openshift_facts.py
│ └── tasks
│ │ └── main.yaml
├── validate-etcd
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── validate-masters
│ └── tasks
│ │ └── main.yaml
└── validate-public
│ └── tasks
│ └── main.yaml
├── setup.cfg
├── setup.py
├── test-requirements.txt
├── tox.ini
└── vagrant
├── README.md
├── Vagrantfile
├── ansible.cfg
├── install.yaml
├── provision
└── setup.sh
├── roles
├── rhsm-repos
└── rhsm-subscription
└── tasks
├── install_bootstrap_enterprise.yaml
└── install_bootstrap_origin.yaml
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | #### What does this PR do?
2 | Brief explanation of the code or documentation change you've made
3 |
4 | #### How should this be manually tested?
5 | Include commands to run your new feature, and post-run commands to validate that it worked. (Please use code blocks to format code samples.)
6 |
7 | #### Is there a relevant Issue open for this?
8 | Provide a link to any open issues that describe the problem you are solving.
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | #*#
3 | .idea
4 | *.iml
5 | *.komodoproject
6 | .loadpath
7 | .project
8 | *.pyc
9 | .pydevproject
10 | *.pyo
11 | *.redcar*
12 | .*.swp
13 | .sass-cache
14 | .rvmrc
15 | .DS_Store
16 | gce.ini
17 | multi_ec2.yaml
18 | multi_inventory.yaml
19 | .vagrant
20 | .tags*
21 | *.ini
22 | *ocp-installer
23 | add-node.json
24 | infrastructure.json
25 | ansible.log
26 | .tox
27 | .molecule
28 | static-inventory
29 | *.retry
30 | roles/infra-ansible/
31 | roles/openshift-ansible/
32 | pytestdebug.log
33 | .cache
34 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sudo: required
3 |
4 | services:
5 | - docker
6 |
7 | cache:
8 | - pip
9 |
10 | language: python
11 | python:
12 | - "2.7"
13 | - "3.5"
14 |
15 | env:
16 | global:
17 | - CI_CONCURRENT_JOBS=1
18 | - OPENSHIFT_ANSIBLE_COMMIT=openshift-ansible-3.7.2-1-8-g56b529e
19 | matrix:
20 | - RUN_OPENSTACK_CI=false
21 | - RUN_OPENSTACK_CI=true
22 |
23 | matrix:
24 | exclude:
25 | - python: "3.5"
26 | env: RUN_OPENSTACK_CI=true
27 |
28 | before_install:
29 | - ci/fetch-master.sh
30 |
31 | install:
32 | - ci/install-tox.sh
33 | - ci/openstack/install.sh
34 |
35 | script:
36 | - ci/run-tox.sh
37 | - ci/openstack/provision.sh
38 | - travis_wait 40 ci/openstack/install-openshift.sh
39 | - ci/openstack/validate.sh
40 |
41 | after_script:
42 | - ci/openstack/teardown.sh
43 |
--------------------------------------------------------------------------------
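
The Travis configuration above only drives the helper scripts under `ci/`; the non-OpenStack leg can be reproduced locally. A minimal sketch, assuming a Python environment with `pip` on the PATH and the repository root as the working directory:

    # Install test dependencies and run the same checks as ci/install-tox.sh + ci/run-tox.sh
    pip install -r requirements.txt
    pip install tox
    tox
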
/bin/README.md:
--------------------------------------------------------------------------------
1 | # The `bin/cluster` tool
2 |
3 | This tool was meant to be the entry point for managing OpenShift clusters,
4 | running against different "providers" (`aws`, `gce`, `libvirt`, `openstack`),
5 | though its use is now deprecated in favor of the [`byo`](../playbooks/byo)
6 | playbooks.
7 |
--------------------------------------------------------------------------------
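
As a reminder of how the deprecated tool was driven, a hypothetical invocation sketch (the action/provider arguments shown here are assumptions; consult the tool's own help output for the real interface):

    # Create and later terminate a cluster on one of the supported providers (hypothetical arguments)
    bin/cluster create aws my-cluster-id
    bin/cluster terminate aws my-cluster-id
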
/ci/fetch-master.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | git fetch origin master:master
4 |
5 | echo Modified files:
6 | git --no-pager diff --name-only master
7 | echo ==========
8 |
9 |
10 |
--------------------------------------------------------------------------------
/ci/install-tox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | source ci/openstack/vars.sh
6 | if [ "${RUN_OPENSTACK_CI:-}" == "true" ]; then
7 | echo RUN_OPENSTACK_CI is set to true, skipping the tox tests.
8 | exit
9 | fi
10 |
11 | pip install -r requirements.txt
12 | pip install tox-travis
13 |
--------------------------------------------------------------------------------
/ci/openstack/install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euox pipefail
4 |
5 | source ci/openstack/vars.sh
6 | if [ "${RUN_OPENSTACK_CI:-}" != "true" ]; then
7 | echo RUN_OPENSTACK_CI is set to false, skipping the openstack end to end test.
8 | exit
9 | fi
10 |
11 | if [ "${CI_OVER_CAPACITY:-}" == "true" ]; then
12 | echo the CI is over capacity, skipping the end-to-end test.
13 | exit 1
14 | fi
15 |
16 | git clone https://github.com/openshift/openshift-ansible ../openshift-ansible
17 | cd ../openshift-ansible
18 | git checkout "${OPENSHIFT_ANSIBLE_COMMIT:-master}"
19 | git status
20 | git show --no-patch
21 | cd ../openshift-ansible-contrib
22 |
23 | pip install ansible==2.3.2.0 shade dnspython python-openstackclient python-heatclient
24 |
--------------------------------------------------------------------------------
/ci/openstack/teardown.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euox pipefail
4 |
5 | source ci/openstack/vars.sh
6 | if [ "${RUN_OPENSTACK_CI:-}" != "true" ]; then
7 | echo RUN_OPENSTACK_CI is set to false, skipping the openstack end to end test.
8 | exit
9 | fi
10 |
11 | openstack keypair delete "$KEYPAIR_NAME" || true
12 | openstack stack delete --wait --yes "$ENV_ID.example.com" || true
13 |
--------------------------------------------------------------------------------
/ci/run-tox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -euo pipefail
4 |
5 | source ci/openstack/vars.sh
6 | if [ "${RUN_OPENSTACK_CI:-}" == "true" ]; then
7 | echo RUN_OPENSTACK_CI is set to true, skipping the tox tests.
8 | exit
9 | fi
10 |
11 | tox
12 |
--------------------------------------------------------------------------------
/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 |
--------------------------------------------------------------------------------
/misc/gce-federation/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 | [defaults]
4 | forks = 10
5 | host_key_checking = False
6 | inventory = inventory
7 | retry_files_enabled = False
8 | #remote_user = fedora
9 | private_key_file=/home/rcook/.ssh/google_compute_engine
10 |
11 | [privilege_escalation]
12 | become = False
13 |
14 | [ssh_connection]
15 | ssh_args = -o ControlMaster=auto -o ControlPersist=900s
16 | control_path = %(directory)s/%%h-%%r
17 | pipelining = True
18 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/default_storage_class.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: StorageClass
3 | apiVersion: storage.k8s.io/v1beta1
4 | metadata:
5 | name: gce-pd
6 | annotations:
7 | storageclass.beta.kubernetes.io/is-default-class: "true"
8 | provisioner: kubernetes.io/gce-pd
9 | parameters:
10 | type: pd-standard
11 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/mongo-deployment-rs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | name: mongo
6 | name: mongo
7 | spec:
8 | replicas: 0
9 | template:
10 | metadata:
11 | labels:
12 | name: mongo
13 | spec:
14 | containers:
15 | - image: mongo
16 | name: mongo
17 | ports:
18 | - name: mongo
19 | containerPort: 27017
20 | args:
21 | - --replSet
22 | - rs0
23 | volumeMounts:
24 | - name: mongo-db
25 | mountPath: /data/db
26 | volumes:
27 | - name: mongo-db
28 | persistentVolumeClaim:
29 | claimName: mongo-storage
30 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/mongo-pvc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 | name: mongo-storage
6 | spec:
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 8Gi
12 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/mongo-rs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: ReplicaSet
3 | metadata:
4 | labels:
5 | name: mongo
6 | name: mongo
7 | spec:
8 | replicas: 3
9 | template:
10 | metadata:
11 | labels:
12 | name: mongo
13 | spec:
14 | containers:
15 | - image: mongo
16 | name: mongo
17 | args:
18 | - --replSet
19 | - rs0
20 | ports:
21 | - name: mongo
22 | containerPort: 27017
23 | volumeMounts:
24 | - name: mongo-db
25 | mountPath: /data/db
26 | volumes:
27 | - name: mongo-db
28 | persistentVolumeClaim:
29 | claimName: mongo-storage
30 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/mongo-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | name: mongo
6 | name: mongo
7 | spec:
8 | type: LoadBalancer
9 | ports:
10 | - port: 27017
11 | targetPort: 27017
12 | selector:
13 | name: mongo
14 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/pacman-deployment-rs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | name: pacman
6 | name: pacman
7 | spec:
8 | replicas: 0
9 | template:
10 | metadata:
11 | labels:
12 | name: pacman
13 | spec:
14 | containers:
15 | - image: detiber/pacman:latest
16 | name: pacman
17 | env:
18 | - name: MONGO_REPLICA_SET
19 | value: rs0
20 | ports:
21 | - containerPort: 8080
22 | name: http-server
23 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/pacman-namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: pacman
6 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/pacman-rs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: ReplicaSet
3 | metadata:
4 | labels:
5 | name: pacman
6 | name: pacman
7 | spec:
8 | replicas: 3
9 | template:
10 | metadata:
11 | labels:
12 | name: pacman
13 | spec:
14 | containers:
15 | - image: detiber/pacman:latest
16 | name: pacman
17 | imagePullPolicy: Always
18 | ports:
19 | - containerPort: 8080
20 | name: http-server
21 |
--------------------------------------------------------------------------------
/misc/gce-federation/files/pacman-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: pacman
5 | labels:
6 | name: pacman
7 | spec:
8 | type: LoadBalancer
9 | ports:
10 | - port: 80
11 | targetPort: 8080
12 | protocol: TCP
13 | selector:
14 | name: pacman
15 |
--------------------------------------------------------------------------------
/misc/gce-federation/inventory/hosts:
--------------------------------------------------------------------------------
1 | [federation]
2 |
--------------------------------------------------------------------------------
/misc/gce-federation/push_images.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | IMAGE_NAMES="hello-openshift openvswitch node origin-f5-router origin-sti-builder origin-docker-builder origin-recycler origin-gitserver origin-federation origin-egress-router origin-docker-registry origin-keepalived-ipfailover origin-haproxy-router origin origin-pod origin-base origin-source origin-deployer"
4 |
5 |
6 | for image in $IMAGE_NAMES; do
7 | docker tag openshift/${image}:latest detiber/${image}:latest
8 | docker push detiber/${image}:latest
9 | done
10 |
--------------------------------------------------------------------------------
/molecule_common/base/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos/systemd:latest
2 |
3 | RUN yum makecache fast && yum update -y && \
4 | yum install -y python sudo yum-plugin-ovl && \
5 | sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf
6 |
--------------------------------------------------------------------------------
/molecule_common/destroy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Ansible 2.2 required
3 | - hosts: localhost
4 | connection: local
5 | gather_facts: no
6 | vars:
7 | molecule_scenario_basename: "{{ molecule_scenario_directory | basename }}"
8 | tasks:
9 | - name: Destroy molecule instance(s)
10 | docker_container:
11 | name: "{{ item }}"
12 | state: absent
13 | with_items: "{{ groups.test_group }}"
14 |
--------------------------------------------------------------------------------
/molecule_common/mock_aws/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos/systemd:latest
2 |
3 | RUN yum makecache fast && yum update -y && \
4 | yum install -y python sudo yum-plugin-ovl && \
5 | sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && \
6 | yum install -y epel-release && \
7 | yum install -y python python-pip python-flask git && \
8 | pip install git+https://github.com/detiber/moto@vpc_tenancy
9 |
--------------------------------------------------------------------------------
/playbooks/aws-prerequisite.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | gather_facts: yes
4 | become: yes
5 | serial: 1
6 | roles:
7 | - role: aws-rhsm-subscription
8 | when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
9 | ansible_distribution == "RedHat" and rhel_subscription_user is not defined
10 |
11 | - hosts: cluster_hosts
12 | gather_facts: no
13 | become: yes
14 | roles:
15 | - role: rhsm-repos
16 | when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
17 | ansible_distribution == "RedHat" and rhel_subscription_user is not defined
18 | - prerequisites
19 |
20 | - hosts: master
21 | gather_facts: no
22 | become: yes
23 | roles:
24 | - master-prerequisites
25 |
--------------------------------------------------------------------------------
/playbooks/aws/README.md:
--------------------------------------------------------------------------------
1 | # AWS playbooks
2 |
3 | This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
4 | which is community supported; most use of it is now considered deprecated.
5 |
--------------------------------------------------------------------------------
/playbooks/aws/openshift-cluster/filter_plugins:
--------------------------------------------------------------------------------
1 | ../../../filter_plugins
--------------------------------------------------------------------------------
/playbooks/aws/openshift-cluster/list.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate oo_list_hosts group
3 | hosts: localhost
4 | gather_facts: no
5 | connection: local
6 | become: no
7 | vars_files:
8 | - vars.yml
9 | tasks:
10 | - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
11 | when: cluster_id != ''
12 | - set_fact: scratch_group=all
13 | when: cluster_id == ''
14 | - add_host:
15 | name: "{{ item }}"
16 | groups: oo_list_hosts
17 | ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
18 | ansible_become: "{{ deployment_vars[deployment_type].become }}"
19 | oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}"
20 | oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}"
21 | with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
22 | - debug:
23 | msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
24 |
--------------------------------------------------------------------------------
/playbooks/aws/openshift-cluster/lookup_plugins:
--------------------------------------------------------------------------------
1 | ../../../lookup_plugins
--------------------------------------------------------------------------------
/playbooks/aws/openshift-cluster/roles:
--------------------------------------------------------------------------------
1 | ../../../roles
--------------------------------------------------------------------------------
/playbooks/aws/openshift-cluster/templates/user_data.j2:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | {% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
3 | mounts:
4 | - [ xvdb ]
5 | - [ ephemeral0 ]
6 | {% endif %}
7 |
8 | write_files:
9 | {% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
10 | - content: |
11 | DEVS=/dev/xvdb
12 | VG=docker_vg
13 | path: /etc/sysconfig/docker-storage-setup
14 | owner: root:root
15 | permissions: '0644'
16 | {% endif %}
17 | {% if deployment_vars[deployment_type].become | bool %}
18 | - path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
19 | permissions: 440
20 | content: |
21 | Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
22 | {% endif %}
23 |
--------------------------------------------------------------------------------
/playbooks/deploy-host.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: no
4 | tasks:
5 | - name: Check for required variables
6 | fail:
7 | msg: "Please pass a valid provider: vsphere,aws,gcp,rhv,osp e.g. -e provider=vsphere"
8 | when: provider is not defined
9 | roles:
10 | - deploy-host-nonpriv
11 |
12 | - hosts: localhost
13 | gather_facts: yes
14 | become: yes
15 | roles:
16 | - rhsm
17 | - deploy-host
18 |
--------------------------------------------------------------------------------
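
The playbook above refuses to run without a `provider` extra variable, so a typical invocation (run from the repository root, following the error message's own example) looks like:

    # Prepare the deployment host for one of the supported providers: vsphere, aws, gcp, rhv or osp
    ansible-playbook playbooks/deploy-host.yaml -e provider=vsphere
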
/playbooks/empty-dir-quota.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: schedulable_nodes
3 | gather_facts: yes
4 | become: yes
5 | roles:
6 | - openshift-emptydir-quota
7 |
--------------------------------------------------------------------------------
/playbooks/gce/README.md:
--------------------------------------------------------------------------------
1 | # GCE playbooks
2 |
3 | This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
4 | which is community supported; most use of it is now considered deprecated.
5 |
--------------------------------------------------------------------------------
/playbooks/gce/openshift-cluster/filter_plugins:
--------------------------------------------------------------------------------
1 | ../../../filter_plugins
--------------------------------------------------------------------------------
/playbooks/gce/openshift-cluster/lookup_plugins:
--------------------------------------------------------------------------------
1 | ../../../lookup_plugins
--------------------------------------------------------------------------------
/playbooks/gce/openshift-cluster/roles:
--------------------------------------------------------------------------------
1 | ../../../roles
--------------------------------------------------------------------------------
/playbooks/gce/openshift-cluster/vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | debug_level: 2
3 |
4 | deployment_rhel7_ent_base:
5 | image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}"
6 | machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
7 | ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
8 | become: yes
9 |
10 | deployment_vars:
11 | origin:
12 | image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}"
13 | machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}"
14 | ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}"
15 | become: yes
16 | enterprise: "{{ deployment_rhel7_ent_base }}"
17 | openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
18 | atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
19 |
--------------------------------------------------------------------------------
/playbooks/library/rpm_q.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/lib_utils/library/rpm_q.py
--------------------------------------------------------------------------------
/playbooks/libvirt/README.md:
--------------------------------------------------------------------------------
1 | # libvirt playbooks
2 |
3 | This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
4 | which is community supported; most use of it is now considered deprecated.
5 |
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/filter_plugins:
--------------------------------------------------------------------------------
1 | ../../../filter_plugins
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/list.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate oo_list_hosts group
3 | hosts: localhost
4 | become: no
5 | connection: local
6 | gather_facts: no
7 | vars_files:
8 | - vars.yml
9 | tasks:
10 | - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
11 | when: cluster_id != ''
12 | - set_fact: scratch_group=all
13 | when: cluster_id == ''
14 | - add_host:
15 | name: "{{ item }}"
16 | groups: oo_list_hosts
17 | ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
18 | ansible_become: "{{ deployment_vars[deployment_type].become }}"
19 | oo_public_ipv4: ""
20 | oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}"
21 | with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"
22 | - debug:
23 | msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
24 |
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/lookup_plugins:
--------------------------------------------------------------------------------
1 | ../../../lookup_plugins
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/roles:
--------------------------------------------------------------------------------
1 | ../../../roles
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: configure_libvirt_storage_pool.yml
3 | when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
4 |
5 | - include: configure_libvirt_network.yml
6 | when: libvirt_network is defined
7 |
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create the libvirt network for OpenShift
3 | virt_net:
4 | name: '{{ libvirt_network }}'
5 | state: '{{ item }}'
6 | autostart: 'yes'
7 | xml: "{{ lookup('template', 'network.xml') }}"
8 | uri: '{{ libvirt_uri }}'
9 | with_items:
10 | - present
11 | - active
12 |
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/templates/meta-data:
--------------------------------------------------------------------------------
1 | instance-id: {{ item[0] }}
2 | hostname: {{ item[0] }}
3 | local-hostname: {{ item[0] }}.example.com
4 |
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/templates/network.xml:
--------------------------------------------------------------------------------
1 |
2 | {{ libvirt_network }}
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml:
--------------------------------------------------------------------------------
1 | <pool type='dir'>
2 |   <name>{{ libvirt_storage_pool }}</name>
3 |   <target>
4 |     <path>{{ libvirt_storage_pool_path }}</path>
5 |   </target>
6 | </pool>
7 |
--------------------------------------------------------------------------------
/playbooks/openshift-prometheus/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift Prometheus
2 |
3 | Manage Prometheus rules deployed on OpenShift.
4 |
5 | ## Dependencies
6 |
7 | - Assumes a running [Prometheus server](https://github.com/openshift/origin/tree/master/examples/prometheus) and, optionally, the [node_exporter](https://github.com/openshift/origin/blob/master/examples/prometheus/node-exporter.yaml).
8 | - Assumes an authenticated 'oc' client
9 | - Assumes a Prometheus configuration that loads rules matching the wildcard 'rules/*.rules'.
10 |
11 | ## Running
12 |
13 | 1. Update the 'vars.yml' file with your rules repo
14 | 1. Run the playbook
15 |
16 | ansible-playbook rules.yml
17 |
--------------------------------------------------------------------------------
/playbooks/openshift-prometheus/vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | custom_rules_repo: https://github.com/aweiteka/origin.git
3 | custom_rules_path: examples/prometheus/rules
4 | custom_rules_branch: ex-prom-rules
5 | custom_rules_configmap: base-rules
6 | metrics_namespace: openshift-metrics
7 |
--------------------------------------------------------------------------------
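
Instead of editing `vars.yml`, the same variables can be overridden at run time; a sketch with placeholder values (the repository URL and branch below are hypothetical):

    # Deploy custom Prometheus rules from your own repo into the openshift-metrics namespace
    ansible-playbook rules.yml \
      -e custom_rules_repo=https://github.com/example-org/prometheus-rules.git \
      -e custom_rules_branch=master \
      -e metrics_namespace=openshift-metrics
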
/playbooks/openshift-storage.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: schedulable_nodes
3 | gather_facts: no
4 | become: yes
5 | roles:
6 | - docker-storage-setup
7 | - openshift-volume-quota
8 |
--------------------------------------------------------------------------------
/playbooks/openstack/README.md:
--------------------------------------------------------------------------------
1 | # OpenStack playbooks
2 |
3 | This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
4 | which is community supported; most use of it is now considered deprecated.
5 |
--------------------------------------------------------------------------------
/playbooks/openstack/openshift-cluster/filter_plugins:
--------------------------------------------------------------------------------
1 | ../../../filter_plugins
--------------------------------------------------------------------------------
/playbooks/openstack/openshift-cluster/lookup_plugins:
--------------------------------------------------------------------------------
1 | ../../../lookup_plugins
--------------------------------------------------------------------------------
/playbooks/openstack/openshift-cluster/roles:
--------------------------------------------------------------------------------
1 | ../../../roles
--------------------------------------------------------------------------------
/playbooks/post-validation.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: no
4 | become: no
5 | roles:
6 | - validate-public
7 |
8 | - hosts: masters
9 | gather_facts: no
10 | roles:
11 | - validate-masters
12 |
13 | - hosts: masters
14 | gather_facts: yes
15 | roles:
16 | - validate-etcd
17 |
18 | - hosts: single_master
19 | gather_facts: yes
20 | become: yes
21 | roles:
22 | - validate-app
23 |
--------------------------------------------------------------------------------
/playbooks/prerequisite.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: nodes
3 | gather_facts: yes
4 | become: yes
5 | roles:
6 | - rhsm-timeout
7 | - role: atomic-update
8 | when: openshift.common.is_atomic
9 |
10 | - hosts: nodes
11 | gather_facts: no
12 | become: yes
13 | serial: 1
14 | roles:
15 | - role: rhsm-subscription
16 | when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
17 | ansible_distribution == "RedHat" and ( rhsm_user is defined or rhsm_activation_key is defined)
18 |
19 | - hosts: nodes
20 | gather_facts: no
21 | become: yes
22 | roles:
23 | - role: rhsm-repos
24 | when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
25 | ansible_distribution == "RedHat" and ( rhsm_user is defined or rhsm_activation_key is defined)
26 | - prerequisites
27 |
28 | - hosts: masters
29 | gather_facts: yes
30 | become: yes
31 | roles:
32 | - master-prerequisites
33 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 | [defaults]
4 | ansible_user = openshift
5 | forks = 50
6 | # work around privilege escalation timeouts in ansible
7 | timeout = 30
8 | host_key_checking = false
9 | inventory = inventory
10 | inventory_ignore_extensions = secrets.py, .pyc, .cfg, .crt
11 | gathering = smart
12 | retry_files_enabled = false
13 | fact_caching = jsonfile
14 | fact_caching_connection = .ansible/cached_facts
15 | fact_caching_timeout = 900
16 | stdout_callback = skippy
17 | callback_whitelist = profile_tasks
18 | lookup_plugins = openshift-ansible-contrib/lookup_plugins
19 |
20 |
21 | [ssh_connection]
22 | ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no
23 | control_path = /var/tmp/%%h-%%r
24 | pipelining = True
25 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/custom-actions/add-cas.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | become: true
4 | vars:
5 | ca_files: []
6 | tasks:
7 | - name: Copy CAs to the trusted CAs location
8 | with_items: "{{ ca_files }}"
9 | copy:
10 | src: "{{ item }}"
11 | dest: /etc/pki/ca-trust/source/anchors/
12 | - name: Update trusted CAs
13 | shell: 'update-ca-trust enable && update-ca-trust extract'
14 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/custom-actions/add-rhn-pools.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | vars:
4 | rhn_pools: []
5 | tasks:
6 | - name: Attach additional RHN pools
7 | become: true
8 | with_items: "{{ rhn_pools }}"
9 | command: "/usr/bin/subscription-manager attach --pool={{ item }}"
10 | register: attach_rhn_pools_result
11 | until: attach_rhn_pools_result.rc == 0
12 | retries: 10
13 | delay: 1
14 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/custom-actions/add-yum-repos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | vars:
4 | yum_repos: []
5 | tasks:
6 | # enable additional yum repos
7 | - name: Add repository
8 | yum_repository:
9 | name: "{{ item.name }}"
10 | description: "{{ item.description }}"
11 | baseurl: "{{ item.baseurl }}"
12 | with_items: "{{ yum_repos }}"
13 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/custom_flavor_check.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Try to get flavor facts
3 | os_flavor_facts:
4 | name: "{{ flavor }}"
5 | register: flavor_result
6 | - name: Check that custom flavor is available
7 | assert:
8 | that: "flavor_result.ansible_facts.openstack_flavors"
9 | msg: "Flavor {{ flavor }} is not available."
10 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/custom_image_check.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Try to get image facts
3 | os_image_facts:
4 | image: "{{ image }}"
5 | register: image_result
6 | - name: Check that custom image is available
7 | assert:
8 | that: "image_result.ansible_facts.openstack_image"
9 | msg: "Image {{ image }} is not available."
10 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/galaxy-requirements.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # This is the Ansible Galaxy requirements file to pull in the correct roles
3 |
4 | # From 'infra-ansible'
5 | - src: https://github.com/redhat-cop/infra-ansible
6 | version: v1.0.0
7 |
8 | # From 'openshift-ansible'
9 | - src: https://github.com/openshift/openshift-ansible
10 | version: master
11 |
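12 | # Sketch of how these requirements are typically installed (adjust the roles
13 | # path to your layout):
14 | #   ansible-galaxy install -r galaxy-requirements.yaml -p <roles-path>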
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/net_vars_check.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check the provider network configuration
3 | fail:
4 | msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network"
5 | when:
6 | - openstack_provider_network_name is defined
7 | - openstack_private_data_network_name is defined
8 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/pre-install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################
3 | # OpenShift Pre-Requisites
4 |
5 | # - subscribe hosts
6 | # - prepare docker
7 | # - other prep (install additional packages, etc.)
8 | #
9 | - hosts: OSEv3
10 | become: true
11 | roles:
12 | - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true }
13 | - { role: docker, tags: 'docker' }
14 | - { role: openshift-prep, tags: 'openshift-prep' }
15 |
16 | - hosts: localhost:cluster_hosts
17 | become: False
18 | tasks:
19 | - include: pre_tasks.yml
20 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/provision.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: "prerequisites.yml"
3 |
4 | - include: "provision-openstack.yml"
5 |
--------------------------------------------------------------------------------
/playbooks/provisioning/openstack/roles:
--------------------------------------------------------------------------------
1 | ../../../roles/
--------------------------------------------------------------------------------
/playbooks/roles:
--------------------------------------------------------------------------------
1 | ../roles
--------------------------------------------------------------------------------
/playbooks/unregister.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | gather_facts: yes
4 | become: yes
5 | roles:
6 | - rhsm-unregister
7 |
--------------------------------------------------------------------------------
/playbooks/update.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | gather_facts: yes
4 | become: yes
5 | roles:
6 | - update-instances
7 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift 3.9 on AWS
2 | For more information on how to use these playbooks visit the reference architecture document
3 | https://access.redhat.com/documentation/en-us/reference_architectures/2018/html/deploying_and_managing_openshift_3.9_on_amazon_web_services/
4 |
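5 | The playbooks under `playbooks/` read their settings from `playbooks/vars/main.yaml`.
6 | A minimal invocation sketch, assuming `vars/main.yaml` has been reviewed and AWS
7 | credentials are available (the CNS playbook shown is only one of the provided
8 | playbooks):
9 |
10 | ```sh
11 | ansible-playbook playbooks/deploy_aws_cns.yaml
12 | ```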
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/deploy_aws_cns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: False
4 | vars_files:
5 | - vars/main.yaml
6 | tasks:
7 | - import_tasks: roles/aws/tasks/getcreds.yaml
8 | - import_tasks: roles/aws/tasks/getazs.yaml
9 | - import_tasks: roles/aws/tasks/getec2ami.yaml
10 | - import_tasks: roles/aws/tasks/vpcdhcpopts.yaml
11 | - import_tasks: roles/aws/tasks/vpc.yaml
12 | - import_tasks: roles/aws/tasks/subnet.yaml
13 | - import_tasks: roles/aws/tasks/securitygroup_cns.yaml
14 | - import_tasks: roles/aws/tasks/securitygrouprule_cns.yaml
15 | - import_tasks: roles/aws/tasks/ec2_cns.yaml
16 | - import_tasks: roles/aws/tasks/configfiles_cns.yaml
17 | - import_tasks: roles/aws/tasks/configfilesdata_cns.yaml
18 |
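19 | # Teardown sketch (an inference from the `state` checks in the imported task
20 | # files, which flip to their "Terminate"/absent behaviour when `state`
21 | # contains "absent"):
22 | #   ansible-playbook deploy_aws_cns.yaml -e state=absent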
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/configfiles_cns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} config files (CNS)"
3 | file:
4 | path: "{{ item }}"
5 | state: "{{ 'touch' if (state is undefined or 'absent' not in state) else 'absent' }}"
6 | register: touchfiles
7 | with_items:
8 | - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostscns"
9 | - "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostsgfs"
10 | changed_when: "\
11 | touchfiles.diff is defined \
12 | and \
13 | ( \
14 | ( touchfiles.diff.before.state == 'absent' and touchfiles.diff.after.state == 'touch' ) \
15 | or \
16 | ( touchfiles.diff.before.state == 'file' and touchfiles.diff.after.state == 'absent' ) \
17 | )"
18 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/configfilesdata_cns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Land openshift-ansible installer host inventory (CNS nodes)"
3 | blockinfile:
4 | path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostscns"
5 | create: yes
6 | marker: "#"
7 | content: |
8 | {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputhostscns.j2') ) }}
9 |
10 | - name: "Land openshift-ansible installer host inventory (GFS)"
11 | blockinfile:
12 | path: "~/.ssh/config-{{ clusterid }}.{{ dns_domain }}-hostsgfs"
13 | create: yes
14 | marker: "#"
15 | content: |
16 | {{ lookup('template', ( playbook_dir + '/roles/aws/templates/outputhostsgfs.j2') ) }}
17 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/ec2keypair.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} EC2 KeyPair"
3 | ec2_key:
4 | key_material: "{{ lookup('file', '~/.ssh/' + clusterid + '.' + dns_domain + '.pub') | expanduser if (state is undefined or 'absent' not in state) else '' }}"
5 | name: "{{ clusterid }}.{{ dns_domain }}"
6 | region: "{{ aws_region }}"
7 | state: "{{ state | default('present') }}"
8 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/eip.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Allocate and associate EIP for the bastion EC2 instance
3 | ec2_eip:
4 | device_id: "{{ ec2bastion.results[0].tagged_instances[0].id }}"
5 | in_vpc: yes
6 | region: "{{ aws_region }}"
7 | state: present
8 | retries: 3
9 | register: eipbastion
10 | when:
11 | - ( state is undefined ) or ( 'absent' not in state )
12 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/getazs.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Set fact: availability_zones"
3 | set_fact:
4 | vpc_subnet_azs: "{{ lookup('ec2_zones_by_region', creds).split(',') }}"
5 |
6 | - debug:
7 | msg:
8 |       - 'Error in env; the AWS region does not provide at least 3 availability zones. Please pick another region!'
9 | when: "( vpc_subnet_azs | length | int ) < 3"
10 | failed_when: "(vpc_subnet_azs | length | int ) < 3"
11 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/igw.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} IGW"
3 | ec2_vpc_igw:
4 | region: "{{ aws_region }}"
5 | state: "{{ state | default('present') }}"
6 | vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
7 | retries: 3
8 | delay: 3
9 | register: igw
10 | when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
11 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/routetable.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} RouteTable"
3 | ec2_vpc_route_table:
4 | purge_routes: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
5 | purge_subnets: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
6 | region: "{{ aws_region }}"
7 | state: "{{ state | default('present') }}"
8 | tags:
9 | Name: "{{ item.name }}"
10 | vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
11 | retries: 3
12 | delay: 3
13 | when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
14 | with_items:
15 | - name: routing
16 | - name: "{{ vpc_subnet_azs.0 }}"
17 | - name: "{{ vpc_subnet_azs.1 }}"
18 | - name: "{{ vpc_subnet_azs.2 }}"
19 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/s3.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} S3 bucket ( hosted registry )"
3 | s3_bucket:
4 | name: "{{ clusterid }}.{{ dns_domain }}-registry"
5 | region: "{{ aws_region }}"
6 | state: "{{ state | default('present') }}"
7 | tags:
8 | Clusterid: "{{ clusterid }}"
9 | retries: 3
10 | delay: 3
11 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/s3policy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Create S3 bucket policy ( hosted registry )"
3 | s3_bucket:
4 | name: "{{ clusterid }}.{{ dns_domain }}-registry"
5 | policy: "{{ lookup('template', playbook_dir + '/roles/aws/templates/s3_bucket_policy_registry.json', convert_data=False) | string }}"
6 | region: "{{ aws_region }}"
7 | state: "{{ state | default('present') }}"
8 | retries: 3
9 | delay: 3
10 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/securitygroup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} SecurityGroups"
3 | ec2_group:
4 | name: "{{ item.name }}"
5 | description: "{{ item.name }}"
6 | purge_rules: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
7 | purge_rules_egress: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
8 | region: "{{ aws_region }}"
9 | state: "{{ state | default('present') }}"
10 | vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
11 | when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
12 | with_items:
13 | - name: "node"
14 | - name: "master"
15 | - name: "infra"
16 | - name: "bastion"
17 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/securitygroup_cns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} SecurityGroup (CNS)"
3 | ec2_group:
4 | name: "{{ item.name }}"
5 | description: "{{ item.name }}"
6 | purge_rules: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
7 | purge_rules_egress: "{{ 'false' if (state is undefined or 'absent' not in state) else 'true' }}"
8 | region: "{{ aws_region }}"
9 | state: "{{ state | default('present') }}"
10 | vpc_id: "{{ vpc.vpc.id if (state is undefined or 'absent' not in state) else vpc.vpcs[0].vpc_id }}"
11 | when: ( vpc.vpc is defined ) or ( vpc.vpcs[0] is defined )
12 | with_items:
13 | - name: "cns"
14 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/tag.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # How to use: import this task file from a play or role, for example:
3 | #- import_tasks: roles/aws/tasks/tag.yaml
4 | #  vars:
5 | #    resource: "{{ vpcdhcpopts.dhcp_options_id }}"
6 | #    tagss: "Key1=Value1, Key2={{ var2 }}, string/{{ var3 }}={{ var3 }}"
7 |
8 | - name: Create tag
9 | ec2_tag:
10 | resource: "{{ resource }}"
11 | region: "{{ aws_region }}"
12 | state: present
13 | tags: "{{ tagss }}"
14 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/vpc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "{{ 'Create' if (state is undefined or 'absent' not in state) else 'Terminate' }} VPC"
3 | ec2_vpc_net:
4 | cidr_block: "{{ vpc_cidr }}"
5 | dhcp_opts_id: "{{ vpcdhcpopts.dhcp_options_id if (state is undefined or 'absent' not in state) else '' }}"
6 | name: "{{ clusterid }}"
7 | region: "{{ aws_region }}"
8 | state: "{{ state | default('present') }}"
9 | retries: 3
10 | delay: 5
11 | register: vpc
12 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/tasks/vpcdhcpopts.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Create VPC DHCP Options"
3 | ec2_vpc_dhcp_options:
4 | domain_name: "{{ 'ec2.internal' if (aws_region == 'us-east-1') else aws_region + '.compute.internal' }}"
5 | region: "{{ aws_region }}"
6 | dns_servers:
7 | - AmazonProvidedDNS
8 | inherit_existing: False
9 | retries: 3
10 | delay: 3
11 | register: vpcdhcpopts
12 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/domaindelegation.j2:
--------------------------------------------------------------------------------
1 | {% for n in ns %}
2 | {{ n }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/iam_policy_s3user.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Action": [
6 | "s3:*"
7 | ],
8 | "Resource": [
9 | "arn:aws:s3:::{{ clusterid }}.{{ dns_domain }}-registry",
10 | "arn:aws:s3:::{{ clusterid }}.{{ dns_domain }}-registry/*"
11 | ],
12 | "Effect": "Allow",
13 | "Sid": "1"
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/outputcpk.j2:
--------------------------------------------------------------------------------
1 | openshift_cloudprovider_kind=aws
2 | openshift_clusterid={{ clusterid }}
3 | {{ lookup('file', (
4 | '~/.ssh/config-' +
5 | clusterid +
6 | '.' +
7 | dns_domain +
8 | '-cpkuser_access_key'
9 | ) ) |
10 | regex_replace(".* OUTPUT .*", '') |
11 | trim }}
12 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/outputdomaindeleg.j2:
--------------------------------------------------------------------------------
1 | {% for n in ns %}
2 | {{ n }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/outputhosts.j2:
--------------------------------------------------------------------------------
1 | [masters]
2 | {%- for i in ec2master.results %}
3 | {{ i.tagged_instances[0].private_dns_name }} openshift_node_labels="{'region': 'master'}"
4 | {%- endfor %}
5 |
6 | [etcd]
7 |
8 | [etcd:children]
9 | masters
10 |
11 | [nodes]
12 | {% for i in ec2node.results -%}
13 | {{ i.tagged_instances[0].private_dns_name }} openshift_node_labels="{'region': 'apps'}"
14 | {% endfor -%}
15 | {% for i in ec2infra.results -%}
16 | {{ i.tagged_instances[0].private_dns_name }} openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
17 | {% endfor %}
18 | [nodes:children]
19 | masters
20 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/outputhostscns.j2:
--------------------------------------------------------------------------------
1 | {% for i in ec2cns.results %}
2 | {{ i.tagged_instances[0].private_dns_name }} openshift_schedulable=True
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/outputhostsgfs.j2:
--------------------------------------------------------------------------------
1 | [glusterfs]
2 | {% for i in ec2cns.results %}
3 | {{ i.tagged_instances[0].private_dns_name }} glusterfs_devices='[ "/dev/nvme3n1" ]'
4 | {% endfor %}
5 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/outputurls.j2:
--------------------------------------------------------------------------------
1 | openshift_master_default_subdomain=apps.{{ clusterid }}.{{ dns_domain }}
2 | openshift_master_cluster_hostname=master.{{ clusterid }}.{{ dns_domain }}
3 | openshift_master_cluster_public_hostname=master.{{ clusterid }}.{{ dns_domain }}
4 |
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/roles/aws/templates/s3_bucket_policy_registry.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "1",
6 | "Effect": "Allow",
7 | "Principal": {
8 | "AWS": "arn:aws:iam::{{ iam_identity }}:user/{{ clusterid }}.{{ dns_domain }}-registry"
9 | },
10 | "Action": "s3:*",
11 | "Resource": "arn:aws:s3:::{{ clusterid }}.{{ dns_domain }}-registry"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/reference-architecture/3.9/playbooks/vars/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | aws_cred_profile: "default"
3 |
4 | # password for ssh key - ~/.ssh/{{ clusterid }}.{{ dns_domain }}
5 | sshkey_password: 'abc123'
6 |
7 | clusterid: "refarch"
8 | dns_domain: "example.com"
9 | aws_region: "us-east-1"
10 |
11 | vpc_cidr: "172.16.0.0/16"
12 |
13 | subnets_public_cidr:
14 | - 172.16.0.0/24
15 | - 172.16.1.0/24
16 | - 172.16.2.0/24
17 |
18 | subnets_private_cidr:
19 | - 172.16.16.0/20
20 | - 172.16.32.0/20
21 | - 172.16.48.0/20
22 |
23 | ec2_type_bastion: "t2.medium"
24 |
25 | #ec2_count_master: 3
26 | ec2_type_master: "m5.2xlarge"
27 |
28 | #ec2_count_infra: 3
29 | ec2_type_infra: "m5.2xlarge"
30 |
31 | #ec2_count_node: 3
32 | ec2_type_node: "m5.2xlarge"
33 |
34 | #ec2_count_cns: 3
35 | ec2_type_cns: "m5.2xlarge"
36 |
37 | rhel_release: "rhel-7.5"
38 |
39 | #ec2ami: ami-abc3231a
40 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/Overview_Diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/ansible-tower-integration/Overview_Diagram.png
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/create_httpd_file/create_httpd_file.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: tag_openshift_role_master
3 | gather_facts: yes
4 | become: yes
5 | roles:
6 | - { role: create_httpd_file, when: ansible_os_family == 'RedHat' }
7 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/create_httpd_file/create_httpd_file/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: James Labocki
4 |   description: Takes a username and password and creates the htpasswd entry used for OCP authentication
5 | company: Red Hat, Inc.
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 6
12 | - 7
13 | categories:
14 | - packaging
15 | - system
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/create_httpd_file/create_httpd_file/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install httpd-tools
3 | yum:
4 | name: httpd-tools
5 | state: present
6 | become: true
7 |
8 | - name: Create htpasswd entry for the OCP user
9 | command: htpasswd -b /etc/origin/master/htpasswd {{ ocp_username }} {{ ocp_password }}
10 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_aws/schema.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - job_template: aws-infrastructure
3 | success_nodes:
4 | - inventory_source: REPLACEME
5 | success_nodes:
6 | - job_template: aws-openshift-install
7 | success_nodes:
8 | - job_template: create_httpd_file
9 | success_nodes:
10 | - job_template: redhat-access-insights-client
11 | success_nodes:
12 | - job_template: aws-openshift-cfme-ocp-provider
13 | success_nodes:
14 | - inventory_source: ONEMORETIME
15 | success_nodes:
16 | - job_template: aws-openshift-cfme-install
17 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_aws/tower_config_aws.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: no
4 | roles:
5 | - { role: tower_config_aws, when: ansible_os_family == 'RedHat' }
6 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_aws/tower_config_aws/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: James Labocki
4 | description: Configures Tower to sync openshift-ansible-contrib
5 | company: Red Hat, Inc.
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 6
12 | - 7
13 | categories:
14 | - packaging
15 | - system
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_aws/tower_unconfig_aws/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: James Labocki
4 |   description: Removes the Tower configuration that syncs openshift-ansible-contrib
5 | company: Red Hat, Inc.
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 6
12 | - 7
13 | categories:
14 | - packaging
15 | - system
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_azure/schema-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - job_template: azure-deploy-ocp
3 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_azure/schema-destroy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - job_template: azure-destroy-ocp
3 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_azure/tower-group-extravars.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | AZURE_GROUP_BY_RESOURCE_GROUP: yes
3 | AZURE_RESOURCE_GROUPS: jlopenshift3
4 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_azure/tower_config_azure.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: no
4 | roles:
5 | - { role: tower_config_azure, when: ansible_os_family == 'RedHat' }
6 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_config_azure/tower_config_azure/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: James Labocki
4 | description: Configures Tower to sync openshift-ansible-contrib
5 | company: Red Hat, Inc.
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 6
12 | - 7
13 | categories:
14 | - packaging
15 | - system
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_unconfig_aws/tower_unconfig_aws.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: no
4 | roles:
5 | - { role: tower_unconfig_aws, when: ansible_os_family == 'RedHat' }
6 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_unconfig_aws/tower_unconfig_aws/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: James Labocki
4 |   description: Removes the Tower configuration that syncs openshift-ansible-contrib
5 | company: Red Hat, Inc.
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 6
12 | - 7
13 | categories:
14 | - packaging
15 | - system
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_unconfig_azure/tower_unconfig_azure.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | become: no
4 | roles:
5 | - { role: tower_unconfig_azure, when: ansible_os_family == 'RedHat' }
6 |
--------------------------------------------------------------------------------
/reference-architecture/ansible-tower-integration/tower_unconfig_azure/tower_unconfig_azure/meta/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: James Labocki
4 |   description: Removes the Tower configuration that syncs openshift-ansible-contrib
5 | company: Red Hat, Inc.
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: EL
10 | versions:
11 | - 6
12 | - 7
13 | categories:
14 | - packaging
15 | - system
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 | [defaults]
4 | #callback_plugins = ../openshift-ansible/ansible-profile/callback_plugins
5 | forks = 50
6 | host_key_checking = False
7 | inventory = inventory/aws/hosts/ec2.py
8 | roles_path = /usr/share/ansible/openshift-ansible/roles:/opt/ansible/roles:./roles:../../roles
9 | remote_user = ec2-user
10 | gathering = smart
11 | retry_files_enabled = false
12 | nocows = true
13 | timeout = 90
14 | #lookup_plugins = ./playbooks/lookup_plugins
15 | #log_path = /tmp/ansible.log
16 |
17 | [privilege_escalation]
18 | become = True
19 |
20 | [ssh_connection]
21 | ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no
22 | control_path = /var/tmp/%%h-%%r
23 | pipelining = True
24 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/images/arch.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/aws-ansible/images/arch.jpg
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/add-crs.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | # Group systems
10 | - gluster-instance-groups
11 |
12 | - hosts: crs
13 | gather_facts: yes
14 | become: yes
15 | serial: 1
16 | vars_files:
17 | - vars/main.yaml
18 | roles:
19 | - rhsm-subscription
20 |
21 | - hosts: crs
22 | gather_facts: yes
23 | become: yes
24 | vars_files:
25 | - vars/main.yaml
26 | roles:
27 | - gluster-rhsm-repos
28 | - gluster-crs-prerequisites
29 | - gluster-ports
30 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/create-inventory-file.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | roles:
6 | - cfn-outputs
7 | - inventory-file-creation
8 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/infrastructure.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | vars:
9 | vpc_subnet_azs: "{{ lookup('ec2_zones_by_region', region) }}"
10 | roles:
11 | # Upload ssh-key
12 | - { role: ssh-key, when: create_key == "yes" }
13 | - { role: cfn-outputs, when: create_vpc == "no" and add_node == "yes" or add_node == "no" and deploy_crs is defined }
14 | # Create VPC and subnets in multiple AZ
15 | - pre-install-check
16 | - cloudformation-infra
17 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/library/rpm_q.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/lib_utils/library/rpm_q.py
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/openshift-install.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: yes
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | # Group systems
10 | - cfn-outputs
11 | - instance-groups
12 |
13 | - hosts: localhost
14 | connection: local
15 | gather_facts: no
16 | become: no
17 | vars_files:
18 | - vars/main.yaml
19 | roles:
20 | - host-up
21 |
22 | - hosts: nodes
23 | gather_facts: yes
24 | become: yes
25 | vars_files:
26 | - vars/main.yaml
27 | roles:
28 | - non-atomic-docker-storage-setup
29 | - openshift-versions
30 |
31 | - include: ../../../playbooks/prerequisite.yaml
32 |
33 | - include: openshift-setup.yaml
34 |
35 | - hosts: localhost
36 | connection: local
37 | gather_facts: no
38 | become: no
39 | roles:
40 | - cfn-outputs
41 | - inventory-file-creation
42 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/openshift-minor-upgrade.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: yes
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | # Group systems
10 | - cfn-outputs
11 | - instance-groups
12 |
13 | - include: minor-update.yaml
14 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/cfn-outputs/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get cfn stack outputs
3 | cloudformation_facts:
4 | stack_name: "{{ stack_name }}"
5 | region: "{{ region }}"
6 | register: stack
7 |
8 | - name: Set s3 facts
9 | set_fact:
10 | s3user_id: "{{ stack['ansible_facts']['cloudformation'][stack_name]['stack_outputs']['S3UserAccessId'] }}"
11 | s3user_secret: "{{ stack['ansible_facts']['cloudformation'][stack_name]['stack_outputs']['S3UserSecretKey'] }}"
12 | s3_bucket_name: "{{ stack['ansible_facts']['cloudformation'][stack_name]['stack_outputs']['S3Bucket'] }}"
13 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/cloudformation-infra/files/user_data_bastion.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | users:
3 | - default
4 |
5 | system_info:
6 | default_user:
7 | name: ec2-user
8 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/cloudformation-infra/files/user_data_gluster.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | cloud_config_modules:
3 |
4 | users:
5 | - default
6 |
7 | system_info:
8 | default_user:
9 | name: ec2-user
10 |
11 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/cloudformation-infra/files/user_data_node.yml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | cloud_config_modules:
3 | - disk_setup
4 | - mounts
5 |
6 | fs_setup:
7 | - label: emptydir
8 | filesystem: xfs
9 | device: /dev/xvdc
10 | partition: auto
11 |
12 | runcmd:
13 | - mkdir -p /var/lib/origin/openshift.local.volumes
14 |
15 | mounts:
16 | - [ /dev/xvdc, /var/lib/origin/openshift.local.volumes, xfs, "defaults,gquota" ]
17 |
18 | write_files:
19 | - content: |
20 | DEVS='/dev/xvdb'
21 | VG=docker_vol
22 | DATA_SIZE=95%VG
23 | STORAGE_DRIVER=overlay2
24 | CONTAINER_ROOT_LV_NAME=dockerlv
25 | CONTAINER_ROOT_LV_MOUNT_PATH=/var/lib/docker
26 | CONTAINER_ROOT_LV_SIZE=100%FREE
27 | ROOT_SIZE=45G
28 | GROWPART=true
29 | path: /etc/sysconfig/docker-storage-setup
30 | owner: root:root
31 |
32 | users:
33 | - default
34 |
35 | system_info:
36 | default_user:
37 | name: ec2-user
38 |
39 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/gluster-instance-groups/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add gluster instances to host group
3 | add_host:
4 | name: "{{ hostvars[item].ec2_tag_Name }}"
5 | groups: crs, storage
6 | with_items: "{{ groups['tag_StorageType_crs'] }}"
7 | when:
8 | - hostvars[item]['ec2_tag_aws_cloudformation_stack_name'] == "{{ stack_name }}-{{ glusterfs_stack_name }}"
9 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/host-up/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check to see if host is available
3 | wait_for:
4 | port: 22
5 | host: "bastion.{{ public_hosted_zone }}"
6 | state: started
7 | delay: "{{ host_up_time }}"
8 | when: byo_bastion == "no"
9 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/non-atomic-docker-storage-setup/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_dev: "/dev/xvdb"
3 | docker_vg: "docker_vol"
4 | docker_data_size: "95%VG"
5 | docker_dm_basesize: "3G"
6 | container_root_lv_name: "dockerlv"
7 | container_root_lv_mount_path: "/var/lib/docker"
8 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/non-atomic-docker-storage-setup/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/non-atomic-docker-storage-setup/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather facts
3 | openshift_facts:
4 | role: common
5 |
6 | - name: Create the docker-storage-setup config file (overlay2)
7 | template:
8 | src: "{{ role_path }}/templates/docker-storage-setup.j2"
9 | dest: /etc/sysconfig/docker-storage-setup
10 | owner: root
11 | group: root
12 | mode: 0644
13 | when: not openshift.common.is_atomic | bool and ansible_distribution != 'CentOS'
14 |
15 | - name: Create the docker-storage-setup config file (devicemapper, CentOS)
16 | template:
17 | src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
18 | dest: /etc/sysconfig/docker-storage-setup
19 | owner: root
20 | group: root
21 | mode: 0644
22 | when: ansible_distribution == 'CentOS' and not openshift.common.is_atomic | bool
23 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/non-atomic-docker-storage-setup/templates/docker-storage-setup-dm.j2:
--------------------------------------------------------------------------------
1 | DEVS="{{ docker_dev }}"
2 | VG="{{ docker_vg }}"
3 | DATA_SIZE="{{ docker_data_size }}"
4 | EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
5 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/non-atomic-docker-storage-setup/templates/docker-storage-setup.j2:
--------------------------------------------------------------------------------
1 | DEVS="{{ docker_dev }}"
2 | VG="{{ docker_vg }}"
3 | DATA_SIZE="{{ docker_data_size }}"
4 | STORAGE_DRIVER=overlay2
5 | CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
6 | CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
7 | CONTAINER_ROOT_LV_SIZE=100%FREE
8 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/openshift-versions/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | origin_release: "3.6.0"
3 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/openshift-versions/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/openshift-versions/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set Origin fact
3 | set_fact:
4 | openshift_release: "{{ origin_release }}"
5 | when: deployment_type == "origin"
6 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/pre-install-check/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check the Ansible version on the local host
3 | assert:
4 | that: "ansible_version.full | version_compare('2.2', '>=')"
5 | msg: "You need Ansible version 2.2.0+"
6 |
7 | - name: Validate that openshift rpms are installed or git repo has been cloned
8 | stat:
9 | path: /usr/share/ansible/openshift-ansible
10 | register: openshift_directory
11 |
12 | - name: Fail if directory doesn't exist
13 | fail:
14 |     msg: "The directory /usr/share/ansible/openshift-ansible must contain the OpenShift playbooks and roles; install them via RPM or a git clone"
15 | when: openshift_directory.stat.exists == False
16 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/roles/ssh-key/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: OSE ec2 key
3 | ec2_key:
4 | name: "{{ keypair }}"
5 | region: "{{ region }}"
6 | key_material: "{{ item }}"
7 | with_file: "{{ key_path }}"
8 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | - cfn-outputs
10 | - instance-groups
11 |
12 | - include: ../../../playbooks/unregister.yaml
13 |
14 | - hosts: localhost
15 | connection: local
16 | gather_facts: no
17 | become: no
18 | vars_files:
19 | - vars/main.yaml
20 | roles:
21 | - terminate-all
22 |
--------------------------------------------------------------------------------
/reference-architecture/aws-ansible/playbooks/validation.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | become: no
5 | vars_files:
6 | - vars/main.yaml
7 | pre_tasks:
8 | - name: set fact
9 | set_fact:
10 | openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_public_hostname }}"
11 | - name: set fact
12 | set_fact:
13 | openshift_master_cluster_hostname: "{{ openshift_master_cluster_hostname }}"
14 | roles:
15 | # Group systems
16 | - instance-groups
17 |
18 | - include: ../../../playbooks/post-validation.yaml
19 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/*
2 | playbooks/*.retry
3 | vars.yaml
4 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | remote_tmp = ~/.ansible/tmp
3 | local_tmp = ~/.ansible/tmp
4 | host_key_checking = False
5 | forks=30
6 | gather_timeout=60
7 | timeout=240
8 | library = /usr/share/ansible:/usr/share/ansible/openshift-ansible/library
9 | [ssh_connection]
10 | control_path = ~/.ansible/cp/ssh%%h-%%p-%%r
11 | ssh_args = -o ControlMaster=auto -o ControlPersist=600s -o ControlPath=~/.ansible/cp-%h-%p-%r
12 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/hosts:
--------------------------------------------------------------------------------
1 | [bastions]
2 | b..cloudapp.azure.com
3 |
4 | [bastions:vars]
5 | ansible_ssh_user=
6 | remote_password=
7 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: false
6 | roles:
7 | - { role: azure-delete, tags: ['delete'] }
8 | - { role: azure-deploy, tags: ['deploy'] }
9 |
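10 | # Invocation sketch (assumes a vars file supplying resourcegroupname and the
11 | # other deployment parameters; the role tags allow running a single phase):
12 | #   ansible-playbook playbooks/deploy.yaml -e @vars.yaml --tags deploy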
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/prepare.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: true
6 | roles:
7 | - { role: prepare, tags: ['prepare'] }
8 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/roles/azure-delete/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | location: "westus"
3 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/roles/azure-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Destroy Azure Deploy
3 | azure_rm_deployment:
4 | state: absent
5 | location: "{{ location }}"
6 | resource_group_name: "{{ resourcegroupname }}"
7 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/roles/azure-deploy/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | templatelink: "https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/master/reference-architecture/azure-ansible/3.5/azuredeploy.json"
3 | numberofnodes: 3
4 | image: "rhel"
5 | mastervmsize: "Standard_DS4_v2"
6 | infranodesize: "Standard_DS4_v2"
7 | nodevmsize: "Standard_DS4_v2"
8 | location: "westus"
9 | openshiftsdn: "redhat/openshift-ovs-multitenant"
10 | metrics: true
11 | logging: true
12 | opslogging: false
13 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/roles/prepare/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | packages:
3 | - ansible
4 | - python-devel
5 | - openssl-devel
6 | - gcc
7 |
8 | epelpackages:
9 | - python2-pip
10 | - python2-jmespath
11 |
12 | pippackages:
13 | - "packaging"
14 | - "msrestazure"
15 | - "azure==2.0.0rc5"
16 | # https://docs.ansible.com/ansible/guide_azure.html#requirements
17 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/ansibledeployocp/playbooks/roles/prepare/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add EPEL repository
3 | yum_repository:
4 | name: epel
5 | description: EPEL YUM repo
6 | mirrorlist: https://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=$basearch
7 | enabled: no
8 | gpgcheck: no
9 | tags: epel
10 |
11 | - name: Install required packages
12 | yum:
13 | name: "{{ item }}"
14 | state: latest
15 | disablerepo: "epel"
16 | with_items: "{{ packages }}"
17 | tags: packages
18 |
19 | - name: Install EPEL required packages
20 | yum:
21 | name: "{{ item }}"
22 | state: latest
23 | enablerepo: "epel"
24 | with_items: "{{ epelpackages }}"
25 | tags: epelpackages
26 |
27 | - name: Install pip required packages
28 | pip:
29 | name: "{{ item }}"
30 | with_items: "{{ pippackages }}"
31 | tags: pip
32 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/azuredeploy.parameters.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
3 | "contentVersion" : "1.0.0.0",
4 | "parameters" : {
5 | "adminUsername" : {
6 | "value" : ""
7 | },
8 | "adminPassword" : {
9 | "value" : ""
10 | },
11 | "sshKeyData" : {
12 | "value" : ""
13 | },
14 | "WildcardZone" : {
15 | "value" : ""
16 | },
17 | "numberOfNodes" : {
18 | "value" : 3
19 | },
20 | "image" : {
21 | "value" : "rhel"
22 | },
23 | "RHNUserName" : {
24 | "value" : ""
25 | },
26 | "RHNPassword" : {
27 | "value" : ""
28 | },
29 | "SubscriptionPoolId" : {
30 | "value" : ""
31 | },
32 | "sshPrivateData" : {
33 | "value" : ""
34 | }
35 | }
36 | }
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/create_service_principle.sh:
--------------------------------------------------------------------------------
1 | # $1 is resource group name
2 | # $2 is the password
3 | mkdir .ocpazure
4 | cd .ocpazure
5 | azure login
6 | azure account show >> account.out
7 | azure ad sp create -n $1 -p $2 --home-page http://${1}web --identifier-uris http://${1}web >> sp.out
8 | azure role assignment create --objectId ff863613-e5e2-4a6b-af07-fff6f2de3f4e -o Reader -c /subscriptions/{subscriptionId}/
9 |
10 |
11 |
12 |
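13 | # Usage sketch (assumes the classic `azure` CLI is installed and logged in, and
14 | # that the subscription id is filled in on the role-assignment line above):
15 | #   ./create_service_principle.sh <resource-group-name> <service-principal-password>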
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.5/testcases/test_pv.sh:
--------------------------------------------------------------------------------
1 | oc new-project demo
2 | oc new-app https://github.com/openshift/ruby-hello-world
3 | oc expose service ruby-hello-world
4 | oc process -n openshift mysql-persistent -v DATABASE_SERVICE_NAME=database | oc create -f -
5 | oc env dc database --list | oc env dc ruby-hello-world -e -
6 | oc get pods
7 | oc get pv
8 |
9 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/*
2 | playbooks/*.retry
3 | vars.yaml
4 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | remote_tmp = ~/.ansible/tmp
3 | local_tmp = ~/.ansible/tmp
4 | host_key_checking = False
5 | forks=30
6 | gather_timeout=60
7 | timeout=240
8 | library = /usr/share/ansible:/usr/share/ansible/openshift-ansible/library
9 | [ssh_connection]
10 | control_path = ~/.ansible/cp/ssh%%h-%%p-%%r
11 | ssh_args = -o ControlMaster=auto -o ControlPersist=600s -o ControlPath=~/.ansible/cp-%h-%p-%r
12 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/hosts:
--------------------------------------------------------------------------------
1 | [bastions]
2 | b..cloudapp.azure.com
3 |
4 | [bastions:vars]
5 | ansible_ssh_user=
6 | remote_password=
7 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: false
6 | roles:
7 | - { role: azure-delete, tags: ['delete'] }
8 | - { role: azure-deploy, tags: ['deploy'] }
9 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/destroy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: false
6 | roles:
7 | - { role: azure-delete, tags: ['delete'] }
8 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/prepare.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: true
6 | roles:
7 | - { role: prepare, tags: ['prepare'] }
8 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/roles/azure-delete/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | location: "westus"
3 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/roles/azure-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Destroy Azure Deploy
3 | azure_rm_deployment:
4 | state: absent
5 | location: "{{ location }}"
6 | resource_group_name: "{{ resourcegroupname }}"
7 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/roles/azure-deploy/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | templatelink: "https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/master/reference-architecture/azure-ansible/3.6/azuredeploy.json"
3 | numberofnodes: 3
4 | image: "rhel"
5 | mastervmsize: "Standard_DS4_v2"
6 | infranodesize: "Standard_DS4_v2"
7 | nodevmsize: "Standard_DS4_v2"
8 | location: "westus"
9 | openshiftsdn: "redhat/openshift-ovs-multitenant"
10 | metrics: true
11 | logging: true
12 | opslogging: false
13 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/roles/prepare/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | packages:
3 | - ansible
4 | - python-devel
5 | - openssl-devel
6 | - gcc
7 |
8 | epelpackages:
9 | - python2-pip
10 | - python2-jmespath
11 |
12 | pippackages:
13 | - "packaging"
14 | - "msrestazure"
15 | - "azure==2.0.0rc5"
16 | # https://docs.ansible.com/ansible/guide_azure.html#requirements
17 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/ansibledeployocp/playbooks/roles/prepare/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add EPEL repository
3 | yum_repository:
4 | name: epel
5 | description: EPEL YUM repo
6 | mirrorlist: https://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=$basearch
7 | enabled: no
8 | gpgcheck: no
9 | tags: epel
10 |
11 | - name: Install required packages
12 | yum:
13 | name: "{{ item }}"
14 | state: latest
15 | disablerepo: "epel"
16 | with_items: "{{ packages }}"
17 | tags: packages
18 |
19 | - name: Install EPEL required packages
20 | yum:
21 | name: "{{ item }}"
22 | state: latest
23 | enablerepo: "epel"
24 | with_items: "{{ epelpackages }}"
25 | tags: epelpackages
26 |
27 | - name: Install pip required packages
28 | pip:
29 | name: "{{ item }}"
30 | with_items: "{{ pippackages }}"
31 | tags: pip
32 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/azuredeploy.parameters.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
3 | "contentVersion" : "1.0.0.0",
4 | "parameters" : {
5 | "adminUsername" : {
6 | "value" : ""
7 | },
8 | "adminPassword" : {
9 | "value" : ""
10 | },
11 | "sshKeyData" : {
12 | "value" : ""
13 | },
14 | "WildcardZone" : {
15 | "value" : ""
16 | },
17 | "numberOfNodes" : {
18 | "value" : 3
19 | },
20 | "image" : {
21 | "value" : "rhel"
22 | },
23 | "RHNUserName" : {
24 | "value" : ""
25 | },
26 | "RHNPassword" : {
27 | "value" : ""
28 | },
29 | "SubscriptionPoolId" : {
30 | "value" : ""
31 | },
32 | "sshPrivateData" : {
33 | "value" : ""
34 | }
35 | }
36 | }
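Assuming the classic `azure` xplat CLI used by the scripts below (and that these flags are available in your CLI version), the template plus this parameters file could be deployed with something like the following; the resource group and deployment names are hypothetical:

```bash
azure config mode arm
azure group deployment create -g myocp-rg -n myocp-deployment \
    -f azuredeploy.json -e azuredeploy.parameters.json
```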
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/create_service_principle.sh:
--------------------------------------------------------------------------------
1 | # $1 is resource group name
2 | # $2 is the password
3 | mkdir .ocpazure
4 | cd .ocpazure
5 | azure login
6 | azure account show >> account.out
7 | azure ad sp create -n $1 -p "$2" --home-page http://${1}web --identifier-uris http://${1}web >> sp.out
8 | azure role assignment create --objectId ff863613-e5e2-4a6b-af07-fff6f2de3f4e -o Reader -c /subscriptions/{subscriptionId}/ # replace the objectId with the one reported in sp.out
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.6/testcases/test_pv.sh:
--------------------------------------------------------------------------------
1 | oc new-project demo
2 | oc new-app https://github.com/openshift/ruby-hello-world
3 | oc expose service ruby-hello-world
4 | oc process -n openshift mysql-persistent -v DATABASE_SERVICE_NAME=database | oc create -f -
5 | oc env dc database --list | oc env dc ruby-hello-world -e -
6 | oc get pods
7 | oc get pv
8 |
9 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.7/azuredeploy.parameters.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema" : "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
3 | "contentVersion" : "1.0.0.0",
4 | "parameters" : {
5 | "adminUsername" : {
6 | "value" : ""
7 | },
8 | "adminPassword" : {
9 | "value" : ""
10 | },
11 | "sshKeyData" : {
12 | "value" : ""
13 | },
14 | "WildcardZone" : {
15 | "value" : ""
16 | },
17 | "numberOfNodes" : {
18 | "value" : 3
19 | },
20 | "image" : {
21 | "value" : "rhel"
22 | },
23 | "RHNUserName" : {
24 | "value" : ""
25 | },
26 | "RHNPassword" : {
27 | "value" : ""
28 | },
29 | "SubscriptionPoolId" : {
30 | "value" : ""
31 | },
32 | "sshPrivateData" : {
33 | "value" : ""
34 | }
35 | }
36 | }
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.7/create_service_principle.sh:
--------------------------------------------------------------------------------
1 | # $1 is resource group name
2 | # $2 is the password
3 | mkdir .ocpazure
4 | cd .ocpazure
5 | azure login
6 | azure account show >> account.out
7 | azure ad sp create -n $1 -p "$2" --home-page http://${1}web --identifier-uris http://${1}web >> sp.out
8 | azure role assignment create --objectId ff863613-e5e2-4a6b-af07-fff6f2de3f4e -o Reader -c /subscriptions/{subscriptionId}/ # replace the objectId with the one reported in sp.out
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/3.7/testcases/test_pv.sh:
--------------------------------------------------------------------------------
1 | oc new-project demo
2 | oc new-app https://github.com/openshift/ruby-hello-world
3 | oc expose service ruby-hello-world
4 | oc process -n openshift mysql-persistent -v DATABASE_SERVICE_NAME=database | oc create -f -
5 | oc env dc database --list | oc env dc ruby-hello-world -e -
6 | oc get pods
7 | oc get pv
8 |
9 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/BUGS.md:
--------------------------------------------------------------------------------
1 | Bugs
2 |
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/images/puttygen.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/azure-ansible/images/puttygen.png
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/images/terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/azure-ansible/images/terminal.png
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/ssh_linux.md:
--------------------------------------------------------------------------------
1 | # Red Hat OpenShift Container Platform on Azure
2 |
3 | ## SSH Key Generation - Linux/CentOS/Fedora
4 | 1. Open a terminal (bash) on your client device.
5 | 2. Enter the commands:
6 | ```bash
7 | mkdir ~/.ssh
8 | chmod 700 ~/.ssh
9 | ssh-keygen -t rsa
10 | ```
11 | 3. At this point you will be prompted:
12 |
13 | ```bash
14 | Generating public/private RSA key pair.
15 | Enter file in which to save the key (/home/b/.ssh/id_rsa):
16 | Enter passphrase (empty for no passphrase):
17 | Enter same passphrase again:
18 | Your identification has been saved in /home/b/.ssh/id_rsa.
19 | Your public key has been saved in /home/b/.ssh/id_rsa.pub.
20 | ```
21 |
22 | Your public and private keys are now available in your home folder under the .ssh directory.
23 |
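If the public key is needed for the Azure template parameters (for example the sshKeyData value), it can be printed and copied with:

```bash
cat ~/.ssh/id_rsa.pub
```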
--------------------------------------------------------------------------------
/reference-architecture/azure-ansible/ssh_windows.md:
--------------------------------------------------------------------------------
1 | # Red Hat OpenShift Container Platform on Azure
2 |
3 | ## SSH Key Generation On Windows
4 | In Windows, use PuTTYgen to generate your public and private keys.
5 |
6 | 1. Install PuTTY -
7 | [PuTTY](http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html)
8 |
9 | 2. Launch PuTTYgen and click the Generate button to generate the key.
10 | ![PuttyGen Screenshot][PuTTYgen]
11 |
12 | 3. Leave the Passphrase and Confirm passphrase fields empty.
13 |
14 | 4. Save the public and private keys by clicking the Save public key and Save private key buttons.
15 |
16 | 5. You will need the private and public keys for the OpenShift install.
17 |
18 |
19 | [PuTTYgen]: https://github.com/openshift/openshift-ansible-contrib/raw/master/reference-architecture/azure-ansible/images/puttygen.png
20 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/README.md:
--------------------------------------------------------------------------------
1 | # OCP Day 2 operations
2 |
3 | This directory contains scripts and tools related to the OCP day 2 operations reference architecture.
4 |
5 | ## Structure
6 |
7 | Location | Role
8 | ------------ | -------------
9 | [playbooks/](playbooks/) | Ansible playbooks
10 | [roles/](roles/) | Ansible roles used by playbooks
11 | [images/](images/) | Images
12 | [scripts/](scripts/) | Scripts
13 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/images/README.md:
--------------------------------------------------------------------------------
1 | # OCP Day 2 operations - Images
2 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/README.md:
--------------------------------------------------------------------------------
1 | # OCP Day 2 operations - Playbooks
2 |
3 | Playbook | Description
4 | ------------ | -------------
5 | [controller_notification.yaml](controller_notification.yaml) | Output the master host currently running the OCP controller (see usage example below)
6 |
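A minimal usage sketch, assuming an inventory that defines the masters group used by the playbook:

```bash
ansible-playbook -i /path/to/inventory controller_notification.yaml
```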
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/controller_notification.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | gather_facts: no
4 | become: true
5 | tasks:
6 | - name: Get controller ID in all masters
7 | shell: |
8 | journalctl -b -u atomic-openshift-master-controllers.service --output cat | grep "Attempting to acquire controller lease as" | tail -1 | awk '{ print $11 }' | sed 's/,//'
9 | changed_when: False
10 | register: masterid
11 |
12 | - name: Get etcd lease
13 | shell: |
14 | source /etc/profile.d/etcdctl.sh &&
15 | etcdctl2 get /openshift.io/leases/controllers
16 | changed_when: False
17 | register: controllerid
18 | delegate_to: "{{ groups.masters.0 }}"
19 | run_once: True
20 |
21 | - debug: msg="Controller running in {{ inventory_hostname }}"
22 | when: 'controllerid.stdout == masterid.stdout'
23 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/openshift-etcd-disaster-recovery/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path = roles/
3 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/openshift-etcd-disaster-recovery/inventories/demo_lab01/ansible_hosts:
--------------------------------------------------------------------------------
1 | [etcd]
2 | stretch-master-0.stretch.e2e.bos.redhat.com
3 | stretch-master-1.stretch.e2e.bos.redhat.com
4 | stretch-master-2.stretch.e2e.bos.redhat.com
5 |
6 | [etcd-pri]
7 | stretch-master-0.stretch.e2e.bos.redhat.com
8 | stretch-master-1.stretch.e2e.bos.redhat.com
9 |
10 | [etcd-sec]
11 | stretch-master-2.stretch.e2e.bos.redhat.com
12 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/openshift-etcd-disaster-recovery/playbooks/ocp-etc-dr-fallback.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure that we have a read/write etcd
3 | hosts: etcd-sec[0]
4 | tasks:
5 | - name: Get the number of alive nodes
6 | shell: 'etcdctl -C https://{{ ansible_default_ipv4.address }}:2379 --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt cluster-health | grep "^member [a-f0-9]* is healthy:" | wc -l'
7 | become: True
8 | register: etcd_healthy_nodes
9 |
10 | - name: Ensure that all nodes are up and running
11 | hosts: etcd-pri
12 | serial: 1
13 | roles:
14 | - etcd_fallback
15 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/openshift-etcd-disaster-recovery/playbooks/ocp-etc-dr-simulate.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Simulate DR for ETCD
3 | hosts: etcd-pri
4 | tasks:
5 | - name: Stop the etcd services from primary DC
6 | systemd:
7 | name: 'etcd'
8 | state: stopped
9 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/openshift-etcd-disaster-recovery/roles/etcd_fallback/files/etcd_rejoining_mode:
--------------------------------------------------------------------------------
1 | [Service]
2 | ExecStartPre=/usr/bin/rm -rf /var/lib/etcd/member
3 | ExecStart=
4 | ExecStart=/usr/bin/docker run --name etcd_container --rm -v /var/lib/etcd/:/var/lib/etcd/:z -v /etc/etcd:/etc/etcd:ro --env-file=/etc/etcd/etcd.conf --env-file=/etc/sysconfig/etcd-recovery --net=host --entrypoint=/usr/bin/etcd registry.access.redhat.com/rhel7/etcd
5 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/playbooks/openshift-etcd-disaster-recovery/roles/etcd_fallback/templates/etcd-recovery:
--------------------------------------------------------------------------------
1 | {{ host_addition.stdout }}
2 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/roles/README.md:
--------------------------------------------------------------------------------
1 | # OCP Day 2 operations - Roles
2 |
--------------------------------------------------------------------------------
/reference-architecture/day2ops/scripts/README.md:
--------------------------------------------------------------------------------
1 | # OCP Day 2 operations - Scripts
2 |
3 | Script | Description
4 | ------------ | -------------
5 | [ocp36-sat6.py](ocp36-sat6.py) | Dynamically import docker images from registry.access.redhat.com into a local Satellite
6 | [project_export.sh](project_export.sh) | Export project content to json files
7 | [project_import.sh](project_import.sh) | Import previously exported project content (json files) into OCP objects
8 | [backup_master_node.sh](backup_master_node.sh) | Create a backup of important files on master or node hosts (root access required)
9 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/.gitignore:
--------------------------------------------------------------------------------
1 | config.yaml
2 | project.json
3 | .ansible
4 | ansible-*.log
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/3.9/bastion.vars:
--------------------------------------------------------------------------------
1 | export OCPVER=3.9
2 | # Configure AK & ORGID
3 | # or RHUSER & RHPASS
4 | export AK=myak
5 | export ORGID=6969
6 | # RHUSER=
7 | # RHPASS=
8 | export POOLID=xxx
9 | export MYUSER=cloud-user
10 | export PROJECTID=refarch
11 | export CLUSTERID=refarch
12 | export DOMAIN=example.com
13 | export SC_STORAGE=1G
14 | export METRICS_STORAGE=20Gi
15 | export LOGGING_STORAGE=100Gi
16 | # 'admin' user password. Generate it with:
17 | # htpasswd -nb admin password | awk -F: '{ print $2 }'
18 | # Keep the single quotation marks: they prevent shell expansion if the hash contains a dollar sign
19 | # In this example the password is 'password'
20 | export HTPASSWD='$apr1$wa4YaR7W$jYiUbDt4WWAuTctQbGXAU0'
21 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 | [defaults]
4 | forks = 50
5 | host_key_checking = false
6 | inventory = inventory
7 | remote_user = cloud-user
8 | private_key_file = ~/.ssh/google_compute_engine
9 | gathering = smart
10 | roles_path = ../../../roles:../../../../openshift-ansible/roles
11 | library = ../../../../openshift-ansible/roles/openshift_facts/library:../../../../openshift-ansible/library
12 | retry_files_enabled = false
13 | fact_caching = jsonfile
14 | fact_caching_connection = .ansible/cached_facts
15 | fact_caching_timeout = 900
16 |
17 | [privilege_escalation]
18 | become = true
19 |
20 | [ssh_connection]
21 | ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no
22 | control_path = /var/tmp/%%h-%%r
23 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/inventory/inventory:
--------------------------------------------------------------------------------
1 | localhost ansible_connection=local ansible_become=false
2 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/create-inventory-file.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create static inventory file
3 | hosts: localhost
4 | roles:
5 | - inventory-file-creation
6 | tasks:
7 | - name: print message with the location of the inventory file
8 | debug:
9 | msg: Static inventory file created as ansible/static-inventory
10 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/gold-image.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if gold image exists
3 | hosts: localhost
4 | tasks:
5 | - name: check for gold image
6 | command: gcloud --project {{ gcloud_project }} compute images describe {{ gold_image }}
7 | register: gold_image_exists
8 | changed_when: false
9 | ignore_errors: true
10 |
11 | - include: gold-image-include.yaml
12 | when: hostvars['localhost']['gold_image_exists'] | failed
13 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: gold-image.yaml
3 | - include: core-infra.yaml
4 | - include: openshift-install.yaml
5 |
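A rough usage sketch, assuming the playbook is run from reference-architecture/gcp/ansible (so the ansible.cfg above applies) and that the required variables are supplied, for example via a hypothetical extra-vars file:

```bash
cd reference-architecture/gcp/ansible
ansible-playbook -e @my-gcp-vars.yaml playbooks/main.yaml
```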
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/openshift-install.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create instance groups
3 | hosts: localhost
4 | roles:
5 | - instance-groups
6 |
7 | - include: ../../../../playbooks/prerequisite.yaml
8 | - include: ../../../../../openshift-ansible/playbooks/byo/config.yml
9 | - include: openshift-post.yaml
10 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/openshift-minor-upgrade.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create instance groups
3 | hosts: localhost
4 | roles:
5 | - instance-groups
6 |
7 | - include: ../../../../../openshift-ansible/playbooks/byo/openshift-cluster/upgrades/{{ openshift_vers | default('v3_6') }}/upgrade.yml
8 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/prereq.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: configure ansible connection to GCP and basic prerequisites
3 | hosts: localhost
4 | roles:
5 | - pre-flight-validation
6 | - openshift-ansible-installer
7 | - ansible-gcp
8 | - dns-zone
9 | - gcp-ssh-key
10 | - role: rhel-image
11 | when: openshift_deployment_type == 'openshift-enterprise'
12 | - role: empty-image
13 | - role: deployment-create
14 | deployment_name: network
15 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/deployment-create/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | deployment_name_with_prefix: '{{ prefix }}-{{ deployment_name }}'
3 | deployment_config_template: '{{ playbook_dir }}/../../deployment-manager/{{ deployment_name }}-config.yaml.j2'
4 | deployment_config: '{{ playbook_dir }}/../../deployment-manager/{{ deployment_name_with_prefix }}-config.yaml'
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/deployment-delete/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | deployment_name_with_prefix: '{{ prefix }}-{{ deployment_name }}'
3 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/deployment-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete deployment {{ deployment_name_with_prefix }}
3 | command: gcloud -q --project {{ gcloud_project }} deployment-manager deployments delete {{ deployment_name_with_prefix }}
4 | ignore_errors: true
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/dns-records-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete ns records
3 | gcdns_record:
4 | record: '{{ item.record }}'
5 | zone: '{{ public_hosted_zone }}'
6 | type: '{{ item.type }}'
7 | overwrite: true
8 | service_account_email: '{{ service_account_id }}'
9 | credentials_file: '{{ credentials_file }}'
10 | project_id: '{{ gcloud_project }}'
11 | state: absent
12 | with_items:
13 | - record: '{{ openshift_master_cluster_public_hostname }}'
14 | type: A
15 | - record: '{{ openshift_master_cluster_hostname }}'
16 | type: A
17 | - record: '{{ wildcard_zone }}'
18 | type: A
19 | - record: '*.{{ wildcard_zone }}'
20 | type: CNAME
21 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/empty-image-delete/defaults:
--------------------------------------------------------------------------------
1 | ../empty-image/defaults/
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/empty-image-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete empty gce image
3 | command: gcloud --project {{ gcloud_project }} compute images delete {{ empty_image_gce }}
4 | ignore_errors: true
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/empty-image/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | empty_image: empty-1g
3 | empty_image_raw: disk.raw
4 | empty_image_archive: '{{ empty_image }}.tar.gz'
5 | empty_image_bucket: gs://{{ gcloud_project }}-empty-raw-image
6 | empty_image_in_bucket: '{{ empty_image_bucket }}/{{ empty_image_archive }}'
7 | empty_image_gce: '{{ empty_image }}'
8 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/gcp-ssh-key/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | gcp_ssh_key: '~/.ssh/google_compute_engine'
3 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/gold-image/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create gold image
3 | include_role:
4 | name: deployment-create
5 | vars:
6 | deployment_name: gold-image
7 | deployment_name_with_prefix: '{{ prefix }}-{{ deployment_name }}{{ "-origin" if openshift_deployment_type == "origin" else "" }}'
8 |
9 | - name: delete temp instance disk
10 | gce_pd:
11 | name: '{{ prefix }}-tmp-instance'
12 | zone: '{{ gcloud_zone }}'
13 | service_account_email: '{{ service_account_id }}'
14 | credentials_file: '{{ credentials_file }}'
15 | project_id: '{{ gcloud_project }}'
16 | state: absent
17 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/inventory-file-creation/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create static inventory file
3 | template:
4 | src: inventory.j2
5 | dest: ../static-inventory
6 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/master-http-proxy/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart haproxy
3 | service:
4 | name: haproxy
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/master-http-proxy/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install haproxy
3 | package:
4 | name: haproxy
5 | state: present
6 |
7 | - name: configure haproxy
8 | template:
9 | src: haproxy.cfg.j2
10 | dest: /etc/haproxy/haproxy.cfg
11 | notify: restart haproxy
12 |
13 | - name: start and enable haproxy service
14 | service:
15 | name: haproxy
16 | enabled: true
17 | state: started
18 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/pre-flight-validation/tasks/check-package.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if package {{ item }} is installed
3 | command: rpm -q --whatprovides {{ item }}
4 | register: package_result
5 | ignore_errors: true
6 | changed_when: false
7 |
8 | - name: assert that package {{ item }} exists
9 | assert:
10 | that:
11 | - package_result | succeeded
12 | msg: Package '{{ item }}' is required. Please install it with your package manager, e.g. 'sudo yum install {{ item }}'
13 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/registry-bucket-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete registry bucket with all content
3 | command: gsutil -m rm -r gs://{{ gcs_registry_bucket }}
4 | ignore_errors: true
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/rhel-image-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete rhel gce image
3 | command: gcloud --project {{ gcloud_project }} compute images delete {{ rhel_image_gce }}
4 | ignore_errors: true
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/rhel-image/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | rhel_image_dir: '{{ rhel_image_path | dirname }}'
3 | rhel_image_raw: '{{ rhel_image_dir }}/disk.raw'
4 | rhel_image_archive: '{{ rhel_image_dir }}/{{ rhel_image }}.tar.gz'
5 | rhel_image_bucket: gs://{{ gcloud_project }}-rhel-guest-raw-image
6 | rhel_image_in_bucket: '{{ rhel_image_bucket }}/{{ rhel_image_archive | basename }}'
7 | rhel_image_gce_family: 'rhel-guest-clean'
8 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssh-config-tmp-instance-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete ssh config for ocp temp instance
3 | blockinfile:
4 | dest: '{{ ssh_config_file }}'
5 | create: true
6 | mode: 0600
7 | marker: '# {mark} OPENSHIFT ON GCP TEMP INSTANCE BLOCK'
8 | state: absent
9 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssh-config-tmp-instance/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: configure ssh for ocp temp instance
3 | blockinfile:
4 | dest: '{{ ssh_config_file }}'
5 | create: true
6 | mode: 0600
7 | marker: '# {mark} OPENSHIFT ON GCP TEMP INSTANCE BLOCK'
8 | state: present
9 | block: |
10 | Host {{ prefix }}-tmp-instance
11 | HostName {{ hostvars[prefix + '-tmp-instance']['gce_public_ip'] }}
12 | User cloud-user
13 | IdentityFile ~/.ssh/google_compute_engine
14 | UserKnownHostsFile ~/.ssh/google_compute_known_hosts
15 | HostKeyAlias compute.{{ hostvars[prefix + '-tmp-instance']['gce_id'] }}
16 | IdentitiesOnly yes
17 | CheckHostIP no
18 | StrictHostKeyChecking no
19 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssh-proxy-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete ssh proxy configuration
3 | blockinfile:
4 | dest: '{{ ssh_config_file }}'
5 | mode: 0600
6 | marker: '# {mark} OPENSHIFT ON GCP BLOCK'
7 | state: absent
8 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssh-proxy/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: configure ssh proxy via bastion host
3 | blockinfile:
4 | dest: '{{ ssh_config_file }}'
5 | create: true
6 | mode: 0600
7 | marker: '# {mark} OPENSHIFT ON GCP BLOCK'
8 | state: present
9 | block: |
10 | Host {{ prefix }}-bastion
11 | HostName {{ hostvars[prefix + '-bastion']['gce_public_ip'] }}
12 | User cloud-user
13 | IdentityFile ~/.ssh/google_compute_engine
14 | UserKnownHostsFile ~/.ssh/google_compute_known_hosts
15 | HostKeyAlias compute.{{ hostvars[prefix + '-bastion']['gce_id'] }}
16 | IdentitiesOnly yes
17 | CheckHostIP no
18 | StrictHostKeyChecking no
19 | {% for item in groups['tag_' + prefix] %}
20 | Host {{ item }}
21 | User cloud-user
22 | IdentityFile ~/.ssh/google_compute_engine
23 | proxycommand ssh {{ prefix }}-bastion -W %h:%p
24 | {% endfor %}
25 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssl-certificate-delete/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ssl_lb_cert: master-https-lb-cert
3 | ssl_lb_cert_with_prefix: '{{ prefix }}-{{ ssl_lb_cert }}'
4 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssl-certificate-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete ssl certificate
3 | command: gcloud --project {{ gcloud_project }} compute ssl-certificates delete {{ ssl_lb_cert_with_prefix }}
4 | ignore_errors: true
5 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/ssl-certificate/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ssl_lb_cert: master-https-lb-cert
3 | ssl_lb_cert_with_prefix: '{{ prefix }}-{{ ssl_lb_cert }}'
4 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/temp-instance-disk-delete/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: delete temp instance disk
3 | gce_pd:
4 | name: '{{ prefix }}-tmp-instance'
5 | zone: '{{ gcloud_zone }}'
6 | service_account_email: '{{ service_account_id }}'
7 | credentials_file: '{{ credentials_file }}'
8 | project_id: '{{ gcloud_project }}'
9 | state: absent
10 | ignore_errors: true
11 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/wait-for-instance-group/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: wait until the instance group {{ instance_group }} is ready
3 | command: gcloud --project {{ gcloud_project }} compute instance-groups managed describe {{ instance_group }} --region {{ gcloud_region }} --format 'yaml(targetSize, currentActions.none)'
4 | register: instance_group_size_and_action_none
5 | until: (instance_group_size_and_action_none.stdout | from_yaml).targetSize == (instance_group_size_and_action_none.stdout | from_yaml).currentActions.none
6 | retries: 20
7 | delay: 5
8 | changed_when: false
9 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/roles/wait-for-instance/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Ansible bug keeps wait_for_connection module from working through the bastion host
3 | # https://github.com/ansible/ansible/issues/23774
4 | - name: wait for the instance {{ instance }} to come up
5 | wait_for:
6 | host: '{{ instance }}'
7 | port: 22
8 | state: started
9 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/soft-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if ssh proxy is configured
3 | hosts: localhost
4 | tasks:
5 | - name: check if ssh proxy is configured
6 | command: grep -q 'OPENSHIFT ON GCP BLOCK' {{ ssh_config_file }}
7 | register: ssh_proxy_check
8 | ignore_errors: true
9 |
10 | - include: unregister.yaml
11 | when: hostvars['localhost'].ssh_proxy_check | succeeded
12 |
13 | - name: teardown the created infrastructure
14 | hosts: localhost
15 | roles:
16 | - registry-bucket-delete
17 | - dns-records-delete
18 | - role: deployment-delete
19 | deployment_name: core
20 | - ssh-proxy-delete
21 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: soft-teardown.yaml
3 |
4 | - name: teardown the created infrastructure
5 | hosts: localhost
6 | roles:
7 | - ssl-certificate-delete
8 | - role: deployment-delete
9 | deployment_name: tmp-instance
10 | - temp-instance-disk-delete
11 | - ssh-config-tmp-instance-delete
12 | - role: deployment-delete
13 | deployment_name: network
14 | - role: deployment-delete
15 | deployment_name: gold-image
16 | deployment_name_with_prefix: '{{ prefix }}-{{ deployment_name }}{{ "-origin" if openshift_deployment_type == "origin" else "" }}'
17 | when: delete_gold_image | bool
18 | - role: rhel-image-delete
19 | when: delete_image | bool
20 | - role: empty-image-delete
21 | when: delete_image | bool
22 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/unregister.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create instance groups
3 | hosts: localhost
4 | roles:
5 | - instance-groups
6 |
7 | - include: ../../../../playbooks/unregister.yaml
8 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/ansible/playbooks/validation.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create instance groups
3 | hosts: localhost
4 | roles:
5 | - instance-groups
6 |
7 | - include: ../../../../playbooks/post-validation.yaml
8 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/deployment-manager/.gitignore:
--------------------------------------------------------------------------------
1 | *.yaml
2 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/deployment-manager/gold-image-config.yaml.j2:
--------------------------------------------------------------------------------
1 | imports:
2 | - path: gold-image.jinja
3 | resources:
4 | - name: gold-image
5 | type: gold-image.jinja
6 | properties:
7 | prefix: {{ prefix }}
8 | zone: {{ gcloud_zone }}
9 | gold_image: {{ gold_image }}
10 | gold_image_family: {{ gold_image_family }}
11 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/deployment-manager/gold-image.jinja:
--------------------------------------------------------------------------------
1 | resources:
2 | - name: {{ properties['gold_image'] }}
3 | properties:
4 | family: {{ properties['gold_image_family'] }}
5 | sourceDisk: zones/{{ properties['zone'] }}/disks/{{ properties['prefix'] }}-tmp-instance
6 | type: compute.v1.image
7 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/deployment-manager/network-config.yaml.j2:
--------------------------------------------------------------------------------
1 | imports:
2 | - path: network.jinja
3 | resources:
4 | - name: network
5 | type: network.jinja
6 | properties:
7 | prefix: {{ prefix }}
8 | region: {{ gcloud_region }}
9 | console_port: {{ console_port }}
10 | gce_vpc_custom_subnet_cidr: {{ gce_vpc_custom_subnet_cidr }}
11 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/deployment-manager/tmp-instance-config.yaml.j2:
--------------------------------------------------------------------------------
1 | imports:
2 | - path: tmp-instance.jinja
3 | resources:
4 | - name: tmp-instance
5 | type: tmp-instance.jinja
6 | properties:
7 | prefix: {{ prefix }}
8 | zone: {{ gcloud_zone }}
9 | region: {{ gcloud_region }}
10 | gce_vpc_custom_subnet_cidr: {{ gce_vpc_custom_subnet_cidr }}
11 | source_family: {{ 'projects/' + gcloud_project + '/global/images/family/rhel-guest-clean' if openshift_deployment_type == 'openshift-enterprise' else 'projects/centos-cloud/global/images/family/centos-7' }}
12 |
--------------------------------------------------------------------------------
/reference-architecture/gcp/images/arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/gcp/images/arch.png
--------------------------------------------------------------------------------
/reference-architecture/images/OSE-on-VMware-Architecture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/images/OSE-on-VMware-Architecture.jpg
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/.gitignore:
--------------------------------------------------------------------------------
1 | user-data
2 | REFARCH_ENV
3 | MANUAL_ENV
4 | keys
5 | OSEv3.yml
6 | inventory
7 | rhn_credentials
8 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 | [defaults]
4 | remote_user = cloud-user
5 | forks = 50
6 | host_key_checking = False
7 | gathering = smart
8 | retry_files_enabled = false
9 | nocows = true
10 |
11 | [privilege_escalation]
12 | become = True
13 |
14 | [ssh_connection]
15 | ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no
16 | control_path = /var/tmp/%%h-%%r
17 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/bastion_host.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Define RHN_USERNAME, RHN_PASSWORD and RHN_POOL_ID in ./rhn_credentials
3 | [ -r ./rhn_credentials ] && source ./rhn_credentials
4 |
5 | sh ./ch5.8.1.1_register.sh
6 | sh ./ch5.8.1.2_enable_repos.sh
7 | sh ./ch5.8.1.3_install_openshift-ansible-playbooks.sh
8 |
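The sourced rhn_credentials file (listed in the .gitignore above) is expected to define the three variables; an illustrative example with placeholder values:

```sh
# rhn_credentials -- placeholder values, replace with your own
RHN_USERNAME=user@example.com
RHN_PASSWORD=changeme
RHN_POOL_ID=0123456789abcdef0123456789abcdef
```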
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.3_control_network.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OCP3_DNS_NAMESERVER=${OCP3_DNS_NAMESERVER:-8.8.8.8}
3 | PUBLIC_NETWORK=${PUBLIC_NETWORK:-public_network}
4 | CONTROL_SUBNET_CIDR=${CONTROL_SUBNET_CIDR:-172.18.10.0/24}
5 |
6 | openstack network create control-network
7 | openstack subnet create --network control-network --subnet-range ${CONTROL_SUBNET_CIDR} \
8 | --dns-nameserver ${OCP3_DNS_NAMESERVER} control-subnet
9 | openstack router create control-router
10 | openstack router add subnet control-router control-subnet
11 | neutron router-gateway-set control-router ${PUBLIC_NETWORK}
12 |
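The last command uses the legacy neutron client; with a recent python-openstackclient the same gateway can be set through the unified CLI:

```bash
openstack router set --external-gateway ${PUBLIC_NETWORK} control-router
```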
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.3_tenant_network.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | TENANT_SUBNET_CIDR=${TENANT_SUBNET_CIDR:-172.18.20.0/24}
3 |
4 | openstack network create tenant-network
5 | openstack subnet create --network tenant-network \
6 | --subnet-range ${TENANT_SUBNET_CIDR} --gateway none tenant-subnet
7 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.4.2_bastion_security_group.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | openstack security group create bastion-sg
3 | openstack security group rule create --ingress --protocol icmp bastion-sg
4 | openstack security group rule create --protocol tcp \
5 | --dst-port 22 bastion-sg
6 | #Verification of security group
7 | openstack security group show bastion-sg
8 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.4.3_master_security_group.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | openstack security group create master-sg
3 | openstack security group rule create --protocol icmp master-sg
4 | neutron security-group-rule-create master-sg \
5 | --protocol tcp --port-range-min 22 --port-range-max 22 \
6 | --remote-group-id bastion-sg
7 |
8 |
9 | neutron security-group-rule-create master-sg \
10 | --protocol tcp --port-range-min 2380 --port-range-max 2380 \
11 | --remote-group-id master-sg
12 |
13 | for PORT in 53 2379 2380 8053 8443 10250 24224
14 | do
15 | openstack security group rule create --protocol tcp --dst-port $PORT master-sg
16 | done
17 |
18 | for PORT in 53 4789 8053 24224
19 | do
20 | openstack security group rule create --protocol udp --dst-port $PORT master-sg
21 | done
22 |
23 |
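The neutron security-group-rule-create calls above can also be expressed with the unified openstack CLI on newer clients, for example for the SSH-from-bastion rule:

```bash
openstack security group rule create --protocol tcp --dst-port 22 \
    --remote-group bastion-sg master-sg
```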
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.4.5_infra_node_security_group.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | openstack security group create infra-node-sg
3 | openstack security group rule create --protocol icmp infra-node-sg
4 | neutron security-group-rule-create infra-node-sg \
5 | --protocol tcp --port-range-min 22 --port-range-max 22 \
6 | --remote-group-id bastion-sg
7 |
8 | for PORT in 80 443 10250 4789
9 | do
10 | openstack security group rule create --protocol tcp --dst-port $PORT infra-node-sg
11 | done
12 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.4.6_app_node_security_group.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | openstack security group create app-node-sg
3 | openstack security group rule create --protocol icmp app-node-sg
4 | neutron security-group-rule-create app-node-sg \
5 | --protocol tcp --port-range-min 22 --port-range-max 22 \
6 | --remote-group-id bastion-sg
7 | openstack security group rule create --protocol tcp --dst-port 10250 app-node-sg
8 | openstack security group rule create --protocol udp --dst-port 4789 app-node-sg
9 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.5.2_boot_bastion.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OCP3_DOMAIN=${OCP3_DOMAIN:-ocp3.example.com}
3 | OCP3_CONTROL_DOMAIN=${OCP3_CONTROL_DOMAIN:-control.${OCP3_DOMAIN}}
4 | IMAGE=${IMAGE:-rhel7}
5 | FLAVOR=${FLAVOR:-m1.small}
6 | OCP3_KEY_NAME=${OCP3_KEY_NAME:-ocp3}
7 | netid1=$(openstack network list | awk "/control-network/ { print \$2 }")
8 | netid2=$(openstack network list | awk "/tenant-network/ { print \$2 }")
9 | openstack server create --flavor ${FLAVOR} --image ${IMAGE} \
10 | --key-name ${OCP3_KEY_NAME} \
11 | --nic net-id=$netid1 \
12 | --nic net-id=$netid2 \
13 | --security-group bastion-sg --user-data=user-data/bastion.yaml \
14 | bastion.${OCP3_CONTROL_DOMAIN}
15 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.5.3_boot_masters.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OCP3_DOMAIN=${OCP3_DOMAIN:-ocp3.example.com}
3 | OCP3_CONTROL_DOMAIN=${OCP3_CONTROL_DOMAIN:-control.${OCP3_DOMAIN}}
4 | OCP3_KEY_NAME=${OCP3_KEY_NAME:-ocp3}
5 | IMAGE=${IMAGE:-rhel7}
6 | FLAVOR=${FLAVOR:-m1.small}
7 | MASTER_COUNT=${MASTER_COUNT:-3}
8 | netid1=$(openstack network list | awk "/control-network/ { print \$2 }")
9 | netid2=$(openstack network list | awk "/tenant-network/ { print \$2 }")
10 | for HOSTNUM in $(seq 0 $(($MASTER_COUNT-1))) ; do
11 | openstack server create --flavor ${FLAVOR} --image ${IMAGE} \
12 | --key-name ${OCP3_KEY_NAME} \
13 | --nic net-id=$netid1 --nic net-id=$netid2 \
14 | --security-group master-sg --user-data=user-data/master-${HOSTNUM}.yaml \
15 | master-${HOSTNUM}.${OCP3_CONTROL_DOMAIN}
16 | done
17 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.5.4_cinder_volumes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | VOLUME_SIZE=${VOLUME_SIZE:-8}
3 | BASTION="bastion"
4 | INFRA_NODE_COUNT=${INFRA_NODE_COUNT:-2}
5 | APP_NODE_COUNT=${APP_NODE_COUNT:-3}
6 |
7 | INFRA_NODES=$(for I in $(seq 0 $(($INFRA_NODE_COUNT-1))) ; do echo infra-node-$I ; done)
8 | APP_NODES=$(for I in $(seq 0 $(($APP_NODE_COUNT-1))) ; do echo app-node-$I ; done)
9 | ALL_NODES="$INFRA_NODES $APP_NODES"
10 |
11 | for NODE in $ALL_NODES ; do
12 | cinder create --name ${NODE}-docker ${VOLUME_SIZE}
13 | done
14 |
15 | openstack volume list
16 |
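For reference, the same volumes can be created with the unified openstack CLI instead of the legacy cinder client:

```bash
openstack volume create --size ${VOLUME_SIZE} ${NODE}-docker
```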
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.5.7_create_floating_ip_addresses.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OCP3_DOMAIN=${OCP3_DOMAIN:-ocp3.example.com}
3 | OCP3_CONTROL_DOMAIN=${OCP3_CONTROL_DOMAIN:-control.${OCP3_DOMAIN}}
4 | PUBLIC_NETWORK=${PUBLIC_NETWORK:-public_network}
5 | MASTER_COUNT=${MASTER_COUNT:-3}
6 | INFRA_NODE_COUNT=${INFRA_NODE_COUNT:-2}
7 | APP_NODE_COUNT=${APP_NODE_COUNT:-3}
8 |
9 | BASTION="bastion"
10 | MASTERS=$(for M in $(seq 0 $(($MASTER_COUNT-1))) ; do echo master-$M ; done)
11 | INFRA_NODES=$(for I in $(seq 0 $(($INFRA_NODE_COUNT-1))) ; do echo infra-node-$I ; done)
12 | for HOST in $BASTION $MASTERS $INFRA_NODES
13 | do
14 | openstack floating ip create ${PUBLIC_NETWORK}
15 | FLOATING_IP=$(openstack floating ip list | awk "/None/ { print \$4 }")
16 | openstack server add floating ip ${HOST}.${OCP3_CONTROL_DOMAIN} ${FLOATING_IP}
17 | done
18 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.1.1_register.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Set RHN_USERNAME, RHN_PASSWORD and RHN_POOL_ID for your environment
3 | sudo subscription-manager register \
4 | --username $RHN_USERNAME \
5 | --password $RHN_PASSWORD
6 | sudo subscription-manager subscribe --pool $RHN_POOL_ID
7 |
8 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.1.2_enable_repos.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OSP_VERSION=${OSP_VERSION:-10}
3 | OCP3_VERSION=${OCP3_VERSION:-3.4}
4 |
5 | sudo subscription-manager repos --disable="*"
6 | sudo subscription-manager repos \
7 | --enable=rhel-7-server-rpms \
8 | --enable=rhel-7-server-extras-rpms \
9 | --enable=rhel-7-server-optional-rpms \
10 | --enable=rhel-7-server-ose-${OCP3_VERSION}-rpms \
11 | --enable=rhel-7-server-openstack-${OSP_VERSION}-rpms \
12 | --enable=rhel-7-fast-datapath-rpms
13 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.1.3_install_openshift-ansible-playbooks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sudo yum install -y openshift-ansible-playbooks
3 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_disable_peerdns_eth0_all_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ansible nodes -i inventory -m script -a \
3 | "/usr/bin/sed -i -e '/PEERDNS/s/=.*/=no/' /etc/sysconfig/network-scripts/ifcfg-eth0"
4 |
5 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_enable_eth1_all_ansible.sh:
--------------------------------------------------------------------------------
1 | ansible nodes -i inventory -a "/usr/sbin/ifup eth1"
2 | ansible nodes -i inventory -a \
3 | "/usr/sbin/iptables -t nat -A POSTROUTING -o eth1 -j MASQUERADE"
4 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_enable_ocp_repo_all_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OCP3_VERSION=${OCP3_VERSION:-3.4}
3 | ansible nodes -i inventory -a \
4 | "subscription-manager repos --enable=rhel-7-server-ose-${OCP3_VERSION}-rpms"
5 |
6 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_enable_osp_repos_all_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OSP_VERSION=${OSP_VERSION:-10}
3 | ansible nodes -i inventory -a \
4 | "subscription-manager repos --enable=rhel-7-server-openstack-${OSP_VERSION}-rpms"
5 |
6 | if [ "$OSP_VERSION" -lt 10 ] ; then
7 | ansible nodes -i inventory -a \
8 | "subscription-manager repos --enable=rhel-7-server-openstack-${OSP_VERSION}-director-rpms"
9 | fi
10 |
11 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_enable_server_repos_all_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ansible nodes -i inventory -m script -a \
3 | "/usr/sbin/subscription-manager repos --disable '*'"
4 |
5 | ansible nodes -i inventory -m script -a \
6 | "/usr/sbin/subscription-manager repos
7 | --enable=rhel-7-server-rpms \
8 | --enable=rhel-7-server-extras-rpms \
9 | --enable=rhel-7-server-optional-rpms"
10 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_install_cloud_config_all_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | PACKAGES="os-collect-config,python-zaqarclient,os-refresh-config,os-apply-config"
3 |
4 | ansible nodes -i inventory -m yum -a "name='$PACKAGES' state=present"
5 | ansible nodes -i inventory -m service -a \
6 | "name=os-collect-config enabled=yes state=started"
7 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.3_rhn_subscribe_all_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Pool IDs not yet available
3 | ansible nodes -i inventory -f 1 -m redhat_subscription -a \
4 | "state=present username=$RHN_USERNAME password=$RHN_PASSWORD"
5 | ansible nodes -i inventory -f 1 -a "subscription-manager attach --pool=${RHN_POOL_ID}"
6 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.4.1_install_base_packages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | for H in $ALL_HOSTS
4 | do
5 | ssh $H sudo yum install -y wget git net-tools bind-utils iptables-services \
6 | bridge-utils bash-completion atomic-openshift-excluder \
7 | atomic-openshift-docker-excluder
8 | done
9 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.4.1_register_all_instances.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | RHN_USERNAME=${RHN_USERNAME:-changeme}
3 | RHN_PASSWORD=${RHN_PASSWORD:-changeme}
4 | RHN_POOL_ID=${RHN_POOL_ID:-changeme}
5 |
6 | for H in $ALL_HOSTS
7 | do
8 | ssh $H sudo subscription-manager register \
9 | --username ${RHN_USERNAME} --password ${RHN_PASSWORD}
10 | ssh $H sudo subscription-manager attach --pool ${RHN_POOL_ID}
11 | done
12 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.4.1_rhn_subscribe_all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | OCP3_VERSION=${OCP3_VERSION:-3.4}
3 |
4 | for H in $ALL_HOSTS
5 | do
6 | ssh $H sudo subscription-manager repos --disable="*"
7 | ssh $H sudo subscription-manager repos \
8 | --enable="rhel-7-server-rpms" \
9 | --enable="rhel-7-server-extras-rpms" \
10 | --enable="rhel-7-server-optional-rpms" \
11 | --enable="rhel-7-server-ose-${OCP3_VERSION}-rpms"
12 | done
13 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.4_enable_lvmetad_nodes_ansible.sh:
--------------------------------------------------------------------------------
1 | ansible nodes -i inventory -m yum -a "name=lvm2 state=present"
2 | ansible nodes -i inventory -m service -a \
3 | "name=lvm2-lvmetad enabled=yes state=started"
4 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.8.5_configure_docker_storage_ansible.sh:
--------------------------------------------------------------------------------
1 | ansible nodes -i inventory -m yum -a "name=docker state=present"
2 | cat > docker-storage-setup <<EOF
3 | DEVS=/dev/vdb
4 | VG=docker-vg
5 | EOF
6 | ansible app-nodes,infra-nodes -i inventory -m copy -a "src=docker-storage-setup dest=/etc/sysconfig/docker-storage-setup force=yes"
7 | ansible nodes -i inventory -a "/usr/bin/docker-storage-setup"
8 | ansible nodes -i inventory -m service -a "name=docker enabled=yes state=started"
9 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.9_allow_docker_flannel.sh:
--------------------------------------------------------------------------------
1 | #ansible masters -i inventory \
2 | # -a '/usr/sbin/iptables -A DOCKER -p tcp -j ACCEPT'
3 | ansible nodes -i inventory \
4 | -a '/usr/sbin/iptables -A DOCKER -p tcp -j ACCEPT'
5 |
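6 | # With flannel as the SDN, Docker's DOCKER iptables chain must explicitly accept forwarded TCP traffic, otherwise inter-node container traffic can be dropped.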
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/ch5.9_deploy_openshift.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | export OCP_ANSIBLE_ROOT=${OCP_ANSIBLE_ROOT:-/usr/share/ansible}
3 | export ANSIBLE_ROLES_PATH=${ANSIBLE_ROLES_PATH:-${OCP_ANSIBLE_ROOT}/openshift-ansible/roles}
4 | export ANSIBLE_HOST_KEY_CHECKING=False
5 |
6 | ansible-playbook -i ~/inventory \
7 | ${OCP_ANSIBLE_ROOT}/openshift-ansible/playbooks/byo/config.yml
8 |
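9 | # playbooks/byo/config.yml is the "bring your own host" entry point of openshift-ansible; it installs OpenShift on the hosts listed in ~/inventory.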
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/prepare_osp_instances_ansible.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Copy files to the bastion
4 | # Prepare the bastion to configure the rest of the VMs
5 | #
6 | #
7 | OCP3_DOMAIN=${OCP3_DOMAIN:-ocp3.example.com}
8 | BASTION_HOST=${BASTION_HOST:-bastion.control.${OCP3_DOMAIN}}
9 |
10 | function floating_ip() {
11 | # $1 = server name: extract the floating IP (the second address on the control network) reported by 'openstack server show'
12 | openstack server show $1 -f json |
13 | jq -r '.addresses |
14 | match("control-network=[\\d.]+, ([\\d.]+)") |
15 | .captures[0].string'
16 | }
17 |
18 | BASTION_IP=$(floating_ip $BASTION_HOST)
19 |
20 | INSTANCE_FILES="ansible.cfg inventory instance_hosts_ansible.sh ch5.8.*_ansible.sh"
21 |
22 | sh ./generate_inventory.sh
23 |
24 | scp -i ${OCP3_KEY_FILE} ${INSTANCE_FILES} cloud-user@${BASTION_IP}:
25 | ssh -i ${OCP3_KEY_FILE} cloud-user@${BASTION_IP} sh ./instance_hosts_ansible.sh
26 |
--------------------------------------------------------------------------------
/reference-architecture/osp-cli/validation.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | become: no
5 | pre_tasks:
6 | - name: set fact
7 | set_fact:
8 | openshift_master_cluster_hostname: "{{ openshift_master_cluster_public_hostname }}"
9 | - name: set fact
10 | set_fact:
11 | openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_public_hostname }}"
12 | - name: set fact
13 | set_fact:
14 | openshift_master_cluster_hostname: "{{ openshift_master_cluster_hostname }}"
15 | - name: master group
16 | add_host:
17 | name: "{{ item }}"
18 | groups: single_master
19 | with_items: "{{ groups['masters'].0 }}"
20 |
21 | - include: ../../playbooks/post-validation.yaml
22 |
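23 | # Note: the self-referential set_fact tasks above ensure the cluster hostname variables are resolved into host facts before post-validation.yaml consumes them.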
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/ansible/templates/update.key.j2:
--------------------------------------------------------------------------------
1 | key "update-key" {
2 | algorithm hmac-md5;
3 | secret "{{ update_key }}";
4 | };
5 |
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/ansible/templates/zone.db.j2:
--------------------------------------------------------------------------------
1 | $ORIGIN {{ zone }}.
2 | $TTL 300
3 | @ IN SOA {{ nameservers[0].name }}.{{ zone }}. {{ contact }} (
4 | 0 ; serial number
5 | 8h ; refresh
6 | 1h ; retry
7 | 7d ; expire
8 | 1d ) ; min TTL
9 |
10 | {% for ns in nameservers %}
11 | IN NS {{ ns.name }}.{{ zone }}.
12 | {% endfor %}
13 |
14 | {% for ns in nameservers %}
15 | {{ ns.name }} IN A {{ ns.address }}
16 | {% endfor %}
17 |
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/ansible/templates/zones.conf-master.j2:
--------------------------------------------------------------------------------
1 | include "/etc/named/update.key" ;
2 |
3 | zone {{ zone }} {
4 | type master ;
5 | file "/var/named/dynamic/zone.db" ;
6 | allow-update { key update-key ; } ;
7 | };
8 |
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/ansible/templates/zones.conf-slave.j2:
--------------------------------------------------------------------------------
1 | include "/etc/named/update.key" ;
2 |
3 | zone {{ zone }} {
4 | type slave ;
5 | masters { {% for ns in nameservers %} {%- if ns.name in masters -%} {{ ns.address }} ; {% endif -%} {% endfor -%} } ;
6 | file "/var/named/dynamic/zone.db" ;
7 | allow-update-forwarding { any ; } ;
8 | };
9 |
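10 | {# The masters list expands to the address of every nameserver whose name appears in the 'masters' variable; the slave forwards dynamic updates to those hosts. #}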
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/heat/fragments/install_complete.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/sh
2 | #
3 | #
4 | source /usr/local/lib/notify.sh
5 | echo "Install Complete at" $(date)
6 | notify_success "OpenShift node has been prepared for running ansible."
7 |
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/heat/fragments/install_python.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Ensure that Python is installed for Ansible
4 | #
5 | echo "Installing Python, DNF and Selinux packages for Ansible"
6 | [ -f /etc/fedora-release ] && dnf install -y python python-dnf libselinux-python
7 |
--------------------------------------------------------------------------------
/reference-architecture/osp-dns/heat/fragments/notify.sh:
--------------------------------------------------------------------------------
1 | # Send success status to OpenStack WaitCondition
2 | function notify_success() {
3 | $WC_NOTIFY --insecure --data-binary \
4 | "{\"status\": \"SUCCESS\", \"reason\": \"$1\", \"data\": \"$1\"}"
5 | exit 0
6 | }
7 |
8 | # Send failure status to OpenStack WaitCondition
9 | function notify_failure() {
10 | $WC_NOTIFY --insecure --data-binary \
11 | "{\"status\": \"FAILURE\", \"reason\": \"$1\", \"data\": \"$1\"}"
12 | exit 1
13 | }
14 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | forks = 50
3 | host_key_checking = False
4 | inventory_ignore_extensions = .example, .ini, .pyc, .pem
5 | gathering = smart
6 | # Roles path assumes oVirt-ansible roles installed to /usr/share/ansible/roles via RPM
7 | # per instructions at: https://github.com/oVirt/ovirt-ansible
8 | roles_path = ../../roles:/usr/share/ansible/roles
9 | remote_user = root
10 | retry_files_enabled=False
11 | log_path=./ansible.log
12 | vault_password_file=~/.test_vault_pw
13 |
14 | #[ssh_connection]
15 | #ssh_args = -C -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -o PreferredAuthentications=publickey
16 | #control_path = /var/run/%%h-%%r
17 | #pipelining = True
18 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/docker-image-pull.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Docker Image Pre-Pull
3 | hosts: nodes
4 | vars:
5 | ose_tag: v3.9.15
6 | images:
7 | - ose-ansible
8 | - ose-cluster-capacity
9 | - ose-deployer
10 | - ose-docker-builder
11 | - ose-docker-registry
12 | - ose-haproxy-router
13 | - ose-pod
14 | - ose
15 | - node
16 | registry_prefix: registry.access.redhat.com/openshift3/
17 | tasks:
18 | - docker_image:
19 | name: "{{ registry_prefix }}{{ item }}"
20 | tag: "{{ ose_tag }}"
21 | with_items: "{{ images }}"
22 | ...
23 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-37-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ###
3 | ### Stub version of oVirt.vm-infra.yaml that uses a different vars file for testing
4 | ###
5 | - name: oVirt 37 infra
6 | hosts: localhost
7 | connection: local
8 | gather_facts: false
9 |
10 | vars_files:
11 | - vars/ovirt-37-vars.yaml
12 |
13 | pre_tasks:
14 | - name: Log in to oVirt
15 | ovirt_auth:
16 | url: "{{ engine_url }}"
17 | username: "{{ engine_user }}"
18 | password: "{{ engine_password }}"
19 | ca_file: "{{ engine_cafile | default(omit) }}"
20 | insecure: "{{ engine_insecure | default(true) }}"
21 | tags:
22 | - always
23 |
24 | roles:
25 | - oVirt.image-template
26 | - oVirt.vm-infra
27 |
28 | post_tasks:
29 | - name: Logout from oVirt
30 | ovirt_auth:
31 | state: absent
32 | ovirt_auth: "{{ ovirt_auth }}"
33 | tags:
34 | - always
35 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-39-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ###
3 | ### Stub version of oVirt.vm-infra.yaml that uses a different vars file for testing
4 | ###
5 | - name: oVirt 39 infra
6 | hosts: localhost
7 | connection: local
8 | gather_facts: false
9 |
10 | vars_files:
11 | - vars/ovirt-39-vars.yaml
12 |
13 | pre_tasks:
14 | - name: Log in to oVirt
15 | ovirt_auth:
16 | url: "{{ engine_url }}"
17 | username: "{{ engine_user }}"
18 | password: "{{ engine_password }}"
19 | ca_file: "{{ engine_cafile | default(omit) }}"
20 | insecure: "{{ engine_insecure | default(true) }}"
21 | tags:
22 | - always
23 |
24 | roles:
25 | - oVirt.image-template
26 | - oVirt.vm-infra
27 |
28 | post_tasks:
29 | - name: Logout from oVirt
30 | ovirt_auth:
31 | state: absent
32 | ovirt_auth: "{{ ovirt_auth }}"
33 | tags:
34 | - always
35 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-atomic-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ###
3 | ### Stub version of oVirt.vm-infra.yaml that uses a different vars file for testing
4 | ###
5 | - name: oVirt atomic infra
6 | hosts: localhost
7 | connection: local
8 | gather_facts: false
9 |
10 | vars_files:
11 | - vars/ovirt-atomic-vars.yaml
12 |
13 | pre_tasks:
14 | - name: Log in to oVirt
15 | ovirt_auth:
16 | url: "{{ engine_url }}"
17 | username: "{{ engine_user }}"
18 | password: "{{ engine_password }}"
19 | ca_file: "{{ engine_cafile | default(omit) }}"
20 | insecure: "{{ engine_insecure | default(true) }}"
21 | tags:
22 | - always
23 |
24 | roles:
25 | - oVirt.image-template
26 | - oVirt.vm-infra
27 |
28 | post_tasks:
29 | - name: Logout from oVirt
30 | ovirt_auth:
31 | state: absent
32 | ovirt_auth: "{{ ovirt_auth }}"
33 | tags:
34 | - always
35 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-cen39-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ###
3 | ### Stub version of oVirt.vm-infra.yaml that uses a different vars file for testing
4 | ###
5 | - name: oVirt centos infra
6 | hosts: localhost
7 | connection: local
8 | gather_facts: false
9 |
10 | vars_files:
11 | - vars/ovirt-cen39-vars.yaml
12 |
13 | pre_tasks:
14 | - name: Log in to oVirt
15 | ovirt_auth:
16 | url: "{{ engine_url }}"
17 | username: "{{ engine_user }}"
18 | password: "{{ engine_password }}"
19 | ca_file: "{{ engine_cafile | default(omit) }}"
20 | insecure: "{{ engine_insecure | default(true) }}"
21 | tags:
22 | - always
23 |
24 | roles:
25 | - oVirt.image-template
26 | - oVirt.vm-infra
27 |
28 | post_tasks:
29 | - name: Logout from oVirt
30 | ovirt_auth:
31 | state: absent
32 | ovirt_auth: "{{ ovirt_auth }}"
33 | tags:
34 | - always
35 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-centos-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ###
3 | ### Stub version of oVirt.vm-infra.yaml that uses a different vars file for testing
4 | ###
5 | - name: oVirt centos infra
6 | hosts: localhost
7 | connection: local
8 | gather_facts: false
9 |
10 | vars_files:
11 | - vars/ovirt-centos-vars.yaml
12 |
13 | pre_tasks:
14 | - name: Log in to oVirt
15 | ovirt_auth:
16 | url: "{{ engine_url }}"
17 | username: "{{ engine_user }}"
18 | password: "{{ engine_password }}"
19 | ca_file: "{{ engine_cafile | default(omit) }}"
20 | insecure: "{{ engine_insecure | default(true) }}"
21 | tags:
22 | - always
23 |
24 | roles:
25 | - oVirt.image-template
26 | - oVirt.vm-infra
27 |
28 | post_tasks:
29 | - name: Logout from oVirt
30 | ovirt_auth:
31 | state: absent
32 | ovirt_auth: "{{ ovirt_auth }}"
33 | tags:
34 | - always
35 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-image-only.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: oVirt infra
3 | hosts: localhost
4 | connection: local
5 | gather_facts: false
6 |
7 | vars_files:
8 | - ../playbooks/vars/ovirt-infra-vars.yaml
9 |
10 | pre_tasks:
11 | - name: Log in to oVirt
12 | ovirt_auth:
13 | url: "{{ engine_url }}"
14 | username: "{{ engine_user }}"
15 | password: "{{ engine_password }}"
16 | ca_file: "{{ engine_cafile | default(omit) }}"
17 | insecure: "{{ engine_insecure | default(true) }}"
18 | tags:
19 | - always
20 |
21 | roles:
22 | - oVirt.image-template
23 |
24 | post_tasks:
25 | - name: Logout from oVirt
26 | ovirt_auth:
27 | state: absent
28 | ovirt_auth: "{{ ovirt_auth }}"
29 | tags:
30 | - always
31 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/ovirt-vm-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: oVirt infra
3 | hosts: localhost
4 | connection: local
5 | gather_facts: false
6 |
7 | vars_files:
8 | - vars/ovirt-infra-vars.yaml
9 |
10 | pre_tasks:
11 | - name: Log in to oVirt
12 | ovirt_auth:
13 | url: "{{ engine_url }}"
14 | username: "{{ engine_user }}"
15 | password: "{{ engine_password }}"
16 | ca_file: "{{ engine_cafile | default(omit) }}"
17 | insecure: "{{ engine_insecure | default(true) }}"
18 | tags:
19 | - always
20 |
21 | roles:
22 | - oVirt.image-template
23 | - oVirt.vm-infra
24 |
25 | post_tasks:
26 | - name: Logout from oVirt
27 | ovirt_auth:
28 | state: absent
29 | ovirt_auth: "{{ ovirt_auth }}"
30 | tags:
31 | - always
32 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/test-docker-storage.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | roles:
5 | - instance-groups
6 | tags:
7 | - always
8 |
9 | - hosts: nodes
10 | roles:
11 | - role: docker-storage-setup
12 | docker_dev: '/dev/vdb'
13 | tags:
14 | - pre
15 | - storage
16 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/example/test-instance-groups.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | roles:
5 | - instance-groups
6 |
7 | - hosts: localhost
8 | tasks:
9 | - debug:
10 | var: 'groups.keys()'
11 | - debug:
12 | var: "groups['masters']"
13 | - debug:
14 | var: "groups['single_master']"
15 | - debug:
16 | var: "groups['schedulable_nodes']"
17 | - debug:
18 | var: "groups['etcd']"
19 | - debug:
20 | var: "hostvars[item]"
21 | with_items:
22 | - "{{groups['masters']}}"
23 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/playbooks/ovirt-vm-infra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: oVirt infra
3 | hosts: localhost
4 | connection: local
5 | gather_facts: false
6 |
7 | vars_files:
8 | - ../ovirt-infra-vars.yaml
9 |
10 | pre_tasks:
11 | - name: Log in to oVirt
12 | ovirt_auth:
13 | url: "{{ engine_url }}"
14 | username: "{{ engine_user }}"
15 | password: "{{ engine_password }}"
16 | ca_file: "{{ engine_cafile | default(omit) }}"
17 | insecure: "{{ engine_insecure | default(true) }}"
18 | tags:
19 | - always
20 |
21 | roles:
22 | - oVirt.image-template
23 | - oVirt.vm-infra
24 |
25 | post_tasks:
26 | - name: Logout from oVirt
27 | ovirt_auth:
28 | state: absent
29 | ovirt_auth: "{{ ovirt_auth }}"
30 | tags:
31 | - always
32 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/requirements.txt:
--------------------------------------------------------------------------------
1 | python>=2.7
2 | ovirt-engine-sdk-python>=4.0.0
3 |
--------------------------------------------------------------------------------
/reference-architecture/rhv-ansible/vault.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ## RHV Engine credentials
3 | vault_engine_url: https://engine.example.com/ovirt-engine/api
4 | vault_engine_user: admin@internal
5 | vault_engine_password:
6 | vault_root_ssh_key:
7 | vault_rhsub_user:
8 | vault_rhsub_password:
9 | vault_rhsub_pool:
10 | vault_rhsub_server:
11 | ...
12 |
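13 | # Fill in the credentials above and encrypt this file with 'ansible-vault encrypt vault.yaml'; ansible.cfg already points vault_password_file at the matching password file.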
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | #strategy_plugins = /root/mitogen-master/ansible_mitogen/plugins/strategy
3 | #strategy = mitogen
4 | forks = 50
5 | host_key_checking = False
6 | inventory = inventory/vsphere/vms/inventory39
7 | inventory = inventory/inventory39
8 | gathering = smart
9 | roles_path = /usr/share/ansible/openshift-ansible/roles:/opt/ansible/roles:./roles:../../roles
10 | remote_user = root
11 | private_key_file=ssh_key/ocp-installer
12 | retry_files_enabled=False
13 | log_path=./ansible.log
14 |
15 | [ssh_connection]
16 | ssh_args = -C -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no -o PreferredAuthentications=publickey
17 | control_path = /var/run/%%h-%%r
18 | pipelining = True
19 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/images/OCP-on-VMware-Architecture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/openshift/openshift-ansible-contrib/cd17fa3c5b8cab87b2403bde3a560eadcdcd0955/reference-architecture/vmware-ansible/images/OCP-on-VMware-Architecture.jpg
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/add-node-prerequisite.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: new_nodes
3 | gather_facts: yes
4 | become: yes
5 | vars_files:
6 | - vars/main.yaml
7 | roles:
8 | - rhsm
9 |
10 | - hosts: new_nodes
11 | gather_facts: no
12 | become: yes
13 | vars_files:
14 | - vars/main.yaml
15 | roles:
16 | - prerequisites
17 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/add-node.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: no
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | - create-vm-add-prod-ose
10 |
11 | - hosts: new_nodes
12 | gather_facts: yes
13 | become: no
14 | vars_files:
15 | - vars/main.yaml
16 | roles:
17 | - instance-groups
18 | - rhsm
19 | - vmware-guest-setup
20 | - cloud-provider-setup
21 | - docker-storage-setup
22 | - openshift-volume-quota
23 | - include: add-node-prerequisite.yaml
24 |
25 | - include: node-setup.yaml
26 |
27 | - hosts: loadbalancer, master, infra
28 | gather_facts: yes
29 | become: no
30 | vars_files:
31 | - vars/main.yaml
32 | roles:
33 | - haproxy-server-config
34 |
35 | - hosts: single_master
36 | gather_facts: yes
37 | become: yes
38 | vars_files:
39 | - vars/main.yaml
40 | roles:
41 | - ../../../roles/router-scaleup
42 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/clean.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: nodes
3 | ignore_errors: yes
4 | roles:
5 | - rhsm-unregister
6 |
7 | - hosts: localhost
8 | user: root
9 | become: false
10 | ignore_errors: yes
11 | tasks:
12 | - name: Delete all added VMs
13 | vmware_guest:
14 | hostname: "{{ openshift_cloudprovider_vsphere_host }}"
15 | username: "{{ openshift_cloudprovider_vsphere_username }}"
16 | password: "{{ openshift_cloudprovider_vsphere_password }}"
17 | validate_certs: False
18 | name: "{{ hostvars[item].inventory_hostname }}"
19 | datacenter: "{{ openshift_cloudprovider_vsphere_datacenter }}"
20 | folder: "{{ openshift_cloudprovider_vsphere_datacenter }}/vm/{{ openshift_cloudprovider_vsphere_folder }}"
21 | state: absent
22 | force: true
23 | with_items:
24 | - "{{ groups['nodes'] }}"
25 | - "{{ groups['storage'] }}"
26 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/cns-node-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: storage
3 | gather_facts: yes
4 | become: no
5 | roles:
6 | - rhsm
7 | - vmware-guest-setup
8 | - cloud-provider-setup
9 | - docker-storage-setup
10 | - openshift-volume-quota
11 | - gluster-ports
12 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/cns-storage.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: prod-ose-cns.yaml
3 | tags: ['vms']
4 |
5 | - include: cns-node-setup.yaml
6 | tags: [ 'node-setup']
7 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/crs-node-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: crs
3 | gather_facts: yes
4 | become: no
5 | vars_files:
6 | - vars/main.yaml
7 | roles:
8 | - instance-groups
9 | - rhsm-subscription
10 | - gluster-rhsm-repos
11 | - vmware-guest-setup
12 | - docker-storage-setup
13 | - openshift-volume-quota
14 | - gluster-crs-prerequisites
15 | - gluster-ports
16 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/crs-storage.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: prod-ose-crs.yaml
3 | tags: ['vms']
4 |
5 | - include: crs-node-setup.yaml
6 | tags: [ 'node-setup' ]
7 |
8 | - include: heketi-setup.yaml
9 | tags: [ 'heketi-setup']
10 |
11 | - include: heketi-ocp.yaml
12 | tags: ['heketi-ocp']
13 |
14 | - include: cleanup-crs.yaml
15 | tags: ['clean']
16 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/haproxy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - create-vm-haproxy
5 |
6 | - name: Deploy ha proxy server
7 | hosts: lb
8 | roles:
9 | - rhsm
10 | - vmware-guest-setup
11 | ignore_errors: yes
12 |
13 | - name: Configure ha proxy server
14 | hosts: lb
15 | roles:
16 | - haproxy-server
17 |
18 | - name: Configure ha proxy server files
19 | hosts: lb, masters, infras
20 | roles:
21 | - haproxy-server-config
22 |
23 | - name: Configure keepalived for the HAProxy servers
24 | hosts: lb
25 | roles:
26 | - keepalived_haproxy
27 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/heketi-ocp.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: master
3 | gather_facts: yes
4 | vars_files:
5 | - vars/main.yaml
6 | roles:
7 | - instance-groups
8 |
9 | - hosts: single_master
10 | gather_facts: yes
11 | vars_files:
12 | - vars/main.yaml
13 | roles:
14 | - instance-groups
15 | - heketi-ocp
16 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/heketi-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: crs
3 | gather_facts: yes
4 | vars_files:
5 | - vars/main.yaml
6 | roles:
7 | - instance-groups
8 |
9 | - hosts: single_crs
10 | gather_facts: yes
11 | vars_files:
12 | - vars/main.yaml
13 | roles:
14 | - instance-groups
15 | - rhsm-subscription
16 | - gluster-rhsm-repos
17 | - heketi-install
18 | - heketi-configure
19 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/infrastructure.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: setup.yaml
3 | tags: ['setup']
4 |
5 | - include: nfs.yaml
6 | tags: ['nfs']
7 |
8 | - include: prod.yaml
9 | tags: ['prod']
10 |
11 | - include: haproxy.yaml
12 | tags: ['haproxy']
13 |
14 | - include: ocp-install.yaml
15 | tags: ['ocp-install']
16 |
17 | - include: ocp-configure.yaml
18 | tags: ['ocp-configure']
19 |
20 | - include: ocp-demo.yaml
21 | tags: ['ocp-demo']
22 |
23 | - include: ocp-upgrade.yaml
24 | tags: ['ocp-upgrade']
25 |
26 | - include: clean.yaml
27 | tags: ['clean']
28 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/library/rpm_q.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/lib_utils/library/rpm_q.py
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/nfs.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | roles:
4 | - create-vm-nfs
5 |
6 | - name: Deploy NFS server
7 | hosts: nfs
8 | gather_facts: true
9 | roles:
10 | - rhsm
11 | - vmware-guest-setup
12 | - nfs-server
13 | ignore_errors: yes
14 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/ocp-configure.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | vars_files:
5 | - vars/main.yaml
6 | roles:
7 | # Group systems
8 | - instance-groups
9 |
10 | - hosts: single_master
11 | gather_facts: yes
12 | vars_files:
13 | - vars/main.yaml
14 | roles:
15 | - instance-groups
16 | - storage-class-configure
17 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/ocp-demo.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | ignore_errors: yes
5 | vars_files:
6 | - vars/main.yaml
7 | pre_tasks:
8 | - name: set fact
9 | set_fact:
10 | openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_public_hostname }}"
11 | - name: set fact
12 | set_fact:
13 | openshift_master_cluster_hostname: "{{ openshift_master_cluster_hostname }}"
14 | roles:
15 | - instance-groups
16 |
17 | - name: Perform post validation steps
18 | include: ../../../playbooks/post-validation.yaml
19 | vars:
20 | validate_etcd_short_hostname: true
21 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/ocp-upgrade.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: yes
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | # Group systems
10 | - instance-groups
11 |
12 | - include: minor-update.yaml
13 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/ocp39.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: run pre-reqs
3 | import_playbook: /root/git/openshift-ansible-contrib-stretch/reference-architecture/vmware-ansible/openshift-ansible/playbooks/prerequisites.yml
4 |
5 | - name: call openshift includes for installer
6 | import_playbook: /root/git/openshift-ansible-contrib-stretch/reference-architecture/vmware-ansible/openshift-ansible/playbooks/deploy_cluster.yml
7 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/openshift-validate.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: yes
4 | vars_files:
5 | - vars/main.yaml
6 | pre_tasks:
7 | - name: set fact
8 | set_fact:
9 | openshift_master_cluster_public_hostname: "{{ openshift_master_cluster_public_hostname }}"
10 | - name: set fact
11 | set_fact:
12 | openshift_master_cluster_hostname: "{{ openshift_master_cluster_hostname }}"
13 | roles:
14 | # Group systems
15 | - instance-groups
16 |
17 | - include: ../../../playbooks/post-validation.yaml
18 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/prerequisite.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: cluster_hosts
3 | gather_facts: yes
4 | become: yes
5 | vars_files:
6 | - vars/main.yaml
7 | roles:
8 | - instance-groups
9 | - rhsm
10 |
11 | - hosts: cluster_hosts
12 | gather_facts: no
13 | vars_files:
14 | - vars/main.yaml
15 | become: yes
16 | roles:
17 | - prerequisites
18 |
19 | - hosts: master
20 | gather_facts: yes
21 | vars_files:
22 | - vars/main.yaml
23 | become: yes
24 | roles:
25 | - master-prerequisites
26 | - etcd-storage
27 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/prod-ose-cns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: yes
5 | become: no
6 | roles:
7 | # Group systems
8 | - create-vm-cns-prod-ose
9 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/prod-ose-crs.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | gather_facts: yes
5 | become: no
6 | vars_files:
7 | - vars/main.yaml
8 | roles:
9 | # Group systems
10 | - create-vm-crs-prod-ose
11 | - instance-groups
12 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/prod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create VMs from inventory file
3 | hosts: localhost
4 | roles:
5 | - create-vm-prod-ose
6 |
7 | - name: Prepare VMs for OSE3 install
8 | hosts: nodes
9 | roles:
10 | - rhsm
11 | - vmware-guest-setup
12 | - cloud-provider-setup
13 | - docker-storage-setup
14 | - openshift-volume-quota
15 | ignore_errors: yes
16 |
17 | - hosts: masters
18 | roles:
19 | - master-prerequisites
20 | - etcd-storage
21 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/cloud-provider-setup/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create /etc/origin/cloudprovider
3 | file:
4 | state: directory
5 | path: "{{ vsphere_conf_dir }}"
6 |
7 | - name: create the vsphere.conf file
8 | template:
9 | src: "{{ role_path }}/templates/vsphere.conf.j2"
10 | dest: /etc/origin/cloudprovider/vsphere.conf
11 | owner: root
12 | group: root
13 | mode: 0644
14 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/cloud-provider-setup/vars/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | vsphere_conf_dir: /etc/origin/cloudprovider
3 | vsphere_conf: "{{ vsphere_conf_dir }}/vsphere.conf"
4 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/docker-storage-setup/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_dev: "/dev/sdb"
3 | docker_vg: "docker-vol"
4 | docker_data_size: "95%VG"
5 | docker_dm_basesize: "3G"
6 | container_root_lv_name: "dockerlv"
7 | container_root_lv_mount_path: "/var/lib/docker"
8 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2:
--------------------------------------------------------------------------------
1 | DEVS="{{ docker_dev }}"
2 | VG="{{ docker_vg }}"
3 | DATA_SIZE="{{ docker_data_size }}"
4 | EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
5 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2:
--------------------------------------------------------------------------------
1 | DEVS="{{ docker_dev }}"
2 | VG="{{ docker_vg }}"
3 | DATA_SIZE="{{ docker_data_size }}"
4 | STORAGE_DRIVER=overlay2
5 | CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
6 | CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
7 | CONTAINER_ROOT_LV_SIZE=100%FREE
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/etcd-storage/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create openshift volume group
3 | lvg: vg=etcd_vg pvs=/dev/sdd
4 |
5 | - name: Create lvm volumes
6 | lvol: vg=etcd_vg lv=etcd_lv size=95%FREE state=present shrink=no
7 |
8 | - name: Create local partition on lvm lv
9 | filesystem:
10 | fstype: xfs
11 | dev: /dev/etcd_vg/etcd_lv
12 |
13 | - name: Create the /var/lib/etcd mount point
14 | file: path=/var/lib/etcd state=directory mode=0755
15 |
16 | - name: Mount the partition
17 | mount:
18 | name: /var/lib/etcd
19 | src: /dev/etcd_vg/etcd_lv
20 | fstype: xfs
21 | state: present
22 |
23 | - name: Remount new partition
24 | command: "mount -a"
25 |
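26 | # 'state: present' only writes the fstab entry; the 'mount -a' command task afterwards actually mounts /var/lib/etcd.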
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/haproxy-server-config/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | haproxy_socket: /var/lib/haproxy/stats
3 | haproxy_chroot: /var/lib/haproxy
4 | haproxy_user: haproxy
5 | haproxy_group: haproxy
6 |
7 | # Frontend settings.
8 | haproxy_frontend_name: 'hafrontend'
9 | haproxy_frontend_bind_address: '*'
10 | haproxy_frontend_port: 80
11 | haproxy_frontend_mode: 'http'
12 |
13 | # Backend settings.
14 | haproxy_backend_name: 'habackend'
15 | haproxy_backend_mode: 'http'
16 | haproxy_backend_balance_method: 'roundrobin'
17 | haproxy_backend_httpchk: 'HEAD / HTTP/1.1\r\nHost:localhost'
18 |
19 | # List of backend servers.
20 | haproxy_backend_servers: []
21 | # - name: app1
22 | # address: 192.168.0.1:80
23 | # - name: app2
24 | # address: 192.168.0.2:80
25 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/haproxy-server-config/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart haproxy
3 | service: name=haproxy state=restarted
4 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/haproxy-server-config/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - stat: path=/etc/haproxy/haproxy.cfg
3 | register: haproxy_cfg
4 |
5 | - name: Copy HAProxy configuration in place.
6 | template:
7 | src: haproxy.cfg.j2
8 | dest: /etc/haproxy/haproxy.cfg
9 | mode: 0644
10 | validate: haproxy -f %s -c -q
11 | notify: restart haproxy
12 | when: haproxy_cfg.stat.exists == True
13 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/haproxy-server/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | haproxy_socket: /var/lib/haproxy/stats
3 | haproxy_chroot: /var/lib/haproxy
4 | haproxy_user: haproxy
5 | haproxy_group: haproxy
6 |
7 | # Frontend settings.
8 | haproxy_frontend_name: 'hafrontend'
9 | haproxy_frontend_bind_address: '*'
10 | haproxy_frontend_port: 80
11 | haproxy_frontend_mode: 'http'
12 |
13 | # Backend settings.
14 | haproxy_backend_name: 'habackend'
15 | haproxy_backend_mode: 'http'
16 | haproxy_backend_balance_method: 'roundrobin'
17 | haproxy_backend_httpchk: 'HEAD / HTTP/1.1\r\nHost:localhost'
18 |
19 | # List of backend servers.
20 | haproxy_backend_servers: []
21 | # - name: app1
22 | # address: 192.168.0.1:80
23 | # - name: app2
24 | # address: 192.168.0.2:80
25 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/haproxy-server/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart haproxy
3 | service: name=haproxy state=restarted
4 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/haproxy-server/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure HAProxy is installed.
3 | yum: name=haproxy state=installed
4 |
5 | - name: Get HAProxy version.
6 | command: haproxy -v
7 | register: haproxy_version_result
8 | changed_when: false
9 | always_run: yes
10 |
11 | - name: open firewall for Openshift services
12 | command: iptables -I INPUT -p tcp --dport {{item}} -j ACCEPT
13 | with_items:
14 | - 8443
15 | - 443
16 | - 80
17 |
18 | - name: Save the iptables rules
19 | command: iptables-save
20 |
21 | - name: Ensure Firewalld is disabled
22 | service: name=firewalld state=stopped enabled=no
23 |
24 | - name: Ensure HAProxy is started and enabled on boot.
25 | service: name=haproxy state=started enabled=yes
26 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-configure/templates/heketi-secret.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: heketi-secret
5 | namespace: default
6 | data:
7 | key: "{{ heketi_secret }}"
8 | type: kubernetes.io/glusterfs
9 |
10 |
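11 | # Kubernetes stores Secret 'data' values base64-encoded, so heketi_secret is expected to already hold the base64-encoded heketi admin key.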
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-configure/templates/storage-crs.json.j2:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1beta1
2 | kind: StorageClass
3 | metadata:
4 | name: crs-gluster
5 | provisioner: kubernetes.io/glusterfs
6 | parameters:
7 | resturl: "http://{{ansible_default_ipv4.address }}:8080"
8 | restauthenabled: "true"
9 | restuser: "admin"
10 | secretNamespace: "default"
11 | secretName: "heketi-secret"
12 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-install/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart heketi
3 | service: name=heketi state=restarted
4 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-ocp-clean/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Switch to default project
3 | command: oc project default
4 |
5 | - name: Check to see if heketi secret is already created
6 | command: "oc get secrets"
7 | register: oc_secrets
8 |
9 | - name: Check to see if storage class is already created
10 | command: "oc get storageclass"
11 | register: storage_class
12 |
13 | - name: Remove storage class from OCP
14 | command: "oc delete storageclass crs-gluster"
15 | when: "'crs-gluster' in storage_class.stdout"
16 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-ocp-clean/templates/heketi-secret.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: heketi-secret
5 | namespace: default
6 | data:
7 | key: "{{ heketi_secret }}"
8 | type: kubernetes.io/glusterfs
9 |
10 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-ocp-clean/templates/storage-crs.json.j2:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1beta1
2 | kind: StorageClass
3 | metadata:
4 | name: crs-gluster
5 | provisioner: kubernetes.io/glusterfs
6 | parameters:
7 | resturl: "http://{{ansible_default_ipv4.address }}:8080"
8 | restauthenabled: "true"
9 | restuser: "admin"
10 | secretNamespace: "default"
11 | secretName: "heketi-secret"
12 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-ocp/templates/heketi-secret.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: heketi-secret
5 | namespace: default
6 | data:
7 | key: "{{ heketi_secret }}"
8 | type: kubernetes.io/glusterfs
9 |
10 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/heketi-ocp/templates/storage-crs.json.j2:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1beta1
2 | kind: StorageClass
3 | metadata:
4 | name: crs-gluster
5 | provisioner: kubernetes.io/glusterfs
6 | parameters:
7 | resturl: "http://{{ansible_default_ipv4.address }}:8080"
8 | restauthenabled: "true"
9 | restuser: "admin"
10 | secretNamespace: "default"
11 | secretName: "heketi-secret"
12 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/keepalived_haproxy/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | keepalived_priority_start: 100
3 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/keepalived_haproxy/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart keepalived
3 | service: name=keepalived state=restarted
4 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/keepalived_haproxy/templates/firewall.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | {% for host in groups['haproxy_group'] %}
3 | iptables -A INPUT -s {{ hostvars[host].ansible_default_ipv4.address }} -j ACCEPT
4 | {% endfor %}
5 |
6 | iptables-save > /etc/sysconfig/iptables
7 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/nfs-server/files/etc-sysconfig-nfs:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | #
3 | #LOCKDARG=
4 | LOCKD_TCPPORT=32803
5 | LOCKD_UDPPORT=32769
6 | #
7 | RPCNFSDARGS=""
8 | #RPCNFSDCOUNT=16
9 | #NFSD_V4_GRACE=90
10 | #NFSD_V4_LEASE=90
11 | #
12 | RPCMOUNTDOPTS=""
13 | MOUNTD_PORT=892
14 | #
15 | STATDARG=""
16 | STATD_PORT=662
17 | #STATD_OUTGOING_PORT=2020
18 | #STATD_HA_CALLOUT="/usr/local/bin/foo"
19 | #
20 | SMNOTIFYARGS=""
21 | RPCIDMAPDARGS=""
22 | RPCGSSDARGS=""
23 | GSS_USE_PROXY="yes"
24 | RPCSVCGSSDARGS=""
25 | BLKMAPDARGS=""
26 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/nfs-server/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart nfs
3 | service: name=nfs state=restarted
4 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/vmware-guest-setup/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart chronyd
3 | service: name=chronyd state=restarted
4 |
5 | - name: restart networking
6 | service: name=network state=restarted
7 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2:
--------------------------------------------------------------------------------
1 | # This file is managed by Ansible
2 |
3 | server 0.rhel.pool.ntp.org
4 | server 1.rhel.pool.ntp.org
5 | server 2.rhel.pool.ntp.org
6 | server 3.rhel.pool.ntp.org
7 |
8 | driftfile /var/lib/chrony/drift
9 | makestep 10 3
10 |
11 | keyfile /etc/chrony.keys
12 | commandkey 1
13 | generatecommandkey
14 |
15 | noclientlog
16 | logchange 0.5
17 |
18 | logdir /var/log/chrony
19 | log measurements statistics tracking
20 |
--------------------------------------------------------------------------------
/reference-architecture/vmware-ansible/playbooks/roles/vmware-guest-setup/vars/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | locale: en_US.UTF-8
3 | timezone: UTC
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible>=2.2
2 |
--------------------------------------------------------------------------------
/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | openshift_cluster_node_labels:
3 | app:
4 | region: primary
5 | infra:
6 | region: infra
7 |
--------------------------------------------------------------------------------
/roles/deploy-host-nonpriv/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # RHV and VSPHERE need ssh keys
3 | - block:
4 | - name: Search for SSH key
5 | stat:
6 | path: ~/.ssh/id_rsa
7 | register: ssh_key
8 |
9 | - name: Create SSH key if it is missing
10 | command: "ssh-keygen -N '' -f ~/.ssh/id_rsa"
11 | when: not ssh_key.stat.exists
12 | when: "'rhv' in provider or 'vsphere' in provider"
13 | ...
14 |
--------------------------------------------------------------------------------
/roles/dns-records/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | use_bastion: False
3 |
--------------------------------------------------------------------------------
/roles/dns-server-detect/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | external_nsupdate_keys: {}
4 |
--------------------------------------------------------------------------------
/roles/dns-views/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | external_nsupdate_keys: {}
3 | named_private_recursion: 'yes'
4 | named_public_recursion: 'no'
5 |
--------------------------------------------------------------------------------
/roles/docker-storage-setup/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | docker_dev: "/dev/sdb"
3 | docker_vg: "docker-vol"
4 | docker_data_size: "95%VG"
5 | docker_dm_basesize: "3G"
6 | container_root_lv_name: "dockerlv"
7 | container_root_lv_mount_path: "/var/lib/docker"
8 | container_root_lv_size: "100%FREE"
9 |
--------------------------------------------------------------------------------
/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2:
--------------------------------------------------------------------------------
1 | DEVS="{{ docker_dev }}"
2 | VG="{{ docker_vg }}"
3 | DATA_SIZE="{{ docker_data_size }}"
4 | EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
5 |
--------------------------------------------------------------------------------
/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2:
--------------------------------------------------------------------------------
1 | DEVS="{{ docker_dev }}"
2 | VG="{{ docker_vg }}"
3 | DATA_SIZE="{{ docker_data_size }}"
4 | STORAGE_DRIVER=overlay2
5 | CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
6 | CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
7 | CONTAINER_ROOT_LV_SIZE="{{ container_root_lv_size }}"
8 |
--------------------------------------------------------------------------------
/roles/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | default_docker_storage_block_device: "/dev/vdb"
3 | default_docker_storage_volume_group: "docker_vg"
4 |
--------------------------------------------------------------------------------
/roles/docker/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: enable docker
3 | service:
4 | name: docker
5 | enabled: yes
6 |
7 | - name: restart docker
8 | service:
9 | name: docker
10 | enabled: yes
11 | state: restarted
12 |
--------------------------------------------------------------------------------
/roles/docker/templates/docker-storage-setup.j2:
--------------------------------------------------------------------------------
1 | DEVS={{ docker_storage_block_device }}
2 | VG={{ docker_storage_volume_group }}
3 |
--------------------------------------------------------------------------------
/roles/git-server/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | git_repo_home: "/opt/git"
3 | git_user: "git"
4 |
5 | # List of Authorized Keys to Add
6 | git_user_authorized_keys:
7 |
--------------------------------------------------------------------------------
/roles/git-server/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart httpd
3 | service:
4 | name: httpd
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/git-server/templates/git.conf.j2:
--------------------------------------------------------------------------------
1 | <VirtualHost *:80>
2 |
3 | SetEnv GIT_PROJECT_ROOT {{git_repo_home}}
4 | SetEnv GIT_HTTP_EXPORT_ALL
5 | SetEnv REMOTE_USER=$REDIRECT_REMOTE_USER
6 | AliasMatch ^/git/(.*/objects/[0-9a-f]{2}/[0-9a-f]{38})$ {{git_repo_home}}/$1
7 | AliasMatch ^/git/(.*/objects/pack/pack-[0-9a-f]{40}.(pack|idx))$ {{git_repo_home}}/$1
8 | ScriptAliasMatch \
9 | "(?x)^/git/(.*/(HEAD | \
10 | info/refs | \
11 | objects/info/[^/]+ | \
12 | git-(upload|receive)-pack))$" \
13 | /usr/libexec/git-core/git-http-backend/$1
14 |
15 | <Directory "/usr/libexec/git-core">
16 | Options +ExecCGI +FollowSymLinks
17 | Require all granted
18 | </Directory>
19 |
20 |
21 | <Directory "{{git_repo_home}}">
22 | Options FollowSymLinks
23 | AllowOverride None
24 | </Directory>
25 |
26 | </VirtualHost>
--------------------------------------------------------------------------------
/roles/gluster-crs-prerequisites/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | gluster_crs_required_packages: ['redhat-storage-server', 'heketi-client', 'iptables-services', 'iptables']
3 |
--------------------------------------------------------------------------------
/roles/gluster-crs-prerequisites/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Clear yum cache
3 | command: "yum clean all"
4 | ignore_errors: true
5 |
6 | - name: Install the required rpms
7 | package:
8 | name: "{{ item }}"
9 | state: latest
10 | with_items: "{{ gluster_crs_required_packages }}"
11 |
12 | - name: Stop firewalld
13 | service:
14 | name: firewalld
15 | state: stopped
16 | enabled: no
17 |
18 | - name: Start Glusterd and iptables
19 | service:
20 | name: "{{ item }}"
21 | state: started
22 | enabled: true
23 | with_items:
24 | - iptables
25 | - glusterd
26 |
--------------------------------------------------------------------------------
/roles/gluster-ports/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | gluster_ports: ['24007', '24008', '2222', '49152:49664']
3 | crs_ports: ['8080']
4 |
--------------------------------------------------------------------------------
/roles/gluster-ports/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart iptables
3 | service: name=iptables state=restarted
4 |
--------------------------------------------------------------------------------
/roles/gluster-rhsm-repos/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | gluster_repos: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms', 'rh-gluster-3-for-rhel-7-server-rpms']
3 |
--------------------------------------------------------------------------------
/roles/hostnames/test/inv:
--------------------------------------------------------------------------------
1 | [all:vars]
2 | dns_domain=example.com
3 |
4 | [openshift_masters]
5 | 192.168.124.41 dns_private_ip=1.1.1.41 dns_public_ip=192.168.124.41
6 | 192.168.124.117 dns_private_ip=1.1.1.117 dns_public_ip=192.168.124.117
7 |
8 | [openshift_nodes]
9 | 192.168.124.40 dns_private_ip=1.1.1.40 dns_public_ip=192.168.124.40
10 |
11 | #[dns]
12 | #192.168.124.117 dns_private_ip=1.1.1.117
13 |
--------------------------------------------------------------------------------
/roles/hostnames/test/roles:
--------------------------------------------------------------------------------
1 | ../../../roles/
--------------------------------------------------------------------------------
/roles/hostnames/test/test.retry:
--------------------------------------------------------------------------------
1 | 192.168.124.117
2 | 192.168.124.40
3 | 192.168.124.41
4 |
--------------------------------------------------------------------------------
/roles/hostnames/test/test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | roles:
4 | - role: hostnames
5 |
--------------------------------------------------------------------------------
/roles/hostnames/vars/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | counter: 1
3 |
--------------------------------------------------------------------------------
/roles/hostnames/vars/records.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Building Records"
3 | set_fact:
4 | dns_records_add:
5 | - view: private
6 | zone: example.com
7 | entries:
8 | - type: A
9 | hostname: master1.example.com
10 | ip: 172.16.15.94
11 | - type: A
12 | hostname: node1.example.com
13 | ip: 172.16.15.86
14 | - type: A
15 | hostname: node2.example.com
16 | ip: 172.16.15.87
17 | - view: public
18 | zone: example.com
19 | entries:
20 | - type: A
21 | hostname: master1.example.com
22 | ip: 10.3.10.116
23 | - type: A
24 | hostname: node1.example.com
25 | ip: 10.3.11.46
26 | - type: A
27 | hostname: node2.example.com
28 | ip: 10.3.12.6
29 |
--------------------------------------------------------------------------------
/roles/master-prerequisites/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install git
3 | package:
4 | name: git
5 | state: latest
6 |
--------------------------------------------------------------------------------
/roles/node-network-manager/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install NetworkManager
3 | package:
4 | name: NetworkManager
5 | state: present
6 |
7 | - name: configure NetworkManager
8 | lineinfile:
9 | dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
10 | regexp: '^{{ item }}='
11 | line: '{{ item }}=yes'
12 | state: present
13 | create: yes
14 | with_items:
15 | - 'USE_PEERDNS'
16 | - 'NM_CONTROLLED'
17 |
18 | - name: enable and start NetworkManager
19 | service:
20 | name: NetworkManager
21 | state: restarted
22 | enabled: yes
23 |
--------------------------------------------------------------------------------
/roles/openshift-emptydir-quota/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | node_local_quota_per_fsgroup: 512Mi
3 |
--------------------------------------------------------------------------------
/roles/openshift-emptydir-quota/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart openshift-node
3 | service:
4 | name: "{{ openshift.common.service_type }}-node"
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/openshift-emptydir-quota/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Modify the node configuration
3 | replace:
4 | dest: /etc/origin/node/node-config.yaml
5 | regexp: '^(\s*)perFSGroup:.*$'
6 | replace: '\1perFSGroup: {{ node_local_quota_per_fsgroup }}'
7 | backup: yes
8 | notify:
9 | - restart openshift-node
10 |
--------------------------------------------------------------------------------
/roles/openshift-prep/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Whether to install the required packages and update all packages on the host
3 | manage_packages: true
4 | install_debug_packages: false
5 | required_packages:
6 | - wget
7 | - git
8 | - net-tools
9 | - bind-utils
10 | - bridge-utils
11 | debug_packages:
12 | - bash-completion
13 | - vim-enhanced
14 |
--------------------------------------------------------------------------------
/roles/openshift-prep/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Starting Point for OpenShift Installation and Configuration
3 | - include: prerequisites.yml
4 | tags: [prerequisites]
5 |
--------------------------------------------------------------------------------
/roles/openshift-pv-cleanup/README.md:
--------------------------------------------------------------------------------
1 | # openshift-pv-cleanup
2 |
3 | The purpose of this role is to remove all persistent volume claims and persistent volumes from a cluster before decommissioning it.
4 |
5 | ## Usage
6 |
7 | ```
8 | ansible-playbook -i inventory roles/openshift-pv-cleanup/test/main.yml
9 | ```
10 |
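11 | The test playbook at `roles/openshift-pv-cleanup/test/main.yml` targets an
12 | inventory group named `seed-hosts`. A minimal sketch of such an inventory
13 | (the hostname is a placeholder) could be:
14 |
15 | ```
16 | [seed-hosts]
17 | master1.example.com
18 | ```
19 |
20 | The host in `seed-hosts` is assumed to be able to run `oc` as a cluster-admin,
21 | since the role simply shells out to the `oc` client.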
--------------------------------------------------------------------------------
/roles/openshift-pv-cleanup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Get all projects
4 | command: >
5 | oc get projects -o jsonpath='{ .items[*].metadata.name }'
6 | register: projects
7 |
8 | - name: Delete all persistent volume claims in cluster
9 | command: >
10 | oc delete pvc --all -n {{ item }}
11 | with_items: "{{ projects.stdout.split(' ') }}"
12 |
13 | - name: Delete all persistent volumes in cluster
14 | command: >
15 | oc delete pv --all
16 |
--------------------------------------------------------------------------------
/roles/openshift-pv-cleanup/test/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - hosts: seed-hosts
4 | roles:
5 | - openshift-pv-cleanup
6 |
--------------------------------------------------------------------------------
/roles/openshift-pv-cleanup/test/roles:
--------------------------------------------------------------------------------
1 | ../../
--------------------------------------------------------------------------------
/roles/openshift-volume-quota/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | local_volumes_device: "/dev/sdc"
3 | local_volumes_fstype: "xfs"
4 | local_volumes_fsopts: "gquota"
5 | local_volumes_path: "/var/lib/origin/openshift.local.volumes"
6 |
--------------------------------------------------------------------------------
/roles/openshift-volume-quota/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create filesystem for /var/lib/origin/openshift.local.volumes
3 | filesystem:
4 | fstype: "{{ local_volumes_fstype }}"
5 | dev: "{{ local_volumes_device }}"
6 |
7 | - name: Create local volumes directory
8 | file:
9 | path: "{{ local_volumes_path }}"
10 | state: directory
11 | recurse: yes
12 |
13 | - name: Create fstab entry
14 | mount:
15 | name: "{{ local_volumes_path }}"
16 | src: "{{ local_volumes_device }}"
17 | fstype: "{{ local_volumes_fstype }}"
18 | opts: "{{ local_volumes_fsopts }}"
19 | state: present
20 |
21 | - name: Mount fstab entry
22 | mount:
23 | name: "{{ local_volumes_path }}"
24 | src: "{{ local_volumes_device }}"
25 | fstype: "{{ local_volumes_fstype }}"
26 | opts: "{{ local_volumes_fsopts }}"
27 | state: mounted
28 |
--------------------------------------------------------------------------------
/roles/openstack-create-cinder-registry/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - os_volume:
3 | display_name: "{{ cinder_hosted_registry_name }}"
4 | size: "{{ cinder_hosted_registry_size_gb }}"
5 | register: cinder_registry_volume
6 |
--------------------------------------------------------------------------------
/roles/openstack-stack/README.md:
--------------------------------------------------------------------------------
1 | # Role openstack-stack
2 |
3 | Role for spinning up instances using OpenStack Heat.
4 |
5 | ## To Test
6 |
7 | ```
8 | ansible-playbook openshift-ansible-contrib/roles/openstack-stack/test/stack-create-test.yml
9 | ```
10 |
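11 | The test playbook runs against `localhost` and expects the usual OpenStack
12 | provisioning variables to be defined, e.g. via `-e @my-vars.yml`. A sketch of
13 | such a vars file, with purely illustrative placeholder values, might be:
14 |
15 | ```
16 | public_dns_domain: example.com
17 | public_dns_nameservers: ['8.8.8.8']
18 | openstack_subnet_prefix: '192.168.99'
19 | openstack_ssh_public_key: my-keypair
20 | openstack_default_image_name: rhel-7
21 | openstack_default_flavor: m1.medium
22 | openstack_external_network_name: public
23 | ```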
--------------------------------------------------------------------------------
/roles/openstack-stack/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | stack_state: 'present'
4 |
5 | ssh_ingress_cidr: 0.0.0.0/0
6 | node_ingress_cidr: 0.0.0.0/0
7 | master_ingress_cidr: 0.0.0.0/0
8 | lb_ingress_cidr: 0.0.0.0/0
9 | bastion_ingress_cidr: 0.0.0.0/0
10 | num_etcd: 0
11 | num_masters: 1
12 | num_nodes: 1
13 | num_dns: 1
14 | num_infra: 1
15 | nodes_to_remove: []
16 | etcd_volume_size: 2
17 | dns_volume_size: 1
18 | lb_volume_size: 5
19 | use_bastion: False
20 | ui_ssh_tunnel: False
21 | provider_network: False
22 |
--------------------------------------------------------------------------------
/roles/openstack-stack/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - role: common
4 |
--------------------------------------------------------------------------------
/roles/openstack-stack/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: cleanup temp files
4 | file:
5 | path: "{{ stack_template_pre.path }}"
6 | state: absent
7 |
--------------------------------------------------------------------------------
/roles/openstack-stack/tasks/generate-templates.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create HOT stack template prefix
3 | register: stack_template_pre
4 | tempfile:
5 | state: directory
6 | prefix: openshift-ansible
7 |
8 | - name: set template paths
9 | set_fact:
10 | stack_template_path: "{{ stack_template_pre.path }}/stack.yaml"
11 | user_data_template_path: "{{ stack_template_pre.path }}/user-data"
12 |
13 | - name: generate HOT stack template from jinja2 template
14 | template:
15 | src: heat_stack.yaml.j2
16 | dest: "{{ stack_template_path }}"
17 |
18 | - name: generate HOT server template from jinja2 template
19 | template:
20 | src: heat_stack_server.yaml.j2
21 | dest: "{{ stack_template_pre.path }}/server.yaml"
22 |
23 | - name: generate user_data from jinja2 template
24 | template:
25 | src: user_data.j2
26 | dest: "{{ user_data_template_path }}"
27 |
--------------------------------------------------------------------------------
/roles/openstack-stack/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Generate the templates
4 | include: generate-templates.yml
5 | when:
6 | - stack_state == 'present'
7 |
8 | - name: Handle the Stack (create/delete)
9 | ignore_errors: False
10 | register: stack_create
11 | os_stack:
12 | name: "{{ stack_name }}"
13 | state: "{{ stack_state }}"
14 | template: "{{ stack_template_path | default(omit) }}"
15 | wait: yes
16 |
17 | # NOTE(bogdando) OS::Neutron::Subnet doesn't support live updates for
18 | # dns_nameservers, so we can't do that for the "create stack" task.
19 | - include: subnet_update_dns_servers.yaml
20 | when:
21 | - private_dns_server is defined
22 | - stack_state == 'present'
23 |
24 | - name: CleanUp
25 | include: cleanup.yml
26 | when:
27 | - stack_state == 'present'
28 |
--------------------------------------------------------------------------------
/roles/openstack-stack/tasks/subnet_update_dns_servers.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Live update the subnet's DNS servers
3 | os_subnet:
4 | name: openshift-ansible-{{ stack_name }}-subnet
5 | network_name: openshift-ansible-{{ stack_name }}-net
6 | state: present
7 | use_default_subnetpool: yes
8 | dns_nameservers: "{{ [private_dns_server|default(public_dns_nameservers[0])]|union(public_dns_nameservers)|unique }}"
9 | when: not provider_network
10 |
--------------------------------------------------------------------------------
/roles/openstack-stack/templates/user_data.j2:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | disable_root: true
3 |
4 | system_info:
5 | default_user:
6 | name: openshift
7 | sudo: ["ALL=(ALL) NOPASSWD: ALL"]
8 |
9 | write_files:
10 | - path: /etc/sudoers.d/00-openshift-no-requiretty
11 | permissions: '0440'
12 | content: |
13 | Defaults:openshift !requiretty
14 |
--------------------------------------------------------------------------------
/roles/openstack-stack/test/roles:
--------------------------------------------------------------------------------
1 | ../../../roles/
--------------------------------------------------------------------------------
/roles/openstack-stack/test/stack-create-test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: True
4 | become: False
5 | roles:
6 | - role: openstack-stack
7 | stack_name: test-stack
8 | dns_domain: "{{ public_dns_domain }}"
9 | dns_nameservers: "{{ public_dns_nameservers }}"
10 | subnet_prefix: "{{ openstack_subnet_prefix }}"
11 | ssh_public_key: "{{ openstack_ssh_public_key }}"
12 | openstack_image: "{{ openstack_default_image_name }}"
13 | etcd_flavor: "{{ openstack_default_flavor }}"
14 | master_flavor: "{{ openstack_default_flavor }}"
15 | node_flavor: "{{ openstack_default_flavor }}"
16 | infra_flavor: "{{ openstack_default_flavor }}"
17 | dns_flavor: "{{ openstack_default_flavor }}"
18 | external_network: "{{ openstack_external_network_name }}"
19 |
--------------------------------------------------------------------------------
/roles/prerequisites/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | openshift_required_packages: ['iptables', 'iptables-services', 'NetworkManager', 'docker']
3 |
--------------------------------------------------------------------------------
/roles/prerequisites/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/roles/registry-scaleup/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/roles/registry-scaleup/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather facts
3 | openshift_facts:
4 | role: common
5 |
6 | - name: use the default project
7 | shell: "{{ openshift.common.client_binary }} project default"
8 |
9 | - name: Count the infrastructure nodes
10 | shell: "{{ openshift.common.client_binary }} get nodes --show-labels | grep role=infra -c"
11 | register: nodes
12 | when: node_type == "infra"
13 |
14 | - name: Scale the registry
15 | shell: "{{ openshift.common.client_binary }} scale dc/docker-registry --replicas={{ nodes.stdout }}"
16 | when: node_type == "infra"
17 |
--------------------------------------------------------------------------------
/roles/rhsm-repos/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # repos from https://docs.openshift.com/container-platform/3.9/install_config/install/host_preparation.html#host-registration
3 | openshift_required_repos:
4 | - 'rhel-7-server-rpms'
5 | - 'rhel-7-server-extras-rpms'
6 | - 'rhel-7-server-ose-3.9-rpms'
7 | - 'rhel-7-fast-datapath-rpms'
8 | - 'rhel-7-server-ansible-2.4-rpms'
9 |
--------------------------------------------------------------------------------
/roles/rhsm-subscription/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | rhsm_server: subscription.rhn.redhat.com
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/rhsm-timeout/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/roles/rhsm-timeout/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather facts
3 | openshift_facts:
4 | role: common
5 |
6 | - name: Allow rhsm a longer timeout to help out with subscription-manager
7 | lineinfile:
8 | dest: /etc/rhsm/rhsm.conf
9 | line: 'server_timeout=600'
10 | insertafter: '^proxy_password ='
11 | when: ansible_distribution == "RedHat"
12 |
--------------------------------------------------------------------------------
/roles/rhsm/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | openshift_required_repos: ['rhel-7-server-rpms', 'rhel-7-server-extras-rpms', 'rhel-7-server-ose-3.9-rpms', 'rhel-7-fast-datapath-rpms']
3 |
--------------------------------------------------------------------------------
/roles/router-scaleup/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/roles/router-scaleup/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather facts
3 | openshift_facts:
4 | role: common
5 |
6 | - name: use the default project
7 | shell: "{{ openshift.common.client_binary }} project default"
8 |
9 | - name: Count the infrastructure nodes
10 | shell: "{{ openshift.common.client_binary }} get nodes --show-labels | grep role=infra -c"
11 | register: nodes
12 | when: node_type == "infra"
13 |
14 | - name: Scale the router
15 | shell: "{{ openshift.common.client_binary }} scale dc/router --replicas={{ nodes.stdout }}"
16 | when: node_type == "infra"
17 |
--------------------------------------------------------------------------------
/roles/seed-git-server/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | openshift_git_repo_home: "{{ git_repo_home }}/openshift"
3 | openshift_example_repos:
4 | - "https://github.com/openshift/cakephp-ex.git"
5 | - "https://github.com/openshift/dancer-ex.git"
6 | - "https://github.com/jboss-openshift/openshift-quickstarts.git"
7 | - "https://github.com/openshift/django-ex.git"
8 | - "https://github.com/openshift/nodejs-ex.git"
9 | - "https://github.com/openshift/rails-ex.git"
10 |
--------------------------------------------------------------------------------
/roles/seed-git-server/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - role: git-server
4 |
--------------------------------------------------------------------------------
/roles/seed-git-server/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create OpenShift Git Repository Content Home
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | owner: "{{ git_user }}"
7 | group: "{{ git_user }}"
8 | with_items:
9 | - "{{ git_repo_home }}"
10 | - "{{ openshift_git_repo_home }}"
11 |
12 | - name: Clone OpenShift Examples
13 | git:
14 | repo: "{{ item }}"
15 | bare: yes
16 | dest: "{{ openshift_git_repo_home }}/{{ item | basename }}"
17 | with_items:
18 | - "{{ openshift_example_repos }}"
19 | become: yes
20 | become_user: "{{ git_user }}"
21 |
--------------------------------------------------------------------------------
/roles/static_inventory/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - role: common
4 |
--------------------------------------------------------------------------------
/roles/static_inventory/tasks/checkpoint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check for static inventory dir
3 | stat:
4 | path: "{{ inventory_path }}"
5 | register: stat_inventory_path
6 |
7 | - name: create static inventory dir
8 | file:
9 | path: "{{ inventory_path }}"
10 | state: directory
11 | mode: 0750
12 | when: not stat_inventory_path.stat.exists
13 |
14 | - name: create inventory from template
15 | template:
16 | src: inventory.j2
17 | dest: "{{ inventory_path }}/hosts"
18 |
--------------------------------------------------------------------------------
/roles/static_inventory/tasks/filter_out_new_app_nodes.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add all new app nodes to new_app_nodes
3 | when:
4 | - 'oc_old_app_nodes is defined'
5 | - 'oc_old_app_nodes | list'
6 | - 'node.name not in oc_old_app_nodes'
7 | - 'node["metadata"]["sub-host-type"] == "app"'
8 | register: result
9 | set_fact:
10 | new_app_nodes: '{{ new_app_nodes }} + [ {{ node }} ]'
11 |
12 | - name: If the node was added to new_nodes, remove it from registered nodes
13 | set_fact:
14 | registered_nodes: '{{ registered_nodes | difference([ node ]) }}'
15 | when: 'not result | skipped'
16 |
--------------------------------------------------------------------------------
/roles/static_inventory/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - debug:
3 | msg: >
4 | Static inventory is DEPRECATED and no longer supported!
5 | Use https://github.com/openshift/openshift-ansible/blob/master/playbooks/openstack/sample-inventory/inventory.py
6 | instead.
7 |
8 | - name: Remove any existing inventory
9 | file:
10 | path: "{{ inventory_path }}/hosts"
11 | state: absent
12 |
13 | - name: Refresh the inventory
14 | meta: refresh_inventory
15 |
16 | - name: Generate in-memory inventory
17 | include: openstack.yml
18 |
19 | - name: Checkpoint in-memory data into a static inventory
20 | include: checkpoint.yml
21 |
22 | - name: Generate SSH config for accessing hosts via bastion
23 | include: sshconfig.yml
24 | when: use_bastion|bool
25 |
26 | - name: Configure SSH tunneling to access UI
27 | include: sshtun.yml
28 | become: true
29 | when:
30 | - use_bastion|bool
31 | - ui_ssh_tunnel|bool
32 |
--------------------------------------------------------------------------------
/roles/static_inventory/tasks/sshconfig.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - debug:
3 | msg: >
4 | Static SSH config and the bastion are DEPRECATED and no longer supported!
5 | Use https://github.com/openshift/openshift-ansible/blob/master/playbooks/openstack/sample-inventory/inventory.py
6 | instead.
7 |
8 | - name: set ssh proxy command prefix for accessing nodes via bastion
9 | set_fact:
10 | ssh_proxy_command: >-
11 | ssh {{ ssh_options }}
12 | -i {{ private_ssh_key }}
13 | {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }}
14 |
15 | - name: regenerate ssh config
16 | template:
17 | src: openstack_ssh_config.j2
18 | dest: "{{ ssh_config_path }}"
19 | mode: 0644
20 |
--------------------------------------------------------------------------------
/roles/static_inventory/tasks/sshtun.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create ssh tunnel systemd service
3 | template:
4 | src: ssh-tunnel.service.j2
5 | dest: /etc/systemd/system/ssh-tunnel.service
6 | mode: 0644
7 |
8 | - name: reload systemd after the unit file update
9 | command: systemctl daemon-reload
10 |
11 | - name: Enable ssh tunnel service
12 | service:
13 | name: ssh-tunnel
14 | enabled: true
15 | state: restarted
16 |
--------------------------------------------------------------------------------
/roles/static_inventory/templates/openstack_ssh_config.j2:
--------------------------------------------------------------------------------
1 | Host *
2 | IdentitiesOnly yes
3 |
4 | Host bastion
5 | Hostname {{ hostvars['bastion'].ansible_host }}
6 | IdentityFile {{ hostvars['bastion'].ansible_private_key_file }}
7 | User {{ ssh_user }}
8 | StrictHostKeyChecking no
9 | UserKnownHostsFile=/dev/null
10 |
11 | {% for host in groups['all'] | difference([groups['bastions'][0]]) %}
12 |
13 | Host {{ host }}
14 | Hostname {{ hostvars[host].ansible_host }}
15 | ProxyCommand {{ ssh_proxy_command }} -W {{ hostvars[host].private_v4 }}:22
16 | IdentityFile {{ hostvars[host].ansible_private_key_file }}
17 | User {{ ssh_user }}
18 | StrictHostKeyChecking no
19 | UserKnownHostsFile=/dev/null
20 |
21 | {% endfor %}
22 |
--------------------------------------------------------------------------------
/roles/static_inventory/templates/ssh-tunnel.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Set up ssh tunneling for OpenShift cluster UI
3 | After=network.target
4 |
5 | [Service]
6 | ExecStart=/usr/bin/ssh -NT -o \
7 | ServerAliveInterval=60 -o \
8 | UserKnownHostsFile=/dev/null -o \
9 | StrictHostKeyChecking=no -o \
10 | ExitOnForwardFailure=no -i \
11 | {{ private_ssh_key }} {{ ssh_user }}@{{ hostvars['bastion'].ansible_host }} \
12 | -L 0.0.0.0:{{ ui_port }}:{{ target_ip }}:{{ ui_port }}
13 |
14 |
15 | # Restart every >2 seconds to avoid StartLimitInterval failure
16 | RestartSec=5
17 | Restart=always
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/roles/update-instances/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/roles/update-instances/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather facts
3 | openshift_facts:
4 | role: common
5 |
6 | - block:
7 | - name: Clear yum cache
8 | command: "yum clean all"
9 | ignore_errors: true
10 |
11 | - name: Update rpms
12 | package:
13 | name: "*"
14 | state: latest
15 |
16 | when: not openshift.common.is_atomic | bool
17 |
--------------------------------------------------------------------------------
/roles/validate-app/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | project: validate
3 | app: cakephp-mysql-example
4 |
--------------------------------------------------------------------------------
/roles/validate-app/library/openshift_facts.py:
--------------------------------------------------------------------------------
1 | /usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py
--------------------------------------------------------------------------------
/roles/validate-etcd/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | validate_etcd_short_hostname: false
3 |
--------------------------------------------------------------------------------
/roles/validate-etcd/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check to see if etcd client is installed
3 | stat:
4 | path: /usr/bin/etcdctl
5 | register: etcd_result
6 |
7 | - name: Validate etcd
8 | command: "etcdctl -C https://{{ ansible_hostname if validate_etcd_short_hostname else ansible_fqdn }}:2379 --ca-file=/etc/origin/master/master.etcd-ca.crt --cert-file=/etc/origin/master/master.etcd-client.crt --key-file=/etc/origin/master/master.etcd-client.key cluster-health | grep 'cluster is'"
9 | register: etcd_health
10 | when: etcd_result.stat.exists
11 |
12 | - name: ETCD Cluster is healthy
13 | debug:
14 | msg: "Cluster is healthy"
15 | when: etcd_result.stat.exists and etcd_health.stdout.find('cluster is healthy') != -1
16 |
17 | - name: ETCD Cluster is NOT healthy
18 | debug:
19 | msg: "Cluster is NOT healthy"
20 | when: etcd_result.stat.exists and etcd_health.stdout.find('cluster is healthy') == -1
21 |
--------------------------------------------------------------------------------
/roles/validate-masters/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate the public address
3 | uri:
4 | url: "https://{{ hostvars['localhost']['openshift_master_cluster_public_hostname'] }}:{{ hostvars['localhost']['console_port'] }}/healthz/ready"
5 | validate_certs: False
6 | status_code: 200
7 | method: GET
8 | - name: Validate the internal address
9 | uri:
10 | url: "https://{{ hostvars['localhost']['openshift_master_cluster_hostname'] }}:{{ hostvars['localhost']['console_port'] }}/healthz/ready"
11 | validate_certs: False
12 | status_code: 200
13 | method: GET
14 | - name: Validate the master address
15 | uri:
16 | url: "https://{{ inventory_hostname }}:{{ hostvars['localhost']['console_port'] }}/healthz/ready"
17 | validate_certs: False
18 | status_code: 200
19 | method: GET
20 |
--------------------------------------------------------------------------------
/roles/validate-public/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate the public address
3 | uri:
4 | url: "https://{{ hostvars['localhost']['openshift_master_cluster_public_hostname'] }}:{{ hostvars['localhost']['console_port'] }}/healthz/ready"
5 | validate_certs: False
6 | status_code: 200
7 | method: GET
8 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | # This flag says that the code is written to work on both Python 2 and Python
3 | # 3. If at all possible, it is good practice to do this. If you cannot, you
4 | # will need to generate wheels for each Python version that you support.
5 | universal=1
6 |
7 | [yamllint]
8 | excludes=.tox,files
9 |
10 | [flake8]
11 | exclude=.tox/*,inventory/*,reference-architecture/*,misc/gce-federation/library/*,misc/gce-federation/inventory/*
12 | max_line_length = 120
13 | ignore = E501,T003
14 |
15 | [molecule_tests]
16 | excludes=.tox,.eggs,.molecule
17 |
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
1 | flake8
2 | flake8-mutable
3 | flake8-print==3.0.1
4 | PyYAML
5 | yamllint
6 | molecule[lint]
7 | docker-py==1.10.6
8 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | minversion=2.3.1
3 | envlist =
4 | py27-ansible22-molecule
5 | py{27,35}-ansible22-{flake8,yamllint}
6 | skipsdist=True
7 | skip_missing_interpreters=True
8 |
9 | [testenv]
10 | deps =
11 | -rtest-requirements.txt
12 | py35-flake8: flake8-bugbear
13 | ansible22: ansible~=2.2
14 |
15 | commands =
16 | flake8: flake8
17 | yamllint: python setup.py yamllint
18 | molecule: python setup.py molecule_tests
19 |
--------------------------------------------------------------------------------
/vagrant/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking = no
3 | retry_files_enabled = False
4 |
5 | [ssh_connection]
6 | ssh_args = -o ControlMaster=auto -o ControlPersist=600s
7 | pipelining = True
8 |
--------------------------------------------------------------------------------
/vagrant/provision/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Capture network variable:
4 | NETWORK_BASE=$1
5 | sudo -i
6 |
7 | echo "Check eth1 ip config for netbase: $NETWORK_BASE"
8 |
9 | if ip a show dev eth1 | grep -q "inet $NETWORK_BASE"; then
10 | echo "eth1 ip detected"
11 | else
12 | echo "eth1 missing ip; restarting interface"
13 | ifdown eth1 && ifup eth1
14 | fi
15 |
--------------------------------------------------------------------------------
/vagrant/roles/rhsm-repos:
--------------------------------------------------------------------------------
1 | ../../roles/rhsm-repos/
--------------------------------------------------------------------------------
/vagrant/roles/rhsm-subscription:
--------------------------------------------------------------------------------
1 | ../../roles/rhsm-subscription/
--------------------------------------------------------------------------------
/vagrant/tasks/install_bootstrap_enterprise.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - package:
3 | name: atomic-openshift-utils
4 | state: present
5 |
--------------------------------------------------------------------------------
/vagrant/tasks/install_bootstrap_origin.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - package:
3 | name: git
4 | state: present
5 | - git:
6 | repo: https://github.com/openshift/openshift-ansible
7 | dest: ~/openshift-ansible
8 | force: yes
9 | update: yes
10 | become: yes
11 | become_user: vagrant
12 | - package:
13 | name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
14 | state: present
15 | - replace:
16 | dest: /etc/yum.repos.d/epel.repo
17 | regexp: '^enabled=1'
18 | replace: 'enabled=0'
19 | - yum:
20 | name: "{{ item }}"
21 | enablerepo: epel
22 | state: present
23 | when: ansible_distribution != 'Fedora'
24 | with_items:
25 | - ansible
26 | - pyOpenSSL
27 | - dnf:
28 | name: "{{ item }}"
29 | enablerepo: epel
30 | state: present
31 | when: ansible_distribution == 'Fedora'
32 | with_items:
33 | - ansible
34 | - pyOpenSSL
35 |
--------------------------------------------------------------------------------