├── LICENSE ├── README.md ├── images ├── nested-kubernetes.png ├── non-nested-kubernetes.png ├── ocp-3.11.png ├── standalone-kubernetes.png ├── standalone-openshift-3.7.png └── standalone-openshift-3.9.png └── install ├── kubernetes ├── Nested-contrail-create-workflow.md ├── docker-install.md ├── install-kubernetes.md ├── nested-kubernetes.md ├── nodeport-kube-proxy-setup.md ├── non-nested-kubernetes.md ├── standalone-kubernete-ansible.md ├── standalone-kubernetes-centos.md ├── standalone-kubernetes-ubuntu.md └── templates │ ├── contrail-single-step-cni-install-centos.yaml │ └── contrail-single-step-cni-install-ubuntu.yaml └── openshift ├── 3.11 ├── nested-mode-openshift.md ├── redhat │ └── configurations.md └── standalone-openshift.md ├── 3.7 └── standalone-openshift.md ├── 3.9 ├── centos │ └── configurations.md ├── nested-mode-openshift.md ├── redhat │ └── configurations.md └── standalone-openshift.md ├── README.md └── legacy_files └── 3.7 └── install-files ├── all-in-one ├── contrail-installer.yaml ├── iptables-master ├── iptables-node ├── ose-install ├── ose-install-ha └── ose-prerequisites.yml ├── ose-install └── ose-prerequisites.yml /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Contrail + ( Kubernetes / Openshift ) 2 | 3 | Kubernetes (K8s) is an open source container management platform. It provides a portable platform across public and private clouds. K8s supports deployment, scaling and auto-healing of applications. More details can be found at: http://kubernetes.io/docs/whatisk8s/. 4 | 5 | There is a need to provide pod addressing, network isolation, policy based security, gateway, SNAT, loadalancer and service chaining capability in Kubernetes orchestratation. To this end K8s supports a framework for most of the basic network connectivity. This pluggable framework is called Container Network Interface (CNI). Opencontrail will support CNI for Kubernetes. 6 | 7 | Currently K8s provides a flat networking model wherein all pods can talk to each other. Opencontrail will add additional networking functionality to the solution - multi-tenancy, network isolation, micro-segmentation with network policies, load-balancing etc. 
8 | 9 | ![Contrail Solution](images/standalone-kubernetes.png) 10 | 11 | # Installation: Quick and Simple 12 | 13 | ***Disclaimer: 14 | The one-step install is for those who are waiting with bated breath to get their hands on a Contrail 15 | Kubernetes cluster. This is meant as a quickstart install and is not as frequently validated to work 16 | as the [standard mode](https://github.com/Juniper/contrail-kubernetes-docs/blob/master/README.md#installation-standard-and-recommended) of install. Though functionally identical to standard install, we don't recommend 17 | this for anything other than quick tryouts.*** 18 | 19 | ### [1-Step Standalone Kubernetes - Ubuntu](install/kubernetes/standalone-kubernetes-ubuntu.md) 20 | ### [1-Step Standalone Kubernetes - Centos](/install/kubernetes/standalone-kubernetes-centos.md) 21 | 22 | # Installation: Standard and Recommended 23 | 24 | ## Deployment Modes - Kubernetes 25 | 26 | Contrail provides more than one way of providing networking to a K8s cluster. 
27 | 28 | ### [Standalone Kubernetes](install/kubernetes/standalone-kubernete-ansible.md) 29 | ### [Nested Kubernetes](install/kubernetes/nested-kubernetes.md) 30 | ### [Non-Nested Kubernetes](install/kubernetes/non-nested-kubernetes.md) 31 | 32 | ## Deployment Modes - Openshift 33 | 34 | ### [Standalone Openshift 3.11](install/openshift/3.11/standalone-openshift.md) 35 | ### [Nested Openshift 3.11](install/openshift/3.11/nested-mode-openshift.md) 36 | ### [Standalone Openshift 3.9](install/openshift/3.9/standalone-openshift.md) 37 | ### [Nested Openshift 3.9](install/openshift/3.9/nested-mode-openshift.md) 38 | ### [Standalone Openshift 3.7](install/openshift/3.7/standalone-openshift.md) 39 | -------------------------------------------------------------------------------- /images/nested-kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/images/nested-kubernetes.png -------------------------------------------------------------------------------- /images/non-nested-kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/images/non-nested-kubernetes.png -------------------------------------------------------------------------------- /images/ocp-3.11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/images/ocp-3.11.png -------------------------------------------------------------------------------- /images/standalone-kubernetes.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/images/standalone-kubernetes.png -------------------------------------------------------------------------------- /images/standalone-openshift-3.7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/images/standalone-openshift-3.7.png -------------------------------------------------------------------------------- /images/standalone-openshift-3.9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/images/standalone-openshift-3.9.png -------------------------------------------------------------------------------- /install/kubernetes/Nested-contrail-create-workflow.md: -------------------------------------------------------------------------------- 1 | 1. Upload an image to Openstack. 2 | 3 | On the Openstack master node: 4 | 5 | a. Download the image 6 | ``` 7 | wget http://10.87.129.3/pxe/Standard/dpdkvm/vin/x-traffic1.qcow2 8 | ``` 9 | b. Source openstack admin credentials 10 | ``` 11 | source /etc/kolla/kolla-toolbox/admin-openrc.sh 12 | ``` 13 | c. Upload the image using glance 14 | ``` 15 | glance image-create --name contrail-u16.04 --visibility=public --disk-format qcow2 --container-format bare --file x-traffic1.qcow2 16 | ``` 17 | 18 | 2. Create a compute flavor to use on Openstack GUI. 19 | 20 | a. On the Openstack UI: ADMIN -> System -> Flavors 21 | 22 | b. Create Flavor 23 | 24 | Sample large flavor: VCPU: 4, RAM: 4096 MB, Root Disk: 75 GB 25 | 26 | 3. Create a Host Aggregate and Availability Zone to use on Openstack GUI. 27 | 28 | a. On the Openstack UI: ADMIN -> System -> Host Aggregates 29 | 30 | b. Create Host Aggregate. 
31 | 32 | Remember to add compute Hosts to the Host Aggregate. 33 | 34 | 4. Import Key Pairs if need be. (Will be required for many scenarios) 35 | 36 | a. On the Openstack UI: Project -> Compute -> Key Pairs 37 | 38 | b. Import Key Pair. 39 | 40 | There are multiple ways to do this. 41 | 42 | . Import your mac's keypair, so you can access the VM directly. This works only if your VM can be reached directly from your mac. 43 | 44 | . If your VM does not have external connectivity and you usually ssh from computes using meta IP, 45 | one workflow could be to create to generate ssh-keypairs (i.e ssh-keygen) on all your computes and import 46 | their public keys together as a single key pair in Openstack. 47 | 48 | 5. Create a Virtual Network of your choice in Contrail GUI. 49 | 50 | a. On Contrail UI: Configure -> Networking -> Networks 51 | 52 | b. Create a Network. 53 | 54 | c. Enable SNAT on the Network 55 | 56 | i. Network -> Edit -> Advanced Options -> Select SNAT 57 | ii. Save 58 | 59 | 6. Create VM instances. 60 | 61 | a. On the Openstack UI: Project -> Compute -> instances -> Launch Instance 62 | 63 | b. Fill out the following: 64 | ``` 65 | . name 66 | . Availability Zone (created earlier) 67 | . Number of desired instances. 68 | . Image 69 | . Flavor 70 | . Network to use. 71 | . Key pair to use (created earlier) 72 | ``` 73 | 74 | 7. Login to the Virtual Machines. 75 | 76 | a. Determine the compute host on which the VM is running, by going here. 77 | 78 | Project -> Compute -> Instances -> < Instance Name > 79 | 80 | b. Figure out the meta IP address for the instance. 81 | 82 | i. On the Contrail GUI goto: Monitor -> Infrastructure -> Virtual Routers -> Host name 83 | ii. In the instance column, look for the name of the Virtual Machine. 84 | iii. Expand the instance entry to figure out the meta_ip_addr. 85 | iv. From the compute host, ssh to the meta_ip_addr 86 | 87 | 8. Create a K8s on the Virtual Machines. 
88 | 89 | There are couple of options here: 90 | 91 | a. [1-step install](https://github.com/Juniper/contrail-kubernetes-docs/blob/master/install/kubernetes/standalone-kubernetes-ubuntu.md) 92 | 93 | b. [Ansible playbook](https://github.com/Juniper/contrail-kubernetes-docs/blob/master/install/kubernetes/standalone-kubernete-ansible.md) 94 | 95 | In this case you need to run asible from a node or another virtual machine that can reach the k8s virtual machines. 96 | 97 | 98 | 9. [Install Nested Contrail cluster](https://github.com/Juniper/contrail-kubernetes-docs/blob/master/install/kubernetes/nested-kubernetes.md) 99 | 100 | 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /install/kubernetes/docker-install.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/8c261de2abb7e5a1c935d4cb3e56963031c3b448/install/kubernetes/docker-install.md -------------------------------------------------------------------------------- /install/kubernetes/install-kubernetes.md: -------------------------------------------------------------------------------- 1 | 2 | # Disclaimer 3 | 4 | 5 | ***This guide is meant to be a helpful reference to install Kubernetes. It is NOT intended to be an authoritative guide for Kubernetes install. It is merely steps that we use everyday and happens to work for us.*** 6 | 7 | Please refer to canonical documentation from Kubernetes community [here](https://kubernetes.io/docs/setup/) 8 | 9 | # Installing Kuberntes on Ubuntu-16.04 hosts 10 | 11 | ## Installing Kubernetes on Master node 12 | 13 | 1. Prepare the node by running the following pre-requisites 14 | ``` 15 | swapoff -a 16 | sudo apt-get update 17 | sudo apt-get install -y curl software-properties-common apt-transport-https 18 | ``` 19 | 20 | 2. 
Install Docker 21 | ``` 22 | sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 23 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 24 | sudo apt-get update 25 | sudo apt-get install -y docker-ce # Install docker 26 | sudo service docker status # Verify that docker service is running 27 | ``` 28 | 29 | 3. Add Kubernetes repo 30 | ``` 31 | sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 32 | sudo echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list 33 | sudo apt-get update 34 | ``` 35 | 4. Install Kubernetes components 36 | ``` 37 | sudo apt-get install -y kubectl kubelet kubeadm 38 | ``` 39 | 5. Config kubeadm if NodePort service is needed.(OPTIONAL) 40 | 41 | [Kube-proxy config](install/nodeport-kube-proxy-setup.md) 42 | 43 | 6. Create K8s cluster 44 | 45 | If kubeadm was configured for nodeport (via step 5): 46 | ``` 47 | kubeadm init --config config.yaml 48 | ``` 49 | else 50 | ``` 51 | kubeadm init 52 | ``` 53 | 7. Once "kubeadm init" completes, save the "join" command that will be printed on the shell, to a file of your choice. This will be needed to add new nodes to your cluster. 54 | 55 | ``` 56 | example: 57 | "kubeadm join 192.168.1.3:6443 --token 0smq4g.7pmg2jqc8arl1uz7 --discovery-token-ca-cert-hash sha256:d92ac0785b1435666d726f4bc54fde58693f87cf91371d9fd553da4a40813650" 58 | ``` 59 | 8. Run the following commands to initially kubernetes command line 60 | ``` 61 | mkdir -p $HOME/.kube 62 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 63 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 64 | ``` 65 | 66 | 9. Disable firewalld 67 | ``` 68 | sysctl -w net.bridge.bridge-nf-call-iptables=1 69 | systemctl stop firewalld; systemctl disable firewalld 70 | ``` 71 | 72 | 10. Disable networking on Master if desired. 
(OPTIONAL) 73 | 74 | Disabling networking on the master will result not workloads not being scheduled on the master node. 75 | 76 | For kubeadm version >= 1.11 77 | 78 | ``` 79 | a. sudo vi /var/lib/kubelet/kubeadm-flags.env 80 | 81 | b. Remove the following 3 arguments from KUBELET_KUBEADM_ARGS variable: 82 | --cni-bin-dir=/opt/cni/bin 83 | --cni-conf-dir=/etc/cni/net.d 84 | --network-plugin=cni 85 | 86 | c. Restart kubelet Service 87 | 88 | systemctl enable kubelet && systemctl start kubelet 89 | ``` 90 | 91 | For kubeadm version < 1.11 92 | 93 | ``` 94 | a. sudo vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 95 | 96 | b. Comment out 97 | 98 | "#Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 99 | sudo systemctl daemon-reload;sudo service kubelet restart 100 | 101 | c. Restart kubelet Service 102 | 103 | systemctl enable kubelet && systemctl start kubelet 104 | ``` 105 | 106 | ## Installing Kubernetes on Compute (k8s slave/minion) nodes 107 | 108 | 1. Prepare the node by running the following pre-requisites 109 | ``` 110 | swapoff -a 111 | sudo apt-get update 112 | sudo apt-get install -y curl software-properties-common apt-transport-https 113 | ``` 114 | 115 | 2. Install Docker 116 | ``` 117 | sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 118 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 119 | sudo apt-get update 120 | sudo apt-get install -y docker-ce # Install docker 121 | sudo service docker status # Verify that docker service is running 122 | ``` 123 | 124 | 3. Add Kubernetes repo 125 | ``` 126 | sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 127 | sudo echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list 128 | sudo apt-get update 129 | ``` 130 | 4. 
Install Kubernetes components 131 | ``` 132 | sudo apt-get install -y kubectl kubelet kubeadm 133 | ``` 134 | 5. Disable firewalld 135 | ``` 136 | sysctl -w net.bridge.bridge-nf-call-iptables=1 137 | systemctl stop firewalld; systemctl disable firewalld 138 | ``` 139 | 140 | 6. Join the Master node 141 | 142 | Copy paste the "join" command you saved from Step-7 of instructions for installation of K8s on master. 143 | ``` 144 | "kubeadm join 192.168.1.3:6443 --token 0smq4g.7pmg2jqc8arl1uz7 --discovery-token-ca-cert-hash sha256:d92ac0785b1435666d726f4bc54fde58693f87cf91371d9fd553da4a40813650" 145 | ``` 146 | 147 | # Installing Kuberntes on Centos hosts 148 | 149 | ## Installing Kubernetes on Master node 150 | 151 | **(Centos 7.4 - 3.10.0-862.3.2 kernel)** 152 | 153 | 1. Prepare the node by running the following pre-requisites 154 | ``` 155 | sudo setenforce 0 156 | swapoff -a 157 | ``` 158 | 2. Install Docker 159 | 160 | ``` 161 | sudo yum install -y docker 162 | systemctl enable docker.service;service docker start 163 | ``` 164 | 165 | 3. Add Kubernetes repo 166 | ``` 167 | cat << EOF >> /etc/yum.repos.d/kubernetes.repo 168 | [kubernetes] 169 | name=Kubernetes 170 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 171 | enabled=1 172 | gpgcheck=1 173 | repo_gpgcheck=1 174 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg 175 | https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 176 | EOF 177 | ``` 178 | 4. Install Kubernetes components 179 | ``` 180 | yum update -y; yum install -y kubelet kubeadm 181 | ``` 182 | 183 | 5. Config kubeadm if NodePort service is needed.(OPTIONAL) 184 | 185 | [Kube-proxy config](install/nodeport-kube-proxy-setup.md) 186 | 187 | 6. Create K8s cluster 188 | 189 | If kubeadm was configured for nodeport (via step 5): 190 | ``` 191 | kubeadm init --config config.yaml 192 | ``` 193 | else 194 | ``` 195 | kubeadm init 196 | ``` 197 | 7. 
Once "kubeadm init" completes, save the "join" command that will be printed on the shell, to a file of your choice. This will be needed to add new nodes to your cluster. 198 | 199 | ``` 200 | example: 201 | "kubeadm join 192.168.1.3:6443 --token 0smq4g.7pmg2jqc8arl1uz7 --discovery-token-ca-cert-hash sha256:d92ac0785b1435666d726f4bc54fde58693f87cf91371d9fd553da4a40813650" 202 | ``` 203 | 8. Run the following commands to initially kubernetes command line 204 | ``` 205 | mkdir -p $HOME/.kube 206 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 207 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 208 | ``` 209 | 9. Disable firewalld 210 | ``` 211 | sysctl -w net.bridge.bridge-nf-call-iptables=1 212 | systemctl stop firewalld; systemctl disable firewalld 213 | ``` 214 | 10. Disable networking on Master if desired. (OPTIONAL) 215 | 216 | Disabling networking on the master will result not workloads not being scheduled on the master node. 217 | 218 | For kubeadm version >= 1.11 219 | 220 | ``` 221 | a. sudo vi /var/lib/kubelet/kubeadm-flags.env 222 | 223 | b. Remove the following 3 arguments from KUBELET_KUBEADM_ARGS variable: 224 | --cni-bin-dir=/opt/cni/bin 225 | --cni-conf-dir=/etc/cni/net.d 226 | --network-plugin=cni 227 | 228 | c. Restart kubelet Service 229 | 230 | systemctl enable kubelet && systemctl start kubelet 231 | ``` 232 | 233 | For kubeadm version < 1.11 234 | 235 | ``` 236 | a. sudo vi /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 237 | 238 | b. Comment out 239 | 240 | "#Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 241 | sudo systemctl daemon-reload;sudo service kubelet restart 242 | 243 | c. Restart kubelet Service 244 | 245 | systemctl enable kubelet && systemctl start kubelet 246 | ``` 247 | 248 | ## Installing Kubernetes on Compute (k8s slave/minion) nodes 249 | **(Centos 7.4 - 3.10.0-862.3.2 kernel)** 250 | 251 | 1. 
Prepare the node by running the following pre-requisites 252 | ``` 253 | sudo setenforce 0 254 | swapoff -a 255 | ``` 256 | 257 | 2. Install Docker 258 | ``` 259 | sudo yum install -y docker 260 | systemctl enable docker.service;service docker start 261 | ``` 262 | 3. Add Kubernetes repo 263 | ``` 264 | cat << EOF >> /etc/yum.repos.d/kubernetes.repo 265 | [kubernetes] 266 | name=Kubernetes 267 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 268 | enabled=1 269 | gpgcheck=1 270 | repo_gpgcheck=1 271 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg 272 | https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 273 | EOF 274 | ``` 275 | 4. Install Kubernetes components 276 | ``` 277 | yum update -y; yum install -y kubelet kubeadm 278 | ``` 279 | 5. Disable firewalld 280 | ``` 281 | sysctl -w net.bridge.bridge-nf-call-iptables=1 282 | systemctl stop firewalld; systemctl disable firewalld 283 | ``` 284 | 6. Join the Master node 285 | Copy paste the "join" command you saved from Step-9 of instructions for installation of K8s on master. 286 | ``` 287 | "kubeadm join 192.168.1.3:6443 --token 0smq4g.7pmg2jqc8arl1uz7 --discovery-token-ca-cert-hash sha256:d92ac0785b1435666d726f4bc54fde58693f87cf91371d9fd553da4a40813650" 288 | ``` 289 | -------------------------------------------------------------------------------- /install/kubernetes/nested-kubernetes.md: -------------------------------------------------------------------------------- 1 | Nested mode is when Contrail provides networking for a Kubernetes cluster that is provisioned on an Contail-Openstack cluster. Contrail components are shared between the two clusters. 2 | 3 | ![Contrail Standalone Solution](/images/nested-kubernetes.png) 4 | 5 | # __Prerequisites__ 6 | 7 | Please ensure that the following prerequisites are met, for a successful provisioning of Nested Contrail-Kubernetes cluster. 8 | 9 | 1. Installed and running Contrail Openstack cluster. 
10 | This cluster should be based on Contrail 5.0 release 11 | 12 | 2. Installed and running Kubernetes cluster on Virtual Machines created on Contrail Openstack cluster. 13 | User is free to follow any installation method of their choice. 14 | 15 | 2.a. Update /etc/hosts file on your kubernetes master node with entries for each node of your cluster. 16 | ``` 17 | Example: 18 | If your kubernetes cluster is made up of three nodes: 19 | master1 (IP: x.x.x.x), minion1 (IP: y.y.y.y) and minion2 (IP: z.z.z.z) 20 | 21 | /etc/hosts on the kubernetes master node should have the following entries: 22 | 23 | x.x.x.x master1 24 | y.y.y.y minion1 25 | z.z.z.z minion2 26 | ``` 27 | 2.b. [IF APPLICABLE] *If Contrail images will be downloaded from Contrail insecure registry, add the Contrail repository as insecure repository in docker* 28 | 29 | ***NOTE: This will need to be done on all nodes of your Kubernetes cluster *** 30 | ``` 31 | Step a: 32 | cat <<EOF >>/etc/docker/daemon.json 33 | { 34 | "insecure-registries": ["ci-repo.englab.juniper.net:5010"] 35 | } 36 | EOF 37 | 38 | Step b: 39 | service docker restart 40 | ``` 41 | 3. If Contrail container images are stored in private/secure docker registry, a kubernetes secret should be created and referenced during creation of single yaml, with credentials of the private docker registry. 42 | 43 | ``` 44 | kubectl create secret docker-registry <secret-name> --docker-server=<registry-name> --docker-username=<user-name> --docker-password=<password> --docker-email=<email> -n <namespace> 45 | 46 | <secret-name> - name of the secret 47 | <registry-name> - example: hub.juniper.net/contrail 48 | <user-name> - registry user name 49 | <password> - registry passcode 50 | <email> - registered email of this registry account 51 | <namespace> - kubernetes namespace where this secret is to be created. 52 | This should be the namespace where you intend to create Contrail pods. 53 | 54 | ``` 55 | 56 | # __Provision__ 57 | 58 | Provisioning a Nested Kubernetes Cluster is a three step process: 59 | 60 | ***1.
Configure network connectivity to Contrail Config/Data plane functions.*** 61 | 62 | ***2. Generate single yaml file to create Contrail-k8s cluster.*** 63 | 64 | ***3. Instantiate Contrail-k8s cluster.*** 65 | 66 | ## Configure network connectivity to Contrail Config/Data plane functions 67 | 68 | In a nested deployment, one kube-manager instance will be provisioned in each overlay cluster. Kube-manager is essentially a part of Contrail config function. This necessitates the need for kube-manager running in overlay to have network reachability to Contrail Config functions of the underlay Openstack cluster. 69 | 70 | Network connectivity for the following Contrail Config functions is required: 71 | 72 | | | 73 | | --- | 74 | | Contrail Config | 75 | | Contrail Analytics | 76 | | Contrail Msg Queue | 77 | | Contrail VNC DB | 78 | | Keystone | 79 | 80 | In addition to Config connectivity, CNI for the Kubernetes cluster needs network reachability to the Vrouter Agent of its Compute node. 81 | 82 | Network connectivity for the following Data plane function is required: 83 | 84 | | | 85 | | --- | 86 | | Vrouter | 87 | 88 | We can use Link Local Service feature or a combination of Link Local + Fabric SNAT feature of Contrail to provide IP reachability to/from the overlay kubernetes cluster config/data components and to corresponding config/data components of the underlay openstack cluster. 89 | 90 | ### Option 1: Fabric SNAT + Link Local (Preferred) 91 | 92 | Step 1: Enable Fabric SNAT on the Virtual Network of the VM's 93 | 94 | Fabric SNAT feature should be enabled on the Virtual Network of the Virtual Machine's on which Kubernetes Master and Minions are running. 95 | 96 | Step 2: Create one Link Local Service so CNI can communicate with its Vrouter Agent. 97 | 98 | To configure a Link Local Service, we need a Service IP and Fabric IP. Fabric IP is the node IP on which the vrouter agent of the minion is running.
Service IP (along with port number) is used by data plane to identify the fabric ip/node. Service IP is required to be a unique and unused IP in the entire openstack cluster. 99 | 100 | ***NOTE: The user is responsible to configure these Link Local Services via Contrail GUI.*** 101 | 102 | The following Link Local Service is required: 103 | 104 | | Contrail Process | Service IP | Service Port | Fabric IP | Fabric Port | 105 | | --- | --- | --- | --- | --- | 106 | | VRouter | < Service IP for the running node > | 9091 | 127.0.0.1 | 9091 | 107 | 108 | NOTE: Fabric IP is 127.0.0.1, as our intent is to make CNI talk to Vrouter on its underlay node. 109 | 110 | #### Example: 111 | 112 | The following link-local services should be created: 113 | 114 | | LL Service Name | Service IP | Service Port | Fabric IP | Fabric Port | 115 | | --- | --- | --- | --- | --- | 116 | | K8s-cni-to-agent | 10.10.10.5 | 9091 | 127.0.0.1 | 9091 | 117 | 118 | NOTE: Here 10.10.10.5 is the Service IP that was chosen by user. This can be any unused 119 | IP in the cluster. This IP is primarily used to identify link local traffic and has no 120 | other significance. 121 | 122 | 123 | ### Option 2: Link Local Only 124 | 125 | To configure a Link Local Service, we need a Service IP and Fabric IP. Fabric IP is the node IP on which the contrail processes are running. Service IP (along with port number) is used by data plane to identify the fabric ip/node. Service IP is required to be a unique and unused IP in the entire openstack cluster.
**For each node of the openstack cluster, one service IP should be identified.** 126 | 127 | ***NOTE: The user is responsible to configure these Link Local Services via Contrail GUI.*** 128 | 129 | The following Link Local Services are required: 130 | 131 | | Contrail Process | Service IP | Service Port | Fabric IP | Fabric Port | 132 | | --- | --- | --- | --- | --- | 133 | | Contrail Config | < Service IP for the running node > | 8082 | < Node IP of running node > | 8082 | 134 | | Contrail Analytics | < Service IP for the running node > | 8086 | < Node IP of running node > | 8086 | 135 | | Contrail Msg Queue | < Service IP for the running node > | 5673 | < Node IP of running node > | 5673 | 136 | | Contrail VNC DB | < Service IP for the running node > | 9161 | < Node IP of running node > | 9161 | 137 | | Keystone | < Service IP for the running node > | 35357 | < Node IP of running node > | 35357 | 138 | | VRouter | < Service IP for the running node > | 9091 | 127.0.0.1 | 9091 | 139 | 140 | #### Example: 141 | 142 | Let's assume the following hypothetical Openstack Cluster where: 143 | ``` 144 | Contrail Config : 192.168.1.100 145 | Contrail Analytics : 192.168.1.100, 192.168.1.101 146 | Contrail Msg Queue : 192.168.1.100 147 | Contrail VNC DB : 192.168.1.100, 192.168.1.101, 192.168.1.102 148 | Keystone: 192.168.1.200 149 | Vrouter: 192.168.1.201, 192.168.1.202, 192.168.1.203 150 | ``` 151 | This cluster is made of 7 nodes. We will allocate 5 unused IPs for these nodes: 152 | ``` 153 | 192.168.1.100 --> 10.10.10.1 154 | 192.168.1.101 --> 10.10.10.2 155 | 192.168.1.102 --> 10.10.10.3 156 | 192.168.1.200 --> 10.10.10.4 157 | 192.168.1.201/192.168.1.202/192.168.1.203 --> 10.10.10.5 158 | NOTE: One service IP will suffice for all VRouter nodes in the system.
159 | ``` 160 | The following link-local services should be created: 161 | 162 | | LL Service Name | Service IP | Service Port | Fabric IP | Fabric Port | 163 | | --- | --- | --- | --- | --- | 164 | | Contrail Config | 10.10.10.1 | 8082 | 192.168.1.100 | 8082 | 165 | | Contrail Analytics 1 | 10.10.10.1 | 8086 | 192.168.1.100 | 8086 | 166 | | Contrail Analytics 2 | 10.10.10.2 | 8086 | 192.168.1.101 | 8086 | 167 | | Contrail Msg Queue | 10.10.10.1 | 5673 | 192.168.1.100 | 5673 | 168 | | Contrail VNC DB 1 | 10.10.10.1 | 9161 | 192.168.1.100 | 9161 | 169 | | Contrail VNC DB 2 | 10.10.10.2 | 9161 | 192.168.1.101 | 9161 | 170 | | Contrail VNC DB 3 | 10.10.10.3 | 9161 | 192.168.1.102 | 9161 | 171 | | Keystone | 10.10.10.4 | 35357 | 192.168.1.200| 35357 | 172 | | K8s-cni-to-agent | 10.10.10.5 | 9091 | 127.0.0.1 | 9091 | 173 | 174 | ## Generate yaml file 175 | 176 | Contrail components will be installed on the Kubernetes cluster as pods. 177 | The config to create these Pods in K8s is encoded in a yaml file. 178 | 179 | This file can be generated as follows: 180 | 181 | Step 1: 182 | 183 | Clone contrail-container-builder repo in any server of your choice. 184 | ``` 185 | git clone https://github.com/Juniper/contrail-container-builder.git 186 | ``` 187 | 188 | Step 2: 189 | 190 | Populate common.env file (located in the top directory of the cloned contrail-container-builder repo) with info corresponding to your cluster and environment. 191 | 192 | For your reference, please find a sample common.env file with required bare minimum configurations here: 193 | 194 | https://github.com/Juniper/contrail-container-builder/blob/master/kubernetes/sample_config_files/common.env.sample.nested_mode 195 | 196 | **NOTE: 197 | If Contrail container images are stored in private/secure docker registry, a kubernetes secret should have been created, as 198 | documented in pre-requisites.
Populate the variable KUBERNETES_SECRET_CONTRAIL_REPO=< secret-name > with the name of the 199 | generated secret, in common.env file.** 200 | 201 | Step 3: 202 | 203 | Generate the yaml file as follows in your shell: 204 | ``` 205 | cd <contrail-container-builder>/kubernetes/manifests 206 | 207 | ./resolve-manifest.sh contrail-kubernetes-nested.yaml > nested-contrail.yml 208 | ``` 209 | 210 | Step 4: 211 | 212 | Copy over the output (or file) generated from Step(3) to the master node in your kubernetes cluster. 213 | 214 | ## Instantiate Contrail-k8s cluster 215 | 216 | Create the contrail components as pods on the kubernetes cluster, as follows: 217 | 218 | ``` 219 | kubectl apply -f nested-contrail.yml 220 | ``` 221 | 222 | You will see the following pods running in the "kube-system" namespace: 223 | 224 | contrail-kube-manager-xxxxxx --> This is the manager that acts as conduit between kubernetes and openstack clusters. 225 | 226 | contrail-kubernetes-cni-agent-xxxxx --> This installs and configures Contrail CNI on kubernetes nodes. 227 | 228 | ``` 229 | root@k8s:~# kubectl get pods -n kube-system 230 | NAME READY STATUS RESTARTS AGE 231 | contrail-kube-manager-lcjbc 1/1 Running 0 3d 232 | contrail-kubernetes-cni-agent-w8shc 1/1 Running 0 3d 233 | ``` 234 | 235 | ***Now you are ready to create workloads on the Kubernetes cluster*** 236 | 237 | -------------------------------------------------------------------------------- /install/kubernetes/nodeport-kube-proxy-setup.md: -------------------------------------------------------------------------------- 1 | 2 | # NodePort Service support in contrail 3 | 4 | Contrail works with, and depends on, the kube-proxy component of the kubernetes software stack to implement support for NodePort Service feature in kubernetes. 5 | 6 | With Docker version >= 1.13, kube-proxy needs the following configuration to get NodePort feature working seamlessly.
7 | 8 | ## Pre-requisites 9 | 10 | A kubernetes master node with un-initialized kubernetes cluster i.e node where "kubeadm init" has not been executed. 11 | 12 | If "kubeadm init" has already been issued, then the cluster needs to be torn down by executing "kubeadm reset" on ALL (master and compute) nodes of the cluster, before the below steps can be executed. 13 | 14 | ### Step 1. Create a yaml file with following config. 15 | 16 | ***NOTE: "10.32.0.0/12" here is the Pod network that the Contrail cluster was started with.*** 17 | 18 | Filename: kube-proxy-config.yaml <-- Any name of your choosing. 19 | 20 | ``` 21 | apiVersion: kubeadm.k8s.io/v1alpha2 22 | kind: MasterConfiguration 23 | kubernetesVersion: v1.11.1 24 | api: 25 | bindPort: 6443 26 | kubeProxy: 27 | config: 28 | clusterCIDR: "10.32.0.0/12" 29 | ``` 30 | 31 | ### Step 2. Instantiate the kubernetes cluster. 32 | 33 | ``` 34 | kubeadm init --config kube-proxy-config.yaml 35 | ``` 36 | -------------------------------------------------------------------------------- /install/kubernetes/non-nested-kubernetes.md: -------------------------------------------------------------------------------- 1 | In Non-nested mode Kubernetes cluster is provisioned side by side with Openstack cluster with networking provided by same Contrail controller. 2 | 3 | ![Contrail Non-Nested Solution](/images/non-nested-kubernetes.png) 4 | 5 | # __Prerequisites__ 6 | 7 | 1. Installed and running Contrail Openstack cluster on either Bare Metal server (or Virtual Machine). 8 | This cluster should be based on Contrail 5.0 release. 9 | 10 | 2. Installed and running Kubernetes cluster on same Bare Metal server (or Virtual Machine) as used in step 1. 11 | 12 | 3. Label the Kubernetes master node using following command: 13 | 14 | ``` 15 | kubectl label node <node-name> node-role.opencontrail.org/controller=true 16 | ``` 17 | 18 | 4.
It is recommended that the Kubernetes master should not be configured with network plugin, 19 | as it may not be desirable to install vrouter kernel module on the control node. 20 | However it is left to the user's preference. 21 | 22 | 5. Provision additional worker Kubernetes virtual machines (with CentOS 7.5 OS) and join them to the Kubernetes cluster provisioned above. 23 | 24 | 6. If Contrail container images are stored in private/secure docker registry, a kubernetes secret should be created and referenced during creation of single yaml, with credentials of the private docker registry. 25 | 26 | ``` 27 | kubectl create secret docker-registry --docker-server= --docker-username= --docker-password= --docker-email= -n 28 | 29 | - name of the secret 30 | - example: hub.juniper.net/contrail 31 | - registry user name 32 | - registry passcode 33 | - registered email of this registry account 34 | - kubernetes namespace where this secret is to be created. 35 | This should be the namespace where you intend to create Contrail pods. 36 | 37 | ``` 38 | 39 | # __Provision__ 40 | Follow these steps to provision Contrail Kubernetes cluster side 41 | 42 | 1. Clone contrail-container-build repo in any server of your choice. 43 | ``` 44 | git clone https://github.com/Juniper/contrail-container-builder.git 45 | ``` 46 | 47 | 2. Populate common.env file (located in the top directory of the cloned contrail-container-builder repo) with info corresponding to your cluster and environment. 
48 | 49 | For you reference, see a sample common.env file with required bare minimum configurations here: https://github.com/Juniper/contrail-container-builder/blob/master/kubernetes/sample_config_files/common.env.sample.non_nested_mode 50 | 51 | ***NOTE 1: If Contrail Config API is not secured by keystone, please ensure AUTH_MODE and KEYSTONE_* variables are not configured/present while populating configuration in common.env** 52 | 53 | **NOTE 2: 54 | If Contrail container images are stored in private/secure docker registry, a kubernetes secret should have be created, as 55 | documented in pre-requesites. Populate the variable KUBERNETES_SECRET_CONTRAIL_REPO=< secret-name > with the name of the 56 | generated secret,in common.env file.** 57 | 58 | 3. Generate the yaml file as following: 59 | ``` 60 | cd /kubernetes/manifests 61 | 62 | ./resolve-manifest.sh contrail-non-nested-kubernetes.yaml > non-nested-contrail.yml 63 | ``` 64 | 65 | 4. If any of the macros are not specified in common.env, they will have empty string assignments in the "env" ConfigMap in the generated yaml. Make sure such empty macros are removed from the yaml. 66 | 67 | 5. Copy over the file generated from Step 3 to the master node in your Kubernetes cluster. 68 | 69 | 6. Create contrail components as pods on the Kubernetes cluster, as follows: 70 | 71 | ``` 72 | kubectl apply -f non-nested-contrail.yml 73 | ``` 74 | 7. Following Contrail pods should be created on the Kubernetes cluster. Notice that contrail-agent pod is created only on the worker node. 
75 | ``` 76 | [root@b4s403 manifests]# kubectl get pods --all-namespaces -o wide 77 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE 78 | kube-system contrail-agent-mxkcq 2/2 Running 0 1m 10.84.24.52 b4s402 79 | kube-system contrail-kube-manager-glw5m 1/1 Running 0 1m 10.84.24.53 b4s403 80 | ``` 81 | -------------------------------------------------------------------------------- /install/kubernetes/standalone-kubernete-ansible.md: -------------------------------------------------------------------------------- 1 | # Provisioning of Kubernetes Cluster Using contrail-ansible-deployer 2 | 3 | The following steps will install a standalone Kubernetes cluster with Contrail as networking provider. 4 | 5 | Provisioning of K8s and Contrail is done through Ansible-playbooks. 6 | 7 | ![Contrail Standalone Solution](/images/standalone-kubernetes.png) 8 | 9 | ### Step 1. Re-image the node to CentOS 7.4. 10 | 11 | **Linux kernel version 3.10.0-862.3.2** 12 | 13 | Contrail forwarding uses a kernel module to provide high throughput, low latency networking. 14 | 15 | The latest kernel module is compiled against 3.10.0-862.3.2 kernel. 16 | 17 | ### Step 2. Install the necessary utilities. 18 | ``` 19 | yum -y install epel-release git ansible net-tools 20 | ``` 21 | 22 | ### Step 3. Clone the contrail-ansible-deployer repo. 23 | ``` 24 | git clone https://github.com/Juniper/contrail-ansible-deployer.git 25 | cd contrail-ansible-deployer 26 | ``` 27 | 28 | ### Step 4. Edit the config/instances.yaml and enter the necessary values. 29 | 30 | For example, see the following sample file for a one node compute and a one node controller installation. 31 | ``` 32 | provider_config: 33 | bms: 34 | ssh_pwd: Password 35 | ssh_user: root 36 | ssh_public_key: /root/.ssh/id_rsa.pub 37 | ssh_private_key: /root/.ssh/id_rsa 38 | domainsuffix: local 39 | instances: 40 | bms1: 41 | provider: bms 42 | roles: # Optional. 
If roles is not defined, all below roles will be created 43 | config_database: # Optional. 44 | config: # Optional. 45 | control: # Optional. 46 | analytics_database: # Optional. 47 | analytics: # Optional. 48 | webui: # Optional. 49 | k8s_master: # Optional. 50 | kubemanager: # Optional. 51 | ip: BMS1_IP 52 | bms2: 53 | provider: bms 54 | roles: # Optional. If roles is not defined, all below roles will be created 55 | vrouter: # Optional. 56 | k8s_node: # Optional. 57 | ip: BMS2_IP 58 | contrail_configuration: 59 | CONTRAIL_VERSION: latest 60 | global_configuration: 61 | CONTAINER_REGISTRY: ci-repo.englab.juniper.net:5010 62 | REGISTRY_PRIVATE_INSECURE: True 63 | ``` 64 | 65 | ### Step 5. Turn off the swap functionality on all the nodes. 66 | ``` 67 | swapoff -a 68 | ``` 69 | 70 | ### Step 6. Configure the nodes. 71 | ``` 72 | ansible-playbook -e orchestrator=kubernetes -i inventory/ playbooks/configure_instances.yml 73 | ``` 74 | ### Step 7. Install Kubernetes and Contrail. 75 | ``` 76 | ansible-playbook -e orchestrator=kubernetes -i inventory/ playbooks/install_k8s.yml 77 | ansible-playbook -e orchestrator=kubernetes -i inventory/ playbooks/install_contrail.yml 78 | ``` 79 | -------------------------------------------------------------------------------- /install/kubernetes/standalone-kubernetes-centos.md: -------------------------------------------------------------------------------- 1 | Contrail CNI can be installed on a Kubernetes cluster through multiple provisioning schemes. 2 | 3 | ![Contrail Standalone Solution](/images/standalone-kubernetes.png) 4 | 5 | This wiki will describe the most simplest of all: **A single yaml based install** 6 | 7 | # Pre-requisites 8 | 1. **A running Kubernetes cluster** 9 | 10 | There are multiple options available to user to install Kubernetes. 
The simplest being [kubeadm](https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/) 11 | 12 | Alternatively if you would like to install Contrail and K8s cluster together, you can use [Contrail Ansible Deployer](https://github.com/Juniper/contrail-ansible-deployer/wiki/Contrail-microservice-installation-with-kubernetes). 13 | 14 | 2. ***Add Contrail repository as insecure repository in docker.*** 15 | 16 | *** NOTE: This will need to be done on all nodes of your Kubernetes cluster *** 17 | ``` 18 | Step a: 19 | cat <<EOF >>/etc/docker/daemon.json 20 | { 21 | "insecure-registries": ["ci-repo.englab.juniper.net:5010"] 22 | } 23 | EOF 24 | 25 | Step b: 26 | service docker restart 27 | ``` 28 | 3. **Linux kernel version 3.10.0-957** 29 | 30 | Contrail forwarding uses a kernel module to provide high throughput, low latency networking. 31 | 32 | The latest kernel module is compiled against 3.10.0-957 kernel. 33 | 34 | 4. **Docker version on all your nodes should be >= 1.24** 35 | 36 | # Installation 37 | Installation of Contrail is a **1**-step process. 38 | 39 | Note: Replace x.x.x.x with the IP of your Kubernetes Master node. 40 | 41 | ``` 42 | K8S_MASTER_IP=x.x.x.x; CONTRAIL_REPO="ci-repo.englab.juniper.net:5010"; CONTRAIL_RELEASE="latest"; mkdir -pm 777 /var/lib/contrail/kafka-logs; curl https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/master/install/kubernetes/templates/contrail-single-step-cni-install-centos.yaml | sed "s/{{ K8S_MASTER_IP }}/$K8S_MASTER_IP/g; s/{{ CONTRAIL_REPO }}/$CONTRAIL_REPO/g; s/{{ CONTRAIL_RELEASE }}/$CONTRAIL_RELEASE/g" | kubectl apply -f - 43 | ``` 44 | 45 | # What just happened ? 46 | 47 | **Hurray! Welcome to Contrail.** 48 | 49 | 1. You installed Contrail CNI in your Kubernetes node. If new compute nodes are added to your Kubernetes cluster, Contrail CNI will be propagated to them auto-magically as it is backed by a Kubernetes DaemonSet. 50 | 51 | 2.
You installed entire Contrail Networking suite with rich Networking, Analytics, Security, Visualization functions, to name a few. 52 | 53 | 3. Contrail UI is available on port 8143 of your node. Feel free to play around. [About Contrail](https://www.juniper.net/documentation/en_US/release-independent/contrail/information-products/pathway-pages/index.html) 54 | ``` 55 | https://x.x.x.x:8143 56 | Default credentials: admin/contrail123 57 | ``` 58 | # Check Contrail Status 59 | 60 | You can get the status of Contrail components, by running "contrail-status" command line tool in your Kubernetes master node. This will list all Contrail components running in your system. 61 | ``` 62 | [root@foo ~]# contrail-status 63 | Pod Service Original Name State Status 64 | zookeeper contrail-external-zookeeper running Up 35 minutes 65 | analytics alarm-gen contrail-analytics-alarm-gen running Up 35 minutes 66 | analytics api contrail-analytics-api running Up 35 minutes 67 | analytics collector contrail-analytics-collector running Up 35 minutes 68 | analytics nodemgr contrail-nodemgr running Up 33 minutes 69 | analytics query-engine contrail-analytics-query-engine running Up 35 minutes 70 | analytics snmp-collector contrail-analytics-snmp-collector running Up 35 minutes 71 | analytics topology contrail-analytics-topology running Up 34 minutes 72 | config api contrail-controller-config-api running Up 35 minutes 73 | config cassandra contrail-external-cassandra running Up 35 minutes 74 | config device-manager contrail-controller-config-devicemgr running Up 35 minutes 75 | config nodemgr contrail-nodemgr running Up 33 minutes 76 | config rabbitmq contrail-external-rabbitmq running Up 35 minutes 77 | config schema contrail-controller-config-schema running Up 35 minutes 78 | config svc-monitor contrail-controller-config-svcmonitor running Up 35 minutes 79 | control control contrail-controller-control-control running Up 35 minutes 80 | control dns contrail-controller-control-dns running 
Up 35 minutes 81 | control named contrail-controller-control-named running Up 35 minutes 82 | control nodemgr contrail-nodemgr running Up 33 minutes 83 | database cassandra contrail-external-cassandra running Up 35 minutes 84 | database kafka contrail-external-kafka running Up 35 minutes 85 | database nodemgr contrail-nodemgr running Up 34 minutes 86 | kubernetes kube-manager contrail-kubernetes-kube-manager running Up 35 minutes 87 | vrouter agent contrail-vrouter-agent running Up 34 minutes 88 | vrouter nodemgr contrail-nodemgr running Up 33 minutes 89 | webui job contrail-controller-webui-job running Up 35 minutes 90 | webui web contrail-controller-webui-web running Up 35 minutes 91 | 92 | WARNING: container with original name 'contrail-external-zookeeper' have Pod os Service empty. Pod: '' / Service: 'zookeeper'. Please pass NODE_TYPE with pod name to container's env 93 | 94 | vrouter kernel module is PRESENT 95 | == Contrail control == 96 | control: active 97 | nodemgr: initializing (NTP state unsynchronized. ) . <-- Safe to ignore 98 | named: active 99 | dns: active 100 | 101 | == Contrail kubernetes == 102 | kube-manager: active 103 | 104 | == Contrail database == 105 | kafka: active 106 | nodemgr: initializing (NTP state unsynchronized. ) . <-- Safe to ignore 107 | zookeeper: inactive <-- Safe to ignore 108 | cassandra: active 109 | 110 | == Contrail analytics == 111 | nodemgr: initializing (NTP state unsynchronized. ) . <-- Safe to ignore 112 | api: active 113 | collector: active 114 | query-engine: active 115 | alarm-gen: active 116 | 117 | == Contrail webui == 118 | web: active 119 | job: active 120 | 121 | == Contrail vrouter == 122 | nodemgr: initializing (NTP state unsynchronized. ) <-- Safe to ignore 123 | agent: active 124 | 125 | == Contrail config == 126 | api: active 127 | zookeeper: inactive <-- Safe to ignore 128 | svc-monitor: active 129 | nodemgr: initializing (NTP state unsynchronized. ) . 
<-- Safe to ignore 130 | device-manager: active 131 | cassandra: active 132 | rabbitmq: active 133 | schema: active 134 | 135 | ``` 136 | 137 | # Get to know Contrail more 138 | 139 | [All about Contrail](https://www.juniper.net/documentation/en_US/release-independent/contrail/information-products/pathway-pages/index.html) 140 | 141 | [Contrail and Kubernetes Intro](https://github.com/Juniper/contrail-controller/wiki/Kubernetes) 142 | 143 | [Install Kubernetes using Kubeadm](https://github.com/Juniper/contrail-controller/wiki/Install-K8s-using-Kubeadm) 144 | 145 | [Provision Contrail Kubernetes Cluster in Non-nested mode](https://github.com/Juniper/contrail-ansible-deployer/wiki/Provision-Contrail-Kubernetes-Cluster-in-Non-nested-Mode) 146 | -------------------------------------------------------------------------------- /install/kubernetes/standalone-kubernetes-ubuntu.md: -------------------------------------------------------------------------------- 1 | Contrail CNI can be installed on a Kubernetes cluster through multiple provisioning schemes. 2 | 3 | ![Contrail Standalone Solution](/images/standalone-kubernetes.png) 4 | 5 | This wiki will describe the most simplest of all: **A single yaml based install** 6 | 7 | # Pre-requisites 8 | 1. **A running Kubernetes cluster** 9 | 10 | [Install Guide](/install/kubernetes/install-kubernetes.md) 11 | 12 | 2. Add Contrail repository as insecure repository in docker. 13 | *** NOTE: This will need to be done on all nodes of your Kubernetes cluster *** 14 | ``` 15 | Step a: 16 | cat <>/etc/docker/daemon.json 17 | { 18 | "insecure-registries": ["ci-repo.englab.juniper.net:5010"] 19 | } 20 | EOF 21 | 22 | Step b: 23 | service docker restart 24 | ``` 25 | 26 | 3. Make sure that kubernetes master node ip has an entry in /etc/hosts file that resolves to 27 | the hostname. 28 | ``` 29 | File: /etc/hosts 30 | 31 | x.x.x.x hostname hostname.fqname 32 | ``` 33 | 34 | 4. 
Docker version on all nodes should be >= 1.24 35 | 36 | # Installation 37 | Installation of Contrail is a **1**-step process. 38 | 39 | Note: Replace x.x.x.x with the IP of your Kubernetes Master node. 40 | 41 | ``` 42 | K8S_MASTER_IP=x.x.x.x;CONTRAIL_REPO=ci-repo.englab.juniper.net:5010; CONTRAIL_RELEASE="latest"; mkdir -pm 777 /var/lib/contrail/kafka-logs; curl https://raw.githubusercontent.com/Juniper/contrail-kubernetes-docs/master/install/kubernetes/templates/contrail-single-step-cni-install-ubuntu.yaml | sed "s/{{ K8S_MASTER_IP }}/$K8S_MASTER_IP/g; s/{{ CONTRAIL_REPO }}/$CONTRAIL_REPO/g; s/{{ CONTRAIL_RELEASE }}/$CONTRAIL_RELEASE/g" | kubectl apply -f - 43 | ``` 44 | 45 | # What just happened ? 46 | 47 | **Hurray! Welcome to Contrail.** 48 | 49 | 1. You installed Contrail CNI in your Kubernetes node. If new compute nodes are added to your Kubernetes cluster, Contrail CNI will be propogated to them auto-magically as it is backed by a Kubernetes DaemaonSet. 50 | 51 | 2. You installed entire Contrail Networking suite with rich Networking, Analytics, Security, Visualization functions, to name a few. 52 | 53 | 3. Contrail UI is available on port 8143 of your node. Feel free to play around. [About Contrail](https://www.juniper.net/documentation/en_US/release-independent/contrail/information-products/pathway-pages/index.html) 54 | ``` 55 | https://x.x.x.x:8143 56 | Default credentials: admin/contrail123 57 | ``` 58 | # Check Contrail Status 59 | 60 | You can get the status of Contrail components, by running "contrail-status" command line tool in your Kubernetes master node. This will list all Contrail components running in your system. 
61 | ``` 62 | [root@foo ~]# contrail-status 63 | Pod Service Original Name State Status 64 | zookeeper contrail-external-zookeeper running Up 35 minutes 65 | analytics alarm-gen contrail-analytics-alarm-gen running Up 35 minutes 66 | analytics api contrail-analytics-api running Up 35 minutes 67 | analytics collector contrail-analytics-collector running Up 35 minutes 68 | analytics nodemgr contrail-nodemgr running Up 33 minutes 69 | analytics query-engine contrail-analytics-query-engine running Up 35 minutes 70 | analytics snmp-collector contrail-analytics-snmp-collector running Up 35 minutes 71 | analytics topology contrail-analytics-topology running Up 34 minutes 72 | config api contrail-controller-config-api running Up 35 minutes 73 | config cassandra contrail-external-cassandra running Up 35 minutes 74 | config device-manager contrail-controller-config-devicemgr running Up 35 minutes 75 | config nodemgr contrail-nodemgr running Up 33 minutes 76 | config rabbitmq contrail-external-rabbitmq running Up 35 minutes 77 | config schema contrail-controller-config-schema running Up 35 minutes 78 | config svc-monitor contrail-controller-config-svcmonitor running Up 35 minutes 79 | control control contrail-controller-control-control running Up 35 minutes 80 | control dns contrail-controller-control-dns running Up 35 minutes 81 | control named contrail-controller-control-named running Up 35 minutes 82 | control nodemgr contrail-nodemgr running Up 33 minutes 83 | database cassandra contrail-external-cassandra running Up 35 minutes 84 | database kafka contrail-external-kafka running Up 35 minutes 85 | database nodemgr contrail-nodemgr running Up 34 minutes 86 | kubernetes kube-manager contrail-kubernetes-kube-manager running Up 35 minutes 87 | vrouter agent contrail-vrouter-agent running Up 34 minutes 88 | vrouter nodemgr contrail-nodemgr running Up 33 minutes 89 | webui job contrail-controller-webui-job running Up 35 minutes 90 | webui web contrail-controller-webui-web 
running Up 35 minutes 91 | 92 | WARNING: container with original name 'contrail-external-zookeeper' have Pod os Service empty. Pod: '' / Service: 'zookeeper'. Please pass NODE_TYPE with pod name to container's env 93 | 94 | vrouter kernel module is PRESENT 95 | == Contrail control == 96 | control: active 97 | nodemgr: initializing (NTP state unsynchronized. ) . <-- Safe to ignore 98 | named: active 99 | dns: active 100 | 101 | == Contrail kubernetes == 102 | kube-manager: active 103 | 104 | == Contrail database == 105 | kafka: active 106 | nodemgr: initializing (NTP state unsynchronized. ) . <-- Safe to ignore 107 | zookeeper: inactive <-- Safe to ignore 108 | cassandra: active 109 | 110 | == Contrail analytics == 111 | nodemgr: initializing (NTP state unsynchronized. ) . <-- Safe to ignore 112 | api: active 113 | collector: active 114 | query-engine: active 115 | alarm-gen: active 116 | 117 | == Contrail webui == 118 | web: active 119 | job: active 120 | 121 | == Contrail vrouter == 122 | nodemgr: initializing (NTP state unsynchronized. ) <-- Safe to ignore 123 | agent: active 124 | 125 | == Contrail config == 126 | api: active 127 | zookeeper: inactive <-- Safe to ignore 128 | svc-monitor: active 129 | nodemgr: initializing (NTP state unsynchronized. ) . 
<-- Safe to ignore 130 | device-manager: active 131 | cassandra: active 132 | rabbitmq: active 133 | schema: active 134 | 135 | ``` 136 | 137 | # Get to know Contrail more 138 | 139 | [All about Contrail](https://www.juniper.net/documentation/en_US/release-independent/contrail/information-products/pathway-pages/index.html) 140 | 141 | [Contrail and Kubernetes Intro](https://github.com/Juniper/contrail-controller/wiki/Kubernetes) 142 | 143 | [Install Kubernetes using Kubeadm](https://github.com/Juniper/contrail-controller/wiki/Install-K8s-using-Kubeadm) 144 | 145 | [Provision Contrail Kubernetes Cluster in Non-nested mode](https://github.com/Juniper/contrail-ansible-deployer/wiki/Provision-Contrail-Kubernetes-Cluster-in-Non-nested-Mode) 146 | -------------------------------------------------------------------------------- /install/kubernetes/templates/contrail-single-step-cni-install-centos.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: env 6 | namespace: kube-system 7 | data: 8 | AAA_MODE: no-auth 9 | AUTH_MODE: noauth 10 | CLOUD_ORCHESTRATOR: kubernetes 11 | LOG_LEVEL: SYS_NOTICE 12 | METADATA_PROXY_SECRET: contrail 13 | RABBITMQ_NODE_PORT: "5673" 14 | ZOOKEEPER_ANALYTICS_PORT: "2181" 15 | ZOOKEEPER_PORTS: "2888:3888" 16 | ZOOKEEPER_NODES: {{ K8S_MASTER_IP }} 17 | RABBITMQ_NODES: {{ K8S_MASTER_IP }} 18 | CONTROLLER_NODES: {{ K8S_MASTER_IP }} 19 | VROUTER_GATEWAY: {{ K8S_MASTER_IP }} 20 | ANALYTICSDB_ENABLE: "true" 21 | ANALYTICS_ALARM_ENABLE: "true" 22 | ANALYTICS_SNMP_ENABLE: "true" 23 | --- 24 | apiVersion: v1 25 | kind: ConfigMap 26 | metadata: 27 | name: configzookeeperenv 28 | namespace: kube-system 29 | data: 30 | ZOOKEEPER_PORT: "2181" 31 | --- 32 | apiVersion: v1 33 | kind: ConfigMap 34 | metadata: 35 | name: analyticszookeeperenv 36 | namespace: kube-system 37 | data: 38 | ZOOKEEPER_PORT: "2181" 39 | 40 | --- 41 | apiVersion: v1 42 | kind: 
ConfigMap 43 | metadata: 44 | name: nodemgr-config 45 | namespace: kube-system 46 | data: 47 | DOCKER_HOST: "unix://mnt/docker.sock" 48 | --- 49 | apiVersion: v1 50 | kind: ConfigMap 51 | metadata: 52 | name: contrail-analyticsdb-config 53 | namespace: kube-system 54 | data: 55 | CASSANDRA_SEEDS: {{ K8S_MASTER_IP }} 56 | CASSANDRA_CLUSTER_NAME: Contrail 57 | CASSANDRA_START_RPC: "true" 58 | CASSANDRA_LISTEN_ADDRESS: auto 59 | CASSANDRA_PORT: "9160" 60 | CASSANDRA_CQL_PORT: "9042" 61 | CASSANDRA_SSL_STORAGE_PORT: "7001" 62 | CASSANDRA_STORAGE_PORT: "7000" 63 | CASSANDRA_JMX_LOCAL_PORT: "7200" 64 | --- 65 | apiVersion: v1 66 | kind: ConfigMap 67 | metadata: 68 | name: contrail-configdb-config 69 | namespace: kube-system 70 | data: 71 | CASSANDRA_SEEDS: {{ K8S_MASTER_IP }} 72 | CASSANDRA_CLUSTER_NAME: ContrailConfigDB 73 | CASSANDRA_START_RPC: "true" 74 | CASSANDRA_LISTEN_ADDRESS: auto 75 | CASSANDRA_PORT: "9161" 76 | CASSANDRA_CQL_PORT: "9041" 77 | CASSANDRA_SSL_STORAGE_PORT: "7011" 78 | CASSANDRA_STORAGE_PORT: "7010" 79 | CASSANDRA_JMX_LOCAL_PORT: "7201" 80 | --- 81 | apiVersion: v1 82 | kind: ConfigMap 83 | metadata: 84 | name: rabbitmq-config 85 | namespace: kube-system 86 | data: 87 | RABBITMQ_ERLANG_COOKIE: "47EFF3BB-4786-46E0-A5BB-58455B3C2CB4" 88 | --- 89 | apiVersion: v1 90 | kind: ConfigMap 91 | metadata: 92 | name: kube-manager-config 93 | namespace: kube-system 94 | data: 95 | KUBERNETES_API_SERVER: {{ K8S_MASTER_IP }} 96 | KUBERNETES_API_SECURE_PORT: "6443" 97 | K8S_TOKEN_FILE: "/tmp/serviceaccount/token" 98 | # Containers section 99 | --- 100 | apiVersion: extensions/v1beta1 101 | kind: DaemonSet 102 | metadata: 103 | name: config-zookeeper 104 | namespace: kube-system 105 | labels: 106 | app: config-zookeeper 107 | spec: 108 | template: 109 | metadata: 110 | labels: 111 | app: config-zookeeper 112 | spec: 113 | affinity: 114 | nodeAffinity: 115 | requiredDuringSchedulingIgnoredDuringExecution: 116 | nodeSelectorTerms: 117 | - matchExpressions: 118 | - 
key: "node-role.kubernetes.io/master" 119 | operator: Exists 120 | tolerations: 121 | - key: node-role.kubernetes.io/master 122 | operator: Exists 123 | effect: NoSchedule 124 | - key: node.kubernetes.io/not-ready 125 | operator: Exists 126 | effect: NoSchedule 127 | hostNetwork: true 128 | containers: 129 | - name: config-zookeeper 130 | image: "{{ CONTRAIL_REPO }}/contrail-external-zookeeper:{{ CONTRAIL_RELEASE }}" 131 | imagePullPolicy: "" 132 | envFrom: 133 | - configMapRef: 134 | name: env 135 | - configMapRef: 136 | name: configzookeeperenv 137 | volumeMounts: 138 | - mountPath: /var/lib/zookeeper 139 | name: zookeeper-data 140 | - mountPath: /var/log/zookeeper 141 | name: zookeeper-logs 142 | volumes: 143 | - name: zookeeper-data 144 | hostPath: 145 | path: /var/lib/contrail/config-zookeeper 146 | - name: zookeeper-logs 147 | hostPath: 148 | path: /var/log/contrail/config-zookeeper 149 | --- 150 | apiVersion: extensions/v1beta1 151 | kind: DaemonSet 152 | metadata: 153 | name: contrail-analyticsdb 154 | namespace: kube-system 155 | labels: 156 | app: contrail-analyticsdb 157 | spec: 158 | template: 159 | metadata: 160 | labels: 161 | app: contrail-analyticsdb 162 | spec: 163 | affinity: 164 | nodeAffinity: 165 | requiredDuringSchedulingIgnoredDuringExecution: 166 | nodeSelectorTerms: 167 | - matchExpressions: 168 | - key: "node-role.kubernetes.io/master" 169 | operator: Exists 170 | tolerations: 171 | - key: node-role.kubernetes.io/master 172 | operator: Exists 173 | effect: NoSchedule 174 | - key: node.kubernetes.io/not-ready 175 | operator: Exists 176 | effect: NoSchedule 177 | hostNetwork: true 178 | initContainers: 179 | - name: contrail-node-init 180 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 181 | imagePullPolicy: "" 182 | env: 183 | - name: NODE_TYPE 184 | value: "database" 185 | - name: CONTRAIL_STATUS_IMAGE 186 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 187 | envFrom: 188 | - configMapRef: 189 
| name: contrail-analyticsdb-config 190 | securityContext: 191 | privileged: true 192 | volumeMounts: 193 | - mountPath: /host/usr/bin 194 | name: host-usr-bin 195 | - mountPath: /host/var/lib 196 | name: host-var-lib 197 | containers: 198 | - name: contrail-analyticsdb-nodemgr 199 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 200 | imagePullPolicy: "" 201 | securityContext: 202 | privileged: true 203 | envFrom: 204 | - configMapRef: 205 | name: env 206 | - configMapRef: 207 | name: nodemgr-config 208 | - configMapRef: 209 | name: contrail-analyticsdb-config 210 | env: 211 | - name: NODE_TYPE 212 | value: database 213 | - name: DATABASE_NODEMGR__DEFAULTS__minimum_diskGB 214 | value: "2" 215 | volumeMounts: 216 | - mountPath: /var/log/contrail 217 | name: analyticsdb-logs 218 | - mountPath: /mnt 219 | name: docker-unix-socket 220 | - name: contrail-analyticsdb 221 | image: "{{ CONTRAIL_REPO }}/contrail-external-cassandra:{{ CONTRAIL_RELEASE }}" 222 | imagePullPolicy: "" 223 | env: 224 | - name: NODE_TYPE 225 | value: database 226 | envFrom: 227 | - configMapRef: 228 | name: contrail-analyticsdb-config 229 | volumeMounts: 230 | - mountPath: /var/lib/cassandra 231 | name: analyticsdb-data 232 | - mountPath: /var/log/cassandra 233 | name: analyticsdb-logs 234 | - name: contrail-analytics-query-engine 235 | image: "{{ CONTRAIL_REPO }}/contrail-analytics-query-engine:{{ CONTRAIL_RELEASE }}" 236 | imagePullPolicy: "" 237 | securityContext: 238 | privileged: true 239 | env: 240 | - name: NODE_TYPE 241 | value: database 242 | envFrom: 243 | - configMapRef: 244 | name: env 245 | - configMapRef: 246 | name: analyticszookeeperenv 247 | volumeMounts: 248 | - mountPath: /var/log/contrail 249 | name: analyticsdb-logs 250 | volumes: 251 | - name: analyticsdb-data 252 | hostPath: 253 | path: /var/lib/contrail/analyticsdb 254 | - name: analyticsdb-logs 255 | hostPath: 256 | path: /var/log/contrail/analyticsdb 257 | - name: docker-unix-socket 258 | hostPath: 
259 | path: /var/run 260 | - name: host-usr-bin 261 | hostPath: 262 | path: /usr/bin 263 | - name: host-var-lib 264 | hostPath: 265 | path: /var/lib 266 | --- 267 | apiVersion: extensions/v1beta1 268 | kind: DaemonSet 269 | metadata: 270 | name: contrail-configdb 271 | namespace: kube-system 272 | labels: 273 | app: contrail-configdb 274 | spec: 275 | template: 276 | metadata: 277 | labels: 278 | app: contrail-configdb 279 | spec: 280 | affinity: 281 | nodeAffinity: 282 | requiredDuringSchedulingIgnoredDuringExecution: 283 | nodeSelectorTerms: 284 | - matchExpressions: 285 | - key: "node-role.kubernetes.io/master" 286 | operator: Exists 287 | tolerations: 288 | - key: node-role.kubernetes.io/master 289 | operator: Exists 290 | effect: NoSchedule 291 | - key: node.kubernetes.io/not-ready 292 | operator: Exists 293 | effect: NoSchedule 294 | hostNetwork: true 295 | containers: 296 | - name: contrail-configdb 297 | image: "{{ CONTRAIL_REPO }}/contrail-external-cassandra:{{ CONTRAIL_RELEASE }}" 298 | imagePullPolicy: "" 299 | env: 300 | - name: NODE_TYPE 301 | value: config 302 | envFrom: 303 | - configMapRef: 304 | name: contrail-configdb-config 305 | volumeMounts: 306 | - mountPath: /var/lib/cassandra 307 | name: configdb-data 308 | - mountPath: /var/log/cassandra 309 | name: configdb-log 310 | volumes: 311 | - name: configdb-data 312 | hostPath: 313 | path: /var/lib/contrail/configdb 314 | - name: configdb-log 315 | hostPath: 316 | path: /var/log/contrail/configdb 317 | --- 318 | apiVersion: extensions/v1beta1 319 | kind: DaemonSet 320 | metadata: 321 | name: contrail-config-database-nodemgr 322 | namespace: kube-system 323 | labels: 324 | app: contrail-config-database-nodemgr 325 | spec: 326 | template: 327 | metadata: 328 | labels: 329 | app: contrail-config-database-nodemgr 330 | spec: 331 | affinity: 332 | nodeAffinity: 333 | requiredDuringSchedulingIgnoredDuringExecution: 334 | nodeSelectorTerms: 335 | - matchExpressions: 336 | - key: 
"node-role.kubernetes.io/master" 337 | operator: Exists 338 | tolerations: 339 | - key: node-role.kubernetes.io/master 340 | operator: Exists 341 | effect: NoSchedule 342 | - key: node.kubernetes.io/not-ready 343 | operator: Exists 344 | effect: NoSchedule 345 | hostNetwork: true 346 | initContainers: 347 | - name: contrail-node-init 348 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 349 | imagePullPolicy: "" 350 | securityContext: 351 | privileged: true 352 | env: 353 | - name: CONTRAIL_STATUS_IMAGE 354 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 355 | envFrom: 356 | - configMapRef: 357 | name: env 358 | volumeMounts: 359 | - mountPath: /host/usr/bin 360 | name: host-usr-bin 361 | containers: 362 | - name: contrail-config-database-nodemgr 363 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 364 | imagePullPolicy: "" 365 | envFrom: 366 | - configMapRef: 367 | name: env 368 | - configMapRef: 369 | name: nodemgr-config 370 | - configMapRef: 371 | name: contrail-configdb-config 372 | env: 373 | - name: NODE_TYPE 374 | value: database 375 | - name: DATABASE_NODEMGR__DEFAULTS__minimum_diskGB 376 | value: "2" 377 | # todo: there is type Socket in new kubernetes, it is possible to use full 378 | # path: 379 | # hostPath: 380 | # path: /var/run/docker.sock and 381 | # type: Socket 382 | volumeMounts: 383 | - mountPath: /var/log/contrail 384 | name: configdb-logs 385 | - mountPath: /mnt 386 | name: docker-unix-socket 387 | volumes: 388 | - name: configdb-logs 389 | hostPath: 390 | path: /var/log/contrail/configdb 391 | - name: docker-unix-socket 392 | hostPath: 393 | path: /var/run 394 | - name: host-usr-bin 395 | hostPath: 396 | path: /usr/bin 397 | --- 398 | apiVersion: extensions/v1beta1 399 | kind: DaemonSet 400 | metadata: 401 | name: contrail-analytics 402 | namespace: kube-system 403 | labels: 404 | app: contrail-analytics 405 | spec: 406 | template: 407 | metadata: 408 | labels: 409 | app: 
contrail-analytics 410 | spec: 411 | affinity: 412 | nodeAffinity: 413 | requiredDuringSchedulingIgnoredDuringExecution: 414 | nodeSelectorTerms: 415 | - matchExpressions: 416 | - key: "node-role.kubernetes.io/master" 417 | operator: Exists 418 | tolerations: 419 | - key: node-role.kubernetes.io/master 420 | operator: Exists 421 | effect: NoSchedule 422 | - key: node.kubernetes.io/not-ready 423 | operator: Exists 424 | effect: NoSchedule 425 | hostNetwork: true 426 | initContainers: 427 | - name: contrail-node-init 428 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 429 | imagePullPolicy: "" 430 | securityContext: 431 | privileged: true 432 | env: 433 | - name: CONTRAIL_STATUS_IMAGE 434 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 435 | envFrom: 436 | - configMapRef: 437 | name: env 438 | - configMapRef: 439 | name: analyticszookeeperenv 440 | volumeMounts: 441 | - mountPath: /host/usr/bin 442 | name: host-usr-bin 443 | containers: 444 | - name: contrail-analytics-api 445 | image: "{{ CONTRAIL_REPO }}/contrail-analytics-api:{{ CONTRAIL_RELEASE }}" 446 | imagePullPolicy: "" 447 | envFrom: 448 | - configMapRef: 449 | name: env 450 | - configMapRef: 451 | name: analyticszookeeperenv 452 | volumeMounts: 453 | - mountPath: /var/log/contrail 454 | name: analytics-logs 455 | - name: contrail-analytics-collector 456 | image: "{{ CONTRAIL_REPO }}/contrail-analytics-collector:{{ CONTRAIL_RELEASE }}" 457 | imagePullPolicy: "" 458 | envFrom: 459 | - configMapRef: 460 | name: env 461 | volumeMounts: 462 | - mountPath: /var/log/contrail 463 | name: analytics-logs 464 | - name: contrail-analytics-nodemgr 465 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 466 | imagePullPolicy: "" 467 | envFrom: 468 | - configMapRef: 469 | name: env 470 | - configMapRef: 471 | name: analyticszookeeperenv 472 | - configMapRef: 473 | name: nodemgr-config 474 | env: 475 | - name: NODE_TYPE 476 | value: analytics 477 | # todo: 
there is type Socket in new kubernetes, it is possible to use full 478 | # path: 479 | # hostPath: 480 | # path: /var/run/docker.sock and 481 | # type: Socket 482 | volumeMounts: 483 | - mountPath: /var/log/contrail 484 | name: analytics-logs 485 | - mountPath: /mnt 486 | name: docker-unix-socket 487 | volumes: 488 | - name: analytics-logs 489 | hostPath: 490 | path: /var/log/contrail/analytics 491 | - name: docker-unix-socket 492 | hostPath: 493 | path: /var/run 494 | - name: host-usr-bin 495 | hostPath: 496 | path: /usr/bin 497 | --- 498 | apiVersion: extensions/v1beta1 499 | kind: DaemonSet 500 | metadata: 501 | name: contrail-analytics-snmp 502 | namespace: kube-system 503 | labels: 504 | app: contrail-analytics-snmp 505 | spec: 506 | template: 507 | metadata: 508 | labels: 509 | app: contrail-analytics-snmp 510 | spec: 511 | affinity: 512 | nodeAffinity: 513 | requiredDuringSchedulingIgnoredDuringExecution: 514 | nodeSelectorTerms: 515 | - matchExpressions: 516 | - key: "node-role.kubernetes.io/master" 517 | operator: Exists 518 | tolerations: 519 | - key: node-role.kubernetes.io/master 520 | operator: Exists 521 | effect: NoSchedule 522 | - key: node.kubernetes.io/not-ready 523 | operator: Exists 524 | effect: NoSchedule 525 | hostNetwork: true 526 | initContainers: 527 | - name: contrail-node-init 528 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 529 | imagePullPolicy: "" 530 | env: 531 | - name: NODE_TYPE 532 | value: "analytics-snmp" 533 | - name: CONTRAIL_STATUS_IMAGE 534 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 535 | envFrom: 536 | - configMapRef: 537 | name: env 538 | - configMapRef: 539 | name: contrail-analyticsdb-config 540 | securityContext: 541 | privileged: true 542 | volumeMounts: 543 | - mountPath: /host/usr/bin 544 | name: host-usr-bin 545 | - mountPath: /host/var/lib 546 | name: host-var-lib 547 | containers: 548 | - name: contrail-analytics-snmp-collector 549 | image: "{{ CONTRAIL_REPO 
}}/contrail-analytics-snmp-collector:{{ CONTRAIL_RELEASE }}" 550 | imagePullPolicy: "" 551 | securityContext: 552 | privileged: true 553 | envFrom: 554 | - configMapRef: 555 | name: env 556 | volumeMounts: 557 | - mountPath: /var/log/contrail 558 | name: analytics-snmp-logs 559 | env: 560 | - name: NODE_TYPE 561 | value: analytics-snmp 562 | - name: contrail-analytics-snmp-topology 563 | image: "{{ CONTRAIL_REPO }}/contrail-analytics-snmp-topology:{{ CONTRAIL_RELEASE }}" 564 | imagePullPolicy: "" 565 | securityContext: 566 | privileged: true 567 | envFrom: 568 | - configMapRef: 569 | name: env 570 | volumeMounts: 571 | - mountPath: /var/log/contrail 572 | name: analytics-snmp-logs 573 | env: 574 | - name: NODE_TYPE 575 | value: analytics-snmp 576 | - name: contrail-analytics-snmp-nodemgr 577 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 578 | imagePullPolicy: "" 579 | securityContext: 580 | privileged: true 581 | envFrom: 582 | - configMapRef: 583 | name: env 584 | - configMapRef: 585 | name: nodemgr-config 586 | - configMapRef: 587 | name: contrail-analyticsdb-config 588 | env: 589 | - name: NODE_TYPE 590 | value: analytics-snmp 591 | volumeMounts: 592 | - mountPath: /var/log/contrail 593 | name: analytics-snmp-logs 594 | - mountPath: /mnt 595 | name: docker-unix-socket 596 | volumes: 597 | - name: analytics-snmp-logs 598 | hostPath: 599 | path: /var/log/contrail/analytics-snmp 600 | - name: host-var-lib 601 | hostPath: 602 | path: /var/lib 603 | - name: docker-unix-socket 604 | hostPath: 605 | path: /var/run 606 | - name: host-usr-bin 607 | hostPath: 608 | path: /usr/bin 609 | --- 610 | apiVersion: extensions/v1beta1 611 | kind: DaemonSet 612 | metadata: 613 | name: contrail-analytics-alarm 614 | namespace: kube-system 615 | labels: 616 | app: contrail-analytics-alarm 617 | spec: 618 | template: 619 | metadata: 620 | labels: 621 | app: contrail-analytics-alarm 622 | spec: 623 | affinity: 624 | nodeAffinity: 625 | 
requiredDuringSchedulingIgnoredDuringExecution: 626 | nodeSelectorTerms: 627 | - matchExpressions: 628 | - key: "node-role.kubernetes.io/master" 629 | operator: Exists 630 | tolerations: 631 | - key: node-role.kubernetes.io/master 632 | operator: Exists 633 | effect: NoSchedule 634 | - key: node.kubernetes.io/not-ready 635 | operator: Exists 636 | effect: NoSchedule 637 | hostNetwork: true 638 | initContainers: 639 | - name: contrail-node-init 640 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 641 | imagePullPolicy: "" 642 | env: 643 | - name: NODE_TYPE 644 | value: "analytics-alarm" 645 | - name: CONTRAIL_STATUS_IMAGE 646 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 647 | envFrom: 648 | - configMapRef: 649 | name: env 650 | - configMapRef: 651 | name: contrail-analyticsdb-config 652 | securityContext: 653 | privileged: true 654 | volumeMounts: 655 | - mountPath: /host/usr/bin 656 | name: host-usr-bin 657 | - mountPath: /host/var/lib 658 | name: host-var-lib 659 | containers: 660 | - name: kafka 661 | image: "{{ CONTRAIL_REPO }}/contrail-external-kafka:{{ CONTRAIL_RELEASE }}" 662 | imagePullPolicy: "" 663 | securityContext: 664 | privileged: true 665 | env: 666 | - name: NODE_TYPE 667 | value: analytics-alarm 668 | envFrom: 669 | - configMapRef: 670 | name: env 671 | - configMapRef: 672 | name: analyticszookeeperenv 673 | - name: contrail-analytics-alarm-gen 674 | image: "{{ CONTRAIL_REPO }}/contrail-analytics-alarm-gen:{{ CONTRAIL_RELEASE }}" 675 | imagePullPolicy: "" 676 | securityContext: 677 | privileged: true 678 | envFrom: 679 | - configMapRef: 680 | name: env 681 | - configMapRef: 682 | name: analyticszookeeperenv 683 | volumeMounts: 684 | - mountPath: /var/log/contrail 685 | name: analytics-alarm-logs 686 | env: 687 | - name: NODE_TYPE 688 | value: analytics-alarm 689 | - name: contrail-analytics-alarm-nodemgr 690 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 691 | imagePullPolicy: 
"" 692 | securityContext: 693 | privileged: true 694 | envFrom: 695 | - configMapRef: 696 | name: env 697 | - configMapRef: 698 | name: contrail-analyticsdb-config 699 | - configMapRef: 700 | name: nodemgr-config 701 | env: 702 | - name: NODE_TYPE 703 | value: analytics-alarm 704 | volumeMounts: 705 | - mountPath: /var/log/contrail 706 | name: analytics-alarm-logs 707 | - mountPath: /mnt 708 | name: docker-unix-socket 709 | volumes: 710 | - name: analytics-alarm-logs 711 | hostPath: 712 | path: /var/log/contrail/analytics-alarm 713 | - name: host-var-lib 714 | hostPath: 715 | path: /var/lib 716 | - name: docker-unix-socket 717 | hostPath: 718 | path: /var/run 719 | - name: host-usr-bin 720 | hostPath: 721 | path: /usr/bin 722 | --- 723 | apiVersion: extensions/v1beta1 724 | kind: DaemonSet 725 | metadata: 726 | name: contrail-controller-control 727 | namespace: kube-system 728 | labels: 729 | app: contrail-controller-control 730 | spec: 731 | template: 732 | metadata: 733 | labels: 734 | app: contrail-controller-control 735 | spec: 736 | affinity: 737 | nodeAffinity: 738 | requiredDuringSchedulingIgnoredDuringExecution: 739 | nodeSelectorTerms: 740 | - matchExpressions: 741 | - key: "node-role.kubernetes.io/master" 742 | operator: Exists 743 | tolerations: 744 | - key: node-role.kubernetes.io/master 745 | operator: Exists 746 | effect: NoSchedule 747 | - key: node.kubernetes.io/not-ready 748 | operator: Exists 749 | effect: NoSchedule 750 | hostNetwork: true 751 | initContainers: 752 | - name: contrail-node-init 753 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 754 | imagePullPolicy: "" 755 | securityContext: 756 | privileged: true 757 | env: 758 | - name: CONTRAIL_STATUS_IMAGE 759 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 760 | envFrom: 761 | - configMapRef: 762 | name: env 763 | - configMapRef: 764 | name: configzookeeperenv 765 | volumeMounts: 766 | - mountPath: /host/usr/bin 767 | name: host-usr-bin 768 | 
containers: 769 | - name: contrail-controller-control 770 | image: "{{ CONTRAIL_REPO }}/contrail-controller-control-control:{{ CONTRAIL_RELEASE }}" 771 | imagePullPolicy: "" 772 | envFrom: 773 | - configMapRef: 774 | name: env 775 | - configMapRef: 776 | name: configzookeeperenv 777 | volumeMounts: 778 | - mountPath: /var/log/contrail 779 | name: control-logs 780 | - name: contrail-controller-control-dns 781 | image: "{{ CONTRAIL_REPO }}/contrail-controller-control-dns:{{ CONTRAIL_RELEASE }}" 782 | imagePullPolicy: "" 783 | envFrom: 784 | - configMapRef: 785 | name: env 786 | - configMapRef: 787 | name: configzookeeperenv 788 | volumeMounts: 789 | - mountPath: /etc/contrail 790 | name: dns-config 791 | - mountPath: /var/log/contrail 792 | name: control-logs 793 | - name: contrail-controller-control-named 794 | image: "{{ CONTRAIL_REPO }}/contrail-controller-control-named:{{ CONTRAIL_RELEASE }}" 795 | imagePullPolicy: "" 796 | envFrom: 797 | - configMapRef: 798 | name: env 799 | - configMapRef: 800 | name: configzookeeperenv 801 | securityContext: 802 | privileged: true 803 | volumeMounts: 804 | - mountPath: /etc/contrail 805 | name: dns-config 806 | - mountPath: /var/log/contrail 807 | name: control-logs 808 | - name: contrail-controller-nodemgr 809 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 810 | imagePullPolicy: "" 811 | envFrom: 812 | - configMapRef: 813 | name: env 814 | - configMapRef: 815 | name: configzookeeperenv 816 | - configMapRef: 817 | name: nodemgr-config 818 | env: 819 | - name: NODE_TYPE 820 | value: control 821 | # todo: there is type Socket in new kubernetes, it is possible to use full 822 | # path: 823 | # hostPath: 824 | # path: /var/run/docker.sock and 825 | # type: Socket 826 | volumeMounts: 827 | - mountPath: /var/log/contrail 828 | name: control-logs 829 | - mountPath: /mnt 830 | name: docker-unix-socket 831 | volumes: 832 | - name: control-logs 833 | hostPath: 834 | path: /var/log/contrail/control 835 | - name: 
docker-unix-socket 836 | hostPath: 837 | path: /var/run 838 | - name: dns-config 839 | emptyDir: {} 840 | - name: host-usr-bin 841 | hostPath: 842 | path: /usr/bin 843 | --- 844 | apiVersion: extensions/v1beta1 845 | kind: DaemonSet 846 | metadata: 847 | name: contrail-controller-config 848 | namespace: kube-system 849 | labels: 850 | app: contrail-controller-config 851 | spec: 852 | template: 853 | metadata: 854 | labels: 855 | app: contrail-controller-config 856 | spec: 857 | affinity: 858 | nodeAffinity: 859 | requiredDuringSchedulingIgnoredDuringExecution: 860 | nodeSelectorTerms: 861 | - matchExpressions: 862 | - key: "node-role.kubernetes.io/master" 863 | operator: Exists 864 | tolerations: 865 | - key: node-role.kubernetes.io/master 866 | operator: Exists 867 | effect: NoSchedule 868 | - key: node.kubernetes.io/not-ready 869 | operator: Exists 870 | effect: NoSchedule 871 | hostNetwork: true 872 | initContainers: 873 | - name: contrail-node-init 874 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 875 | imagePullPolicy: "" 876 | securityContext: 877 | privileged: true 878 | env: 879 | - name: CONTRAIL_STATUS_IMAGE 880 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 881 | envFrom: 882 | - configMapRef: 883 | name: env 884 | - configMapRef: 885 | name: configzookeeperenv 886 | volumeMounts: 887 | - mountPath: /host/usr/bin 888 | name: host-usr-bin 889 | containers: 890 | - name: contrail-controller-config-api 891 | image: "{{ CONTRAIL_REPO }}/contrail-controller-config-api:{{ CONTRAIL_RELEASE }}" 892 | imagePullPolicy: "" 893 | envFrom: 894 | - configMapRef: 895 | name: env 896 | - configMapRef: 897 | name: configzookeeperenv 898 | volumeMounts: 899 | - mountPath: /var/log/contrail 900 | name: config-logs 901 | - name: contrail-controller-config-devicemgr 902 | image: "{{ CONTRAIL_REPO }}/contrail-controller-config-devicemgr:{{ CONTRAIL_RELEASE }}" 903 | imagePullPolicy: "" 904 | envFrom: 905 | - configMapRef: 906 
| name: env 907 | - configMapRef: 908 | name: configzookeeperenv 909 | volumeMounts: 910 | - mountPath: /var/log/contrail 911 | name: config-logs 912 | - name: contrail-controller-config-schema 913 | image: "{{ CONTRAIL_REPO }}/contrail-controller-config-schema:{{ CONTRAIL_RELEASE }}" 914 | imagePullPolicy: "" 915 | envFrom: 916 | - configMapRef: 917 | name: env 918 | - configMapRef: 919 | name: configzookeeperenv 920 | volumeMounts: 921 | - mountPath: /var/log/contrail 922 | name: config-logs 923 | - name: contrail-controller-config-svcmonitor 924 | image: "{{ CONTRAIL_REPO }}/contrail-controller-config-svcmonitor:{{ CONTRAIL_RELEASE }}" 925 | imagePullPolicy: "" 926 | envFrom: 927 | - configMapRef: 928 | name: env 929 | - configMapRef: 930 | name: configzookeeperenv 931 | volumeMounts: 932 | - mountPath: /var/log/contrail 933 | name: config-logs 934 | - name: contrail-controller-config-nodemgr 935 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 936 | imagePullPolicy: "" 937 | envFrom: 938 | - configMapRef: 939 | name: env 940 | - configMapRef: 941 | name: configzookeeperenv 942 | - configMapRef: 943 | name: nodemgr-config 944 | env: 945 | - name: NODE_TYPE 946 | value: config 947 | - name: CASSANDRA_CQL_PORT 948 | value: "9041" 949 | - name: CASSANDRA_JMX_LOCAL_PORT 950 | value: "7201" 951 | - name: CONFIG_NODEMGR__DEFAULTS__minimum_diskGB 952 | value: "2" 953 | # todo: there is type Socket in new kubernetes, it is possible to use full 954 | # path: 955 | # hostPath: 956 | # path: /var/run/docker.sock and 957 | # type: Socket 958 | volumeMounts: 959 | - mountPath: /var/log/contrail 960 | name: config-logs 961 | - mountPath: /mnt 962 | name: docker-unix-socket 963 | volumes: 964 | - name: config-logs 965 | hostPath: 966 | path: /var/log/contrail/config 967 | - name: docker-unix-socket 968 | hostPath: 969 | path: /var/run 970 | - name: host-usr-bin 971 | hostPath: 972 | path: /usr/bin 973 | --- 974 | apiVersion: extensions/v1beta1 975 | kind: 
DaemonSet 976 | metadata: 977 | name: contrail-controller-webui 978 | namespace: kube-system 979 | labels: 980 | app: contrail-controller-webui 981 | spec: 982 | template: 983 | metadata: 984 | labels: 985 | app: contrail-controller-webui 986 | spec: 987 | affinity: 988 | nodeAffinity: 989 | requiredDuringSchedulingIgnoredDuringExecution: 990 | nodeSelectorTerms: 991 | - matchExpressions: 992 | - key: "node-role.kubernetes.io/master" 993 | operator: Exists 994 | tolerations: 995 | - key: node-role.kubernetes.io/master 996 | operator: Exists 997 | effect: NoSchedule 998 | - key: node.kubernetes.io/not-ready 999 | operator: Exists 1000 | effect: NoSchedule 1001 | hostNetwork: true 1002 | initContainers: 1003 | - name: contrail-node-init 1004 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 1005 | imagePullPolicy: "" 1006 | securityContext: 1007 | privileged: true 1008 | env: 1009 | - name: CONTRAIL_STATUS_IMAGE 1010 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 1011 | envFrom: 1012 | - configMapRef: 1013 | name: env 1014 | - configMapRef: 1015 | name: configzookeeperenv 1016 | volumeMounts: 1017 | - mountPath: /host/usr/bin 1018 | name: host-usr-bin 1019 | containers: 1020 | - name: contrail-controller-webui-job 1021 | image: "{{ CONTRAIL_REPO }}/contrail-controller-webui-job:{{ CONTRAIL_RELEASE }}" 1022 | imagePullPolicy: "" 1023 | envFrom: 1024 | - configMapRef: 1025 | name: env 1026 | - configMapRef: 1027 | name: configzookeeperenv 1028 | volumeMounts: 1029 | - mountPath: /var/log/contrail 1030 | name: webui-logs 1031 | - name: contrail-controller-webui-web 1032 | image: "{{ CONTRAIL_REPO }}/contrail-controller-webui-web:{{ CONTRAIL_RELEASE }}" 1033 | imagePullPolicy: "" 1034 | envFrom: 1035 | - configMapRef: 1036 | name: env 1037 | - configMapRef: 1038 | name: configzookeeperenv 1039 | volumeMounts: 1040 | - mountPath: /var/log/contrail 1041 | name: webui-logs 1042 | volumes: 1043 | - name: webui-logs 1044 | 
hostPath: 1045 | path: /var/log/contrail/webui 1046 | - name: host-usr-bin 1047 | hostPath: 1048 | path: /usr/bin 1049 | --- 1050 | apiVersion: extensions/v1beta1 1051 | kind: DaemonSet 1052 | metadata: 1053 | name: redis 1054 | namespace: kube-system 1055 | labels: 1056 | app: redis 1057 | spec: 1058 | template: 1059 | metadata: 1060 | labels: 1061 | app: redis 1062 | spec: 1063 | affinity: 1064 | nodeAffinity: 1065 | requiredDuringSchedulingIgnoredDuringExecution: 1066 | nodeSelectorTerms: 1067 | - matchExpressions: 1068 | - key: "node-role.kubernetes.io/master" 1069 | operator: Exists 1070 | tolerations: 1071 | - key: node-role.kubernetes.io/master 1072 | operator: Exists 1073 | effect: NoSchedule 1074 | - key: node.kubernetes.io/not-ready 1075 | operator: Exists 1076 | effect: NoSchedule 1077 | hostNetwork: true 1078 | containers: 1079 | - name: redis 1080 | image: "redis:4.0.2" 1081 | imagePullPolicy: "" 1082 | volumeMounts: 1083 | - mountPath: /var/lib/redis 1084 | name: redis-data 1085 | - mountPath: /var/log/redis 1086 | name: redis-logs 1087 | volumes: 1088 | - name: redis-data 1089 | hostPath: 1090 | path: /var/lib/contrail/redis 1091 | - name: redis-logs 1092 | hostPath: 1093 | path: /var/log/contrail/redis 1094 | --- 1095 | apiVersion: extensions/v1beta1 1096 | kind: DaemonSet 1097 | metadata: 1098 | name: rabbitmq 1099 | namespace: kube-system 1100 | labels: 1101 | app: rabbitmq 1102 | spec: 1103 | template: 1104 | metadata: 1105 | labels: 1106 | app: rabbitmq 1107 | spec: 1108 | affinity: 1109 | nodeAffinity: 1110 | requiredDuringSchedulingIgnoredDuringExecution: 1111 | nodeSelectorTerms: 1112 | - matchExpressions: 1113 | - key: "node-role.kubernetes.io/master" 1114 | operator: Exists 1115 | tolerations: 1116 | - key: node-role.kubernetes.io/master 1117 | operator: Exists 1118 | effect: NoSchedule 1119 | - key: node.kubernetes.io/not-ready 1120 | operator: Exists 1121 | effect: NoSchedule 1122 | hostNetwork: true 1123 | containers: 1124 | - name: 
rabbitmq 1125 | image: "{{ CONTRAIL_REPO }}/contrail-external-rabbitmq:{{ CONTRAIL_RELEASE }}" 1126 | imagePullPolicy: "" 1127 | env: 1128 | - name: NODE_TYPE 1129 | value: config 1130 | envFrom: 1131 | - configMapRef: 1132 | name: env 1133 | - configMapRef: 1134 | name: configzookeeperenv 1135 | - configMapRef: 1136 | name: rabbitmq-config 1137 | volumeMounts: 1138 | - mountPath: /var/lib/rabbitmq 1139 | name: rabbitmq-data 1140 | - mountPath: /var/log/rabbitmq 1141 | name: rabbitmq-logs 1142 | volumes: 1143 | - name: rabbitmq-data 1144 | hostPath: 1145 | path: /var/lib/contrail/rabbitmq 1146 | - name: rabbitmq-logs 1147 | hostPath: 1148 | path: /var/log/contrail/rabbitmq 1149 | --- 1150 | apiVersion: extensions/v1beta1 1151 | kind: DaemonSet 1152 | metadata: 1153 | name: contrail-kube-manager 1154 | namespace: kube-system 1155 | labels: 1156 | app: contrail-kube-manager 1157 | spec: 1158 | template: 1159 | metadata: 1160 | labels: 1161 | app: contrail-kube-manager 1162 | spec: 1163 | affinity: 1164 | nodeAffinity: 1165 | requiredDuringSchedulingIgnoredDuringExecution: 1166 | nodeSelectorTerms: 1167 | - matchExpressions: 1168 | - key: "node-role.kubernetes.io/master" 1169 | operator: Exists 1170 | tolerations: 1171 | - key: node-role.kubernetes.io/master 1172 | operator: Exists 1173 | effect: NoSchedule 1174 | - key: node.kubernetes.io/not-ready 1175 | operator: Exists 1176 | effect: NoSchedule 1177 | automountServiceAccountToken: false 1178 | hostNetwork: true 1179 | initContainers: 1180 | - name: contrail-node-init 1181 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 1182 | imagePullPolicy: "" 1183 | securityContext: 1184 | privileged: true 1185 | env: 1186 | - name: CONTRAIL_STATUS_IMAGE 1187 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 1188 | envFrom: 1189 | - configMapRef: 1190 | name: env 1191 | - configMapRef: 1192 | name: configzookeeperenv 1193 | volumeMounts: 1194 | - mountPath: /host/usr/bin 1195 | name: 
host-usr-bin 1196 | containers: 1197 | - name: contrail-kube-manager 1198 | image: "{{ CONTRAIL_REPO }}/contrail-kubernetes-kube-manager:{{ CONTRAIL_RELEASE }}" 1199 | imagePullPolicy: "" 1200 | envFrom: 1201 | - configMapRef: 1202 | name: env 1203 | - configMapRef: 1204 | name: configzookeeperenv 1205 | - configMapRef: 1206 | name: kube-manager-config 1207 | volumeMounts: 1208 | - mountPath: /var/log/contrail 1209 | name: kube-manager-logs 1210 | - mountPath: /tmp/serviceaccount 1211 | name: pod-secret 1212 | volumes: 1213 | - name: kube-manager-logs 1214 | hostPath: 1215 | path: /var/log/contrail/kube-manager 1216 | - name: pod-secret 1217 | secret: 1218 | secretName: contrail-kube-manager-token 1219 | - name: host-usr-bin 1220 | hostPath: 1221 | path: /usr/bin 1222 | --- 1223 | apiVersion: extensions/v1beta1 1224 | kind: DaemonSet 1225 | metadata: 1226 | name: contrail-agent 1227 | namespace: kube-system 1228 | labels: 1229 | app: contrail-agent 1230 | spec: 1231 | template: 1232 | metadata: 1233 | labels: 1234 | app: contrail-agent 1235 | spec: 1236 | tolerations: 1237 | - key: node-role.kubernetes.io/master 1238 | operator: Exists 1239 | effect: NoSchedule 1240 | - key: node.kubernetes.io/not-ready 1241 | operator: Exists 1242 | effect: NoSchedule 1243 | automountServiceAccountToken: false 1244 | hostNetwork: true 1245 | initContainers: 1246 | - name: contrail-node-init 1247 | image: "{{ CONTRAIL_REPO }}/contrail-node-init:{{ CONTRAIL_RELEASE }}" 1248 | imagePullPolicy: "" 1249 | securityContext: 1250 | privileged: true 1251 | env: 1252 | - name: CONTRAIL_STATUS_IMAGE 1253 | value: "{{ CONTRAIL_REPO }}/contrail-status:{{ CONTRAIL_RELEASE }}" 1254 | envFrom: 1255 | - configMapRef: 1256 | name: env 1257 | - configMapRef: 1258 | name: configzookeeperenv 1259 | volumeMounts: 1260 | - mountPath: /host/usr/bin 1261 | name: host-usr-bin 1262 | - name: contrail-vrouter-kernel-init 1263 | image: "{{ CONTRAIL_REPO }}/contrail-vrouter-kernel-init:{{ CONTRAIL_RELEASE }}" 
1264 | imagePullPolicy: "" 1265 | securityContext: 1266 | privileged: true 1267 | envFrom: 1268 | - configMapRef: 1269 | name: env 1270 | - configMapRef: 1271 | name: configzookeeperenv 1272 | volumeMounts: 1273 | - mountPath: /usr/src 1274 | name: usr-src 1275 | - mountPath: /lib/modules 1276 | name: lib-modules 1277 | - mountPath: /etc/sysconfig/network-scripts 1278 | name: network-scripts 1279 | - mountPath: /host/bin 1280 | name: host-bin 1281 | - name: contrail-kubernetes-cni-init 1282 | image: "{{ CONTRAIL_REPO }}/contrail-kubernetes-cni-init:{{ CONTRAIL_RELEASE }}" 1283 | imagePullPolicy: "" 1284 | envFrom: 1285 | - configMapRef: 1286 | name: env 1287 | - configMapRef: 1288 | name: configzookeeperenv 1289 | volumeMounts: 1290 | - mountPath: /var/lib/contrail 1291 | name: var-lib-contrail 1292 | - mountPath: /host/etc_cni 1293 | name: etc-cni 1294 | - mountPath: /host/opt_cni_bin 1295 | name: opt-cni-bin 1296 | - mountPath: /host/log_cni 1297 | name: var-log-contrail-cni 1298 | - mountPath: /var/log/contrail 1299 | name: agent-logs 1300 | containers: 1301 | - name: contrail-vrouter-agent 1302 | image: "{{ CONTRAIL_REPO }}/contrail-vrouter-agent:{{ CONTRAIL_RELEASE }}" 1303 | imagePullPolicy: "" 1304 | # TODO: Priveleged mode is requied because w/o it the device /dev/net/tun 1305 | # is not present in the container. The mounting it into container 1306 | # doesnt help because of permissions are not enough syscalls, 1307 | # e.g. https://github.com/Juniper/contrail-controller/blob/master/src/vnsw/agent/contrail/linux/pkt0_interface.cc: 48. 
1308 | securityContext: 1309 | privileged: true 1310 | envFrom: 1311 | - configMapRef: 1312 | name: env 1313 | - configMapRef: 1314 | name: configzookeeperenv 1315 | volumeMounts: 1316 | - mountPath: /dev 1317 | name: dev 1318 | - mountPath: /etc/sysconfig/network-scripts 1319 | name: network-scripts 1320 | - mountPath: /host/bin 1321 | name: host-bin 1322 | - mountPath: /var/log/contrail 1323 | name: agent-logs 1324 | - mountPath: /usr/src 1325 | name: usr-src 1326 | - mountPath: /lib/modules 1327 | name: lib-modules 1328 | - mountPath: /var/lib/contrail 1329 | name: var-lib-contrail 1330 | - mountPath: /var/crashes 1331 | name: var-crashes 1332 | - mountPath: /tmp/serviceaccount 1333 | name: pod-secret 1334 | - name: contrail-agent-nodemgr 1335 | image: "{{ CONTRAIL_REPO }}/contrail-nodemgr:{{ CONTRAIL_RELEASE }}" 1336 | imagePullPolicy: "" 1337 | envFrom: 1338 | - configMapRef: 1339 | name: env 1340 | - configMapRef: 1341 | name: configzookeeperenv 1342 | - configMapRef: 1343 | name: nodemgr-config 1344 | env: 1345 | - name: NODE_TYPE 1346 | value: vrouter 1347 | # todo: there is type Socket in new kubernetes, it is possible to use full 1348 | # path: 1349 | # hostPath: 1350 | # path: /var/run/docker.sock and 1351 | # type: Socket 1352 | volumeMounts: 1353 | - mountPath: /var/log/contrail 1354 | name: agent-logs 1355 | - mountPath: /mnt 1356 | name: docker-unix-socket 1357 | volumes: 1358 | - name: dev 1359 | hostPath: 1360 | path: /dev 1361 | - name: network-scripts 1362 | hostPath: 1363 | path: /etc/sysconfig/network-scripts 1364 | - name: host-bin 1365 | hostPath: 1366 | path: /bin 1367 | - name: docker-unix-socket 1368 | hostPath: 1369 | path: /var/run 1370 | - name: pod-secret 1371 | secret: 1372 | secretName: contrail-kube-manager-token 1373 | - name: usr-src 1374 | hostPath: 1375 | path: /usr/src 1376 | - name: lib-modules 1377 | hostPath: 1378 | path: /lib/modules 1379 | - name: var-lib-contrail 1380 | hostPath: 1381 | path: /var/lib/contrail 1382 | - 
name: var-crashes 1383 | hostPath: 1384 | path: /var/contrail/crashes 1385 | - name: etc-cni 1386 | hostPath: 1387 | path: /etc/cni 1388 | - name: opt-cni-bin 1389 | hostPath: 1390 | path: /opt/cni/bin 1391 | - name: var-log-contrail-cni 1392 | hostPath: 1393 | path: /var/log/contrail/cni 1394 | - name: agent-logs 1395 | hostPath: 1396 | path: /var/log/contrail/agent 1397 | - name: host-usr-bin 1398 | hostPath: 1399 | path: /usr/bin 1400 | 1401 | # Meta information section 1402 | --- 1403 | kind: ClusterRole 1404 | apiVersion: rbac.authorization.k8s.io/v1beta1 1405 | metadata: 1406 | name: contrail-kube-manager 1407 | namespace: kube-system 1408 | rules: 1409 | - apiGroups: ["*"] 1410 | resources: ["*"] 1411 | verbs: ["*"] 1412 | --- 1413 | apiVersion: v1 1414 | kind: ServiceAccount 1415 | metadata: 1416 | name: contrail-kube-manager 1417 | namespace: kube-system 1418 | --- 1419 | apiVersion: rbac.authorization.k8s.io/v1beta1 1420 | kind: ClusterRoleBinding 1421 | metadata: 1422 | name: contrail-kube-manager 1423 | roleRef: 1424 | apiGroup: rbac.authorization.k8s.io 1425 | kind: ClusterRole 1426 | name: contrail-kube-manager 1427 | subjects: 1428 | - kind: ServiceAccount 1429 | name: contrail-kube-manager 1430 | namespace: kube-system 1431 | --- 1432 | apiVersion: v1 1433 | kind: Secret 1434 | metadata: 1435 | name: contrail-kube-manager-token 1436 | namespace: kube-system 1437 | annotations: 1438 | kubernetes.io/service-account.name: contrail-kube-manager 1439 | type: kubernetes.io/service-account-token 1440 | -------------------------------------------------------------------------------- /install/openshift/3.11/nested-mode-openshift.md: -------------------------------------------------------------------------------- 1 | Nested mode is when Contrail provides networking for a Openshift cluster that is provisioned on an Contail-Openstack cluster. Contrail components are shared between the two clusters. 
2 | 3 | # __Prerequisites__ 4 | 5 | Please ensure that the following prerequisites are met, for a successful provisioning of Nested Contrail-Openshift cluster. 6 | 7 | - Installed and running Contrail Openstack cluster. 8 | This cluster should be based on Contrail 5.x release 9 | 10 | 11 | # __Provision__ 12 | 13 | Provisioning a Nested Openshift Cluster is a two step process: 14 | 15 | ***1. Create link-local services in the Contrail-Openstack cluster.*** 16 | 17 | ***2. Install openshift using openshift-ansible.*** 18 | 19 | 20 | ## Create link-local services 21 | 22 | A nested Openshift cluster is managed by the same contrail control processes that manage the underlying openstack cluster. Towards this goal, the nested Openshift cluster needs ip reachability to the contrail control processes. Since the Openshift cluster is actually an overlay on the openstack cluster, we use Link Local Service feature or a combination of Link Local + Fabric SNAT feature of Contrail to provide IP reachability to/from the overly Openshift cluster and openstack cluster. 23 | 24 | ### Option 1: Fabric SNAT + Link Local (Preferred) 25 | 26 | Step 1: Enable Fabric SNAT on the Virtual Network of the VM's 27 | 28 | Fabric SNAT feature should be enabled on the Virtual Network of the Virtual Machine's on which Openshift Master and Nodes are running. 29 | 30 | Step 2: Create one Link Local Service for CNI to communicate with its Vrouter 31 | 32 | To configure a Link Local Service, we need a Service IP and Fabric IP. Fabric IP is the node IP on which the vrouter agent of the minion is running. Service IP(along with port number) is used by data plane to identify the fabric ip/node. Service IP is required to be a unique and unused IP in the entire openstack cluster. 
33 | 34 | ***NOTE: The user is responsible for configuring these Link Local Services via the Contrail GUI.*** 35 | 36 | The following Link Local Service is required: 37 | 38 | | Contrail Process | Service IP | Service Port | Fabric IP | Fabric Port | 39 | | --- | --- | --- | --- | --- | 40 | | VRouter | < Service IP for the running node > | 9091 | 127.0.0.1 | 9091 | 41 | 42 | NOTE: Fabric IP is 127.0.0.1, as our intent is to make CNI talk to the Vrouter on its underlay node. 43 | 44 | #### Example: 45 | 46 | The following link-local services should be created: 47 | 48 | | LL Service Name | Service IP | Service Port | Fabric IP | Fabric Port | 49 | | --- | --- | --- | --- | --- | 50 | | K8s-cni-to-agent | 10.10.10.5 | 9091 | 127.0.0.1 | 9091 | 51 | 52 | NOTE: Here 10.10.10.5 is the Service IP that was chosen by the user. This can be any unused 53 | IP in the cluster. This IP is primarily used to identify link local traffic and has no 54 | other significance. 55 | 56 | 57 | ### Option 2: Link Local Only 58 | 59 | To configure a Link Local Service, we need a Service IP and a Fabric IP. The Fabric IP is the node IP on which the contrail processes are running. The Service IP (along with the port number) is used by the data plane to identify the fabric ip/node. The Service IP is required to be a unique and unused IP in the entire openstack cluster. 
**For each node of the openstack cluster, one service IP should be identified.** 60 | 61 | ***NOTE: The user is responsible for configuring these Link Local Services via the Contrail GUI.*** 62 | 63 | The following Link Local Services are required: 64 | 65 | | Contrail Process | Service IP | Service Port | Fabric IP | Fabric Port | 66 | | --- | --- | --- | --- | --- | 67 | | Contrail Config | < Service IP for the running node > | 8082 | < Node IP of running node > | 8082 | 68 | | Contrail Analytics | < Service IP for the running node > | 8086 | < Node IP of running node > | 8086 | 69 | | Contrail Msg Queue | < Service IP for the running node > | 5673 | < Node IP of running node > | 5673 | 70 | | Contrail VNC DB | < Service IP for the running node > | 9161 | < Node IP of running node > | 9161 | 71 | | Keystone | < Service IP for the running node > | 35357 | < Node IP of running node > | 35357 | 72 | | K8s-cni-to-agent | < Service IP for the running node > | 9091 | 127.0.0.1 | 9091 | 73 | 74 | #### Example: 75 | 76 | Let's assume the following hypothetical Openstack Cluster where: 77 | ``` 78 | Contrail Config : 192.168.1.100 79 | Contrail Analytics : 192.168.1.100, 192.168.1.101 80 | Contrail Msg Queue : 192.168.1.100 81 | Contrail VNC DB : 192.168.1.100, 192.168.1.101, 192.168.1.102 82 | Keystone: 192.168.1.200 83 | Vrouter: 192.168.1.201, 192.168.1.202, 192.168.1.203 84 | ``` 85 | This cluster is made of 7 nodes. We will allocate 7 unused IPs for these nodes: 86 | ``` 87 | 192.168.1.100 --> 10.10.10.1 88 | 192.168.1.101 --> 10.10.10.2 89 | 192.168.1.102 --> 10.10.10.3 90 | 192.168.1.200 --> 10.10.10.4 91 | 192.168.1.201/192.168.1.202/192.168.1.203 --> 10.10.10.5 92 | NOTE: One Service IP will be enough to represent all VRouter nodes. 
93 | ``` 94 | The following link-local services should be created: 95 | 96 | | LL Service Name | Service IP | Service Port | Fabric IP | Fabric Port | 97 | | --- | --- | --- | --- | --- | 98 | | Contrail Config | 10.10.10.1 | 8082 | 192.168.1.100 | 8082 | 99 | | Contrail Analytics 1 | 10.10.10.1 | 8086 | 192.168.1.100 | 8086 | 100 | | Contrail Analytics 2 | 10.10.10.2 | 8086 | 192.168.1.101 | 8086 | 101 | | Contrail Msg Queue | 10.10.10.1 | 5673 | 192.168.1.100 | 5673 | 102 | | Contrail VNC DB 1 | 10.10.10.1 | 9161 | 192.168.1.100 | 9161 | 103 | | Contrail VNC DB 2 | 10.10.10.2 | 9161 | 192.168.1.101 | 9161 | 104 | | Contrail VNC DB 3 | 10.10.10.3 | 9161 | 192.168.1.102 | 9161 | 105 | | Keystone | 10.10.10.4 | 35357 | 192.168.1.200| 35357 | 106 | | K8s-cni-to-agent | 10.10.10.5 | 9091 | 127.0.0.1 | 9091 | 107 | 108 | 109 | ## Install openshift using openshift-ansible 110 | 111 | Now you can follow [stand-alone wiki](https://github.com/Juniper/contrail-kubernetes-docs/blob/master/install/openshift/3.9/standalone-openshift.md) 112 | and add following details to your ose-install file 113 | ``` 114 | #Nested mode vars 115 | nested_mode_contrail=true 116 | rabbitmq_node_port=5673 117 | contrail_nested_masters_ip="1.1.1.1 2.2.2.2 3.3.3.3" <--- ips of contrail controllers 118 | auth_mode=keystone 119 | keystone_auth_host= <--- This should be the IP where Keystone service is running. 120 | keystone_auth_admin_tenant=admin 121 | keystone_auth_admin_user=admin 122 | keystone_auth_admin_password=MAYffWrX7ZpPrV2AMAa9zAUvG <-- Keystone admin password. 123 | keystone_auth_admin_port=35357 124 | keystone_auth_url_version=/v3 125 | #k8s_nested_vrouter_vip is a service IP for the running node which we configured above 126 | k8s_nested_vrouter_vip=10.10.10.5 <-- Service IP configured for CNI to Agent communication.(K8s-cni-to-agent in above examples) 127 | #k8s_vip is kubernetes api server ip 128 | k8s_vip= <-- IP of the Openshift Master Node. 
129 | #cluster_network is the one which vm network belongs to 130 | cluster_network="{'domain': 'default-domain', 'project': 'admin', 'name': 'net1'}" <-- FQName of the Virtual Network where Virtual Machines are running. The VMs in which Openshift cluster is being installed in nested mode. 131 | #config_nodes="x.x.x.x,y.y.y.y.y" 132 | #analytics_nodes="x.x.x.x,y.y.y.y.y" 133 | #config_api_vip=x.x.x.x 134 | #analytics_api_vip=x.x.x.x 135 | ``` 136 | 137 | ## Note : 138 | 139 | 1. Your control data interface should have internet connectivity for some of openshift infra components to work. 140 | 2. Make sure your configurations above are correct, please verify it with the contrail which you are using. 141 | -------------------------------------------------------------------------------- /install/openshift/3.11/redhat/configurations.md: -------------------------------------------------------------------------------- 1 | ``` 2 | subscription-manager register --username <> --password <> --force 3 | subscription-manager attach --pool=<> 4 | 5 | subscription-manager repos --disable="*" 6 | 7 | subscription-manager repos \ 8 | --enable="rhel-7-server-rpms" \ 9 | --enable="rhel-7-server-extras-rpms" \ 10 | --enable="rhel-7-server-ose-3.11-rpms" \ 11 | --enable=rhel-7-fast-datapath-rpms \ 12 | --enable="rhel-7-server-ansible-2.6-rpms" 13 | 14 | yum install -y tcpdump wget git net-tools bind-utils yum-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct python-netaddr openshift-ansible 15 | ``` 16 | -------------------------------------------------------------------------------- /install/openshift/3.11/standalone-openshift.md: -------------------------------------------------------------------------------- 1 | 2 | # Provisioning of Openshift cluster using openshift-ansible deployer 3.11 3 | 4 | The following steps will install a standalone openshift cluster with Contrail as networking provider. 
5 | 6 | Provisioning of Openshift and Contrail is done through Ansible-playbooks. 7 | The required topology is as shown below. 8 | 9 | ![Contrail Standalone Solution](/images/ocp-3.11.png) 10 | * Note : vrouter is now installed on all nodes 11 | 12 | ### System Requirements : 13 | As per openshift recommendation follow the link 14 | https://docs.openshift.com/container-platform/3.11/install/prerequisites.html 15 | 16 | But recommended, 17 | * Master :- 16G RAM/8 vcpus/100G space ( this is for bare minimum openshift components, if you run more pods scale accordingly, same goes for infra node running openshift and contrail components ) 18 | * Infra :- 64G RAM/16 vcpus/100G space 19 | * compute :- as per openshift recommendation 20 | * NFS :- if nfs mount volumes are used please check disk capacity and mounts, also openshift-logging with nfs is not recommended by openshift 21 | 22 | ### Steps : 23 | 24 | * Set up the environment (all nodes): 25 | 26 | * For centOS (origin installations) ( not supported ) : 27 | 28 | * For Redhat (openshift-enterprise installations) : [click_here](/install/openshift/3.11/redhat/configurations.md) 29 | 30 | * Get the files from the released and verified tar or git clone below : 31 | 32 | ```shell 33 | git clone https://github.com/Juniper/openshift-ansible.git -b release-3.11-contrail 34 | ``` 35 | * Note : It is recommended to get the code from the tar, as the latest code may have behavior changes. 
36 | 37 | 38 | * For this setup I am assuming one master, one infra and one compute 39 | ```shell 40 | master : server1 (10.84.11.11) 41 | 42 | infra : server2 (10.84.11.22) 43 | 44 | compute : server3 (10.84.11.33) 45 | ``` 46 | 47 | * Edit /etc/hosts to have entries for all machines, e.g. (all nodes): 48 | 49 | ```shell 50 | [root@server1]# cat /etc/hosts 51 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 52 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 53 | 10.84.5.100 puppet 54 | 10.84.11.11 server1.contrail.juniper.net server1 55 | 10.84.11.22 server2.contrail.juniper.net server2 56 | 10.84.11.33 server3.contrail.juniper.net server3 57 | ``` 58 | 59 | * Set up passwordless ssh to the ansible node itself and all nodes: 60 | 61 | ```shell 62 | ssh-keygen -t rsa 63 | ssh-copy-id root@10.84.11.11 64 | ssh-copy-id root@10.84.11.22 65 | ssh-copy-id root@10.84.11.33 66 | ``` 67 | 68 | ### Run ansible playbook: 69 | Before running, make sure that you have edited the inventory/ose-install file as shown below. 
70 | 71 | ```shell 72 | ansible-playbook -i inventory/ose-install playbooks/prerequisites.yml 73 | ansible-playbook -i inventory/ose-install playbooks/deploy_cluster.yml 74 | ``` 75 | 76 | 77 | ### Sample ose-install file for non HA: 78 | 79 | ```yaml 80 | 81 | [OSEv3:vars] 82 | 83 | ########################################################################### 84 | ### Ansible Vars 85 | ########################################################################### 86 | #timeout=60 87 | 88 | ########################################################################### 89 | ### OpenShift Basic Vars 90 | ########################################################################### 91 | openshift_deployment_type=openshift-enterprise 92 | deployment_type=openshift-enterprise 93 | containerized=false 94 | openshift_disable_check=memory_availability,package_availability,disk_availability,package_version,docker_storage,docker_image_availability 95 | 96 | # Default node selectors 97 | openshift_hosted_infra_selector="node-role.kubernetes.io/infra=true" 98 | 99 | # Redhat customer credentials 100 | oreg_auth_user=<> 101 | oreg_auth_password=<> 102 | ########################################################################### 103 | ### OpenShift Master Vars 104 | ########################################################################### 105 | 106 | openshift_master_api_port=8443 107 | openshift_master_console_port=8443 108 | 109 | openshift_master_cluster_method=native 110 | #openshift_master_cluster_hostname=ip-172-31-25-175.ap-southeast-1.compute.internal 111 | #openshift_master_cluster_public_hostname=ec2-13-251-240-166.ap-southeast-1.compute.amazonaws.com 112 | #openshift_master_default_subdomain=apps.ap-southeast-1.compute.amazonaws.com 113 | 114 | # Set this line to enable NFS 115 | openshift_enable_unsupported_configurations=True 116 | 117 | 118 | ######################################################################### 119 | ### Contrail Variables 120 | 
######################################################################## 121 | 122 | contrail_version=5.0 123 | 124 | contrail_container_tag=5.0.2-0.398-queens 125 | contrail_registry="hub.juniper.net/contrail-nightly" 126 | contrail_registry_username=<> 127 | contrail_registry_password=<> 128 | # need to notify openshift to configure insecure registry as below 129 | #openshift_docker_insecure_registries="opencontrailnightly" 130 | 131 | #contrail_os_release=redhat7 132 | #analyticsdb_min_diskgb=50 133 | #configdb_min_diskgb=25 134 | #aaa_mode=no-auth 135 | #auth_mode=noauth 136 | #CLOUD_ORCHESTRATOR=openshift 137 | #LOG_LEVEL=SYS_NOTICE 138 | #METADATA_PROXY_SECRET=contrail 139 | #cloud_orchestrator=kubernetes 140 | #metadata_proxy_secret=contrail 141 | #log_level=SYS_NOTICE 142 | #rabbitmq_node_port=5672 143 | #zookeeper_analytics_port=2182 144 | #zookeeper_port=2181 145 | #zookeeper_ports=2888:3888 146 | #zookeeper_analytics_ports=4888:5888 147 | #vrouter_gateway=172.31.16.1 148 | #vrouter_physical_interface=eth0 149 | #kubernetes_api_secure_port=443 150 | #nested_mode_contrail=false 151 | 152 | # vip should be master 153 | #api_vip="172.31.25.175" 154 | 155 | service_subnets="172.30.0.0/16" 156 | pod_subnets="10.128.0.0/14" 157 | 158 | ########################################################################### 159 | ### OpenShift Network Vars 160 | ########################################################################### 161 | 162 | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' 163 | openshift_use_openshift_sdn=false 164 | #r_openshift_node_use_openshift_sdn=True 165 | os_sdn_network_plugin_name='cni' 166 | openshift_use_contrail=true 167 | #openshift_use_calico=true 168 | 169 | 170 | ########################################################################### 171 | ### OpenShift Authentication Vars 172 | ########################################################################### 173 | 174 | # htpasswd Authentication 175 | 
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 176 | 177 | 178 | ########################################################################### 179 | ### OpenShift Router and Registry Vars 180 | ########################################################################### 181 | 182 | openshift_hosted_router_replicas=1 183 | 184 | openshift_hosted_registry_replicas=1 185 | 186 | openshift_hosted_registry_storage_kind=nfs 187 | openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] 188 | openshift_hosted_registry_storage_nfs_directory=/export 189 | openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' 190 | openshift_hosted_registry_storage_volume_name=registry 191 | openshift_hosted_registry_storage_volume_size=10Gi 192 | openshift_hosted_registry_pullthrough=true 193 | openshift_hosted_registry_acceptschema2=true 194 | openshift_hosted_registry_enforcequota=true 195 | openshift_hosted_router_selector="node-role.kubernetes.io/infra=true" 196 | openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true" 197 | ########################################################################### 198 | ### OpenShift Service Catalog Vars 199 | ########################################################################### 200 | 201 | openshift_enable_service_catalog=true 202 | 203 | template_service_broker_install=true 204 | openshift_template_service_broker_namespaces=['openshift'] 205 | 206 | ansible_service_broker_install=true 207 | ansible_service_broker_local_registry_whitelist=['.*-apb$'] 208 | 209 | openshift_hosted_etcd_storage_kind=nfs 210 | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" 211 | openshift_hosted_etcd_storage_nfs_directory=/export 212 | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} 213 | openshift_hosted_etcd_storage_volume_name=etcd-asb 214 | 
openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] 215 | openshift_hosted_etcd_storage_volume_size=10G 216 | 217 | 218 | ########################################################################### 219 | ### OpenShift Metrics and Logging Vars 220 | ########################################################################### 221 | # Enable cluster metrics 222 | openshift_metrics_install_metrics=True 223 | 224 | openshift_metrics_storage_kind=nfs 225 | openshift_metrics_storage_access_modes=['ReadWriteOnce'] 226 | openshift_metrics_storage_nfs_directory=/export 227 | openshift_metrics_storage_nfs_options='*(rw,root_squash)' 228 | openshift_metrics_storage_volume_name=metrics 229 | openshift_metrics_storage_volume_size=10Gi 230 | openshift_metrics_storage_labels={'storage': 'metrics'} 231 | 232 | openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"} 233 | openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/master":"true"} 234 | openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/master":"true"} 235 | 236 | # Enable cluster logging 237 | openshift_logging_install_logging=True 238 | 239 | openshift_logging_storage_kind=nfs 240 | openshift_logging_storage_access_modes=['ReadWriteOnce'] 241 | openshift_logging_storage_nfs_directory=/export 242 | openshift_logging_storage_nfs_options='*(rw,root_squash)' 243 | openshift_logging_storage_volume_name=logging 244 | openshift_logging_storage_volume_size=10Gi 245 | openshift_logging_storage_labels={'storage': 'logging'} 246 | 247 | openshift_logging_es_cluster_size=1 248 | 249 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"} 250 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"} 251 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"} 252 | 253 | 254 | ########################################################################### 255 | ### OpenShift Prometheus Vars 256 | 
########################################################################### 257 | 258 | ## Add Prometheus Metrics: 259 | openshift_hosted_prometheus_deploy=true 260 | openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"} 261 | openshift_prometheus_namespace=openshift-metrics 262 | 263 | # Prometheus 264 | openshift_prometheus_storage_kind=nfs 265 | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] 266 | openshift_prometheus_storage_nfs_directory=/export 267 | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' 268 | openshift_prometheus_storage_volume_name=prometheus 269 | openshift_prometheus_storage_volume_size=10Gi 270 | openshift_prometheus_storage_labels={'storage': 'prometheus'} 271 | openshift_prometheus_storage_type='pvc' 272 | 273 | # For prometheus-alertmanager 274 | openshift_prometheus_alertmanager_storage_kind=nfs 275 | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] 276 | openshift_prometheus_alertmanager_storage_nfs_directory=/export 277 | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' 278 | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager 279 | openshift_prometheus_alertmanager_storage_volume_size=10Gi 280 | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} 281 | openshift_prometheus_alertmanager_storage_type='pvc' 282 | 283 | # For prometheus-alertbuffer 284 | openshift_prometheus_alertbuffer_storage_kind=nfs 285 | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] 286 | openshift_prometheus_alertbuffer_storage_nfs_directory=/export 287 | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' 288 | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer 289 | openshift_prometheus_alertbuffer_storage_volume_size=10Gi 290 | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} 291 | 
openshift_prometheus_alertbuffer_storage_type='pvc' 292 | 293 | 294 | ########################################################################### 295 | ### OpenShift Hosts 296 | ########################################################################### 297 | [OSEv3:children] 298 | masters 299 | etcd 300 | nodes 301 | nfs 302 | 303 | [masters] 304 | a6s41node1 305 | 306 | [etcd] 307 | a6s41node1 308 | 309 | [nodes] 310 | a6s41node1 openshift_node_group_name='node-config-master' 311 | a6s41node2 openshift_node_group_name='node-config-compute' 312 | a6s4node1 openshift_node_group_name='node-config-infra' 313 | 314 | [nfs] 315 | a6s41node2 316 | 317 | ``` 318 | ### Sample ose-install for Nested mode : 319 | 320 | ```yaml 321 | [OSEv3:vars] 322 | 323 | ########################################################################### 324 | ### Ansible Vars 325 | ########################################################################### 326 | #timeout=60 327 | 328 | ########################################################################### 329 | ### OpenShift Basic Vars 330 | ########################################################################### 331 | openshift_deployment_type=openshift-enterprise 332 | deployment_type=openshift-enterprise 333 | containerized=false 334 | openshift_disable_check=docker_image_availability,memory_availability,package_availability,disk_availability,package_version,docker_storage 335 | 336 | # Default node selectors 337 | openshift_hosted_infra_selector="node-role.kubernetes.io/infra=true" 338 | 339 | oreg_auth_user=<> 340 | oreg_auth_password=<> 341 | ########################################################################### 342 | ### OpenShift Master Vars 343 | ########################################################################### 344 | 345 | openshift_master_api_port=8443 346 | openshift_master_console_port=8443 347 | 348 | openshift_master_cluster_method=native 349 | 
#openshift_master_cluster_hostname=ip-172-31-25-175.ap-southeast-1.compute.internal 350 | #openshift_master_cluster_public_hostname=ec2-13-251-240-166.ap-southeast-1.compute.amazonaws.com 351 | #openshift_master_default_subdomain=apps.ap-southeast-1.compute.amazonaws.com 352 | 353 | # Set this line to enable NFS 354 | openshift_enable_unsupported_configurations=True 355 | 356 | 357 | ######################################################################### 358 | ### Contrail Variables 359 | ######################################################################## 360 | 361 | contrail_version=5.0 362 | 363 | contrail_container_tag=5.1.0-0.511-rhel-queens 364 | contrail_registry_insecure=false 365 | contrail_registry="hub.juniper.net/contrail-nightly" 366 | contrail_registry_username=<> 367 | contrail_registry_password=<> 368 | # need to notify openshift to configure insecure registry as below 369 | #openshift_docker_insecure_registries="opencontrailnightly" 370 | 371 | nested_mode_contrail=true 372 | auth_mode=keystone 373 | # contrail nested masters string seperated by space 374 | contrail_nested_masters_ip="10.84.13.51" 375 | keystone_auth_host=10.84.13.51 376 | keystone_auth_admin_tenant=admin 377 | keystone_auth_admin_user=admin 378 | keystone_auth_admin_password=MAYffWrX7ZpPrV2AMAa9zAUvG 379 | keystone_auth_admin_port=35357 380 | keystone_auth_url_version=/v3 381 | #k8s_nested_vrouter_vip is a service IP for the running node which we configured above 382 | k8s_nested_vrouter_vip=10.10.10.5 383 | #k8s_vip is kubernetes api server ip 384 | k8s_vip=192.168.100.3 385 | #cluster_network is the one which vm network belongs to 386 | cluster_network="{'domain': 'default-domain', 'project': 'admin', 'name': 'openshift-vn'}" 387 | 388 | #contrail_os_release=redhat7 389 | #analyticsdb_min_diskgb=50 390 | #configdb_min_diskgb=25 391 | #aaa_mode=no-auth 392 | #auth_mode=noauth 393 | #CLOUD_ORCHESTRATOR=openshift 394 | #LOG_LEVEL=SYS_NOTICE 395 | 
#METADATA_PROXY_SECRET=contrail 396 | #cloud_orchestrator=kubernetes 397 | #metadata_proxy_secret=contrail 398 | #log_level=SYS_NOTICE 399 | #rabbitmq_node_port=5673 400 | #zookeeper_analytics_port=2182 401 | #zookeeper_port=2181 402 | #zookeeper_ports=2888:3888 403 | #zookeeper_analytics_ports=4888:5888 404 | #vrouter_gateway=172.31.16.1 405 | #vrouter_physical_interface=eth0 406 | #kubernetes_api_secure_port=443 407 | #nested_mode_contrail=false 408 | 409 | # vip should be master 410 | #api_vip="172.31.25.175" 411 | 412 | service_subnets="172.30.0.0/16" 413 | pod_subnets="10.128.0.0/14" 414 | 415 | ########################################################################### 416 | ### OpenShift Network Vars 417 | ########################################################################### 418 | 419 | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' 420 | openshift_use_openshift_sdn=false 421 | #r_openshift_node_use_openshift_sdn=True 422 | os_sdn_network_plugin_name='cni' 423 | openshift_use_contrail=true 424 | #openshift_use_calico=true 425 | 426 | 427 | ########################################################################### 428 | ### OpenShift Authentication Vars 429 | ########################################################################### 430 | 431 | # htpasswd Authentication 432 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 433 | 434 | 435 | ########################################################################### 436 | ### OpenShift Router and Registry Vars 437 | ########################################################################### 438 | 439 | openshift_hosted_router_replicas=1 440 | 441 | openshift_hosted_registry_replicas=1 442 | 443 | openshift_hosted_registry_storage_kind=nfs 444 | openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] 445 | openshift_hosted_registry_storage_nfs_directory=/export 446 | 
openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' 447 | openshift_hosted_registry_storage_volume_name=registry 448 | openshift_hosted_registry_storage_volume_size=10Gi 449 | openshift_hosted_registry_pullthrough=true 450 | openshift_hosted_registry_acceptschema2=true 451 | openshift_hosted_registry_enforcequota=true 452 | openshift_hosted_router_selector="node-role.kubernetes.io/infra=true" 453 | openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true" 454 | ########################################################################### 455 | ### OpenShift Service Catalog Vars 456 | ########################################################################### 457 | 458 | openshift_enable_service_catalog=true 459 | 460 | template_service_broker_install=true 461 | openshift_template_service_broker_namespaces=['openshift'] 462 | 463 | ansible_service_broker_install=true 464 | #ansible_service_broker_local_registry_whitelist=['.*-apb$'] 465 | 466 | openshift_hosted_etcd_storage_kind=nfs 467 | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" 468 | #openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash)" 469 | openshift_hosted_etcd_storage_nfs_directory=/export 470 | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} 471 | openshift_hosted_etcd_storage_volume_name=etcd-asb 472 | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] 473 | openshift_hosted_etcd_storage_volume_size=10G 474 | 475 | 476 | ########################################################################### 477 | ### OpenShift Metrics and Logging Vars 478 | ########################################################################### 479 | # Enable cluster metrics 480 | openshift_metrics_install_metrics=True 481 | #openshift_metrics_cassandra_storage_type=pv 482 | #openshift_metrics_hawkular_hostname=hawkular-metrics.example.com 483 | 484 | openshift_metrics_storage_kind=nfs 485 | 
openshift_metrics_storage_access_modes=['ReadWriteOnce'] 486 | openshift_metrics_storage_nfs_directory=/export 487 | openshift_metrics_storage_nfs_options='*(rw,root_squash)' 488 | openshift_metrics_storage_volume_name=metrics 489 | openshift_metrics_storage_volume_size=10Gi 490 | openshift_metrics_storage_labels={'storage': 'metrics'} 491 | 492 | openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"} 493 | openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra":"true"} 494 | openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra":"true"} 495 | 496 | # Enable cluster logging 497 | openshift_logging_install_logging=True 498 | 499 | openshift_logging_storage_kind=nfs 500 | openshift_logging_storage_access_modes=['ReadWriteOnce'] 501 | openshift_logging_storage_nfs_directory=/export 502 | openshift_logging_storage_nfs_options='*(rw,root_squash)' 503 | openshift_logging_storage_volume_name=logging 504 | openshift_logging_storage_volume_size=10Gi 505 | openshift_logging_storage_labels={'storage': 'logging'} 506 | 507 | openshift_logging_es_cluster_size=1 508 | 509 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"} 510 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"} 511 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"} 512 | 513 | 514 | ########################################################################### 515 | ### OpenShift Prometheus Vars 516 | ########################################################################### 517 | 518 | ## Add Prometheus Metrics: 519 | openshift_hosted_prometheus_deploy=true 520 | openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"} 521 | openshift_prometheus_namespace=openshift-metrics 522 | 523 | # Prometheus 524 | openshift_prometheus_storage_kind=nfs 525 | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] 526 | 
openshift_prometheus_storage_nfs_directory=/export 527 | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' 528 | openshift_prometheus_storage_volume_name=prometheus 529 | openshift_prometheus_storage_volume_size=10Gi 530 | openshift_prometheus_storage_labels={'storage': 'prometheus'} 531 | openshift_prometheus_storage_type='pvc' 532 | 533 | # For prometheus-alertmanager 534 | openshift_prometheus_alertmanager_storage_kind=nfs 535 | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] 536 | openshift_prometheus_alertmanager_storage_nfs_directory=/export 537 | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' 538 | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager 539 | openshift_prometheus_alertmanager_storage_volume_size=10Gi 540 | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} 541 | openshift_prometheus_alertmanager_storage_type='pvc' 542 | 543 | # For prometheus-alertbuffer 544 | openshift_prometheus_alertbuffer_storage_kind=nfs 545 | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] 546 | openshift_prometheus_alertbuffer_storage_nfs_directory=/export 547 | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' 548 | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer 549 | openshift_prometheus_alertbuffer_storage_volume_size=10Gi 550 | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} 551 | openshift_prometheus_alertbuffer_storage_type='pvc' 552 | 553 | 554 | ########################################################################### 555 | ### OpenShift Hosts 556 | ########################################################################### 557 | [OSEv3:children] 558 | masters 559 | etcd 560 | nodes 561 | nfs 562 | 563 | [masters] 564 | a6s41node1 565 | 566 | [etcd] 567 | a6s41node1 568 | 569 | [nodes] 570 | a6s41node1 
openshift_node_group_name='node-config-master' 571 | a6s41node2 openshift_node_group_name='node-config-compute' 572 | a6s4node1 openshift_node_group_name='node-config-infra' 573 | 574 | [nfs] 575 | a6s41node2 openshift_hostname=a6s41node2 576 | 577 | ``` 578 | 579 | ### Sample ose-install file for HA: 580 | ```yaml 581 | [OSEv3:vars] 582 | 583 | ########################################################################### 584 | ### Ansible Vars 585 | ########################################################################### 586 | #timeout=60 587 | 588 | ########################################################################### 589 | ### OpenShift Basic Vars 590 | ########################################################################### 591 | openshift_deployment_type=openshift-enterprise 592 | deployment_type=openshift-enterprise 593 | containerized=false 594 | openshift_disable_check=docker_image_availability,memory_availability,package_availability,disk_availability,package_version,docker_storage 595 | 596 | # Default node selectors 597 | openshift_hosted_infra_selector="node-role.kubernetes.io/infra=true" 598 | 599 | oreg_auth_user=<> 600 | oreg_auth_password=<> 601 | ########################################################################### 602 | ### OpenShift Master Vars 603 | ########################################################################### 604 | 605 | openshift_master_api_port=8443 606 | openshift_master_console_port=8443 607 | 608 | openshift_master_cluster_method=native 609 | openshift_master_cluster_hostname=lb 610 | openshift_master_cluster_public_hostname=lb 611 | #openshift_master_default_subdomain= 612 | 613 | # Set this line to enable NFS 614 | openshift_enable_unsupported_configurations=True 615 | 616 | 617 | ######################################################################### 618 | ### Contrail Variables 619 | ######################################################################## 620 | 621 | contrail_version=5.0 622 | 
openshift_use_contrail=true 623 | 624 | contrail_container_tag=5.0.2-0.398-queens 625 | contrail_registry_insecure=false 626 | contrail_registry="hub.juniper.net/contrail-nightly" 627 | contrail_registry_username=<> 628 | contrail_registry_password=<> 629 | # need to notify openshift to configure insecure registry as below 630 | #openshift_docker_insecure_registries="opencontrailnightly" 631 | 632 | 633 | #contrail_os_release=redhat7 634 | #analyticsdb_min_diskgb=50 635 | #configdb_min_diskgb=25 636 | #aaa_mode=no-auth 637 | #auth_mode=noauth 638 | #CLOUD_ORCHESTRATOR=openshift 639 | #LOG_LEVEL=SYS_NOTICE 640 | #METADATA_PROXY_SECRET=contrail 641 | #cloud_orchestrator=kubernetes 642 | #metadata_proxy_secret=contrail 643 | #log_level=SYS_NOTICE 644 | #rabbitmq_node_port=5672 645 | #zookeeper_analytics_port=2182 646 | #zookeeper_port=2181 647 | #zookeeper_ports=2888:3888 648 | #zookeeper_analytics_ports=4888:5888 649 | #vrouter_gateway=172.31.16.1 650 | #vrouter_physical_interface=eth0 651 | #kubernetes_api_secure_port=443 652 | #nested_mode_contrail=false 653 | 654 | # vip should be master 655 | #api_vip="172.31.25.175" 656 | 657 | service_subnets="172.30.0.0/16" 658 | pod_subnets="10.128.0.0/14" 659 | 660 | 661 | ########################################################################### 662 | ### OpenShift Network Vars 663 | ########################################################################### 664 | 665 | #os_sdn_network_plugin_name='redhat/openshift-ovs-networkpolicy' 666 | openshift_use_openshift_sdn=false 667 | r_openshift_node_use_openshift_sdn=True 668 | os_sdn_network_plugin_name='cni' 669 | 670 | ########################################################################### 671 | ### OpenShift Authentication Vars 672 | ########################################################################### 673 | 674 | # htpasswd Authentication 675 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 
'HTPasswdPasswordIdentityProvider'}] 676 | 677 | 678 | ########################################################################### 679 | ### OpenShift Router and Registry Vars 680 | ########################################################################### 681 | 682 | openshift_hosted_router_replicas=1 683 | 684 | openshift_hosted_registry_replicas=1 685 | 686 | openshift_hosted_registry_storage_kind=nfs 687 | openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] 688 | openshift_hosted_registry_storage_nfs_directory=/export 689 | openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' 690 | openshift_hosted_registry_storage_volume_name=registry 691 | openshift_hosted_registry_storage_volume_size=10Gi 692 | openshift_hosted_registry_pullthrough=true 693 | openshift_hosted_registry_acceptschema2=true 694 | openshift_hosted_registry_enforcequota=true 695 | openshift_hosted_router_selector="node-role.kubernetes.io/infra=true" 696 | openshift_hosted_registry_selector="node-role.kubernetes.io/infra=true" 697 | ########################################################################### 698 | ### OpenShift Service Catalog Vars 699 | ########################################################################### 700 | 701 | openshift_enable_service_catalog=true 702 | 703 | template_service_broker_install=true 704 | openshift_template_service_broker_namespaces=['openshift'] 705 | 706 | ansible_service_broker_install=true 707 | ansible_service_broker_local_registry_whitelist=['.*-apb$'] 708 | 709 | openshift_hosted_etcd_storage_kind=nfs 710 | openshift_hosted_etcd_storage_nfs_options="*(rw,root_squash,sync,no_wdelay)" 711 | openshift_hosted_etcd_storage_nfs_directory=/export 712 | openshift_hosted_etcd_storage_labels={'storage': 'etcd-asb'} 713 | openshift_hosted_etcd_storage_volume_name=etcd-asb 714 | openshift_hosted_etcd_storage_access_modes=['ReadWriteOnce'] 715 | openshift_hosted_etcd_storage_volume_size=10G 716 | 717 | 718 | 
########################################################################### 719 | ### OpenShift Metrics and Logging Vars 720 | ########################################################################### 721 | # Enable cluster metrics 722 | openshift_metrics_install_metrics=True 723 | 724 | openshift_metrics_storage_kind=nfs 725 | openshift_metrics_storage_access_modes=['ReadWriteOnce'] 726 | openshift_metrics_storage_nfs_directory=/export 727 | openshift_metrics_storage_nfs_options='*(rw,root_squash)' 728 | openshift_metrics_storage_volume_name=metrics 729 | openshift_metrics_storage_volume_size=10Gi 730 | openshift_metrics_storage_labels={'storage': 'metrics'} 731 | 732 | openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"} 733 | openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra":"true"} 734 | openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra":"true"} 735 | 736 | # Enable cluster logging 737 | openshift_logging_install_logging=True 738 | 739 | openshift_logging_storage_kind=nfs 740 | openshift_logging_storage_access_modes=['ReadWriteOnce'] 741 | openshift_logging_storage_nfs_directory=/export 742 | openshift_logging_storage_nfs_options='*(rw,root_squash)' 743 | openshift_logging_storage_volume_name=logging 744 | openshift_logging_storage_volume_size=10Gi 745 | openshift_logging_storage_labels={'storage': 'logging'} 746 | 747 | openshift_logging_kibana_hostname=kibana.apps.ap-southeast-1.compute.amazonaws.com 748 | openshift_logging_es_cluster_size=1 749 | 750 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"} 751 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"} 752 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"} 753 | 754 | 755 | ########################################################################### 756 | ### OpenShift Prometheus Vars 757 | 
########################################################################### 758 | 759 | ## Add Prometheus Metrics: 760 | openshift_hosted_prometheus_deploy=true 761 | openshift_prometheus_node_selector={"node-role.kubernetes.io/infra":"true"} 762 | openshift_prometheus_namespace=openshift-metrics 763 | 764 | # Prometheus 765 | openshift_prometheus_storage_kind=nfs 766 | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] 767 | openshift_prometheus_storage_nfs_directory=/export 768 | openshift_prometheus_storage_nfs_options='*(rw,root_squash)' 769 | openshift_prometheus_storage_volume_name=prometheus 770 | openshift_prometheus_storage_volume_size=10Gi 771 | openshift_prometheus_storage_labels={'storage': 'prometheus'} 772 | openshift_prometheus_storage_type='pvc' 773 | # For prometheus-alertmanager 774 | openshift_prometheus_alertmanager_storage_kind=nfs 775 | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] 776 | openshift_prometheus_alertmanager_storage_nfs_directory=/export 777 | openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)' 778 | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager 779 | openshift_prometheus_alertmanager_storage_volume_size=10Gi 780 | openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'} 781 | openshift_prometheus_alertmanager_storage_type='pvc' 782 | # For prometheus-alertbuffer 783 | openshift_prometheus_alertbuffer_storage_kind=nfs 784 | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] 785 | openshift_prometheus_alertbuffer_storage_nfs_directory=/export 786 | openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)' 787 | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer 788 | openshift_prometheus_alertbuffer_storage_volume_size=10Gi 789 | openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'} 790 | 
openshift_prometheus_alertbuffer_storage_type='pvc' 791 | 792 | 793 | ########################################################################### 794 | ### OpenShift Hosts 795 | ########################################################################### 796 | [OSEv3:children] 797 | masters 798 | etcd 799 | nodes 800 | nfs 801 | lb 802 | openshift_ca 803 | 804 | [masters] 805 | kube-master-0-e4c1bd8c1f8740e18aca00c95fcb5936 806 | kube-master-1-e4c1bd8c1f8740e18aca00c95fcb5936 807 | kube-master-2-e4c1bd8c1f8740e18aca00c95fcb5936 808 | 809 | [etcd] 810 | kube-master-0-e4c1bd8c1f8740e18aca00c95fcb5936 811 | kube-master-1-e4c1bd8c1f8740e18aca00c95fcb5936 812 | kube-master-2-e4c1bd8c1f8740e18aca00c95fcb5936 813 | 814 | [nodes] 815 | kube-master-0-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-master' 816 | kube-master-1-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-master' 817 | kube-master-2-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-master' 818 | controller-0-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-infra' 819 | controller-1-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-infra' 820 | controller-2-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-infra' 821 | compute-0-e4c1bd8c1f8740e18aca00c95fcb5936 openshift_node_group_name='node-config-compute' 822 | 823 | [nfs] 824 | compute-1-e4c1bd8c1f8740e18aca00c95fcb5936 825 | 826 | [lb] 827 | load-balancer-0-e4c1bd8c1f8740e18aca00c95fcb5936 828 | 829 | [openshift_ca] 830 | kube-master-0-e4c1bd8c1f8740e18aca00c95fcb5936 831 | kube-master-1-e4c1bd8c1f8740e18aca00c95fcb5936 832 | kube-master-2-e4c1bd8c1f8740e18aca00c95fcb5936 833 | 834 | ``` 835 | 836 | ### Issues 837 | * if there is a java error do, yum install java-1.8.0-openjdk-devel.x86_64 and rerun deploy_cluster 838 | * if the service_catalog is not passing but cluster is up fine, check /etc/resolv.conf whether it has 
cluster.local 839 | in its search line, and nameserver as host ip 840 | * ntp is installed by openshift and should be synchronized by user (does not affect any functionality of contrail, but shows up in contrail-status output). 841 | * If the ansible_service_broker component of openshift is not up and its "ansible_service_broker_deploy" task shows an error, it means that the "ansible_service_broker" pod did not come up fine. The reason usually is that it failed its liveness and readiness checks. Modify the liveness and readiness checks of this pod when it is brought up to make it running. The root cause is usually an incorrect redhat url; verify that the pod uses the correct url. 842 | * If you are facing a SELINUX issue in the playbooks, change the SELINUX configuration in the /etc/selinux/config file, reboot the nodes, and run the playbook again 843 | 844 | 845 | ### Note: 846 | * use "oc adm manage-node --selector=region=infra --schedulable=false" to make infra nodes non-schedulable 847 | 848 | 849 | ### Make the OpenShift web console work 850 | Create a password for the admin user to log in to the UI from the master node 851 | ``` 852 | (master-node)# htpasswd /etc/origin/master/htpasswd admin 853 | ``` 854 | **If you are using a lb, you manually need to copy the htpasswd file onto all your masters 855 | 856 | Assign the cluster-admin role to the admin user 857 | ``` 858 | (master-node)# oc adm policy add-cluster-role-to-user cluster-admin admin 859 | (master-node)# oc login -u admin 860 | ``` 861 | 862 | ### Accessing the web console 863 | * Go to a browser and type the entire fqdn name of your master node / lb node, followed by :8443/console 864 | ``` 865 | https://:8443/console 866 | ``` 867 | * use the username/password created above to log in to the web console 868 | * Note : your dns should resolve the above hostname for access; otherwise modify your /etc/hosts file to route 869 | to the above host 870 | 871 | 872 | ### FAQ 873 | * If you copy-paste into the inventory file, please check for unicode characters. 
874 | * Please use -vvv in your ansible runs for more logs 875 | -------------------------------------------------------------------------------- /install/openshift/3.7/standalone-openshift.md: -------------------------------------------------------------------------------- 1 | 2 | # Provisioning of Openshift cluster using openshift-ansible deployer 3.7 3 | 4 | The following steps will install a standalone openshift cluster with Contrail as networking provider. 5 | 6 | Provisioning of Openshift and Contrail is done through Ansible-playbooks. 7 | 8 | ![Contrail Standalone Solution](/images/standalone-openshift-3.7.png) 9 | 10 | ### Reimage all your servers with : 11 | 12 | ```shell 13 | /cs-shared/server-manager/client/server-manager reimage --server_id server1 centos-7.4 14 | ``` 15 | 16 | ### Setup environment(all nodes): 17 | 18 | ```shell 19 | yum install vim git wget -y && wget -O /tmp/epel-release-latest-7.noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && rpm -ivh /tmp/epel-release-latest-7.noarch.rpm && yum update -y && yum install python-pip -y && pip install ansible==2.5.2 20 | ``` 21 | 22 | ### Clone ansible repo (ansible node): 23 | 24 | ```shell 25 | git clone https://github.com/Juniper/openshift-ansible.git -b release-3.7-contrail 26 | ``` 27 | 28 | ### For this setup am assuming one master one slave 29 | 30 | master : server1 (10.84.11.11) 31 | 32 | slave : server2 (10.84.11.22) 33 | 34 | ### Edit /etc/hosts to have all machines entry for eg(all nodes): 35 | 36 | ```shell 37 | [root@server1]# cat /etc/hosts 38 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 39 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 40 | 10.84.5.100 puppet 41 | 10.84.11.11 server1.contrail.juniper.net server1 42 | 10.84.11.22 server2.contrail.juniper.net server2 43 | 20.1.1.1 server1.contrail.juniper.net server1 44 | 20.1.1.2 server2.contrail.juniper.net server2 45 | ``` 46 | 47 | ### 
Setup passless ssh to ansible node itself and all nodes: 48 | 49 | ```shell 50 | ssh-keygen -t rsa 51 | ssh-copy-id root@10.84.11.11 52 | ssh-copy-id root@10.84.11.22 53 | ``` 54 | 55 | ### Run ansible playbook: 56 | Before running make sure that you have edited inventory/byo/ose-install file as shown below 57 | 58 | ```shell 59 | ansible-playbook -i inventory/byo/ose-install inventory/byo/ose-prerequisites.yml 60 | ansible-playbook -i inventory/byo/ose-install playbooks/byo/openshift_facts.yml 61 | ansible-playbook -i inventory/byo/ose-install playbooks/byo/config.yml 62 | ``` 63 | 64 | 65 | 66 | ### Note (temporary fixes): 67 | 68 | If you see any of below couple of different TASK errors : 69 | 70 | ```shell 71 | TASK [contrail_node : Label master nodes with opencontrail.org/controller=true] ****************************************************************************************************** 72 | Tuesday 20 March 2018 12:22:35 -0700 (0:00:00.672) 0:16:36.099 ********* 73 | failed: [10.84.11.22 -> 10.84.11.11] (item=10.84.11.11) => {"changed": true, "cmd": ["oc", "label", "nodes", "server1", "opencontrail.org/controller=true", "--overwrite=true"], "delta": "0:00:00.212255", "end": "2018-03-20 12:22:35.756727", "item": "10.84.11.11", "msg": "non-zero return code", "rc": 1, "start": "2018-03-20 12:22:35.544472", "stderr": "Error from server (NotFound): nodes \"server1\" not found", "stderr_lines": ["Error from server (NotFound): nodes \"server1\" not found"], "stdout": "", "stdout_lines": []} 74 | 75 | (or) 76 | TASK openshift_node : restart node 77 | RUNNING HANDLER [openshift_node : restart node] *********************************************************************************************************************************************************** 78 | Wednesday 21 March 2018 14:19:48 -0700 (0:00:00.086) 0:14:48.981 ******* 79 | FAILED - RETRYING: restart node (3 retries left). 80 | FAILED - RETRYING: restart node (3 retries left). 
81 | FAILED - RETRYING: restart node (3 retries left). 82 | FAILED - RETRYING: restart node (2 retries left). 83 | FAILED - RETRYING: restart node (2 retries left). 84 | FAILED - RETRYING: restart node (2 retries left). 85 | FAILED - RETRYING: restart node (1 retries left). 86 | FAILED - RETRYING: restart node (1 retries left). 87 | FAILED - RETRYING: restart node (1 retries left). 88 | fatal: [10.87.36.11]: FAILED! => {"attempts": 3, "changed": false, "msg": "Unable to restart service origin-node: Job for origin-node.service failed because the control process exited with error code. See \"systemctl status origin-node.service\" and \"journalctl -xe\" for details.\n"} 89 | fatal: [10.87.36.10]: FAILED! => {"attempts": 3, "changed": false, "msg": "Unable to restart service origin-node: Job for origin-node.service failed because the control process exited with error code. See \"systemctl status origin-node.service\" and \"journalctl -xe\" for details.\n"} 90 | fatal: [10.87.36.12]: FAILED! => {"attempts": 3, "changed": false, "msg": "Unable to restart service origin-node: Job for origin-node.service failed because the control process exited with error code. 
See \"systemctl status origin-node.service\" and \"journalctl -xe\" for details.\n"} 91 | ``` 92 | 93 | Fix: 94 | 95 | ```shell 96 | touch /etc/origin/node/resolv.conf 97 | 98 | rerun : 99 | ansible-playbook -i inventory/byo/ose-install playbooks/byo/config.yml 100 | 101 | ``` 102 | 103 | 104 | ### Sample ose-install file: 105 | 106 | ```yaml 107 | [OSEv3:children] 108 | masters 109 | nodes 110 | etcd 111 | openshift_ca 112 | 113 | [OSEv3:vars] 114 | ansible_ssh_user=root 115 | ansible_become=yes 116 | debug_level=2 117 | deployment_type=origin 118 | openshift_release=v3.7 119 | openshift_pkg_version=-3.7.1-2.el7 120 | #openshift_repos_enable_testing=true 121 | containerized=false 122 | openshift_install_examples=true 123 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 124 | osm_cluster_network_cidr=10.32.0.0/12 125 | openshift_portal_net=10.96.0.0/12 126 | openshift_use_dnsmasq=true 127 | openshift_clock_enabled=true 128 | openshift_hosted_manage_registry=true 129 | openshift_hosted_manage_router=true 130 | openshift_enable_service_catalog=false 131 | openshift_use_openshift_sdn=false 132 | os_sdn_network_plugin_name='cni' 133 | openshift_disable_check=memory_availability,package_availability,disk_availability,package_version,docker_storage 134 | openshift_docker_insecure_registries=opencontrailnightly 135 | 136 | openshift_use_contrail=true 137 | contrail_version=5.0 138 | contrail_container_tag=ocata-5.0-156 139 | contrail_registry=opencontrailnightly 140 | # Username /Password for private Docker regiteries 141 | #contrail_registry_username=test 142 | #contrail_registry_password=test 143 | # Below option presides over contrail masters if set 144 | #vrouter_physical_interface=ens160 145 | contrail_vip=10.87.65.48 146 | vrouter_gateway=10.84.13.254 147 | #docker_version=1.13.1 148 | 149 | # Contrail vars with default values 
150 | #kubernetes_api_server=10.84.13.51 151 | #kubernetes_api_port=8080 152 | #kubernetes_api_secure_port=8443 153 | #cluster_name=k8s 154 | #cluster_project={} 155 | #cluster_network={} 156 | #pod_subnets=10.32.0.0/12 157 | #ip_fabric_subnets=10.64.0.0/12 158 | #service_subnets=10.96.0.0/12 159 | #ip_fabric_forwarding=false 160 | #ip_fabric_snat=false 161 | #public_fip_pool={} 162 | #vnc_endpoint_ip=20.1.1.1 163 | #vnc_endpoint_port=8082 164 | 165 | [masters] 166 | 10.84.13.51 openshift_hostname=openshift-master 167 | 168 | [etcd] 169 | 10.84.13.51 openshift_hostname=openshift-master 170 | 171 | [nodes] 172 | 10.84.13.51 openshift_hostname=openshift-master 173 | 10.84.13.52 openshift_hostname=openshift-slave 174 | 175 | [openshift_ca] 176 | 10.84.13.51 openshift_hostname=openshift-master 177 | 178 | [contrail_masters] 179 | 20.1.1.1 openshift_hostname=openshift-master 180 | 181 | ``` 182 | -------------------------------------------------------------------------------- /install/openshift/3.9/centos/configurations.md: -------------------------------------------------------------------------------- 1 | 2 | ### Following steps are how you need to bring up your nodes before running ansible 3 | 4 | * Reimage all your servers with : 5 | 6 | ```shell 7 | /cs-shared/server-manager/client/server-manager reimage --server_id server1 centos-7.5 8 | ``` 9 | 10 | * Setup environment(all nodes): 11 | 12 | ```shell 13 | yum install vim git wget -y && wget -O /tmp/epel-release-latest-7.noarch.rpm https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && rpm -ivh /tmp/epel-release-latest-7.noarch.rpm && yum update -y && yum install python-pip -y && pip install ansible==2.5.2 && yum install python-netaddr -y 14 | 15 | yum install -y centos-release-openshift-origin 16 | ``` 17 | -------------------------------------------------------------------------------- /install/openshift/3.9/nested-mode-openshift.md: 
-------------------------------------------------------------------------------- 1 | Nested mode is when Contrail provides networking for an Openshift cluster that is provisioned on a Contrail-Openstack cluster. Contrail components are shared between the two clusters. 2 | 3 | # __Prerequisites__ 4 | 5 | Please ensure that the following prerequisites are met for a successful provisioning of a Nested Contrail-Openshift cluster. 6 | 7 | - Installed and running Contrail Openstack cluster. 8 | This cluster should be based on a Contrail 5.x release 9 | 10 | 11 | # __Provision__ 12 | 13 | Provisioning a Nested Openshift Cluster is a two-step process: 14 | 15 | ***1. Create link-local services in the Contrail-Openstack cluster.*** 16 | 17 | ***2. Install openshift using openshift-ansible.*** 18 | 19 | 20 | ## Create link-local services 21 | 22 | A nested Openshift cluster is managed by the same contrail control processes that manage the underlying openstack cluster. Towards this goal, the nested Openshift cluster needs IP reachability to the contrail control processes. Since the Openshift cluster is actually an overlay on the openstack cluster, we use the Link Local Service feature or a combination of the Link Local + Fabric SNAT features of Contrail to provide IP reachability to/from the overlay Openshift cluster and the openstack cluster. 23 | 24 | ### Option 1: Fabric SNAT + Link Local (Preferred) 25 | 26 | Step 1: Enable Fabric SNAT on the Virtual Network of the VMs 27 | 28 | The Fabric SNAT feature should be enabled on the Virtual Network of the Virtual Machines on which the Openshift Master and Nodes are running. 29 | 30 | Step 2: Create one Link Local Service for CNI to communicate with its Vrouter 31 | 32 | To configure a Link Local Service, we need a Service IP and a Fabric IP. The Fabric IP is the node IP on which the vrouter agent of the minion is running. The Service IP (along with the port number) is used by the data plane to identify the fabric ip/node. 
Service IP is required to be a unique and unused IP in the entire openstack cluster. 33 | 34 | ***NOTE: The user is responsible for configuring these Link Local Services via the Contrail GUI.*** 35 | 36 | The following Link Local Service is required: 37 | 38 | | Contrail Process | Service IP | Service Port | Fabric IP | Fabric Port | 39 | | --- | --- | --- | --- | --- | 40 | | VRouter | < Service IP for the running node > | 9091 | 127.0.0.1 | 9091 | 41 | 42 | NOTE: Fabric IP is 127.0.0.1, as our intent is to make CNI talk to the Vrouter on its underlay node. 43 | 44 | #### Example: 45 | 46 | The following link-local services should be created: 47 | 48 | | LL Service Name | Service IP | Service Port | Fabric IP | Fabric Port | 49 | | --- | --- | --- | --- | --- | 50 | | K8s-cni-to-agent | 10.10.10.5 | 9091 | 127.0.0.1 | 9091 | 51 | 52 | NOTE: Here 10.10.10.5 is the Service IP that was chosen by the user. This can be any unused 53 | IP in the cluster. This IP is primarily used to identify link local traffic and has no 54 | other significance. 55 | 56 | 57 | ### Option 2: Link Local Only 58 | 59 | To configure a Link Local Service, we need a Service IP and a Fabric IP. The Fabric IP is the node IP on which the contrail processes are running. The Service IP (along with the port number) is used by the data plane to identify the fabric IP/node. The Service IP is required to be a unique and unused IP in the entire openstack cluster.
**For each node of the openstack cluster, one service IP should be identified.** 60 | 61 | ***NOTE: The user is responsible for configuring these Link Local Services via the Contrail GUI.*** 62 | 63 | The following Link Local Services are required: 64 | 65 | | Contrail Process | Service IP | Service Port | Fabric IP | Fabric Port | 66 | | --- | --- | --- | --- | --- | 67 | | Contrail Config | < Service IP for the running node > | 8082 | < Node IP of running node > | 8082 | 68 | | Contrail Analytics | < Service IP for the running node > | 8086 | < Node IP of running node > | 8086 | 69 | | Contrail Msg Queue | < Service IP for the running node > | 5673 | < Node IP of running node > | 5673 | 70 | | Contrail VNC DB | < Service IP for the running node > | 9161 | < Node IP of running node > | 9161 | 71 | | Keystone | < Service IP for the running node > | 35357 | < Node IP of running node > | 35357 | 72 | | K8s-cni-to-agent | < Service IP for the running node > | 9091 | 127.0.0.1 | 9091 | 73 | 74 | #### Example: 75 | 76 | Let's assume the following hypothetical Openstack Cluster where: 77 | ``` 78 | Contrail Config : 192.168.1.100 79 | Contrail Analytics : 192.168.1.100, 192.168.1.101 80 | Contrail Msg Queue : 192.168.1.100 81 | Contrail VNC DB : 192.168.1.100, 192.168.1.101, 192.168.1.102 82 | Keystone: 192.168.1.200 83 | Vrouter: 192.168.1.201, 192.168.1.202, 192.168.1.203 84 | ``` 85 | This cluster is made of 7 nodes. We will allocate 7 unused IPs for these nodes: 86 | ``` 87 | 192.168.1.100 --> 10.10.10.1 88 | 192.168.1.101 --> 10.10.10.2 89 | 192.168.1.102 --> 10.10.10.3 90 | 192.168.1.200 --> 10.10.10.4 91 | 192.168.1.201/192.168.1.202/192.168.1.203 --> 10.10.10.5 92 | NOTE: One Service IP will be enough to represent all VRouter nodes.
93 | ``` 94 | The following link-local services should be created: 95 | 96 | | LL Service Name | Service IP | Service Port | Fabric IP | Fabric Port | 97 | | --- | --- | --- | --- | --- | 98 | | Contrail Config | 10.10.10.1 | 8082 | 192.168.1.100 | 8082 | 99 | | Contrail Analytics 1 | 10.10.10.1 | 8086 | 192.168.1.100 | 8086 | 100 | | Contrail Analytics 2 | 10.10.10.2 | 8086 | 192.168.1.101 | 8086 | 101 | | Contrail Msg Queue | 10.10.10.1 | 5673 | 192.168.1.100 | 5673 | 102 | | Contrail VNC DB 1 | 10.10.10.1 | 9161 | 192.168.1.100 | 9161 | 103 | | Contrail VNC DB 2 | 10.10.10.2 | 9161 | 192.168.1.101 | 9161 | 104 | | Contrail VNC DB 3 | 10.10.10.3 | 9161 | 192.168.1.102 | 9161 | 105 | | Keystone | 10.10.10.4 | 35357 | 192.168.1.200| 35357 | 106 | | K8s-cni-to-agent | 10.10.10.5 | 9091 | 127.0.0.1 | 9091 | 107 | 108 | 109 | ## Install openshift using openshift-ansible 110 | 111 | Now you can follow [stand-alone wiki](https://github.com/Juniper/contrail-kubernetes-docs/blob/master/install/openshift/3.9/standalone-openshift.md) 112 | and add following details to your ose-install file 113 | ``` 114 | #Nested mode vars 115 | nested_mode_contrail=true 116 | auth_mode=keystone 117 | keystone_auth_host= <--- This should be the IP where Keystone service is running. 118 | keystone_auth_admin_tenant=admin 119 | keystone_auth_admin_user=admin 120 | keystone_auth_admin_password=MAYffWrX7ZpPrV2AMAa9zAUvG <-- Keystone admin password. 121 | keystone_auth_admin_port=35357 122 | keystone_auth_url_version=/v3 123 | #k8s_nested_vrouter_vip is a service IP for the running node which we configured above 124 | k8s_nested_vrouter_vip=10.10.10.5 <-- Service IP configured for CNI to Agent communication.(K8s-cni-to-agent in above examples) 125 | #k8s_vip is kubernetes api server ip 126 | k8s_vip= <-- IP of the Openshift Master Node. 
127 | #cluster_network is the one which vm network belongs to 128 | cluster_network="{'domain': 'default-domain', 'project': 'admin', 'name': 'net1'}" <-- FQName of the Virtual Network where Virtual Machines are running. There are the VM's in which Openshift cluster is being installed in nested mode. 129 | ``` 130 | -------------------------------------------------------------------------------- /install/openshift/3.9/redhat/configurations.md: -------------------------------------------------------------------------------- 1 | 2 | ### Following steps are how you need to bring up your nodes before running ansible 3 | 4 | * Reimage all your servers with : 5 | 6 | ```shell 7 | /cs-shared/server-manager/client/server-manager reimage --server_id server1 redhat-7.5-minimal 8 | ``` 9 | 10 | * Setup environment(all nodes): 11 | * Register all nodes in cluster using Red Hat Subscription Manager (RHSM) 12 | ```shell 13 | (all-nodes)# subscription-manager register --username --password --force 14 | ``` 15 | * List the available subscriptions 16 | ```shell 17 | (all-nodes)# subscription-manager list --available --matches '*OpenShift*' 18 | ``` 19 | * From the previous command, find the pool ID for OpenShift Container Platform subscription & attach it 20 | ```shell 21 | (all-nodes)# subscription-manager attach --pool= 22 | ``` 23 | * Disable all yum respositories 24 | ```shell 25 | (all-nodes)# subscription-manager repos --disable="*" 26 | ``` 27 | * Enable only the repositories required by OpenShift Container Platform 3.9 28 | ```shell 29 | subscription-manager repos \ 30 | --enable="rhel-7-server-rpms" \ 31 | --enable="rhel-7-server-extras-rpms" \ 32 | --enable="rhel-7-server-ose-3.9-rpms" \ 33 | --enable="rhel-7-fast-datapath-rpms" \ 34 | --enable="rhel-7-server-ansible-2.5-rpms" 35 | ``` 36 | * Install EPEL 37 | ```shell 38 | (all-nodes)# yum install wget -y && wget -O /tmp/epel-release-latest-7.noarch.rpm 
https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && rpm -ivh /tmp/epel-release-latest-7.noarch.rpm 39 | ``` 40 | * Update the system to use the latest packages 41 | ```shell 42 | (all-nodes)# yum update -y 43 | ``` 44 | * Install the following package, which provides OpenShift Container Platform utilities 45 | ```shell 46 | (all-nodes)# yum install atomic-openshift-excluder atomic-openshift-utils git python-netaddr -y 47 | ``` 48 | * Remove the atomic-openshift packages from the list for the duration of the installation 49 | ```shell 50 | (all-nodes)# atomic-openshift-excluder unexclude -y 51 | ``` 52 | * Enforce SELinux security policy 53 | ```shell 54 | (all-nodes)# vi /etc/selinux/config 55 | SELINUX=enforcing 56 | ``` 57 | -------------------------------------------------------------------------------- /install/openshift/3.9/standalone-openshift.md: -------------------------------------------------------------------------------- 1 | 2 | # Provisioning of Openshift cluster using openshift-ansible deployer 3.9 3 | 4 | The following steps will install a standalone openshift cluster with Contrail as networking provider. 5 | 6 | Provisioning of Openshift and Contrail is done through Ansible-playbooks. 7 | Required topology is as shown below. 
8 | 9 | ![Contrail Standalone Solution](/images/standalone-openshift-3.9.png) 10 | 11 | ### Steps : 12 | 13 | * Setup environment(all nodes): 14 | 15 | * For centOS (origin installations) : [click_here](/install/openshift/3.9/centos/configurations.md) 16 | 17 | * For Redhat (openshift-enterprise installations) : [click_here](/install/openshift/3.9/redhat/configurations.md) 18 | 19 | * Needs supported ansible version 20 | 21 | ```shell 22 | yum install -y python-pip 23 | pip install ansible==2.5.2 24 | ``` 25 | * Get the files from the released and verified tar or git clone below : 26 | 27 | ```shell 28 | git clone https://github.com/Juniper/openshift-ansible.git -b release-3.9-contrail 29 | ``` 30 | * Note : It is recommended to get the code from the tar as the latest code may have behavior changes. 31 | 32 | 33 | * For this setup I am assuming one master, one infra and one compute 34 | ```shell 35 | master : server1 (10.84.11.11) 36 | 37 | infra : server2 (10.84.11.22) 38 | 39 | compute : server3 (10.84.11.33) 40 | ``` 41 | 42 | * Edit /etc/hosts to have entries for all machines, e.g. (all nodes): 43 | 44 | ```shell 45 | [root@server1]# cat /etc/hosts 46 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 47 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 48 | 10.84.5.100 puppet 49 | 10.84.11.11 server1.contrail.juniper.net server1 50 | 10.84.11.22 server2.contrail.juniper.net server2 51 | 10.84.11.33 server3.contrail.juniper.net server3 52 | ``` 53 | 54 | * Set up passwordless ssh to the ansible node itself and all nodes: 55 | 56 | ```shell 57 | ssh-keygen -t rsa 58 | ssh-copy-id root@10.84.11.11 59 | ssh-copy-id root@10.84.11.22 60 | ssh-copy-id root@10.84.11.33 61 | ``` 62 | 63 | ### Note (temporary fixes): 64 | 65 | 1.
If you see any of below couple of different TASK errors : 66 | 67 | ```shell 68 | TASK [contrail_node : Label master nodes with opencontrail.org/controller=true] ****************************************************************************************************** 69 | Tuesday 20 March 2018 12:22:35 -0700 (0:00:00.672) 0:16:36.099 ********* 70 | failed: [10.84.11.22 -> 10.84.11.11] (item=10.84.11.11) => {"changed": true, "cmd": ["oc", "label", "nodes", "server1", "opencontrail.org/controller=true", "--overwrite=true"], "delta": "0:00:00.212255", "end": "2018-03-20 12:22:35.756727", "item": "10.84.11.11", "msg": "non-zero return code", "rc": 1, "start": "2018-03-20 12:22:35.544472", "stderr": "Error from server (NotFound): nodes \"server1\" not found", "stderr_lines": ["Error from server (NotFound): nodes \"server1\" not found"], "stdout": "", "stdout_lines": []} 71 | 72 | (or) 73 | TASK openshift_node : restart node 74 | RUNNING HANDLER [openshift_node : restart node] *********************************************************************************************************************************************************** 75 | Wednesday 21 March 2018 14:19:48 -0700 (0:00:00.086) 0:14:48.981 ******* 76 | FAILED - RETRYING: restart node (3 retries left). 77 | FAILED - RETRYING: restart node (3 retries left). 78 | FAILED - RETRYING: restart node (3 retries left). 79 | FAILED - RETRYING: restart node (2 retries left). 80 | FAILED - RETRYING: restart node (2 retries left). 81 | FAILED - RETRYING: restart node (2 retries left). 82 | FAILED - RETRYING: restart node (1 retries left). 83 | FAILED - RETRYING: restart node (1 retries left). 84 | FAILED - RETRYING: restart node (1 retries left). 85 | fatal: [10.87.36.11]: FAILED! => {"attempts": 3, "changed": false, "msg": "Unable to restart service origin-node: Job for origin-node.service failed because the control process exited with error code. 
See \"systemctl status origin-node.service\" and \"journalctl -xe\" for details.\n"} 86 | fatal: [10.87.36.10]: FAILED! => {"attempts": 3, "changed": false, "msg": "Unable to restart service origin-node: Job for origin-node.service failed because the control process exited with error code. See \"systemctl status origin-node.service\" and \"journalctl -xe\" for details.\n"} 87 | fatal: [10.87.36.12]: FAILED! => {"attempts": 3, "changed": false, "msg": "Unable to restart service origin-node: Job for origin-node.service failed because the control process exited with error code. See \"systemctl status origin-node.service\" and \"journalctl -xe\" for details.\n"} 88 | ``` 89 | 90 | Fix: 91 | 92 | ```shell 93 | touch /etc/origin/node/resolv.conf 94 | 95 | rerun : 96 | ansible-playbook -i inventory/ose-install playbooks/deploy_cluster.yml 97 | 98 | ``` 99 | 2. If you see docker image pull errors, and your registry is a insecure registry. set openshift_docker_insecure_registries= in the ose-install and rerun prerequisites play. 100 | 101 | ### Run ansible playbook: 102 | Before running make sure that you have edited inventory/ose-install file as shown below. 
103 | 104 | ```shell 105 | ansible-playbook -i inventory/ose-install playbooks/prerequisites.yml 106 | ansible-playbook -i inventory/ose-install playbooks/deploy_cluster.yml 107 | ``` 108 | 109 | 110 | ### Sample ose-install file for non HA: 111 | 112 | ```yaml 113 | [OSEv3:children] 114 | masters 115 | nodes 116 | etcd 117 | openshift_ca 118 | 119 | [OSEv3:vars] 120 | ansible_ssh_user=root 121 | ansible_become=yes 122 | debug_level=2 123 | deployment_type=origin #openshift-enterprise for Redhat 124 | openshift_release=v3.9 125 | #openshift_repos_enable_testing=true 126 | containerized=false 127 | openshift_install_examples=true 128 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 129 | osm_cluster_network_cidr=10.32.0.0/12 130 | openshift_portal_net=10.96.0.0/12 131 | openshift_use_dnsmasq=true 132 | openshift_clock_enabled=true 133 | openshift_hosted_manage_registry=false 134 | openshift_hosted_manage_router=false 135 | openshift_enable_service_catalog=false 136 | openshift_use_openshift_sdn=false 137 | os_sdn_network_plugin_name='cni' 138 | openshift_disable_check=memory_availability,package_availability,disk_availability,package_version,docker_storage 139 | openshift_docker_insecure_registries=opencontrailnightly 140 | openshift_web_console_install=false 141 | #openshift_web_console_nodeselector={'region':'infra'} 142 | 143 | openshift_web_console_contrail_install=true 144 | openshift_use_contrail=true 145 | nested_mode_contrail=false 146 | contrail_version=5.0 147 | contrail_container_tag=ocata-5.0-156 148 | contrail_registry=opencontrailnightly 149 | # Username /Password for private Docker regiteries 150 | #contrail_registry_username=test 151 | #contrail_registry_password=test 152 | # Below option presides over contrail masters if set 153 | #vrouter_physical_interface=ens160 154 | #docker_version=1.13.1 155 | 
ntpserver=10.1.1.1 # a proper ntpserver is required for contrail. 156 | 157 | # Contrail_vars 158 | # below variables are used by contrail kubemanager to configure the cluster, 159 | # you can configure all options below. All values are defaults and can be modified. 160 | 161 | #kubernetes_api_server=10.84.13.52 # in our case this is the master, which is default 162 | #kubernetes_api_port=8080 163 | #kubernetes_api_secure_port=8443 164 | #cluster_name=myk8s 165 | #cluster_project={} 166 | #cluster_network={} 167 | #pod_subnets=10.32.0.0/12 168 | #ip_fabric_subnets=10.64.0.0/12 169 | #service_subnets=10.96.0.0/12 170 | #ip_fabric_forwarding=false 171 | #ip_fabric_snat=false 172 | #public_fip_pool={} 173 | #vnc_endpoint_ip=20.1.1.1 174 | #vnc_endpoint_port=8082 175 | 176 | [masters] 177 | 10.84.13.52 openshift_hostname=openshift-master 178 | 179 | [etcd] 180 | 10.84.13.52 openshift_hostname=openshift-master 181 | 182 | [nodes] 183 | 10.84.13.52 openshift_hostname=openshift-master 184 | 10.84.13.53 openshift_hostname=openshift-compute 185 | 10.84.13.54 openshift_hostname=openshift-infra openshift_node_labels="{'region': 'infra'}" 186 | 187 | [openshift_ca] 188 | 10.84.13.52 openshift_hostname=openshift-master 189 | ``` 190 | 191 | ### Sample ose-install file for HA: 192 | ```yaml 193 | [OSEv3:children] 194 | masters 195 | nodes 196 | etcd 197 | lb 198 | openshift_ca 199 | 200 | [OSEv3:vars] 201 | ansible_ssh_user=root 202 | ansible_become=yes 203 | debug_level=2 204 | deployment_type=openshift-enterprise 205 | openshift_release=v3.9 206 | openshift_repos_enable_testing=true 207 | containerized=false 208 | openshift_install_examples=true 209 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 210 | osm_cluster_network_cidr=10.32.0.0/12 211 | openshift_portal_net=10.96.0.0/12 212 | openshift_use_dnsmasq=true 213 | 
openshift_clock_enabled=true 214 | openshift_enable_service_catalog=false 215 | openshift_use_openshift_sdn=false 216 | os_sdn_network_plugin_name='cni' 217 | openshift_disable_check=disk_availability,package_version,docker_storage 218 | openshift_web_console_install=false 219 | openshift_web_console_contrail_install=true 220 | openshift_web_console_nodeselector={'region':'infra'} 221 | openshift_hosted_manage_registry=true 222 | openshift_hosted_registry_selector="region=infra" 223 | openshift_hosted_manage_router=true 224 | openshift_hosted_router_selector="region=infra" 225 | ntpserver= 226 | 227 | 228 | # Openshift HA 229 | openshift_master_cluster_method=native 230 | openshift_master_cluster_hostname=lb 231 | openshift_master_cluster_public_hostname=lb 232 | 233 | 234 | # Below are Contrail variables. Comment them out if you don't want to install Contrail through ansible-playbook 235 | contrail_version=5.0 236 | openshift_use_contrail=true 237 | contrail_registry=hub.juniper.net/contrail 238 | contrail_registry_username= 239 | contrail_registry_password= 240 | contrail_container_tag= 241 | #vrouter_physical_interface=eth0 242 | 243 | [masters] 244 | 10.0.0.13 openshift_hostname=master1 245 | 10.0.0.4 openshift_hostname=master2 246 | 10.0.0.5 openshift_hostname=master3 247 | 248 | [lb] 249 | 10.0.0.6 openshift_hostname=lb 250 | 251 | [etcd] 252 | 10.0.0.13 openshift_hostname=master1 253 | 10.0.0.4 openshift_hostname=master2 254 | 10.0.0.5 openshift_hostname=master3 255 | 256 | [nodes] 257 | 10.0.0.13 openshift_hostname=master1 258 | 10.0.0.4 openshift_hostname=master2 259 | 10.0.0.5 openshift_hostname=master3 260 | 10.0.0.7 openshift_hostname=slave1 261 | 10.0.0.10 openshift_hostname=slave2 262 | 10.0.0.9 openshift_hostname=infra1 openshift_node_labels="{'region': 'infra'}" 263 | 10.0.0.11 openshift_hostname=infra2 openshift_node_labels="{'region': 'infra'}" 264 | 10.0.0.8 openshift_hostname=infra3 openshift_node_labels="{'region': 'infra'}" 265 | 266 | 
[openshift_ca] 267 | 10.0.0.13 openshift_hostname=master1 268 | 10.0.0.4 openshift_hostname=master2 269 | 10.0.0.5 openshift_hostname=master3 270 | ``` 271 | 272 | 273 | ### Note: 274 | * dnsmasq on master needs to be restarted after installation if dns is not working as expected. 275 | * use "oc adm manage-node --selector=region=infra --schedulable=false" to make infra nodes non schedulable 276 | 277 | 278 | ### Make OpenShift web console working 279 | * We will be installing our customized web console which will run 280 | on infra nodes. 281 | * To do this disable the openshift web console and enable contrail's webconsole, add below in the ose-install 282 | ``` 283 | openshift_web_console_install=false 284 | openshift_web_console_contrail_install=true 285 | ``` 286 | 287 | After above step 288 | 289 | Create a password for admin user to login to the UI from master node 290 | ``` 291 | (master-node)# htpasswd /etc/origin/master/htpasswd admin 292 | ``` 293 | **If you are using a lb, you manually need to copy the htpasswd file into all your masters 294 | 295 | Assign cluster-admin role to admin user 296 | ``` 297 | (master-node)# oc adm policy add-cluster-role-to-user cluster-admin admin 298 | (master-node)# oc login -u admin 299 | ``` 300 | 301 | ### Accessing web console 302 | * Go to browser and type the entire fqdn name of your master node / lb node, followed by :8443/console 303 | ``` 304 | https://:8443/console 305 | ``` 306 | * use username/password created above to login into webconsole 307 | * Note : your dns should resolve for the above hostname for access, else modify your /etc/hosts file to route 308 | to above host 309 | 310 | -------------------------------------------------------------------------------- /install/openshift/README.md: -------------------------------------------------------------------------------- 1 | # contrail-kubernetes-docs -------------------------------------------------------------------------------- 
/install/openshift/legacy_files/3.7/install-files/all-in-one/contrail-installer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: contrail 6 | namespace: kube-system 7 | --- 8 | kind: ClusterRole 9 | apiVersion: v1 10 | metadata: 11 | name: contrail 12 | namespace: kube-system 13 | rules: 14 | - apiGroups: ["*"] 15 | resources: ["*"] 16 | verbs: ["*"] 17 | --- 18 | apiVersion: v1 19 | kind: ClusterRoleBinding 20 | metadata: 21 | name: contrail 22 | roleRef: 23 | name: contrail 24 | subjects: 25 | - kind: SystemUser 26 | name: kube-system:contrail 27 | - kind: ServiceAccount 28 | name: contrail 29 | namespace: kube-system 30 | userNames: 31 | - system:serviceaccount:kube-system:contrail 32 | --- 33 | apiVersion: v1 34 | kind: ConfigMap 35 | metadata: 36 | name: contrail-config 37 | namespace: kube-system 38 | data: 39 | global-config: |- 40 | [GLOBAL] 41 | cloud_orchestrator = openshift 42 | sandesh_ssl_enable = False 43 | enable_config_service = True 44 | enable_control_service = True 45 | enable_webui_service = True 46 | introspect_ssl_enable = False 47 | config_nodes = 48 | controller_nodes = 49 | analytics_nodes = 50 | analyticsdb_nodes = 51 | analyticsdb_minimum_diskgb = 52 | configdb_minimum_diskgb = 53 | opscenter_ip = 1.1.1.1 54 | agent-config: |- 55 | [AGENT] 56 | compile_vrouter_module = True 57 | vrouter_physical_interface = 58 | kubemanager-config: |- 59 | [KUBERNETES] 60 | cluster_name = k8s-default 61 | cluster_project = {} 62 | cluster_network = {} 63 | service_subnets = 10.96.0.0/12 64 | pod_subnets = 10.32.0.0/12 65 | api_server = 66 | kubernetes-agent-config: |- 67 | [AGENT] 68 | --- 69 | apiVersion: extensions/v1beta1 70 | kind: DaemonSet 71 | metadata: 72 | name: contrail-analyticsdb 73 | namespace: kube-system 74 | labels: 75 | app: contrail-analyticsdb 76 | spec: 77 | selector: 78 | matchLabels: 79 | app: contrail-analyticsdb 80 | 
template: 81 | metadata: 82 | labels: 83 | app: contrail-analyticsdb 84 | spec: 85 | nodeSelector: 86 | "opencontrail.org/controller": "true" 87 | hostNetwork: true 88 | containers: 89 | - name: contrail-analyticsdb 90 | image: "contrail-analyticsdb-redhat7:4.1.0.0-8" 91 | imagePullPolicy: "" 92 | securityContext: 93 | privileged: true 94 | volumeMounts: 95 | - mountPath: /etc/contrailctl 96 | name: contrail-config 97 | - mountPath: /var/lib/cassandra 98 | name: analyticsdb-data 99 | - mountPath: /var/lib/zookeeper 100 | name: zookeeper-analyticsdb-data 101 | volumes: 102 | - name: contrail-config 103 | configMap: 104 | name: contrail-config 105 | items: 106 | - key: global-config 107 | path: analyticsdb.conf 108 | - name: analyticsdb-data 109 | hostPath: 110 | path: /var/lib/analyticsdb 111 | - name: zookeeper-analyticsdb-data 112 | hostPath: 113 | path: /var/lib/analyticsdb_zookeeper_data 114 | --- 115 | apiVersion: extensions/v1beta1 116 | kind: DaemonSet 117 | metadata: 118 | name: contrail-analytics 119 | namespace: kube-system 120 | labels: 121 | app: contrail-analytics 122 | spec: 123 | template: 124 | metadata: 125 | labels: 126 | app: contrail-analytics 127 | spec: 128 | nodeSelector: 129 | "opencontrail.org/controller": "true" 130 | hostNetwork: true 131 | containers: 132 | - name: contrail-analytics 133 | image: "contrail-analytics-redhat7:4.1.0.0-8" 134 | imagePullPolicy: "" 135 | securityContext: 136 | privileged: true 137 | volumeMounts: 138 | - mountPath: /etc/contrailctl 139 | name: contrail-config 140 | volumes: 141 | - name: contrail-config 142 | configMap: 143 | name: contrail-config 144 | items: 145 | - key: global-config 146 | path: analytics.conf 147 | --- 148 | apiVersion: extensions/v1beta1 149 | kind: DaemonSet 150 | metadata: 151 | name: contrail-controller 152 | namespace: kube-system 153 | labels: 154 | app: contrail-controller 155 | spec: 156 | template: 157 | metadata: 158 | labels: 159 | app: contrail-controller 160 | spec: 161 | 
nodeSelector: 162 | "opencontrail.org/controller": "true" 163 | hostNetwork: true 164 | containers: 165 | - name: contrail-controller 166 | image: "contrail-controller-redhat7:4.1.0.0-8" 167 | imagePullPolicy: "" 168 | securityContext: 169 | privileged: true 170 | volumeMounts: 171 | - mountPath: /etc/contrailctl 172 | name: contrail-config 173 | - mountPath: /var/lib/cassandra 174 | name: configdb-data 175 | - mountPath: /var/lib/zookeeper 176 | name: zookeeper-data 177 | volumes: 178 | - name: contrail-config 179 | configMap: 180 | name: contrail-config 181 | items: 182 | - key: global-config 183 | path: controller.conf 184 | - name: configdb-data 185 | hostPath: 186 | path: /var/lib/configdb 187 | - name: zookeeper-data 188 | hostPath: 189 | path: /var/lib/config_zookeeper_data 190 | --- 191 | apiVersion: extensions/v1beta1 192 | kind: DaemonSet 193 | metadata: 194 | name: contrail-kube-manager 195 | namespace: kube-system 196 | labels: 197 | app: contrail-kube-manager 198 | spec: 199 | template: 200 | metadata: 201 | labels: 202 | app: contrail-kube-manager 203 | spec: 204 | nodeSelector: 205 | "opencontrail.org/controller": "true" 206 | automountServiceAccountToken: false 207 | hostNetwork: true 208 | containers: 209 | - name: contrail-kube-manager 210 | image: "contrail-kube-manager-redhat7:4.1.0.0-8" 211 | imagePullPolicy: "" 212 | securityContext: 213 | privileged: true 214 | volumeMounts: 215 | - mountPath: /tmp/contrailctl 216 | name: tmp-contrail-config 217 | - mountPath: /tmp/serviceaccount 218 | name: pod-secret 219 | volumes: 220 | - name: tmp-contrail-config 221 | configMap: 222 | name: contrail-config 223 | items: 224 | - key: global-config 225 | path: global.conf 226 | - key: kubemanager-config 227 | path: kubemanager.conf 228 | - name: pod-secret 229 | secret: 230 | secretName: contrail-kube-manager-token 231 | --- 232 | apiVersion: extensions/v1beta1 233 | kind: DaemonSet 234 | metadata: 235 | name: contrail-agent 236 | namespace: kube-system 237 
| labels: 238 | app: contrail-agent 239 | spec: 240 | template: 241 | metadata: 242 | labels: 243 | app: contrail-agent 244 | spec: 245 | affinity: 246 | nodeAffinity: 247 | requiredDuringSchedulingIgnoredDuringExecution: 248 | nodeSelectorTerms: 249 | - matchExpressions: 250 | - key: "opencontrail.org/controller" 251 | operator: DoesNotExist 252 | automountServiceAccountToken: false 253 | hostNetwork: true 254 | initContainers: 255 | - name: contrail-kubernetes-agent 256 | image: "contrail-kubernetes-agent-redhat7:4.1.0.0-8" 257 | imagePullPolicy: "" 258 | securityContext: 259 | privileged: true 260 | volumeMounts: 261 | - mountPath: /tmp/contrailctl 262 | name: tmp-contrail-config 263 | - mountPath: /var/lib/contrail/ 264 | name: var-lib-contrail 265 | - mountPath: /host/etc_cni 266 | name: etc-cni 267 | - mountPath: /host/opt_cni_bin 268 | name: opt-cni-bin 269 | - mountPath: /var/log/contrail/cni 270 | name: var-log-contrail-cni 271 | containers: 272 | - name: contrail-agent 273 | image: "contrail-agent-redhat7:4.1.0.0-8" 274 | imagePullPolicy: "" 275 | securityContext: 276 | privileged: true 277 | volumeMounts: 278 | - mountPath: /usr/src 279 | name: usr-src 280 | - mountPath: /lib/modules 281 | name: lib-modules 282 | - mountPath: /tmp/contrailctl 283 | name: tmp-contrail-config 284 | - mountPath: /var/lib/contrail/ 285 | name: var-lib-contrail 286 | - mountPath: /host/etc_cni 287 | name: etc-cni 288 | - mountPath: /host/opt_cni_bin 289 | name: opt-cni-bin 290 | # This is a workaround just to make sure the directory is created on host 291 | - mountPath: /var/log/contrail/cni 292 | name: var-log-contrail-cni 293 | - mountPath: /tmp/serviceaccount 294 | name: pod-secret 295 | volumes: 296 | - name: tmp-contrail-config 297 | configMap: 298 | name: contrail-config 299 | items: 300 | - key: global-config 301 | path: global.conf 302 | - key: agent-config 303 | path: agent.conf 304 | - key: kubemanager-config 305 | path: kubemanager.conf 306 | - key: 
kubernetes-agent-config 307 | path: kubernetesagent.conf 308 | - name: pod-secret 309 | secret: 310 | secretName: contrail-kube-manager-token 311 | - name: usr-src 312 | hostPath: 313 | path: /usr/src 314 | - name: usr-src-kernels 315 | hostPath: 316 | path: /usr/src/kernels 317 | - name: lib-modules 318 | hostPath: 319 | path: /lib/modules 320 | - name: var-lib-contrail 321 | hostPath: 322 | path: /var/lib/contrail/ 323 | - name: etc-cni 324 | hostPath: 325 | path: /etc/cni 326 | - name: opt-cni-bin 327 | hostPath: 328 | path: /opt/cni/bin 329 | - name: var-log-contrail-cni 330 | hostPath: 331 | path: /var/log/contrail/cni/ 332 | --- 333 | apiVersion: v1 334 | kind: Secret 335 | metadata: 336 | name: contrail-kube-manager-token 337 | namespace: kube-system 338 | annotations: 339 | kubernetes.io/service-account.name: contrail 340 | type: kubernetes.io/service-account-token 341 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/all-in-one/iptables-master: -------------------------------------------------------------------------------- 1 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8082 -j ACCEPT -m comment --comment contrail 2 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9100 -j ACCEPT -m comment --comment contrail 3 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8084 -j ACCEPT -m comment --comment contrail 4 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8143 -j ACCEPT -m comment --comment contrail 5 | 6 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8080 -j ACCEPT -m comment --comment contrail 7 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 5269 -j ACCEPT -m comment --comment contrail 8 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8093 -j ACCEPT -m comment --comment contrail 9 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8083 -j ACCEPT -m comment --comment contrail 10 | 11 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8092 -j ACCEPT -m comment --comment 
contrail 12 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8086 -j ACCEPT -m comment --comment contrail 13 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8081 -j ACCEPT -m comment --comment contrail 14 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 2181 -j ACCEPT -m comment --comment contrail 15 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 2182 -j ACCEPT -m comment --comment contrail 16 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 2888:3889 -j ACCEPT -m comment --comment contrail 17 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 5672 -j ACCEPT -m comment --comment contrail 18 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 6381 -j ACCEPT -m comment --comment contrail 19 | 20 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9092 -j ACCEPT -m comment --comment contrail 21 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9041 -j ACCEPT -m comment --comment contrail 22 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9042 -j ACCEPT -m comment --comment contrail 23 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9160 -j ACCEPT -m comment --comment contrail 24 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9161 -j ACCEPT -m comment --comment contrail 25 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 7000 -j ACCEPT -m comment --comment contrail 26 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 7010 -j ACCEPT -m comment --comment contrail 27 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 7199 -j ACCEPT -m comment --comment contrail 28 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 7198 -j ACCEPT -m comment --comment contrail 29 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8443 -j ACCEPT -m comment --comment contrail 30 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/all-in-one/iptables-node: -------------------------------------------------------------------------------- 1 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 8085 -j ACCEPT -m comment --comment 
contrail 2 | iptables -I OS_FIREWALL_ALLOW 1 -p tcp --dport 9091 -j ACCEPT -m comment --comment contrail 3 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/all-in-one/ose-install: -------------------------------------------------------------------------------- 1 | [OSEv3:children] 2 | masters 3 | nodes 4 | etcd 5 | 6 | [OSEv3:vars] 7 | ansible_ssh_user=root 8 | ansible_become=yes 9 | debug_level=2 10 | deployment_type=openshift-enterprise 11 | openshift_release=v3.7 12 | openshift_install_examples=true 13 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 14 | osm_cluster_network_cidr=10.32.0.0/12 15 | openshift_portal_net=10.96.0.0/12 16 | openshift_use_dnsmasq=true 17 | openshift_clock_enabled=true 18 | openshift_hosted_manage_registry=true 19 | openshift_hosted_manage_router=true 20 | openshift_enable_service_catalog=false 21 | openshift_use_openshift_sdn=false 22 | openshift_disable_check=docker_storage,package_version 23 | os_sdn_network_plugin_name='cni' 24 | 25 | 26 | # Below are Contrail variables. 
Comment them out if you don't want to install Contrail through ansible-playbook 27 | 28 | # Contrail 4.X releases 29 | #openshift_use_contrail=true 30 | #contrail_os_release=redhat7 31 | #contrail_version=4.0 32 | #analyticsdb_min_diskgb=50 33 | #configdb_min_diskgb=25 34 | #vrouter_physical_interface=eno1 35 | #contrail_docker_images_path=/root/docker_images 36 | 37 | # Contrail 5.X releases 38 | openshift_use_contrail=true 39 | contrail_version=5.0 40 | analyticsdb_min_diskgb=50 41 | configdb_min_diskgb=25 42 | contrail_container_tag=newton-master-39 43 | vrouter_physical_interface=eno1 44 | 45 | [masters] 46 | 10.87.64.149 openshift_hostname=5b4s40 47 | 48 | [etcd] 49 | 10.87.64.149 openshift_hostname=5b4s40 50 | 51 | [nodes] 52 | 10.87.64.149 openshift_hostname=5b4s40 53 | 10.87.64.150 openshift_hostname=5b4s41 openshift_node_labels="{'region': 'infra'}" 54 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/all-in-one/ose-install-ha: -------------------------------------------------------------------------------- 1 | [OSEv3:children] 2 | masters 3 | nodes 4 | etcd 5 | lb 6 | openshift_ca 7 | 8 | [OSEv3:vars] 9 | ansible_ssh_user=root 10 | ansible_become=yes 11 | debug_level=2 12 | deployment_type=openshift-enterprise 13 | openshift_release=v3.7 14 | openshift_install_examples=true 15 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 16 | openshift_master_htpasswd_users={'admin': 'contrail123'} 17 | osm_cluster_network_cidr=10.32.0.0/12 18 | openshift_portal_net=10.96.0.0/12 19 | openshift_use_dnsmasq=true 20 | openshift_clock_enabled=true 21 | openshift_hosted_manage_registry=false 22 | openshift_hosted_manage_router=false 23 | openshift_use_openshift_sdn=false 24 | openshift_enable_service_catalog=false 25 | os_sdn_network_plugin_name='cni' 
26 | 27 | # Below are Contrail variables. Comment them out if you don't want to install Contrail through ansible-playbook 28 | 29 | openshift_use_contrail=true 30 | contrail_os_release=redhat7 31 | contrail_version=4.1.0.0-8 32 | analyticsdb_min_diskgb=30 33 | configdb_min_diskgb=25 34 | vrouter_physical_interface=enp2s0 35 | contrail_docker_images_path=/root/docker_images 36 | cni_version=v0.5.2 37 | 38 | [masters] 39 | 10.87.64.149 openshift_hostname=5b4s40 40 | 10.87.64.150 openshift_hostname=5b4s41 41 | 10.84.29.39 openshift_hostname=b7s39 42 | 43 | [lb] 44 | 10.84.18.2 openshift_hostname=a4s2 45 | 46 | [etcd] 47 | 10.87.64.149 openshift_hostname=5b4s40 48 | 49 | [nodes] 50 | 10.87.64.149 openshift_hostname=5b4s40 51 | 10.87.64.150 openshift_hostname=5b4s41 52 | 10.84.29.39 openshift_hostname=b7s39 53 | 10.84.18.3 openshift_hostname=a4s3 54 | 10.84.18.4 openshift_hostname=a4s4 55 | 10.84.18.5 openshift_hostname=a4s5 56 | 57 | [openshift_ca] 58 | 10.87.64.149 openshift_hostname=5b4s40 59 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/all-in-one/ose-prerequisites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: OSEv3 3 | tasks: 4 | - name: Install base packages 5 | yum: name="{{ item }}" state=present 6 | with_items: 7 | - net-tools 8 | - bind-utils 9 | - iptables-services 10 | - bridge-utils 11 | - bash-completion 12 | - python-pip 13 | - kexec-tools 14 | - sos 15 | - psacct 16 | - ntp 17 | - NetworkManager 18 | 19 | - name: Install openshift-enterprise package 20 | yum: name="{{ item }}" state=present 21 | with_items: 22 | - atomic-openshift-docker-excluder 23 | when: 24 | - deployment_type == "openshift-enterprise" 25 | 26 | - name: Install docker-py 27 | command: pip install docker-py 28 | when: 29 | - deployment_type == "openshift-enterprise" 30 | 31 | - name: Install docker 32 | yum: 33 | name: docker-1.12.6 34 | 
state: present 35 | 36 | - name: Enable & start docker service 37 | service: 38 | name: docker 39 | state: started 40 | enabled: yes 41 | 42 | - name: Enable & start network-manager service 43 | service: 44 | name: NetworkManager 45 | state: started 46 | enabled: yes 47 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/ose-install: -------------------------------------------------------------------------------- 1 | [OSEv3:children] 2 | masters 3 | nodes 4 | etcd 5 | 6 | [OSEv3:vars] 7 | ansible_ssh_user=root 8 | ansible_become=yes 9 | debug_level=2 10 | #deployment_type=openshift-enterprise 11 | deployment_type=origin 12 | openshift_release=v1.5 13 | openshift_image_tag=v1.5.1 14 | openshift_install_examples=true 15 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}] 16 | osm_cluster_network_cidr=10.32.0.0/12 17 | openshift_portal_net=10.96.0.0/12 18 | openshift_use_dnsmasq=true 19 | os_sdn_network_plugin_name='cni' 20 | openshift_disable_check=docker_storage 21 | 22 | [masters] 23 | {master-node-internal-ip} openshift_hostname={master-node-internal-hostname} 24 | 25 | [etcd] 26 | {master-node-internal-ip} openshift_hostname={master-node-internal-hostname} 27 | 28 | [nodes] 29 | {slave-node-internal-ip} openshift_hostname={slave-node-internal-hostname} 30 | -------------------------------------------------------------------------------- /install/openshift/legacy_files/3.7/install-files/ose-prerequisites.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: OSEv3 3 | tasks: 4 | - name: Install the following base packages 5 | yum: name="{{ item }}" state=present 6 | with_items: 7 | - wget 8 | - net-tools 9 | - bind-utils 10 | - iptables-services 11 | - bridge-utils 12 | - bash-completion 13 | - 
python-pip 14 | - git 15 | 16 | - name: Install ansible and additional packages 17 | yum: name="{{ item }}" state=present enablerepo=epel 18 | with_items: 19 | - pyOpenSSL 20 | - python-cryptography 21 | - python-lxml 22 | 23 | - name: Install Docker 24 | yum: name=docker state=present 25 | 26 | - name: Enable Docker Service 27 | service: name=docker enabled=yes 28 | 29 | - name: Start Docker Service 30 | service: name=docker state=started 31 | --------------------------------------------------------------------------------