├── .gitignore ├── LICENSE ├── Makefile ├── QUICKSTART.md ├── README.md ├── RELEASE.md ├── cluster ├── Vagrantfile ├── docker17 │ ├── bootstrap_centos.sh │ ├── centos_docker_install.sh │ ├── centos_prep.sh │ └── master.sh ├── k8s1.4 │ ├── bootstrap_centos.sh │ ├── bootstrap_ubuntu.sh │ ├── k8smaster.sh │ └── k8sworker.sh ├── k8s1.6 │ ├── bootstrap_centos.sh │ ├── bootstrap_ubuntu.sh │ ├── k8smaster.sh │ └── k8sworker.sh └── k8s1.8 │ ├── bootstrap_centos.sh │ ├── bootstrap_ubuntu.sh │ ├── k8smaster.sh │ └── k8sworker.sh ├── install ├── ansible │ ├── Dockerfile │ ├── aci_cfg.yml │ ├── cfg.yml │ ├── env.json │ ├── install.sh │ ├── install_defaults.sh │ ├── install_swarm.sh │ ├── uninstall.sh │ └── uninstall_swarm.sh ├── genInventoryFile.py └── k8s │ ├── configs │ ├── aci_gw.yaml │ ├── cleanup.yaml │ ├── contiv-grafana.yml │ ├── contiv-prometheus.yml │ ├── contiv.yaml │ ├── etcd.yaml │ └── prometheus.yml │ ├── install.sh │ └── uninstall.sh ├── installer.png └── scripts ├── build.sh ├── build_image.sh ├── download_ansible_repo.sh ├── generate-certificate.sh ├── get_latest_release.py ├── get_latest_release.sh ├── jenkins_cleanup.sh ├── kube_legacy_test.sh ├── kubeadm_test.sh ├── legacy_swarm_test.sh ├── prepare_netplugin_artifacts.sh ├── prepare_netplugin_images.sh ├── release.sh ├── swarm_mode_test.sh ├── unpack-installer.sh ├── vagrantup.sh └── vbcleanup.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # vagrant data directories 2 | .cfg* 3 | .etc_* 4 | cluster/.vagrant 5 | cluster/export 6 | cluster/*.log 7 | release 8 | artifact_staging/ 9 | 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2017 Cisco Systems Inc. All rights reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | http://www.apache.org/licenses/LICENSE-2.0 7 | 8 | Unless required by applicable law or agreed to in writing, software 9 | distributed under the License is distributed on an "AS IS" BASIS, 10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 | See the License for the specific language governing permissions and 12 | limitations under the License. 
13 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # backwards compatibility name for CONTIV_INSTALLER_VERSION 2 | export BUILD_VERSION ?= devbuild 3 | # sets the version for the installer output artifacts 4 | export CONTIV_INSTALLER_VERSION ?= $(BUILD_VERSION) 5 | # downloaded and built assets intended to go in installer by build.sh 6 | export CONTIV_ARTIFACT_STAGING := $(PWD)/artifact_staging 7 | # some assets are retrieved from GitHub, this is the default version to fetch 8 | export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.2.0 9 | export CONTIV_ACI_GW_VERSION ?= latest 10 | export NETPLUGIN_OWNER ?= contiv 11 | # setting NETPLUGIN_BRANCH compiles that commit on demand, 12 | # setting CONTIV_NETPLUGIN_VERSION will download that released version 13 | ifeq ($(NETPLUGIN_BRANCH),) 14 | export CONTIV_NETPLUGIN_VERSION ?= $(DEFAULT_DOWNLOAD_CONTIV_VERSION) 15 | export CONTIV_V2PLUGIN_VERSION ?= $(DEFAULT_DOWNLOAD_CONTIV_VERSION) 16 | else 17 | export CONTIV_NETPLUGIN_VERSION := $(NETPLUGIN_OWNER)-$(NETPLUGIN_BRANCH) 18 | export CONTIV_V2PLUGIN_VERSION ?= $(NETPLUGIN_OWNER)-$(NETPLUGIN_BRANCH) 19 | endif 20 | export CONTIV_NETPLUGIN_TARBALL_NAME := netplugin-$(CONTIV_NETPLUGIN_VERSION).tar.bz2 21 | export CONTIV_ANSIBLE_COMMIT ?= 8e20f56d541af8bc7a3ecbde0d9c64fa943812ed 22 | export CONTIV_ANSIBLE_OWNER ?= contiv 23 | # TODO(chrisplo): restore the normal default after 1.1.8 has been pushed 24 | #export CONTIV_ANSIBLE_IMAGE ?= contiv/install:$(DEFAULT_DOWNLOAD_CONTIV_VERSION) 25 | export CONTIV_ANSIBLE_IMAGE ?= contiv/install:1.1.7-bash-netcat 26 | export CONTIV_V2PLUGIN_TARBALL_NAME := v2plugin-$(CONTIV_V2PLUGIN_VERSION).tar.gz 27 | export CONTIV_ANSIBLE_COMMIT ?= 00da7b2a1fd9f631bcfe283a0a640d903ca389f4 28 | export CONTIV_ANSIBLE_OWNER ?= contiv 29 | 30 | # this is the classic first makefile target, and it's also the default target 31 | # run when `make` is invoked with no specific target. 32 | all: build 33 | rel_ver = $(shell ./scripts/get_latest_release.sh) 34 | 35 | # accepts CONTIV_ANSIBLE_COMMIT and CONTIV_ANSIBLE_OWNER environment vars 36 | download-ansible-repo: 37 | @scripts/download_ansible_repo.sh 38 | 39 | # set NETPLUGIN_OWNER (default contiv) and NETPLUGIN_BRANCH make variables 40 | # to compile locally 41 | # e.g. make NETPLUGIN_OWNER=contiv NETPLUGIN_BRANCH=master 42 | prepare-netplugin-artifacts: 43 | @./scripts/prepare_netplugin_artifacts.sh 44 | 45 | assemble-build: 46 | @./scripts/build.sh 47 | 48 | # build creates a release package for contiv. 49 | # It uses a pre-built image specified by BUILD_VERSION. 50 | build: download-ansible-repo prepare-netplugin-artifacts assemble-build 51 | 52 | # ansible-image creates the docker image for ansible container 53 | # It uses the version specified by BUILD_VERSION or creates an image with the latest tag. 54 | ansible-image: 55 | @bash ./scripts/build_image.sh 56 | 57 | # Brings up a demo cluster to install Contiv on with docker, centos. 58 | cluster-legacy-swarm: vagrant-clean 59 | @bash ./scripts/vagrantup.sh legacy-swarm 60 | 61 | # Brings up a demo cluster to install Contiv on with swarm, centos. 62 | cluster-swarm-mode: vagrant-clean 63 | @bash ./scripts/vagrantup.sh swarm-mode 64 | 65 | # Brings up a demo cluster to install Contiv on with kubeadm, centos. 
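# Note: cluster/Vagrantfile also reads CONTIV_NODES, CONTIV_MASTERS, CONTIV_NODE_OS
# and CONTIV_K8S_VERSION from the environment, so for example
#   CONTIV_K8S_VERSION=v1.8.4 make cluster-kubeadm
# selects the Kubernetes version used for the demo cluster.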
66 | cluster-kubeadm: vagrant-clean 67 | @bash ./scripts/vagrantup.sh kubeadm 68 | 69 | cluster-destroy: vagrant-clean 70 | 71 | # demo-swarm-mode brings up a cluster with native docker swarm, runs the installer on it, and shows the URL 72 | # of the demo Contiv Admin Console which was set up 73 | # BUILD_VERSION must be set to use a specific build, e.g. 74 | # export BUILD_VERSION=1.0.0-beta.3 75 | # Or run make as BUILD_VERSION=1.0.0-beta.3 make demo-swarm-mode 76 | demo-swarm-mode: 77 | BUILD_VERSION=$(rel_ver) make cluster-swarm-mode 78 | BUILD_VERSION=$(rel_ver) make install-test-swarm-mode 79 | 80 | # demo-kubeadm brings up a cluster with kubeadm, runs the installer on it, and shows the URL 81 | # of the demo Contiv Admin Console which was set up 82 | # BUILD_VERSION must be set to use a specific build, e.g. 83 | # export BUILD_VERSION=1.0.0-beta.3 84 | # Or run make as BUILD_VERSION=1.0.0-beta.3 make demo-kubeadm 85 | demo-kubeadm: 86 | BUILD_VERSION=$(rel_ver) make cluster-kubeadm 87 | BUILD_VERSION=$(rel_ver) make install-test-kubeadm 88 | 89 | # demo-legacy-swarm brings up a cluster with legacy docker swarm, runs the installer on it, and shows the URL 90 | # of the demo Contiv Admin Console which was set up 91 | # BUILD_VERSION must be set to use a specific build, e.g. 92 | # export BUILD_VERSION=1.0.0-beta.3 93 | # Or run make as BUILD_VERSION=1.0.0-beta.3 make demo-legacy-swarm 94 | demo-legacy-swarm: 95 | BUILD_VERSION=$(rel_ver) make cluster-legacy-swarm 96 | BUILD_VERSION=$(rel_ver) make install-test-legacy-swarm 97 | 98 | vagrant-clean: 99 | cd cluster && vagrant destroy -f 100 | @bash ./scripts/vbcleanup.sh 101 | 102 | # Create a build and test the release installation on a vagrant cluster 103 | # TODO: The vagrant part of this can be optimized by taking snapshots instead 104 | # of creating a new set of VMs for each case 105 | release-test-swarm-mode: build 106 | # Test swarm-mode (centos by default) 107 | make cluster-swarm-mode 108 | make install-test-swarm-mode 109 | 110 | # create the k8s release testing image (does not contain ansible) 111 | k8s-build: prepare-netplugin-images assemble-build 112 | 113 | prepare-netplugin-images: 114 | @bash ./scripts/prepare_netplugin_images.sh 115 | # Create a build and test the release installation on a vagrant cluster 116 | # TODO: The vagrant part of this can be optimized by taking snapshots instead 117 | # of creating a new set of VMs for each case 118 | release-test-kubeadm: k8s-build 119 | # Test kubeadm (centos by default) 120 | make cluster-kubeadm 121 | make install-test-kubeadm 122 | 123 | release-test-legacy-swarm: build 124 | # Test docker + swarm (centos by default) 125 | make cluster-legacy-swarm 126 | make install-test-legacy-swarm 127 | 128 | release-test-kubelegacy: build 129 | # Test k8s ansible (centos by default) 130 | make cluster-kubeadm 131 | make install-test-kube-legacy 132 | 133 | # shfmt reformats all shell scripts in this repo 134 | shfmt: 135 | go get github.com/contiv-experimental/sh/cmd/shfmt 136 | find . -type f -name "*.sh" -print0 | xargs -0 shfmt -w 137 | 138 | # Test the installation on the provided cluster. This is for bare-metal and other 139 | # setups where the cluster is created using non-vagrant mechanisms. 140 | # Clusters need to have k8s installed for the kubernetes kubeadm based mechanism and 141 | # docker installed on the master node for all others.
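# Example invocation (assumes the demo cluster from `make cluster-kubeadm`, or an
# equivalent pre-provisioned cluster, is already up and reachable):
#   make install-test-kubeadm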
142 | install-test-swarm-mode: 143 | @bash ./scripts/swarm_mode_test.sh 144 | 145 | install-test-kubeadm: 146 | @bash ./scripts/kubeadm_test.sh 147 | 148 | install-test-kube-legacy: 149 | @bash ./scripts/kube_legacy_test.sh 150 | 151 | install-test-legacy-swarm: 152 | @bash ./scripts/legacy_swarm_test.sh 153 | 154 | # ci does everything necessary for a Github PR-triggered CI run. 155 | # currently, this means building a container image and running 156 | # all of the available tests. 157 | ci: release-test-kubeadm 158 | ci-old: release-test-swarm-mode release-test-kubeadm release-test-legacy-swarm 159 | 160 | .PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy k8s-build prepare-netplugin-images 161 | -------------------------------------------------------------------------------- /QUICKSTART.md: -------------------------------------------------------------------------------- 1 | # Quick Start Guide 2 | 3 | Please follow the tutorials [here](http://contiv.github.io/documents/tutorials/). 4 | 5 | ## Quick Start Guide for CentOS 7.x hosts 6 | 7 | * Set up the prerequisites as follows, then follow the demo instructions above: 8 | ``` 9 | wget https://releases.hashicorp.com/vagrant/1.9.1/vagrant_1.9.1_x86_64.rpm 10 | wget http://download.virtualbox.org/virtualbox/5.1.14/VirtualBox-5.1-5.1.14_112924_el7-1.x86_64.rpm 11 | sudo yum install VirtualBox-5.1-5.1.14_112924_el7-1.x86_64.rpm -y 12 | sudo yum install vagrant_1.9.1_x86_64.rpm -y 13 | sudo yum install docker -y 14 | sudo systemctl start docker 15 | git clone http://github.com/contiv/install 16 | ``` 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Contiv Installation for Docker Swarm & Kubernetes 1.4+ 2 | Install Contiv on your Docker Swarm or Kubernetes cluster. 3 | 4 | ## Docker Swarm Installation 5 | ### Prerequisites 6 | * CentOS 7.x operating system. 7 | * Python installed on the master and worker nodes. 8 | * Docker installed on the host where you are running the installer. 9 | * Install a Docker Swarm cluster in either legacy swarm mode or native swarm mode (native swarm mode requires Docker Engine 17.03+, where swarm functionality is built in). Alternatively, use the Contiv installer to set up the Docker and legacy swarm stack on the cluster nodes. 10 | 11 | ### Contiv Installation with Legacy Swarm Mode 12 | 13 | The Contiv Docker Swarm installer is launched from a host external to the cluster. All the nodes must be accessible to the Contiv Ansible-based installer host through SSH. 14 | ![installer](installer.png) 15 | * Download the installer bundle:<br>
`curl -L -O https://github.com/contiv/install/releases/download/$VERSION/contiv-$VERSION.tgz`
16 | If your access to the Internet is limited or slow and you want to download the full Contiv install, choose
17 | `contiv-full-$VERSION.tgz`
18 | Note: The full bundle contains only Contiv components; installing Docker Swarm itself still requires Internet connectivity. 19 | * Extract the install bundle <br>
`tar oxf contiv-$VERSION.tgz`. 20 | * Change directories to the extracted folder
`cd contiv-$VERSION` 21 | 22 | * To install Contiv with Docker Legacy Swarm:
`./install/ansible/install_swarm.sh -f cfg.yml -e -u -i` 23 | * To install Contiv with Docker Legacy Swarm and ACI:
`./install/ansible/install_swarm.sh -f aci_cfg.yml -e -u -i -m aci` 24 | 25 | * Example host config files are available at install/ansible/cfg.yml and install/ansible/aci_cfg.yml; a minimal sketch follows this list. 26 | * To see additional install options and examples, run <br> `./install/ansible/install_swarm.sh -h`.
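A minimal sketch of such a host config file, modeled on the CONNECTION_INFO structure that cluster/Vagrantfile generates for its demo clusters (the IP addresses and interface names below are placeholders for your own nodes):
```
cat > cfg.yml <<'EOF'
CONNECTION_INFO:
  192.168.2.10:      # first node acts as the master
    role: master
    control: eth1    # control/management interface
    data: eth2       # data-plane (VLAN/VXLAN) interface
  192.168.2.11:
    control: eth1
    data: eth2
EOF
```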
27 | 28 | ### Contiv Installation with Native Swarm Mode 29 | 30 | A Docker swarm-mode cluster must already be set up (see [details](https://docs.docker.com/engine/swarm/)). The installer only sets up the Contiv v2plugin and its dependencies, and it can be run from a host in the cluster itself. 31 | 32 |
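A minimal sketch of preparing that prerequisite swarm with the standard Docker CLI (the manager IP is a placeholder):
```
# on the node chosen as the swarm manager
docker swarm init --advertise-addr <manager IP>

# print the command the worker nodes should run to join the swarm
docker swarm join-token worker

# once the workers have joined, verify the cluster before running the installer
docker node ls
```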
* Download the installer bundle:<br> `curl -L -O https://github.com/contiv/install/releases/download/$VERSION/contiv-$VERSION.tgz`<br>
33 | If your access to the Internet is limited or slow and you want to download the full Contiv install, choose
34 | `contiv-full-$VERSION.tgz`
35 | Note: The full bundle contains only Contiv components. 36 | * Extract the install bundle <br>
`tar oxf contiv-$VERSION.tgz`. 37 | * Change directories to the extracted folder
`cd contiv-$VERSION` 38 | 39 | * To install Contiv v2plugin:
`./install/ansible/install_swarm.sh -f cfg.yml -e -u -p` (a quick post-install check is sketched after this list) 40 | 41 | * Example host config files are available at install/ansible/cfg.yml and install/ansible/aci_cfg.yml 42 | * To see additional install options and examples, such as adding arguments to ansible for verbose output and proxy settings, run <br> `./install/ansible/install_swarm.sh -h`.
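Once the installer finishes, a quick sanity check with the standard Docker CLI (the exact plugin name and tag depend on the release that was installed):
```
# the Contiv v2plugin should be listed and show ENABLED = true
docker plugin ls
```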
43 | 44 | ### Removing Contiv 45 | 46 | If you need to remove Contiv from Docker Swarm and return to your original state, you can uninstall Contiv with the following commands: 47 | * To uninstall Contiv and Docker Legacy Swarm:<br>
48 | `./install/ansible/uninstall_swarm.sh -f cfg.yml -e -u -i` 49 | * To uninstall Contiv and Docker Legacy Swarm with ACI support:
50 | `./install/ansible/uninstall_swarm.sh -f aci_cfg.yml -e -u -i -m aci` 51 | * To uninstall Contiv but not Docker Legacy Swarm:<br>
52 | `./install/ansible/uninstall_swarm.sh -f cfg.yml -e -u` 53 | * To uninstall Contiv v2plugin:<br>
54 | `./install/ansible/uninstall_swarm.sh -f cfg.yml -e -u -p` 55 | * Note: Adding the `-r` flag will also clean up any Contiv state; an example follows this list. 56 | * To see additional install options and examples, such as adding arguments to ansible for verbose output and proxy settings, run <br> `./install/ansible/uninstall_swarm.sh -h`.
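For example, a complete teardown of a v2plugin install that also wipes the Contiv state combines the flags documented above:
```
./install/ansible/uninstall_swarm.sh -f cfg.yml -e -u -p -r
```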
57 | 58 | ## Kubernetes Installation 59 | 60 | ### Prerequisites 61 | 62 | * Kubernetes 1.6.2+ and 1.8.4+ are supported with the following instructions. 63 | * CentOS 7.x operating system 64 | * Install Kubernetes: 65 | 1. kubeadm installs the latest Kubernetes version.<br>
66 | For Kubernetes 1.6, see an example script [here.](https://github.com/contiv/install/blob/master/cluster/k8s1.6/k8smaster.sh) 67 | For Kubernetes 1.8, see an example script [here.](https://github.com/contiv/install/blob/master/cluster/k8s1.8/k8smaster.sh) (a rough sketch of these scripts follows this list) 68 | 1. Replace step (3/4) in the kubeadm install guide with the following Contiv Installation Instructions. Contiv installation can be done after completing step (4/4). 69 | 1. Instructions to install Kubernetes are available [here.](http://kubernetes.io/docs/getting-started-guides/kubeadm/)
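The linked scripts are the authoritative reference; very roughly, for Kubernetes 1.8 they boil down to something like the following (the token and master IP are placeholders, and the exact flags may differ from the scripts):
```
# on the master node (see cluster/k8s1.8/k8smaster.sh for the real script)
kubeadm init --token <token> --apiserver-advertise-address <master IP>

# on each worker node (see cluster/k8s1.8/k8sworker.sh)
kubeadm join --token <token> <master IP>:6443 --discovery-token-unsafe-skip-ca-verification
```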
70 | 71 | ### Contiv Installation 72 | * Run the following commands on the kubernetes master host. 73 | * Use curl to get the installer bundle:<br> `curl -L -O https://github.com/contiv/install/releases/download/$VERSION/contiv-$VERSION.tgz` 74 | * Extract the install bundle <br>
`tar oxf contiv-$VERSION.tgz`. 75 | * Change directories to the extracted folder
`cd contiv-$VERSION` 76 | * To install Contiv with VXLAN:
`sudo ./install/k8s/install.sh -n $CONTIV_MASTER` 77 | * **NOTE:** Use the same IP for CONTIV_MASTER as you use for --apiserver-advertise-address (--api-advertise-addresses on older kubeadm releases) in kubeadm init. 78 | * To install Contiv specifying a data plane interface for VLAN:<br>
`sudo ./install/k8s/install.sh -n $CONTIV_MASTER -v <data plane interface>` 79 | * **NOTE:** Ensure that the data plane interface is the same on all the worker nodes. 80 | * To install Contiv with ACI:<br>
`./install/k8s/install.sh -n $CONTIV_MASTER -a <APIC_URL> -u <APIC_USERNAME> -p <APIC_PASSWORD> -l <APIC_LEAF_NODE> -d <APIC_PHYS_DOMAIN> -e <APIC_EPG_BRIDGE_DOMAIN> -m <APIC_CONTRACTS_UNRESTRICTED_MODE> 81 | `<br>
For example:
`./install/k8s/install.sh -n <netmaster IP> -a https://apic_host:443 -u apic_user -p apic_password -l topology/pod-xxx/node-xxx -d phys_domain -e not_specified -m no 82 | `<br>
where `$CONTIV_MASTER` is the Contiv proxy or Net Master IP. 84 | * To install Contiv with a custom infra network and gateway:
`./install/k8s/install.sh -n <netmaster IP> -g <gateway IP> -i <infra subnet>` 85 | * To see additional install options, run <br> `./install/k8s/install.sh -h`.
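After any of the install variants above, a quick way to confirm that the Contiv components came up (this assumes the bundled manifests deploy into the kube-system namespace):
```
# netmaster and the netplugin DaemonSet pods should all reach Running state
kubectl get pods -n kube-system -o wide | grep -E 'contiv|netplugin|netmaster'
```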
86 | 87 | ### Removing Contiv 88 | * To uninstall Contiv, retaining the etcd state, run:<br>
89 | `sudo ./install/k8s/uninstall.sh` 90 | * To uninstall Contiv, cleaning up the etcd state, run:
91 | `sudo ./install/k8s/uninstall.sh etcd-cleanup`.
Use this option to clean up all the Contiv network state. 92 | * To stop Contiv, go to the install folder contiv-$VERSION and run:<br>
`kubectl delete -f .contiv.yaml` 93 | * To start Contiv, go to the install folder contiv-$VERSION and run:
`kubectl apply -f .contiv.yaml` 94 | * To remove etcd state when Contiv is stopped, run:
`rm -rf /var/etcd/contiv-data` 95 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Automated releases 2 | TBD 3 | 4 | # Testing installer builds of any netplugin branch 5 | Normally, the installer requires the contiv netplugin version to be a released version hosted on github, as it pulls that archive down either during install or caches it prior to install. 6 | 7 | Instead, to create an installer for legacy docker swarm from any branch of contiv netplugin, additionally set these two environment variables: 8 | 9 | * NETPLUGIN_BRANCH - the branch to check out, compile, and archive 10 | * NETPLUGIN_OWNER (if not contiv) - the username for the netplugin fork 11 | 12 | This will only produce a "full" installer, with the netplugin archive included in `contiv_cache`. 13 | 14 | Other types of installs such as v2plugin do not yet support builds targeting specific branches; they only support building installers for released versions of netplugin. 15 | 16 | #### Example: 17 | 18 | ``` 19 | NETPLUGIN_BRANCH=v2plugin_local_versioned NETPLUGIN_OWNER=chrisplo make build 20 | ``` 21 | 22 | This can produce a netplugin binary archive at 23 | `contiv-devbuild/contiv_cache/netplugin-ca1b582.tar.bz2` 24 | and a symlink in the same directory to that archive named `netplugin-chrisplo-v2plugin_local_versioned.tar.bz2` 25 | 26 | That archive will be used for the netplugin binaries instead of a released archive hosted on github. 27 | 28 | # Manual releases 29 | 1. Check out the right branch and the right commit. This is necessary 30 | when not releasing from the HEAD of master. 31 | 32 | 2. Tag the right commit and push it to GitHub. This is mandatory if the 33 | release isn't made from the HEAD of master. 34 | ``` 35 | git tag 1.0.1 3aba546aea1235 36 | git push origin 1.0.1 37 | ``` 38 | 39 | 3. Set BUILD_VERSION, e.g., 1.0.0-beta.3. Set or update the following variables under scripts/build.sh. Merge via PR to the appropriate branch. 40 | 41 | ``` 42 | auth_proxy_version=${CONTIV_API_PROXY_VERSION:-"1.0.0-beta.3"} 43 | aci_gw_version=${CONTIV_ACI_GW_VERSION:-"latest"} 44 | contiv_version=${CONTIV_VERSION:-"1.0.0-beta.3"} 45 | etcd_version=${CONTIV_ETCD_VERSION:-v2.3.8} 46 | docker_version=${CONTIV_DOCKER_VERSION:-1.12.6} 47 | ``` 48 | 49 | 4. Build the installer docker image. This creates a docker image contiv/install:$BUILD_VERSION. It also creates two release bundles - contiv-${BUILD_VERSION}.tgz and contiv-full-${BUILD_VERSION}.tgz. This version should be tested locally using a vagrant setup with the release-test-* make targets. 50 | 51 | ``` 52 | make ansible-image 53 | ``` 54 | 55 | 5. Execute ```./scripts/release.sh``` to create a new release on GitHub. 56 | 57 | ``` 58 | export GITHUB_USER=contiv 59 | export GITHUB_TOKEN= 60 | ./scripts/release.sh 61 | ``` 62 | 63 | 6. 
Push image to docker hub 64 | 65 | ``` 66 | docker login -u $docker_user -p $docker_password 67 | docker push contiv/install:$BUILD_VERSION 68 | ``` 69 | -------------------------------------------------------------------------------- /cluster/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | require 'rubygems' 4 | require 'fileutils' 5 | require 'yaml' 6 | 7 | DEFAULT_K8S_VERSION = 'v1.8.4'.freeze 8 | 9 | # Guest OS versions for testing 10 | UBUNTU = 'ubuntu'.freeze 11 | CENTOS = 'centos'.freeze 12 | RHEL = 'rhel7'.freeze 13 | 14 | BOX_VERSION = ENV['BOX_VERSION'] || '1707.01'.freeze 15 | HOST_SHARED_FOLDER = './export/'.freeze 16 | GUEST_SHARED_FOLDER = '/shared'.freeze 17 | # Different orchestration platforms we support 18 | ORC_LEGACY_SWARM = 'legacy-swarm'.freeze 19 | ORC_SWARM = 'swarm-mode'.freeze 20 | ORC_KUBEADM = 'kubeadm'.freeze 21 | ORC_KUBEADM_IDX = 2 22 | 23 | num_nodes = (ENV["CONTIV_NODES"] || 2).to_i 24 | num_masters = (ENV["CONTIV_MASTERS"] || 1).to_i 25 | num_workers = num_nodes - num_masters 26 | 27 | 28 | # Global vars 29 | token = 'd900e1.8a392798f13b33a4' 30 | node_os = ENV['CONTIV_NODE_OS'] || CENTOS 31 | k8s_ver = ENV['CONTIV_K8S_VERSION'] || DEFAULT_K8S_VERSION 32 | orc_path = case k8s_ver 33 | when /^v1\.[45]\./ then 'k8s1.4/' 34 | when /^v1\.[67]\./ then 'k8s1.6/' 35 | when /^v1\.[8]\./ then 'k8s1.8/' 36 | else 37 | raise "unsupported k8s version: #{k8s_ver}" 38 | end 39 | 40 | swarm_path = 'docker17/' 41 | orchestrators = [ORC_LEGACY_SWARM, ORC_SWARM, ORC_KUBEADM] 42 | 43 | # method to create an etc_hosts file based on the cluster info 44 | def create_etc_hosts(node_names, node_ips, o) 45 | hosts = "127.0.0.1 localhost\n" 46 | 47 | node_names.zip(node_ips).each do |node, ip| 48 | hosts << "#{ip} #{node} \n" 49 | end 50 | 51 | etc_file = (ENV['VAGRANT_CWD'] || '.') + '/.etc_hosts_' + o 52 | File.write(etc_file, hosts) 53 | end 54 | 55 | # method to create an cfg file based on the cluster info 56 | # This cfg file is used for ansible installations 57 | def create_cfg_info(node_ips, o, masters) 58 | node_os = ENV['CONTIV_NODE_OS'] || CENTOS 59 | conn = {} 60 | node_ips.each_with_index do |node_ip, n| 61 | node = if n < masters 62 | { 'role' => 'master' } 63 | else 64 | {} 65 | end 66 | def_ctrl_if = node_os == UBUNTU ? 'enp0s8' : 'eth1' 67 | def_data_if = node_os == UBUNTU ? 'enp0s9' : 'eth2' 68 | node['control'] = ENV['CONTIV_CONTROL_IF'] || def_ctrl_if 69 | node['data'] = ENV['CONTIV_DATA_IF'] || def_data_if 70 | conn[node_ip] = node 71 | end 72 | cfg_data = { 'CONNECTION_INFO' => conn } 73 | cfg_file = (ENV['VAGRANT_CWD'] || '.') + '/.cfg_' + o + '.yaml' 74 | File.write(cfg_file, cfg_data.to_yaml) 75 | end 76 | 77 | provision_node = <