├── .gitignore ├── README.md ├── ansible ├── ansible.cfg ├── clusterUp.yml ├── dockerSetup.yml ├── rancherServer.yml ├── rancherServers.yml └── roles │ ├── dockersetup │ └── tasks │ │ └── main.yml │ ├── k8senvironment │ └── tasks │ │ └── main.yml │ ├── k8setcd │ └── tasks │ │ └── main.yml │ ├── k8sha │ └── tasks │ │ └── main.yml │ ├── mysqldb │ └── tasks │ │ └── main.yml │ ├── rancherha │ └── tasks │ │ └── main.yml │ ├── rancherhost │ └── tasks │ │ └── main.yml │ └── ranchermaster │ ├── tasks │ └── main.yml │ └── vars │ └── vars.yml ├── docs ├── benchmarks.md ├── detailed.md ├── img │ ├── 1x2-arch.png │ ├── 20170323b-Triton-Kubernetes.jpg │ ├── 20170323b-Triton-Kubernetes.png │ ├── 20170324a-add-environment.png │ ├── 20170324a-add-host.png │ ├── 20170324a-create-environment.png │ ├── 20170324a-kubernetes-dashboard.png │ ├── 20170324a-manage-environments.png │ ├── 20170324b-add-host.png │ ├── 20170328a-k8scli-copyconfig.png │ ├── 20170328a-k8scli-generateconfig.png │ ├── 20170328a-k8scli-guestbook.png │ ├── 20170328a-k8scli-guestbookindashboard.png │ ├── 20170328a-k8sdashboard-create.png │ ├── 20170328a-k8sdashboard-deploy.png │ ├── 20170328a-k8sdashboard-services.png │ ├── 20170328a-k8sdashboard-workloads.png │ ├── 20170328a-k8sdashboard.png │ ├── 20170328a-k8smonitoring-deployments.png │ ├── 20170328b-k8smonitoring-deployments.png │ ├── 20170530a-Triton-Kubernetes-HA.jpg │ ├── infrastructure-containers.png │ ├── kubernetes-cli.png │ ├── kubernetes-dashboard.png │ └── rancher-dashboard.png └── manual-setup.md ├── setup.sh └── terraform ├── host ├── main.tf └── vars.tf ├── k8setcd ├── main.tf └── vars.tf ├── k8sha ├── main.tf └── vars.tf ├── master ├── main.tf └── vars.tf └── mysqldb ├── main.tf └── vars.tf /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > # ###This repo is out of date. Please check out [https://github.com/joyent/k8s-triton-supervisor](https://github.com/joyent/k8s-triton-supervisor).### 2 | 3 | ## Quick Start Guide 4 | In this guide, we will start a simple 2 worker node Kubernetes install that runs on Joyent Cloud. 5 | 6 | ### Pre-Reqs 7 | In order to start running Triton K8s Supervisor, you must create a **Triton** account and install the **Triton CLI**, **Terraform**, **Ansible**, and the **Kubernetes CLI**. 8 | 9 | [Triton](https://www.joyent.com/why) is our container-native and open source cloud, which we will use to provide the infrastructure required for your Kubernetes cluster. 10 | 11 | [Terraform](https://www.terraform.io/) enables you to safely and predictably create, change, and improve production infrastructure. It is an open source tool that codifies APIs into declarative configuration files that can be shared amongst team members, treated as code, edited, reviewed, and versioned. We use Terraform to provision virtual machines, set up root access, and install `python`. 12 | 13 | [Ansible](http://docs.ansible.com/ansible/intro_installation.html) is an IT automation tool that enables app deployment, configuration management and orchestration. We are using Ansible to install pre-reqs (including supported version of docker-engine), create Kubernetes environment and set up Kubernetes cluster. 
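Each of these tools is installed in the sections below. Once they are all in place, a quick sanity check that everything is on your PATH (version numbers will vary):

```bash
# Confirm that all four prerequisites are installed and reachable
triton --version
terraform version
ansible --version
kubectl version --client
```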
14 | 15 | #### Install Triton 16 | 17 | In order to install Triton, first you must have a [Triton account](https://sso.joyent.com/signup). As a new user you will receive a $250 credit to enable you to give Triton and Kubernetes a test run, but it's important to [add your billing information](https://my.joyent.com/main/#!/account/payment) and [add an SSH key](https://my.joyent.com/main/#!/account) to your account. If you need instructions for how to generate an SSH key, [read our documentation](https://docs.joyent.com/public-cloud/getting-started). 18 | 19 | 1. Install [Node.js](https://nodejs.org/en/download/) and run `npm install -g triton` to install the Triton CLI. 20 | 1. `triton` uses profiles to store access information. You'll need to set up profiles for the relevant data centers. 21 | + `triton profile create` will give a [step-by-step walkthrough](https://docs.joyent.com/public-cloud/api-access/cloudapi) of how to create a profile. 22 | + Choose a profile to use for your Kubernetes Cluster. 23 | 1. Get into the Triton environment with `eval $(triton env <profile name>)`. 24 | 1. Run `triton info` to test your configuration. 25 | 26 | #### Install Terraform 27 | 28 | [Download Terraform](https://www.terraform.io/downloads.html) and unzip the package. 29 | Terraform runs as a single binary named `terraform`. The final step is to make sure that the `terraform` binary is available on the PATH. See [this](https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) page for instructions on setting the PATH on Linux and Mac. 30 | 31 | Test your installation by running `terraform`. You should see output similar to: 32 | 33 | ``` 34 | $ terraform 35 | Usage: terraform [--version] [--help] <command> [args] 36 | 37 | The available commands for execution are listed below. 38 | The most common, useful commands are shown first, followed by 39 | less common or more advanced commands. If you're just getting 40 | started with Terraform, stick with the common commands. For the 41 | other commands, please read the help and docs before usage. 42 | 43 | Common commands: 44 | apply Builds or changes infrastructure 45 | console Interactive console for Terraform interpolations 46 | 47 | # ... 48 | ``` 49 | 50 | #### Install Ansible 51 | 52 | There are [many ways to install Ansible](http://docs.ansible.com/ansible/intro_installation.html), but the simplest is to use the Python package manager (`pip`). If you don't already have `pip` installed, install it: 53 | 54 | ``` 55 | sudo easy_install pip 56 | ``` 57 | Ansible manages machines over SSH by default and requires Python 2.6 or 2.7 to be installed on all the hosts. 58 | 59 | Install Ansible: 60 | 61 | ``` 62 | sudo pip install ansible 63 | ``` 64 | 65 | Once done, you can run `ansible` to test your installation. You should see usage output that looks like the following: 66 | 67 | ``` 68 | $ ansible 69 | Usage: ansible <host-pattern> [options] 70 | 71 | Options: 72 | -a MODULE_ARGS, --args=MODULE_ARGS 73 | module arguments 74 | --ask-vault-pass ask for vault password 75 | -B SECONDS, --background=SECONDS 76 | run asynchronously, failing after X seconds 77 | (default=N/A) 78 | -C, --check don't make any changes; instead, try to predict some 79 | of the changes that may occur 80 | -D, --diff when changing (small) files and templates, show the 81 | differences in those files; works great with --check 82 | [...] 83 | ``` 84 | 85 | #### Install the Kubernetes CLI 86 | 87 | There are different ways to [install `kubectl`](https://kubernetes.io/docs/tasks/kubectl/install/), but the simplest way is via `curl`: 88 | 89 | ```sh 90 | # OS X 91 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl 92 | 93 | # Linux 94 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl 95 | 96 | # Windows 97 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/windows/amd64/kubectl.exe 98 | ``` 99 | 100 | ### Starting the Kubernetes Cluster 101 | Download the Triton K8s Supervisor package and run `setup.sh`: 102 | 103 | ```bash 104 | $ git clone https://github.com/fayazg/tritonK8ssupervisor.git 105 | Cloning into 'tritonK8ssupervisor'... 106 | $ cd tritonK8ssupervisor 107 | $ ./setup.sh 108 | ``` 109 | 110 | Follow the on-screen instructions, answering the questions about the cluster. You can accept a default value by pressing the Enter/Return key. 111 | 112 | #### Setup Questions 113 | 114 | ``` 115 | Name your Kubernetes environment: (k8s dev) 116 | Describe this Kubernetes environment: (k8s dev) 117 | Would you like HA for Kubernetes Cluster Manager (+3 VMs) (yes | no)? 118 | Run Kubernetes Management Services on dedicated nodes (+3 VMs for etcd, +3 VMs for K8s services - apiserver/scheduler/controllermanager...) (yes | no)? 119 | Hostname of the master: (kubemaster) 120 | Enter a string to use for appending to hostnames of all the nodes: (kubenode) 121 | How many nodes should this Kubernetes cluster have: (1) 2 122 | What networks should the master be a part of, provide comma separated values: (31428241-4878-47d6-9fba-9a8436b596a4) 123 | What networks should the nodes be a part of, provide comma separated values: (31428241-4878-47d6-9fba-9a8436b596a4) 124 | What KVM package should the master and nodes run on: (14b6fade-d0f8-11e5-85c5-4ff7918ab5c1) 125 | ``` 126 | 127 | After the entries are verified, setup will provision a Kubernetes environment on Triton arranged as below (if HA isn't selected): 128 | 129 | ![1x2 architecture](docs/img/1x2-arch.png) 130 | 131 | 132 | For a more detailed guide and an explanation of how the automation works, click [here](docs/detailed.md).
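Once `setup.sh` finishes, you can confirm the provisioned machines from your workstation. A minimal sketch, assuming the default master hostname (`kubemaster`):

```bash
# List the VMs the supervisor provisioned on Triton
triton ls
# Confirm the Rancher server container is running on the master
ssh root@$(triton ip kubemaster) docker ps --filter name=master
```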
133 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | deprecation_warnings = False 3 | host_key_checking = False 4 | private_key_file = 5 | -------------------------------------------------------------------------------- /ansible/clusterUp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set up Rancher Master and Kubernetes Template 3 | hosts: MASTER[0] 4 | remote_user: root 5 | vars: 6 | playbook_dir: $(pwd) 7 | vars_files: 8 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 9 | roles: 10 | - k8senvironment 11 | 12 | - name: Add hosts to Kubernetes Environment (ETCD) 13 | hosts: K8SETCD 14 | remote_user: root 15 | vars: 16 | playbook_dir: $(pwd) 17 | vars_files: 18 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 19 | roles: 20 | - k8setcd 21 | 22 | - name: Add hosts to Kubernetes Environment (ORCHESTRATION) 23 | hosts: K8SHA 24 | remote_user: root 25 | vars: 26 | playbook_dir: $(pwd) 27 | vars_files: 28 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 29 | roles: 30 | - k8sha 31 | 32 | - name: Add hosts to Kubernetes Environment (COMPUTE) 33 | hosts: HOST 34 | remote_user: root 35 | vars: 36 | playbook_dir: $(pwd) 37 | vars_files: 38 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 39 | roles: 40 | - rancherhost 41 | -------------------------------------------------------------------------------- /ansible/dockerSetup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup docker-engine on all hosts (masters/hosts) 3 | hosts: all 4 | remote_user: root 5 | roles: 6 | - dockersetup 7 | -------------------------------------------------------------------------------- /ansible/rancherServer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set up Rancher Master and Kubernetes Template 3 | hosts: MASTER 4 | remote_user: root 5 | vars: 6 | playbook_dir: $(pwd) 7 | vars_files: 8 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 9 | roles: 10 | - ranchermaster 11 | -------------------------------------------------------------------------------- /ansible/rancherServers.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: MysqlDB for Rancher HA 3 | hosts: MYSQLDB 4 | remote_user: root 5 | vars: 6 | playbook_dir: $(pwd) 7 | vars_files: 8 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 9 | roles: 10 | - mysqldb 11 | 12 | - name: Set up Rancher Master and Kubernetes Template 13 | hosts: MASTER 14 | remote_user: root 15 | vars: 16 | playbook_dir: $(pwd) 17 | vars_files: 18 | - "{{playbook_dir}}/roles/ranchermaster/vars/vars.yml" 19 | roles: 20 | - rancherha 21 | -------------------------------------------------------------------------------- /ansible/roles/dockersetup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if docker is installed 3 | shell: echo $(docker --version | grep 1.12.6) 4 | register: docker_installed 5 | - name: Update all packages to the latest version 6 | apt: 7 | upgrade: dist 8 | update_cache: yes 9 | cache_valid_time: 86400 10 | when: docker_installed.stdout.find('Docker version') 11 | - name: Install docker pre-reqs 12 | apt: name={{ item.name }} 13 | with_items: 14 | - { name: 
'linux-headers-generic' } 15 | - { name: 'build-essential' } 16 | - { name: 'zfs' } 17 | - { name: 'python-pip' } 18 | when: docker_installed.stdout.find('Docker version') 19 | - name: Install docker-py 20 | pip: 21 | name: docker-py 22 | when: docker_installed.stdout.find('Docker version') 23 | - name: Install docker repo pre-reqs 24 | apt: name={{ item.name }} install_recommends=no 25 | with_items: 26 | - { name: 'apt-transport-https' } 27 | - { name: 'ca-certificates' } 28 | - { name: 'curl' } 29 | - { name: 'software-properties-common' } 30 | when: docker_installed.stdout.find('Docker version') 31 | - name: Add docker's official key 32 | apt_key: 33 | id: 58118E89F3A912897C070ADBF76221572C52609D 34 | keyserver: p80.pool.sks-keyservers.net 35 | state: present 36 | when: docker_installed.stdout.find('Docker version') 37 | - name: Add docker repo 38 | apt_repository: 39 | repo: deb https://apt.dockerproject.org/repo ubuntu-xenial main 40 | state: present 41 | when: docker_installed.stdout.find('Docker version') 42 | - name: Update repositories cache and install docker-engine package version 1.12.6-0~ubuntu-xenial 43 | apt: 44 | name: docker-engine=1.12.6-0~ubuntu-xenial 45 | update_cache: yes 46 | when: docker_installed.stdout.find('Docker version') 47 | -------------------------------------------------------------------------------- /ansible/roles/k8senvironment/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get Kubernetes Template ID 3 | action: uri 4 | method=GET 5 | url="http://{{ master }}:8080/v2-beta/projectTemplates?name=kubernetes" 6 | register: kubernetes_template_id 7 | run_once: true 8 | delegate_to: "{{ master }}" 9 | 10 | - name: Store kubernetes_template_id 11 | local_action: shell echo {{ kubernetes_template_id.json.data[0].id }} > tmp/kubernetes_template_id.id 12 | 13 | - name: Create Kubernetes Environment 14 | uri: 15 | method: POST 16 | url: "http://{{ master }}:8080/v2-beta/projects" 17 | HEADER_Accept: "application/json" 18 | HEADER_Content-Type: "application/json" 19 | body_format: json 20 | body: ' { "description":"{{ kubernetes_description }}", "name":"{{ kubernetes_name }}", "projectTemplateId":"{{ kubernetes_template_id.json.data[0].id }}", "allowSystemRole":false, "members":[], "virtualMachine":false, "servicesPortRange":null, "projectLinks":[]}' 21 | return_content: yes 22 | status_code: 201 23 | register: kubernetes_environment_id 24 | run_once: true 25 | delegate_to: "{{ master }}" 26 | 27 | - name: Store kubernetes_environment_id 28 | local_action: shell echo {{ kubernetes_environment_id.json.id }} > tmp/kubernetes_environment.id 29 | -------------------------------------------------------------------------------- /ansible/roles/k8setcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if the rancher-agent is running 3 | command: docker ps -a 4 | register: containers 5 | 6 | - name: Get kubernetes_environment_id 7 | local_action: slurp src="tmp/kubernetes_environment.id" 8 | register: project_id 9 | when: "{{ 'rancher-agent' not in containers.stdout }}" 10 | 11 | - name: Get registration url 12 | action: uri 13 | method=POST 14 | status_code=201 15 | url="http://{{ master }}:8080/v2-beta/projects/{{ project_id['content'] | b64decode | replace('\n', '') }}/registrationtokens" return_content=yes 16 | register: rancher_agent_registration 17 | when: "{{ 'rancher-agent' not in containers.stdout }}" 18 | 19 | - name: Get 
registration token 20 | action: uri 21 | url="{{ rancher_agent_registration.json['actions']['activate'] }}" return_content=yes 22 | register: agent_registration 23 | when: "{{ 'rancher-agent' not in containers.stdout }}" 24 | 25 | - name: Register etcd machine 26 | shell: "docker run -e CATTLE_HOST_LABELS='etcd=true' --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher {{ agent_registration.json['image'] }} {{ agent_registration.json['registrationUrl'] }}" 27 | when: "{{ 'rancher-agent' not in containers.stdout }}" 28 | -------------------------------------------------------------------------------- /ansible/roles/k8sha/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if the rancher-agent is running 3 | command: docker ps -a 4 | register: containers 5 | 6 | - name: Get kubernetes_environment_id 7 | local_action: slurp src="tmp/kubernetes_environment.id" 8 | register: project_id 9 | when: "{{ 'rancher-agent' not in containers.stdout }}" 10 | 11 | - name: Get registration url 12 | action: uri 13 | method=POST 14 | status_code=201 15 | url="http://{{ master }}:8080/v2-beta/projects/{{ project_id['content'] | b64decode | replace('\n', '') }}/registrationtokens" return_content=yes 16 | register: rancher_agent_registration 17 | when: "{{ 'rancher-agent' not in containers.stdout }}" 18 | 19 | - name: Get registration token 20 | action: uri 21 | url="{{ rancher_agent_registration.json['actions']['activate'] }}" return_content=yes 22 | register: agent_registration 23 | when: "{{ 'rancher-agent' not in containers.stdout }}" 24 | 25 | - name: Register orchestration machine 26 | shell: "docker run -e CATTLE_HOST_LABELS='orchestration=true' --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher {{ agent_registration.json['image'] }} {{ agent_registration.json['registrationUrl'] }}" 27 | when: "{{ 'rancher-agent' not in containers.stdout }}" 28 | -------------------------------------------------------------------------------- /ansible/roles/mysqldb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install python mysql bindings 3 | apt: name=python-mysqldb state=installed 4 | 5 | - name: Install mysql on DB node (root/password) 6 | raw: debconf-set-selections <<< 'mysql-server mysql-server/root_password password password' && debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password' && apt-get -y install mysql-server 7 | 8 | - name: Create db, and setup user permission 9 | mysql_db: 10 | state: present 11 | name: "{{ db_name }}" 12 | login_password: password 13 | 14 | - name: User Permission - local 15 | mysql_user: 16 | name: "{{ db_user }}" 17 | host: localhost 18 | password: "{{ db_pass }}" 19 | priv: 'cattle.*:ALL,GRANT' 20 | state: present 21 | login_password: password 22 | 23 | - name: User Permission - all 24 | mysql_user: 25 | name: "{{ db_user }}" 26 | host: "%" 27 | password: "{{ db_pass }}" 28 | priv: 'cattle.*:ALL,GRANT' 29 | state: present 30 | login_password: password 31 | 32 | - name: Update mysql bind-address 33 | lineinfile: 34 | dest: /etc/mysql/my.cnf 35 | line: "[mysqld]\nbind-address = 0.0.0.0" 36 | 37 | - name: Restart mysql Service 38 | service: 39 | name: mysql 40 | state: restarted 41 | -------------------------------------------------------------------------------- /ansible/roles/rancherha/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Rancher HA container is running 3 | command: docker ps -a 4 | register: containers 5 | - debug: msg="{{ containers.stdout }}" 6 | - name: Start Rancher Server HA containers 7 | shell: "docker run --name=master -d --restart=unless-stopped -p 8080:8080 -p 9345:9345 rancher/server --db-host {{ mysqldb }} --db-port 3306 --db-user {{ db_user }} --db-pass {{ db_pass }} --db-name {{ db_name }} --advertise-address {{ ansible_default_ipv4.address }}" 8 | when: not containers.stdout | search("rancher/server") 9 | 10 | - name: Wait for the Rancher servers to start 11 | action: command docker logs master 12 | register: rancher_logs 13 | until: rancher_logs.stdout.find("Listening on") != -1 14 | retries: 60 15 | delay: 10 16 | when: not containers.stdout | search("rancher/server") 17 | 18 | - debug: msg="You can connect to rancher server http://{{ ansible_default_ipv4.address }}:8080" 19 | when: not containers.stdout | search("rancher/server") 20 | 21 | - pause: 22 | seconds: 15 23 | prompt: "Waiting for Rancher to be fully up before continueing to build Kubernetes environment..." 24 | when: not containers.stdout | search("rancher/server") 25 | 26 | - name: Get Kubernetes Template ID 27 | action: uri 28 | method=GET 29 | url="http://{{ ansible_default_ipv4.address }}:8080/v2-beta/projectTemplates?name=kubernetes" 30 | register: kubernetes_template_id 31 | run_once: true 32 | delegate_to: "{{ master }}" 33 | 34 | - name: Store kubernetes_template_id 35 | local_action: shell echo {{ kubernetes_template_id.json.data[0].id }} > tmp/kubernetes_template_id.id 36 | -------------------------------------------------------------------------------- /ansible/roles/rancherhost/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if the rancher-agent is running 3 | command: docker ps -a 4 | register: containers 5 | 6 | - name: Get kubernetes_environment_id 7 | local_action: slurp src="tmp/kubernetes_environment.id" 8 | register: project_id 9 | when: "{{ 'rancher-agent' not in containers.stdout }}" 10 | 11 | - name: Get registration url 12 | action: uri 13 | method=POST 14 | status_code=201 15 | url="http://{{ master }}:8080/v2-beta/projects/{{ project_id['content'] | b64decode | replace('\n', '') }}/registrationtokens" return_content=yes 16 | register: rancher_agent_registration 17 | when: "{{ 'rancher-agent' not in containers.stdout }}" 18 | 19 | - name: Get registration token 20 | action: uri 21 | url="{{ rancher_agent_registration.json['actions']['activate'] }}" return_content=yes 22 | register: agent_registration 23 | when: "{{ 'rancher-agent' not in containers.stdout }}" 24 | 25 | - name: Register compute machine 26 | shell: "docker run -e CATTLE_HOST_LABELS='compute=true' --rm --privileged -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/rancher:/var/lib/rancher {{ agent_registration.json['image'] }} {{ agent_registration.json['registrationUrl'] }}" 27 | when: "{{ 'rancher-agent' not in containers.stdout }}" 28 | -------------------------------------------------------------------------------- /ansible/roles/ranchermaster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if master container is running 3 | command: docker ps -a 4 | register: containers 5 | 6 | - name: Start master containers 7 | docker_container: 8 | name: master 9 | image: 
rancher/server:stable 10 | published_ports: 8080:8080 11 | restart_policy: unless-stopped 12 | state: started 13 | when: "{{ 'master' not in containers.stdout }}" 14 | - name: Wait for the Rancher servers to start 15 | action: command docker logs master 16 | register: rancher_logs 17 | until: rancher_logs.stdout.find("Listening on") != -1 18 | retries: 30 19 | delay: 10 20 | when: "{{ 'master' not in containers.stdout }}" 21 | - debug: msg="You can connect to rancher server http://{{ ansible_default_ipv4.address }}:8080" 22 | when: "{{ 'master' not in containers.stdout }}" 23 | 24 | - pause: 25 | seconds: 15 26 | prompt: "Waiting for Rancher to be fully up before continuing to build Kubernetes environment..." 27 | when: "{{ 'master' not in containers.stdout }}" 28 | 29 | - name: Get Kubernetes Template ID 30 | action: uri 31 | method=GET 32 | url="http://{{ master }}:8080/v2-beta/projectTemplates?name=kubernetes" 33 | register: kubernetes_template_id 34 | run_once: true 35 | delegate_to: "{{ master }}" 36 | 37 | - name: Store kubernetes_template_id 38 | local_action: shell echo {{ kubernetes_template_id.json.data[0].id }} > tmp/kubernetes_template_id.id 39 | -------------------------------------------------------------------------------- /ansible/roles/ranchermaster/vars/vars.yml: -------------------------------------------------------------------------------- 1 | master: 64.30.129.229 2 | kubernetes_name: "k8s dev" 3 | kubernetes_description: "k8s dev" 4 | -------------------------------------------------------------------------------- /docs/benchmarks.md: -------------------------------------------------------------------------------- 1 | # Benchmarking with [simple-container-benchmarks](https://github.com/misterbisson/simple-container-benchmarks) 2 | 3 | **System performance test of Triton KVM and AWS VM using [simple-container-benchmarks](https://github.com/misterbisson/simple-container-benchmarks):** 4 | This test compared a general purpose AWS VM with a comparable Triton KVM. The Triton KVM package used for this test is k4-general-kvm-31.75G, and the AWS VM is a t2.2xlarge. 5 | 6 | For all tests, the ubuntu-16.04 image was used on both the Triton KVMs and the AWS VMs. 7 | 8 | To measure write performance, the benchmark pipes a gigabyte of zeros to a file on the filesystem: 9 | /disk request : 1073741824 bytes (1.1 GB) 10 | 11 | To test CPU performance, it fetches random numbers and md5-hashes them: 12 | /cpu request : 268435456 bytes (268 MB) 13 | 14 | 15 | For more detailed information on what the benchmarking does, click [here](https://github.com/misterbisson/simple-container-benchmarks#how-the-tests-work).
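To get a rough feel for what the two endpoints measure, the same operations can be approximated by hand with `dd` (this imitates the benchmark's internals as described above; it is not the tool itself):

```bash
# /disk: write a gigabyte of zeros to a file and flush it to disk
dd if=/dev/zero of=/tmp/benchfile bs=1M count=1024 conv=fdatasync
# /cpu: md5-hash 256 MB of random data
dd if=/dev/urandom bs=1M count=256 | md5sum
```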
16 | 17 | ## k4-general-kvm-31.75G vs t2.2xlarge 18 | 19 | | Metric | Triton | AWS | 20 | |----------------------- |--------------------------- |------------------------ | 21 | | /disk request average | 8.470108 s, 128.8 MB/s | 13.02394 s, 83.45 MB/s | 22 | | /cpu request average | 16.85761 s, 15.96 MB/s | 21.63477 s, 12.4 MB/s | 23 | | total mem (kB) | 32689108 | 32946296 | 24 | | used mem (kB) | 1593180 | 1595780 | 25 | | free mem (kB) | 31095928 | 31350516 | 26 | | shared mem (kB) | 9016 | 8944 | 27 | | buffers mem (kB) | 153116 | 153028 | 28 | | cached mem (kB) | 951076 | 949996 | 29 | | Architecture | x86_64 | x86_64 | 30 | | CPU op-mode(s) | 32-bit, 64-bit | 32-bit, 64-bit | 31 | | Byte Order | Little Endian | Little Endian | 32 | | CPU(s) | 8 | 8 | 33 | | On-line CPU(s) list | 0-7 | 0-7 | 34 | | Thread(s) per core | 1 | 1 | 35 | | Core(s) per socket | 1 | 8 | 36 | | Socket(s) | 8 | 1 | 37 | | NUMA node(s) | 1 | 1 | 38 | | Vendor ID | GenuineIntel | GenuineIntel | 39 | | CPU family | 6 | 6 | 40 | | Model | 45 | 63 | 41 | | Stepping | 7 | 2 | 42 | | CPU MHz | 2600.128 | 2400.046 | 43 | | BogoMIPS | 5200.25 | 4800.09 | 44 | | Hypervisor vendor | - | Xen | 45 | | Virtualization type | - | full | 46 | | L1d cache | 32K | 32K | 47 | | L1i cache | 32K | 32K | 48 | | L2 cache | 4096K | 256K | 49 | | L3 cache | - | 30720K | 50 | | NUMA node0 CPU(s) | 0-7 | 0-7 | 51 | -------------------------------------------------------------------------------- /docs/detailed.md: -------------------------------------------------------------------------------- 1 | [Kubernetes](https://kubernetes.io/docs/concepts/overview/what-is-kubernetes/) is an open-source system for automating deployment, scaling, and management of application containers across clusters of hosts. Here I introduce the [Triton K8s Supervisor](https://github.com/fayazg/tritonK8ssupervisor) and walk through the steps required to run Kubernetes on Triton. 2 | 3 | #### Step one: creating your Kubernetes Cluster. 4 | 5 | All it takes is the entry of a few simple parameters and then the Triton K8s Supervisor will automate the provisioning of the Triton virtual machine hosts, Docker Engines and networks that will serve as your Kubernetes Cluster. The Triton K8s Supervisor leverages [Terraform](https://www.terraform.io/) and [Ansible](https://www.ansible.com/) to complete these steps, so you can easily modify your Kubernetes Cluster creation setup if you wish. 6 | 7 | #### Step two: creating your Kubernetes Environment. 8 | 9 | Once the Kubernetes Cluster is set up, the Triton K8s Supervisor will simply continue on and automate the provisioning of a Kubernetes Environment on the Kubernetes Cluster it just created. The Triton K8s Supervisor leverages [Rancher](http://rancher.com/kubernetes) best practices and tools to complete this step, ensuring that you have a production grade, supportable (and easily upgradable) Kubernetes Environment. 10 | 11 | ![1x2 Architecture](img/20170530a-Triton-Kubernetes-HA.jpg) 12 | 13 | #### Step three: deploying your first Kubernetes Managed Application. 14 | 15 | As a final step, we will walk through deploying an application on your Kubernetes Environment using both the Kubernetes CLI and the [Kubernetes Dashboard](https://kubernetes.io/docs/user-guide/ui/). 16 | 17 | Follow along through these three easy steps in detail below. Complete them on your own, leveraging Triton's [free trial](https://sso.joyent.com/signup) offer to get started, and in less than an hour you will have your very own, 100% open source, production-grade Kubernetes stack.
18 | 19 | ### Let's get started: the pre-requisites 20 | 21 | In order to start running Triton K8s Supervisor, you must create a **Triton** account and install the **Triton CLI**, **Terraform**, **Ansible**, and the **Kubernetes CLI**. 22 | 23 | [Triton](https://www.joyent.com/why) is our container-native and open source cloud, which we will use to provide the infrastructure required for your Kubernetes cluster. 24 | 25 | [Terraform](https://www.terraform.io/) enables you to safely and predictably create, change, and improve production infrastructure. It is an open source tool that codifies APIs into declarative configuration files that can be shared amongst team members, treated as code, edited, reviewed, and versioned. We use Terraform to provision virtual machines, set up root access, and install `python`. 26 | 27 | [Ansible](http://docs.ansible.com/ansible/intro_installation.html) is an IT automation tool that enables app deployment, configuration management and orchestration. We are using Ansible to install the pre-reqs (including a supported version of docker-engine), create the Kubernetes environment, and set up the Kubernetes cluster. 28 | 29 | #### Install Triton 30 | 31 | In order to install Triton, first you must have a [Triton account](https://sso.joyent.com/signup). As a new user you will receive a $250 credit to enable you to give Triton and Kubernetes a test run, but it's important to [add your billing information](https://my.joyent.com/main/#!/account/payment) and [add an SSH key](https://my.joyent.com/main/#!/account) to your account. If you need instructions for how to generate an SSH key, [read our documentation](https://docs.joyent.com/public-cloud/getting-started). 32 | 33 | 1. Install [Node.js](https://nodejs.org/en/download/) and run `npm install -g triton` to install the Triton CLI. 34 | 1. `triton` uses profiles to store access information. You'll need to set up profiles for the relevant data centers. 35 | + `triton profile create` will give a [step-by-step walkthrough](https://docs.joyent.com/public-cloud/api-access/cloudapi) of how to create a profile. 36 | + Choose a profile to use for your Kubernetes Cluster. 37 | 1. Get into the Triton environment with `eval $(triton env <profile name>)`. 38 | 1. Run `triton info` to test your configuration. 39 | 40 | #### Install Terraform 41 | 42 | [Download Terraform](https://www.terraform.io/downloads.html) and unzip the package. 43 | Terraform runs as a single binary named `terraform`. The final step is to make sure that the `terraform` binary is available on the PATH. See [this](https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) page for instructions on setting the PATH on Linux and Mac. 44 | 45 | Test your installation by running `terraform`. You should see output similar to: 46 | 47 | ``` 48 | $ terraform 49 | Usage: terraform [--version] [--help] <command> [args] 50 | 51 | The available commands for execution are listed below. 52 | The most common, useful commands are shown first, followed by 53 | less common or more advanced commands. If you're just getting 54 | started with Terraform, stick with the common commands. For the 55 | other commands, please read the help and docs before usage. 56 | 57 | Common commands: 58 | apply Builds or changes infrastructure 59 | console Interactive console for Terraform interpolations 60 | 61 | # ... 62 | ``` 63 | 64 | #### Install Ansible 65 | 66 | There are [many ways to install Ansible](http://docs.ansible.com/ansible/intro_installation.html), but the simplest is to use the Python package manager (`pip`). If you don't already have `pip` installed, install it: 67 | 68 | ``` 69 | sudo easy_install pip 70 | ``` 71 | Ansible manages machines over SSH by default and requires Python 2.6 or 2.7 to be installed on all the hosts. 72 | 73 | Install Ansible: 74 | 75 | ``` 76 | sudo pip install ansible 77 | ``` 78 | 79 | Once done, you can run `ansible` to test your installation. You should see usage output that looks like the following: 80 | 81 | ``` 82 | $ ansible 83 | Usage: ansible <host-pattern> [options] 84 | 85 | Options: 86 | -a MODULE_ARGS, --args=MODULE_ARGS 87 | module arguments 88 | --ask-vault-pass ask for vault password 89 | -B SECONDS, --background=SECONDS 90 | run asynchronously, failing after X seconds 91 | (default=N/A) 92 | -C, --check don't make any changes; instead, try to predict some 93 | of the changes that may occur 94 | -D, --diff when changing (small) files and templates, show the 95 | differences in those files; works great with --check 96 | [...] 97 | ``` 98 | 99 | #### Install the Kubernetes CLI 100 | 101 | There are different ways to [install `kubectl`](https://kubernetes.io/docs/tasks/kubectl/install/), but the simplest way is via `curl`: 102 | 103 | ```sh 104 | # OS X 105 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl 106 | 107 | # Linux 108 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl 109 | 110 | # Windows 111 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/windows/amd64/kubectl.exe 112 | ``` 113 | 114 | ### Now let's create a Kubernetes Cluster 115 | 116 | The Triton K8s Supervisor uses `triton`, `terraform`, and `ansible` to set up and interact with a Kubernetes Cluster. Once those have been installed, you can download the [Triton K8s Supervisor package](https://github.com/fayazg/tritonK8ssupervisor.git), start `setup.sh`, and answer the prompted questions. 117 | 118 | Default values are shown in parentheses; if no input is provided, the default is used. 119 | 120 | ``` 121 | $ git clone https://github.com/fayazg/tritonK8ssupervisor.git 122 | Cloning into 'tritonK8ssupervisor'... 123 | $ cd tritonK8ssupervisor 124 | $ ./setup.sh 125 | Name your Kubernetes environment: (k8s dev) 126 | ``` 127 | 128 | Provide a name for the Kubernetes Environment and press Enter. 129 | 130 | ``` 131 | Describe this Kubernetes environment: (k8s dev) 132 | ``` 133 | 134 | Provide a description for the Kubernetes Environment and press Enter. 135 | 136 | ``` 137 | Would you like HA for Kubernetes Cluster Manager (+3 VMs) (yes | no)? 138 | ``` 139 | 140 | HA for the Kubernetes Cluster Manager will run a 2-node HA setup plus 1 MySQL VM. 141 | 142 | ``` 143 | Run Kubernetes Management Services on dedicated nodes (+3 VMs for etcd, +3 VMs for K8s services - apiserver/scheduler/controllermanager...) (yes | no)? 144 | ``` 145 | 146 | HA for the Kubernetes Management Services will run a 3-node etcd cluster on dedicated VMs and dedicate 3 VMs to K8s services such as the apiserver, scheduler, and controller-manager. The roles are pinned to those nodes with host labels, as shown in the sketch below.
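Under the hood, this placement is implemented with Rancher host labels: the Ansible roles in this repo (`k8setcd`, `k8sha`, `rancherhost`) register each VM through the Rancher agent with a label that pins it to its role. A sketch of the registration command those roles run, with the agent image and registration URL (normally fetched from the Rancher API at run time) shown as placeholders:

```bash
# From ansible/roles/k8setcd/tasks/main.yml: label a VM as a dedicated etcd node.
# k8sha uses orchestration=true and rancherhost uses compute=true instead.
docker run -e CATTLE_HOST_LABELS='etcd=true' --rm --privileged \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/lib/rancher:/var/lib/rancher \
  <rancher-agent-image> <registration-url>
```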
147 | 148 | ``` 149 | Hostname of the master: (kubemaster) 150 | ``` 151 | 152 | Provide a hostname for the virtual machine that will run the Rancher Server container. This VM will be used to interact with the Rancher and Kubernetes environments. The hostname must start with a letter and must be alphanumeric. 153 | 154 | ``` 155 | Enter a string to use for appending to hostnames of all the nodes: (kubenode) 156 | ``` 157 | 158 | Provide a prefix which will be used for all of the virtual machines that will be connected as nodes within your Kubernetes Cluster. This must be alphanumeric. 159 | 160 | ``` 161 | How many nodes should this Kubernetes cluster have: (1) 162 | ``` 163 | 164 | Provide the number of nodes that should be created/connected to your Kubernetes Cluster. The default provisioning limit on your free trial is set to 2 virtual machines (master + 1 node). If you need a larger cluster, please contact Joyent support to request more resources. 165 | 166 | ``` 167 | From the networks below: 168 | 1. Joyent-SDC-Private 909c0c0d-1455-404f-85bd-04f48b7b0059 169 | 2. Joyent-SDC-Public 31428241-4878-47d6-9fba-9a8436b596a4 170 | 3. My-Fabric-Network 0882d255-ac1e-41b2-b1d5-e08200ebb380 171 | 4. kubernetes 0b206464-d655-4723-a848-86d0f28764c8 172 | What networks should the master be a part of, provide comma separated values: (31428241-4878-47d6-9fba-9a8436b596a4) 173 | ``` 174 | 175 | The Triton CLI is used here to pull all the active networks for the current data center defined in the Triton profile. Provide a comma-separated list of networks that the master virtual machine should be a part of (e.g. "2,4"). 176 | 177 | ``` 178 | From the networks below: 179 | 1. Joyent-SDC-Private 909c0c0d-1455-404f-85bd-04f48b7b0059 180 | 2. Joyent-SDC-Public 31428241-4878-47d6-9fba-9a8436b596a4 181 | 3. My-Fabric-Network 0882d255-ac1e-41b2-b1d5-e08200ebb380 182 | 4. kubernetes 0b206464-d655-4723-a848-86d0f28764c8 183 | What networks should the nodes be a part of, provide comma separated values: (31428241-4878-47d6-9fba-9a8436b596a4) 184 | ``` 185 | 186 | Provide a comma-separated list of networks that the virtual machines used as Kubernetes nodes should be a part of (e.g. "2,4"). The nodes must be able to communicate with the master virtual machine or the setup will fail. 187 | 188 | ``` 189 | From the packages below: 190 | 1. k4-bigdisk-kvm-15.75G 7741b8f6-2733-11e6-bdb9-bf11c4147d38 191 | 2. k4-bigdisk-kvm-31.75G 14c01a1a-d0f8-11e5-ad69-1fd27456ad73 192 | 3. k4-bigdisk-kvm-63.75G 14c0992c-d0f8-11e5-bd78-e71dad0f8626 193 | 4. k4-fastdisk-kvm-31.75G 14bd9600-d0f8-11e5-a69c-97be6e961834 194 | 5. k4-fastdisk-kvm-63.75G 14be13c8-d0f8-11e5-b55b-47eb44d4e064 195 | 6. k4-general-kvm-15.75G 14ac8f5e-d0f8-11e5-a0e5-9b622a20595f 196 | 7. k4-general-kvm-3.75G 14aba044-d0f8-11e5-8c88-eb339a5da5d0 197 | 8. k4-general-kvm-31.75G 14ad1a32-d0f8-11e5-a465-8f264489308b 198 | 9. k4-general-kvm-7.75G 14ac17a4-d0f8-11e5-a400-e39503e18b19 199 | 10. k4-highcpu-kvm-1.75G 14b5edc4-d0f8-11e5-b4d2-b3e6e8c05f9d 200 | 11. k4-highcpu-kvm-15.75G 14b783d2-d0f8-11e5-8d93-6ba10192d750 201 | 12. k4-highcpu-kvm-250M 14b4ff36-d0f8-11e5-a8b1-e343c129d7f0 202 | 13. k4-highcpu-kvm-3.75G 14b67ef6-d0f8-11e5-ba19-479de37c6f75 203 | 14. k4-highcpu-kvm-7.75G 14b6fade-d0f8-11e5-85c5-4ff7918ab5c1 204 | 15. k4-highcpu-kvm-750M 14b5760a-d0f8-11e5-9cb1-23c9c232c00e 205 | 16. k4-highram-kvm-15.75G 14ba876c-d0f8-11e5-8a1b-ab02fdd17b07 206 | 17. k4-highram-kvm-31.75G 14bafb20-d0f8-11e5-a5cf-e386b841ed87 207 | 18. k4-highram-kvm-63.75G 14bb84f0-d0f8-11e5-8014-2fb7b19ccb24 208 | What KVM package should the master and nodes run on: (14b6fade-d0f8-11e5-85c5-4ff7918ab5c1) 209 | ``` 210 | 211 | The Triton CLI is used here to pull all the available KVM packages (i.e., virtual machine sizes) for the current data center defined by your Triton environment. Provide the corresponding number (e.g. "10" for k4-highcpu-kvm-1.75G) to be used for all the nodes. 212 | 213 | After the package has been selected, setup will verify all the entries before creating the cluster. 214 | 215 | ``` 216 | Verify that the following configuration is correct: 217 | 218 | Name of kubernetes environment: k8s dev 219 | Kubernetes environment description: k8s dev 220 | Master hostname: kubemaster 221 | All node hostnames will start with: kubenode 222 | Kubernetes environment will have 1 nodes 223 | Master server will be part of these networks: 31428241-4878-47d6-9fba-9a8436b596a4 224 | Kubernetes nodes will be a part of these networks: 31428241-4878-47d6-9fba-9a8436b596a4 225 | This package will be used for all the hosts: 14b6fade-d0f8-11e5-85c5-4ff7918ab5c1 226 | 227 | Make sure the above information is correct before answering: 228 | to view list of networks call "triton networks -l" 229 | to view list of packages call "triton packages -l" 230 | Make sure that the nodes and master are part of networks that can communicate with each other. 231 | Is the above config correct (yes | no)? yes 232 | ``` 233 | 234 | Answer the verification question, press Enter, and the setup will start. 235 | 236 | The Triton K8s Supervisor will store the provided inputs, generate a Terraform configuration for the environment, and start Terraform tasks to provision the required virtual machine hosts for your Kubernetes Cluster. After all Terraform tasks are finished, Ansible configuration files are generated and Ansible roles are run to install Docker, start Rancher, create the Kubernetes Environment, and connect all nodes to the Kubernetes Environment. 237 | 238 | A long message will be displayed with URLs to different services as they start to come up. Here is a breakdown of that message: 239 | 240 | ``` 241 | Congratulations, your Kubernetes cluster setup is complete. 242 | 243 | It will take a few minutes for all the Kubernetes processes to start up before you can access the Kubernetes Dashboard 244 | ––––> To check what processes/containers are coming up, go to http://<master-ip>:8080/env/<environment-id>/infra/containers 245 | once all these containers are up, you should be able to access Kubernetes via its dashboard or the CLI 246 | ``` 247 | 248 | This URL is a link to the view within Rancher that provides a list of the containers that make up your Kubernetes Environment. It will take a few minutes for all of these Kubernetes containers to start up. You can use this view to see their status. Details regarding Rancher's rich set of features can be found in the Rancher [Documentation](http://docs.rancher.com/rancher/v1.5/en/). 249 | 250 | ![Infrastructure Containers](img/infrastructure-containers.png "Infrastructure Containers") 251 | 252 | One of the last containers that will come up is for the Kubernetes Dashboard. 253 | 254 | ``` 255 | Waiting on Kubernetes Dashboard to come up. 256 | ...................................................................
257 | ––––> Kubernetes Dashboard is at http://<master-ip>:8080/r/projects/<environment-id>/kubernetes-dashboard:9090/ 258 | ––––> Kubernetes CLI config is at http://<master-ip>:8080/env/<environment-id>/kubernetes/kubectl 259 | 260 | CONGRATULATIONS, YOU HAVE CONFIGURED YOUR KUBERNETES ENVIRONMENT! 261 | ``` 262 | 263 | The last container to come up will be for the Kubernetes Dashboard. Once that container is up, your Kubernetes Environment is ready and you will get the above two URLs. The first URL is for the Kubernetes Dashboard, and the second URL is for the Kubernetes CLI (`kubectl` config). These will allow you to bypass Rancher and interact with the native Kubernetes interfaces. 264 | 265 | Congratulations! You have a production grade Kubernetes Environment ready for your use. One note, however, before we move on to deploying some sample applications on your new, 100% open source, Kubernetes Environment. If you want to customize any of these setup steps for your Kubernetes Environment, check out [this GitHub repo](https://github.com/fayazg/tritonK8ssupervisor/blob/master/docs/manual-setup.md) for an explanation of how to manually set up a Kubernetes Cluster on Triton. 266 | 267 | ### Now, let's deploy an app with Kubernetes 268 | 269 | Now that your Kubernetes Environment is up and running, you can deploy apps using either the Kubernetes Dashboard or `kubectl`. 270 | 271 | In this section we will walk through the deployment of a Ghost blog using the Kubernetes Dashboard and of the example Kubernetes Guestbook app using `kubectl`. 272 | 273 | #### Deploy an app using the Kubernetes Dashboard (Web UI) 274 | 275 | The [Kubernetes Dashboard](https://kubernetes.io/docs/user-guide/ui/) can be used to get an overview of applications running on your cluster, as well as to create or modify individual Kubernetes resources. The Kubernetes Dashboard also provides information on the state of Kubernetes resources in your cluster. 276 | 277 | ![Kubernetes Dashboard](img/20170328a-k8sdashboard.png) 278 | 279 | Now, let's deploy Ghost using the Kubernetes Dashboard. 280 | 281 | First, get the URL for the Kubernetes Dashboard that the Triton K8s Supervisor provided at the end of the initial setup, and enter the URL in your browser to access your Kubernetes Dashboard. Once you are in the Kubernetes Dashboard you should see a **CREATE** button at the top. Click the **CREATE** button to begin the process of deploying an app on your Kubernetes Environment. 282 | 283 | ![Kubernetes Dashboard - Create](img/20170328a-k8sdashboard-create.png) 284 | 285 | Next, enter the details requested, using the inputs provided in the image below, and then click **Deploy**. 286 | 287 | ![Kubernetes Dashboard - Deploy](img/20170328a-k8sdashboard-deploy.png) 288 | 289 | That's it! Kubernetes should now be starting up your Ghost app and you should see something that looks like this: 290 | 291 | ![Kubernetes Dashboard - Workloads](img/20170328a-k8sdashboard-workloads.png) 292 | 293 | Your app is configured to be exposed externally on port 8080, so you will find the app URL on the services screen. Once the deployment is complete and the pods are up, the app should be available. 294 | 295 | ![Kubernetes Dashboard - Services](img/20170328a-k8sdashboard-services.png) 296 | 297 | #### Deploy an app using the Kubernetes CLI 298 | 299 | Now, let's deploy the example Kubernetes Guestbook app using the Kubernetes CLI. 300 | 301 | First, get the URL to the Kubernetes CLI config page that the Triton K8s Supervisor provided at the end of the initial setup; this page will generate a `kubectl` config file. 302 | 303 | Go to the Kubernetes CLI config URL and click on **Generate Config**: 304 | 305 | ![Generate kubectl config](img/20170328a-k8scli-generateconfig.png) 306 | 307 | From the next screen click **Copy to Clipboard** and paste the content into the `~/.kube/config` file: 308 | 309 | ![Copy kubectl config](img/20170328a-k8scli-copyconfig.png) 310 | 311 | 312 | Now you should be able to use the `kubectl` command to deploy your app. 313 | 314 | The app we will deploy is called [Guestbook](https://github.com/kubernetes/kubernetes/tree/master/examples/guestbook). Clone the repository to your local machine, and navigate to the app's directory in your terminal. We'll make one minor change to the configuration file so that we can interact with it using a public IP address for this demo: 315 | 316 | ``` 317 | git clone https://github.com/kubernetes/kubernetes.git 318 | cd kubernetes/examples/guestbook 319 | vi all-in-one/guestbook-all-in-one.yaml 320 | ``` 321 | 322 | In that configuration file (`all-in-one/guestbook-all-in-one.yaml`), uncomment the frontend service type, [`# type: LoadBalancer`](https://github.com/kubernetes/kubernetes/blob/master/examples/guestbook/all-in-one/guestbook-all-in-one.yaml#L130), so that it runs as a load balancer: 323 | 324 | ``` 325 | spec: 326 | # if your cluster supports it, uncomment the following to automatically create 327 | # an external load-balanced IP for the frontend service. 328 | type: LoadBalancer 329 | ports: 330 | # the port that this service should serve on 331 | - port: 80 332 | selector: 333 | app: guestbook 334 | tier: frontend 335 | ``` 336 | 337 | Be sure to save the file. 338 | 339 | Now you should be able to use `kubectl` to deploy the app and get the external URL for the frontend service, which can be used to access the app once the pods are up: 340 | 341 | ``` 342 | # Deploy guestbook app 343 | $ kubectl create -f all-in-one/guestbook-all-in-one.yaml 344 | service "redis-master" created 345 | deployment "redis-master" created 346 | service "redis-slave" created 347 | deployment "redis-slave" created 348 | service "frontend" created 349 | deployment "frontend" created 350 | 351 | # Make sure that the pods are up and running 352 | $ kubectl get deployments 353 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 354 | frontend 3 3 3 3 2m 355 | redis-master 1 1 1 1 2m 356 | redis-slave 2 2 2 2 2m 357 | 358 | $ kubectl get pods 359 | NAME READY STATUS RESTARTS AGE 360 | frontend-88237173-b23b9 1/1 Running 0 2m 361 | frontend-88237173-cq5jz 1/1 Running 0 2m 362 | frontend-88237173-sbkrb 1/1 Running 0 2m 363 | redis-master-343230949-3ll61 1/1 Running 0 2m 364 | redis-slave-132015689-p54lv 1/1 Running 0 2m 365 | redis-slave-132015689-t6z7z 1/1 Running 0 2m 366 | 367 | # Get the external service IP/URL 368 | $ kubectl get services 369 | NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE 370 | frontend 10.43.80.176 165.225.175.75 80:30896/TCP 14s 371 | kubernetes 10.43.0.1 <none> 443/TCP 7m 372 | redis-master 10.43.0.176 <none> 6379/TCP 15s 373 | redis-slave 10.43.141.195 <none> 6379/TCP 15s 374 | ``` 375 | 376 | We can see above that, for this demo, all pods are running and the only service exposed externally is the frontend service on `165.225.175.75:80`, which is the URL for the guestbook app.
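When you are done exploring the guestbook (shown in the screenshots below), the same manifest tears the demo down:

```bash
# Remove the guestbook deployments and services
kubectl delete -f all-in-one/guestbook-all-in-one.yaml
```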
377 | 378 | ![Guestbook App](img/20170328a-k8scli-guestbook.png) 379 | 380 | The deployment status for all the pods and services can also be viewed using the Kubernetes Dashboard. To check using Dashboard, go to the URL for the Web UI. 381 | 382 | ![Guestbook K8s Dashboard](img/20170328a-k8scli-guestbookindashboard.png) 383 | 384 | If you want to better understand what it takes to deploy Kubernetes on Triton, check out [Kubernetes on Triton – the hard way](https://www.joyent.com/blog/kubernetes-the-hard-way), by Joe Julian. For more information on Kubernetes itself, dig into the [official Kubernetes user guide](https://kubernetes.io/docs/user-guide/) or the `kubectl` [cheatsheet](https://kubernetes.io/docs/user-guide/kubectl-cheatsheet/). More information about Rancher can be found in the Rancher [Documentation](http://docs.rancher.com/rancher/v1.5/en/). 385 | 386 | Put these three great technologies together and you end up with a 100% open source, supportable, and production grade Kubernetes stack, which you can easily run on the [Triton public cloud](https://www.joyent.com/why), or in your data center using [Triton software](https://www.joyent.com/why) as the foundation for your private cloud. 387 | -------------------------------------------------------------------------------- /docs/img/1x2-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/1x2-arch.png -------------------------------------------------------------------------------- /docs/img/20170323b-Triton-Kubernetes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170323b-Triton-Kubernetes.jpg -------------------------------------------------------------------------------- /docs/img/20170323b-Triton-Kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170323b-Triton-Kubernetes.png -------------------------------------------------------------------------------- /docs/img/20170324a-add-environment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170324a-add-environment.png -------------------------------------------------------------------------------- /docs/img/20170324a-add-host.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170324a-add-host.png -------------------------------------------------------------------------------- /docs/img/20170324a-create-environment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170324a-create-environment.png -------------------------------------------------------------------------------- /docs/img/20170324a-kubernetes-dashboard.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170324a-kubernetes-dashboard.png -------------------------------------------------------------------------------- /docs/img/20170324a-manage-environments.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170324a-manage-environments.png -------------------------------------------------------------------------------- /docs/img/20170324b-add-host.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170324b-add-host.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8scli-copyconfig.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8scli-copyconfig.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8scli-generateconfig.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8scli-generateconfig.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8scli-guestbook.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8scli-guestbook.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8scli-guestbookindashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8scli-guestbookindashboard.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8sdashboard-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8sdashboard-create.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8sdashboard-deploy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8sdashboard-deploy.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8sdashboard-services.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8sdashboard-services.png -------------------------------------------------------------------------------- /docs/img/20170328a-k8sdashboard-workloads.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8sdashboard-workloads.png
--------------------------------------------------------------------------------
/docs/img/20170328a-k8sdashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8sdashboard.png
--------------------------------------------------------------------------------
/docs/img/20170328a-k8smonitoring-deployments.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328a-k8smonitoring-deployments.png
--------------------------------------------------------------------------------
/docs/img/20170328b-k8smonitoring-deployments.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170328b-k8smonitoring-deployments.png
--------------------------------------------------------------------------------
/docs/img/20170530a-Triton-Kubernetes-HA.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/20170530a-Triton-Kubernetes-HA.jpg
--------------------------------------------------------------------------------
/docs/img/infrastructure-containers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/infrastructure-containers.png
--------------------------------------------------------------------------------
/docs/img/kubernetes-cli.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/kubernetes-cli.png
--------------------------------------------------------------------------------
/docs/img/kubernetes-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/kubernetes-dashboard.png
--------------------------------------------------------------------------------
/docs/img/rancher-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fayazg/tritonK8ssupervisor/4b9990241dacd1619c3e79341e14e386be56392c/docs/img/rancher-dashboard.png
--------------------------------------------------------------------------------
/docs/manual-setup.md:
--------------------------------------------------------------------------------
1 | ## Manual Setup
2 | There are many ways to set up a Kubernetes cluster. We are using Rancher as a Kubernetes management platform. You can read more about Rancher and all it offers [here](http://rancher.com/rancher/).
3 | Rancher itself runs as a Docker container, and it runs and manages a production-ready Kubernetes cluster by running the Kubernetes services as containers. This section provides the steps needed to manually set up a Kubernetes cluster with three nodes on Rancher.
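Once the cluster is running, you can see this container-based layout for yourself. The following is a minimal sketch (it assumes the `kubenode1` KVM provisioned later in this guide, with the root access we set up below):

```bash
# The Kubernetes services appear as ordinary Docker containers on each node.
ssh root@$(triton ip kubenode1) docker ps
```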
4 | 
5 | We will provision four KVMs. One KVM will be used as the Rancher server, and three will be used as worker nodes by the Kubernetes cluster. For a simple architecture diagram, click [here](#architecture).
6 | 
7 | ### Provision KVMs
8 | Provision one KVM for the KubeServer:
9 | ```bash
10 | triton instance create --wait --name=kubeserver -N Joyent-SDC-Public \
11 | ubuntu-certified-16.04 k4-highcpu-kvm-1.75G
12 | ```
13 | This will be the host where the Rancher server container runs.
14 | 
15 | Provision three KVMs for the KubeNodes:
16 | ```bash
17 | triton instance create --wait --name=kubenode1 -N Joyent-SDC-Public \
18 | ubuntu-certified-16.04 k4-highcpu-kvm-1.75G
19 | triton instance create --wait --name=kubenode2 -N Joyent-SDC-Public \
20 | ubuntu-certified-16.04 k4-highcpu-kvm-1.75G
21 | triton instance create --wait --name=kubenode3 -N Joyent-SDC-Public \
22 | ubuntu-certified-16.04 k4-highcpu-kvm-1.75G
23 | ```
24 | These are provisioned to be the Kubernetes worker nodes.
25 | 
26 | ### Allow root access to all KVMs:
27 | Triton’s default KVM setup allows login only as the ubuntu user with `sudo` access. We need to set up root access with our SSH key. Copy the `authorized_keys` file from the ubuntu user to root on all hosts.
28 | ```bash
29 | kubeserver=$(triton ip kubeserver)
30 | kubenode1=$(triton ip kubenode1)
31 | kubenode2=$(triton ip kubenode2)
32 | kubenode3=$(triton ip kubenode3)
33 | 
34 | for h in $kubeserver $kubenode1 $kubenode2 $kubenode3; do
35 | ssh ubuntu@$h sudo cp /home/ubuntu/.ssh/authorized_keys /root/.ssh/
36 | done
37 | ```
38 | Make sure all your KVMs have been created and are running:
39 | ```bash
40 | triton ls
41 | SHORTID NAME IMG STATE FLAGS AGE
42 | abde0e87 kubeserver ubuntu-certified-16.04@20170221 running K 5m
43 | e3fe229a kubenode1 ubuntu-certified-16.04@20170221 running K 2m
44 | baa582d0 kubenode2 ubuntu-certified-16.04@20170221 running K 1m
45 | 2077abe8 kubenode3 ubuntu-certified-16.04@20170221 running K 1m
46 | ```
47 | 
48 | ### Install pre-reqs and docker-engine package on all KVMs:
49 | Rancher and all Kubernetes services run as Docker containers managed by Rancher, so configure and install docker-engine version 1.12.6 on all KVMs.
50 | ```bash
51 | for h in $kubeserver $kubenode1 $kubenode2 $kubenode3; do
52 | ssh root@$h \
53 | 'apt-get update && \
54 | apt-get upgrade -y && \
55 | apt-get install -y linux-image-extra-$(uname -r) && \
56 | apt-get install -y linux-image-extra-virtual zfs && \
57 | curl -fsSL https://apt.dockerproject.org/gpg |apt-key add - && \
58 | add-apt-repository "deb https://apt.dockerproject.org/repo/ ubuntu-$(lsb_release -cs) main" && \
59 | apt-get update && \
60 | apt-get -y install docker-engine=1.12.6-0~ubuntu-xenial'
61 | done
62 | ```
63 | 
64 | ### Start Rancher and set up the Kubernetes environment and nodes
65 | Start the rancher/server container on the kubeserver KVM:
66 | ```bash
67 | ssh root@$kubeserver docker run -d --restart=unless-stopped \
68 | -p 8080:8080 rancher/server
69 | ```
70 | 
71 | After the rancher/server Docker container comes up, you should be able to access the Rancher UI and create a Kubernetes environment.
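Rather than refreshing the browser, you can poll the endpoint from your workstation until it responds. This is a minimal sketch (it assumes `curl` is installed locally):

```bash
# Poll the Rancher UI until it answers HTTP requests (sketch; assumes curl).
until curl -fs -o /dev/null --connect-timeout 5 "http://$(triton ip kubeserver):8080/"; do
  printf '.'
  sleep 5
done
echo " Rancher UI is up"
```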
72 | 
73 | Go to the Rancher UI http://$(triton ip kubeserver):8080 and select “Manage Environments” from the Environments tab:
74 | 
75 | ![Manage Environments](img/20170324a-manage-environments.png "Manage Environments")
76 | 
77 | 
78 | Add a new environment:
79 | 
80 | ![Add Environments](img/20170324a-add-environment.png "Add Environment")
81 | 
82 | 
83 | Select Kubernetes from the list, provide a Name/Description, and click Create at the bottom of the page:
84 | 
85 | ![Create Environments](img/20170324a-create-environment.png "Create Environment")
86 | 
87 | 
88 | Now you should have a Kubernetes environment, which you can select from the “Environments” tab and add nodes to by clicking the “Add a host” button:
89 | 
90 | ![Add Host](img/20170324a-add-host.png "Add Host")
91 | 
92 | 
93 | From here you will add all three nodes (kubenode1, kubenode2, and kubenode3) by performing the same steps for each:
94 | 1. Select Custom from the available machine drivers list
95 | 1. Enter the IP address of the kubenode
96 | 1. Copy the Docker command and run it on the kubenode
97 | 
98 | ![Add Host](img/20170324b-add-host.png "Add Host")
99 | 
100 | 
101 | After the nodes have been added, Kubernetes services will be started on each of the hosts, and within minutes you will have your Kubernetes environment up and ready.
102 | 
103 | To deploy your app to your Kubernetes environment, Rancher provides two simple options:
104 | * the kubectl config, which can be copied from the “KUBERNETES -> CLI” tab
105 | * the Kubernetes UI, from the “KUBERNETES -> Dashboard” tab
106 | 
107 | ![Kubernetes Environment](img/20170324a-kubernetes-dashboard.png "Kubernetes Environment")
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -o errexit
4 | set -o pipefail
5 | # set -o xtrace
6 | 
7 | db_name=cattle
8 | db_user=cattle
9 | db_pass=cattle
10 | 
11 | main() {
12 | if [[ ! -z "$1" && "$1" == "-c" ]]; then
13 | cleanRunner
14 | exit 0
15 | fi
16 | 
17 | if [ -e terraform/rancher.tf ]; then
18 | echo "error: configuration for a previous run has been found"
19 | echo "       clean the configuration (./setup.sh -c)"
20 | exit 1
21 | fi
22 | 
23 | mkdir ansible/tmp > /dev/null 2>&1 || true
24 | # SET default variables
25 | setVarDefaults
26 | # SET configuration from current triton profile (triton account information)
27 | setConfigFromTritonENV
28 | # GET updated configuration from user
29 | getConfigFromUser
30 | # VERIFY with user that parameters are correct
31 | verifyConfig
32 | # UPDATE config file with parameters
33 | setConfigToFile
34 | 
35 | exportVars
36 | 
37 | echo "################################################################################"
38 | echo "### Starting terraform tasks..."
39 | echo "################################################################################"
40 | sleep 2
41 | runTerraformTasks
42 | echo "################################################################################"
43 | echo "### Creating ansible configs..."
44 | echo "################################################################################"
45 | sleep 2
46 | createAnsibleConfigs
47 | echo "################################################################################"
48 | echo "### Running ansible tasks..."
49 | echo "################################################################################"
50 | sleep 2
51 | runAnsible
52 | 
53 | echo ""
54 | echo "Congratulations, your Kubernetes cluster setup is now complete."
55 | echo ""
56 | echo "It will take a few minutes for all the Kubernetes processes to start up before you can access the Kubernetes Dashboard"
57 | echo "----> To check what processes/containers are coming up, go to http://$(tail -1 terraform/masters.ip):8080/env/$(cat ansible/tmp/kubernetes_environment.id)/infra/containers"
58 | echo "      once all these containers are up, you should be able to access Kubernetes via its dashboard or the CLI"
59 | 
60 | echo "Waiting for the Kubernetes dashboard to come up."
61 | echo ""
62 | 
63 | KUBERNETES_DASHBOARD_UP=false
64 | DASHBOARD_CONTAINER_COUNT=0
65 | while ! "$KUBERNETES_DASHBOARD_UP"; do # poll until the dashboard answers
66 | echo -ne "."
67 | sleep 1
68 | if ! ((`date +%s` % 15)); then # check the dashboard roughly every 15 seconds
69 | DASHBOARD_CONTAINER_COUNT=0
70 | cRetVal=$(curl --connect-timeout 5 --max-time 5 -s http://$(tail -1 terraform/masters.ip):8080/r/projects/$(cat ansible/tmp/kubernetes_environment.id)/kubernetes-dashboard:9090/)
71 | if [ $(echo $cRetVal | grep -i kubernetes | wc -l) -ne 0 ]; then
72 | KUBERNETES_DASHBOARD_UP=true
73 | # BUG workaround: stop stuck dashboard containers so Rancher reschedules them
74 | else
75 | for node in $(cat terraform/hosts.ip); do
76 | let DASHBOARD_CONTAINER_COUNT=$DASHBOARD_CONTAINER_COUNT+$(ssh -o StrictHostKeyChecking=no root@$node docker ps | grep kubernetes-dashboard | awk '{print $1}' | wc -w | sed "s/ //g") || true
77 | done
78 | if [[ $DASHBOARD_CONTAINER_COUNT -eq 2 && $(curl --connect-timeout 5 --max-time 5 -s http://$(tail -1 terraform/masters.ip):8080/r/projects/$(cat ansible/tmp/kubernetes_environment.id)/kubernetes-dashboard:9090/ | grep -i "Service Unavailable" | wc -l) -ne 0 ]]; then
79 | for node in $(cat terraform/hosts.ip); do
80 | dashboard_container="$(ssh -o StrictHostKeyChecking=no root@$node docker ps | grep k8s_POD.*dashboard | awk '{print $1}')" || true
81 | if [ "${dashboard_container:-}" != "" ]; then
82 | ssh -o StrictHostKeyChecking=no root@$node "docker stop -t0 $dashboard_container" >> /dev/null 2>&1
83 | fi
84 | echo -ne "."
85 | done
86 | fi
87 | fi
88 | fi
89 | done
90 | 
91 | echo ""
92 | echo "----> Kubernetes dashboard is at http://$(tail -1 terraform/masters.ip):8080/r/projects/$(cat ansible/tmp/kubernetes_environment.id)/kubernetes-dashboard:9090/"
93 | echo "----> Kubernetes CLI config is at http://$(tail -1 terraform/masters.ip):8080/env/$(cat ansible/tmp/kubernetes_environment.id)/kubernetes/kubectl"
94 | echo ""
95 | echo "   CONGRATULATIONS, YOU HAVE CONFIGURED YOUR KUBERNETES ENVIRONMENT!"
96 | }
97 | 
98 | getArgument() {
99 | # $1 message
100 | # $2 default
101 | while true; do
102 | if [ -z ${2+x} ]; then
103 | read -p "$1 " theargument; if [ ! -z "$theargument" ]; then echo $theargument; break; fi # no default given: re-prompt until a value is entered
104 | else
105 | read -p "$1 ($2) " theargument
106 | if [ -z "$theargument" ]; then
107 | echo $2
108 | else
109 | echo $theargument
110 | fi
111 | break
112 | fi
113 | done
114 | }
115 | runAnsible() {
116 | cd ansible
117 | ansible-playbook -i hosts dockerSetup.yml
118 | if $RANCHER_HA; then
119 | ansible-playbook -i hosts rancherServers.yml
120 | else
121 | ansible-playbook -i hosts rancherServer.yml
122 | fi
123 | echo "Waiting for Rancher to come up ..."
124 | echo ""
125 | while [[ $(curl --connect-timeout 5 --max-time 5 -s http://$(tail -1 ../terraform/masters.ip):8080/ | grep $(tail -1 ../terraform/masters.ip) | wc -l) -eq 0 ]]; do
126 | echo -ne "."
127 | sleep 1
128 | done
129 | echo ""
130 | sleep 5 # wait a few seconds before creating the environment
131 | if $SEPARATE_PLANE; then
132 | curl -X PUT -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"accountId":null, "data":{"fields":{"stacks":[{"name":"healthcheck", "templateId":"library:infra*healthcheck"}, {"name":"kubernetes", "templateId":"library:infra*k8s", "answers":{"CONSTRAINT_TYPE":"required", "CLOUD_PROVIDER":"rancher", "REGISTRY":"", "DISABLE_ADDONS":"false", "POD_INFRA_CONTAINER_IMAGE":"gcr.io/google_containers/pause-amd64:3.0", "INFLUXDB_HOST_PATH":"", "EMBEDDED_BACKUPS":true, "BACKUP_PERIOD":"15m0s", "BACKUP_RETENTION":"24h", "ETCD_HEARTBEAT_INTERVAL":"500", "ETCD_ELECTION_TIMEOUT":"5000"}}, {"name":"network-services", "templateId":"library:infra*network-services"}, {"name":"ipsec", "templateId":"library:infra*ipsec"}]}}, "description":"Default Kubernetes template", "externalId":"catalog://library:project*kubernetes:0", "id":0, "isPublic":true, "kind":"projectTemplate", "name":"Kubernetes", "removeTime":null, "removed":null, "state":"active", "transitioning":"no", "transitioningMessage":null, "transitioningProgress":0, "stacks":[{"type":"catalogTemplate", "name":"healthcheck", "templateId":"library:infra*healthcheck"}, {"type":"catalogTemplate", "answers":{"CONSTRAINT_TYPE":"required", "CLOUD_PROVIDER":"rancher", "REGISTRY":"", "DISABLE_ADDONS":"false", "POD_INFRA_CONTAINER_IMAGE":"gcr.io/google_containers/pause-amd64:3.0", "INFLUXDB_HOST_PATH":"", "EMBEDDED_BACKUPS":true, "BACKUP_PERIOD":"15m0s", "BACKUP_RETENTION":"24h", "ETCD_HEARTBEAT_INTERVAL":"500", "ETCD_ELECTION_TIMEOUT":"5000"}, "name":"kubernetes", "templateId":"library:infra*k8s"}, {"type":"catalogTemplate", "name":"network-services", "templateId":"library:infra*network-services"}, {"type":"catalogTemplate", "name":"ipsec", "templateId":"library:infra*ipsec"}]}' "http://$(tail -1 ../terraform/masters.ip):8080/v2-beta/projecttemplates/$(cat tmp/kubernetes_template_id.id)" > /dev/null 2>&1
133 | fi
134 | ansible-playbook -i hosts clusterUp.yml
135 | cd ..
136 | }
137 | createAnsibleConfigs() {
138 | if [[ ! -e terraform/masters.ip || ! -e terraform/hosts.ip ]]; then
139 | echo "Terraform had too many errors. Make sure you haven't reached your provisioning limit."
140 | exit 1 141 | fi 142 | echo "Creating ansible hosts file and variable files" 143 | rm -f ansible/hosts 2> /dev/null 144 | if $RANCHER_HA; then 145 | echo "[MYSQLDB]" >> ansible/hosts 146 | cat terraform/mysqldb.ip >> ansible/hosts 147 | fi 148 | echo "[MASTER]" >> ansible/hosts 149 | cat terraform/masters.ip >> ansible/hosts 150 | echo "[HOST]" >> ansible/hosts 151 | cat terraform/hosts.ip >> ansible/hosts 152 | if $SEPARATE_PLANE; then 153 | echo "[K8SHA]" >> ansible/hosts 154 | cat terraform/k8sha.ip >> ansible/hosts 155 | echo "[K8SETCD]" >> ansible/hosts 156 | cat terraform/k8setcd.ip >> ansible/hosts 157 | fi 158 | echo " created: ansible/hosts" 159 | master=$(tail -1 terraform/masters.ip) 160 | echo "master: $master" > ansible/roles/ranchermaster/vars/vars.yml 161 | if $RANCHER_HA; then 162 | mysqldb=$(tail -1 terraform/mysqldb.ip) 163 | echo "mysqldb: $mysqldb" >> ansible/roles/ranchermaster/vars/vars.yml 164 | echo "db_user: $db_user" >> ansible/roles/ranchermaster/vars/vars.yml 165 | echo "db_pass: $db_pass" >> ansible/roles/ranchermaster/vars/vars.yml 166 | echo "db_name: $db_name" >> ansible/roles/ranchermaster/vars/vars.yml 167 | fi 168 | echo "kubernetes_name: \"$(echo $KUBERNETES_NAME | sed 's/"//g')\"" >> ansible/roles/ranchermaster/vars/vars.yml 169 | echo "kubernetes_description: \"$(echo $KUBERNETES_DESCRIPTION | sed 's/"//g')\"" >> ansible/roles/ranchermaster/vars/vars.yml 170 | cd ansible 171 | sed "s;private_key_file = .*$;private_key_file = $(echo $SDC_KEY | sed 's/"//g');g" ansible.cfg > tmp.cfg && mv tmp.cfg ansible.cfg 172 | cd .. 173 | 174 | echo " created: ansible/roles/ranchermaster/vars/vars.yml" 175 | } 176 | runTerraformTasks() { 177 | if [ -e terraform/rancher.tf ] 178 | then 179 | echo "warning: a previous terraform configuration has been found" 180 | echo " skipping terraform configuration and execution..." 181 | else 182 | echo "Generating terraform configs for environment..." 
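# NOTE: for reference, each updateTerraformConfig call below appends a module
# block to terraform/rancher.tf. A generated worker-node entry looks roughly
# like this (the hostname, network UUID, key path, and package here are
# illustrative values, not defaults taken from this script):
#
#   module "kubenode1" {
#     source               = "host"
#     hostname             = "kubenode1"
#     networks             = ["NETWORK-UUID"]
#     root_authorized_keys = "${file("~/.ssh/id_rsa")}"
#     package              = "k4-highcpu-kvm-1.75G"
#   }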
183 | updateTerraformConfig provider triton 184 | 185 | if $RANCHER_HA; then 186 | echo " Master hostname: ${RANCHER_MASTER_HOSTNAME}1" 187 | echo " ${RANCHER_MASTER_HOSTNAME}2" 188 | echo " ${RANCHER_MASTER_HOSTNAME}db" 189 | updateTerraformConfig master $(echo ${RANCHER_MASTER_HOSTNAME}1 | sed 's/"//g') 190 | updateTerraformConfig master $(echo ${RANCHER_MASTER_HOSTNAME}2 | sed 's/"//g') 191 | updateTerraformConfig mysqldb $(echo ${RANCHER_MASTER_HOSTNAME}db | sed 's/"//g') 192 | else 193 | echo " Master hostname: $RANCHER_MASTER_HOSTNAME" 194 | updateTerraformConfig master $(echo $RANCHER_MASTER_HOSTNAME | sed 's/"//g') 195 | fi 196 | 197 | for (( i = 1; i <= $KUBERNETES_NUMBER_OF_NODES; i++ )) 198 | do 199 | echo " Kubernetes node $i: $(echo $KUBERNETES_NODE_HOSTNAME_BEGINSWITH | sed 's/"//g')$i" 200 | updateTerraformConfig host $(echo $KUBERNETES_NODE_HOSTNAME_BEGINSWITH$i | sed 's/"//g') 201 | done 202 | 203 | if $SEPARATE_PLANE; then 204 | updateTerraformConfig k8setcd $(echo ${KUBERNETES_NODE_HOSTNAME_BEGINSWITH}etcd1 | sed 's/"//g') 205 | updateTerraformConfig k8setcd $(echo ${KUBERNETES_NODE_HOSTNAME_BEGINSWITH}etcd2 | sed 's/"//g') 206 | updateTerraformConfig k8setcd $(echo ${KUBERNETES_NODE_HOSTNAME_BEGINSWITH}etcd3 | sed 's/"//g') 207 | updateTerraformConfig k8sha $(echo ${KUBERNETES_NODE_HOSTNAME_BEGINSWITH}k8smgmt1 | sed 's/"//g') 208 | updateTerraformConfig k8sha $(echo ${KUBERNETES_NODE_HOSTNAME_BEGINSWITH}k8smgmt2 | sed 's/"//g') 209 | updateTerraformConfig k8sha $(echo ${KUBERNETES_NODE_HOSTNAME_BEGINSWITH}k8smgmt3 | sed 's/"//g') 210 | fi 211 | cd terraform 212 | echo "Starting terraform tasks" 213 | terraform get 214 | terraform apply 215 | echo " terraform tasks completed" 216 | cd .. 217 | fi 218 | } 219 | updateTerraformConfig() { 220 | if [ $1 == "k8sha" ]; then 221 | echo "" >> terraform/rancher.tf 222 | echo "module \"$2\" {" >> terraform/rancher.tf 223 | echo " source = \"k8sha\"" >> terraform/rancher.tf 224 | echo " hostname = \"$2\"" >> terraform/rancher.tf 225 | echo " networks = [\"$(echo $RANCHER_MASTER_NETWORKS | sed 's/,/","/g')\"]" >> terraform/rancher.tf 226 | echo " root_authorized_keys = \"\${file(\"$(echo $SDC_KEY | sed 's/"//g')\")}\"" >> terraform/rancher.tf 227 | # echo " image = \"0867ef86-e69d-4aaa-ba3b-8d2aef0c204e\"" >> terraform/rancher.tf 228 | echo " package = \"$(echo $HOST_PACKAGE | sed 's/"//g')\"" >> terraform/rancher.tf 229 | echo "}" >> terraform/rancher.tf 230 | return 231 | fi 232 | if [ $1 == "k8setcd" ]; then 233 | echo "" >> terraform/rancher.tf 234 | echo "module \"$2\" {" >> terraform/rancher.tf 235 | echo " source = \"k8setcd\"" >> terraform/rancher.tf 236 | echo " hostname = \"$2\"" >> terraform/rancher.tf 237 | echo " networks = [\"$(echo $RANCHER_MASTER_NETWORKS | sed 's/,/","/g')\"]" >> terraform/rancher.tf 238 | echo " root_authorized_keys = \"\${file(\"$(echo $SDC_KEY | sed 's/"//g')\")}\"" >> terraform/rancher.tf 239 | # echo " image = \"0867ef86-e69d-4aaa-ba3b-8d2aef0c204e\"" >> terraform/rancher.tf 240 | echo " package = \"$(echo $HOST_PACKAGE | sed 's/"//g')\"" >> terraform/rancher.tf 241 | echo "}" >> terraform/rancher.tf 242 | return 243 | fi 244 | if [ $1 == "master" ]; then 245 | echo "" >> terraform/rancher.tf 246 | echo "module \"$2\" {" >> terraform/rancher.tf 247 | echo " source = \"master\"" >> terraform/rancher.tf 248 | echo " hostname = \"$2\"" >> terraform/rancher.tf 249 | echo " networks = [\"$(echo $RANCHER_MASTER_NETWORKS | sed 's/,/","/g')\"]" >> terraform/rancher.tf 250 | echo " root_authorized_keys 
= \"\${file(\"$(echo $SDC_KEY | sed 's/"//g')\")}\"" >> terraform/rancher.tf 251 | # echo " image = \"0867ef86-e69d-4aaa-ba3b-8d2aef0c204e\"" >> terraform/rancher.tf 252 | echo " package = \"$(echo $HOST_PACKAGE | sed 's/"//g')\"" >> terraform/rancher.tf 253 | echo "}" >> terraform/rancher.tf 254 | return 255 | fi 256 | if [ $1 == "mysqldb" ]; then 257 | echo "" >> terraform/rancher.tf 258 | echo "module \"$2\" {" >> terraform/rancher.tf 259 | echo " source = \"mysqldb\"" >> terraform/rancher.tf 260 | echo " hostname = \"$2\"" >> terraform/rancher.tf 261 | echo " networks = [\"$(echo $RANCHER_MASTER_NETWORKS | sed 's/,/","/g')\"]" >> terraform/rancher.tf 262 | echo " root_authorized_keys = \"\${file(\"$(echo $SDC_KEY | sed 's/"//g')\")}\"" >> terraform/rancher.tf 263 | # echo " image = \"0867ef86-e69d-4aaa-ba3b-8d2aef0c204e\"" >> terraform/rancher.tf 264 | echo " package = \"$(echo $HOST_PACKAGE | sed 's/"//g')\"" >> terraform/rancher.tf 265 | echo "}" >> terraform/rancher.tf 266 | return 267 | fi 268 | if [ $1 == "host" ]; then 269 | echo "" >> terraform/rancher.tf 270 | echo "module \"$2\" {" >> terraform/rancher.tf 271 | echo " source = \"host\"" >> terraform/rancher.tf 272 | echo " hostname = \"$2\"" >> terraform/rancher.tf 273 | echo " networks = [\"$(echo $KUBERNETES_NODE_NETWORKS | sed 's/,/","/g')\"]" >> terraform/rancher.tf 274 | echo " root_authorized_keys = \"\${file(\"$(echo $SDC_KEY | sed 's/"//g')\")}\"" >> terraform/rancher.tf 275 | # echo " image = \"0867ef86-e69d-4aaa-ba3b-8d2aef0c204e\"" >> terraform/rancher.tf 276 | echo " package = \"$(echo $HOST_PACKAGE | sed 's/"//g')\"" >> terraform/rancher.tf 277 | echo "}" >> terraform/rancher.tf 278 | return 279 | fi 280 | if [ $1 == "provider" ]; then 281 | echo "provider \"triton\" {" > terraform/rancher.tf 282 | echo " account = \"$(echo $SDC_ACCOUNT | sed 's/"//g')\"" >> terraform/rancher.tf 283 | echo " key_material = \"\${file(\"$(echo $SDC_KEY | sed 's/"//g')\")}\"" >> terraform/rancher.tf 284 | echo " key_id = \"$(echo $SDC_KEY_ID | sed 's/"//g')\"" >> terraform/rancher.tf 285 | echo " url = \"$(echo $SDC_URL | sed 's/"//g')\"" >> terraform/rancher.tf 286 | echo "}" >> terraform/rancher.tf 287 | return 288 | fi 289 | echo "error: problem updating terraform configuration..." 
290 | exit 1 291 | } 292 | setConfigToFile() { 293 | echo "RANCHER_MASTER_NETWORKS=$RANCHER_MASTER_NETWORKS" >> config 294 | echo "KUBERNETES_NODE_NETWORKS=$KUBERNETES_NODE_NETWORKS" >> config 295 | echo "KUBERNETES_NUMBER_OF_NODES=$KUBERNETES_NUMBER_OF_NODES" >> config 296 | echo "KUBERNETES_NAME=\"$KUBERNETES_NAME\"" >> config 297 | echo "KUBERNETES_DESCRIPTION=\"$KUBERNETES_DESCRIPTION\"" >> config 298 | echo "RANCHER_MASTER_HOSTNAME=\"$RANCHER_MASTER_HOSTNAME\"" >> config 299 | echo "KUBERNETES_NODE_HOSTNAME_BEGINSWITH=\"$KUBERNETES_NODE_HOSTNAME_BEGINSWITH\"" >> config 300 | echo "HOST_PACKAGE=\"$HOST_PACKAGE\"" >> config 301 | } 302 | setConfigFromTritonENV() { 303 | eval "$(triton env)" 304 | echo "SDC_URL=\"$SDC_URL\"" >> config 305 | echo "SDC_ACCOUNT=\"$SDC_ACCOUNT\"" >> config 306 | echo "SDC_KEY_ID=\"$SDC_KEY_ID\"" >> config 307 | 308 | local foundKey=false 309 | for f in $(ls ~/.ssh); do 310 | if [[ "$(ssh-keygen -E md5 -lf ~/.ssh/$(echo $f | sed 's/.pub$//') 2> /dev/null | awk '{print $2}' | sed 's/^MD5://')" == "$SDC_KEY_ID" ]]; then 311 | echo "SDC_KEY=\"~/.ssh/$(echo $f | sed 's/.pub$//')\"" >> config 312 | foundKey=true 313 | break 314 | fi 315 | # old version of ssh-keygen, defaults to md5 316 | if [ "$(ssh-keygen -l -f ~/.ssh/$(echo $f | sed 's/.pub$//') 2> /dev/null | awk '{print $2}' | grep ^SHA256)" == "" ]; then 317 | if [[ "$(ssh-keygen -l -f ~/.ssh/$(echo $f | sed 's/.pub$//') 2> /dev/null | awk '{print $2}')" == "$SDC_KEY_ID" ]]; then 318 | echo "SDC_KEY=\"~/.ssh/$(echo $f | sed 's/.pub$//')\"" >> config 319 | foundKey=true 320 | break 321 | fi 322 | fi 323 | done 324 | if ! "$foundKey" ; then 325 | echo "error: couldn't find the ssh key associated with fingerprint $SDC_KEY_ID in ~/.ssh/ directory..." 326 | echo " Clean the setup and make sure your triton profile is set up." 327 | echo " To confirm your profile is set up, try \`triton info\`." 328 | cleanRunner 329 | exit 1 330 | fi 331 | echo "" >> config 332 | } 333 | setVarDefaults() { 334 | if [ -e config ]; then 335 | echo "error: old configuration found" 336 | cleanRunner 337 | fi 338 | KUBERNETES_NAME="k8s dev" 339 | KUBERNETES_DESCRIPTION=$KUBERNETES_NAME 340 | RANCHER_MASTER_HOSTNAME="kubemaster" 341 | KUBERNETES_NODE_HOSTNAME_BEGINSWITH="kubenode" 342 | KUBERNETES_NUMBER_OF_NODES=1 343 | RANCHER_MASTER_NETWORKS= 344 | KUBERNETES_NODE_NETWORKS= 345 | HOST_PACKAGE= 346 | echo "ANSIBLE_HOST_KEY_CHECKING=False" >> config 347 | } 348 | getConfigFromUser() { 349 | # get networks from the current triton profile to prompt 350 | local networks=$(triton networks -oname,id | grep -v "^NAME.*ID$" | tr -s " " | tr " " "=" | sort) 351 | # get packages for the current triton profile to prompt 352 | local packages=$(triton packages -oname,id | grep "\-kvm-" | grep -v "^NAME.*ID$" | tr -s " " | tr " " "=" | sort) 353 | 354 | local tmp=0 355 | local gotValidInput=false 356 | local tmp_ValidatedInput 357 | echo "---------------" 358 | KUBERNETES_NAME=$(getArgument "Name your Kubernetes environment:" "$(echo $KUBERNETES_NAME | sed 's/"//g')") 359 | echo "---------------" 360 | if [[ $KUBERNETES_DESCRIPTION == "" ]]; then 361 | KUBERNETES_DESCRIPTION=$(getArgument "Describe this Kubernetes environment:" "$(echo $KUBERNETES_NAME | sed 's/"//g')") 362 | else 363 | KUBERNETES_DESCRIPTION=$(getArgument "Describe this Kubernetes environment:" "$(echo $KUBERNETES_DESCRIPTION | sed 's/"//g')") 364 | fi 365 | echo "---------------" 366 | gotValidInput=false 367 | while ! 
$gotValidInput; do
368 | read -p "Would you like HA for Kubernetes Cluster Manager (+3 VMs) (yes | no)? " yn
369 | case $yn in
370 | yes )
371 | RANCHER_HA=true
372 | gotValidInput=true
373 | ;;
374 | no )
375 | RANCHER_HA=false
376 | gotValidInput=true
377 | ;;
378 | * ) echo "Please answer yes or no.";;
379 | esac
380 | done
381 | echo "---------------"
382 | gotValidInput=false
383 | while ! $gotValidInput; do
384 | read -p "Run Kubernetes Management Services on dedicated nodes (+3 VMs for etcd, +3 VMs for K8s services - apiserver/scheduler/controllermanager...) (yes | no)? " yn
385 | case $yn in
386 | yes )
387 | SEPARATE_PLANE=true
388 | gotValidInput=true
389 | # echo "SEPARATE_PLANE: true" >> ansible/roles/ranchermaster/vars/vars.yml
390 | ;;
391 | no )
392 | SEPARATE_PLANE=false
393 | gotValidInput=true
394 | ;;
395 | * ) echo "Please answer yes or no.";;
396 | esac
397 | done
398 | echo "---------------"
399 | gotValidInput=false
400 | while ! $gotValidInput; do
401 | tmp_ValidatedInput=$(getArgument "Hostname of the master:" "$(echo $RANCHER_MASTER_HOSTNAME | sed 's/"//g')")
402 | if [[ $tmp_ValidatedInput =~ ^[a-zA-Z][0-9a-zA-Z]+$ ]]; then
403 | gotValidInput=true
404 | else
405 | echo "error: Enter a valid hostname or leave blank to use the default."
406 | echo "       Must start with a letter and can only include letters and numbers"
407 | fi
408 | done
409 | RANCHER_MASTER_HOSTNAME=$tmp_ValidatedInput
410 | echo "---------------"
411 | gotValidInput=false
412 | while ! $gotValidInput; do
413 | tmp_ValidatedInput=$(getArgument "Enter a prefix to use for the hostnames of all the nodes:" "$(echo $KUBERNETES_NODE_HOSTNAME_BEGINSWITH | sed 's/"//g')")
414 | if [[ $tmp_ValidatedInput =~ ^[a-zA-Z][0-9a-zA-Z]+$ ]]; then
415 | gotValidInput=true
416 | else
417 | echo "error: Enter a valid value or leave blank to use the default."
418 | echo "       Must start with a letter and can only include letters and numbers"
419 | fi
420 | done
421 | KUBERNETES_NODE_HOSTNAME_BEGINSWITH=$tmp_ValidatedInput
422 | echo "---------------"
423 | # HARD LIMIT: 1-9 nodes allowed only since this setup has no HA
424 | gotValidInput=false
425 | while ! $gotValidInput; do
426 | tmp_ValidatedInput=$(getArgument "How many nodes should this Kubernetes cluster have:" "$(echo $KUBERNETES_NUMBER_OF_NODES | sed 's/"//g')")
427 | if [[ $tmp_ValidatedInput =~ ^[1-9]$ ]]; then
428 | gotValidInput=true
429 | else
430 | echo "error: Enter a valid value (1-9) or leave blank to use the default."
431 | fi
432 | done
433 | KUBERNETES_NUMBER_OF_NODES=$tmp_ValidatedInput
434 | echo "---------------"
435 | echo "From the networks below:"
436 | # print options and find location for "Joyent-SDC-Public"
437 | local publicNetworkLocation=1
438 | local countNetwork
439 | tmp=0
440 | for network in $networks; do
441 | tmp=$((tmp + 1))
442 | echo -e "$tmp.\t$(echo $network | sed 's/=/ /g')"
443 | # get default location of public network
444 | if [[ "$network" == "Joyent-SDC-Public="* ]]; then
445 | publicNetworkLocation=$tmp
446 | fi
447 | done
448 | countNetwork=$tmp
449 | 
450 | # set publicNetworkLocation to RANCHER_MASTER_NETWORKS, if it wasn't set already
451 | if [[ $RANCHER_MASTER_NETWORKS == "" ]]; then
452 | RANCHER_MASTER_NETWORKS=$(getNetworkIDs $publicNetworkLocation)
453 | fi
454 | 
455 | # get input for network and validate to make sure the input provided is within the limit (number of networks)
456 | gotValidInput=false
457 | while !
$gotValidInput; do 458 | tmp_RANCHER_MASTER_NETWORKS=$(getArgument "What networks should the master be a part of, provide comma separated values:" "$(echo $RANCHER_MASTER_NETWORKS | sed 's/"//g')") 459 | RANCHER_MASTER_NETWORKS=$(echo $RANCHER_MASTER_NETWORKS | tr ',' '\n' | sort | uniq | tr '\n' ',' | sed 's/\(.*\),$/\1/') 460 | tmp_RANCHER_MASTER_NETWORKS=$(echo $tmp_RANCHER_MASTER_NETWORKS | tr ',' '\n' | sort | uniq | tr '\n' ',' | sed 's/\(.*\),$/\1/') 461 | 462 | # if valid input was given, move forward, else quit 463 | if [[ $(echo $tmp_RANCHER_MASTER_NETWORKS | grep '^[1-9][0-9]\?\(,[1-9][0-9]\?\)*$' 2> /dev/null) ]]; then 464 | gotValidInput=true 465 | for network in $(echo $tmp_RANCHER_MASTER_NETWORKS | tr "," "\n"); do 466 | if [[ "$network" -gt "$countNetwork" || "$network" -lt 1 ]]; then 467 | echo "error: Enter a valid option or leave blank to use the default." 468 | echo " Values should be comma separated between 1 and $countNetwork." 469 | gotValidInput=false 470 | fi 471 | done 472 | 473 | if $gotValidInput; then 474 | RANCHER_MASTER_NETWORKS=$(getNetworkIDs $tmp_RANCHER_MASTER_NETWORKS) 475 | fi 476 | 477 | elif [[ $tmp_RANCHER_MASTER_NETWORKS == $RANCHER_MASTER_NETWORKS ]]; then 478 | gotValidInput=true 479 | else 480 | echo "error: Enter a valid option or leave blank to use the default." 481 | echo " Values should be comma separated between 1 and $countNetwork." 482 | fi 483 | done 484 | echo "---------------" 485 | echo "From the networks below:" 486 | # print options 487 | tmp=0 488 | for network in $networks; do 489 | tmp=$((tmp + 1)) 490 | echo -e "$tmp.\t$(echo $network | sed 's/=/ /g')" 491 | done 492 | 493 | # set publicNetworkLocation to KUBERNETES_NODE_NETWORKS, if it wasn't set already 494 | if [[ $KUBERNETES_NODE_NETWORKS == "" ]]; then 495 | KUBERNETES_NODE_NETWORKS=$(getNetworkIDs $publicNetworkLocation) 496 | fi 497 | 498 | # get input for network and validate to make sure the input provided is within the limit (number of networks) 499 | gotValidInput=false 500 | while ! $gotValidInput; do 501 | tmp_KUBERNETES_NODE_NETWORKS=$(getArgument "What networks should the nodes be a part of, provide comma separated values:" "$(echo $KUBERNETES_NODE_NETWORKS | sed 's/"//g')") 502 | KUBERNETES_NODE_NETWORKS=$(echo $KUBERNETES_NODE_NETWORKS | tr ',' '\n' | sort | uniq | tr '\n' ',' | sed 's/\(.*\),$/\1/') 503 | tmp_KUBERNETES_NODE_NETWORKS=$(echo $tmp_KUBERNETES_NODE_NETWORKS | tr ',' '\n' | sort | uniq | tr '\n' ',' | sed 's/\(.*\),$/\1/') 504 | 505 | # if valid input was given, move forward, else quit 506 | if [[ $(echo $tmp_KUBERNETES_NODE_NETWORKS | grep '^[1-9][0-9]\?\(,[1-9][0-9]\?\)*$' 2> /dev/null) ]]; then 507 | gotValidInput=true 508 | for network in $(echo $tmp_KUBERNETES_NODE_NETWORKS | tr "," "\n"); do 509 | if [[ "$network" -gt "$countNetwork" || "$network" -lt 1 ]]; then 510 | echo "error: Enter a valid option or leave blank to use the default." 511 | echo " Values should be comma separated between 1 and $countNetwork." 512 | gotValidInput=false 513 | fi 514 | done 515 | 516 | if $gotValidInput; then 517 | KUBERNETES_NODE_NETWORKS=$(getNetworkIDs $tmp_KUBERNETES_NODE_NETWORKS) 518 | fi 519 | 520 | elif [[ $tmp_KUBERNETES_NODE_NETWORKS == $KUBERNETES_NODE_NETWORKS ]]; then 521 | gotValidInput=true 522 | else 523 | echo "error: Enter a valid option or leave blank to use the default." 524 | echo " Values should be comma separated between 1 and $countNetwork." 
525 | fi
526 | done
527 | echo "---------------"
528 | echo "From the packages below:"
529 | # print options and find location for "k4-highcpu-kvm-7.75G"
530 | local packageLocation=1
531 | local countPackages
532 | tmp=0
533 | for package in $packages; do
534 | tmp=$((tmp + 1))
535 | echo -e "$tmp.\t$(echo $package | sed 's/=/ /g')"
536 | # get default location of package
537 | if [[ "$package" == "k4-highcpu-kvm-7.75G="* ]]; then
538 | packageLocation=$tmp
539 | fi
540 | done
541 | countPackages=$tmp
542 | 
543 | # set packageLocation to HOST_PACKAGE, if it wasn't set already
544 | if [[ $HOST_PACKAGE == "" ]]; then
545 | HOST_PACKAGE=$(getPackageID $packageLocation)
546 | fi
547 | 
548 | # get input for package and validate to make sure the input provided is within the limit (number of packages)
549 | gotValidInput=false
550 | while ! $gotValidInput; do
551 | tmp_HOST_PACKAGE=$(getArgument "What KVM package should the master and nodes run on:" "$(echo $HOST_PACKAGE | sed 's/"//g')")
552 | 
553 | # if valid input was given, move forward, else quit
554 | if [[ $(echo $tmp_HOST_PACKAGE | grep '^[1-9][0-9]*$' 2> /dev/null) ]]; then
555 | gotValidInput=true
556 | for package in $(echo $tmp_HOST_PACKAGE | tr "," "\n"); do
557 | if [[ "$package" -gt "$countPackages" || "$package" -lt 1 ]]; then
558 | echo "error: Enter a valid option or leave blank to use the default."
559 | echo "       Value should be between 1 and $countPackages."
560 | gotValidInput=false
561 | fi
562 | done
563 | 
564 | if $gotValidInput; then
565 | HOST_PACKAGE=$(getPackageID $tmp_HOST_PACKAGE)
566 | # echo "entered $tmp_HOST_PACKAGE and got $HOST_PACKAGE"
567 | fi
568 | 
569 | elif [[ $tmp_HOST_PACKAGE == $(echo $HOST_PACKAGE | sed 's/"//g') ]]; then
570 | gotValidInput=true
571 | else
572 | echo "error: Enter a valid option or leave blank to use the default."
573 | echo "       Value should be between 1 and $countPackages."
574 | fi
575 | done
576 | HOST_PACKAGE=$(echo $HOST_PACKAGE | sed 's/"//g')
577 | }
578 | verifyConfig() {
579 | echo "################################################################################"
580 | echo "Verify that the following configuration is correct:"
581 | echo ""
582 | echo "Name of Kubernetes environment: $KUBERNETES_NAME"
583 | echo "Kubernetes environment description: $KUBERNETES_DESCRIPTION"
584 | if $RANCHER_HA; then
585 | echo "Cluster Manager hosts: ${RANCHER_MASTER_HOSTNAME}1, ${RANCHER_MASTER_HOSTNAME}2, ${RANCHER_MASTER_HOSTNAME}db"
586 | else
587 | echo "Master hostname: $RANCHER_MASTER_HOSTNAME"
588 | fi
589 | if $SEPARATE_PLANE; then
590 | echo "   There will be three nodes created for etcd and three for Kubernetes HA."
591 | fi
592 | echo "All node hostnames will start with: $KUBERNETES_NODE_HOSTNAME_BEGINSWITH"
593 | echo "Kubernetes environment will have $KUBERNETES_NUMBER_OF_NODES nodes"
594 | echo "Master server will be part of these networks: $RANCHER_MASTER_NETWORKS"
595 | echo "Kubernetes nodes will be a part of these networks: $KUBERNETES_NODE_NETWORKS"
596 | echo "This package will be used for all the hosts: $HOST_PACKAGE"
597 | echo ""
598 | echo "Make sure the above information is correct before answering:"
599 | echo "   to view the list of networks, run \"triton networks -l\""
600 | echo "   to view the list of packages, run \"triton packages -l\""
601 | echo "WARN: Make sure that the nodes and master are on networks that can communicate with each other and with the system from which this setup is running."
602 | 
603 | 
604 | while true; do
605 | read -p "Is the above config correct (yes | no)? " yn
606 | case $yn in
607 | yes )
608 | break
609 | ;;
610 | no )
611 | exit 0
612 | ;;
613 | * ) echo "Please answer yes or no.";;
614 | esac
615 | done
616 | }
617 | cleanRunner() {
618 | echo "Clearing settings..."
619 | while true; do
620 | if [ -e terraform/masters.ip ]; then
621 | echo "WARNING: You are about to destroy KVMs associated with this Rancher cluster."
622 | 
623 | read -p "Do you wish to destroy the KVMs and reset configuration (yes | no)? " yn
624 | else
625 | read -p "Do you wish to reset configuration (yes | no)? " yn
626 | fi
627 | case $yn in
628 | yes )
629 | if [ -e terraform/rancher.tf ]; then
630 | cd terraform
631 | echo "   destroying KVMs..."
632 | terraform destroy -force 2> /dev/null || true
633 | cd ..
634 | fi
635 | if [[ -e terraform/hosts.ip && -e terraform/masters.ip && -e ~/.ssh/known_hosts ]]; then
636 | for host_key in $(cat terraform/hosts.ip terraform/masters.ip); do
637 | ssh-keygen -R $host_key >> /dev/null 2>&1
638 | done
639 | fi
640 | rm -rf terraform/hosts.ip terraform/masters.ip terraform/k8setcd.ip terraform/k8sha.ip terraform/mysqldb.ip terraform/terraform.* terraform/.terraform* terraform/rancher.tf >> /dev/null 2>&1
641 | rm -rf ansible/roles/ranchermaster/vars/vars.yml ansible/tmp/kubernetes_* >> /dev/null 2>&1
642 | echo 'master: 64.30.129.229' > ansible/roles/ranchermaster/vars/vars.yml
643 | echo 'kubernetes_name: "k8s dev"' >> ansible/roles/ranchermaster/vars/vars.yml
644 | echo 'kubernetes_description: "k8s dev"' >> ansible/roles/ranchermaster/vars/vars.yml
645 | sed "s~private_key_file = .*$~private_key_file = ~g" ansible/ansible.cfg > tmp && mv tmp ansible/ansible.cfg
646 | rm -f ansible/hosts ansible/*retry ansible/ansible.cfg.tmp >> /dev/null 2>&1
647 | rm -rf config tmp/* >> /dev/null 2>&1
648 | 
649 | echo "   All clear!"
650 | return;;
651 | no ) exit;;
652 | * ) echo "Please answer yes or no.";;
653 | esac
654 | done
655 | }
656 | debugVars() {
657 | echo "KUBERNETES_NAME=$KUBERNETES_NAME"
658 | echo "KUBERNETES_DESCRIPTION=$KUBERNETES_DESCRIPTION"
659 | echo "RANCHER_MASTER_HOSTNAME=$RANCHER_MASTER_HOSTNAME"
660 | echo "KUBERNETES_NODE_HOSTNAME_BEGINSWITH=$KUBERNETES_NODE_HOSTNAME_BEGINSWITH"
661 | echo "KUBERNETES_NUMBER_OF_NODES=$KUBERNETES_NUMBER_OF_NODES"
662 | echo "RANCHER_MASTER_NETWORKS=$RANCHER_MASTER_NETWORKS"
663 | echo "KUBERNETES_NODE_NETWORKS=$KUBERNETES_NODE_NETWORKS"
664 | echo "HOST_PACKAGE=$HOST_PACKAGE"
665 | }
666 | getNetworkIDs() {
667 | values=$(echo $1 | tr "," " ")
668 | local networks
669 | for network in $values; do
670 | networks="$networks,$(triton networks -oname,id | sort | grep -v "^NAME *ID$" | sed -n "$network"p | awk 'NF>1{print $NF}')"
671 | done
672 | echo "$networks" | sed 's/^,\(.*\)$/\1/' | sed 's/\(.*\),$/\1/'
673 | }
674 | getPackageID() {
675 | echo "$(triton packages -oname,id | grep "\-kvm-" | grep -v "^NAME.*ID$" | tr -s " " | sort | sed -n "$1"p | awk 'NF>1{print $NF}')"
676 | }
677 | exportVars() {
678 | grep -v "^$" config > config.tmp
679 | while read line; do
680 | export "$line"
681 | done