├── .editorconfig ├── .gitignore ├── LICENSE ├── README.md ├── ansible.cfg ├── book.json ├── docker-compose.yml ├── docs ├── README.md ├── addons │ └── dashboard.md ├── getting-started-guides │ ├── aws │ │ └── public.md │ ├── digitalocean.md │ └── docker-compose.md └── roadmap.md ├── group_vars └── all.yml ├── helm.yml ├── inventory ├── inventory └── terraform.py ├── library └── kube.py ├── package.json ├── playbooks └── coreos-bootstrap.yml ├── requirements-test.txt ├── requirements.txt ├── requirements.yml ├── roles ├── addons │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── dashboard-deployment.yaml │ │ ├── dashboard-svc.yaml │ │ ├── es-controller.yaml │ │ ├── es-service.yaml │ │ ├── fluentd-logging-ds.yaml │ │ ├── grafana-service.yaml │ │ ├── heapster-service.yaml │ │ ├── influxdb-grafana-controller.yaml │ │ ├── influxdb-service.yaml │ │ ├── kibana-controller.yaml │ │ ├── kibana-service.yaml │ │ ├── kube-system.yaml │ │ ├── traefik-ingress-controller.yaml │ │ └── traefik.toml │ ├── tasks │ │ ├── dashboard.yml │ │ ├── logging.yml │ │ ├── main.yml │ │ ├── monitoring.yml │ │ ├── registry.yml │ │ ├── skydns.yml │ │ └── traefik.yml │ └── templates │ │ ├── heapster-controller.yaml.j2 │ │ ├── skydns-rc.yaml.j2 │ │ └── skydns-svc.yaml.j2 ├── docker │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── config.json.j2 │ │ └── docker.service.j2 ├── handlers │ └── handlers │ │ └── main.yml ├── helm │ ├── LICENSE │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── kube-master │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── calico-system.yaml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── calico.yml │ │ └── main.yml │ ├── templates │ │ ├── 10-calico.conf.j2 │ │ ├── calico-node.service.j2 │ │ ├── kube-apiserver.yaml.j2 │ │ ├── kube-controller-manager.yaml.j2 │ │ ├── kube-proxy.yaml.j2 │ │ ├── kube-scheduler.yaml.j2 │ │ ├── kubelet.service.j2 │ │ ├── master-kubeconfig.j2 │ │ └── policy-controller.yaml.j2 │ └── vars │ │ ├── aws.yml │ │ ├── default.yml │ │ └── digitalocean.yml ├── kube-worker │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── calico.yml │ │ └── main.yml │ ├── templates │ │ ├── 10-calico.conf.j2 │ │ ├── calico-node.service.j2 │ │ ├── kube-proxy.yaml.j2 │ │ ├── kubelet.service.j2 │ │ └── worker-kubeconfig.j2 │ └── vars │ │ ├── aws.yml │ │ ├── default.yml │ │ └── digitalocean.yml └── kubectl │ ├── LICENSE │ ├── README.md │ ├── defaults │ └── main.yml │ ├── handlers │ └── main.yml │ ├── meta │ └── main.yml │ ├── tasks │ └── main.yml │ └── templates │ └── config.j2 ├── site.yml ├── terraform ├── aws │ ├── elb │ │ └── main.tf │ ├── iam │ │ ├── edge-router-policy.json │ │ ├── edge-router-role.json │ │ ├── main.tf │ │ ├── master-policy.json │ │ ├── master-role.json │ │ ├── worker-policy.json │ │ └── worker-role.json │ ├── keypair │ │ └── main.tf │ ├── private-cloud │ │ ├── .gitignore │ │ ├── bastion-cloud-config.yml.tpl │ │ ├── bastion-server.tf │ │ ├── bin │ │ │ ├── ovpn-client-config │ │ │ ├── ovpn-init │ │ │ ├── ovpn-new-client │ │ │ └── ovpn-start │ │ ├── etcd_discovery_url.txt │ │ ├── main.tf │ │ ├── master-cloud-config.yml.tpl │ │ ├── masters.tf │ │ ├── security_groups.tf │ │ ├── vpc │ │ │ └── main.tf │ │ ├── worker-cloud-config.yml.tpl │ │ └── workers.tf │ ├── public-cloud │ │ ├── .gitignore │ │ ├── 
edge-routers.tf │ │ ├── etcd_discovery_url.txt │ │ ├── main.tf │ │ ├── master-cloud-config.yml.tpl │ │ ├── masters.tf │ │ ├── worker-cloud-config.yml.tpl │ │ └── workers.tf │ └── sg-all-traffic │ │ └── main.tf ├── certs │ └── etcd │ │ └── main.tf ├── digitalocean │ ├── .gitignore │ ├── edge-router-cloud-config.yml.tpl │ ├── etcd_discovery_url.txt │ ├── main.tf │ ├── master-cloud-config.yml.tpl │ └── worker-cloud-config.yml.tpl ├── gce │ └── main.tf └── scripts │ └── coreos │ ├── bootstrap.sh │ ├── get-pip.py │ └── runner └── wercker.yml /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: http://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | charset = utf-8 9 | end_of_line = lf 10 | insert_final_newline = true 11 | trim_trailing_whitespace = true 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vagrant 3 | *.box 4 | *.tfstate* 5 | *.tfvars 6 | *.tfplan 7 | .wercker 8 | *.cache 9 | ssh.config 10 | vendor 11 | .bundle 12 | .kitchen 13 | terraform/**/.terraform 14 | terraform/**/etcd_discovery_url.txt 15 | site.retry 16 | roles/coreos_timezone 17 | roles/coreos_bootstrap 18 | _builds 19 | _projects 20 | _steps 21 | _book 22 | node_modules 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2015 Capgemini 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Kubeform 2 | ======== 3 | [![wercker 4 | status](https://app.wercker.com/status/d51be2fb5ae796055969b74d7924a059/s/master 5 | "wercker 6 | status")](https://app.wercker.com/project/bykey/d51be2fb5ae796055969b74d7924a059) 7 | 8 | Deploy yourself a high-availability Kubernetes cluster, in minutes. 9 | Built on Terraform, CoreOS and Ansible. 10 | 11 | Our recipes for bootstrapping HA Kubernetes clusters on any cloud or on-premise. 
12 | 13 | Includes the following - 14 | 15 | * CoreOS as the base operating system 16 | * Kubernetes (in HA) mode (leader election using Podmaster) 17 | * SSL certs/security for Kubernetes cluster components 18 | * Flannel for networking 19 | * Kubernetes Dashboard 20 | * Sky/KubeDNS 21 | 22 | and optionally - 23 | 24 | * Prometheus for cluster monitoring (coming soon!) 25 | * Fluentd, elasticsearch for cluster logging 26 | * [Traefik](https://docs.traefik.io/toml/#kubernetes-ingress-backend) as the ingress controller for the edge-routers. For configuring it to use [letsencrypt](https://letsencrypt.org/) you can [edit this file](https://github.com/Capgemini/kubeform/blob/master/roles/addons/files/traefik.toml). 27 | 28 | See our [Roadmap](/docs/roadmap.md) for future features and feel free to help us improve the project 29 | by contributing a Pull Request, or raise an issue if you run into trouble! 30 | 31 | ## Getting started 32 | 33 | Check out the instructions for provisioning on different clouds including: 34 | 35 | * [AWS](/docs/getting-started-guides/aws/public.md) 36 | * [Digitalocean](/docs/getting-started-guides/digitalocean.md) 37 | * [Local Docker Compose](/docs/getting-started-guides/docker-compose.md) 38 | 39 | ## Demo 40 | 41 | [Check out this demo deploying and scaling the backends for the ingress-controller on AWS and DigitalOcean](https://www.youtube.com/watch?v=Ejc5rKTzHiQ) 42 | 43 | ## Keep up to date... 44 | 45 | Check out the [Capgemini UK Engineering blog](http://capgemini.github.io/) to find out more about the stuff we do! 46 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = no 3 | record_host_keys = no 4 | jinja2_extensions = jinja2.ext.do 5 | timeout = 15 6 | gathering = smart 7 | roles_path = roles 8 | forks = 100 9 | library = ./library 10 | 11 | [ssh_connection] 12 | ssh_args = -o ControlMaster=no -o ControlPersist=30m 13 | scp_if_ssh = True 14 | pipelining = True 15 | -------------------------------------------------------------------------------- /book.json: -------------------------------------------------------------------------------- 1 | { 2 | "gitbook": "2.4.3", 3 | "structure": { 4 | "summary": "docs/README.md" 5 | }, 6 | "plugins": ["edit-link", "prism", "-highlight", "github"], 7 | "pluginsConfig": { 8 | "edit-link": { 9 | "base": "https://github.com/Capgemini/kubeform/tree/master", 10 | "label": "Edit This Page" 11 | }, 12 | "github": { 13 | "url": "https://github.com/Capgemini/kubeform/" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | etcd: 4 | image: gcr.io/google_containers/etcd:2.0.12 5 | network_mode: host 6 | container_name: etcd 7 | command: ['/usr/local/bin/etcd', '--bind-addr=0.0.0.0:4001', '--data-dir=/var/etcd/data'] 8 | 9 | apiserver: 10 | image: gcr.io/google_containers/hyperkube:v${KUBERNETES_VERSION} 11 | network_mode: host 12 | container_name: apiserver 13 | ports: 14 | - "8080" 15 | command: ["/hyperkube", "apiserver", "--service-cluster-ip-range=172.17.17.1/24", "--insecure-bind-address=127.0.0.1", "--insecure-port=8080", "--etcd_servers=http://127.0.0.1:4001", "--v=2"] 16 | 17 | controller: 18 | image: gcr.io/google_containers/hyperkube:v${KUBERNETES_VERSION} 19 | 
network_mode: host 20 | command: ["/hyperkube", "controller-manager", "--address=0.0.0.0", "--master=http://127.0.0.1:8080", "--v=2"] 21 | 22 | scheduler: 23 | image: gcr.io/google_containers/hyperkube:v${KUBERNETES_VERSION} 24 | network_mode: host 25 | command: ["/hyperkube", "scheduler", "--address=0.0.0.0", "--master=http://127.0.0.1:8080", "--v=2"] 26 | 27 | kubelet: 28 | image: gcr.io/google_containers/hyperkube:v${KUBERNETES_VERSION} 29 | network_mode: host 30 | command: ['/hyperkube', 'kubelet', '--containerized' , '--api_servers=http://127.0.0.1:8080', '--allow-privileged', '--v=2', '--address=0.0.0.0', '--enable_server', '--hostname_override=127.0.0.1', '--config=/etc/kubernetes/manifests'] 31 | volumes: 32 | - /:/rootfs:ro 33 | - /sys:/sys:ro 34 | - /dev:/dev 35 | - /var/run/docker.sock:/var/run/docker.sock 36 | - /var/lib/docker/:/var/lib/docker:ro 37 | - /var/lib/kubelet/:/var/lib/kubelet:rw 38 | - /var/run:/var/run:rw 39 | privileged: true 40 | 41 | proxy: 42 | image: gcr.io/google_containers/hyperkube:v${KUBERNETES_VERSION} 43 | network_mode: host 44 | command: ['/hyperkube', 'proxy', '--master=http://127.0.0.1:8080', '--v=2'] 45 | privileged: true 46 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ## Table of Contents 2 | 3 | * [Read Me](/README.md) 4 | * [Getting Started Guides](/docs/getting-started-guides/README.md) 5 | * [DigitalOcean](/docs/getting-started-guides/digitalocean.md) 6 | * [AWS](/docs/getting-started-guides/aws/public.md) 7 | * [Local Docker Compose](/docs/getting-started-guides/docker-compose.md) 8 | * [Roadmap](/docs/roadmap.md) 9 | -------------------------------------------------------------------------------- /docs/addons/dashboard.md: -------------------------------------------------------------------------------- 1 | IN AWS 2 | ======== 3 | * Note: similar instructions should work for DigitalOcean 4 | 5 | After deploying the cluster in AWS, if you run 6 | 7 | ```kubectl cluster-info``` 8 | you should get something like: 9 | ``` 10 | Kubernetes master is running at https://kube-master-61628301.eu-west-1.elb.amazonaws.com 11 | KubeDNS is running at https://kube-master-61628301.eu-west-1.elb.amazonaws.com/api/v1/proxy/namespaces/kube-system/services/kube-dns 12 | kubernetes-dashboard is running at https://kube-master-61628301.eu-west-1.elb.amazonaws.com/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard 13 | ``` 14 | You can access the dashboard through the ELB load balancer over HTTPS at 15 | ``` 16 | https://kube-master-61628301.eu-west-1.elb.amazonaws.com/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard 17 | ``` 18 | or 19 | ``` 20 | https://kube-master-61628301.eu-west-1.elb.amazonaws.com/ui 21 | ``` 22 | It will ask you for credentials. If you SSH into any of the masters you will find the default credentials in ``` /etc/kubernetes/users/known_users.csv ``` 23 | 24 | user: kube 25 | 26 | pass: changeme 27 | 28 | These should just work. 29 | 30 | Also, as we are currently binding the insecure address to 0.0.0.0 on port 8080 31 | (see https://github.com/Capgemini/kubeform/blob/master/roles/kube-master/templates/kube-apiserver.yaml.j2#L17) 32 | you can also access it over HTTP at: 33 | ``` http://master-ip:8080/ui ``` 34 | 35 | In .kube/config we handle the user and credentials for interacting with the cluster via the API using client certificate authentication. 
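For example, you can check which cluster, user and client certificate kubectl has been configured with, and confirm that the certificate-based credentials are accepted by the API server. This is a minimal sketch, assuming `kubectl` is installed locally and the generated `~/.kube/config` is in place:

```
# Show the active context and the client-certificate/client-key entries in ~/.kube/config
kubectl config view --minify

# A request that exercises client certificate authentication end to end
kubectl get nodes
```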
36 | 37 | In a real environment you will probably want to create a CNAME record to route queries from your domain name to the ELB: 38 | http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/using-domain-names-with-elb.html 39 | -------------------------------------------------------------------------------- /docs/getting-started-guides/aws/public.md: -------------------------------------------------------------------------------- 1 | # Getting started on AWS 2 | 3 | The cluster is provisioned in separate stages as follows: 4 | 5 | * [Terraform](https://terraform.io) to provision the cluster instances, security groups, firewalls, cloud infrastructure and security (SSL) certificates. 6 | * [Ansible](https://ansible.com) to configure the cluster, including installing Kubernetes, addons and platform level configuration (files, directories etc...) 7 | 8 | ## Prerequisites 9 | 10 | 1. You need an AWS account. Visit [http://aws.amazon.com](http://aws.amazon.com) to get started 11 | 2. You need an AWS [instance profile and role](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) with EC2 full access. 12 | 3. You need to have installed and configured Terraform (>= 0.7.11 required). Visit [https://www.terraform.io/intro/getting-started/install.html](https://www.terraform.io/intro/getting-started/install.html) to get started. 13 | 4. You need to have [Python](https://www.python.org/) >= 2.7.5 installed along with [pip](https://pip.pypa.io/en/latest/installing.html). 14 | 5. kubectl installed and available in your PATH: 15 | 16 | ``` 17 | curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kubectl 18 | ``` 19 | 20 | On an OS X workstation, replace linux in the URL above with darwin: 21 | 22 | ``` 23 | curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/darwin/amd64/kubectl 24 | ``` 25 | After downloading the binary, ensure it is executable and move it into your PATH: 26 | 27 | ``` 28 | chmod +x kubectl 29 | mv kubectl /usr/local/bin/kubectl 30 | ``` 31 | 32 | ## Cluster Turnup 33 | 34 | ### Download Kubeform (install from source at head) 35 | ``` 36 | git clone https://github.com/Capgemini/kubeform.git /tmp/kubeform 37 | cd /tmp/kubeform 38 | pip install -r requirements.txt 39 | ``` 40 | 41 | ### Set config 42 | 43 | Configuration can be set via environment variables. 
As a minimum you will need to set these environment variables: 44 | 45 | ``` 46 | export TF_VAR_access_key=$AWS_ACCESS_KEY_ID 47 | export TF_VAR_secret_key=$AWS_SECRET_ACCESS_KEY 48 | export TF_VAR_STATE_ROOT=/tmp/kubeform/terraform/aws/public-cloud 49 | ``` 50 | 51 | ### Get terraform dependencies 52 | 53 | ``` 54 | cd /tmp/kubeform/terraform/aws/public-cloud 55 | terraform get 56 | for i in $(ls .terraform/modules/*/Makefile); do i=$(dirname $i); make -C $i; done 57 | ``` 58 | 59 | ### Provision the cluster infrastructure 60 | 61 | ``` 62 | terraform apply 63 | ``` 64 | 65 | ### Configure the cluster 66 | 67 | To install the role dependencies for Ansible execute: 68 | 69 | ``` 70 | cd /tmp/kubeform 71 | ansible-galaxy install -r requirements.yml 72 | ``` 73 | 74 | To run the Ansible playbook (to configure the cluster): 75 | 76 | ``` 77 | ansible-playbook -u core --ssh-common-args="-i /tmp/kubeform/terraform/aws/public-cloud/id_rsa -q" --inventory-file=inventory site.yml -e kube_apiserver_vip=$(cd /tmp/kubeform/terraform/aws/public-cloud && terraform output master_elb_hostname) 78 | ``` 79 | 80 | This will run the playbook (using the credentials output by terraform and the terraform state as a dynamic inventory) and inject the AWS ELB (for the master API servers) address as a variable ```kube_apiserver_vip```. 81 | 82 | ## Cluster Destroy 83 | 84 | ``` 85 | cd /tmp/kubeform/terraform/aws/public-cloud 86 | terraform destroy 87 | ``` 88 | -------------------------------------------------------------------------------- /docs/getting-started-guides/digitalocean.md: -------------------------------------------------------------------------------- 1 | # Getting started on Digitalocean 2 | 3 | The cluster is provisioned in separate stages as follows: 4 | 5 | * [Terraform](https://terraform.io) to provision the cluster instances, security groups, firewalls, cloud infrastructure and security (SSL) certificates. 6 | * [Ansible](https://ansible.com) to configure the cluster, including installing Kubernetes, addons and platform level configuration (files, directories etc...) 7 | 8 | ## Prerequisites 9 | 10 | 1. You need a Digitalocean account. Visit [https://cloud.digitalocean.com/registrations/new](https://cloud.digitalocean.com/registrations/new) to get started 11 | 2. You need to have installed and configured Terraform (>= 0.6.16 recommended). Visit [https://www.terraform.io/intro/getting-started/install.html](https://www.terraform.io/intro/getting-started/install.html) to get started. 12 | 3. You need to have [Python](https://www.python.org/) >= 2.7.5 installed along with [pip](https://pip.pypa.io/en/latest/installing.html). 13 | 4. kubectl installed and available in your PATH: 14 | 15 | ``` 16 | curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.3/bin/linux/amd64/kubectl 17 | ``` 18 | 19 | On an OS X workstation, replace linux in the URL above with darwin: 20 | 21 | ``` 22 | curl -O https://storage.googleapis.com/kubernetes-release/release/v1.2.3/bin/darwin/amd64/kubectl 23 | ``` 24 | After downloading the binary, ensure it is executable and move it into your PATH: 25 | 26 | ``` 27 | chmod +x kubectl 28 | mv kubectl /usr/local/bin/kubectl 29 | ``` 30 | 31 | ## Cluster Turnup 32 | 33 | ### Download Kubeform (install from source at head) 34 | ``` 35 | git clone https://github.com/Capgemini/kubeform.git /tmp/kubeform 36 | cd /tmp/kubeform 37 | pip install -r requirements.txt 38 | ``` 39 | 40 | ### Set config 41 | 42 | Configuration can be set via environment variables. 
As a minimum you will need to set these environment variables: 43 | 44 | ``` 45 | export TF_VAR_do_token=$DO_API_TOKEN 46 | export TF_VAR_STATE_ROOT=/tmp/kubeform/terraform/digitalocean 47 | ``` 48 | 49 | ### Get terraform dependencies 50 | 51 | ``` 52 | cd /tmp/kubeform/terraform/digitalocean 53 | terraform get 54 | ``` 55 | 56 | ### Provision the cluster infrastructure 57 | 58 | ``` 59 | terraform apply 60 | ``` 61 | 62 | ### Configure the cluster 63 | 64 | To install the role dependencies for Ansible execute: 65 | 66 | ``` 67 | cd /tmp/kubeform 68 | ansible-galaxy install -r requirements.yml 69 | ``` 70 | 71 | To run the Ansible playbook (to configure the cluster): 72 | 73 | ``` 74 | ansible-playbook -u core --ssh-common-args="-i /tmp/kubeform/terraform/digitalocean/id_rsa -q" --inventory-file=inventory site.yml 75 | ``` 76 | 77 | This will run the playbook (using the credentials output by terraform and the terraform state as a dynamic inventory). 78 | 79 | ## Cluster Destroy 80 | 81 | ``` 82 | cd /tmp/kubeform/terraform/digitalocean 83 | terraform destroy 84 | ``` 85 | -------------------------------------------------------------------------------- /docs/getting-started-guides/docker-compose.md: -------------------------------------------------------------------------------- 1 | # Getting started locally with Docker Compose 2 | 3 | ## Prerequisites 4 | 5 | 1. [Docker for Mac](https://beta.docker.com/) or [Docker Toolbox for Windows](https://docs.docker.com/engine/installation/windows/) or [Docker Toolbox for OSX](https://docs.docker.com/engine/installation/mac/) depending on your platform. 6 | 7 | ## Cluster Turnup 8 | 9 | ### Download Kubeform (install from source at head) 10 | ``` 11 | git clone https://github.com/Capgemini/kubeform.git /tmp/kubeform 12 | cd /tmp/kubeform 13 | 14 | ``` 15 | 16 | To start up the Docker containers execute: 17 | 18 | ``` 19 | KUBERNETES_VERSION=1.2.4 docker-compose up -d 20 | ``` 21 | 22 | replace with the relevant Kubernetes version. 
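Once the containers are up you can point kubectl at the local insecure API endpoint to check that the components have registered. This is a quick sanity check, assuming kubectl is on your PATH and port 8080 is reachable from where you run it (on Docker Toolbox setups you may need to run it inside the Docker Machine VM):

```
# The apiserver container listens on the insecure port 8080 on the host network
kubectl -s http://127.0.0.1:8080 get nodes
kubectl -s http://127.0.0.1:8080 get pods --all-namespaces
```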
23 | 24 | ## Cluster Destroy 25 | 26 | ``` 27 | cd /tmp/kubeform/ 28 | docker-compose stop && docker-compose rm 29 | ``` 30 | -------------------------------------------------------------------------------- /docs/roadmap.md: -------------------------------------------------------------------------------- 1 | Kubeform 2 | ======== 3 | 4 | Core 5 | -------- 6 | - [ ] High Availability Kubernetes 7 | - [x] Kubernetes Dashboard 8 | - [x] SkyDNS 9 | - [ ] Networking 10 | - [x] Docker compose dev cluster 11 | 12 | Providers 13 | --------- 14 | - [ ] Vagrant 15 | - [x] AWS 16 | - [x] Digitalocean 17 | - [ ] Google Cloud 18 | - [ ] Openstack 19 | - [ ] Microsoft Azure 20 | 21 | Operating Systems 22 | ----------------- 23 | - [x] CoreOS 24 | 25 | Features 26 | -------- 27 | - [ ] Monitoring and alerts(nodes/services) 28 | - [ ] Logging (nodes/services) 29 | - [ ] Distributed File storage (for nodes/services) 30 | - [x] Ingress Load balancing 31 | - [ ] Private registry 32 | - [ ] Cluster federation with Ubernetes 33 | - [ ] Kubernetes maintenance 34 | 35 | Security 36 | -------- 37 | - [ ] Certs for all services 38 | - [x] Lets encrypt support 39 | - [ ] IAM (IaaS & Platform) 40 | - [ ] Network policies support 41 | - [ ] Vault 42 | - [ ] Service Accounts & Security Contexts 43 | - [ ] Networking Policy 44 | -------------------------------------------------------------------------------- /group_vars/all.yml: -------------------------------------------------------------------------------- 1 | http_proxy: '' 2 | proxy_env: 3 | http_proxy: "{{ http_proxy }}" 4 | https_proxy: "{{ http_proxy }}" 5 | HTTP_PROXY: "{{ http_proxy }}" 6 | HTTPS_PROXY: "{{ http_proxy }}" 7 | 8 | # For baremetal or onpremise when not running terraform 9 | onpremise: false 10 | 11 | coreos_timezone: 'Europe/London' 12 | etcd_servers_group: etcd_servers 13 | api_servers_group: masters 14 | 15 | # kubernetes docker image 16 | kubernetes_image: quay.io/coreos/hyperkube 17 | 18 | # The docker container version to use for Kubernetes 19 | kubernetes_version: v1.4.4_coreos.0 20 | 21 | # Kubernetes log level. uses glog. See https://github.com/golang/glog 22 | # 0 - Generally useful for this to ALWAYS be visible to an operator 23 | # - Programmer errors 24 | # - Logging extra info about a panic 25 | # - CLI argument handling 26 | # 1 - A reasonable default log level if you don't want verbosity. 27 | # - Information about config (listening on X, watching Y) 28 | # - Errors that repeat frequently that relate to conditions that can be corrected (pod detected as unhealthy) 29 | # 2 - Useful steady state information about the service and important log messages that may correlate to significant changes in the system. This is the recommended default log level for most systems. 30 | # - Logging HTTP requests and their exit code 31 | # - System state changing (killing pod) 32 | # - Controller state change events (starting pods) 33 | # - Scheduler log messages 34 | # 3 - Extended information about changes 35 | # - More info about system state changes 36 | # 4 - Debug level verbosity (for now) 37 | # - Logging in particularly thorny parts of code where you may want to come back later and check it 38 | kubernetes_log_level: 2 39 | 40 | # The CIDR network to use for pod IPs. Each pod launched in the cluster will be 41 | # assigned an IP out of this range. This network must be routable between all 42 | # hosts in the cluster. In a default installation, the flannel overlay network 43 | # will provide routing to this network. 
44 | kubernetes_pod_network: 10.2.0.0/16 45 | 46 | # The CIDR network to use for service cluster VIPs (Virtual IPs). 47 | # Each service will be assigned a cluster IP out of this range. 48 | # This must not overlap with any IP ranges assigned to the POD_NETWORK, 49 | # or other existing network infrastructure. 50 | # Routing to these VIPs is handled by a local kube-proxy service to each host, 51 | # and are not required to be routable between hosts. 52 | kubernetes_service_ip_range: 10.3.0.0/24 53 | 54 | # The VIP (Virtual IP) address of the Kubernetes API Service. 55 | # If the SERVICE_IP_RANGE is changed above, 56 | # this must be set to the first IP in that range. 57 | kubernetes_service_ip: 10.3.0.1 58 | 59 | # The VIP (Virtual IP) address of the cluster DNS service. 60 | # This IP must be in the range of the SERVICE_IP_RANGE and cannot be the first 61 | # IP in the range. This same IP must be configured on all worker nodes to 62 | # enable DNS service discovery. 63 | kubernetes_dns_service_ip: 10.3.0.10 64 | 65 | # The kubelet wrapper script for use on CoreOS. This makes it easy to run the 66 | # kubelet under rkt (recommended by CoreOS). Ships by default in CoreOS 962.0.0+ 67 | # but we include it here for backwards compatibility in the stable/beta channels. 68 | # See https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html 69 | kubernetes_kubelet_wrapper: https://raw.githubusercontent.com/coreos/coreos-overlay/master/app-admin/kubelet-wrapper/files/kubelet-wrapper 70 | 71 | # kubernetes config dirs (used by master and worker) 72 | kubernetes_config_dir: /etc/kubernetes 73 | kubernetes_cert_dir: "{{ kubernetes_config_dir }}/ssl" 74 | kubernetes_manifest_dir: "{{ kubernetes_config_dir }}/manifests" 75 | kubernetes_addons_dir: "{{ kubernetes_config_dir }}/addons" 76 | kubernetes_users_dir: "{{ kubernetes_config_dir }}/users" 77 | kubernetes_cni_dir: "{{ kubernetes_config_dir }}/cni/net.d" 78 | 79 | # Network CNI plugin to use. Supported: 80 | # - Calico 81 | # - Weave 82 | # - Romana - Coming soon... 83 | # - OpenContrail - Coming soon... 84 | network_plugin: 'calico' 85 | 86 | # Whether to enable the kubernetes-dashboard 87 | enable_dashboard: true 88 | enable_logging: false 89 | enable_monitoring: false 90 | 91 | # DNS domain for SkyDNS + Kubelet 92 | dns_domain: cluster.local 93 | 94 | # Choose a specific ingress-controller. Supported: 95 | # - traefik 96 | # - nginx - Coming soon... 97 | # - vamp - Coming soon... 
98 | ingress_controller: "traefik" 99 | -------------------------------------------------------------------------------- /helm.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | roles: 3 | - helm 4 | -------------------------------------------------------------------------------- /inventory/inventory: -------------------------------------------------------------------------------- 1 | [role=masters] 2 | 3 | [role=workers] 4 | 5 | [role=edge-routers] 6 | 7 | [masters:children] 8 | role=masters 9 | 10 | [workers:children] 11 | role=workers 12 | 13 | [edge-routers:children] 14 | role=edge-routers 15 | 16 | [all:children] 17 | masters 18 | workers 19 | edge-routers 20 | 21 | [all:vars] 22 | ansible_python_interpreter="PATH=/home/core/bin:$PATH python" 23 | 24 | [etcd_servers:children] 25 | masters 26 | -------------------------------------------------------------------------------- /inventory/terraform.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright 2015 Cisco Systems, Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | """\ 17 | Dynamic inventory for Terraform - finds all `.tfstate` files below the working 18 | directory and generates an inventory based on them. 19 | """ 20 | from __future__ import unicode_literals, print_function 21 | import argparse 22 | from collections import defaultdict 23 | from functools import wraps 24 | import json 25 | import os 26 | import re 27 | import yaml 28 | 29 | VERSION = '0.3.0pre' 30 | 31 | 32 | def tfstates(root=None): 33 | root = root or os.getcwd() 34 | for dirpath, _, filenames in os.walk(root): 35 | for name in filenames: 36 | if os.path.splitext(name)[-1] == '.tfstate': 37 | yield os.path.join(dirpath, name) 38 | 39 | 40 | def iterresources(filenames): 41 | for filename in filenames: 42 | with open(filename, 'r') as json_file: 43 | state = json.load(json_file) 44 | for module in state['modules']: 45 | name = module['path'][-1] 46 | for key, resource in module['resources'].items(): 47 | yield name, key, resource 48 | 49 | ## READ RESOURCES 50 | PARSERS = {} 51 | 52 | 53 | def _clean_dc(dcname): 54 | # Consul DCs are strictly alphanumeric with underscores and hyphens - 55 | # ensure that the consul_dc attribute meets these requirements. 
56 | return re.sub('[^\w_\-]', '-', dcname) 57 | 58 | 59 | def iterhosts(resources): 60 | '''yield host tuples of (name, attributes, groups)''' 61 | for module_name, key, resource in resources: 62 | resource_type, name = key.split('.', 1) 63 | try: 64 | parser = PARSERS[resource_type] 65 | except KeyError: 66 | continue 67 | 68 | yield parser(resource, module_name) 69 | 70 | 71 | def parses(prefix): 72 | def inner(func): 73 | PARSERS[prefix] = func 74 | return func 75 | 76 | return inner 77 | 78 | 79 | def calculate_mi_vars(func): 80 | """calculate microservices-infrastructure vars""" 81 | 82 | @wraps(func) 83 | def inner(*args, **kwargs): 84 | name, attrs, groups = func(*args, **kwargs) 85 | 86 | # groups 87 | if attrs.get('publicly_routable', False): 88 | groups.append('publicly_routable') 89 | 90 | return name, attrs, groups 91 | 92 | return inner 93 | 94 | 95 | def _get_ignore_blank(obj, key, default=None): 96 | """ 97 | Get a key in an object, but treat blank string as missing value. 98 | """ 99 | v = obj.get(key, default) 100 | if v == "": 101 | return default 102 | return v 103 | 104 | 105 | def _parse_prefix(source, prefix, sep='.'): 106 | for compkey, value in source.items(): 107 | try: 108 | curprefix, rest = compkey.split(sep, 1) 109 | except ValueError: 110 | continue 111 | 112 | if curprefix != prefix or rest == '#': 113 | continue 114 | 115 | yield rest, value 116 | 117 | 118 | def parse_attr_list(source, prefix, sep='.'): 119 | attrs = defaultdict(dict) 120 | for compkey, value in _parse_prefix(source, prefix, sep): 121 | idx, key = compkey.split(sep, 1) 122 | attrs[idx][key] = value 123 | 124 | return attrs.values() 125 | 126 | 127 | def parse_dict(source, prefix, sep='.'): 128 | return dict(_parse_prefix(source, prefix, sep)) 129 | 130 | 131 | def parse_list(source, prefix, sep='.'): 132 | return [value for _, value in _parse_prefix(source, prefix, sep)] 133 | 134 | 135 | def parse_bool(string_form): 136 | token = string_form.lower()[0] 137 | 138 | if token == 't': 139 | return True 140 | elif token == 'f': 141 | return False 142 | else: 143 | raise ValueError('could not convert %r to a bool' % string_form) 144 | 145 | 146 | @parses('openstack_compute_instance_v2') 147 | @calculate_mi_vars 148 | def openstack_host(resource, module_name): 149 | raw_attrs = resource['primary']['attributes'] 150 | name = raw_attrs['name'] 151 | groups = [] 152 | 153 | attrs = { 154 | 'access_ip_v4': raw_attrs['access_ip_v4'], 155 | 'access_ip_v6': raw_attrs['access_ip_v6'], 156 | 'flavor': parse_dict(raw_attrs, 'flavor', 157 | sep='_'), 158 | 'id': raw_attrs['id'], 159 | 'image': parse_dict(raw_attrs, 'image', 160 | sep='_'), 161 | 'key_pair': raw_attrs['key_pair'], 162 | 'metadata': parse_dict(raw_attrs, 'metadata'), 163 | 'network': parse_attr_list(raw_attrs, 'network'), 164 | 'region': raw_attrs.get('region', ''), 165 | 'security_groups': parse_list(raw_attrs, 'security_groups'), 166 | #ansible 167 | 'ansible_port': 22, 168 | 'ansible_user': 'centos', 169 | # workaround for an OpenStack bug where hosts have a different domain 170 | # after they're restarted 171 | 'host_domain': 'novalocal', 172 | 'use_host_domain': True, 173 | } 174 | 175 | try: 176 | attrs.update({ 177 | 'ansible_host': raw_attrs['access_ip_v4'], 178 | 'publicly_routable': True, 179 | }) 180 | except (KeyError, ValueError): 181 | attrs.update({'ansible_host': '', 'publicly_routable': False}) 182 | 183 | # attrs specific to microservices-infrastructure 184 | attrs.update({ 185 | 'consul_dc': 
_clean_dc(attrs['metadata'].get('dc', module_name)), 186 | 'role': attrs['metadata'].get('role', 'none'), 187 | }) 188 | 189 | # add groups based on attrs 190 | groups.append('os_image=' + attrs['image']['name']) 191 | groups.append('os_flavor=' + attrs['flavor']['name']) 192 | groups.extend('os_metadata_%s=%s' % item 193 | for item in attrs['metadata'].items()) 194 | groups.append('os_region=' + attrs['region']) 195 | 196 | # groups specific to microservices-infrastructure 197 | groups.append('role=' + attrs['metadata'].get('role', 'none')) 198 | groups.append('dc=' + attrs['consul_dc']) 199 | 200 | return name, attrs, groups 201 | 202 | 203 | @parses('aws_instance') 204 | @calculate_mi_vars 205 | def aws_host(resource, module_name): 206 | name = resource['primary']['attributes']['tags.Name'] 207 | raw_attrs = resource['primary']['attributes'] 208 | 209 | groups = [] 210 | 211 | attrs = { 212 | 'ami': raw_attrs['ami'], 213 | 'availability_zone': raw_attrs['availability_zone'], 214 | 'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'), 215 | 'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']), 216 | 'ephemeral_block_device': parse_attr_list(raw_attrs, 217 | 'ephemeral_block_device'), 218 | 'id': raw_attrs['id'], 219 | 'key_name': raw_attrs['key_name'], 220 | 'private': parse_dict(raw_attrs, 'private', 221 | sep='_'), 222 | 'public': parse_dict(raw_attrs, 'public', 223 | sep='_'), 224 | 'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'), 225 | 'security_groups': parse_list(raw_attrs, 'security_groups'), 226 | 'subnet': parse_dict(raw_attrs, 'subnet', 227 | sep='_'), 228 | 'tags': parse_dict(raw_attrs, 'tags'), 229 | 'tenancy': raw_attrs['tenancy'], 230 | 'vpc_security_group_ids': parse_list(raw_attrs, 231 | 'vpc_security_group_ids'), 232 | # ansible-specific 233 | 'ansible_port': 22, 234 | 'ansible_user': raw_attrs.get('tags.sshUser', 'core'), 235 | 'ansible_host': _get_ignore_blank(raw_attrs, 'public_ip', raw_attrs['private_ip']), 236 | 'provider': 'aws' 237 | } 238 | 239 | # attrs specific to microservices-infrastructure 240 | attrs.update({ 241 | 'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)), 242 | 'role': attrs['tags'].get('role', 'none') 243 | }) 244 | 245 | # groups specific to microservices-infrastructure 246 | groups.extend(['aws_ami=' + attrs['ami'], 247 | 'aws_az=' + attrs['availability_zone'], 248 | 'aws_key_name=' + attrs['key_name'], 249 | 'aws_tenancy=' + attrs['tenancy']]) 250 | groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items()) 251 | groups.extend('aws_vpc_security_group=' + group 252 | for group in attrs['vpc_security_group_ids']) 253 | groups.extend('aws_subnet_%s=%s' % subnet 254 | for subnet in attrs['subnet'].items()) 255 | 256 | # groups specific to microservices-infrastructure 257 | groups.append('role=' + attrs['role']) 258 | groups.append('dc=' + attrs['consul_dc']) 259 | 260 | return name, attrs, groups 261 | 262 | @parses('digitalocean_droplet') 263 | @calculate_mi_vars 264 | def digitalocean_host(resource, tfvars=None): 265 | 266 | raw_attrs = resource['primary']['attributes'] 267 | groups = [] 268 | 269 | # general attrs 270 | attrs = { 271 | 'name': raw_attrs['name'], 272 | 'metadata': yaml.load(raw_attrs['user_data']), 273 | 'region': raw_attrs['region'], 274 | 'size': raw_attrs['size'], 275 | # ansible 276 | 'ansible_port': 22, 277 | # Could be passed from the command line via environment variable 278 | 'ansible_user': 'core', 279 | 'ansible_host': raw_attrs['ipv4_address'], 280 | 
'provider': 'digitalocean', 281 | } 282 | 283 | attrs.update({ 284 | 'role': attrs['metadata'].get('role', 'none') 285 | }) 286 | 287 | # groups specific to microservices-infrastructure 288 | name = attrs.get('name') 289 | 290 | groups.append('region=' + attrs['region']) 291 | groups.append('role=' + attrs['role']) 292 | 293 | return name, attrs, groups 294 | 295 | 296 | @parses('google_compute_instance') 297 | @calculate_mi_vars 298 | def gce_host(resource, module_name): 299 | name = resource['primary']['id'] 300 | raw_attrs = resource['primary']['attributes'] 301 | groups = [] 302 | 303 | # network interfaces 304 | interfaces = parse_attr_list(raw_attrs, 'network_interface') 305 | for interface in interfaces: 306 | interface['access_config'] = parse_attr_list(interface, 307 | 'access_config') 308 | for key in interface.keys(): 309 | if '.' in key: 310 | del interface[key] 311 | 312 | # general attrs 313 | attrs = { 314 | 'can_ip_forward': raw_attrs['can_ip_forward'] == 'true', 315 | 'disks': parse_attr_list(raw_attrs, 'disk'), 316 | 'machine_type': raw_attrs['machine_type'], 317 | 'metadata': parse_dict(raw_attrs, 'metadata'), 318 | 'network': parse_attr_list(raw_attrs, 'network'), 319 | 'network_interface': interfaces, 320 | 'self_link': raw_attrs['self_link'], 321 | 'service_account': parse_attr_list(raw_attrs, 'service_account'), 322 | 'tags': parse_list(raw_attrs, 'tags'), 323 | 'zone': raw_attrs['zone'], 324 | # ansible 325 | 'ansible_port': 22, 326 | 'ansible_user': 'deploy', 327 | 'provider': 'gce', 328 | } 329 | 330 | # attrs specific to microservices-infrastructure 331 | attrs.update({ 332 | 'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)), 333 | 'role': attrs['metadata'].get('role', 'none'), 334 | }) 335 | 336 | try: 337 | attrs.update({ 338 | 'ansible_host': interfaces[0]['access_config'][0]['nat_ip'], 339 | 'publicly_routable': True, 340 | }) 341 | except (KeyError, ValueError): 342 | attrs.update({'ansible_host': '', 'publicly_routable': False}) 343 | 344 | # add groups based on attrs 345 | groups.extend('gce_image=' + disk['image'] for disk in attrs['disks']) 346 | groups.append('gce_machine_type=' + attrs['machine_type']) 347 | groups.extend('gce_metadata_%s=%s' % (key, value) 348 | for (key, value) in attrs['metadata'].items() 349 | if key not in set(['sshKeys'])) 350 | groups.extend('gce_tag=' + tag for tag in attrs['tags']) 351 | groups.append('gce_zone=' + attrs['zone']) 352 | 353 | if attrs['can_ip_forward']: 354 | groups.append('gce_ip_forward') 355 | if attrs['publicly_routable']: 356 | groups.append('gce_publicly_routable') 357 | 358 | # groups specific to microservices-infrastructure 359 | groups.append('role=' + attrs['metadata'].get('role', 'none')) 360 | groups.append('dc=' + attrs['consul_dc']) 361 | 362 | return name, attrs, groups 363 | 364 | @parses('azure_instance') 365 | @calculate_mi_vars 366 | def azure_host(resource, module_name): 367 | name = resource['primary']['attributes']['name'] 368 | raw_attrs = resource['primary']['attributes'] 369 | 370 | groups = [] 371 | 372 | attrs = { 373 | 'automatic_updates': raw_attrs['automatic_updates'], 374 | 'description': raw_attrs['description'], 375 | 'hosted_service_name': raw_attrs['hosted_service_name'], 376 | 'id': raw_attrs['id'], 377 | 'image': raw_attrs['image'], 378 | 'ip_address': raw_attrs['ip_address'], 379 | 'location': raw_attrs['location'], 380 | 'name': raw_attrs['name'], 381 | 'reverse_dns': raw_attrs['reverse_dns'], 382 | 'security_group': raw_attrs['security_group'], 383 | 
'size': raw_attrs['size'], 384 | 'ssh_key_thumbprint': raw_attrs['ssh_key_thumbprint'], 385 | 'subnet': raw_attrs['subnet'], 386 | 'username': raw_attrs['username'], 387 | 'vip_address': raw_attrs.get('vip_address'), 388 | 'virtual_network': raw_attrs.get('virtual_network'), 389 | 'endpoint': parse_attr_list(raw_attrs, 'endpoint'), 390 | # ansible 391 | 'ansible_port': 22, 392 | 'ansible_user': raw_attrs['username'], 393 | 'ansible_host': raw_attrs.get('vip_address', raw_attrs['ip_address']), 394 | 'provider': 'azure', 395 | } 396 | 397 | # attrs specific to microservices-infrastructure 398 | attrs.update({ 399 | 'consul_dc': attrs['location'].lower().replace(" ", "-"), 400 | 'role': attrs['description'] 401 | }) 402 | 403 | # groups specific to microservices-infrastructure 404 | groups.extend(['azure_image=' + attrs['image'], 405 | 'azure_location=' + attrs['location'].lower().replace(" ", "-"), 406 | 'azure_username=' + attrs['username'], 407 | 'azure_security_group=' + attrs['security_group']]) 408 | 409 | # groups specific to microservices-infrastructure 410 | groups.append('role=' + attrs['role']) 411 | groups.append('dc=' + attrs['consul_dc']) 412 | 413 | return name, attrs, groups 414 | 415 | ## QUERY TYPES 416 | def query_host(hosts, target): 417 | for name, attrs, _ in hosts: 418 | if name == target: 419 | return attrs 420 | 421 | return {} 422 | 423 | 424 | def query_list(hosts): 425 | groups = defaultdict(dict) 426 | meta = {} 427 | 428 | for name, attrs, hostgroups in hosts: 429 | for group in set(hostgroups): 430 | groups[group].setdefault('hosts', []) 431 | groups[group]['hosts'].append(name) 432 | 433 | meta[name] = attrs 434 | 435 | groups['_meta'] = {'hostvars': meta} 436 | return groups 437 | 438 | 439 | def main(): 440 | 441 | parser = argparse.ArgumentParser( 442 | __file__, 443 | __doc__, 444 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 445 | ) 446 | modes = parser.add_mutually_exclusive_group(required=True) 447 | modes.add_argument('--list', 448 | action='store_true', 449 | help='list all variables') 450 | modes.add_argument('--host', help='list variables for a single host') 451 | modes.add_argument('--version', 452 | action='store_true', 453 | help='print version and exit') 454 | parser.add_argument('--pretty', 455 | action='store_true', 456 | help='pretty-print output JSON') 457 | parser.add_argument('--nometa', 458 | action='store_true', 459 | help='with --list, exclude hostvars') 460 | default_root = os.environ.get('TF_VAR_STATE_ROOT', os.path.join(os.path.dirname(__file__), 461 | '..', '..', )) 462 | parser.add_argument('--root', 463 | default=default_root, 464 | help='custom root to search for `.tfstate`s in') 465 | 466 | args = parser.parse_args() 467 | 468 | if args.version: 469 | print('%s %s' % (__file__, VERSION)) 470 | parser.exit() 471 | 472 | hosts = iterhosts(iterresources(tfstates(args.root))) 473 | if args.list: 474 | output = query_list(hosts) 475 | if args.nometa: 476 | del output['_meta'] 477 | else: 478 | output = query_host(hosts, args.host) 479 | 480 | print(json.dumps(output, indent=4 if args.pretty else None)) 481 | parser.exit() 482 | 483 | 484 | if __name__ == '__main__': 485 | main() 486 | -------------------------------------------------------------------------------- /library/kube.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | DOCUMENTATION = """ 5 | --- 6 | module: kube 7 | short_description: Manage Kubernetes Cluster 8 | 
description: 9 | - Create, replace, remove, and stop resources within a Kubernetes Cluster 10 | version_added: "2.0" 11 | options: 12 | name: 13 | required: false 14 | default: null 15 | description: 16 | - The name associated with the resource 17 | filename: 18 | required: false 19 | default: null 20 | description: 21 | - The path and filename of the resource(s) definition file. 22 | namespace: 23 | required: false 24 | default: null 25 | description: 26 | - The namespace associated with the resource(s) 27 | resource: 28 | required: false 29 | default: null 30 | description: 31 | - The resource to perform an action on. pods (po), replicationControllers (rc), services (svc), deployments (deployment), secrets (secret), ingress (ing), configmap (configmap) 32 | label: 33 | required: false 34 | default: null 35 | description: 36 | - The labels used to filter specific resources. 37 | server: 38 | required: false 39 | default: null 40 | description: 41 | - The URL for the API server that commands are executed against. 42 | force: 43 | required: false 44 | default: false 45 | description: 46 | - A flag to indicate to force delete, replace, or stop. 47 | all: 48 | required: false 49 | default: false 50 | description: 51 | - A flag to indicate delete all, stop all, or all namespaces when checking exists. 52 | log_level: 53 | required: false 54 | default: 0 55 | description: 56 | - Indicates the level of verbosity of logging by kubectl. 57 | state: 58 | required: false 59 | choices: ['present', 'absent', 'latest', 'reloaded', 'stopped'] 60 | default: present 61 | description: 62 | - present handles checking existence or creating if definition file provided, 63 | absent handles deleting resource(s) based on other options, 64 | latest handles creating or updating based on existence, 65 | reloaded handles updating resource(s) definition using definition file, 66 | stopped handles stopping resource(s) based on other options. 
67 | requirements: 68 | - kubectl 69 | author: "Kenny Jones (@kenjones-cisco)" 70 | """ 71 | 72 | EXAMPLES = """ 73 | - name: test nginx is present 74 | kube: name=nginx resource=rc state=present 75 | 76 | - name: test nginx is stopped 77 | kube: name=nginx resource=rc state=stopped 78 | 79 | - name: test nginx is absent 80 | kube: name=nginx resource=rc state=absent 81 | 82 | - name: test nginx is present 83 | kube: filename=/tmp/nginx.yml 84 | """ 85 | 86 | 87 | class KubeManager(object): 88 | 89 | def __init__(self, module): 90 | 91 | self.module = module 92 | 93 | self.base_cmd = [module.get_bin_path('kubectl', True)] 94 | 95 | if module.params.get('server'): 96 | self.base_cmd.append('--server=' + module.params.get('server')) 97 | 98 | if module.params.get('log_level'): 99 | self.base_cmd.append('--v=' + str(module.params.get('log_level'))) 100 | 101 | if module.params.get('namespace'): 102 | self.base_cmd.append('--namespace=' + module.params.get('namespace')) 103 | 104 | self.all = module.params.get('all') 105 | self.force = module.params.get('force') 106 | self.name = module.params.get('name') 107 | self.filename = module.params.get('filename') 108 | self.resource = module.params.get('resource') 109 | self.label = module.params.get('label') 110 | 111 | def _execute(self, cmd): 112 | args = self.base_cmd + cmd 113 | try: 114 | rc, out, err = self.module.run_command(args) 115 | if rc != 0: 116 | self.module.fail_json( 117 | msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out + err)) 118 | except Exception as exc: 119 | self.module.fail_json( 120 | msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc))) 121 | return out.splitlines() 122 | 123 | def _execute_nofail(self, cmd): 124 | args = self.base_cmd + cmd 125 | rc, out, err = self.module.run_command(args) 126 | if rc != 0: 127 | return None 128 | return out.splitlines() 129 | 130 | def create(self, check=True): 131 | if check and self.exists(): 132 | return [] 133 | 134 | cmd = ['create'] 135 | 136 | if not self.filename: 137 | self.module.fail_json(msg='filename required to create') 138 | 139 | cmd.append('--filename=' + self.filename) 140 | 141 | return self._execute(cmd) 142 | 143 | def replace(self): 144 | 145 | if not self.force and not self.exists(): 146 | return [] 147 | 148 | cmd = ['replace'] 149 | 150 | if self.force: 151 | cmd.append('--force') 152 | 153 | if not self.filename: 154 | self.module.fail_json(msg='filename required to reload') 155 | 156 | cmd.append('--filename=' + self.filename) 157 | 158 | return self._execute(cmd) 159 | 160 | def delete(self): 161 | 162 | if not self.force and not self.exists(): 163 | return [] 164 | 165 | cmd = ['delete'] 166 | 167 | if self.filename: 168 | cmd.append('--filename=' + self.filename) 169 | else: 170 | if not self.resource: 171 | self.module.fail_json(msg='resource required to delete without filename') 172 | 173 | cmd.append(self.resource) 174 | 175 | if self.name: 176 | cmd.append(self.name) 177 | 178 | if self.label: 179 | cmd.append('--selector=' + self.label) 180 | 181 | if self.all: 182 | cmd.append('--all') 183 | 184 | if self.force: 185 | cmd.append('--ignore-not-found') 186 | 187 | return self._execute(cmd) 188 | 189 | def exists(self): 190 | cmd = ['get'] 191 | 192 | if not self.resource: 193 | return False 194 | 195 | cmd.append(self.resource) 196 | 197 | if self.name: 198 | cmd.append(self.name) 199 | 200 | cmd.append('--no-headers') 201 | 202 | if self.label: 203 | cmd.append('--selector=' + self.label) 204 | 205 | 
if self.all: 206 | cmd.append('--all-namespaces') 207 | 208 | result = self._execute_nofail(cmd) 209 | if not result: 210 | return False 211 | return True 212 | 213 | def stop(self): 214 | 215 | if not self.force and not self.exists(): 216 | return [] 217 | 218 | cmd = ['stop'] 219 | 220 | if self.filename: 221 | cmd.append('--filename=' + self.filename) 222 | else: 223 | if not self.resource: 224 | self.module.fail_json(msg='resource required to stop without filename') 225 | 226 | cmd.append(self.resource) 227 | 228 | if self.name: 229 | cmd.append(self.name) 230 | 231 | if self.label: 232 | cmd.append('--selector=' + self.label) 233 | 234 | if self.all: 235 | cmd.append('--all') 236 | 237 | if self.force: 238 | cmd.append('--ignore-not-found') 239 | 240 | return self._execute(cmd) 241 | 242 | 243 | def main(): 244 | 245 | module = AnsibleModule( 246 | argument_spec=dict( 247 | name=dict(), 248 | filename=dict(), 249 | namespace=dict(), 250 | resource=dict(), 251 | label=dict(), 252 | server=dict(), 253 | force=dict(default=False, type='bool'), 254 | all=dict(default=False, type='bool'), 255 | log_level=dict(default=0, type='int'), 256 | state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']), 257 | ) 258 | ) 259 | 260 | changed = False 261 | 262 | manager = KubeManager(module) 263 | state = module.params.get('state') 264 | 265 | if state == 'present': 266 | result = manager.create() 267 | 268 | elif state == 'absent': 269 | result = manager.delete() 270 | 271 | elif state == 'reloaded': 272 | result = manager.replace() 273 | 274 | elif state == 'stopped': 275 | result = manager.stop() 276 | 277 | elif state == 'latest': 278 | if manager.exists(): 279 | manager.force = True 280 | result = manager.replace() 281 | else: 282 | result = manager.create(check=False) 283 | 284 | else: 285 | module.fail_json(msg='Unrecognized state %s.' % state) 286 | 287 | if result: 288 | changed = True 289 | module.exit_json(changed=changed, 290 | msg='success: %s' % (' '.join(result)) 291 | ) 292 | 293 | 294 | from ansible.module_utils.basic import * # noqa 295 | if __name__ == '__main__': 296 | main() 297 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kubeform", 3 | "version": "0.0.1", 4 | "description": "Automatic Kubernetes cluster provisioning", 5 | "private": true, 6 | "scripts": { 7 | "docs:clean": "rimraf _book", 8 | "docs:prepare": "gitbook install", 9 | "docs:build": "npm run docs:prepare && gitbook build -g Capgemini/kubeform", 10 | "docs:watch": "npm run docs:prepare && gitbook serve", 11 | "docs:publish": "npm run docs:clean && npm run docs:build && cd _book && git init && git commit --allow-empty -m 'update book' && git checkout -b gh-pages && touch .nojekyll && git add . 
&& git commit -am 'update book' && git push git@github.com:Capgemini/kubeform gh-pages --force" 12 | }, 13 | "repository": { 14 | "type": "git", 15 | "url": "https://github.com/Capgemini/kubeform.git" 16 | }, 17 | "keywords": [ 18 | "kubernetes", 19 | "devops", 20 | "containers", 21 | "docker" 22 | ], 23 | "license": "MIT", 24 | "bugs": { 25 | "url": "https://github.com/Capgemini/kubeform/issues" 26 | }, 27 | "devDependencies": { 28 | "gitbook-cli": "^0.3.4", 29 | "gitbook-plugin-edit-link": "^1.4.2", 30 | "gitbook-plugin-github": "^1.1.0", 31 | "gitbook-plugin-prism": "^1.0.0", 32 | "rimraf": "^2.5.2" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /playbooks/coreos-bootstrap.yml: -------------------------------------------------------------------------------- 1 | - name: bootstrap coreos hosts 2 | hosts: all:!role=bastion 3 | gather_facts: False 4 | environment: "{{ proxy_env }}" 5 | roles: 6 | - { role: coreos_bootstrap, when: onpremise|bool } 7 | - coreos_timezone 8 | 9 | - name: Install pip packages 10 | hosts: all:!role=bastion 11 | gather_facts: False 12 | environment: "{{ proxy_env }}" 13 | tasks: 14 | - pip: 15 | name: docker-py 16 | version: 1.9.0 17 | - pip: 18 | name: pyyaml 19 | version: 3.11 20 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | ansible-lint==2.7.0 2 | -r requirements.txt 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==2.0.2.0 2 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | - src: wallies.coreos-timezone 2 | name: coreos_timezone 3 | version: v0.1.3 4 | 5 | - src: defunctzombie.coreos-bootstrap 6 | name: coreos_bootstrap 7 | -------------------------------------------------------------------------------- /roles/addons/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for addons 3 | 4 | # determines if we are on the first master instance. 
5 | is_first_master: "{% if inventory_hostname == groups['masters'][0] %}true{% else %}false{% endif %}" 6 | 7 | # skydns 8 | dns_replicas: 1 9 | 10 | kubectl_bin: /opt/bin/kubectl 11 | 12 | # Heapster monitoring 13 | base_metrics_memory: "140Mi" 14 | metrics_memory_initial: 200 15 | metrics_memory_per_node: 4 16 | metrics_memory: "{{ metrics_memory_initial + (metrics_memory_per_node * (groups['all'] | length | int)) }}Mi" 17 | 18 | base_eventer_memory: "190Mi" 19 | eventer_memory_initial: 200 20 | eventer_memory_per_node: 500 21 | eventer_memory: "{{ eventer_memory_initial + (eventer_memory_per_node * (groups['all'] | length | int)) }}Ki" 22 | 23 | nanny_memory_per_node: 200 24 | nanny_memory: "{{ (200 * 1024 + (groups['all'] | length | int) * nanny_memory_per_node) }}Ki" 25 | 26 | base_metrics_cpu: "80m" 27 | metrics_cpu_per_node: 0.5 28 | metrics_cpu: "{{ (80 + (groups['all'] | length | int) * metrics_cpu_per_node)|string }}m" 29 | grains: "" 30 | -------------------------------------------------------------------------------- /roles/addons/files/dashboard-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kubernetes-dashboard-v1.4.0-beta2 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | version: v1.4.0-beta2 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: kubernetes-dashboard 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: kubernetes-dashboard 18 | version: v1.4.0-beta2 19 | kubernetes.io/cluster-service: "true" 20 | annotations: 21 | scheduler.alpha.kubernetes.io/critical-pod: '' 22 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 23 | spec: 24 | containers: 25 | - name: kubernetes-dashboard 26 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.0-beta2 27 | resources: 28 | # keep request = limit to keep this container in guaranteed class 29 | limits: 30 | cpu: 100m 31 | memory: 50Mi 32 | requests: 33 | cpu: 100m 34 | memory: 50Mi 35 | ports: 36 | - containerPort: 9090 37 | livenessProbe: 38 | httpGet: 39 | path: / 40 | port: 9090 41 | initialDelaySeconds: 30 42 | timeoutSeconds: 30 -------------------------------------------------------------------------------- /roles/addons/files/dashboard-svc.yaml: -------------------------------------------------------------------------------- 1 | # This file should be kept in sync with cluster/gce/coreos/kube-manifests/addons/dashboard/dashboard-service.yaml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kubernetes-dashboard 6 | namespace: kube-system 7 | labels: 8 | k8s-app: kubernetes-dashboard 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | selector: 12 | k8s-app: kubernetes-dashboard 13 | ports: 14 | - port: 80 15 | targetPort: 9090 -------------------------------------------------------------------------------- /roles/addons/files/es-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: elasticsearch-logging-v1 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | version: v1 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 4 12 | selector: 13 | k8s-app: elasticsearch-logging 14 | version: v1 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: elasticsearch-logging 19 | version: v1 20 
| kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - image: gcr.io/google_containers/elasticsearch:1.9 24 | name: elasticsearch-logging 25 | resources: 26 | # keep request = limit to keep this container in guaranteed class 27 | limits: 28 | cpu: 300m 29 | requests: 30 | cpu: 300m 31 | ports: 32 | - containerPort: 9200 33 | name: db 34 | protocol: TCP 35 | - containerPort: 9300 36 | name: transport 37 | protocol: TCP 38 | volumeMounts: 39 | - name: es-persistent-storage 40 | mountPath: /data 41 | volumes: 42 | - name: es-persistent-storage 43 | emptyDir: {} 44 | -------------------------------------------------------------------------------- /roles/addons/files/es-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "Elasticsearch" 10 | spec: 11 | ports: 12 | - port: 9200 13 | protocol: TCP 14 | targetPort: db 15 | selector: 16 | k8s-app: elasticsearch-logging 17 | -------------------------------------------------------------------------------- /roles/addons/files/fluentd-logging-ds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: DaemonSet 4 | metadata: 5 | name: fluent-elasticsearch 6 | namespace: kube-system 7 | labels: 8 | k8s-app: fluentd-logging 9 | spec: 10 | template: 11 | metadata: 12 | name: fluentd-elasticsearch 13 | namespace: kube-system 14 | labels: 15 | k8s-app: fluentd-logging 16 | spec: 17 | containers: 18 | - name: fluentd-elasticsearch 19 | image: gcr.io/google_containers/fluentd-elasticsearch:1.17 20 | resources: 21 | limits: 22 | memory: 200Mi 23 | requests: 24 | cpu: 100m 25 | memory: 200Mi 26 | volumeMounts: 27 | - name: varlog 28 | mountPath: "/var/log" 29 | - name: varlibdockercontainers 30 | mountPath: "/var/lib/docker/containers" 31 | readOnly: true 32 | terminationGracePeriodSeconds: 30 33 | volumes: 34 | - name: varlog 35 | hostPath: 36 | path: "/var/log" 37 | - name: varlibdockercontainers 38 | hostPath: 39 | path: "/var/lib/docker/containers" 40 | -------------------------------------------------------------------------------- /roles/addons/files/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: monitoring-grafana 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | kubernetes.io/name: "Grafana" 9 | spec: 10 | # On production clusters, consider setting up auth for grafana, and 11 | # exposing Grafana either using a LoadBalancer or a public IP. 
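# With no explicit type the Service defaults to ClusterIP, so Grafana is reached via the kube-apiserver proxy, e.g. http://<api-server>:8080/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ (the path set as GF_SERVER_ROOT_URL in influxdb-grafana-controller.yaml).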
12 | # type: LoadBalancer 13 | ports: 14 | - port: 80 15 | targetPort: 3000 16 | selector: 17 | k8s-app: influxGrafana -------------------------------------------------------------------------------- /roles/addons/files/heapster-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: heapster 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | kubernetes.io/name: "Heapster" 9 | spec: 10 | ports: 11 | - port: 80 12 | targetPort: 8082 13 | selector: 14 | k8s-app: heapster -------------------------------------------------------------------------------- /roles/addons/files/influxdb-grafana-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: monitoring-influxdb-grafana-v3 5 | namespace: kube-system 6 | labels: 7 | k8s-app: influxGrafana 8 | version: v3 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: influxGrafana 14 | version: v3 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: influxGrafana 19 | version: v3 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - image: gcr.io/google_containers/heapster_influxdb:v0.5 24 | name: influxdb 25 | resources: 26 | # keep request = limit to keep this container in guaranteed class 27 | limits: 28 | cpu: 100m 29 | memory: 500Mi 30 | requests: 31 | cpu: 100m 32 | memory: 500Mi 33 | ports: 34 | - containerPort: 8083 35 | - containerPort: 8086 36 | volumeMounts: 37 | - name: influxdb-persistent-storage 38 | mountPath: /data 39 | - image: gcr.io/google_containers/heapster_grafana:v2.6.0-2 40 | name: grafana 41 | env: 42 | resources: 43 | # keep request = limit to keep this container in guaranteed class 44 | limits: 45 | cpu: 100m 46 | memory: 100Mi 47 | requests: 48 | cpu: 100m 49 | memory: 100Mi 50 | env: 51 | # This variable is required to setup templates in Grafana. 52 | - name: INFLUXDB_SERVICE_URL 53 | value: http://monitoring-influxdb:8086 54 | # The following env variables are required to make Grafana accessible via 55 | # the kubernetes api-server proxy. On production clusters, we recommend 56 | # removing these env variables, setup auth for grafana, and expose the grafana 57 | # service using a LoadBalancer or a public IP. 
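# The GF_AUTH_* variables below disable Grafana logins altogether (anonymous visitors get the Admin role), which is what makes the dashboard usable behind the api-server proxy path set in GF_SERVER_ROOT_URL.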
58 | - name: GF_AUTH_BASIC_ENABLED 59 | value: "false" 60 | - name: GF_AUTH_ANONYMOUS_ENABLED 61 | value: "true" 62 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 63 | value: Admin 64 | - name: GF_SERVER_ROOT_URL 65 | value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ 66 | volumeMounts: 67 | - name: grafana-persistent-storage 68 | mountPath: /var 69 | volumes: 70 | - name: influxdb-persistent-storage 71 | emptyDir: {} 72 | - name: grafana-persistent-storage 73 | emptyDir: {} -------------------------------------------------------------------------------- /roles/addons/files/influxdb-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: monitoring-influxdb 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | kubernetes.io/name: "InfluxDB" 9 | spec: 10 | ports: 11 | - name: http 12 | port: 8083 13 | targetPort: 8083 14 | - name: api 15 | port: 8086 16 | targetPort: 8086 17 | selector: 18 | k8s-app: influxGrafana 19 | -------------------------------------------------------------------------------- /roles/addons/files/kibana-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: kibana-logging-v1 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | version: v1 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | replicas: 1 12 | selector: 13 | k8s-app: kibana-logging 14 | version: v1 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kibana-logging 19 | version: v1 20 | kubernetes.io/cluster-service: "true" 21 | spec: 22 | containers: 23 | - name: kibana-logging 24 | image: gcr.io/google_containers/kibana:1.3 25 | resources: 26 | # keep request = limit to keep this container in guaranteed class 27 | limits: 28 | cpu: 100m 29 | requests: 30 | cpu: 100m 31 | env: 32 | - name: "ELASTICSEARCH_URL" 33 | value: "http://elasticsearch-logging:9200" 34 | ports: 35 | - containerPort: 5601 36 | name: ui 37 | protocol: TCP 38 | -------------------------------------------------------------------------------- /roles/addons/files/kibana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "Kibana" 10 | spec: 11 | ports: 12 | - port: 5601 13 | protocol: TCP 14 | targetPort: ui 15 | selector: 16 | k8s-app: kibana-logging 17 | 18 | -------------------------------------------------------------------------------- /roles/addons/files/kube-system.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kube-system 5 | -------------------------------------------------------------------------------- /roles/addons/files/traefik-ingress-controller.yaml: -------------------------------------------------------------------------------- 1 | # We run this at the minute on the hostnetwork 2 | # as a workaround for 3 | # https://github.com/kubernetes/kubernetes/issues/23920 4 | # https://github.com/kubernetes/kubernetes/issues/31307 5 | # https://github.com/containernetworking/cni/issues/46 6 | apiVersion: extensions/v1beta1 7 | kind: DaemonSet 8 | metadata: 9 | name: traefik-ingress-controller-v1 10 
| namespace: kube-system 11 | labels: 12 | k8s-app: traefik-ingress-lb 13 | kubernetes.io/cluster-service: "true" 14 | spec: 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: traefik-ingress-lb 19 | name: traefik-ingress-lb 20 | spec: 21 | hostNetwork: true 22 | terminationGracePeriodSeconds: 60 23 | containers: 24 | - image: containous/traefik 25 | name: traefik-ingress-lb 26 | imagePullPolicy: Always 27 | # ports: 28 | # - containerPort: 80 29 | # hostPort: 80 30 | # - containerPort: 443 31 | # hostPort: 443 32 | # - containerPort: 8080 33 | # hostPort: 8080 34 | volumeMounts: 35 | - mountPath: /etc/traefik 36 | name: traefik-volume 37 | readOnly: false 38 | args: 39 | - --web 40 | - --kubernetes 41 | - --configFile=/etc/traefik/traefik.toml 42 | - --logLevel=DEBUG 43 | volumes: 44 | - hostPath: 45 | path: /etc/traefik 46 | name: traefik-volume 47 | nodeSelector: 48 | role: edge-router 49 | -------------------------------------------------------------------------------- /roles/addons/files/traefik.toml: -------------------------------------------------------------------------------- 1 | #defaultEntryPoints = ["http", "https"] 2 | defaultEntryPoints = ["http"] 3 | [entryPoints] 4 | [entryPoints.http] 5 | address = ":80" 6 | 7 | # [entryPoints.https] 8 | # address = ":443" 9 | # [entryPoints.https.tls] 10 | 11 | # Uncomment this for letsencrypt configuration 12 | # See https://docs.traefik.io/toml/#acme-lets-encrypt-configuration for more info 13 | 14 | # [acme] 15 | # email = "test@test.com" 16 | 17 | # storageFile = "/etc/traefik/acme.json" 18 | 19 | # entryPoint = "https" 20 | 21 | # onDemand = true 22 | 23 | # caServer = "https://acme-staging.api.letsencrypt.org/directory" 24 | 25 | # [[acme.domains]] 26 | # main = "your-domain" 27 | -------------------------------------------------------------------------------- /roles/addons/tasks/dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # create kubernetes-dashboard 3 | - name: create dashboard deployment file 4 | become: yes 5 | copy: 6 | src: dashboard-deployment.yaml 7 | dest: "{{ kubernetes_addons_dir }}/dashboard-deployment.yaml" 8 | tags: 9 | - addons 10 | - dashboard 11 | 12 | - name: create dashboard service file 13 | become: yes 14 | copy: 15 | src: dashboard-svc.yaml 16 | dest: "{{ kubernetes_addons_dir }}/dashboard-svc.yaml" 17 | tags: 18 | - addons 19 | - dashboard 20 | 21 | -------------------------------------------------------------------------------- /roles/addons/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | # Fluentd 2 | - name: create fluentd-logging-ds file 3 | become: yes 4 | copy: 5 | src: fluentd-logging-ds.yaml 6 | dest: "{{ kubernetes_addons_dir }}/fluentd-logging-ds.yaml" 7 | tags: 8 | - addons 9 | - logging 10 | 11 | # Elasticsearch 12 | - name: create es-controller file 13 | become: yes 14 | copy: 15 | src: es-controller.yaml 16 | dest: "{{ kubernetes_addons_dir }}/es-controller.yaml" 17 | tags: 18 | - addons 19 | - logging 20 | 21 | - name: create es-service file 22 | become: yes 23 | copy: 24 | src: es-service.yaml 25 | dest: "{{ kubernetes_addons_dir }}/es-service.yaml" 26 | tags: 27 | - addons 28 | - logging 29 | 30 | # Kibana 31 | - name: create kibana-controller file 32 | become: yes 33 | copy: 34 | src: kibana-controller.yaml 35 | dest: "{{ kubernetes_addons_dir }}/kibana-controller.yaml" 36 | tags: 37 | - addons 38 | - logging 39 | 40 | - name: create kibana-service file 41 | become: 
yes 42 | copy: 43 | src: kibana-service.yaml 44 | dest: "{{ kubernetes_addons_dir }}/kibana-service.yaml" 45 | tags: 46 | - addons 47 | - logging 48 | -------------------------------------------------------------------------------- /roles/addons/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: skydns.yml 3 | when: is_first_master|bool 4 | 5 | - include: dashboard.yml 6 | when: enable_dashboard and is_first_master|bool 7 | 8 | - include: "{{ ingress_controller }}.yml" 9 | when: is_first_master|bool 10 | 11 | - include: logging.yml 12 | when: enable_logging and is_first_master|bool 13 | 14 | - include: monitoring.yml 15 | when: enable_monitoring and is_first_master|bool 16 | 17 | #- include: registry.yml 18 | # when: setup_docker_registry and is_first_master|bool 19 | 20 | - name: Create addons 21 | run_once: true 22 | become: yes 23 | kube: 24 | filename: "{{ kubernetes_addons_dir }}" 25 | state: present 26 | tags: 27 | - addons 28 | - kubernetes 29 | - create-addons 30 | -------------------------------------------------------------------------------- /roles/addons/tasks/monitoring.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # heapster 4 | - name: create heapster-controller file 5 | become: yes 6 | template: 7 | src: heapster-controller.yaml.j2 8 | dest: "{{ kubernetes_addons_dir }}/heapster-controller.yaml" 9 | tags: 10 | - addons 11 | - monitoring 12 | 13 | - name: create heapster-service file 14 | become: yes 15 | copy: 16 | src: heapster-service.yaml 17 | dest: "{{ kubernetes_addons_dir }}/heapster-service.yaml" 18 | tags: 19 | - addons 20 | - monitoring 21 | 22 | # influxdb 23 | - name: create influxdb-grafana-controller file 24 | become: yes 25 | copy: 26 | src: influxdb-grafana-controller.yaml 27 | dest: "{{ kubernetes_addons_dir }}/influxdb-grafana-controller.yaml" 28 | tags: 29 | - addons 30 | - monitoring 31 | 32 | - name: create influxdb-service file 33 | become: yes 34 | copy: 35 | src: influxdb-service.yaml 36 | dest: "{{ kubernetes_addons_dir }}/influxdb-service.yaml" 37 | tags: 38 | - addons 39 | - monitoring 40 | 41 | # Grafana 42 | - name: create grafana-service file 43 | become: yes 44 | copy: 45 | src: grafana-service.yaml 46 | dest: "{{ kubernetes_addons_dir }}/grafana-service.yaml" 47 | tags: 48 | - addons 49 | - monitoring -------------------------------------------------------------------------------- /roles/addons/tasks/registry.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # create private Docker registry 3 | -------------------------------------------------------------------------------- /roles/addons/tasks/skydns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # SkyDNS service addon 3 | - name: create SkyDNS pod file 4 | become: yes 5 | template: 6 | src: skydns-rc.yaml.j2 7 | dest: "{{ kubernetes_addons_dir }}/skydns-rc.yaml" 8 | tags: 9 | - addons 10 | - skydns 11 | 12 | - name: create SkyDNS service file 13 | become: yes 14 | template: 15 | src: skydns-svc.yaml.j2 16 | dest: "{{ kubernetes_addons_dir }}/skydns-svc.yaml" 17 | tags: 18 | - addons 19 | - skydns 20 | 21 | -------------------------------------------------------------------------------- /roles/addons/tasks/traefik.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Traefik ingress controller as a daemonset
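# The DaemonSet in files/traefik-ingress-controller.yaml only schedules onto nodes labelled role=edge-router; once the cluster is up, `kubectl get nodes -l role=edge-router` shows which hosts qualify. Workers can pick that label up through the kube-worker role's node_labels variable, which the kubelet unit passes as --node-labels.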
3 | - name: create traefik-ingress-controller file 4 | become: yes 5 | copy: 6 | src: traefik-ingress-controller.yaml 7 | dest: "{{ kubernetes_addons_dir }}/traefik-ingress-controller.yaml" 8 | tags: 9 | - addons 10 | - traefik-ingress-controller 11 | 12 | - name: create traefik config dir 13 | file: 14 | path: /etc/traefik 15 | state: directory 16 | mode: 0755 17 | delegate_to: "{{item}}" 18 | delegate_facts: True 19 | with_items: "{{groups['edge-routers']}}" 20 | become: yes 21 | tags: 22 | - addons 23 | - traefik-ingress-controller 24 | 25 | - name: create traefik config toml file 26 | become: yes 27 | copy: 28 | src: traefik.toml 29 | dest: /etc/traefik/traefik.toml 30 | delegate_to: "{{item}}" 31 | delegate_facts: True 32 | with_items: "{{groups['edge-routers']}}" 33 | tags: 34 | - addons 35 | - traefik-ingress-controller 36 | -------------------------------------------------------------------------------- /roles/addons/templates/heapster-controller.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: heapster-v1.1.0 5 | namespace: kube-system 6 | labels: 7 | k8s-app: heapster 8 | kubernetes.io/cluster-service: "true" 9 | version: v1.1.0 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | k8s-app: heapster 15 | version: v1.1.0 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: heapster 20 | version: v1.1.0 21 | spec: 22 | containers: 23 | - image: gcr.io/google_containers/heapster:v1.1.0 24 | name: heapster 25 | resources: 26 | # keep request = limit to keep this container in guaranteed class 27 | limits: 28 | cpu: {{ metrics_cpu }} 29 | memory: {{ metrics_memory }} 30 | requests: 31 | cpu: {{ metrics_cpu }} 32 | memory: {{ metrics_memory }} 33 | command: 34 | - /heapster 35 | - --source=kubernetes.summary_api:'' 36 | - --sink=influxdb:http://monitoring-influxdb:8086 37 | - image: gcr.io/google_containers/heapster:v1.1.0 38 | name: eventer 39 | resources: 40 | # keep request = limit to keep this container in guaranteed class 41 | limits: 42 | cpu: 100m 43 | memory: {{ eventer_memory }} 44 | requests: 45 | cpu: 100m 46 | memory: {{ eventer_memory }} 47 | command: 48 | - /eventer 49 | - --source=kubernetes:'' 50 | - --sink=influxdb:http://monitoring-influxdb:8086 51 | - image: gcr.io/google_containers/addon-resizer:1.3 52 | name: heapster-nanny 53 | resources: 54 | limits: 55 | cpu: 50m 56 | memory: {{ nanny_memory }} 57 | requests: 58 | cpu: 50m 59 | memory: {{ nanny_memory }} 60 | env: 61 | - name: MY_POD_NAME 62 | valueFrom: 63 | fieldRef: 64 | fieldPath: metadata.name 65 | - name: MY_POD_NAMESPACE 66 | valueFrom: 67 | fieldRef: 68 | fieldPath: metadata.namespace 69 | command: 70 | - /pod_nanny 71 | - --cpu={{ base_metrics_cpu }} 72 | - --extra-cpu={{ metrics_cpu_per_node }}m 73 | - --memory={{ base_metrics_memory }} 74 | - --extra-memory={{ metrics_memory_per_node }}Mi 75 | - --threshold=5 76 | - --deployment=heapster-v1.1.0 77 | - --container=heapster 78 | - --poll-period=300000 79 | - --estimator=exponential 80 | - image: gcr.io/google_containers/addon-resizer:1.3 81 | name: eventer-nanny 82 | resources: 83 | limits: 84 | cpu: 50m 85 | memory: {{ nanny_memory }} 86 | requests: 87 | cpu: 50m 88 | memory: {{ nanny_memory }} 89 | env: 90 | - name: MY_POD_NAME 91 | valueFrom: 92 | fieldRef: 93 | fieldPath: metadata.name 94 | - name: MY_POD_NAMESPACE 95 | valueFrom: 96 | fieldRef: 97 | fieldPath: metadata.namespace 98 | command: 99 | - /pod_nanny 100 
| - --cpu=100m 101 | - --extra-cpu=0m 102 | - --memory={{ base_eventer_memory }} 103 | - --extra-memory={{ eventer_memory_per_node }}Ki 104 | - --threshold=5 105 | - --deployment=heapster-v1.1.0 106 | - --container=eventer 107 | - --poll-period=300000 108 | - --estimator=exponential 109 | -------------------------------------------------------------------------------- /roles/addons/templates/skydns-rc.yaml.j2: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # This file should be kept in sync with cluster/images/hyperkube/dns-rc.yaml 16 | 17 | # TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.* 18 | 19 | # Warning: This is a file generated from the base underscore template file: skydns-rc.yaml.base 20 | 21 | apiVersion: v1 22 | kind: ReplicationController 23 | metadata: 24 | name: kube-dns-v19 25 | namespace: kube-system 26 | labels: 27 | k8s-app: kube-dns 28 | version: v19 29 | kubernetes.io/cluster-service: "true" 30 | spec: 31 | replicas: {{ dns_replicas }} 32 | selector: 33 | k8s-app: kube-dns 34 | version: v19 35 | template: 36 | metadata: 37 | labels: 38 | k8s-app: kube-dns 39 | version: v19 40 | kubernetes.io/cluster-service: "true" 41 | annotations: 42 | scheduler.alpha.kubernetes.io/critical-pod: '' 43 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 44 | spec: 45 | containers: 46 | - name: kubedns 47 | image: gcr.io/google_containers/kubedns-amd64:1.7 48 | resources: 49 | # TODO: Set memory limits when we've profiled the container for large 50 | # clusters, then set request = limit to keep this container in 51 | # guaranteed class. Currently, this container falls into the 52 | # "burstable" category so the kubelet doesn't backoff from restarting it. 53 | limits: 54 | cpu: 100m 55 | memory: 170Mi 56 | requests: 57 | cpu: 100m 58 | memory: 70Mi 59 | livenessProbe: 60 | httpGet: 61 | path: /healthz 62 | port: 8080 63 | scheme: HTTP 64 | initialDelaySeconds: 60 65 | timeoutSeconds: 5 66 | successThreshold: 1 67 | failureThreshold: 5 68 | readinessProbe: 69 | httpGet: 70 | path: /readiness 71 | port: 8081 72 | scheme: HTTP 73 | # we poll on pod startup for the Kubernetes master service and 74 | # only setup the /readiness HTTP server once that's available. 75 | initialDelaySeconds: 30 76 | timeoutSeconds: 5 77 | args: 78 | # command = "/kube-dns" 79 | - --domain={{ dns_domain }}. 
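# kubedns itself only serves DNS on port 10053; the dnsmasq container further down listens on port 53 for the cluster and forwards queries to 127.0.0.1#10053.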
80 | - --dns-port=10053 81 | ports: 82 | - containerPort: 10053 83 | name: dns-local 84 | protocol: UDP 85 | - containerPort: 10053 86 | name: dns-tcp-local 87 | protocol: TCP 88 | - name: dnsmasq 89 | image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3 90 | args: 91 | - --cache-size=1000 92 | - --no-resolv 93 | - --server=127.0.0.1#10053 94 | ports: 95 | - containerPort: 53 96 | name: dns 97 | protocol: UDP 98 | - containerPort: 53 99 | name: dns-tcp 100 | protocol: TCP 101 | - name: healthz 102 | image: gcr.io/google_containers/exechealthz-amd64:1.1 103 | resources: 104 | # keep request = limit to keep this container in guaranteed class 105 | limits: 106 | cpu: 10m 107 | memory: 50Mi 108 | requests: 109 | cpu: 10m 110 | # Note that this container shouldn't really need 50Mi of memory. The 111 | # limits are set higher than expected pending investigation on #29688. 112 | # The extra memory was stolen from the kubedns container to keep the 113 | # net memory requested by the pod constant. 114 | memory: 50Mi 115 | args: 116 | - -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1 >/dev/null && nslookup kubernetes.default.svc.{{ dns_domain }} 127.0.0.1:10053 >/dev/null 117 | - -port=8080 118 | - -quiet 119 | ports: 120 | - containerPort: 8080 121 | protocol: TCP 122 | dnsPolicy: Default # Don't use cluster DNS. -------------------------------------------------------------------------------- /roles/addons/templates/skydns-svc.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeDNS" 10 | spec: 11 | selector: 12 | k8s-app: kube-dns 13 | clusterIP: {{ kubernetes_dns_service_ip }} 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | -------------------------------------------------------------------------------- /roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for docker 3 | docker_tmp_dir: /var/lib/docker/tmp 4 | docker_dns_config: '' 5 | docker_storage_config: --storage-driver=overlay 6 | docker_endpoints: "-H=tcp://0.0.0.0:2376 -H=unix:///var/run/docker.sock" 7 | docker_bridge_ip: '' 8 | docker_proxy_exceptions: '' 9 | docker_registry: '' 10 | private_docker_registry: false 11 | docker_content_trust: 1 12 | docker_tls: "--tlsverify --tlscacert=/etc/docker/ca.pem --tlscert=/etc/docker/server.pem --tlskey=/etc/docker/server-key.pem" 13 | docker_customisations: "{{ docker_endpoints }} {{ docker_tls }} {{ docker_bridge_ip }} {{ docker_storage_config }} {{ docker_dns_config }} {{ docker_registry }}" 14 | 15 | 16 | -------------------------------------------------------------------------------- /roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for docker 3 | - name: restart docker 4 | become: yes 5 | service: 6 | name: docker 7 | state: restarted 8 | -------------------------------------------------------------------------------- /roles/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Graham Taylor 4 | description: 5 | company: Capgemini 6 | license: license (MIT) 7 | min_ansible_version: 1.2 8 | platforms: 9 | - 
name: CoreOS 10 | categories: 11 | - cloud 12 | - system 13 | dependencies: 14 | - role: handlers 15 | 16 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for docker 3 | - name: ensure docker config dir exists 4 | become: yes 5 | file: 6 | path: /root/.docker 7 | state: directory 8 | tags: 9 | - docker 10 | 11 | - name: setup private docker registry credentials 12 | become: yes 13 | when: private_docker_registry|bool 14 | template: 15 | src: config.json.j2 16 | dest: /root/.docker/config.json 17 | tags: 18 | - docker 19 | 20 | - name: deploy docker service 21 | become: yes 22 | become_user: root 23 | template: 24 | src: docker.service.j2 25 | dest: /etc/systemd/system/docker.service 26 | notify: 27 | - reload systemd 28 | - restart docker 29 | tags: 30 | - docker 31 | 32 | - name: ensure docker is running (and enable it at boot) 33 | become: yes 34 | service: 35 | name: docker 36 | state: started 37 | enabled: yes 38 | tags: 39 | - docker 40 | -------------------------------------------------------------------------------- /roles/docker/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "auths": { 3 | "{{ docker_registry_url }}": { 4 | "auth": "{{ docker_registry_auth }}", 5 | "email": "{{ docker_registry_email }}" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /roles/docker/templates/docker.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Application Container Engine 3 | Documentation=http://docs.docker.com 4 | After=docker.socket early-docker.target network.target 5 | Requires=docker.socket early-docker.target 6 | 7 | [Service] 8 | # the default is not to use systemd for cgroups because the delegate issues still 9 | # exists and systemd currently does not support the cgroup feature set required 10 | # for containers run by docker 11 | EnvironmentFile=-/run/flannel_docker_opts.env 12 | {% if http_proxy is defined and http_proxy != '' %} 13 | Environment="HTTP_PROXY={{ http_proxy }}" 14 | Environment="NO_PROXY={{ docker_proxy_exceptions }}" 15 | {% endif %} 16 | Environment="DOCKER_TMPDIR={{ docker_tmp_dir }}" 17 | Environment="DOCKER_CONTENT_TRUST={{ docker_content_trust }}" 18 | 19 | MountFlags=slave 20 | LimitNOFILE=1048576 21 | LimitNPROC=1048576 22 | LimitCORE=infinity 23 | ExecStart=/usr/lib/coreos/dockerd daemon {{ docker_customisations }} --host=fd:// $DOCKER_OPTS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ 24 | TimeoutStartSec=0 25 | # set delegate yes so that systemd does not reset the cgroups of docker containers 26 | Delegate=yes 27 | 28 | [Install] 29 | WantedBy=multi-user.target 30 | -------------------------------------------------------------------------------- /roles/handlers/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: reload systemd 2 | become: yes 3 | command: systemctl daemon-reload 4 | -------------------------------------------------------------------------------- /roles/helm/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2015 Capgemini 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and 
associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /roles/helm/README.md: -------------------------------------------------------------------------------- 1 | helm 2 | ========= 3 | 4 | Set up [helm](https://github.com/helm/helm) 5 | 6 | Requirements 7 | ------------ 8 | 9 | 10 | 11 | Role Variables 12 | -------------- 13 | ``` 14 | helm_url: "https://bintray.com/artifact/download/deis/helm/helm-0.5.0%2B1689ee4-linux-amd64.zip" 15 | helm_folder: "/opt/bin" 16 | helm_deis_enabled: false 17 | helm_packages_list: 18 | - { name: deis, repo: deis/workflow } 19 | ``` 20 | 21 | -------------------------------------------------------------------------------- /roles/helm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | helm_url: "https://bintray.com/artifact/download/deis/helm/helm-0.5.0%2B1689ee4-linux-amd64.zip" 2 | helm_folder: "/opt/bin" 3 | helm_deis_enabled: false 4 | helm_packages_list: 5 | - { name: deis, repo: deis/workflow, namespace: deis } 6 | -------------------------------------------------------------------------------- /roles/helm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/helm/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Alberto García Lamela 4 | description: 5 | company: Capgemini 6 | license: license (MIT) 7 | min_ansible_version: 1.2 8 | platforms: 9 | - name: CoreOS 10 | versions: 11 | categories: 12 | - cloud 13 | - system 14 | dependencies: 15 | - role: handlers 16 | -------------------------------------------------------------------------------- /roles/helm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: download and install helm 3 | become: yes 4 | become_user: root 5 | unarchive: 6 | src: "{{ helm_url }}" 7 | dest: "{{ helm_folder }}" 8 | copy: no 9 | environment: "{{ proxy_env }}" 10 | tags: 11 | - helm 12 | 13 | - name: install helm packages 14 | become: yes 15 | run_once: true 16 | when: "helm_{{ item.name }}_enabled | bool" 17 | command: "helm install {{ item.repo }} --namespace {{ item.namespace }}" 18 | environment: "{{ proxy_env }}" 19 | with_items: 20 | - "{{ helm_packages_list }}" 21 | tags: 22 | - helm 23 | 24 | - name: uninstall helm packages 25 | become: yes 26 | run_once: true 27 | when: 
"not helm_{{ item.name }}_enabled | bool" 28 | command: "helm uninstall {{ item.repo }} --namespace {{ item.namespace }}" 29 | environment: "{{ proxy_env }}" 30 | with_items: 31 | - "{{ helm_packages_list }}" 32 | tags: 33 | - helm 34 | -------------------------------------------------------------------------------- /roles/kube-master/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kube-master 3 | kube_podmaster_version: 1.1 4 | kube_etcd_servers: "{% for host in groups[etcd_servers_group] %}http://{{ hostvars[host].ansible_default_ipv4.address }}:4001{% if not loop.last %},{% endif %}{% endfor %}" 5 | kube_apiserver_bind_address: "{% if hostvars[inventory_hostname].private is defined %}{{ hostvars[inventory_hostname].private.ip }}{% else %}{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}{% endif %}" 6 | kube_master: "{% if groups[api_servers_group]|length > 1 and kube_apiserver_vip is defined %}https://{{ kube_apiserver_vip }}{% else %}http://127.0.0.1:8080{% endif %}" 7 | kube_apiserver_count: "{{ groups[api_servers_group]|length }}" 8 | 9 | # Service account users 10 | kube_users: 11 | kube: 12 | pass: changeme 13 | role: admin 14 | root: 15 | pass: changeme 16 | role: admin 17 | -------------------------------------------------------------------------------- /roles/kube-master/files/calico-system.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: calico-system 5 | -------------------------------------------------------------------------------- /roles/kube-master/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for kube-master 3 | - name: start kubelet 4 | become: yes 5 | service: 6 | name: kubelet 7 | state: started 8 | 9 | - name: restart kubelet 10 | become: yes 11 | service: 12 | name: kubelet 13 | state: restarted 14 | 15 | - name: restart calico-node 16 | become: yes 17 | service: 18 | name: calico-node 19 | state: restarted 20 | -------------------------------------------------------------------------------- /roles/kube-master/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Graham Taylor 4 | description: 5 | company: Capgemini 6 | license: license (MIT) 7 | min_ansible_version: 1.2 8 | platforms: 9 | - name: CoreOS 10 | categories: 11 | - cloud 12 | - system 13 | dependencies: 14 | - handlers 15 | -------------------------------------------------------------------------------- /roles/kube-master/tasks/calico.yml: -------------------------------------------------------------------------------- 1 | - name: deploy calico-node service 2 | become: yes 3 | template: 4 | src: calico-node.service.j2 5 | dest: /etc/systemd/system/calico-node.service 6 | owner: root 7 | group: root 8 | mode: 0644 9 | notify: 10 | - reload systemd 11 | - restart calico-node 12 | tags: 13 | - kube-master 14 | 15 | - name: ensure calico-node is running 16 | become: yes 17 | service: 18 | name: calico-node 19 | state: started 20 | enabled: yes 21 | tags: 22 | - kube-master 23 | 24 | - name: create calico-system namespace file 25 | become: yes 26 | copy: 27 | src: calico-system.yaml 28 | dest: "{{ kubernetes_addons_dir }}/calico-system.yaml" 29 | tags: 30 | - kube-master 31 | - kubernetes 32 | 
-------------------------------------------------------------------------------- /roles/kube-master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # provider is set in the inventory 2 | - include_vars: "{{ item }}" 3 | with_first_found: 4 | - "{{ provider }}.yml" 5 | - "default.yml" 6 | 7 | - name: create kubernetes dirs 8 | file: 9 | path: "{{ item }}" 10 | state: directory 11 | mode: 0755 12 | with_items: 13 | - "{{ kubernetes_config_dir }}" 14 | - "{{ kubernetes_addons_dir }}" 15 | - "{{ kubernetes_manifest_dir }}" 16 | - "{{ kubernetes_cni_dir }}" 17 | become: yes 18 | tags: 19 | - kube-master 20 | 21 | # kubelet-wrapper is only available on CoreOS 962.0.0+ so we make sure its here 22 | # to ensure backwards compatability. 23 | # See https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html 24 | - name: download kubelet-wrapper 25 | become: yes 26 | get_url: 27 | url: "{{ kubernetes_kubelet_wrapper }}" 28 | dest: /opt/bin/kubelet-wrapper 29 | mode: 0775 30 | tags: 31 | - kube-master 32 | 33 | - name: create service account users 34 | become: yes 35 | lineinfile: 36 | dest: "{{ kubernetes_users_dir }}/known_users.csv" 37 | create: yes 38 | line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}' 39 | with_dict: "{{ kube_users }}" 40 | notify: 41 | - restart kubelet 42 | tags: 43 | - kube-master 44 | 45 | # master 46 | - name: deploy kubelet service 47 | become: yes 48 | template: 49 | src: kubelet.service.j2 50 | dest: /etc/systemd/system/kubelet.service 51 | owner: root 52 | group: root 53 | mode: 0644 54 | notify: 55 | - reload systemd 56 | - restart kubelet 57 | tags: 58 | - kube-master 59 | 60 | - name: ensure kubelet is running 61 | become: yes 62 | service: 63 | name: kubelet 64 | state: started 65 | enabled: yes 66 | tags: 67 | - kube-master 68 | 69 | - name: deploy kube-master manifests 70 | become: yes 71 | template: 72 | src: "{{ item.src }}" 73 | dest: "{{ item.dest }}" 74 | owner: root 75 | group: root 76 | mode: 0644 77 | with_items: 78 | - src: kube-proxy.yaml.j2 79 | dest: "{{ kubernetes_manifest_dir }}/kube-proxy.yaml" 80 | - src: kube-apiserver.yaml.j2 81 | dest: "{{ kubernetes_manifest_dir }}/kube-apiserver.yaml" 82 | - src: kube-controller-manager.yaml.j2 83 | dest: "{{ kubernetes_manifest_dir }}/kube-controller-manager.yaml" 84 | - src: kube-scheduler.yaml.j2 85 | dest: "{{ kubernetes_manifest_dir }}/kube-scheduler.yaml" 86 | - src: master-kubeconfig.j2 87 | dest: "{{ kubernetes_config_dir }}/master-kubeconfig" 88 | - src: policy-controller.yaml.j2 89 | dest: "{{ kubernetes_manifest_dir }}/policy-controller.yaml" 90 | - src: "10-{{ network_plugin }}.conf.j2" 91 | dest: "{{ kubernetes_cni_dir }}/10-{{ network_plugin }}.conf" 92 | notify: 93 | - reload systemd 94 | - restart kubelet 95 | - restart calico-node 96 | tags: 97 | - kube-master 98 | 99 | - name: wait for kube-apiserver up 100 | wait_for: 101 | port: 8080 102 | tags: 103 | - kube-master 104 | 105 | - include: calico.yml 106 | tags: 107 | - calico 108 | - kube-master 109 | -------------------------------------------------------------------------------- /roles/kube-master/templates/10-calico.conf.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name": "calico", 3 | "type": "flannel", 4 | "delegate": { 5 | "type": "calico", 6 | "etcd_endpoints": "{{ kube_etcd_servers }}", 7 | "log_level": "none", 8 | "log_level_stderr": "info", 9 | "hostname": "{{ ansible_default_ipv4.address }}", 10 | "policy": { 
11 | "type": "k8s", 12 | "k8s_api_root": "http://127.0.0.1:8080/api/v1/" 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /roles/kube-master/templates/calico-node.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Calico per-host agent 3 | Requires=network-online.target 4 | After=network-online.target 5 | 6 | [Service] 7 | Slice=machine.slice 8 | KillMode=mixed 9 | Restart=always 10 | TimeoutStartSec=0 11 | Environment=CALICO_DISABLE_FILE_LOGGING=true 12 | Environment=HOSTNAME={{ ansible_default_ipv4.address }} 13 | Environment=IP={{ ansible_default_ipv4.address }} 14 | Environment=FELIX_FELIXHOSTNAME={{ ansible_default_ipv4.address }} 15 | Environment=CALICO_NETWORKING=false 16 | Environment=NO_DEFAULT_POOLS=true 17 | Environment=ETCD_ENDPOINTS={{ kube_etcd_servers }} 18 | ExecStart=/usr/bin/rkt run \ 19 | --inherit-env \ 20 | --stage1-from-dir=stage1-fly.aci \ 21 | --volume=modules,kind=host,source=/lib/modules,readOnly=false \ 22 | --mount=volume=modules,target=/lib/modules \ 23 | --trust-keys-from-https quay.io/calico/node:v0.20.0 24 | 25 | [Install] 26 | WantedBy=multi-user.target 27 | -------------------------------------------------------------------------------- /roles/kube-master/templates/kube-apiserver.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-apiserver 10 | image: {{ kubernetes_image }}:{{ kubernetes_version }} 11 | command: 12 | - /hyperkube 13 | - apiserver 14 | - --allow-privileged=true 15 | - --apiserver-count={{ kube_apiserver_count }} 16 | - --bind-address={{ kube_apiserver_bind_address }} 17 | - --insecure-bind-address=0.0.0.0 18 | - --etcd-servers={{ kube_etcd_servers }} 19 | - --service-cluster-ip-range={{ kubernetes_service_ip_range }} 20 | - --secure-port=443 21 | - --insecure_port=8080 22 | - --advertise-address={{ ansible_default_ipv4.address }} 23 | - --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota 24 | - --tls-cert-file={{ kubernetes_cert_dir }}/master.pem 25 | - --tls-private-key-file={{ kubernetes_cert_dir }}/master-key.pem 26 | - --client-ca-file={{ kubernetes_cert_dir }}/ca.pem 27 | - --service-account-key-file={{ kubernetes_cert_dir }}/master-key.pem 28 | - --basic-auth-file={{ kubernetes_users_dir }}/known_users.csv 29 | - --runtime-config=extensions/v1beta1=true,extensions/v1beta1/networkpolicies=true 30 | - --v={{ kubernetes_log_level }} 31 | ports: 32 | - containerPort: 443 33 | hostPort: 443 34 | name: https 35 | - containerPort: 8080 36 | hostPort: 8080 37 | name: local 38 | volumeMounts: 39 | - mountPath: {{ kubernetes_users_dir }} 40 | name: basic-auth-kubernetes 41 | readOnly: true 42 | - mountPath: {{ kubernetes_cert_dir }} 43 | name: ssl-certs-kubernetes 44 | readOnly: true 45 | - mountPath: /etc/ssl/certs 46 | name: ssl-certs-host 47 | readOnly: true 48 | volumes: 49 | - hostPath: 50 | path: {{ kubernetes_users_dir }} 51 | name: basic-auth-kubernetes 52 | - hostPath: 53 | path: {{ kubernetes_cert_dir }} 54 | name: ssl-certs-kubernetes 55 | - hostPath: 56 | path: /usr/share/ca-certificates 57 | name: ssl-certs-host 58 | -------------------------------------------------------------------------------- /roles/kube-master/templates/kube-controller-manager.yaml.j2: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-controller-manager 5 | namespace: kube-system 6 | spec: 7 | containers: 8 | - name: kube-controller-manager 9 | image: {{ kubernetes_image }}:{{ kubernetes_version }} 10 | command: 11 | - /hyperkube 12 | - controller-manager 13 | - --master=http://127.0.0.1:8080 14 | - --leader-elect=true 15 | - --service-account-private-key-file={{ kubernetes_cert_dir }}/master-key.pem 16 | - --root-ca-file={{ kubernetes_cert_dir }}/ca.pem 17 | - --v={{ kubernetes_log_level }} 18 | livenessProbe: 19 | httpGet: 20 | host: 127.0.0.1 21 | path: /healthz 22 | port: 10252 23 | initialDelaySeconds: 15 24 | timeoutSeconds: 1 25 | volumeMounts: 26 | - mountPath: {{ kubernetes_cert_dir }} 27 | name: ssl-certs-kubernetes 28 | readOnly: true 29 | - mountPath: /etc/ssl/certs 30 | name: ssl-certs-host 31 | readOnly: true 32 | hostNetwork: true 33 | volumes: 34 | - hostPath: 35 | path: {{ kubernetes_cert_dir }} 36 | name: ssl-certs-kubernetes 37 | - hostPath: 38 | path: /usr/share/ca-certificates 39 | name: ssl-certs-host 40 | -------------------------------------------------------------------------------- /roles/kube-master/templates/kube-proxy.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-proxy 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-proxy 10 | image: {{ kubernetes_image }}:{{ kubernetes_version }} 11 | command: 12 | - /hyperkube 13 | - proxy 14 | - --master={{ kube_master }} 15 | - --kubeconfig={{ kubernetes_config_dir }}/master-kubeconfig 16 | - --proxy-mode=iptables 17 | - --v={{ kubernetes_log_level }} 18 | securityContext: 19 | privileged: true 20 | volumeMounts: 21 | - mountPath: /etc/ssl/certs 22 | name: ssl-certs 23 | - mountPath: {{ kubernetes_config_dir }}/master-kubeconfig 24 | name: kubeconfig 25 | readOnly: true 26 | - mountPath: {{ kubernetes_cert_dir }} 27 | name: etc-kube-ssl 28 | readOnly: true 29 | volumes: 30 | - name: ssl-certs 31 | hostPath: 32 | path: /usr/share/ca-certificates 33 | - name: kubeconfig 34 | hostPath: 35 | path: "{{ kubernetes_config_dir }}/master-kubeconfig" 36 | - name: etc-kube-ssl 37 | hostPath: 38 | path: "{{ kubernetes_cert_dir }}" 39 | -------------------------------------------------------------------------------- /roles/kube-master/templates/kube-scheduler.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-scheduler 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-scheduler 10 | image: {{ kubernetes_image }}:{{ kubernetes_version }} 11 | command: 12 | - /hyperkube 13 | - scheduler 14 | - --leader-elect=true 15 | - --master=http://127.0.0.1:8080 16 | - --v={{ kubernetes_log_level }} 17 | livenessProbe: 18 | httpGet: 19 | host: 127.0.0.1 20 | path: /healthz 21 | port: 10251 22 | initialDelaySeconds: 15 23 | timeoutSeconds: 1 24 | -------------------------------------------------------------------------------- /roles/kube-master/templates/kubelet.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | Restart=always 7 | RestartSec=10 8 | ExecStartPre=/usr/bin/mkdir -p 
/etc/kubernetes/manifests 9 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/cni/net.d 10 | EnvironmentFile=/etc/environment 11 | Environment="RKT_OPTS=--volume var-log,kind=host,source=/var/log --mount volume=var-log,target=/var/log" 12 | Environment=KUBELET_VERSION={{ kubernetes_version }} 13 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers 14 | ExecStart=/opt/bin/kubelet-wrapper \ 15 | --api-servers=http://127.0.0.1:8080 \ 16 | --network-plugin-dir=/etc/kubernetes/cni/net.d \ 17 | --network-plugin=cni \ 18 | --register-schedulable=false \ 19 | --allow-privileged=true \ 20 | --config=/etc/kubernetes/manifests \ 21 | --hostname-override=${COREOS_PRIVATE_IPV4} \ 22 | --cluster_dns={{ kubernetes_dns_service_ip }} \ 23 | --cluster_domain={{ dns_domain }} \ 24 | --v={{ kubernetes_log_level }}{% if cloud_provider is defined %} \ 25 | --cloud-provider={{ cloud_provider }} {% endif %} 26 | 27 | [Install] 28 | WantedBy=multi-user.target 29 | -------------------------------------------------------------------------------- /roles/kube-master/templates/master-kubeconfig.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: local 5 | cluster: 6 | certificate-authority: {{ kubernetes_cert_dir }}/ca.pem 7 | users: 8 | - name: kubelet 9 | user: 10 | client-certificate: {{ kubernetes_cert_dir }}/kubelet.pem 11 | client-key: {{ kubernetes_cert_dir }}/kubelet-key.pem 12 | contexts: 13 | - context: 14 | cluster: local 15 | user: kubelet 16 | name: kubelet-context 17 | current-context: kubelet-context 18 | -------------------------------------------------------------------------------- /roles/kube-master/templates/policy-controller.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: calico-policy-controller 5 | namespace: calico-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | # The Calico policy agent. 10 | - name: k8s-policy-controller 11 | image: calico/kube-policy-controller:v0.2.0 12 | env: 13 | - name: ETCD_ENDPOINTS 14 | value: "{{ kube_etcd_servers }}" 15 | - name: K8S_API 16 | value: "http://127.0.0.1:8080" 17 | - name: LEADER_ELECTION 18 | value: "true" 19 | # Leader election container used by the policy agent. 
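# It runs the election named calico-policy-election in the calico-system namespace and publishes the current leader over HTTP on 127.0.0.1:4040, which the policy controller (LEADER_ELECTION=true above) is expected to consult.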
20 | - name: leader-elector 21 | image: quay.io/calico/leader-elector:v0.1.0 22 | imagePullPolicy: IfNotPresent 23 | args: 24 | - "--election=calico-policy-election" 25 | - "--election-namespace=calico-system" 26 | - "--http=127.0.0.1:4040" 27 | -------------------------------------------------------------------------------- /roles/kube-master/vars/aws.yml: -------------------------------------------------------------------------------- 1 | cloud_provider: aws 2 | -------------------------------------------------------------------------------- /roles/kube-master/vars/default.yml: -------------------------------------------------------------------------------- 1 | # default vars file for region/zone used by local/vagrant setups 2 | region: local 3 | zone: local 4 | -------------------------------------------------------------------------------- /roles/kube-master/vars/digitalocean.yml: -------------------------------------------------------------------------------- 1 | region: "{{ hostvars[inventory_hostname].region }}" 2 | zone: "{{ hostvars[inventory_hostname].region }}" 3 | -------------------------------------------------------------------------------- /roles/kube-worker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for kube-worker 3 | kube_api_servers: "{% for host in groups[api_servers_group] %}https://{{ hostvars[host].ansible_default_ipv4.address }}{% if not loop.last %},{% endif %}{% endfor %}" 4 | kube_master: "{% if groups[api_servers_group]|length > 1 and kube_apiserver_vip is defined %}https://{{ kube_apiserver_vip }}{% else %}https://{{ hostvars[groups[api_servers_group][0]].ansible_default_ipv4.address }}{% endif %}" 5 | kube_etcd_servers: "{% for host in groups[etcd_servers_group] %}http://{{ hostvars[host].ansible_default_ipv4.address }}:4001{% if not loop.last %},{% endif %}{% endfor %}" 6 | schedulable: true 7 | node_labels: "" 8 | -------------------------------------------------------------------------------- /roles/kube-worker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for kube-apiserver 3 | - name: start kube-apiserver 4 | become: yes 5 | service: 6 | name: apiserver 7 | state: started 8 | 9 | - name: restart kube-apiserver 10 | become: yes 11 | service: 12 | name: apiserver 13 | state: restarted 14 | -------------------------------------------------------------------------------- /roles/kube-worker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Graham Taylor 4 | description: 5 | company: Capgemini 6 | license: license (MIT) 7 | min_ansible_version: 1.2 8 | platforms: 9 | - name: CoreOS 10 | categories: 11 | - cloud 12 | - system 13 | dependencies: 14 | - handlers 15 | -------------------------------------------------------------------------------- /roles/kube-worker/tasks/calico.yml: -------------------------------------------------------------------------------- 1 | - name: deploy calico-node service 2 | become: yes 3 | template: 4 | src: calico-node.service.j2 5 | dest: /etc/systemd/system/calico-node.service 6 | notify: 7 | - reload systemd 8 | - restart calico-node 9 | tags: 10 | - kube-worker 11 | 12 | - name: ensure calico-node is running 13 | become: yes 14 | service: 15 | name: calico-node 16 | state: started 17 | enabled: yes 18 | tags: 19 | - kube-worker 20 | 
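# The worker's calico-node.service.j2 is essentially the same unit as on the masters: the same rkt run of quay.io/calico/node:v0.20.0 with CALICO_NETWORKING=false, pointed at the shared etcd endpoints.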
-------------------------------------------------------------------------------- /roles/kube-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # provider is set in the inventory 2 | - include_vars: "{{ item }}" 3 | with_first_found: 4 | - "{{ provider }}.yml" 5 | - "default.yml" 6 | 7 | - name: create kubernetes dirs 8 | file: 9 | path: "{{ item }}" 10 | state: directory 11 | mode: 0755 12 | with_items: 13 | - "{{ kubernetes_config_dir }}" 14 | - "{{ kubernetes_cni_dir }}" 15 | become: yes 16 | tags: 17 | - kube-worker 18 | 19 | # kubelet-wrapper is only available on CoreOS 962.0.0+ so we make sure its here 20 | # to ensure backwards compatability. 21 | # See https://coreos.com/kubernetes/docs/latest/kubelet-wrapper.html 22 | - name: download kubelet-wrapper 23 | become: yes 24 | get_url: 25 | url: "{{ kubernetes_kubelet_wrapper }}" 26 | dest: /opt/bin/kubelet-wrapper 27 | mode: 0775 28 | tags: 29 | - kube-worker 30 | 31 | # worker 32 | - name: deploy kubelet service 33 | become: yes 34 | template: 35 | src: kubelet.service.j2 36 | dest: /etc/systemd/system/kubelet.service 37 | notify: 38 | - reload systemd 39 | - restart kubelet 40 | tags: 41 | - kube-worker 42 | 43 | - name: ensure kubelet is running 44 | become: yes 45 | service: 46 | name: kubelet 47 | state: started 48 | enabled: yes 49 | tags: 50 | - kube-worker 51 | 52 | - name: deploy kube-worker manifests 53 | become: yes 54 | template: 55 | src: "{{ item.src }}" 56 | dest: "{{ item.dest }}" 57 | with_items: 58 | - src: worker-kubeconfig.j2 59 | dest: "{{ kubernetes_config_dir }}/worker-kubeconfig" 60 | - src: kube-proxy.yaml.j2 61 | dest: "{{ kubernetes_manifest_dir }}/kube-proxy.yaml" 62 | - src: "10-{{ network_plugin }}.conf.j2" 63 | dest: "{{ kubernetes_cni_dir }}/10-{{ network_plugin }}.conf" 64 | tags: 65 | - kube-worker 66 | 67 | - include: calico.yml 68 | tags: 69 | - calico 70 | - kube-worker -------------------------------------------------------------------------------- /roles/kube-worker/templates/10-calico.conf.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name": "calico", 3 | "type": "flannel", 4 | "delegate": { 5 | "type": "calico", 6 | "etcd_endpoints": "{{ kube_etcd_servers }}", 7 | "log_level": "none", 8 | "log_level_stderr": "info", 9 | "hostname": "{{ ansible_default_ipv4.address }}", 10 | "policy": { 11 | "type": "k8s", 12 | "k8s_api_root": "{{ kube_master }}:443/api/v1/", 13 | "k8s_client_key": "{{ kubernetes_cert_dir }}/kubelet-key.pem", 14 | "k8s_client_certificate": "{{ kubernetes_cert_dir }}/kubelet.pem" 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /roles/kube-worker/templates/calico-node.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Calico node for network policy 3 | Requires=network-online.target 4 | After=network-online.target 5 | 6 | [Service] 7 | Slice=machine.slice 8 | KillMode=mixed 9 | Restart=always 10 | TimeoutStartSec=0 11 | Environment=CALICO_DISABLE_FILE_LOGGING=true 12 | Environment=HOSTNAME={{ ansible_default_ipv4.address }} 13 | Environment=IP={{ ansible_default_ipv4.address }} 14 | Environment=FELIX_FELIXHOSTNAME={{ ansible_default_ipv4.address }} 15 | Environment=CALICO_NETWORKING=false 16 | Environment=NO_DEFAULT_POOLS=true 17 | Environment=ETCD_ENDPOINTS={{ kube_etcd_servers }} 18 | ExecStart=/usr/bin/rkt run \ 19 | --inherit-env \ 20 | 
--stage1-from-dir=stage1-fly.aci \ 21 | --volume=modules,kind=host,source=/lib/modules,readOnly=false \ 22 | --mount=volume=modules,target=/lib/modules \ 23 | --trust-keys-from-https quay.io/calico/node:v0.20.0 24 | 25 | [Install] 26 | WantedBy=multi-user.target 27 | -------------------------------------------------------------------------------- /roles/kube-worker/templates/kube-proxy.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-proxy 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-proxy 10 | image: {{ kubernetes_image }}:{{ kubernetes_version }} 11 | command: 12 | - /hyperkube 13 | - proxy 14 | - --master={{ kube_master }} 15 | - --kubeconfig={{ kubernetes_config_dir }}/worker-kubeconfig 16 | - --proxy-mode=iptables 17 | - --v={{ kubernetes_log_level }} 18 | securityContext: 19 | privileged: true 20 | volumeMounts: 21 | - mountPath: /etc/ssl/certs 22 | name: ssl-certs 23 | - mountPath: {{ kubernetes_config_dir }}/worker-kubeconfig 24 | name: kubeconfig 25 | readOnly: true 26 | - mountPath: {{ kubernetes_cert_dir }} 27 | name: etc-kube-ssl 28 | readOnly: true 29 | volumes: 30 | - name: ssl-certs 31 | hostPath: 32 | path: /usr/share/ca-certificates 33 | - name: kubeconfig 34 | hostPath: 35 | path: "{{ kubernetes_config_dir }}/worker-kubeconfig" 36 | - name: etc-kube-ssl 37 | hostPath: 38 | path: "{{ kubernetes_cert_dir }}" 39 | -------------------------------------------------------------------------------- /roles/kube-worker/templates/kubelet.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | Restart=always 7 | RestartSec=10 8 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests 9 | EnvironmentFile=/etc/environment 10 | Environment="RKT_OPTS=--volume var-log,kind=host,source=/var/log --mount volume=var-log,target=/var/log" 11 | Environment=KUBELET_VERSION={{ kubernetes_version }} 12 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers 13 | ExecStart=/opt/bin/kubelet-wrapper \ 14 | --api-servers={{ kube_api_servers }} \ 15 | --network-plugin-dir=/etc/kubernetes/cni/net.d \ 16 | --network-plugin=cni \ 17 | --allow-privileged=true \ 18 | --register-schedulable={{ schedulable }} \ 19 | {% if node_labels != "" %} 20 | --node-labels="{{ node_labels }}" \ 21 | {% endif %} 22 | --config=/etc/kubernetes/manifests \ 23 | --hostname-override=${COREOS_PRIVATE_IPV4} \ 24 | --cluster_dns={{ kubernetes_dns_service_ip }} \ 25 | --cluster_domain={{ dns_domain }} \ 26 | --kubeconfig={{ kubernetes_config_dir }}/worker-kubeconfig \ 27 | --tls-cert-file={{ kubernetes_cert_dir }}/kubelet.pem \ 28 | --tls-private-key-file={{ kubernetes_cert_dir }}/kubelet-key.pem \ 29 | --v={{ kubernetes_log_level }}{% if cloud_provider is defined %} \ 30 | --cloud-provider={{ cloud_provider }} {% endif %} 31 | 32 | [Install] 33 | WantedBy=multi-user.target 34 | -------------------------------------------------------------------------------- /roles/kube-worker/templates/worker-kubeconfig.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: local 5 | cluster: 6 | certificate-authority: {{ kubernetes_cert_dir }}/ca.pem 7 | users: 8 | - name: kubelet 9 | user: 10 | client-certificate: {{ kubernetes_cert_dir 
}}/kubelet.pem 11 | client-key: {{ kubernetes_cert_dir }}/kubelet-key.pem 12 | contexts: 13 | - context: 14 | cluster: local 15 | user: kubelet 16 | name: kubelet-context 17 | current-context: kubelet-context 18 | -------------------------------------------------------------------------------- /roles/kube-worker/vars/aws.yml: -------------------------------------------------------------------------------- 1 | cloud_provider: aws 2 | -------------------------------------------------------------------------------- /roles/kube-worker/vars/default.yml: -------------------------------------------------------------------------------- 1 | # default vars file for region/zone used by local/vagrant setups 2 | region: local 3 | zone: local 4 | -------------------------------------------------------------------------------- /roles/kube-worker/vars/digitalocean.yml: -------------------------------------------------------------------------------- 1 | region: "{{ hostvars[inventory_hostname].region }}" 2 | zone: "{{ hostvars[inventory_hostname].region }}" 3 | -------------------------------------------------------------------------------- /roles/kubectl/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2015 Capgemini 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /roles/kubectl/README.md: -------------------------------------------------------------------------------- 1 | kubectl 2 | ========= 3 | 4 | Set up a [kubectl](http://kubernetes.io/docs/user-guide/kubectl-overview/) 5 | 6 | Requirements 7 | ------------ 8 | 9 | 10 | 11 | Role Variables 12 | -------------- 13 | ``` 14 | kubectl_version: 'v1.2.0' 15 | kubectl_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubectl_version }}/bin/linux/amd64/kubectl" 16 | kubectl_bin: '/opt/bin/kubectl' 17 | kubectl_certificate_authority: '/etc/kubernetes/ssl/ca.pem' 18 | kubectl_server: "https://{{ ssh_host }}" 19 | kubectl_cluster_name: 'default-cluster' 20 | kubectl_context_cluster: 'default-cluster' 21 | kubectl_context_user: 'default-cluster' 22 | kubectl_context_name: 'default-system' 23 | kubectl_context_current: 'default-system' 24 | kubectl_users_name: 'default-admin' 25 | kubectl_client_certificate: '/etc/kubernetes/ssl/master.pem' 26 | kubectl_client_key: '/etc/kubernetes/ssl/master-key.pem' 27 | 28 | 29 | ``` 30 | 31 | -------------------------------------------------------------------------------- /roles/kubectl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | kubectl_version: 'v1.2.4' 2 | kubectl_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubectl_version }}/bin/linux/amd64/kubectl" 3 | kubectl_bin: '/opt/bin/kubectl' 4 | kubectl_certificate_authority: '/etc/kubernetes/ssl/ca.pem' 5 | kubectl_server: "https://{% if hostvars[inventory_hostname].private is defined %}{{ hostvars[inventory_hostname].private.ip }}{% else %}{{ hostvars[inventory_hostname].ansible_default_ipv4.address }}{% endif %}" 6 | kubectl_cluster_name: 'default-cluster' 7 | kubectl_context_cluster: 'default-cluster' 8 | kubectl_context_user: 'default-admin' 9 | kubectl_context_name: 'default-system' 10 | kubectl_context_current: 'default-system' 11 | kubectl_users_name: 'default-admin' 12 | kubectl_client_certificate: '/etc/kubernetes/ssl/master.pem' 13 | kubectl_client_key: '/etc/kubernetes/ssl/master-key.pem' 14 | -------------------------------------------------------------------------------- /roles/kubectl/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/kubectl/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Alberto García Lamela 4 | description: 5 | company: Capgemini 6 | license: license (MIT) 7 | min_ansible_version: 1.2 8 | platforms: 9 | - name: CoreOS 10 | versions: 11 | categories: 12 | - cloud 13 | - system 14 | dependencies: 15 | - role: handlers 16 | -------------------------------------------------------------------------------- /roles/kubectl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: download kubectl 3 | become: yes 4 | become_user: root 5 | get_url: 6 | url: "{{ kubectl_url }}" 7 | dest: "{{ kubectl_bin }}" 8 | mode: 0755 9 | validate_certs: no 10 | force: true 11 | environment: "{{ proxy_env }}" 12 | tags: 13 | - kubectl 14 | 15 | - name: Creates kubectl user directory 16 | become: yes 17 | become_user: root 18 | file: 19 | path: /root/.kube 20 | state: directory 21 | 22 | - name: deploy kubectl config 23 | become: yes 24 | 
become_user: root 25 | template: 26 | src: "{{ item.src }}" 27 | dest: "{{ item.dest }}" 28 | with_items: 29 | - src: config.j2 30 | dest: /root/.kube/config 31 | tags: 32 | - kubectl 33 | 34 | -------------------------------------------------------------------------------- /roles/kubectl/templates/config.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | certificate-authority: "{{ kubectl_certificate_authority }}" 5 | server: "{{ kubectl_server }}" 6 | name: "{{ kubectl_cluster_name }}" 7 | contexts: 8 | - context: 9 | cluster: "{{ kubectl_context_cluster }}" 10 | user: "{{ kubectl_context_user }}" 11 | name: "{{ kubectl_context_name }}" 12 | current-context: "{{ kubectl_context_current }}" 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: "{{ kubectl_users_name }}" 17 | user: 18 | client-certificate: "{{ kubectl_client_certificate }}" 19 | client-key: "{{ kubectl_client_key }}" 20 | -------------------------------------------------------------------------------- /site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all:!role=bastion 3 | gather_facts: false 4 | vars: 5 | ansible_python_interpreter: "PATH=/home/core/bin:$PATH python" 6 | tasks: 7 | - name: Wait for ssh port to become available from bastion server. 8 | wait_for: 9 | port: "{{ ansible_port }}" 10 | host: "{{ ansible_host }}" 11 | delay: 10 12 | timeout: 180 13 | delegate_to: "{{ bastion_ip }}" 14 | sudo: False 15 | when: bastion_ip is defined 16 | - name: Wait for port 22 to become available from local server. 17 | local_action: "wait_for port={{ ansible_port }} host={{ ansible_host }} delay=10 timeout=180" 18 | sudo: False 19 | when: bastion_ip is not defined 20 | 21 | - include: playbooks/coreos-bootstrap.yml 22 | 23 | - hosts: all 24 | roles: 25 | - docker 26 | 27 | - hosts: masters 28 | roles: 29 | - kubectl 30 | - kube-master 31 | - addons 32 | 33 | - hosts: workers 34 | roles: 35 | - kube-worker 36 | 37 | - hosts: edge-routers 38 | vars: 39 | schedulable: false 40 | node_labels: "role=edge-router" 41 | roles: 42 | - kube-worker 43 | 44 | - include: helm.yml 45 | -------------------------------------------------------------------------------- /terraform/aws/elb/main.tf: -------------------------------------------------------------------------------- 1 | variable "elb_name" { default = "kube-master" } 2 | variable "health_check_target" { default = "HTTP:8080/healthz" } 3 | variable "instances" { type = "list" } 4 | variable "subnets" { type = "list" } 5 | variable "security_groups" {} 6 | 7 | resource "aws_elb" "kube_master" { 8 | name = "${var.elb_name}" 9 | subnets = ["${var.subnets}"] 10 | security_groups = ["${var.security_groups}"] 11 | instances = ["${var.instances}"] 12 | cross_zone_load_balancing = true 13 | 14 | health_check { 15 | healthy_threshold = 2 16 | unhealthy_threshold = 2 17 | timeout = 3 18 | target = "${var.health_check_target}" 19 | interval = 30 20 | } 21 | 22 | listener { 23 | instance_port = 443 24 | instance_protocol = "tcp" 25 | lb_port = 443 26 | lb_protocol = "tcp" 27 | } 28 | 29 | listener { 30 | instance_port = 80 31 | instance_protocol = "tcp" 32 | lb_port = 80 33 | lb_protocol = "tcp" 34 | } 35 | 36 | listener { 37 | instance_port = 8080 38 | instance_protocol = "tcp" 39 | lb_port = 8080 40 | lb_protocol = "tcp" 41 | } 42 | 43 | tags { 44 | Name = "${var.elb_name}" 45 | } 46 | } 47 | 48 | # outputs 49 | output "elb_id" { value = 
"${aws_elb.kube_master.id}" } 50 | output "elb_name" { value = "${aws_elb.kube_master.name}" } 51 | output "elb_dns_name" { value = "${aws_elb.kube_master.dns_name}" } 52 | -------------------------------------------------------------------------------- /terraform/aws/iam/edge-router-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "s3:*", 7 | "Resource": [ 8 | "arn:aws:s3:::kubernetes-*" 9 | ] 10 | }, 11 | { 12 | "Effect": "Allow", 13 | "Action": "ec2:Describe*", 14 | "Resource": "*" 15 | }, 16 | { 17 | "Effect": "Allow", 18 | "Action": "ec2:AttachVolume", 19 | "Resource": "*" 20 | }, 21 | { 22 | "Effect": "Allow", 23 | "Action": "ec2:DetachVolume", 24 | "Resource": "*" 25 | }, 26 | { 27 | "Effect": "Allow", 28 | "Action": [ 29 | "ecr:GetAuthorizationToken", 30 | "ecr:BatchCheckLayerAvailability", 31 | "ecr:GetDownloadUrlForLayer", 32 | "ecr:GetRepositoryPolicy", 33 | "ecr:DescribeRepositories", 34 | "ecr:ListImages", 35 | "ecr:BatchGetImage" 36 | ], 37 | "Resource": "*" 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /terraform/aws/iam/edge-router-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { "Service": "ec2.amazonaws.com"}, 7 | "Action": "sts:AssumeRole" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /terraform/aws/iam/main.tf: -------------------------------------------------------------------------------- 1 | # master 2 | resource "aws_iam_role" "master_role" { 3 | name = "master_role" 4 | path = "/" 5 | assume_role_policy = "${file("${path.module}/master-role.json")}" 6 | } 7 | 8 | resource "aws_iam_role_policy" "master_policy" { 9 | name = "master_policy" 10 | role = "${aws_iam_role.master_role.id}" 11 | policy = "${file("${path.module}/master-policy.json")}" 12 | } 13 | 14 | resource "aws_iam_instance_profile" "master_profile" { 15 | name = "master_profile" 16 | roles = ["${aws_iam_role.master_role.name}"] 17 | } 18 | 19 | # worker 20 | resource "aws_iam_role" "worker_role" { 21 | name = "worker_role" 22 | path = "/" 23 | assume_role_policy = "${file("${path.module}/worker-role.json")}" 24 | } 25 | 26 | resource "aws_iam_role_policy" "worker_policy" { 27 | name = "worker_policy" 28 | role = "${aws_iam_role.worker_role.id}" 29 | policy = "${file("${path.module}/worker-policy.json")}" 30 | } 31 | 32 | resource "aws_iam_instance_profile" "worker_profile" { 33 | name = "worker_profile" 34 | roles = ["${aws_iam_role.worker_role.name}"] 35 | } 36 | 37 | # edge-router 38 | resource "aws_iam_role" "edge-router_role" { 39 | name = "edge-router_role" 40 | path = "/" 41 | assume_role_policy = "${file("${path.module}/edge-router-role.json")}" 42 | } 43 | 44 | resource "aws_iam_role_policy" "edge-router_policy" { 45 | name = "edge-router_policy" 46 | role = "${aws_iam_role.edge-router_role.id}" 47 | policy = "${file("${path.module}/edge-router-policy.json")}" 48 | } 49 | 50 | resource "aws_iam_instance_profile" "edge-router_profile" { 51 | name = "edge-router_profile" 52 | roles = ["${aws_iam_role.edge-router_role.name}"] 53 | } 54 | 55 | # outputs 56 | output "master_profile_name" { 57 | value = "${aws_iam_instance_profile.master_profile.name}" 58 | } 59 | output "worker_profile_name" { 60 | 
value = "${aws_iam_instance_profile.worker_profile.name}" 61 | } 62 | output "edge-router_profile_name" { 63 | value = "${aws_iam_instance_profile.edge-router_profile.name}" 64 | } 65 | -------------------------------------------------------------------------------- /terraform/aws/iam/master-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": ["ec2:*"], 7 | "Resource": ["*"] 8 | }, 9 | { 10 | "Effect": "Allow", 11 | "Action": ["elasticloadbalancing:*"], 12 | "Resource": ["*"] 13 | }, 14 | { 15 | "Effect": "Allow", 16 | "Action": "s3:*", 17 | "Resource": [ 18 | "arn:aws:s3:::kubernetes-*" 19 | ] 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /terraform/aws/iam/master-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { "Service": "ec2.amazonaws.com"}, 7 | "Action": "sts:AssumeRole" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /terraform/aws/iam/worker-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "s3:*", 7 | "Resource": [ 8 | "arn:aws:s3:::kubernetes-*" 9 | ] 10 | }, 11 | { 12 | "Effect": "Allow", 13 | "Action": "ec2:Describe*", 14 | "Resource": "*" 15 | }, 16 | { 17 | "Effect": "Allow", 18 | "Action": "ec2:AttachVolume", 19 | "Resource": "*" 20 | }, 21 | { 22 | "Effect": "Allow", 23 | "Action": "ec2:DetachVolume", 24 | "Resource": "*" 25 | }, 26 | { 27 | "Effect": "Allow", 28 | "Action": [ 29 | "ecr:GetAuthorizationToken", 30 | "ecr:BatchCheckLayerAvailability", 31 | "ecr:GetDownloadUrlForLayer", 32 | "ecr:GetRepositoryPolicy", 33 | "ecr:DescribeRepositories", 34 | "ecr:ListImages", 35 | "ecr:BatchGetImage" 36 | ], 37 | "Resource": "*" 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /terraform/aws/iam/worker-role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { "Service": "ec2.amazonaws.com"}, 7 | "Action": "sts:AssumeRole" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /terraform/aws/keypair/main.tf: -------------------------------------------------------------------------------- 1 | # input variables 2 | variable "short_name" { default = "kube" } 3 | variable "public_key" {} 4 | 5 | # SSH keypair for the instances 6 | resource "aws_key_pair" "default" { 7 | key_name = "${var.short_name}" 8 | public_key = "${var.public_key}" 9 | } 10 | 11 | # output variables 12 | output "keypair_name" { 13 | value = "${aws_key_pair.default.key_name}" 14 | } 15 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/.gitignore: -------------------------------------------------------------------------------- 1 | etcd_discovery_url.txt 2 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/bastion-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | 
#cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | proxy: on 6 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | discovery: ${etcd_discovery_url} 8 | fleet: 9 | metadata: "role=bastion,region=${region}" 10 | etcd_servers: "http://localhost:2379" 11 | locksmith: 12 | endpoint: "http://localhost:2379" 13 | units: 14 | - name: docker.service 15 | command: start 16 | - name: etcd2.service 17 | command: start 18 | update: 19 | reboot-strategy: best-effort 20 | manage_etc_hosts: localhost 21 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/bastion-server.tf: -------------------------------------------------------------------------------- 1 | # Bastion server 2 | module "bastion_amitype" { 3 | source = "github.com/terraform-community-modules/tf_aws_virttype" 4 | instance_type = "${var.bastion_instance_type}" 5 | } 6 | 7 | module "bastion_ami" { 8 | source = "github.com/terraform-community-modules/tf_aws_coreos_ami" 9 | region = "${var.region}" 10 | channel = "${var.coreos_channel}" 11 | virttype = "${module.bastion_amitype.prefer_hvm}" 12 | } 13 | 14 | data "template_file" "bastion_cloud_init" { 15 | template = "${file("bastion-cloud-config.yml.tpl")}" 16 | depends_on = ["null_resource.etcd_discovery_url"] 17 | vars { 18 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 19 | size = "${var.masters}" 20 | vpc_cidr_block = "${var.vpc_cidr_block}" 21 | region = "${var.region}" 22 | } 23 | } 24 | 25 | resource "aws_instance" "bastion" { 26 | instance_type = "${var.bastion_instance_type}" 27 | ami = "${module.bastion_ami.ami_id}" 28 | # Just put the bastion in the first public subnet 29 | subnet_id = "${element(split(",", module.vpc.public_subnets), 0)}" 30 | # @todo - this allows bastion connection on any port which is not ideal but was like this previously. 31 | security_groups = ["${module.sg-default.security_group_id}", "${aws_security_group.bastion.id}"] 32 | key_name = "${module.aws-keypair.keypair_name}" 33 | source_dest_check = false 34 | user_data = "${data.template_file.bastion_cloud_init.rendered}" 35 | tags = { 36 | Name = "kube-bastion" 37 | role = "bastion" 38 | } 39 | connection { 40 | user = "core" 41 | private_key = "${var.private_key_file}" 42 | } 43 | 44 | # Do some early bootstrapping of the CoreOS machines. 
This will install 45 | # python and pip so we can use it as the ansible_python_interpreter in our playbooks 46 | provisioner "file" { 47 | source = "../../scripts/coreos" 48 | destination = "/tmp" 49 | } 50 | provisioner "remote-exec" { 51 | inline = [ 52 | "sudo chmod -R +x /tmp/coreos", 53 | "/tmp/coreos/bootstrap.sh", 54 | "~/bin/python /tmp/coreos/get-pip.py", 55 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 56 | "sudo rm -rf /tmp/coreos", 57 | /* Initialize OpenVPN container and server config */ 58 | "sudo iptables -t nat -A POSTROUTING -j MASQUERADE", 59 | "sudo docker run --name ovpn-data -v /etc/openvpn busybox", 60 | "sudo docker run --volumes-from ovpn-data --rm gosuri/openvpn ovpn_genconfig -p ${var.vpc_cidr_block} -u udp://${aws_instance.bastion.public_ip}" 61 | ] 62 | } 63 | } 64 | 65 | # Bastion elastic IP 66 | resource "aws_eip" "bastion" { 67 | instance = "${aws_instance.bastion.id}" 68 | vpc = true 69 | } 70 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/bin/ovpn-client-config: -------------------------------------------------------------------------------- 1 | ssh -t "core@$(terraform output bastion.ip)" sudo docker run --volumes-from ovpn-data --rm gosuri/openvpn ovpn_getclient "${1}" > "${1}-kube.ovpn" 2 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/bin/ovpn-init: -------------------------------------------------------------------------------- 1 | ssh -t "core@$(terraform output bastion.ip)" sudo docker run --volumes-from ovpn-data --rm -it gosuri/openvpn ovpn_initpki 2 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/bin/ovpn-new-client: -------------------------------------------------------------------------------- 1 | ssh -t "core@$(terraform output bastion.ip)" sudo docker run --volumes-from ovpn-data --rm -it gosuri/openvpn easyrsa build-client-full "${1}" nopass 2 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/bin/ovpn-start: -------------------------------------------------------------------------------- 1 | ssh -t "core@$(terraform output bastion.ip)" sudo docker run --volumes-from ovpn-data -d -p 1194:1194/udp --cap-add=NET_ADMIN gosuri/openvpn 2 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/etcd_discovery_url.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Capgemini/kubeform/a49b0dd29b302036be7d7958c31288db591516c3/terraform/aws/private-cloud/etcd_discovery_url.txt -------------------------------------------------------------------------------- /terraform/aws/private-cloud/main.tf: -------------------------------------------------------------------------------- 1 | variable "access_key" {} 2 | variable "secret_key" {} 3 | variable "public_key_file" { default = "~/.ssh/id_rsa_aws.pub" } 4 | variable "private_key_file" { default = "~/.ssh/id_rsa_aws.pem" } 5 | variable "region" { default = "eu-west-1" } 6 | variable "availability_zones" { 7 | type = "list" 8 | default = [ "eu-west-1a", "eu-west-1b", "eu-west-1c" ] 9 | } 10 | variable "vpc_cidr_block" { default = "10.0.0.0/16" } 11 | variable "vpc_private_subnets_list" { 12 | type = "list" 13 | default = [ "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24" ] 14 | } 15 | variable "vpc_public_subnets_list" {
16 | type = "list" 17 | default = [ "10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24" ] 18 | } 19 | variable "coreos_channel" { default = "stable" } 20 | variable "etcd_discovery_url_file" { default = "etcd_discovery_url.txt" } 21 | variable "masters" { default = "3" } 22 | variable "master_instance_type" { default = "m3.medium" } 23 | variable "workers" { default = "1" } 24 | variable "worker_instance_type" { default = "m3.medium" } 25 | variable "worker_ebs_volume_size" { default = "30" } 26 | variable "bastion_instance_type" { default = "t2.micro" } 27 | variable "docker_version" { default = "1.9.1-0~trusty" } 28 | 29 | provider "aws" { 30 | access_key = "${var.access_key}" 31 | secret_key = "${var.secret_key}" 32 | region = "${var.region}" 33 | } 34 | 35 | module "vpc" { 36 | source = "./vpc" 37 | 38 | name = "default" 39 | 40 | cidr = "${var.vpc_cidr_block}" 41 | private_subnets = [ "${var.vpc_private_subnets_list}" ] 42 | public_subnets = [ "${var.vpc_public_subnets_list}" ] 43 | bastion_instance_id = "${aws_instance.bastion.id}" 44 | 45 | azs = [ "${var.availability_zones}" ] 46 | } 47 | 48 | # ssh keypair for instances 49 | module "aws-keypair" { 50 | source = "../keypair" 51 | 52 | public_key = "${file("${var.public_key_file}")}" 53 | } 54 | 55 | # security group to allow all traffic in and out of the instances in the VPC 56 | module "sg-default" { 57 | source = "../sg-all-traffic" 58 | 59 | vpc_id = "${module.vpc.vpc_id}" 60 | } 61 | 62 | module "elb" { 63 | source = "../elb" 64 | 65 | security_groups = "${module.sg-default.security_group_id}" 66 | instances = "${join(",", aws_instance.worker.*.id)}" 67 | subnets = "${module.vpc.public_subnets}" 68 | } 69 | 70 | # Generate an etcd URL for the cluster 71 | resource "null_resource" "etcd_discovery_url" { 72 | provisioner "local-exec" { 73 | command = "curl -s https://discovery.etcd.io/new?size=${var.masters} > ${var.etcd_discovery_url_file}" 74 | } 75 | 76 | # To change the cluster size of an existing live cluster, please read: 77 | # https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html 78 | } 79 | 80 | # outputs 81 | output "bastion.ip" { 82 | value = "${aws_eip.bastion.public_ip}" 83 | } 84 | output "master_ips" { 85 | value = "${join(",", aws_instance.master.*.private_ip)}" 86 | } 87 | output "worker_ips" { 88 | value = "${join(",", aws_instance.worker.*.private_ip)}" 89 | } 90 | output "vpc_cidr_block_ip" { 91 | value = "${module.vpc.vpc_cidr_block}" 92 | } 93 | output "elb.hostname" { 94 | value = "${module.elb.elb_dns_name}" 95 | } 96 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/master-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | # $private_ipv4 is populated by the cloud provider 6 | # we don't have a $public_ipv4 in the private VPC 7 | advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 8 | initial-advertise-peer-urls: http://$private_ipv4:2380 9 | # listen on both the official ports and the legacy ports 10 | # legacy ports can be omitted if your application doesn't depend on them 11 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 12 | listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 13 | # Discovery is populated by Terraform 14 | discovery: ${etcd_discovery_url} 15 | fleet: 16 | metadata: "role=master,region=${region}" 17 | units: 18 | - name: etcd2.service 19 | command: 
start 20 | update: 21 | reboot-strategy: best-effort 22 | manage_etc_hosts: localhost 23 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/masters.tf: -------------------------------------------------------------------------------- 1 | module "master_amitype" { 2 | source = "github.com/terraform-community-modules/tf_aws_virttype" 3 | instance_type = "${var.master_instance_type}" 4 | } 5 | 6 | module "master_ami" { 7 | source = "github.com/terraform-community-modules/tf_aws_coreos_ami" 8 | region = "${var.region}" 9 | channel = "${var.coreos_channel}" 10 | virttype = "${module.master_amitype.prefer_hvm}" 11 | } 12 | 13 | data "template_file" "master_cloud_init" { 14 | template = "${file("master-cloud-config.yml.tpl")}" 15 | depends_on = ["null_resource.etcd_discovery_url"] 16 | vars { 17 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 18 | size = "${var.masters}" 19 | region = "${var.region}" 20 | } 21 | } 22 | 23 | resource "aws_instance" "master" { 24 | instance_type = "${var.master_instance_type}" 25 | ami = "${module.master_ami.ami_id}" 26 | count = "${var.masters}" 27 | key_name = "${module.aws-keypair.keypair_name}" 28 | source_dest_check = false 29 | subnet_id = "${element(split(",", module.vpc.private_subnets), count.index)}" 30 | security_groups = ["${module.sg-default.security_group_id}"] 31 | depends_on = ["aws_instance.bastion"] 32 | user_data = "${data.template_file.master_cloud_init.rendered}" 33 | tags = { 34 | Name = "kube-master-${count.index}" 35 | role = "masters" 36 | } 37 | connection { 38 | user = "core" 39 | private_key = "${var.private_key_file}" 40 | bastion_host = "${aws_eip.bastion.public_ip}" 41 | bastion_private_key = "${var.private_key_file}" 42 | } 43 | 44 | # Do some early bootstrapping of the CoreOS machines. 
This will install 45 | # python and pip so we can use it as the ansible_python_interpreter in our playbooks 46 | provisioner "file" { 47 | source = "../../scripts/coreos" 48 | destination = "/tmp" 49 | } 50 | provisioner "remote-exec" { 51 | inline = [ 52 | "sudo chmod -R +x /tmp/coreos", 53 | "/tmp/coreos/bootstrap.sh", 54 | "~/bin/python /tmp/coreos/get-pip.py", 55 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 56 | "sudo rm -rf /tmp/coreos" 57 | ] 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/security_groups.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "bastion" { 2 | name = "kube-bastion" 3 | description = "Security group for bastion instances that allows SSH and VPN traffic from internet" 4 | vpc_id = "${module.vpc.vpc_id}" 5 | 6 | # inbound http/https traffic from the private subnets to allow them to talk with the internet 7 | ingress { 8 | from_port = 80 9 | to_port = 80 10 | protocol = "tcp" 11 | cidr_blocks = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] 12 | } 13 | 14 | ingress { 15 | from_port = 443 16 | to_port = 443 17 | protocol = "tcp" 18 | cidr_blocks = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] 19 | } 20 | 21 | # ssh 22 | ingress { 23 | from_port = 22 24 | to_port = 22 25 | protocol = "tcp" 26 | cidr_blocks = ["0.0.0.0/0"] 27 | } 28 | 29 | # openvpn 30 | ingress { 31 | from_port = 1194 32 | to_port = 1194 33 | protocol = "udp" 34 | cidr_blocks = ["0.0.0.0/0"] 35 | } 36 | 37 | # outbound access to the internet 38 | egress { 39 | from_port = 80 40 | to_port = 80 41 | protocol = "tcp" 42 | cidr_blocks = ["0.0.0.0/0"] 43 | } 44 | 45 | egress { 46 | from_port = 443 47 | to_port = 443 48 | protocol = "tcp" 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | 52 | tags { 53 | Name = "kube-bastion-sg" 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/vpc/main.tf: -------------------------------------------------------------------------------- 1 | variable "name" { } 2 | variable "cidr" { } 3 | variable "public_subnets" { default = [] } 4 | variable "private_subnets" { default = [] } 5 | variable "bastion_instance_id" { } 6 | variable "azs" { type="list" } 7 | variable "enable_dns_hostnames" { 8 | description = "should be true if you want to use private DNS within the VPC" 9 | default = false 10 | } 11 | variable "enable_dns_support" { 12 | description = "should be true if you want to use private DNS within the VPC" 13 | default = false 14 | } 15 | 16 | # resources 17 | resource "aws_vpc" "mod" { 18 | cidr_block = "${var.cidr}" 19 | enable_dns_hostnames = "${var.enable_dns_hostnames}" 20 | enable_dns_support = "${var.enable_dns_support}" 21 | tags { 22 | Name = "${var.name}" 23 | } 24 | } 25 | 26 | resource "aws_internet_gateway" "mod" { 27 | vpc_id = "${aws_vpc.mod.id}" 28 | } 29 | 30 | resource "aws_route_table" "public" { 31 | vpc_id = "${aws_vpc.mod.id}" 32 | route { 33 | cidr_block = "0.0.0.0/0" 34 | gateway_id = "${aws_internet_gateway.mod.id}" 35 | } 36 | tags { 37 | Name = "${var.name}-public" 38 | } 39 | } 40 | 41 | resource "aws_route_table" "private" { 42 | vpc_id = "${aws_vpc.mod.id}" 43 | route { 44 | cidr_block = "0.0.0.0/0" 45 | instance_id = "${var.bastion_instance_id}" 46 | } 47 | tags { 48 | Name = "${var.name}-private" 49 | } 50 | } 51 | 52 | resource "aws_subnet" "private" { 53 | vpc_id = "${aws_vpc.mod.id}" 54 |
cidr_block = "${element(var.private_subnets, count.index)}" 55 | availability_zone = "${element(var.azs, count.index)}" 56 | count = "${length(var.private_subnets)}" 57 | tags { 58 | Name = "${var.name}-private" 59 | } 60 | } 61 | 62 | resource "aws_subnet" "public" { 63 | vpc_id = "${aws_vpc.mod.id}" 64 | cidr_block = "${element(var.public_subnets, count.index)}" 65 | availability_zone = "${element(var.azs, count.index)}" 66 | count = "${length(var.public_subnets)}" 67 | tags { 68 | Name = "${var.name}-public" 69 | } 70 | 71 | map_public_ip_on_launch = true 72 | } 73 | 74 | resource "aws_route_table_association" "private" { 75 | count = "${length(var.private_subnets)}" 76 | subnet_id = "${element(aws_subnet.private.*.id, count.index)}" 77 | route_table_id = "${aws_route_table.private.id}" 78 | } 79 | 80 | resource "aws_route_table_association" "public" { 81 | count = "${length(var.public_subnets)}" 82 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}" 83 | route_table_id = "${aws_route_table.public.id}" 84 | } 85 | 86 | # outputs 87 | output "private_subnets" { 88 | value = "${join(",", aws_subnet.private.*.id)}" 89 | } 90 | output "public_subnets" { 91 | value = "${join(",", aws_subnet.public.*.id)}" 92 | } 93 | output "vpc_id" { 94 | value = "${aws_vpc.mod.id}" 95 | } 96 | output "vpc_cidr_block" { 97 | value = "${aws_vpc.mod.cidr_block}" 98 | } 99 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/worker-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | units: 5 | - name: format-ebs-volume.service 6 | command: start 7 | content: | 8 | [Unit] 9 | Description=Formats the ebs volume if needed 10 | Before=docker.service 11 | [Service] 12 | Type=oneshot 13 | RemainAfterExit=yes 14 | ExecStart=/bin/bash -c '(/usr/sbin/blkid -t TYPE=ext4 | grep /dev/xvdb) || (/usr/sbin/wipefs -fa /dev/xvdb && /usr/sbin/mkfs.ext4 /dev/xvdb)' 15 | - name: var-lib-docker.mount 16 | command: start 17 | content: | 18 | [Unit] 19 | Description=Mount ephemeral to /var/lib/docker 20 | Requires=format-ebs-volume.service 21 | After=format-ebs-volume.service 22 | [Mount] 23 | What=/dev/xvdb 24 | Where=/var/lib/docker 25 | Type=ext4 26 | - name: docker.service 27 | drop-ins: 28 | - name: 10-wait-docker.conf 29 | content: | 30 | [Unit] 31 | After=var-lib-docker.mount 32 | Requires=var-lib-docker.mount 33 | etcd2: 34 | proxy: on 35 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 36 | discovery: ${etcd_discovery_url} 37 | fleet: 38 | metadata: "role=worker,region=${region}" 39 | public-ip: "$public_ipv4" 40 | etcd_servers: "http://localhost:2379" 41 | locksmith: 42 | endpoint: "http://localhost:2379" 43 | units: 44 | - name: etcd2.service 45 | command: start 46 | update: 47 | reboot-strategy: best-effort 48 | manage_etc_hosts: localhost 49 | -------------------------------------------------------------------------------- /terraform/aws/private-cloud/workers.tf: -------------------------------------------------------------------------------- 1 | module "worker_amitype" { 2 | source = "github.com/terraform-community-modules/tf_aws_virttype" 3 | instance_type = "${var.worker_instance_type}" 4 | } 5 | 6 | module "worker_ami" { 7 | source = "github.com/terraform-community-modules/tf_aws_coreos_ami" 8 | region = "${var.region}" 9 | channel = "${var.coreos_channel}" 10 | virttype = "${module.worker_amitype.prefer_hvm}" 11 | } 12 | 13 | data 
"template_file" "worker_cloud_init" { 14 | template = "${file("worker-cloud-config.yml.tpl")}" 15 | depends_on = ["null_resource.etcd_discovery_url"] 16 | vars { 17 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 18 | size = "${var.masters}" 19 | region = "${var.region}" 20 | } 21 | } 22 | 23 | /* 24 | @todo This should be changed to be an autoscaling worker with launch config 25 | */ 26 | resource "aws_instance" "worker" { 27 | instance_type = "${var.worker_instance_type}" 28 | ami = "${module.worker_ami.ami_id}" 29 | count = "${var.workers}" 30 | key_name = "${module.aws-keypair.keypair_name}" 31 | source_dest_check = false 32 | # @todo - fix this as this only allows 3 workers maximum (due to splittingo on the count variable) 33 | subnet_id = "${element(split(",", module.vpc.private_subnets), count.index)}" 34 | security_groups = ["${module.sg-default.security_group_id}"] 35 | depends_on = ["aws_instance.bastion", "aws_instance.master"] 36 | user_data = "${data.template_file.master_cloud_init.rendered}" 37 | tags = { 38 | Name = "kube-worker-${count.index}" 39 | role = "workers" 40 | } 41 | connection { 42 | user = "core" 43 | private_key = "${var.private_key_file}" 44 | bastion_host = "${aws_eip.bastion.public_ip}" 45 | bastion_private_key = "${var.private_key_file}" 46 | } 47 | 48 | # Do some early bootstrapping of the CoreOS machines. This will install 49 | # python and pip so we can use as the ansible_python_interpreter in our playbooks 50 | provisioner "file" { 51 | source = "../../scripts/coreos" 52 | destination = "/tmp" 53 | } 54 | provisioner "remote-exec" { 55 | inline = [ 56 | "sudo chmod -R +x /tmp/coreos", 57 | "/tmp/coreos/bootstrap.sh", 58 | "~/bin/python /tmp/coreos/get-pip.py", 59 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 60 | "sudo rm -rf /tmp/coreos" 61 | ] 62 | } 63 | 64 | ebs_block_device { 65 | device_name = "/dev/xvdb" 66 | volume_size = "${var.worker_ebs_volume_size}" 67 | delete_on_termination = true 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/.gitignore: -------------------------------------------------------------------------------- 1 | id_rsa 2 | id_rsa.pub 3 | etcd_discovery_url.txt 4 | *.pem 5 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/edge-routers.tf: -------------------------------------------------------------------------------- 1 | module "edge-router_amitype" { 2 | source = "github.com/terraform-community-modules/tf_aws_virttype" 3 | instance_type = "${var.edge-router_instance_type}" 4 | } 5 | 6 | module "edge-router_ami" { 7 | source = "github.com/terraform-community-modules/tf_aws_coreos_ami" 8 | region = "${var.region}" 9 | channel = "${var.coreos_channel}" 10 | virttype = "${module.edge-router_amitype.prefer_hvm}" 11 | } 12 | 13 | data "template_file" "edge-router_cloud_init" { 14 | template = "${file("worker-cloud-config.yml.tpl")}" 15 | depends_on = ["null_resource.etcd_discovery_url"] 16 | vars { 17 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 18 | size = "${var.masters}" 19 | region = "${var.region}" 20 | etcd_ca = "${replace(module.ca.ca_cert_pem, "\n", "\\n")}" 21 | etcd_cert = "${replace(module.etcd_cert.etcd_cert_pem, "\n", "\\n")}" 22 | etcd_key = "${replace(module.etcd_cert.etcd_private_key, "\n", "\\n")}" 23 | } 24 | } 25 | 26 | resource "aws_instance" "edge-router" { 27 | instance_type = "${var.edge-router_instance_type}" 28 | 
ami = "${module.edge-router_ami.ami_id}" 29 | iam_instance_profile = "${module.iam.edge-router_profile_name}" 30 | count = "${var.edge-routers}" 31 | key_name = "${module.aws-keypair.keypair_name}" 32 | subnet_id = "${element(module.public_subnet.subnet_ids, count.index)}" 33 | source_dest_check = false 34 | vpc_security_group_ids = ["${module.sg-default.security_group_id}"] 35 | depends_on = ["aws_instance.master"] 36 | user_data = "${data.template_file.edge-router_cloud_init.rendered}" 37 | tags = { 38 | Name = "kube-edge-router-${count.index}" 39 | role = "edge-routers" 40 | region = "${var.region}" 41 | } 42 | ebs_block_device { 43 | device_name = "/dev/xvdb" 44 | volume_size = "${var.edge-router_ebs_volume_size}" 45 | delete_on_termination = true 46 | } 47 | connection { 48 | user = "core" 49 | private_key = "${tls_private_key.ssh.private_key_pem}" 50 | } 51 | provisioner "file" { 52 | source = "../../scripts/coreos" 53 | destination = "/tmp" 54 | } 55 | provisioner "remote-exec" { 56 | inline = [ 57 | "sudo chmod -R +x /tmp/coreos", 58 | "/tmp/coreos/bootstrap.sh", 59 | "~/bin/python /tmp/coreos/get-pip.py", 60 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 61 | "sudo rm -rf /tmp/coreos" 62 | ] 63 | } 64 | } 65 | 66 | output "edge-router_ips" { 67 | value = "${join(",", aws_instance.edge-router.*.public_ip)}" 68 | } 69 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/etcd_discovery_url.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Capgemini/kubeform/a49b0dd29b302036be7d7958c31288db591516c3/terraform/aws/public-cloud/etcd_discovery_url.txt -------------------------------------------------------------------------------- /terraform/aws/public-cloud/main.tf: -------------------------------------------------------------------------------- 1 | variable "access_key" {} 2 | variable "secret_key" {} 3 | variable "organization" { default = "kubeform" } 4 | variable "region" { default = "eu-west-1" } 5 | 6 | # length(availability_zones) must == length(vpc_public_cidrs_list) 7 | variable "availability_zones" { 8 | type = "list" 9 | default = [ "eu-west-1a", "eu-west-1b", "eu-west-1c" ] 10 | } 11 | variable "vpc_public_cidrs_list" { 12 | type = "list" 13 | default = [ "10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24" ] 14 | } 15 | 16 | variable "coreos_channel" { default = "alpha" } 17 | variable "etcd_discovery_url_file" { default = "etcd_discovery_url.txt" } 18 | variable "masters" { default = "3" } 19 | variable "master_instance_type" { default = "m3.medium" } 20 | variable "workers" { default = "1" } 21 | variable "worker_instance_type" { default = "m3.medium" } 22 | variable "worker_ebs_volume_size" { default = "30" } 23 | variable "edge-routers" { default = "1" } 24 | variable "edge-router_instance_type" { default = "m3.medium" } 25 | variable "edge-router_ebs_volume_size" { default = "30" } 26 | variable "vpc_cidr_block" { default = "10.0.0.0/16" } 27 | 28 | provider "aws" { 29 | access_key = "${var.access_key}" 30 | secret_key = "${var.secret_key}" 31 | region = "${var.region}" 32 | } 33 | 34 | resource "aws_vpc" "default" { 35 | cidr_block = "${var.vpc_cidr_block}" 36 | enable_dns_support = true 37 | enable_dns_hostnames = true 38 | tags { 39 | Name = "${var.organization} VPC" 40 | } 41 | lifecycle { 42 | create_before_destroy = true 43 | } 44 | } 45 | 46 | # ssh keypair 47 | resource "tls_private_key" "ssh" { 48 | algorithm = "RSA" 49 
| } 50 | 51 | # Export ssh key so we can login with core@instance -i id_rsa 52 | resource "null_resource" "keys" { 53 | depends_on = ["tls_private_key.ssh"] 54 | 55 | provisioner "local-exec" { 56 | command = "echo '${tls_private_key.ssh.private_key_pem}' > ${path.module}/id_rsa && chmod 600 ${path.module}/id_rsa" 57 | } 58 | } 59 | 60 | module "aws-keypair" { 61 | source = "../keypair" 62 | public_key = "${tls_private_key.ssh.public_key_openssh}" 63 | } 64 | 65 | # certificates 66 | module "ca" { 67 | source = "github.com/tamsky/tf_tls/ca" 68 | organization = "${var.organization}" 69 | ca_count = "${var.masters + var.workers + var.edge-routers}" 70 | deploy_ssh_hosts = [ "${concat(aws_instance.edge-router.*.public_ip, concat(aws_instance.master.*.public_ip, aws_instance.worker.*.public_ip))}" ] 71 | ssh_user = "core" 72 | ssh_private_key = "${tls_private_key.ssh.private_key_pem}" 73 | } 74 | 75 | module "etcd_cert" { 76 | source = "github.com/tamsky/tf_tls/etcd" 77 | ca_cert_pem = "${module.ca.ca_cert_pem}" 78 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 79 | } 80 | 81 | module "kube_master_certs" { 82 | source = "github.com/tamsky/tf_tls/kubernetes/master" 83 | ca_cert_pem = "${module.ca.ca_cert_pem}" 84 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 85 | ip_addresses = [ "${concat(aws_instance.master.*.private_ip, aws_instance.master.*.public_ip)}" ] 86 | dns_names = [ "${module.master_elb.elb_dns_name}" ] 87 | deploy_ssh_hosts = [ "${aws_instance.master.*.public_ip}" ] 88 | master_count = "${var.masters}" 89 | validity_period_hours = "8760" 90 | early_renewal_hours = "720" 91 | ssh_user = "core" 92 | ssh_private_key = "${tls_private_key.ssh.private_key_pem}" 93 | } 94 | 95 | module "kube_kubelet_certs" { 96 | source = "github.com/tamsky/tf_tls/kubernetes/kubelet" 97 | ca_cert_pem = "${module.ca.ca_cert_pem}" 98 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 99 | ip_addresses = [ "${concat(aws_instance.edge-router.*.private_ip, concat(aws_instance.master.*.private_ip, aws_instance.worker.*.private_ip))}" ] 100 | deploy_ssh_hosts = [ "${concat(aws_instance.edge-router.*.public_ip, concat(aws_instance.master.*.public_ip, aws_instance.worker.*.public_ip))}" ] 101 | kubelet_count = "${var.masters + var.workers + var.edge-routers}" 102 | validity_period_hours = "8760" 103 | early_renewal_hours = "720" 104 | ssh_user = "core" 105 | ssh_private_key = "${tls_private_key.ssh.private_key_pem}" 106 | } 107 | 108 | module "kube_admin_cert" { 109 | source = "github.com/tamsky/tf_tls/kubernetes/admin" 110 | ca_cert_pem = "${module.ca.ca_cert_pem}" 111 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 112 | kubectl_server_ip = "${module.master_elb.elb_dns_name}" 113 | } 114 | 115 | module "docker_daemon_certs" { 116 | source = "github.com/tamsky/tf_tls/docker/daemon" 117 | ca_cert_pem = "${module.ca.ca_cert_pem}" 118 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 119 | ip_addresses_list = [ "${concat(aws_instance.edge-router.*.private_ip, concat(aws_instance.master.*.private_ip, aws_instance.worker.*.private_ip))}" ] 120 | deploy_ssh_hosts = [ "${concat(aws_instance.edge-router.*.public_ip, concat(aws_instance.master.*.public_ip, aws_instance.worker.*.public_ip))}" ] 121 | docker_daemon_count = "${var.masters + var.workers + var.edge-routers}" 122 | private_key = "${tls_private_key.ssh.private_key_pem}" 123 | validity_period_hours = "8760" 124 | early_renewal_hours = "720" 125 | user = "core" 126 | } 127 | 128 | module "docker_client_certs" { 129 | 
source = "github.com/tamsky/tf_tls/docker/client" 130 | ca_cert_pem = "${module.ca.ca_cert_pem}" 131 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 132 | ip_addresses_list = [ "${concat(aws_instance.edge-router.*.private_ip, concat(aws_instance.master.*.private_ip, aws_instance.worker.*.private_ip))}" ] 133 | deploy_ssh_hosts = [ "${concat(aws_instance.edge-router.*.public_ip, concat(aws_instance.master.*.public_ip, aws_instance.worker.*.public_ip))}" ] 134 | docker_client_count = "${var.masters + var.workers + var.edge-routers}" 135 | private_key = "${tls_private_key.ssh.private_key_pem}" 136 | validity_period_hours = "8760" 137 | early_renewal_hours = "720" 138 | user = "core" 139 | } 140 | 141 | # internet gateway 142 | module "igw" { 143 | source = "github.com/terraform-community-modules/tf_aws_igw" 144 | name = "public" 145 | vpc_id = "${aws_vpc.default.id}" 146 | } 147 | 148 | # public subnets 149 | module "public_subnet" { 150 | source = "github.com/terraform-community-modules/tf_aws_public_subnet" 151 | name = "public" 152 | cidrs = [ "${var.vpc_public_cidrs_list}" ] 153 | azs = [ "${var.availability_zones}" ] 154 | vpc_id = "${aws_vpc.default.id}" 155 | igw_id = "${module.igw.igw_id}" 156 | } 157 | 158 | # security group to allow all traffic in and out of the instances 159 | module "sg-default" { 160 | source = "../sg-all-traffic" 161 | vpc_id = "${aws_vpc.default.id}" 162 | } 163 | 164 | # IAM 165 | module "iam" { 166 | source = "../iam" 167 | } 168 | 169 | # Generate an etcd URL for the cluster 170 | resource "null_resource" "etcd_discovery_url" { 171 | provisioner "local-exec" { 172 | command = "curl -s https://discovery.etcd.io/new?size=${var.masters} > ${var.etcd_discovery_url_file}" 173 | } 174 | 175 | # To change the cluster size of an existing live cluster, please read: 176 | # https://coreos.com/etcd/docs/latest/etcd-live-cluster-reconfiguration.html 177 | } 178 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/master-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | advertise-client-urls: http://$public_ipv4:2379 6 | initial-advertise-peer-urls: http://$private_ipv4:2380 7 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 8 | listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 9 | discovery: ${etcd_discovery_url} 10 | fleet: 11 | metadata: "role=master,region=${region}" 12 | units: 13 | - name: setup-network-environment.service 14 | command: start 15 | content: | 16 | [Unit] 17 | Description=Setup Network Environment 18 | Documentation=https://github.com/kelseyhightower/setup-network-environment 19 | Requires=network-online.target 20 | After=network-online.target 21 | 22 | [Service] 23 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 24 | ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/1.0.1/setup-network-environment 25 | ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment 26 | ExecStart=/opt/bin/setup-network-environment 27 | RemainAfterExit=yes 28 | Type=oneshot 29 | - name: flanneld.service 30 | command: start 31 | drop-ins: 32 | - name: 50-network-config.conf 33 | content: | 34 | [Unit] 35 | Requires=etcd2.service 36 | [Service] 37 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network": "10.2.0.0/16", 
"Backend": {"Type": "vxlan"}}' 38 | - name: docker.service 39 | command: start 40 | drop-ins: 41 | - name: 60-wait-for-flannel-config.conf 42 | content: | 43 | [Unit] 44 | After=flanneld.service 45 | Requires=flanneld.service 46 | Restart=always 47 | Restart=on-failure 48 | - name: etcd2.service 49 | command: start 50 | update: 51 | reboot-strategy: best-effort 52 | write_files: 53 | - path: /run/systemd/system/etcd.service.d/30-certificates.conf 54 | permissions: 0644 55 | content: | 56 | [Service] 57 | Environment=ETCD_CA_FILE=/etc/ssl/etcd/certs/ca.pem 58 | Environment=ETCD_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 59 | Environment=ETCD_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 60 | Environment=ETCD_PEER_CA_FILE=/etc/ssl/etcd/certs/ca.pem 61 | Environment=ETCD_PEER_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 62 | Environment=ETCD_PEER_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 63 | - path: /etc/ssl/etcd/certs/ca.pem 64 | permissions: 0644 65 | content: "${etcd_ca}" 66 | - path: /etc/ssl/etcd/certs/etcd.pem 67 | permissions: 0644 68 | content: "${etcd_cert}" 69 | - path: /etc/ssl/etcd/private/etcd.pem 70 | permissions: 0644 71 | content: "${etcd_key}" 72 | manage_etc_hosts: localhost 73 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/masters.tf: -------------------------------------------------------------------------------- 1 | module "master_amitype" { 2 | source = "github.com/terraform-community-modules/tf_aws_virttype" 3 | instance_type = "${var.master_instance_type}" 4 | } 5 | 6 | module "master_ami" { 7 | source = "github.com/terraform-community-modules/tf_aws_coreos_ami" 8 | region = "${var.region}" 9 | channel = "${var.coreos_channel}" 10 | virttype = "${module.master_amitype.prefer_hvm}" 11 | } 12 | 13 | data "template_file" "master_cloud_init" { 14 | template = "${file("master-cloud-config.yml.tpl")}" 15 | depends_on = ["null_resource.etcd_discovery_url"] 16 | vars { 17 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 18 | size = "${var.masters}" 19 | region = "${var.region}" 20 | etcd_ca = "${replace(module.ca.ca_cert_pem, "\n", "\\n")}" 21 | etcd_cert = "${replace(module.etcd_cert.etcd_cert_pem, "\n", "\\n")}" 22 | etcd_key = "${replace(module.etcd_cert.etcd_private_key, "\n", "\\n")}" 23 | } 24 | } 25 | 26 | resource "aws_instance" "master" { 27 | instance_type = "${var.master_instance_type}" 28 | ami = "${module.master_ami.ami_id}" 29 | iam_instance_profile = "${module.iam.master_profile_name}" 30 | count = "${var.masters}" 31 | key_name = "${module.aws-keypair.keypair_name}" 32 | subnet_id = "${element(module.public_subnet.subnet_ids, count.index)}" 33 | source_dest_check = false 34 | vpc_security_group_ids = ["${module.sg-default.security_group_id}"] 35 | user_data = "${data.template_file.master_cloud_init.rendered}" 36 | tags = { 37 | Name = "kube-master-${count.index}" 38 | role = "masters" 39 | region = "${var.region}" 40 | } 41 | connection { 42 | user = "core" 43 | private_key = "${tls_private_key.ssh.private_key_pem}" 44 | } 45 | provisioner "file" { 46 | source = "../../scripts/coreos" 47 | destination = "/tmp" 48 | } 49 | provisioner "remote-exec" { 50 | inline = [ 51 | "sudo chmod -R +x /tmp/coreos", 52 | "/tmp/coreos/bootstrap.sh", 53 | "~/bin/python /tmp/coreos/get-pip.py", 54 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 55 | "sudo rm -rf /tmp/coreos" 56 | ] 57 | } 58 | } 59 | 60 | module "master_elb" { 61 | source = "../elb" 62 | security_groups = 
"${module.sg-default.security_group_id}" 63 | instances = [ "${aws_instance.master.*.id}" ] 64 | subnets = [ "${module.public_subnet.subnet_ids}" ] 65 | } 66 | 67 | output "master_ips" { 68 | value = "${join(",", aws_instance.master.*.public_ip)}" 69 | } 70 | output "master_elb_hostname" { 71 | value = "${module.master_elb.elb_dns_name}" 72 | } 73 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/worker-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | coreos: 3 | etcd2: 4 | proxy: on 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | discovery: ${etcd_discovery_url} 7 | fleet: 8 | metadata: "role=worker,region=${region}" 9 | public-ip: "$public_ipv4" 10 | etcd_servers: "http://localhost:2379" 11 | locksmith: 12 | endpoint: "http://localhost:2379" 13 | units: 14 | - name: setup-network-environment.service 15 | command: start 16 | content: | 17 | [Unit] 18 | Description=Setup Network Environment 19 | Documentation=https://github.com/kelseyhightower/setup-network-environment 20 | Requires=network-online.target 21 | After=network-online.target 22 | 23 | [Service] 24 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 25 | ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/1.0.1/setup-network-environment 26 | ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment 27 | ExecStart=/opt/bin/setup-network-environment 28 | RemainAfterExit=yes 29 | Type=oneshot 30 | - name: flanneld.service 31 | command: start 32 | drop-ins: 33 | - name: 50-network-config.conf 34 | content: | 35 | [Unit] 36 | Requires=etcd2.service 37 | [Service] 38 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"}}' 39 | - name: format-ebs-volume.service 40 | command: start 41 | content: | 42 | [Unit] 43 | Description=Formats the ebs volume if needed 44 | Before=docker.service 45 | [Service] 46 | Type=oneshot 47 | RemainAfterExit=yes 48 | ExecStart=/bin/bash -c '(/usr/sbin/blkid -t TYPE=ext4 | grep /dev/xvdb) || (/usr/sbin/wipefs -fa /dev/xvdb && /usr/sbin/mkfs.ext4 /dev/xvdb)' 49 | - name: var-lib-docker.mount 50 | command: start 51 | content: | 52 | [Unit] 53 | Description=Mount ephemeral to /var/lib/docker 54 | Requires=format-ebs-volume.service 55 | After=format-ebs-volume.service 56 | [Mount] 57 | What=/dev/xvdb 58 | Where=/var/lib/docker 59 | Type=ext4 60 | - name: docker.service 61 | command: start 62 | drop-ins: 63 | - name: 10-wait-docker.conf 64 | content: | 65 | [Unit] 66 | After=var-lib-docker.mount 67 | After=flanneld.service 68 | Requires=flanneld.Service 69 | Requires=var-lib-docker.mount 70 | Restart=always 71 | - name: etcd2.service 72 | command: start 73 | update: 74 | reboot-strategy: best-effort 75 | write_files: 76 | - path: /run/systemd/system/etcd.service.d/30-certificates.conf 77 | permissions: 0644 78 | content: | 79 | [Service] 80 | Environment=ETCD_CA_FILE=/etc/ssl/etcd/certs/ca.pem 81 | Environment=ETCD_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 82 | Environment=ETCD_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 83 | Environment=ETCD_PEER_CA_FILE=/etc/ssl/etcd/certs/ca.pem 84 | Environment=ETCD_PEER_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 85 | Environment=ETCD_PEER_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 86 | - path: /etc/ssl/etcd/certs/ca.pem 87 | permissions: 
0644 88 | content: "${etcd_ca}" 89 | - path: /etc/ssl/etcd/certs/etcd.pem 90 | permissions: 0644 91 | content: "${etcd_cert}" 92 | - path: /etc/ssl/etcd/private/etcd.pem 93 | permissions: 0644 94 | content: "${etcd_key}" 95 | manage_etc_hosts: localhost 96 | -------------------------------------------------------------------------------- /terraform/aws/public-cloud/workers.tf: -------------------------------------------------------------------------------- 1 | module "worker_amitype" { 2 | source = "github.com/terraform-community-modules/tf_aws_virttype" 3 | instance_type = "${var.worker_instance_type}" 4 | } 5 | 6 | module "worker_ami" { 7 | source = "github.com/terraform-community-modules/tf_aws_coreos_ami" 8 | region = "${var.region}" 9 | channel = "${var.coreos_channel}" 10 | virttype = "${module.worker_amitype.prefer_hvm}" 11 | } 12 | 13 | data "template_file" "worker_cloud_init" { 14 | template = "${file("worker-cloud-config.yml.tpl")}" 15 | depends_on = ["null_resource.etcd_discovery_url"] 16 | vars { 17 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 18 | size = "${var.masters}" 19 | region = "${var.region}" 20 | etcd_ca = "${replace(module.ca.ca_cert_pem, "\n", "\\n")}" 21 | etcd_cert = "${replace(module.etcd_cert.etcd_cert_pem, "\n", "\\n")}" 22 | etcd_key = "${replace(module.etcd_cert.etcd_private_key, "\n", "\\n")}" 23 | } 24 | } 25 | 26 | resource "aws_instance" "worker" { 27 | instance_type = "${var.worker_instance_type}" 28 | ami = "${module.worker_ami.ami_id}" 29 | iam_instance_profile = "${module.iam.worker_profile_name}" 30 | count = "${var.workers}" 31 | key_name = "${module.aws-keypair.keypair_name}" 32 | subnet_id = "${element(module.public_subnet.subnet_ids, count.index)}" 33 | source_dest_check = false 34 | vpc_security_group_ids = ["${module.sg-default.security_group_id}"] 35 | depends_on = ["aws_instance.master"] 36 | user_data = "${data.template_file.worker_cloud_init.rendered}" 37 | tags = { 38 | Name = "kube-worker-${count.index}" 39 | role = "workers" 40 | region = "${var.region}" 41 | } 42 | ebs_block_device { 43 | device_name = "/dev/xvdb" 44 | volume_size = "${var.worker_ebs_volume_size}" 45 | delete_on_termination = true 46 | } 47 | connection { 48 | user = "core" 49 | private_key = "${tls_private_key.ssh.private_key_pem}" 50 | } 51 | provisioner "file" { 52 | source = "../../scripts/coreos" 53 | destination = "/tmp" 54 | } 55 | provisioner "remote-exec" { 56 | inline = [ 57 | "sudo chmod -R +x /tmp/coreos", 58 | "/tmp/coreos/bootstrap.sh", 59 | "~/bin/python /tmp/coreos/get-pip.py", 60 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 61 | "sudo rm -rf /tmp/coreos" 62 | ] 63 | } 64 | } 65 | 66 | output "worker_ips" { 67 | value = "${join(",", aws_instance.worker.*.public_ip)}" 68 | } 69 | -------------------------------------------------------------------------------- /terraform/aws/sg-all-traffic/main.tf: -------------------------------------------------------------------------------- 1 | variable "security_group_name" { default = "default-kube" } 2 | variable "vpc_id" {} 3 | variable "source_cidr_block" { default = "0.0.0.0/0" } 4 | 5 | # Security group that allows all traffic 6 | resource "aws_security_group" "default" { 7 | name = "${var.security_group_name}" 8 | description = "Default security group that allows all traffic" 9 | vpc_id = "${var.vpc_id}" 10 | 11 | # Allows inbound and outbound traffic from all instances in the VPC. 
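# (note: the first ingress rule below uses self = true, which in the AWS provider scopes the allowed source to members of this same security group, i.e. the cluster's own instances; the later cidr_blocks rules are what open the group to the wider source_cidr_block)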
12 | ingress { 13 | from_port = "0" 14 | to_port = "0" 15 | protocol = "-1" 16 | self = true 17 | } 18 | 19 | # Allows all inbound traffic 20 | ingress { 21 | from_port = "0" 22 | to_port = "0" 23 | protocol = "-1" 24 | cidr_blocks = ["${var.source_cidr_block}"] 25 | } 26 | 27 | # Allows all outbound traffic. 28 | egress { 29 | from_port = 0 30 | to_port = 0 31 | protocol = "-1" 32 | cidr_blocks = ["${var.source_cidr_block}"] 33 | } 34 | tags { 35 | Name = "kube-default-sg" 36 | } 37 | } 38 | 39 | # output variables 40 | output "security_group_id" { 41 | value = "${aws_security_group.default.id}" 42 | } 43 | -------------------------------------------------------------------------------- /terraform/certs/etcd/main.tf: -------------------------------------------------------------------------------- 1 | variable "ca_cert_pem" {} 2 | variable "ca_private_key_pem" {} 3 | 4 | resource "tls_private_key" "etcd" { 5 | algorithm = "RSA" 6 | } 7 | 8 | resource "tls_cert_request" "etcd" { 9 | key_algorithm = "RSA" 10 | private_key_pem = "${tls_private_key.etcd.private_key_pem}" 11 | 12 | subject { 13 | common_name = "*" 14 | organization = "etcd" 15 | } 16 | } 17 | 18 | resource "tls_locally_signed_cert" "etcd" { 19 | cert_request_pem = "${tls_cert_request.etcd.cert_request_pem}" 20 | ca_key_algorithm = "RSA" 21 | ca_private_key_pem = "${var.ca_private_key_pem}" 22 | ca_cert_pem = "${var.ca_cert_pem}" 23 | 24 | # valid for 365 days 25 | validity_period_hours = 8760 26 | early_renewal_hours = 720 27 | 28 | allowed_uses = [ 29 | "key_encipherment", 30 | "server_auth", 31 | "client_auth" 32 | ] 33 | } 34 | 35 | output "etcd_cert_pem" { 36 | value = "${tls_locally_signed_cert.etcd.cert_pem}" 37 | } 38 | output "etcd_private_key" { 39 | value = "${tls_private_key.etcd.private_key_pem}" 40 | } 41 | -------------------------------------------------------------------------------- /terraform/digitalocean/.gitignore: -------------------------------------------------------------------------------- 1 | id_rsa 2 | id_rsa.pub 3 | etcd_discovery_url.txt 4 | *.pem 5 | -------------------------------------------------------------------------------- /terraform/digitalocean/edge-router-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | coreos: 3 | etcd2: 4 | proxy: on 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | discovery: ${etcd_discovery_url} 7 | fleet: 8 | metadata: "role=edge-router,region=${region}" 9 | etcd_servers: "http://localhost:2379" 10 | locksmith: 11 | endpoint: "http://localhost:2379" 12 | units: 13 | - name: setup-network-environment.service 14 | command: start 15 | content: | 16 | [Unit] 17 | Description=Setup Network Environment 18 | Documentation=https://github.com/kelseyhightower/setup-network-environment 19 | Requires=network-online.target 20 | After=network-online.target 21 | 22 | [Service] 23 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 24 | ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/1.0.1/setup-network-environment 25 | ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment 26 | ExecStart=/opt/bin/setup-network-environment 27 | RemainAfterExit=yes 28 | Type=oneshot 29 | - name: flanneld.service 30 | command: start 31 | drop-ins: 32 | - name: 50-network-config.conf 33 | content: | 34 | [Unit] 35 | Requires=etcd2.service 36 | [Service] 37 | 
ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"}}' 38 | - name: docker.service 39 | command: start 40 | drop-ins: 41 | - name: 60-wait-for-flannel-config.conf 42 | content: | 43 | [Unit] 44 | After=flanneld.service 45 | Requires=flanneld.service 46 | Restart=always 47 | - name: etcd2.service 48 | command: start 49 | update: 50 | reboot-strategy: off 51 | write_files: 52 | - path: /run/systemd/system/etcd.service.d/30-certificates.conf 53 | permissions: 0644 54 | content: | 55 | [Service] 56 | Environment=ETCD_CA_FILE=/etc/ssl/etcd/certs/ca.pem 57 | Environment=ETCD_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 58 | Environment=ETCD_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 59 | Environment=ETCD_PEER_CA_FILE=/etc/ssl/etcd/certs/ca.pem 60 | Environment=ETCD_PEER_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 61 | Environment=ETCD_PEER_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 62 | - path: /etc/ssl/etcd/certs/ca.pem 63 | permissions: 0644 64 | content: "${etcd_ca}" 65 | - path: /etc/ssl/etcd/certs/etcd.pem 66 | permissions: 0644 67 | content: "${etcd_cert}" 68 | - path: /etc/ssl/etcd/private/etcd.pem 69 | permissions: 0644 70 | content: "${etcd_key}" 71 | manage_etc_hosts: localhost 72 | role: edge-routers 73 | -------------------------------------------------------------------------------- /terraform/digitalocean/etcd_discovery_url.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /terraform/digitalocean/main.tf: -------------------------------------------------------------------------------- 1 | variable "do_token" {} 2 | variable "organization" { default = "kubeform" } 3 | variable "region" { default = "lon1" } 4 | variable "masters" { default = "3" } 5 | variable "workers" { default = "1" } 6 | variable "edge-routers" { default = "1" } 7 | variable "master_instance_type" { default = "512mb" } 8 | variable "worker_instance_type" { default = "512mb" } 9 | variable "edge-router_instance_type" { default = "512mb" } 10 | variable "etcd_discovery_url_file" { default = "etcd_discovery_url.txt" } 11 | 12 | variable "coreos_image" { default = "coreos-stable" } 13 | 14 | # Provider 15 | provider "digitalocean" { 16 | token = "${var.do_token}" 17 | } 18 | 19 | resource "tls_private_key" "ssh" { 20 | algorithm = "RSA" 21 | } 22 | 23 | resource "digitalocean_ssh_key" "default" { 24 | name = "${var.organization}" 25 | public_key = "${tls_private_key.ssh.public_key_openssh}" 26 | } 27 | 28 | # Export ssh key so we can login with core@instance -i id_rsa 29 | resource "null_resource" "keys" { 30 | depends_on = ["tls_private_key.ssh"] 31 | 32 | provisioner "local-exec" { 33 | command = "echo '${tls_private_key.ssh.private_key_pem}' > ${path.module}/id_rsa && chmod 600 ${path.module}/id_rsa" 34 | } 35 | } 36 | 37 | # Generate an etcd URL for the cluster 38 | resource "template_file" "etcd_discovery_url" { 39 | template = "/dev/null" 40 | provisioner "local-exec" { 41 | command = "curl https://discovery.etcd.io/new?size=${var.masters} > ${var.etcd_discovery_url_file}" 42 | } 43 | # This will regenerate the discovery URL if the cluster size changes 44 | vars { 45 | size = "${var.masters}" 46 | } 47 | } 48 | 49 | module "ca" { 50 | source = "github.com/Capgemini/tf_tls//ca" 51 | organization = "${var.organization}" 52 | ca_count = "${var.masters + var.workers + var.edge-routers}" 53 | deploy_ssh_hosts = 
"${concat(digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 54 | ssh_user = "core" 55 | ssh_private_key = "${tls_private_key.ssh.private_key_pem}" 56 | } 57 | 58 | module "etcd_cert" { 59 | source = "../certs/etcd" 60 | ca_cert_pem = "${module.ca.ca_cert_pem}" 61 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 62 | } 63 | 64 | module "kube_master_certs" { 65 | source = "github.com/Capgemini/tf_tls/kubernetes/master" 66 | ca_cert_pem = "${module.ca.ca_cert_pem}" 67 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 68 | ip_addresses = "${compact(digitalocean_droplet.master.*.ipv4_address)}" 69 | deploy_ssh_hosts = "${compact(digitalocean_droplet.master.*.ipv4_address)}" 70 | dns_names = "test" 71 | master_count = "${var.masters}" 72 | validity_period_hours = "8760" 73 | early_renewal_hours = "720" 74 | ssh_user = "core" 75 | ssh_private_key = "${tls_private_key.ssh.private_key_pem}" 76 | } 77 | 78 | module "kube_kubelet_certs" { 79 | source = "github.com/Capgemini/tf_tls/kubernetes/kubelet" 80 | ca_cert_pem = "${module.ca.ca_cert_pem}" 81 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 82 | ip_addresses = "${concat( digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 83 | deploy_ssh_hosts = "${concat( digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 84 | kubelet_count = "${var.masters + var.workers + var.edge-routers}" 85 | validity_period_hours = "8760" 86 | early_renewal_hours = "720" 87 | ssh_user = "core" 88 | ssh_private_key = "${tls_private_key.ssh.private_key_pem}" 89 | } 90 | 91 | module "kube_admin_cert" { 92 | source = "github.com/Capgemini/tf_tls/kubernetes/admin" 93 | ca_cert_pem = "${module.ca.ca_cert_pem}" 94 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 95 | kubectl_server_ip = "${digitalocean_droplet.master.0.ipv4_address}" 96 | } 97 | 98 | module "docker_daemon_certs" { 99 | source = "github.com/Capgemini/tf_tls//docker/daemon" 100 | ca_cert_pem = "${module.ca.ca_cert_pem}" 101 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 102 | ip_addresses_list = "${concat(digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 103 | deploy_ssh_hosts = "${concat(digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 104 | docker_daemon_count = "${var.masters + var.workers + var.edge-routers}" 105 | private_key = "${tls_private_key.ssh.private_key_pem}" 106 | validity_period_hours = 8760 107 | early_renewal_hours = 720 108 | user = "core" 109 | } 110 | 111 | module "docker_client_certs" { 112 | source = "github.com/Capgemini/tf_tls//docker/client" 113 | ca_cert_pem = "${module.ca.ca_cert_pem}" 114 | ca_private_key_pem = "${module.ca.ca_private_key_pem}" 115 | ip_addresses_list = "${concat(digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 116 | deploy_ssh_hosts = "${concat(digitalocean_droplet.edge-router.*.ipv4_address, concat(digitalocean_droplet.master.*.ipv4_address, digitalocean_droplet.worker.*.ipv4_address))}" 117 | docker_client_count = "${var.masters + var.workers + 
var.edge-routers}" 118 | private_key = "${tls_private_key.ssh.private_key_pem}" 119 | validity_period_hours = 8760 120 | early_renewal_hours = 720 121 | user = "core" 122 | } 123 | 124 | resource "template_file" "master_cloud_init" { 125 | template = "master-cloud-config.yml.tpl" 126 | depends_on = ["template_file.etcd_discovery_url"] 127 | vars { 128 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 129 | size = "${var.masters}" 130 | region = "${var.region}" 131 | etcd_ca = "${replace(module.ca.ca_cert_pem, \"\n\", \"\\n\")}" 132 | etcd_cert = "${replace(module.etcd_cert.etcd_cert_pem, \"\n\", \"\\n\")}" 133 | etcd_key = "${replace(module.etcd_cert.etcd_private_key, \"\n\", \"\\n\")}" 134 | } 135 | } 136 | 137 | resource "template_file" "worker_cloud_init" { 138 | template = "worker-cloud-config.yml.tpl" 139 | depends_on = ["template_file.etcd_discovery_url"] 140 | vars { 141 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 142 | size = "${var.masters}" 143 | region = "${var.region}" 144 | etcd_ca = "${replace(module.ca.ca_cert_pem, \"\n\", \"\\n\")}" 145 | etcd_cert = "${replace(module.etcd_cert.etcd_cert_pem, \"\n\", \"\\n\")}" 146 | etcd_key = "${replace(module.etcd_cert.etcd_private_key, \"\n\", \"\\n\")}" 147 | } 148 | } 149 | 150 | resource "template_file" "edge-router_cloud_init" { 151 | template = "edge-router-cloud-config.yml.tpl" 152 | depends_on = ["template_file.etcd_discovery_url"] 153 | vars { 154 | etcd_discovery_url = "${file(var.etcd_discovery_url_file)}" 155 | size = "${var.masters}" 156 | region = "${var.region}" 157 | etcd_ca = "${replace(module.ca.ca_cert_pem, \"\n\", \"\\n\")}" 158 | etcd_cert = "${replace(module.etcd_cert.etcd_cert_pem, \"\n\", \"\\n\")}" 159 | etcd_key = "${replace(module.etcd_cert.etcd_private_key, \"\n\", \"\\n\")}" 160 | } 161 | } 162 | 163 | # Masters 164 | resource "digitalocean_droplet" "master" { 165 | image = "${var.coreos_image}" 166 | region = "${var.region}" 167 | count = "${var.masters}" 168 | name = "kube-master-${count.index}" 169 | size = "${var.master_instance_type}" 170 | private_networking = true 171 | user_data = "${template_file.master_cloud_init.rendered}" 172 | ssh_keys = [ 173 | "${digitalocean_ssh_key.default.id}" 174 | ] 175 | 176 | # Do some early bootstrapping of the CoreOS machines. This will install 177 | # python and pip so we can use as the ansible_python_interpreter in our playbooks 178 | connection { 179 | user = "core" 180 | private_key = "${tls_private_key.ssh.private_key_pem}" 181 | } 182 | provisioner "file" { 183 | source = "../scripts/coreos" 184 | destination = "/tmp" 185 | } 186 | provisioner "remote-exec" { 187 | inline = [ 188 | "sudo chmod -R +x /tmp/coreos", 189 | "/tmp/coreos/bootstrap.sh", 190 | "~/bin/python /tmp/coreos/get-pip.py", 191 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 192 | "sudo rm -rf /tmp/coreos" 193 | ] 194 | } 195 | } 196 | 197 | # Workers 198 | resource "digitalocean_droplet" "worker" { 199 | image = "${var.coreos_image}" 200 | region = "${var.region}" 201 | count = "${var.workers}" 202 | name = "kube-worker-${count.index}" 203 | size = "${var.worker_instance_type}" 204 | private_networking = true 205 | user_data = "${template_file.worker_cloud_init.rendered}" 206 | ssh_keys = [ 207 | "${digitalocean_ssh_key.default.id}" 208 | ] 209 | # Do some early bootstrapping of the CoreOS machines. 
This will install 210 | # python and pip so we can use as the ansible_python_interpreter in our playbooks 211 | connection { 212 | user = "core" 213 | private_key = "${tls_private_key.ssh.private_key_pem}" 214 | } 215 | provisioner "file" { 216 | source = "../scripts/coreos" 217 | destination = "/tmp" 218 | } 219 | provisioner "remote-exec" { 220 | inline = [ 221 | "sudo chmod -R +x /tmp/coreos", 222 | "/tmp/coreos/bootstrap.sh", 223 | "~/bin/python /tmp/coreos/get-pip.py", 224 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 225 | "sudo rm -rf /tmp/coreos" 226 | ] 227 | } 228 | } 229 | 230 | # Edge-routers 231 | resource "digitalocean_droplet" "edge-router" { 232 | image = "${var.coreos_image}" 233 | region = "${var.region}" 234 | count = "${var.edge-routers}" 235 | name = "kube-edge-router-${count.index}" 236 | size = "${var.edge-router_instance_type}" 237 | private_networking = true 238 | user_data = "${template_file.edge-router_cloud_init.rendered}" 239 | ssh_keys = [ 240 | "${digitalocean_ssh_key.default.id}" 241 | ] 242 | # Do some early bootstrapping of the CoreOS machines. This will install 243 | # python and pip so we can use as the ansible_python_interpreter in our playbooks 244 | connection { 245 | user = "core" 246 | private_key = "${tls_private_key.ssh.private_key_pem}" 247 | } 248 | provisioner "file" { 249 | source = "../scripts/coreos" 250 | destination = "/tmp" 251 | } 252 | provisioner "remote-exec" { 253 | inline = [ 254 | "sudo chmod -R +x /tmp/coreos", 255 | "/tmp/coreos/bootstrap.sh", 256 | "~/bin/python /tmp/coreos/get-pip.py", 257 | "sudo mv /tmp/coreos/runner ~/bin/pip && sudo chmod 0755 ~/bin/pip", 258 | "sudo rm -rf /tmp/coreos" 259 | ] 260 | } 261 | } 262 | 263 | # Outputs 264 | output "master_ips" { 265 | value = "${join(",", digitalocean_droplet.master.*.ipv4_address)}" 266 | } 267 | output "worker_ips" { 268 | value = "${join(",", digitalocean_droplet.worker.*.ipv4_address)}" 269 | } 270 | output "edge-router_ips" { 271 | value = "${join(",", digitalocean_droplet.edge-router.*.ipv4_address)}" 272 | } 273 | -------------------------------------------------------------------------------- /terraform/digitalocean/master-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | advertise-client-urls: http://$public_ipv4:2379,http://$public_ipv4:4001 6 | initial-advertise-peer-urls: http://$public_ipv4:2380 7 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 8 | listen-peer-urls: http://$public_ipv4:2380,http://$public_ipv4:7001 9 | discovery: ${etcd_discovery_url} 10 | fleet: 11 | metadata: "role=master,region=${region}" 12 | units: 13 | - name: setup-network-environment.service 14 | command: start 15 | content: | 16 | [Unit] 17 | Description=Setup Network Environment 18 | Documentation=https://github.com/kelseyhightower/setup-network-environment 19 | Requires=network-online.target 20 | After=network-online.target 21 | 22 | [Service] 23 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 24 | ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/1.0.1/setup-network-environment 25 | ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment 26 | ExecStart=/opt/bin/setup-network-environment 27 | RemainAfterExit=yes 28 | Type=oneshot 29 | - name: flanneld.service 30 | command: start 31 | drop-ins: 32 | - name: 
50-network-config.conf 33 | content: | 34 | [Unit] 35 | Requires=etcd2.service 36 | [Service] 37 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"}}' 38 | - name: docker.service 39 | command: start 40 | drop-ins: 41 | - name: 60-wait-for-flannel-config.conf 42 | content: | 43 | [Unit] 44 | After=flanneld.service 45 | Requires=flanneld.service 46 | [Service] 47 | Restart=always 48 | - name: etcd2.service 49 | command: start 50 | update: 51 | reboot-strategy: off 52 | write_files: 53 | - path: /run/systemd/system/etcd.service.d/30-certificates.conf 54 | permissions: 0644 55 | content: | 56 | [Service] 57 | Environment=ETCD_CA_FILE=/etc/ssl/etcd/certs/ca.pem 58 | Environment=ETCD_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 59 | Environment=ETCD_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 60 | Environment=ETCD_PEER_CA_FILE=/etc/ssl/etcd/certs/ca.pem 61 | Environment=ETCD_PEER_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 62 | Environment=ETCD_PEER_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 63 | - path: /etc/ssl/etcd/certs/ca.pem 64 | permissions: 0644 65 | content: "${etcd_ca}" 66 | - path: /etc/ssl/etcd/certs/etcd.pem 67 | permissions: 0644 68 | content: "${etcd_cert}" 69 | - path: /etc/ssl/etcd/private/etcd.pem 70 | permissions: 0644 71 | content: "${etcd_key}" 72 | manage_etc_hosts: localhost 73 | role: masters 74 | -------------------------------------------------------------------------------- /terraform/digitalocean/worker-cloud-config.yml.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | coreos: 3 | etcd2: 4 | proxy: on 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | discovery: ${etcd_discovery_url} 7 | fleet: 8 | metadata: "role=worker,region=${region}" 9 | etcd_servers: "http://localhost:2379" 10 | locksmith: 11 | endpoint: "http://localhost:2379" 12 | units: 13 | - name: setup-network-environment.service 14 | command: start 15 | content: | 16 | [Unit] 17 | Description=Setup Network Environment 18 | Documentation=https://github.com/kelseyhightower/setup-network-environment 19 | Requires=network-online.target 20 | After=network-online.target 21 | 22 | [Service] 23 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 24 | ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/1.0.1/setup-network-environment 25 | ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment 26 | ExecStart=/opt/bin/setup-network-environment 27 | RemainAfterExit=yes 28 | Type=oneshot 29 | - name: flanneld.service 30 | command: start 31 | drop-ins: 32 | - name: 50-network-config.conf 33 | content: | 34 | [Unit] 35 | Requires=etcd2.service 36 | [Service] 37 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network": "10.2.0.0/16", "Backend": {"Type": "vxlan"}}' 38 | - name: docker.service 39 | command: start 40 | drop-ins: 41 | - name: 60-wait-for-flannel-config.conf 42 | content: | 43 | [Unit] 44 | After=flanneld.service 45 | Requires=flanneld.service 46 | Restart=always 47 | - name: etcd2.service 48 | command: start 49 | update: 50 | reboot-strategy: off 51 | write_files: 52 | - path: /run/systemd/system/etcd.service.d/30-certificates.conf 53 | permissions: 0644 54 | content: | 55 | [Service] 56 | Environment=ETCD_CA_FILE=/etc/ssl/etcd/certs/ca.pem 57 | Environment=ETCD_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 58 | 
Environment=ETCD_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 59 | Environment=ETCD_PEER_CA_FILE=/etc/ssl/etcd/certs/ca.pem 60 | Environment=ETCD_PEER_CERT_FILE=/etc/ssl/etcd/certs/etcd.pem 61 | Environment=ETCD_PEER_KEY_FILE=/etc/ssl/etcd/private/etcd.pem 62 | - path: /etc/ssl/etcd/certs/ca.pem 63 | permissions: 0644 64 | content: "${etcd_ca}" 65 | - path: /etc/ssl/etcd/certs/etcd.pem 66 | permissions: 0644 67 | content: "${etcd_cert}" 68 | - path: /etc/ssl/etcd/private/etcd.pem 69 | permissions: 0644 70 | content: "${etcd_key}" 71 | manage_etc_hosts: localhost 72 | role: workers 73 | -------------------------------------------------------------------------------- /terraform/gce/main.tf: -------------------------------------------------------------------------------- 1 | variable "account_file" {} 2 | variable "project" {} 3 | variable "region" { default = "europe-west1" } 4 | variable "gce_user" { default = "kube" } 5 | variable "zone" { default = "europe-west1-b" } 6 | variable "workers" { default = "1" } 7 | variable "masters" { default = "3" } 8 | variable "master_instance_type" { default = "n1-standard-2" } 9 | variable "worker_instance_type" { default = "n1-standard-2" } 10 | 11 | provider "google" { 12 | account_file = "${var.account_file}" 13 | project = "${var.project}" 14 | region = "${var.region}" 15 | } 16 | -------------------------------------------------------------------------------- /terraform/scripts/coreos/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | set -e 4 | 5 | cd 6 | 7 | if [[ -e $HOME/.bootstrapped ]]; then 8 | exit 0 9 | fi 10 | 11 | PYPY_VERSION=5.1.1 12 | 13 | if [[ -e $HOME/pypy-$PYPY_VERSION-linux64.tar.bz2 ]]; then 14 | tar -xjf $HOME/pypy-$PYPY_VERSION-linux64.tar.bz2 15 | rm -rf $HOME/pypy-$PYPY_VERSION-linux64.tar.bz2 16 | else 17 | wget -O - https://bitbucket.org/pypy/pypy/downloads/pypy-$PYPY_VERSION-linux64.tar.bz2 |tar -xjf - 18 | fi 19 | 20 | mv -n pypy-$PYPY_VERSION-linux64 pypy 21 | 22 | ## library fixup 23 | mkdir -p pypy/lib 24 | ln -snf /lib64/libncurses.so.5.9 $HOME/pypy/lib/libtinfo.so.5 25 | 26 | mkdir -p $HOME/bin 27 | 28 | cat > $HOME/bin/python < wercker_inventory 24 | ansible-galaxy install --force -r requirements.yml 25 | ansible-playbook -i wercker_inventory --syntax-check site.yml 26 | 27 | deploy: 28 | # Override the python:2.7 box 29 | box: 30 | id: node:4-slim 31 | 32 | steps: 33 | - install-packages: 34 | packages: git 35 | - npm-install: 36 | options: -g rimraf gitbook-cli 37 | - script: 38 | name: Generate docs 39 | code: | 40 | npm run docs:build 41 | - lukevivier/gh-pages: 42 | token: $GITHUB_TOKEN 43 | basedir: _book 44 | --------------------------------------------------------------------------------
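For local use outside the wercker pipeline, the same pieces can be exercised roughly as follows. This is a minimal sketch, assuming a DigitalOcean API token exported as DO_TOKEN, Terraform and Ansible installed locally, and that the inventory/terraform.py dynamic inventory can read the resulting state; the variable names match terraform/digitalocean/main.tf, but the exact flags and counts are illustrative rather than taken from this repository's docs:

    # Provision the CoreOS droplets (do_token, masters and workers are variables
    # declared in terraform/digitalocean/main.tf)
    cd terraform/digitalocean
    terraform apply -var "do_token=$DO_TOKEN" -var "masters=3" -var "workers=1"
    cd ../..
    # Install role dependencies and converge the cluster, mirroring the wercker build step
    ansible-galaxy install --force -r requirements.yml
    ansible-playbook -i inventory site.yml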