├── .gitignore ├── .gitmodules ├── Readme.md ├── Vagrantfile ├── ansible.cfg ├── bootstrap_coreos.yml ├── ceph-key.py ├── cloud_config.tmpl ├── cloud_config_install.tmpl ├── config.rb ├── docs ├── getting_started_hetzner.md ├── hetzner_add_key.png ├── hetzner_key_list.png ├── hetzner_key_management.png ├── hetzner_server_list.png └── hetzner_webservice_user.png ├── inventory-hetzner.ini.sample ├── inventory-vagrant.ini ├── reset-etdnode.yml ├── reset_ceph.yml ├── roles ├── boot-rescue │ └── tasks │ │ ├── hetzner.yml │ │ ├── main.yml │ │ └── ovh.yml ├── calico │ ├── tasks │ │ ├── main.yml │ │ └── templates │ │ │ └── calico.yaml │ └── templates │ │ ├── calico-official.yml │ │ ├── calico.yml.tmpl │ │ ├── calico.yml.tmpl.bak │ │ ├── cluster_policy.yml │ │ ├── env.tmpl │ │ ├── failsafe_policy.yml │ │ └── host_endpoint.yml.tmpl ├── ceph-on-kubernetes-config │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── ceph │ │ ├── admin.keyring.tmpl │ │ ├── bootstrap.keyring.tmpl │ │ ├── ceph.conf.tmpl │ │ └── mon.keyring.tmpl │ │ └── kubernetes │ │ ├── ceph-admin-secret.tmpl │ │ ├── ceph-bootstrap-secret.tmpl │ │ ├── ceph-conf-combined.tmpl │ │ ├── ceph-mds-v1-dp.yaml │ │ ├── ceph-mon-check-v1-rc.yaml │ │ ├── ceph-mon-secret.tmpl │ │ ├── ceph-mon-v1-ds.yaml │ │ ├── ceph-mon-v1-svc.yaml │ │ ├── ceph-namespace.yaml │ │ ├── ceph-osd-v1-ds.yaml │ │ └── secret.tmpl ├── ceph-on-kubernetes-resources │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── cloud-config │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── etcd-certificates │ └── tasks │ │ ├── install_etcdca.yml │ │ └── main.yml ├── etcd-certs-cfssl │ └── tasks │ │ ├── install-cfssl.yml │ │ └── main.yml ├── extra-cas │ └── tasks │ │ └── main.yml ├── install-coreos │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── bare_metal.yml │ │ ├── main.yml │ │ └── vagrant.yml ├── k8s-dns-addon │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── dns-addon.yml ├── k8s-kubesystem-namespace │ ├── files │ │ └── kube-system-namespace.yml │ └── tasks │ │ └── main.yml ├── k8s-resource │ └── tasks │ │ └── main.yml ├── kubectl-config │ └── tasks │ │ └── main.yml ├── kubectl │ └── tasks │ │ └── main.yml ├── kubernetes-certificates │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── openssl.cnf.tmpl │ │ └── openssl_worker.cnf.tmpl ├── label-node │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── dns-addon.yml ├── loadbalancer-config │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── loadbalancer_ds.yml.tmpl ├── loadbalancer │ └── meta │ │ └── main.yml ├── reboot │ └── tasks │ │ └── main.yml ├── safe_reboot │ └── tasks │ │ └── main.yml ├── unlabel-node │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── dns-addon.yml ├── unsafe_reboot │ └── tasks │ │ └── main.yml └── wait_for_k8s_node │ └── tasks │ └── main.yml └── update_cloudconfig.yml /.gitignore: -------------------------------------------------------------------------------- 1 | *-etcd-ca-keys/ 2 | *-kubernetes-ca/ 3 | *-ceph/ 4 | *-loadbalancer/ 5 | *.ini 6 | .vagrant 7 | !inventory-vagrant.ini 8 | vagrant-* 9 | *.retry 10 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "library"] 2 | path = library 3 | url = https://github.com/cornelius-keller/ansible-ovh 4 | [submodule "roles/ansible-coreos-bootstrap"] 5 | path = roles/ansible-coreos-bootstrap 6 | url = https://github.com/sigma/ansible-coreos-bootstrap.git 7 | 
-------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # Ansible playbooks to install CoreOS and Kubernetes on bare metal servers 2 | 3 | This repository contains Ansible playbooks to bootstrap and update a Kubernetes cluster on low-budget bare metal servers provided by Hetzner, Kimsufi or OVH. 4 | 5 | # Disclaimer 6 | CoreOS, Kubernetes, etcd, Ceph etc. are highly complex distributed systems. Use them at your own risk and be sure you know what you are doing. I give no guarantees regarding your data, availability etc. 7 | 8 | # State of this documentation: 9 | The documentation is very basic for now. Open sourcing this is a spare-time project, so the time I can spend on it and on updating the documentation is limited. So please, if something does not work on the first try as you expected, take some time to dig into the playbooks etc. before opening an issue. I'll be happy to merge pull requests even if they only improve the documentation. 10 | 11 | ## Getting started: 12 | There is a **new** [getting started documentation](docs/getting_started_hetzner.md) to get started on Hetzner. 13 | 14 | 15 | ## Features 16 | 17 | * Works with every server provider that offers an API to boot the server into a rescue Linux system over the network. For now Hetzner and Kimsufi (OVH) are supported. 18 | * Installs CoreOS on all cluster nodes 19 | * Sets up an etcd cluster on all hosts in the etcd-node group (should be an uneven number) 20 | * Sets up etcd in proxy mode on the hosts in the etcd-proxy group (should be all other nodes) 21 | * Sets up a VPN between the nodes using flannel and tinc 22 | * Installs the Kubernetes API server etc. in an HA setup on all nodes in the kubernetes-master group 23 | * Sets up the hosts in the kubernetes-node group as Kubernetes nodes 24 | * Installs the Kubernetes DNS addon 25 | * Installs kubectl for you in ~/bin 26 | * Configures the ~/.kube/config file for you for cluster access 27 | * Sets up a ceph cluster on top of Kubernetes (highly experimental) for block devices. 28 | * Provides a way to update the cloud-config of your cluster and reboot without destroying your ceph cluster. 29 | * CoreOS auto-updates are applied in a way that keeps the ceph cluster alive. 30 | * Supports multiple clusters 31 | 32 | ## Missing features (ToDo) 33 | * Configure centralized logging / Kibana for the cluster automatically. For now, have a look at the cluster addons folder in the Kubernetes project to set this up. 34 | * Add support for GlusterFS to have cluster-wide shared volumes 35 | * Add support for configuration switches, e.g. with / without ceph support. 36 | 37 | ## Getting started. 38 | ### Hetzner 39 | #### Preparations: 40 | * Order machines. Three is a good start. For testing purposes I used cheap ones from the "Serverbidding". 41 | * Create an inventory by copying inventory-hetzner.ini.sample. 42 | * In the sample inventory the hosts are grouped in the following way: 43 | * All three nodes are etcd nodes 44 | * There are no etcd proxy nodes. If you want to test etcd proxy nodes with three nodes you can use one host in the etcd-node group and two hosts in the etcd-proxy group. But be aware that your cluster is down if the single etcd node is down.
45 | * Two are kube-apiserver / master nodes 46 | * One node is a kubernetes node 47 | * All nodes are ceph monitor nodes (should stay three) 48 | * All nodes are ceph OSDs (can be more) 49 | * If your cluster and your load grow, you probably want to put the etcd cluster and the kubernetes master nodes on dedicated hosts. 50 | * Upload your public key into the hetzner admin interface and put the fingerprint of the key into the corresponding inventory variable. 51 | * Put your credentials for the hetzner webservice into the corresponding variables in the inventory. 52 | #### Bootstrap cluster 53 | 54 | Run: 55 | 56 | ansible-playbook -i inventory-hetzner.ini bootstrap_coreos.yml 57 | 58 | After about 10-15 minutes your cluster is ready and you can access it with kubectl (a quick verification example is shown at the end of this Readme). 59 | #### Update cluster 60 | If you want to update your cluster, for example to roll out a new kubernetes version or to update the cloud configuration for some other reason, run: 61 | 62 | ansible-playbook -i inventory-hetzner.ini update_cloudconfig.yml 63 | 64 | 65 | ### Directories 66 | The cluster bootstrap creates the following directories, prefixed with your cluster name (you can set the cluster name in the inventory). These directories contain important configuration information such as the generated certificates and certificate authorities for etcd and kubernetes, and the ceph configuration files. 67 | 68 | * `-etcd-ca-keys`: Contains the CA, the certificates and the public / private keys used to secure etcd. Don't delete it unless you dispose of the cluster. 69 | * `-kubernetes-ca`: Contains the CA, the certificates and the public / private keys used to secure kubernetes and access the cluster. Don't delete it unless you dispose of the cluster. 70 | * `-ceph`: Contains the ceph configuration. Can be regenerated from the inventory. 71 | 72 | 73 | 74 | # Known issues 75 | * On kimsufi / ovh you are not able to put your servers into rescue mode after you have installed coreos on them. (I opened some tickets regarding this and had a long conversation about it, but so far nothing has changed.) So if something went wrong and you want to restart from scratch, do the following steps: 76 | * Log in to the kimsufi console 77 | * Select "Reinstall server" from the menu and install an arbitrary OS on the server. 78 | * Wait for a lot of failure emails and an engineer to fix this 79 | * Once you get an email from support that your server has been reinstalled with the OS you selected, you are ready to go again. 80 | 81 | # Credits 82 | 83 | * The general coreos setup is based on https://coreos.com/kubernetes/docs/latest/getting-started.html 84 | * The general idea of how to run ceph on top of kubernetes is inspired by https://github.com/AcalephStorage/ceph-docker/tree/kubernetes/examples 85 | * Many thanks to kayrus in the coreos IRC channel for patiently answering all my questions.
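# Verifying cluster access

After `bootstrap_coreos.yml` has finished, a quick sanity check (a minimal sketch, assuming the playbooks installed kubectl into `~/bin` and wrote `~/.kube/config` as described in the features above; adjust the path if `~/bin` is not already on your `PATH`):

    ~/bin/kubectl get nodes
    ~/bin/kubectl get pods --all-namespaces

All nodes should report `Ready` after a few minutes, and the DNS addon and the ceph pods should show up in the output of the second command once their resources have been created.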
86 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # # vi: set ft=ruby : 3 | 4 | require 'fileutils' 5 | 6 | Vagrant.require_version ">= 1.6.0" 7 | 8 | CLOUD_CONFIG_PATH = File.join(File.dirname(__FILE__), "user-data") 9 | CONFIG = File.join(File.dirname(__FILE__), "config.rb") 10 | 11 | # Defaults for config options defined in CONFIG 12 | $num_instances = 1 13 | $instance_name_prefix = "core" 14 | $update_channel = "alpha" 15 | $image_version = "current" 16 | $enable_serial_logging = false 17 | $share_home = false 18 | $vm_gui = false 19 | $vm_memory = 1024 20 | $vm_cpus = 1 21 | $vb_cpuexecutioncap = 100 22 | $shared_folders = {} 23 | $forwarded_ports = {} 24 | 25 | # Attempt to apply the deprecated environment variable NUM_INSTANCES to 26 | # $num_instances while allowing config.rb to override it 27 | if ENV["NUM_INSTANCES"].to_i > 0 && ENV["NUM_INSTANCES"] 28 | $num_instances = ENV["NUM_INSTANCES"].to_i 29 | end 30 | 31 | if File.exist?(CONFIG) 32 | require CONFIG 33 | end 34 | 35 | # Use old vb_xxx config variables when set 36 | def vm_gui 37 | $vb_gui.nil? ? $vm_gui : $vb_gui 38 | end 39 | 40 | def vm_memory 41 | $vb_memory.nil? ? $vm_memory : $vb_memory 42 | end 43 | 44 | def vm_cpus 45 | $vb_cpus.nil? ? $vm_cpus : $vb_cpus 46 | end 47 | 48 | Vagrant.configure("2") do |config| 49 | # always use Vagrants insecure key 50 | config.ssh.insert_key = true 51 | # forward ssh agent to easily ssh into the different machines 52 | config.ssh.forward_agent = true 53 | 54 | config.vm.box = "coreos-%s" % $update_channel 55 | if $image_version != "current" 56 | config.vm.box_version = $image_version 57 | end 58 | config.vm.box_url = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/%s/coreos_production_vagrant.json" % [$update_channel, $image_version] 59 | 60 | ["vmware_fusion", "vmware_workstation"].each do |vmware| 61 | config.vm.provider vmware do |v, override| 62 | override.vm.box_url = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/%s/coreos_production_vagrant_vmware_fusion.json" % [$update_channel, $image_version] 63 | end 64 | end 65 | 66 | config.vm.provider :virtualbox do |v| 67 | # On VirtualBox, we don't have guest additions or a functional vboxsf 68 | # in CoreOS, so tell Vagrant that so it can be smarter. 
69 | v.check_guest_additions = false 70 | v.functional_vboxsf = false 71 | end 72 | 73 | # plugin conflict 74 | if Vagrant.has_plugin?("vagrant-vbguest") then 75 | config.vbguest.auto_update = false 76 | end 77 | 78 | (1..$num_instances).each do |i| 79 | config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config| 80 | config.vm.hostname = vm_name 81 | 82 | if $enable_serial_logging 83 | logdir = File.join(File.dirname(__FILE__), "log") 84 | FileUtils.mkdir_p(logdir) 85 | 86 | serialFile = File.join(logdir, "%s-serial.txt" % vm_name) 87 | FileUtils.touch(serialFile) 88 | 89 | ["vmware_fusion", "vmware_workstation"].each do |vmware| 90 | config.vm.provider vmware do |v, override| 91 | v.vmx["serial0.present"] = "TRUE" 92 | v.vmx["serial0.fileType"] = "file" 93 | v.vmx["serial0.fileName"] = serialFile 94 | v.vmx["serial0.tryNoRxLoss"] = "FALSE" 95 | end 96 | end 97 | 98 | config.vm.provider :virtualbox do |vb, override| 99 | vb.customize ["modifyvm", :id, "--uart1", "0x3F8", "4"] 100 | vb.customize ["modifyvm", :id, "--uartmode1", serialFile] 101 | end 102 | end 103 | 104 | ip = "172.17.8.#{i+100}" 105 | config.vm.network :private_network, ip: ip #, bridge: "eth0" 106 | 107 | if $expose_docker_tcp 108 | config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), host_ip: "127.0.0.1", auto_correct: true 109 | end 110 | 111 | $forwarded_ports.each do |guest, host| 112 | config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true 113 | end 114 | 115 | file_to_disk = "./tmp/large_disk#{i}.vdi" 116 | config.vm.provider :virtualbox do |vb, override| 117 | vb.customize ['createhd', '--filename', file_to_disk, '--size', 500 * 1024] 118 | vb.customize ['storageattach', :id, '--storagectl', 'IDE Controller', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk] 119 | end 120 | 121 | 122 | ["vmware_fusion", "vmware_workstation"].each do |vmware| 123 | config.vm.provider vmware do |v| 124 | v.gui = vm_gui 125 | v.vmx['memsize'] = vm_memory 126 | v.vmx['numvcpus'] = vm_cpus 127 | end 128 | end 129 | 130 | config.vm.provider :virtualbox do |vb| 131 | vb.gui = vm_gui 132 | vb.memory = vm_memory 133 | vb.cpus = vm_cpus 134 | vb.customize ["modifyvm", :id, "--cpuexecutioncap", "#{$vb_cpuexecutioncap}"] 135 | vb.customize ['modifyvm', :id, '--natdnshostresolver1', 'off'] 136 | vb.customize ['modifyvm', :id, '--natdnsproxy1', 'off'] 137 | end 138 | 139 | 140 | # Uncomment below to enable NFS for sharing the host machine into the coreos-vagrant VM. 
141 | #config.vm.synced_folder ".", "/home/core/share", id: "core", :nfs => true, :mount_options => ['nolock,vers=3,udp'] 142 | $shared_folders.each_with_index do |(host_folder, guest_folder), index| 143 | config.vm.synced_folder host_folder.to_s, guest_folder.to_s, id: "core-share%02d" % index, nfs: true, mount_options: ['nolock,vers=3,udp'] 144 | end 145 | 146 | if $share_home 147 | config.vm.synced_folder ENV['HOME'], ENV['HOME'], id: "home", :nfs => true, :mount_options => ['nolock,vers=3,udp'] 148 | end 149 | 150 | if File.exist?(CLOUD_CONFIG_PATH) 151 | config.vm.provision :file, :source => "#{CLOUD_CONFIG_PATH}", :destination => "/tmp/vagrantfile-user-data" 152 | config.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true 153 | end 154 | config.vm.provision "file", source: "~/.ssh/id_rsa.pub", destination: "/home/core/.ssh/me.pub" 155 | config.vm.provision "shell", inline: "cat /home/core/.ssh/me.pub >> /home/core/.ssh/authorized_keys" 156 | #config.vm.provision "shell", inline: "sed -i '/10.0.2.3/d' /etc/resolv.conf" 157 | 158 | end 159 | end 160 | end 161 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | callback_whitelist = profile_tasks 4 | ansible_python_interpreter = /home/core/pypy/bin/python 5 | 6 | -------------------------------------------------------------------------------- /bootstrap_coreos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reboot server into rescue image 3 | hosts: coreos 4 | connection: local 5 | gather_facts: no 6 | vars: 7 | ansible_python_interpreter: /usr/bin/python 8 | roles: 9 | - role: boot-rescue 10 | 11 | - name: install coreos 12 | hosts: coreos 13 | user: root 14 | gather_facts: no 15 | 16 | roles: 17 | #- role: etcd-certificates 18 | - role: etcd-certs-cfssl 19 | - role: extra-cas 20 | - { role: install-coreos, ansible_python_interpreter: /usr/bin/python } 21 | - role: kubernetes-certificates 22 | - role: ceph-on-kubernetes-config 23 | - { role: ansible-coreos-bootstrap, ansible_ssh_user: core } 24 | - { role: cloud-config, clear_etcd_data: true, ansible_ssh_user: core } 25 | - { role: unsafe_reboot, unsafe_reboot_dealay: 1, ansible_ssh_user: core } 26 | 27 | - name: configure install and configure kubectl and install dns adonn 28 | hosts: kubernetes 29 | gather_facts: no 30 | vars: 31 | ansible_python_interpreter: /usr/bin/python 32 | pre_tasks: 33 | - name: wait for kubernetes master to come up 34 | delegate_to: localhost 35 | connection: local 36 | when: inventory_hostname in groups['kubernetes-master'] 37 | wait_for: 38 | host: "{{ inventory_hostname }}" 39 | port: 6443 40 | timeout: 900 41 | roles: 42 | - role: kubectl 43 | - role: kubectl-config 44 | - role: k8s-kubesystem-namespace 45 | - { role: k8s-dns-addon } 46 | - { role: ceph-on-kubernetes-resources, ansible_ssh_user: core, ansible_python_interpreter: /home/core/pypy/bin/python } 47 | - role: calico 48 | # - role: loadbalancer 49 | -------------------------------------------------------------------------------- /ceph-key.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import logging 3 | import os 4 | import uuid 5 | import struct 6 | import time 7 | import base64 8 | 9 | key = os.urandom(16) 10 | header = struct.pack( 11 | ' 
/srv/tinc/hosts/$host 481 | fi 482 | done 483 | #docker exec tinc /usr/sbin/tinc reload 484 | rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc reload 485 | - path: /etc/environment 486 | permissions: 0774 487 | owner: root 488 | content: | 489 | COREOS_PUBLIC_IPV4={{ inventory_hostname }} 490 | #COREOS_PRIVATE_IPV4=172.17.8.101 491 | ETCDCTL_CERT_FILE=/etc/ssl/etcd/key.crt 492 | ETCDCTL_CA_FILE=/etc/ssl/etcd/ca.crt 493 | ETCDCTL_PEERS="{% for host in groups['etcd-node'] %}https://{{host}}:2379{%if not loop.last %},{% endif %}{% endfor %}" 494 | ETCDCTL_KEY_FILE=/etc/ssl/etcd/key.key 495 | LOCKSMITHCTL_ETCD_CERTFILE=/etc/ssl/etcd/key.crt 496 | LOCKSMITHCTL_ETCD_CAFILE=/etc/ssl/etcd/ca.crt 497 | LOCKSMITHCTL_ENDPOINT=https://{{ inventory_hostname }}:4001 498 | LOCKSMITHCTL_ETCD_KEYFILE=/etc/ssl/etcd/key.key 499 | 500 | - path: /etc/flannel/options.env 501 | permissions: 0774 502 | owner: root 503 | content: | 504 | FLANNELD_ETCD_ENDPOINTS=https://{{ inventory_hostname }}:4001 505 | FLANNELD_ETCD_KEYFILE=/etc/ssl/etcd/key.key 506 | FLANNELD_ETCD_CERTFILE=/etc/ssl/etcd/key.crt 507 | FLANNELD_ETCD_CAFILE=/etc/ssl/etcd/ca.crt 508 | FLANNELD_IFACE={{ inventory_hostname }} 509 | 510 | - path: /srv/tinc_conf_updater.sh 511 | permissions: 0774 512 | owner: root 513 | content: | 514 | #!/bin/sh 515 | #export DOCKER_HOST=unix:///var/run/early-docker.sock 516 | . /etc/tinc-env 517 | host=${ETCD_WATCH_KEY/\/services\/tinc\//} 518 | #echo "host is $host" 519 | #echo "$ETCD_WATCH_KEY\" key was updated to \"$ETCD_WATCH_VALUE\" value by \"$ETCD_WATCH_ACTION\" action" 520 | if [ $TINC_HOSTNAME != $host ]; then 521 | if [ "$ETCD_WATCH_ACTION" = "set" ]; then 522 | echo "configuring new tinc host $host" 523 | current_value=""; 524 | if [ -f /srv/tinc/hosts/$host ]; then 525 | current_value="$( cat /srv/tinc/hosts/$host )" 526 | fi 527 | if [ "$ETCD_WATCH_VALUE" != "\"$current_value\"" ]; then 528 | rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc add ConnectTo = $host 529 | #etcdctl get /services/tinc/$host | sed -e 's/\"//g' > /srv/tinc/hosts/$host 530 | echo "$ETCD_WATCH_VALUE" | sed -e 's/\"//g' > /srv/tinc/hosts/$host 531 | rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc reload \ 532 | || rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc stop 533 | #docker exec tinc /usr/sbin/tinc reload 534 | echo "done" 535 | else 536 | echo "old value = new value; nothing to do" 537 | fi 538 | fi 539 | if [ "$ETCD_WATCH_ACTION" = "delete" ] || [ "$ETCD_WATCH_ACTION" = "expire" ]; then 540 | echo "removing tinc host $host" 541 | rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc del ConnectTo = $host 542 | rm -f /srv/tinc/hosts/$host 543 | rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc reload \ 544 | || rkt enter $(cat /srv/tinc/tinc.uuid ) /usr/sbin/tinc stop 545 | echo "done" 546 | fi 547 | fi 548 | - path: /etc/ssl/etcd/ca.crt 549 | permissions: 0644 550 | content: | 551 | {{ etcd_ca_certificate|indent(width=6) }} 552 | 553 | - path: /etc/ssl/etcd/key.crt 554 | permissions: 0644 555 | content: | 556 | {{ etcd_cert|indent(width=6) }} 557 | 558 | - path: /etc/ssl/etcd/key.key 559 | permissions: 0644 560 | content: | 561 | {{ etcd_key|indent(width=6) }} 562 | 563 | - path: /etc/ceph/ceph.conf 564 | content: {{ ceph_conf.content }} 565 | encoding: base64 566 | 567 | - path: /etc/ceph/ceph.client.admin.keyring 568 | content: {{ ceph_admin_keyring.content }} 569 | encoding: base64 570 | 571 | - path: /etc/ceph/ceph.mon.keyring 572 | content: {{ ceph_mon_keyring.content }} 573 | encoding: base64 574 | 
575 | - path: /etc/kube_apiserver_haproxy.cfg 576 | content: | 577 | global 578 | daemon 579 | maxconn 256 580 | defaults 581 | mode tcp 582 | default-server inter 1s fall 2 583 | timeout connect 5000ms 584 | timeout client 50000ms 585 | timeout server 50000ms 586 | backend apiserver_backend 587 | mode tcp 588 | balance source 589 | hash-type consistent 590 | {% for apiserver in groups['kubernetes-master'] %} 591 | server kube{{ loop.index }} {{ apiserver }}:6443 check 592 | {% endfor %} 593 | frontend apiserver_frontend 594 | mode tcp 595 | bind *:8080 596 | default_backend apiserver_backend 597 | 598 | coreos: 599 | #etcd: 600 | # generate a new token for each unique cluster from https://discovery.etcd.io/new 601 | # WARNING: replace each time you 'vagrant destroy' 602 | #discovery: https://discovery.etcd.io/4dbce9b90646e13c17bd298cffc0ed99 603 | #addr: {{ inventory_hostname }}:4001 604 | #peer-addr: {{ inventory_hostname }}:7001 605 | update: 606 | reboot-strategy: off 607 | etcd2: 608 | # generate a new token for each unique cluster from https://discovery.etcd.io/new?size=3 609 | #discovery: "{# etcd_discovery_url #}" 610 | name: {{ coreos_hostname }} 611 | initial-cluster: "{% for host in groups['etcd-node'] %}{{ hostvars[host]['coreos_hostname'] }}=https://{{host}}:2380{%if not loop.last %},{% endif %}{% endfor %}" 612 | #initial_cluster_state: NEW 613 | # multi-region and multi-cloud deployments need to use $public_ipv4 614 | {% if inventory_hostname in groups['etcd-node'] %} 615 | 616 | advertise-client-urls: "https://{{ inventory_hostname }}:2379" 617 | initial-advertise-peer-urls: "https://{{ inventory_hostname }}:2380" 618 | listen-peer-urls: "https://{{ inventory_hostname }}:2380,https://{{ inventory_hostname }}:7001" 619 | {% endif %} 620 | 621 | {% if inventory_hostname in groups['etcd-proxy'] %} 622 | 623 | proxy: on 624 | {% endif %} 625 | # listen on both the official ports and the legacy ports 626 | # legacy ports can be omitted if your application doesn't depend on them 627 | listen-client-urls: "https://0.0.0.0:2379,https://0.0.0.0:4001" 628 | 629 | fleet: 630 | public-ip: {{ inventory_hostname }} 631 | # metadata: role={# coreos_role #} 632 | etcd_cafile: /etc/ssl/etcd/ca.crt 633 | etcd_certfile: /etc/ssl/etcd/key.crt 634 | etcd_keyfile: /etc/ssl/etcd/key.key 635 | etcd_servers: https://{{ inventory_hostname }}:2379 636 | locksmith: 637 | endpoint: https://{{ inventory_hostname }}:2379 638 | etcd_cafile: /etc/ssl/etcd/ca.crt 639 | etcd_certfile: /etc/ssl/etcd/key.crt 640 | etcd_keyfile: /etc/ssl/etcd/key.key 641 | flannel: 642 | interface: {{ inventory_hostname }} 643 | units: 644 | - name: locksmithd.service 645 | command: stop 646 | - name: update-window.service 647 | runtime: true 648 | content: | 649 | [Unit] 650 | Description=Reboot if an update has been downloaded 651 | 652 | [Service] 653 | EnvironmentFile=/etc/environment 654 | ExecStart=/opt/bin/update-window.sh 655 | - name: update-window.timer 656 | runtime: true 657 | command: start 658 | content: | 659 | [Unit] 660 | Description=Reboot timer 661 | 662 | [Timer] 663 | OnCalendar=*:0/5 664 | 665 | - name: etcd2.service 666 | command: start 667 | drop-ins: 668 | {% if baremetal_provider != 'vagrant' %} 669 | - name: 50-network-wait.conf 670 | content: | 671 | [Unit] 672 | Requires=systemd-networkd-wait-online.service 673 | After=systemd-networkd-wait-online.service setup-subnet-routes.service 674 | Requires=setup-subnet-routes.service 675 | {% else %} 676 | - name: 50-network-wait.conf 677 | content: | 678 | 
[Unit] 679 | Requires=systemd-networkd-wait-online.service 680 | After=systemd-networkd-wait-online.service 681 | {% endif %} 682 | - name: 30-certificates.conf 683 | content: | 684 | [Service] 685 | # Client Env Vars 686 | Environment=ETCD_CA_FILE=/etc/ssl/etcd/ca.crt 687 | Environment=ETCD_CERT_FILE=/etc/ssl/etcd/key.crt 688 | Environment=ETCD_KEY_FILE=/etc/ssl/etcd/key.key 689 | # Peer Env Vars 690 | Environment=ETCD_PEER_CA_FILE=/etc/ssl/etcd/ca.crt 691 | Environment=ETCD_PEER_CERT_FILE=/etc/ssl/etcd/key.crt 692 | Environment=ETCD_PEER_KEY_FILE=/etc/ssl/etcd/key.key 693 | 694 | - name: update_ca_certificates.service 695 | command: start 696 | content: | 697 | [Unit] 698 | Before=docker.service 699 | [Service] 700 | ExecStart=/usr/sbin/update-ca-certificates 701 | RemainAfterExit=yes 702 | Type=oneshot 703 | 704 | 705 | - name: docker.service 706 | command: start 707 | drop-ins: 708 | - name: 50-wait-for-flannel.conf 709 | content: | 710 | [Unit] 711 | Requires=flanneld.service 712 | Wants=flanneld.service 713 | After=flanneld.service 714 | [Service] 715 | EnvironmentFile=/run/flannel/flannel_docker_opts.env 716 | {% if baremetal_provider == 'vagrant' %} 717 | - name: 00-wired.network 718 | runtime: true 719 | content: | 720 | [Match] 721 | Name=eth1 722 | [Network] 723 | DHCP=ipv4 724 | DNS=127.0.0.1 725 | DNS=8.8.8.8 726 | Domains= 727 | {% else %} 728 | - name: 00-wired.network 729 | runtime: true 730 | content: | 731 | [Match] 732 | Name=en* 733 | [Network] 734 | DHCP=ipv4 735 | DNS=127.0.0.1 736 | Domains= 737 | {% endif %} 738 | {% if hetzner_failover_ips is defined %} 739 | {% for ip in hetzner_failover_ips %} 740 | Address={{ ip }} 741 | {% endfor %} 742 | {% endif %} 743 | 744 | {% if baremetal_provider == 'vagrant' %} 745 | - name: vagrant_dns.service 746 | command: start 747 | content: | 748 | [Unit] 749 | Description=Setup Network Environment 750 | Requires=network-online.target 751 | After=network-online.target 752 | [Service] 753 | ExecStart=/usr/bin/sed -i '/10.0.2.3/d' /etc/resolv.conf 754 | RemainAfterExit=yes 755 | Type=oneshot 756 | {% endif %} 757 | 758 | #To use etcd2, comment out the above service and uncomment these 759 | # Note: this requires a release that contains etcd2 760 | #- name: etcd2.service 761 | # command: start 762 | {% if inventory_hostname in groups['kubernetes-master'] %} 763 | # systemd services master only 764 | - name: kube-apiserver.service 765 | command: start 766 | content: | 767 | [Unit] 768 | Description=Kubernetes API Server 769 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 770 | Requires=setup-network-environment.service etcd2.service 771 | After=setup-network-environment.service etcd2.service kubernetes-networking.target 772 | Before=kube-kubelet.service 773 | [Service] 774 | Environment=KUBELET_VERSION={{ hyperkube_aci_tag }} 775 | Environment=KUBELET_ACI={{ hyperkube_aci }} 776 | Environment=EXEC=apiserver 777 | ExecStart=/opt/bin/hyperkube-wrapper \ 778 | #--service-account-key-file=/opt/bin/kube-serviceaccount.key \ 779 | --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem \ 780 | --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem \ 781 | --client-ca-file=/etc/kubernetes/ssl/ca.pem \ 782 | --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem \ 783 | --service-account-lookup=false \ 784 | --admission-control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \ 785 | --runtime-config=api/v1 \ 786 | --allow-privileged=true \ 787 | 
--insecure-bind-address=127.0.0.1 \ 788 | --bind-address={{ inventory_hostname }} \ 789 | --insecure-port=8080 \ 790 | #--etcd-config=/etc/etcd-client.config.json \ 791 | --etcd-cafile=/etc/ssl/etcd/ca.crt \ 792 | --etcd-certfile=/etc/ssl/etcd/key.crt \ 793 | --etcd-keyfile=/etc/ssl/etcd/key.key \ 794 | --etcd-servers="{% for host in groups['etcd-node'] %}https://{{host}}:2379{%if not loop.last %},{% endif %}{% endfor %}" \ 795 | --kubelet-https=true \ 796 | --secure-port=6443 \ 797 | --runtime-config=extensions/v1beta1/daemonsets=true \ 798 | --service-cluster-ip-range={{ k8s_service_ip_range }} \ 799 | --anonymous-auth=false \ 800 | #--token-auth-file=/srv/kubernetes/known_tokens.csv \ 801 | #--basic-auth-file=/srv/kubernetes/basic_auth.csv \ 802 | #--etcd-servers=http://127.0.0.1:2379 \ 803 | --logtostderr=true 804 | Restart=always 805 | RestartSec=10 806 | 807 | - name: kube-controller-manager.service 808 | command: start 809 | content: | 810 | [Unit] 811 | Description=Kubernetes Controller Manager 812 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 813 | Requires=kube-apiserver.service 814 | After=kube-apiserver.service 815 | Before=kube-kubelet.service 816 | [Service] 817 | Environment=KUBELET_VERSION={{ hyperkube_aci_tag }} 818 | Environment=KUBELET_ACI={{ hyperkube_aci }} 819 | Environment=EXEC=controller-manager 820 | ExecStart=/opt/bin/hyperkube-wrapper \ 821 | --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem \ 822 | --root-ca-file=/etc/kubernetes/ssl/ca.pem \ 823 | --master=127.0.0.1:8080 \ 824 | --leader-elect=true \ 825 | --leader-elect-lease-duration=15s \ 826 | --leader-elect-renew-deadline=10s \ 827 | --leader-elect-retry-period=2s \ 828 | --logtostderr=true 829 | Restart=always 830 | RestartSec=10 831 | - name: kube-scheduler.service 832 | command: start 833 | content: | 834 | [Unit] 835 | Description=Kubernetes Scheduler 836 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 837 | Requires=kube-apiserver.service 838 | After=kube-apiserver.service 839 | Before=kube-kubelet.service 840 | [Service] 841 | Environment=KUBELET_VERSION={{ hyperkube_aci_tag }} 842 | Environment=KUBELET_ACI={{ hyperkube_aci }} 843 | Environment=EXEC=scheduler 844 | ExecStart=/opt/bin/hyperkube-wrapper \ 845 | --master=127.0.0.1:8080 \ 846 | --leader-elect=true \ 847 | --leader-elect-lease-duration=15s \ 848 | --leader-elect-renew-deadline=10s \ 849 | --leader-elect-retry-period=2s 850 | Restart=always 851 | RestartSec=10 852 | 853 | {% endif %} 854 | {% if inventory_hostname in groups['kubernetes-master'] or inventory_hostname in groups['kubernetes-node'] %} 855 | # kubernetes network service 856 | - name: kubernetes-networking.target 857 | command: start 858 | content: | 859 | [Unit] 860 | Description=services required for proper container networking 861 | - name: setup-network-environment.service 862 | command: start 863 | content: | 864 | [Unit] 865 | Description=Setup Network Environment 866 | Documentation=https://github.com/kelseyhightower/setup-network-environment 867 | Requires=network-online.target 868 | After=network-online.target 869 | [Service] 870 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 871 | ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment 872 | ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment 873 | 
ExecStart=/opt/bin/setup-network-environment 874 | RemainAfterExit=yes 875 | Type=oneshot 876 | {% if baremetal_provider != 'vagrant' %} 877 | - name: setup-subnet-routes.service 878 | command: start 879 | content: | 880 | [Unit] 881 | Description=Setup Hetzner Subnet Routes 882 | Requires=network-online.target 883 | After=network-online.target 884 | Before=etcd2.service 885 | [Service] 886 | TimeoutStartSec=10m 887 | ExecStartPre=/usr/bin/rkt fetch --insecure-options=image docker://quay.io/cornelius/hetner-netconf 888 | ExecStart=/usr/bin/rkt run --trust-keys-from-https --net=host quay.io/cornelius/hetner-netconf --caps-retain=CAP_NET_ADMIN 889 | RemainAfterExit=yes 890 | Type=oneshot 891 | {% endif %} 892 | - name: kube-proxy.service 893 | enable: true 894 | command: start 895 | content: | 896 | [Unit] 897 | Description=Kubernetes Proxy 898 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 899 | Requires=docker.service 900 | After=docker.service 901 | Before=kubernetes-networking.target 902 | 903 | [Install] 904 | WantedBy=kubernetes-networking.target 905 | 906 | [Service] 907 | TimeoutStartSec=10m 908 | Environment=KUBELET_VERSION={{ hyperkube_aci_tag }} 909 | Environment=KUBELET_ACI={{ hyperkube_aci }} 910 | ExecStartPre=/usr/bin/docker pull {{ hyperkube_aci }}:{{ hyperkube_aci_tag }} 911 | ExecStartPre=-/usr/bin/docker rm -f kube-proxy 912 | ExecStart=/usr/bin/docker run \ 913 | -v /etc/kubernetes/:/etc/kubernetes/ \ 914 | --name=kube-proxy \ 915 | --net=host --privileged=true \ 916 | {{ hyperkube_aci }}:{{ hyperkube_aci_tag }} \ 917 | /hyperkube proxy \ 918 | --master=https://localhost:8888 \ 919 | --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \ 920 | --logtostderr=true \ 921 | --proxy-mode=iptables 922 | ExecStop=/usr/bin/docker rm -f kube-proxy 923 | Restart=always 924 | RestartSec=10 925 | 926 | - name: kube-kubelet.service 927 | command: start 928 | content: | 929 | [Unit] 930 | After=kubernetes-networking.target docker.service 931 | Requires=kubernetes-networking.target docker.service 932 | 933 | [Service] 934 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests 935 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers 936 | ExecStartPre=/usr/bin/docker pull ceph/base 937 | TimeoutStopSec=900s 938 | TimeoutStartSec=10m 939 | 940 | Environment=KUBELET_VERSION={{ hyperkube_aci_tag }} 941 | Environment=KUBELET_ACI={{ hyperkube_aci }} 942 | Environment="RKT_OPTS=--volume=ceph,kind=host,source=/etc/ceph/ --mount volume=ceph,target=/etc/ceph/ --volume=modules,kind=host,source=/lib/modules/ --mount volume=modules,target=/lib/modules/ --volume resolv-conf,kind=host,source=/etc/resolv.conf --mount volume=resolv-conf,target=/etc/resolv.conf --volume var-log-containers,kind=host,source=/var/log/containers/ --mount volume=var-log-containers,target=/var/log/containers/" 943 | #--volume sys,kind=host,source=/sys --mount volume=sys,target=/sys --volume dev,kind=host,source=/dev --mount volume=dev,target=/dev" 944 | ExecStart=/usr/lib/coreos/kubelet-wrapper \ 945 | --api-servers=https://127.0.0.1:8888 \ 946 | --allow-privileged=true \ 947 | --pod-manifest-path=/etc/kubernetes/manifests \ 948 | --cluster-dns={{ k8s_dns_service_ip }} \ 949 | --cluster-domain={{ k8s_dns_domain }} \ 950 | --hostname-override={{ inventory_hostname }} \ 951 | --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \ 952 | --tls-cert-file=/etc/kubernetes/ssl/worker.pem \ 953 | --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem \ 954 | --register-node=true \ 955 | 
--register-schedulable=true 956 | ExecStartPost=-/opt/bin/uncordon-node 957 | ExecStop=/opt/bin/drain-node 958 | 959 | 960 | Restart=always 961 | RestartSec=10 962 | 963 | [Install] 964 | WantedBy=multi-user.target 965 | - name: kube-apiserver-haproxy.service 966 | enable: true 967 | command: start 968 | content: | 969 | [Unit] 970 | Description=Ha Proxy for kubernetes api server 971 | After=docker.service 972 | Before=kubernetes-networking.target 973 | 974 | [Service] 975 | TimeoutStartSec=5m 976 | ExecStartPre=-/usr/bin/docker kill haproxy 977 | ExecStartPre=-/usr/bin/docker rm -f haproxy 978 | ExecStartPre=/usr/bin/docker pull haproxy:alpine 979 | ExecStart=/usr/bin/docker run --rm \ 980 | --name haproxy \ 981 | -v /etc/kube_apiserver_haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \ 982 | -p 127.0.0.1:8888:8080 \ 983 | haproxy:alpine 984 | ExecStop=-/usr/bin/docker stop haproxy 985 | ExecStop=-/usr/bin/docker rm haproxy 986 | Restart=always 987 | RestartSec=10 988 | 989 | [Install] 990 | WantedBy=kubernetes-networking.target 991 | 992 | {% endif %} 993 | 994 | - name: tinc-env.service 995 | enable: true 996 | command: start 997 | content: | 998 | [Unit] 999 | Description=Tinc Service 1000 | After=etcd.service etcd2.service flanneld.service 1001 | Before=fleet.service 1002 | 1003 | [Service] 1004 | Type=oneshot 1005 | ExecStart=/bin/sh -c "echo \"TINC_HOSTNAME=`hostname | sed -e 's/-/_/g'`\" > /etc/tinc-env" 1006 | - name: flannel-wait.service 1007 | command: start 1008 | enable: true 1009 | content: | 1010 | 1011 | [Unit] 1012 | Description=Wait For Flannel 1013 | Requires=flanneld.service 1014 | After=etcd.service etcd2.service flanneld.service 1015 | 1016 | [Service] 1017 | Type=oneshot 1018 | ExecStart=/bin/sh -c "echo \"TINC_HOSTNAME=`hostname | sed -e 's/-/_/g'`\" > /etc/tinc-env" 1019 | ExecStartPre=/bin/sh -c "while [ ! 
-f /run/flannel/subnet.env ] ; do sleep 1; done" 1020 | 1021 | - name: etcd-waiter.service 1022 | command: start 1023 | content: | 1024 | [Unit] 1025 | Description=etcd waiter 1026 | Wants=network-online.target 1027 | Wants=etcd2.service 1028 | After=etcd2.service 1029 | After=network-online.target 1030 | Before=flanneld.service 1031 | Before=setup-network-environment.service 1032 | 1033 | [Service] 1034 | EnvironmentFile=/etc/environment 1035 | ExecStartPre=/usr/bin/chmod +x /opt/bin/waiter.sh 1036 | ExecStart=/usr/bin/bash /opt/bin/waiter.sh 1037 | RemainAfterExit=true 1038 | Type=oneshot 1039 | 1040 | - name: tinc-conf.service 1041 | enable: true 1042 | command: start 1043 | content: | 1044 | [Unit] 1045 | Description=Tinc Configuration Service 1046 | After=etcd.service etcd2.service flanneld.service 1047 | Before=fleet.service 1048 | 1049 | [Service] 1050 | Type=oneshot 1051 | ExecStart=/bin/sh -c "echo \"TINC_HOSTNAME=`hostname | sed -e 's/-/_/g'`\" > /etc/tinc-env" 1052 | - name: dnsmask.service 1053 | command: start 1054 | enable: true 1055 | content: | 1056 | [Unit] 1057 | Description=dnsmask service 1058 | Requires=docker.service 1059 | Before=kubernetes-networking.target 1060 | 1061 | [Install] 1062 | WantedBy=kubernetes-networking.target 1063 | 1064 | 1065 | [Service] 1066 | ExecStartPre=/usr/bin/docker pull quay.io/coreos/dnsmasq 1067 | ExecStartPre=-/usr/bin/docker rm -f dnsmask 1068 | ExecStart=/usr/bin/docker run --name dnsmask --net=host --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q \ 1069 | --listen-address=127.0.0.1 \ 1070 | --server=/cluster.local/{{ k8s_dns_service_ip }} \ 1071 | --rev-server={{ k8s_service_ip_range }},{{ k8s_dns_service_ip }} \ 1072 | --rev-server=10.1.0.0/16,{{ k8s_dns_service_ip }} \ 1073 | --server=8.8.8.8 1074 | 1075 | #ExecStop=/usr/bin/sleep 50 1076 | ExecStopPost=/usr/bin/docker rm -f dnsmask 1077 | Restart=always 1078 | RestartSec=10 1079 | TimeoutStartSec=5m 1080 | 1081 | 1082 | - name: tinc.service 1083 | command: start 1084 | enable: true 1085 | content: | 1086 | [Unit] 1087 | Description=Tinc VPN Service 1088 | Requires=flannel-wait.service 1089 | After=flanneld.service tinc-env.service flannel-wait.service 1090 | Before=kubernetes-networking.target tinc-config-updater.service tinc-sidekick.service docker-bridge.service 1091 | Wants=tinc-config-updater.service tinc-sidekick.service docker-bridge.service 1092 | 1093 | [Install] 1094 | WantedBy=kubernetes-networking.target 1095 | 1096 | [Service] 1097 | #Environment="DOCKER_HOST=unix:///var/run/early-docker.sock" 1098 | Restart=always 1099 | 1100 | EnvironmentFile=/etc/tinc-env 1101 | EnvironmentFile=/etc/environment 1102 | 1103 | 1104 | #ExecStartPre=/usr/bin/docker pull jenserat/tinc 1105 | ExecStartPre=/usr/bin/rkt --insecure-options=image fetch docker://jenserat/tinc 1106 | ExecStartPre=/usr/bin/rm -rf /srv/tinc 1107 | ExecStartPre=/usr/bin/mkdir -p /srv/tinc 1108 | ExecStartPre=/bin/sh -c "/usr/bin/rkt run --volume tinc,kind=host,source=/srv/tinc,readOnly=false --mount volume=tinc,target=/etc/tinc docker://jenserat/tinc --exec=/usr/sbin/tinc -- init $TINC_HOSTNAME" 1109 | ExecStartPre=/bin/sh -c "/usr/bin/rkt run --volume tinc,kind=host,source=/srv/tinc,readOnly=false --mount volume=tinc,target=/etc/tinc docker://jenserat/tinc --exec=/usr/sbin/tinc -- add Address = $COREOS_PUBLIC_IPV4" 1110 | TimeoutStartSec=5m 1111 | EnvironmentFile=/run/flannel/subnet.env 1112 | ExecStartPre=/bin/sh -c "/usr/bin/rkt run --volume tinc,kind=host,source=/srv/tinc,readOnly=false --mount 
volume=tinc,target=/etc/tinc docker://jenserat/tinc --exec=/usr/sbin/tinc -- add Subnet = `echo $FLANNEL_SUBNET | sed -e 's/1\\/24/0\\/24/'`" 1113 | ExecStartPre=/bin/sh -c "/usr/bin/rkt run --volume tinc,kind=host,source=/srv/tinc,readOnly=false --mount volume=tinc,target=/etc/tinc docker://jenserat/tinc --exec=/usr/sbin/tinc -- add Mode = switch" 1114 | ExecStartPre=/bin/sh -c "/usr/bin/rkt run --volume tinc,kind=host,source=/srv/tinc,readOnly=false --mount volume=tinc,target=/etc/tinc docker://jenserat/tinc --exec=/usr/sbin/tinc -- add DeviceType = tap" 1115 | #ExecStartPre=-/usr/bin/docker rm -f tinc 1116 | ExecStartPre=-/usr/bin/rkt stop --uuid-file=/srv/tinc/tinc.uuid 1117 | ExecStart=/usr/bin/rkt run --uuid-file-save=/srv/tinc/tinc.uuid --volume tinc,kind=host,source=/srv/tinc,readOnly=false --mount volume=tinc,target=/etc/tinc \ 1118 | --volume tun,kind=host,source=/dev/net/tun,readOnly=false --mount volume=tun,target=/dev/net/tun \ 1119 | --insecure-options=image docker://jenserat/tinc \ 1120 | --net=host \ 1121 | --caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID --exec=/usr/sbin/tinc -- start -D 1122 | #ExecStartPre=/usr/bin/docker run --name tinc -d --volume /srv/tinc:/etc/tinc --net=host --device=/dev/net/tun --cap-add NET_ADMIN jenserat/tinc start -D 1123 | 1124 | 1125 | #ExecStop=/usr/bin/docker rm -f tinc 1126 | #ExecStartPost=/bin/sh -c "while ! ifconfig -s | grep -q tap0 ; do sleep 1; done" 1127 | #ExecStartPost=/bin/sh -c "while ! ifconfig -s | grep -q docker0 ; do sleep 1; done" 1128 | #ExecStartPost=-/bin/sh -c "route add -net 10.1.0.0 netmask 255.255.0.0 dev docker0" 1129 | #ExecStartPost=/bin/sh -c "brctl addif docker0 tap0" 1130 | #ExecStartPost=/bin/sh -c "while ! ifconfig -s | grep -q docker0 ; do sleep 1; done" 1131 | #ExecStartPost=-/usr/sbin/brctl addif docker0 tap0 1132 | ExecStop=-/usr/bin/rkt stop --uuid-file=/srv/tinc/tinc.uuid 1133 | 1134 | 1135 | 1136 | - name: tinc-sidekick.service 1137 | command: start 1138 | enable: true 1139 | content: | 1140 | [Unit] 1141 | Description=Tinc VPN Service 1142 | BindsTo=tinc.service 1143 | After=tinc.service 1144 | Before=kubernetes-networking.target tinc-config-updater.service 1145 | 1146 | [Service] 1147 | Restart=always 1148 | EnvironmentFile=/etc/tinc-env 1149 | EnvironmentFile=/etc/environment 1150 | ExecStart=/bin/sh -c "while true; do etcdctl set /services/tinc/$TINC_HOSTNAME \"\\\"` cat /srv/tinc/hosts/$TINC_HOSTNAME `\"\\\" --ttl 60;sleep 45;done" 1151 | ExecStopPost=/bin/sh -c "etcdctl rm /services/tinc/$TINC_HOSTNAME" 1152 | 1153 | 1154 | 1155 | - name: tinc-config-updater.service 1156 | command: start 1157 | enable: true 1158 | content: | 1159 | [Unit] 1160 | Description=Countinously update tinc configuration after ectd changes 1161 | After=tinc.service tinc-sidekick.service 1162 | Requires=tinc-sidekick.service 1163 | Before=kubernetes-networking.target 1164 | BindsTo=tinc.service 1165 | 1166 | [Install] 1167 | WantedBy=kubernetes-networking.target 1168 | 1169 | [Service] 1170 | Restart=always 1171 | RestartSec=1 1172 | EnvironmentFile=/etc/environment 1173 | ExecStartPre=/srv/tinc_initial_config.sh 1174 | ExecStart=/usr/bin/etcdctl exec-watch --recursive /services/tinc -- /srv/tinc_conf_updater.sh 1175 | 1176 | 1177 | - name: flanneld.service 1178 | command: start 1179 | enable: true 1180 | drop-ins: 1181 | - name: 50-network-config.conf 1182 | content: | 1183 | [Unit] 1184 | Requires=etcd2.service etcd-waiter.service 1185 | After=etcd2.service etcd-waiter.service 1186 | 
Before=docker.service 1187 | [Service] 1188 | EnvironmentFile=/etc/environment 1189 | ExecStartPre=/usr/bin/etcdctl cluster-health 1190 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "10.1.0.0/16", "Backend": { "Type": "alloc"} }' 1191 | - name: 40-symlink.conf 1192 | content: | 1193 | [Service] 1194 | ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env 1195 | 1196 | 1197 | - name: docker-bridge.service 1198 | command: start 1199 | enable: true 1200 | content: | 1201 | [Unit] 1202 | Description=Configure Docker Bridge 1203 | Requires=tinc.service 1204 | After=tinc.service 1205 | BindsTo=tinc.service 1206 | #After=docker.socket 1207 | [Service] 1208 | RemainAfterExit=true 1209 | Restart=always 1210 | #ExecStartPre=-/bin/sh -c "route del -net 10.1.0.0 netmask 255.255.0.0 dev tap0" 1211 | ExecStartPre=/bin/sh -c "while ! ifconfig -s | grep -q tap0 ; do sleep 1; done" 1212 | ExecStartPre=/bin/sh -c "while ! ifconfig -s | grep -q docker0 ; do sleep 1; done" 1213 | ExecStartPre=-/bin/sh -c "route add -net 10.1.0.0 netmask 255.255.0.0 dev docker0" 1214 | ExecStart=/bin/sh -c "brctl addif docker0 tap0" 1215 | 1216 | - name: fleet.service 1217 | command: start 1218 | - name: clenup-at-shutdown.service 1219 | enable: true 1220 | command: start 1221 | content: | 1222 | [Unit] 1223 | Description=Cleanup directories populated by cloudconfig to avoid configuration drift 1224 | Before=network.target 1225 | 1226 | [Install] 1227 | WantedBy=kubernetes-networking.target 1228 | RequiredBy=kube-kubelet.service 1229 | 1230 | 1231 | [Service] 1232 | RemainAfterExit=yes 1233 | ExecStart=/bin/true 1234 | ExecStopPost=/opt/bin/cleanup 1235 | Type=oneshot 1236 | -------------------------------------------------------------------------------- /cloud_config_install.tmpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | hostname: {{ coreos_hostname }} 4 | ssh_authorized_keys: 5 | - "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" 6 | {% for key in coreos_public_keys %} 7 | - {{ key }} 8 | {% endfor %} 9 | 10 | 11 | 12 | coreos: 13 | update: 14 | reboot-strategy: reboot 15 | -------------------------------------------------------------------------------- /config.rb: -------------------------------------------------------------------------------- 1 | $new_discovery_url='https://discovery.etcd.io/new' 2 | 3 | # To automatically replace the discovery token on 'vagrant up', uncomment 4 | # the lines below: 5 | # 6 | #if File.exists?('user-data') && ARGV[0].eql?('up') 7 | # require 'open-uri' 8 | # require 'yaml' 9 | # 10 | # token = open($new_discovery_url).read 11 | # 12 | # data = YAML.load(IO.readlines('user-data')[1..-1].join) 13 | # data['coreos']['etcd']['discovery'] = token 14 | # 15 | # yaml = YAML.dump(data) 16 | # File.open('user-data', 'w') { |file| file.write("#cloud-config\n\n#{yaml}") } 17 | #end 18 | # 19 | 20 | # 21 | # coreos-vagrant is configured through a series of configuration 22 | # options (global ruby variables) which are detailed below. To modify 23 | # these options, first copy this file to "config.rb". Then simply 24 | # uncomment the necessary lines, leaving the $, and replace everything 25 | # after the equals sign.. 26 | 27 | # Size of the CoreOS cluster created by Vagrant 28 | $num_instances=3 29 | 30 | # Change basename of the VM 31 | # The default value is "core", which results in VMs named starting with 32 | # "core-01" through to "core-${num_instances}". 
33 | #$instance_name_prefix="core" 34 | 35 | # Official CoreOS channel from which updates should be downloaded 36 | #$update_channel='alpha' 37 | $update_channel='stable' 38 | 39 | # Log the serial consoles of CoreOS VMs to log/ 40 | # Enable by setting value to true, disable with false 41 | # WARNING: Serial logging is known to result in extremely high CPU usage with 42 | # VirtualBox, so should only be used in debugging situations 43 | #$enable_serial_logging=false 44 | 45 | # Enable port forwarding of Docker TCP socket 46 | # Set to the TCP port you want exposed on the *host* machine, default is 2375 47 | # If 2375 is used, Vagrant will auto-increment (e.g. in the case of $num_instances > 1) 48 | # You can then use the docker tool locally by setting the following env var: 49 | # export DOCKER_HOST='tcp://127.0.0.1:2375' 50 | #$expose_docker_tcp=2375 51 | 52 | # Enable NFS sharing of your home directory ($HOME) to CoreOS 53 | # It will be mounted at the same path in the VM as on the host. 54 | # Example: /Users/foobar -> /Users/foobar 55 | #$share_home=false 56 | 57 | # Customize VMs 58 | #$vm_gui = false 59 | $vm_memory = 3072 60 | #$vm_cpus = 1 61 | 62 | # Share additional folders to the CoreOS VMs 63 | # For example, 64 | # $shared_folders = {'/path/on/host' => '/path/on/guest', '/home/foo/app' => '/app'} 65 | # or, to map host folders to guest folders of the same name, 66 | # $shared_folders = Hash[*['/home/foo/app1', '/home/foo/app2'].map{|d| [d, d]}.flatten] 67 | #$shared_folders = {} 68 | 69 | # Enable port forwarding from guest(s) to host machine, syntax is: { 80 => 8080 }, auto correction is enabled by default. 70 | #$forwarded_ports = {} 71 | -------------------------------------------------------------------------------- /docs/getting_started_hetzner.md: -------------------------------------------------------------------------------- 1 | # Getting started on Hetzner 2 | 3 | ## clone and init submodules 4 | 5 | git clone https://github.com/cornelius-keller/ansible-coreos-kubernetes.git 6 | cd ansible-coreos-kubernetes/ 7 | git submodule init 8 | Submodule 'library' (https://github.com/cornelius-keller/ansible-ovh) registered for path 'library' 9 | Submodule 'roles/ansible-coreos-bootstrap' (https://github.com/sigma/ansible-coreos-bootstrap.git) registered for path 'roles/ansible-coreos-bootstrap' 10 | git submodule init 11 | Submodule 'library' (https://github.com/cornelius-keller/ansible-ovh) registered for path 'library' 12 | Submodule 'roles/ansible-coreos-bootstrap' (https://github.com/sigma/ansible-coreos-bootstrap.git) registered for path 'roles/ansible-coreos-bootstrap' 13 | git submodule update 14 | Cloning into 'library'... 15 | remote: Counting objects: 18, done. 16 | remote: Total 18 (delta 0), reused 0 (delta 0), pack-reused 18 17 | Unpacking objects: 100% (18/18), done. 18 | Checking connectivity... done. 19 | Submodule path 'library': checked out 'f87a217cef869a7c2ce69178441cb02299b321b2' 20 | Cloning into 'roles/ansible-coreos-bootstrap'... 21 | remote: Counting objects: 314, done. 22 | remote: Total 314 (delta 0), reused 0 (delta 0), pack-reused 313 23 | Receiving objects: 100% (314/314), 1.01 MiB | 1.03 MiB/s, done. 24 | Resolving deltas: 100% (131/131), done. 25 | Checking connectivity... done. 26 | Submodule path 'roles/ansible-coreos-bootstrap': checked out '8cd508c21868babdf32a7e0ec078df77cdf49611' 27 | 28 | ## upload your public key to Hetzner 29 | * Log into the Hetzner robot. 30 | * Navigate to the server list in the left menu.
![hetzner server list](hetzner_server_list.png "Hetzner Server List") 32 | 33 | * choose key management 34 | ![hetzner key management](hetzner_key_management.png "Hetzner Key Management") 35 | * choose new key, give it a name, and copy the output of 36 | 37 | `cat ~/.ssh/id_rsa.pub` 38 | 39 | into the form and save it. 40 | ![hetzner new key](hetzner_add_key.png) 41 | 42 | * copy the id of the key for use in the inventory. ![hetzner key list](hetzner_key_list.png) 43 | 44 | ## create an inventory 45 | ``` 46 | mkdir ../ansible-coreos-inventory 47 | cp inventory-hetzner.ini.sample ../ansible-coreos-inventory/inventory-hetzner.ini 48 | ``` 49 | 50 | * change the IPs in the inventory to match your server IPs. 51 | * copy the id of the public key you uploaded to hetzner into the inventory value `rescue_authorized_key` 52 | * create a robot account and copy the username and password into the inventory. 53 | ![hetzner robot account](hetzner_webservice_user.png) 54 | 55 | For example: 56 | 57 | hetzner_webservice_username=#ws+7FPjagF7 58 | hetzner_webservice_password= 59 | 60 | * create a uuid and key for ceph 61 | ``` 62 | # uuidgen 63 | 3867334c-2a6d-4468-b970-c878f0d36fee 64 | # python ceph-key.py 65 | AQCBK2FYAAAAABAAxcEwUSSPc7Zt1VJ9fjYH8A== 66 | ``` 67 | 68 | * add the ceph fsid and key to the inventory 69 | 70 | ``` 71 | ceph_fsid=3867334c-2a6d-4468-b970-c878f0d36fee 72 | ceph_key=AQCBK2FYAAAAABAAxcEwUSSPc7Zt1VJ9fjYH8A== 73 | ``` 74 | 75 | * add the public keys that should have access to the cluster to the inventory: 76 | 77 | `mkdir ../ansible-coreos-inventory/group_vars` 78 | * add the public keys that should have access to the cluster to the file `../ansible-coreos-inventory/group_vars/all.yml` 79 | For example: 80 | 81 | ``` 82 | coreos_public_keys: 83 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqj5FfbBok7q8qtHqj+g5aut4eT6y78QLtC7igcGriZnbEHX9aUfpOgue+Bq1GcIXp9uSOzp+R5OrP0FzPTK9nDgm7R1wnp1zTFOF8LorcLH7ii/9p793O/bvcGNc6OaPGwzIA0naI9pumyIiArEbsnUJlWqGWchHmAm+3McN3QVxTQY6/+aW1Dt5dnC5rbvgB1lfOOhxfr19ED1zV1qgeFKkHptJ1llIkyNLyXNSiMYNuuC2pFn5F3w+Nfe+hRq8gRsJRnDuwcVibNcfR3egZ8sYyHhCWdR0HM1ZLfzW/ens37rahNEkCJrrZUYGbZ3sSDjkWoEOFk/rcdge+detV jck@jck" 84 | ``` 85 | Your inventory should now look like this: 86 | 87 | [etcd-node] 88 | 176.9.123.237 coreos_hostname=core01 89 | 176.9.122.117 coreos_hostname=core02 90 | 176.9.118.230 coreos_hostname=core03 91 | [etcd-proxy] 92 | 93 | 94 | [coreos:children] 95 | etcd-node 96 | etcd-proxy 97 | 98 | [kubernetes-master] 99 | 176.9.123.237 100 | 176.9.122.117 101 | [kubernetes-node] 102 | 176.9.118.230 103 | [kubernetes:children] 104 | kubernetes-master 105 | kubernetes-node 106 | [ceph-mon] 107 | 176.9.123.237 108 | 176.9.122.117 109 | 176.9.118.230 110 | [ceph-osd] 111 | 176.9.123.237 112 | 176.9.122.117 113 | 176.9.118.230 114 | 115 | [all:vars] 116 | #kube_master_dns_name=kubemaster.bettertest.de 117 | hyperkube_aci=quay.io/cornelius/hyperkube 118 | hyperkube_aci_tag=v1.4.4_coreos.0 119 | kube_master_ip=176.9.123.237 120 | kube_cluster_name=privat 121 | kubernetes_version=1.4.4 122 | k8s_service_ip_range=10.100.0.0/16 123 | k8s_service_ip=10.100.0.1 124 | k8s_dns_service_ip=10.100.0.10 125 | k8s_dns_domain=cluster.local 126 | hetzner_webservice_username=#ws+7FPjagF7 127 | hetzner_webservice_password=justforthedemo 128 | baremetal_provider=hetzner 129 | kubectl_checksum=1ffbcfdec9961093b5fb1eec0b65f37af71a825dafff733de90d6ed6db647729 130 | ansible_python_interpreter=/home/core/pypy/bin/python 131 |
ceph_fsid=3867334c-2a6d-4468-b970-c878f0d36fee 132 | ceph_key=AQCBK2FYAAAAABAAxcEwUSSPc7Zt1VJ9fjYH8A== 133 | ceph_osd_dir=/home/core/data/ceph/osd 134 | ceph_osd_type=osd_disk 135 | ceph_osd_device=/dev/sdb 136 | rescue_authorized_key=21:74:2f:a4:08:8d:7c:83:bc:b4:3a:8a:5a:bd:3e:5f 137 | 138 | # bootstrap your coreos cluster 139 | 140 | # ansible-playbook -i ../ansible-coreos-inventory/inventory-hetzner.ini bootstrap_coreos.yml 141 | 142 | You should see output like this: 143 | 144 | [![asciicast](https://asciinema.org/a/3gcyoj8ddki0i5f2t9fhpjfw6.png)](https://asciinema.org/a/3gcyoj8ddki0i5f2t9fhpjfw6) 145 | 146 | Check if the bootstrap was successful: 147 | ``` 148 | # kubectl get nodes 149 | NAME STATUS AGE 150 | 176.9.118.230 Ready 9m 151 | 176.9.122.117 Ready 9m 152 | 176.9.123.237 Ready 10m 153 | ``` 154 | To check if the ceph cluster is healthy you can log into one of the nodes and issue `ceph -s`: 155 | 156 | ssh core@176.9.118.230 157 | Last login: Mon Dec 26 17:07:36 UTC 2016 from 84.191.220.116 on pts/0 158 | CoreOS stable (1185.5.0) 159 | Update Strategy: No Reboots 160 | Failed Units: 1 161 | locksmithd.service 162 | core@core03 ~ $ ceph -s 163 | cluster 3867334c-2a6d-4468-b970-c878f0d36fee 164 | health HEALTH_WARN 165 | 80 pgs degraded 166 | 80 pgs stuck unclean 167 | 80 pgs undersized 168 | recovery 40/60 objects degraded (66.667%) 169 | monmap e1: 3 mons at {ceph-mon-j8h1q=10.1.43.6:6789/0,ceph-mon-pbv8y=10.1.3.3:6789/0,ceph-mon-uvril=10.1.56.3:6789/0} 170 | election epoch 6, quorum 0,1,2 ceph-mon-pbv8y,ceph-mon-j8h1q,ceph-mon-uvril 171 | fsmap e5: 1/1/1 up {0=mds-ceph-mds-2484866350-hu4o7=up:active} 172 | osdmap e15: 2 osds: 2 up, 2 in 173 | flags sortbitwise 174 | pgmap v32: 80 pgs, 3 pools, 2068 bytes data, 20 objects 175 | 78568 kB used, 5576 GB / 5576 GB avail 176 | 40/60 objects degraded (66.667%) 177 | 80 active+undersized+degraded 178 | 179 | If the cluster is unhealthy, try again after a few minutes until you see something like this: 180 | 181 | 182 | core@core03 ~ $ ceph -s 183 | cluster 3867334c-2a6d-4468-b970-c878f0d36fee 184 | health HEALTH_OK 185 | monmap e1: 3 mons at {ceph-mon-j8h1q=10.1.43.6:6789/0,ceph-mon-pbv8y=10.1.3.3:6789/0,ceph-mon-uvril=10.1.56.3:6789/0} 186 | election epoch 6, quorum 0,1,2 ceph-mon-pbv8y,ceph-mon-j8h1q,ceph-mon-uvril 187 | fsmap e5: 1/1/1 up {0=mds-ceph-mds-2484866350-hu4o7=up:active} 188 | osdmap e20: 3 osds: 3 up, 3 in 189 | flags sortbitwise 190 | pgmap v50: 80 pgs, 3 pools, 2068 bytes data, 20 objects 191 | 116 MB used, 8364 GB / 8364 GB avail 192 | 80 active+clean 193 | 194 | 195 | Cluster check asciicast: 196 | 197 | [![asciicast](https://asciinema.org/a/0d08oqnx4ioxqf0vjecd1zbfl.png)](https://asciinema.org/a/0d08oqnx4ioxqf0vjecd1zbfl) 198 | -------------------------------------------------------------------------------- /docs/hetzner_add_key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cornelius-keller/ansible-coreos-kubernetes/10d1210bc24eb28fbcdd4705d4e105999a6112cc/docs/hetzner_add_key.png -------------------------------------------------------------------------------- /docs/hetzner_key_list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cornelius-keller/ansible-coreos-kubernetes/10d1210bc24eb28fbcdd4705d4e105999a6112cc/docs/hetzner_key_list.png -------------------------------------------------------------------------------- /docs/hetzner_key_management.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cornelius-keller/ansible-coreos-kubernetes/10d1210bc24eb28fbcdd4705d4e105999a6112cc/docs/hetzner_key_management.png -------------------------------------------------------------------------------- /docs/hetzner_server_list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cornelius-keller/ansible-coreos-kubernetes/10d1210bc24eb28fbcdd4705d4e105999a6112cc/docs/hetzner_server_list.png -------------------------------------------------------------------------------- /docs/hetzner_webservice_user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cornelius-keller/ansible-coreos-kubernetes/10d1210bc24eb28fbcdd4705d4e105999a6112cc/docs/hetzner_webservice_user.png -------------------------------------------------------------------------------- /inventory-hetzner.ini.sample: -------------------------------------------------------------------------------- 1 | [etcd-node] 2 | 176.9.123.237 coreos_hostname=core01 3 | 176.9.122.117 coreos_hostname=core02 4 | 176.9.118.230 coreos_hostname=core03 5 | [etcd-proxy] 6 | 7 | 8 | [coreos:children] 9 | etcd-node 10 | etcd-proxy 11 | 12 | [kubernetes-master] 13 | 176.9.123.237 14 | 176.9.122.117 15 | [kubernetes-node] 16 | 176.9.118.230 17 | [kubernetes:children] 18 | kubernetes-master 19 | kubernetes-node 20 | [ceph-mon] 21 | 176.9.123.237 22 | 176.9.122.117 23 | 176.9.118.230 24 | [ceph-osd] 25 | 176.9.123.237 26 | 176.9.122.117 27 | 176.9.118.230 28 | 29 | [ingres] 30 | 176.9.122.117 31 | 176.9.118.230 32 | 33 | [all:vars] 34 | #kube_master_dns_name=kubemaster.bettertest.de 35 | hyperkube_aci=quay.io/cornelius/hyperkube 36 | hyperkube_aci_tag=v1.5.1_coreos.0 37 | kube_master_ip=176.9.123.237 38 | kube_cluster_name=privat 39 | kubernetes_version=1.5.1 40 | k8s_service_ip_range=10.100.0.0/16 41 | k8s_service_ip=10.100.0.1 42 | k8s_dns_service_ip=10.100.0.10 43 | k8s_dns_domain=cluster.local 44 | hetzner_webservice_username= 45 | hetzner_webservice_password= 46 | baremetal_provider=hetzner 47 | kubectl_checksum=4d56b8fbec4a274a61893d244bfce532cadf313632a31a065a0edf7130066ac6 48 | ansible_python_interpreter=/home/core/pypy/bin/python 49 | ceph_fsid= # generate with uuidgen 50 | ceph_key= # generate with python ceph-key.py ( in this directory ) 51 | ceph_osd_dir=/home/core/data/ceph/osd 52 | ceph_osd_type=osd_disk 53 | ceph_osd_device=/dev/sdb 54 | rescue_authorized_key= 55 | ingres_failover_ip= 56 | -------------------------------------------------------------------------------- /inventory-vagrant.ini: -------------------------------------------------------------------------------- 1 | [etcd-node] 2 | 172.17.8.101 coreos_hostname=core-01 3 | 172.17.8.102 coreos_hostname=core-02 4 | 172.17.8.103 coreos_hostname=core-03 5 | [etcd-proxy] 6 | 7 | 8 | [coreos:children] 9 | etcd-node 10 | etcd-proxy 11 | 12 | [kubernetes-master] 13 | 172.17.8.101 14 | 172.17.8.102 15 | [kubernetes-node] 16 | 172.17.8.103 17 | [kubernetes:children] 18 | kubernetes-master 19 | kubernetes-node 20 | [ceph-mon] 21 | 172.17.8.101 22 | 172.17.8.102 23 | 172.17.8.103 24 | [ceph-osd] 25 | 172.17.8.101 26 | 172.17.8.102 27 | 172.17.8.103 28 | 29 | 30 | [ingres] 31 | 172.17.8.102 32 | 172.17.8.103 33 | 34 | [all:vars] 35 | #kube_master_dns_name=kubemaster.bettertest.de 36 | hyperkube_aci=quay.io/cornelius/hyperkube 37 | 
hyperkube_aci_tag=v1.5.1_coreos.0 38 | kube_master_ip=172.17.8.101 39 | kube_cluster_name=vagrant 40 | kubernetes_version=1.5.1 41 | k8s_service_ip_range=10.100.0.0/16 42 | k8s_service_ip=10.100.0.1 43 | k8s_dns_service_ip=10.100.0.10 44 | k8s_dns_domain=cluster.local 45 | hetzner_webservice_username= 46 | hetzner_webservice_password= 47 | baremetal_provider=vagrant 48 | kubectl_checksum=4d56b8fbec4a274a61893d244bfce532cadf313632a31a065a0edf7130066ac6 49 | ansible_python_interpreter=/home/core/pypy/bin/python 50 | ceph_fsid=5304a6e0-e9dd-47ef-a4e8-5830693805f7 51 | ceph_key=AQDVVedYAAAAABAAZRwf84Es7z+If1GEev+3Zw== 52 | ceph_osd_dir=/home/core/data/ceph/osd 53 | ceph_osd_type=osd_disk 54 | ceph_osd_device=/dev/sdb 55 | rescue_authorized_key= 56 | ingres_failover_ip= 57 | -------------------------------------------------------------------------------- /reset-etdnode.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reset etcd member 3 | hosts: "{{ new_etcd_node }}" 4 | 5 | tasks: 6 | 7 | - name: remove node from cluster 8 | shell: | 9 | etcdctl member remove $(etcdctl cluster-health | grep unreachable | grep {{ inventory_hostname }} | awk '{print $2 }') 10 | etcdctl member add {{ coreos_hostname }} https://{{ inventory_hostname }}:2380 11 | 12 | 13 | args: 14 | executable: /bin/bash 15 | sudo: yes 16 | delegate_to: "{{ existing_etcd_node }}" 17 | 18 | - name: change etcd startup env 19 | lineinfile: 20 | dest: /run/systemd/system/etcd2.service.d/20-cloudinit.conf 21 | line: Environment="ETCD_INITIAL_CLUSTER_STATE=existing" 22 | state: present 23 | sudo: yes 24 | - name: reload systemd 25 | command: systemctl daemon-reload 26 | sudo: yes 27 | 28 | - name: stop etcd2.service 29 | command: systemctl stop etcd2.service 30 | sudo: yes 31 | 32 | - name: remove old etcd2 data dir 33 | file: path=/var/lib/etcd2/member state=absent 34 | sudo: yes 35 | 36 | - name: restart etcd2.service 37 | command: systemctl start etcd2.service 38 | sudo: yes 39 | -------------------------------------------------------------------------------- /reset_ceph.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: remove ceph 3 | serial: 1 4 | hosts: localhost 5 | 6 | pre_tasks: 7 | - name: delete ceph resource 8 | command: kubectl delete namespace ceph 9 | delegate_to: 127.0.0.1 10 | run_once: true 11 | 12 | - name: wait til namespace ceph is deleted 13 | pause: minutes=5 14 | run_once: true 15 | 16 | - name: whipe and prepare disks 17 | hosts: coreos 18 | 19 | pre_tasks: 20 | - name: clear osd disk. 
21 | command: dd if=/dev/zero of={{ ceph_osd_device }} bs=512 count=1 conv=notrunc 22 | when: ceph_osd_device is defined 23 | sudo: yes 24 | 25 | - name: create new partintion table on osd disk 26 | shell: | 27 | echo "g 28 | w" | fdisk {{ ceph_osd_device }} 29 | when: ceph_osd_device is defined 30 | sudo: yes 31 | register: fdisk_result 32 | ignore_errors: yes 33 | 34 | 35 | 36 | # roles: 37 | # - { role: unsafe_reboot, unsafe_reboot_dealay=300, when: fdisk_result|failed } 38 | 39 | - name: reinstall ceph 40 | hosts: coreos 41 | roles: 42 | - ceph-on-kubernetes-resources 43 | 44 | -------------------------------------------------------------------------------- /roles/boot-rescue/tasks/hetzner.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check rescue mode 3 | uri: 4 | url: https://robot-ws.your-server.de/boot/{{ inventory_hostname }}/rescue 5 | method: GET 6 | user: "{{ hetzner_webservice_username }}" 7 | password: "{{ hetzner_webservice_password }}" 8 | force_basic_auth: yes 9 | status_code: 200 10 | register: rescue 11 | 12 | - name: activate rescue mode 13 | when: rescue.json.rescue.active == false 14 | uri: 15 | url: https://robot-ws.your-server.de/boot/{{ inventory_hostname }}/rescue 16 | method: POST 17 | user: "{{ hetzner_webservice_username }}" 18 | password: "{{ hetzner_webservice_password }}" 19 | force_basic_auth: yes 20 | body: "os=linux&arch=64&authorized_key={{ rescue_authorized_key }}" 21 | status_code: 200 22 | HEADER_Content-Type: "application/x-www-form-urlencoded" 23 | register: activated 24 | 25 | #- debug: var=activated 26 | 27 | - name: Execute hardware reset 28 | uri: 29 | url: https://robot-ws.your-server.de/reset/{{ inventory_hostname }} 30 | method: POST 31 | user: "{{ hetzner_webservice_username }}" 32 | password: "{{ hetzner_webservice_password }}" 33 | force_basic_auth: yes 34 | body: "type=hw" 35 | status_code: 200 36 | HEADER_Content-Type: "application/x-www-form-urlencoded" 37 | register: reset 38 | 39 | - name: remove server from local known_hosts file 40 | local_action: shell ssh-keygen -R {{ inventory_hostname }} 41 | ignore_errors: true 42 | 43 | - name: waiting for server to go down 44 | local_action: 45 | module: wait_for 46 | host: "{{ inventory_hostname }}" 47 | port: 22 48 | delay: 1 49 | timeout: 120 50 | state: stopped 51 | 52 | - name: waiting for server to come back 53 | local_action: 54 | module: wait_for 55 | host: "{{ inventory_hostname }}" 56 | port: 22 57 | delay: 1 58 | timeout: 120 59 | -------------------------------------------------------------------------------- /roles/boot-rescue/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: ovh.yml 3 | when: baremetal_provider == 'ovh' 4 | 5 | - include: hetzner.yml 6 | when: baremetal_provider == 'hetzner' 7 | 8 | -------------------------------------------------------------------------------- /roles/boot-rescue/tasks/ovh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set rescue boot 3 | ovh: 4 | method: put 5 | endpoint: kimsufi-eu 6 | application_key: "{{ kimsufi_application_key }}" 7 | application_secret: "{{ kimsufi_application_secret }}" 8 | consumer_key: "{{ kimsufi_consumer_key }}" 9 | uri: /dedicated/server/{{ server_name }} 10 | args: 11 | bootId: 22 12 | 13 | - name: reboot 14 | ovh: 15 | method: post 16 | endpoint: kimsufi-eu 17 | application_key: "{{ kimsufi_application_key }}" 18 | 
application_secret: "{{ kimsufi_application_secret }}" 19 | consumer_key: "{{ kimsufi_consumer_key }}" 20 | uri: /dedicated/server/{{ server_name }}/reboot 21 | 22 | - name: remove server from local known_hosts file 23 | local_action: shell ssh-keygen -R {{ inventory_hostname }} 24 | ignore_errors: true 25 | 26 | 27 | - name: waiting for server to go down 28 | local_action: 29 | module: wait_for 30 | host={{ inventory_hostname }} 31 | port=22 32 | delay=1 33 | timeout=120 34 | state=absent 35 | - name: waiting for server to come back 36 | local_action: 37 | module: wait_for 38 | host={{ inventory_hostname }} 39 | port=22 40 | delay=1 41 | timeout=120 42 | search_regex=OpenSSH 43 | 44 | - name: set boot back to harddrive 45 | ovh: 46 | method: put 47 | endpoint: kimsufi-eu 48 | application_key: "{{ kimsufi_application_key }}" 49 | application_secret: "{{ kimsufi_application_secret }}" 50 | consumer_key: "{{ kimsufi_consumer_key }}" 51 | uri: /dedicated/server/{{ server_name }} 52 | args: 53 | bootId: 1 54 | -------------------------------------------------------------------------------- /roles/calico/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: install calicoctl 4 | get_url: 5 | url: https://github.com/projectcalico/calicoctl/releases/download/v1.1.1/calicoctl 6 | dest: "{{ lookup('env','HOME') }}/bin/calicoctl" 7 | mode: 0774 8 | 9 | 10 | 11 | - name: create callico dir 12 | file: 13 | path: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/' 14 | #path: /etc/kubernetes/manifests/ 15 | state: directory 16 | - name: template calico env 17 | template: 18 | src: env.tmpl 19 | dest: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/calico.env' 20 | - name: create callico dir 21 | file: 22 | path: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/manifests' 23 | #path: /etc/kubernetes/manifests/ 24 | state: directory 25 | 26 | - name: template calico resorce 27 | template: 28 | src: calico.yml.tmpl 29 | dest: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/calico.yml' 30 | #dest: /etc/kubernetes/manifests/calico.ymĺ 31 | - include: ../k8s-resource/tasks/main.yml 32 | vars: 33 | k8s_resource_kind: DaemonSet 34 | k8s_resource_name: calico-node 35 | k8s_resource_namespace: kube-system 36 | k8s_resource_file: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/calico.yml' 37 | 38 | - name: failsafe policy 39 | template: 40 | src: failsafe_policy.yml 41 | dest: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/manifests/failsafe_policy.yml' 42 | - name: failsafe policy 43 | template: 44 | src: cluster_policy.yml 45 | dest: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/manifests/cluster_policy.yml' 46 | - name: host endpoint 47 | template: 48 | src: host_endpoint.yml.tmpl 49 | dest: '{{ inventory_dir }}/{{ kube_cluster_name }}-calico/manifests/{{ coreos_hostname }}.yml' 50 | - name: read generated config into variable 51 | set_fact: 52 | calico_resource_file: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-calico/calico.yml') }}" 53 | - name: create resources 54 | shell: | 55 | cd {{ inventory_dir }}/{{ kube_cluster_name }}-calico/ 56 | . 
calico.env 57 | calicoctl apply -f {{ item }} 58 | args: 59 | executable: /bin/bash 60 | with_items: 61 | - manifests/failsafe_policy.yml 62 | - manifests/cluster_policy.yml 63 | - manifests/{{ coreos_hostname }}.yml 64 | connection: local 65 | delegate_to: localhost 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /roles/calico/tasks/templates/calico.yaml: -------------------------------------------------------------------------------- 1 | # This ConfigMap is used to configure a self-hosted Calico installation. 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: calico-config 6 | namespace: kube-system 7 | data: 8 | # Configure this with the location of your etcd cluster. 9 | etcd_endpoints: "http://127.0.0.1:2379" 10 | 11 | # Configure the Calico backend to use. 12 | calico_backend: "bird" 13 | 14 | # The CNI network configuration to install on each node. 15 | cni_network_config: |- 16 | { 17 | "name": "k8s-pod-network", 18 | "type": "calico", 19 | "etcd_endpoints": "__ETCD_ENDPOINTS__", 20 | "etcd_key_file": "__ETCD_KEY_FILE__", 21 | "etcd_cert_file": "__ETCD_CERT_FILE__", 22 | "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", 23 | "log_level": "info", 24 | "ipam": { 25 | "type": "calico-ipam" 26 | }, 27 | "policy": { 28 | "type": "k8s", 29 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", 30 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" 31 | }, 32 | "kubernetes": { 33 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 34 | } 35 | } 36 | 37 | # The default IP Pool to be created for the cluster. 38 | # Pod IP addresses will be assigned from this pool. 39 | ippool.yaml: | 40 | apiVersion: v1 41 | kind: ipPool 42 | metadata: 43 | cidr: 192.168.0.0/16 44 | spec: 45 | nat-outgoing: true 46 | 47 | # If you're using TLS enabled etcd uncomment the following. 48 | # You must also populate the Secret below with these files. 49 | etcd_ca: "" # "/calico-secrets/etcd-ca" 50 | etcd_cert: "" # "/calico-secrets/etcd-cert" 51 | etcd_key: "" # "/calico-secrets/etcd-key" 52 | 53 | --- 54 | 55 | # The following contains k8s Secrets for use with a TLS enabled etcd cluster. 56 | # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ 57 | apiVersion: v1 58 | kind: Secret 59 | type: Opaque 60 | metadata: 61 | name: calico-etcd-secrets 62 | namespace: kube-system 63 | data: 64 | # Populate the following files with etcd TLS configuration if desired, but leave blank if 65 | # not using TLS for etcd. 66 | # This self-hosted install expects three files with the following names. The values 67 | # should be base64 encoded strings of the entire contents of each file. 68 | # etcd-key: null 69 | # etcd-cert: null 70 | # etcd-ca: null 71 | 72 | --- 73 | 74 | # This manifest installs the calico/node container, as well 75 | # as the Calico CNI plugins and network config on 76 | # each master and worker node in a Kubernetes cluster. 
77 | kind: DaemonSet 78 | apiVersion: extensions/v1beta1 79 | metadata: 80 | name: calico-node 81 | namespace: kube-system 82 | labels: 83 | k8s-app: calico-node 84 | spec: 85 | selector: 86 | matchLabels: 87 | k8s-app: calico-node 88 | template: 89 | metadata: 90 | labels: 91 | k8s-app: calico-node 92 | annotations: 93 | scheduler.alpha.kubernetes.io/critical-pod: '' 94 | scheduler.alpha.kubernetes.io/tolerations: | 95 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 96 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 97 | spec: 98 | hostNetwork: true 99 | containers: 100 | # Runs calico/node container on each Kubernetes node. This 101 | # container programs network policy and routes on each 102 | # host. 103 | - name: calico-node 104 | image: quay.io/calico/node:v1.0.2 105 | env: 106 | # The location of the Calico etcd cluster. 107 | - name: ETCD_ENDPOINTS 108 | valueFrom: 109 | configMapKeyRef: 110 | name: calico-config 111 | key: etcd_endpoints 112 | # Choose the backend to use. 113 | - name: CALICO_NETWORKING_BACKEND 114 | valueFrom: 115 | configMapKeyRef: 116 | name: calico-config 117 | key: calico_backend 118 | # Disable file logging so `kubectl logs` works. 119 | - name: CALICO_DISABLE_FILE_LOGGING 120 | value: "true" 121 | # Don't configure a default pool. This is done by the Job 122 | # below. 123 | - name: NO_DEFAULT_POOLS 124 | value: "true" 125 | - name: FELIX_LOGSEVERITYSCREEN 126 | value: "info" 127 | # Location of the CA certificate for etcd. 128 | - name: ETCD_CA_CERT_FILE 129 | valueFrom: 130 | configMapKeyRef: 131 | name: calico-config 132 | key: etcd_ca 133 | # Location of the client key for etcd. 134 | - name: ETCD_KEY_FILE 135 | valueFrom: 136 | configMapKeyRef: 137 | name: calico-config 138 | key: etcd_key 139 | # Location of the client certificate for etcd. 140 | - name: ETCD_CERT_FILE 141 | valueFrom: 142 | configMapKeyRef: 143 | name: calico-config 144 | key: etcd_cert 145 | # Auto-detect the BGP IP address. 146 | - name: IP 147 | value: "" 148 | securityContext: 149 | privileged: true 150 | volumeMounts: 151 | - mountPath: /lib/modules 152 | name: lib-modules 153 | readOnly: true 154 | - mountPath: /var/run/calico 155 | name: var-run-calico 156 | readOnly: false 157 | - mountPath: /calico-secrets 158 | name: etcd-certs 159 | # This container installs the Calico CNI binaries 160 | # and CNI network config file on each node. 161 | - name: install-cni 162 | image: calico/cni:v1.5.6 163 | command: ["/install-cni.sh"] 164 | env: 165 | # The location of the Calico etcd cluster. 166 | - name: ETCD_ENDPOINTS 167 | valueFrom: 168 | configMapKeyRef: 169 | name: calico-config 170 | key: etcd_endpoints 171 | # The CNI network config to install on each node. 172 | - name: CNI_NETWORK_CONFIG 173 | valueFrom: 174 | configMapKeyRef: 175 | name: calico-config 176 | key: cni_network_config 177 | volumeMounts: 178 | - mountPath: /host/opt/cni/bin 179 | name: cni-bin-dir 180 | - mountPath: /host/etc/cni/net.d 181 | name: cni-net-dir 182 | - mountPath: /calico-secrets 183 | name: etcd-certs 184 | volumes: 185 | # Used by calico/node. 186 | - name: lib-modules 187 | hostPath: 188 | path: /lib/modules 189 | - name: var-run-calico 190 | hostPath: 191 | path: /var/run/calico 192 | # Used to install CNI. 193 | - name: cni-bin-dir 194 | hostPath: 195 | path: /opt/cni/bin 196 | - name: cni-net-dir 197 | hostPath: 198 | path: /etc/cni/net.d 199 | # Mount in the etcd TLS secrets. 
200 | - name: etcd-certs 201 | secret: 202 | secretName: calico-etcd-secrets 203 | 204 | --- 205 | 206 | # This manifest deploys the Calico policy controller on Kubernetes. 207 | # See https://github.com/projectcalico/k8s-policy 208 | apiVersion: extensions/v1beta1 209 | kind: Deployment 210 | metadata: 211 | name: calico-policy-controller 212 | namespace: kube-system 213 | labels: 214 | k8s-app: calico-policy 215 | annotations: 216 | scheduler.alpha.kubernetes.io/critical-pod: '' 217 | scheduler.alpha.kubernetes.io/tolerations: | 218 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 219 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 220 | spec: 221 | # The policy controller can only have a single active instance. 222 | replicas: 1 223 | strategy: 224 | type: Recreate 225 | template: 226 | metadata: 227 | name: calico-policy-controller 228 | namespace: kube-system 229 | labels: 230 | k8s-app: calico-policy 231 | spec: 232 | # The policy controller must run in the host network namespace so that 233 | # it isn't governed by policy that would prevent it from working. 234 | hostNetwork: true 235 | containers: 236 | - name: calico-policy-controller 237 | image: calico/kube-policy-controller:v0.5.2 238 | env: 239 | # The location of the Calico etcd cluster. 240 | - name: ETCD_ENDPOINTS 241 | valueFrom: 242 | configMapKeyRef: 243 | name: calico-config 244 | key: etcd_endpoints 245 | # Location of the CA certificate for etcd. 246 | - name: ETCD_CA_CERT_FILE 247 | valueFrom: 248 | configMapKeyRef: 249 | name: calico-config 250 | key: etcd_ca 251 | # Location of the client key for etcd. 252 | - name: ETCD_KEY_FILE 253 | valueFrom: 254 | configMapKeyRef: 255 | name: calico-config 256 | key: etcd_key 257 | # Location of the client certificate for etcd. 258 | - name: ETCD_CERT_FILE 259 | valueFrom: 260 | configMapKeyRef: 261 | name: calico-config 262 | key: etcd_cert 263 | # The location of the Kubernetes API. Use the default Kubernetes 264 | # service for API access. 265 | - name: K8S_API 266 | value: "https://kubernetes.default:443" 267 | # Since we're running in the host namespace and might not have KubeDNS 268 | # access, configure the container's /etc/hosts to resolve 269 | # kubernetes.default to the correct service clusterIP. 270 | - name: CONFIGURE_ETC_HOSTS 271 | value: "true" 272 | volumeMounts: 273 | # Mount in the etcd TLS secrets. 274 | - mountPath: /calico-secrets 275 | name: etcd-certs 276 | volumes: 277 | # Mount in the etcd TLS secrets. 278 | - name: etcd-certs 279 | secret: 280 | secretName: calico-etcd-secrets 281 | 282 | --- 283 | 284 | ## This manifest deploys a Job which performs one time 285 | # configuration of Calico 286 | apiVersion: batch/v1 287 | kind: Job 288 | metadata: 289 | name: configure-calico 290 | namespace: kube-system 291 | labels: 292 | k8s-app: calico 293 | spec: 294 | template: 295 | metadata: 296 | name: configure-calico 297 | annotations: 298 | scheduler.alpha.kubernetes.io/critical-pod: '' 299 | scheduler.alpha.kubernetes.io/tolerations: | 300 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 301 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 302 | spec: 303 | hostNetwork: true 304 | restartPolicy: OnFailure 305 | containers: 306 | # Writes basic configuration to datastore. 
307 | - name: configure-calico 308 | image: calico/ctl:v1.0.2 309 | args: 310 | - apply 311 | - -f 312 | - /etc/config/calico/ippool.yaml 313 | volumeMounts: 314 | - name: config-volume 315 | mountPath: /etc/config 316 | # Mount in the etcd TLS secrets. 317 | - mountPath: /calico-secrets 318 | name: etcd-certs 319 | env: 320 | # The location of the etcd cluster. 321 | - name: ETCD_ENDPOINTS 322 | valueFrom: 323 | configMapKeyRef: 324 | name: calico-config 325 | key: etcd_endpoints 326 | # Location of the CA certificate for etcd. 327 | - name: ETCD_CA_CERT_FILE 328 | valueFrom: 329 | configMapKeyRef: 330 | name: calico-config 331 | key: etcd_ca 332 | # Location of the client key for etcd. 333 | - name: ETCD_KEY_FILE 334 | valueFrom: 335 | configMapKeyRef: 336 | name: calico-config 337 | key: etcd_key 338 | # Location of the client certificate for etcd. 339 | - name: ETCD_CERT_FILE 340 | valueFrom: 341 | configMapKeyRef: 342 | name: calico-config 343 | key: etcd_cert 344 | volumes: 345 | - name: config-volume 346 | configMap: 347 | name: calico-config 348 | items: 349 | - key: ippool.yaml 350 | path: calico/ippool.yaml 351 | # Mount in the etcd TLS secrets. 352 | - name: etcd-certs 353 | secret: 354 | secretName: calico-etcd-secrets 355 | -------------------------------------------------------------------------------- /roles/calico/templates/calico-official.yml: -------------------------------------------------------------------------------- 1 | 1# This ConfigMap is used to configure a self-hosted Calico installation. 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: calico-config 6 | namespace: kube-system 7 | data: 8 | # Configure this with the location of your etcd cluster. 9 | etcd_endpoints: "http://127.0.0.1:2379" 10 | 11 | # Configure the Calico backend to use. 12 | calico_backend: "bird" 13 | 14 | # The CNI network configuration to install on each node. 15 | cni_network_config: |- 16 | { 17 | "name": "k8s-pod-network", 18 | "type": "calico", 19 | "etcd_endpoints": "__ETCD_ENDPOINTS__", 20 | "etcd_key_file": "__ETCD_KEY_FILE__", 21 | "etcd_cert_file": "__ETCD_CERT_FILE__", 22 | "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", 23 | "log_level": "info", 24 | "ipam": { 25 | "type": "calico-ipam" 26 | }, 27 | "policy": { 28 | "type": "k8s", 29 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", 30 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" 31 | }, 32 | "kubernetes": { 33 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 34 | } 35 | } 36 | 37 | # The default IP Pool to be created for the cluster. 38 | # Pod IP addresses will be assigned from this pool. 39 | ippool.yaml: | 40 | apiVersion: v1 41 | kind: ipPool 42 | metadata: 43 | cidr: 192.168.0.0/16 44 | spec: 45 | nat-outgoing: true 46 | 47 | # If you're using TLS enabled etcd uncomment the following. 48 | # You must also populate the Secret below with these files. 49 | etcd_ca: "" # "/calico-secrets/etcd-ca" 50 | etcd_cert: "" # "/calico-secrets/etcd-cert" 51 | etcd_key: "" # "/calico-secrets/etcd-key" 52 | 53 | --- 54 | 55 | # The following contains k8s Secrets for use with a TLS enabled etcd cluster. 56 | # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ 57 | apiVersion: v1 58 | kind: Secret 59 | type: Opaque 60 | metadata: 61 | name: calico-etcd-secrets 62 | namespace: kube-system 63 | data: 64 | # Populate the following files with etcd TLS configuration if desired, but leave blank if 65 | # not using TLS for etcd. 
66 | # This self-hosted install expects three files with the following names. The values 67 | # should be base64 encoded strings of the entire contents of each file. 68 | # etcd-key: null 69 | # etcd-cert: null 70 | # etcd-ca: null 71 | 72 | --- 73 | 74 | # This manifest installs the calico/node container, as well 75 | # as the Calico CNI plugins and network config on 76 | # each master and worker node in a Kubernetes cluster. 77 | kind: DaemonSet 78 | apiVersion: extensions/v1beta1 79 | metadata: 80 | name: calico-node 81 | namespace: kube-system 82 | labels: 83 | k8s-app: calico-node 84 | spec: 85 | selector: 86 | matchLabels: 87 | k8s-app: calico-node 88 | template: 89 | metadata: 90 | labels: 91 | k8s-app: calico-node 92 | annotations: 93 | scheduler.alpha.kubernetes.io/critical-pod: '' 94 | scheduler.alpha.kubernetes.io/tolerations: | 95 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 96 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 97 | spec: 98 | hostNetwork: true 99 | containers: 100 | # Runs calico/node container on each Kubernetes node. This 101 | # container programs network policy and routes on each 102 | # host. 103 | - name: calico-node 104 | image: quay.io/calico/node:v1.0.2 105 | env: 106 | # The location of the Calico etcd cluster. 107 | - name: ETCD_ENDPOINTS 108 | valueFrom: 109 | configMapKeyRef: 110 | name: calico-config 111 | key: etcd_endpoints 112 | # Choose the backend to use. 113 | - name: CALICO_NETWORKING_BACKEND 114 | valueFrom: 115 | configMapKeyRef: 116 | name: calico-config 117 | key: calico_backend 118 | # Disable file logging so `kubectl logs` works. 119 | - name: CALICO_DISABLE_FILE_LOGGING 120 | value: "true" 121 | # Set Felix endpoint to host default action to ACCEPT. 122 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 123 | value: "ACCEPT" 124 | # Don't configure a default pool. This is done by the Job 125 | # below. 126 | - name: NO_DEFAULT_POOLS 127 | value: "true" 128 | - name: FELIX_LOGSEVERITYSCREEN 129 | value: "info" 130 | # Location of the CA certificate for etcd. 131 | - name: ETCD_CA_CERT_FILE 132 | valueFrom: 133 | configMapKeyRef: 134 | name: calico-config 135 | key: etcd_ca 136 | # Location of the client key for etcd. 137 | - name: ETCD_KEY_FILE 138 | valueFrom: 139 | configMapKeyRef: 140 | name: calico-config 141 | key: etcd_key 142 | # Location of the client certificate for etcd. 143 | - name: ETCD_CERT_FILE 144 | valueFrom: 145 | configMapKeyRef: 146 | name: calico-config 147 | key: etcd_cert 148 | # Auto-detect the BGP IP address. 149 | - name: IP 150 | value: "" 151 | securityContext: 152 | privileged: true 153 | volumeMounts: 154 | - mountPath: /lib/modules 155 | name: lib-modules 156 | readOnly: true 157 | - mountPath: /var/run/calico 158 | name: var-run-calico 159 | readOnly: false 160 | - mountPath: /calico-secrets 161 | name: etcd-certs 162 | # This container installs the Calico CNI binaries 163 | # and CNI network config file on each node. 164 | - name: install-cni 165 | image: calico/cni:v1.5.6 166 | command: ["/install-cni.sh"] 167 | env: 168 | # The location of the Calico etcd cluster. 169 | - name: ETCD_ENDPOINTS 170 | valueFrom: 171 | configMapKeyRef: 172 | name: calico-config 173 | key: etcd_endpoints 174 | # The CNI network config to install on each node. 
175 | - name: CNI_NETWORK_CONFIG 176 | valueFrom: 177 | configMapKeyRef: 178 | name: calico-config 179 | key: cni_network_config 180 | volumeMounts: 181 | - mountPath: /host/opt/cni/bin 182 | name: cni-bin-dir 183 | - mountPath: /host/etc/cni/net.d 184 | name: cni-net-dir 185 | - mountPath: /calico-secrets 186 | name: etcd-certs 187 | volumes: 188 | # Used by calico/node. 189 | - name: lib-modules 190 | hostPath: 191 | path: /lib/modules 192 | - name: var-run-calico 193 | hostPath: 194 | path: /var/run/calico 195 | # Used to install CNI. 196 | - name: cni-bin-dir 197 | hostPath: 198 | path: /opt/cni/bin 199 | - name: cni-net-dir 200 | hostPath: 201 | path: /etc/cni/net.d 202 | # Mount in the etcd TLS secrets. 203 | - name: etcd-certs 204 | secret: 205 | secretName: calico-etcd-secrets 206 | 207 | --- 208 | 209 | # This manifest deploys the Calico policy controller on Kubernetes. 210 | # See https://github.com/projectcalico/k8s-policy 211 | apiVersion: extensions/v1beta1 212 | kind: Deployment 213 | metadata: 214 | name: calico-policy-controller 215 | namespace: kube-system 216 | labels: 217 | k8s-app: calico-policy 218 | annotations: 219 | scheduler.alpha.kubernetes.io/critical-pod: '' 220 | scheduler.alpha.kubernetes.io/tolerations: | 221 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 222 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 223 | spec: 224 | # The policy controller can only have a single active instance. 225 | replicas: 1 226 | strategy: 227 | type: Recreate 228 | template: 229 | metadata: 230 | name: calico-policy-controller 231 | namespace: kube-system 232 | labels: 233 | k8s-app: calico-policy 234 | spec: 235 | # The policy controller must run in the host network namespace so that 236 | # it isn't governed by policy that would prevent it from working. 237 | hostNetwork: true 238 | containers: 239 | - name: calico-policy-controller 240 | image: calico/kube-policy-controller:v0.5.2 241 | env: 242 | # The location of the Calico etcd cluster. 243 | - name: ETCD_ENDPOINTS 244 | valueFrom: 245 | configMapKeyRef: 246 | name: calico-config 247 | key: etcd_endpoints 248 | # Location of the CA certificate for etcd. 249 | - name: ETCD_CA_CERT_FILE 250 | valueFrom: 251 | configMapKeyRef: 252 | name: calico-config 253 | key: etcd_ca 254 | # Location of the client key for etcd. 255 | - name: ETCD_KEY_FILE 256 | valueFrom: 257 | configMapKeyRef: 258 | name: calico-config 259 | key: etcd_key 260 | # Location of the client certificate for etcd. 261 | - name: ETCD_CERT_FILE 262 | valueFrom: 263 | configMapKeyRef: 264 | name: calico-config 265 | key: etcd_cert 266 | # The location of the Kubernetes API. Use the default Kubernetes 267 | # service for API access. 268 | - name: K8S_API 269 | value: "https://kubernetes.default:443" 270 | # Since we're running in the host namespace and might not have KubeDNS 271 | # access, configure the container's /etc/hosts to resolve 272 | # kubernetes.default to the correct service clusterIP. 273 | - name: CONFIGURE_ETC_HOSTS 274 | value: "true" 275 | volumeMounts: 276 | # Mount in the etcd TLS secrets. 277 | - mountPath: /calico-secrets 278 | name: etcd-certs 279 | volumes: 280 | # Mount in the etcd TLS secrets. 
281 | - name: etcd-certs 282 | secret: 283 | secretName: calico-etcd-secrets 284 | 285 | --- 286 | 287 | ## This manifest deploys a Job which performs one time 288 | # configuration of Calico 289 | apiVersion: batch/v1 290 | kind: Job 291 | metadata: 292 | name: configure-calico 293 | namespace: kube-system 294 | labels: 295 | k8s-app: calico 296 | spec: 297 | template: 298 | metadata: 299 | name: configure-calico 300 | annotations: 301 | scheduler.alpha.kubernetes.io/critical-pod: '' 302 | scheduler.alpha.kubernetes.io/tolerations: | 303 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 304 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 305 | spec: 306 | hostNetwork: true 307 | restartPolicy: OnFailure 308 | containers: 309 | # Writes basic configuration to datastore. 310 | - name: configure-calico 311 | image: calico/ctl:v1.0.2 312 | args: 313 | - apply 314 | - -f 315 | - /etc/config/calico/ippool.yaml 316 | volumeMounts: 317 | - name: config-volume 318 | mountPath: /etc/config 319 | # Mount in the etcd TLS secrets. 320 | - mountPath: /calico-secrets 321 | name: etcd-certs 322 | env: 323 | # The location of the etcd cluster. 324 | - name: ETCD_ENDPOINTS 325 | valueFrom: 326 | configMapKeyRef: 327 | name: calico-config 328 | key: etcd_endpoints 329 | # Location of the CA certificate for etcd. 330 | - name: ETCD_CA_CERT_FILE 331 | valueFrom: 332 | configMapKeyRef: 333 | name: calico-config 334 | key: etcd_ca 335 | # Location of the client key for etcd. 336 | - name: ETCD_KEY_FILE 337 | valueFrom: 338 | configMapKeyRef: 339 | name: calico-config 340 | key: etcd_key 341 | # Location of the client certificate for etcd. 342 | - name: ETCD_CERT_FILE 343 | valueFrom: 344 | configMapKeyRef: 345 | name: calico-config 346 | key: etcd_cert 347 | volumes: 348 | - name: config-volume 349 | configMap: 350 | name: calico-config 351 | items: 352 | - key: ippool.yaml 353 | path: calico/ippool.yaml 354 | # Mount in the etcd TLS secrets. 355 | - name: etcd-certs 356 | secret: 357 | secretName: calico-etcd-secrets 358 | -------------------------------------------------------------------------------- /roles/calico/templates/calico.yml.tmpl: -------------------------------------------------------------------------------- 1 | # This ConfigMap is used to configure a self-hosted Calico installation. 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: calico-config 6 | namespace: kube-system 7 | data: 8 | # Configure this with the location of your etcd cluster. 9 | etcd_endpoints: "{% for host in groups['etcd-node'] %}https://{{host}}:2379{%if not loop.last %},{% endif %}{% endfor %}" 10 | 11 | # Configure the Calico backend to use. 12 | calico_backend: "none" 13 | # The CNI network configuration to install on each node. The special 14 | # values in this config will be automatically populated. 
15 | cni_network_config: |- 16 | { 17 | "name": "calico", 18 | "type": "flannel", 19 | "delegate": { 20 | "type": "calico", 21 | "bridge": "cni0", 22 | "isDefaultGateway": true, 23 | "etcd_endpoints": "__ETCD_ENDPOINTS__", 24 | "etcd_key_file": "/etc/ssl/etcd/client-key.pem", 25 | "etcd_cert_file": "/etc/ssl/etcd/client-cert.pem", 26 | "etcd_ca_cert_file": "/etc/ssl/etcd/ca.crt", 27 | "log_level": "info", 28 | "policy": { 29 | "type": "k8s", 30 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", 31 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" 32 | }, 33 | "kubernetes": { 34 | "kubeconfig": "/etc/kubernetes/cni/net.d/__KUBECONFIG_FILENAME__" 35 | } 36 | } 37 | } 38 | 39 | etcd_ca: "/calico-secrets/etcd-ca" 40 | etcd_cert: "/calico-secrets/etcd-cert" 41 | etcd_key: "/calico-secrets/etcd-key" 42 | --- 43 | 44 | # The following contains k8s Secrets for use with a TLS enabled etcd cluster. 45 | # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ 46 | apiVersion: v1 47 | kind: Secret 48 | type: Opaque 49 | metadata: 50 | name: calico-etcd-secrets 51 | namespace: kube-system 52 | data: 53 | # Populate the following files with etcd TLS configuration if desired, but leave blank if 54 | # not using TLS for etcd. 55 | # This self-hosted install expects three files with the following names. The values 56 | # should be base64 encoded strings of the entire contents of each file. 57 | etcd-key: "{{ etcd_client_key|b64encode }}" 58 | etcd-cert: "{{ etcd_client_cert|b64encode }}" 59 | etcd-ca: "{{ etcd_ca_certificate|b64encode }}" 60 | --- 61 | 62 | # This manifest installs the calico/node container, as well 63 | # as the Calico CNI plugins and network config on 64 | # each master and worker node in a Kubernetes cluster. 65 | kind: DaemonSet 66 | apiVersion: extensions/v1beta1 67 | metadata: 68 | name: calico-node 69 | namespace: kube-system 70 | labels: 71 | k8s-app: calico-node 72 | spec: 73 | selector: 74 | matchLabels: 75 | k8s-app: calico-node 76 | template: 77 | metadata: 78 | labels: 79 | k8s-app: calico-node 80 | annotations: 81 | scheduler.alpha.kubernetes.io/critical-pod: '' 82 | scheduler.alpha.kubernetes.io/tolerations: | 83 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 84 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 85 | spec: 86 | hostNetwork: true 87 | containers: 88 | # Runs calico/node container on each Kubernetes node. This 89 | # container programs network policy and routes on each 90 | # host. 91 | - name: calico-node 92 | image: quay.io/calico/node:v1.1.1 93 | env: 94 | # The location of the Calico etcd cluster. 95 | - name: ETCD_ENDPOINTS 96 | valueFrom: 97 | configMapKeyRef: 98 | name: calico-config 99 | key: etcd_endpoints 100 | # Choose the backend to use. 101 | - name: CALICO_NETWORKING_BACKEND 102 | valueFrom: 103 | configMapKeyRef: 104 | name: calico-config 105 | key: calico_backend 106 | # Disable file logging so `kubectl logs` works. 107 | - name: CALICO_DISABLE_FILE_LOGGING 108 | value: "true" 109 | # Set Felix endpoint to host default action to ACCEPT. 110 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 111 | value: "ACCEPT" 112 | # Don't configure a default pool. This is done by the Job 113 | # below. 114 | - name: NO_DEFAULT_POOLS 115 | value: "true" 116 | - name: FELIX_LOGSEVERITYSCREEN 117 | value: "info" 118 | # Location of the CA certificate for etcd. 
119 | - name: ETCD_CA_CERT_FILE 120 | valueFrom: 121 | configMapKeyRef: 122 | name: calico-config 123 | key: etcd_ca 124 | # Location of the client key for etcd. 125 | - name: ETCD_KEY_FILE 126 | valueFrom: 127 | configMapKeyRef: 128 | name: calico-config 129 | key: etcd_key 130 | # Location of the client certificate for etcd. 131 | - name: ETCD_CERT_FILE 132 | valueFrom: 133 | configMapKeyRef: 134 | name: calico-config 135 | key: etcd_cert 136 | # Auto-detect the BGP IP address. 137 | - name: IP 138 | value: "" 139 | securityContext: 140 | privileged: true 141 | volumeMounts: 142 | - mountPath: /lib/modules 143 | name: lib-modules 144 | readOnly: true 145 | - mountPath: /var/run/calico 146 | name: var-run-calico 147 | readOnly: false 148 | - mountPath: /calico-secrets 149 | name: etcd-certs 150 | - mountPath: /etc/resolv.conf 151 | name: dns 152 | readOnly: true 153 | # This container installs the Calico CNI binaries 154 | # and CNI network config file on each node. 155 | # - name: install-cni 156 | # image: calico/cni:v1.5.6 157 | # imagePullPolicy: Always 158 | # command: ["/install-cni.sh"] 159 | # env: 160 | # # CNI configuration filename 161 | # - name: CNI_CONF_NAME 162 | # value: "10-flannel.conf" 163 | # # The location of the Calico etcd cluster. 164 | # - name: ETCD_ENDPOINTS 165 | # valueFrom: 166 | # configMapKeyRef: 167 | # name: calico-config 168 | # key: etcd_endpoints 169 | # # The CNI network config to install on each node. 170 | # - name: CNI_NETWORK_CONFIG 171 | # valueFrom: 172 | # configMapKeyRef: 173 | # name: calico-config 174 | # key: cni_network_config 175 | # volumeMounts: 176 | # - mountPath: /host/opt/cni/bin 177 | # name: cni-bin-dir 178 | # - mountPath: /host/etc/cni/net.d 179 | # name: cni-net-dir 180 | # - mountPath: /calico-secrets 181 | # name: etcd-certs 182 | volumes: 183 | # Used by calico/node. 184 | - name: lib-modules 185 | hostPath: 186 | path: /lib/modules 187 | - name: var-run-calico 188 | hostPath: 189 | path: /var/run/calico 190 | # Used to install CNI. 191 | - name: cni-bin-dir 192 | hostPath: 193 | path: /opt/cni/bin 194 | - name: cni-net-dir 195 | hostPath: 196 | path: /etc/kubernetes/cni/net.d 197 | - name: dns 198 | hostPath: 199 | path: /etc/resolv.conf 200 | - name: etcd-certs 201 | secret: 202 | secretName: calico-etcd-secrets 203 | 204 | --- 205 | 206 | # This manifest deploys the Calico policy controller on Kubernetes. 207 | # See https://github.com/projectcalico/k8s-policy 208 | apiVersion: extensions/v1beta1 209 | kind: Deployment 210 | metadata: 211 | name: calico-policy-controller 212 | namespace: kube-system 213 | labels: 214 | k8s-app: calico-policy 215 | annotations: 216 | scheduler.alpha.kubernetes.io/critical-pod: '' 217 | scheduler.alpha.kubernetes.io/tolerations: | 218 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 219 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 220 | spec: 221 | # The policy controller can only have a single active instance. 222 | replicas: 1 223 | strategy: 224 | type: Recreate 225 | template: 226 | metadata: 227 | name: calico-policy-controller 228 | namespace: kube-system 229 | labels: 230 | k8s-app: calico-policy 231 | spec: 232 | # The policy controller must run in the host network namespace so that 233 | # it isn't governed by policy that would prevent it from working. 234 | hostNetwork: true 235 | containers: 236 | - name: calico-policy-controller 237 | image: calico/kube-policy-controller:v0.5.2 238 | env: 239 | # The location of the Calico etcd cluster. 
240 | - name: ETCD_ENDPOINTS 241 | valueFrom: 242 | configMapKeyRef: 243 | name: calico-config 244 | key: etcd_endpoints 245 | # Location of the CA certificate for etcd. 246 | - name: ETCD_CA_CERT_FILE 247 | valueFrom: 248 | configMapKeyRef: 249 | name: calico-config 250 | key: etcd_ca 251 | # Location of the client key for etcd. 252 | - name: ETCD_KEY_FILE 253 | valueFrom: 254 | configMapKeyRef: 255 | name: calico-config 256 | key: etcd_key 257 | # Location of the client certificate for etcd. 258 | - name: ETCD_CERT_FILE 259 | valueFrom: 260 | configMapKeyRef: 261 | name: calico-config 262 | key: etcd_cert 263 | # The location of the Kubernetes API. Use the default Kubernetes 264 | # service for API access. 265 | - name: K8S_API 266 | value: "https://kubernetes.default.svc.cluster.local:443" 267 | # Since we're running in the host namespace and might not have KubeDNS 268 | # access, configure the container's /etc/hosts to resolve 269 | # kubernetes.default to the correct service clusterIP. 270 | - name: CONFIGURE_ETC_HOSTS 271 | value: "false" 272 | volumeMounts: 273 | # Mount in the etcd TLS secrets. 274 | - mountPath: /calico-secrets 275 | name: etcd-certs 276 | volumes: 277 | # Mount in the etcd TLS secrets. 278 | - name: etcd-certs 279 | secret: 280 | secretName: calico-etcd-secrets 281 | -------------------------------------------------------------------------------- /roles/calico/templates/calico.yml.tmpl.bak: -------------------------------------------------------------------------------- 1 | # This ConfigMap is used to configure a self-hosted Calico installation. 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: calico-config 6 | namespace: kube-system 7 | data: 8 | # Configure this with the location of your etcd cluster. 9 | etcd_endpoints: "https://176.9.123.237:2379,https://176.9.122.117:2379,https://176.9.118.230:2379" 10 | 11 | # Configure the Calico backend to use. 12 | calico_backend: "none" 13 | # The CNI network configuration to install on each node. The special 14 | # values in this config will be automatically populated. 15 | cni_network_config: |- 16 | { 17 | "name": "calico", 18 | "type": "flannel", 19 | "delegate": { 20 | "type": "calico", 21 | "bridge": "cni0", 22 | "isDefaultGateway": true, 23 | "etcd_endpoints": "__ETCD_ENDPOINTS__", 24 | "etcd_key_file": "/etc/ssl/etcd/client-key.pem", 25 | "etcd_cert_file": "/etc/ssl/etcd/client-cert.pem", 26 | "etcd_ca_cert_file": "/etc/ssl/etcd/ca.crt", 27 | "log_level": "info", 28 | "policy": { 29 | "type": "k8s", 30 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", 31 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" 32 | }, 33 | "kubernetes": { 34 | "kubeconfig": "/etc/kubernetes/cni/net.d/__KUBECONFIG_FILENAME__" 35 | } 36 | } 37 | } 38 | 39 | etcd_ca: "/calico-secrets/etcd-ca" 40 | etcd_cert: "/calico-secrets/etcd-cert" 41 | etcd_key: "/calico-secrets/etcd-key" 42 | --- 43 | 44 | # The following contains k8s Secrets for use with a TLS enabled etcd cluster. 45 | # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ 46 | apiVersion: v1 47 | kind: Secret 48 | type: Opaque 49 | metadata: 50 | name: calico-etcd-secrets 51 | namespace: kube-system 52 | data: 53 | # Populate the following files with etcd TLS configuration if desired, but leave blank if 54 | # not using TLS for etcd. 55 | # This self-hosted install expects three files with the following names. The values 56 | # should be base64 encoded strings of the entire contents of each file. 
57 | etcd-key: "{{ etcd_client_key|b64encode }}" 58 | etcd-cert: "{{ etcd_client_cert|b64encode }}" 59 | etcd-ca: "{{ etcd_ca_certificate|b64encode }}" 60 | --- 61 | 62 | # This manifest installs the calico/node container, as well 63 | # as the Calico CNI plugins and network config on 64 | # each master and worker node in a Kubernetes cluster. 65 | kind: DaemonSet 66 | apiVersion: extensions/v1beta1 67 | metadata: 68 | name: calico-node 69 | namespace: kube-system 70 | labels: 71 | k8s-app: calico-node 72 | spec: 73 | selector: 74 | matchLabels: 75 | k8s-app: calico-node 76 | template: 77 | metadata: 78 | labels: 79 | k8s-app: calico-node 80 | annotations: 81 | scheduler.alpha.kubernetes.io/critical-pod: '' 82 | scheduler.alpha.kubernetes.io/tolerations: | 83 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 84 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 85 | spec: 86 | hostNetwork: true 87 | containers: 88 | # Runs calico/node container on each Kubernetes node. This 89 | # container programs network policy and routes on each 90 | # host. 91 | - name: calico-node 92 | image: quay.io/calico/node:v1.0.2 93 | env: 94 | # The location of the Calico etcd cluster. 95 | - name: ETCD_ENDPOINTS 96 | valueFrom: 97 | configMapKeyRef: 98 | name: calico-config 99 | key: etcd_endpoints 100 | # Choose the backend to use. 101 | - name: CALICO_NETWORKING_BACKEND 102 | valueFrom: 103 | configMapKeyRef: 104 | name: calico-config 105 | key: calico_backend 106 | # Disable file logging so `kubectl logs` works. 107 | - name: CALICO_DISABLE_FILE_LOGGING 108 | value: "true" 109 | # Set Felix endpoint to host default action to ACCEPT. 110 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 111 | value: "ACCEPT" 112 | # Don't configure a default pool. This is done by the Job 113 | # below. 114 | - name: NO_DEFAULT_POOLS 115 | value: "true" 116 | - name: FELIX_LOGSEVERITYSCREEN 117 | value: "info" 118 | # Location of the CA certificate for etcd. 119 | - name: ETCD_CA_CERT_FILE 120 | valueFrom: 121 | configMapKeyRef: 122 | name: calico-config 123 | key: etcd_ca 124 | # Location of the client key for etcd. 125 | - name: ETCD_KEY_FILE 126 | valueFrom: 127 | configMapKeyRef: 128 | name: calico-config 129 | key: etcd_key 130 | # Location of the client certificate for etcd. 131 | - name: ETCD_CERT_FILE 132 | valueFrom: 133 | configMapKeyRef: 134 | name: calico-config 135 | key: etcd_cert 136 | # Auto-detect the BGP IP address. 137 | - name: IP 138 | value: "" 139 | securityContext: 140 | privileged: true 141 | volumeMounts: 142 | - mountPath: /lib/modules 143 | name: lib-modules 144 | readOnly: true 145 | - mountPath: /var/run/calico 146 | name: var-run-calico 147 | readOnly: false 148 | - mountPath: /calico-secrets 149 | name: etcd-certs 150 | - mountPath: /etc/resolv.conf 151 | name: dns 152 | readOnly: true 153 | # This container installs the Calico CNI binaries 154 | # and CNI network config file on each node. 155 | - name: install-cni 156 | image: calico/cni:v1.5.6 157 | imagePullPolicy: Always 158 | command: ["/install-cni.sh"] 159 | env: 160 | # CNI configuration filename 161 | - name: CNI_CONF_NAME 162 | value: "10-flannel.conf" 163 | # The location of the Calico etcd cluster. 164 | - name: ETCD_ENDPOINTS 165 | valueFrom: 166 | configMapKeyRef: 167 | name: calico-config 168 | key: etcd_endpoints 169 | # The CNI network config to install on each node. 
170 | - name: CNI_NETWORK_CONFIG 171 | valueFrom: 172 | configMapKeyRef: 173 | name: calico-config 174 | key: cni_network_config 175 | volumeMounts: 176 | - mountPath: /host/opt/cni/bin 177 | name: cni-bin-dir 178 | - mountPath: /host/etc/cni/net.d 179 | name: cni-net-dir 180 | - mountPath: /calico-secrets 181 | name: etcd-certs 182 | volumes: 183 | # Used by calico/node. 184 | - name: lib-modules 185 | hostPath: 186 | path: /lib/modules 187 | - name: var-run-calico 188 | hostPath: 189 | path: /var/run/calico 190 | # Used to install CNI. 191 | - name: cni-bin-dir 192 | hostPath: 193 | path: /opt/cni/bin 194 | - name: cni-net-dir 195 | hostPath: 196 | path: /etc/kubernetes/cni/net.d 197 | - name: dns 198 | hostPath: 199 | path: /etc/resolv.conf 200 | - name: etcd-certs 201 | secret: 202 | secretName: calico-etcd-secrets 203 | 204 | --- 205 | 206 | # This manifest deploys the Calico policy controller on Kubernetes. 207 | # See https://github.com/projectcalico/k8s-policy 208 | apiVersion: extensions/v1beta1 209 | kind: Deployment 210 | metadata: 211 | name: calico-policy-controller 212 | namespace: kube-system 213 | labels: 214 | k8s-app: calico-policy 215 | annotations: 216 | scheduler.alpha.kubernetes.io/critical-pod: '' 217 | scheduler.alpha.kubernetes.io/tolerations: | 218 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 219 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 220 | spec: 221 | # The policy controller can only have a single active instance. 222 | replicas: 1 223 | strategy: 224 | type: Recreate 225 | template: 226 | metadata: 227 | name: calico-policy-controller 228 | namespace: kube-system 229 | labels: 230 | k8s-app: calico-policy 231 | spec: 232 | # The policy controller must run in the host network namespace so that 233 | # it isn't governed by policy that would prevent it from working. 234 | hostNetwork: true 235 | containers: 236 | - name: calico-policy-controller 237 | image: calico/kube-policy-controller:v0.5.2 238 | env: 239 | # The location of the Calico etcd cluster. 240 | - name: ETCD_ENDPOINTS 241 | valueFrom: 242 | configMapKeyRef: 243 | name: calico-config 244 | key: etcd_endpoints 245 | # Location of the CA certificate for etcd. 246 | - name: ETCD_CA_CERT_FILE 247 | valueFrom: 248 | configMapKeyRef: 249 | name: calico-config 250 | key: etcd_ca 251 | # Location of the client key for etcd. 252 | - name: ETCD_KEY_FILE 253 | valueFrom: 254 | configMapKeyRef: 255 | name: calico-config 256 | key: etcd_key 257 | # Location of the client certificate for etcd. 258 | - name: ETCD_CERT_FILE 259 | valueFrom: 260 | configMapKeyRef: 261 | name: calico-config 262 | key: etcd_cert 263 | # The location of the Kubernetes API. Use the default Kubernetes 264 | # service for API access. 265 | - name: K8S_API 266 | value: "https://kubernetes.default:443" 267 | # Since we're running in the host namespace and might not have KubeDNS 268 | # access, configure the container's /etc/hosts to resolve 269 | # kubernetes.default to the correct service clusterIP. 270 | - name: CONFIGURE_ETC_HOSTS 271 | value: "false" 272 | volumeMounts: 273 | # Mount in the etcd TLS secrets. 274 | - mountPath: /calico-secrets 275 | name: etcd-certs 276 | volumes: 277 | # Mount in the etcd TLS secrets. 
278 | - name: etcd-certs 279 | secret: 280 | secretName: calico-etcd-secrets 281 | 282 | --- 283 | 284 | ## This manifest deploys a Job which performs one time 285 | # configuration of Calico 286 | apiVersion: batch/v1 287 | kind: Job 288 | metadata: 289 | name: configure-calico 290 | namespace: kube-system 291 | labels: 292 | k8s-app: calico 293 | spec: 294 | template: 295 | metadata: 296 | name: configure-calico 297 | annotations: 298 | scheduler.alpha.kubernetes.io/critical-pod: '' 299 | scheduler.alpha.kubernetes.io/tolerations: | 300 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" }, 301 | {"key":"CriticalAddonsOnly", "operator":"Exists"}] 302 | spec: 303 | hostNetwork: true 304 | restartPolicy: OnFailure 305 | containers: 306 | # Writes basic configuration to datastore. 307 | - name: configure-calico 308 | image: calico/ctl:v1.0.2 309 | args: 310 | - apply 311 | - -f 312 | - /etc/config/calico/ippool.yaml 313 | volumeMounts: 314 | - name: config-volume 315 | mountPath: /etc/config 316 | # Mount in the etcd TLS secrets. 317 | - mountPath: /calico-secrets 318 | name: etcd-certs 319 | env: 320 | # The location of the etcd cluster. 321 | - name: ETCD_ENDPOINTS 322 | valueFrom: 323 | configMapKeyRef: 324 | name: calico-config 325 | key: etcd_endpoints 326 | # Location of the CA certificate for etcd. 327 | - name: ETCD_CA_CERT_FILE 328 | valueFrom: 329 | configMapKeyRef: 330 | name: calico-config 331 | key: etcd_ca 332 | # Location of the client key for etcd. 333 | - name: ETCD_KEY_FILE 334 | valueFrom: 335 | configMapKeyRef: 336 | name: calico-config 337 | key: etcd_key 338 | # Location of the client certificate for etcd. 339 | - name: ETCD_CERT_FILE 340 | valueFrom: 341 | configMapKeyRef: 342 | name: calico-config 343 | key: etcd_cert 344 | volumes: 345 | - name: config-volume 346 | configMap: 347 | name: calico-config 348 | items: 349 | - key: ippool.yaml 350 | path: calico/ippool.yaml 351 | # Mount in the etcd TLS secrets. 
352 | - name: etcd-certs 353 | secret: 354 | secretName: calico-etcd-secrets 355 | -------------------------------------------------------------------------------- /roles/calico/templates/cluster_policy.yml: -------------------------------------------------------------------------------- 1 | - apiVersion: v1 2 | kind: policy 3 | metadata: 4 | name: cluster 5 | spec: 6 | selector: "role == 'node'" 7 | order: 1 8 | ingress: 9 | - action: allow 10 | protocol: tcp 11 | source: 12 | selector: "role == 'node'" 13 | destination: 14 | ports: 15 | - 10250 16 | - 9100 17 | - 4191 18 | egress: 19 | - action: allow 20 | protocol: tcp 21 | source: 22 | selector: "role == 'node'" 23 | destination: 24 | ports: 25 | - 10250 26 | - 9100 27 | - 4191 28 | - action: allow 29 | protocol: tcp 30 | source: 31 | net: "10.1.0.0/16" 32 | destination: 33 | ports: 34 | - 10250 35 | - 9100 36 | - 4191 37 | -------------------------------------------------------------------------------- /roles/calico/templates/env.tmpl: -------------------------------------------------------------------------------- 1 | export ETCD_ENDPOINTS="{% for host in groups['etcd-node'] %}https://{{host}}:2379{%if not loop.last %},{% endif %}{% endfor %}" 2 | export ETCD_CA_CERT_FILE="{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/ca.pem" 3 | export ETCD_CERT_FILE="{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/client.pem" 4 | export ETCD_KEY_FILE="{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/client-key.pem" 5 | -------------------------------------------------------------------------------- /roles/calico/templates/failsafe_policy.yml: -------------------------------------------------------------------------------- 1 | - apiVersion: v1 2 | kind: policy 3 | metadata: 4 | name: failsafe 5 | spec: 6 | selector: "role == 'node'" 7 | order: 0 8 | ingress: 9 | - action: allow 10 | protocol: tcp 11 | destination: 12 | ports: [22] 13 | - action: allow 14 | protocol: icmp 15 | - action: allow 16 | protocol: tcp 17 | destination: 18 | ports: [2379, 2380] 19 | - action: allow 20 | protocol: tcp 21 | destination: 22 | ports: 23 | - 6443 24 | - action: allow 25 | protocol: tcp 26 | destination: 27 | ports: 28 | - 655 29 | - action: allow 30 | protocol: udp 31 | destination: 32 | ports: 33 | - 655 34 | - action: allow 35 | protocol: tcp 36 | destination: 37 | ports: [80,443] 38 | egress: 39 | - action: allow 40 | protocol: tcp 41 | destination: 42 | ports: [2379, 2380, 4001] 43 | - action: allow 44 | protocol: udp 45 | destination: 46 | ports: [67,68] 47 | - action: allow 48 | protocol: udp 49 | destination: 50 | ports: [53] 51 | - action: allow 52 | protocol: tcp 53 | destination: 54 | ports: [80,443] 55 | - action: allow 56 | protocol: icmp 57 | - action: allow 58 | protocol: tcp 59 | destination: 60 | ports: 61 | - 655 62 | - action: allow 63 | protocol: udp 64 | destination: 65 | ports: 66 | - 655 67 | - action: allow 68 | protocol: tcp 69 | destination: 70 | ports: 71 | - 6443 72 | -------------------------------------------------------------------------------- /roles/calico/templates/host_endpoint.yml.tmpl: -------------------------------------------------------------------------------- 1 | - apiVersion: v1 2 | kind: hostEndpoint 3 | metadata: 4 | name: {{ coreos_hostname }} 5 | node: {{ coreos_hostname }} 6 | labels: 7 | role: node 8 | environment: production 9 | spec: 10 | expectedIPs: 11 | - {{ inventory_hostname }} 12 | -------------------------------------------------------------------------------- 
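The calico role above renders `calico.env` and the per-host manifests into `{{ inventory_dir }}/{{ kube_cluster_name }}-calico/` and applies them with `calicoctl`. A hedged sketch of how the applied objects can be inspected afterwards from the machine holding the inventory; the directory name follows the example inventory (`kube_cluster_name=privat`) and `calicoctl` v1.x resource names are assumed:

```
cd ../ansible-coreos-inventory/privat-calico/
. calico.env                  # exports ETCD_ENDPOINTS and the etcd client cert paths
calicoctl get hostEndpoint    # one entry per node, as rendered from host_endpoint.yml.tmpl
calicoctl get policy -o yaml  # the failsafe and cluster policies shown above
```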
/roles/ceph-on-kubernetes-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create ceph config dir 3 | file: 4 | path: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph" 5 | state: directory 6 | delegate_to: localhost 7 | connection: local 8 | run_once: true 9 | 10 | - name: generate ceph.conf 11 | template: src=ceph/ceph.conf.tmpl dest={{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph.conf 12 | delegate_to: localhost 13 | connection: local 14 | run_once: true 15 | 16 | - name: generate admin.keyring 17 | template: src=ceph/admin.keyring.tmpl dest={{ inventory_dir }}/{{ kube_cluster_name }}-ceph/admin.keyring 18 | delegate_to: localhost 19 | connection: local 20 | run_once: true 21 | 22 | - name: generate mon.keyring 23 | template: src=ceph/mon.keyring.tmpl dest={{ inventory_dir }}/{{ kube_cluster_name }}-ceph/mon.keyring 24 | delegate_to: localhost 25 | connection: local 26 | run_once: true 27 | 28 | - name: generate bootstrap.keyring 29 | template: src=ceph/bootstrap.keyring.tmpl dest={{ inventory_dir }}/{{ kube_cluster_name }}-ceph/bootstrap-{{ item }}.keyring 30 | delegate_to: localhost 31 | connection: local 32 | run_once: true 33 | with_items: 34 | - osd 35 | - mds 36 | - rgw 37 | 38 | - name: slurp bootstrap keyrings 39 | slurp: 40 | src: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/bootstrap-{{ item }}.keyring" 41 | delegate_to: localhost 42 | connection: local 43 | run_once: true 44 | with_items: 45 | - osd 46 | - mds 47 | - rgw 48 | register: bootstrap_secrets 49 | 50 | - name: debug bootstrap_secrets 51 | run_once: true 52 | debug: 53 | var: bootstrap_secrets 54 | 55 | 56 | - name: template bootstrap keyring secrets 57 | template: 58 | src: kubernetes/ceph-bootstrap-secret.tmpl 59 | dest: '{{ inventory_dir}}/{{ kube_cluster_name }}-ceph/bootstrap-{{ item.item }}.yml' 60 | register: bootstrap 61 | delegate_to: localhost 62 | connection: local 63 | run_once: true 64 | with_items: '{{ bootstrap_secrets.results }}' 65 | 66 | - name: slurp ceph.conf 67 | slurp: 68 | src: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph.conf" 69 | register: ceph_conf 70 | delegate_to: localhost 71 | connection: local 72 | run_once: true 73 | 74 | - name: slurp admin.keyring 75 | slurp: 76 | src: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/admin.keyring" 77 | register: ceph_admin_keyring 78 | delegate_to: localhost 79 | connection: local 80 | run_once: true 81 | 82 | - name: slurp mon.keyring 83 | slurp: 84 | src: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/mon.keyring" 85 | register: ceph_mon_keyring 86 | delegate_to: localhost 87 | connection: local 88 | run_once: true 89 | 90 | - set_fact: 91 | name: ceph-conf-combined 92 | ceph_conf_val: "{{ ceph_conf.content }}" 93 | ceph_admin_keyring_val: "{{ ceph_admin_keyring.content }}" 94 | ceph_mon_keyring_val: "{{ ceph_mon_keyring.content }}" 95 | run_once: true 96 | 97 | - name: debug 98 | debug: 99 | var: ceph_conf_val 100 | run_once: true 101 | - name: debug 102 | debug: 103 | var: ceph_admin_keyring_val 104 | run_once: true 105 | - name: debug 106 | debug: 107 | var: ceph_mon_keyring_val 108 | run_once: true 109 | 110 | 111 | - name: template ceph-conf-combined.yml 112 | template: 113 | src: kubernetes/ceph-conf-combined.tmpl 114 | dest: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/combined.yaml" 115 | delegate_to: localhost 116 | connection: local 117 | run_once: true 118 | 119 | - name: template k8s resources 120 | template: 121 | src: kubernetes/{{
item }} 122 | dest: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/{{ item }}" 123 | delegate_to: localhost 124 | connection: local 125 | run_once: true 126 | with_items: 127 | - ceph-mon-check-v1-rc.yaml 128 | - ceph-mon-v1-ds.yaml 129 | - ceph-mon-v1-svc.yaml 130 | - ceph-namespace.yaml 131 | - ceph-osd-v1-ds.yaml 132 | - ceph-mds-v1-dp.yaml 133 | 134 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/ceph/admin.keyring.tmpl: -------------------------------------------------------------------------------- 1 | [client.admin] 2 | key = {{ ceph_key }} 3 | auid = 0 4 | caps mds = "allow" 5 | caps mon = "allow *" 6 | caps osd = "allow *" 7 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/ceph/bootstrap.keyring.tmpl: -------------------------------------------------------------------------------- 1 | [client.bootstrap-{{ item }}] 2 | key = {{ ceph_key }} 3 | caps mon = "allow profile bootstrap-{{ item }}" 4 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/ceph/ceph.conf.tmpl: -------------------------------------------------------------------------------- 1 | [global] 2 | fsid = {{ ceph_fsid }} 3 | cephx = {{ auth_cephx|default("true")}} 4 | cephx_require_signatures = {{auth_cephx_require_signatures|default("false")}} 5 | cephx_cluster_require_signatures = {{auth_cephx_cluster_require_signatures|default("true")}} 6 | cephx_service_require_signatures = {{auth_cephx_service_require_signatures|default("false")}} 7 | 8 | # auth 9 | max_open_files = {{global_max_open_files|default("131072")}} 10 | osd_pool_default_pg_num = {{global_osd_pool_default_pg_num|default("128")}} 11 | osd_pool_default_pgp_num = {{global_osd_pool_default_pgp_num|default("128")}} 12 | osd_pool_default_size = {{global_osd_pool_default_size|default("3")}} 13 | osd_pool_default_min_size = {{global_osd_pool_default_min_size|default("1")}} 14 | 15 | mon_osd_full_ratio = {{global_mon_osd_full_ratio|default(".95")}} 16 | mon_osd_nearfull_ratio = {{global_mon_osd_nearfull_ratio|default(".85")}} 17 | 18 | mon_host = {{global_mon_host|default('ceph-mon.ceph.svc.cluster.local')}} 19 | 20 | [mon] 21 | mon_osd_down_out_interval = {{mon_mon_osd_down_out_interval|default("600")}} 22 | mon_osd_min_down_reporters = {{mon_mon_osd_min_down_reporters|default("4")}} 23 | mon_clock_drift_allowed = {{mon_mon_clock_drift_allowed|default(".15")}} 24 | mon_clock_drift_warn_backoff = {{mon_mon_clock_drift_warn_backoff|default("30")}} 25 | mon_osd_report_timeout = {{mon_mon_osd_report_timeout|default("300")}} 26 | 27 | 28 | [osd] 29 | journal_size = {{osd_journal_size|default("100")}} 30 | cluster_network = {{osd_cluster_network|default('10.1.0.0/16')}} 31 | public_network = {{osd_public_network|default('10.1.0.0/16')}} 32 | osd_mkfs_type = {{osd_osd_mkfs_type|default("xfs")}} 33 | osd_mkfs_options_xfs = {{osd_osd_mkfs_options_xfs|default("-f -i size=2048")}} 34 | osd_mon_heartbeat_interval = {{osd_osd_mon_heartbeat_interval|default("30")}} 35 | 36 | #crush 37 | osd_pool_default_crush_rule = {{osd_pool_default_crush_rule|default("0")}} 38 | osd_crush_update_on_start = {{osd_osd_crush_update_on_start|default("true")}} 39 | 40 | #backend 41 | osd_objectstore = {{osd_osd_objectstore|default("filestore")}} 42 | 43 | #performance tuning 44 | filestore_merge_threshold = {{osd_filestore_merge_threshold|default("40")}} 45 | 
filestore_split_multiple = {{osd_filestore_split_multiple|default("8")}} 46 | osd_op_threads = {{osd_osd_op_threads|default("8")}} 47 | filestore_op_threads = {{osd_filestore_op_threads|default("8")}} 48 | filestore_max_sync_interval = {{osd_filestore_max_sync_interval|default("5")}} 49 | osd_max_scrubs = {{osd_osd_max_scrubs|default("1")}} 50 | 51 | 52 | #recovery tuning 53 | osd_recovery_max_active = {{osd_osd_recovery_max_active|default("5")}} 54 | osd_max_backfills = {{osd_osd_max_backfills|default("2")}} 55 | osd_recovery_op_priority = {{osd_osd_recovery_op_priority|default("2")}} 56 | osd_client_op_priority = {{osd_osd_client_op_priority|default("63")}} 57 | osd_recovery_max_chunk = {{osd_osd_recovery_max_chunk|default("1048576")}} 58 | osd_recovery_threads = {{osd_osd_recovery_threads|default("1")}} 59 | 60 | #ports 61 | ms_bind_port_min = {{osd_ms_bind_port_min|default("6800")}} 62 | ms_bind_port_max = {{osd_ms_bind_port_max|default("7100")}} 63 | 64 | [client] 65 | rbd_cache_enabled = {{client_rbd_cache_enabled|default("true")}} 66 | rbd_cache_writethrough_until_flush = {{client_rbd_cache_writethrough_until_flush|default("true")}} 67 | rbd default features = {{rbd_default_features|default("1")}} 68 | 69 | [mds] 70 | mds_cache_size = {{mds_mds_cache_size|default("100000")}} 71 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/ceph/mon.keyring.tmpl: -------------------------------------------------------------------------------- 1 | [mon.] 2 | key = {{ ceph_key }} 3 | caps mon = "allow *" 4 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-admin-secret.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ceph-admin-keyring 6 | data: 7 | ceph.client.admin.keyring: {{ ceph_key }} 8 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-bootstrap-secret.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ceph-bootstrap-{{ item.item }}-keyring 6 | data: 7 | ceph.keyring: {{ item.content }} 8 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-conf-combined.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ name }} 6 | data: 7 | ceph.conf: {{ ceph_conf_val }} 8 | ceph.client.admin.keyring: {{ ceph_admin_keyring_val }} 9 | ceph.mon.keyring: {{ ceph_mon_keyring_val }} 10 | 11 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-mds-v1-dp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | labels: 6 | app: ceph 7 | daemon: mds 8 | name: ceph-mds 9 | namespace: ceph 10 | spec: 11 | replicas: 1 12 | template: 13 | metadata: 14 | name: ceph-mds 15 | namespace: ceph 16 | labels: 17 | app: ceph 18 | daemon: mds 19 | spec: 20 | serviceAccount: default 21 | volumes: 22 | - name: ceph-conf 23 | secret: 24 | secretName: ceph-conf-combined 25 | - name: 
ceph-bootstrap-osd-keyring 26 | secret: 27 | secretName: ceph-bootstrap-osd-keyring 28 | - name: ceph-bootstrap-mds-keyring 29 | secret: 30 | secretName: ceph-bootstrap-mds-keyring 31 | - name: ceph-bootstrap-rgw-keyring 32 | secret: 33 | secretName: ceph-bootstrap-rgw-keyring 34 | containers: 35 | - name: ceph-mds 36 | image: ceph/daemon:latest 37 | ports: 38 | - containerPort: 6800 39 | env: 40 | - name: CEPH_DAEMON 41 | value: MDS 42 | - name: CEPHFS_CREATE 43 | value: "1" 44 | - name: KV_TYPE 45 | value: k8s 46 | - name: CLUSTER 47 | value: ceph 48 | volumeMounts: 49 | - name: ceph-conf 50 | mountPath: /etc/ceph 51 | - name: ceph-bootstrap-osd-keyring 52 | mountPath: /var/lib/ceph/bootstrap-osd 53 | - name: ceph-bootstrap-mds-keyring 54 | mountPath: /var/lib/ceph/bootstrap-mds 55 | - name: ceph-bootstrap-rgw-keyring 56 | mountPath: /var/lib/ceph/bootstrap-rgw 57 | livenessProbe: 58 | tcpSocket: 59 | port: 6800 60 | initialDelaySeconds: 360 61 | timeoutSeconds: 5 62 | readinessProbe: 63 | tcpSocket: 64 | port: 6800 65 | timeoutSeconds: 5 66 | resources: 67 | requests: 68 | memory: "2048Mi" 69 | cpu: "1000m" 70 | limits: 71 | memory: "4096Mi" 72 | cpu: "2000m" 73 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-mon-check-v1-rc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ReplicationController 3 | apiVersion: v1 4 | metadata: 5 | labels: 6 | app: ceph 7 | daemon: moncheck 8 | name: ceph-mon-check 9 | namespace: ceph 10 | spec: 11 | replicas: 1 12 | selector: 13 | app: ceph 14 | daemon: moncheck 15 | template: 16 | metadata: 17 | name: ceph-mon 18 | namespace: ceph 19 | labels: 20 | app: ceph 21 | daemon: moncheck 22 | spec: 23 | serviceAccount: default 24 | volumes: 25 | - name: ceph-conf 26 | secret: 27 | secretName: ceph-conf-combined 28 | - name: ceph-bootstrap-osd-keyring 29 | secret: 30 | secretName: ceph-bootstrap-osd-keyring 31 | - name: ceph-bootstrap-mds-keyring 32 | secret: 33 | secretName: ceph-bootstrap-mds-keyring 34 | - name: ceph-bootstrap-rgw-keyring 35 | secret: 36 | secretName: ceph-bootstrap-rgw-keyring 37 | containers: 38 | - name: ceph-mon 39 | #image: quay.io/acaleph/ceph-daemon:kubernetes 40 | image: quay.io/cornelius/ceph-daemon-test:latest 41 | imagePullPolicy: Always 42 | ports: 43 | - containerPort: 6789 44 | env: 45 | - name: CEPH_DAEMON 46 | value: MON_HEALTH 47 | - name: CEPH_PUBLIC_NETWORK 48 | value: 10.1.0.0/16 49 | - name: CEPH_CLUSTER_NETWORK 50 | value: 10.1.0.0/16 51 | - name: KV_TYPE 52 | value: k8s 53 | - name: MON_IP_AUTO_DETECT 54 | value: "1" 55 | - name: CLUSTER 56 | value: ceph 57 | volumeMounts: 58 | - name: ceph-conf 59 | mountPath: /etc/ceph 60 | - name: ceph-bootstrap-osd-keyring 61 | mountPath: /var/lib/ceph/bootstrap-osd 62 | - name: ceph-bootstrap-mds-keyring 63 | mountPath: /var/lib/ceph/bootstrap-mds 64 | - name: ceph-bootstrap-rgw-keyring 65 | mountPath: /var/lib/ceph/bootstrap-rgw 66 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-mon-secret.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ceph-mon-keyring 6 | data: 7 | ceph.mon.keyring: {{ $key }} 8 | -------------------------------------------------------------------------------- 
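All of the Secret templates above expect base64 payloads: the tasks file slurps the rendered keyrings (Ansible's slurp module returns file content base64-encoded) and drops that string straight into the data: field. A hedged manual equivalent of the slurp/template/create round-trip, with placeholder paths and cluster name (the k8s-resource role normally performs the create step):

    # what ends up under "data: ceph.keyring" in bootstrap-osd.yml (path assumed)
    base64 ./mycluster-ceph/bootstrap-osd.keyring
    # load the rendered secret into the ceph namespace
    kubectl --context=mycluster create -f ./mycluster-ceph/bootstrap-osd.yml --namespace=ceph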
/roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-mon-v1-ds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: DaemonSet 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | labels: 6 | app: ceph 7 | daemon: mon 8 | name: ceph-mon 9 | namespace: ceph 10 | spec: 11 | template: 12 | metadata: 13 | name: ceph-mon 14 | namespace: ceph 15 | labels: 16 | app: ceph 17 | daemon: mon 18 | spec: 19 | nodeSelector: 20 | ceph-mon: "true" 21 | serviceAccount: default 22 | volumes: 23 | - name: ceph-conf 24 | secret: 25 | secretName: ceph-conf-combined 26 | - name: ceph-bootstrap-osd-keyring 27 | secret: 28 | secretName: ceph-bootstrap-osd-keyring 29 | - name: ceph-bootstrap-mds-keyring 30 | secret: 31 | secretName: ceph-bootstrap-mds-keyring 32 | - name: ceph-bootstrap-rgw-keyring 33 | secret: 34 | secretName: ceph-bootstrap-rgw-keyring 35 | containers: 36 | - name: ceph-mon 37 | image: quay.io/cornelius/ceph-daemon-test:latest 38 | # image: quay.io/acaleph/ceph-daemon:kubernetes 39 | # imagePullPolicy: Always 40 | lifecycle: 41 | preStop: 42 | exec: 43 | # remove the mon on Pod stop. 44 | command: 45 | - "/remove-mon.sh" 46 | ports: 47 | - containerPort: 6789 48 | env: 49 | - name: CEPH_DAEMON 50 | value: MON 51 | - name: CEPH_PUBLIC_NETWORK 52 | value: 10.1.0.0/16 53 | - name: CEPH_CLUSTER_NETWORK 54 | value: 10.1.0.0/16 55 | - name: KV_TYPE 56 | value: k8s 57 | - name: NETWORK_AUTO_DETECT 58 | value: "1" 59 | - name: CLUSTER 60 | value: ceph 61 | volumeMounts: 62 | - name: ceph-conf 63 | mountPath: /etc/ceph 64 | - name: ceph-bootstrap-osd-keyring 65 | mountPath: /var/lib/ceph/bootstrap-osd 66 | - name: ceph-bootstrap-mds-keyring 67 | mountPath: /var/lib/ceph/bootstrap-mds 68 | - name: ceph-bootstrap-rgw-keyring 69 | mountPath: /var/lib/ceph/bootstrap-rgw 70 | livenessProbe: 71 | tcpSocket: 72 | port: 6789 73 | initialDelaySeconds: 60 74 | timeoutSeconds: 5 75 | readinessProbe: 76 | tcpSocket: 77 | port: 6789 78 | timeoutSeconds: 5 79 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-mon-v1-svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: ceph-mon 6 | namespace: ceph 7 | labels: 8 | app: ceph 9 | daemon: mon 10 | spec: 11 | ports: 12 | - port: 6789 13 | protocol: TCP 14 | targetPort: 6789 15 | selector: 16 | app: ceph 17 | daemon: mon 18 | clusterIP: None -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: ceph -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/ceph-osd-v1-ds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: DaemonSet 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | name: ceph-osd 6 | namespace: ceph 7 | labels: 8 | app: ceph 9 | daemon: osd 10 | spec: 11 | template: 12 | metadata: 13 | labels: 14 | app: ceph 15 | daemon: osd 16 | spec: 17 | nodeSelector: 18 | ceph-osd: "true" 19 | volumes: 20 | - name: devices 21 | hostPath: 22 | path: /dev 23 | - name: ceph 24 | hostPath: 25 | path: /opt/ceph 26 | - name: ceph-conf 27 | secret: 
28 | secretName: ceph-conf-combined 29 | - name: ceph-bootstrap-osd-keyring 30 | secret: 31 | secretName: ceph-bootstrap-osd-keyring 32 | - name: ceph-bootstrap-mds-keyring 33 | secret: 34 | secretName: ceph-bootstrap-mds-keyring 35 | - name: ceph-bootstrap-rgw-keyring 36 | secret: 37 | secretName: ceph-bootstrap-rgw-keyring 38 | {% if ceph_osd_type == 'osd_directory' %} 39 | - name: osd-directory 40 | hostPath: 41 | path: "{{ ceph_osd_dir }}" 42 | {% endif %} 43 | containers: 44 | - name: osd-pod 45 | image: quay.io/cornelius/ceph-daemon-test:latest 46 | # imagePullPolicy: Always 47 | volumeMounts: 48 | - name: devices 49 | mountPath: /dev 50 | - name: ceph 51 | mountPath: /var/lib/ceph 52 | - name: ceph-conf 53 | mountPath: /etc/ceph 54 | - name: ceph-bootstrap-osd-keyring 55 | mountPath: /var/lib/ceph/bootstrap-osd 56 | - name: ceph-bootstrap-mds-keyring 57 | mountPath: /var/lib/ceph/bootstrap-mds 58 | - name: ceph-bootstrap-rgw-keyring 59 | mountPath: /var/lib/ceph/bootstrap-rgw 60 | {% if ceph_osd_type == 'osd_directory' %} 61 | - name: osd-directory 62 | mountPath: /var/lib/ceph/osd 63 | {% endif %} 64 | securityContext: 65 | privileged: true 66 | env: 67 | - name: CEPH_DAEMON 68 | value: osd 69 | - name: KV_TYPE 70 | value: k8s 71 | - name: CLUSTER 72 | value: ceph 73 | - name: CEPH_GET_ADMIN_KEY 74 | value: "1" 75 | {% if ceph_osd_type == 'osd_disk' %} 76 | - name: OSD_DEVICE 77 | value: "{{ ceph_osd_device }}" 78 | #- name: OSD_TYPE 79 | #value: disk 80 | {% endif %} 81 | livenessProbe: 82 | tcpSocket: 83 | port: 6800 84 | initialDelaySeconds: 360 85 | timeoutSeconds: 5 86 | readinessProbe: 87 | tcpSocket: 88 | port: 6800 89 | timeoutSeconds: 5 90 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-config/templates/kubernetes/secret.tmpl: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ name }} 6 | data: 7 | {{ key }}: {{ val }} 8 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-resources/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: ceph-on-kubernetes-config, ansible_python_interpreter: /usr/bin/python } 4 | - { role: wait_for_k8s_node, wait_for_seconds: 300, wait_for_resource_type: node, wait_for_resource_name: "{{ inventory_hostname }}", ansible_python_interpreter: /usr/bin/python } 5 | - { role: label-node, label_node: "{{ inventory_hostname }}", label_key: 'ceph-mon', label_value: 'true', when: "inventory_hostname in groups['ceph-mon']" , ansible_python_interpreter: /usr/bin/python } 6 | - { role: label-node, label_node: "{{ inventory_hostname }}", label_key: 'ceph-osd', label_value: 'true', when: "inventory_hostname in groups['ceph-osd']" , ansible_python_interpreter: /usr/bin/python } 7 | - { role: k8s-resource, k8s_resource_name: ceph, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph-namespace.yaml", k8s_resource_kind: namespace, k8s_resource_namespace: ceph , ansible_python_interpreter: /usr/bin/python } 8 | - { role: k8s-resource, k8s_resource_name: ceph-mon, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph-mon-v1-svc.yaml", k8s_resource_kind: service, k8s_resource_namespace: ceph , ansible_python_interpreter: /usr/bin/python } 9 | - { role: k8s-resource, k8s_resource_name: ceph-bootstrap-osd-keyring, 
k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/bootstrap-osd.yml", k8s_resource_kind: secret, k8s_resource_namespace: ceph , ansible_python_interpreter: /usr/bin/python } 10 | - { role: k8s-resource, k8s_resource_name: ceph-bootstrap-mds-keyring, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/bootstrap-mds.yml", k8s_resource_kind: secret, k8s_resource_namespace: ceph, ansible_python_interpreter: /usr/bin/python } 11 | - { role: k8s-resource, k8s_resource_name: ceph-bootstrap-rgw-keyring, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/bootstrap-rgw.yml", k8s_resource_kind: secret, k8s_resource_namespace: ceph, ansible_python_interpreter: /usr/bin/python } 12 | - { role: k8s-resource, k8s_resource_name: ceph-conf-combined, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/combined.yaml", k8s_resource_kind: secret, k8s_resource_namespace: ceph , ansible_python_interpreter: /usr/bin/python } 13 | - { role: k8s-resource, k8s_resource_name: ceph-mon, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph-mon-v1-ds.yaml", k8s_resource_kind: daemonset, k8s_resource_namespace: ceph , ansible_python_interpreter: /usr/bin/python } 14 | - { role: k8s-resource, k8s_resource_name: ceph-osd, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph-osd-v1-ds.yaml", k8s_resource_kind: daemonset, k8s_resource_namespace: ceph, ansible_python_interpreter: /usr/bin/python } 15 | - { role: k8s-resource, k8s_resource_name: ceph-mon-check, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph-mon-check-v1-rc.yaml", k8s_resource_kind: rc, k8s_resource_namespace: ceph, ansible_python_interpreter: /usr/bin/python } 16 | - { role: k8s-resource, k8s_resource_name: ceph-mds, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-ceph/ceph-mds-v1-dp.yaml", k8s_resource_kind: rc, k8s_resource_namespace: ceph, ansible_python_interpreter: /usr/bin/python } 17 | -------------------------------------------------------------------------------- /roles/ceph-on-kubernetes-resources/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create ceph osd dir 3 | file: 4 | path: "{{ ceph_osd_dir }}" 5 | state: directory 6 | sudo: yes 7 | -------------------------------------------------------------------------------- /roles/cloud-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | clear_etcd_data: false 3 | cluster_only_ports: 4 | - 4194 5 | - 9100 6 | - 10250 7 | -------------------------------------------------------------------------------- /roles/cloud-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy cloud config 3 | file: path=/var/lib/coreos-install state=directory 4 | sudo: yes 5 | - name: copy cloud config 6 | template: src=cloud_config.tmpl dest=/var/lib/coreos-install/user_data 7 | sudo: yes 8 | 9 | - name: remove /usr/share/oem/cloud-config.yml on vagrant 10 | file: path=/usr/share/oem/cloud-config.yml state=absent 11 | when: baremetal_provider == 'vagrant' 12 | sudo: yes 13 | 14 | - name: stop etcd.service 15 | command: systemctl stop etcd.service 16 | ignore_errors: true 17 | sudo: yes 18 | when: clear_etcd_data 19 | 20 | - name: stop etcd2.service 21 | command: systemctl stop etcd2.service 22 | ignore_errors: true 23 | sudo: yes 24 | when: clear_etcd_data 25 | 26 | 
27 | - name: cleanup /var/lib/etcd/* 28 | command: rm -rf /var/lib/etcd/* 29 | sudo: yes 30 | when: clear_etcd_data 31 | 32 | - name: cleanup /var/lib/etcd2/* 33 | command: rm -rf /var/lib/etcd2/* 34 | sudo: yes 35 | when: clear_etcd_data 36 | 37 | -------------------------------------------------------------------------------- /roles/etcd-certificates/tasks/install_etcdca.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if etcd-ca is already installed 3 | local_action: "stat path=~/bin/etcd-ca" 4 | register: bin_etcdca 5 | 6 | 7 | - name: ensure ~/bin directory exists 8 | local_action: "file path=~/bin state=directory" 9 | become: false 10 | run_once: true 11 | 12 | - name: Detect local OS 13 | local_action: "command sh -c \"uname | tr '[:upper:]' '[:lower:]'\"" 14 | become: false 15 | register: os 16 | changed_when: false 17 | run_once: true 18 | 19 | - name: download binaries 20 | local_action: "get_url url='https://github.com/lamielle/etcd-ca/releases/download/v0.1.0/etcd-ca_0.1.0_{{os.stdout}}_amd64.tar.gz' dest='/tmp/etcd-ca.tgz'" 21 | become: false 22 | when: bin_etcdca.stat.exists == false 23 | run_once: true 24 | 25 | - name: unpack binaries 26 | local_action: "shell tar -C ~/bin -xf /tmp/etcd-ca.tgz" 27 | when: bin_etcdca.stat.exists == false 28 | 29 | - name: delete temporary files 30 | local_action: "file path=/tmp/etcd-ca.tgz state=absent" 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /roles/etcd-certificates/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install_etcdca.yml 3 | 4 | - name: create ca home 5 | local_action: file path={{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys state=directory 6 | 7 | - name: init ca 8 | shell: | 9 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/ 10 | etcd-ca init --passphrase="" 11 | args: 12 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/ca.crt" 13 | delegate_to: localhost 14 | connection: local 15 | 16 | - name: create csr for host 17 | shell: | 18 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/ 19 | etcd-ca new-cert --passphrase="" -ip {{ inventory_hostname }} {{ coreos_hostname }} 20 | args: 21 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.host.csr" 22 | delegate_to: localhost 23 | connection: local 24 | 25 | - name: sign csr for host 26 | shell: | 27 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/ 28 | etcd-ca sign --passphrase="" {{ coreos_hostname }} 29 | args: 30 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.host.crt" 31 | delegate_to: localhost 32 | connection: local 33 | 34 | - name: chain certificate 35 | shell: | 36 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/ 37 | etcd-ca chain {{ coreos_hostname }} > {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.chain 38 | args: 39 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.chain" 40 | delegate_to: localhost 41 | connection: local 42 | 43 | - name: export host key 44 | shell: | 45 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/ 46 | etcd-ca export --insecure --passphrase="" {{ coreos_hostname }} > {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.tar 47 | tar -C
{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/ -xf {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.tar {{ coreos_hostname }}.key.insecure 48 | args: 49 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/{{ coreos_hostname }}.key.insecure" 50 | delegate_to: localhost 51 | connection: local 52 | 53 | - name: set_ca_fcts 54 | set_fact: 55 | etcd_ca_certificate: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-keys/.etcd-ca/' + coreos_hostname + '.chain') }}" 56 | etcd_cert: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-keys/.etcd-ca/' + coreos_hostname + '.host.crt') }}" 57 | etcd_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-keys/.etcd-ca/' + coreos_hostname + '.key.insecure') }}" 58 | 59 | -------------------------------------------------------------------------------- /roles/etcd-certs-cfssl/tasks/install-cfssl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: detect local installation of cfssl 3 | local_action: "command which cfssl" 4 | register: local_cfssl 5 | become: false 6 | ignore_errors: yes 7 | run_once: true 8 | 9 | - name: ensure ~/bin directory exists 10 | local_action: "file path=~/bin state=directory" 11 | when: local_cfssl.rc != 0 12 | become: false 13 | run_once: true 14 | 15 | - name: Detect local OS 16 | local_action: "command sh -c \"uname | tr '[:upper:]' '[:lower:]'\"" 17 | become: false 18 | register: os 19 | when: local_cfssl.rc != 0 20 | run_once: true 21 | 22 | - name: download binaries 23 | local_action: "get_url url='https://pkg.cfssl.org/R1.2/{{ item }}_{{os.stdout}}-amd64' dest='~/bin/{{ item }}' mode='0744'" 24 | become: false 25 | with_items: 26 | - cfssl 27 | - cfssljson 28 | when: local_cfssl.rc != 0 29 | run_once: true 30 | -------------------------------------------------------------------------------- /roles/etcd-certs-cfssl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # etcd-ca export --insecure --passphrase '' | sed -e 's/.*-----BEGIN\(.*\)/-----BEGIN\1/' | openssl pkey -out key.pem 3 | # etcd-ca export --insecure --passphrase '' | sed -e 's/.*-----BEGIN\(.*\)/-----BEGIN\1/' | openssl x509 -out caextracted.pem 4 | - include: install-cfssl.yml 5 | 6 | - block: 7 | - name: create etcd-cfssl ca authoritority 8 | file: 9 | path: '{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl' 10 | state: directory 11 | - name: check if there is an old etcdca 12 | stat: 13 | path: '{{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/.etcd-ca/ca.crt' 14 | register: old_ca 15 | - name: copy legacy ca if it exists 16 | shell: | 17 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-etcd-ca-keys/ 18 | etcd-ca export --insecure --passphrase '' | sed -e 's/.*-----BEGIN\(.*\)/-----BEGIN\1/' | openssl pkey -out {{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/ca-key.pem 19 | etcd-ca export --insecure --passphrase '' | sed -e 's/.*-----BEGIN\(.*\)/-----BEGIN\1/' | openssl x509 -out {{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/ca.pem 20 | args: 21 | creates: '{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/ca.pem' 22 | when: old_ca.stat.exists 23 | 24 | - name: Create ca 25 | shell: | 26 | cd {{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl 27 | echo '{"CN":"CA","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare ca -A 28 | args: 
29 | creates: '{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/ca.pem' 30 | 31 | - name: Create ca config 32 | shell: | 33 | cd {{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl 34 | pwd 35 | echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","server auth","client auth"]}}}' > ca-config.json 36 | args: 37 | creates: '{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/ca-config.json' 38 | 39 | - name: Create server certificate 40 | shell: | 41 | cd {{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl 42 | echo '{"CN":"'{{ coreos_hostname }}'","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -config=ca-config.json -ca=ca.pem -ca-key=ca-key.pem -hostname="{{ inventory_hostname }}" - | cfssljson -bare {{ coreos_hostname }} 43 | args: 44 | creates: '{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/{{ coreos_hostname }}.pem' 45 | 46 | - name: Create client certificate 47 | shell: | 48 | cd {{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl 49 | echo '{"CN":"'client'","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -config=ca-config.json -ca=ca.pem -ca-key=ca-key.pem -hostname="" - | cfssljson -bare client 50 | args: 51 | creates: '{{ inventory_dir }}/{{ kube_cluster_name }}-etcd-ca-cfssl/client.pem' 52 | connection: local 53 | delegate_to: localhost 54 | 55 | - name: set_ca_facts 56 | set_fact: 57 | etcd_ca_certificate: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-cfssl/ca.pem') }}" 58 | etcd_cert: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-cfssl/' + coreos_hostname + '.pem') }}" 59 | etcd_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-cfssl/' + coreos_hostname + '-key.pem') }}" 60 | etcd_client_cert: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-cfssl/client.pem') }}" 61 | etcd_client_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-etcd-ca-cfssl/client-key.pem') }}" 62 | 63 | -------------------------------------------------------------------------------- /roles/extra-cas/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: read cas 3 | slurp: 4 | src: "{{ item }}" 5 | 6 | with_fileglob: 7 | - "{{ inventory_dir }}/{{ kube_cluster_name }}-extra-ca-certs/*" 8 | register: extra_cas 9 | delegate_to: localhost 10 | connection: local 11 | 12 | - debug: 13 | var: extra_cas 14 | -------------------------------------------------------------------------------- /roles/install-coreos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | coreos_channel: stable 3 | coreos_public_keys: [] 4 | -------------------------------------------------------------------------------- /roles/install-coreos/tasks/bare_metal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy cloud config 3 | template: src=cloud_config_install.tmpl dest=/tmp/cloud-config.yaml 4 | 5 | - name: debug cloud config 6 | shell: cat /tmp/cloud-config.yaml 7 | 8 | 9 | - name: install coreos 10 | shell: | 11 | apt-get -y install gawk 12 | wget https://raw.githubusercontent.com/coreos/init/master/bin/coreos-install -P /tmp 13 | chmod a+x /tmp/coreos-install 14 | sed -i '/# inform the OS of partition table changes/a \ 15 | sleep 10 \ 16 | sync' /tmp/coreos-install 17 | /tmp/coreos-install -d /dev/sda -C {{ coreos_channel }} -c
/tmp/cloud-config.yaml 18 | 19 | - name: clear osd disk 20 | command: dd if=/dev/zero of={{ ceph_osd_device }} bs=512 count=1 conv=notrunc 21 | when: ceph_osd_device is defined 22 | 23 | - name: create new partintion table on osd disk 24 | shell: | 25 | echo "g 26 | w" | fdisk {{ ceph_osd_device }} 27 | when: ceph_osd_device is defined 28 | ignore_errors: true 29 | 30 | - name: reboot server 31 | shell: sleep 5 && reboot & 32 | args: 33 | executable: /bin/bash 34 | async: 10 35 | poll: 0 36 | 37 | 38 | - name: waiting for server to go down 39 | local_action: 40 | module: wait_for 41 | host: "{{ inventory_hostname }}" 42 | port: 22 43 | delay: 1 44 | timeout: 20 45 | state: stopped 46 | 47 | - name: waiting for server to come back 48 | local_action: 49 | module: wait_for 50 | host={{ inventory_hostname }} 51 | port=22 52 | delay=1 53 | timeout=200 54 | 55 | - name: remove server from local known_hosts file 56 | local_action: command /usr/bin/ssh-keygen -R {{ inventory_hostname }} 57 | ignore_errors: true 58 | 59 | -------------------------------------------------------------------------------- /roles/install-coreos/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: bare_metal.yml 3 | when: baremetal_provider == 'hetzner' or baremetal_provider == 'ovh' 4 | 5 | - include: vagrant.yml 6 | when: baremetal_provider == 'vagrant' 7 | -------------------------------------------------------------------------------- /roles/install-coreos/tasks/vagrant.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: destroy previous vagrant instances 3 | shell: | 4 | cd {{ playbook_dir }} 5 | vagrant destroy -f 6 | delegate_to: localhost 7 | connection: local 8 | run_once: true 9 | - name: boot vagrant vm 10 | shell: | 11 | cd {{ playbook_dir }} 12 | vagrant up {{ coreos_hostname }} 13 | delegate_to: localhost 14 | connection: local 15 | 16 | - name: waiting for server to come back 17 | local_action: 18 | module: wait_for 19 | host={{ inventory_hostname }} 20 | port=22 21 | delay=1 22 | timeout=200 23 | 24 | 25 | -------------------------------------------------------------------------------- /roles/k8s-dns-addon/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if dns-addon is already installed 3 | command: kubectl get services --namespace=kube-system --output=json --context={{ kube_cluster_name }} 4 | register: dns_service_kubectl_command 5 | changed_when: false 6 | delegate_to: localhost 7 | connection: local 8 | run_once: true 9 | 10 | - name: debug 11 | set_fact: 12 | dns_service_kubectl: "{{ dns_service_kubectl_command.stdout|from_json }}" 13 | delegate_to: localhost 14 | connection: local 15 | run_once: true 16 | 17 | - name: set kubctlexists to default to false 18 | set_fact: 19 | k8s_dns_service: false 20 | changed_when: false 21 | delegate_to: localhost 22 | connection: local 23 | run_once: true 24 | 25 | - name: check if dns-addon is already installed 26 | set_fact: 27 | k8s_dns_service: true 28 | when: item['metadata']['name'] == 'kube-dns' 29 | with_items: '{{ dns_service_kubectl["items"] }}' 30 | changed_when: false 31 | delegate_to: localhost 32 | connection: local 33 | run_once: true 34 | 35 | - name: create tempfile 36 | command: mktemp 37 | register: tmp_file_name 38 | when: k8s_dns_service == false 39 | delegate_to: localhost 40 | connection: local 41 | run_once: true 42 | 43 | - name: template addon 
k8s service efinition 44 | template: src=dns-addon.yml dest={{ tmp_file_name.stdout }} 45 | when: k8s_dns_service == false 46 | delegate_to: localhost 47 | connection: local 48 | run_once: true 49 | 50 | - name: create dns service 51 | command: kubectl create -f {{ tmp_file_name.stdout }} --context={{ kube_cluster_name }} 52 | when: k8s_dns_service == false 53 | delegate_to: localhost 54 | connection: local 55 | run_once: true 56 | 57 | - name: cleanup tmp file 58 | file: path={{ tmp_file_name.stdout }} state=absent 59 | when: k8s_dns_service == false 60 | connection: local 61 | run_once: true 62 | 63 | - name: wait for k8s dns service to be ready 64 | delegate_to: localhost 65 | connection: local 66 | run_once: true 67 | shell: | 68 | #!/bin/bash 69 | n=0 70 | until [ $n -ge 900 ] 71 | do 72 | [ $(kubectl --namespace=kube-system get pods | grep kube-dns | grep Running | wc -l) -eq 3 ] && break # substitute your command here 73 | n=$[$n+1] 74 | echo "." 75 | sleep 1 76 | done 77 | [ $(kubectl --namespace=kube-system get pods | grep kube-dns | grep Running | wc -l) -eq 3 ] 78 | args: 79 | executable: /bin/bash 80 | -------------------------------------------------------------------------------- /roles/k8s-dns-addon/templates/dns-addon.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeDNS" 10 | spec: 11 | selector: 12 | k8s-app: kube-dns 13 | clusterIP: {{ k8s_dns_service_ip }} 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | 22 | --- 23 | 24 | apiVersion: v1 25 | kind: ReplicationController 26 | metadata: 27 | name: kube-dns-v11 28 | namespace: kube-system 29 | labels: 30 | k8s-app: kube-dns 31 | version: v11 32 | kubernetes.io/cluster-service: "true" 33 | spec: 34 | replicas: 3 35 | selector: 36 | k8s-app: kube-dns 37 | version: v11 38 | template: 39 | metadata: 40 | labels: 41 | k8s-app: kube-dns 42 | version: v11 43 | kubernetes.io/cluster-service: "true" 44 | spec: 45 | containers: 46 | - name: etcd 47 | image: gcr.io/google_containers/etcd-amd64:2.2.1 48 | resources: 49 | limits: 50 | cpu: 100m 51 | memory: 500Mi 52 | requests: 53 | cpu: 100m 54 | memory: 50Mi 55 | command: 56 | - /usr/local/bin/etcd 57 | - -data-dir 58 | - /var/etcd/data 59 | - -listen-client-urls 60 | - http://127.0.0.1:2379,http://127.0.0.1:4001 61 | - -advertise-client-urls 62 | - http://127.0.0.1:2379,http://127.0.0.1:4001 63 | - -initial-cluster-token 64 | - skydns-etcd 65 | volumeMounts: 66 | - name: etcd-storage 67 | mountPath: /var/etcd/data 68 | - name: kube2sky 69 | image: gcr.io/google_containers/kube2sky:1.14 70 | resources: 71 | limits: 72 | cpu: 100m 73 | memory: 200Mi 74 | requests: 75 | cpu: 100m 76 | memory: 50Mi 77 | livenessProbe: 78 | httpGet: 79 | path: /healthz 80 | port: 8080 81 | scheme: HTTP 82 | initialDelaySeconds: 60 83 | timeoutSeconds: 5 84 | successThreshold: 1 85 | failureThreshold: 5 86 | readinessProbe: 87 | httpGet: 88 | path: /readiness 89 | port: 8081 90 | scheme: HTTP 91 | initialDelaySeconds: 30 92 | timeoutSeconds: 5 93 | args: 94 | # command = "/kube2sky" 95 | - --domain={{ k8s_dns_domain }} 96 | - name: skydns 97 | image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c 98 | resources: 99 | limits: 100 | cpu: 100m 101 | memory: 200Mi 102 | requests: 103 | cpu: 100m 104 | 
memory: 50Mi 105 | args: 106 | # command = "/skydns" 107 | - -machines=http://127.0.0.1:4001 108 | - -addr=0.0.0.0:53 109 | - -ns-rotate=false 110 | - -domain=cluster.local. 111 | ports: 112 | - containerPort: 53 113 | name: dns 114 | protocol: UDP 115 | - containerPort: 53 116 | name: dns-tcp 117 | protocol: TCP 118 | - name: healthz 119 | image: gcr.io/google_containers/exechealthz:1.0 120 | resources: 121 | limits: 122 | cpu: 10m 123 | memory: 20Mi 124 | requests: 125 | cpu: 10m 126 | memory: 20Mi 127 | args: 128 | - -cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null 129 | - -port=8080 130 | ports: 131 | - containerPort: 8080 132 | protocol: TCP 133 | volumes: 134 | - name: etcd-storage 135 | emptyDir: {} 136 | dnsPolicy: Default 137 | -------------------------------------------------------------------------------- /roles/k8s-kubesystem-namespace/files/kube-system-namespace.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kube-system 5 | -------------------------------------------------------------------------------- /roles/k8s-kubesystem-namespace/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: get namespaces 3 | command: kubectl get namespaces --output=json --context={{ kube_cluster_name }} 4 | register: system_namespace_kubectl_command 5 | changed_when: false 6 | delegate_to: localhost 7 | connection: local 8 | run_once: true 9 | 10 | - name: debug 11 | set_fact: 12 | system_namespace_kubectl: "{{ system_namespace_kubectl_command.stdout|from_json }}" 13 | delegate_to: localhost 14 | connection: local 15 | run_once: true 16 | 17 | - name: default k8s_system_namespace to false 18 | set_fact: 19 | k8s_system_namespace: false 20 | changed_when: false 21 | delegate_to: localhost 22 | connection: local 23 | run_once: true 24 | 25 | - name: debug 26 | debug: 27 | var: item 28 | with_items: '{{ system_namespace_kubectl["items"] }}' 29 | changed_when: false 30 | delegate_to: localhost 31 | connection: local 32 | run_once: true 33 | 34 | - name: check if kube-system namespace exists 35 | set_fact: 36 | k8s_system_namespace: true 37 | when: item.metadata.name == "kube-system" 38 | with_items: '{{ system_namespace_kubectl["items"] }}' 39 | changed_when: false 40 | delegate_to: localhost 41 | connection: local 42 | run_once: true 43 | 44 | - name: create kube-system namespace 45 | command: kubectl create -f {{ playbook_dir }}/roles/k8s-kubesystem-namespace/files/kube-system-namespace.yml --context={{ kube_cluster_name }} 46 | when: k8s_system_namespace == false 47 | delegate_to: localhost 48 | connection: local 49 | run_once: true 50 | -------------------------------------------------------------------------------- /roles/k8s-resource/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if resource already exists 3 | command: kubectl get {{ k8s_resource_kind }} --namespace={{ k8s_resource_namespace|default("default") }} --output=json --context={{ kube_cluster_name }} 4 | register: k8s_resource_kubectl_command 5 | changed_when: false 6 | delegate_to: localhost 7 | connection: local 8 | run_once: true 9 | 10 | - name: debug 11 | set_fact: 12 | k8s_resource_kubectl: "{{ k8s_resource_kubectl_command.stdout|from_json }}" 13 | delegate_to: localhost 14 | connection: local 15 | run_once: true 16 | 17 | - name: default k8s_resource_exists
to false 18 | set_fact: 19 | k8s_resource_exists: false 20 | changed_when: false 21 | delegate_to: localhost 22 | connection: local 23 | run_once: true 24 | 25 | - name: check if the resource already exists 26 | set_fact: 27 | k8s_resource_exists: true 28 | when: item['metadata']['name'] == k8s_resource_name 29 | with_items: '{{ k8s_resource_kubectl["items"] }}' 30 | changed_when: false 31 | delegate_to: localhost 32 | connection: local 33 | run_once: true 34 | 35 | - name: create resource 36 | command: kubectl create -f {{ k8s_resource_file }} --namespace={{ k8s_resource_namespace|default("default") }} --context={{ kube_cluster_name }} 37 | when: k8s_resource_exists == false 38 | delegate_to: localhost 39 | connection: local 40 | run_once: true 41 | -------------------------------------------------------------------------------- /roles/kubectl-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy kubernetes ca 3 | local_action: 4 | module: copy 5 | src: "{{ inventory_dir }}/{{ kube_cluster_name }}-kubernetes-ca/" 6 | dest: "{{ lookup('env','HOME') }}/.kube/{{ kube_cluster_name }}-kubernetes-ca/" 7 | run_once: true 8 | 9 | - name: configure kubectl 10 | shell: | 11 | {{ lookup('env','HOME') }}/bin/kubectl config set-cluster {{ kube_cluster_name }} --server=https://{{ kube_master_dns_name|default(kube_master_ip)}}:6443 --certificate-authority={{ lookup('env','HOME') }}/.kube/{{ kube_cluster_name }}-kubernetes-ca/ca.pem 12 | {{ lookup('env','HOME') }}/bin/kubectl config set-credentials {{ kube_cluster_name }}-admin --certificate-authority={{ lookup('env','HOME') }}/.kube/{{ kube_cluster_name }}-kubernetes-ca/ca.pem --client-key={{ lookup('env','HOME') }}/.kube/{{ kube_cluster_name }}-kubernetes-ca/admin-key.pem --client-certificate={{ lookup('env','HOME') }}/.kube/{{ kube_cluster_name }}-kubernetes-ca/admin.pem 13 | {{ lookup('env','HOME') }}/bin/kubectl config set-context {{ kube_cluster_name }} --cluster={{ kube_cluster_name }} --user={{ kube_cluster_name }}-admin 14 | {{ lookup('env','HOME') }}/bin/kubectl config use-context {{ kube_cluster_name }} 15 | run_once: true 16 | delegate_to: 127.0.0.1 17 | connection: local 18 | -------------------------------------------------------------------------------- /roles/kubectl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: kubectl checksum 3 | local_action: 4 | module: shell 5 | args: "sha256sum ~/bin/kubectl | awk '{ print $1}'" 6 | run_once: yes 7 | register: current_kubectl_checksum 8 | changed_when: false 9 | 10 | - name: download kubectl 11 | local_action: 12 | module: get_url 13 | url: "https://storage.googleapis.com/kubernetes-release/release/v{{ kubernetes_version }}/bin/linux/amd64/kubectl" 14 | dest: ~/bin/ 15 | sha256sum: "{{ kubectl_checksum }}" 16 | mode: 0770 17 | run_once: yes 18 | when: kubectl_checksum != current_kubectl_checksum.stdout 19 | 20 | 21 | -------------------------------------------------------------------------------- /roles/kubernetes-certificates/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://coreos.com/kubernetes/docs/latest/openssl.html 3 | - name: create certificate directory 4 | local_action: file path={{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca state=directory 5 | 6 | - name: create ca 7 | shell: | 8 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca 9 | openssl genrsa -out
ca-key.pem 2048 10 | openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" 11 | args: 12 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca/ca-key.pem" 13 | delegate_to: localhost 14 | connection: local 15 | 16 | - name: template openssl.conf 17 | local_action: template src=openssl.cnf.tmpl dest={{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca/openssl.cnf 18 | register: openssl_conf 19 | 20 | - name: template openssl_worker.conf 21 | local_action: template src=openssl_worker.cnf.tmpl dest={{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca/openssl_worker.cnf 22 | register: worker_conf 23 | 24 | - name: Generate the API Server Keypair 25 | shell: | 26 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca 27 | openssl genrsa -out apiserver-key.pem 2048 28 | openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config openssl.cnf 29 | openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile openssl.cnf 30 | args: 31 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca/apiserver.pem" 32 | when: 33 | delegate_to: localhost 34 | connection: local 35 | 36 | - name: Generate the Kubernetes Worker Keypair 37 | shell: | 38 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca 39 | openssl genrsa -out worker-key.pem 2048 40 | openssl req -new -key worker-key.pem -out worker.csr -subj "/CN=kube-worker" -config openssl_worker.cnf 41 | openssl x509 -req -in worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out worker.pem -days 365 -extensions v3_req -extfile openssl_worker.cnf 42 | when: worker_conf|changed 43 | delegate_to: localhost 44 | connection: local 45 | 46 | - name: Generate the Cluster Administrator Keypair 47 | shell: | 48 | cd {{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca 49 | openssl genrsa -out admin-key.pem 2048 50 | openssl req -new -key admin-key.pem -out admin.csr -subj "/CN=kube-admin" 51 | openssl x509 -req -in admin.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin.pem -days 365 52 | args: 53 | creates: "{{ inventory_dir }}/{{ kube_cluster_name}}-kubernetes-ca/admin.pem" 54 | delegate_to: localhost 55 | connection: local 56 | 57 | - name: register kube ca 58 | set_fact: 59 | kube_ca: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/ca.pem' ) }}" 60 | kube_ca_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/ca-key.pem' ) }}" 61 | kube_apiserver_pem: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/apiserver.pem' ) }}" 62 | kube_apiserver_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/apiserver-key.pem' ) }}" 63 | kube_worker_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/worker-key.pem' ) }}" 64 | kube_worker_pem: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/worker.pem' ) }}" 65 | kube_admin_key: "{{ lookup('file', inventory_dir + '/' + kube_cluster_name + '-kubernetes-ca/admin.pem' ) }}" 66 | 67 | - name: debug 68 | debug: var={{ item }} 69 | with_items: 70 | - kube_ca 71 | - kube_ca_key 72 | - kube_apiserver_key 73 | - kube_apiserver_pem 74 | - kube_worker_key 75 | - kube_admin_key 76 | 77 | -------------------------------------------------------------------------------- /roles/kubernetes-certificates/templates/openssl.cnf.tmpl: 
-------------------------------------------------------------------------------- 1 | [req] 2 | req_extensions = v3_req 3 | distinguished_name = req_distinguished_name 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | basicConstraints = CA:FALSE 7 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 8 | subjectAltName = @alt_names 9 | [alt_names] 10 | DNS.1 = kubernetes 11 | DNS.2 = kubernetes.default 12 | DNS.3 = kubernetes.default.svc 13 | DNS.4 = kubernetes.default.svc.cluster.local 14 | DNS.5 = localhost 15 | {% if kube_master_dns_name is defined %}DNS.6 = {{ kube_master_dns_name }}{% endif %} 16 | 17 | IP.1 = {{ k8s_service_ip }} 18 | IP.2 = 127.0.0.1 19 | {% for host in groups['kubernetes'] %} 20 | IP.{{ loop.index + 2 }} = {{ host }} 21 | {% endfor %} 22 | -------------------------------------------------------------------------------- /roles/kubernetes-certificates/templates/openssl_worker.cnf.tmpl: -------------------------------------------------------------------------------- 1 | [req] 2 | req_extensions = v3_req 3 | distinguished_name = req_distinguished_name 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | basicConstraints = CA:FALSE 7 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 8 | subjectAltName = @alt_names 9 | [alt_names] 10 | DNS.1 = kubernetes 11 | DNS.2 = kubernetes.default 12 | {% if kube_master_dns_name is defined %}DNS.4 = {{ kube_master_dns_name }}{% endif %} 13 | 14 | {% for host in groups['kubernetes'] %} 15 | IP.{{ loop.index }} = {{ host }} 16 | {% endfor %} 17 | -------------------------------------------------------------------------------- /roles/label-node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: get list of nodes and labels 3 | command: kubectl get nodes --output=json 4 | changed_when: false 5 | delegate_to: localhost 6 | connection: local 7 | run_once: true 8 | register: label_kubectl_command 9 | 10 | - name: debug 11 | set_fact: 12 | label_kubectl_output: "{{ label_kubectl_command.stdout|from_json }}" 13 | delegate_to: localhost 14 | connection: local 15 | run_once: true 16 | 17 | #- name: debug 18 | # debug: 19 | # var: item #['metadata'] #['name'] 20 | # with_items: label_kubectl_output['items'] 21 | 22 | - name: check for node 23 | set_fact: 24 | node_to_label: "{{ item }}" 25 | when: item['metadata']['name'] == label_node 26 | with_items: '{{ label_kubectl_output["items"] }}' 27 | changed_when: false 28 | delegate_to: localhost 29 | connection: local 30 | run_once: true 31 | 32 | - name: assert that the node we need to label exists.
33 | assert: 34 | that: node_to_label is defined 35 | 36 | - name: debug node to label 37 | debug: 38 | var: node_to_label['metadata']['labels'] 39 | 40 | - name: label node 41 | command: kubectl label nodes {{ label_node }} {{ label_key }}="{{ label_value }}" 42 | delegate_to: localhost 43 | connection: local 44 | when: label_key not in node_to_label['metadata']['labels'] 45 | 46 | - name: label node 47 | command: kubectl label nodes --overwrite {{ label_node }} {{ label_key }}="{{ label_value }}" 48 | delegate_to: localhost 49 | connection: local 50 | when: node_to_label['metadata']['labels'][label_key] is defined and node_to_label['metadata']['labels'][label_key] != '{{ label_value }}' 51 | -------------------------------------------------------------------------------- /roles/label-node/templates/dns-addon.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeDNS" 10 | spec: 11 | selector: 12 | k8s-app: kube-dns 13 | clusterIP: {{ k8s_dns_service_ip }} 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | 22 | --- 23 | 24 | apiVersion: v1 25 | kind: ReplicationController 26 | metadata: 27 | name: kube-dns-v9 28 | namespace: kube-system 29 | labels: 30 | k8s-app: kube-dns 31 | version: v9 32 | kubernetes.io/cluster-service: "true" 33 | spec: 34 | replicas: 3 35 | selector: 36 | k8s-app: kube-dns 37 | version: v9 38 | template: 39 | metadata: 40 | labels: 41 | k8s-app: kube-dns 42 | version: v9 43 | kubernetes.io/cluster-service: "true" 44 | spec: 45 | containers: 46 | - name: etcd 47 | image: gcr.io/google_containers/etcd:2.0.9 48 | resources: 49 | limits: 50 | cpu: 100m 51 | memory: 50Mi 52 | command: 53 | - /usr/local/bin/etcd 54 | - -data-dir 55 | - /var/etcd/data 56 | - -listen-client-urls 57 | - http://127.0.0.1:2379,http://127.0.0.1:4001 58 | - -advertise-client-urls 59 | - http://127.0.0.1:2379,http://127.0.0.1:4001 60 | - -initial-cluster-token 61 | - skydns-etcd 62 | volumeMounts: 63 | - name: etcd-storage 64 | mountPath: /var/etcd/data 65 | - name: kube2sky 66 | image: gcr.io/google_containers/kube2sky:1.11 67 | resources: 68 | limits: 69 | cpu: 100m 70 | memory: 50Mi 71 | args: 72 | # command = "/kube2sky" 73 | - -domain=cluster.local 74 | - name: skydns 75 | image: gcr.io/google_containers/skydns:2015-03-11-001 76 | resources: 77 | limits: 78 | cpu: 100m 79 | memory: 50Mi 80 | args: 81 | # command = "/skydns" 82 | - -machines=http://localhost:4001 83 | - -addr=0.0.0.0:53 84 | - -domain={{ k8s_dns_domain }}. 
85 | ports: 86 | - containerPort: 53 87 | name: dns 88 | protocol: UDP 89 | - containerPort: 53 90 | name: dns-tcp 91 | protocol: TCP 92 | livenessProbe: 93 | httpGet: 94 | path: /healthz 95 | port: 8080 96 | scheme: HTTP 97 | initialDelaySeconds: 30 98 | timeoutSeconds: 5 99 | readinessProbe: 100 | httpGet: 101 | path: /healthz 102 | port: 8080 103 | scheme: HTTP 104 | initialDelaySeconds: 1 105 | timeoutSeconds: 5 106 | - name: healthz 107 | image: gcr.io/google_containers/exechealthz:1.0 108 | resources: 109 | limits: 110 | cpu: 10m 111 | memory: 20Mi 112 | args: 113 | - -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null 114 | - -port=8080 115 | ports: 116 | - containerPort: 8080 117 | protocol: TCP 118 | volumes: 119 | - name: etcd-storage 120 | emptyDir: {} 121 | dnsPolicy: Default 122 | 123 | -------------------------------------------------------------------------------- /roles/loadbalancer-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create loadbalancer config dir 3 | local_action: 4 | module: file 5 | path: "{{ inventory_dir}}/{{ kube_cluster_name }}-loadbalancer" 6 | state: directory 7 | 8 | - name: template loadbalancer daemonset 9 | local_action: 10 | module: template 11 | src: loadbalancer_ds.yml.tmpl 12 | dest: "{{ inventory_dir }}/{{ kube_cluster_name }}-loadbalancer/loadbalancer_ds.yml" 13 | -------------------------------------------------------------------------------- /roles/loadbalancer-config/templates/loadbalancer_ds.yml.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: service-reverse-proxy 5 | labels: 6 | app: service-reverse-proxy 7 | spec: 8 | template: 9 | metadata: 10 | labels: 11 | app: service-loadbalancer 12 | spec: 13 | nodeSelector: 14 | loadbalancer: "true" 15 | 16 | restartPolicy: Always 17 | containers: 18 | - image: quay.io/cornelius/kubernetes-reverseproxy:latest 19 | imagePullPolicy: Always 20 | name: service-reverse-proxy 21 | ports: 22 | # All http services 23 | - containerPort: 80 24 | hostPort: 80 25 | protocol: TCP 26 | - containerPort: 443 27 | hostPort: 443 28 | protocol: TCP 29 | resources: {} 30 | env: 31 | - name: ETCD 32 | # value: "{% for host in groups['etcd-node'] %}https://{{host}}:2380{%if not loop.last %},{% endif %}{% endfor %}" 33 | value: "https://{{groups['etcd-node'][0]}}:2379" 34 | - name: ETCD_CLEINT_CERT 35 | value: "/etc/confd/key.crt" 36 | - name: ETCD_CLEINT_KEY 37 | value: "/etc/confd/key.key" 38 | - name: ETCD_CLEINT_CA 39 | value: "/etc/confd/ca.crt" 40 | volumeMounts: 41 | - mountPath: /etc/confd/ca.crt 42 | name: etcd-ca 43 | 44 | - mountPath: /etc/confd/key.key 45 | name: etcd-client-key 46 | 47 | - mountPath: /etc/confd/key.crt 48 | name: etcd-client-crt 49 | 50 | volumes: 51 | - name: etcd-ca 52 | hostPath: 53 | path: /etc/ssl/etcd/ca.crt 54 | - name: etcd-client-key 55 | hostPath: 56 | path: /etc/ssl/etcd/key.key 57 | - name: etcd-client-crt 58 | hostPath: 59 | path: /etc/ssl/etcd/key.crt 60 | -------------------------------------------------------------------------------- /roles/loadbalancer/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - loadbalancer-config 4 | - { role: label-node, label_node: "{{ inventory_hostname }}", label_key: 'loadbalancer', label_value: 'true', when: "inventory_hostname in groups['loadbalancer']" } 
5 | - { role: k8s-resource, k8s_resource_name: service-reverse-proxy, k8s_resource_file: "{{ inventory_dir }}/{{ kube_cluster_name }}-loadbalancer/loadbalancer_ds.yml", k8s_resource_kind: daemonset, k8s_resource_namespace: default } 6 | 7 | -------------------------------------------------------------------------------- /roles/reboot/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: shutdown 2 | command: shutdown -r +1 3 | sudo: yes 4 | 5 | - name: wait a minute for reboot 6 | pause: seconds=75 7 | -------------------------------------------------------------------------------- /roles/safe_reboot/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: request safe reboot 2 | command: /opt/bin/update_needed.sh 3 | sudo: yes 4 | -------------------------------------------------------------------------------- /roles/unlabel-node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: get list of nodes and labels 3 | command: kubectl get nodes --output=json 4 | changed_when: false 5 | delegate_to: localhost 6 | connection: local 7 | run_once: true 8 | register: label_kubectl_command 9 | 10 | - name: parse node list from kubectl output 11 | set_fact: 12 | label_kubectl_output: "{{ label_kubectl_command.stdout|from_json }}" 13 | delegate_to: localhost 14 | connection: local 15 | run_once: true 16 | 17 | #- name: debug 18 | # debug: 19 | # var: item #['metadata'] #['name'] 20 | # with_items: label_kubectl_output['items'] 21 | 22 | - name: check for node 23 | set_fact: 24 | node_to_label: "{{ item }}" 25 | when: item['metadata']['name'] == label_node 26 | with_items: '{{ label_kubectl_output["items"] }}' 27 | changed_when: false 28 | delegate_to: localhost 29 | connection: local 30 | run_once: true 31 | 32 | - name: assert that the node we need to unlabel exists.
33 | assert: 34 | that: node_to_label is defined 35 | 36 | #- name: debug node to label 37 | # debug: 38 | # var: node_to_label['metadata']['labels'] 39 | 40 | 41 | - name: remove label from node 42 | command: kubectl label nodes {{ label_node }} {{ label_key }}- 43 | delegate_to: localhost 44 | connection: local 45 | when: label_key in node_to_label['metadata']['labels'] 46 | -------------------------------------------------------------------------------- /roles/unlabel-node/templates/dns-addon.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kube-dns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "KubeDNS" 10 | spec: 11 | selector: 12 | k8s-app: kube-dns 13 | clusterIP: {{ k8s_dns_service_ip }} 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | 22 | --- 23 | 24 | apiVersion: v1 25 | kind: ReplicationController 26 | metadata: 27 | name: kube-dns-v9 28 | namespace: kube-system 29 | labels: 30 | k8s-app: kube-dns 31 | version: v9 32 | kubernetes.io/cluster-service: "true" 33 | spec: 34 | replicas: 3 35 | selector: 36 | k8s-app: kube-dns 37 | version: v9 38 | template: 39 | metadata: 40 | labels: 41 | k8s-app: kube-dns 42 | version: v9 43 | kubernetes.io/cluster-service: "true" 44 | spec: 45 | containers: 46 | - name: etcd 47 | image: gcr.io/google_containers/etcd:2.0.9 48 | resources: 49 | limits: 50 | cpu: 100m 51 | memory: 50Mi 52 | command: 53 | - /usr/local/bin/etcd 54 | - -data-dir 55 | - /var/etcd/data 56 | - -listen-client-urls 57 | - http://127.0.0.1:2379,http://127.0.0.1:4001 58 | - -advertise-client-urls 59 | - http://127.0.0.1:2379,http://127.0.0.1:4001 60 | - -initial-cluster-token 61 | - skydns-etcd 62 | volumeMounts: 63 | - name: etcd-storage 64 | mountPath: /var/etcd/data 65 | - name: kube2sky 66 | image: gcr.io/google_containers/kube2sky:1.11 67 | resources: 68 | limits: 69 | cpu: 100m 70 | memory: 50Mi 71 | args: 72 | # command = "/kube2sky" 73 | - -domain=cluster.local 74 | - name: skydns 75 | image: gcr.io/google_containers/skydns:2015-03-11-001 76 | resources: 77 | limits: 78 | cpu: 100m 79 | memory: 50Mi 80 | args: 81 | # command = "/skydns" 82 | - -machines=http://localhost:4001 83 | - -addr=0.0.0.0:53 84 | - -domain={{ k8s_dns_domain }}.
85 | ports: 86 | - containerPort: 53 87 | name: dns 88 | protocol: UDP 89 | - containerPort: 53 90 | name: dns-tcp 91 | protocol: TCP 92 | livenessProbe: 93 | httpGet: 94 | path: /healthz 95 | port: 8080 96 | scheme: HTTP 97 | initialDelaySeconds: 30 98 | timeoutSeconds: 5 99 | readinessProbe: 100 | httpGet: 101 | path: /healthz 102 | port: 8080 103 | scheme: HTTP 104 | initialDelaySeconds: 1 105 | timeoutSeconds: 5 106 | - name: healthz 107 | image: gcr.io/google_containers/exechealthz:1.0 108 | resources: 109 | limits: 110 | cpu: 10m 111 | memory: 20Mi 112 | args: 113 | - -cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null 114 | - -port=8080 115 | ports: 116 | - containerPort: 8080 117 | protocol: TCP 118 | volumes: 119 | - name: etcd-storage 120 | emptyDir: {} 121 | dnsPolicy: Default 122 | 123 | -------------------------------------------------------------------------------- /roles/unsafe_reboot/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reboot server 3 | shell: sleep 5 && reboot & 4 | sudo: yes 5 | args: 6 | executable: /bin/bash 7 | async: 10 8 | poll: 0 9 | 10 | 11 | - name: waiting for server to go down 12 | local_action: 13 | module: wait_for 14 | host: "{{ inventory_hostname }}" 15 | port: 22 16 | delay: 1 17 | timeout: 60 18 | state: stopped 19 | 20 | - name: waiting for server to come back 21 | local_action: 22 | module: wait_for 23 | host: "{{ inventory_hostname }}" 24 | port: 22 25 | delay: 1 26 | timeout: 60 27 | - name: wait some time to let the server rejoin the etcd cluster 28 | pause: seconds={{ unsafe_reboot_dealay|default("300") }} 29 | 30 | -------------------------------------------------------------------------------- /roles/wait_for_k8s_node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: wait for k8s node to appear 3 | delegate_to: localhost 4 | connection: local 5 | shell: | 6 | #!/bin/bash 7 | n=0 8 | until [ $n -ge {{ wait_for_seconds }} ] 9 | do 10 | kubectl --namespace={{ wait_for_namespace|default("default") }} get {{ wait_for_resource_type }} {{ wait_for_resource_name }} && break # stop polling once the resource exists 11 | n=$((n+1)) 12 | echo "." 13 | sleep 1 14 | done 15 | kubectl --namespace={{ wait_for_namespace|default("default") }} get {{ wait_for_resource_type }} {{ wait_for_resource_name }} 16 | args: 17 | executable: /bin/bash 18 | 19 | -------------------------------------------------------------------------------- /update_cloudconfig.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update cloudconfig 3 | hosts: coreos 4 | serial: 1 5 | gather_facts: no 6 | user: core 7 | 8 | roles: 9 | - role: kubectl 10 | #- role: etcd-certificates 11 | - role: etcd-certs-cfssl 12 | - role: kubernetes-certificates 13 | - role: kubectl-config 14 | - role: extra-cas 15 | - ceph-on-kubernetes-config 16 | #- ceph-on-kubernetes-resources 17 | - role: cloud-config 18 | #- role: drain 19 | #- { role: unsafe_reboot, unsafe_reboot_dealay: 900 } 20 | - role: safe_reboot 21 | - calico 22 | 23 | --------------------------------------------------------------------------------
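Note on running the update playbook: update_cloudconfig.yml walks the coreos group one host at a time (serial: 1), regenerates certificates and the cloud-config, and then requests a safe reboot via the safe_reboot role so the etcd and ceph quorum is not lost. A minimal invocation sketch, assuming the inventory copied from inventory-hetzner.ini.sample was saved as inventory-hetzner.ini and kubectl plus the certificate tooling are already set up locally:

    ansible-playbook -i inventory-hetzner.ini update_cloudconfig.yml

To rehearse the rollout on a single machine first, ansible's standard --limit flag can be added, for example --limit node1.example.com (the hostname here is only a placeholder for one of your inventory hosts).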