├── .gitignore
├── Vagrantfile
├── bin
│   └── generate_ssh_config
├── bootstrap.yml
├── config.rb
├── inventory
│   └── vagrant
├── post.md
├── site.yml
├── user-data
└── website.yml

/.gitignore:
--------------------------------------------------------------------------------
.vagrant
ssh.config
ansible.cfg
roles
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# # vi: set ft=ruby :

require 'fileutils'

Vagrant.require_version ">= 1.6.0"

CLOUD_CONFIG_PATH = File.join(File.dirname(__FILE__), "user-data")
CONFIG = File.join(File.dirname(__FILE__), "config.rb")

# Defaults for config options defined in CONFIG
$num_instances = 1
$update_channel = "alpha"
$enable_serial_logging = false
$vb_gui = false
$vb_memory = 1024
$vb_cpus = 1

# Attempt to apply the deprecated environment variable NUM_INSTANCES to
# $num_instances while allowing config.rb to override it
if ENV["NUM_INSTANCES"].to_i > 0 && ENV["NUM_INSTANCES"]
  $num_instances = ENV["NUM_INSTANCES"].to_i
end

if File.exist?(CONFIG)
  require CONFIG
end

Vagrant.configure("2") do |config|
  # always use Vagrant's insecure key
  config.ssh.insert_key = false

  config.vm.box = "coreos-%s" % $update_channel
  config.vm.box_version = ">= 308.0.1"
  config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % $update_channel

  config.vm.provider :vmware_fusion do |vb, override|
    override.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant_vmware_fusion.json" % $update_channel
  end

  config.vm.provider :virtualbox do |v|
    # On VirtualBox, we don't have guest additions or a functional vboxsf
    # in CoreOS, so tell Vagrant that so it can be smarter.
    v.check_guest_additions = false
    v.functional_vboxsf = false
  end

  # plugin conflict
  if Vagrant.has_plugin?("vagrant-vbguest") then
    config.vbguest.auto_update = false
  end

  (1..$num_instances).each do |i|
    config.vm.define vm_name = "core-%02d" % i do |config|
      config.vm.hostname = vm_name

      if $enable_serial_logging
        logdir = File.join(File.dirname(__FILE__), "log")
        FileUtils.mkdir_p(logdir)

        serialFile = File.join(logdir, "%s-serial.txt" % vm_name)
        FileUtils.touch(serialFile)

        config.vm.provider :vmware_fusion do |v, override|
          v.vmx["serial0.present"] = "TRUE"
          v.vmx["serial0.fileType"] = "file"
          v.vmx["serial0.fileName"] = serialFile
          v.vmx["serial0.tryNoRxLoss"] = "FALSE"
        end

        config.vm.provider :virtualbox do |vb, override|
          vb.customize ["modifyvm", :id, "--uart1", "0x3F8", "4"]
          vb.customize ["modifyvm", :id, "--uartmode1", serialFile]
        end
      end

      if $expose_docker_tcp
        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

      config.vm.provider :vmware_fusion do |vb|
        vb.gui = $vb_gui
      end

      config.vm.provider :virtualbox do |vb|
        vb.gui = $vb_gui
        vb.memory = $vb_memory
        vb.cpus = $vb_cpus
      end

      ip = "172.12.8.#{i+100}"
      config.vm.network :private_network, ip: ip

      # Uncomment below to enable NFS for sharing the host machine into the coreos-vagrant VM.
      #config.vm.synced_folder ".", "/home/core/share", id: "core", :nfs => true, :mount_options => ['nolock,vers=3,udp']

      if File.exist?(CLOUD_CONFIG_PATH)
        config.vm.provision :file, :source => "#{CLOUD_CONFIG_PATH}", :destination => "/tmp/vagrantfile-user-data"
        config.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
      end

    end
  end
end
--------------------------------------------------------------------------------
/bin/generate_ssh_config:
--------------------------------------------------------------------------------
#!/bin/bash

cat > ansible.cfg << EOF
[ssh_connection]
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -F ssh.config
EOF

cat > ssh.config << EOF
Host *
  User core
  UserKnownHostsFile /dev/null
  StrictHostKeyChecking no
  PasswordAuthentication no
  IdentityFile $HOME/.vagrant.d/insecure_private_key
  IdentitiesOnly yes
  LogLevel FATAL
EOF
--------------------------------------------------------------------------------
/bootstrap.yml:
--------------------------------------------------------------------------------
- name: bootstrap coreos hosts
  hosts: coreos
  gather_facts: False
  roles:
    - defunctzombie.coreos-bootstrap
--------------------------------------------------------------------------------
/config.rb:
--------------------------------------------------------------------------------
# coreos-vagrant is configured through a series of configuration
# options (global ruby variables) which are detailed below. To modify
# these options, first copy this file to "config.rb". Then simply
# uncomment the necessary lines, leaving the $, and replace everything
# after the equals sign.

# Size of the CoreOS cluster created by Vagrant
$num_instances=1

# Official CoreOS channel from which updates should be downloaded
$update_channel='alpha'

# Log the serial consoles of CoreOS VMs to log/
# Enable by setting value to true, disable with false
# WARNING: Serial logging is known to result in extremely high CPU usage with
# VirtualBox, so should only be used in debugging situations
$enable_serial_logging=false

# Enable port forwarding of Docker TCP socket
# Set to the TCP port you want exposed on the *host* machine, default is 2375
# If 2375 is used, Vagrant will auto-increment (e.g. in the case of $num_instances > 1)
# You can then use the docker tool locally by setting the following env var:
# export DOCKER_HOST='tcp://127.0.0.1:2375'
$expose_docker_tcp=2375

# Settings for VirtualBox VMs
#$vb_gui = false
$vb_memory = 1024
$vb_cpus = 1
--------------------------------------------------------------------------------
/inventory/vagrant:
--------------------------------------------------------------------------------
## inventory file for vagrant machines
core-01 ansible_ssh_host=172.12.8.101

[web]
core-01

[coreos]
core-01

[coreos:vars]
ansible_python_interpreter="PATH=/home/core/bin:$PATH python"
--------------------------------------------------------------------------------
/post.md:
--------------------------------------------------------------------------------
# Managing CoreOS with Ansible

This post will cover basic techniques for managing [CoreOS][coreos] machines using [Ansible][ansible]. Familiarity with Ansible and a basic understanding of CoreOS are helpful in following along with this post.

## What is Ansible?

From the [Ansible Documentation](http://docs.ansible.com/):

> Ansible is an IT automation tool. It can configure systems, deploy software, and orchestrate more advanced IT tasks such as continuous deployments or zero downtime rolling updates.

At the most basic level, Ansible is a tool that will run sets of commands (typically over SSH) on remote boxes (called the **inventory**). These commands can be as simple as one-line shell statements, or use any of the built-in Ansible [**modules**](http://docs.ansible.com/modules_by_category.html) for common tasks like file copying, package management, system information, and more. Ansible ships with many useful modules and you can easily create your own.

## Why Ansible for CoreOS?

Ansible does not require a remote agent running on the target machine. It can perform all of its functions over a basic SSH connection.

CoreOS is a minimal Linux distribution meant for running containers. It does not ship with a package manager or many of the other common system elements one might expect coming from other, more desktop-oriented distributions.

Because Ansible does not require a remote agent, and because the CoreOS design favors running containers over installing system software directly on the machine, the two make a good fit.

## Getting Started

Before continuing, make sure that you have [Ansible installed](http://docs.ansible.com/intro_installation.html). If Ansible is properly installed, you should be able to run the following command in a shell.

```shell
$ ansible --version
ansible 1.8
```

We have prepared a sample repository with a [Vagrant][vagrant] file to demonstrate using Ansible with CoreOS locally. This is also a good way to test your **playbooks** (sets of Ansible commands) to make sure they are working as you expect. You will need to [install vagrant](https://docs.vagrantup.com/v2/installation/) to use these samples.

If you are already running Ansible and are familiar with launching CoreOS machines in existing cloud providers, you can skip these steps and jump to the [next section](#first_run).

```
$ git clone https://github.com/defunctzombie/coreos-ansible-example.git
$ cd coreos-ansible-example
$ vagrant up
-- wait for vagrant to finish booting the machine(s) --
$ ./bin/generate_ssh_config
```

This will boot a CoreOS machine and configure it with some basic networking. After the machine has booted, we will run a local script, *generate\_ssh\_config*, to create a configuration file for Ansible so that it knows how to access our machine over SSH.

## Inventory Setup [](#first_run)

The inventory file defines the hosts and groups. Our vagrant example has an inventory file in `inventory/vagrant` which we will use when configuring our example CoreOS hosts with Ansible.

```ini
## inventory file for vagrant machines
core-01 ansible_ssh_host=172.12.8.101

[web]
core-01
```

We only have one host, called `core-01`, and we have created a `web` group and listed our host under it. You can have any number of hosts and groups. Ansible even supports [dynamic inventory files](http://docs.ansible.com/intro_dynamic_inventory.html), which are what you are likely to use in large-scale production environments.

To run a quick test against our vagrant inventory (gathering facts with the `setup` module), execute the following command in your shell (from the project folder).

```shell
$ ansible -i inventory/vagrant all -m setup
```

If everything worked as expected you should see the following output.

```
core-01 | FAILED >> {
    "failed": true,
    "msg": "/bin/sh: /usr/bin/python: No such file or directory\r\n",
    "parsed": false
}
```

The command has failed. To understand why this happened and how to fix it, let's take a closer look at how Ansible runs commands on remote machines.

## Getting Ansible Running

When you run Ansible commands or playbooks, Ansible will SSH into the remote machine, copy over the module code (python), and run the module using the arguments specified in the playbook.

The target machine must have a python interpreter for Ansible to be able to execute these modules and thus configure your machine.

CoreOS is designed for running containers and does not ship with a python interpreter. Additionally, it has no package manager to install python. This presents a small chicken-and-egg problem.

Luckily, Ansible has a `raw` execution mode which bypasses python modules and runs shell commands directly on a remote system. We will leverage this feature to bootstrap a lightweight python interpreter onto our CoreOS hosts. Once the hosts are bootstrapped with python, playbooks can leverage the myriad of provided Ansible modules to perform system tasks like starting services, installing python libraries, and managing docker containers.
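
For example, you can verify that raw mode already works against the vagrant hosts before any bootstrapping has happened. The command below is just a quick sanity check, not a step performed by the bootstrap role itself:

```shell
# the raw module bypasses Ansible's python modules entirely,
# so it works even on a bare CoreOS host with no python installed
$ ansible -i inventory/vagrant all -m raw -a "cat /etc/os-release"
```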

Edit the `inventory/vagrant` file and add the following items at the end.

```
[coreos]
core-01

[coreos:vars]
ansible_ssh_user=core
ansible_python_interpreter="PATH=/home/core/bin:$PATH python"
```

This will configure Ansible to look for `python` and `pip` in `/home/core/bin` and use that interpreter for all hosts in the `coreos` group. Without this, Ansible will try to use `/usr/bin/python`, which does not exist on our CoreOS hosts.

To bootstrap our CoreOS hosts, we will use the [coreos-bootstrap][ansible-coreos-bootstrap] role.

Install the role using the following command.

```
$ ansible-galaxy install defunctzombie.coreos-bootstrap -p ./roles
```

Now we can run the provided `bootstrap.yml` playbook using Ansible.

```
$ ansible-playbook -i inventory/vagrant bootstrap.yml
```

Once this command has completed, we can run our original Ansible setup command and see a list of the gathered facts.

```shell
$ ansible -i inventory/vagrant all -m setup
core-01 | success >> {
    "ansible_facts": {
        "ansible_all_ipv4_addresses": [
            "172.17.42.1",
            "10.0.2.15",
            "172.12.8.101"
        ],
        ...
```

Take a moment to look at `bootstrap.yml` and `site.yml`. Notice that `bootstrap.yml` is included first. Your own Ansible scripts will similarly need a `bootstrap.yml` or equivalent playbook which configures CoreOS hosts before running other playbooks.

## Example Playbook

Once Ansible can run successfully on your CoreOS hosts, you can do things like start system services or launch docker containers.

The `website.yml` file shows a small example. It starts the `etcd` service, then installs the `docker-py` library using `pip`, and finally uses the Ansible [docker module](http://docs.ansible.com/docker_module.html) to launch a container on the `web` host group.

```yml
- name: example nginx website
  hosts: web
  sudo: true
  tasks:
    - name: Start etcd
      service: name=etcd.service state=started

    - name: Install docker-py
      pip: name=docker-py

    - name: pull container
      raw: docker pull nginx:1.7.1

    - name: launch nginx container
      docker:
        image="nginx:1.7.1"
        name="example-nginx"
        ports="8080:80"
        state=running
```

Run this playbook with the following command.

```shell
$ ansible-playbook -i inventory/vagrant website.yml
```

You can now open [http://172.12.8.101:8080](http://172.12.8.101:8080) and see the default nginx landing page.

You are now ready to create more plays to configure your CoreOS hosts. Plays can be leveraged for many tasks like automated app deployment, cluster management, and more.

## Tips

### Local Docker Registry

Downloading images from remote registries can be time- and bandwidth-consuming. You can speed up deployments to a set of machines by running a local registry for a cluster and having the other machines pull from that registry. This can be easily automated with roles and playbooks.

### Install pip modules in a common playbook

If you will be using the docker module, consider installing docker-py via the pip module in a playbook that runs on all hosts and comes after the bootstrap.yml playbook, as sketched below.
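
A minimal sketch of what that shared playbook could look like, assuming you keep the `coreos` group from the inventory above (the file name and play name here are illustrative, not part of the example repository):

```yml
# common.yml -- hypothetical playbook, run after bootstrap.yml and before app playbooks
- name: install shared python dependencies
  hosts: coreos
  tasks:
    - name: Install docker-py so the docker module can talk to the Docker daemon
      pip: name=docker-py
```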

### Prefer containers over local binaries

Avoid over-configuring the CoreOS hosts with too many locally installed tools and tweaks. In many cases, containerized services will do the job just as well and can be granted limited access to the underlying host.

You can even monitor host processes by bind mounting the relevant paths into a container.


[vagrant]: https://www.vagrantup.com/
[coreos]: https://coreos.com/
[ansible]: http://www.ansible.com/home
[ansible-coreos-bootstrap]: https://github.com/defunctzombie/ansible-coreos-bootstrap
[toolbox]: https://github.com/coreos/toolbox
--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
- include: bootstrap.yml
- include: website.yml
--------------------------------------------------------------------------------
/user-data:
--------------------------------------------------------------------------------
#cloud-config

coreos:
  units:
    - name: docker-tcp.socket
      command: start
      enable: true
      content: |
        [Unit]
        Description=Docker Socket for the API

        [Socket]
        ListenStream=2375
        Service=docker.service
        BindIPv6Only=both

        [Install]
        WantedBy=sockets.target
--------------------------------------------------------------------------------
/website.yml:
--------------------------------------------------------------------------------
- name: example nginx website
  hosts: web
  tasks:
    - name: Start etcd
      service: name=etcd.service state=started
      sudo: yes
      sudo_user: root

    - name: Install docker-py
      pip: name=docker-py version=1.2.3

    - name: pull container
      raw: docker pull nginx:1.7.1

    - name: launch nginx container
      docker:
        image="nginx:1.7.1"
        name="example-nginx"
        ports="8080:80"
        state=started
--------------------------------------------------------------------------------