├── .gitignore ├── .rspec ├── .travis.yml ├── CHANGELOG.md ├── Gemfile ├── LICENCE ├── README.md ├── Rakefile ├── bin ├── console └── setup ├── doc ├── declarative.md └── programmatic.md ├── lib ├── locales │ └── en.yml └── vagrant │ ├── compose.rb │ └── compose │ ├── config.rb │ ├── declarative │ └── cluster.rb │ ├── errors.rb │ ├── node.rb │ ├── plugin.rb │ ├── programmatic │ ├── cluster.rb │ └── node_group.rb │ └── version.rb └── vagrant-compose.gemspec /.gitignore: -------------------------------------------------------------------------------- 1 | # OS-specific 2 | .DS_Store 3 | 4 | # editors 5 | *.swp 6 | 7 | # Bundler/Rubygems 8 | *.gem 9 | .bundle 10 | pkg/* 11 | tags 12 | Gemfile.lock 13 | 14 | # Vagrant 15 | .vagrant 16 | Vagrantfile 17 | 18 | # Test 19 | provisioning 20 | *.yaml -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --format doc --order random --color --fail-fast -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: ruby 2 | rvm: 3 | - 2.0.0 4 | before_install: gem install bundler -v 1.10.5 5 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 0.7.5 (May 20, 2017) 2 | 3 | * fix #15 can't make the alises to work (using Declarative Approach) 4 | 5 | # 0.7.4 (May 20, 2017) 6 | 7 | * fix multimachine_filter error with commands without vmname 8 | 9 | # 0.7.3 (May 20, 2017) 10 | 11 | * PR Fix issue calling provision without filter #13 (now mutlimachine filter works properly even if mixed with other POSIX options) 12 | * breaking change: removed cluster.multimachine_filter property (use instead config.multimachine_filter) 13 | 14 | # 0.7.2 (November 16, 2016) 15 | 16 | * issues #11 Allow management of Ansible vars for all hosts 17 | * issues #9 Create group_vars and host_vars directory only if necessary 18 | 19 | * breaking change: custom group of groups all_groups:children removed. Insteal use all (automatically created by ansible) 20 | 21 | # 0.7.1 (November 04, 2016) 22 | 23 | * Minor fix 24 | 25 | # 0.7.0 (November 02, 2016) 26 | 27 | * introduced support for declarative cluster definition 28 | 29 | # 0.2.4 (June 26, 2016) 30 | 31 | * issues #3 Now vagrant up and vagrant provision support also a list of machine name / regular expressions. 32 | * pr #3 Support changing ansible_playbook_path & clean up path management 33 | 34 | NB. breaking change 35 | ansible_group_vars_path and ansible_host_vars_path are not supported anymore 36 | 37 | # 0.2.3 (April 2, 2016) 38 | 39 | * Now custer name can be omitted (thanks to jaydoane) 40 | 41 | other changes: 42 | * Documented cluster.debug feature 43 | * Improved code inline documentation 44 | 45 | # 0.2.2 (December 31, 2015) 46 | 47 | breaking changes! 48 | * nodes instances number defined into node method (instances attributes removed) 49 | * cluster.nodes return only nodes (before nodes with index were returned) 50 | 51 | other changes: 52 | * Improved documentation. 53 | * cluster domain now is optional 54 | * nodes code block now is optional 55 | * improved detection of multimachine_filter 56 | * minor fixes 57 | 58 | # 0.1.0 (December 27, 2015) 59 | 60 | * Initial release. 
61 | 
62 | 
63 | 
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
 1 | source 'https://rubygems.org'
 2 | 
 3 | # Specify your gem's dependencies in vagrant-compose.gemspec
 4 | gemspec
 5 | 
 6 | group :development do
 7 |   # We depend on Vagrant for development, but we don't add it as a
 8 |   # gem dependency because we expect to be installed within the
 9 |   # Vagrant environment itself using `vagrant plugin`.
10 |   gem "vagrant", :git => "https://github.com/mitchellh/vagrant.git"
11 | end
12 | 
13 | group :plugins do
14 |   gem "vagrant-compose" , path: "."
15 | end
16 | 
--------------------------------------------------------------------------------
/LICENCE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | Copyright (c) 2015 Fabrizio Pandini
3 | 
4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
5 | 
6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
7 | 
8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # vagrant-compose
 2 | 
 3 | A Vagrant plugin that helps build complex scenarios with many VMs.
 4 | 
 5 | Each VM is a node in the cluster.
 6 | Typically, nodes in a cluster are grouped by type, and each group of nodes has different characteristics, software stacks and configuration.
 7 | 
 8 | For instance, if you are setting up an environment for testing [Consul](https://consul.io/), your cluster will be composed of:
 9 | 
10 | - consul server nodes
11 | - consul agent nodes
12 | 
13 | Vagrant-compose streamlines the definition of complex multi-VM scenarios, and also provides support for straightforward provisioning of nodes with Ansible.
14 | 
15 | ## Installation
16 | 
17 | Install the plugin following the typical Vagrant procedure:
18 | 
19 | ```
20 | $ vagrant plugin install vagrant-compose
21 | ```
22 | 
23 | The declarative approach (see below) additionally requires the vagrant-playbook python package, which can be installed with
24 | 
25 | ```
26 | $ pip install vagrant-playbook
27 | ```
28 | 
29 | # Composing a cluster
30 | Vagrant-compose supports two approaches for defining a cluster of VMs.
31 | 
32 | - Programmatic Approach
33 | 
34 |   Clusters are defined using the same Ruby knowledge that is required for writing Vagrantfiles.
35 | 
36 |   See [Programmatic Approach](https://github.com/fabriziopandini/vagrant-compose/blob/master/doc/programmatic.md) for more details.
37 | 
38 | - Declarative Approach
39 | 
40 |   The declarative approach lets people with limited programming background use vagrant-compose to easily define a cluster composed of many VMs; the definition of the cluster is written in YAML, and the Ruby programming part within the Vagrantfile is reduced to the minimum.
41 | 
42 |   See [Declarative Approach](https://github.com/fabriziopandini/vagrant-compose/blob/master/doc/declarative.md) for more details.
43 | 
44 | # Additional notes
45 | Vagrant-compose plays nicely with all vagrant commands.
46 | 
47 | For instance, when using vagrant to target a single machine, e.g. `vagrant up mesos-master1`, the `cluster.ansible_groups` variable will include only the given machine.
48 | 
49 | Happy vagrant-compose!
50 | 
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
 1 | require 'rubygems'
 2 | require 'bundler/setup'
 3 | require 'rspec/core/rake_task'
 4 | 
 5 | # Immediately sync all stdout so that tools like buildbot can
 6 | # immediately load in the output.
 7 | $stdout.sync = true
 8 | $stderr.sync = true
 9 | 
10 | # Change to the directory of this file.
11 | Dir.chdir(File.expand_path("../", __FILE__))
12 | 
13 | # This installs the tasks that help with gem creation and
14 | # publishing.
15 | Bundler::GemHelper.install_tasks
16 | 
17 | # Install the `spec` task so that we can run tests.
18 | RSpec::Core::RakeTask.new
19 | 
20 | # Default task is to run the unit tests
21 | task :default => "spec"
--------------------------------------------------------------------------------
/bin/console:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env ruby
 2 | 
 3 | require "bundler/setup"
 4 | require "vagrant/compose"
 5 | 
 6 | # You can add fixtures and/or initialization code here to make experimenting
 7 | # with your gem easier. You can also use a different console, if you like.
 8 | 
 9 | # (If you use this, don't forget to add pry to your Gemfile!)
10 | # require "pry"
11 | # Pry.start
12 | 
13 | require "irb"
14 | IRB.start
15 | 
--------------------------------------------------------------------------------
/bin/setup:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 | IFS=$'\n\t'
4 | 
5 | bundle install
6 | 
7 | # Do any other automated setup that you need to do here
8 | 
--------------------------------------------------------------------------------
/doc/declarative.md:
--------------------------------------------------------------------------------
 1 | # Declarative Approach
 2 | 
 3 | Vagrant requires some Ruby knowledge, because the Vagrantfile itself is based on Ruby, and this sometimes is an obstacle for people with limited programming background.
 4 | 
 5 | This cannot be avoided, but by using the declarative approach also people with limited programming background can use vagrant-compose to easily define a cluster composed of many VMs.
 6 | 
 7 | With the declarative approach, the definition of the cluster is done in YAML, and the Ruby programming part within the Vagrantfile is reduced to the minimum.
 8 | 
 9 | ## Quick start
10 | 
11 | Create a yaml file containing the definition of a cluster named `kubernetes` with one `master` node and three `minions` nodes.
12 | 13 | ```yaml 14 | kubernetes: 15 | master: 16 | instances: 1 17 | minions: 18 | instances: 3 19 | ``` 20 | 21 | Then create the following `Vagrantfile` for parsing the above yaml file 22 | 23 | ```ruby 24 | Vagrant.configure(2) do |config| 25 | #load cluster definition 26 | config.cluster.from("mycluster.yaml") 27 | 28 | #cluster creation 29 | config.cluster.nodes.each do |node, index| 30 | config.vm.define "#{node.boxname}" do |node_vm| 31 | node_vm.vm.box = "#{node.box}" 32 | end 33 | end 34 | end 35 | ``` 36 | 37 | The first part of the `Vagrantfile` contains the command for parsing the cluster definition: 38 | 39 | ```ruby 40 | config.cluster.from("mycluster.yaml") 41 | ``` 42 | 43 | The second part of the `Vagrantfile` creates the cluster by defining a vm in VirtualBox for each node in the cluster: 44 | 45 | ```ruby 46 | config.cluster.nodes.each do |node, index| 47 | config.vm.define "#{node.boxname}" do |node_vm| 48 | node_vm.vm.box = "#{node.box}" 49 | end 50 | end 51 | ``` 52 | 53 | If you run `vagrant up` you will get a 4 node cluster with following machines, based on `ubuntu/trusty64` base box (default). 54 | 55 | - `master1` 56 | - `minion1` 57 | - `minion2` 58 | - `minion3` 59 | 60 | Done ! 61 | 62 | Of course, real-word scenarios are more complex; it is necessary to get more control in configuring the cluster topology and machine attributes, and finally you need also to implement automatic provisioning of software stack installed in the machines. 63 | 64 | See following chapters for more details. 65 | 66 | ## Configuring the cluster 67 | 68 | For instance, if you are setting up an environment for testing [Kubernetes](http://kubernetes.io/), your cluster will be composed by: 69 | 70 | - a group of nodes for kubernetes master roles (masters) 71 | - a group of nodes for deploying pods (minions) 72 | 73 | Using the declarative approach, the above cluster can be defined using a yaml file, and using vagrant-compose plugin, it very easy to instruct Vagrant create a separate VM for each of the above node. 74 | 75 | ### Defining cluster and cluster attributes 76 | 77 | The outer element of the yaml file should be an object with the cluster name: 78 | 79 | ````yaml 80 | kubernetes: 81 | ... 82 | ```` 83 | 84 | Inside the cluster element, cluster attributes ca be defined: 85 | 86 | ```yaml 87 | kubernetes: 88 | box: ubuntu/trusty64 89 | domain: test 90 | ... 91 | ``` 92 | 93 | Valid cluster attributes are defined in the following list; if an attribute is not provided, the default value apply. 94 | - **`box`** 95 | The value/value generator to be used for assigning a base box to nodes in the cluster; it defaults to ubuntu/trusty64. 96 | NB. this attribute acts as defaults for all node groups, but each node group can override it. 97 | - **`node_prefix`** 98 | A prefix to be added before each node name / box name; it defaults to empty string (no prefix) 99 | - **`domain`** 100 | The network domain to wich the cluster belongs. It will be used for computing nodes fqdn; it defaults to vagrant 101 | 102 | ### Defining set of nodes 103 | 104 | A cluster can be composed by one or more set of nodes; each set of nodes represent a group of one or more nodes with similar characteristics. 105 | 106 | Node groups are defined as elements within the cluster element: 107 | 108 | ```yaml 109 | kubernetes: 110 | ... 111 | masters: 112 | ... 113 | minions: 114 | ... 115 | ... 
116 | ``` 117 | 118 | Each node group element can contain a list of attributes; attributes defined at node group level will act as templete/value generator for the same attribute for each node within the group. 119 | 120 | Available node group attributes are defined in the following list; if an attribute is not provided, the default value apply. 121 | 122 | - **`instances`** 123 | The number of nodes in the node group; default value equals to 1. 124 | 125 | - **`box`** 126 | The value/value generator to be used for assigning a base box to nodes in the node group; default value equals to**`cluster.box`** attribute. 127 | 128 | - **`boxname`** 129 | The value/value generator to be used for assigning a boxname to nodes in the node group; default value is the following expression: 130 | 131 | **`"{% if cluster_node_prefix %}{{cluster_node_prefix}}-{% endif %}{{group_name}}{{node_index + 1}}"`** 132 | 133 | - **`hostname`** 134 | The value/value generator to be used for assigning a hostname to nodes in the node group; default value is the following expression: 135 | 136 | **`"{{boxname}}"`** 137 | 138 | - **`fqdn`** 139 | 140 | The value/value generator to be used for assigning a fqdn to nodes in the node group; default value is the following expression: 141 | 142 | **`"{{hostname}}{% if cluster_domain %}.{{cluster_domain}}{% endif %}"`** 143 | 144 | - **`aliases`** 145 | The value/value generator to be used for assigning an alias/a list of aliases to nodes in the node group; default value is an empty list. 146 | 147 | - **`ip`** 148 | The value/value generator to be used for assigning an ip address to nodes in the node group; default value is the following expression: 149 | 150 | **`"172.31.{{group_index}}.{{100 + node_index + 1}}"`** 151 | 152 | - **`cpus`** 153 | The value/value generator to be used for defining the number of vCPU for nodes in the node group; default value is 1. 154 | 155 | - **`memory`** 156 | The value/value generator to be used for defining the quantity of memory assigned to nodes in the node group; default value is 256. 157 | 158 | - **`attributes`** 159 | 160 | The value/value generator to be used for defining additional attributes for nodes in the node group; default value is an empty dictionary. 161 | 162 | Please note that each attributes can be set to: 163 | 164 | - A literal value, like for instance `"ubuntu/trusty64" or 256. Such value will be inherited - without changes - by all nodes in the node group. 165 | - A [Jinja2](http://jinja.pocoo.org/docs/dev/) expressions, afterwards value_generator, that will be executed when building the nodes in the node group. 166 | ​ 167 | Jinja2 expressions are described in http://jinja.pocoo.org/docs/dev/templates/ ; on top of out of the box functions/filters defined in Jinja2, it is allowed usage of functions/filters defined in Ansible, as documented in http://docs.ansible.com/ansible/playbooks_filters.html. 
168 | 
169 | Each expression will be executed within an execution context where a set of variables is made available by the vagrant-playbook processors:
170 | - cluster_name
171 | - cluster_node_prefix
172 | - cluster_domain
173 | - group_index (the index of the nodegroup within the cluster, zero based)
174 | - group_name
175 | - node_index (the index of the node within the nodegroup, zero based)
176 | - additionally, all attributes already computed for this node group will be presented as variables (attributes are computed in the following order: box, boxname, hostname, aliases, fqdn, ip, cpus, memory, ansible_groups, attributes; for instance, when executing the expression for computing the hostname attribute, the box and boxname values are already available as variables)
177 | 
178 | ## Composing nodes
179 | 
180 | The yaml file containing the cluster definition can be used within a Vagrantfile as a recipe for building a cluster with a VM for each node.
181 | 
182 | ``` ruby
183 | Vagrant.configure(2) do |config|
184 |   ...
185 |   config.cluster.from("mycluster.yaml")
186 |   ...
187 | end
188 | ```
189 | The above command will compose the cluster, transforming node groups into nodes, and store them in the `config.cluster.nodes` variable; each node has the following attributes assigned according to the value/value generators defined at node group level in the yaml file:
190 | 
191 | - **box**
192 | - **boxname**
193 | - **hostname**
194 | - **fqdn**
195 | - **aliases**
196 | - **ip**
197 | - **cpus**
198 | - **memory**
199 | - **attributes**
200 | 
201 | Two additional attributes will be automatically set for each node:
202 | 
203 | - **index**, [integer (zero based)], uniquely assigned to each node in the cluster
204 | - **group_index**, [integer (zero based)], uniquely assigned to each node in a set of nodes
205 | 
206 | ## Creating nodes
207 | 
208 | Given the list of nodes stored in the `config.cluster.nodes` variable, it is possible to create a multi-machine environment by iterating over the list:
209 | 
210 | ``` ruby
211 | config.cluster.nodes.each do |node|
212 |   ...
213 | end
214 | ```
215 | 
216 | Within the loop you can instruct vagrant to create machines based on attributes of the current node; for instance, you can define a VM in VirtualBox (the default Vagrant provider) and use the [vagrant-hostmanager](https://github.com/smdahlen/vagrant-hostmanager) plugin to set the hostname into the guest machine:
217 | 
218 | ``` ruby
219 | config.cluster.nodes.each do |node|
220 |   config.vm.define "#{node.boxname}" do |node_vm|
221 |     node_vm.vm.box = "#{node.box}"
222 |     node_vm.vm.network :private_network, ip: "#{node.ip}"
223 |     node_vm.vm.hostname = "#{node.fqdn}"
224 |     node_vm.hostmanager.aliases = node.aliases unless node.aliases.empty?
225 |     node_vm.vm.provision :hostmanager
226 | 
227 |     node_vm.vm.provider "virtualbox" do |vb|
228 |       vb.name = "#{node.boxname}"
229 |       vb.memory = node.memory
230 |       vb.cpus = node.cpus
231 |     end
232 |   end
233 | end
234 | ```
235 | 
236 | > In order to increase performance of node creation, you can leverage the support for linked clones introduced by Vagrant 1.8.1.
Add the following line to the above script: 237 | > 238 | > vb.linked_clone = true if Vagrant::VERSION =~ /^1.8/ 239 | 240 | [vagrant-hostmanager](https://github.com/smdahlen/vagrant-hostmanager) requires following additional settings before the `config.cluster.nodes.each` command: 241 | 242 | ``` ruby 243 | config.hostmanager.enabled = false 244 | config.hostmanager.manage_host = true 245 | config.hostmanager.include_offline = true 246 | ``` 247 | 248 | ## Configuring ansible provisioning 249 | 250 | The vagrant-compose plugin provides support for a straight forward provisioning of nodes in the cluster implemented with Ansible. 251 | 252 | ### Defining ansible_groups 253 | 254 | Each set of nodes, and therefore all the nodes within the set, can be assigned to one or more ansible_groups. 255 | 256 | In the following example, `masters` nodes will be part of `etcd` and `docker` ansible_groups. 257 | 258 | ```yaml 259 | kubernetes: 260 | ... 261 | masters: 262 | ... 263 | ansible_groups: 264 | - etcd 265 | - docker 266 | ... 267 | minions: 268 | ... 269 | ... 270 | ``` 271 | 272 | This configuration is used by the `config.cluster.from(…)` method in order to define an **inventory file** with all nodes; the resulting list of ansible_groups, each with its own list of host is stored in the `config.cluster.ansible_groups` variable. 273 | 274 | Please note that the possibility to assign a node to one or more groups introduces an high degree of flexibility, as well as the capability to add nodes in different node groups to the same ansible_groups. 275 | 276 | Ansible can leverage on ansible_groups for providing machines with the required software stacks. 277 | NB. you can see resulting ansible_groups by using `debug` command with `verbose` equal to `true`. 278 | 279 | ### Defining group vars 280 | 281 | In Ansible, the inventory file is usually integrated with a set of variables containing settings that will influence playbooks behaviour for all the host in a group. 282 | 283 | The vagrant-compose plugin allows you to define one or more group_vars generator for each ansible_groups; 284 | 285 | ```yaml 286 | kubernetes: 287 | ansible_playbook_path: ... 288 | ... 289 | masters: 290 | ansible_groups: 291 | - etcd 292 | - docker 293 | ... 294 | minions: 295 | ... 296 | ... 297 | ansible_group_vars: 298 | etcd: 299 | var1: ... 300 | docker: 301 | var2: ... 302 | var3: ... 303 | ... 304 | ``` 305 | 306 | Group vars can be set to literal value or to Jinja2 value generators, that will be executed during the parse of the yaml file; each Jinja2 expression will be executed within an execution context where a set of varibles is made available by the vagrant-playbook processors: 307 | 308 | - **context_vars** see below 309 | - **nodes**, list of nodes in the ansible_group to which the group_vars belong 310 | 311 | Additionally it is possible to set variables for all groups/all hosts, by setting vars for the pre-defined `all` group of groups: 312 | 313 | ```yaml 314 | kubernetes: 315 | ... 316 | ansible_group_vars: 317 | all: 318 | var1: ... 319 | ... 320 | ``` 321 | 322 | Ansible group vars will be stored into yaml files saved into `{cluster.ansible_playbook_path}\group_vars` folder. 323 | 324 | The variable `cluster.ansible_playbook_path` defaults to the current directory (the directory of the Vagrantfile) + `/provisioning`; this value can be changed like any other cluster attributes (see Defining cluster & cluster attributes). 
325 | 326 | ### Defining host vars 327 | 328 | While group vars will influence playbooks behaviour for all hosts in a group, in Ansible host vars will influence playbooks behaviour for a specific host. 329 | 330 | The vagrant-compose plugin allows to define one or more host_vars generator for each ansible_groups; 331 | 332 | ```yaml 333 | kubernetes: 334 | ansible_playbook_path: ... 335 | ... 336 | masters: 337 | ansible_groups: 338 | - etcd 339 | - docker 340 | ... 341 | minions: 342 | ... 343 | ... 344 | ansible_group_vars: 345 | ... 346 | ansible_host_vars: 347 | etcd : 348 | var5: ... 349 | docker : 350 | var6: ... 351 | var7: ... 352 | ... 353 | ``` 354 | 355 | Host vars can be set to literal value or to Jinja2 value generators, that will be executed during the parse of the yaml file; each Jinja2 expression will be executed within an execution context where a set of varibles is made available by the vagrant-playbook processors: 356 | 357 | - **context_vars** see below 358 | - **node**, the node in the ansible_group to which host_vars belongs 359 | 360 | Ansible host vars will be stored into yaml files saved into `{cluster.ansible_playbook_path}\host_vars` folder. 361 | 362 | ### Context vars 363 | 364 | Group vars and host var generation by design can operate only with the set of information that comes with a groups of nodes or a single node. 365 | 366 | However, sometimes, it is necessary to share some information across group of nodes. 367 | This can be achieved by setting one or more context_vars generator for each ansible_groups. 368 | 369 | ```yaml 370 | kubernetes: 371 | ansible_playbook_path: ... 372 | ... 373 | masters: 374 | ansible_groups: 375 | - etcd 376 | - docker 377 | ... 378 | minions: 379 | ... 380 | ... 381 | context_vars: 382 | etcd : 383 | var8: ... 384 | docker : 385 | var9: ... 386 | var10: ... 387 | ansible_group_vars: 388 | ... 389 | ansible_host_vars: 390 | ... 391 | ... 392 | ``` 393 | 394 | Context vars can be set to literal value or to Jinja2 value generators, that will be executed during the parse of the yaml file; each Jinja2 expression will be executed within an execution context where a set of varibles is made available by the vagrant-playbook processors: 395 | 396 | - nodes, list of nodes in the ansible_group to which the group_vars belong 397 | 398 | > Context_vars generator are always executed before group_vars and host_vars generators; the resulting context, is given in input to group_vars and host_vars generators. 399 | 400 | > In addition to context vars for groups, it is possible to create context_vars for all groups/all hosts, by setting vars for the pre-defined `all` group of groups; in this case, intuitively, the list of nodes whitin the context contains all the nodes. 401 | 402 | Then, you can use the above context var when generating group_vars for host vars. 403 | 404 | ```yaml 405 | kubernetes: 406 | box: centos/7 407 | master: 408 | ... 409 | ansible_groups: 410 | - kb8-master 411 | minions: 412 | ... 413 | ansible_groups: 414 | - kb8-minions 415 | 416 | ansible_context_vars: 417 | all: 418 | var0: "{{ nodes | count }}" 419 | kb8-master: 420 | var1: "{{ nodes | count }}" 421 | 422 | ansible_group_vars: 423 | all: 424 | var0_from_context: "{{ context['var0'] }}" 425 | kb8-master: 426 | var1_from_context: "{{ context['var1'] }}" 427 | ``` 428 | 429 | ### Group of groups 430 | 431 | A useful ansible inventory feature is [group of groups](http://docs.ansible.com/ansible/intro_inventory.html#hosts-and-groups). 
432 | 433 | By default ansible has a group named `[all]` with all nodes in the cluster. 434 | 435 | If you need higher control on groups of groups you can simply add a new item to the variable `config.cluster.ansible_groups` before creating nodes. 436 | 437 | For instance: 438 | ```ruby 439 | config.cluster.ansible_groups['k8s-cluster:children'] = ['kube-master', 'kube-nodes'] 440 | ``` 441 | 442 | Please note that you can use this approach also for setting group variables directly into the inventory file using :vars (see ansible documentation). 443 | 444 | ## Creating nodes (with provisioning) 445 | 446 | Given `config.cluster.ansible_groups` variable, generated group_vars and host_vars files, and of course an ansible playbook, it is possible to integrate provisioning into the node creation sequence. 447 | 448 | NB. The example uses ansible parallel execution (all nodes are provisioned together in parallel after completing node creation). 449 | 450 | ``` ruby 451 | config.cluster.from("mycluster.yaml") 452 | ... 453 | config.cluster.nodes.each do |node| 454 | config.vm.define "#{node.boxname}" do |node_vm| 455 | ... 456 | if node.index == config.cluster.nodes.size - 1 457 | node_vm.vm.provision "ansible" do |ansible| 458 | ansible.limit = 'all' # enable parallel provisioning 459 | ansible.playbook = "provisioning/playbook.yml" 460 | ansible.groups = config.cluster.ansible_groups 461 | end 462 | end 463 | end 464 | end 465 | ``` 466 | 467 | -------------------------------------------------------------------------------- /doc/programmatic.md: -------------------------------------------------------------------------------- 1 | # Programmatic Approach 2 | 3 | Vagrant requires some ruby knowledge, because the Vagrantfile itself is based on ruby. 4 | 5 | With the support of vagrant-compose, and leveraging on the same programming skills it is possible to define a cluster composed by many VMs. 6 | 7 | ## Quick start 8 | 9 | Create the following `Vagrantfile` for implementing a multi-machine scenario that defines a cluster named `test` with 3 `consul-server` nodes. 10 | 11 | ``` ruby 12 | Vagrant.configure(2) do |config| 13 | #cluster definition 14 | config.cluster.compose('test') do |c| 15 | c.nodes(3, 'consul-server') 16 | end 17 | 18 | #cluster creation 19 | config.cluster.nodes.each do |node, index| 20 | config.vm.define "#{node.boxname}" do |node_vm| 21 | node_vm.vm.box = "#{node.box}" 22 | end 23 | end 24 | end 25 | ``` 26 | 27 | The first part of the `Vagrantfile` contains the definition of the `test` cluster: 28 | 29 | ``` ruby 30 | config.cluster.compose('test') do |c| 31 | ... 32 | end 33 | ``` 34 | 35 | Please note that the cluster definition, is followed by a block of code that allows to configure the cluster itself; in this example the configuration consists in defining a set of 3 `consul-server` nodes. 36 | 37 | ``` ruby 38 | c.nodes(3, 'consul-server') 39 | ``` 40 | 41 | When the definition of the cluster is completed, behind the scene vagrant-compose take care of composing the cluster, and the resulting list of nodes will be available in the `config.cluster.nodes` variable. 
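If you want to peek at what was composed before defining any machines, a minimal sketch like the following can be dropped into the Vagrantfile right after the compose block (the printed attributes reflect the defaults described later in this document; alternatively, `config.cluster.debug` prints a similar summary):

``` ruby
# Optional inspection sketch: print each composed node.
# Assumes the default boxname/ip generators described in this document.
config.cluster.nodes.each do |node|
  puts "#{node.boxname} -> #{node.ip} (box: #{node.box})"
end
```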
42 | 43 | The second part of the `Vagrantfile` creates the cluster by defining a vm in VirtualBox for each node in the cluster: 44 | 45 | ``` ruby 46 | config.cluster.nodes.each do |node, index| 47 | config.vm.define "#{node.boxname}" do |node_vm| 48 | node_vm.vm.box = "#{node.box}" 49 | end 50 | end 51 | ``` 52 | 53 | If you run `vagrant up` you will get a 3 node cluster with following machines, based on `ubuntu/trusty64` base box (default). 54 | 55 | - `test-consul-server1` 56 | - `test-consul-server2` 57 | - `test-consul-server3` 58 | 59 | Done ! 60 | 61 | Of course, real-word scenarios are more complex; it is necessary to get more control in configuring the cluster topology and machine attributes, and finally you need also to implement automatic provisioning of software stack installed in the machines. 62 | 63 | See following chapters for more details. 64 | 65 | ## Configuring the cluster 66 | 67 | Each cluster can be named passing a value to `cluster.compose` method, and the default behaviour is that name of vagrant boxes and hostnames will be prefixed by such name; if cluster name will be set to nil or "", vagrant boxes and hostnames will be composed without prefix. 68 | 69 | Apart for cluster name, there are several options to customize the cluster definition. 70 | 71 | ### Defining cluster attributes 72 | 73 | Cluster attributes apply to all the node in the cluster. 74 | 75 | You can set set cluster attributes in the block of code that is passed as a second parameter to the `cluster.compose` method, as show in the following example: 76 | 77 | ``` ruby 78 | config.cluster.compose('test') do |c| 79 | c.box = "centos/7" 80 | ... 81 | end 82 | ``` 83 | 84 | Following cluster attributes are available: 85 | 86 | - **box**, [String], default = 'ubuntu/trusty64' 87 | 88 | Sets the base box for nodes, a.k.a the image that will be used to spin up the machine; please note that the base box can be customized also for each set of nodes (see Defining set of nodes). 89 | 90 | 91 | - **domain**, [String], default = 'vagrant' 92 | 93 | Sets the domain used for computing the nodes in the cluster; if the `domain` value is set to `nil` or `““` (empty string), the fully qualified name and the hostname of each nodes will be the same. 94 | 95 | ### Defining set of nodes 96 | 97 | A cluster can be composed by one or more set of nodes. 98 | 99 | Each set of nodes represent a group of one or more nodes with similar characteristics. For instance, in a cluster defined for testing [Consul](https://consul.io/), you will get at least two set of nodes: 100 | 101 | - Consul server nodes 102 | - Consul agent nodes 103 | 104 | Set of nodes can be defined in the block of code that is passed as a second parameter to the `cluster.compose` method, by using the `nodes` method as show in the following example: 105 | 106 | ``` ruby 107 | config.cluster.compose('test') do |c| 108 | ... 109 | c.nodes(3, 'consul-agents') 110 | ... 111 | end 112 | ``` 113 | 114 | The first parameter of the `nodes` method is the number of nodes in the set, while the second parameter is the name of the set; `nodes` accepts an optional third parameter, allowing to define a block of code where it is possible to customize several attributes of the set of nodes itself: 115 | 116 | ``` ruby 117 | config.cluster.compose('test') do |c| 118 | ... 119 | c.nodes(3, 'zookeeper') do |n| 120 | n.box = "centos/7" 121 | end 122 | ... 
123 | end
124 | ```
125 | 
126 | Please note that all the available attributes can be set to:
127 | 
128 | - A literal value, like for instance `"centos/7"`. This value will be inherited - without changes - by all nodes in the set.
129 | 
130 | - A block of code, hereafter called a value_generator, that will be executed when building the nodes in the set. When calling the block of code, three parameters will be given:
131 | 
132 |   - **group_index**, [integer (zero based)], uniquely assigned to each set of nodes
133 |   - **group_name**, [String], with the name of the set of nodes
134 |   - **node_index**, [integer (zero based)], uniquely assigned to each node in the set
135 | 
136 | An example of a value_generator is the following lambda expression, which computes the hostname for each node in the cluster (`test-consul-server1`, `test-consul-server2`, etc.):
137 | 
138 | ``` ruby
139 | lambda { |group_index, group_name, node_index|
140 |   return "#{group_name}#{node_index + 1}"
141 | }
142 | ```
143 | 
144 | The following node set attributes are available:
145 | 
146 | - **box**, [String|String_Generator], default = `cluster.box`
147 | 
148 |   Sets the base box used for creating nodes in this set.
149 | 
150 | - **boxname**, [String|String_Generator], default = `"#{group_name}#{node_index + 1}"`
151 | 
152 |   Sets the box name (a.k.a. the name of the machine in VirtualBox/VMware) for each node in this set.
153 |   Note: when generating nodes, if the cluster name is not nil or an empty string, the resulting boxname will be automatically prefixed by `"#{cluster_name}-"`.
154 | 
155 | - **hostname**, [String|String_Generator], default = `"#{group_name}#{node_index + 1}"`
156 | 
157 |   Sets the hostname for each node in this set.
158 | 
159 |   Note: when generating nodes, if the cluster name is not nil or an empty string, the resulting hostname will be automatically prefixed by `"#{cluster_name}-"`; additionally, the **fqdn** attribute will be computed by concatenating `".#{cluster.domain}"`, if defined (if `domain` is not defined, fqdn will be the same as the hostname).
160 | 
161 | - **aliases**, [Array(String)|Array(String)_Generator], default = `[]`
162 | 
163 |   Allows to provide aliases for each node in this set.
164 | 
165 |   Note: when generating nodes, aliases will be automatically concatenated into a comma-separated string.
166 | 
167 | - **ip**, [String|String_Generator], default = `"172.31.#{group_index}.#{100 + node_index + 1}"`
168 | 
169 |   Sets the ip for each node in this set.
170 | 
171 | - **cpus**, [Integer|Integer_Generator], default = `1`
172 | 
173 |   Sets the number of cpus for each node in this set.
174 | 
175 | - **memory**, [Integer|Integer_Generator], default = `256` (MB)
176 | 
177 |   Sets the memory allocated for each node in this set.
178 | 
179 | - **attributes**, [Hash(String, obj)|Hash(String, obj)_Generator], default = `{}`
180 | 
181 |   Allows to provide custom additional attributes for each node in this set.
182 | 
183 | > Please note that some attributes, like boxname, hostname, ip, *must* be different for each node in the set (and in the cluster).
184 | >
185 | > Use value_generators for those attributes.
186 | 
187 | ### Composing nodes
188 | 
189 | By executing the code blocks provided to the `cluster.compose` method, and also the inner code blocks provided to `nodes` calls, the vagrant-compose plugin composes the cluster topology as the sum of all the nodes generated by each set.
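For instance, a definition with two sets like the sketch below composes into five nodes (`test-consul-server1..3` and `test-consul-agent1..2`); the `centos/7` box, the memory/cpu values and the aliases lambda are illustrative choices, not defaults:

``` ruby
config.cluster.compose('test') do |c|
  c.box    = "centos/7"        # cluster-wide default box (illustrative)
  c.domain = "test.local"      # used when computing each node fqdn (illustrative)

  c.nodes(3, 'consul-server') do |n|
    n.memory = 512
    # value_generator: give only the first server an extra alias (illustrative)
    n.aliases = lambda { |group_index, group_name, node_index|
      return node_index == 0 ? ['consul-bootstrap'] : []
    }
  end

  c.nodes(2, 'consul-agent') do |n|
    n.cpus = 2
  end
end
```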
190 | 191 | The resulting list of nodes is stored in the `config.cluster.nodes` variable; each node has following attributes assigned using value/value generators: 192 | 193 | - **box** 194 | - **boxname** 195 | - **hostname** 196 | - **fqdn** 197 | - **aliases** 198 | - **ip** 199 | - **cpus** 200 | - **memory** 201 | - **attributes** 202 | 203 | Two additional attributes will be automatically set for each node: 204 | 205 | - **index**, [integer (zero based)], uniquely assigned to each node in the cluster 206 | - **group_index**, [integer (zero based)], uniquely assigned to each node in a set of nodes 207 | 208 | ## Checking cluster configuration 209 | 210 | It is possible to check the resulting list of nodes by using the `debug` command: 211 | 212 | ``` ruby 213 | Vagrant.configure(2) do |config| 214 | #cluster definition 215 | config.cluster.compose('test') do |c| 216 | ... 217 | end 218 | 219 | config.cluster.debug 220 | end 221 | ``` 222 | 223 | Main information about nodes will be printed into the sequence of vagrant messages that is generated afeter each vagrant command, like f.i. `vagrant status`. 224 | 225 | `debug` accepts also a parameter `verbose', which defaults to `false`; by changing this parameter you can get all the information about nodes. 226 | 227 | ## Creating nodes 228 | 229 | Given the list of nodes stored in the `config.cluster.nodes` variable, it is possible to create a multi-machine environment by iterating over the list: 230 | 231 | ``` ruby 232 | config.cluster.nodes.each do |node| 233 | ... 234 | end 235 | ``` 236 | 237 | Within the cycle you can instruct vagrant to create machines based on attributes of the current node; for instance, you can define a VM in VirtualBox (default Vagrant provider); the example uses the [vagrant-hostmanager](https://github.com/smdahlen/vagrant-hostmanager) plugin to set the hostname into the guest machine: 238 | 239 | ``` ruby 240 | config.cluster.nodes.each do |node| 241 | config.vm.define "#{node.boxname}" do |node_vm| 242 | node_vm.vm.box = "#{node.box}" 243 | node_vm.vm.network :private_network, ip: "#{node.ip}" 244 | node_vm.vm.hostname = "#{node.fqdn}" 245 | node_vm.hostmanager.aliases = node.aliases unless node.aliases.empty? 246 | node_vm.vm.provision :hostmanager 247 | 248 | node_vm.vm.provider "virtualbox" do |vb| 249 | vb.name = "#{node.boxname}" 250 | vb.memory = node.memory 251 | vb.cpus = node.cpus 252 | end 253 | end 254 | end 255 | ``` 256 | 257 | > In order to increase performance of node creation, you can leverage on support for linked clones introduced by Vagrant 1.8.1. Add the following line to the above script: 258 | > 259 | > vb.linked_clone = true if Vagrant::VERSION =~ /^1.8/ 260 | 261 | Hostmanager requires following additional settings before the `config.cluster.nodes.each` command: 262 | 263 | ``` ruby 264 | config.hostmanager.enabled = false 265 | config.hostmanager.manage_host = true 266 | config.hostmanager.include_offline = true 267 | ``` 268 | 269 | ## Configuring ansible provisioning 270 | 271 | The vagrant-compose plugin provides support for a straight forward provisioning of nodes in the cluster implemented with Ansible. 272 | 273 | ### Defining ansible_groups 274 | 275 | Each set of nodes, and therefore all the nodes within the set, can be assigned to one or more ansible_groups. 276 | 277 | In the following example, `consul-agent` nodes will be part of `consul` and `docker` ansible_groups. 278 | 279 | ``` ruby 280 | c.nodes(3, 'consul-agent') do |n| 281 | ... 
282 |   n.ansible_groups = ['consul', 'docker']
283 | end
284 | ```
285 | 
286 | This configuration is used by the `cluster.compose` method in order to define an **inventory file** where nodes (hosts, in Ansible terms) are clustered in groups; the resulting list of ansible_groups, each with its own list of hosts, is stored in the `config.cluster.ansible_groups` variable.
287 | 
288 | The Ansible playbook will use groups for providing different software stacks to different machines.
289 | 
290 | Please note that the possibility to assign a node to one or more groups introduces a high degree of flexibility; for instance, it is easy to change the topology of the cluster above when it is required to implement an http load balancer based on consul service discovery:
291 | 
292 | ``` ruby
293 | c.nodes(3, 'consul-agent') do |n|
294 |   ...
295 |   n.ansible_groups = ['consul', 'docker', 'registrator']
296 | end
297 | c.nodes(1, 'load-balancer') do |n|
298 |   ...
299 |   n.ansible_groups = ['consul', 'docker', 'consul-template', 'nginx']
300 | end
301 | ```
302 | 
303 | As you can see, the `consul` and `docker` ansible_groups now include nodes from both the `consul-agent` and `load-balancer` node sets; vice versa, other groups like `registrator`, `consul-template`, `nginx` contain nodes from only one of the two node sets.
304 | 
305 | The Ansible playbook can additionally leverage groups for providing machines with the required software stacks.
306 | 
307 | > NB. you can see the resulting ansible_groups by using the `debug` command with `verbose` equal to `true`.
308 | 
309 | ### Defining group vars
310 | 
311 | In Ansible, the inventory file is usually integrated with a set of variables containing settings that will influence playbook behaviour for all the hosts in a group.
312 | 
313 | The vagrant-compose plugin allows you to define one or more group_vars generators for each ansible_group; group_vars generators are code blocks that will be instantiated during `cluster.compose` with two input parameters:
314 | 
315 | - **context_vars** see below
316 | - **nodes**, list of nodes in the ansible_group
317 | 
318 | Expected output type is `Hash(String, Obj)`.
319 | 
320 | For instance, when building a [Consul](https://consul.io/) cluster, all the `consul-server` nodes have to be configured with the same `bootstrap_expect` parameter, which must be set to the number of `consul-server` nodes in the cluster:
321 | 
322 | ``` ruby
323 | config.cluster.compose('test') do |c|
324 |   ...
325 |   c.ansible_group_vars['consul-server'] = lambda { |context, nodes|
326 |     return { 'consul_bootstrap_expect' => nodes.length }
327 |   }
328 |   ...
329 | end
330 | ```
331 | 
332 | Additionally, it is possible to set variables for all groups/all hosts, by setting vars for the pre-defined `all` group of groups:
333 | 
334 | ``` ruby
335 | config.cluster.compose('test') do |c|
336 |   ...
337 |   c.ansible_group_vars['all'] = lambda { |context, nodes|
338 |     return { 'var0' => nodes.length }
339 |   }
340 |   ...
341 | end
342 | ```
343 | 
344 | Ansible group vars will be stored into yaml files saved into the `{cluster.ansible_playbook_path}/group_vars` folder.
345 | 
346 | The variable `cluster.ansible_playbook_path` defaults to the current directory (the directory of the Vagrantfile) + `/provisioning`; this value can be changed like any other cluster attribute (see Defining cluster attributes).
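For example, a minimal sketch for storing the generated files under a different playbook folder (this assumes the attribute is exposed as `ansible_playbook_path` on the cluster object, as the sentence above implies; adjust the path to your layout):

``` ruby
config.cluster.compose('test') do |c|
  # Write group_vars/host_vars under ./ansible instead of ./provisioning (illustrative path).
  c.ansible_playbook_path = File.join(File.dirname(__FILE__), 'ansible')
  # ... node set definitions and group_vars generators as in the previous examples ...
end
```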
347 | 348 | ### Defining host vars 349 | 350 | While group vars will influence playbooks behaviour for all hosts in a group, in Ansible host vars will influence playbooks behaviour for a specific host. 351 | 352 | The vagrant-compose plugin allows to define one or more host_vars generator for each ansible_groups; host_vars generators are code block that will be instantiated during `cluster.compose` with two input parameters: 353 | 354 | - **context_vars** see below 355 | - **node**, one node in the ansible_group 356 | 357 | Expected output type is `Hash(String, Obj)`. 358 | 359 | For instance, when building a [Consul](https://consul.io/) cluster, all the `consul-server` nodes should be configured with the ip to which Consul will bind client interfaces: 360 | 361 | ``` ruby 362 | config.cluster.compose('test') do |c| 363 | ... 364 | c.ansible_host_vars['consul-server'] = lambda { |context, node| 365 | return { 'consul_client_ip' => node.ip } 366 | } 367 | ... 368 | end 369 | ``` 370 | 371 | Ansible host vars will be stored into yaml files saved into `{cluster.ansible_playbook_path}\host_vars` folder. 372 | 373 | ### Context vars 374 | 375 | Group vars and host var generation by design can operate only with the set of information that comes with a groups of nodes or a single node. 376 | 377 | However, sometimes, it is necessary to share some information across group of nodes. 378 | This can be achieved by setting one or more context_vars generator for each ansible_groups. 379 | 380 | For instance, when building a [Consul](https://consul.io/) cluster, all the `consul-agent` nodes should be configured with the ip - the list of ip - to be used when joining the cluster; such list can be generated from the list of nodes in the `consul-server` set of nodes, and stored in a context_vars: 381 | 382 | ``` ruby 383 | config.cluster.compose('test') do |c| 384 | ... 385 | c.ansible_context_vars['consul-server'] = lambda { |context, nodes| 386 | return { 'consul-serverIPs' => nodes.map { |n| n.ip }.to_a } 387 | } 388 | ... 389 | end 390 | ``` 391 | 392 | > Context_vars generator are always executed before group_vars and host_vars generators; the resulting context, is given in input to group_vars and host_vars generators. 393 | 394 | > In addition to context vars for groups, it is possible to create context_vars for all groups/all hosts, by setting vars for the pre-defined `all` group of groups; in this case, intuitively, the list of nodes whitin the context contains all the nodes. 395 | 396 | Then, you can use the above context var when generating group_vars for nodes in the `consul-agent` group. 397 | 398 | ``` ruby 399 | config.cluster.compose('test') do |c| 400 | ... 401 | c.ansible_context_vars['consul-server'] = lambda { |context, nodes| 402 | return { 'serverIPs' => nodes.map { |n| n.ip }.to_a } 403 | } 404 | c.ansible_group_vars['consul-agent'] = lambda { |context, nodes| 405 | return { 'consul_joins' => context['consul-serverIPs'] } 406 | } 407 | ... 408 | end 409 | ``` 410 | 411 | ### Group of groups 412 | A useful ansible inventory feature is [group of groups](http://docs.ansible.com/ansible/intro_inventory.html#hosts-and-groups). 413 | 414 | By default ansible has a group named `[all]` with all the nodes defined in cluster configuration. 415 | 416 | If you need higher control on groups of groups you can simply add a new item to the variable `config.cluster.ansible_groups` before creating nodes. 
417 | 418 | For instance: 419 | ```ruby 420 | config.cluster.ansible_groups['k8s-cluster:children'] = ['kube-master', 'kube-nodes'] 421 | ``` 422 | 423 | Please note that you can use this approach also for setting group variables directly into the inventory file using :vars (see ansible documentation). 424 | 425 | ## Creating nodes (with provisioning) 426 | 427 | Given `config.cluster.ansible_groups` variable, generated group_vars and host_vars files, and of course an ansible playbook, it is possible to integrate provisioning into the node creation sequence. 428 | 429 | NB. The example uses ansible parallel execution (all nodes are provisioned together in parallel after completing node creation). 430 | 431 | ``` ruby 432 | config.cluster.nodes.each do |node| 433 | config.vm.define "#{node.boxname}" do |node_vm| 434 | ... 435 | if node.index == config.cluster.nodes.size - 1 436 | node_vm.vm.provision "ansible" do |ansible| 437 | ansible.limit = 'all' # enable parallel provisioning 438 | ansible.playbook = "provisioning/playbook.yml" 439 | ansible.groups = config.cluster.ansible_groups 440 | end 441 | end 442 | end 443 | end 444 | 445 | 446 | ``` 447 | 448 | -------------------------------------------------------------------------------- /lib/locales/en.yml: -------------------------------------------------------------------------------- 1 | en: 2 | vagrant_compose: 3 | already_status: |- 4 | The machine is already %{status}. 5 | errors: 6 | initialize_error: |- 7 | Error executing initialize code for cluster => %{cluster_name}. 8 | The error message is shown below: 9 | %{message} 10 | 11 | attribute_expression_error: |- 12 | Error generating attribute => %{attribute} for node => %{node_index} in group => %{node_group_name}. 13 | The error message is shown below: 14 | %{message} 15 | 16 | Attribute expression expected to be a literal or a code block returning a literal (see documentation). 17 | 18 | context_var_expression_error: |- 19 | Error generating ansible context vars for ansible_group => %{ansible_group} 20 | The error message is shown below: 21 | %{message} 22 | 23 | Ansible context var expression expected to be a Hash literal or a code block returning an Hash literal (see documentation). 24 | 25 | group_var_expression_error: |- 26 | Error generating ansible group vars for ansible_group => %{ansible_group} 27 | The error message is shown below: 28 | %{message} 29 | 30 | Ansible group var expression expected to be a Hash literal or a code block returning an Hash literal (see documentation). 31 | 32 | host_var_expression_error: |- 33 | Error generating ansible host vars for host => %{host} in ansible_group => %{ansible_group} 34 | The error message is shown below: 35 | %{message} 36 | 37 | Ansible host var expression expected to be a Hash literal or a code block returning an Hash literal (see documentation). 38 | 39 | pycompose_missing: |- 40 | Utitity pycompose missing. see documentation. 41 | 42 | pycompose_error: |- 43 | Error running cluster playbook: 44 | %{message} 45 | -------------------------------------------------------------------------------- /lib/vagrant/compose.rb: -------------------------------------------------------------------------------- 1 | require "pathname" 2 | 3 | require "vagrant/compose/plugin" 4 | 5 | module VagrantPlugins 6 | module Compose 7 | lib_path = Pathname.new(File.expand_path("../compose", __FILE__)) 8 | autoload :Errors, lib_path.join("errors") 9 | 10 | # This returns the path to the source of this plugin. 
11 | # 12 | # @return [Pathname] 13 | def self.source_root 14 | @source_root ||= Pathname.new(File.expand_path("../../", __FILE__)) 15 | end 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /lib/vagrant/compose/config.rb: -------------------------------------------------------------------------------- 1 | require "vagrant" 2 | 3 | require_relative "programmatic/cluster" 4 | require_relative "declarative/cluster" 5 | 6 | module VagrantPlugins 7 | module Compose 8 | 9 | # Vagrant compose plugin definition class. 10 | # This plugins allows easy configuration of a data structure that can be used as a recipe 11 | # for setting up and provisioning a vagrant cluster composed by several machines with different 12 | # roles. 13 | class Config < Vagrant.plugin("2", :config) 14 | 15 | # After executing compose, it returns the list of nodes in the cluster. 16 | attr_reader :nodes 17 | 18 | # After executing compose, it returns the ansible_groups configuration for provisioning nodes in the cluster. 19 | attr_reader :ansible_groups 20 | 21 | def initialize 22 | @cluster = nil 23 | @nodes = {} 24 | @ansible_groups ={} 25 | @multimachine_filter = getMultimachine_filter() # detect if running vagrant up/provision MACHINE 26 | end 27 | 28 | # Implements cluster creation, through the execution of the give code. 29 | def compose (name, &block) 30 | # create the cluster (the data structure representing the cluster) 31 | @cluster = VagrantPlugins::Compose::Programmatic::Cluster.new(name) 32 | begin 33 | # executes the cluster configuration code 34 | block.call(@cluster) 35 | rescue Exception => e 36 | raise VagrantPlugins::Compose::Errors::ClusterInitializeError, :message => e.message, :cluster_name => name 37 | end 38 | # tranform cluster configuration into a list of nodes/ansible groups to be used for 39 | @nodes, inventory = @cluster.compose 40 | @ansible_groups = filterInventory(inventory) 41 | end 42 | 43 | # Implements cluster creation 44 | def from (playbook_file) 45 | # create the cluster (the data structure representing the cluster) 46 | @cluster = VagrantPlugins::Compose::Declarative::Cluster.new() 47 | 48 | # executes the vagrant playbook 49 | @nodes, inventory = @cluster.from(playbook_file) 50 | @ansible_groups = filterInventory(inventory) 51 | end 52 | 53 | #filter ansible groups if vagrant command specify filters and maps to a list of hostnames 54 | def filterInventory(inventory) 55 | ansible_groups = {} 56 | inventory.each do |group, hosts| 57 | ansible_groups[group] = [] 58 | hosts.each do |host| 59 | if filterBoxname(host['boxname']) 60 | ansible_groups[group] << host['hostname'] 61 | end 62 | end 63 | end 64 | 65 | return ansible_groups 66 | end 67 | 68 | def filterBoxname(boxname) 69 | if @multimachine_filter.length > 0 70 | @multimachine_filter.each do |name| 71 | if pattern = name[/^\/(.+?)\/$/, 1] 72 | # This is a regular expression filter, so we convert to a regular 73 | # expression check for matching. 74 | regex = Regexp.new(pattern) 75 | return boxname =~ regex 76 | else 77 | # filter name, just look for a specific VM 78 | return boxname == name 79 | end 80 | end 81 | else 82 | # No filter was given, so we return every VM in the order 83 | # configured. 84 | return true 85 | end 86 | end 87 | 88 | # Implements a utility method that allows to check the list of nodes generated by compose. 
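    # When verbose is true, all node attributes and the resulting ansible_groups (after any multi-machine filter) are printed as well.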
89 | def debug(verbose = false) 90 | puts "==> cluster #{@cluster.name} with #{nodes.size} nodes" 91 | 92 | if not verbose 93 | @nodes.each do |node| 94 | puts " #{node.boxname} accessible as #{node.fqdn} #{node.aliases} #{node.ip} => [#{node.box}, #{node.cpus} cpus, #{node.memory} memory]" 95 | end 96 | else 97 | puts "- nodes" 98 | @nodes.each do |node| 99 | puts "" 100 | puts " - #{node.boxname}" 101 | puts " box #{node.box}" 102 | puts " boxname #{node.boxname}" 103 | puts " hostname #{node.hostname}" 104 | puts " fqdn #{node.fqdn}" 105 | puts " aliases #{node.aliases}" 106 | puts " ip #{node.ip}" 107 | puts " cpus #{node.cpus}" 108 | puts " memory #{node.memory}" 109 | puts " ansible_groups #{node.ansible_groups}" 110 | puts " attributes #{node.attributes}" 111 | puts " index #{node.index}" 112 | puts " group_index #{node.group_index}" 113 | end 114 | 115 | filter = " (NB. filtered by #{@multimachine_filter})" if not @multimachine_filter.empty? 116 | puts "" 117 | puts "- ansible_groups #{filter}" 118 | 119 | @ansible_groups.each do |group, hosts| 120 | puts "" 121 | puts " - #{group}" 122 | hosts.each do |host| 123 | puts " - #{host}" 124 | end 125 | end 126 | end 127 | puts "" 128 | end 129 | 130 | def getMultimachine_filter 131 | if ARGV.length <= 1 132 | return [] 133 | # commands with vmname filter 134 | elsif ["destroy", "halt", "port", "provision", 135 | "reload", "resume", "ssh", "ssh_config", 136 | "status", "suspend", "up"].include?(ARGV[0].downcase) 137 | 138 | args = OptionParser.new do |o| 139 | # Options for all commands with vmname 140 | # vagrant/plugins/commands/destroy/command.rb 141 | # "-f", "--force" 142 | # vagrant/plugins/commands/halt/command.rb 143 | # "-f", "--force" 144 | # vagrant/plugins/commands/port/command.rb 145 | # "--guest PORT", 146 | # "--machine-readable" 147 | # vagrant/plugins/commands/provision/command.rb 148 | # "--provision-with x,y,z" 149 | # vagrant/plugins/commands/reload/command.rb 150 | # "--[no-]provision" 151 | # "--provision-with x,y,z" 152 | # vagrant/plugins/commands/resume/command.rb 153 | # "--[no-]provision" 154 | # "--provision-with x,y,z" 155 | # vagrant/plugins/commands/ssh/command.rb 156 | # "-c", "--command COMMAND" 157 | # "-p", "--plain" 158 | # vagrant/plugins/commands/ssh_config/command.rb 159 | # "--host NAME" 160 | # vagrant/plugins/commands/status/command.rb 161 | # vagrant/plugins/commands/suspend/command.rb 162 | # vagrant/plugins/commands/up/command.rb 163 | # "--[no-]destroy-on-error" 164 | # "--[no-]parallel" 165 | # "--provider PROVIDER" 166 | # "--[no-]install-provider" 167 | # "--[no-]provision" 168 | # "--provision-with x,y,z" 169 | 170 | o.on("-f", "--force", "Destroy without confirmation.") 171 | o.on("--guest PORT", "Output the host port that maps to the given guest port") 172 | o.on("--machine-readable", "Display machine-readable output") 173 | o.on("--provision-with x,y,z", Array, "Enable only certain provisioners, by type or by name.") 174 | o.on("--[no-]provision", "Enable or disable provisioning") 175 | o.on("-c", "--command COMMAND", "Execute an SSH command directly") 176 | o.on("-p", "--plain", "Plain mode, leaves authentication up to user") 177 | o.on("--host NAME", "Name the host for the config") 178 | o.on("--[no-]destroy-on-error", "Destroy machine if any fatal error happens (default to true)") 179 | o.on("--[no-]parallel", "Enable or disable parallelism if provider supports it") 180 | o.on("--provider PROVIDER", String, "Back the machine with a specific provider") 181 | 
o.on("--[no-]install-provider", "If possible, install the provider if it isn't installed") 182 | end.permute! #Parses command line arguments argv in permutation mode and returns list of non-option arguments. 183 | 184 | return args.length > 1 ? args.drop(1) : [] 185 | else 186 | return [] 187 | end 188 | end 189 | end 190 | end 191 | end 192 | -------------------------------------------------------------------------------- /lib/vagrant/compose/declarative/cluster.rb: -------------------------------------------------------------------------------- 1 | require 'open4' 2 | require_relative "../node" 3 | 4 | module VagrantPlugins 5 | module Compose 6 | module Declarative 7 | 8 | class Cluster 9 | 10 | # The name of the cluster 11 | attr_reader :name 12 | 13 | # The default vagrant base box to be used for creating vagrant machines in this cluster. 14 | # This setting can be changed at group/node level 15 | attr_accessor :box 16 | 17 | # The network domain to wich the cluster belongs (used for computing nodes fqdn) 18 | attr_accessor :domain 19 | 20 | # The root path for ansible playbook; it is used as a base path for computing ansible_group_vars and ansible_host_vars 21 | # It defaults to current directory/provisioning 22 | attr_accessor :ansible_playbook_path 23 | 24 | # Implements cluster creation from a playbook file 25 | def from (file) 26 | # calls vagrant-playbook utility for executing the playbook file. 27 | playbook = YAML.load(pycompose (file)) 28 | 29 | # extract cluster attributes 30 | @name = playbook.keys[0] 31 | @box = playbook[@name]['box'] 32 | @domain = playbook[@name]['domain'] 33 | @ansible_playbook_path = playbook[@name]['ansible_playbook_path'] 34 | 35 | # extract nodes 36 | nodes = [] 37 | playbook[@name]['nodes'].each do |node| 38 | 39 | boxname = node.keys[0] 40 | 41 | box = node[boxname]['box'] 42 | hostname = node[boxname]['hostname'] 43 | aliases = node[boxname]['aliases'] 44 | fqdn = node[boxname]['fqdn'] 45 | ip = node[boxname]['ip'] 46 | cpus = node[boxname]['cpus'] 47 | memory = node[boxname]['memory'] 48 | ansible_groups = node[boxname]['ansible_groups'] 49 | attributes = node[boxname]['attributes'] 50 | index = node[boxname]['index'] 51 | group_index = node[boxname]['group_index'] 52 | 53 | nodes << VagrantPlugins::Compose::Node.new(box, boxname, hostname, fqdn, aliases, ip, cpus, memory, ansible_groups, attributes, index, group_index) 54 | end 55 | 56 | # extract ansible inventory, ansible_group_vars, ansible_host_vars 57 | ansible_groups = {} 58 | if playbook[@name].key?("ansible") 59 | 60 | ansible = playbook[@name]['ansible'] 61 | 62 | # extract ansible inventory 63 | ansible_groups = ansible['inventory'] 64 | 65 | # cleanup ansible_group_vars files 66 | # TODO: make safe 67 | ansible_group_vars_path = File.join(@ansible_playbook_path, 'group_vars') 68 | 69 | if File.exists?(ansible_group_vars_path) 70 | Dir.foreach(ansible_group_vars_path) {|f| fn = File.join(ansible_group_vars_path, f); File.delete(fn) if f.end_with?(".yml")} 71 | end 72 | 73 | #generazione ansible_group_vars file (NB. 1 group = 1 gruppo host ansible) 74 | if ansible.key?("group_vars") 75 | ansible['group_vars'].each do |group, vars| 76 | # crea il file (se sono state generate delle variabili) 77 | unless vars.empty? 
78 | FileUtils.mkdir_p(ansible_group_vars_path) unless File.exists?(ansible_group_vars_path)
79 | # TODO: make safe
80 | fileName = group.gsub(':', '_')
81 | File.open(File.join(ansible_group_vars_path,"#{fileName}.yml") , 'w+') do |file|
82 | file.puts YAML::dump(vars)
83 | end
84 | end
85 | end
86 | end
87 |
88 | # cleanup ansible_host_vars files (NB. 1 node = 1 host)
89 | # TODO: make safe
90 | ansible_host_vars_path = File.join(@ansible_playbook_path, 'host_vars')
91 |
92 | if File.exists?(ansible_host_vars_path)
93 | Dir.foreach(ansible_host_vars_path) {|f| fn = File.join(ansible_host_vars_path, f); File.delete(fn) if f.end_with?(".yml")}
94 | end
95 |
96 | # generate ansible_host_vars files
97 | if ansible.key?("host_vars")
98 | ansible['host_vars'].each do |host, vars|
99 | # create the file (only if some variables were generated)
100 | unless vars.empty?
101 | FileUtils.mkdir_p(ansible_host_vars_path) unless File.exists?(ansible_host_vars_path)
102 |
103 | # TODO: make safe
104 | File.open(File.join(ansible_host_vars_path,"#{host}.yml") , 'w+') do |file|
105 | file.puts YAML::dump(vars)
106 | end
107 | end
108 | end
109 | end
110 | end
111 |
112 | return nodes, ansible_groups
113 | end
114 |
115 |
116 | # Executes the pycompose (vagrant-playbook) command and returns its standard output.
117 | def pycompose (file)
118 | p_err = ""
119 | p_out = ""
120 |
121 | begin
122 | p_status = Open4::popen4("vagrant-playbook -f #{file}") do |pid, stdin, stdout, stderr|
123 | p_err = stderr.read.strip
124 | p_out = stdout.read.strip
125 | end
126 | rescue Errno::ENOENT
127 | raise VagrantPlugins::Compose::Errors::PyComposeMissing
128 | rescue Exception => e
129 | raise VagrantPlugins::Compose::Errors::PyComposeError, :message => e.message
130 | end
131 |
132 | if p_status.exitstatus != 0
133 | raise VagrantPlugins::Compose::Errors::PyComposeError, :message => p_err
134 | end
135 |
136 | return p_out
137 | end
138 |
139 | end
140 | end
141 | end
142 | end
143 |
-------------------------------------------------------------------------------- /lib/vagrant/compose/errors.rb: --------------------------------------------------------------------------------
1 | require "vagrant"
2 |
3 | module VagrantPlugins
4 | module Compose
5 |
6 | # Plugin custom error classes, handling localization of error messages
7 | module Errors
8 | # Base class for vagrant compose custom errors
9 | class VagrantComposeError < Vagrant::Errors::VagrantError
10 | error_namespace("vagrant_compose.errors")
11 | end
12 |
13 | class ClusterInitializeError < VagrantComposeError
14 | error_key(:initialize_error)
15 | end
16 |
17 | class AttributeExpressionError < VagrantComposeError
18 | error_key(:attribute_expression_error)
19 | end
20 |
21 | class ContextVarExpressionError < VagrantComposeError
22 | error_key(:context_var_expression_error)
23 | end
24 |
25 | class GroupVarExpressionError < VagrantComposeError
26 | error_key(:group_var_expression_error)
27 | end
28 |
29 | class HostVarExpressionError < VagrantComposeError
30 | error_key(:host_var_expression_error)
31 | end
32 |
33 | class PyComposeMissing < VagrantComposeError
34 | error_key(:pycompose_missing)
35 | end
36 |
37 | class PyComposeError < VagrantComposeError
38 | error_key(:pycompose_error)
39 | end
40 | end
41 | end
42 | end
43 |
-------------------------------------------------------------------------------- /lib/vagrant/compose/node.rb: --------------------------------------------------------------------------------
1 | module VagrantPlugins
2 | module Compose
3 |
4 | # This class defines a node through a set of settings to be used when creating vagrant machines in the cluster.
5 | # Settings will be assigned a value by the cluster.compose method, according to the configuration
6 | # of the group of nodes to which the node belongs.
7 | class Node
8 |
9 | # The vagrant base box to be used for creating the vagrant machine that implements the node.
10 | attr_reader :box
11 |
12 | # The box name for this node a.k.a. the name for the machine in VirtualBox/VMware console.
13 | attr_reader :boxname
14 |
15 | # The hostname for the node.
16 | attr_reader :hostname
17 |
18 | # The fully qualified name for the node.
19 | attr_reader :fqdn
20 |
21 | # The list of aliases a.k.a. alternative host names for the node.
22 | attr_reader :aliases
23 |
24 | # The ip for the node.
25 | attr_reader :ip
26 |
27 | # The number of cpus for the node.
28 | attr_reader :cpus
29 |
30 | # The amount of memory for the node.
31 | attr_reader :memory
32 |
33 | # The list of ansible_groups for the node.
34 | attr_reader :ansible_groups
35 |
36 | # A set of custom attributes for the node.
37 | attr_reader :attributes
38 |
39 | # A number identifying the node within the group of nodes to which the node belongs.
40 | attr_reader :index
41 |
42 | # A number identifying the group of nodes to which the node belongs.
43 | attr_reader :group_index
44 |
45 | def initialize(box, boxname, hostname, fqdn, aliases, ip, cpus, memory, ansible_groups, attributes, index, group_index)
46 | @box = box
47 | @boxname = boxname
48 | @hostname = hostname
49 | @fqdn = fqdn
50 | @aliases = aliases
51 | @ip = ip
52 | @cpus = cpus
53 | @memory = memory
54 | @ansible_groups = ansible_groups
55 | @attributes = attributes
56 | @index = index
57 | @group_index = group_index
58 | end
59 | end
60 |
61 | end
62 | end
-------------------------------------------------------------------------------- /lib/vagrant/compose/plugin.rb: --------------------------------------------------------------------------------
1 | begin
2 | require "vagrant"
3 | rescue LoadError
4 | raise "The Vagrant Compose plugin must be run within Vagrant."
5 | end
6 |
7 | # This is a sanity check to make sure no one is attempting to install
8 | # this into an early Vagrant version.
9 | if Vagrant::VERSION < "1.8.1"
10 | raise "The Vagrant Compose plugin is only compatible with Vagrant 1.8.1+"
11 | end
12 |
13 | module VagrantPlugins
14 | module Compose
15 | class Plugin < Vagrant.plugin("2")
16 | name "Compose"
17 | description <<-DESC
18 | A Vagrant plugin that helps building complex multi-machine scenarios.
19 | see https://github.com/fabriziopandini/vagrant-compose for documentation.
20 | DESC
21 |
22 | config "cluster" do
23 | # Setup logging and i18n
24 | setup_logging
25 | setup_i18n
26 |
27 | require_relative "config"
28 | Config
29 | end
30 |
31 |
32 | # This initializes the internationalization strings.
33 | def self.setup_i18n
34 | I18n.load_path << File.expand_path("locales/en.yml", Compose.source_root)
35 | I18n.reload!
36 | end
37 |
38 | # This sets up our log level to be whatever VAGRANT_LOG is.
39 | def self.setup_logging
40 | require "log4r"
41 |
42 | level = nil
43 | begin
44 | level = Log4r.const_get(ENV["VAGRANT_LOG"].upcase)
45 | rescue NameError
46 | # This means that the logging constant wasn't found,
47 | # which is fine. We just keep `level` as `nil`. But
48 | # we tell the user.
49 | level = nil
50 | end
51 |
52 | # Some constants, such as "true" resolve to booleans, so the
53 | # above error checking doesn't catch it. This will check to make
54 | # sure that the log level is an integer, as Log4r requires.
55 | level = nil if !level.is_a?(Integer)
56 |
57 | # Set the logging level on all "vagrant" namespaced
58 | # logs as long as we have a valid level.
59 | if level
60 | logger = Log4r::Logger.new("vagrant_compose")
61 | logger.outputters = Log4r::Outputter.stderr
62 | logger.level = level
63 | logger = nil
64 | end
65 | end
66 | end
67 | end
68 | end
-------------------------------------------------------------------------------- /lib/vagrant/compose/programmatic/cluster.rb: --------------------------------------------------------------------------------
1 | require_relative "node_group"
2 |
3 | module VagrantPlugins
4 | module Compose
5 | module Programmatic
6 |
7 | # This class defines a cluster, that is a set of groups of nodes, where nodes in each group have similar characteristics.
8 | # Basically, a cluster is a data structure that can be used as a recipe for setting up and provisioning a
9 | # vagrant cluster composed of several machines with different roles.
10 | class Cluster
11 |
12 | # The name of the cluster
13 | attr_reader :name
14 |
15 | # The default vagrant base box to be used for creating vagrant machines in this cluster.
16 | # This setting can be changed at group/node level
17 | attr_accessor :box
18 |
19 | # The network domain to which the cluster belongs (used for computing node fqdns)
20 | attr_accessor :domain
21 |
22 | # The root path for the ansible playbook; it is used as a base path for computing ansible_group_vars and ansible_host_vars
23 | # It defaults to current directory/provisioning
24 | attr_accessor :ansible_playbook_path
25 |
26 | # A dictionary that allows setting up ansible_group_vars generators for each node_group
27 | attr_reader :ansible_group_vars
28 |
29 | # A dictionary that allows setting up ansible_host_vars generators for each node_group
30 | attr_reader :ansible_host_vars
31 |
32 | # A dictionary that allows setting up context vars to be used in value_generators when composing nodes
33 | attr_reader :ansible_context_vars
34 |
35 | # Constructor for a cluster instance.
36 | def initialize(name)
37 | @group_index = 0
38 | @node_groups = {}
39 | @ansible_context_vars = {}
40 | @ansible_group_vars = {}
41 | @ansible_host_vars = {}
42 | @ansible_playbook_path = File.join(Dir.pwd, 'provisioning')
43 |
44 | @name = name
45 | @box = 'ubuntu/trusty64'
46 | @domain = 'vagrant'
47 | end
48 |
49 | # Method for creating a group of nodes; at creation time, the given block initializes the
50 | # values/expressions to be used for assigning node attributes during compose.
51 | #
52 | # Besides creating the nodes, the method therefore supports the execution of a block of code
53 | # for the configuration of the group of nodes itself.
54 | def nodes(instances, name, &block)
55 | raise RuntimeError, "Node group #{name} already exists in this cluster." if @node_groups.has_key?(name)
56 |
57 | @node_groups[name] = NodeGroup.new(@group_index, instances, name)
58 | @node_groups[name].box = @box
59 | @node_groups[name].boxname = lambda { |group_index, group_name, node_index| return "#{group_name}#{node_index + 1}" }
60 | @node_groups[name].hostname = lambda { |group_index, group_name, node_index| return "#{group_name}#{node_index + 1}" }
61 | @node_groups[name].aliases = []
62 | @node_groups[name].ip = lambda { |group_index, group_name, node_index| return "172.31.#{group_index}.#{100 + node_index + 1}" }
63 | @node_groups[name].cpus = 1
64 | @node_groups[name].memory = 256
65 | @node_groups[name].ansible_groups = []
66 | @node_groups[name].attributes = {}
67 |
68 | @group_index += 1
69 |
70 | block.call(@node_groups[name]) if block_given?
71 | end
72 |
73 | # Prepares the provisioning of the cluster.
74 | def compose
75 |
76 | ## Phase 1: node creation
77 |
78 | # expand each group of nodes, creating the individual nodes
79 | nodes = []
80 |
81 | @node_groups.each do |key, group|
82 | group.compose(@name, @domain, nodes.size) do |node|
83 | nodes << node
84 | end
85 | end
86 |
87 | # build the ansible groups, assigning the composed nodes to each of them
88 | # NB. it also takes into account the case of a group composed of nodes belonging to different node_groups
89 | ansible_groups = {}
90 | nodes.each do |node|
91 | node.ansible_groups.each do |ansible_group|
92 | ansible_groups[ansible_group] = [] unless ansible_groups.has_key?(ansible_group)
93 | ansible_groups[ansible_group] << node
94 | end
95 | end
96 | extended_ansible_groups = ansible_groups.merge({'all' => nodes})
97 |
98 | ## Phase 2: configuration of cluster provisioning via Ansible
99 | # Each node will become a vm to be provisioned, i.e. a host in the ansible inventory
100 | # Each group gathers nodes with similar characteristics
101 |
102 | # generate the inventory for ansible, aka ansible_groups in Vagrant (NB. 1 group = 1 ansible group)
103 | ansible_groups_provision = {}
104 | ansible_groups.each do |ansible_group, ansible_group_nodes|
105 | ansible_groups_provision[ansible_group] = []
106 | ansible_group_nodes.each do |node|
107 | ansible_groups_provision[ansible_group] << {"boxname" => node.boxname , "hostname" => node.hostname}
108 | end
109 | end
110 |
111 | # Besides the creation of the ansible inventory, containing groups and hosts, the following is supported:
112 | # - the creation of ansible_group_vars files, i.e. files containing a set of variables - specific to each group of hosts -
113 | # used to drive ansible provisioning according to the characteristics of the specific cluster
114 | # - the creation of ansible_host_vars files, i.e. files containing a set of variables - specific to each host -
115 | # used to drive ansible provisioning according to the characteristics of the specific cluster
116 |
117 | # Variable generation relies on a set of variable provisioners (value generators), one or more for each group of hosts, configured during the
118 | # definition of the cluster.
119 |
120 | context = {}
121 |
122 | # generate the context (NB. 1 group = 1 ansible host group)
123 | extended_ansible_groups.each do |ansible_group, ansible_group_nodes|
124 | # generate the variables for the group
125 | provisioners = @ansible_context_vars[ansible_group]
126 | unless provisioners.nil?
127 |
128 | # if necessary, normalize a single provisioner into an array of provisioners
129 | provisioners = [ provisioners ] if not provisioners.respond_to?('each')
130 | # for all the provisioners associated with the group
131 | provisioners.each do |provisioner|
132 | begin
133 | vars = provisioner.call(context, ansible_group_nodes)
134 |
135 | # TODO: handle conflicts (n>=2 groups generating the same variable - with different values)
136 | context = context.merge(vars)
137 | rescue Exception => e
138 | raise VagrantPlugins::Compose::Errors::ContextVarExpressionError, :message => e.message, :ansible_group => ansible_group
139 | end
140 | end
141 | end
142 | end
143 |
144 | # cleanup ansible_group_vars files
145 | # TODO: make safe
146 | ansible_group_vars_path = File.join(@ansible_playbook_path, 'group_vars')
147 |
148 | if File.exists?(ansible_group_vars_path)
149 | Dir.foreach(ansible_group_vars_path) {|f| fn = File.join(ansible_group_vars_path, f); File.delete(fn) if f.end_with?(".yml")}
150 | end
151 |
152 | # generate ansible_group_vars files (NB. 1 group = 1 ansible host group)
153 | extended_ansible_groups.each do |ansible_group, ansible_group_nodes|
154 | ansible_group_vars = {}
155 | # generate the variables for the group
156 | provisioners = @ansible_group_vars[ansible_group]
157 | unless provisioners.nil?
158 | # if necessary, normalize a single provisioner into an array of provisioners
159 | provisioners = [ provisioners ] if not provisioners.respond_to?('each')
160 | # for all the provisioners associated with the group
161 | provisioners.each do |provisioner|
162 | begin
163 | vars = provisioner.call(context, ansible_group_nodes)
164 |
165 | # TODO: handle conflicts (n>=2 groups generating the same variable - with different values)
166 | ansible_group_vars = ansible_group_vars.merge(vars)
167 | rescue Exception => e
168 | raise VagrantPlugins::Compose::Errors::GroupVarExpressionError, :message => e.message, :ansible_group => ansible_group
169 | end
170 | end
171 | end
172 |
173 | # create the file (only if some variables were generated)
174 | unless ansible_group_vars.empty?
175 | FileUtils.mkdir_p(ansible_group_vars_path) unless File.exists?(ansible_group_vars_path)
176 |
177 | # TODO: make safe
178 | fileName = ansible_group.gsub(':', '_')
179 | File.open(File.join(ansible_group_vars_path,"#{fileName}.yml") , 'w+') do |file|
180 | file.puts YAML::dump(ansible_group_vars)
181 | end
182 | end
183 | end
184 |
185 | # cleanup ansible_host_vars files (NB. 1 node = 1 host)
186 | # TODO: make safe
187 | ansible_host_vars_path = File.join(@ansible_playbook_path, 'host_vars')
188 |
189 | if File.exists?(ansible_host_vars_path)
190 | Dir.foreach(ansible_host_vars_path) {|f| fn = File.join(ansible_host_vars_path, f); File.delete(fn) if f.end_with?(".yml")}
191 | end
192 |
193 | # generate ansible_host_vars files
194 | nodes.each do |node|
195 | # generate the variables for the node; the node may belong to several groups
196 | ansible_host_vars = {}
197 | node.ansible_groups.each do |ansible_group|
198 | # generate the variables for the group
199 | provisioners = @ansible_host_vars[ansible_group]
200 | unless provisioners.nil?
201 | # if necessary, normalize a single provisioner into an array of provisioners
202 | provisioners = [ provisioners ] if not provisioners.respond_to?('each')
203 | # for all the provisioners associated with the group
204 | provisioners.each do |provisioner|
205 | begin
206 | vars = provisioner.call(context, node)
207 |
208 | # TODO: handle conflicts (n>=2 groups generating the same variable - with different values)
209 | ansible_host_vars = ansible_host_vars.merge(vars)
210 | rescue Exception => e
211 | raise VagrantPlugins::Compose::Errors::HostVarExpressionError, :message => e.message, :host => node.hostname, :ansible_group => ansible_group
212 | end
213 | end
214 | end
215 | end
216 |
217 | # create the file (only if some variables were generated)
218 | unless ansible_host_vars.empty?
219 | FileUtils.mkdir_p(ansible_host_vars_path) unless File.exists?(ansible_host_vars_path)
220 |
221 | # TODO: make safe
222 | File.open(File.join(ansible_host_vars_path,"#{node.hostname}.yml") , 'w+') do |file|
223 | file.puts YAML::dump(ansible_host_vars)
224 | end
225 | end
226 | end
227 |
228 | return nodes, ansible_groups_provision
229 | end
230 | end
231 |
232 | end
233 | end
234 | end
235 |
-------------------------------------------------------------------------------- /lib/vagrant/compose/programmatic/node_group.rb: --------------------------------------------------------------------------------
1 | require_relative "../node"
2 |
3 | module VagrantPlugins
4 | module Compose
5 | module Programmatic
6 |
7 | # This class defines a group of nodes, representing a set of vagrant machines with similar characteristics.
8 | # Nodes will be composed by the NodeGroup.compose method, according to the configuration of values/value_generators
9 | # of the group of nodes itself.
10 | class NodeGroup
11 |
12 | # A number identifying the group of nodes within the cluster.
13 | attr_reader :index
14 |
15 | # The name of the group of nodes
16 | attr_reader :name
17 |
18 | # The number of nodes/instances to be created in the group of nodes.
19 | attr_reader :instances
20 |
21 | # The value/value generator to be used for assigning to each node in this group a vagrant base box to be used for creating vagrant machines implementing nodes in this group.
22 | attr_accessor :box
23 |
24 | # The value/value generator to be used for assigning to each node in this group a box name a.k.a. the name for the machine in VirtualBox/VMware console.
25 | attr_accessor :boxname
26 |
27 | # The value/value generator to be used for assigning to each node in this group a unique hostname
28 | attr_accessor :hostname
29 |
30 | # The value/value generator to be used for assigning to each node in this group a unique list of aliases a.k.a. alternative host names
31 | attr_accessor :aliases
32 |
33 | # The value/value generator to be used for assigning to each node in this group a unique ip
34 | attr_accessor :ip
35 |
36 | # The value/value generator to be used for assigning to each node in this group the number of cpus
37 | attr_accessor :cpus
38 |
39 | # The value/value generator to be used for assigning to each node in this group the amount of memory
40 | attr_accessor :memory
41 |
42 | # The value/value generator to be used for assigning each node in this group to a list of ansible groups
43 | attr_accessor :ansible_groups
44 |
45 | # The value/value generator to be used for assigning a dictionary with custom attributes - Hash(String, obj) - to each node in this group.
46 | attr_accessor :attributes
47 |
48 | def initialize(index, instances, name)
49 | @index = index
50 | @name = name
51 | @instances = instances
52 | end
53 |
54 | # Composes the group of nodes, by creating the required number of nodes
55 | # in accordance with values/value_generators.
56 | # Additionally, some "embedded" transformations will be applied to attributes (boxname, hostname) and
57 | # some "autogenerated" node properties will be computed (fqdn).
58 | def compose(cluster_name, cluster_domain, cluster_offset)
59 | node_index = 0
60 | while node_index < @instances
61 | box = generate(:box, @box, node_index)
62 | boxname = maybe_prefix(cluster_name,
63 | "#{generate(:boxname, @boxname, node_index)}")
64 | hostname = maybe_prefix(cluster_name,
65 | "#{generate(:hostname, @hostname, node_index)}")
66 | aliases = generate(:aliases, @aliases, node_index).join(',')
67 | fqdn = cluster_domain.empty? ? "#{hostname}" : "#{hostname}.#{cluster_domain}"
68 | ip = generate(:ip, @ip, node_index)
69 | cpus = generate(:cpus, @cpus, node_index)
70 | memory = generate(:memory, @memory, node_index)
71 | ansible_groups = generate(:ansible_groups, @ansible_groups, node_index)
72 | attributes = generate(:attributes, @attributes, node_index)
73 | yield VagrantPlugins::Compose::Node.new(box, boxname, hostname, fqdn, aliases, ip, cpus, memory, ansible_groups, attributes, cluster_offset + node_index, node_index)
74 |
75 | node_index += 1
76 | end
77 | end
78 |
79 | # utility function for prefixing boxname/hostname with the cluster name (if present)
80 | def maybe_prefix(cluster_name, name)
81 | if cluster_name && cluster_name.length > 0
82 | "#{cluster_name}-" + name
83 | else
84 | name
85 | end
86 | end
87 |
88 | # utility function for resolving values/value generators
89 | def generate(var, generator, node_index)
90 | unless generator.respond_to?
:call 91 | return generator 92 | else 93 | begin 94 | return generator.call(@index, @name, node_index) 95 | rescue Exception => e 96 | raise VagrantPlugins::Compose::Errors::AttributeExpressionError, :message => e.message, :attribute => var, :node_index => node_index, :node_group_name => name 97 | end 98 | end 99 | end 100 | end 101 | 102 | end 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /lib/vagrant/compose/version.rb: -------------------------------------------------------------------------------- 1 | module Vagrant 2 | module Compose 3 | VERSION = "0.7.5" 4 | end 5 | end 6 | -------------------------------------------------------------------------------- /vagrant-compose.gemspec: -------------------------------------------------------------------------------- 1 | $:.unshift File.expand_path("../lib", __FILE__) 2 | require 'vagrant/compose/version' 3 | 4 | Gem::Specification.new do |spec| 5 | spec.name = "vagrant-compose" 6 | spec.version = Vagrant::Compose::VERSION 7 | spec.platform = Gem::Platform::RUBY 8 | spec.license = "MIT" 9 | spec.authors = ["Fabrizio Pandini"] 10 | spec.email = ["fabrizio.pandini@gmail.com"] 11 | spec.homepage = "https://github.com/fabriziopandini/vagrant-compose/" 12 | spec.summary = %q{A Vagrant plugin that helps building complex multi-machine scenarios.} 13 | spec.description = %q{A Vagrant plugin that helps building complex multi-machine scenarios.} 14 | 15 | spec.required_rubygems_version = ">= 2.0.13" 16 | 17 | spec.add_development_dependency "rake", "~> 10.4.2" 18 | spec.add_development_dependency "rspec", "~> 3.4.0" 19 | spec.add_development_dependency "rspec-its", "~> 1.2.0" 20 | spec.add_dependency "open4", "~> 1.3.4" 21 | 22 | # The following block of code determines the files that should be included 23 | # in the gem. It does this by reading all the files in the directory where 24 | # this gemspec is, and parsing out the ignored files from the gitignore. 25 | # Note that the entire gitignore(5) syntax is not supported, specifically 26 | # the "!" syntax, but it should mostly work correctly. 27 | root_path = File.dirname(__FILE__) 28 | all_files = Dir.chdir(root_path) { Dir.glob("**/{*,.*}") } 29 | all_files.reject! { |file| [".", ".."].include?(File.basename(file)) } 30 | gitignore_path = File.join(root_path, ".gitignore") 31 | gitignore = File.readlines(gitignore_path) 32 | gitignore.map! { |line| line.chomp.strip } 33 | gitignore.reject! { |line| line.empty? || line =~ /^(#|!)/ } 34 | 35 | unignored_files = all_files.reject do |file| 36 | # Ignore any directories, the gemspec only cares about files 37 | next true if File.directory?(file) 38 | 39 | # Ignore any paths that match anything in the gitignore. We do 40 | # two tests here: 41 | # 42 | # - First, test to see if the entire path matches the gitignore. 43 | # - Second, match if the basename does, this makes it so that things 44 | # like '.DS_Store' will match sub-directories too (same behavior 45 | # as git). 46 | # 47 | gitignore.any? do |ignore| 48 | File.fnmatch(ignore, file, File::FNM_PATHNAME) || 49 | File.fnmatch(ignore, File.basename(file), File::FNM_PATHNAME) 50 | end 51 | end 52 | 53 | spec.files = unignored_files 54 | spec.executables = unignored_files.map { |f| f[/^bin\/(.*)/, 1] }.compact 55 | spec.require_path = 'lib' 56 | end 57 | --------------------------------------------------------------------------------
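For reference, the sketch below shows how the classes listed above are typically wired together in a Vagrantfile using the programmatic approach: `config.cluster.compose` builds the nodes and the ansible groups, and the Vagrantfile then iterates `config.cluster.nodes` to define the actual machines. This is a minimal illustrative sketch only: the cluster name, group name, instance count and the VirtualBox-specific settings are made-up values, and the canonical, complete examples live in doc/programmatic.md and doc/declarative.md.

```ruby
# Vagrantfile (illustrative sketch, not part of the gem sources)
Vagrant.configure(2) do |config|

  # Compose a cluster named "test" with one group of 3 consul server nodes.
  # The block receives the Programmatic::Cluster instance shown above.
  config.cluster.compose('test') do |c|
    c.box = 'ubuntu/trusty64'
    c.nodes(3, 'consul-server') do |n|
      n.memory = 512
      n.ansible_groups = ['consul-server']
    end
  end

  # Optionally print the composed nodes and ansible groups (see Config#debug).
  config.cluster.debug

  # Turn each composed node into a vagrant machine.
  # config.cluster.ansible_groups can additionally be fed to the Vagrant
  # ansible provisioner (ansible.groups) to drive provisioning.
  config.cluster.nodes.each do |node|
    config.vm.define node.boxname do |machine|
      machine.vm.box      = node.box
      machine.vm.hostname = node.fqdn
      machine.vm.network :private_network, ip: node.ip
      machine.vm.provider :virtualbox do |vb|
        vb.name   = node.boxname
        vb.memory = node.memory
        vb.cpus   = node.cpus
      end
    end
  end
end
```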