├── .gitignore ├── roles ├── software │ ├── nomad │ │ ├── templates │ │ │ ├── nomad.sh │ │ │ └── nomad.hcl │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── consul │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── consul.hcl │ └── docker │ │ ├── handlers │ │ └── main.yml │ │ └── tasks │ │ └── main.yml └── common │ └── firewalld │ └── tasks │ └── main.yml ├── ansible.cfg ├── plays ├── nomad.yml └── group_vars │ └── servers.yml ├── prometheus └── prometheus.yml ├── Vagrantfile ├── nomad ├── node-exporter.hcl ├── cadvisor.hcl └── infrastructure.hcl └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | -------------------------------------------------------------------------------- /roles/software/nomad/templates/nomad.sh: -------------------------------------------------------------------------------- 1 | export NOMAD_ADDR={{ nomad__addr }} 2 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ./roles 3 | ansible_managed = Ansible managed: {file} 4 | -------------------------------------------------------------------------------- /roles/software/consul/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul__interface: eth0 3 | consul__server: false 4 | consul__server_ip: 127.0.0.1 5 | -------------------------------------------------------------------------------- /roles/software/nomad/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nomad__server: false 3 | nomad__client: true 4 | nomad__interface: eth0 5 | nomad__addr: http://localhost:4646 6 | -------------------------------------------------------------------------------- /roles/software/consul/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart consul 2 | systemd: 3 | name: consul 4 | enabled: yes 5 | state: restarted 6 | daemon_reload: yes 7 | -------------------------------------------------------------------------------- /roles/software/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | service: 4 | name: docker 5 | enabled: yes 6 | state: restarted 7 | daemon_reload: yes 8 | -------------------------------------------------------------------------------- /roles/software/nomad/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nomad 3 | systemd: 4 | name: nomad 5 | enabled: yes 6 | state: restarted 7 | daemon_reload: yes 8 | -------------------------------------------------------------------------------- /plays/nomad.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: setup nomad server 3 | hosts: servers 4 | become: yes 5 | 6 | roles: 7 | - role: software/docker 8 | - role: software/consul 9 | - role: software/nomad 10 | - role: common/firewalld 11 | -------------------------------------------------------------------------------- /roles/common/firewalld/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 
--- 2 | - name: stop firewalld 3 | service: 4 | name: firewalld 5 | enabled: no 6 | state: stopped 7 | when: (ansible_facts['distribution'] == "CentOS" and ansible_facts['distribution_major_version'] == "8") 8 | -------------------------------------------------------------------------------- /prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: 'self' 3 | consul_sd_configs: 4 | - server: 'consul.service.consul:8500' 5 | services: [] 6 | relabel_configs: 7 | - source_labels: [__meta_consul_tags] 8 | regex: .*,metrics,.* 9 | action: keep 10 | - source_labels: [__meta_consul_service] 11 | target_label: job 12 | 13 | -------------------------------------------------------------------------------- /roles/software/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install docker-ce repo 3 | yum_repository: 4 | name: docker-ce 5 | description: docker repository 6 | baseurl: https://download.docker.com/linux/centos/$releasever/$basearch/stable 7 | gpgcheck: yes 8 | gpgkey: https://download.docker.com/linux/centos/gpg 9 | 10 | - name: install docker-ce 11 | yum: 12 | name: "{{ item }}" 13 | state: present 14 | loop: 15 | - docker-ce 16 | - docker-ce-cli 17 | - containerd.io 18 | notify: restart docker 19 | -------------------------------------------------------------------------------- /plays/group_vars/servers.yml: -------------------------------------------------------------------------------- 1 | consul__server: true 2 | nomad__server: true 3 | 4 | nomad__interface: "{% if ansible_virtualization_type == 'virtualbox' %}eth0{% elif ansible_virtualization_type == 'lxc' %}eth0{% endif %}" 5 | consul__interface: "{% if ansible_virtualization_type == 'virtualbox' %}eth0{% elif ansible_virtualization_type == 'lxc' %}eth0{% endif %}" 6 | nomad__addr: "http://{% if ansible_virtualization_type == 'virtualbox' %}{{ ansible_eth0.ipv4.address }}{% elif ansible_virtualization_type == 'lxc' %}{{ ansible_eth0.ipv4.address }}{% endif %}:4646" 7 | -------------------------------------------------------------------------------- /roles/software/consul/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install hashicorp repo 3 | yum_repository: 4 | name: hashicorp 5 | description: hashicorp repository 6 | baseurl: https://rpm.releases.hashicorp.com/RHEL/$releasever/$basearch/stable 7 | gpgcheck: yes 8 | gpgkey: https://rpm.releases.hashicorp.com/gpg 9 | 10 | - name: install consul 11 | yum: 12 | name: consul 13 | state: installed 14 | 15 | - name: configure consul 16 | template: 17 | src: consul.hcl 18 | dest: /etc/consul.d/consul.hcl 19 | mode: 0644 20 | notify: restart consul 21 | -------------------------------------------------------------------------------- /roles/software/nomad/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install hashicorp repo 3 | yum_repository: 4 | name: hashicorp 5 | description: hashicorp repository 6 | baseurl: https://rpm.releases.hashicorp.com/RHEL/$releasever/$basearch/stable 7 | gpgcheck: yes 8 | gpgkey: https://rpm.releases.hashicorp.com/gpg 9 | 10 | - name: install nomad 11 | yum: 12 | name: nomad 13 | state: installed 14 | 15 | - name: configure nomad 16 | template: 17 | src: nomad.hcl 18 | dest: /etc/nomad.d/nomad.hcl 19 | mode: 0644 20 | notify: restart nomad 21 | 22 | - name: 
configure nomad profile.d 23 | template: 24 | src: nomad.sh 25 | dest: /etc/profile.d/nomad.sh 26 | mode: 0644 27 | -------------------------------------------------------------------------------- /roles/software/nomad/templates/nomad.hcl: -------------------------------------------------------------------------------- 1 | # Full configuration options can be found at https://www.nomadproject.io/docs/configuration 2 | 3 | data_dir = "/opt/nomad/data" 4 | bind_addr = "{% raw %}{{{% endraw %} GetInterfaceIP \"{{ nomad__interface }}\" {% raw %}}}{% endraw %}" 5 | 6 | server { 7 | enabled = {{ nomad__server | bool | lower }} 8 | bootstrap_expect = 1 9 | } 10 | 11 | disable_update_check = true 12 | 13 | telemetry { 14 | collection_interval = "1s" 15 | disable_hostname = true 16 | prometheus_metrics = true 17 | publish_allocation_metrics = true 18 | publish_node_metrics = true 19 | } 20 | 21 | 22 | client { 23 | enabled = {{ nomad__client | bool | lower }} 24 | network_interface = "{{ nomad__interface }}" 25 | } 26 | 27 | plugin "docker" { 28 | config { 29 | allow_caps = [ "ALL" ], 30 | volumes { 31 | enabled = true 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | VAGRANTFILE_API_VERSION = "2" 4 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 5 | 6 | if Vagrant.has_plugin?("vagrant-cachier") 7 | config.cache.scope = :box 8 | config.cache.synced_folder_opts = { 9 | type: :rsync, 10 | } 11 | end 12 | 13 | config.vm.box = "centos/8" 14 | 15 | config.vm.provider :lxc do |lxc, override| 16 | override.vm.box = "visibilityspots/centos-8.x-minimal" 17 | lxc.container_name = :machine 18 | end 19 | 20 | config.vm.provider :virtualbox do |virtualbox, override| 21 | virtualbox.customize ["modifyvm", :id, "--memory", 3072] 22 | end 23 | 24 | config.vm.define "nomad" 25 | config.vm.hostname = "nomad" 26 | 27 | config.vm.network "forwarded_port", guest: 8500, host: 8500 28 | config.vm.network "forwarded_port", guest: 4646, host: 4646 29 | config.vm.network "forwarded_port", guest: 9090, host: 9090 30 | 31 | config.vm.synced_folder "nomad", "/opt/nomad", type: "rsync" 32 | config.vm.synced_folder "prometheus", "/opt/prometheus", type: "rsync" 33 | config.vm.provision "ansible" do |ansible| 34 | ansible.config_file = "ansible.cfg" 35 | ansible.playbook = "plays/nomad.yml" 36 | ansible.groups = { 37 | "servers" => ["nomad"], 38 | } 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /nomad/node-exporter.hcl: -------------------------------------------------------------------------------- 1 | job "node-exporter" { 2 | region = "global" 3 | datacenters = ["dc1"] 4 | type = "service" 5 | 6 | group "app" { 7 | count = 1 8 | 9 | restart { 10 | attempts = 3 11 | delay = "20s" 12 | mode = "delay" 13 | } 14 | 15 | network { 16 | port "node_exporter" { 17 | static = 9100 18 | to = 9100 19 | } 20 | } 21 | 22 | 23 | task "node-exporter" { 24 | driver = "docker" 25 | 26 | config { 27 | image = "prom/node-exporter" 28 | ports = ["node_exporter"] 29 | force_pull = true 30 | volumes = [ 31 | "/proc:/host/proc", 32 | "/sys:/host/sys", 33 | "/:/rootfs" 34 | ] 35 | logging { 36 | type = "journald" 37 | config { 38 | tag = "NODE-EXPORTER" 39 | } 40 | } 41 | 42 | } 43 | 44 | service { 45 | name = "node-exporter" 46 | tags = [ 47 | "metrics" 48 | ] 49 | port = 
"node_exporter" 50 | 51 | check { 52 | type = "http" 53 | path = "/metrics/" 54 | interval = "10s" 55 | timeout = "2s" 56 | } 57 | } 58 | 59 | resources { 60 | cpu = 50 61 | memory = 100 62 | } 63 | } 64 | } 65 | } 66 | 67 | -------------------------------------------------------------------------------- /nomad/cadvisor.hcl: -------------------------------------------------------------------------------- 1 | job "cadvisor" { 2 | region = "global" 3 | datacenters = ["dc1"] 4 | type = "service" 5 | 6 | group "app" { 7 | count = 1 8 | 9 | restart { 10 | attempts = 3 11 | delay = "20s" 12 | mode = "delay" 13 | } 14 | 15 | network { 16 | port "cadvisor" { 17 | static = 8080 18 | to = 8080 19 | } 20 | } 21 | 22 | 23 | task "cadvisor" { 24 | driver = "docker" 25 | 26 | config { 27 | image = "google/cadvisor" 28 | ports = ["cadvisor"] 29 | force_pull = true 30 | volumes = [ 31 | "/:/rootfs:ro", 32 | "/var/run:/var/run:rw", 33 | "/sys:/sys:ro", 34 | "/var/lib/docker/:/var/lib/docker:ro", 35 | "/cgroup:/cgroup:ro" 36 | ] 37 | logging { 38 | type = "journald" 39 | config { 40 | tag = "CADVISOR" 41 | } 42 | } 43 | } 44 | 45 | service { 46 | address_mode = "driver" 47 | name = "cadvisor" 48 | tags = [ 49 | "metrics" 50 | ] 51 | port = "cadvisor" 52 | 53 | check { 54 | type = "http" 55 | path = "/metrics/" 56 | interval = "10s" 57 | timeout = "2s" 58 | } 59 | } 60 | 61 | resources { 62 | cpu = 50 63 | memory = 100 64 | } 65 | } 66 | } 67 | } 68 | 69 | -------------------------------------------------------------------------------- /nomad/infrastructure.hcl: -------------------------------------------------------------------------------- 1 | job "infrastructure" { 2 | region = "global" 3 | datacenters = ["dc1"] 4 | type = "service" 5 | 6 | group "infra" { 7 | count = 1 8 | 9 | restart { 10 | attempts = 3 11 | delay = "20s" 12 | mode = "delay" 13 | } 14 | 15 | network { 16 | port "dns" { 17 | static = 53 18 | to = 53 19 | } 20 | port "prometheus" { 21 | static = 9090 22 | to = 9090 23 | } 24 | port "node_exporter" { 25 | static = 9100 26 | to = 9100 27 | } 28 | port "cadvisor" { 29 | static = 8080 30 | to = 8080 31 | } 32 | } 33 | 34 | task "dnsmasq" { 35 | driver = "docker" 36 | 37 | config { 38 | image = "andyshinn/dnsmasq" 39 | force_pull = true 40 | ports = ["dns"] 41 | args = [ 42 | "-S", "/consul/${NOMAD_IP_dns}#8600" 43 | ] 44 | cap_add = [ 45 | "NET_ADMIN", 46 | ] 47 | logging { 48 | type = "journald" 49 | config { 50 | tag = "DNSMASQ" 51 | } 52 | } 53 | } 54 | 55 | service { 56 | name = "dnsmasq" 57 | port = "dns" 58 | address_mode = "driver" 59 | 60 | check { 61 | type = "tcp" 62 | port = "dns" 63 | interval = "10s" 64 | timeout = "2s" 65 | } 66 | } 67 | 68 | resources { 69 | cpu = 50 70 | memory = 100 71 | } 72 | } 73 | 74 | task "prometheus" { 75 | driver = "docker" 76 | 77 | config { 78 | image = "prom/prometheus" 79 | force_pull = true 80 | network_mode = "host" 81 | dns_servers = ["${NOMAD_IP_dns}"] 82 | volumes = [ 83 | "/opt/prometheus/:/etc/prometheus/" 84 | ] 85 | args = [ 86 | "--config.file=/etc/prometheus/prometheus.yml", 87 | "--storage.tsdb.path=/prometheus", 88 | "--web.console.libraries=/usr/share/prometheus/console_libraries", 89 | "--web.console.templates=/usr/share/prometheus/consoles", 90 | "--web.enable-admin-api" 91 | ] 92 | logging { 93 | type = "journald" 94 | config { 95 | tag = "PROMETHEUS" 96 | } 97 | } 98 | } 99 | 100 | service { 101 | name = "prometheus" 102 | address_mode = "driver" 103 | tags = [ 104 | "metrics" 105 | ] 106 | port = "prometheus" 107 | 108 | check { 
109 | type = "http" 110 | path = "/targets" 111 | interval = "10s" 112 | timeout = "2s" 113 | } 114 | } 115 | 116 | resources { 117 | cpu = 50 118 | memory = 100 119 | } 120 | } 121 | 122 | task "cadvisor" { 123 | driver = "docker" 124 | 125 | config { 126 | image = "google/cadvisor" 127 | force_pull = true 128 | dns_servers = ["${NOMAD_IP_dns}"] 129 | volumes = [ 130 | "/:/rootfs:ro", 131 | "/var/run:/var/run:rw", 132 | "/sys:/sys:ro", 133 | "/var/lib/docker/:/var/lib/docker:ro", 134 | "/cgroup:/cgroup:ro" 135 | ] 136 | ports = ["cadvisor"] 137 | logging { 138 | type = "journald" 139 | config { 140 | tag = "CADVISOR" 141 | } 142 | } 143 | } 144 | 145 | service { 146 | address_mode = "driver" 147 | name = "cadvisor" 148 | tags = [ 149 | "metrics" 150 | ] 151 | 152 | port = "cadvisor" 153 | 154 | check { 155 | type = "http" 156 | path = "/metrics/" 157 | interval = "10s" 158 | timeout = "2s" 159 | } 160 | } 161 | 162 | resources { 163 | cpu = 50 164 | memory = 100 165 | } 166 | } 167 | 168 | task "node-exporter" { 169 | driver = "docker" 170 | 171 | config { 172 | image = "prom/node-exporter" 173 | force_pull = true 174 | ports = ["node_exporter"] 175 | volumes = [ 176 | "/proc:/host/proc", 177 | "/sys:/host/sys", 178 | "/:/rootfs" 179 | ] 180 | logging { 181 | type = "journald" 182 | config { 183 | tag = "NODE-EXPORTER" 184 | } 185 | } 186 | 187 | } 188 | 189 | service { 190 | name = "node-exporter" 191 | address_mode = "driver" 192 | tags = [ 193 | "metrics" 194 | ] 195 | 196 | port = "node_exporter" 197 | 198 | check { 199 | type = "http" 200 | path = "/metrics/" 201 | interval = "10s" 202 | timeout = "2s" 203 | } 204 | } 205 | 206 | resources { 207 | cpu = 50 208 | memory = 100 209 | } 210 | } 211 | } 212 | } 213 | 214 | -------------------------------------------------------------------------------- /roles/software/consul/templates/consul.hcl: -------------------------------------------------------------------------------- 1 | # Full configuration options can be found at https://www.consul.io/docs/agent/options.html 2 | 3 | # datacenter 4 | # This flag controls the datacenter in which the agent is running. If not provided, 5 | # it defaults to "dc1". Consul has first-class support for multiple datacenters, but 6 | # it relies on proper configuration. Nodes in the same datacenter should be on a 7 | # single LAN. 8 | #datacenter = "dc1" 9 | 10 | # data_dir 11 | # This flag provides a data directory for the agent to store state. This is required 12 | # for all agents. The directory should be durable across reboots. This is especially 13 | # critical for agents that are running in server mode as they must be able to persist 14 | # cluster state. Additionally, the directory must support the use of filesystem 15 | # locking, meaning some types of mounted folders (e.g. VirtualBox shared folders) may 16 | # not be suitable. 17 | data_dir = "/opt/consul" 18 | 19 | # client_addr 20 | # The address to which Consul will bind client interfaces, including the HTTP and DNS 21 | # servers. By default, this is "127.0.0.1", allowing only loopback connections. In 22 | # Consul 1.0 and later this can be set to a space-separated list of addresses to bind 23 | # to, or a go-sockaddr template that can potentially resolve to multiple addresses. 24 | client_addr = "0.0.0.0" 25 | 26 | # ui 27 | # Enables the built-in web UI server and the required HTTP routes. This eliminates 28 | # the need to maintain the Consul web UI files separately from the binary. 
29 | ui = {{ consul__server | bool | lower }} 30 | 31 | # server 32 | # This flag is used to control if an agent is in server or client mode. When provided, 33 | # an agent will act as a Consul server. Each Consul cluster must have at least one 34 | # server and ideally no more than 5 per datacenter. All servers participate in the Raft 35 | # consensus algorithm to ensure that transactions occur in a consistent, linearizable 36 | # manner. Transactions modify cluster state, which is maintained on all server nodes to 37 | # ensure availability in the case of node failure. Server nodes also participate in a 38 | # WAN gossip pool with server nodes in other datacenters. Servers act as gateways to 39 | # other datacenters and forward traffic as appropriate. 40 | server = {{ consul__server | bool | lower }} 41 | 42 | # bootstrap_expect 43 | # This flag provides the number of expected servers in the datacenter. Either this value 44 | # should not be provided or the value must agree with other servers in the cluster. When 45 | # provided, Consul waits until the specified number of servers are available and then 46 | # bootstraps the cluster. This allows an initial leader to be elected automatically. 47 | # This cannot be used in conjunction with the legacy -bootstrap flag. This flag requires 48 | # -server mode. 49 | {% if consul__server %} 50 | bootstrap_expect = {{ groups['servers'] | length }} 51 | {% endif %} 52 | 53 | # encrypt 54 | # Specifies the secret key to use for encryption of Consul network traffic. This key must 55 | # be 32-bytes that are Base64-encoded. The easiest way to create an encryption key is to 56 | # use consul keygen. All nodes within a cluster must share the same encryption key to 57 | # communicate. The provided key is automatically persisted to the data directory and loaded 58 | # automatically whenever the agent is restarted. This means that to encrypt Consul's gossip 59 | # protocol, this option only needs to be provided once on each agent's initial startup 60 | # sequence. If it is provided after Consul has been initialized with an encryption key, 61 | # then the provided key is ignored and a warning will be displayed. 62 | #encrypt = 63 | 64 | # retry_join 65 | # Similar to -join but allows retrying a join until it is successful. Once it joins 66 | # successfully to a member in a list of members it will never attempt to join again. 67 | # Agents will then solely maintain their membership via gossip. This is useful for 68 | # cases where you know the address will eventually be available. This option can be 69 | # specified multiple times to specify multiple agents to join. The value can contain 70 | # IPv4, IPv6, or DNS addresses. In Consul 1.1.0 and later this can be set to a go-sockaddr 71 | # template. If Consul is running on the non-default Serf LAN port, this must be specified 72 | # as well. IPv6 must use the "bracketed" syntax. If multiple values are given, they are 73 | # tried and retried in the order listed until the first succeeds. Here are some examples: 74 | #retry_join = ["consul.domain.internal"] 75 | #retry_join = ["10.0.4.67"] 76 | #retry_join = ["[::1]:8301"] 77 | #retry_join = ["consul.domain.internal", "10.0.4.67"] 78 | # Cloud Auto-join examples: 79 | # More details - https://www.consul.io/docs/agent/cloud-auto-join 80 | #retry_join = ["provider=aws tag_key=... tag_value=..."] 81 | #retry_join = ["provider=azure tag_name=... tag_value=... tenant_id=... client_id=... subscription_id=... 
secret_access_key=..."] 82 | #retry_join = ["provider=gce project_name=... tag_value=..."] 83 | {% if not consul__server %} 84 | retry_join = ["{{ consul__server_ip }}"] 85 | {% endif %} 86 | bind_addr = "{% raw %}{{{% endraw %} GetInterfaceIP \"{{ consul__interface }}\" {% raw %}}}{% endraw %}" 87 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nomad local development 2 | 3 | The aim of this project is to provide a development environment similar to docker-compose, but without vendor lock-in on docker, by combining [consul](https://www.consul.io/) and [nomad](https://www.nomadproject.io). 4 | 5 | Bring up the environment with [vagrant](https://www.vagrantup.com), which creates a CentOS 8 virtual machine or lxc container and provisions it with the Ansible playbook plays/nomad.yml to install docker, consul and nomad. The nomad job definitions under the nomad/ directory provide a prometheus based monitoring setup. 6 | 7 | The vagrant providers proven to work on an [ArchLinux](https://www.archlinux.org/) system are 8 | * [vagrant-lxc](https://github.com/fgrehm/vagrant-lxc) 9 | * [vagrant-libvirt](https://github.com/vagrant-libvirt/) 10 | * [virtualbox](https://www.virtualbox.org/) 11 | 12 | ```bash 13 | $ vagrant up --provider lxc 14 | OR 15 | $ vagrant up --provider libvirt 16 | OR 17 | $ vagrant up --provider virtualbox 18 | ``` 19 | 20 | Once provisioning has finished, you can connect to the vagrant environment over SSH and interact with Nomad to start the development environment: 21 | 22 | ```bash 23 | $ vagrant ssh 24 | [vagrant@nomad ~]$ nomad run /opt/nomad/infrastructure.hcl 25 | ==> Monitoring evaluation "ad6e7c2e" 26 | Evaluation triggered by job "infrastructure" 27 | Allocation "60f6c5ec" created: node "4b966e54", group "infra" 28 | Evaluation status changed: "pending" -> "complete" 29 | ==> Evaluation "ad6e7c2e" finished with status "complete" 30 | ``` 31 | 32 | Get the Nomad job status: 33 | 34 | ``` 35 | [vagrant@nomad ~]$ nomad status infrastructure 36 | ID = infrastructure 37 | Name = infrastructure 38 | Submit Date = 2019-04-17T20:03:23Z 39 | Type = service 40 | Priority = 50 41 | Datacenters = dc1 42 | Status = running 43 | Periodic = false 44 | Parameterized = false 45 | 46 | Summary 47 | Task Group Queued Starting Running Failed Complete Lost 48 | infra 0 0 1 0 0 0 49 | 50 | Allocations 51 | ID Node ID Task Group Version Desired Status Created Modified 52 | 60f6c5ec 4b966e54 infra 0 run running 55s ago 32s ago 53 | ``` 54 | 55 | Get the allocation status: 56 | 57 | ``` 58 | [vagrant@nomad ~]$ nomad alloc-status 60f6c5ec 59 | ID = 60f6c5ec 60 | Eval ID = ad6e7c2e 61 | Name = infrastructure.infra[0] 62 | Node ID = 4b966e54 63 | Job ID = infrastructure 64 | Job Version = 0 65 | Client Status = running 66 | Client Description = Tasks are running 67 | Desired Status = run 68 | Desired Description = 69 | Created = 1m16s ago 70 | Modified = 53s ago 71 | 72 | Task "cadvisor" is "running" 73 | Task Resources 74 | CPU Memory Disk Addresses 75 | 44/50 MHz 23 MiB/100 MiB 300 MiB http: 10.0.3.185:8080 76 | 77 | Task Events: 78 | Started At = 2019-04-17T20:03:26Z 79 | Finished At = N/A 80 | Total Restarts = 0 81 | Last Restart = N/A 82 | 83 | Recent Events: 84 | Time Type Description 85 | 2019-04-17T20:03:26Z Started Task started by client 86 | 2019-04-17T20:03:23Z Driver Downloading image 87 | 2019-04-17T20:03:23Z Task Setup Building Task Directory 88 | 2019-04-17T20:03:23Z Received Task received by client 89 | 90 
| Task "consul" is "running" 91 | Task Resources 92 | CPU Memory Disk Addresses 93 | 97/100 MHz 16 MiB/300 MiB 300 MiB consul_dns: 10.0.3.185:8600 94 | consul: 10.0.3.185:8500 95 | 96 | Task Events: 97 | Started At = 2019-04-17T20:03:26Z 98 | Finished At = N/A 99 | Total Restarts = 0 100 | Last Restart = N/A 101 | 102 | Recent Events: 103 | Time Type Description 104 | 2019-04-17T20:03:26Z Started Task started by client 105 | 2019-04-17T20:03:23Z Driver Downloading image 106 | 2019-04-17T20:03:23Z Task Setup Building Task Directory 107 | 2019-04-17T20:03:23Z Received Task received by client 108 | 109 | Task "dnsmasq" is "running" 110 | Task Resources 111 | CPU Memory Disk Addresses 112 | 4/50 MHz 0 B/100 MiB 300 MiB dns: 10.0.3.185:53 113 | 114 | Task Events: 115 | Started At = 2019-04-17T20:03:26Z 116 | Finished At = N/A 117 | Total Restarts = 0 118 | Last Restart = N/A 119 | 120 | Recent Events: 121 | Time Type Description 122 | 2019-04-17T20:03:26Z Started Task started by client 123 | 2019-04-17T20:03:23Z Driver Downloading image 124 | 2019-04-17T20:03:23Z Task Setup Building Task Directory 125 | 2019-04-17T20:03:23Z Received Task received by client 126 | 127 | Task "node-exporter" is "running" 128 | Task Resources 129 | CPU Memory Disk Addresses 130 | 0/50 MHz 4.4 MiB/100 MiB 300 MiB http: 10.0.3.185:9100 131 | 132 | Task Events: 133 | Started At = 2019-04-17T20:03:26Z 134 | Finished At = N/A 135 | Total Restarts = 0 136 | Last Restart = N/A 137 | 138 | Recent Events: 139 | Time Type Description 140 | 2019-04-17T20:03:26Z Started Task started by client 141 | 2019-04-17T20:03:23Z Driver Downloading image 142 | 2019-04-17T20:03:23Z Task Setup Building Task Directory 143 | 2019-04-17T20:03:23Z Received Task received by client 144 | 145 | Task "prometheus" is "running" 146 | Task Resources 147 | CPU Memory Disk Addresses 148 | 0/50 MHz 21 MiB/100 MiB 300 MiB http: 10.0.3.185:9090 149 | 150 | Task Events: 151 | Started At = 2019-04-17T20:03:25Z 152 | Finished At = N/A 153 | Total Restarts = 0 154 | Last Restart = N/A 155 | 156 | Recent Events: 157 | Time Type Description 158 | 2019-04-17T20:03:25Z Started Task started by client 159 | 2019-04-17T20:03:23Z Driver Downloading image 160 | 2019-04-17T20:03:23Z Task Setup Building Task Directory 161 | 2019-04-17T20:03:23Z Received Task received by client 162 | ``` 163 | 164 | As you could have seen the allocation has been splitted by tasks. This can be configured in the nomad/infrastructure.hcl file. 165 | 166 | The consul container is used to register the different services and the dnsmasq container will be used as a dns forwarder towards the consul dns interface so the services are resolvable between each other. 167 | --------------------------------------------------------------------------------