├── .gitignore
├── LICENSE
├── README.md
├── application-deployment
├── README.md
├── consul-connect-with-nomad
│ ├── .gitignore
│ ├── ConsulIntention.png
│ ├── ConsulServices.png
│ ├── NomadUI.png
│ ├── README.md
│ ├── aws
│ │ ├── main.tf
│ │ ├── modules
│ │ │ └── nomadconsul
│ │ │ │ └── nomadconsul.tf
│ │ ├── outputs.tf
│ │ ├── packer
│ │ │ ├── README.md
│ │ │ └── packer.json
│ │ ├── terraform.tfvars.example
│ │ ├── user-data-client.sh
│ │ ├── user-data-server.sh
│ │ ├── variables.tf
│ │ └── vpc.tf
│ └── shared
│ │ ├── config
│ │ ├── consul.json
│ │ ├── consul_client.json
│ │ ├── consul_upstart.conf
│ │ ├── nomad.hcl
│ │ ├── nomad_client.hcl
│ │ └── nomad_upstart.conf
│ │ ├── jobs
│ │ └── catalogue-with-connect.nomad
│ │ └── scripts
│ │ ├── client.sh
│ │ ├── run-proxy.sh
│ │ ├── server.sh
│ │ └── setup.sh
├── fabio
│ ├── README.md
│ └── fabio.nomad
├── go-blue-green
│ ├── README.md
│ └── go-app.nomad
├── go-vault-dynamic-mysql-creds
│ ├── README.md
│ ├── application.nomad
│ ├── golang_vault_setup.sh
│ └── policy-mysql.hcl
├── haproxy
│ ├── README.md
│ └── haproxy.nomad
├── http-echo
│ └── http-echo.nomad
├── jenkins
│ └── jenkins-java.nomad
├── microservices
│ ├── ConsulUI.png
│ ├── LICENSE
│ ├── NomadUI.png
│ ├── README.md
│ ├── SockShopApp.png
│ ├── aws
│ │ ├── delay-vault-aws
│ │ ├── main.tf
│ │ ├── modules
│ │ │ └── nomadconsul
│ │ │ │ └── nomadconsul.tf
│ │ ├── outputs.tf
│ │ ├── packer
│ │ │ └── packer.json
│ │ ├── terraform.tfvars.example
│ │ ├── user-data-client.sh
│ │ ├── user-data-server.sh
│ │ ├── variables.tf
│ │ └── vpc.tf
│ ├── shared
│ │ ├── config
│ │ │ ├── consul.json
│ │ │ ├── consul_client.json
│ │ │ ├── consul_upstart.conf
│ │ │ ├── nomad.hcl
│ │ │ ├── nomad_client.hcl
│ │ │ └── nomad_upstart.conf
│ │ ├── jobs
│ │ │ ├── sockshop.nomad
│ │ │ └── sockshopui.nomad
│ │ └── scripts
│ │ │ ├── client.sh
│ │ │ ├── server.sh
│ │ │ └── setup.sh
│ ├── slides
│ │ └── HashiCorpMicroservicesDemo.pptx
│ └── vault
│ │ ├── aws-policy.json
│ │ ├── nomad-cluster-role.json
│ │ ├── nomad-server-policy.hcl
│ │ ├── setup_vault.sh
│ │ └── sockshop-read.hcl
├── nginx-vault-kv
│ ├── README.md
│ ├── kv_vault_setup.sh
│ ├── nginx-kv-secret.nomad
│ └── test.policy
├── nginx-vault-pki
│ ├── README.md
│ ├── nginx-pki-secret.nomad
│ ├── pki_vault_setup.sh
│ └── policy-superuser.hcl
├── nginx
│ ├── README.md
│ ├── kv_consul_setup.sh
│ └── nginx-consul.nomad
├── redis
│ ├── README.md
│ └── redis.nomad
└── vault
│ └── vault_exec.hcl
├── assets
├── Consul_GUI_redis.png
├── Fabio_GUI_empty.png
├── Fabio_GUI_goapp.png
├── Nginx_Consul.png
├── Nginx_PKI.png
├── NomadLogo.png
├── Nomad_GUI_redis.png
├── Vault_GUI_leases.png
├── Vault_GUI_main.png
├── Vault_GUI_mysql.png
├── go-app-v1.png
└── go-app-v2.png
├── multi-cloud
└── README.md
├── operations
├── README.md
├── multi-job-demo
│ ├── README.md
│ ├── aws
│ │ ├── acls
│ │ │ ├── anonymous.hcl
│ │ │ ├── dev.hcl
│ │ │ └── qa.hcl
│ │ ├── bootstrap_token
│ │ ├── get_bootstrap_token.sh
│ │ ├── main.tf
│ │ ├── modules
│ │ │ ├── network
│ │ │ │ ├── outputs.tf
│ │ │ │ ├── variables.tf
│ │ │ │ └── vpc.tf
│ │ │ └── nomadconsul
│ │ │ │ ├── nomadconsul.tf
│ │ │ │ ├── outputs.tf
│ │ │ │ ├── scripts
│ │ │ │ ├── user-data-client.sh
│ │ │ │ └── user-data-server.sh
│ │ │ │ └── variables.tf
│ │ ├── outputs.tf
│ │ ├── packer
│ │ │ ├── README.md
│ │ │ └── packer.json
│ │ ├── sentinel
│ │ │ ├── allow-docker-and-java-drivers.sentinel
│ │ │ ├── prevent-docker-host-network.sentinel
│ │ │ └── restrict-docker-images.sentinel
│ │ ├── stop_all_jobs.sh
│ │ ├── terraform.tfvars.example
│ │ └── variables.tf
│ └── shared
│ │ ├── config
│ │ ├── consul.json
│ │ ├── consul_client.json
│ │ ├── consul_upstart.conf
│ │ ├── nomad.hcl
│ │ ├── nomad_client.hcl
│ │ └── nomad_upstart.conf
│ │ ├── jobs
│ │ ├── catalogue.nomad
│ │ ├── sleep.nomad
│ │ ├── webserver-test.nomad
│ │ ├── website-dev.nomad
│ │ └── website-qa.nomad
│ │ └── scripts
│ │ ├── client.sh
│ │ ├── server.sh
│ │ └── setup.sh
├── nomad-vault
│ └── README.md
├── provision-nomad
│ ├── README.md
│ ├── best-practices
│ │ └── terraform-aws
│ │ │ ├── README.md
│ │ │ ├── gitignore.tf
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── terraform.auto.tfvars
│ │ │ └── variables.tf
│ ├── dev
│ │ ├── terraform-aws
│ │ │ ├── README.md
│ │ │ ├── gitignore.tf
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── terraform.auto.tfvars
│ │ │ └── variables.tf
│ │ └── vagrant-local
│ │ │ ├── README.md
│ │ │ ├── Vagrantfile
│ │ │ └── enterprise-binaries
│ │ │ └── README.md
│ ├── quick-start
│ │ └── terraform-aws
│ │ │ ├── README.md
│ │ │ ├── gitignore.tf
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ ├── terraform.auto.tfvars
│ │ │ └── variables.tf
│ └── templates
│ │ ├── best-practices-bastion-systemd.sh.tpl
│ │ ├── best-practices-consul-systemd.sh.tpl
│ │ ├── best-practices-nomad-client-systemd.sh.tpl
│ │ ├── best-practices-nomad-server-systemd.sh.tpl
│ │ ├── best-practices-vault-systemd.sh.tpl
│ │ ├── install-base.sh.tpl
│ │ ├── install-consul-systemd.sh.tpl
│ │ ├── install-docker.sh.tpl
│ │ ├── install-java.sh.tpl
│ │ ├── install-nomad-systemd.sh.tpl
│ │ ├── install-vault-systemd.sh.tpl
│ │ ├── quick-start-bastion-systemd.sh.tpl
│ │ ├── quick-start-consul-systemd.sh.tpl
│ │ ├── quick-start-nomad-client-systemd.sh.tpl
│ │ ├── quick-start-nomad-server-systemd.sh.tpl
│ │ └── quick-start-vault-systemd.sh.tpl
└── sentinel
│ ├── README.md
│ ├── jobs
│ ├── batch.nomad
│ ├── docs.nomad
│ ├── example.nomad
│ └── example_two_groups.nomad
│ └── sentinel_policies
│ ├── all_drivers_docker.sentinel
│ ├── allow-docker-and-java-drivers.sentinel
│ ├── bind-namespaces-to-clients.sentinel
│ ├── enforce_multi_dc.sentinel
│ ├── policy_per_namespace.sentinel
│ ├── prevent-docker-host-network.sentinel
│ ├── require-docker-digests.sentinel
│ ├── resource_check.sentinel
│ ├── restrict-docker-images-and-prevent-latest-tag.sentinel
│ ├── restrict_batch_deploy_time.sentinel
│ ├── restrict_docker_images.sentinel
│ └── restrict_namespace_to_dc.sentinel
├── provision
└── vagrant
│ ├── README.md
│ ├── Vagrantfile
│ ├── vault_init_and_unseal.sh
│ └── vault_nomad_integration.sh
└── workload-flexibility
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled files
2 | *.tfstate
3 | *.tfstate.backup
4 | *.tfstate.lock.info
5 |
6 | # Directories
7 | .terraform/
8 | .vagrant/
9 |
10 | # Ignored Terraform files
11 | *gitignore*.tf
12 |
13 | .vagrant
14 | nomad
15 | vault
16 | consul
17 | *.pem
18 | .DS_Store
19 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ----
2 | - Website: https://www.nomadproject.io
3 | - GitHub repository: https://github.com/hashicorp/nomad
4 | - IRC: `#nomad-tool` on Freenode
5 | - Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce)
6 | - Discussion list: [Google Groups](https://groups.google.com/group/nomad-tool)
7 | - Resources: https://www.nomadproject.io/resources.html
8 |
9 |
10 |
11 | ----
12 |
13 | # Nomad-Guides
14 | Example usage of HashiCorp Nomad (Work In Progress)
15 |
16 | ## provision
17 | This area will contain instructions to provision Nomad and Consul as a first step to start using these tools.
18 |
19 | These may include use cases such as installing Nomad on cloud services via Terraform, within virtual environments using Vagrant, or running Nomad in local development mode.
20 |
21 | ## application-deployment
22 | This area will contain instructions and guides for deploying applications on Nomad, including examples and guides for deploying secrets (from Vault) into your Nomad applications.
23 |
24 | ## operations
25 | This area will contain instructions for operating Nomad. This includes topics such as configuring Sentinel policies, namespaces, ACLs etc.
26 |
27 | ## `gitignore.tf` Files
28 |
29 | You may notice some [`gitignore.tf`](operations/provision-nomad/best-practices/terraform-aws/gitignore.tf) files in certain directories. `.tf` files that contain the word "gitignore" are ignored by git via a rule in the [`.gitignore`](./.gitignore) file.
30 |
31 | If you have local Terraform configuration that you want ignored (like Terraform backend configuration), create a new file in the directory (separate from `gitignore.tf`) that contains the word "gitignore" (e.g. `backend.gitignore.tf`) and it won't be picked up as a change.
32 |
33 | ### Contributing
34 | We welcome contributions and feedback! For guide submissions, please see [the contributions guide](CONTRIBUTING.md).
35 |
--------------------------------------------------------------------------------
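
As a concrete illustration of the `gitignore.tf` convention described in the README above, here is a minimal sketch: it writes a local-only backend file whose name contains "gitignore" and asks git to confirm the file is ignored. The S3 bucket, key, and region are placeholders, not values from this repo.

```bash
# Write local-only backend configuration; any *.tf name containing "gitignore" is ignored.
cat > backend.gitignore.tf <<'EOF'
terraform {
  backend "s3" {
    bucket = "my-state-bucket"                  # placeholder bucket name
    key    = "nomad-guides/terraform.tfstate"   # placeholder state key
    region = "us-east-1"
  }
}
EOF

# Prints the .gitignore rule (*gitignore*.tf) that matches the new file.
git check-ignore -v backend.gitignore.tf
```
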
/application-deployment/README.md:
--------------------------------------------------------------------------------
1 | # To be implemented
2 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/.gitignore:
--------------------------------------------------------------------------------
1 | *~
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/ConsulIntention.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/nomad-guides/cdda5a0ebaaa2c009783c24e98817622d9b7593a/application-deployment/consul-connect-with-nomad/ConsulIntention.png
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/ConsulServices.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/nomad-guides/cdda5a0ebaaa2c009783c24e98817622d9b7593a/application-deployment/consul-connect-with-nomad/ConsulServices.png
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/NomadUI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/nomad-guides/cdda5a0ebaaa2c009783c24e98817622d9b7593a/application-deployment/consul-connect-with-nomad/NomadUI.png
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/aws/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.11.10"
3 | }
4 |
5 | provider "aws" {
6 | region = "${var.region}"
7 | }
8 |
9 | module "nomadconsul" {
10 | source = "./modules/nomadconsul"
11 |
12 | region = "${var.region}"
13 | ami = "${var.ami}"
14 | vpc_id = "${aws_vpc.catalogue.id}"
15 | subnet_id = "${aws_subnet.public-subnet.id}"
16 | server_instance_type = "${var.server_instance_type}"
17 | client_instance_type = "${var.client_instance_type}"
18 | key_name = "${var.key_name}"
19 | server_count = "${var.server_count}"
20 | client_count = "${var.client_count}"
21 | name_tag_prefix = "${var.name_tag_prefix}"
22 | cluster_tag_value = "${var.cluster_tag_value}"
23 | owner = "${var.owner}"
24 | ttl = "${var.ttl}"
25 | }
26 |
27 | #resource "null_resource" "start_catalogue" {
28 | # provisioner "remote-exec" {
29 | # inline = [
30 | # "sleep 180",
31 | # "nomad job run -address=http://${module.nomadconsul.primary_server_private_ips[0]}:4646 /home/ubuntu/catalogue-with-connect.nomad",
32 | # ]
33 |
34 | # connection {
35 | # host = "${module.nomadconsul.primary_server_public_ips[0]}"
36 | # type = "ssh"
37 | # agent = false
38 | # user = "ubuntu"
39 | # private_key = "${var.private_key_data}"
40 | # }
41 | # }
42 | #}
43 |
--------------------------------------------------------------------------------
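
The commented-out `null_resource` above shows the intended way to launch the catalogue job right after provisioning. If you leave it commented out, a minimal sketch of doing the same thing by hand might look like the following; the IP addresses and key path are placeholders you substitute from the Terraform outputs.

```bash
# Placeholders: substitute the server IPs reported by `terraform apply`.
SERVER_PUBLIC_IP=1.2.3.4
SERVER_PRIVATE_IP=10.0.1.10

# server.sh copies the job files to /home/ubuntu on the servers, so run the job from there.
ssh -i /path/to/key.pem ubuntu@"$SERVER_PUBLIC_IP" \
  "nomad job run -address=http://$SERVER_PRIVATE_IP:4646 /home/ubuntu/catalogue-with-connect.nomad"
```
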
/application-deployment/consul-connect-with-nomad/aws/outputs.tf:
--------------------------------------------------------------------------------
1 | output "IP_Addresses" {
2 | sensitive = true
3 |   value = <<CONFIGURATION
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/aws/user-data-client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
6 | sudo bash /ops/shared/scripts/client.sh "${region}" "${cluster_tag_value}" "${server_ip}"
7 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/aws/user-data-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
6 | sudo bash /ops/shared/scripts/server.sh "${server_count}" "${region}" "${cluster_tag_value}"
7 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "The AWS region to deploy to."
3 | default = "us-east-1"
4 | }
5 |
6 | variable "ami" {
7 | description = "AMI ID"
8 | default = "ami-01d821506cee7b2c4"
9 | }
10 |
11 | variable "vpc_cidr" {
12 | description = "VPC CIDR"
13 | default = "10.0.0.0/16"
14 | }
15 |
16 | variable "subnet_cidr" {
17 | description = "Subnet CIDR"
18 | default = "10.0.1.0/24"
19 | }
20 |
21 | variable "subnet_az" {
22 | description = "The AZ for the public subnet"
23 | default = "us-east-1a"
24 | }
25 |
26 | variable "server_instance_type" {
27 | description = "The AWS instance type to use for servers."
28 | default = "t2.medium"
29 | }
30 |
31 | variable "client_instance_type" {
32 | description = "The AWS instance type to use for clients."
33 | default = "t2.medium"
34 | }
35 |
36 | variable "key_name" {
37 | description = "name of pre-existing SSH key to be used for provisioner auth"
38 | }
39 |
40 | variable "private_key_data" {
41 | description = "contents of the private key"
42 | }
43 |
44 | variable "server_count" {
45 | description = "The number of servers to provision."
46 | default = "1"
47 | }
48 |
49 | variable "client_count" {
50 | description = "The number of clients to provision."
51 | default = "2"
52 | }
53 |
54 | variable "name_tag_prefix" {
55 | description = "prefixed to Name tag added to EC2 instances and other AWS resources"
56 | default = "nomad-consul"
57 | }
58 |
59 | variable "cluster_tag_value" {
60 | description = "Used by Consul to automatically form a cluster."
61 | default = "nomad-consul-demo"
62 | }
63 |
64 | variable "owner" {
65 | description = "Adds owner tag to EC2 instances"
66 | default = ""
67 | }
68 |
69 | variable "ttl" {
70 | description = "Adds TTL tag to EC2 instances for reaping purposes. Reaping is only done for instances deployed by HashiCorp SEs. In any case, -1 means no reaping."
71 | default = "-1"
72 | }
73 |
--------------------------------------------------------------------------------
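
Only `key_name` and `private_key_data` have no defaults, so a `terraform.tfvars` (modeled on `terraform.tfvars.example`, which is not reproduced here) needs at least those two values. A minimal sketch, with every value a placeholder:

```bash
cat > terraform.tfvars <<'EOF'
key_name         = "my-keypair"                            # existing EC2 key pair name (placeholder)
private_key_data = "-----BEGIN RSA PRIVATE KEY----- ..."   # contents of the matching .pem file (placeholder)
owner            = "someone@example.com"                   # optional owner tag (placeholder)
EOF

terraform init
terraform plan
```
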
/application-deployment/consul-connect-with-nomad/aws/vpc.tf:
--------------------------------------------------------------------------------
1 | # Define the VPC.
2 | resource "aws_vpc" "catalogue" {
3 | cidr_block = "${var.vpc_cidr}"
4 | enable_dns_hostnames = true
5 |
6 | tags={
7 | Name = "${var.name_tag_prefix} VPC"
8 | }
9 | }
10 |
11 | # Create an Internet Gateway for the VPC.
12 | resource "aws_internet_gateway" "catalogue" {
13 | vpc_id = "${aws_vpc.catalogue.id}"
14 |
15 | tags={
16 | Name = "${var.name_tag_prefix} IGW"
17 | }
18 | }
19 |
20 | # Create a public subnet.
21 | resource "aws_subnet" "public-subnet" {
22 | vpc_id = "${aws_vpc.catalogue.id}"
23 | cidr_block = "${var.subnet_cidr}"
24 | availability_zone = "${var.subnet_az}"
25 | map_public_ip_on_launch = true
26 | depends_on = ["aws_internet_gateway.catalogue"]
27 |
28 | tags={
29 | Name = "${var.name_tag_prefix} Public Subnet"
30 | }
31 | }
32 |
33 | # Create a route table allowing all addresses access to the IGW.
34 | resource "aws_route_table" "public" {
35 | vpc_id = "${aws_vpc.catalogue.id}"
36 |
37 | route {
38 | cidr_block = "0.0.0.0/0"
39 | gateway_id = "${aws_internet_gateway.catalogue.id}"
40 | }
41 |
42 | tags={
43 | Name = "${var.name_tag_prefix} Public Route Table"
44 | }
45 | }
46 |
47 | # Now associate the route table with the public subnet
48 | # giving all public subnet instances access to the internet.
49 | resource "aws_route_table_association" "public-subnet" {
50 | subnet_id = "${aws_subnet.public-subnet.id}"
51 | route_table_id = "${aws_route_table.public.id}"
52 | }
53 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/config/consul.json:
--------------------------------------------------------------------------------
1 | {
2 | "log_level": "INFO",
3 | "server": true,
4 | "ui": true,
5 | "data_dir": "/opt/consul/data",
6 | "bind_addr": "0.0.0.0",
7 | "client_addr": "0.0.0.0",
8 | "advertise_addr": "IP_ADDRESS",
9 | "recursors": ["10.0.0.2"],
10 | "bootstrap_expect": SERVER_COUNT,
11 | "service": {
12 | "name": "consul"
13 | },
14 | "connect": {
15 | "enabled": true
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/config/consul_client.json:
--------------------------------------------------------------------------------
1 | {
2 | "ui": true,
3 | "log_level": "INFO",
4 | "data_dir": "/opt/consul/data",
5 | "bind_addr": "0.0.0.0",
6 | "client_addr": "0.0.0.0",
7 | "advertise_addr": "IP_ADDRESS",
8 | "recursors": ["10.0.0.2"],
9 | "connect": {
10 | "enabled": true
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/config/consul_upstart.conf:
--------------------------------------------------------------------------------
1 | description "Consul"
2 |
3 | start on runlevel [2345]
4 | stop on runlevel [!2345]
5 |
6 | respawn
7 |
8 | console log
9 |
10 | script
11 | if [ -f "/etc/service/consul" ]; then
12 | . /etc/service/consul
13 | fi
14 |
15 | exec /usr/local/bin/consul agent \
16 | -config-dir="/etc/consul.d" \
17 | -dns-port="53" \
18 | -retry-join "provider=aws tag_key=ConsulAutoJoin tag_value=CLUSTER_TAG_VALUE region=REGION" \
19 | >>/var/log/consul.log 2>&1
20 | end script
21 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/config/nomad.hcl:
--------------------------------------------------------------------------------
1 | data_dir = "/opt/nomad/data"
2 | bind_addr = "IP_ADDRESS"
3 |
4 | # Enable the server
5 | server {
6 | enabled = true
7 | bootstrap_expect = SERVER_COUNT
8 | }
9 |
10 | name = "nomad@IP_ADDRESS"
11 |
12 | consul {
13 | address = "IP_ADDRESS:8500"
14 | }
15 |
16 | telemetry {
17 | publish_allocation_metrics = true
18 | publish_node_metrics = true
19 | }
20 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/config/nomad_client.hcl:
--------------------------------------------------------------------------------
1 | data_dir = "/opt/nomad/data"
2 | bind_addr = "IP_ADDRESS"
3 | name = "nomad@IP_ADDRESS"
4 |
5 | # Enable the client
6 | client {
7 | enabled = true
8 | options = {
9 | driver.java.enable = "1"
10 | docker.cleanup.image = false
11 | }
12 | }
13 |
14 | consul {
15 | address = "IP_ADDRESS:8500"
16 | }
17 |
18 | telemetry {
19 | publish_allocation_metrics = true
20 | publish_node_metrics = true
21 | }
22 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/config/nomad_upstart.conf:
--------------------------------------------------------------------------------
1 | description "Nomad"
2 |
3 | start on runlevel [2345]
4 | stop on runlevel [!2345]
5 |
6 | respawn
7 |
8 | console log
9 |
10 | script
11 | if [ -f "/etc/service/nomad" ]; then
12 | . /etc/service/nomad
13 | fi
14 |
15 | exec /usr/local/bin/nomad agent \
16 | -config="/etc/nomad.d/nomad.hcl" \
17 | >>/var/log/nomad.log 2>&1
18 | end script
19 |
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/jobs/catalogue-with-connect.nomad:
--------------------------------------------------------------------------------
1 | job "catalogue-with-connect" {
2 | datacenters = ["dc1"]
3 |
4 | constraint {
5 | attribute = "${attr.kernel.name}"
6 | value = "linux"
7 | }
8 |
9 | constraint {
10 | operator = "distinct_hosts"
11 | value = "true"
12 | }
13 |
14 | update {
15 | stagger = "10s"
16 | max_parallel = 1
17 | }
18 |
19 |
20 | # - catalogue - #
21 | group "catalogue" {
22 | count = 1
23 |
24 | restart {
25 | attempts = 10
26 | interval = "5m"
27 | delay = "25s"
28 | mode = "delay"
29 | }
30 |
31 | # - app - #
32 | task "catalogue" {
33 | driver = "docker"
34 |
35 | config {
36 | image = "rberlind/catalogue:latest"
37 | command = "/app"
38 | args = ["-port", "8080", "-DSN", "catalogue_user:default_password@tcp(${NOMAD_ADDR_catalogueproxy_upstream})/socksdb"]
39 | hostname = "catalogue.service.consul"
40 | network_mode = "bridge"
41 | port_map = {
42 | http = 8080
43 | }
44 | }
45 |
46 | service {
47 | name = "catalogue"
48 | tags = ["app", "catalogue"]
49 | port = "http"
50 | }
51 |
52 | resources {
53 |         cpu = 100 # 100 MHz
54 |         memory = 128 # 128 MB
55 | network {
56 | mbits = 10
57 | port "http" {
58 | static = 8080
59 | }
60 | }
61 | }
62 | } # - end app - #
63 |
64 | # - catalogue connect upstream proxy - #
65 | task "catalogueproxy" {
66 | driver = "exec"
67 |
68 | config {
69 | command = "/usr/local/bin/run-proxy.sh"
70 | args = ["${NOMAD_IP_proxy}", "${NOMAD_TASK_DIR}", "catalogue"]
71 | }
72 |
73 | meta {
74 | proxy_name = "catalogue"
75 | proxy_target = "catalogue-db"
76 | }
77 |
78 | template {
79 |         data = <<EOH
--------------------------------------------------------------------------------
/application-deployment/consul-connect-with-nomad/shared/scripts/run-proxy.sh:
--------------------------------------------------------------------------------
6 |   kill -TERM "$child" 2>/dev/null
7 | }
8 |
9 | trap term SIGINT
10 |
11 | private_ip=$1
12 | local_dir=$2
13 | proxy=$3
14 | echo "main PID is $$"
15 | echo "private_ip is ${private_ip}"
16 | echo "local_dir is ${local_dir}"
17 | echo "proxy is ${proxy}"
18 | curl --request PUT --data @${local_dir}/${proxy}-proxy.json http://localhost:8500/v1/agent/service/register
19 |
20 | /usr/local/bin/consul connect proxy -http-addr http://${private_ip}:8500 -sidecar-for ${proxy} &
21 |
22 | child=$!
23 | wait "$child"
24 |
--------------------------------------------------------------------------------
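
The catalogue job invokes this script as its `catalogueproxy` task, passing the client's IP, the task directory, and the proxy name. Run by hand for debugging, a sketch looks like the following; both variable values are hypothetical, and the script expects `<task-dir>/catalogue-proxy.json` (rendered by the job's template) to already exist.

```bash
PRIVATE_IP=10.0.1.25      # placeholder: the node's private IP (NOMAD_IP_proxy in the job)
TASK_DIR=/tmp/catalogue   # placeholder: directory containing catalogue-proxy.json (NOMAD_TASK_DIR in the job)

# Registers the sidecar service with the local Consul agent, then starts the Connect proxy.
/usr/local/bin/run-proxy.sh "$PRIVATE_IP" "$TASK_DIR" catalogue
```
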
/application-deployment/consul-connect-with-nomad/shared/scripts/server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | CONFIGDIR=/ops/shared/config
6 |
7 | CONSULCONFIGDIR=/etc/consul.d
8 | NOMADCONFIGDIR=/etc/nomad.d
9 | HOME_DIR=ubuntu
10 |
11 | # Wait for network
12 | sleep 15
13 |
14 | IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
15 | SERVER_COUNT=$1
16 | REGION=$2
17 | CLUSTER_TAG_VALUE=$3
18 |
19 | # Consul
20 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul.json
21 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/consul.json
22 | sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul_upstart.conf
23 | sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul_upstart.conf
24 | cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
25 | cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf
26 |
27 | service consul start
28 | sleep 10
29 | export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
30 |
31 | # Nomad
32 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad.hcl
33 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/nomad.hcl
34 | cp $CONFIGDIR/nomad.hcl $NOMADCONFIGDIR
35 | cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf
36 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
37 |
38 | echo "nameserver $IP_ADDRESS" | tee /etc/resolv.conf.new
39 | cat /etc/resolv.conf | tee --append /etc/resolv.conf.new
40 | mv /etc/resolv.conf.new /etc/resolv.conf
41 |
42 | # Add search service.consul at bottom of /etc/resolv.conf
43 | echo "search service.consul" | tee --append /etc/resolv.conf
44 |
45 | # Set env vars for tool CLIs
46 | echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | tee --append /home/$HOME_DIR/.bashrc
47 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | tee --append /home/$HOME_DIR/.bashrc
48 |
49 | # Start Docker
50 | service docker restart
51 |
52 | # Copy Nomad jobs and scripts to desired locations
53 | cp /ops/shared/jobs/* /home/ubuntu/.
54 | chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/
55 | chmod 666 /home/ubuntu/*
56 |
57 | # Start Nomad
58 | service nomad start
59 | sleep 60
60 |
--------------------------------------------------------------------------------
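
After this script finishes, the server should be running both a Consul and a Nomad agent, with `CONSUL_HTTP_ADDR` and `NOMAD_ADDR` exported in `~/.bashrc`. A few sanity checks you might run from a fresh SSH session:

```bash
consul members                      # every server and client node listed as "alive"
nomad server members                # the expected server count, with one leader
nomad node status                   # client nodes registered and "ready"
dig +short consul.service.consul    # Consul DNS answered via the nameserver prepended to /etc/resolv.conf
```
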
/application-deployment/consul-connect-with-nomad/shared/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | : ${DEBUG=0}
4 | if [ "0" != "$DEBUG" ]; then
5 | set -x
6 | fi
7 |
8 | set -eu
9 |
10 | cd /ops
11 |
12 | CONFIGDIR=/ops/shared/config
13 |
14 | : ${CONSULVERSION=1.10.1}
15 | CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
16 | CONSULCONFIGDIR=/etc/consul.d
17 | CONSULDIR=/opt/consul
18 |
19 | : ${NOMADVERSION=1.1.2}
20 | NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
21 | NOMADCONFIGDIR=/etc/nomad.d
22 | NOMADDIR=/opt/nomad
23 |
24 | # Dependencies
25 | sudo apt-get install -y software-properties-common
26 | sudo apt-get update
27 | sudo apt-get install -y unzip tree redis-tools jq
28 | sudo apt-get install -y upstart-sysv
29 | sudo update-initramfs -u
30 |
31 | # Disable the firewall
32 | sudo ufw disable
33 |
34 | # Download Consul
35 | curl -L $CONSULDOWNLOAD > consul.zip
36 |
37 | ## Install Consul
38 | sudo unzip consul.zip -d /usr/local/bin
39 | sudo chmod 0755 /usr/local/bin/consul
40 | sudo chown root:root /usr/local/bin/consul
41 | sudo setcap "cap_net_bind_service=+ep" /usr/local/bin/consul
42 |
43 | ## Configure Consul
44 | sudo mkdir -p $CONSULCONFIGDIR
45 | sudo chmod 755 $CONSULCONFIGDIR
46 | sudo mkdir -p $CONSULDIR
47 | sudo chmod 755 $CONSULDIR
48 |
49 | # Download Nomad
50 | curl -L $NOMADDOWNLOAD > nomad.zip
51 |
52 | ## Install Nomad
53 | sudo unzip nomad.zip -d /usr/local/bin
54 | sudo chmod 0755 /usr/local/bin/nomad
55 | sudo chown root:root /usr/local/bin/nomad
56 |
57 | ## Configure Nomad
58 | sudo mkdir -p $NOMADCONFIGDIR
59 | sudo chmod 755 $NOMADCONFIGDIR
60 | sudo mkdir -p $NOMADDIR
61 | sudo chmod 755 $NOMADDIR
62 |
63 | # Docker
64 | sudo apt-get update
65 | sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
66 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
67 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
68 | sudo apt-get update
69 | sudo apt-get install -y docker-ce=17.09.1~ce-0~ubuntu
70 | sudo usermod -aG docker ubuntu
71 |
--------------------------------------------------------------------------------
/application-deployment/fabio/fabio.nomad:
--------------------------------------------------------------------------------
1 |
2 | job "fabio" {
3 | datacenters = ["dc1"]
4 | type = "system"
5 |
6 | group "fabio" {
7 | count = 1
8 |
9 | task "fabio" {
10 | driver = "raw_exec"
11 |
12 | artifact {
13 | source = "https://github.com/fabiolb/fabio/releases/download/v1.5.4/fabio-1.5.4-go1.9.2-linux_amd64"
14 | }
15 |
16 | config {
17 | command = "fabio-1.5.4-go1.9.2-linux_amd64"
18 | }
19 |
20 | resources {
21 |         cpu = 100 # 100 MHz
22 |         memory = 128 # 128 MB
23 | network {
24 | mbits = 10
25 | port "http" {
26 | static = 9999
27 | }
28 | port "admin" {
29 | static = 9998
30 | }
31 | }
32 | }
33 | }
34 | }
35 | }
--------------------------------------------------------------------------------
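
Fabio builds its routing table from Consul services whose tags start with `urlprefix-`, such as the `go-app` job in `go-blue-green`. A minimal sketch of wiring the two together; the client IP is a placeholder:

```bash
nomad run fabio.nomad                     # one fabio instance per client (system job)
nomad run ../go-blue-green/go-app.nomad   # registers "go-app" with the urlprefix-/go-app tag

CLIENT_IP=10.0.1.20                       # placeholder: any Nomad client address
curl "http://$CLIENT_IP:9999/go-app"      # fabio proxies the request to a healthy go-app allocation
# The generated routing table is visible in fabio's admin UI on http://$CLIENT_IP:9998/
```
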
/application-deployment/go-blue-green/go-app.nomad:
--------------------------------------------------------------------------------
1 | job "go-app" {
2 | datacenters = ["dc1"]
3 | type = "service"
4 | update {
5 | max_parallel = 1
6 | min_healthy_time = "10s"
7 | healthy_deadline = "3m"
8 | auto_revert = false
9 | canary = 3
10 | }
11 | group "go-app" {
12 | count = 3
13 | restart {
14 | # The number of attempts to run the job within the specified interval.
15 | attempts = 10
16 | interval = "5m"
17 | # The "delay" parameter specifies the duration to wait before restarting
18 | # a task after it has failed.
19 | delay = "25s"
20 | mode = "delay"
21 | }
22 | ephemeral_disk {
23 | size = 300
24 | }
25 | task "go-app" {
26 | # The "driver" parameter specifies the task driver that should be used to
27 | # run the task.
28 | driver = "docker"
29 | config {
30 | # change to go-app-2.0
31 | image = "aklaas2/go-app-1.0"
32 | port_map {
33 | http = 8080
34 | }
35 | }
36 | resources {
37 | cpu = 500 # 500 MHz
38 | memory = 256 # 256MB
39 | network {
40 | mbits = 10
41 | port "http" {
42 | #static=8080
43 | }
44 | }
45 | }
46 | service {
47 | name = "go-app"
48 | tags = [ "urlprefix-/go-app", "go-app" ]
49 | port = "http"
50 | check {
51 | name = "alive"
52 | type = "tcp"
53 | interval = "10s"
54 | timeout = "2s"
55 | }
56 | }
57 | }
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
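
Because the `update` stanza sets `canary = 3` with `auto_revert = false`, a new image version does not replace the running allocations until the deployment is promoted. A sketch of the blue/green flow after editing the image to `aklaas2/go-app-2.0`, as the comment in the job suggests; the deployment ID is a placeholder you copy from the status output:

```bash
nomad run go-app.nomad                     # starts 3 canary allocations alongside the 3 existing ones
nomad job status go-app                    # note the ID of the latest deployment
nomad deployment status <deployment-id>    # placeholder: paste the ID from the previous command
nomad deployment promote <deployment-id>   # promote the canaries; old allocations are then replaced
```
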
/application-deployment/go-vault-dynamic-mysql-creds/application.nomad:
--------------------------------------------------------------------------------
1 | job "app" {
2 | datacenters = ["dc1"]
3 | type = "service"
4 |
5 | update {
6 | stagger = "5s"
7 | max_parallel = 1
8 | }
9 |
10 | group "app" {
11 | count = 3
12 |
13 | task "app" {
14 | driver = "exec"
15 | config {
16 | command = "goapp"
17 | }
18 |
19 | env {
20 | VAULT_ADDR = "http://active.vault.service.consul:8200"
21 | APP_DB_HOST = "db.service.consul:3306"
22 | }
23 |
24 | vault {
25 | policies = [ "mysql" ]
26 | }
27 |
28 | artifact {
29 | source = "https://s3.amazonaws.com/ak-bucket-1/goapp"
30 | }
31 |
32 | resources {
33 | cpu = 500
34 | memory = 64
35 | network {
36 | mbits = 1
37 | port "http" {
38 | static = 8080
39 | }
40 | }
41 | }
42 |
43 | service {
44 | name = "app"
45 | tags = ["urlprefix-/app", "go-mysql-app"]
46 | port = "http"
47 | check {
48 | type = "http"
49 | name = "healthz"
50 | interval = "15s"
51 | timeout = "5s"
52 | path = "/healthz"
53 | }
54 | }
55 | }
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/application-deployment/go-vault-dynamic-mysql-creds/golang_vault_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | consul kv get service/vault/root-token | vault auth -
4 |
5 | POLICY='path "database/creds/readonly" { capabilities = [ "read", "list" ] }'
6 |
7 | echo $POLICY > policy-mysql.hcl
8 |
9 | vault policy-write mysql policy-mysql.hcl
10 |
--------------------------------------------------------------------------------
/application-deployment/go-vault-dynamic-mysql-creds/policy-mysql.hcl:
--------------------------------------------------------------------------------
1 | path "database/creds/readonly" { capabilities = [ "read", "list" ] }
2 |
--------------------------------------------------------------------------------
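
The policy grants read access to `database/creds/readonly`, the path the Go app uses to fetch short-lived MySQL credentials. Assuming the database secrets engine and its `readonly` role were configured elsewhere (that setup is not part of these files), a quick manual check looks like:

```bash
# Each read returns a freshly generated, short-lived MySQL username/password pair.
vault read database/creds/readonly

# Optionally confirm that the "mysql" policy alone is enough for the job's tasks:
vault token create -policy=mysql
```
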
/application-deployment/haproxy/README.md:
--------------------------------------------------------------------------------
1 | # HAProxy Example
2 |
3 | ### TLDR;
4 | ```bash
5 | # Assumes the Vagrantfile (lots of port forwarding); fix IPs and ports accordingly
6 |
7 | vagrant@node1:/vagrant/application-deployment/haproxy$ nomad run haproxy.nomad
8 |
9 | vagrant@node1:/vagrant/application-deployment/haproxy$ nomad run /vagrant/application-deployment/go-blue-green/go-app.nomad
10 |
11 | # Golang app (routed via HAProxy)
12 | http://localhost:9080/
13 |
14 | #Vault GUI:
15 | http://localhost:3200/ui/vault/auth
16 |
17 | #Consul GUI:
18 | http://localhost:3500/ui/#/dc1/services
19 |
20 | #Nomad GUI:
21 | http://localhost:3646/ui/jobs
22 |
23 | ```
24 |
25 |
26 |
27 |
28 |
29 | # GUIDE: TODO
--------------------------------------------------------------------------------
/application-deployment/haproxy/haproxy.nomad:
--------------------------------------------------------------------------------
1 | job "lb" {
2 | region = "global"
3 | datacenters = ["dc1"]
4 | type = "service"
5 | update { stagger = "10s"
6 | max_parallel = 1
7 | }
8 | group "lb" {
9 | count = 3
10 | restart {
11 | interval = "5m"
12 | attempts = 10
13 | delay = "25s"
14 | mode = "delay"
15 | }
16 | task "haproxy" {
17 | driver = "docker"
18 | config {
19 | image = "haproxy"
20 | network_mode = "host"
21 | port_map {
22 | http = 80
23 | }
24 | volumes = [
25 | "custom/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg"
26 | ]
27 | }
28 | template {
29 | #source = "haproxy.cfg.tpl"
30 |         data = <<EOF
--------------------------------------------------------------------------------
/application-deployment/microservices/aws/user-data-client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
6 | sudo bash /ops/shared/scripts/client.sh "${region}" "${cluster_tag_value}" "${server_ip}" "${vault_url}"
7 |
--------------------------------------------------------------------------------
/application-deployment/microservices/aws/user-data-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
6 | sudo bash /ops/shared/scripts/server.sh "${server_count}" "${region}" "${cluster_tag_value}" "${token_for_nomad}" "${vault_url}"
7 |
--------------------------------------------------------------------------------
/application-deployment/microservices/aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 | description = "The AWS region to deploy to."
3 | default = "us-east-1"
4 | }
5 |
6 | variable "ami" {
7 | description = "AMI ID"
8 | default = "ami-009feb0e09775afc6"
9 | }
10 |
11 | variable "vpc_cidr" {
12 | description = "VPC CIDR"
13 | default = "10.0.0.0/16"
14 | }
15 |
16 | variable "subnet_cidr" {
17 | description = "Subnet CIDR"
18 | default = "10.0.1.0/24"
19 | }
20 |
21 | variable "subnet_az" {
22 | description = "The AZ for the public subnet"
23 | default = "us-east-1a"
24 | }
25 |
26 | variable "server_instance_type" {
27 | description = "The AWS instance type to use for servers."
28 | default = "t2.medium"
29 | }
30 |
31 | variable "client_instance_type" {
32 | description = "The AWS instance type to use for clients."
33 | default = "t2.medium"
34 | }
35 |
36 | variable "key_name" {}
37 |
38 | variable "private_key_data" {
39 | description = "contents of the private key"
40 | }
41 |
42 | variable "server_count" {
43 | description = "The number of servers to provision."
44 | default = "1"
45 | }
46 |
47 | variable "client_count" {
48 | description = "The number of clients to provision."
49 | default = "2"
50 | }
51 |
52 | variable "name_tag_prefix" {
53 | description = "prefixed to Name tag added to EC2 instances and other AWS resources"
54 | default = "nomad-consul"
55 | }
56 |
57 | variable "cluster_tag_value" {
58 | description = "Used by Consul to automatically form a cluster."
59 | default = "nomad-consul-demo"
60 | }
61 |
62 | variable "owner" {
63 | description = "Adds owner tag to EC2 instances"
64 | default = ""
65 | }
66 |
67 | variable "ttl" {
68 | description = "Adds TTL tag to EC2 instances for reaping purposes. Reaping is only done for instances deployed by HashiCorp SEs. In any case, -1 means no reaping."
69 | default = "-1"
70 | }
71 |
72 | variable "token_for_nomad" {
73 | description = "A Vault token for use by Nomad"
74 | }
75 |
76 | variable "vault_url" {
77 | description = "URL of your Vault server including port"
78 | }
79 |
--------------------------------------------------------------------------------
/application-deployment/microservices/aws/vpc.tf:
--------------------------------------------------------------------------------
1 | # Define the VPC.
2 | resource "aws_vpc" "sockshop" {
3 | cidr_block = "${var.vpc_cidr}"
4 | enable_dns_hostnames = true
5 |
6 | tags {
7 | Name = "${var.name_tag_prefix} VPC"
8 | }
9 | }
10 |
11 | # Create an Internet Gateway for the VPC.
12 | resource "aws_internet_gateway" "sockshop" {
13 | vpc_id = "${aws_vpc.sockshop.id}"
14 |
15 | tags {
16 | Name = "${var.name_tag_prefix} IGW"
17 | }
18 | }
19 |
20 | # Create a public subnet.
21 | resource "aws_subnet" "public-subnet" {
22 | vpc_id = "${aws_vpc.sockshop.id}"
23 | cidr_block = "${var.subnet_cidr}"
24 | availability_zone = "${var.subnet_az}"
25 | map_public_ip_on_launch = true
26 | depends_on = ["aws_internet_gateway.sockshop"]
27 |
28 | tags {
29 | Name = "${var.name_tag_prefix} Public Subnet"
30 | }
31 | }
32 |
33 | # Create a route table allowing all addresses access to the IGW.
34 | resource "aws_route_table" "public" {
35 | vpc_id = "${aws_vpc.sockshop.id}"
36 |
37 | route {
38 | cidr_block = "0.0.0.0/0"
39 | gateway_id = "${aws_internet_gateway.sockshop.id}"
40 | }
41 |
42 | tags {
43 | Name = "${var.name_tag_prefix} Public Route Table"
44 | }
45 | }
46 |
47 | # Now associate the route table with the public subnet
48 | # giving all public subnet instances access to the internet.
49 | resource "aws_route_table_association" "public-subnet" {
50 | subnet_id = "${aws_subnet.public-subnet.id}"
51 | route_table_id = "${aws_route_table.public.id}"
52 | }
53 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/config/consul.json:
--------------------------------------------------------------------------------
1 | {
2 | "log_level": "INFO",
3 | "server": true,
4 | "ui": true,
5 | "data_dir": "/opt/consul/data",
6 | "bind_addr": "IP_ADDRESS",
7 | "client_addr": "IP_ADDRESS",
8 | "advertise_addr": "IP_ADDRESS",
9 | "bootstrap_expect": SERVER_COUNT,
10 | "service": {
11 | "name": "consul"
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/config/consul_client.json:
--------------------------------------------------------------------------------
1 | {
2 | "ui": true,
3 | "log_level": "INFO",
4 | "data_dir": "/opt/consul/data",
5 | "bind_addr": "IP_ADDRESS",
6 | "client_addr": "IP_ADDRESS",
7 | "advertise_addr": "IP_ADDRESS"
8 | }
9 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/config/consul_upstart.conf:
--------------------------------------------------------------------------------
1 | description "Consul"
2 |
3 | start on runlevel [2345]
4 | stop on runlevel [!2345]
5 |
6 | respawn
7 |
8 | console log
9 |
10 | script
11 | if [ -f "/etc/service/consul" ]; then
12 | . /etc/service/consul
13 | fi
14 |
15 | exec /usr/local/bin/consul agent \
16 | -config-dir="/etc/consul.d" \
17 | -dns-port="53" \
18 | -recursor="150.10.20.2" \
19 | -retry-join "provider=aws tag_key=ConsulAutoJoin tag_value=CLUSTER_TAG_VALUE region=REGION" \
20 | >>/var/log/consul.log 2>&1
21 | end script
22 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/config/nomad.hcl:
--------------------------------------------------------------------------------
1 | data_dir = "/opt/nomad/data"
2 | bind_addr = "IP_ADDRESS"
3 |
4 | # Enable the server
5 | server {
6 | enabled = true
7 | bootstrap_expect = SERVER_COUNT
8 | }
9 |
10 | name = "nomad@IP_ADDRESS"
11 |
12 | consul {
13 | address = "IP_ADDRESS:8500"
14 | }
15 |
16 | vault {
17 | enabled = true
18 | address = "VAULT_URL"
19 | task_token_ttl = "1h"
20 | create_from_role = "nomad-cluster"
21 | token = "TOKEN_FOR_NOMAD"
22 | }
23 |
24 | telemetry {
25 | publish_allocation_metrics = true
26 | publish_node_metrics = true
27 | }
28 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/config/nomad_client.hcl:
--------------------------------------------------------------------------------
1 | data_dir = "/opt/nomad/data"
2 | bind_addr = "IP_ADDRESS"
3 | name = "nomad@IP_ADDRESS"
4 |
5 | # Enable the client
6 | client {
7 | enabled = true
8 | options = {
9 | driver.java.enable = "1"
10 | docker.cleanup.image = false
11 | }
12 | }
13 |
14 | consul {
15 | address = "IP_ADDRESS:8500"
16 | }
17 |
18 | vault {
19 | enabled = true
20 | address = "VAULT_URL"
21 | }
22 |
23 | telemetry {
24 | publish_allocation_metrics = true
25 | publish_node_metrics = true
26 | }
27 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/config/nomad_upstart.conf:
--------------------------------------------------------------------------------
1 | description "Nomad"
2 |
3 | start on runlevel [2345]
4 | stop on runlevel [!2345]
5 |
6 | respawn
7 |
8 | console log
9 |
10 | script
11 | if [ -f "/etc/service/nomad" ]; then
12 | . /etc/service/nomad
13 | fi
14 |
15 | exec /usr/local/bin/nomad agent \
16 | -config="/etc/nomad.d/nomad.hcl" \
17 | >>/var/log/nomad.log 2>&1
18 | end script
19 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/jobs/sockshopui.nomad:
--------------------------------------------------------------------------------
1 | job "sockshopui" {
2 | datacenters = ["dc1"]
3 |
4 | type = "system"
5 |
6 | constraint {
7 | attribute = "${attr.kernel.name}"
8 | value = "linux"
9 | }
10 |
11 | update {
12 | stagger = "10s"
13 | max_parallel = 1
14 | }
15 |
16 | # - frontend #
17 | group "frontend" {
18 |
19 | restart {
20 | attempts = 10
21 | interval = "5m"
22 | delay = "25s"
23 | mode = "delay"
24 | }
25 |
26 | # - frontend app - #
27 | task "front-end" {
28 | driver = "docker"
29 |
30 | config {
31 | image = "weaveworksdemos/front-end:master-ac9ca707"
32 | command = "/usr/local/bin/node"
33 | args = ["server.js", "--domain=service.consul"]
34 | hostname = "front-end.service.consul"
35 | network_mode = "sockshop"
36 | port_map = {
37 | http = 8079
38 | }
39 | }
40 |
41 | service {
42 | name = "front-end"
43 | tags = ["app", "frontend", "front-end"]
44 | port = "http"
45 | }
46 |
47 | resources {
48 | cpu = 100 # 100 Mhz
49 | memory = 128 # 128MB
50 | network {
51 | mbits = 10
52 | port "http" {
53 | static = 80
54 | }
55 | }
56 | }
57 | } # - end frontend app - #
58 | } # - end frontend - #
59 | }
60 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/scripts/client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | CONFIGDIR=/ops/shared/config
6 |
7 | CONSULCONFIGDIR=/etc/consul.d
8 | NOMADCONFIGDIR=/etc/nomad.d
9 | HOME_DIR=ubuntu
10 |
11 | # Wait for network
12 | sleep 15
13 |
14 | IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
15 | DOCKER_BRIDGE_IP_ADDRESS=(`ifconfig docker0 2>/dev/null|awk '/inet addr:/ {print $2}'|sed 's/addr://'`)
16 | REGION=$1
17 | CLUSTER_TAG_VALUE=$2
18 | SERVER_IP=$3
19 | VAULT_URL=$4
20 |
21 | # Install Java
22 | apt-get update
23 | apt install -y default-jre
24 |
25 | # Consul
26 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul_client.json
27 | sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul_upstart.conf
28 | sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul_upstart.conf
29 | cp $CONFIGDIR/consul_client.json $CONSULCONFIGDIR/consul.json
30 | cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf
31 |
32 | service consul start
33 | sleep 10
34 | export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
35 |
36 | # Nomad
37 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad_client.hcl
38 | sed -i "s@VAULT_URL@$VAULT_URL@g" $CONFIGDIR/nomad_client.hcl
39 | cp $CONFIGDIR/nomad_client.hcl $NOMADCONFIGDIR/nomad.hcl
40 | cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf
41 |
42 | service nomad start
43 | sleep 10
44 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
45 |
46 | # Add hostname to /etc/hosts
47 | echo "127.0.0.1 $(hostname)" | tee --append /etc/hosts
48 |
49 | # Add Docker bridge network IP to /etc/resolv.conf (at the top)
50 | #echo "nameserver $DOCKER_BRIDGE_IP_ADDRESS" | tee /etc/resolv.conf.new
51 | echo "nameserver $IP_ADDRESS" | tee /etc/resolv.conf.new
52 | cat /etc/resolv.conf | tee --append /etc/resolv.conf.new
53 | mv /etc/resolv.conf.new /etc/resolv.conf
54 |
55 | # Add search service.consul at bottom of /etc/resolv.conf
56 | echo "search service.consul" | tee --append /etc/resolv.conf
57 |
58 | # Set env vars for tool CLIs
59 | echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | tee --append /home/$HOME_DIR/.bashrc
60 | echo "export VAULT_ADDR=$VAULT_URL" | tee --append /home/$HOME_DIR/.bashrc
61 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | tee --append /home/$HOME_DIR/.bashrc
62 |
63 | # Move daemon.json to /etc/docker
64 | echo "{\"hosts\":[\"tcp://0.0.0.0:2375\",\"unix:///var/run/docker.sock\"],\"cluster-store\":\"consul://$IP_ADDRESS:8500\",\"cluster-advertise\":\"$IP_ADDRESS:2375\",\"dns\":[\"$IP_ADDRESS\"],\"dns-search\":[\"service.consul\"]}" > /home/ubuntu/daemon.json
65 | mkdir -p /etc/docker
66 | mv /home/ubuntu/daemon.json /etc/docker/daemon.json
67 |
68 | # Start Docker
69 | service docker restart
70 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/scripts/server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | CONFIGDIR=/ops/shared/config
6 |
7 | CONSULCONFIGDIR=/etc/consul.d
8 | NOMADCONFIGDIR=/etc/nomad.d
9 | HOME_DIR=ubuntu
10 |
11 | # Wait for network
12 | sleep 15
13 |
14 | IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
15 | DOCKER_BRIDGE_IP_ADDRESS=(`ifconfig docker0 2>/dev/null|awk '/inet addr:/ {print $2}'|sed 's/addr://'`)
16 | SERVER_COUNT=$1
17 | REGION=$2
18 | CLUSTER_TAG_VALUE=$3
19 | TOKEN_FOR_NOMAD=$4
20 | VAULT_URL=$5
21 |
22 | # Consul
23 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul.json
24 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/consul.json
25 | sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul_upstart.conf
26 | sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul_upstart.conf
27 | cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
28 | cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf
29 |
30 | service consul start
31 | sleep 10
32 | export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
33 |
34 | # Nomad
35 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad.hcl
36 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/nomad.hcl
37 | sed -i "s@VAULT_URL@$VAULT_URL@g" $CONFIGDIR/nomad.hcl
38 | sed -i "s/TOKEN_FOR_NOMAD/$TOKEN_FOR_NOMAD/g" $CONFIGDIR/nomad.hcl
39 | cp $CONFIGDIR/nomad.hcl $NOMADCONFIGDIR
40 | cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf
41 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
42 |
43 | # Add hostname to /etc/hosts
44 | echo "127.0.0.1 $(hostname)" | tee --append /etc/hosts
45 |
46 | # Add Docker bridge network IP to /etc/resolv.conf (at the top)
47 | #echo "nameserver $DOCKER_BRIDGE_IP_ADDRESS" | tee /etc/resolv.conf.new
48 | echo "nameserver $IP_ADDRESS" | tee /etc/resolv.conf.new
49 | cat /etc/resolv.conf | tee --append /etc/resolv.conf.new
50 | mv /etc/resolv.conf.new /etc/resolv.conf
51 |
52 | # Add search service.consul at bottom of /etc/resolv.conf
53 | echo "search service.consul" | tee --append /etc/resolv.conf
54 |
55 | # Set env vars for tool CLIs
56 | echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | tee --append /home/$HOME_DIR/.bashrc
57 | echo "export VAULT_ADDR=$VAULT_URL" | tee --append /home/$HOME_DIR/.bashrc
58 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | tee --append /home/$HOME_DIR/.bashrc
59 |
60 | # Move daemon.json to /etc/docker
61 | echo "{\"hosts\":[\"tcp://0.0.0.0:2375\",\"unix:///var/run/docker.sock\"],\"cluster-store\":\"consul://$IP_ADDRESS:8500\",\"cluster-advertise\":\"$IP_ADDRESS:2375\",\"dns\":[\"$IP_ADDRESS\"],\"dns-search\":[\"service.consul\"]}" > /home/ubuntu/daemon.json
62 | mkdir -p /etc/docker
63 | mv /home/ubuntu/daemon.json /etc/docker/daemon.json
64 |
65 | # Start Docker
66 | service docker restart
67 |
68 | # Create Docker Networks
69 | for network in sockshop; do
70 | if [ $(docker network ls | grep $network | wc -l) -eq 0 ]
71 | then
72 | docker network create -d overlay --attachable $network
73 | else
74 | echo docker network $network already created
75 | fi
76 | done
77 |
78 | # Copy Nomad jobs and scripts to desired locations
79 | cp /ops/shared/jobs/*.nomad /home/ubuntu/.
80 | chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/
81 | chmod 666 /home/ubuntu/*
82 |
83 | # Start Nomad
84 | service nomad start
85 | sleep 60
86 |
--------------------------------------------------------------------------------
/application-deployment/microservices/shared/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | cd /ops
5 |
6 | CONFIGDIR=/ops/shared/config
7 |
8 | CONSULVERSION=1.3.0
9 | CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
10 | CONSULCONFIGDIR=/etc/consul.d
11 | CONSULDIR=/opt/consul
12 |
13 | NOMADVERSION=0.8.6
14 | NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
15 | NOMADCONFIGDIR=/etc/nomad.d
16 | NOMADDIR=/opt/nomad
17 |
18 | # Dependencies
19 | sudo apt-get install -y software-properties-common
20 | sudo apt-get update
21 | sudo apt-get install -y unzip tree redis-tools jq
22 | sudo apt-get install -y upstart-sysv
23 | sudo update-initramfs -u
24 |
25 | # Disable the firewall
26 | sudo ufw disable
27 |
28 | # Download Consul
29 | curl -L $CONSULDOWNLOAD > consul.zip
30 |
31 | ## Install Consul
32 | sudo unzip consul.zip -d /usr/local/bin
33 | sudo chmod 0755 /usr/local/bin/consul
34 | sudo chown root:root /usr/local/bin/consul
35 | sudo setcap "cap_net_bind_service=+ep" /usr/local/bin/consul
36 |
37 | ## Configure Consul
38 | sudo mkdir -p $CONSULCONFIGDIR
39 | sudo chmod 755 $CONSULCONFIGDIR
40 | sudo mkdir -p $CONSULDIR
41 | sudo chmod 755 $CONSULDIR
42 |
43 | # Download Nomad
44 | curl -L $NOMADDOWNLOAD > nomad.zip
45 |
46 | ## Install Nomad
47 | sudo unzip nomad.zip -d /usr/local/bin
48 | sudo chmod 0755 /usr/local/bin/nomad
49 | sudo chown root:root /usr/local/bin/nomad
50 |
51 | ## Configure Nomad
52 | sudo mkdir -p $NOMADCONFIGDIR
53 | sudo chmod 755 $NOMADCONFIGDIR
54 | sudo mkdir -p $NOMADDIR
55 | sudo chmod 755 $NOMADDIR
56 |
57 | # Docker
58 | sudo apt-get update
59 | sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
60 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
61 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
62 | sudo apt-get update
63 | sudo apt-get install -y docker-ce=17.09.1~ce-0~ubuntu
64 | sudo usermod -aG docker ubuntu
65 |
--------------------------------------------------------------------------------
/application-deployment/microservices/slides/HashiCorpMicroservicesDemo.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/nomad-guides/cdda5a0ebaaa2c009783c24e98817622d9b7593a/application-deployment/microservices/slides/HashiCorpMicroservicesDemo.pptx
--------------------------------------------------------------------------------
/application-deployment/microservices/vault/aws-policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "Stmt1426528957000",
6 | "Effect": "Allow",
7 | "Action": [
8 | "ec2:*",
9 | "iam:*"
10 | ],
11 | "Resource": [
12 | "*"
13 | ]
14 | }
15 | ]
16 | }
17 |
--------------------------------------------------------------------------------
/application-deployment/microservices/vault/nomad-cluster-role.json:
--------------------------------------------------------------------------------
1 | {
2 | "disallowed_policies": "nomad-server",
3 | "explicit_max_ttl": 0,
4 | "name": "nomad-cluster",
5 | "orphan": false,
6 | "period": 259200,
7 | "renewable": true
8 | }
9 |
--------------------------------------------------------------------------------
/application-deployment/microservices/vault/nomad-server-policy.hcl:
--------------------------------------------------------------------------------
1 | # Allow creating tokens under "nomad-cluster" role. The role name should be
2 | # updated if "nomad-cluster" is not used.
3 | path "auth/token/create/nomad-cluster" {
4 | capabilities = ["update"]
5 | }
6 |
7 | # Allow looking up "nomad-cluster" role. The role name should be updated if
8 | # "nomad-cluster" is not used.
9 | path "auth/token/roles/nomad-cluster" {
10 | capabilities = ["read"]
11 | }
12 |
13 | # Allow looking up the token passed to Nomad to validate the token has the
14 | # proper capabilities. This is provided by the "default" policy.
15 | path "auth/token/lookup-self" {
16 | capabilities = ["read"]
17 | }
18 |
19 | # Allow looking up incoming tokens to validate they have permissions to access
20 | # the tokens they are requesting. This is only required if
21 | # `allow_unauthenticated` is set to false.
22 | path "auth/token/lookup" {
23 | capabilities = ["update"]
24 | }
25 |
26 | # Allow revoking tokens that should no longer exist. This allows revoking
27 | # tokens for dead tasks.
28 | path "auth/token/revoke-accessor" {
29 | capabilities = ["update"]
30 | }
31 |
32 | # Allow checking the capabilities of our own token. This is used to validate the
33 | # token upon startup.
34 | path "sys/capabilities-self" {
35 | capabilities = ["update"]
36 | }
37 |
38 | # Allow our own token to be renewed.
39 | path "auth/token/renew-self" {
40 | capabilities = ["update"]
41 | }
42 |
--------------------------------------------------------------------------------
/application-deployment/microservices/vault/setup_vault.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Script to setup of Vault for the Nomad/Consul demo
4 | echo "Before running this, you must export your"
5 | echo "AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY keys"
6 | echo "and your VAULT_ADDR and VAULT_TOKEN environment variables."
7 |
8 | # Set up the Vault AWS Secrets Engine
9 | echo "Setting up the AWS Secrets Engine"
10 | echo "Enabling the AWS secrets engine at path aws-tf"
11 | vault secrets enable -path=aws-tf aws
12 | echo "Providing Vault with AWS keys that can create other keys"
13 | vault write aws-tf/config/root access_key=$AWS_ACCESS_KEY_ID secret_key=$AWS_SECRET_ACCESS_KEY
14 | echo "Configuring default and max leases on generated keys"
15 | vault write aws-tf/config/lease lease=1h lease_max=24h
16 | echo "Creating the AWS deploy role and assigning policy to it"
17 | vault write aws-tf/roles/deploy policy=@aws-policy.json
18 |
19 | # Create sockshop-read policy
20 | vault policy write sockshop-read sockshop-read.hcl
21 |
22 | # Write the cataloguedb and userdb passwords to Vault
23 | vault write secret/sockshop/databases/cataloguedb pwd=dioe93kdo931
24 | vault write secret/sockshop/databases/userdb pwd=wo39c5h2sl4r
25 |
26 | # Setup Vault policy/role for Nomad
27 | echo "Setting up Vault policy and role for Nomad"
28 | echo "Writing nomad-server-policy.hcl to Vault"
29 | vault policy write nomad-server nomad-server-policy.hcl
30 | echo "Writing nomad-cluster-role.json to Vault"
31 | vault write auth/token/roles/nomad-cluster @nomad-cluster-role.json
32 |
--------------------------------------------------------------------------------
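
A few reads to confirm the script did what it claims, assuming `VAULT_ADDR` and `VAULT_TOKEN` are exported as its banner requires (these use the same modern `vault` CLI syntax as the script itself):

```bash
vault read aws-tf/roles/deploy                     # AWS deploy role created from aws-policy.json
vault read secret/sockshop/databases/cataloguedb   # pwd written by the script
vault read secret/sockshop/databases/userdb
vault policy read sockshop-read                    # read-only policy over secret/sockshop/*
vault read auth/token/roles/nomad-cluster          # token role used by the Nomad servers
```
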
/application-deployment/microservices/vault/sockshop-read.hcl:
--------------------------------------------------------------------------------
1 | # Read Access to Sock Shop secrets
2 | path "secret/sockshop/*" {
3 | capabilities = ["read"]
4 | }
5 |
--------------------------------------------------------------------------------
/application-deployment/nginx-vault-kv/README.md:
--------------------------------------------------------------------------------
1 | # Nomad-Vault Nginx Key/Value
2 |
3 | ### TLDR;
4 | ```bash
5 | vagrant@node1:/vagrant/vault-examples/nginx/KeyValue$ ./kv_vault_setup.sh
6 | Successfully authenticated! You are now logged in.
7 | token: 25bf4150-94a4-7292-974c-9c3fa4c8ee53
8 | token_duration: 0
9 | token_policies: [root]
10 | Success! Data written to: secret/test
11 | Policy 'test' written.
12 |
13 | vagrant@node1:/vagrant/vault-examples/nginx/KeyValue$ nomad run nginx-kv-secret.nomad
14 |
15 | # in your browser, go to (runs on clients at static port 8080):
16 | http://localhost:8080/nginx-secret/
17 | #Good morning. secret: Live demos rock!!!
18 |
19 | ```
20 |
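To confirm the secret written by `kv_vault_setup.sh` is in place, you can read it back (assuming the Vault CLI is authenticated, e.g. with the root token shown above):

```bash
vault read secret/test
# ...
# message    Live demos rock!!!
```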
21 | # Guide: TODO
--------------------------------------------------------------------------------
/application-deployment/nginx-vault-kv/kv_vault_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | consul kv get service/vault/root-token | vault auth -
4 |
5 | vault write secret/test message='Live demos rock!!!'
6 |
7 | cat << EOF > test.policy
8 | path "secret/*" {
9 | capabilities = ["create", "read", "update", "delete", "list"]
10 | }
11 | EOF
12 |
13 | vault policy-write test test.policy
14 |
--------------------------------------------------------------------------------
/application-deployment/nginx-vault-kv/nginx-kv-secret.nomad:
--------------------------------------------------------------------------------
1 | job "nginx" {
2 | datacenters = ["dc1"]
3 | type = "service"
4 |
5 | group "nginx" {
6 | count = 3
7 |
8 | vault {
9 | policies = ["test"]
10 | }
11 |
12 | task "nginx" {
13 | driver = "docker"
14 |
15 | config {
16 | image = "nginx"
17 | port_map {
18 | http = 8080
19 | }
20 | port_map {
21 | https = 443
22 | }
23 | volumes = [
24 | "custom/default.conf:/etc/nginx/conf.d/default.conf"
25 | ]
26 | }
27 |
28 | template {
29 | data = < policy-superuser.hcl
81 |
82 | vault policy-write superuser policy-superuser.hcl
83 | ```
84 |
85 | Execute the script
86 | ```bash
87 | vagrant@node1:/vagrant/vault-examples/nginx/pki$ ./pki_vault_setup.sh
88 | ```
89 |
90 | ## Step 2: Run the Job
91 | ```bash
92 | vagrant@node1:/vagrant/vault-examples/nginx/PKI$ nomad run nginx-pki-secret.nomad
93 | ```
94 |
95 | ## Step 3: Validate Results
96 | The nginx containers should be running on port 443 of your Nomad clients (static port configuration).
97 |
98 | If using the Vagrantfile, go to your browser at:
99 | https://localhost:9443/
100 |
101 | Your browser should warn you of an untrusted cert. If you would like, you can import the root CA certificate generated by the configuration script (pki_vault_setup.sh) into your browser.
102 |
103 | Once rendered, you should see a webpage showing the dynamic cert and key used by the Nginx task for its SSL config.
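You can also inspect the certificate the Nginx task is serving straight from the command line (a quick check, assuming `openssl` is available and the Vagrant port forward to 9443 is in place):

```bash
echo | openssl s_client -connect localhost:9443 2>/dev/null | openssl x509 -noout -subject -issuer -dates
```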
104 |
105 | 
106 |
107 |
--------------------------------------------------------------------------------
/application-deployment/nginx-vault-pki/nginx-pki-secret.nomad:
--------------------------------------------------------------------------------
1 | job "nginx" {
2 | datacenters = ["dc1"]
3 | type = "service"
4 |
5 | group "nginx" {
6 | count = 3
7 |
8 | vault {
9 | policies = ["superuser"]
10 | }
11 |
12 | task "nginx" {
13 | driver = "docker"
14 |
15 | config {
16 | image = "nginx"
17 | port_map {
18 | http = 80
19 | }
20 | port_map {
21 | https = 443
22 | }
23 | volumes = [
24 | "custom/default.conf:/etc/nginx/conf.d/default.conf",
25 | "secret/cert.key:/etc/nginx/ssl/nginx.key",
26 | ]
27 | }
28 |
29 | template {
30 | data = <
65 |
66 | {{ with secret "pki/issue/consul-service" "common_name=nginx.service.consul" "ttl=30m" }}
67 | {{ .Data.certificate }}
68 |
69 |
70 | {{ .Data.private_key }}
71 | {{ end }}
72 | EOH
73 |
74 | destination = "local/data/index.html"
75 | }
76 |
77 | resources {
78 | cpu = 100 # 100 MHz
79 | memory = 128 # 128 MB
80 | network {
81 | mbits = 10
82 | port "http" {
83 | static = 80
84 | }
85 | port "https" {
86 | static = 443
87 | }
88 | }
89 | }
90 |
91 | service {
92 | name = "nginx"
93 | tags = ["frontend","urlprefix-/nginx strip=/nginx"]
94 | port = "http"
95 | check {
96 | type = "tcp"
97 | interval = "10s"
98 | timeout = "2s"
99 | }
100 | }
101 | }
102 | }
103 | }
104 |
--------------------------------------------------------------------------------
/application-deployment/nginx-vault-pki/pki_vault_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | consul kv get service/vault/root-token | vault auth -
4 |
5 | vault mount pki
6 |
7 | vault write pki/root/generate/internal \
8 | common_name=service.consul
9 |
10 | vault write pki/roles/consul-service \
11 | generate_lease=true \
12 | allowed_domains="service.consul" \
13 | allow_subdomains="true"
14 |
15 | vault write pki/issue/consul-service \
16 | common_name=nginx.service.consul \
17 | ttl=2h
18 |
19 | POLICY='path "*" { capabilities = ["create", "read", "update", "delete", "list", "sudo"] }'
20 |
21 | echo $POLICY > policy-superuser.hcl
22 |
23 | vault policy-write superuser policy-superuser.hcl
24 |
--------------------------------------------------------------------------------
/application-deployment/nginx-vault-pki/policy-superuser.hcl:
--------------------------------------------------------------------------------
1 | path "*" { capabilities = ["create", "read", "update", "delete", "list", "sudo"] }
2 |
--------------------------------------------------------------------------------
/application-deployment/nginx/README.md:
--------------------------------------------------------------------------------
1 | # Nginx Deployment (Template Example)
2 | The goal of this guide is to help users deploy Nginx on Nomad. In the process we will also show how to use Nomad templating to update the configuration of our deployed tasks. (Nomad uses Consul Template under the hood)
3 |
4 | ### TLDR;
5 | ```bash
6 | vagrant@node1:/vagrant/application-deployment/nginx$ ./kv_consul_setup.sh
7 |
8 | vagrant@node1:/vagrant/application-deployment/nginx$ nomad run nginx-consul.nomad
9 |
10 | #Validate the results on Nomad clients, job assigns static port 8080
11 | #if using vagrantfile check:
12 | http://localhost:8080/nginx/
13 |
14 | ```
15 |
16 | ## Estimated Time to Complete
17 | 10 minutes
18 |
19 | ## Prerequisites
20 | A Nomad cluster should be up and running. Set up a cluster with OSS or enterprise binaries using the Vagrantfile here: https://github.com/hashicorp/nomad-guides/tree/master/provision/vagrant
21 |
22 | ## Challenge
23 | Keeping environment variables and application configuration files up to date in a dynamic or microservice environment can be difficult to manage and scale.
24 |
25 | ## Solution
26 | Nomad's template block instantiates an instance of a template renderer. This creates a convenient way to ship configuration files that are populated from environment variables, Consul data, Vault secrets, or just general configurations within a Nomad task.
27 |
28 | In this example, we will leverage Consul for our tasks' configuration and deploy Nginx containers.
29 |
30 | # Steps
31 |
32 | ## Step 1: Write a Test Value to Consul
33 | Write the test value to Consul (script included)
34 |
35 | ```bash
36 | vagrant@node1:/vagrant/application-deployment/nginx$ cat kv_consul_setup.sh
37 | #!/bin/bash
38 |
39 | consul kv put features/demo 'Consul Rocks!'
40 |
41 | vagrant@node1:/vagrant/application-deployment/nginx$ ./kv_consul_setup.sh
42 | Success! Data written to: features/demo
43 | ```
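Optionally, read the key back to confirm it is in place (assuming the Consul CLI can reach the local agent):

```bash
consul kv get features/demo
# Consul Rocks!
```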
44 |
45 | ## Step 2: Review Template stanza
46 | The important piece of this example lies in the template stanza:
47 | ```
48 | template {
49 | data = <
52 |
53 | {{ if keyExists "features/demo" }}
54 | Consul Key Value: {{ key "features/demo" }}
55 | {{ else }}
56 | Good morning.
57 | {{ end }}
58 |
59 |
60 | Node Environment Information:
61 | node_id: {{ env "node.unique.id" }}
62 | datacenter: {{ env "NOMAD_DC" }}
63 | EOH
64 | destination = "local/data/nginx/index.html"
65 | }
66 | ```
67 |
68 | In this example, the `keyExists` block instructs Nomad to pull a value from the Consul key `features/demo` if it exists. We wrote this Consul value in Step 1.
69 |
70 | We can also use Nomad's interpolation features to populate config/env variables based on Nomad's runtime information. The `env "node.unique.id"` and `env "NOMAD_DC"` options showcase this. More information is provided here: https://www.nomadproject.io/docs/runtime/interpolation.html
71 |
72 | Nomad will populate the template with those values and place the rendered template file in the specified `destination` location.
73 |
74 | More template options are outlined here: https://www.nomadproject.io/docs/job-specification/template.html
75 |
76 | ## Step 3: Run the Job
77 | Run the nginx job
78 | ```bash
79 | vagrant@node1:/vagrant/application-deployment/nginx$ nomad run nginx-consul.nomad
80 | ==> Monitoring evaluation "61609418"
81 | Evaluation triggered by job "nginx"
82 | Allocation "850b5877" created: node "7de3fca4", group "nginx"
83 | Allocation "40b89ff1" created: node "c4146f97", group "nginx"
84 | Allocation "52d61bf4" created: node "def34073", group "nginx"
85 | Evaluation status changed: "pending" -> "complete"
86 | ==> Evaluation "61609418" finished with status "complete"
87 |
88 | vagrant@node1:/vagrant/application-deployment/nginx$ nomad status nginx
89 | ID = nginx
90 | Name = nginx
91 | Submit Date = 12/27/17 20:54:21 UTC
92 | Type = service
93 | Priority = 50
94 | Datacenters = dc1
95 | Status = running
96 | Periodic = false
97 | Parameterized = false
98 |
99 | Summary
100 | Task Group Queued Starting Running Failed Complete Lost
101 | nginx 0 0 3 0 0 0
102 |
103 | Allocations
104 | ID Node ID Task Group Version Desired Status Created At
105 | 40b89ff1 c4146f97 nginx 0 run running 12/27/17 20:54:21 UTC
106 | 52d61bf4 def34073 nginx 0 run running 12/27/17 20:54:21 UTC
107 | 850b5877 7de3fca4 nginx 0 run running 12/27/17 20:54:21 UTC
108 | ```
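You can also inspect the rendered template inside one of the allocations (a quick check using an allocation ID from the output above; the path follows the task's `local` directory layout and the `destination` set in the template stanza):

```bash
nomad fs 850b5877 nginx/local/data/nginx/index.html
```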
109 |
110 | ## Step 4: Validate results
111 | Use curl or your browser to validate that the template was rendered correctly.
112 |
113 | ```bash
114 | vagrant@node1:/vagrant/application-deployment/nginx$ curl http://10.0.2.15:8080/nginx/
115 | Nomad Template example (Consul value)
116 |
117 |
118 |
119 | Consul Key Value: Consul Rocks!
120 |
121 |
122 |
123 | Node Environment Information:
124 | node_id: def34073-b34d-240d-9155-1adaa8e5da74
125 | datacenter: dc1
126 | ```
127 | Browser (using Vagrantfile):
128 | 
129 |
130 |
131 |
--------------------------------------------------------------------------------
/application-deployment/nginx/kv_consul_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | consul kv put features/demo 'Consul Rocks!'
4 |
5 |
--------------------------------------------------------------------------------
/application-deployment/nginx/nginx-consul.nomad:
--------------------------------------------------------------------------------
1 | job "nginx" {
2 | datacenters = ["dc1"]
3 | type = "service"
4 | group "nginx" {
5 | count = 3
6 | task "nginx" {
7 | driver = "docker"
8 | config {
9 | image = "nginx"
10 | port_map {
11 | http = 8080
12 | }
13 | port_map {
14 | https = 443
15 | }
16 | volumes = [
17 | "custom/default.conf:/etc/nginx/conf.d/default.conf"
18 | ]
19 | }
20 | template {
21 | data = <
37 |
38 | {{ if keyExists "features/demo" }}
39 | Consul Key Value: {{ key "features/demo" }}
40 | {{ else }}
41 | Good morning.
42 | {{ end }}
43 |
44 |
45 | Node Environment Information:
46 | node_id: {{ env "node.unique.id" }}
47 | datacenter: {{ env "NOMAD_DC" }}
48 | EOH
49 | destination = "local/data/nginx/index.html"
50 | }
51 | resources {
52 | cpu = 100 # 100 MHz
53 | memory = 128 # 128 MB
54 | network {
55 | mbits = 10
56 | port "http" {
57 | static = 8080
58 | }
59 | port "https" {
60 | static= 443
61 | }
62 | }
63 | }
64 | service {
65 | name = "nginx"
66 | tags = [ "nginx", "web", "urlprefix-/nginx" ]
67 | port = "http"
68 | check {
69 | type = "tcp"
70 | interval = "10s"
71 | timeout = "2s"
72 | }
73 | }
74 | }
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/application-deployment/vault/vault_exec.hcl:
--------------------------------------------------------------------------------
1 | # John Boero - jboero@hashicorp.com
2 | # A job spec to install and run Vault with 'exec' driver - no Docker.
3 | # Consul not used by default. Add Consul to config template
4 | # Artifact checksum is for linux-amd64 by default.
5 | job "vault.service" {
6 | datacenters = ["dc1"]
7 | type = "service"
8 | group "vault" {
9 | count = 1
10 |
11 | task "vault.service" {
12 | driver = "exec"
13 | resources {
14 | cpu = 2000
15 | memory = 1024
16 | }
17 |
18 | artifact {
19 | source = "https://releases.hashicorp.com/vault/1.7.0/vault_1.7.0_${attr.kernel.name}_${attr.cpu.arch}.zip"
20 | destination = "/tmp/"
21 | #options {
22 | # checksum = "sha256:2a6958e6c8d6566d8d529fe5ef9378534903305d0f00744d526232d1c860e1ed"
23 | #}
24 | }
25 |
26 | template {
27 | data = <
10 |
11 | ----
12 |
13 | # Operations
14 | This area contains instructions for operating Nomad. This includes topics such as configuring Sentinel policies, namespaces, ACLs etc.
15 |
16 | ## Multi-Job-Demo
17 | Example highlighting enterprise features and advanced use cases, with a Nomad cluster deployed in AWS.
18 |
19 | ## Nomad-Vault
20 | Examples of integrating Nomad with Hashicorp Vault for secrets management.
21 |
22 | ## Provision-Nomad
23 | Examples on how to provision a Nomad cluster. Includes dev mode, quick start and best practices.
24 |
25 | ## Sentinel
26 | Examples using Sentinel with Nomad, a feature of the enterprise version that offers governance capabilities such as limiting types of jobs, enforcing resource quotas and others.
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/acls/anonymous.hcl:
--------------------------------------------------------------------------------
1 | namespace "default" {
2 | capabilities = ["list-jobs"]
3 | }
4 |
5 | agent {
6 | policy = "read"
7 | }
8 |
9 | node {
10 | policy = "read"
11 | }
12 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/acls/dev.hcl:
--------------------------------------------------------------------------------
1 | namespace "default" {
2 | capabilities = ["list-jobs"]
3 | }
4 |
5 | namespace "dev" {
6 | policy = "write"
7 | }
8 |
9 | agent {
10 | policy = "read"
11 | }
12 |
13 | node {
14 | policy = "read"
15 | }
16 |
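# Illustrative only: a policy file like this is typically registered with a
# management token using something like
#   nomad acl policy apply -description "Dev namespace policy" dev dev.hcl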
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/acls/qa.hcl:
--------------------------------------------------------------------------------
1 | namespace "default" {
2 | capabilities = ["list-jobs"]
3 | }
4 |
5 | namespace "qa" {
6 | policy = "write"
7 | }
8 |
9 | agent {
10 | policy = "read"
11 | }
12 |
13 | node {
14 | policy = "read"
15 | }
16 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/bootstrap_token:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/nomad-guides/cdda5a0ebaaa2c009783c24e98817622d9b7593a/operations/multi-job-demo/aws/bootstrap_token
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/get_bootstrap_token.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | pk=$1
4 | server_ip=$2
5 | echo "${pk}" > private-key.pem
6 | chmod 600 private-key.pem
7 |
8 | while ! [ -f bootstrap.txt ];
9 | do
10 | scp -o StrictHostKeyChecking=no -i private-key.pem ubuntu@${server_ip}:~/bootstrap.txt bootstrap.txt
11 | sleep 5
12 | done
13 |
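# bootstrap.txt is expected to hold the output of `nomad acl bootstrap`; its second
# line is "Secret ID = <token>", so extract just the token value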
14 | bootstrap_token=$(sed -n 2,2p bootstrap.txt | cut -d '=' -f 2 | sed 's/ //')
15 |
16 | echo "{\"bootstrap_token\": \"$bootstrap_token\"}"
17 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/network/outputs.tf:
--------------------------------------------------------------------------------
1 | # Outputs
2 |
3 | output "vpc_id" {
4 | value = "${aws_vpc.multi_job_demo.id}"
5 | }
6 |
7 | output "subnet_id" {
8 | value = "${aws_subnet.public_subnet.id}"
9 | }
10 |
11 | output "route_table_association_id" {
12 | value = "${aws_route_table_association.public_subnet.id}"
13 | }
14 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/network/variables.tf:
--------------------------------------------------------------------------------
1 | variable "vpc_cidr" {}
2 | variable "name_tag_prefix" {}
3 | variable "subnet_cidr" {}
4 | variable "subnet_az" {}
5 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/network/vpc.tf:
--------------------------------------------------------------------------------
1 | # Define the VPC.
2 | resource "aws_vpc" "multi_job_demo" {
3 | cidr_block = "${var.vpc_cidr}"
4 | enable_dns_hostnames = true
5 |
6 | tags {
7 | Name = "${var.name_tag_prefix} VPC"
8 | }
9 | }
10 |
11 | # Create an Internet Gateway for the VPC.
12 | resource "aws_internet_gateway" "multi_job_demo" {
13 | vpc_id = "${aws_vpc.multi_job_demo.id}"
14 |
15 | tags {
16 | Name = "${var.name_tag_prefix} IGW"
17 | }
18 | }
19 |
20 | # Create a public subnet.
21 | resource "aws_subnet" "public_subnet" {
22 | vpc_id = "${aws_vpc.multi_job_demo.id}"
23 | cidr_block = "${var.subnet_cidr}"
24 | availability_zone = "${var.subnet_az}"
25 | map_public_ip_on_launch = true
26 | #depends_on = ["aws_internet_gateway.multi_job_demo"]
27 |
28 | tags {
29 | Name = "${var.name_tag_prefix} Public Subnet"
30 | }
31 | }
32 |
33 | # Create a route table allowing all addresses access to the IGW.
34 | resource "aws_route_table" "public" {
35 | vpc_id = "${aws_vpc.multi_job_demo.id}"
36 |
37 | route {
38 | cidr_block = "0.0.0.0/0"
39 | gateway_id = "${aws_internet_gateway.multi_job_demo.id}"
40 | }
41 |
42 | tags {
43 | Name = "${var.name_tag_prefix} Public Route Table"
44 | }
45 | }
46 |
47 | # Now associate the route table with the public subnet
48 | # giving all public subnet instances access to the internet.
49 | resource "aws_route_table_association" "public_subnet" {
50 | subnet_id = "${aws_subnet.public_subnet.id}"
51 | route_table_id = "${aws_route_table.public.id}"
52 | }
53 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/nomadconsul/outputs.tf:
--------------------------------------------------------------------------------
1 | # Outputs
2 | output "primary_server_private_ips" {
3 | value = ["${aws_instance.primary.*.private_ip}"]
4 | }
5 |
6 | output "primary_server_public_ips" {
7 | value = ["${aws_instance.primary.*.public_ip}"]
8 | }
9 |
10 | output "bootstrap_token" {
11 | value = "${data.external.get_bootstrap_token.result["bootstrap_token"]}"
12 | }
13 |
14 | output "client_private_ips" {
15 | value = ["${aws_instance.client.*.private_ip}"]
16 | }
17 |
18 | output "client_public_ips" {
19 | value = ["${aws_instance.client.*.public_ip}"]
20 | }
21 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/nomadconsul/scripts/user-data-client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
6 | sudo bash /ops/shared/scripts/client.sh "${region}" "${cluster_tag_value}" "${server_ip}"
7 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/nomadconsul/scripts/user-data-server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | exec > >(sudo tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
6 | sudo bash /ops/shared/scripts/server.sh "${server_count}" "${region}" "${cluster_tag_value}"
7 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/modules/nomadconsul/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 | variable "ami" {}
3 | variable "server_instance_type" {}
4 | variable "client_instance_type" {}
5 | variable "key_name" {}
6 | variable "server_count" {}
7 | variable "client_count" {}
8 | variable "name_tag_prefix" {}
9 | variable "cluster_tag_value" {}
10 | variable "owner" {}
11 | variable "ttl" {}
12 | variable "vpc_id" {}
13 | variable "subnet_id" {}
14 | variable "private_key_data" {}
15 | variable "route_table_association_id" {}
16 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/aws/outputs.tf:
--------------------------------------------------------------------------------
1 | output "IP_Addresses" {
2 | value = <>/var/log/consul.log 2>&1
20 | end script
21 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/config/nomad.hcl:
--------------------------------------------------------------------------------
1 | data_dir = "/opt/nomad/data"
2 | bind_addr = "IP_ADDRESS"
3 |
4 | # Enable the server
5 | server {
6 | enabled = true
7 | bootstrap_expect = SERVER_COUNT
8 | }
9 |
10 | name = "nomad@IP_ADDRESS"
11 |
12 | consul {
13 | address = "IP_ADDRESS:8500"
14 | }
15 |
16 | telemetry {
17 | publish_allocation_metrics = true
18 | publish_node_metrics = true
19 | }
20 |
21 | acl {
22 | enabled = true
23 | }
24 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/config/nomad_client.hcl:
--------------------------------------------------------------------------------
1 | data_dir = "/opt/nomad/data"
2 | bind_addr = "IP_ADDRESS"
3 | name = "nomad@IP_ADDRESS"
4 |
5 | # Enable the client
6 | client {
7 | enabled = true
8 | options = {
9 | driver.java.enable = "1"
10 | docker.cleanup.image = false
11 | }
12 | }
13 |
14 | consul {
15 | address = "IP_ADDRESS:8500"
16 | }
17 |
18 | telemetry {
19 | publish_allocation_metrics = true
20 | publish_node_metrics = true
21 | }
22 |
23 | acl {
24 | enabled = true
25 | }
26 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/config/nomad_upstart.conf:
--------------------------------------------------------------------------------
1 | description "Nomad"
2 |
3 | start on runlevel [2345]
4 | stop on runlevel [!2345]
5 |
6 | respawn
7 |
8 | console log
9 |
10 | script
11 | if [ -f "/etc/service/nomad" ]; then
12 | . /etc/service/nomad
13 | fi
14 |
15 | exec /usr/local/bin/nomad agent \
16 | -config="/etc/nomad.d/nomad.hcl" \
17 | >>/var/log/nomad.log 2>&1
18 | end script
19 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/jobs/catalogue.nomad:
--------------------------------------------------------------------------------
1 | job "catalogue" {
2 | datacenters = ["dc1"]
3 |
4 | constraint {
5 | attribute = "${attr.kernel.name}"
6 | value = "linux"
7 | }
8 |
9 | update {
10 | stagger = "10s"
11 | max_parallel = 1
12 | }
13 |
14 |
15 | # - catalogue - #
16 | group "catalogue" {
17 | count = 1
18 |
19 | restart {
20 | attempts = 10
21 | interval = "5m"
22 | delay = "25s"
23 | mode = "delay"
24 | }
25 |
26 | # - app - #
27 | task "catalogue" {
28 | driver = "docker"
29 |
30 | config {
31 | image = "rberlind/catalogue:latest"
32 | command = "/app"
33 | args = ["-port", "8080", "-DSN", "catalogue_user:default_password@tcp(127.0.0.1:3306)/socksdb"]
34 | hostname = "catalogue.service.consul"
35 | network_mode = "host"
36 | port_map = {
37 | http = 8080
38 | }
39 | }
40 |
41 | service {
42 | name = "catalogue"
43 | tags = ["app", "catalogue"]
44 | port = "http"
45 | }
46 |
47 | resources {
48 | cpu = 100 # 100 Mhz
49 | memory = 128 # 128MB
50 | network {
51 | mbits = 10
52 | port "http" {
53 | static = 8080
54 | }
55 | }
56 | }
57 | } # - end app - #
58 |
59 | # - db - #
60 | task "cataloguedb" {
61 | driver = "docker"
62 |
63 | config {
64 | image = "rberlind/catalogue-db:latest"
65 | hostname = "catalogue-db.service.consul"
66 | command = "docker-entrypoint.sh"
67 | args = ["mysqld", "--bind-address", "127.0.0.1"]
68 | network_mode = "host"
69 | port_map = {
70 | http = 3306
71 | }
72 | }
73 |
74 | env {
75 | MYSQL_DATABASE = "socksdb"
76 | MYSQL_ALLOW_EMPTY_PASSWORD = "true"
77 | }
78 |
79 | service {
80 | name = "catalogue-db"
81 | tags = ["db", "catalogue", "catalogue-db"]
82 | port = "http"
83 | }
84 |
85 | resources {
86 | cpu = 100 # 100 Mhz
87 | memory = 256 # 256MB
88 | network {
89 | mbits = 10
90 | port "http" {
91 | static = 3306
92 | }
93 | }
94 | }
95 |
96 | } # - end db - #
97 |
98 | } # - end group - #
99 | }
100 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/jobs/sleep.nomad:
--------------------------------------------------------------------------------
1 | job "sleep" {
2 | datacenters = ["dc1"]
3 |
4 | task "sleep" {
5 | driver = "exec"
6 |
7 | config {
8 | command = "/bin/sleep"
9 | args = ["60"]
10 | }
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/jobs/webserver-test.nomad:
--------------------------------------------------------------------------------
1 | job "webserver-test" {
2 | datacenters = ["dc1"]
3 | namespace = "qa"
4 |
5 | constraint {
6 | attribute = "${attr.kernel.name}"
7 | value = "linux"
8 | }
9 |
10 | update {
11 | stagger = "10s"
12 | max_parallel = 1
13 | }
14 |
15 | group "webserver" {
16 | count = 2
17 |
18 | restart {
19 | attempts = 10
20 | interval = "5m"
21 | delay = "25s"
22 | mode = "delay"
23 | }
24 |
25 | # - webserver - #
26 | task "webserver" {
27 | driver = "docker"
28 |
29 | config {
30 | # "httpd" is not an allowed image
31 | image = "httpd"
32 | port_map = {
33 | http = 80
34 | }
35 | }
36 |
37 | service {
38 | name = "webserver-test"
39 | tags = ["test", "webserver", "qa"]
40 | port = "http"
41 | }
42 |
43 | resources {
44 | cpu = 500 # 500 Mhz
45 | memory = 512 # 512MB
46 | network {
47 | mbits = 10
48 | port "http" {}
49 | }
50 | }
51 | } # - end task - #
52 | } # - end group - #
53 | }
54 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/jobs/website-dev.nomad:
--------------------------------------------------------------------------------
1 | job "website" {
2 | datacenters = ["dc1"]
3 | namespace = "dev"
4 |
5 | constraint {
6 | attribute = "${attr.kernel.name}"
7 | value = "linux"
8 | }
9 |
10 | update {
11 | stagger = "10s"
12 | max_parallel = 1
13 | }
14 |
15 | group "nginx" {
16 | count = 2
17 |
18 | restart {
19 | attempts = 10
20 | interval = "5m"
21 | delay = "25s"
22 | mode = "delay"
23 | }
24 |
25 | # - nginx - #
26 | task "nginx" {
27 | driver = "docker"
28 |
29 | config {
30 | image = "nginx:1.15.6"
31 | port_map = {
32 | http = 80
33 | }
34 | }
35 |
36 | service {
37 | name = "nginx-dev"
38 | tags = ["web", "nginx", "dev"]
39 | port = "http"
40 | }
41 |
42 | resources {
43 | cpu = 500 # 500 Mhz
44 | memory = 512 # 512MB
45 | network {
46 | mbits = 10
47 | port "http" {}
48 | }
49 | }
50 | } # - end task - #
51 | } # - end group - #
52 |
53 | group "mongodb" {
54 | count = 2
55 |
56 | restart {
57 | attempts = 10
58 | interval = "5m"
59 | delay = "25s"
60 | mode = "delay"
61 | }
62 |
63 | # - db - #
64 | task "mongodb" {
65 | driver = "docker"
66 |
67 | config {
68 | image = "mongo:3.4.3"
69 | port_map = {
70 | http = 27017
71 | }
72 | }
73 |
74 | service {
75 | name = "mongodb-dev"
76 | tags = ["db", "mongodb", "dev"]
77 | port = "http"
78 | }
79 |
80 | resources {
81 | cpu = 500 # 500 Mhz
82 | memory = 512 # 512MB
83 | network {
84 | mbits = 10
85 | port "http" {}
86 | }
87 | }
88 | } # - end task - #
89 | } # - end group - #
90 |
91 | }
92 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/jobs/website-qa.nomad:
--------------------------------------------------------------------------------
1 | job "website" {
2 | datacenters = ["dc1"]
3 | namespace = "qa"
4 |
5 | constraint {
6 | attribute = "${attr.kernel.name}"
7 | value = "linux"
8 | }
9 |
10 | update {
11 | stagger = "10s"
12 | max_parallel = 1
13 | }
14 |
15 | group "nginx" {
16 | count = 2
17 |
18 | restart {
19 | attempts = 10
20 | interval = "5m"
21 | delay = "25s"
22 | mode = "delay"
23 | }
24 |
25 | # - nginx - #
26 | task "nginx" {
27 | driver = "docker"
28 |
29 | config {
30 | image = "nginx:1.15.6"
31 | port_map = {
32 | http = 80
33 | }
34 | }
35 |
36 | service {
37 | name = "nginx-qa"
38 | tags = ["web", "nginx", "qa"]
39 | port = "http"
40 | }
41 |
42 | resources {
43 | cpu = 500 # 500 Mhz
44 | memory = 1024 # 1024MB
45 | network {
46 | mbits = 10
47 | port "http" {}
48 | }
49 | }
50 | } # - end task - #
51 | } # - end group - #
52 |
53 | group "mongodb" {
54 | count = 2
55 |
56 | restart {
57 | attempts = 10
58 | interval = "5m"
59 | delay = "25s"
60 | mode = "delay"
61 | }
62 |
63 | # - db - #
64 | task "mongodb" {
65 | driver = "docker"
66 |
67 | config {
68 | image = "mongo:3.4.3"
69 | port_map = {
70 | http = 27017
71 | }
72 | }
73 |
74 | service {
75 | name = "mongodb-qa"
76 | tags = ["db", "mongodb", "qa"]
77 | port = "http"
78 | }
79 |
80 | resources {
81 | cpu = 500 # 500 Mhz
82 | memory = 1024 # 1024MB
83 | network {
84 | mbits = 10
85 | port "http" {}
86 | }
87 | }
88 | } # - end task - #
89 | } # - end group - #
90 |
91 | }
92 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/scripts/client.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | CONFIGDIR=/ops/shared/config
6 |
7 | CONSULCONFIGDIR=/etc/consul.d
8 | NOMADCONFIGDIR=/etc/nomad.d
9 | HOME_DIR=ubuntu
10 |
11 | # Wait for network
12 | sleep 15
13 |
14 | IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
15 | REGION=$1
16 | CLUSTER_TAG_VALUE=$2
17 | SERVER_IP=$3
18 |
19 | # Install Java
20 | apt-get update
21 | apt install -y default-jre
22 |
23 | # Consul
24 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul_client.json
25 | sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul_upstart.conf
26 | sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul_upstart.conf
27 | cp $CONFIGDIR/consul_client.json $CONSULCONFIGDIR/consul.json
28 | cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf
29 |
30 | service consul start
31 | sleep 10
32 | export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
33 |
34 | # Nomad
35 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad_client.hcl
36 | sed -i "s@VAULT_URL@$VAULT_URL@g" $CONFIGDIR/nomad_client.hcl
37 | cp $CONFIGDIR/nomad_client.hcl $NOMADCONFIGDIR/nomad.hcl
38 | cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf
39 |
40 | service nomad start
41 | sleep 10
42 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
43 |
44 | echo "nameserver $IP_ADDRESS" | tee /etc/resolv.conf.new
45 | cat /etc/resolv.conf | tee --append /etc/resolv.conf.new
46 | mv /etc/resolv.conf.new /etc/resolv.conf
47 |
48 | # Add search service.consul at bottom of /etc/resolv.conf
49 | echo "search service.consul" | tee --append /etc/resolv.conf
50 |
51 | # Set env vars for tool CLIs
52 | echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | tee --append /home/$HOME_DIR/.bashrc
53 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | tee --append /home/$HOME_DIR/.bashrc
54 |
55 | # Start Docker
56 | service docker restart
57 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/scripts/server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | CONFIGDIR=/ops/shared/config
6 |
7 | CONSULCONFIGDIR=/etc/consul.d
8 | NOMADCONFIGDIR=/etc/nomad.d
9 | HOME_DIR=ubuntu
10 |
11 | # Wait for network
12 | sleep 15
13 |
14 | IP_ADDRESS=$(curl http://instance-data/latest/meta-data/local-ipv4)
15 | SERVER_COUNT=$1
16 | REGION=$2
17 | CLUSTER_TAG_VALUE=$3
18 |
19 | # Consul
20 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul.json
21 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/consul.json
22 | sed -i "s/REGION/$REGION/g" $CONFIGDIR/consul_upstart.conf
23 | sed -i "s/CLUSTER_TAG_VALUE/$CLUSTER_TAG_VALUE/g" $CONFIGDIR/consul_upstart.conf
24 | cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
25 | cp $CONFIGDIR/consul_upstart.conf /etc/init/consul.conf
26 |
27 | service consul start
28 | sleep 10
29 | export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
30 |
31 | # Nomad
32 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad.hcl
33 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/nomad.hcl
34 | cp $CONFIGDIR/nomad.hcl $NOMADCONFIGDIR
35 | cp $CONFIGDIR/nomad_upstart.conf /etc/init/nomad.conf
36 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
37 |
38 | echo "nameserver $IP_ADDRESS" | tee /etc/resolv.conf.new
39 | cat /etc/resolv.conf | tee --append /etc/resolv.conf.new
40 | mv /etc/resolv.conf.new /etc/resolv.conf
41 |
42 | # Add search service.consul at bottom of /etc/resolv.conf
43 | echo "search service.consul" | tee --append /etc/resolv.conf
44 |
45 | # Set env vars for tool CLIs
46 | echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | tee --append /home/$HOME_DIR/.bashrc
47 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | tee --append /home/$HOME_DIR/.bashrc
48 |
49 | # Start Docker
50 | service docker restart
51 |
52 | # Copy Nomad jobs and scripts to desired locations
53 | cp /ops/shared/jobs/* /home/ubuntu/.
54 | chown -R $HOME_DIR:$HOME_DIR /home/$HOME_DIR/
55 | chmod 666 /home/ubuntu/*
56 |
57 | # Start Nomad
58 | service nomad start
59 | sleep 60
60 |
--------------------------------------------------------------------------------
/operations/multi-job-demo/shared/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | cd /ops
5 |
6 | CONFIGDIR=/ops/shared/config
7 |
8 | CONSULVERSION=1.3.0
9 | CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
10 | CONSULCONFIGDIR=/etc/consul.d
11 | CONSULDIR=/opt/consul
12 |
13 | NOMADVERSION=0.8.6
14 | #NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
15 | # Will use S3 for Nomad Enterprise
16 | NOMADDOWNLOAD=s3://hc-enterprise-binaries/nomad-enterprise/${NOMADVERSION}/nomad-enterprise_${NOMADVERSION}+ent_linux_amd64.zip
17 | NOMADCONFIGDIR=/etc/nomad.d
18 | NOMADDIR=/opt/nomad
19 |
20 | # Dependencies
21 | sudo apt-get install -y software-properties-common
22 | sudo apt-get update
23 | sudo apt-get install -y unzip tree redis-tools jq
24 | sudo apt-get install -y upstart-sysv
25 | sudo update-initramfs -u
26 | sudo apt-get install -y awscli
27 |
28 | # Disable the firewall
29 | sudo ufw disable
30 |
31 | # Download Consul
32 | curl -L $CONSULDOWNLOAD > consul.zip
33 |
34 | ## Install Consul
35 | sudo unzip consul.zip -d /usr/local/bin
36 | sudo chmod 0755 /usr/local/bin/consul
37 | sudo chown root:root /usr/local/bin/consul
38 | sudo setcap "cap_net_bind_service=+ep" /usr/local/bin/consul
39 |
40 | ## Configure Consul
41 | sudo mkdir -p $CONSULCONFIGDIR
42 | sudo chmod 755 $CONSULCONFIGDIR
43 | sudo mkdir -p $CONSULDIR
44 | sudo chmod 755 $CONSULDIR
45 |
46 | # Download Nomad
47 | #curl -L $NOMADDOWNLOAD > nomad.zip
48 | # Use S3 for Nomad Enterprise
49 | aws s3 cp --region="us-east-1" $NOMADDOWNLOAD nomad.zip
50 |
51 | ## Install Nomad
52 | sudo unzip nomad.zip -d /usr/local/bin
53 | sudo chmod 0755 /usr/local/bin/nomad
54 | sudo chown root:root /usr/local/bin/nomad
55 |
56 | ## Configure Nomad
57 | sudo mkdir -p $NOMADCONFIGDIR
58 | sudo chmod 755 $NOMADCONFIGDIR
59 | sudo mkdir -p $NOMADDIR
60 | sudo chmod 755 $NOMADDIR
61 |
62 | # Docker
63 | sudo apt-get update
64 | sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
65 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
66 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
67 | sudo apt-get update
68 | sudo apt-get install -y docker-ce=17.09.1~ce-0~ubuntu
69 | sudo usermod -aG docker ubuntu
70 |
--------------------------------------------------------------------------------
/operations/provision-nomad/README.md:
--------------------------------------------------------------------------------
1 | # Provision Nomad
2 |
3 | The goal of this guide is to allow users to easily provision a Nomad cluster in just a few commands.
4 |
5 | ## Reference Material
6 |
7 | - [Terraform](https://www.terraform.io/)
8 | - [Consul](https://www.consul.io/)
9 | - [Nomad](https://www.nomadproject.io/)
10 |
11 | ## Estimated Time to Complete
12 |
13 | 5 minutes.
14 |
15 | ## Personas
16 |
17 | ### Operator
18 |
19 | The operator is responsible for provisioning the Nomad cluster infrastructure and managing day 1 & 2 operations. This includes initial service administration, upgrades, logging/monitoring, and more.
20 |
21 | ### Developer
22 |
23 | The developer will be consuming the Nomad service and developing against it. This may mean leveraging Nomad for managing Application Deployment, Multi-Cloud applications, or Flexible Workloads.
24 |
25 | ### InfoSec
26 |
27 | InfoSec will be creating and managing access controls for Nomad; this may include both ACLs and Sentinel policies.
28 |
29 | ## Challenge
30 |
31 | There are many different ways to provision and configure a Nomad cluster, making it difficult to get started.
32 |
33 | ## Solution
34 |
35 | Provision a Nomad cluster using one of the guides below, each tailored to a different use case.
36 |
37 | ### Dev
38 |
39 | The [Nomad Dev Guides](./dev) are for **educational purposes only**. They're designed to allow you to quickly stand up a single instance with Nomad running in `-dev` mode in your desired provider. The single node is provisioned into a single public subnet that's completely open, allowing for easy (and insecure) access to the instance. Because Nomad is running in `-dev` mode, all data is in-memory and not persisted to disk. If any agent fails or the node restarts, all data will be lost. This is in no way, shape, or form meant for production use; please use with caution.
40 |
41 | ### Quick Start
42 |
43 | The [Nomad Quick Start Guide](./quick-start) provisions a 3 node Nomad server cluster, a 3 node Consul cluster, and a 1 node Nomad client cluster in the provider of your choice.
44 |
45 | The Quick Start guide leverages the scripts in the [Guides Configuration Repo](https://github.com/hashicorp/guides-configuration) to do runtime configuration of Nomad. Although using `curl bash` at runtime is _not_ a best practice, it makes it quick and easy to stand up a Nomad cluster with no external dependencies like pre-built images. This guide will also forgo setting up TLS/encryption on Nomad for the sake of simplicity.
46 |
47 | ### Best Practices
48 |
49 | The [Nomad Best Practices Guide](./best-practices) provisions a 3 node Nomad cluster with a similar architecture to the [Quick Start](#quick-start) guide in the provider of your choice. The difference is that this guide will set up TLS/encryption across Nomad and depends on pre-built images rather than runtime configuration. You can find the Packer templates to create these Nomad images in the [Guides Configuration Repo](https://github.com/hashicorp/guides-configuration/tree/master/vault).
50 |
51 | ## Steps
52 |
53 | We will now provision the Nomad cluster.
54 |
55 | ### Step 1: Choose your Preferred Guide
56 |
57 | `cd` into one of the below guides from the root of the repository and follow the instructions from there.
58 |
59 | - [Vagrant dev](./dev/vagrant-local)
60 | - [AWS dev](./dev/terraform-aws)
61 | - [AWS quick-start](./quick-start/terraform-aws)
62 | - [AWS best-practices](./best-practices/terraform-aws)
63 |
64 | #### CLI
65 |
66 | ```sh
67 | $ cd operations/provision-nomad/dev/vagrant-local
68 | $ cd operations/provision-nomad/dev/terraform-aws
69 | $ cd operations/provision-nomad/quick-start/terraform-aws
70 | $ cd operations/provision-nomad/best-practices/terraform-aws
71 | ```
72 |
73 | ## Next Steps
74 |
75 | Now that you've provisioned and configured Nomad, start walking through the [Nomad Guides](https://www.nomadproject.io/guides/index.html).
76 |
--------------------------------------------------------------------------------
/operations/provision-nomad/best-practices/terraform-aws/README.md:
--------------------------------------------------------------------------------
1 | # Provision a Best Practices Nomad Cluster in AWS
2 |
3 | The goal of this guide is to allow users to easily provision a best practices Nomad & Consul cluster in just a few commands.
4 |
5 | ## Reference Material
6 |
7 | - [Terraform Getting Started](https://www.terraform.io/intro/getting-started/install.html)
8 | - [Terraform Docs](https://www.terraform.io/docs/index.html)
9 | - [Consul Getting Started](https://www.consul.io/intro/getting-started/install.html)
10 | - [Consul Docs](https://www.consul.io/docs/index.html)
11 | - [Nomad Getting Started](https://www.nomadproject.io/intro/getting-started/install.html)
12 | - [Nomad Docs](https://www.nomadproject.io/docs/index.html)
13 |
14 | ## Estimated Time to Complete
15 |
16 | 5 minutes.
17 |
18 | ## Challenge
19 |
20 | There are many different ways to provision and configure an easily accessible best practices Nomad & Consul cluster, making it difficult to get started.
21 |
22 | ## Solution
23 |
24 | Provision a best practices Nomad & Consul cluster in a private network with a bastion host.
25 |
26 | The AWS Best Practices Nomad guide provisions a 3 node Nomad and 3 node Consul cluster with a similar architecture to the [Quick Start](../quick-start) guide. The difference is that this guide will set up TLS/encryption across Nomad & Consul and depends on pre-built images rather than runtime configuration. You can find the Packer templates to create the [Consul image](https://github.com/hashicorp/guides-configuration/blob/master/consul/consul-aws.json) and [Nomad image](https://github.com/hashicorp/guides-configuration/blob/master/nomad/nomad-aws.json) in the [Guides Configuration Repo](https://github.com/hashicorp/guides-configuration/).
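If you need to build those images yourself, the flow is roughly the following (a sketch only; check each Packer template for the variables it expects before running it):

```bash
# Illustrative build flow for the pre-built Nomad image
git clone https://github.com/hashicorp/guides-configuration.git
cd guides-configuration/nomad
packer build nomad-aws.json
```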
27 |
28 | ## Prerequisites
29 |
30 | - [Download Terraform](https://www.terraform.io/downloads.html)
31 |
32 | ## Steps
33 |
34 | We will now provision the best practices Nomad cluster.
35 |
36 | ### Step 1: Initialize
37 |
38 | Initialize Terraform - download providers and modules.
39 |
40 | #### CLI
41 |
42 | [`terraform init` Command](https://www.terraform.io/docs/commands/init.html)
43 |
44 | ##### Request
45 |
46 | ```sh
47 | $ terraform init
48 | ```
49 |
50 | ##### Response
51 | ```
52 | ```
53 |
54 | ### Step 2: Plan
55 |
56 | Run a `terraform plan` to ensure Terraform will provision what you expect.
57 |
58 | #### CLI
59 |
60 | [`terraform plan` Command](https://www.terraform.io/docs/commands/plan.html)
61 |
62 | ##### Request
63 |
64 | ```sh
65 | $ terraform plan
66 | ```
67 |
68 | ##### Response
69 | ```
70 | ```
71 |
72 | ### Step 3: Apply
73 |
74 | Run a `terraform apply` to provision the HashiStack. Once provisioned, view the `zREADME` instructions output from Terraform for next steps.
75 |
76 | #### CLI
77 |
78 | [`terraform apply` command](https://www.terraform.io/docs/commands/apply.html)
79 |
80 | ##### Request
81 |
82 | ```sh
83 | $ terraform apply
84 | ```
85 |
86 | ##### Response
87 | ```
88 | ```
89 |
90 | ## Next Steps
91 |
92 | Now that you've provisioned and configured a best practices Nomad & Consul cluster, start walking through the [Nomad Guides](https://www.nomadproject.io/guides/index.html).
93 |
--------------------------------------------------------------------------------
/operations/provision-nomad/best-practices/terraform-aws/gitignore.tf:
--------------------------------------------------------------------------------
1 | # `.tf` files that contain the word "gitignore" are ignored
2 | # by git in the `.gitignore` file at the root of this repo.
3 |
4 | # If you have local Terraform configuration that you want
5 | # ignored like Terraform backend configuration, create
6 | # a new file (separate from this one) that contains the
7 | # word "gitignore" (e.g. `backend.gitignore.tf`).
8 |
--------------------------------------------------------------------------------
/operations/provision-nomad/dev/terraform-aws/README.md:
--------------------------------------------------------------------------------
1 | # Provision a Development Nomad Cluster in AWS
2 |
3 | The goal of this guide is to allow users to easily provision a development Nomad cluster in just a few commands.
4 |
5 | ## Reference Material
6 |
7 | - [Terraform Getting Started](https://www.terraform.io/intro/getting-started/install.html)
8 | - [Terraform Docs](https://www.terraform.io/docs/index.html)
9 | - [Nomad Getting Started](https://www.nomadproject.io/intro/getting-started/install.html)
10 | - [Nomad Docs](https://www.nomadproject.io/docs/index.html)
11 |
12 | ## Estimated Time to Complete
13 |
14 | 5 minutes.
15 |
16 | ## Challenge
17 |
18 | There are many different ways to provision and configure an easily accessible development Nomad cluster, making it difficult to get started.
19 |
20 | ## Solution
21 |
22 | Provision a development Nomad cluster in a public subnet open to the world.
23 |
24 | The AWS Development Nomad guide is for **educational purposes only**. It's designed to allow you to quickly stand up a single instance with Nomad running in `-dev` mode. The single node is provisioned into a single public subnet that's completely open, allowing for easy (and insecure) access to the instance. Because Nomad is running in `-dev` mode, all data is in-memory and not persisted to disk. If any agent fails or the node restarts, all data will be lost. This is in no way, shape, or form meant for production use; please use with caution.
25 |
26 | ## Prerequisites
27 |
28 | - [Download Terraform](https://www.terraform.io/downloads.html)
29 |
30 | ## Steps
31 |
32 | We will now provision the development Nomad cluster.
33 |
34 | ### Step 1: Initialize
35 |
36 | Initialize Terraform - download providers and modules.
37 |
38 | #### CLI
39 |
40 | [`terraform init` Command](https://www.terraform.io/docs/commands/init.html)
41 |
42 | ##### Request
43 |
44 | ```sh
45 | $ terraform init
46 | ```
47 |
48 | ##### Response
49 | ```
50 | ```
51 |
52 | ### Step 2: Plan
53 |
54 | Run a `terraform plan` to ensure Terraform will provision what you expect.
55 |
56 | #### CLI
57 |
58 | [`terraform plan` Command](https://www.terraform.io/docs/commands/plan.html)
59 |
60 | ##### Request
61 |
62 | ```sh
63 | $ terraform plan
64 | ```
65 |
66 | ##### Response
67 | ```
68 | ```
69 |
70 | ### Step 3: Apply
71 |
72 | Run a `terraform apply` to provision the HashiStack. Once provisioned, view the `zREADME` instructions output from Terraform for next steps.
73 |
74 | #### CLI
75 |
76 | [`terraform apply` command](https://www.terraform.io/docs/commands/apply.html)
77 |
78 | ##### Request
79 |
80 | ```sh
81 | $ terraform apply
82 | ```
83 |
84 | ##### Response
85 | ```
86 | ```
87 |
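Once the apply completes, you can point the Nomad CLI at the instance to confirm the agent is up (a quick sketch; `<instance-public-ip>` is a placeholder for the public IP reported in the Terraform outputs):

```bash
export NOMAD_ADDR=http://<instance-public-ip>:4646
nomad server members
nomad node status
```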
88 | ## Next Steps
89 |
90 | Now that you've provisioned and configured a development Nomad cluster, start walking through the [Nomad Guides](https://www.nomadproject.io/guides/index.html).
91 |
--------------------------------------------------------------------------------
/operations/provision-nomad/dev/terraform-aws/gitignore.tf:
--------------------------------------------------------------------------------
1 | # `.tf` files that contain the word "gitignore" are ignored
2 | # by git in the `.gitignore` file at the root of this repo.
3 |
4 | # If you have local Terraform configuration that you want
5 | # ignored like Terraform backend configuration, create
6 | # a new file (separate from this one) that contains the
7 | # word "gitignore" (e.g. `backend.gitignore.tf`).
8 |
--------------------------------------------------------------------------------
/operations/provision-nomad/dev/terraform-aws/terraform.auto.tfvars:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------------------------------------------------------------
2 | # General Variables
3 | # ---------------------------------------------------------------------------------------------------------------------
4 | # name = "nomad-dev"
5 | # ami_owner = "099720109477" # Base image owner, defaults to RHEL
6 | # ami_name = "*ubuntu-xenial-16.04-amd64-server-*" # Base image name, defaults to RHEL
7 |
8 | # ---------------------------------------------------------------------------------------------------------------------
9 | # Network Variables
10 | # ---------------------------------------------------------------------------------------------------------------------
11 | # vpc_cidr = "172.19.0.0/16"
12 | # vpc_cidrs_public = ["172.19.0.0/20", "172.19.16.0/20", "172.19.32.0/20",]
13 | # vpc_cidrs_private = ["172.19.48.0/20", "172.19.64.0/20", "172.19.80.0/20",]
14 |
15 | # nat_count = 1 # Defaults to 1
16 | # bastion_servers = 0 # Defaults to 0
17 | # bastion_image_id = "" # AMI ID override, defaults to base RHEL AMI
18 |
19 | # network_tags = {"owner" = "hashicorp", "TTL" = "24"}
20 |
21 | # ---------------------------------------------------------------------------------------------------------------------
22 | # Consul Variables
23 | # ---------------------------------------------------------------------------------------------------------------------
24 | # consul_install = true # Install Consul
25 | # consul_version = "1.2.3" # Consul Version for runtime install, defaults to 1.2.3
26 | # consul_url = "" # Consul Enterprise download URL for runtime install, defaults to Consul OSS
27 |
28 | # consul_config_override = <
12 |
13 | # The policy ensures that every job has a suitable constraint.
14 | # The constraint then ensures that jobs can only be deployed to suitable clients.
15 |
16 | # validate_namespace_isolation function
17 | validate_namespace_isolation = func() {
18 | validated = false
19 |
20 | # Check constraints until we find one with attribute (l_target) set to
21 | # ${meta.namespace}. Then verify that the constraint's value (r_target) is
22 | # set to the job's namespace.
23 | for job.constraints as c {
24 | if c.l_target is "${meta.namespace}" and c.r_target is job.namespace {
25 | validated = true
26 | break
27 | }
28 | }
29 |
30 | # Print violation message if a suitable constraint was not found
31 | if not validated {
32 | print("You tried to run a job in the", job.namespace, "namespace.")
33 | print("Each job must include a constraint with attribute set to",
34 | "${meta.namespace} and value set to the namespace of the job.")
35 | }
36 |
37 | return validated
38 | }
39 |
40 | # Call the validate_namespace_isolation function
41 | validated = validate_namespace_isolation()
42 |
43 | # Main rule
44 | main = rule {
45 | validated
46 | }
47 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/enforce_multi_dc.sentinel:
--------------------------------------------------------------------------------
1 | main = rule { enforce_multi_dc }
2 |
3 | enforce_multi_dc = rule {
4 | length(job.datacenters) > 1
5 | }
6 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/policy_per_namespace.sentinel:
--------------------------------------------------------------------------------
1 | main = rule { task_users() }
2 | # Check that the namespace is using the correct user
3 | task_users = func() {
4 | ns = job.namespace
5 | for job.task_groups as tg {
6 | for tg.tasks as task {
7 | user = task.user
8 | if user is "" {
9 | continue
10 | }
11 | if ns == "groupA" and user != "ubuntu" {
12 | return false
13 | } else if ns == "groupB" and user != "nobody" {
14 | return false
15 | }
16 | }
17 | }
18 | return true
19 | }
20 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/prevent-docker-host-network.sentinel:
--------------------------------------------------------------------------------
1 | # Prevent Docker containers from running with host network mode
2 |
3 | # prevent_host_network rule
4 | prevent_host_network = rule {
5 | all job.task_groups as tg {
6 | all tg.tasks as task {
7 | (task.config.network_mode is not "host") else true
8 | }
9 | }
10 | }
11 |
12 | # Main rule
13 | main = rule {
14 | prevent_host_network
15 | }
16 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/require-docker-digests.sentinel:
--------------------------------------------------------------------------------
1 | # Policy to enforce all images have digests except for listed exceptions
2 | # This requires explicit digest to be used instead of standard tags
3 | # even if the standard tag corresponds to a signed image. This protects against
4 | # updates to existing standard tags since the digest would change.
5 |
6 | # If you want all Docker images to be signed, instead of using this Sentinel
7 | # policy, just set the environment variable DOCKER_CONTENT_TRUST to 1
8 | # for the Docker client on all Nomad clients.
9 |
10 | # Standard strings import
11 | import "strings"
12 |
13 | # Exception Docker images (that do not have to be signed)
14 | exception_images = [
15 | "nginx",
16 | ]
17 |
18 | restrict_images = func(exceptions) {
19 |
20 | # Initialize validated boolean
21 | validated = true
22 |
23 | # Iterate over all task groups and tasks
24 | for job.task_groups as tg {
25 | for tg.tasks as task {
26 | if task.driver is "docker" {
27 | # image specified for the task
28 | full_image = task.config.image
29 | split_image = strings.split(full_image, "@")
30 |
31 | # Check if there was an explicit digest tag
32 | # That will be the case if split_image has 2 elements.
33 | if length(split_image) < 2 {
34 | # There was no digest, but we have to parse out image name
35 | base_image = split_image[0]
36 | # Strip "https://" if present
37 | base_image_no_protocol = strings.trim_prefix(base_image, "https://")
38 | # Strip "http://" if present
39 | base_image_no_protocol = strings.trim_prefix(base_image_no_protocol, "http://")
40 | # Strip off tag
41 | split_base_image = strings.split(base_image_no_protocol, "/")
42 | image_with_tag = split_base_image[length(split_base_image) - 1]
43 | image_without_tag = strings.split(image_with_tag, ":")[0]
44 |
45 | # See if image name in exceptions
46 | if image_without_tag not in exceptions {
47 | print("Docker image", full_image, "did not have a digest and was",
48 | "not in the list of exception images", exceptions)
49 | validated = false
50 | }
51 |
52 | } // end digest check
53 | } // end if docker driver
54 | } // end for tasks
55 | } // end for task groups
56 |
57 | return validated
58 |
59 | }
60 |
61 | # Main rule
62 | main = rule {
63 | restrict_images(exception_images)
64 | }
65 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/resource_check.sentinel:
--------------------------------------------------------------------------------
1 | import "units"
2 |
3 | resource_check = func(task_groups, resource) {
4 | result = 0
5 | for task_groups as g {
6 | for g.tasks as t {
7 | result = result + t.resources[resource] * g.count
8 | }
9 | }
10 | return result
11 | }
12 |
13 | main = rule {
14 | resource_check(job.task_groups, "cpu") <= 1500 and
15 | resource_check(job.task_groups, "memory_mb") <= 2500
16 | }
17 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/restrict-docker-images-and-prevent-latest-tag.sentinel:
--------------------------------------------------------------------------------
1 | # This policy restricts which Docker images are allowed and also prevents use of
2 | # the "latest" tag since the image must specify a tag that starts with a number.
3 |
4 | # Allowed Docker images
5 | allowed_images = [
6 | "nginx",
7 | "mongo",
8 | ]
9 |
10 | # Restrict allowed Docker images
11 | restrict_images = rule {
12 | all job.task_groups as tg {
13 | all tg.tasks as task {
14 | any allowed_images as allowed {
15 | # Note that we require ":" and a tag after it
16 | # which must start with a number, preventing "latest"
17 | task.config.image matches allowed + ":[0-9](.*)"
18 | }
19 | }
20 | }
21 | }
22 |
23 | # Main rule
24 | main = rule {
25 | restrict_images
26 | }
27 |
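For example, a task like the following (hypothetical, not from this repo) would pass, while `nginx:latest`, an untagged `nginx`, or any image outside the allowed list would be rejected:

```hcl
task "web" {
  driver = "docker"

  config {
    # "nginx:1.15" matches "nginx:[0-9](.*)"; "nginx:latest" would not.
    image = "nginx:1.15"
  }
}
```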
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/restrict_batch_deploy_time.sentinel:
--------------------------------------------------------------------------------
1 | import "time"
2 |
3 | batch_job = rule {
4 | job.type is "batch"
5 | }
6 |
7 | is_weekday = rule { time.day not in ["saturday", "sunday"] }
8 | is_open_hours = rule { time.hour > 8 and time.hour < 16 }
9 |
10 | main = rule { is_open_hours and is_weekday and batch_job }
11 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/restrict_docker_images.sentinel:
--------------------------------------------------------------------------------
1 | main = rule { all_drivers_docker and allowed_docker_images }
2 |
3 | allowed_images = [
4 | "https://hub.docker.internal/",
5 | "https://hub-test.docker.internal/",
6 | "redis",
7 | ]
8 |
9 | all_drivers_docker = rule {
10 | all job.task_groups as tg {
11 | all tg.tasks as task {
12 | task.driver is "docker"
13 | }
14 | }
15 | }
16 |
17 | allowed_docker_images = rule {
18 | all job.task_groups as tg {
19 | all tg.tasks as task {
20 | any allowed_images as allowed {
21 | task.config.image matches allowed
22 | }
23 | }
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/operations/sentinel/sentinel_policies/restrict_namespace_to_dc.sentinel:
--------------------------------------------------------------------------------
1 | # Only allow jobs in the "unit1" namespace, and restrict them to the "dc1" datacenter.
2 | main = rule { task_datacenter() }
3 |
4 | task_datacenter = func() {
5 |   allowed_datacenters = ["dc1"]
6 |   if job.namespace is not "unit1" {
7 |     return false
8 |   }
9 |   # Every datacenter the job targets must be in the allowed list
10 |   for job.datacenters as datacenter {
11 |     if datacenter not in allowed_datacenters {
12 |       return false
13 |     }
14 |   }
15 |   return true
16 | }
17 |
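For illustration, a hypothetical job fragment this policy evaluates; the namespace itself is selected at submission time (for example `nomad job run -namespace=unit1 example.nomad`):

```hcl
job "example" {
  # Must contain only "dc1" for jobs submitted to the "unit1" namespace
  datacenters = ["dc1"]

  # ... remainder of the job omitted
}
```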
--------------------------------------------------------------------------------
/provision/vagrant/README.md:
--------------------------------------------------------------------------------
1 | # Vagrant: Nomad Cluster (Single Vault server on node3)
2 | Spins up 3 virtual machines, each running Nomad in both client and server mode. Node3 also runs Vault (to demonstrate the Nomad-Vault integration) and a MySQL server. WARNING: The Nomad servers are configured with the Vault root token. In production, a Vault token role should be used instead, as shown here: https://www.nomadproject.io/docs/vault-integration/index.html.
3 |
4 | # Usage
5 | If you would like to use the enterprise binaries, download them and place the unzipped binaries in the root directory of nomad-guides:
6 |
7 | ```bash
8 | $ pwd
9 | /Users/andrewklaas/hashicorp/nomad-guides
10 | $ ls -l
11 | application-deployment
12 | consul #consul enterprise binary
13 | multi-cloud
14 | nomad #nomad enterprise binary
15 | operations
16 | provision
17 | shared
18 | vault #vault enterprise binary
19 | workload-flexibility
20 | ```
21 |
22 | 1. Run `vagrant up`
23 | ```bash
24 | $ vagrant up
25 | . . .
26 | . . . Vagrant running . . .
27 | . . .
28 | ==> node1: Nomad has been provisioned and is available at the following web address:
29 | ==> node1: http://localhost:4646/ui/ <<---- Primary Nomad UI (node1)
30 | ==> node1: Nomad has Consul storage backend with web UI available at the following web address:
31 | ==> node1: http://localhost:8500/ui/ <<---- Primary Consul UI (node1)
32 | ==> node1: Primary Vault node has been provisioned and is available at the following web address:
33 | ==> node1: http://localhost:8200/ui/ <<---- Primary Vault UI (node3)
34 | ==> node1:
35 | ==> node1: Nomad node2 has been provisioned and is available at the following web address:
36 | ==> node1: http://localhost:5646/ui/ <<---- Nomad UI (node2)
37 | ==> node1: Nomad node3 has been provisioned and is available at the following web address:
38 | ==> node1: http://localhost:6646/ui/ <<---- Nomad UI (node3)
39 |
40 |
41 | ```
42 |
43 | 2. SSH into one of the nodes (Vault is running on node3)
44 | ```bash
45 | vagrant ssh node1
46 | ```
47 |
48 | 3. Generate Nomad's bootstrap token
49 | ```bash
50 | vagrant@node1:~$ nomad acl bootstrap
51 | Accessor ID = b1c4417e-411c-0fd8-4c29-7a51fc89ec79
52 | Secret ID = e133205b-439c-f67d-7b58-96a87d68d7b2
53 | Name = Bootstrap Token
54 | Type = management
55 | Global = true
56 | Policies = n/a
57 | Create Time = 2017-12-19 18:33:53.024032646 +0000 UTC
58 | Create Index = 23
59 | Modify Index = 23
60 |
61 | vagrant@node1:~$ export NOMAD_TOKEN=e133205b-439c-f67d-7b58-96a87d68d7b2
62 | ```
63 |
64 | 4. Check Nomad cluster health
65 | ```bash
66 | vagrant@node1:~$ nomad server-members
67 | Name Address Port Status Leader Protocol Build Datacenter Region
68 | node1.global 192.168.50.150 4648 alive false 2 0.7.0+ent dc1 global
69 | node2.global 192.168.50.151 4648 alive false 2 0.7.0+ent dc1 global
70 | node3.global 192.168.50.152 4648 alive true 2 0.7.0+ent dc1 global
71 | vagrant@node1:~$ nomad node-status
72 | ID DC Name Class Drain Status
73 | 5ba5d3c6 dc1 node2 false ready
74 | fb792a08 dc1 node3 false ready
75 | 1a3bf4ca dc1 node1 false ready
76 | ```
77 |
78 | 5. If you want to use Vault from the CLI, grab the root token from Consul (NOT BEST PRACTICE: FOR DEMO USE ONLY)
79 | ```bash
80 | vagrant@node1:~$ consul kv get service/vault/root-token
81 | 6389c4e7-9f0a-f5f2-9c71-d5cec294c99a
82 |
83 | vagrant@node1:~$ export VAULT_TOKEN=6389c4e7-9f0a-f5f2-9c71-d5cec294c99a
84 |
85 | vagrant@node1:~$ vault status
86 | Type: shamir
87 | Sealed: false
88 | Key Shares: 5
89 | Key Threshold: 3
90 | Unseal Progress: 0
91 | Unseal Nonce:
92 | Version: 0.9.0.1+ent
93 | Cluster Name: vault-cluster-4a931870
94 | Cluster ID: 955835ed-dc1d-004f-c4ee-4637384e21ff
95 |
96 | High-Availability Enabled: true
97 | Mode: active
98 | Leader Cluster Address: https://192.168.50.152:8201
99 |
100 | ```
101 |
--------------------------------------------------------------------------------
/provision/vagrant/vault_init_and_unseal.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | set -v
4 | set -x
5 |
6 | export VAULT_ADDR=http://127.0.0.1:8200
7 | cget() { curl -sf "http://127.0.0.1:8500/v1/kv/service/vault/$1?raw"; }
8 |
9 | if [ ! $(cget root-token) ]; then
10 | logger "$0 - Initializing Vault"
11 |
12 | curl \
13 | --silent \
14 | --request PUT \
15 | --data '{"secret_shares": 1, "secret_threshold": 1}' \
16 | ${VAULT_ADDR}/v1/sys/init | tee \
17 | >(jq -r .root_token > /tmp/root-token) \
18 | >(jq -r .keys[0] > /tmp/unseal-key)
19 |
20 | curl -sfX PUT 127.0.0.1:8500/v1/kv/service/vault/unseal-key -d $(cat /tmp/unseal-key)
21 | curl -sfX PUT 127.0.0.1:8500/v1/kv/service/vault/root-token -d $(cat /tmp/root-token)
22 |
23 | vault operator unseal $(cget unseal-key)
24 |
25 | export ROOT_TOKEN=$(cget root-token)
26 |
27 | echo "Removing master keys from disk" && rm -f /tmp/root-token /tmp/unseal-key
28 |
29 | else
30 | logger "$0 - Vault already initialized"
31 | fi
32 |
33 | logger "$0 - Unsealing Vault"
34 | vault operator unseal $(cget unseal-key)
35 |
36 | export ROOT_TOKEN=$(cget root-token)
37 | vault auth $ROOT_TOKEN
38 |
39 | #Create admin user
40 | echo '
41 | path "*" {
42 | capabilities = ["create", "read", "update", "delete", "list", "sudo"]
43 | }' | vault policy-write vault-admin -
44 | vault auth-enable userpass
45 | vault write auth/userpass/users/vault password=vault policies=vault-admin
46 |
47 | vault mount database
48 | vault write database/config/mysql \
49 | plugin_name=mysql-legacy-database-plugin \
50 | connection_url="vaultadmin:vaultadminpassword@tcp(192.168.50.152:3306)/" \
51 | allowed_roles="readonly"
52 |
53 | vault write database/roles/readonly \
54 | db_name=mysql \
55 | creation_statements="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';" \
56 | default_ttl="30m" \
57 | max_ttl="24h"
58 |
59 | vault mount mysql
60 | vault write mysql/config/connection \
61 | connection_url="vaultadmin:vaultadminpassword@tcp(192.168.50.152:3306)/"
62 |
63 | vault write mysql/roles/app \
64 | sql="CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';GRANT SELECT ON *.* TO '{{name}}'@'%';"
65 |
66 | logger "$0 - Vault setup complete"
67 |
68 | vault status
69 |
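The `database/roles/readonly` role configured above issues short-lived MySQL users (default_ttl = 30m). As a sketch only (task, policy, and file names are illustrative, not taken from this repo), a Nomad task could render those dynamic credentials through Nomad's Vault integration and template stanza:

```hcl
task "app" {
  driver = "exec"

  config {
    command = "local/run-app.sh"   # hypothetical command
  }

  vault {
    # Hypothetical Vault policy that grants read on database/creds/readonly
    policies = ["mysql-readonly"]
  }

  template {
    # Render a fresh username/password pair for this allocation
    data = <<EOT
{{ with secret "database/creds/readonly" }}
DB_USERNAME={{ .Data.username }}
DB_PASSWORD={{ .Data.password }}
{{ end }}
EOT

    destination = "secrets/db.env"
    env         = true
  }
}
```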
--------------------------------------------------------------------------------
/provision/vagrant/vault_nomad_integration.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | set -v
4 | set -x
5 |
6 | export VAULT_ADDR=http://127.0.0.1:8200
7 | cget() { curl -sf "http://127.0.0.1:8500/v1/kv/service/vault/$1?raw"; }
8 |
9 | if [ $(cget root-token) ]; then
10 | export ROOT_TOKEN=$(cget root-token)
11 | else
12 | exit
13 | fi
14 |
15 | vault auth $ROOT_TOKEN
16 |
17 | echo '
18 | # Allow creating tokens under "nomad-cluster" token role. The token role name
19 | # should be updated if "nomad-cluster" is not used.
20 | path "auth/token/create/nomad-cluster" {
21 | capabilities = ["update"]
22 | }
23 | # Allow looking up "nomad-cluster" token role. The token role name should be
24 | # updated if "nomad-cluster" is not used.
25 | path "auth/token/roles/nomad-cluster" {
26 | capabilities = ["read"]
27 | }
28 | # Allow looking up the token passed to Nomad to validate that the token has the
29 | # proper capabilities. This is provided by the "default" policy.
30 | path "auth/token/lookup-self" {
31 | capabilities = ["read"]
32 | }
33 | # Allow looking up incoming tokens to validate they have permissions to access
34 | # the tokens they are requesting. This is only required if
35 | # `allow_unauthenticated` is set to false.
36 | path "auth/token/lookup" {
37 | capabilities = ["update"]
38 | }
39 | # Allow revoking tokens that should no longer exist. This allows revoking
40 | # tokens for dead tasks.
41 | path "auth/token/revoke-accessor" {
42 | capabilities = ["update"]
43 | }
44 | # Allow checking the capabilities of our own token. This is used to validate the
45 | # token upon startup.
46 | path "sys/capabilities-self" {
47 | capabilities = ["update"]
48 | }
49 | # Allow our own token to be renewed.
50 | path "auth/token/renew-self" {
51 | capabilities = ["update"]
52 | }' | vault policy-write nomad-server -
53 |
54 | echo '
55 | {
56 | "disallowed_policies": "nomad-server",
57 | "explicit_max_ttl": 0,
58 | "name": "nomad-cluster",
59 | "orphan": false,
60 | "period": 259200,
61 | "renewable": true
62 | }' | vault write /auth/token/roles/nomad-cluster -
63 |
64 | NOMAD_TOKEN=$(vault token-create -policy nomad-server -period 72h -orphan | awk 'FNR == 3 {print$2}')
65 |
66 | curl -sfX PUT 127.0.0.1:8500/v1/kv/service/vault/nomad-token -d $NOMAD_TOKEN
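The token written to Consul above is intended for the `vault` stanza of the Nomad server configuration. A sketch of what that stanza might look like in this Vagrant demo (the address and token value are placeholders to fill in from your environment):

```hcl
# Fragment of the Nomad server agent configuration
vault {
  enabled          = true
  address          = "http://192.168.50.152:8200"                  # Vault runs on node3 in this demo
  token            = "<value of the Consul key service/vault/nomad-token>"
  create_from_role = "nomad-cluster"
}
```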
--------------------------------------------------------------------------------
/workload-flexibility/README.md:
--------------------------------------------------------------------------------
1 | # To be implemented
2 |
--------------------------------------------------------------------------------