├── .github └── workflows │ ├── ci-only.yaml │ └── release.yaml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── demo ├── .gitignore ├── .terraform.lock.hcl ├── image │ ├── main.tf │ ├── packer │ │ ├── do-packer.pkr.hcl │ │ └── scripts │ │ │ ├── client.sh │ │ │ ├── server.sh │ │ │ └── setup.sh │ └── variable.tf ├── infrastructure │ ├── main.tf │ ├── outputs.tf │ ├── templates │ │ ├── client.sh │ │ └── server.sh │ └── variables.tf ├── jobs │ ├── files │ │ └── grafana_dashboard.json │ ├── main.tf │ ├── templates │ │ ├── autoscaler.nomad │ │ ├── batch.nomad │ │ ├── grafana.nomad │ │ ├── prometheus.nomad │ │ └── traefik.nomad │ └── variables.tf ├── main.tf ├── outputs.tf ├── terraform.tfvars.sample └── variables.tf ├── go.mod ├── go.sum ├── main.go ├── plugin ├── digitalocean.go ├── plugin.go ├── plugin_test.go ├── retry.go ├── retry_test.go ├── shutdown.go └── wait.go └── scripts └── dist.sh /.github/workflows/ci-only.yaml: -------------------------------------------------------------------------------- 1 | name: ci-only 2 | 3 | on: 4 | push: 5 | branches: [ '*' ] 6 | pull_request: 7 | branches: [ '*' ] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/setup-go@v2 14 | with: 15 | go-version: ^1.16 16 | 17 | - uses: actions/checkout@v2.3.4 18 | with: 19 | fetch-depth: 1 20 | 21 | - uses: actions/cache@v2 22 | with: 23 | path: ~/go/pkg/mod 24 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 25 | restore-keys: | 26 | ${{ runner.os }}-go- 27 | 28 | - run: make test build 29 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | release: 5 | types: [ created ] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/setup-go@v2 12 | with: 13 | go-version: ^1.16 14 | 15 | - uses: actions/checkout@v2.3.4 16 | with: 17 | 
fetch-depth: 1 18 | 19 | - run: make test dist 20 | 21 | - uses: skx/github-action-publish-binaries@master 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 24 | with: 25 | args: "./dist/*" 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | dist 3 | nomad-droplets-autoscaler 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Johan Siebens 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := bash 2 | LDFLAGS := "-s -w" 3 | .PHONY: all 4 | 5 | .PHONY: %.zip 6 | %.zip: 7 | touch $@ 8 | 9 | .PHONY: test 10 | test: 11 | go test ./... 12 | 13 | .PHONY: build 14 | build: 15 | CGO_ENABLED=0 go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o dist/do-droplets 16 | 17 | .PHONY: dist 18 | dist: 19 | mkdir -p dist 20 | ./scripts/dist.sh linux amd64 21 | ./scripts/dist.sh linux arm64 22 | ./scripts/dist.sh linux arm 23 | ./scripts/dist.sh darwin amd64 24 | ./scripts/dist.sh windows amd64 25 | cd dist && shasum -a 256 *.zip > do-droplets_SHA256SUMS && cd .. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nomad DigitalOcean Droplets Autoscaler 2 | 3 | The `do-droplets` target plugin allows for the scaling of the Nomad cluster clients via creating and 4 | destroying [DigitalOcean Droplets](https://www.digitalocean.com/products/droplets/). 5 | 6 | ## Requirements 7 | 8 | * nomad autoscaler 0.3.0+ 9 | * DigitalOcean account 10 | 11 | ## Documentation 12 | 13 | ### Agent Configuration Options 14 | 15 | To use the `do-droplets` target plugin, the agent configuration needs to be populated with the appropriate target block. 16 | Currently, Personal Access Token (PAT) is the only method of authenticating with the API. You can manage your tokens at the DigitalOcean Control Panel [Applications Page](https://cloud.digitalocean.com/settings/applications). 17 | 18 | ``` 19 | target "do-droplets" { 20 | driver = "do-droplets" 21 | config = { 22 | token = "local/token" 23 | } 24 | } 25 | ``` 26 | 27 | - `token` `(string: "")` - a DigitalOcean API token or a path to a file containing a token. 
Alternatively, this can also be specified using environment variables ordered by precedence: 28 | - `DIGITALOCEAN_TOKEN` 29 | - `DIGITALOCEAN_ACCESS_TOKEN` 30 | 31 | ### Policy Configuration Options 32 | 33 | ``` hcl 34 | check "hashistack-allocated-cpu" { 35 | # ... 36 | target "do-droplets" { 37 | name = "hashi-worker" 38 | region = "nyc1" 39 | size = "s-1vcpu-1gb" 40 | snapshot_id = 84589509 41 | user_data = "local/hashi-worker-user-data.sh" 42 | tags = "hashi-stack" 43 | node_class = "hashistack" 44 | node_drain_deadline = "5m" 45 | node_purge = "true" 46 | } 47 | # ... 48 | } 49 | ``` 50 | - `name` `(string: )` - A logical name of a Droplet "group". Every managed Droplet will be tagged with this value and its name is this value with a random suffix 51 | 52 | - `region` `(string: )` - The region to start in. 53 | 54 | - `vpc_uuid` `(string: )` - The ID of the VPC where the Droplet will be located. 55 | 56 | - `size` `(string: )` - The unique slug that identifies the type of Droplet. You can find a list of available slugs on [DigitalOcean API documentation](https://developers.digitalocean.com/documentation/v2/#list-all-sizes). 57 | 58 | - `snapshot_id` `(string: )` - The Droplet image ID. 59 | 60 | - `user_data` `(string: "")` - A string of the desired User Data for the Droplet or a path to a file containing the User Data 61 | 62 | - `ssh_keys` `(string: "")` - A comma-separated list of SSH fingerprints to enable 63 | 64 | - `tags` `(string: "")` - A comma-separated list of additional tags to be applied to the Droplets. 65 | 66 | - `datacenter` `(string: "")` - The Nomad client [datacenter](https://www.nomadproject.io/docs/configuration#datacenter) 67 | identifier used to group nodes into a pool of resource. Conflicts with 68 | `node_class`. 69 | 70 | - `node_class` `(string: "")` - The Nomad [client node class](https://www.nomadproject.io/docs/configuration/client#node_class) 71 | identifier used to group nodes into a pool of resource. 
Conflicts with 72 | `datacenter`. 73 | 74 | - `node_drain_deadline` `(duration: "15m")` The Nomad [drain deadline](https://www.nomadproject.io/api-docs/nodes#deadline) to use when performing node draining 75 | actions. **Note that the default value for this setting differs from Nomad's 76 | default of 1h.** 77 | 78 | - `node_drain_ignore_system_jobs` `(bool: "false")` A boolean flag used to 79 | control if system jobs should be stopped when performing node draining 80 | actions. 81 | 82 | - `node_purge` `(bool: "false")` A boolean flag to determine whether Nomad 83 | clients should be [purged](https://www.nomadproject.io/api-docs/nodes#purge-node) when performing scale in 84 | actions. 85 | 86 | - `node_selector_strategy` `(string: "least_busy")` The strategy to use when 87 | selecting nodes for termination. Refer to the [node selector 88 | strategy](https://www.nomadproject.io/docs/autoscaling/internals/node-selector-strategy) documentation for more information. -------------------------------------------------------------------------------- /demo/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform 2 | terraform.tfvars 3 | terraform.tfstate 4 | terraform.tfstate.backup -------------------------------------------------------------------------------- /demo/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/digitalocean/digitalocean" { 5 | version = "2.8.0" 6 | constraints = "2.8.0" 7 | hashes = [ 8 | "h1:xgnxZ+FhFn+okWjp4gmr6cpDzpWkMIwi0LQT8zJ83Sc=", 9 | "zh:01ef183926274cf27b83cf3ac15fbf449a192527536cea31eb7f07854af650df", 10 | "zh:0f44cbc4c5884581104a26bc11bdebb4bb57860c9828470315562c2e23906565", 11 | "zh:2090e977047aa1fac3c6c102295c0836e87ad1ec4ca2db22882e7a4ce6217a56", 12 | "zh:3e7d338fe44918ba66465b7bc448560f4dd677bdc3af7737b78110b7e4519526", 13 | "zh:3fe03db92754ac9825f08741a1f1e24ed9446e24c067cf73198d94f0d167c056", 14 | "zh:813523fbb26c3cc5ac7346e13b9b09dc50b2794d4f6e17f2e2bc6bd301e822b5", 15 | "zh:87b96042f59271c2ada41d51ae8e3b4c4ce79b2712f6582ab7124bbbf38b8b05", 16 | "zh:a30204152076af9654016d177a7035a272665f0ca31ce075031d089b85e5605d", 17 | "zh:b02a427393c0d67b14258dc09f46166163e019a7c539895135d9b35046a868c8", 18 | "zh:b6e7df534961ef1f1fcbca95ed50a88aba960dabd5932ed1a0afa6f8bce40d7d", 19 | "zh:caa0cff23e6ce214d75be2104dff87cfbea3e6e4046f5c046bd79e8fa6be88ab", 20 | "zh:d8145f2219aa3c2ac146b795870b0d40702ca17089e07ab1a2579d6d42e45364", 21 | "zh:e520edb92880ff731ba8db16a8f3dbf01e28a60d0028619e31aa559da9992892", 22 | "zh:ff45eb42b58a1228395b81bb15d8fc1e8d8d66b6c3c36139752cd654be9b83da", 23 | "zh:ff9c6f62808c25fb7bd85d25b794aac686304a9d5e370b6b6970bb1b0c30033c", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/hashicorp/local" { 28 | version = "2.1.0" 29 | hashes = [ 30 | "h1:EYZdckuGU3n6APs97nS2LxZm3dDtGqyM4qaIvsmac8o=", 31 | "zh:0f1ec65101fa35050978d483d6e8916664b7556800348456ff3d09454ac1eae2", 32 | "zh:36e42ac19f5d68467aacf07e6adcf83c7486f2e5b5f4339e9671f68525fc87ab", 33 | "zh:6db9db2a1819e77b1642ec3b5e95042b202aee8151a0256d289f2e141bf3ceb3", 34 | "zh:719dfd97bb9ddce99f7d741260b8ece2682b363735c764cac83303f02386075a", 35 | "zh:7598bb86e0378fd97eaa04638c1a4c75f960f62f69d3662e6d80ffa5a89847fe", 36 | "zh:ad0a188b52517fec9eca393f1e2c9daea362b33ae2eb38a857b6b09949a727c1", 37 | 
"zh:c46846c8df66a13fee6eff7dc5d528a7f868ae0dcf92d79deaac73cc297ed20c", 38 | "zh:dc1a20a2eec12095d04bf6da5321f535351a594a636912361db20eb2a707ccc4", 39 | "zh:e57ab4771a9d999401f6badd8b018558357d3cbdf3d33cc0c4f83e818ca8e94b", 40 | "zh:ebdcde208072b4b0f8d305ebf2bfdc62c926e0717599dcf8ec2fd8c5845031c3", 41 | "zh:ef34c52b68933bedd0868a13ccfd59ff1c820f299760b3c02e008dc95e2ece91", 42 | ] 43 | } 44 | 45 | provider "registry.terraform.io/hashicorp/nomad" { 46 | version = "1.4.15" 47 | hashes = [ 48 | "h1:4H/rReWAv3a3N2etfZswKz1dIeMiPru6cIrBqnuuIsc=", 49 | "zh:0c8d6ea88571113f5c886566ee2220ee78fe6537d9f96bc107fb39d45de5cba5", 50 | "zh:412f4648e6989a1c4d5eb27ec072317bd5a6e401ca2109f9869ac4f4ab49ff2f", 51 | "zh:4351ec9274264aa081f5cc07e8b19e4b40c81a9d23d7f4ddd88dd63988ffe311", 52 | "zh:44143738cbdb7defaf180cb59cba3d6ece7e3e2565bd3c8c9fa9b75d38fb53ca", 53 | "zh:5a1485b482d9602759749a97847d6f81d96499c2bb53c69596c2465109d43197", 54 | "zh:5f71922db49674b3263f79252d51fb16a69b45ff0c22079d7a1210cc479c27e5", 55 | "zh:626bd4fb8eab5abc55014190fa8bd877bb04a58c186920f4bf2d2cc625c22e84", 56 | "zh:865f198f8e710955f23d7d75f95cb20510574d7931c71b5e8508b785ed52ea3d", 57 | "zh:924120d03bb25c2c8120507b3e95341a42bab62a6cd5866dbd2190c5b336475f", 58 | "zh:95b58368e11c8511a8bc88dd470cae3cc60e397dd39581211f1de23d12552990", 59 | "zh:b29ebbb0045dd4ba919728cafb509424d5465605fdef4f67171e22033c2cf921", 60 | ] 61 | } 62 | 63 | provider "registry.terraform.io/hashicorp/null" { 64 | version = "3.1.0" 65 | hashes = [ 66 | "h1:vpC6bgUQoJ0znqIKVFevOdq+YQw42bRq0u+H3nto8nA=", 67 | "zh:02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2", 68 | "zh:53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515", 69 | "zh:5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521", 70 | "zh:9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2", 71 | "zh:a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e", 72 | 
"zh:a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53", 73 | "zh:c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d", 74 | "zh:cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8", 75 | "zh:e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70", 76 | "zh:fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b", 77 | "zh:fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e", 78 | ] 79 | } 80 | 81 | provider "registry.terraform.io/hashicorp/random" { 82 | version = "3.1.0" 83 | hashes = [ 84 | "h1:BZMEPucF+pbu9gsPk0G0BHx7YP04+tKdq2MrRDF1EDM=", 85 | "zh:2bbb3339f0643b5daa07480ef4397bd23a79963cc364cdfbb4e86354cb7725bc", 86 | "zh:3cd456047805bf639fbf2c761b1848880ea703a054f76db51852008b11008626", 87 | "zh:4f251b0eda5bb5e3dc26ea4400dba200018213654b69b4a5f96abee815b4f5ff", 88 | "zh:7011332745ea061e517fe1319bd6c75054a314155cb2c1199a5b01fe1889a7e2", 89 | "zh:738ed82858317ccc246691c8b85995bc125ac3b4143043219bd0437adc56c992", 90 | "zh:7dbe52fac7bb21227acd7529b487511c91f4107db9cc4414f50d04ffc3cab427", 91 | "zh:a3a9251fb15f93e4cfc1789800fc2d7414bbc18944ad4c5c98f466e6477c42bc", 92 | "zh:a543ec1a3a8c20635cf374110bd2f87c07374cf2c50617eee2c669b3ceeeaa9f", 93 | "zh:d9ab41d556a48bd7059f0810cf020500635bfc696c9fc3adab5ea8915c1d886b", 94 | "zh:d9e13427a7d011dbd654e591b0337e6074eef8c3b9bb11b2e39eaaf257044fd7", 95 | "zh:f7605bd1437752114baf601bdf6931debe6dc6bfe3006eb7e9bb9080931dca8a", 96 | ] 97 | } 98 | -------------------------------------------------------------------------------- /demo/image/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | digitalocean = { 4 | source = "digitalocean/digitalocean" 5 | version = "2.8.0" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | build_image = var.image == "" 12 | image = local.build_image ? 
data.digitalocean_image.built[0] : data.digitalocean_image.existing[0] 13 | } 14 | 15 | resource "random_pet" "name" { 16 | count = local.build_image ? 1 : 0 17 | } 18 | 19 | resource "null_resource" "packer_build" { 20 | count = local.build_image ? 1 : 0 21 | 22 | provisioner "local-exec" { 23 | command = </dev/null </dev/null </dev/null; do echo waiting ...; sleep 1; done' 7 | 8 | # Disable interactive apt prompts 9 | export DEBIAN_FRONTEND=noninteractive 10 | echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections 11 | 12 | apt-get update 13 | apt-get install -y dnsmasq curl unzip docker.io 14 | 15 | mkdir -p /opt/cni/bin 16 | curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz | tar -xvz -C /opt/cni/bin 17 | 18 | curl -sL get.hashi-up.dev | sh 19 | 20 | hashi-up consul install \ 21 | --version 1.9.5 \ 22 | --local \ 23 | --skip-enable 24 | 25 | hashi-up nomad install \ 26 | --version 1.1.0 \ 27 | --local \ 28 | --skip-enable 29 | 30 | echo "server=/consul/127.0.0.1#8600" > /etc/dnsmasq.d/10-consul 31 | echo "server=8.8.8.8" > /etc/dnsmasq.d/99-default 32 | 33 | systemctl disable systemd-resolved.service 34 | systemctl stop systemd-resolved 35 | rm /etc/resolv.conf 36 | 37 | echo 'debconf debconf/frontend select Dialog' | sudo debconf-set-selections -------------------------------------------------------------------------------- /demo/image/variable.tf: -------------------------------------------------------------------------------- 1 | variable "do_token" {} 2 | 3 | variable "region" { 4 | type = string 5 | } 6 | 7 | variable "image" { 8 | type = string 9 | default = "" 10 | } -------------------------------------------------------------------------------- /demo/infrastructure/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | digitalocean = { 4 | source = "digitalocean/digitalocean" 5 | 
version = "2.8.0" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | nr_of_servers = 1 12 | server_tag = "hashi-server" 13 | } 14 | 15 | resource "digitalocean_vpc" "hashi" { 16 | name = "hashistack" 17 | region = var.region 18 | ip_range = var.ip_range 19 | } 20 | 21 | resource "digitalocean_droplet" "hashi-server" { 22 | count = local.nr_of_servers 23 | image = var.snapshot_id 24 | name = "hashi-server-0${count.index + 1}" 25 | region = var.region 26 | size = "s-1vcpu-1gb" 27 | tags = ["hashi-stack", local.server_tag] 28 | user_data = templatefile("${path.module}/templates/server.sh", { server_tag = local.server_tag, do_token = var.do_token, nr_of_servers = local.nr_of_servers }) 29 | vpc_uuid = digitalocean_vpc.hashi.id 30 | ssh_keys = [var.ssh_key] 31 | } 32 | 33 | resource "digitalocean_droplet" "platform" { 34 | count = 1 35 | image = var.snapshot_id 36 | name = "hashi-platform-0${count.index + 1}" 37 | region = var.region 38 | size = "s-1vcpu-2gb" 39 | tags = ["hashi-stack", "hashi-platform"] 40 | user_data = templatefile("${path.module}/templates/client.sh", { datacenter = "platform", server_tag = local.server_tag, do_token = var.do_token }) 41 | vpc_uuid = digitalocean_vpc.hashi.id 42 | ssh_keys = [var.ssh_key] 43 | } 44 | 45 | module "my_ip_address" { 46 | source = "matti/resource/shell" 47 | command = "curl https://ipinfo.io/ip" 48 | } 49 | 50 | resource "digitalocean_firewall" "hashi-stack-internal" { 51 | name = "hashi-stack" 52 | 53 | tags = ["hashi-stack"] 54 | 55 | inbound_rule { 56 | protocol = "tcp" 57 | port_range = "4646" 58 | source_addresses = ["${module.my_ip_address.stdout}/32"] 59 | } 60 | 61 | inbound_rule { 62 | protocol = "tcp" 63 | port_range = "8500" 64 | source_addresses = ["${module.my_ip_address.stdout}/32"] 65 | } 66 | 67 | inbound_rule { 68 | protocol = "tcp" 69 | port_range = "3000" 70 | source_addresses = ["${module.my_ip_address.stdout}/32"] 71 | } 72 | 73 | inbound_rule { 74 | protocol = "tcp" 75 | port_range = "8081" 76 | 
source_addresses = ["${module.my_ip_address.stdout}/32"] 77 | } 78 | 79 | inbound_rule { 80 | protocol = "tcp" 81 | port_range = "9090" 82 | source_addresses = ["${module.my_ip_address.stdout}/32"] 83 | } 84 | 85 | inbound_rule { 86 | protocol = "tcp" 87 | port_range = "1-65535" 88 | source_tags = ["hashi-stack"] 89 | } 90 | 91 | inbound_rule { 92 | protocol = "udp" 93 | port_range = "1-65535" 94 | source_tags = ["hashi-stack"] 95 | } 96 | 97 | inbound_rule { 98 | protocol = "icmp" 99 | source_tags = ["hashi-stack"] 100 | } 101 | 102 | outbound_rule { 103 | protocol = "tcp" 104 | port_range = "1-65535" 105 | destination_addresses = ["0.0.0.0/0", "::/0"] 106 | } 107 | 108 | outbound_rule { 109 | protocol = "udp" 110 | port_range = "1-65535" 111 | destination_addresses = ["0.0.0.0/0", "::/0"] 112 | } 113 | 114 | outbound_rule { 115 | protocol = "icmp" 116 | destination_addresses = ["0.0.0.0/0", "::/0"] 117 | } 118 | } -------------------------------------------------------------------------------- /demo/infrastructure/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_uuid" { 2 | value = digitalocean_vpc.hashi.id 3 | } 4 | 5 | output "nomad_addr" { 6 | value = "http://${digitalocean_droplet.hashi-server[0].ipv4_address}:4646" 7 | } 8 | 9 | output "consul_addr" { 10 | value = "http://${digitalocean_droplet.hashi-server[0].ipv4_address}:8500" 11 | } 12 | 13 | output "platform_addr" { 14 | value = digitalocean_droplet.platform[0].ipv4_address 15 | } -------------------------------------------------------------------------------- /demo/infrastructure/templates/client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /ops/scripts/client.sh "${datacenter}" "${server_tag}" "${do_token}" 4 | -------------------------------------------------------------------------------- /demo/infrastructure/templates/server.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /ops/scripts/server.sh ${nr_of_servers} "${server_tag}" "${do_token}" 4 | -------------------------------------------------------------------------------- /demo/infrastructure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "do_token" {} 2 | 3 | variable "region" { 4 | type = string 5 | } 6 | 7 | variable "ip_range" { 8 | type = string 9 | } 10 | 11 | variable "snapshot_id" { 12 | type = string 13 | } 14 | 15 | variable "ssh_key" { 16 | type = string 17 | } -------------------------------------------------------------------------------- /demo/jobs/files/grafana_dashboard.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "limit": 100, 11 | "name": "Annotations & Alerts", 12 | "showIn": 0, 13 | "type": "dashboard" 14 | }, 15 | { 16 | "datasource": "Loki", 17 | "enable": true, 18 | "expr": "{task=\"autoscaler\"} |= \"scaling target\" | logfmt | line_format \"{{.reason}}\"", 19 | "hide": false, 20 | "iconColor": "#5794F2", 21 | "name": "Autoscaler", 22 | "showIn": 0, 23 | "target": {} 24 | } 25 | ] 26 | }, 27 | "editable": true, 28 | "gnetId": null, 29 | "graphTooltip": 0, 30 | "links": [], 31 | "panels": [ 32 | { 33 | "datasource": null, 34 | "fieldConfig": { 35 | "defaults": { 36 | "color": { 37 | "fixedColor": "green", 38 | "mode": "palette-classic" 39 | }, 40 | "custom": {}, 41 | "mappings": [], 42 | "thresholds": { 43 | "mode": "absolute", 44 | "steps": [ 45 | { 46 | "color": "green", 47 | "value": null 48 | }, 49 | { 50 | "color": "red", 51 | "value": 80 52 | } 53 | ] 54 | } 55 | }, 56 | "overrides": [ 57 | { 58 | "matcher": { 59 | "id": "byName", 60 | "options": "complete" 61 | }, 62 
| "properties": [ 63 | { 64 | "id": "color", 65 | "value": { 66 | "fixedColor": "green", 67 | "mode": "fixed" 68 | } 69 | } 70 | ] 71 | }, 72 | { 73 | "matcher": { 74 | "id": "byName", 75 | "options": "failed" 76 | }, 77 | "properties": [ 78 | { 79 | "id": "color", 80 | "value": { 81 | "fixedColor": "red", 82 | "mode": "fixed" 83 | } 84 | } 85 | ] 86 | } 87 | ] 88 | }, 89 | "gridPos": { 90 | "h": 8, 91 | "w": 17, 92 | "x": 0, 93 | "y": 0 94 | }, 95 | "id": 14, 96 | "options": { 97 | "reduceOptions": { 98 | "calcs": [ 99 | "lastNotNull" 100 | ], 101 | "fields": "", 102 | "values": false 103 | }, 104 | "showThresholdLabels": false, 105 | "showThresholdMarkers": true, 106 | "text": {} 107 | }, 108 | "pluginVersion": "7.4.2", 109 | "targets": [ 110 | { 111 | "expr": "sum(nomad_nomad_job_summary_queued{exported_job=~\"batch/.*\"}) OR on() vector(0)", 112 | "interval": "", 113 | "legendFormat": "queued", 114 | "refId": "A" 115 | }, 116 | { 117 | "expr": "sum(nomad_nomad_job_summary_running{exported_job=~\"batch/.*\"}) OR on() vector(0)", 118 | "hide": false, 119 | "interval": "", 120 | "legendFormat": "running", 121 | "refId": "B" 122 | }, 123 | { 124 | "expr": "sum(nomad_nomad_job_summary_complete{exported_job=~\"batch/.*\"}) OR on() vector(0)", 125 | "hide": false, 126 | "interval": "", 127 | "legendFormat": "complete", 128 | "refId": "C" 129 | }, 130 | { 131 | "expr": "sum(nomad_nomad_job_summary_failed{exported_job=~\"batch/.*\"}) OR on() vector(0)", 132 | "hide": false, 133 | "interval": "", 134 | "legendFormat": "failed", 135 | "refId": "D" 136 | } 137 | ], 138 | "title": "Batch job count per status", 139 | "type": "gauge" 140 | }, 141 | { 142 | "datasource": null, 143 | "fieldConfig": { 144 | "defaults": { 145 | "color": { 146 | "mode": "palette-classic" 147 | }, 148 | "custom": {}, 149 | "decimals": 0, 150 | "displayName": "${__series.name}", 151 | "mappings": [], 152 | "max": 5, 153 | "min": 0, 154 | "noValue": "0", 155 | "thresholds": { 156 | "mode": 
"absolute", 157 | "steps": [ 158 | { 159 | "color": "green", 160 | "value": null 161 | }, 162 | { 163 | "color": "red", 164 | "value": 80 165 | } 166 | ] 167 | } 168 | }, 169 | "overrides": [ 170 | { 171 | "matcher": { 172 | "id": "byRegexp", 173 | "options": "batch_workers" 174 | }, 175 | "properties": [ 176 | { 177 | "id": "color", 178 | "value": { 179 | "fixedColor": "green", 180 | "mode": "fixed" 181 | } 182 | } 183 | ] 184 | }, 185 | { 186 | "matcher": { 187 | "id": "byRegexp", 188 | "options": "platform" 189 | }, 190 | "properties": [ 191 | { 192 | "id": "color", 193 | "value": { 194 | "fixedColor": "yellow", 195 | "mode": "fixed" 196 | } 197 | } 198 | ] 199 | } 200 | ] 201 | }, 202 | "gridPos": { 203 | "h": 8, 204 | "w": 7, 205 | "x": 17, 206 | "y": 0 207 | }, 208 | "id": 8, 209 | "options": { 210 | "orientation": "auto", 211 | "reduceOptions": { 212 | "calcs": [ 213 | "last" 214 | ], 215 | "fields": "", 216 | "values": false 217 | }, 218 | "showThresholdLabels": false, 219 | "showThresholdMarkers": true, 220 | "text": {} 221 | }, 222 | "pluginVersion": "7.4.2", 223 | "targets": [ 224 | { 225 | "expr": "count by (datacenter) (nomad_client_uptime{node_status=\"ready\"})", 226 | "interval": "", 227 | "legendFormat": "{{datacenter}}", 228 | "refId": "A" 229 | } 230 | ], 231 | "timeFrom": null, 232 | "timeShift": null, 233 | "title": "Clients per datacenter", 234 | "type": "gauge" 235 | }, 236 | { 237 | "aliasColors": {}, 238 | "bars": true, 239 | "dashLength": 10, 240 | "dashes": false, 241 | "datasource": null, 242 | "fieldConfig": { 243 | "defaults": { 244 | "custom": {} 245 | }, 246 | "overrides": [] 247 | }, 248 | "fill": 10, 249 | "fillGradient": 0, 250 | "gridPos": { 251 | "h": 9, 252 | "w": 12, 253 | "x": 0, 254 | "y": 8 255 | }, 256 | "hiddenSeries": false, 257 | "id": 2, 258 | "legend": { 259 | "avg": false, 260 | "current": false, 261 | "max": false, 262 | "min": false, 263 | "show": true, 264 | "total": false, 265 | "values": false 266 | }, 267 | 
"lines": false, 268 | "linewidth": 0, 269 | "nullPointMode": "null", 270 | "options": { 271 | "alertThreshold": true 272 | }, 273 | "percentage": false, 274 | "pluginVersion": "7.4.2", 275 | "pointradius": 2, 276 | "points": false, 277 | "renderer": "flot", 278 | "seriesOverrides": [ 279 | { 280 | "$$hashKey": "object:1253", 281 | "alias": "complete", 282 | "bars": false, 283 | "fill": 0, 284 | "lines": true, 285 | "linewidth": 1, 286 | "stack": false, 287 | "yaxis": 2 288 | } 289 | ], 290 | "spaceLength": 10, 291 | "stack": true, 292 | "steppedLine": true, 293 | "targets": [ 294 | { 295 | "expr": "sum(nomad_nomad_job_summary_running{exported_job=~\"batch/.*\"}) OR on() vector(0)", 296 | "hide": false, 297 | "interval": "", 298 | "legendFormat": "running", 299 | "refId": "B" 300 | }, 301 | { 302 | "expr": "sum(nomad_nomad_job_summary_queued{exported_job=~\"batch/.*\"}) OR on() vector(0)", 303 | "hide": false, 304 | "interval": "", 305 | "legendFormat": "queued", 306 | "refId": "A" 307 | } 308 | ], 309 | "thresholds": [], 310 | "timeFrom": null, 311 | "timeRegions": [], 312 | "timeShift": null, 313 | "title": "Batch jobs in progress", 314 | "tooltip": { 315 | "shared": true, 316 | "sort": 0, 317 | "value_type": "individual" 318 | }, 319 | "type": "graph", 320 | "xaxis": { 321 | "buckets": null, 322 | "mode": "time", 323 | "name": null, 324 | "show": true, 325 | "values": [] 326 | }, 327 | "yaxes": [ 328 | { 329 | "$$hashKey": "object:133", 330 | "decimals": 0, 331 | "format": "none", 332 | "label": null, 333 | "logBase": 1, 334 | "max": null, 335 | "min": null, 336 | "show": true 337 | }, 338 | { 339 | "$$hashKey": "object:134", 340 | "format": "short", 341 | "label": null, 342 | "logBase": 1, 343 | "max": null, 344 | "min": null, 345 | "show": true 346 | } 347 | ], 348 | "yaxis": { 349 | "align": false, 350 | "alignLevel": null 351 | } 352 | }, 353 | { 354 | "aliasColors": {}, 355 | "bars": false, 356 | "dashLength": 10, 357 | "dashes": false, 358 | "datasource": 
null, 359 | "fieldConfig": { 360 | "defaults": { 361 | "custom": {} 362 | }, 363 | "overrides": [] 364 | }, 365 | "fill": 1, 366 | "fillGradient": 0, 367 | "gridPos": { 368 | "h": 9, 369 | "w": 12, 370 | "x": 12, 371 | "y": 8 372 | }, 373 | "hiddenSeries": false, 374 | "id": 10, 375 | "legend": { 376 | "avg": false, 377 | "current": false, 378 | "max": false, 379 | "min": false, 380 | "show": true, 381 | "total": false, 382 | "values": false 383 | }, 384 | "lines": true, 385 | "linewidth": 1, 386 | "nullPointMode": "null", 387 | "options": { 388 | "alertThreshold": true 389 | }, 390 | "percentage": false, 391 | "pluginVersion": "7.4.2", 392 | "pointradius": 2, 393 | "points": false, 394 | "renderer": "flot", 395 | "seriesOverrides": [ 396 | { 397 | "$$hashKey": "object:96", 398 | "alias": "platform", 399 | "color": "#FADE2A" 400 | }, 401 | { 402 | "$$hashKey": "object:104", 403 | "alias": "batch", 404 | "color": "#73BF69" 405 | } 406 | ], 407 | "spaceLength": 10, 408 | "stack": false, 409 | "steppedLine": true, 410 | "targets": [ 411 | { 412 | "expr": "count by (datacenter) (nomad_client_uptime{node_status=\"ready\"})", 413 | "instant": false, 414 | "interval": "", 415 | "intervalFactor": 1, 416 | "legendFormat": "{{datacenter}}", 417 | "refId": "A" 418 | } 419 | ], 420 | "thresholds": [], 421 | "timeFrom": null, 422 | "timeRegions": [], 423 | "timeShift": null, 424 | "title": "Number of clients", 425 | "tooltip": { 426 | "shared": true, 427 | "sort": 0, 428 | "value_type": "individual" 429 | }, 430 | "type": "graph", 431 | "xaxis": { 432 | "buckets": null, 433 | "mode": "time", 434 | "name": null, 435 | "show": true, 436 | "values": [] 437 | }, 438 | "yaxes": [ 439 | { 440 | "$$hashKey": "object:1577", 441 | "decimals": 0, 442 | "format": "short", 443 | "label": null, 444 | "logBase": 1, 445 | "max": null, 446 | "min": "0", 447 | "show": true 448 | }, 449 | { 450 | "$$hashKey": "object:1578", 451 | "format": "short", 452 | "label": null, 453 | "logBase": 1, 454 | 
"max": null, 455 | "min": null, 456 | "show": true 457 | } 458 | ], 459 | "yaxis": { 460 | "align": false, 461 | "alignLevel": null 462 | } 463 | } 464 | ], 465 | "refresh": "5s", 466 | "schemaVersion": 27, 467 | "style": "dark", 468 | "tags": [], 469 | "templating": { 470 | "list": [] 471 | }, 472 | "time": { 473 | "from": "now-15m", 474 | "to": "now" 475 | }, 476 | "timepicker": {}, 477 | "timezone": "", 478 | "title": "On-demand batch job demo", 479 | "uid": "CJlc3r_Mk", 480 | "version": 1 481 | } 482 | -------------------------------------------------------------------------------- /demo/jobs/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "nomad_readiness" { 2 | triggers = { 3 | address = var.nomad_addr 4 | } 5 | 6 | provisioner "local-exec" { 7 | command = <[a-zA-Z0-9_-]+).*reason="(?P.+)"' 176 | - labels: 177 | policy_id: 178 | reason: 179 | EOH 180 | 181 | destination = "local/promtail.yaml" 182 | } 183 | 184 | resources { 185 | cpu = 50 186 | memory = 32 187 | } 188 | 189 | service { 190 | name = "promtail" 191 | port = "promtail" 192 | 193 | check { 194 | type = "http" 195 | path = "/ready" 196 | interval = "10s" 197 | timeout = "2s" 198 | } 199 | } 200 | } 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /demo/jobs/templates/batch.nomad: -------------------------------------------------------------------------------- 1 | job "batch" { 2 | datacenters = ["batch_workers"] 3 | type = "batch" 4 | 5 | parameterized { 6 | meta_optional = ["sleep", "splay"] 7 | } 8 | 9 | meta { 10 | sleep = "180" 11 | splay = "60" 12 | } 13 | 14 | group "batch" { 15 | task "sleep" { 16 | driver = "docker" 17 | 18 | config { 19 | image = "alpine:3.13" 20 | command = "/bin/ash" 21 | args = ["${NOMAD_TASK_DIR}/sleep.sh"] 22 | } 23 | 24 | template { 25 | data = < target { 249 | return desired - target, "out" 250 | } 251 | return 0, "" 252 | } 253 | 254 | func 
(t *TargetPlugin) getValue(config map[string]string, name string) (string, bool) { 255 | v, ok := config[name] 256 | if ok { 257 | return v, true 258 | } 259 | 260 | v, ok = t.config[name] 261 | if ok { 262 | return v, true 263 | } 264 | 265 | return "", false 266 | } 267 | 268 | func pathOrContents(poc string) (string, error) { 269 | if len(poc) == 0 { 270 | return poc, nil 271 | } 272 | 273 | path := poc 274 | if path[0] == '~' { 275 | var err error 276 | path, err = homedir.Expand(path) 277 | if err != nil { 278 | return path, err 279 | } 280 | } 281 | 282 | if _, err := os.Stat(path); err == nil { 283 | contents, err := ioutil.ReadFile(path) 284 | if err != nil { 285 | return string(contents), err 286 | } 287 | return string(contents), nil 288 | } 289 | 290 | return poc, nil 291 | } 292 | 293 | func getEnv(keys ...string) string { 294 | for _, key := range keys { 295 | v := os.Getenv(key) 296 | if len(v) != 0 { 297 | return v 298 | } 299 | } 300 | return "" 301 | } 302 | -------------------------------------------------------------------------------- /plugin/plugin_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestTargetPlugin_calculateDirection(t *testing.T) { 10 | testCases := []struct { 11 | inputMigTarget int64 12 | inputStrategyDesired int64 13 | expectedOutputNum int64 14 | expectedOutputString string 15 | name string 16 | }{ 17 | { 18 | inputMigTarget: 10, 19 | inputStrategyDesired: 11, 20 | expectedOutputNum: 1, 21 | expectedOutputString: "out", 22 | name: "scale out desired", 23 | }, 24 | { 25 | inputMigTarget: 10, 26 | inputStrategyDesired: 9, 27 | expectedOutputNum: 1, 28 | expectedOutputString: "in", 29 | name: "scale in desired", 30 | }, 31 | { 32 | inputMigTarget: 10, 33 | inputStrategyDesired: 10, 34 | expectedOutputNum: 0, 35 | expectedOutputString: "", 36 | name: "scale not desired", 37 | 
}, 38 | } 39 | 40 | tp := TargetPlugin{} 41 | 42 | for _, tc := range testCases { 43 | t.Run(tc.name, func(t *testing.T) { 44 | actualNum, actualString := tp.calculateDirection(tc.inputMigTarget, tc.inputStrategyDesired) 45 | assert.Equal(t, tc.expectedOutputNum, actualNum, tc.name) 46 | assert.Equal(t, tc.expectedOutputString, actualString, tc.name) 47 | }) 48 | } 49 | } 50 | 51 | func TestTargetPlugin_createDropletTemplate(t *testing.T) { 52 | input := map[string]string{ 53 | "name": "hashi-batch", 54 | "region": "ny1", 55 | "size": "s-1vcpu-1gb", 56 | "vpc_uuid": "b6ac51f4-dc83-11e8-a3da-3cfdfea9f0d8", 57 | "snapshot_id": "123", 58 | "node_class": "batch", 59 | } 60 | 61 | plugin := TargetPlugin{} 62 | dropletTemplate, err := plugin.createDropletTemplate(input) 63 | 64 | assert.Nil(t, err) 65 | assert.Equal(t, []string{}, dropletTemplate.sshKeys) 66 | assert.Equal(t, "hashi-batch", dropletTemplate.name) 67 | assert.Equal(t, []string{"hashi-batch"}, dropletTemplate.tags) 68 | } 69 | 70 | func TestTargetPlugin_createDropletTemplateWithMultipleTags(t *testing.T) { 71 | input := map[string]string{ 72 | "name": "hashi-batch", 73 | "region": "ny1", 74 | "size": "s-1vcpu-1gb", 75 | "vpc_uuid": "b6ac51f4-dc83-11e8-a3da-3cfdfea9f0d8", 76 | "snapshot_id": "123", 77 | "tags": "tag1,tag2", 78 | "node_class": "hashistack", 79 | } 80 | 81 | plugin := TargetPlugin{} 82 | dropletTemplate, err := plugin.createDropletTemplate(input) 83 | 84 | assert.Nil(t, err) 85 | assert.Equal(t, []string{"hashi-batch", "tag1", "tag2"}, dropletTemplate.tags) 86 | } 87 | -------------------------------------------------------------------------------- /plugin/retry.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | ) 9 | 10 | // retryFunc is the function signature for a function which is retryable. 
The 11 | // stop bool indicates whether or not the retry should be halted indicating a 12 | // terminal error. The error return can accompany either a true or false stop 13 | // return to provide context when needed. 14 | type retryFunc func(ctx context.Context) (stop bool, err error) 15 | 16 | // retry will retry the passed function f until any of the following conditions 17 | // are met: 18 | // - the function returns stop=true and err=nil 19 | // - the retryAttempts limit is reached 20 | // - the context is cancelled 21 | func retry(ctx context.Context, retryInterval time.Duration, retryAttempts int, f retryFunc) error { 22 | 23 | var ( 24 | retryCount int 25 | lastErr error 26 | ) 27 | 28 | for { 29 | if ctx.Err() != nil { 30 | if lastErr != nil { 31 | return fmt.Errorf("retry failed with %v; last error: %v", ctx.Err(), lastErr) 32 | } 33 | return ctx.Err() 34 | } 35 | 36 | stop, err := f(ctx) 37 | if stop { 38 | return err 39 | } 40 | 41 | if err != nil && err != context.Canceled && err != context.DeadlineExceeded { 42 | lastErr = err 43 | } 44 | 45 | if err == nil { 46 | return nil 47 | } 48 | 49 | retryCount++ 50 | 51 | if retryCount == retryAttempts { 52 | return errors.New("reached retry limit") 53 | } 54 | time.Sleep(retryInterval) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /plugin/retry_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func Test_retry(t *testing.T) { 13 | testCases := []struct { 14 | inputContext context.Context 15 | inputInterval time.Duration 16 | inputRetry int 17 | inputFunc retryFunc 18 | expectedOutput error 19 | name string 20 | }{ 21 | { 22 | inputContext: context.Background(), 23 | inputInterval: 1 * time.Millisecond, 24 | inputRetry: 1, 25 | inputFunc: func(ctx context.Context) (stop 
bool, err error) { 26 | return true, nil 27 | }, 28 | expectedOutput: nil, 29 | name: "successful function first time", 30 | }, 31 | { 32 | inputContext: context.Background(), 33 | inputInterval: 1 * time.Millisecond, 34 | inputRetry: 1, 35 | inputFunc: func(ctx context.Context) (stop bool, err error) { 36 | return false, errors.New("error") 37 | }, 38 | expectedOutput: errors.New("reached retry limit"), 39 | name: "function never successful and reaches retry limit", 40 | }, 41 | } 42 | 43 | for _, tc := range testCases { 44 | t.Run(tc.name, func(t *testing.T) { 45 | actualOutput := retry(tc.inputContext, tc.inputInterval, tc.inputRetry, tc.inputFunc) 46 | assert.Equal(t, tc.expectedOutput, actualOutput, tc.name) 47 | }) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /plugin/shutdown.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/digitalocean/godo" 9 | "github.com/hashicorp/go-hclog" 10 | ) 11 | 12 | func shutdownDroplet( 13 | dropletId int, 14 | client *godo.Client, 15 | log hclog.Logger) error { 16 | 17 | // Gracefully power off the droplet. 
18 | log.Debug("Gracefully shutting down droplet...") 19 | _, _, err := client.DropletActions.PowerOff(context.TODO(), dropletId) 20 | if err != nil { 21 | // If we get an error the first time, actually report it 22 | return fmt.Errorf("error shutting down droplet: %s", err) 23 | } 24 | 25 | err = waitForDropletState("off", dropletId, client, log, 5*time.Minute) 26 | if err != nil { 27 | log.Warn("Timeout while waiting to for droplet to become 'off'") 28 | } 29 | 30 | log.Debug("Deleting Droplet...") 31 | _, err = client.Droplets.Delete(context.TODO(), dropletId) 32 | if err != nil { 33 | return fmt.Errorf("error deleting droplet: %s", err) 34 | } 35 | 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /plugin/wait.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/digitalocean/godo" 9 | "github.com/hashicorp/go-hclog" 10 | ) 11 | 12 | func waitForDropletState( 13 | desiredState string, dropletId int, 14 | client *godo.Client, 15 | log hclog.Logger, 16 | timeout time.Duration) error { 17 | done := make(chan struct{}) 18 | defer close(done) 19 | 20 | result := make(chan error, 1) 21 | go func() { 22 | attempts := 0 23 | for { 24 | attempts += 1 25 | 26 | log.Debug(fmt.Sprintf("Checking droplet status... 
(attempt: %d)", attempts)) 27 | droplet, _, err := client.Droplets.Get(context.TODO(), dropletId) 28 | if err != nil { 29 | result <- err 30 | return 31 | } 32 | 33 | if droplet.Status == desiredState { 34 | result <- nil 35 | return 36 | } 37 | 38 | // Wait 3 seconds in between 39 | time.Sleep(3 * time.Second) 40 | 41 | // Verify we shouldn't exit 42 | select { 43 | case <-done: 44 | // We finished, so just exit the goroutine 45 | return 46 | default: 47 | // Keep going 48 | } 49 | } 50 | }() 51 | 52 | log.Debug(fmt.Sprintf("Waiting for up to %d seconds for droplet to become %s", timeout/time.Second, desiredState)) 53 | select { 54 | case err := <-result: 55 | return err 56 | case <-time.After(timeout): 57 | err := fmt.Errorf("timeout while waiting to for droplet to become '%s'", desiredState) 58 | return err 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /scripts/dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | suffix="" 4 | if [ $1 == "windows" ]; then 5 | suffix=".exe" 6 | fi 7 | 8 | CGO_ENABLED=0 GOOS=$1 GOARCH=$2 go build -ldflags "-s -w" -a -installsuffix cgo -o "dist/do-droplets${suffix}" 9 | zip -j dist/do-droplets_$1_$2.zip "dist/do-droplets${suffix}" 10 | rm -rf "dist/do-droplets${suffix}" --------------------------------------------------------------------------------