├── .dockerignore ├── .gitignore ├── readiness.go ├── util.go ├── Dockerfile ├── configs.go ├── utilities_test.go ├── .github └── workflows │ └── main.yml ├── main.go ├── go.mod ├── main_test.go ├── original_desired.go ├── Makefile ├── kubernetes.go ├── aws.go ├── LICENSE ├── go.sum ├── README.md ├── aws_internal_test.go ├── roller.go └── roller_internal_test.go /.dockerignore: -------------------------------------------------------------------------------- 1 | .gocache 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | .gocache/ 3 | .go/ 4 | vendor 5 | -------------------------------------------------------------------------------- /readiness.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | type readiness interface { 4 | getUnreadyCount(hostnames []string, ids []string) (int, error) 5 | prepareTermination(hostnames []string, ids []string, drain, drainForce bool) error 6 | } 7 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // p2v is the equivalent of referencing a pointer, but safely (no panic). 4 | // Should be used for printing purposes (i.e. fmt.Printf(...)) 5 | func p2v(p interface{}) interface{} { 6 | switch value := p.(type) { 7 | case *string: 8 | if value == nil { 9 | return "" 10 | } 11 | return *value 12 | case *int64: 13 | if value == nil { 14 | return "" 15 | } 16 | return *value 17 | default: 18 | return value 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # base golang image 2 | ARG GOVER="1.15.6-alpine3.12" 3 | FROM golang:${GOVER} as golang 4 | 5 | ARG REPO 6 | 7 | RUN apk add -U --no-cache git ca-certificates 8 | 9 | RUN GO111MODULE=off go get -u golang.org/x/lint/golint 10 | 11 | ENV GO111MODULE=on 12 | ENV CGO_ENABLED=0 13 | 14 | WORKDIR /go/src/${REPO} 15 | 16 | COPY go.mod . 17 | COPY go.sum . 18 | RUN go mod download 19 | COPY . . 
20 | 21 | # these have to be last steps so they do not bust the cache with each change 22 | ARG OS 23 | ARG ARCH 24 | ENV GOOS=${OS} 25 | ENV GOARCH=${ARCH} 26 | 27 | # builder 28 | FROM golang as build 29 | 30 | RUN go build -v -i -o /usr/local/bin/aws-asg-roller 31 | 32 | FROM scratch 33 | 34 | COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 35 | COPY --from=build /usr/local/bin/aws-asg-roller /aws-asg-roller 36 | 37 | CMD ["/aws-asg-roller"] 38 | -------------------------------------------------------------------------------- /configs.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "time" 4 | 5 | // Configs struct deals with env configuration 6 | type Configs struct { 7 | Interval time.Duration `env:"ROLLER_INTERVAL" envDefault:"30s"` 8 | CheckDelay int `env:"ROLLER_CHECK_DELAY" envDefault:"30"` 9 | Drain bool `env:"ROLLER_DRAIN" envDefault:"true"` 10 | DrainForce bool `env:"ROLLER_DRAIN_FORCE" envDefault:"true"` 11 | IncreaseMax bool `env:"ROLLER_CAN_INCREASE_MAX" envDefault:"false"` 12 | IgnoreDaemonSets bool `env:"ROLLER_IGNORE_DAEMONSETS" envDefault:"true"` 13 | DeleteLocalData bool `env:"ROLLER_DELETE_LOCAL_DATA" envDefault:"false"` 14 | OriginalDesiredOnTag bool `env:"ROLLER_ORIGINAL_DESIRED_ON_TAG" envDefault:"false"` 15 | ASGS []string `env:"ROLLER_ASG,required" envSeparator:","` 16 | KubernetesEnabled bool `env:"ROLLER_KUBERNETES" envDefault:"true"` 17 | Verbose bool `env:"ROLLER_VERBOSE" envDefault:"false"` 18 | } 19 | -------------------------------------------------------------------------------- /utilities_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func testStringEq(a, b []string) bool { 4 | 5 | // If one is nil, the other must also be nil. 
6 | if (a == nil) != (b == nil) { 7 | return false 8 | } 9 | 10 | if len(a) != len(b) { 11 | return false 12 | } 13 | 14 | for i := range a { 15 | if a[i] != b[i] { 16 | return false 17 | } 18 | } 19 | return true 20 | } 21 | 22 | type funcCounter struct { 23 | count []funcCounterImpl 24 | } 25 | type funcCounterImpl struct { 26 | name string 27 | params []interface{} 28 | } 29 | 30 | func (f *funcCounter) add(name string, params ...interface{}) { 31 | f.count = append(f.count, funcCounterImpl{ 32 | name: name, 33 | params: params, 34 | }) 35 | } 36 | func (f *funcCounter) last() (string, []interface{}) { //nolint:unused 37 | l := len(f.count) 38 | if l > 0 { 39 | return f.count[l-1].name, f.count[l-1].params 40 | } 41 | return "", nil 42 | } 43 | func (f *funcCounter) lastByName(name string) []interface{} { //nolint:unused 44 | var params []interface{} 45 | for _, call := range f.count { 46 | if call.name == name { 47 | params = call.params 48 | } 49 | } 50 | return params 51 | } 52 | func (f *funcCounter) filterByName(name string) []funcCounterImpl { 53 | ret := make([]funcCounterImpl, 0) 54 | for _, call := range f.count { 55 | if call.name == name { 56 | ret = append(ret, call) 57 | } 58 | } 59 | return ret 60 | } 61 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | on: 3 | pull_request: 4 | types: [opened, synchronize, reopened] 5 | push: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | report: 11 | name: Report 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: ref 15 | run: echo ${{ github.ref }} 16 | - name: event_name 17 | run: echo ${{ github.event_name }} 18 | build: 19 | name: Build 20 | runs-on: ubuntu-latest 21 | steps: 22 | - name: checkout 23 | uses: actions/checkout@v2 24 | - uses: actions/setup-go@v1 25 | with: 26 | go-version: '1.15.6' # The Go version to download (if necessary) and use. 
27 | - name: ci 28 | run: make ci BUILD=local 29 | - name: hub login 30 | if: (github.event_name == 'push' && endsWith(github.ref,'/master')) || (github.event_name == 'create' && startsWith(github.ref,'refs/tags/')) 31 | run: echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin 32 | - name: deploy # when merged into master, tag master and push - ideally, this would be a separate job, but you cannot share docker build cache between jobs 33 | if: github.event_name == 'push' && endsWith(github.ref,'/master') 34 | run: make cd CONFIRM=true BRANCH_NAME=master 35 | - name: release # when based on a tag, tag master and push - ideally, this would be a separate job, but you cannot share docker build cache between jobs 36 | if: github.event_name == 'create' && startsWith(github.ref,'refs/tags/') 37 | run: make release CONFIRM=true 38 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "strings" 7 | "time" 8 | 9 | env "github.com/caarlos0/env/v6" 10 | ) 11 | 12 | func main() { 13 | configs := getConfigs() 14 | 15 | // get a kube connection 16 | readinessHandler, err := kubeGetReadinessHandler(configs.KubernetesEnabled, configs.IgnoreDaemonSets, configs.DeleteLocalData) 17 | if err != nil { 18 | log.Fatalf("Error getting kubernetes readiness handler when required: %v", err) 19 | } 20 | 21 | // get the AWS sessions 22 | ec2Svc, asgSvc, err := awsGetServices() 23 | if err != nil { 24 | log.Fatalf("Unable to create an AWS session: %v", err) 25 | } 26 | 27 | // to keep track of original target sizes during rolling updates 28 | originalDesired := map[string]int64{} 29 | 30 | // infinite loop 31 | for { 32 | err := adjust( 33 | configs.KubernetesEnabled, configs.ASGS, ec2Svc, asgSvc, 34 | readinessHandler, originalDesired, configs.OriginalDesiredOnTag, 35 | configs.IncreaseMax, configs.Verbose, configs.Drain, configs.DrainForce, 36 | ) 37 | if err != nil { 38 | log.Printf("Error adjusting AutoScaling Groups: %v", err) 39 | } 40 | // delay with each loop 41 | log.Printf("Sleeping %v\n", configs.Interval) 42 | time.Sleep(configs.Interval) 43 | } 44 | } 45 | 46 | func getConfigs() (configs Configs) { 47 | // Compat helper 48 | val, ok := os.LookupEnv("ROLLER_CHECK_DELAY") 49 | if ok { 50 | // Use value from check delay to set an interval 51 | if !strings.HasSuffix(val, "s") { 52 | os.Setenv("ROLLER_INTERVAL", val+"s") 53 | } 54 | } 55 | 56 | if err := env.Parse(&configs); err != nil { 57 | log.Panicf("unexpected error while initializing the config: %v", err) 58 | } 59 | 60 | return configs 61 | } 62 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/deitch/aws-asg-roller 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.21.8 7 | github.com/caarlos0/env/v6 v6.6.0 8 | github.com/davecgh/go-spew v1.1.1 // indirect 9 | github.com/go-log/log v0.2.0 // indirect 10 | github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e // indirect 11 | github.com/golang/glog v0.0.0-20141105023935-44145f04b68c // indirect 12 | github.com/google/btree v1.0.0 // indirect 13 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d // indirect 14 | github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 // indirect 15 | 
github.com/imdario/mergo v0.3.6 // indirect 16 | github.com/kr/pretty v0.1.0 // indirect 17 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect 18 | github.com/openshift/kubernetes-drain v0.0.0-20180831174519-c2e51be1758e 19 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect 20 | github.com/spf13/pflag v1.0.3 // indirect 21 | github.com/stretchr/testify v1.7.0 22 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 // indirect 23 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect 24 | golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181 // indirect 25 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect 26 | golang.org/x/time v0.0.0-20161028155119-f51c12702a4d // indirect 27 | google.golang.org/appengine v1.3.0 // indirect 28 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 29 | gopkg.in/inf.v0 v0.9.0 // indirect 30 | k8s.io/api v0.0.0-20181004124137-fd83cbc87e76 31 | k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395 32 | k8s.io/client-go v9.0.0+incompatible 33 | k8s.io/kube-openapi v0.0.0-20190426233423-c5d3b0f4bee0 // indirect 34 | ) 35 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "reflect" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func setBaseEnvs() { 14 | os.Clearenv() 15 | 16 | os.Setenv("ROLLER_ASG", "group1") 17 | os.Setenv("ROLLER_INTERVAL", "30s") 18 | } 19 | 20 | func TestGetConfigs(t *testing.T) { 21 | tests := []struct { 22 | env string 23 | name string 24 | field string 25 | want interface{} 26 | envValue string 27 | shouldError bool 28 | }{ 29 | // check delay gets translated to interval 30 | {"ROLLER_CHECK_DELAY", "should return default", "Interval", time.Duration(30 * time.Second), "", false}, 31 | {"ROLLER_CHECK_DELAY", "should return override", "Interval", time.Duration(17 * time.Second), "17", false}, 32 | {"ROLLER_CHECK_DELAY", "should fail due to wrong type", "CheckDelay", 0, "17s", true}, 33 | {"ROLLER_CHECK_DELAY", "should error if override invalid", "CheckDelay", 0, "fake", true}, 34 | {"ROLLER_INTERVAL", "should return default", "Interval", time.Duration(30 * time.Second), "", false}, 35 | {"ROLLER_INTERVAL", "should fail due to wrong type", "Interval", 0, "17", true}, 36 | {"ROLLER_INTERVAL", "should return override", "Interval", time.Duration(17 * time.Second), "17s", false}, 37 | {"ROLLER_INTERVAL", "should error if override invalid", "Interval", 0, "fake", true}, 38 | {"ROLLER_ASG", "should error on empty", "ASGS", 0, "", true}, 39 | {"ROLLER_ASG", "should work with single value", "ASGS", []string{"grp1"}, "grp1", false}, 40 | {"ROLLER_ASG", "should work with multiple values", "ASGS", []string{"grp1", "grp2"}, "grp1,grp2", false}, 41 | {"ROLLER_ASG", "should work with multiple values with space after comma", "ASGS", []string{"grp1", " grp2"}, "grp1, grp2", false}, 42 | } 43 | for _, tt := range tests { 44 | t.Run(tt.env+":"+tt.name, func(t *testing.T) { 45 | setBaseEnvs() 46 | os.Unsetenv(tt.env) 47 | 48 | if tt.envValue != "" { 49 | os.Setenv(tt.env, tt.envValue) 50 | } 51 | 52 | if tt.shouldError { 53 | require.Panics(t, func() { 54 | getConfigs() 55 | }) 56 | } else { 57 | got := getConfigs() 58 | // use reflect to access struct dynamically 59 | r := reflect.ValueOf(got) 60 | f := 
reflect.Indirect(r).FieldByName(tt.field).Interface() 61 | assert.EqualValues(t, tt.want, f) 62 | } 63 | }) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /original_desired.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "strconv" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/service/autoscaling" 10 | "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" 11 | ) 12 | 13 | const asgTagNameOriginalDesired = "aws-asg-roller/OriginalDesired" 14 | 15 | // Populates the original desired values for each ASG, based on the current 'desired' value if unknown. 16 | // The original desired value is recorded as a tag on the respective ASG. Subsequent runs attempt to 17 | // read the value of the tag to preserve state in the case of the process terminating. 18 | func populateOriginalDesired(originalDesired map[string]int64, asgs []*autoscaling.Group, asgSvc autoscalingiface.AutoScalingAPI, storeOriginalDesiredOnTag bool, verbose bool) error { 19 | for _, asg := range asgs { 20 | asgName := *asg.AutoScalingGroupName 21 | if storeOriginalDesiredOnTag { 22 | tagOriginalDesired, err := getOriginalDesiredTag(asgSvc, asgName, verbose) 23 | if err != nil { 24 | return err 25 | } 26 | if tagOriginalDesired >= 0 { 27 | originalDesired[asgName] = tagOriginalDesired 28 | continue 29 | } 30 | } 31 | // guess based on the current value 32 | originalDesired[asgName] = *asg.DesiredCapacity 33 | if verbose { 34 | log.Printf("guessed desired value of %d from current desired on ASG: %s", *asg.DesiredCapacity, asgName) 35 | } 36 | if storeOriginalDesiredOnTag { 37 | err := setOriginalDesiredTag(asgSvc, asgName, asg, verbose) 38 | if err != nil { 39 | return err 40 | } 41 | } 42 | } 43 | return nil 44 | } 45 | 46 | // attempt to read the original desired value from the ASG tag 47 | // returns 48 | // the original desired value from the tag, if present, otherwise -1 49 | // error 50 | func getOriginalDesiredTag(asgSvc autoscalingiface.AutoScalingAPI, asgName string, verbose bool) (int64, error) { 51 | tags, err := asgSvc.DescribeTags(&autoscaling.DescribeTagsInput{ 52 | Filters: []*autoscaling.Filter{ 53 | { 54 | Name: aws.String("auto-scaling-group"), 55 | Values: aws.StringSlice([]string{asgName}), 56 | }, 57 | { 58 | Name: aws.String("key"), 59 | Values: aws.StringSlice([]string{asgTagNameOriginalDesired}), 60 | }, 61 | }, 62 | }) 63 | if err != nil { 64 | return -1, fmt.Errorf("unable to read tag '%s' for ASG %s: %v", asgTagNameOriginalDesired, asgName, err) 65 | } 66 | if len(tags.Tags) == 1 { 67 | if tagOriginalDesired, err := strconv.ParseInt(aws.StringValue(tags.Tags[0].Value), 10, 64); err == nil { 68 | if verbose { 69 | log.Printf("read original desired of %d from tag on ASG: %s", tagOriginalDesired, asgName) 70 | } 71 | return tagOriginalDesired, nil 72 | } 73 | return -1, fmt.Errorf("unable to parse value of tag '%s' for ASG %s as an integer", asgTagNameOriginalDesired, asgName) 74 | } 75 | return -1, nil 76 | } 77 | 78 | // record original desired value on a tag, in case of process restart 79 | func setOriginalDesiredTag(asgSvc autoscalingiface.AutoScalingAPI, asgName string, asg *autoscaling.Group, verbose bool) error { 80 | _, err := asgSvc.CreateOrUpdateTags(&autoscaling.CreateOrUpdateTagsInput{ 81 | Tags: []*autoscaling.Tag{ 82 | { 83 | Key: aws.String(asgTagNameOriginalDesired), 84 | PropagateAtLaunch: aws.Bool(false), 85 | ResourceId:
aws.String(asgName), 86 | ResourceType: aws.String("auto-scaling-group"), 87 | Value: aws.String(strconv.FormatInt(*asg.DesiredCapacity, 10)), 88 | }, 89 | }, 90 | }) 91 | if err != nil { 92 | return fmt.Errorf("unable to set tag '%s' for ASG %s: %v", asgTagNameOriginalDesired, asgName, err) 93 | } 94 | if verbose { 95 | log.Printf("recorded desired value of %d in tag on ASG: %s", *asg.DesiredCapacity, asgName) 96 | } 97 | return nil 98 | } 99 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all tag image push 2 | 3 | IMAGE ?= deitch/aws-asg-roller 4 | HASH ?= $(shell git show --format=%T -s) 5 | 6 | # check if we should append a dirty tag 7 | DIRTY ?= $(shell git status --short) 8 | ifneq ($(DIRTY),) 9 | TAG = $(HASH)-dirty 10 | else 11 | TAG = $(HASH) 12 | endif 13 | 14 | # BUILDARCH is the host architecture 15 | # ARCH is the target architecture 16 | # we need to keep track of them separately 17 | BUILDARCH ?= $(shell uname -m) 18 | BUILDOS ?= $(shell uname -s | tr A-Z a-z) 19 | 20 | # canonicalized names for host architecture 21 | ifeq ($(BUILDARCH),aarch64) 22 | BUILDARCH=arm64 23 | endif 24 | ifeq ($(BUILDARCH),x86_64) 25 | BUILDARCH=amd64 26 | endif 27 | 28 | # unless otherwise set, I am building for my own architecture and OS, i.e. not cross-compiling 29 | ARCH ?= $(BUILDARCH) 30 | OS ?= $(BUILDOS) 31 | # canonicalized names for target architecture 32 | ifeq ($(ARCH),aarch64) 33 | override ARCH=arm64 34 | endif 35 | ifeq ($(ARCH),x86_64) 36 | override ARCH=amd64 37 | endif 38 | 39 | PACKAGE_NAME ?= github.com/$(IMAGE) 40 | IMGTAG = $(IMAGE):$(TAG) 41 | BUILDERTAG = $(IMGTAG)-builder 42 | BINDIR ?= bin 43 | BINARY ?= $(BINDIR)/aws-asg-roller-$(OS)-$(ARCH) 44 | 45 | GOVER ?= 1.15.6-alpine3.12 46 | 47 | GO ?= GOOS=$(OS) GOARCH=$(ARCH) GO111MODULE=on CGO_ENABLED=0 48 | 49 | ifneq ($(BUILD),local) 50 | GO = docker run --rm $(BUILDERTAG) 51 | endif 52 | 53 | GOPATH ?= $(shell go env GOPATH) 54 | GOBIN ?= $(GOPATH)/bin 55 | LINTER ?= $(GOBIN)/golangci-lint 56 | 57 | GO_FILES := $(shell find . -type f -name '*.go') 58 | 59 | .PHONY: all tag build image push test-start test-run test-run-interactive test-stop test build-test vendor 60 | .PHONY: lint vet golint fmt-check ci cd 61 | 62 | all: push 63 | 64 | tag: 65 | @echo $(TAG) 66 | 67 | gitstat: 68 | @git status 69 | 70 | vendor: 71 | ifeq ($(BUILD),local) 72 | $(GO) go mod download 73 | endif 74 | 75 | build: vendor $(BINARY) 76 | 77 | $(BINDIR): 78 | mkdir -p $(BINDIR) 79 | 80 | $(BINARY): $(BINDIR) 81 | ifneq ($(BUILD),local) 82 | $(MAKE) image 83 | # because there is no way to `docker extract` or `docker cp` from an image 84 | CID=$$(docker create $(IMGTAG)) && \ 85 | docker cp $${CID}:/aws-asg-roller $(BINARY) && \ 86 | docker rm $${CID} 87 | else 88 | $(GO) go build -v -i -o $(BINARY) 89 | endif 90 | 91 | image: gitstat 92 | docker build -t $(IMGTAG) --build-arg OS=$(OS) --build-arg ARCH=$(ARCH) --build-arg REPO=$(PACKAGE_NAME) --build-arg GOVER=$(GOVER) . 93 | 94 | push: gitstat image 95 | docker push $(IMGTAG) 96 | 97 | ci: gitstat tag build fmt-check lint test vet image 98 | 99 | builder: 100 | ifneq ($(BUILD),local) 101 | docker build -t $(BUILDERTAG) --build-arg OS=$(OS) --build-arg ARCH=$(ARCH) --build-arg REPO=$(PACKAGE_NAME) --build-arg GOVER=$(GOVER) --target=golang . 
102 | endif 103 | 104 | fmt-check: builder 105 | if [ -n "$$($(GO) gofmt -l ${GO_FILES})" ]; then \ 106 | $(GO) gofmt -s -e -d ${GO_FILES}; \ 107 | exit 1; \ 108 | fi 109 | 110 | golangci-lint: $(LINTER) 111 | $(LINTER): 112 | ifeq ($(BUILD),local) 113 | curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(GOPATH)/bin v1.21.0 114 | endif 115 | 116 | golint: 117 | ifeq ($(BUILD),local) 118 | ifeq (, $(shell which golint)) 119 | # the stupid things we need to do because "go get" tries to modify go.mod/go.sum since go1.11 modules... 120 | # see https://github.com/golang/go/issues/27643 and related 121 | $(GO) GO111MODULE=off go get -u golang.org/x/lint/golint 122 | # just check the status; this will go away 123 | git status 124 | endif 125 | endif 126 | 127 | ## Lint files 128 | lint: golint golangci-lint builder 129 | $(GO) $(LINTER) run -E golint -E gofmt ./... 130 | 131 | ## Run unit tests 132 | test: builder 133 | # must run go test on my local arch and os 134 | $(GO) env GOOS= GOARCH= go test -short ./... 135 | 136 | ## Vet the files 137 | vet: builder 138 | $(GO) go vet ./... 139 | 140 | cd: 141 | ifndef BRANCH_NAME 142 | $(error BRANCH_NAME is undefined - run using make BRANCH_NAME=var or set an environment variable) 143 | endif 144 | $(MAKE) push IMAGETAG=${BRANCH_NAME} 145 | $(MAKE) push IMAGETAG=${HASH} 146 | -------------------------------------------------------------------------------- /kubernetes.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "path/filepath" 8 | 9 | drainer "github.com/openshift/kubernetes-drain" 10 | corev1 "k8s.io/api/core/v1" 11 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/client-go/kubernetes" 13 | "k8s.io/client-go/rest" 14 | "k8s.io/client-go/tools/clientcmd" 15 | ) 16 | 17 | const clusterAutoscalerScaleDownDisabledFlag = "cluster-autoscaler.kubernetes.io/scale-down-disabled" 18 | 19 | type kubernetesReadiness struct { 20 | clientset *kubernetes.Clientset 21 | ignoreDaemonSets bool 22 | deleteLocalData bool 23 | } 24 | 25 | func (k *kubernetesReadiness) getUnreadyCount(hostnames []string, ids []string) (int, error) { 26 | hostHash := map[string]bool{} 27 | for _, h := range hostnames { 28 | hostHash[h] = true 29 | } 30 | /* 31 | in AWS, the `name` of the node *always* is the internal private DNS name 32 | you can get a node by name by doing Nodes().Get(name) 33 | In other words the `name` of the node is set independently and does not care what 34 | the kubelet had for --hostname-override. 35 | However, if you want multiple nodes, you need to use the `List()` interface. 36 | This interface does not accept multiple hostnames. It lists everything, subject only to a filter 37 | The filter, however, can filter only on labels, and not on the name. 38 | We _should_ be able to just filter on kubernetes.io/hostname label, but this label *does* 39 | respect --hostname-override, which we do not know if it is set or not. Oops. 
40 | Thus, for now, we are stuck doing multiple Get(), one for each hostname, or doing a List() of all nodes 41 | */ 42 | nodes, err := k.clientset.CoreV1().Nodes().List(v1.ListOptions{}) 43 | if err != nil { 44 | return 0, fmt.Errorf("Unexpected error getting nodes for cluster: %v", err) 45 | } 46 | unReadyCount := 0 47 | for _, n := range nodes.Items { 48 | // first make sure that this is one of the new nodes we care about 49 | if _, ok := hostHash[n.ObjectMeta.Name]; !ok { 50 | continue 51 | } 52 | // next check its status 53 | conditions := n.Status.Conditions 54 | if conditions[len(conditions)-1].Type != corev1.NodeReady { 55 | unReadyCount++ 56 | } 57 | } 58 | return unReadyCount, nil 59 | } 60 | func (k *kubernetesReadiness) prepareTermination(hostnames []string, ids []string, drain, drainForce bool) error { 61 | // get the node reference - first need the hostname 62 | var ( 63 | node *corev1.Node 64 | err error 65 | ) 66 | 67 | // Skip drain 68 | if !drain { 69 | return nil 70 | } 71 | 72 | for _, h := range hostnames { 73 | node, err = k.clientset.CoreV1().Nodes().Get(h, v1.GetOptions{}) 74 | if err != nil { 75 | return fmt.Errorf("Unexpected error getting kubernetes node %s: %v", h, err) 76 | } 77 | // set options and drain nodes 78 | err = drainer.Drain(k.clientset, []*corev1.Node{node}, &drainer.DrainOptions{ 79 | IgnoreDaemonsets: k.ignoreDaemonSets, 80 | GracePeriodSeconds: -1, 81 | Force: drainForce, 82 | DeleteLocalData: k.deleteLocalData, 83 | }) 84 | if err != nil { 85 | return fmt.Errorf("Unexpected error draining kubernetes node %s: %v", h, err) 86 | } 87 | } 88 | return nil 89 | } 90 | 91 | func kubeGetClientset(kubernetesEnabled bool) (*kubernetes.Clientset, error) { 92 | // if it is *explicitly* set to false, then do nothing 93 | if !kubernetesEnabled { 94 | return nil, nil 95 | } 96 | 97 | // creates the in-cluster config 98 | config, err := rest.InClusterConfig() 99 | if err != nil { 100 | if err == rest.ErrNotInCluster { 101 | if !kubernetesEnabled { 102 | return nil, nil 103 | } 104 | config, err = getKubeOutOfCluster() 105 | if err != nil { 106 | return nil, err 107 | } 108 | } else { 109 | return nil, fmt.Errorf("Error getting kubernetes config from within cluster: %v", err) 110 | } 111 | } 112 | clientset, err := kubernetes.NewForConfig(config) 113 | if err != nil { 114 | return nil, err 115 | } 116 | return clientset, nil 117 | } 118 | func getKubeOutOfCluster() (*rest.Config, error) { 119 | kubeconfig := os.Getenv("KUBECONFIG") 120 | if kubeconfig == "" { 121 | if home := homeDir(); home != "" { 122 | kubeconfig = filepath.Join(home, ".kube", "config") 123 | } else { 124 | return nil, fmt.Errorf("No KUBECONFIG provided and no home directory available") 125 | } 126 | } 127 | 128 | // use the current context in kubeconfig 129 | config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 130 | if err != nil { 131 | panic(err.Error()) 132 | } 133 | return config, nil 134 | } 135 | 136 | func homeDir() string { 137 | if h := os.Getenv("HOME"); h != "" { 138 | return h 139 | } 140 | return os.Getenv("USERPROFILE") // windows 141 | } 142 | 143 | func kubeGetReadinessHandler(kubernetesEnabled, ignoreDaemonSets, deleteLocalData bool) (readiness, error) { 144 | clientset, err := kubeGetClientset(kubernetesEnabled) 145 | if err != nil { 146 | log.Fatalf("Error getting kubernetes connection: %v", err) 147 | } 148 | if clientset == nil { 149 | return nil, nil 150 | } 151 | return &kubernetesReadiness{clientset: clientset, ignoreDaemonSets: ignoreDaemonSets, deleteLocalData:
deleteLocalData}, nil 152 | } 153 | 154 | // setScaleDownDisabledAnnotation sets the "cluster-autoscaler.kubernetes.io/scale-down-disabled" annotation 155 | // on the list of nodes if required. Returns a list of hostnames where the annotation 156 | // is applied. 157 | func setScaleDownDisabledAnnotation(kubernetesEnabled bool, hostnames []string) ([]string, error) { 158 | // get the node reference - first need the hostname 159 | var ( 160 | node *corev1.Node 161 | err error 162 | key = clusterAutoscalerScaleDownDisabledFlag 163 | annotated = []string{} 164 | ) 165 | clientset, err := kubeGetClientset(kubernetesEnabled) 166 | if err != nil { 167 | log.Fatalf("Error getting kubernetes connection: %v", err) 168 | } 169 | if clientset == nil { 170 | return annotated, nil 171 | } 172 | nodes := clientset.CoreV1().Nodes() 173 | for _, h := range hostnames { 174 | node, err = nodes.Get(h, v1.GetOptions{}) 175 | if err != nil { 176 | return annotated, fmt.Errorf("Unexpected error getting kubernetes node %s: %v", h, err) 177 | } 178 | annotations := node.GetAnnotations() 179 | if value := annotations[key]; value != "true" { 180 | annotations[key] = "true" 181 | node.SetAnnotations(annotations) 182 | _, err := nodes.Update(node) 183 | if err != nil { 184 | return annotated, err 185 | } 186 | annotated = append(annotated, h) 187 | } 188 | } 189 | return annotated, nil 190 | } 191 | func removeScaleDownDisabledAnnotation(kubernetesEnabled bool, hostnames []string) error { 192 | // get the node reference - first need the hostname 193 | var ( 194 | node *corev1.Node 195 | err error 196 | key = clusterAutoscalerScaleDownDisabledFlag 197 | ) 198 | clientset, err := kubeGetClientset(kubernetesEnabled) 199 | if err != nil { 200 | log.Fatalf("Error getting kubernetes connection: %v", err) 201 | } 202 | if clientset == nil { 203 | return nil 204 | } 205 | nodes := clientset.CoreV1().Nodes() 206 | for _, h := range hostnames { 207 | node, err = nodes.Get(h, v1.GetOptions{}) 208 | if err != nil { 209 | return fmt.Errorf("Unexpected error getting kubernetes node %s: %v", h, err) 210 | } 211 | annotations := node.GetAnnotations() 212 | if _, ok := annotations[key]; ok { 213 | delete(annotations, key) 214 | node.SetAnnotations(annotations) 215 | _, err := nodes.Update(node) 216 | if err != nil { 217 | return err 218 | } 219 | } 220 | } 221 | return nil 222 | } 223 | -------------------------------------------------------------------------------- /aws.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/aws/aws-sdk-go/aws" 6 | "github.com/aws/aws-sdk-go/aws/awserr" 7 | "github.com/aws/aws-sdk-go/aws/session" 8 | "github.com/aws/aws-sdk-go/service/autoscaling" 9 | "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" 10 | "github.com/aws/aws-sdk-go/service/ec2" 11 | "github.com/aws/aws-sdk-go/service/ec2/ec2iface" 12 | "log" 13 | ) 14 | 15 | func setAsgDesired(svc autoscalingiface.AutoScalingAPI, asg *autoscaling.Group, count int64, canIncreaseMax, verbose bool) error { 16 | if count > *asg.MaxSize { 17 | if canIncreaseMax { 18 | err := setAsgMax(svc, asg, count, verbose) 19 | if err != nil { 20 | return err 21 | } 22 | } else { 23 | return fmt.Errorf("unable to increase ASG %s desired size to %d as greater than max size %d", *asg.AutoScalingGroupName, count, *asg.MaxSize) 24 | } 25 | } 26 | if verbose { 27 | log.Printf("increasing ASG %s desired count to %d", *asg.AutoScalingGroupName, count) 28 | } 29 | desiredInput =
&autoscaling.SetDesiredCapacityInput{ 30 | AutoScalingGroupName: asg.AutoScalingGroupName, 31 | DesiredCapacity: aws.Int64(count), 32 | HonorCooldown: aws.Bool(true), 33 | } 34 | _, err := svc.SetDesiredCapacity(desiredInput) 35 | if err != nil { 36 | errMsg := fmt.Sprintf("unable to increase ASG %s desired count to %d", *asg.AutoScalingGroupName, count) 37 | if aerr, ok := err.(awserr.Error); ok { 38 | switch aerr.Code() { 39 | case autoscaling.ErrCodeScalingActivityInProgressFault: 40 | return fmt.Errorf("%s - %s %v", errMsg, autoscaling.ErrCodeScalingActivityInProgressFault, aerr.Error()) 41 | case autoscaling.ErrCodeResourceContentionFault: 42 | return fmt.Errorf("%s - %s %v", errMsg, autoscaling.ErrCodeResourceContentionFault, aerr.Error()) 43 | default: 44 | return fmt.Errorf("%s - unexpected and unknown AWS error: %v", errMsg, aerr.Error()) 45 | } 46 | } 47 | 48 | return fmt.Errorf("%s - unexpected and unknown non-AWS error: %v", errMsg, err.Error()) 49 | } 50 | if verbose { 51 | log.Printf("increased ASG %s desired count to %d", *asg.AutoScalingGroupName, count) 52 | } 53 | return nil 54 | } 55 | 56 | func setAsgMax(svc autoscalingiface.AutoScalingAPI, asg *autoscaling.Group, count int64, verbose bool) error { 57 | if verbose { 58 | log.Printf("increasing ASG %s max size to %d to accommodate desired count", *asg.AutoScalingGroupName, count) 59 | } 60 | _, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{ 61 | AutoScalingGroupName: asg.AutoScalingGroupName, 62 | MaxSize: aws.Int64(count), 63 | }) 64 | if err != nil { 65 | errMsg := fmt.Sprintf("unable to increase ASG %s max size to %d", *asg.AutoScalingGroupName, count) 66 | if aerr, ok := err.(awserr.Error); ok { 67 | switch aerr.Code() { 68 | case autoscaling.ErrCodeScalingActivityInProgressFault: 69 | return fmt.Errorf("%s - %s %v", errMsg, autoscaling.ErrCodeScalingActivityInProgressFault, aerr.Error()) 70 | case autoscaling.ErrCodeResourceContentionFault: 71 | return fmt.Errorf("%s - %s %v", errMsg, autoscaling.ErrCodeResourceContentionFault, aerr.Error()) 72 | default: 73 | return fmt.Errorf("%s - unexpected and unknown AWS error: %v", errMsg, aerr.Error()) 74 | } 75 | } 76 | 77 | return fmt.Errorf("%s - unexpected and unknown non-AWS error: %v", errMsg, err.Error()) 78 | } 79 | if verbose { 80 | log.Printf("increased ASG %s max size to %d to accommodate desired count", *asg.AutoScalingGroupName, count) 81 | } 82 | return nil 83 | } 84 | 85 | func awsGetHostname(svc ec2iface.EC2API, id string) (string, error) { 86 | hostnames, err := awsGetHostnames(svc, []string{id}) 87 | if err != nil { 88 | return "", err 89 | } 90 | if len(hostnames) < 1 { 91 | return "", err 92 | } 93 | return hostnames[0], nil 94 | } 95 | func awsGetLaunchTemplateByID(svc ec2iface.EC2API, id string) (*ec2.LaunchTemplate, error) { 96 | input := &ec2.DescribeLaunchTemplatesInput{ 97 | LaunchTemplateIds: []*string{ 98 | aws.String(id), 99 | }, 100 | } 101 | return awsGetLaunchTemplate(svc, input) 102 | } 103 | func awsGetLaunchTemplateByName(svc ec2iface.EC2API, name string) (*ec2.LaunchTemplate, error) { 104 | input := &ec2.DescribeLaunchTemplatesInput{ 105 | LaunchTemplateNames: []*string{ 106 | aws.String(name), 107 | }, 108 | } 109 | return awsGetLaunchTemplate(svc, input) 110 | } 111 | func awsGetLaunchTemplate(svc ec2iface.EC2API, input *ec2.DescribeLaunchTemplatesInput) (*ec2.LaunchTemplate, error) { 112 | templatesOutput, err := svc.DescribeLaunchTemplates(input) 113 | descriptiveMsg := fmt.Sprintf("%v / %v", 
input.LaunchTemplateIds, input.LaunchTemplateNames) 114 | if err != nil { 115 | return nil, fmt.Errorf("Unable to get description for Launch Template %s: %v", descriptiveMsg, err) 116 | } 117 | if len(templatesOutput.LaunchTemplates) < 1 { 118 | return nil, nil 119 | } 120 | return templatesOutput.LaunchTemplates[0], nil 121 | } 122 | func awsGetHostnames(svc ec2iface.EC2API, ids []string) ([]string, error) { 123 | if len(ids) == 0 { 124 | return []string{}, nil 125 | } 126 | ec2input := &ec2.DescribeInstancesInput{ 127 | InstanceIds: aws.StringSlice(ids), 128 | } 129 | nodesResult, err := svc.DescribeInstances(ec2input) 130 | if err != nil { 131 | return nil, fmt.Errorf("Unable to get description for node %v: %v", ids, err) 132 | } 133 | if len(nodesResult.Reservations) < 1 { 134 | return nil, fmt.Errorf("Did not get any reservations for node %v", ids) 135 | } 136 | hostnames := make([]string, 0) 137 | for _, i := range nodesResult.Reservations { 138 | for _, j := range i.Instances { 139 | hostnames = append(hostnames, *j.PrivateDnsName) 140 | } 141 | } 142 | return hostnames, nil 143 | } 144 | 145 | func awsDescribeGroups(svc autoscalingiface.AutoScalingAPI, names []string) ([]*autoscaling.Group, error) { 146 | input := &autoscaling.DescribeAutoScalingGroupsInput{ 147 | AutoScalingGroupNames: aws.StringSlice(names), 148 | } 149 | result, err := svc.DescribeAutoScalingGroups(input) 150 | if err != nil { 151 | if aerr, ok := err.(awserr.Error); ok { 152 | switch aerr.Code() { 153 | case autoscaling.ErrCodeInvalidNextToken: 154 | return nil, fmt.Errorf("Unexpected AWS NextToken error when doing non-pagination describe") 155 | case autoscaling.ErrCodeResourceContentionFault: 156 | return nil, fmt.Errorf("Unexpected AWS ResourceContentionFault when doing describe") 157 | default: 158 | return nil, fmt.Errorf("Unexpected and unknown AWS error when doing describe: %v", aerr) 159 | } 160 | } else { 161 | // Print the error, cast err to awserr.Error to get the Code and 162 | // Message from an error. 163 | return nil, fmt.Errorf("Unexpected and unknown non-AWS error when doing describe: %v", err.Error()) 164 | } 165 | } 166 | return result.AutoScalingGroups, nil 167 | } 168 | 169 | func awsTerminateNode(svc autoscalingiface.AutoScalingAPI, id string) error { 170 | input := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ 171 | InstanceId: aws.String(id), 172 | ShouldDecrementDesiredCapacity: aws.Bool(false), 173 | } 174 | 175 | _, err := svc.TerminateInstanceInAutoScalingGroup(input) 176 | if err != nil { 177 | if aerr, ok := err.(awserr.Error); ok { 178 | switch aerr.Code() { 179 | case autoscaling.ErrCodeScalingActivityInProgressFault: 180 | return fmt.Errorf("Could not terminate instance, autoscaling already in progress, will try next loop") 181 | case autoscaling.ErrCodeResourceContentionFault: 182 | return fmt.Errorf("Could not terminate instance, instance in contention, will try next loop") 183 | default: 184 | return fmt.Errorf("Unknown aws error when terminating old instance: %v", aerr.Error()) 185 | } 186 | } else { 187 | // Print the error, cast err to awserr.Error to get the Code and 188 | // Message from an error. 
189 | return fmt.Errorf("Unknown non-aws error when terminating old instance: %v", err.Error()) 190 | } 191 | } 192 | return nil 193 | } 194 | 195 | func awsGetServices() (ec2iface.EC2API, autoscalingiface.AutoScalingAPI, error) { 196 | sess, err := session.NewSession() 197 | if err != nil { 198 | return nil, nil, err 199 | } 200 | asgSvc := autoscaling.New(sess) 201 | ec2svc := ec2.New(sess) 202 | return ec2svc, asgSvc, nil 203 | } 204 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 2 | github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 3 | github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 4 | github.com/aws/aws-sdk-go v1.21.8 h1:Lv6hW2twBhC6mGZAuWtqplEpIIqtVctJg02sE7Qn0Zw= 5 | github.com/aws/aws-sdk-go v1.21.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 6 | github.com/caarlos0/env/v6 v6.6.0 h1:kVhajCpqX5pSfH41gFd8cPXPZahqJrnn9HxJ1vKftW4= 7 | github.com/caarlos0/env/v6 v6.6.0/go.mod h1:P0BVSgU9zfkxfSpFUs6KsO3uWR4k3Ac0P66ibAGTybM= 8 | github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 10 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 11 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 13 | github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= 14 | github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 15 | github.com/go-log/log v0.2.0 h1:z8i91GBudxD5L3RmF0KVpetCbcGWAV7q1Tw1eRwQM9Q= 16 | github.com/go-log/log v0.2.0/go.mod h1:xzCnwajcues/6w7lne3yK2QU7DBPW7kqbgPGG5AF65U= 17 | github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= 18 | github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= 19 | github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= 20 | github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= 21 | github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e h1:ago6fNuQ6IhszPsXkeU7qRCyfsIX7L67WDybsAPkLl8= 22 | github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 23 | github.com/golang/glog v0.0.0-20141105023935-44145f04b68c h1:CbdkBQ1/PiAo0FYJhQGwASD8wrgNvTdf01g6+O9tNuA= 24 | github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 25 | github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 26 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 27 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 28 | github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= 29 | github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 30 | github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4= 31 | github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= 32 | github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod 
h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 33 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= 34 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 35 | github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w= 36 | github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 37 | github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= 38 | github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 39 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= 40 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 41 | github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3 h1:/UewZcckqhvnnS0C6r3Sher2hSEbVmM6Ogpcjen08+Y= 42 | github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 43 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 44 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 45 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 46 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 47 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 48 | github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 49 | github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= 50 | github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= 51 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 52 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 53 | github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 54 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= 55 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 56 | github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 57 | github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c h1:Hww8mOyEKTeON4bZn7FrlLismspbPc1teNRUVH7wLQ8= 58 | github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 59 | github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U= 60 | github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= 61 | github.com/openshift/kubernetes-drain v0.0.0-20180831174519-c2e51be1758e h1:+dghxLlr/512Npnj6wrYMhjrD69Xj7ZqZW6fHRFTJBw= 62 | github.com/openshift/kubernetes-drain v0.0.0-20180831174519-c2e51be1758e/go.mod h1:Qjq5nGWuMWEjosMJNDhpFQuhJLdmNB2yRFeHTb9cgAU= 63 | github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= 64 | github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 65 | github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 66 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 67 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 68 | github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 69 | github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= 70 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 71 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 72 | github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 73 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= 74 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 75 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 76 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= 77 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 78 | golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 79 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 80 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 81 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= 82 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 83 | golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181 h1:/4OaQ4bC66Oq9JDhUnxTjBGt8XBhDuwgMRXHgvfcCUY= 84 | golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 85 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= 86 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 87 | golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 88 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 89 | golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= 90 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 91 | golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 92 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 93 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 94 | golang.org/x/time v0.0.0-20161028155119-f51c12702a4d h1:TnM+PKb3ylGmZvyPXmo9m/wktg7Jn/a/fNmr33HSj8g= 95 | golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 96 | golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 97 | google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk= 98 | google.golang.org/appengine 
v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 99 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 100 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 101 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 102 | gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= 103 | gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 104 | gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= 105 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 106 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 107 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 108 | k8s.io/api v0.0.0-20181004124137-fd83cbc87e76 h1:cGc6jt7tNK7a2WfgNKjxjoU/UXXr9Q7JTqvCupZ+6+Y= 109 | k8s.io/api v0.0.0-20181004124137-fd83cbc87e76/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= 110 | k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395 h1:X+c9tYTDc9Pmt+Z1YSMqmUTCYf13VYe1u+ZwzjgpK0M= 111 | k8s.io/apimachinery v0.0.0-20180913025736-6dd46049f395/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= 112 | k8s.io/client-go v9.0.0+incompatible h1:2kqW3X2xQ9SbFvWZjGEHBLlWc1LG9JIJNXWkuqwdZ3A= 113 | k8s.io/client-go v9.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= 114 | k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 115 | k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 116 | k8s.io/kube-openapi v0.0.0-20190426233423-c5d3b0f4bee0 h1:D//p4U8H78y6as0HmrCe8QucazHZqI6hs9eu+rJflKw= 117 | k8s.io/kube-openapi v0.0.0-20190426233423-c5d3b0f4bee0/go.mod h1:iU+ZGYsNlvU9XKUSso6SQfKTCCw7lFduMZy26Mgr2Fw= 118 | sigs.k8s.io/structured-merge-diff v0.0.0-20190426204423-ea680f03cc65/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= 119 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Amazon Web Services AutoScaling Group Roller 2 | 3 | Rolling updates for AWS AutoScaling Groups! 4 | 5 | [https://github.com/deitch/aws-asg-roller](https://github.com/deitch/aws-asg-roller) 6 | 7 | AWS AutoScaling Groups (ASG) are wonderful. They let you declare a configuration, a minimum and maximum number of ec2 nodes, a desired number, and it keeps that number going for you. It even lets you set it up to scale up or down automatically based on cloudwatch events, effectively adjusting the desired number of nodes in the ASG, in response to load. 8 | 9 | The challenge is: how do you update it? 10 | 11 | If you change the launch configuration or launch template, it does **not** cause new nodes to be rolled in. Even if it did, you would want them to roll in sanely and slowly, one at a time, rather than all at once. Further, you may have app-specific "readiness" requirements, of which AWS simply isn't aware. For example, if you are running Kubernetes workloads on the nodes, you may want to drain the nodes _before_ terminating a node. 12 | 13 | [Terraform](https://terraform.io) does a decent job, with a little extra work, making a blue/green deployment: 14 | 15 | 1. Create a new auto-scaling group 16 | 2. 
Make sure all of the nodes in the new ASG are functioning 17 | 3. Terminate the old one 18 | 19 | If this is good enough for you, check out either [this](https://medium.com/@endofcake/using-terraform-for-zero-downtime-updates-of-an-auto-scaling-group-in-aws-60faca582664) or [this](https://www.joshdurbin.net/posts/2018-05-auto-scaling-rollout-on-aws-with-terraform/) blog post. 20 | 21 | However, even if this "big bang" switchover works for you, you _still_ might want app-specific "readiness" before rolling over. To use our previous example, drain all of the existing Kubernetes workers before destroying the old ASG. The above blue/green examples do not handle this. 22 | 23 | The other offered solution is to use CloudFormation. While the AWS ASG API does not offer rolling updates, AWS CloudFormation does. You can set the update method to `RollingUpdate`, and a change in the launch configuration or launch template will cause AWS to add a new node and terminate an old one when the new one is ready, one at a time, until all of the nodes are running the new configuration or template. 24 | 25 | This has two challenges: 26 | 27 | 1. You might not be using CloudFormation, or may not want to mix it into your terraform or other deployer. 28 | 2. AWS _still_ doesn't know about your app being ready (drained those kubernetes workers yet?). 29 | 30 | ## ASG Roller 31 | Enter ASG Roller. 32 | 33 | ASG Roller is a simple service that watches your AWS ASGs, checks the nodes, and, if the nodes are not in sync with the configuration or template, updates them. 34 | 35 | The update methodology is simple: 36 | 37 | 1. Increment the `desired` setting. 38 | 2. Watch the new node come online. 39 | 3. When the new node is ready, select and terminate one old node. 40 | 4. Repeat until the number of nodes with the correct configuration or template matches the _original_ `desired` setting. At this point, there is likely to be one old node left. 41 | 5. Decrement the `desired` setting. 42 | 43 | ASG Roller checks both launch configurations, comparing the name of the launch configuration in use, and launch templates, comparing the ID or name as well as the version. 44 | 45 | ASG Roller can store the original desired value of the ASG as a tag on the ASG, with the key `aws-asg-roller/OriginalDesired` (see `ROLLER_ORIGINAL_DESIRED_ON_TAG` under Configuration below). This helps maintain state in the situation where the process terminates. 46 | 47 | ## App Awareness 48 | In addition to the above, ASG Roller is able to insert app-specific logic at two distinct points: 49 | 50 | * Testing if the new node is "ready for usage". 51 | * Preparing the old node for termination. 52 | 53 | ### Ready for Usage 54 | AWS's definition of "ready for usage" normally is one of: 55 | 56 | * node is up and running 57 | * node responds to ELB health checks, one of TCP or other supported protocol checks (like HTTP) 58 | 59 | In addition, ASG Roller supports app-specific logic, such as checking if Kubernetes registers the node as online and `Ready`. As of this writing, the only supported method is the Kubernetes node readiness check, but others are in the works, and we are happy to accept pull requests for more. 60 | 61 | ### Preparing for Termination 62 | Prior to terminating the old node, ASG Roller can execute commands to prepare the node for termination. AWS ASG does nothing other than shutting the node down. While well-built apps should be able to handle termination of a node without disruption, in real-world scenarios we often prefer a clean shutdown. 63 | 64 | We can execute such a clean shutdown via supported commands.
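In the Kubernetes case, this preparation amounts to cordoning and draining the node before the ASG terminates it. Purely as an illustration (ASG Roller performs the drain itself through the Kubernetes API; it does not shell out to `kubectl`), the manual equivalent would be roughly the following, where the node name is just a placeholder:

```sh
# stop new pods from being scheduled onto the node that is about to be terminated
kubectl cordon ip-10-0-1-23.ec2.internal

# evict the regular pods; DaemonSet pods are left alone
kubectl drain ip-10-0-1-23.ec2.internal --ignore-daemonsets
```

How aggressively the drain behaves (DaemonSets, emptyDir data, forced eviction) is controlled by the `ROLLER_IGNORE_DAEMONSETS`, `ROLLER_DELETE_LOCAL_DATA` and `ROLLER_DRAIN_FORCE` options described under Configuration below.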
65 | 66 | As of this writing, the only supported method is Kubernetes draining, but others are in the works, and we are happy to accept pull requests for more. 67 | 68 | ## Deployment 69 | ASG Roller is available as a docker image. To run on a node: 70 | 71 | ``` 72 | docker run -d deitch/aws-asg-roller: 73 | ``` 74 | 75 | ### Permissions 76 | AWS ASG Roller requires IAM rights to: 77 | 78 | * Read the information about an ASG 79 | * Modify the min, max and desired parameters of an ASG 80 | * Read the launch configuration for an ASG 81 | * Terminate ASG nodes 82 | 83 | These permissions are as follows: 84 | 85 | ``` 86 | - Effect: Allow 87 | Action: 88 | - "autoscaling:DescribeAutoScalingGroups" 89 | - "autoscaling:DescribeAutoScalingInstances" 90 | - "autoscaling:SetDesiredCapacity" 91 | - "autoscaling:TerminateInstanceInAutoScalingGroup" 92 | - "autoscaling:UpdateAutoScalingGroup" 93 | - "autoscaling:DescribeTags" 94 | - "autoscaling:DescribeLaunchConfigurations" 95 | - "ec2:DescribeLaunchTemplates" 96 | - "ec2:DescribeInstances" 97 | Resource: "*" 98 | ``` 99 | 100 | If the `ROLLER_ORIGINAL_DESIRED_ON_TAG` tag option is enabled, the following permission is also required: 101 | 102 | ``` 103 | autoscaling:CreateOrUpdateTags 104 | ``` 105 | 106 | These permissions can be set either via running ASG Roller on an AWS node that has the correct role, or via API keys to a user that has the correct roles/permissions. 107 | 108 | * If the AWS environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`are set, it will use those 109 | * If the AWS environment variables are not set, it will fall back to relying on the local node's IAM role 110 | 111 | ### Running in Kubernetes 112 | To run in Kubernetes: 113 | 114 | ```yml 115 | apiVersion: core/v1 116 | kind: ServiceAccount 117 | metadata: 118 | name: asg-roller 119 | labels: 120 | name: asg-roller 121 | namespace: kube-system 122 | --- 123 | apiVersion: rbac.authorization.k8s.io/v1beta1 124 | kind: ClusterRole 125 | metadata: 126 | name: asg-roller 127 | labels: 128 | name: asg-roller 129 | rules: 130 | - apiGroups: 131 | - "*" 132 | resources: 133 | - "*" 134 | verbs: 135 | - get 136 | - list 137 | - watch 138 | - apiGroups: 139 | - "*" 140 | resources: 141 | - nodes 142 | verbs: 143 | - get 144 | - list 145 | - watch 146 | - update 147 | - patch 148 | - apiGroups: 149 | - "*" 150 | resources: 151 | - pods/eviction 152 | verbs: 153 | - get 154 | - list 155 | - create 156 | - apiGroups: 157 | - "*" 158 | resources: 159 | - pods 160 | verbs: 161 | - get 162 | - list 163 | --- 164 | apiVersion: rbac.authorization.k8s.io/v1beta1 165 | kind: ClusterRoleBinding 166 | metadata: 167 | name: asg-roller 168 | labels: 169 | name: asg-roller 170 | roleRef: 171 | kind: ClusterRole 172 | name: asg-roller 173 | apiGroup: rbac.authorization.k8s.io 174 | subjects: 175 | - kind: ServiceAccount 176 | name: asg-roller 177 | namespace: kube-system 178 | --- 179 | apiVersion: apps/v1 180 | kind: Deployment 181 | metadata: 182 | name: aws-asg-roller 183 | labels: 184 | name: aws-asg-roller 185 | namespace: kube-system # or in another namespace, if you prefer 186 | spec: 187 | replicas: 1 188 | template: 189 | metadata: 190 | labels: 191 | name: aws-asg-roller 192 | spec: 193 | containers: 194 | - name: aws-asg-roller 195 | # the below is if you are using AWS credentials; if you are relying on the node's IAM role, remove the `envFrom` section 196 | envFrom: 197 | - secretRef: 198 | name: aws-asg-roller 199 | image: 'deitch/aws-asg-roller' 200 | 
imagePullPolicy: Always 201 | restartPolicy: Always 202 | serviceAccountName: asg-roller 203 | # to allow it to run on master 204 | tolerations: 205 | - effect: NoSchedule 206 | operator: Exists 207 | # we specifically want to run on master - remove the remaining lines if you do not care where it runs 208 | affinity: 209 | nodeAffinity: 210 | requiredDuringSchedulingIgnoredDuringExecution: 211 | nodeSelectorTerms: 212 | - matchExpressions: 213 | - key: kubernetes.io/role 214 | operator: In 215 | values: ["master"] 216 | ``` 217 | 218 | Several key areas of potential modification: 219 | 220 | * Allowed to run on master nodes: the above config allows it to run on master nodes. If you do not want to allow it to run on masters, remove the `tolerations` 221 | * Required to run on master nodes: the above config requires it to run on master nodes. If you do not want to require it on masters, remove the `affinity` 222 | * Image version: use a real version tag; do not omit the tag (which implies `latest`) 223 | * Credentials: the above example reads the AWS credentials as environment variables from a secret named `aws-asg-roller`. If you have a different secret, use that one; if you are relying on host IAM roles, remove the `envFrom` entirely. 224 | 225 | 226 | ## Configuration 227 | ASG Roller takes its configuration via environment variables. All environment variables that affect ASG Roller begin with `ROLLER_`. 228 | 229 | * `ROLLER_ASG` [`string`, required]: comma-separated list of auto-scaling groups that should be managed. 230 | * `ROLLER_KUBERNETES` [`bool`, default: `true`]: If set to `true`, will check whether a new node is ready vis-à-vis Kubernetes before declaring it "ready", and will drain an old node before terminating it. Defaults to `true` when running in Kubernetes as a pod, `false` otherwise. 231 | * `ROLLER_DRAIN` [`bool`, default: `true`]: If set to `true`, will handle draining of pods and other kubernetes resources. Consider setting this to `false` if your distribution has a built-in drain on terminate. 232 | * `ROLLER_DRAIN_FORCE` [`bool`, default: `true`]: If set to `true`, the drain will force-delete kubernetes resources even if they violate a PodDisruptionBudget (PDB) or grace periods. 233 | * `ROLLER_IGNORE_DAEMONSETS` [`bool`, default: `true`]: If set to `false`, will not reclaim a node until there are no DaemonSets running on the node; if set to `true` (default), will reclaim the node when all regular pods are drained off, but will ignore the presence of DaemonSets, which should be present on every node anyway. Normally, you want this set to `true`. 234 | * `ROLLER_DELETE_LOCAL_DATA` [`bool`, default: `false`]: If set to `false` (default), will not reclaim a node until there are no pods with [emptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) running on the node; if set to `true`, will terminate such pods and delete the local data before reclaiming the node. The default is `false` to maintain backward compatibility. 235 | * `ROLLER_INTERVAL` [`time.Duration`, default: `30s`]: Time between roller runs. Decimal number with a unit suffix, such as "10s", "10m", "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Internally uses [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration). 236 | * `ROLLER_CHECK_DELAY` [`int`]: Time, in seconds, between checks of ASG status. **Deprecated**, use `ROLLER_INTERVAL`. If both `ROLLER_CHECK_DELAY` and `ROLLER_INTERVAL` are specified then `ROLLER_INTERVAL` is used.
237 | * `ROLLER_CAN_INCREASE_MAX` [`bool`, default: `false`]: If set to `true`, will increase the ASG maximum size to accommodate the increase in desired count. If set to `false`, will instead return an error when the desired count is higher than the max. 238 | * `ROLLER_ORIGINAL_DESIRED_ON_TAG` [`bool`, default: `false`]: If set to `true`, will store the original desired value of the ASG as a tag on the ASG, with the key `aws-asg-roller/OriginalDesired`. This helps maintain state in the situation where the process terminates. 239 | * `ROLLER_VERBOSE` [`bool`, default: `false`]: If set to `true`, will increase verbosity of logs. 240 | * `KUBECONFIG` [`string`]: Path to kubernetes config file for authenticating to the kubernetes cluster. Required only if `ROLLER_KUBERNETES` is `true` and we are not operating in a kubernetes cluster. 241 | 242 | ## Interaction with cluster-autoscaler 243 | 244 | [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) is a tool commonly used to automatically adjust the size of a Kubernetes cluster. However, there can be conflicts (see [#19](https://github.com/deitch/aws-asg-roller/issues/19) for more details) between cluster-autoscaler and aws-asg-roller when both are trying to adjust the same ASG. A workaround was implemented in aws-asg-roller by annotating all of the managed nodes with `cluster-autoscaler.kubernetes.io/scale-down-disabled` when a rolling update is required. 245 | 246 | The general flow can be summarized as follows: 247 | * Check if any nodes in the ASG need to be updated. 248 | * If there are nodes that need to be updated, annotate all up-to-date or new nodes with `cluster-autoscaler.kubernetes.io/scale-down-disabled`. 249 | * Update the ASG to spin up a new node before draining any old nodes. 250 | * Sleep and repeat (i.e. annotate each new, not-yet-utilized node to prevent it from being scaled down). 251 | * If all nodes are up-to-date, remove the `cluster-autoscaler.kubernetes.io/scale-down-disabled` annotation (if present) from all of the nodes - i.e. normal cluster-autoscaler management resumes. 252 | 253 | > NOTE: `cluster-autoscaler.kubernetes.io/scale-down-disabled` is only supported by cluster-autoscaler v1.0.0 and above. 254 | 255 | ## Template or Configuration 256 | 257 | Ideally, AWS enforces that every autoscaling group has only one of _either_ a launch template _or_ a launch configuration; in practice, we do not rely on that. If the autoscaling group has a launch template, ASG Roller uses it; if it does not, it falls back to the launch configuration. 258 | 259 | Since AWS recommends launch templates over launch configurations going forward, and is likely to deprecate launch configurations eventually, this is a reasonable approach. 260 | 261 | ## Building 262 | 263 | The only prerequisite for building is [docker](https://docker.com); all builds take place inside a docker container. If you prefer, you _may_ build locally using locally installed go, which requires go version 1.12+.
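For reference, building a Go binary for a specific platform is just a `go build` with the target set through the usual Go environment variables. The following is a rough sketch only (the `make` targets below remain the supported path); the output path simply mirrors the `bin/aws-asg-roller-${OS}-${ARCH}` naming used by the Makefile:

```sh
# build a static linux/amd64 binary into bin/
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/aws-asg-roller-linux-amd64 .
```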
264 | 265 | If required, set the target OS/architecture, for example: 266 | 267 | ```sh 268 | export BUILDOS=linux 269 | export BUILDARCH=amd64 270 | ``` 271 | 272 | To build: 273 | 274 | ```sh 275 | $ make build # builds the binary via docker in `bin/aws-asg-roller-${OS}-${ARCH} 276 | $ make image # builds the docker image 277 | ``` 278 | 279 | To build locally: 280 | 281 | ```sh 282 | $ make build BUILD=local # builds the binary via locally installed go in `bin/aws-asg-roller-${OS}-${ARCH} 283 | $ make image BUILD=local # builds the docker image 284 | ``` 285 | -------------------------------------------------------------------------------- /aws_internal_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/aws/awserr" 10 | "github.com/aws/aws-sdk-go/service/autoscaling" 11 | "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" 12 | "github.com/aws/aws-sdk-go/service/ec2" 13 | "github.com/aws/aws-sdk-go/service/ec2/ec2iface" 14 | ) 15 | 16 | func testASGEq(a, b []*autoscaling.Group) bool { 17 | 18 | // If one is nil, the other must also be nil. 19 | if (a == nil) != (b == nil) { 20 | return false 21 | } 22 | 23 | if len(a) != len(b) { 24 | return false 25 | } 26 | 27 | for i := range a { 28 | if *a[i].AutoScalingGroupName != *b[i].AutoScalingGroupName { 29 | return false 30 | } 31 | } 32 | return true 33 | } 34 | 35 | var validLaunchTemplates = map[string]*ec2.LaunchTemplate{ 36 | "12345": { 37 | LaunchTemplateId: aws.String("12345"), 38 | LatestVersionNumber: aws.Int64(65), 39 | DefaultVersionNumber: aws.Int64(59), 40 | }, 41 | "67890": { 42 | LaunchTemplateId: aws.String("67890"), 43 | LatestVersionNumber: aws.Int64(10), 44 | DefaultVersionNumber: aws.Int64(10), 45 | }, 46 | "lt1": { 47 | LaunchTemplateName: aws.String("lt1"), 48 | LatestVersionNumber: aws.Int64(4), 49 | DefaultVersionNumber: aws.Int64(1), 50 | }, 51 | "lt2": { 52 | LaunchTemplateName: aws.String("lt2"), 53 | LatestVersionNumber: aws.Int64(40), 54 | DefaultVersionNumber: aws.Int64(30), 55 | }, 56 | } 57 | 58 | type mockEc2Svc struct { 59 | ec2iface.EC2API 60 | autodescribe bool 61 | counter funcCounter 62 | } 63 | 64 | func (m *mockEc2Svc) DescribeInstances(in *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) { 65 | m.counter.add("DescribeInstances", in) 66 | hostMap := map[string]string{ 67 | "12345": "host12345", 68 | "67890": "host67890", 69 | } 70 | instances := make([]*ec2.Instance, 0) 71 | for _, i := range in.InstanceIds { 72 | if name, ok := hostMap[*i]; ok { 73 | instances = append(instances, &ec2.Instance{ 74 | InstanceId: i, 75 | PrivateDnsName: &name, 76 | }) 77 | continue 78 | } 79 | if m.autodescribe { 80 | name := fmt.Sprintf("host%s", *i) 81 | instances = append(instances, &ec2.Instance{ 82 | InstanceId: i, 83 | PrivateDnsName: &name, 84 | }) 85 | continue 86 | } 87 | return nil, fmt.Errorf("Unknown ID %s", *i) 88 | } 89 | ret := &ec2.DescribeInstancesOutput{ 90 | Reservations: []*ec2.Reservation{ 91 | { 92 | Instances: instances, 93 | }, 94 | }, 95 | } 96 | return ret, nil 97 | } 98 | 99 | func (m *mockEc2Svc) DescribeLaunchTemplates(in *ec2.DescribeLaunchTemplatesInput) (*ec2.DescribeLaunchTemplatesOutput, error) { 100 | m.counter.add("DescribeLaunchTemplates:", in) 101 | templates := make([]*ec2.LaunchTemplate, 0) 102 | for _, i := range in.LaunchTemplateIds { 103 | for _, t := range 
validLaunchTemplates { 104 | if t.LaunchTemplateId != nil && *t.LaunchTemplateId == *i { 105 | templates = append(templates, t) 106 | } 107 | } 108 | } 109 | for _, i := range in.LaunchTemplateNames { 110 | for _, t := range validLaunchTemplates { 111 | if t.LaunchTemplateName != nil && *t.LaunchTemplateName == *i { 112 | templates = append(templates, t) 113 | } 114 | } 115 | } 116 | ret := &ec2.DescribeLaunchTemplatesOutput{ 117 | LaunchTemplates: templates, 118 | } 119 | return ret, nil 120 | } 121 | 122 | type mockAsgSvc struct { 123 | autoscalingiface.AutoScalingAPI 124 | err error 125 | counter funcCounter 126 | groups map[string]*autoscaling.Group 127 | } 128 | 129 | func (m *mockAsgSvc) TerminateInstanceInAutoScalingGroup(in *autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) { 130 | m.counter.add("TerminateInstanceInAutoScalingGroup", in) 131 | ret := &autoscaling.TerminateInstanceInAutoScalingGroupOutput{} 132 | return ret, m.err 133 | } 134 | func (m *mockAsgSvc) DescribeAutoScalingGroups(in *autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { 135 | m.counter.add("DescribeAutoScalingGroups", in) 136 | groups := make([]*autoscaling.Group, 0) 137 | for _, n := range in.AutoScalingGroupNames { 138 | if group, ok := m.groups[*n]; ok { 139 | groups = append(groups, group) 140 | } 141 | } 142 | return &autoscaling.DescribeAutoScalingGroupsOutput{ 143 | AutoScalingGroups: groups, 144 | }, m.err 145 | } 146 | func (m *mockAsgSvc) SetDesiredCapacity(in *autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) { 147 | m.counter.add("SetDesiredCapacity", in) 148 | ret := &autoscaling.SetDesiredCapacityOutput{} 149 | return ret, m.err 150 | } 151 | func (m *mockAsgSvc) UpdateAutoScalingGroup(in *autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error) { 152 | m.counter.add("UpdateAutoScalingGroup", in) 153 | ret := &autoscaling.UpdateAutoScalingGroupOutput{} 154 | return ret, m.err 155 | } 156 | func (m *mockAsgSvc) DescribeTags(in *autoscaling.DescribeTagsInput) (*autoscaling.DescribeTagsOutput, error) { 157 | m.counter.add("DescribeTags", in) 158 | ret := &autoscaling.DescribeTagsOutput{ 159 | // value of "auto-scaling-group" tag is the ASG name 160 | Tags: m.groups[*in.Filters[0].Values[0]].Tags, 161 | } 162 | return ret, m.err 163 | } 164 | func (m *mockAsgSvc) CreateOrUpdateTags(in *autoscaling.CreateOrUpdateTagsInput) (*autoscaling.CreateOrUpdateTagsOutput, error) { 165 | m.counter.add("CreateOrUpdateTags", in) 166 | ret := &autoscaling.CreateOrUpdateTagsOutput{} 167 | return ret, m.err 168 | } 169 | 170 | func TestAwsGetHostnames(t *testing.T) { 171 | tests := []struct { 172 | ids []string 173 | hostnames []string 174 | err error 175 | }{ 176 | {[]string{"12345", "67890"}, []string{"host12345", "host67890"}, nil}, 177 | {[]string{"67890"}, []string{"host67890"}, nil}, 178 | {[]string{"notexist"}, nil, fmt.Errorf("Unable to get description")}, 179 | } 180 | for _, tt := range tests { 181 | hostnames, err := awsGetHostnames(&mockEc2Svc{}, tt.ids) 182 | switch { 183 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 184 | t.Errorf("Mismatched error, actual then expected") 185 | t.Logf("%v", err) 186 | t.Logf("%v", tt.err) 187 | case !testStringEq(hostnames, tt.hostnames): 188 | t.Errorf("Mismatched results, 
actual then expected") 189 | t.Logf("%v", hostnames) 190 | t.Logf("%v", tt.hostnames) 191 | } 192 | } 193 | } 194 | func TestAwsGetHostname(t *testing.T) { 195 | tests := []struct { 196 | id string 197 | hostname string 198 | err error 199 | }{ 200 | {"12345", "host12345", nil}, 201 | {"notexist", "", fmt.Errorf("Unable to get description")}, 202 | } 203 | for _, tt := range tests { 204 | hostname, err := awsGetHostname(&mockEc2Svc{}, tt.id) 205 | switch { 206 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 207 | t.Errorf("Mismatched error, actual then expected") 208 | t.Logf("%v", err) 209 | t.Logf("%v", tt.err) 210 | case hostname != tt.hostname: 211 | t.Errorf("Mismatched results, actual then expected") 212 | t.Logf("%v", hostname) 213 | t.Logf("%v", tt.hostname) 214 | } 215 | } 216 | } 217 | 218 | func TestAwsGetServices(t *testing.T) { 219 | ec2, asg, err := awsGetServices() 220 | if err != nil { 221 | t.Fatalf("Unexpected err %v", err) 222 | } 223 | if ec2 == nil { 224 | t.Fatalf("ec2 unexpectedly nil") 225 | } 226 | if asg == nil { 227 | t.Fatalf("asg unexpectedly nil") 228 | } 229 | } 230 | 231 | func TestAwsTerminateNode(t *testing.T) { 232 | id := "12345" 233 | tests := []struct { 234 | awserr error 235 | err error 236 | }{ 237 | {awserr.New(autoscaling.ErrCodeScalingActivityInProgressFault, "", nil), fmt.Errorf("Could not terminate instance, autoscaling already in progress")}, 238 | {awserr.New(autoscaling.ErrCodeResourceContentionFault, "", nil), fmt.Errorf("Could not terminate instance, instance in contention")}, 239 | {awserr.New("test it new", "", nil), fmt.Errorf("Unknown aws error when terminating old instance")}, 240 | {fmt.Errorf("test it new"), fmt.Errorf("Unknown non-aws error when terminating old instance")}, 241 | } 242 | for i, tt := range tests { 243 | err := awsTerminateNode(&mockAsgSvc{ 244 | err: tt.awserr, 245 | }, id) 246 | if (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())) { 247 | t.Errorf("%d: mismatched errors, actual then expected", i) 248 | t.Logf("%v", err) 249 | t.Logf("%v", tt.err) 250 | } 251 | } 252 | } 253 | func TestAwsDescribeGroups(t *testing.T) { 254 | nogroup := "notexist" 255 | tests := []struct { 256 | names []string 257 | setErr error 258 | err error 259 | }{ 260 | {[]string{"abc", "def"}, nil, nil}, 261 | {[]string{"67890"}, nil, nil}, 262 | {[]string{nogroup}, awserr.New(autoscaling.ErrCodeResourceContentionFault, "", nil), fmt.Errorf("Unexpected AWS Resource")}, 263 | {[]string{nogroup}, awserr.New("testabc", "", nil), fmt.Errorf("Unexpected and unknown AWS error")}, 264 | {[]string{nogroup}, fmt.Errorf("testabc"), fmt.Errorf("Unexpected and unknown non-AWS error")}, 265 | } 266 | for i, tt := range tests { 267 | validGroups := map[string]*autoscaling.Group{} 268 | for _, n := range tt.names { 269 | if n == nogroup { 270 | continue 271 | } 272 | name := n 273 | validGroups[n] = &autoscaling.Group{ 274 | AutoScalingGroupName: &name, 275 | } 276 | } 277 | groups, err := awsDescribeGroups(&mockAsgSvc{ 278 | err: tt.setErr, 279 | groups: validGroups, 280 | }, tt.names) 281 | var expectedGroups []*autoscaling.Group 282 | if tt.err == nil { 283 | expectedGroups = make([]*autoscaling.Group, 0) 284 | for _, n := range tt.names { 285 | name := n 286 | expectedGroups = append(expectedGroups, &autoscaling.Group{ 287 | AutoScalingGroupName: &name, 
288 | }) 289 | } 290 | } 291 | switch { 292 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 293 | t.Errorf("%d: Mismatched error, actual then expected", i) 294 | t.Logf("%v", err) 295 | t.Logf("%v", tt.err) 296 | case !testASGEq(groups, expectedGroups): 297 | t.Errorf("%d: Mismatched results, actual then expected", i) 298 | t.Logf("%v", groups) 299 | t.Logf("%v", expectedGroups) 300 | } 301 | } 302 | } 303 | 304 | func TestAwsSetAsgDesired(t *testing.T) { 305 | groupName := "mygroup" 306 | tests := []struct { 307 | desired int64 308 | max int64 309 | canIncreaseMax bool 310 | setErr error 311 | err error 312 | verbose bool 313 | }{ 314 | {3, 3, true, nil, nil, false}, 315 | {2, 2, true, nil, nil, false}, 316 | {15, 15, true, awserr.New(autoscaling.ErrCodeResourceContentionFault, "", nil), fmt.Errorf("unable to increase ASG mygroup desired count to 15 - ResourceContention"), false}, 317 | {1, 1, true, awserr.New("testabc", "", nil), fmt.Errorf("unable to increase ASG mygroup desired count to 1 - unexpected and unknown AWS error"), false}, 318 | {25, 25, true, fmt.Errorf("testabc"), fmt.Errorf("unable to increase ASG mygroup desired count to 25 - unexpected and unknown non-AWS error"), false}, 319 | {31, 30, false, nil, fmt.Errorf("unable to increase ASG mygroup desired size to 31 as greater than max size 30"), false}, 320 | {31, 30, true, nil, nil, false}, 321 | } 322 | for i, tt := range tests { 323 | asg := &autoscaling.Group{ 324 | AutoScalingGroupName: &groupName, 325 | MaxSize: &tt.max, 326 | } 327 | err := setAsgDesired(&mockAsgSvc{ 328 | err: tt.setErr, 329 | }, asg, tt.desired, tt.canIncreaseMax, tt.verbose) 330 | switch { 331 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 332 | t.Errorf("%d: Mismatched error, actual then expected", i) 333 | t.Logf("%v", err) 334 | t.Logf("%v", tt.err) 335 | } 336 | } 337 | } 338 | 339 | func TestAwsSetAsgMax(t *testing.T) { 340 | groupName := "mygroup" 341 | tests := []struct { 342 | max int64 343 | setErr error 344 | err error 345 | verbose bool 346 | }{ 347 | {3, nil, nil, false}, 348 | {2, nil, nil, false}, 349 | {15, awserr.New(autoscaling.ErrCodeResourceContentionFault, "", nil), fmt.Errorf("unable to increase ASG mygroup max size to 15 - ResourceContention"), false}, 350 | {1, awserr.New("testabc", "", nil), fmt.Errorf("unable to increase ASG mygroup max size to 1 - unexpected and unknown AWS error: testabc"), false}, 351 | {25, fmt.Errorf("testabc"), fmt.Errorf("unable to increase ASG mygroup max size to 25 - unexpected and unknown non-AWS error: testabc"), false}, 352 | } 353 | for i, tt := range tests { 354 | asg := &autoscaling.Group{ 355 | AutoScalingGroupName: &groupName, 356 | } 357 | err := setAsgMax(&mockAsgSvc{ 358 | err: tt.setErr, 359 | }, asg, tt.max, tt.verbose) 360 | switch { 361 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 362 | t.Errorf("%d: Mismatched error, actual then expected", i) 363 | t.Logf("%v", err) 364 | t.Logf("%v", tt.err) 365 | } 366 | } 367 | } 368 | 369 | func TestAwsGetLaunchTemplate(t *testing.T) { 370 | tests := []struct { 371 | names []string 372 | ids []string 373 | template *ec2.LaunchTemplate 374 | err error 375 | }{ 376 | {nil, nil, nil, nil}, // nothing passed, should get nothing 
back but no errors 377 | {[]string{"lt1", "lt2"}, nil, validLaunchTemplates["lt1"], nil}, // two names match, so should get first one 378 | {[]string{"lt2", "lt1"}, nil, validLaunchTemplates["lt2"], nil}, // two names match, so should get first one 379 | {nil, []string{"12345", "67890"}, validLaunchTemplates["12345"], nil}, // two ids match, so should get first one 380 | {nil, []string{"67890", "12345"}, validLaunchTemplates["67890"], nil}, // two ids match, so should get first one 381 | {[]string{"lt2", "lt1"}, []string{"67890", "12345"}, validLaunchTemplates["67890"], nil}, // ids override names 382 | } 383 | for i, tt := range tests { 384 | input := &ec2.DescribeLaunchTemplatesInput{ 385 | LaunchTemplateNames: aws.StringSlice(tt.names), 386 | LaunchTemplateIds: aws.StringSlice(tt.ids), 387 | } 388 | template, err := awsGetLaunchTemplate(&mockEc2Svc{}, input) 389 | switch { 390 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 391 | t.Errorf("%d: Mismatched error, actual then expected", i) 392 | t.Logf("%v", err) 393 | t.Logf("%v", tt.err) 394 | case (template == nil && tt.template != nil) || (template != nil && tt.template == nil): 395 | t.Errorf("%d: Mismatched nil/not-nil templates, actual then expected", i) 396 | t.Logf("%v:", template) 397 | t.Logf("%v:", tt.template) 398 | case template != nil && tt.template != nil && !testCompareLaunchTemplate(template, tt.template): 399 | t.Errorf("%d: Mismatched templates, actual then expected", i) 400 | t.Logf("%v:", template) 401 | t.Logf("%v:", tt.template) 402 | } 403 | } 404 | } 405 | 406 | func testCompareLaunchTemplate(t1, t2 *ec2.LaunchTemplate) bool { 407 | return t1.LaunchTemplateName == t2.LaunchTemplateName && t1.LaunchTemplateId == t2.LaunchTemplateId && t1.DefaultVersionNumber == t2.DefaultVersionNumber && t1.LatestVersionNumber == t2.LatestVersionNumber 408 | } 409 | -------------------------------------------------------------------------------- /roller.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | 7 | "github.com/aws/aws-sdk-go/aws" 8 | "github.com/aws/aws-sdk-go/service/autoscaling" 9 | "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" 10 | "github.com/aws/aws-sdk-go/service/ec2" 11 | "github.com/aws/aws-sdk-go/service/ec2/ec2iface" 12 | ) 13 | 14 | const ( 15 | healthy = "Healthy" 16 | ) 17 | 18 | // adjust runs a single adjustment in the loop to update an ASG in a rolling fashion to latest launch config 19 | func adjust(kubernetesEnabled bool, asgList []string, ec2Svc ec2iface.EC2API, asgSvc autoscalingiface.AutoScalingAPI, readinessHandler readiness, originalDesired map[string]int64, storeOriginalDesiredOnTag, canIncreaseMax, verbose, drain, drainForce bool) error { 20 | // get information on all of the groups 21 | asgs, err := awsDescribeGroups(asgSvc, asgList) 22 | if err != nil { 23 | return fmt.Errorf("Unexpected error describing ASGs, skipping: %v", err) 24 | } 25 | 26 | // look up and record original desired values 27 | err = populateOriginalDesired(originalDesired, asgs, asgSvc, storeOriginalDesiredOnTag, verbose) 28 | if err != nil { 29 | return fmt.Errorf("unexpected error looking up original desired values for ASGs, skipping: %v", err) 30 | } 31 | 32 | asgMap := map[string]*autoscaling.Group{} 33 | // get information on all of the ec2 instances 34 | instances := make([]*autoscaling.Instance, 0) 35 | 
for _, asg := range asgs { 36 | oldInstances, newInstances, err := groupInstances(asg, ec2Svc, verbose) 37 | if err != nil { 38 | return fmt.Errorf("unable to group instances into new and old: %v", err) 39 | } 40 | // if there are no outdated instances skip updating 41 | if len(oldInstances) == 0 && *asg.DesiredCapacity == originalDesired[*asg.AutoScalingGroupName] { 42 | log.Printf("[%s] ok\n", *asg.AutoScalingGroupName) 43 | err := ensureNoScaleDownDisabledAnnotation(kubernetesEnabled, ec2Svc, mapInstancesIds(asg.Instances)) 44 | if err != nil { 45 | log.Printf("[%s] Unable to update node annotations: %v\n", *asg.AutoScalingGroupName, err) 46 | } 47 | continue 48 | } 49 | 50 | log.Printf("[%s] need updates: %d\n", *asg.AutoScalingGroupName, len(oldInstances)) 51 | 52 | asgMap[*asg.AutoScalingGroupName] = asg 53 | instances = append(instances, oldInstances...) 54 | instances = append(instances, newInstances...) 55 | } 56 | // no instances no work needed 57 | if len(instances) == 0 { 58 | return nil 59 | } 60 | ids := mapInstancesIds(instances) 61 | hostnames, err := awsGetHostnames(ec2Svc, ids) 62 | if err != nil { 63 | return fmt.Errorf("unable to get aws hostnames for ids %v: %v", ids, err) 64 | } 65 | hostnameMap := map[string]string{} 66 | for i, id := range ids { 67 | hostnameMap[id] = hostnames[i] 68 | } 69 | newDesired := map[string]int64{} 70 | newTerminate := map[string]string{} 71 | 72 | // keep keyed references to the ASGs 73 | for _, asg := range asgMap { 74 | newDesiredA, terminateID, err := calculateAdjustment(kubernetesEnabled, asg, ec2Svc, hostnameMap, readinessHandler, originalDesired[*asg.AutoScalingGroupName], verbose, drain, drainForce) 75 | log.Printf("[%v] desired: %d original: %d", p2v(asg.AutoScalingGroupName), newDesiredA, originalDesired[*asg.AutoScalingGroupName]) 76 | if err != nil { 77 | log.Printf("[%v] error calculating adjustment - skipping: %v\n", p2v(asg.AutoScalingGroupName), err) 78 | continue 79 | } 80 | if newDesiredA != *asg.DesiredCapacity { 81 | newDesired[*asg.AutoScalingGroupName] = newDesiredA 82 | } 83 | if terminateID != "" { 84 | log.Printf("[%v] scheduled termination: %s", asg.AutoScalingGroupName, terminateID) 85 | newTerminate[*asg.AutoScalingGroupName] = terminateID 86 | } 87 | } 88 | // adjust current desired 89 | for asg, desired := range newDesired { 90 | log.Printf("[%s] set desired instances: %d\n", asg, desired) 91 | err = setAsgDesired(asgSvc, asgMap[asg], desired, canIncreaseMax, verbose) 92 | if err != nil { 93 | return fmt.Errorf("[%s] error setting desired to %d: %v", asg, desired, err) 94 | } 95 | } 96 | // terminate nodes 97 | for asg, id := range newTerminate { 98 | log.Printf("[%s] terminating node: %s\n", asg, id) 99 | // all new config instances are ready, terminate an old one 100 | err = awsTerminateNode(asgSvc, id) 101 | if err != nil { 102 | return fmt.Errorf("[%s] error terminating node %s: %v", asg, id, err) 103 | } 104 | } 105 | return nil 106 | } 107 | 108 | // ensureNoScaleDownDisabledAnnotation remove any "cluster-autoscaler.kubernetes.io/scale-down-disabled" 109 | // annotations in the nodes as no update is required anymore. 
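// Note: the instance IDs are resolved to their EC2 hostnames first, because the annotation removal below is keyed by node hostname.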
110 | func ensureNoScaleDownDisabledAnnotation(kubernetesEnabled bool, ec2Svc ec2iface.EC2API, ids []string) error { 111 | hostnames, err := awsGetHostnames(ec2Svc, ids) 112 | if err != nil { 113 | return fmt.Errorf("unable to get aws hostnames for ids %v: %v", ids, err) 114 | } 115 | return removeScaleDownDisabledAnnotation(kubernetesEnabled, hostnames) 116 | } 117 | 118 | // calculateAdjustment calculates the new settings for the desired number, and which node (if any) to terminate 119 | // this makes no actual adjustment, only calculates what new settings should be 120 | // returns: 121 | // what the new desired number of instances should be 122 | // ID of an instance to terminate, "" if none 123 | // error 124 | func calculateAdjustment(kubernetesEnabled bool, asg *autoscaling.Group, ec2Svc ec2iface.EC2API, hostnameMap map[string]string, readinessHandler readiness, originalDesired int64, verbose, drain, drainForce bool) (int64, string, error) { 125 | desired := *asg.DesiredCapacity 126 | 127 | // get instances with old launch config 128 | oldInstances, newInstances, err := groupInstances(asg, ec2Svc, verbose) 129 | if err != nil { 130 | return originalDesired, "", fmt.Errorf("unable to group instances into new and old: %v", err) 131 | } 132 | 133 | // Possibilities: 134 | // 1- we have some old ones, but have not started updates yet: set the desired, increment and loop 135 | // 2- we have no old ones: we must be at end or have no work to do, so finish 136 | // 3- we have some old ones, but have started updates: run the updates 137 | if len(oldInstances) == 0 { 138 | // we are done 139 | if verbose && desired != originalDesired { 140 | log.Printf("[%v] returning desired to original value %d", p2v(asg.AutoScalingGroupName), originalDesired) 141 | } 142 | return originalDesired, "", nil 143 | } 144 | if originalDesired == desired { 145 | // we have not started updates; raise the desired count 146 | return originalDesired + 1, "", nil 147 | } 148 | 149 | // how we determine if we can terminate one 150 | // we already know we have increased desired capacity 151 | // check if: 152 | // a- actual instance count matches our new desired 153 | // b- all new config instances are in valid state 154 | // if yes, terminate one old one 155 | // if not, loop around again - eventually it will be 156 | 157 | // do we have at least one more more ready instances than the original desired? if not, loop again until we do 158 | readyCount := 0 159 | for _, i := range asg.Instances { 160 | if *i.HealthStatus == healthy { 161 | readyCount++ 162 | } 163 | } 164 | if int64(readyCount) < originalDesired+1 { 165 | return desired, "", nil 166 | } 167 | // are any of the updated config instances not ready? 168 | unReadyCount := 0 169 | // should check if new node *really* is ready to function 170 | for _, i := range newInstances { 171 | if *i.HealthStatus != healthy { 172 | unReadyCount++ 173 | } 174 | } 175 | if unReadyCount > 0 { 176 | return desired, "", nil 177 | } 178 | // do we have additional requirements for readiness? 
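// (with the Kubernetes readiness handler, this means every new-config node must be registered and Ready before an old node is picked for termination)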
179 | if readinessHandler != nil { 180 | var ( 181 | hostnames []string 182 | err error 183 | ) 184 | // check if the new nodes all are in ready state 185 | ids := mapInstancesIds(newInstances) 186 | hostnames = make([]string, 0) 187 | for _, i := range ids { 188 | hostnames = append(hostnames, hostnameMap[i]) 189 | } 190 | _, err = setScaleDownDisabledAnnotation(kubernetesEnabled, hostnames) 191 | if err != nil { 192 | log.Printf("Unable to set disabled scale down annotations: %v", err) 193 | } 194 | unReadyCount, err = readinessHandler.getUnreadyCount(hostnames, ids) 195 | if err != nil { 196 | return desired, "", fmt.Errorf("error getting readiness new node status: %v", err) 197 | } 198 | if unReadyCount > 0 { 199 | log.Printf("[%v] Nodes not ready: %d", p2v(asg.AutoScalingGroupName), unReadyCount) 200 | return desired, "", nil 201 | } 202 | } 203 | candidate := *oldInstances[0].InstanceId 204 | 205 | if readinessHandler != nil { 206 | // get the node reference - first need the hostname 207 | var ( 208 | hostname string 209 | err error 210 | ) 211 | hostname = hostnameMap[candidate] 212 | err = readinessHandler.prepareTermination([]string{hostname}, []string{candidate}, drain, drainForce) 213 | if err != nil { 214 | return desired, "", fmt.Errorf("unexpected error readiness handler terminating node %s: %v", hostname, err) 215 | } 216 | } 217 | 218 | // all new config instances are ready, terminate an old one 219 | return desired, candidate, nil 220 | } 221 | 222 | // groupInstances handles all of the logic for determining which nodes in the ASG have an old or outdated 223 | // config, and which are up to date. It should do nothing else. 224 | // The entire rest of the code should rely on this for making the determination 225 | func groupInstances(asg *autoscaling.Group, ec2Svc ec2iface.EC2API, verbose bool) ([]*autoscaling.Instance, []*autoscaling.Instance, error) { 226 | oldInstances := make([]*autoscaling.Instance, 0) 227 | newInstances := make([]*autoscaling.Instance, 0) 228 | // we want to be able to handle LaunchTemplate as well 229 | targetLc := asg.LaunchConfigurationName 230 | targetLt := asg.LaunchTemplate 231 | // check for mixed instance policy 232 | if targetLt == nil && asg.MixedInstancesPolicy != nil && asg.MixedInstancesPolicy.LaunchTemplate != nil { 233 | if verbose { 234 | log.Printf("[%v] using mixed instances policy launch template", p2v(asg.AutoScalingGroupName)) 235 | } 236 | targetLt = asg.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification 237 | } 238 | // prioritize LaunchTemplate over LaunchConfiguration 239 | if targetLt != nil { 240 | // we are using LaunchTemplate. Unlike LaunchConfiguration, you can have two nodes in the ASG 241 | // with the same LT name, same ID but different versions, so need to check version. 
242 | // they even can have the same version, if the version is `$Latest` or `$Default`, so need 243 | // to get actual versions for each 244 | var ( 245 | targetTemplate *ec2.LaunchTemplate 246 | err error 247 | ) 248 | switch { 249 | case targetLt.LaunchTemplateId != nil && *targetLt.LaunchTemplateId != "": 250 | if targetTemplate, err = awsGetLaunchTemplateByID(ec2Svc, *targetLt.LaunchTemplateId); err != nil { 251 | return nil, nil, fmt.Errorf("[%v] error retrieving information about launch template ID %v: %v", p2v(asg.AutoScalingGroupName), p2v(targetLt.LaunchTemplateId), err) 252 | } 253 | case targetLt.LaunchTemplateName != nil && *targetLt.LaunchTemplateName != "": 254 | if targetTemplate, err = awsGetLaunchTemplateByName(ec2Svc, *targetLt.LaunchTemplateName); err != nil { 255 | return nil, nil, fmt.Errorf("[%v] error retrieving information about launch template name %v: %v", p2v(asg.AutoScalingGroupName), p2v(targetLt.LaunchTemplateName), err) 256 | } 257 | default: 258 | return nil, nil, fmt.Errorf("AutoScaling Group %s had invalid Launch Template", *asg.AutoScalingGroupName) 259 | } 260 | // extra safety check 261 | if targetTemplate == nil { 262 | return nil, nil, fmt.Errorf("no template found") 263 | } 264 | if verbose { 265 | log.Printf("Grouping instances for ASG named %v with target template name %v, id %v, latest version %v and default version %v", p2v(asg.AutoScalingGroupName), p2v(targetTemplate.LaunchTemplateName), p2v(targetTemplate.LaunchTemplateId), p2v(targetTemplate.LatestVersionNumber), p2v(targetTemplate.DefaultVersionNumber)) 266 | } 267 | // now we can loop through each node and compare 268 | for _, i := range asg.Instances { 269 | switch { 270 | case i.LaunchTemplate == nil: 271 | if verbose { 272 | log.Printf("[%v] adding %v to list of old instances because it does not have a launch template", p2v(asg.AutoScalingGroupName), p2v(i.InstanceId)) 273 | } 274 | // has no launch template at all 275 | oldInstances = append(oldInstances, i) 276 | case aws.StringValue(i.LaunchTemplate.LaunchTemplateName) != aws.StringValue(targetLt.LaunchTemplateName): 277 | // mismatched name 278 | if verbose { 279 | log.Printf("[%v] adding %v to list of old instances because its name is %v and the target template's name is %v", p2v(asg.AutoScalingGroupName), p2v(i.InstanceId), p2v(i.LaunchTemplate.LaunchTemplateName), p2v(targetLt.LaunchTemplateName)) 280 | } 281 | oldInstances = append(oldInstances, i) 282 | case aws.StringValue(i.LaunchTemplate.LaunchTemplateId) != aws.StringValue(targetLt.LaunchTemplateId): 283 | // mismatched ID 284 | if verbose { 285 | log.Printf("[%v] adding %v to list of old instances because its template id is %v and the target template's id is %v", p2v(asg.AutoScalingGroupName), p2v(i.InstanceId), p2v(i.LaunchTemplate.LaunchTemplateId), p2v(targetLt.LaunchTemplateId)) 286 | } 287 | oldInstances = append(oldInstances, i) 288 | // name and id match, just need to check versions 289 | case !compareLaunchTemplateVersions(targetTemplate, targetLt, i.LaunchTemplate): 290 | if verbose { 291 | log.Printf("[%v] adding %v to list of old instances because the launch template versions do not match (%v!=%v)", p2v(asg.AutoScalingGroupName), p2v(i.InstanceId), p2v(i.LaunchTemplate.Version), p2v(targetLt.Version)) 292 | } 293 | oldInstances = append(oldInstances, i) 294 | default: 295 | if verbose { 296 | log.Printf("[%v] adding %v to list of new instances because the instance matches the launch template with id %v", p2v(asg.AutoScalingGroupName), p2v(i.InstanceId), 
p2v(targetLt.LaunchTemplateId)) 297 | } 298 | newInstances = append(newInstances, i) 299 | } 300 | } 301 | } else if targetLc != nil { 302 | // go through each instance and find those that are not with the target LC 303 | for _, i := range asg.Instances { 304 | if i.LaunchConfigurationName != nil && *i.LaunchConfigurationName == *targetLc { 305 | newInstances = append(newInstances, i) 306 | } else { 307 | if verbose { 308 | log.Printf("[%v] adding %v to list of old instances because the launch configuration names do not match (%v!=%v)", p2v(asg.AutoScalingGroupName), p2v(i.InstanceId), p2v(i.LaunchConfigurationName), p2v(targetLc)) 309 | } 310 | oldInstances = append(oldInstances, i) 311 | } 312 | } 313 | } else { 314 | return nil, nil, fmt.Errorf("[%v] both target launch configuration and launch template are nil", p2v(asg.AutoScalingGroupName)) 315 | } 316 | return oldInstances, newInstances, nil 317 | } 318 | 319 | func mapInstancesIds(instances []*autoscaling.Instance) []string { 320 | ids := make([]string, 0) 321 | for _, i := range instances { 322 | ids = append(ids, *i.InstanceId) 323 | } 324 | return ids 325 | } 326 | 327 | // compareLaunchTemplateVersions compare two launch template versions and see if they match 328 | // can handle `$Latest` and `$Default` by resolving to the actual version in use 329 | func compareLaunchTemplateVersions(targetTemplate *ec2.LaunchTemplate, lt1, lt2 *autoscaling.LaunchTemplateSpecification) bool { 330 | // if both versions do not start with `$`, then just compare 331 | if lt1 == nil && lt2 == nil { 332 | return true 333 | } 334 | if (lt1 == nil && lt2 != nil) || (lt1 != nil && lt2 == nil) { 335 | return false 336 | } 337 | if lt1.Version == nil && lt2.Version == nil { 338 | return true 339 | } 340 | if (lt1.Version == nil && lt2.Version != nil) || (lt1.Version != nil && lt2.Version == nil) { 341 | return false 342 | } 343 | // if either version starts with `$`, then resolve to actual version from LaunchTemplate 344 | var lt1version, lt2version string 345 | switch *lt1.Version { 346 | case "$Default": 347 | lt1version = fmt.Sprintf("%d", *targetTemplate.DefaultVersionNumber) 348 | case "$Latest": 349 | lt1version = fmt.Sprintf("%d", *targetTemplate.LatestVersionNumber) 350 | default: 351 | lt1version = *lt1.Version 352 | } 353 | switch *lt2.Version { 354 | case "$Default": 355 | lt2version = fmt.Sprintf("%d", *targetTemplate.DefaultVersionNumber) 356 | case "$Latest": 357 | lt2version = fmt.Sprintf("%d", *targetTemplate.LatestVersionNumber) 358 | default: 359 | lt2version = *lt2.Version 360 | } 361 | return lt1version == lt2version 362 | } 363 | -------------------------------------------------------------------------------- /roller_internal_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/aws/aws-sdk-go/aws" 10 | "github.com/aws/aws-sdk-go/service/autoscaling" 11 | "github.com/aws/aws-sdk-go/service/ec2" 12 | ) 13 | 14 | // Tests do not talk to a live kubernetes cluster 15 | const kubernetesEnabled = false 16 | 17 | type testReadyHandler struct { 18 | unreadyCount int 19 | unreadyError error 20 | terminateError error 21 | } 22 | 23 | func (t *testReadyHandler) getUnreadyCount(hostnames []string, ids []string) (int, error) { 24 | return t.unreadyCount, t.unreadyError 25 | } 26 | func (t *testReadyHandler) prepareTermination(hostnames []string, ids []string, drain, drainForce bool) error { 27 | return 
t.terminateError 28 | } 29 | 30 | func TestCalculateAdjustment(t *testing.T) { 31 | /* 32 | Each test should have: 33 | inputs 34 | - number of instances 35 | - number with old config 36 | - number with new config 37 | - desired number of instances 38 | - config name 39 | - kube enabled (bool) 40 | - original desired number 41 | - if kube enabled: 42 | - state of each old node 43 | - state of each new node 44 | outputs 45 | - new desired number 46 | - node id to terminated (if any) 47 | - errors (if any) 48 | */ 49 | unreadyCountHandler := &testReadyHandler{ 50 | unreadyCount: 1, 51 | } 52 | unreadyErrorHandler := &testReadyHandler{ 53 | unreadyError: fmt.Errorf("Error"), 54 | } 55 | readyHandler := &testReadyHandler{ 56 | unreadyCount: 0, 57 | } 58 | terminateHandler := &testReadyHandler{} 59 | terminateErrorHandler := &testReadyHandler{ 60 | terminateError: fmt.Errorf("Error"), 61 | } 62 | 63 | tests := []struct { 64 | oldInstances []string 65 | newInstancesHealthy []string 66 | newInstancesUnhealthy []string 67 | desired int64 68 | originalDesired int64 69 | readiness readiness 70 | targetDesired int64 71 | targetTerminate string 72 | err error 73 | verbose bool 74 | drain bool 75 | drainForce bool 76 | }{ 77 | // 1 old, 2 new healthy, 0 new unhealthy, should terminate old 78 | {[]string{"1"}, []string{"2", "3"}, []string{}, 3, 2, nil, 3, "1", nil, false, true, true}, 79 | // 0 old, 2 new healthy, 0 new unhealthy, should indicate end of process 80 | {[]string{}, []string{"2", "3"}, []string{}, 2, 2, nil, 2, "", nil, false, true, true}, 81 | // 2 old, 0 new healthy, 0 new unhealthy, should indicate start of process 82 | {[]string{"1", "2"}, []string{}, []string{}, 2, 2, nil, 3, "", nil, false, true, true}, 83 | // 2 old, 0 new healthy, 0 new unhealthy, started, should not do anything until new healthy one 84 | {[]string{"1", "2"}, []string{}, []string{}, 3, 2, nil, 3, "", nil, false, true, true}, 85 | // 2 old, 1 new healthy, 0 new unhealthy, remove an old one 86 | {[]string{"1", "2"}, []string{"3"}, []string{}, 3, 2, nil, 3, "1", nil, false, true, true}, 87 | // 2 old, 0 new healthy, 1 new unhealthy, started, should not do anything until new one is healthy 88 | {[]string{"1", "2"}, []string{}, []string{"3"}, 3, 2, nil, 3, "", nil, false, true, true}, 89 | 90 | // 2 old, 1 new healthy, 0 new unhealthy, 1 new unready, should not change anything 91 | {[]string{"1", "2"}, []string{"3"}, []string{}, 3, 2, unreadyCountHandler, 3, "", nil, false, true, true}, 92 | // 2 old, 1 new healthy, 0 new unhealthy, 0 new unready, 1 error: should not change anything 93 | {[]string{"1", "2"}, []string{"3"}, []string{}, 3, 2, unreadyErrorHandler, 3, "", fmt.Errorf("error"), false, true, true}, 94 | // 2 old, 1 new healthy, 0 new unhealthy, 0 unready, remove an old one 95 | {[]string{"1", "2"}, []string{"3"}, []string{}, 3, 2, readyHandler, 3, "1", nil, false, true, true}, 96 | // 2 old, 1 new healthy, 0 new unhealthy, 0 new unready, 1 error: should not change anything 97 | {[]string{"1", "2"}, []string{"3"}, []string{}, 3, 2, terminateErrorHandler, 3, "", fmt.Errorf("unexpected error"), false, true, true}, 98 | // 2 old, 1 new healthy, 0 new unhealthy, 0 unready, successful terminate: remove an old one 99 | {[]string{"1", "2"}, []string{"3"}, []string{}, 3, 2, terminateHandler, 3, "1", nil, false, true, true}, 100 | } 101 | hostnameMap := map[string]string{} 102 | for i := 0; i < 20; i++ { 103 | hostnameMap[fmt.Sprintf("%d", i)] = fmt.Sprintf("host%d", i) 104 | } 105 | for i, tt := range tests { 106 | // 
construct Instances for the group 107 | lcName := "newconf" 108 | instances := make([]*autoscaling.Instance, 0) 109 | lcNameOld := fmt.Sprintf("mod-%s", lcName) 110 | statusHealthy := "Healthy" 111 | statusUnhealthy := "Down" 112 | for _, instance := range tt.oldInstances { 113 | id := instance 114 | instances = append(instances, &autoscaling.Instance{ 115 | InstanceId: &id, 116 | LaunchConfigurationName: &lcNameOld, 117 | HealthStatus: &statusHealthy, 118 | }) 119 | } 120 | lcNameNew := lcName 121 | for _, instance := range tt.newInstancesHealthy { 122 | id := instance 123 | instances = append(instances, &autoscaling.Instance{ 124 | InstanceId: &id, 125 | LaunchConfigurationName: &lcNameNew, 126 | HealthStatus: &statusHealthy, 127 | }) 128 | } 129 | for _, instance := range tt.newInstancesUnhealthy { 130 | id := instance 131 | instances = append(instances, &autoscaling.Instance{ 132 | InstanceId: &id, 133 | LaunchConfigurationName: &lcNameNew, 134 | HealthStatus: &statusUnhealthy, 135 | }) 136 | } 137 | // construct the Group we will pass 138 | asg := &autoscaling.Group{ 139 | DesiredCapacity: &tt.desired, 140 | LaunchConfigurationName: &lcName, 141 | Instances: instances, 142 | AutoScalingGroupName: aws.String("myasg"), 143 | } 144 | ec2Svc := &mockEc2Svc{ 145 | autodescribe: true, 146 | } 147 | desired, terminate, err := calculateAdjustment(kubernetesEnabled, asg, ec2Svc, hostnameMap, tt.readiness, tt.originalDesired, tt.verbose, tt.drain, tt.drainForce) 148 | switch { 149 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 150 | t.Errorf("%d: mismatched errors, actual then expected", i) 151 | t.Logf("%v", err) 152 | t.Logf("%v", tt.err) 153 | case desired != tt.targetDesired: 154 | t.Errorf("%d: Mismatched desired, actual %d expected %d", i, desired, tt.targetDesired) 155 | case terminate != tt.targetTerminate: 156 | t.Errorf("%d: Mismatched terminate ID, actual %s expected %s", i, terminate, tt.targetTerminate) 157 | } 158 | } 159 | } 160 | 161 | func TestAdjust(t *testing.T) { 162 | tests := []struct { 163 | desc string 164 | asgs []string 165 | handler readiness 166 | err error 167 | oldIds map[string][]string 168 | newIds map[string][]string 169 | asgCurrentDesired map[string]int64 170 | originalDesired map[string]int64 171 | newDesired map[string]int64 172 | max map[string]int64 173 | terminate []string 174 | canIncreaseMax bool 175 | persistOriginalDesiredOnTag bool 176 | verbose bool 177 | drain bool 178 | drainForce bool 179 | }{ 180 | { 181 | "2 asgs adjust first run", 182 | []string{"myasg", "anotherasg"}, 183 | nil, 184 | nil, 185 | map[string][]string{ 186 | "myasg": {"1", "2"}, 187 | "anotherasg": {}, 188 | }, 189 | map[string][]string{ 190 | "myasg": {}, 191 | "anotherasg": {"8", "9", "10"}, 192 | }, 193 | map[string]int64{"myasg": 2, "anotherasg": 3}, 194 | map[string]int64{"myasg": 2}, 195 | map[string]int64{"myasg": 3}, 196 | map[string]int64{"myasg": 3, "anotherasg": 4}, 197 | []string{}, 198 | false, 199 | false, 200 | false, 201 | true, 202 | true, 203 | }, 204 | { 205 | "2 asgs adjust in progress", 206 | []string{"myasg", "anotherasg"}, 207 | nil, 208 | nil, 209 | map[string][]string{ 210 | "myasg": {"1"}, 211 | "anotherasg": {}, 212 | }, 213 | map[string][]string{ 214 | "myasg": {"2", "3"}, 215 | "anotherasg": {"8", "9", "10"}, 216 | }, 217 | map[string]int64{"myasg": 2, "anotherasg": 10}, 218 | map[string]int64{"myasg": 2, "anotherasg": 10}, 219 | 
map[string]int64{"myasg": 3}, 220 | map[string]int64{"myasg": 3, "anotherasg": 11}, 221 | []string{}, 222 | false, 223 | false, 224 | false, 225 | true, 226 | true, 227 | }, 228 | { 229 | "2 asgs adjust in progress with ROLLER_ORIGINAL_DESIRED_ON_TAG set to true", 230 | []string{"myasg", "anotherasg"}, 231 | nil, 232 | nil, 233 | map[string][]string{ 234 | "myasg": {"1"}, 235 | "anotherasg": {}, 236 | }, 237 | map[string][]string{ 238 | "myasg": {"2", "3"}, 239 | "anotherasg": {"8", "9", "10"}, 240 | }, 241 | map[string]int64{"myasg": 3, "anotherasg": 3}, 242 | map[string]int64{"myasg": 2, "anotherasg": 3}, 243 | map[string]int64{}, 244 | map[string]int64{"myasg": 3, "anotherasg": 4}, 245 | []string{"1"}, 246 | false, 247 | true, 248 | false, 249 | true, 250 | true, 251 | }, 252 | { 253 | "2 asgs adjust complete", 254 | []string{"myasg", "anotherasg"}, 255 | nil, 256 | nil, 257 | map[string][]string{ 258 | "myasg": {}, 259 | "anotherasg": {}, 260 | }, 261 | map[string][]string{ 262 | "myasg": {"1", "2", "3"}, 263 | "anotherasg": {"8", "9", "10"}, 264 | }, 265 | map[string]int64{"myasg": 2}, 266 | map[string]int64{"myasg": 2}, 267 | map[string]int64{}, 268 | map[string]int64{"myasg": 3}, 269 | []string{}, 270 | false, 271 | false, 272 | false, 273 | true, 274 | true, 275 | }, 276 | { 277 | "2 asgs adjust increase max fail", 278 | []string{"myasg", "anotherasg"}, 279 | nil, 280 | fmt.Errorf("[myasg] error setting desired to 3: unable to increase ASG myasg desired size to 3 as greater than max size 2"), 281 | map[string][]string{ 282 | "myasg": {"1"}, 283 | "anotherasg": {}, 284 | }, 285 | map[string][]string{ 286 | "myasg": {"2", "3"}, 287 | "anotherasg": {"8", "9", "10"}, 288 | }, 289 | map[string]int64{"myasg": 2}, 290 | map[string]int64{"myasg": 2}, 291 | map[string]int64{}, 292 | map[string]int64{"myasg": 2}, 293 | []string{}, 294 | false, 295 | false, 296 | false, 297 | true, 298 | true, 299 | }, 300 | { 301 | "2 asgs adjust increase max succeed", 302 | []string{"myasg", "anotherasg"}, 303 | nil, 304 | nil, 305 | map[string][]string{ 306 | "myasg": {"1"}, 307 | "anotherasg": {}, 308 | }, 309 | map[string][]string{ 310 | "myasg": {"2", "3"}, 311 | "anotherasg": {"8", "9", "10"}, 312 | }, 313 | map[string]int64{"myasg": 2}, 314 | map[string]int64{"myasg": 2}, 315 | map[string]int64{"myasg": 3}, 316 | map[string]int64{"myasg": 2}, 317 | []string{}, 318 | true, 319 | false, 320 | false, 321 | true, 322 | true, 323 | }, 324 | } 325 | 326 | for i, tt := range tests { 327 | t.Run(tt.desc, func(t *testing.T) { 328 | validGroups := map[string]*autoscaling.Group{} 329 | for _, n := range tt.asgs { 330 | name := n 331 | lcName := "lconfig" 332 | oldLcName := fmt.Sprintf("old%s", lcName) 333 | myHealthy := healthy 334 | desired := tt.asgCurrentDesired[name] 335 | max := tt.max[name] 336 | instances := make([]*autoscaling.Instance, 0) 337 | for _, id := range tt.oldIds[name] { 338 | idd := id 339 | instances = append(instances, &autoscaling.Instance{ 340 | InstanceId: &idd, 341 | LaunchConfigurationName: &oldLcName, 342 | HealthStatus: &myHealthy, 343 | }) 344 | } 345 | for _, id := range tt.newIds[name] { 346 | idd := id 347 | instances = append(instances, &autoscaling.Instance{ 348 | InstanceId: &idd, 349 | LaunchConfigurationName: &lcName, 350 | HealthStatus: &myHealthy, 351 | }) 352 | } 353 | // construct the Group we will pass 354 | validGroup := &autoscaling.Group{ 355 | AutoScalingGroupName: &name, 356 | DesiredCapacity: &desired, 357 | Instances: instances, 358 | LaunchConfigurationName: 
&lcName, 359 | MaxSize: &max, 360 | } 361 | 362 | if tt.persistOriginalDesiredOnTag { 363 | if originalDesired, ok := tt.originalDesired[name]; ok { 364 | validGroup.Tags = []*autoscaling.TagDescription{ 365 | { 366 | Key: aws.String(asgTagNameOriginalDesired), 367 | PropagateAtLaunch: aws.Bool(false), 368 | ResourceId: &name, 369 | ResourceType: aws.String("auto-scaling-group"), 370 | Value: aws.String(strconv.FormatInt(originalDesired, 10)), 371 | }, 372 | } 373 | } 374 | } 375 | validGroups[n] = validGroup 376 | } 377 | asgSvc := &mockAsgSvc{ 378 | groups: validGroups, 379 | } 380 | ec2Svc := &mockEc2Svc{ 381 | autodescribe: true, 382 | } 383 | // convert maps from map[string] to map[*string] 384 | originalDesiredPtr := map[*string]int64{} 385 | for k, v := range tt.originalDesired { 386 | ks := k 387 | originalDesiredPtr[&ks] = v 388 | } 389 | newDesiredPtr := map[*string]int64{} 390 | for k, v := range tt.newDesired { 391 | ks := k 392 | newDesiredPtr[&ks] = v 393 | } 394 | err := adjust(kubernetesEnabled, tt.asgs, ec2Svc, asgSvc, tt.handler, tt.originalDesired, tt.persistOriginalDesiredOnTag, tt.canIncreaseMax, tt.verbose, tt.drain, tt.drainForce) 395 | // what were our last calls to each? 396 | switch { 397 | case (err == nil && tt.err != nil) || (err != nil && tt.err == nil) || (err != nil && tt.err != nil && !strings.HasPrefix(err.Error(), tt.err.Error())): 398 | t.Errorf("%d: mismatched errors, actual then expected", i) 399 | t.Logf("%v", err) 400 | t.Logf("%v", tt.err) 401 | } 402 | 403 | // check each svc with its correct calls 404 | desiredCalls := asgSvc.counter.filterByName("SetDesiredCapacity") 405 | if len(desiredCalls) != len(tt.newDesired) { 406 | t.Errorf("%d: Expected %d SetDesiredCapacity calls but had %d", i, len(tt.newDesired), len(desiredCalls)) 407 | } 408 | // sort through by the relevant inputs 409 | for _, d := range desiredCalls { 410 | asg := d.params[0].(*autoscaling.SetDesiredCapacityInput) 411 | name := asg.AutoScalingGroupName 412 | if *asg.DesiredCapacity != tt.newDesired[*name] { 413 | t.Errorf("%d: Mismatched call to set capacity for ASG '%s': actual %d, expected %d", i, *name, *asg.DesiredCapacity, tt.newDesired[*name]) 414 | } 415 | } 416 | // convert list of terminations into map 417 | ids := map[string]bool{} 418 | for _, id := range tt.terminate { 419 | ids[id] = true 420 | } 421 | terminateCalls := asgSvc.counter.filterByName("TerminateInstanceInAutoScalingGroup") 422 | if len(terminateCalls) != len(tt.terminate) { 423 | t.Errorf("%d: Expected %d Terminate calls but had %d", i, len(tt.terminate), len(terminateCalls)) 424 | } 425 | for _, d := range terminateCalls { 426 | in := d.params[0].(*autoscaling.TerminateInstanceInAutoScalingGroupInput) 427 | id := in.InstanceId 428 | if _, ok := ids[*id]; !ok { 429 | t.Errorf("%d: Requested call to terminate instance %s, unexpected", i, *id) 430 | } 431 | } 432 | // check for calls to update the group (e.g. 
to raise max) 433 | updateGroupCalls := asgSvc.counter.filterByName("UpdateAutoScalingGroup") 434 | for k, desired := range tt.newDesired { 435 | if desired > tt.max[k] && len(updateGroupCalls) == 0 { 436 | t.Errorf("%d: Expected call to UpdateAutoScalingGroup to set max but there was none", i) 437 | } 438 | } 439 | }) 440 | } 441 | } 442 | 443 | func TestGroupInstances(t *testing.T) { 444 | runTest := func(t *testing.T, asg *autoscaling.Group, i int, oldIds, newIds []string) { 445 | ec2Svc := &mockEc2Svc{ 446 | autodescribe: true, 447 | } 448 | oldInstances, newInstances, err := groupInstances(asg, ec2Svc, false) 449 | if err != nil { 450 | t.Errorf("unexpected error grouping instances: %v", err) 451 | return 452 | } 453 | oldList := make([]string, 0) 454 | newList := make([]string, 0) 455 | for _, i := range oldInstances { 456 | oldList = append(oldList, *i.InstanceId) 457 | } 458 | for _, i := range newInstances { 459 | newList = append(newList, *i.InstanceId) 460 | } 461 | if !testStringEq(oldList, oldIds) { 462 | t.Errorf("%d: mismatched old Ids. Actual %v, expected %v", i, oldList, oldIds) 463 | } 464 | if !testStringEq(newList, newIds) { 465 | t.Errorf("%d: mismatched new Ids. Actual %v, expected %v", i, newList, newIds) 466 | } 467 | } 468 | tests := []struct { 469 | oldIds []string 470 | newIds []string 471 | }{ 472 | {[]string{"1", "2"}, []string{"3"}}, 473 | {[]string{"1", "2", "3"}, []string{}}, 474 | {[]string{}, []string{"1", "2", "3"}}, 475 | {[]string{}, []string{"1", "2", "$D"}}, 476 | } 477 | t.Run("launchconfiguration", func(t *testing.T) { 478 | for i, tt := range tests { 479 | instances := make([]*autoscaling.Instance, 0) 480 | lcName := "lcname" 481 | lcNameNew := lcName 482 | lcNameOld := fmt.Sprintf("old-%s", lcName) 483 | for _, instance := range tt.oldIds { 484 | id := instance 485 | instances = append(instances, &autoscaling.Instance{ 486 | InstanceId: &id, 487 | LaunchConfigurationName: &lcNameOld, 488 | }) 489 | } 490 | for _, instance := range tt.newIds { 491 | id := instance 492 | instances = append(instances, &autoscaling.Instance{ 493 | InstanceId: &id, 494 | LaunchConfigurationName: &lcNameNew, 495 | }) 496 | } 497 | // construct the Group we will pass 498 | asg := &autoscaling.Group{ 499 | LaunchConfigurationName: &lcName, 500 | Instances: instances, 501 | } 502 | runTest(t, asg, i, tt.oldIds, tt.newIds) 503 | } 504 | }) 505 | t.Run("launchtemplate", func(t *testing.T) { 506 | for i, tt := range tests { 507 | instances := make([]*autoscaling.Instance, 0) 508 | ltName := "lt1" 509 | ltNameNew := ltName 510 | ltNameOld := fmt.Sprintf("old-%s", ltName) 511 | for _, instance := range tt.oldIds { 512 | id := instance 513 | instances = append(instances, &autoscaling.Instance{ 514 | InstanceId: &id, 515 | LaunchTemplate: &autoscaling.LaunchTemplateSpecification{LaunchTemplateName: &ltNameOld}, 516 | }) 517 | } 518 | for _, instance := range tt.newIds { 519 | id := instance 520 | instances = append(instances, &autoscaling.Instance{ 521 | InstanceId: &id, 522 | LaunchTemplate: &autoscaling.LaunchTemplateSpecification{LaunchTemplateName: &ltNameNew}, 523 | }) 524 | } 525 | // construct the Group we will pass 526 | asg := &autoscaling.Group{ 527 | LaunchTemplate: &autoscaling.LaunchTemplateSpecification{LaunchTemplateName: &ltName}, 528 | Instances: instances, 529 | } 530 | runTest(t, asg, i, tt.oldIds, tt.newIds) 531 | } 532 | }) 533 | t.Run("launchtemplatemixedinstances", func(t *testing.T) { 534 | for i, tt := range tests { 535 | instances :=
make([]*autoscaling.Instance, 0) 536 | ltName := "lt1" 537 | ltNameNew := ltName 538 | ltNameOld := fmt.Sprintf("old-%s", ltName) 539 | for _, instance := range tt.oldIds { 540 | id := instance 541 | instances = append(instances, &autoscaling.Instance{ 542 | InstanceId: &id, 543 | LaunchTemplate: &autoscaling.LaunchTemplateSpecification{LaunchTemplateName: &ltNameOld}, 544 | }) 545 | } 546 | for _, instance := range tt.newIds { 547 | id := instance 548 | instances = append(instances, &autoscaling.Instance{ 549 | InstanceId: &id, 550 | LaunchTemplate: &autoscaling.LaunchTemplateSpecification{LaunchTemplateName: &ltNameNew}, 551 | }) 552 | } 553 | // construct the Group we will pass 554 | asg := &autoscaling.Group{ 555 | MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{ 556 | LaunchTemplate: &autoscaling.LaunchTemplate{ 557 | LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{LaunchTemplateName: &ltName}, 558 | }, 559 | }, 560 | Instances: instances, 561 | } 562 | runTest(t, asg, i, tt.oldIds, tt.newIds) 563 | } 564 | }) 565 | 566 | } 567 | 568 | func TestMapInstanceIds(t *testing.T) { 569 | ids := []string{"1", "2", "10"} 570 | instances := make([]*autoscaling.Instance, 0) 571 | for _, i := range ids { 572 | id := i 573 | instances = append(instances, &autoscaling.Instance{ 574 | InstanceId: &id, 575 | }) 576 | } 577 | m := mapInstancesIds(instances) 578 | if !testStringEq(m, ids) { 579 | t.Errorf("mismatched ids. Actual %v, expected %v", m, ids) 580 | } 581 | } 582 | 583 | func TestCompareLaunchTemplateVersions(t *testing.T) { 584 | template := &ec2.LaunchTemplate{ 585 | DefaultVersionNumber: aws.Int64(25), 586 | LatestVersionNumber: aws.Int64(64), 587 | } 588 | tests := []struct { 589 | lt1 *autoscaling.LaunchTemplateSpecification 590 | lt2 *autoscaling.LaunchTemplateSpecification 591 | expected bool 592 | }{ 593 | {nil, nil, true}, 594 | {nil, &autoscaling.LaunchTemplateSpecification{}, false}, 595 | {&autoscaling.LaunchTemplateSpecification{}, nil, false}, 596 | {&autoscaling.LaunchTemplateSpecification{}, &autoscaling.LaunchTemplateSpecification{}, true}, 597 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, &autoscaling.LaunchTemplateSpecification{}, false}, 598 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("26")}, false}, 599 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, true}, 600 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, true}, 601 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("64")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("$Latest")}, true}, 602 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("25")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("$Default")}, true}, 603 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("$Default")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("$Default")}, true}, 604 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("$Latest")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("$Latest")}, true}, 605 | {&autoscaling.LaunchTemplateSpecification{Version: aws.String("$Default")}, &autoscaling.LaunchTemplateSpecification{Version: aws.String("$Latest")}, false}, 606 | } 607 | for i, tt := range tests { 608 |
result := compareLaunchTemplateVersions(template, tt.lt1, tt.lt2) 609 | if result != tt.expected { 610 | t.Errorf("%d: mismatched results, received %v expected %v", i, result, tt.expected) 611 | } 612 | } 613 | } 614 | --------------------------------------------------------------------------------
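
Addendum: the TestCompareLaunchTemplateVersions table above pins down how "$Latest" and "$Default" are expected to resolve against a described launch template (latest = 64, default = 25 in that fixture). The sketch below is illustrative only and is not part of the repository; the helper names resolveLTVersion and sameLTVersion are assumptions, and the repository's own comparison logic (which compares more than the Version field) may differ in detail.

package main

import (
	"strconv"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// resolveLTVersion expands the $Latest and $Default aliases into the concrete
// version numbers reported by the described ec2.LaunchTemplate; any other
// value (including an unset Version) is returned as-is.
func resolveLTVersion(template *ec2.LaunchTemplate, spec *autoscaling.LaunchTemplateSpecification) string {
	switch v := aws.StringValue(spec.Version); v {
	case "$Latest":
		return strconv.FormatInt(aws.Int64Value(template.LatestVersionNumber), 10)
	case "$Default":
		return strconv.FormatInt(aws.Int64Value(template.DefaultVersionNumber), 10)
	default:
		return v
	}
}

// sameLTVersion reports whether two specifications refer to the same concrete
// version, matching the expectations in the test table: "25" equals "$Default"
// and "64" equals "$Latest" for that fixture, while "$Default" and "$Latest"
// resolve to different versions there.
func sameLTVersion(template *ec2.LaunchTemplate, a, b *autoscaling.LaunchTemplateSpecification) bool {
	if a == nil || b == nil {
		// two nil specifications compare equal; a nil and a non-nil one do not
		return a == b
	}
	return resolveLTVersion(template, a) == resolveLTVersion(template, b)
}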