├── Dockerfile ├── Dockerfile.docs ├── Dockerfile.linux-arm ├── Dockerfile.test ├── ISSUE_TEMPLATE.md ├── Jenkinsfile ├── Jenkinsfile.orig ├── LICENSE.md ├── README.md ├── args.go ├── args_test.go ├── build.md ├── docker-compose.yml ├── docs ├── config.md ├── feedback-and-contribution.md ├── index.md ├── license.md ├── production.md ├── release-notes.md ├── tutorial.md └── usage.md ├── go.mod ├── go.sum ├── helm └── docker-flow-swarm-listener │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── deployment.yaml │ ├── ing.yaml │ ├── issuer.yaml │ └── svc.yaml │ └── values.yaml ├── k8s └── docker-flow-swarm-listener │ └── templates │ ├── deployment.yaml │ ├── ing.yaml │ ├── issuer.yaml │ └── svc.yaml ├── main.go ├── metrics └── prometheus.go ├── mkdocs.yml ├── run-tests.sh ├── scripts └── dm-swarm.sh ├── serve.go ├── serve_test.go ├── service ├── cancelmanager.go ├── cancelmanager_test.go ├── docker.go ├── eventnodelistener.go ├── eventnodelistener_test.go ├── eventservicelistener.go ├── eventservicelistener_test.go ├── minify.go ├── minify_test.go ├── mocks.go ├── node.go ├── node_test.go ├── nodecache.go ├── nodecache_test.go ├── nodepoller.go ├── nodepoller_test.go ├── notifier.go ├── notifier_test.go ├── notifydistributor.go ├── notifydistributor_test.go ├── parameters.go ├── parameters_test.go ├── service.go ├── service_test.go ├── servicecache.go ├── servicecache_test.go ├── servicepoller.go ├── servicepoller_test.go ├── swarmlistener.go ├── swarmlistener_test.go ├── task.go ├── task_test.go ├── test_utils.go ├── types.go └── types_test.go └── stack.yml /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.11.1-alpine3.8 AS build 2 | 3 | RUN apk add --no-cache --update git 4 | WORKDIR /develop 5 | COPY . . 6 | RUN CGO_ENABLED=0 GOOS=linux go build -o docker-flow-swarm-listener -ldflags '-w' 7 | 8 | FROM alpine:3.8 9 | LABEL maintainer="Viktor Farcic " 10 | 11 | ENV DF_DOCKER_HOST="unix:///var/run/docker.sock" \ 12 | DF_NOTIFICATION_URL="" \ 13 | DF_RETRY="50" \ 14 | DF_RETRY_INTERVAL="5" \ 15 | DF_NOTIFY_LABEL="com.df.notify" \ 16 | DF_INCLUDE_NODE_IP_INFO="false" \ 17 | DF_NODE_IP_INFO_INCLUDES_TASK_ADDRESS="true" \ 18 | DF_SERVICE_POLLING_INTERVAL="-1" \ 19 | DF_USE_DOCKER_SERVICE_EVENTS="true" \ 20 | DF_NODE_POLLING_INTERVAL="-1" \ 21 | DF_USE_DOCKER_NODE_EVENTS="true" \ 22 | DF_SERVICE_NAME_PREFIX="" \ 23 | DF_NOTIFY_CREATE_SERVICE_METHOD="GET" \ 24 | DF_NOTIFY_REMOVE_SERVICE_METHOD="GET" \ 25 | DF_NOTIFY_CREATE_SERVICE_IMMEDIATELY="false" 26 | 27 | EXPOSE 8080 28 | 29 | CMD ["docker-flow-swarm-listener"] 30 | 31 | HEALTHCHECK --interval=10s --start-period=5s --timeout=5s CMD wget -qO- "http://localhost:8080/v1/docker-flow-swarm-listener/ping" 32 | 33 | COPY --from=build /develop/docker-flow-swarm-listener /usr/local/bin/docker-flow-swarm-listener 34 | RUN chmod +x /usr/local/bin/docker-flow-swarm-listener 35 | -------------------------------------------------------------------------------- /Dockerfile.docs: -------------------------------------------------------------------------------- 1 | FROM cilerler/mkdocs AS build 2 | MAINTAINER Viktor Farcic 3 | ADD . 
/docs 4 | RUN pip install pygments && pip install pymdown-extensions 5 | RUN mkdocs build --site-dir /site 6 | 7 | 8 | FROM nginx:1.11-alpine 9 | MAINTAINER Viktor Farcic 10 | COPY --from=build /site /usr/share/nginx/html -------------------------------------------------------------------------------- /Dockerfile.linux-arm: -------------------------------------------------------------------------------- 1 | FROM arm32v6/alpine 2 | COPY tmp/qemu-arm-static /usr/bin/qemu-arm-static 3 | 4 | MAINTAINER Raymond Mouthaan 5 | 6 | RUN apk --no-cache add --virtual build-dependencies wget ca-certificates 7 | 8 | ENV DF_DOCKER_HOST="unix:///var/run/docker.sock" \ 9 | DF_NOTIFICATION_URL="" \ 10 | DF_RETRY="50" \ 11 | DF_RETRY_INTERVAL="5" \ 12 | DF_NOTIFY_LABEL="com.df.notify" \ 13 | DF_INCLUDE_NODE_IP_INFO="false" \ 14 | DF_SERVICE_POLLING_INTERVAL="-1" \ 15 | DF_USE_DOCKER_SERVICE_EVENTS="true" \ 16 | DF_NODE_POLLING_INTERVAL="-1" \ 17 | DF_USE_DOCKER_NODE_EVENTS="true" \ 18 | DF_SERVICE_NAME_PREFIX="" \ 19 | DF_NOTIFY_CREATE_SERVICE_METHOD="GET" \ 20 | DF_NOTIFY_REMOVE_SERVICE_METHOD="GET" 21 | 22 | COPY docker-flow-swarm-listener_linux_arm /usr/local/bin/docker-flow-swarm-listener 23 | RUN chmod +x /usr/local/bin/docker-flow-swarm-listener 24 | 25 | HEALTHCHECK --interval=5s --start-period=3s --timeout=5s CMD wget -qO- "http://localhost:8080/v1/docker-flow-swarm-listener/ping" 26 | 27 | EXPOSE 8080 28 | 29 | CMD ["docker-flow-swarm-listener"] 30 | -------------------------------------------------------------------------------- /Dockerfile.test: -------------------------------------------------------------------------------- 1 | FROM golang:1.11.1-alpine3.8 2 | 3 | RUN apk add --no-cache gcc musl-dev openssl git go expect curl docker 4 | 5 | COPY . /src 6 | WORKDIR /src 7 | RUN chmod +x /src/run-tests.sh 8 | 9 | CMD ["sh", "-c", "/src/run-tests.sh"] 10 | -------------------------------------------------------------------------------- /ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 11 | 12 | **Description** 13 | 14 | 17 | 18 | **Steps to reproduce the issue:** 19 | 1. 20 | 2. 21 | 3. 22 | 23 | **Describe the results you received:** 24 | 25 | 26 | **Describe the results you expected:** 27 | 28 | 29 | **Additional information you deem important (e.g. 
issue happens only occasionally):** 30 | 31 | 32 | **Additional environment details (AWS, VirtualBox, physical, etc.):** 33 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | import java.text.SimpleDateFormat 2 | 3 | pipeline { 4 | agent { 5 | label "test" 6 | } 7 | options { 8 | buildDiscarder(logRotator(numToKeepStr: '2')) 9 | disableConcurrentBuilds() 10 | } 11 | stages { 12 | stage("build") { 13 | steps { 14 | script { 15 | def dateFormat = new SimpleDateFormat("yy.MM.dd") 16 | currentBuild.displayName = dateFormat.format(new Date()) + "-" + env.BUILD_NUMBER 17 | } 18 | dfBuild2("docker-flow-swarm-listener", "thomasjpfan/gox-build:0.1.1-1.11.0-alpine3.8") 19 | sh "docker-compose run --rm tests" 20 | } 21 | } 22 | stage("release") { 23 | when { 24 | branch "master" 25 | } 26 | steps { 27 | dfRelease2("docker-flow-swarm-listener") 28 | dfReleaseGithub2("docker-flow-swarm-listener") 29 | } 30 | } 31 | stage("deploy") { 32 | when { 33 | branch "master" 34 | } 35 | agent { 36 | label "prod" 37 | } 38 | steps { 39 | sh "helm upgrade -i docker-flow-swarm-listener helm/docker-flow-swarm-listener --namespace df --set image.tag=${currentBuild.displayName}" 40 | } 41 | } 42 | } 43 | post { 44 | always { 45 | sh "docker system prune -f" 46 | } 47 | failure { 48 | slackSend( 49 | color: "danger", 50 | message: "${env.JOB_NAME} failed: ${env.RUN_DISPLAY_URL}" 51 | ) 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /Jenkinsfile.orig: -------------------------------------------------------------------------------- 1 | import java.text.SimpleDateFormat 2 | 3 | pipeline { 4 | agent { 5 | label "test" 6 | } 7 | options { 8 | buildDiscarder(logRotator(numToKeepStr: '2')) 9 | disableConcurrentBuilds() 10 | } 11 | stages { 12 | stage("build") { 13 | steps { 14 | script { 15 | def dateFormat = new SimpleDateFormat("yy.MM.dd") 16 | currentBuild.displayName = dateFormat.format(new Date()) + "-" + env.BUILD_NUMBER 17 | } 18 | dfBuild2("docker-flow-swarm-listener") 19 | sh "docker-compose run --rm tests" 20 | } 21 | } 22 | stage("release") { 23 | when { 24 | branch "master" 25 | } 26 | steps { 27 | dfRelease2("docker-flow-swarm-listener") 28 | dfReleaseGithub2("docker-flow-swarm-listener") 29 | } 30 | } 31 | stage("deploy") { 32 | when { 33 | branch "master" 34 | } 35 | agent { 36 | label "prod" 37 | } 38 | steps { 39 | dfDeploy2("docker-flow-swarm-listener", "swarm-listener_swarm-listener", "swarm-listener_docs") 40 | dfDeploy2("docker-flow-swarm-listener", "monitor_swarm-listener", "") 41 | } 42 | } 43 | } 44 | post { 45 | always { 46 | sh "docker system prune -f" 47 | } 48 | failure { 49 | slackSend( 50 | color: "danger", 51 | message: "${env.JOB_NAME} failed: ${env.RUN_DISPLAY_URL}" 52 | ) 53 | } 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Viktor Farcic 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons 
to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Docker Flow Swarm Listener 2 | 3 | [![GitHub release](https://img.shields.io/github/release/docker-flow/docker-flow-swarm-listener.svg)]() 4 | [![license](https://img.shields.io/github/license/docker-flow/docker-flow-swarm-listener.svg)]() 5 | [![Docker Pulls](https://img.shields.io/docker/pulls/vfarcic/docker-flow-swarm-listener.svg)]() 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/docker-flow/docker-flow-swarm-listener)](https://goreportcard.com/report/github.com/docker-flow/docker-flow-swarm-listener) 7 | 8 | The goal of the *Docker Flow Swarm Listener* project is to listen to Docker Swarm events and send requests when a change occurs. At the moment, the only supported option is to send a notification when a new service is created or updated, or an existing service is removed from the cluster. More extensive feature support is coming soon. 9 | 10 | Supported architectures are: 11 | - linux-amd64 12 | - linux-arm 13 | 14 | Please visit the **[project documentation](http://swarmlistener.dockerflow.com)** for more info or join the #df-swarm-listener Slack channel in [DevOps20](http://slack.devops20toolkit.com/) if you have any questions, suggestions, or problems.
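A minimal deployment, taken from the Tutorial section of the documentation, looks as follows. The notification URLs are only examples; point them at whatever services should receive the requests.

```bash
docker service create --name swarm-listener \
    --network proxy \
    --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
    -e DF_NOTIFY_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \
    -e DF_NOTIFY_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \
    --constraint 'node.role==manager' \
    dockerflow/docker-flow-swarm-listener
```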
15 | 16 | Buy Me a Coffee at ko-fi.com 17 | -------------------------------------------------------------------------------- /args.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "strconv" 6 | ) 7 | 8 | type args struct { 9 | ServicePollingInterval int 10 | NodePollingInterval int 11 | Retry int 12 | RetryInterval int 13 | } 14 | 15 | func getArgs() *args { 16 | return &args{ 17 | ServicePollingInterval: getValue(-1, "DF_SERVICE_POLLING_INTERVAL"), 18 | NodePollingInterval: getValue(-1, "DF_NODE_POLLING_INTERVAL"), 19 | Retry: getValue(1, "DF_RETRY"), 20 | RetryInterval: getValue(0, "DF_RETRY_INTERVAL"), 21 | } 22 | } 23 | 24 | func getValue(defValue int, varName string) int { 25 | value := defValue 26 | if len(os.Getenv(varName)) > 0 { 27 | value, _ = strconv.Atoi(os.Getenv(varName)) 28 | } 29 | return value 30 | } 31 | -------------------------------------------------------------------------------- /args_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "math/rand" 5 | "os" 6 | "strconv" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/suite" 10 | ) 11 | 12 | type ArgsTestSuite struct { 13 | suite.Suite 14 | serviceName string 15 | } 16 | 17 | func TestArgsUnitTestSuite(t *testing.T) { 18 | s := new(ArgsTestSuite) 19 | 20 | suite.Run(t, s) 21 | } 22 | 23 | // GetArgs 24 | 25 | func (s *ArgsTestSuite) Test_GetArgs_ReturnsDefaultValues() { 26 | args := getArgs() 27 | 28 | s.Equal(1, args.Retry) 29 | s.Equal(0, args.RetryInterval) 30 | } 31 | 32 | func (s *ArgsTestSuite) Test_GetArgs_ReturnsRetryFromEnv() { 33 | expected := rand.Int() 34 | intervalOrig := os.Getenv("DF_RETRY") 35 | defer func() { os.Setenv("DF_RETRY", intervalOrig) }() 36 | os.Setenv("DF_RETRY", strconv.Itoa(expected)) 37 | 38 | args := getArgs() 39 | 40 | s.Equal(expected, args.Retry) 41 | } 42 | 43 | func (s *ArgsTestSuite) Test_GetArgs_ReturnsRetryIntervalFromEnv() { 44 | expected := rand.Int() 45 | intervalOrig := os.Getenv("DF_RETRY_INTERVAL") 46 | defer func() { os.Setenv("DF_RETRY_INTERVAL", intervalOrig) }() 47 | os.Setenv("DF_RETRY_INTERVAL", strconv.Itoa(expected)) 48 | 49 | args := getArgs() 50 | 51 | s.Equal(expected, args.RetryInterval) 52 | } 53 | -------------------------------------------------------------------------------- /build.md: -------------------------------------------------------------------------------- 1 | ## Automated Test 2 | 3 | ```bash 4 | go test --cover 5 | ``` 6 | 7 | ## Build 8 | 9 | ```bash 10 | docker run --rm -v $PWD:/usr/src/myapp -w /usr/src/myapp -v /tmp/linux-go:/go golang:1.9 sh -c "go get -d -v -t && CGO_ENABLED=0 GOOS=linux go build -v -o docker-flow-swarm-listener" 11 | 12 | docker build -t dockerflow/docker-flow-swarm-listener:latest . 
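# CGO_ENABLED=0 in the build step above produces a statically linked binary,
# so the resulting image can run it on a minimal Alpine base without a C library.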
13 | ``` 14 | ## Publish 15 | 16 | ```bash 17 | VERSION=0.7 18 | 19 | docker tag dockerflow/docker-flow-swarm-listener:latest dockerflow/docker-flow-swarm-listener:$VERSION 20 | 21 | docker push dockerflow/docker-flow-swarm-listener:$VERSION 22 | 23 | docker push dockerflow/docker-flow-swarm-listener:latest 24 | ``` 25 | 26 | ## Manual Tests 27 | 28 | ```bash 29 | docker-machine create -d virtualbox test 30 | 31 | eval $(docker-machine env test) 32 | 33 | docker swarm init --advertise-addr $(docker-machine ip test) 34 | 35 | docker run --rm -v $PWD:/usr/src/myapp -w /usr/src/myapp -v go:/go golang:1.9 bash -c "go get -d -v -t && go build -v -o docker-flow-swarm-listener" 36 | 37 | docker build -t dockerflow/docker-flow-swarm-listener:beta . 38 | 39 | docker network create --driver overlay proxy 40 | 41 | docker service create --name swarm-listener \ 42 | --network proxy \ 43 | --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ 44 | -e DF_NOTIFY_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \ 45 | -e DF_NOTIFY_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \ 46 | dockerflow/docker-flow-swarm-listener:beta 47 | 48 | docker service create --name go-demo-db \ 49 | --network proxy \ 50 | mongo 51 | 52 | docker service create --name go-demo \ 53 | -e DB=go-demo-db \ 54 | --network proxy \ 55 | -l com.df.notify=true \ 56 | -l com.df.servicePath=/demo \ 57 | -l com.df.port=8080 \ 58 | vfarcic/go-demo 59 | 60 | docker service create --name util \ 61 | --network proxy \ 62 | alpine sleep 1000000 63 | 64 | docker service create --name proxy \ 65 | -p 80:80 \ 66 | -p 443:443 \ 67 | -p 8080:8080 \ 68 | --network proxy \ 69 | t dockerflow/docker-flow-proxy 70 | 71 | docker service ls 72 | 73 | PROXY_ID=$(docker ps -q --filter "ancestor=dockerflow/docker-flow-proxy") 74 | 75 | docker exec -it $PROXY_ID cat /cfg/haproxy.cfg 76 | 77 | UTIL_ID=$(docker ps -q --filter "ancestor=alpine") 78 | 79 | docker exec -it $UTIL_ID apk add --update drill 80 | 81 | docker exec -it $UTIL_ID apk add --update curl 82 | 83 | docker exec -it $UTIL_ID drill swarm-listener 84 | 85 | docker exec -it $UTIL_ID curl swarm-listener:8080/v1/docker-flow-swarm-listener/notify-services 86 | 87 | docker exec -it $PROXY_ID cat /cfg/haproxy.cfg 88 | 89 | DFSL_ID=$(docker ps -q -f ancestor=dockerflow/docker-flow-swarm-listener) 90 | 91 | docker logs $DFSL_ID 92 | 93 | DFP_ID=$(docker ps -q -f ancestor=dockerflow/docker-flow-proxy) 94 | 95 | docker exec -it $DFP_ID cat /cfg/haproxy.cfg 96 | 97 | docker service rm go-demo 98 | 99 | docker logs $DFSL_ID 100 | 101 | docker exec -it $DFP_ID cat /cfg/haproxy.cfg 102 | 103 | docker service create --name go-demo \ 104 | -e DB=go-demo-db \ 105 | --network proxy \ 106 | -l com.df.notify=true \ 107 | -l com.df.servicePath=/demo \ 108 | -l com.df.port=8080 \ 109 | vfarcic/go-demo 110 | 111 | docker logs $DFSL_ID 112 | 113 | docker exec -it $DFP_ID cat /cfg/haproxy.cfg 114 | 115 | docker service rm proxy swarm-listener go-demo go-demo-db 116 | 117 | docker network rm proxy 118 | ``` 119 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | 4 | services: 5 | 6 | tests: 7 | image: dockerflow/docker-flow-swarm-listener-test 8 | volumes: 9 | - /var/run/docker.sock:/var/run/docker.sock 10 | - $PWD:/src 11 | networks: 12 | - dfsl_network 13 | 14 | docs: 15 | image: cilerler/mkdocs 16 | 
volumes: 17 | - .:/docs 18 | command: bash -c "pip install pygments && pip install pymdown-extensions && mkdocs build" 19 | 20 | tests_local: 21 | image: dockerflow/docker-flow-swarm-listener-test 22 | volumes: 23 | - /var/run/docker.sock:/var/run/docker.sock 24 | - $PWD:/src 25 | networks: 26 | - dfsl_network 27 | 28 | networks: 29 | dfsl_network: 30 | -------------------------------------------------------------------------------- /docs/config.md: -------------------------------------------------------------------------------- 1 | # Configuring Docker Flow Swarm Listener 2 | 3 | The following environment variables can be used when creating the `swarm-listener` service. 4 | 5 | |Name |Description | 6 | |-------------------|-------------------------------------------------------------------------------| 7 | |DF_DOCKER_HOST |Path to the Docker socket
**Default**: `unix:///var/run/docker.sock` | 8 | |DF_NOTIFY_LABEL |Label that is used to distinguish whether a service should trigger a notification
**Default**: `com.df.notify`
**Example**: `com.df.notifyDev`| 9 | |DF_NOTIFY_CREATE_SERVICE_URL|Comma separated list of URLs that will be used to send notification requests when a service is created. If the `com.df.notifyService` service label is present, only URLs related to that service will be used. The `com.df.notifyService` label can have multiple values separated with a comma (`,`).
**Example**: `url1,url2`| 10 | |DF_NOTIFY_REMOVE_SERVICE_URL|Comma separated list of URLs that will be used to send notification requests when a service is removed.
**Example**: `url1,url2`| 11 | |DF_NOTIFY_CREATE_SERVICE_METHOD|Comma separated list of HTTP methods used to send requests to its corresponding `DF_NOTIFY_CREATE_SERVICE_URL`. If the list of HTTP methods is shorter than the list of create service URLs, the last HTTP method in the list will be used for the remaining URLs.
**Default**: `GET`
**Example**: `GET,POST`| 12 | |DF_NOTIFY_REMOVE_SERVICE_METHOD|Comma separated list of HTTP methods used to send requests to its corresponding `DF_NOTIFY_REMOVE_SERVICE_URL`. If the list of HTTP methods is shorter than the list of remove service URLs, the last HTTP method in the list will be used for the remaining URLs.
**Default**: `GET`
**Example**: `GET,POST`| 13 | |DF_INCLUDE_NODE_IP_INFO|Include node and IP information for the service in notifications.
**Default**:`false`| 14 | |DF_NODE_IP_INFO_INCLUDES_TASK_ADDRESS|Include the task IP address when `DF_INCLUDE_NODE_IP_INFO` is true.
**Default**: `true`| 15 | |DF_NOTIFY_CREATE_NODE_URL |Comma separated list of URLs that will be used to send notification requests when a node is created or updated.
**Example**: `url1,url2`| 16 | |DF_NOTIFY_REMOVE_NODE_URL |Comma separated list of URLs that will be used to send notification requests when a node is removed.
**Example**: `url1,url2`| 17 | |DF_RETRY |Number of notification request retries
**Default**: `50`
**Example**: `100`| 18 | |DF_RETRY_INTERVAL |Time between each notification request retry, in seconds.
**Default**: `5`
**Example**:`10`| 19 | |DF_SERVICE_POLLING_INTERVAL |Time between each service polling request, in seconds. When this value is less than or equal to zero, service polling is disabled.
**Default**: `-1`
**Example**:`20`| 20 | |DF_USE_DOCKER_SERVICE_EVENTS|Use the Docker events API to get service updates.
**Default**:`true`| 21 | |DF_NODE_POLLING_INTERVAL |Time between each node polling request, in seconds. When this value is less than or equal to zero, node polling is disabled.
**Default**: `-1`
**Example**:`20`| 22 | |DF_USE_DOCKER_NODE_EVENTS|Use the Docker events API to get node updates.
**Default**:`true`| 23 | |DF_SERVICE_NAME_PREFIX|Value to prefix service names with.
**Example**:`dev1`| 24 | |DF_NOTIFY_CREATE_SERVICE_IMMEDIATELY|Sends the create service notification without waiting for the service to converge. After the service converges, another create notification will be sent out.
**Default**: `false`| 25 | 26 | ## Configuring Notification URLs with Docker Secrets 27 | 28 | *Docker Flow Swarm Listener*'s notification URLs can be set with Docker Secrets. Secrets with names `df_notify_create_service_url`, 29 | `df_notify_remove_service_url`, `df_notify_create_node_url`, and `df_notify_remove_node_url` are used, in addition to their 30 | corresponding environment variables, to configure notification URLs. The secrets must be a comma separated list of URLs. 31 | -------------------------------------------------------------------------------- /docs/feedback-and-contribution.md: -------------------------------------------------------------------------------- 1 | # Feedback and Contribution 2 | 3 | The *Docker Flow Swarm Listener* project welcomes, and depends on, contributions from developers and users in the open source community. Contributions can be made in a number of ways, a few examples are: 4 | 5 | * Code patches or new features via pull requests 6 | * Documentation improvements 7 | * Bug reports and patch reviews 8 | 9 | ## Reporting an Issue 10 | 11 | Feel free to [create a new issue](https://github.com/docker-flow/docker-flow-swarm-listener/issues). Include as much detail as you can. 12 | 13 | If an issue is a bug, please provide steps to reproduce it. 14 | 15 | If an issue is a request for a new feature, please specify the use-case behind it. 16 | 17 | ## Discussion 18 | 19 | Please join the [DevOps20](http://slack.devops20toolkit.com/) Slack channel if you'd like to discuss the project or have a problem you'd like us to solve. 20 | 21 | ## Contributing To The Project 22 | 23 | I encourage you to contribute to the *Docker Flow Swarm Listener* project. 24 | 25 | The project is developed using a *Test Driven Development* and *Continuous Deployment* process. Tests are divided into unit and integration tests. Every code file has an equivalent with tests (e.g. `reconfigure.go` and `reconfigure_test.go`). Ideally, I expect you to write a test that defines what should be developed, run all the unit tests and confirm that the test fails, write just enough code to make the test pass, and repeat. If you are new to testing, feel free to create a pull request indicating that tests are missing and I'll help you out. 26 | 27 | Once you finish implementing a new feature or fixing a bug, run the *Complete Cycle*. You'll find the instructions below. 28 | 29 | ### Repository 30 | 31 | Fork [docker-flow-swarm-listener](https://github.com/docker-flow/docker-flow-swarm-listener). 32 | 33 | ### Unit Testing 34 | 35 | !!! info 36 | *Docker Flow Swarm Listener* supports golang 1.11 modules. 37 | 38 | ```bash 39 | go get -d -v -t 40 | 41 | go test ./... -cover -run UnitTest 42 | ``` 43 | 44 | ### Building 45 | 46 | ```bash 47 | export DOCKER_HUB_USER=[...] # Change to your user in hub.docker.com 48 | 49 | docker image build -t $DOCKER_HUB_USER/docker-flow-swarm-listener:beta . 50 | 51 | docker image push $DOCKER_HUB_USER/docker-flow-swarm-listener:beta 52 | ``` 53 | 54 | ### Pull Request 55 | 56 | Once the feature is done, create a pull request.
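The same suite that CI runs can also be executed locally through the `tests` service from `docker-compose.yml`. A sketch, assuming a local Docker daemon (the suite mounts `/var/run/docker.sock` and the source directory):

```bash
# Build the test image defined in Dockerfile.test
docker image build -f Dockerfile.test -t dockerflow/docker-flow-swarm-listener-test .

# Run the tests; inside the container this executes run-tests.sh
# (go test --cover ./... -p 1)
docker-compose run --rm tests
```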
57 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Docker Flow Swarm Listener 2 | 3 | [![GitHub release](https://img.shields.io/github/release/docker-flow/docker-flow-swarm-listener.svg)]() 4 | [![license](https://img.shields.io/github/license/docker-flow/docker-flow-swarm-listener.svg)]() 5 | [![Docker Pulls](https://img.shields.io/docker/pulls/vfarcic/docker-flow-swarm-listener.svg)]() 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/docker-flow/docker-flow-swarm-listener)](https://goreportcard.com/report/github.com/docker-flow/docker-flow-swarm-listener) 7 | 8 | The goal of the *Docker Flow Swarm Listener* project is to listen to Docker Swarm events and send requests when a change occurs. At the moment, the only supported option is to send a notification when a new service is created or updated, or an existing service is removed from the cluster. More extensive feature support is coming soon. 9 | 10 | [Feedback and contributions](feedback-and-contribution.md) are appreciated. 11 | 12 | *Docker Flow Swarm Listener* examples can be found in the [Tutorial](tutorial.md) section. 13 | 14 | Please visit the [Configuring Docker Flow Swarm Listener](config.md) section for more details. 15 | 16 | Please join the #df-swarm-listener Slack channel in [DevOps20](http://slack.devops20toolkit.com/) if you have any questions, suggestions, or problems. 17 | 18 | Buy Me a Coffee at ko-fi.com 19 | -------------------------------------------------------------------------------- /docs/license.md: -------------------------------------------------------------------------------- 1 | # Docker Flow Swarm Listener License (MIT) 2 | 3 | Copyright (c) 2017 Viktor Farcic 4 | 5 | The MIT License (MIT) 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy 8 | of this software and associated documentation files (the "Software"), to deal 9 | in the Software without restriction, including without limitation the rights 10 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | copies of the Software, and to permit persons to whom the Software is 12 | furnished to do so, subject to the following conditions: 13 | 14 | The above copyright notice and this permission notice shall be included in all 15 | copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | SOFTWARE. 
-------------------------------------------------------------------------------- /docs/production.md: -------------------------------------------------------------------------------- 1 | # Production 2 | 3 | ## Deploy 4 | 5 | ```bash 6 | docker network create \ 7 | --driver overlay \ 8 | proxy 9 | 10 | curl -o swarm-listener.yml \ 11 | https://raw.githubusercontent.com/docker-flow/docker-flow-swarm-listener/master/stack.yml 12 | 13 | docker stack deploy \ 14 | -c swarm-listener.yml \ 15 | swarm-listener 16 | ``` -------------------------------------------------------------------------------- /docs/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | Please visit [project releases](https://github.com/docker-flow/docker-flow-swarm-listener/releases). -------------------------------------------------------------------------------- /docs/tutorial.md: -------------------------------------------------------------------------------- 1 | # Docker Flow Swarm Listener Walkthrough 2 | 3 | This tutorial will walk you through some of the most common use cases. 4 | 5 | !!! info 6 | If you are a Windows user, please run all the examples from *Git Bash* (installed through *Docker Toolbox* or *Git*). 7 | 8 | ## Sending Notification Requests On Service Creation and Removal 9 | 10 | The example that follows will use the *Swarm Listener* to reconfigure the [Docker Flow: Proxy](https://github.com/docker-flow/docker-flow-proxy) whenever a new service is created. 11 | 12 | I will assume that you already have a Swarm cluster set up with Docker Machines. If that's not the case, feel free to use the [scripts/dm-swarm.sh](https://github.com/docker-flow/docker-flow-swarm-listener/blob/master/scripts/dm-swarm.sh) script to create a three-node cluster. 13 | 14 | Let's run the Proxy service. We'll use it as a way to demonstrate how *Swarm Listener* works. 15 | 16 | ```bash 17 | docker network create --driver overlay proxy 18 | 19 | docker service create --name proxy \ 20 | -p 80:80 \ 21 | -p 443:443 \ 22 | -p 8080:8080 \ 23 | --network proxy \ 24 | dockerflow/docker-flow-proxy 25 | ``` 26 | 27 | Next, we'll create the `swarm-listener` service. 28 | 29 | ```bash 30 | docker service create --name swarm-listener \ 31 | --network proxy \ 32 | --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \ 33 | -e DF_NOTIFY_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \ 34 | -e DF_NOTIFY_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \ 35 | --constraint 'node.role==manager' \ 36 | dockerflow/docker-flow-swarm-listener 37 | ``` 38 | 39 | The service is attached to the proxy network (just like the `proxy` service), mounts the Docker socket, and declares the environment variables `DF_NOTIFY_CREATE_SERVICE_URL` and `DF_NOTIFY_REMOVE_SERVICE_URL`. We'll see the purpose of the variables soon. 40 | 41 | Now we can deploy a service that will trigger the listener. 42 | 43 | ```bash 44 | docker service create --name go-demo-db \ 45 | --network proxy \ 46 | mongo 47 | 48 | docker service create --name go-demo \ 49 | -e DB=go-demo-db \ 50 | --network proxy \ 51 | -l com.df.notify=true \ 52 | -l com.df.servicePath=/demo \ 53 | -l com.df.port=8080 \ 54 | vfarcic/go-demo 55 | ``` 56 | 57 | Please note that we declared the label `com.df.notify`. Only services with this label (it can hold any value) will be eligible to receive notifications through *Docker Flow: Swarm Listener*. 
We also declared a couple of other labels (`com.df.servicePath` and `com.df.port`). 58 | 59 | Before proceeding, we should wait until all the services are up and running. Please use the `docker service ls` command to check the status. 60 | 61 | Please output the *Docker Flow: Swarm Listener* logs. If you're using Docker Machine, the command is as follows. 62 | 63 | ```bash 64 | docker service logs swarm-listener 65 | ``` 66 | 67 | !!! warning 68 | At the time of this writing, the `docker service logs` command is still in an experimental stage. It might not work if you used your own cluster without experimental features enabled. 69 | 70 | The output is as follows (timestamps are removed for brevity). 71 | 72 | ``` 73 | Starting Docker Flow: Swarm Listener 74 | Starting iterations 75 | Sending service created notification to http://proxy:8080/v1/docker-flow-proxy/reconfigure?port=8080&serviceName=go-demo&servicePath=%2Fdemo 76 | ``` 77 | 78 | As you can see, the listener detected that the `go-demo` service has the label `com.df.notify` and sent the notification request. The address of the notification request is the value of the environment variable `DF_NOTIFY_CREATE_SERVICE_URL` declared in the `swarm-listener` service. The parameters are a combination of the service name and all the labels prefixed with `com.df.`. 79 | 80 | You might have seen a few entries stating that the notification request failed and will be retried. *Docker Flow: Swarm Listener* has a built-in retry mechanism. As long as the output message does not start with `ERROR:`, the notification will reach the destination. Please see the [configuration](config.md) page for more info. 81 | 82 | Let's see what happens if a service is removed. 83 | 84 | ```bash 85 | docker service rm go-demo 86 | 87 | docker service logs swarm-listener 88 | ``` 89 | 90 | The output of the `docker service logs` command is as follows (timestamps are removed for brevity). 91 | 92 | ```bash 93 | Starting Docker Flow: Swarm Listener 94 | Starting iterations 95 | Sending service created notification to http://proxy:8080/v1/docker-flow-proxy/reconfigure?port=8080&serviceName=go-demo&servicePath=%2Fdemo 96 | Sending service removed notification to http://proxy:8080/v1/docker-flow-proxy/remove?distribute=true&serviceName=go-demo 97 | ``` 98 | 99 | As you can see, the last output entry was the acknowledgment that the listener detected that the service was removed and that the notification was sent. 100 | 101 | ## Sending Notification Requests To Multiple Destinations 102 | 103 | *Docker Flow Swarm Listener* accepts multiple notification URLs as well. That can come in handy when you want to send notification requests to multiple services at the same time. 104 | 105 | We'll start by recreating the `go-demo` service we removed a few moments ago. 106 | 107 | ```bash 108 | docker service create --name go-demo \ 109 | -e DB=go-demo-db \ 110 | --network proxy \ 111 | -l com.df.notify=true \ 112 | -l com.df.servicePath=/demo \ 113 | -l com.df.port=8080 \ 114 | vfarcic/go-demo 115 | ``` 116 | 117 | The environment variables `DF_NOTIFY_CREATE_SERVICE_URL` and `DF_NOTIFY_REMOVE_SERVICE_URL` allow multiple values separated with a comma (*,*). We can, for example, configure the `swarm-listener` service to send notifications both to the *proxy* and the *go-demo* services. Since the `swarm-listener` service is already running, we'll update it with the new values. 
118 | 119 | ```bash 120 | docker service update \ 121 | --env-add DF_NOTIFY_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure,http://go-demo:8080/demo/hello \ 122 | --env-add DF_NOTIFY_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove,http://go-demo:8080/demo/hello \ 123 | swarm-listener 124 | ``` 125 | 126 | Now we can consult the logs and confirm that the request was sent to both addresses. 127 | 128 | ```bash 129 | docker service logs swarm-listener 130 | ``` 131 | 132 | The output is as follows (timestamps are removed for brevity). 133 | 134 | ```bash 135 | Starting Docker Flow: Swarm Listener 136 | Starting iterations 137 | Sending service created notification to http://proxy:8080/v1/docker-flow-proxy/reconfigure?port=8080&serviceName=go-demo&servicePath=%2Fdemo 138 | Sending service created notification to http://go-demo:8080/demo/hello?port=8080&serviceName=go-demo&servicePath=%2Fdemo 139 | ``` 140 | 141 | As you can see, the notification requests were sent both to the `proxy` and `go-demo` addresses. 142 | -------------------------------------------------------------------------------- /docs/usage.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | ## Notification Format 4 | 5 | *Docker Flow Swarm Listener* sends GET notifications to configured URLs when a service or node is created, updated, or removed. Please consult the [configuration](config.md) page on how to configure the URLs. 6 | 7 | ### Service Notification 8 | 9 | When a service is created or updated, a notification will be sent to **[DF_NOTIFY_CREATE_SERVICE_URL]** with the following parameters: 10 | 11 | | Query | Description | Example | 12 | |-------------|------------------------------------------------------------------------|---------| 13 | | serviceName | Name of the service. If `com.df.shortName` is true and the service is part of a stack, the stack name will be trimmed off. | `go-demo` | 14 | | replicas | Number of replicas of the service. If the service is global, this parameter will be excluded.| `3` | 15 | | nodeInfo | An array of nodes, each with its IP on an overlay network. The network is defined with the label `com.df.scrapeNetwork`. This parameter is included when the environment variable `DF_INCLUDE_NODE_IP_INFO` is true. | `[["node-3","10.0.0.23", "node-3id"], ["node-2", "10.0.0.22", "node-2id"]]` | 16 | 17 | All service labels prefixed by `com.df.` will be added to the notification. For example, a service with label `com.df.hello=world` will translate to parameter: `hello=world`. 18 | 19 | When a service is removed, a notification will be sent to **[DF_NOTIFY_REMOVE_SERVICE_URL]**. The `serviceName` parameter and `com.df.` labels are included in service removal notifications. 20 | 21 | ### Node Notification 22 | 23 | When a node is created or updated, a notification will be sent to **[DF_NOTIFY_CREATE_NODE_URL]** with the following parameters: 24 | 25 | | Query | Description | Example | 26 | |-------|-------------|---------| 27 | | id | The ID of the node given by Docker | `2pe2xpkrx780xrhujws42a73w` | 28 | | hostname | Hostname of the node | `ap1.hostname.com` | 29 | | address | Address of the node | `10.0.0.1` | 30 | | versionIndex | The version index of the node | `24` | 31 | | state | State of the node. [`unknown`, `down`, `ready`, `disconnected`] | `down` | 32 | | role | Role of the node. [`worker`, `manager`] | `worker` | 33 | | availability | Availability of the node. 
[`active`, `pause`, `drain`]| `active` | 34 | 35 | All node labels prefixed by `com.df.` will be added to the notification. For example, a node with label `com.df.hello=world` will translate to parameter: `hello=world`. 36 | 37 | When a node is removed, a notification will be sent to **[DF_NOTIFY_REMOVE_NODE_URL]**. Only the `id`, `hostname`, and `address` parameters are included. 38 | 39 | ## API 40 | 41 | *Docker Flow Swarm Listener* exposes an API to query services and nodes and to send notifications. 42 | 43 | ### Get Services 44 | 45 | The *Get Services* endpoint is used to query all running services with the `DF_NOTIFY_LABEL` label. A `GET` request to **[SWARM_LISTENER_IP]:[SWARM_LISTENER_PORT]/v1/docker-flow-swarm-listener/get-services** returns a JSON representation of these services. 46 | 47 | ### Notify Services 48 | 49 | *DFSL* normally sends out notifications when a service is created, updated, or removed. The *Notify Services* endpoint will force *DFSL* to send out notifications for all running services with the `DF_NOTIFY_LABEL` label. A `GET` request to **[SWARM_LISTENER_IP]:[SWARM_LISTENER_PORT]/v1/docker-flow-swarm-listener/notify-services** sends out the notifications. 50 | 51 | ### Get Nodes 52 | 53 | The *Get Nodes* endpoint is used to query all nodes. A `GET` request to **[SWARM_LISTENER_IP]:[SWARM_LISTENER_PORT]/v1/docker-flow-swarm-listener/get-nodes** returns a JSON representation of these nodes. 54 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/docker-flow/docker-flow-swarm-listener 2 | 3 | require ( 4 | github.com/Microsoft/go-winio v0.4.11 // indirect 5 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect 6 | github.com/davecgh/go-spew v1.1.1 // indirect 7 | github.com/docker/distribution v2.7.0-rc.0.0.20181024170156-93e082742a00+incompatible // indirect 8 | github.com/docker/docker v0.7.3-0.20181027010111-b8e87cfdad8d 9 | github.com/docker/go-connections v0.4.0 // indirect 10 | github.com/docker/go-units v0.3.3 // indirect 11 | github.com/gogo/protobuf v1.1.1 // indirect 12 | github.com/golang/protobuf v1.2.0 // indirect 13 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 14 | github.com/opencontainers/go-digest v1.0.0-rc1 // indirect 15 | github.com/opencontainers/image-spec v1.0.1 // indirect 16 | github.com/pkg/errors v0.8.0 // indirect 17 | github.com/pmezard/go-difflib v1.0.0 // indirect 18 | github.com/prometheus/client_golang v0.9.0 19 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect 20 | github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 // indirect 21 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect 22 | github.com/stretchr/objx v0.1.1 // indirect 23 | github.com/stretchr/testify v1.2.2 24 | golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 // indirect 25 | golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 // indirect 26 | ) 27 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= 2 | github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= 3 | github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= 4 | 
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= 5 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= 6 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/docker-flow/docker-flow-swarm-listener v0.0.0-20180824145208-cff6f781c240 h1:gnR8WhvenCXjoELX9vYXIvyGFIL28ohRLqhad5/LPpE= 10 | github.com/docker-flow/docker-flow-swarm-listener v0.0.0-20180824145208-cff6f781c240/go.mod h1:oEMd6mumD8A6mcVnSNX0Ao9pYJNcE52s/PVrxPMp5wY= 11 | github.com/docker/distribution v2.6.0-rc.1.0.20180824213512-3354cf98e3a5+incompatible h1:OWZF5cs1HnL/TYfYWKnNEaPO4aLpBchS8cjJEKCVIRc= 12 | github.com/docker/distribution v2.6.0-rc.1.0.20180824213512-3354cf98e3a5+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 13 | github.com/docker/distribution v2.6.2+incompatible h1:4FI6af79dfCS/CYb+RRtkSHw3q1L/bnDjG1PcPZtQhM= 14 | github.com/docker/distribution v2.6.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 15 | github.com/docker/distribution v2.7.0-rc.0.0.20181024170156-93e082742a00+incompatible h1:YOfVNTgst//UrD5ZhDfbY0+GTSWjXfXOYLYHhw0kMpo= 16 | github.com/docker/distribution v2.7.0-rc.0.0.20181024170156-93e082742a00+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 17 | github.com/docker/docker v0.7.3-0.20180825175422-41481abdc657 h1:nKNfp97c1zs0vVAn+J6DnsWHnvRJKfYvLgD4c+UEgBw= 18 | github.com/docker/docker v0.7.3-0.20180825175422-41481abdc657/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 19 | github.com/docker/docker v0.7.3-0.20181027010111-b8e87cfdad8d h1:/4OivNB4IJIue2ZUKccE2zgp/2C8xPBShMGETIlauCA= 20 | github.com/docker/docker v0.7.3-0.20181027010111-b8e87cfdad8d/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 21 | github.com/docker/docker v1.13.1 h1:5VBhsO6ckUxB0A8CE5LlUJdXzik9cbEbBTQ/ggeml7M= 22 | github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 23 | github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= 24 | github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= 25 | github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= 26 | github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 27 | github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= 28 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 29 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 30 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 31 | github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= 32 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 33 | github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= 34 | github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= 35 | github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 36 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 37 | 
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= 38 | github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= 39 | github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= 40 | github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= 41 | github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= 42 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 43 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 44 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 45 | github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= 46 | github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 47 | github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= 48 | github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 49 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= 50 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 51 | github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= 52 | github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 53 | github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 h1:Cto4X6SVMWRPBkJ/3YHn1iDGDGc/Z+sW+AEMKHMVvN4= 54 | github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 55 | github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= 56 | github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 57 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= 58 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 59 | github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= 60 | github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 61 | github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= 62 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 63 | github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 64 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 65 | golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= 66 | golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 67 | golang.org/x/net v0.0.0-20180824152047-4bcd98cce591 h1:4S2XUgvg3hUNTvxI307qkFPb9zKHG3Nf9TXFzX/DZZI= 68 | golang.org/x/net v0.0.0-20180824152047-4bcd98cce591/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 69 | golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519 h1:x6rhz8Y9CjbgQkccRGmELH6K+LJj7tOoh3XWeC1yaQM= 70 | golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 71 | golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87 h1:GqwDwfvIpC33dK9bA1fD+JiDUNsuAiQiEkpHqUKze4o= 72 | golang.org/x/sys v0.0.0-20180824143301-4910a1d54f87/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 73 | golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= 74 | golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 75 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 76 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 77 | golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= 78 | golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 79 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= 80 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 81 | google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= 82 | google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 83 | gotest.tools v2.1.0+incompatible h1:5USw7CrJBYKqjg9R7QlA6jzqZKEAtvW82aNmsxxGPxw= 84 | gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= 85 | -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: docker-flow-swarm-listener 2 | version: 0.0.1 3 | apiVersion: v1 4 | description: Docker Flow Swarm Listener 5 | keywords: 6 | - docker 7 | - swarm 8 | - docker-flow 9 | home: https://swarmlistener.dockerflow.com 10 | sources: 11 | - https://github.com/docker-flow/docker-flow-swarm-listener 12 | maintainers: 13 | - name: Viktor Farcic 14 | email: viktor@farcic.com 15 | -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/README.md: -------------------------------------------------------------------------------- 1 | # Docker Flow Swarm Listener -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/templates/NOTES.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker-flow/docker-flow-swarm-listener/e7128a54aff436e50cd53a29a89289a1bd2d4587/helm/docker-flow-swarm-listener/templates/NOTES.txt -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "helm.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "helm.fullname" -}} 15 | {{- $name := default .Chart.Name .Values.nameOverride -}} 16 | {{- if contains $name .Release.Name -}} 17 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 18 | {{- else -}} 19 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 20 | {{- end -}} 21 | {{- end -}} 22 | -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "helm.fullname" . }} 5 | labels: 6 | app: {{ template "helm.name" . }} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "helm.name" . }} 14 | release: {{ .Release.Name }} 15 | template: 16 | metadata: 17 | labels: 18 | app: {{ template "helm.name" . }} 19 | release: {{ .Release.Name }} 20 | spec: 21 | containers: 22 | - name: ui 23 | image: dockerflow/docker-flow-swarm-listener-docs:{{ .Values.image.tag }} 24 | readinessProbe: 25 | httpGet: 26 | path: / 27 | port: 80 28 | periodSeconds: 1 29 | livenessProbe: 30 | httpGet: 31 | path: / 32 | port: 80 33 | resources: 34 | {{ toYaml .Values.resources | indent 10 }} 35 | -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/templates/ing.yaml: -------------------------------------------------------------------------------- 1 | {{- $serviceName := include "helm.fullname" . -}} 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: {{ template "helm.fullname" . }} 6 | labels: 7 | app: {{ template "helm.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | annotations: 12 | kubernetes.io/ingress.class: "nginx" 13 | certmanager.k8s.io/issuer: "letsencrypt-prod" 14 | certmanager.k8s.io/acme-challenge-type: http01 15 | spec: 16 | rules: 17 | {{- range .Values.ingress.host }} 18 | {{- $url := splitList "/" . }} 19 | - host: {{ first $url }} 20 | http: 21 | paths: 22 | - path: /{{ rest $url | join "/" }} 23 | backend: 24 | serviceName: {{ $serviceName }} 25 | servicePort: 80 26 | {{- end -}} 27 | {{- range .Values.ingress.host }} 28 | {{- $url := splitList "/" . }} 29 | tls: 30 | - hosts: 31 | - {{ first $url }} 32 | secretName: le-{{ $serviceName }} 33 | {{- end -}} 34 | -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/templates/issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certmanager.k8s.io/v1alpha1 2 | kind: Issuer 3 | metadata: 4 | name: letsencrypt-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | email: viktor@farcic.com 9 | privateKeySecretRef: 10 | name: letsencrypt-prod 11 | http01: {} 12 | -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/templates/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "helm.fullname" . }} 5 | labels: 6 | app: {{ template "helm.name" . 
}} 7 | chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | spec: 11 | ports: 12 | - port: 80 13 | type: NodePort 14 | selector: 15 | app: {{ template "helm.name" . }} 16 | release: {{ .Release.Name }} -------------------------------------------------------------------------------- /helm/docker-flow-swarm-listener/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | tag: latest 3 | ingress: 4 | host: 5 | - swarmlistener.dockerflow.com 6 | resources: 7 | limits: 8 | cpu: 10m 9 | memory: 10Mi 10 | requests: 11 | cpu: 5m 12 | memory: 5Mi -------------------------------------------------------------------------------- /k8s/docker-flow-swarm-listener/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: docker-flow-swarm-listener/templates/deployment.yaml 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: swarm-listener-docker-flow-swarm-listener 7 | labels: 8 | app: docker-flow-swarm-listener 9 | chart: docker-flow-swarm-listener-0.0.1 10 | release: swarm-listener 11 | heritage: Tiller 12 | spec: 13 | selector: 14 | matchLabels: 15 | app: docker-flow-swarm-listener 16 | release: swarm-listener 17 | template: 18 | metadata: 19 | labels: 20 | app: docker-flow-swarm-listener 21 | release: swarm-listener 22 | spec: 23 | containers: 24 | - name: ui 25 | image: dockerflow/docker-flow-swarm-listener-docs:latest 26 | readinessProbe: 27 | httpGet: 28 | path: / 29 | port: 80 30 | periodSeconds: 1 31 | livenessProbe: 32 | httpGet: 33 | path: / 34 | port: 80 35 | resources: 36 | limits: 37 | cpu: 10m 38 | memory: 10Mi 39 | requests: 40 | cpu: 5m 41 | memory: 5Mi 42 | 43 | -------------------------------------------------------------------------------- /k8s/docker-flow-swarm-listener/templates/ing.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: docker-flow-swarm-listener/templates/ing.yaml 3 | apiVersion: extensions/v1beta1 4 | kind: Ingress 5 | metadata: 6 | name: swarm-listener-docker-flow-swarm-listener 7 | labels: 8 | app: docker-flow-swarm-listener 9 | chart: docker-flow-swarm-listener-0.0.1 10 | release: swarm-listener 11 | heritage: Tiller 12 | annotations: 13 | kubernetes.io/ingress.class: "nginx" 14 | certmanager.k8s.io/issuer: "letsencrypt-prod" 15 | certmanager.k8s.io/acme-challenge-type: http01 16 | spec: 17 | rules: 18 | - host: swarmlistener.dockerflow.com 19 | http: 20 | paths: 21 | - path: / 22 | backend: 23 | serviceName: swarm-listener-docker-flow-swarm-listener 24 | servicePort: 80 25 | tls: 26 | - hosts: 27 | - swarmlistener.dockerflow.com 28 | secretName: le-swarm-listener-docker-flow-swarm-listener -------------------------------------------------------------------------------- /k8s/docker-flow-swarm-listener/templates/issuer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: docker-flow-swarm-listener/templates/issuer.yaml 3 | apiVersion: certmanager.k8s.io/v1alpha1 4 | kind: Issuer 5 | metadata: 6 | name: letsencrypt-prod 7 | spec: 8 | acme: 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | email: viktor@farcic.com 11 | privateKeySecretRef: 12 | name: letsencrypt-prod 13 | http01: {} 14 | -------------------------------------------------------------------------------- /k8s/docker-flow-swarm-listener/templates/svc.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # Source: docker-flow-swarm-listener/templates/svc.yaml 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: swarm-listener-docker-flow-swarm-listener 7 | labels: 8 | app: docker-flow-swarm-listener 9 | chart: docker-flow-swarm-listener-0.0.1 10 | release: swarm-listener 11 | heritage: Tiller 12 | spec: 13 | ports: 14 | - port: 80 15 | type: NodePort 16 | selector: 17 | app: docker-flow-swarm-listener 18 | release: swarm-listener -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | 7 | "github.com/docker-flow/docker-flow-swarm-listener/service" 8 | ) 9 | 10 | func main() { 11 | l := log.New(os.Stdout, "", log.LstdFlags) 12 | 13 | l.Printf("Starting Docker Flow: Swarm Listener") 14 | args := getArgs() 15 | swarmListener, err := service.NewSwarmListenerFromEnv( 16 | args.Retry, args.RetryInterval, 17 | args.ServicePollingInterval, args.NodePollingInterval, l) 18 | if err != nil { 19 | l.Printf("Failed to initialize Docker Flow: Swarm Listener") 20 | l.Printf("ERROR: %v", err) 21 | return 22 | } 23 | 24 | l.Printf("Sending notifications for running services and nodes") 25 | go swarmListener.CompletelyNotifyServices() 26 | go swarmListener.NotifyNodes(false) 27 | 28 | swarmListener.Run() 29 | serve := NewServe(swarmListener, l) 30 | l.Fatal(Run(serve)) 31 | } 32 | -------------------------------------------------------------------------------- /metrics/prometheus.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | ) 6 | 7 | var serviceName = "swarm_listener" 8 | var errorCounter = prometheus.NewCounterVec( 9 | prometheus.CounterOpts{ 10 | Subsystem: "docker_flow", 11 | Name: "error", 12 | Help: "Error counter", 13 | }, 14 | []string{"service", "operation"}, 15 | ) 16 | 17 | var serviceGauge = prometheus.NewGaugeVec( 18 | prometheus.GaugeOpts{ 19 | Subsystem: "docker_flow", 20 | Name: "service_count", 21 | Help: "Service gauge", 22 | }, 23 | []string{"service"}, 24 | ) 25 | 26 | func init() { 27 | prometheus.MustRegister(errorCounter, serviceGauge) 28 | } 29 | 30 | // RecordError stores error information as Prometheus metric. 31 | // the `operation` argument is used to identify the error. 32 | func RecordError(operation string) { 33 | errorCounter.With(prometheus.Labels{ 34 | "service": serviceName, 35 | "operation": operation, 36 | }).Inc() 37 | } 38 | 39 | // RecordService stores the number of services as Prometheus metric. 
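// Because the gauge is registered with subsystem `docker_flow` and name
// `service_count`, it appears on the `/metrics` endpoint as
// `docker_flow_service_count` with a `service="swarm_listener"` label; a
// scrape might, for example, return a sample such as the following
// (the value is illustrative only):
// docker_flow_service_count{service="swarm_listener"} 12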
40 | func RecordService(count int) { 41 | serviceGauge.With(prometheus.Labels{ 42 | "service": serviceName, 43 | }).Set(float64(count)) 44 | } 45 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Docker Flow Swarm Listener 2 | pages: 3 | - Home: index.md 4 | - Tutorial: tutorial.md 5 | - Configuration: config.md 6 | - Usage: usage.md 7 | - About: 8 | - Release Notes: release-notes.md 9 | - Feedback and Contribution: feedback-and-contribution.md 10 | - License: license.md 11 | repo_url: https://github.com/docker-flow/docker-flow-swarm-listener 12 | site_author: Viktor Farcic 13 | copyright: Copyright © 2017 Viktor Farcic 14 | strict: true 15 | theme: 'material' 16 | extra: 17 | palette: 18 | primary: 'blue' 19 | accent: 'light blue' 20 | markdown_extensions: 21 | - toc 22 | - admonition 23 | - codehilite: 24 | guess_lang: false 25 | - toc: 26 | permalink: true 27 | - footnotes 28 | - pymdownx.arithmatex 29 | - pymdownx.betterem: 30 | smart_enable: all 31 | - pymdownx.caret 32 | - pymdownx.critic 33 | - pymdownx.emoji: 34 | emoji_generator: !!python/name:pymdownx.emoji.to_svg 35 | - pymdownx.inlinehilite 36 | - pymdownx.magiclink 37 | - pymdownx.mark 38 | - pymdownx.smartsymbols 39 | - pymdownx.superfences 40 | - pymdownx.tasklist: 41 | custom_checkbox: true 42 | - pymdownx.tilde 43 | -------------------------------------------------------------------------------- /run-tests.sh: -------------------------------------------------------------------------------- 1 | go test --cover ./... -p 1 2 | -------------------------------------------------------------------------------- /scripts/dm-swarm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | for i in 1 2 3; do 4 | docker-machine create \ 5 | -d virtualbox \ 6 | swarm-$i 7 | done 8 | 9 | eval $(docker-machine env swarm-1) 10 | 11 | docker swarm init \ 12 | --advertise-addr $(docker-machine ip swarm-1) 13 | 14 | TOKEN=$(docker swarm join-token -q manager) 15 | 16 | for i in 2 3; do 17 | eval $(docker-machine env swarm-$i) 18 | 19 | docker swarm join \ 20 | --token $TOKEN \ 21 | --advertise-addr $(docker-machine ip swarm-$i) \ 22 | $(docker-machine ip swarm-1):2377 23 | done 24 | 25 | for i in 1 2 3; do 26 | eval $(docker-machine env swarm-$i) 27 | 28 | docker node update \ 29 | --label-add env=prod \ 30 | --label-add type=manager \ 31 | swarm-$i 32 | done 33 | 34 | echo ">> The swarm cluster is up and running" -------------------------------------------------------------------------------- /serve.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | "net/http" 7 | 8 | "github.com/docker-flow/docker-flow-swarm-listener/metrics" 9 | "github.com/docker-flow/docker-flow-swarm-listener/service" 10 | 11 | "github.com/prometheus/client_golang/prometheus" 12 | ) 13 | 14 | var httpListenAndServe = http.ListenAndServe 15 | var httpWriterSetContentType = func(w http.ResponseWriter, value string) { 16 | w.Header().Set("Content-Type", value) 17 | } 18 | 19 | //Response message 20 | type Response struct { 21 | Status string 22 | } 23 | 24 | // Serve is the instance structure 25 | type Serve struct { 26 | SwarmListener service.SwarmListening 27 | Log *log.Logger 28 | } 29 | 30 | type server interface { 31 | NotifyServices(w http.ResponseWriter, req 
*http.Request) 32 | GetServices(w http.ResponseWriter, req *http.Request) 33 | GetNodes(w http.ResponseWriter, req *http.Request) 34 | PingHandler(w http.ResponseWriter, req *http.Request) 35 | } 36 | 37 | // NewServe returns a new instance of the `Serve` 38 | func NewServe(swarmListener service.SwarmListening, logger *log.Logger) *Serve { 39 | return &Serve{ 40 | SwarmListener: swarmListener, 41 | Log: logger, 42 | } 43 | } 44 | 45 | // Run executes a server 46 | func Run(s server) error { 47 | mux := attachRoutes(s) 48 | return httpListenAndServe(":8080", mux) 49 | } 50 | 51 | // attachRoutes attaches routes to services and returns the mux 52 | func attachRoutes(s server) *http.ServeMux { 53 | mux := http.NewServeMux() 54 | mux.HandleFunc("/v1/docker-flow-swarm-listener/notify-services", s.NotifyServices) 55 | mux.HandleFunc("/v1/docker-flow-swarm-listener/get-nodes", s.GetNodes) 56 | mux.HandleFunc("/v1/docker-flow-swarm-listener/get-services", s.GetServices) 57 | mux.HandleFunc("/v1/docker-flow-swarm-listener/ping", s.PingHandler) 58 | mux.Handle("/metrics", prometheus.Handler()) 59 | return mux 60 | } 61 | 62 | // NotifyServices notifies all configured endpoints of new, updated, or removed services 63 | func (m Serve) NotifyServices(w http.ResponseWriter, req *http.Request) { 64 | go m.SwarmListener.NotifyServices(false) 65 | js, _ := json.Marshal(Response{Status: "OK"}) 66 | httpWriterSetContentType(w, "application/json") 67 | w.WriteHeader(http.StatusOK) 68 | w.Write(js) 69 | } 70 | 71 | // GetServices retrieves all services with the `com.df.notify` label set to `true` 72 | func (m Serve) GetServices(w http.ResponseWriter, req *http.Request) { 73 | parameters, err := m.SwarmListener.GetServicesParameters(req.Context()) 74 | if err != nil { 75 | m.Log.Printf("ERROR: Unable to prepare response: %s", err) 76 | metrics.RecordError("serveGetServices") 77 | w.WriteHeader(http.StatusInternalServerError) 78 | } 79 | bytes, err := json.Marshal(parameters) 80 | if err != nil { 81 | m.Log.Printf("ERROR: Unable to prepare response: %s", err) 82 | metrics.RecordError("serveGetServices") 83 | w.WriteHeader(http.StatusInternalServerError) 84 | } else { 85 | // NOTE: For an unknown reason, `httpWriterSetContentType` does not work so the header is set directly 86 | w.Header().Set("Content-Type", "application/json") 87 | httpWriterSetContentType(w, "application/json") 88 | w.Write(bytes) 89 | } 90 | } 91 | 92 | // GetNodes retrieves all nodes 93 | func (m Serve) GetNodes(w http.ResponseWriter, req *http.Request) { 94 | parameters, err := m.SwarmListener.GetNodesParameters(req.Context()) 95 | if err != nil { 96 | m.Log.Printf("ERROR: Unable to prepare response: %s", err) 97 | metrics.RecordError("serveGetNodes") 98 | w.WriteHeader(http.StatusInternalServerError) 99 | } 100 | bytes, err := json.Marshal(parameters) 101 | if err != nil { 102 | m.Log.Printf("ERROR: Unable to prepare response: %s", err) 103 | metrics.RecordError("serveGetNodes") 104 | w.WriteHeader(http.StatusInternalServerError) 105 | } else { 106 | // NOTE: For an unknown reason, `httpWriterSetContentType` does not work so the header is set directly 107 | w.Header().Set("Content-Type", "application/json") 108 | httpWriterSetContentType(w, "application/json") 109 | w.Write(bytes) 110 | } 111 | } 112 | 113 | // PingHandler is used for health checks 114 | func (m Serve) PingHandler(w http.ResponseWriter, req *http.Request) { 115 | js, _ := json.Marshal(Response{Status: "OK"}) 116 | httpWriterSetContentType(w, "application/json") 117 | 
w.WriteHeader(http.StatusOK) 118 | w.Write(js) 119 | } 120 | -------------------------------------------------------------------------------- /serve_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "log" 8 | "net/http" 9 | "net/http/httptest" 10 | "os" 11 | "testing" 12 | 13 | "github.com/stretchr/testify/mock" 14 | "github.com/stretchr/testify/suite" 15 | ) 16 | 17 | type ServerTestSuite struct { 18 | suite.Suite 19 | Log *log.Logger 20 | RWMock *ResponseWriterMock 21 | SLMock *SwarmListeningMock 22 | } 23 | 24 | func TestServerUnitTestSuite(t *testing.T) { 25 | suite.Run(t, new(ServerTestSuite)) 26 | } 27 | 28 | func (s *ServerTestSuite) SetupTest() { 29 | s.Log = log.New(os.Stdout, "", 0) 30 | 31 | s.RWMock = new(ResponseWriterMock) 32 | s.RWMock.On("Header").Return(nil) 33 | s.RWMock.On("Write", mock.Anything).Return(0, nil) 34 | s.RWMock.On("WriteHeader", mock.Anything) 35 | s.SLMock = new(SwarmListeningMock) 36 | } 37 | 38 | func (s *ServerTestSuite) Test_Run_InvokesHTTPListenAndServe() { 39 | var actual string 40 | expected := fmt.Sprintf(":8080") 41 | httpListenAndServe = func(addr string, handler http.Handler) error { 42 | actual = addr 43 | return nil 44 | } 45 | 46 | serve := Serve{} 47 | Run(serve) 48 | 49 | s.Equal(expected, actual) 50 | } 51 | 52 | func (s *ServerTestSuite) Test_Run_ReturnsError_WhenHTTPListenAndServeFails() { 53 | orig := httpListenAndServe 54 | defer func() { 55 | httpListenAndServe = orig 56 | }() 57 | httpListenAndServe = func(addr string, handler http.Handler) error { 58 | return fmt.Errorf("This is an error") 59 | } 60 | 61 | serve := Serve{} 62 | actual := Run(serve) 63 | 64 | s.Error(actual) 65 | } 66 | 67 | func (s *ServerTestSuite) Test_RestNotifyServices_RoutesTo_NotifyServices() { 68 | sm := new(serverMock) 69 | sm.On("NotifyServices", mock.Anything, mock.Anything).Return(nil) 70 | mux := attachRoutes(sm) 71 | 72 | req := httptest.NewRequest("GET", "/v1/docker-flow-swarm-listener/notify-services", nil) 73 | w := httptest.NewRecorder() 74 | mux.ServeHTTP(w, req) 75 | 76 | sm.AssertExpectations(s.T()) 77 | } 78 | 79 | func (s *ServerTestSuite) Test_RestGetNodes_RoutesTo_GetNodes() { 80 | 81 | sm := new(serverMock) 82 | sm.On("GetNodes", mock.Anything, mock.Anything).Return(nil) 83 | mux := attachRoutes(sm) 84 | 85 | req := httptest.NewRequest("GET", "/v1/docker-flow-swarm-listener/get-nodes", nil) 86 | w := httptest.NewRecorder() 87 | mux.ServeHTTP(w, req) 88 | 89 | sm.AssertExpectations(s.T()) 90 | } 91 | 92 | func (s *ServerTestSuite) Test_RestGetServices_RoutesTo_GetServices() { 93 | 94 | sm := new(serverMock) 95 | sm.On("GetServices", mock.Anything, mock.Anything).Return(nil) 96 | mux := attachRoutes(sm) 97 | 98 | req := httptest.NewRequest("GET", "/v1/docker-flow-swarm-listener/get-services", nil) 99 | w := httptest.NewRecorder() 100 | mux.ServeHTTP(w, req) 101 | 102 | sm.AssertExpectations(s.T()) 103 | } 104 | 105 | func (s *ServerTestSuite) Test_RestPing_RoutesTo_GetPing() { 106 | 107 | sm := new(serverMock) 108 | sm.On("PingHandler", mock.Anything, mock.Anything).Return(nil) 109 | mux := attachRoutes(sm) 110 | 111 | req := httptest.NewRequest("GET", "/v1/docker-flow-swarm-listener/ping", nil) 112 | w := httptest.NewRecorder() 113 | mux.ServeHTTP(w, req) 114 | 115 | sm.AssertExpectations(s.T()) 116 | } 117 | 118 | func (s *ServerTestSuite) Test_NotifyServices_ReturnsStatus200() { 119 | s.SLMock.On("NotifyServices", 
false).Return() 120 | 121 | req, _ := http.NewRequest("GET", "/v1/docker-flow-swarm-listener/notify-services", nil) 122 | expected, _ := json.Marshal(Response{Status: "OK"}) 123 | 124 | srv := NewServe(s.SLMock, s.Log) 125 | srv.NotifyServices(s.RWMock, req) 126 | 127 | s.RWMock.AssertCalled(s.T(), "WriteHeader", 200) 128 | s.RWMock.AssertCalled(s.T(), "Write", []byte(expected)) 129 | } 130 | 131 | func (s *ServerTestSuite) Test_NotifyServices_SetsContentTypeToJSON() { 132 | var actual string 133 | httpWriterSetContentTypeOrig := httpWriterSetContentType 134 | defer func() { httpWriterSetContentType = httpWriterSetContentTypeOrig }() 135 | httpWriterSetContentType = func(w http.ResponseWriter, value string) { 136 | actual = value 137 | } 138 | req, _ := http.NewRequest("GET", "/v1/docker-flow-swarm-listener/notify-services", nil) 139 | s.SLMock.On("NotifyServices", false).Return() 140 | 141 | srv := NewServe(s.SLMock, s.Log) 142 | srv.NotifyServices(s.RWMock, req) 143 | 144 | s.Equal("application/json", actual) 145 | } 146 | 147 | // GetServices 148 | 149 | func (s *ServerTestSuite) Test_GetServices_ReturnsServices() { 150 | mapParam := []map[string]string{ 151 | { 152 | "serviceName": "demo", 153 | "notify": "true", 154 | "servicePath": "/demo", 155 | "distribute": "true", 156 | }, 157 | } 158 | s.SLMock.On("GetServicesParameters", mock.Anything).Return(mapParam, nil) 159 | req, _ := http.NewRequest("GET", "/v1/docker-flow-swarm-listener/get-services", nil) 160 | srv := NewServe(s.SLMock, s.Log) 161 | srv.GetServices(s.RWMock, req) 162 | 163 | call := s.RWMock.GetLastMethodCall("Write") 164 | value, _ := call.Arguments.Get(0).([]byte) 165 | rsp := []map[string]string{} 166 | json.Unmarshal(value, &rsp) 167 | s.Equal(mapParam, rsp) 168 | } 169 | 170 | // GetNodes 171 | 172 | func (s *ServerTestSuite) Test_GetNodes_ReturnNodes() { 173 | mapParam := []map[string]string{ 174 | { 175 | "id": "node1", 176 | "hostname": "node1hostname", 177 | "address": "10.0.0.1", 178 | "versionIndex": "24", 179 | "state": "ready", 180 | "role": "worker", 181 | "availability": "active", 182 | }, 183 | } 184 | s.SLMock.On("GetNodesParameters", mock.Anything).Return(mapParam, nil) 185 | req, _ := http.NewRequest("GET", "/v1/docker-flow-swarm-listener/get-nodes", nil) 186 | srv := NewServe(s.SLMock, s.Log) 187 | srv.GetNodes(s.RWMock, req) 188 | call := s.RWMock.GetLastMethodCall("Write") 189 | value, _ := call.Arguments.Get(0).([]byte) 190 | rsp := []map[string]string{} 191 | json.Unmarshal(value, &rsp) 192 | s.Equal(mapParam, rsp) 193 | } 194 | 195 | // PingHandler 196 | 197 | func (s *ServerTestSuite) Test_PingHandler_ReturnsStatus200() { 198 | actual := "" 199 | httpWriterSetContentTypeOrig := httpWriterSetContentType 200 | defer func() { httpWriterSetContentType = httpWriterSetContentTypeOrig }() 201 | httpWriterSetContentType = func(w http.ResponseWriter, value string) { 202 | actual = value 203 | } 204 | req, _ := http.NewRequest("GET", "/v1/docker-flow-swarm-listener/ping", nil) 205 | expected, _ := json.Marshal(Response{Status: "OK"}) 206 | 207 | srv := NewServe(s.SLMock, s.Log) 208 | srv.PingHandler(s.RWMock, req) 209 | 210 | s.Equal("application/json", actual) 211 | s.RWMock.AssertCalled(s.T(), "WriteHeader", 200) 212 | s.RWMock.AssertCalled(s.T(), "Write", []byte(expected)) 213 | } 214 | 215 | // Mocks 216 | 217 | type ResponseWriterMock struct { 218 | mock.Mock 219 | } 220 | 221 | func (m *ResponseWriterMock) GetLastMethodCall(methodName string) *mock.Call { 222 | for _, call := range m.Calls { 223 | 
if call.Method == methodName { 224 | return &call 225 | } 226 | } 227 | return nil 228 | } 229 | 230 | func (m *ResponseWriterMock) Header() http.Header { 231 | m.Called() 232 | return make(map[string][]string) 233 | } 234 | 235 | func (m *ResponseWriterMock) Write(data []byte) (int, error) { 236 | params := m.Called(data) 237 | return params.Int(0), params.Error(1) 238 | } 239 | 240 | func (m *ResponseWriterMock) WriteHeader(header int) { 241 | m.Called(header) 242 | } 243 | 244 | type SwarmListeningMock struct { 245 | mock.Mock 246 | } 247 | 248 | func (m *SwarmListeningMock) Run() { 249 | m.Called() 250 | } 251 | func (m *SwarmListeningMock) NotifyServices(consultCache bool) { 252 | m.Called(consultCache) 253 | } 254 | func (m *SwarmListeningMock) NotifyNodes(consultCache bool) { 255 | m.Called(consultCache) 256 | } 257 | func (m *SwarmListeningMock) GetServicesParameters(ctx context.Context) ([]map[string]string, error) { 258 | args := m.Called(ctx) 259 | return args.Get(0).([]map[string]string), args.Error(1) 260 | } 261 | func (m *SwarmListeningMock) GetNodesParameters(ctx context.Context) ([]map[string]string, error) { 262 | args := m.Called(ctx) 263 | return args.Get(0).([]map[string]string), args.Error(1) 264 | } 265 | 266 | type serverMock struct { 267 | mock.Mock 268 | } 269 | 270 | func (m *serverMock) NotifyServices(w http.ResponseWriter, req *http.Request) { 271 | m.Called(w, req) 272 | } 273 | 274 | func (m *serverMock) GetServices(w http.ResponseWriter, req *http.Request) { 275 | m.Called(w, req) 276 | } 277 | 278 | func (m *serverMock) GetNodes(w http.ResponseWriter, req *http.Request) { 279 | m.Called(w, req) 280 | } 281 | func (m *serverMock) PingHandler(w http.ResponseWriter, req *http.Request) { 282 | m.Called(w, req) 283 | } 284 | -------------------------------------------------------------------------------- /service/cancelmanager.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | ) 7 | 8 | // CancelManaging manages canceling of contexts 9 | type CancelManaging interface { 10 | Add(rootCtx context.Context, id string, reqID int64) context.Context 11 | Delete(id string, reqID int64) bool 12 | } 13 | 14 | type cancelPair struct { 15 | Cancel context.CancelFunc 16 | ReqID int64 17 | } 18 | 19 | // CancelManager is a thread-safe implementation of the `CancelManaging` interface 20 | type CancelManager struct { 21 | v map[string]cancelPair 22 | mux sync.Mutex 23 | } 24 | 25 | // NewCancelManager creates a new `CancelManager` 26 | func NewCancelManager() *CancelManager { 27 | return &CancelManager{ 28 | v: map[string]cancelPair{}, 29 | mux: sync.Mutex{}, 30 | } 31 | } 32 | 33 | // Add creates a context for `id` and `reqID` and returns that context. 34 | // If `id` exists in memory, the task with that `id` will be canceled. 35 | func (m *CancelManager) Add(rootCtx context.Context, id string, reqID int64) context.Context { 36 | m.mux.Lock() 37 | defer m.mux.Unlock() 38 | 39 | pair, ok := m.v[id] 40 | if ok { 41 | pair.Cancel() 42 | delete(m.v, id) 43 | } 44 | 45 | ctx, cancel := context.WithCancel(rootCtx) 46 | m.v[id] = cancelPair{ 47 | Cancel: cancel, 48 | ReqID: reqID, 49 | } 50 | return ctx 51 | } 52 | 53 | // Delete cancels the context associated with `id` and `reqID` and 54 | // removes `id` from the map. 55 | // If the corresponding `id` and `reqID` are not present, Delete does nothing.
56 | // In all cases, Delete returns true if an item was deleted 57 | func (m *CancelManager) Delete(id string, reqID int64) bool { 58 | m.mux.Lock() 59 | defer m.mux.Unlock() 60 | 61 | pair, ok := m.v[id] 62 | 63 | if !ok || pair.ReqID != reqID { 64 | return false 65 | } 66 | 67 | pair.Cancel() 68 | delete(m.v, id) 69 | return true 70 | } 71 | -------------------------------------------------------------------------------- /service/cancelmanager_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/suite" 9 | ) 10 | 11 | type CancelManagerTestSuite struct { 12 | suite.Suite 13 | ctx context.Context 14 | } 15 | 16 | func TestCancelManagerUnitTestSuite(t *testing.T) { 17 | suite.Run(t, new(CancelManagerTestSuite)) 18 | } 19 | 20 | func (s *CancelManagerTestSuite) SetupSuite() { 21 | s.ctx = context.Background() 22 | } 23 | 24 | func (s *CancelManagerTestSuite) Test_Add_IDEqual_CancelsContext_Returns_Context() { 25 | cm := NewCancelManager() 26 | ctx := cm.Add(s.ctx, "id1", 1) 27 | cm.Add(s.ctx, "id1", 2) 28 | 29 | L: 30 | for { 31 | select { 32 | case <-time.After(time.Second * 5): 33 | s.Fail("Timeout") 34 | return 35 | case <-ctx.Done(): 36 | break L 37 | } 38 | } 39 | 40 | s.Equal(int64(2), cm.v["id1"].ReqID) 41 | } 42 | 43 | func (s *CancelManagerTestSuite) Test_Add_IDNotExist_Returns_Context() { 44 | 45 | cm := NewCancelManager() 46 | firstCtx := cm.Add(s.ctx, "id1", 1) 47 | s.NotNil(firstCtx) 48 | 49 | s.Require().Contains(cm.v, "id1") 50 | s.Equal(cm.v["id1"].ReqID, int64(1)) 51 | } 52 | 53 | func (s *CancelManagerTestSuite) Test_Delete_IDEqual_ReqIDNotEqual_DoesNothing() { 54 | cm := NewCancelManager() 55 | cm.Add(s.ctx, "id1", 1) 56 | 57 | s.Require().Len(cm.v, 1) 58 | 59 | s.False(cm.Delete("id1", 2)) 60 | s.Require().Len(cm.v, 1) 61 | s.Require().Contains(cm.v, "id1") 62 | s.Equal(cm.v["id1"].ReqID, int64(1)) 63 | } 64 | 65 | func (s *CancelManagerTestSuite) Test_Delete_IDEqual_ReqIDEqual_CallsCancel_RemovesFromMemory() { 66 | cm := NewCancelManager() 67 | ctx := cm.Add(s.ctx, "id1", 1) 68 | 69 | s.Require().Len(cm.v, 1) 70 | 71 | s.True(cm.Delete("id1", 1)) 72 | s.Require().Len(cm.v, 0) 73 | 74 | L: 75 | for { 76 | select { 77 | case <-time.After(time.Second * 5): 78 | s.Fail("Timeout") 79 | return 80 | case <-ctx.Done(): 81 | break L 82 | } 83 | } 84 | } 85 | 86 | func (s *CancelManagerTestSuite) Test_Delete_IDEqual_ReqIDEqual_CntNotZero_StaysInMemory() { 87 | // Set startingCnt to 2 88 | cm := NewCancelManager() 89 | cm.Add(s.ctx, "id1", 1) 90 | s.Require().Len(cm.v, 1) 91 | s.Require().Contains(cm.v, "id1") 92 | 93 | s.True(cm.Delete("id1", 1)) 94 | s.Require().Len(cm.v, 0) 95 | } 96 | -------------------------------------------------------------------------------- /service/docker.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "os" 6 | 7 | "github.com/docker/docker/client" 8 | ) 9 | 10 | var dockerAPIVersion = "1.37" 11 | 12 | // NewDockerClientFromEnv returns a `*client.Client` struct using environment variable 13 | // `DF_DOCKER_HOST` for the host 14 | func NewDockerClientFromEnv() (*client.Client, error) { 15 | host := "unix:///var/run/docker.sock" 16 | if len(os.Getenv("DF_DOCKER_HOST")) > 0 { 17 | host = os.Getenv("DF_DOCKER_HOST") 18 | } 19 | defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"} 20 | cli, err := 
client.NewClient(host, dockerAPIVersion, nil, defaultHeaders) 21 | if err != nil { 22 | return cli, err 23 | } 24 | cli.NegotiateAPIVersion(context.Background()) 25 | return cli, nil 26 | } 27 | -------------------------------------------------------------------------------- /service/eventnodelistener.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "time" 7 | 8 | "github.com/docker/docker/api/types" 9 | "github.com/docker/docker/api/types/events" 10 | "github.com/docker/docker/api/types/filters" 11 | "github.com/docker/docker/client" 12 | 13 | "github.com/docker-flow/docker-flow-swarm-listener/metrics" 14 | ) 15 | 16 | // NodeListening listens to node events 17 | type NodeListening interface { 18 | ListenForNodeEvents(eventChan chan<- Event) 19 | } 20 | 21 | // NodeListener listens for docker node events 22 | type NodeListener struct { 23 | dockerClient *client.Client 24 | log *log.Logger 25 | } 26 | 27 | // NewNodeListener creates a `NodeListener` 28 | func NewNodeListener(c *client.Client, logger *log.Logger) *NodeListener { 29 | return &NodeListener{dockerClient: c, log: logger} 30 | } 31 | 32 | // ListenForNodeEvents listens for events and places them on channels 33 | func (s NodeListener) ListenForNodeEvents( 34 | eventChan chan<- Event) { 35 | 36 | go func() { 37 | filter := filters.NewArgs() 38 | filter.Add("type", "node") 39 | msgStream, msgErrs := s.dockerClient.Events( 40 | context.Background(), types.EventsOptions{Filters: filter}) 41 | 42 | for { 43 | select { 44 | case msg := <-msgStream: 45 | if !s.validEventNode(msg) { 46 | continue 47 | } 48 | eventType := s.getEventType(msg) 49 | eventChan <- Event{ 50 | Type: eventType, 51 | ID: msg.Actor.ID, 52 | TimeNano: msg.TimeNano, 53 | ConsultCache: true, 54 | } 55 | case err := <-msgErrs: 56 | s.log.Printf("%v, Restarting docker event stream", err) 57 | metrics.RecordError("ListenForNodeEvents") 58 | time.Sleep(time.Second) 59 | // Reopen event stream 60 | msgStream, msgErrs = s.dockerClient.Events( 61 | context.Background(), types.EventsOptions{Filters: filter}) 62 | } 63 | } 64 | }() 65 | 66 | } 67 | 68 | // validEventNode returns true when event is valid (should be passed through). 69 | // This will still allow through 4-5 events when changing a worker node 70 | // to a manager node or vice versa.
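// For example, an update event whose `Actor.Attributes` carries no (or an
// empty) "name" entry is dropped, while `remove` actions always pass
// through, as the implementation below shows.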
71 | func (s NodeListener) validEventNode(msg events.Message) bool { 72 | if msg.Action == "remove" { 73 | return true 74 | } 75 | if name, ok := msg.Actor.Attributes["name"]; !ok || len(name) == 0 { 76 | return false 77 | } 78 | return true 79 | } 80 | 81 | func (s NodeListener) getEventType(msg events.Message) EventType { 82 | if msg.Action == "remove" { 83 | return EventTypeRemove 84 | } 85 | 86 | if name := msg.Actor.Attributes["state.new"]; name == "down" { 87 | return EventTypeRemove 88 | } 89 | 90 | return EventTypeCreate 91 | } -------------------------------------------------------------------------------- /service/eventnodelistener_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "log" 7 | "testing" 8 | "time" 9 | 10 | "github.com/docker/docker/client" 11 | "github.com/stretchr/testify/suite" 12 | ) 13 | 14 | type EventListenerNodeTestSuite struct { 15 | suite.Suite 16 | DockerClient *client.Client 17 | Logger *log.Logger 18 | LogBytes *bytes.Buffer 19 | NetworkName string 20 | Node0 string 21 | Node0JoinToken string 22 | } 23 | 24 | func TestEventListenerNodeTestSuite(t *testing.T) { 25 | suite.Run(t, new(EventListenerNodeTestSuite)) 26 | } 27 | 28 | func (s *EventListenerNodeTestSuite) SetupSuite() { 29 | s.LogBytes = new(bytes.Buffer) 30 | s.Logger = log.New(s.LogBytes, "", 0) 31 | 32 | // Assumes running test with docker-compose.yml 33 | network, err := getNetworkNameWithSuffix("dfsl_network") 34 | s.Require().NoError(err) 35 | s.NetworkName = network 36 | s.Node0 = "node0" 37 | 38 | createNode(s.Node0, s.NetworkName) 39 | time.Sleep(time.Second) 40 | initSwarm(s.Node0) 41 | time.Sleep(time.Second) 42 | 43 | s.Node0JoinToken = getWorkerToken(s.Node0) 44 | time.Sleep(time.Second) 45 | 46 | client, err := newTestNodeDockerClient(s.Node0) 47 | s.Require().NoError(err) 48 | s.DockerClient = client 49 | 50 | } 51 | 52 | func (s *EventListenerNodeTestSuite) TearDownSuite() { 53 | destroyNode(s.Node0) 54 | } 55 | 56 | func (s *EventListenerNodeTestSuite) Test_ListenForNodeEvents_NodeCreate() { 57 | 58 | enl := NewNodeListener(s.DockerClient, s.Logger) 59 | 60 | // Listen for events 61 | eventChan := make(chan Event) 62 | enl.ListenForNodeEvents(eventChan) 63 | 64 | // Create node1 65 | createNode("node1", s.NetworkName) 66 | defer func() { 67 | destroyNode("node1") 68 | }() 69 | 70 | time.Sleep(time.Second) 71 | joinSwarm("node1", s.Node0, s.Node0JoinToken) 72 | 73 | // Wait for events 74 | event, err := s.waitForEvent(eventChan) 75 | s.Require().NoError(err) 76 | s.True(event.ConsultCache) 77 | 78 | node1ID, err := getNodeID("node1", "node0") 79 | s.Require().NoError(err) 80 | 81 | s.Equal(node1ID, event.ID) 82 | s.Equal(EventTypeCreate, event.Type) 83 | } 84 | 85 | // This test is not consistent 86 | // func (s *EventListenerNodeTestSuite) Test_ListenForNodeEvents_NodeRemove() { 87 | 88 | // enl := NewNodeListener(s.DockerClient, s.Logger) 89 | 90 | // // Create node1 and join swarm 91 | // createNode("node1", s.NetworkName) 92 | // defer func() { 93 | // destroyNode("node1") 94 | // }() 95 | // joinSwarm("node1", s.Node0, s.Node0JoinToken) 96 | 97 | // node1ID, err := getNodeID("node1", s.Node0) 98 | // s.Require().NoError(err) 99 | 100 | // // Listen for events 101 | // eventChan := make(chan Event) 102 | // enl.ListenForNodeEvents(eventChan) 103 | 104 | // // Remove node1 105 | // removeNodeFromSwarm("node1", s.Node0) 106 | 107 | // // Wait for events 108 | // event, err :=
s.waitForEvent(eventChan) 109 | // s.Require().NoError(err) 110 | 111 | // s.Equal(node1ID, event.ID) 112 | // s.Equal(EventTypeRemove, event.Type) 113 | // } 114 | 115 | func (s *EventListenerNodeTestSuite) Test_ListenForNodeEvents_NodeUpdateLabel() { 116 | // Create one node 117 | enl := NewNodeListener(s.DockerClient, s.Logger) 118 | 119 | // Listen for events 120 | eventChan := make(chan Event) 121 | enl.ListenForNodeEvents(eventChan) 122 | 123 | // addLabelToNode 124 | addLabelToNode(s.Node0, "cats=flay", s.Node0) 125 | 126 | // Wait for events 127 | event, err := s.waitForEvent(eventChan) 128 | s.Require().NoError(err) 129 | s.True(event.ConsultCache) 130 | 131 | node0ID, err := getNodeID(s.Node0, s.Node0) 132 | s.Require().NoError(err) 133 | 134 | s.Equal(node0ID, event.ID) 135 | s.Equal(EventTypeCreate, event.Type) 136 | 137 | // removeLabelFromNode 138 | removeLabelFromNode(s.Node0, "cats", s.Node0) 139 | 140 | // Wait for events 141 | event, err = s.waitForEvent(eventChan) 142 | s.Require().NoError(err) 143 | s.True(event.ConsultCache) 144 | 145 | s.Equal(node0ID, event.ID) 146 | s.Equal(EventTypeCreate, event.Type) 147 | } 148 | 149 | func (s *EventListenerNodeTestSuite) waitForEvent(events <-chan Event) (*Event, error) { 150 | timeOut := time.NewTimer(time.Second * 5).C 151 | for { 152 | select { 153 | case event := <-events: 154 | return &event, nil 155 | case <-timeOut: 156 | return nil, fmt.Errorf("Timeout") 157 | } 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /service/eventservicelistener.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "time" 7 | 8 | "github.com/docker/docker/api/types" 9 | "github.com/docker/docker/api/types/events" 10 | "github.com/docker/docker/api/types/filters" 11 | "github.com/docker/docker/client" 12 | "github.com/docker-flow/docker-flow-swarm-listener/metrics" 13 | ) 14 | 15 | // SwarmServiceListening listens for service events 16 | type SwarmServiceListening interface { 17 | ListenForServiceEvents(chan<- Event) 18 | } 19 | 20 | // SwarmServiceListener listens for docker service events 21 | type SwarmServiceListener struct { 22 | dockerClient *client.Client 23 | log *log.Logger 24 | } 25 | 26 | // NewSwarmServiceListener creates a `SwarmServiceListener` 27 | func NewSwarmServiceListener(c *client.Client, logger *log.Logger) *SwarmServiceListener { 28 | return &SwarmServiceListener{dockerClient: c, log: logger} 29 | } 30 | 31 | // ListenForServiceEvents listens for events and places them on channels 32 | func (s SwarmServiceListener) ListenForServiceEvents(eventChan chan<- Event) { 33 | go func() { 34 | filter := filters.NewArgs() 35 | filter.Add("type", "service") 36 | msgStream, msgErrs := s.dockerClient.Events( 37 | context.Background(), types.EventsOptions{Filters: filter}) 38 | 39 | for { 40 | select { 41 | case msg := <-msgStream: 42 | if !s.validEventNode(msg) { 43 | continue 44 | } 45 | eventType := EventTypeCreate 46 | if msg.Action == "remove" { 47 | eventType = EventTypeRemove 48 | } 49 | eventChan <- Event{ 50 | Type: eventType, 51 | ID: msg.Actor.ID, 52 | TimeNano: msg.TimeNano, 53 | ConsultCache: true, 54 | } 55 | case err := <-msgErrs: 56 | s.log.Printf("%v, Restarting docker event stream", err) 57 | metrics.RecordError("ListenForServiceEvents") 58 | time.Sleep(time.Second) 59 | // Reopen event stream 60 | msgStream, msgErrs = s.dockerClient.Events( 61 | context.Background(), 
types.EventsOptions{Filters: filter}) 62 | } 63 | } 64 | }() 65 | } 66 | 67 | // validEventNode returns true when event is valid (should be passed through) 68 | func (s SwarmServiceListener) validEventNode(msg events.Message) bool { 69 | return true 70 | } 71 | -------------------------------------------------------------------------------- /service/eventservicelistener_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/docker/docker/client" 11 | "github.com/stretchr/testify/suite" 12 | ) 13 | 14 | type SwarmServiceListenerTestSuite struct { 15 | suite.Suite 16 | ServiceName string 17 | DockerClient *client.Client 18 | Logger *log.Logger 19 | } 20 | 21 | func TestSwarmServiceListenerTestSuite(t *testing.T) { 22 | suite.Run(t, new(SwarmServiceListenerTestSuite)) 23 | } 24 | 25 | func (s *SwarmServiceListenerTestSuite) SetupSuite() { 26 | s.ServiceName = "my-service" 27 | client, err := NewDockerClientFromEnv() 28 | s.Require().NoError(err) 29 | s.DockerClient = client 30 | s.Logger = log.New(os.Stdout, "", 0) 31 | } 32 | 33 | func (s *SwarmServiceListenerTestSuite) Test_ListenForServiceEvents_CreateService() { 34 | snl := NewSwarmServiceListener(s.DockerClient, s.Logger) 35 | 36 | // Listen for events 37 | eventChan := make(chan Event) 38 | snl.ListenForServiceEvents(eventChan) 39 | 40 | createTestService("util-1", []string{}, false, "", "") 41 | defer func() { 42 | removeTestService("util-1") 43 | }() 44 | 45 | time.Sleep(time.Second) 46 | utilID, err := getServiceID("util-1") 47 | s.Require().NoError(err) 48 | 49 | event, err := s.waitForServiceEvent(eventChan) 50 | s.Require().NoError(err) 51 | 52 | s.Equal(EventTypeCreate, event.Type) 53 | s.Equal(utilID, event.ID) 54 | s.True(event.ConsultCache) 55 | } 56 | 57 | func (s *SwarmServiceListenerTestSuite) Test_ListenForServiceEvents_UpdateService() { 58 | snl := NewSwarmServiceListener(s.DockerClient, s.Logger) 59 | 60 | createTestService("util-1", []string{}, false, "", "") 61 | defer func() { 62 | removeTestService("util-1") 63 | }() 64 | 65 | time.Sleep(time.Second) 66 | utilID, err := getServiceID("util-1") 67 | s.Require().NoError(err) 68 | 69 | // Listen for events 70 | eventChan := make(chan Event) 71 | snl.ListenForServiceEvents(eventChan) 72 | 73 | // Update label 74 | addLabelToService("util-1", "hello=world") 75 | 76 | event, err := s.waitForServiceEvent(eventChan) 77 | s.Require().NoError(err) 78 | 79 | s.Equal(EventTypeCreate, event.Type) 80 | s.Equal(utilID, event.ID) 81 | s.True(event.ConsultCache) 82 | 83 | // Remove label 84 | removeLabelFromService("util-1", "hello") 85 | 86 | event, err = s.waitForServiceEvent(eventChan) 87 | s.Require().NoError(err) 88 | 89 | s.Equal(EventTypeCreate, event.Type) 90 | s.Equal(utilID, event.ID) 91 | s.True(event.ConsultCache) 92 | } 93 | 94 | func (s *SwarmServiceListenerTestSuite) Test_ListenForServiceEvents_RemoveService() { 95 | snl := NewSwarmServiceListener(s.DockerClient, s.Logger) 96 | 97 | createTestService("util-1", []string{}, false, "", "") 98 | defer func() { 99 | removeTestService("util-1") 100 | }() 101 | 102 | time.Sleep(time.Second) 103 | utilID, err := getServiceID("util-1") 104 | s.Require().NoError(err) 105 | 106 | // Listen for events 107 | eventChan := make(chan Event) 108 | snl.ListenForServiceEvents(eventChan) 109 | 110 | // Remove service 111 | removeTestService("util-1") 112 | 113 | event, err := 
s.waitForServiceEvent(eventChan) 114 | s.Require().NoError(err) 115 | 116 | s.Equal(EventTypeRemove, event.Type) 117 | s.Equal(utilID, event.ID) 118 | s.True(event.ConsultCache) 119 | } 120 | 121 | func (s *SwarmServiceListenerTestSuite) waitForServiceEvent(events <-chan Event) (*Event, error) { 122 | timeOut := time.NewTimer(time.Second * 5).C 123 | for { 124 | select { 125 | case event := <-events: 126 | return &event, nil 127 | case <-timeOut: 128 | return nil, fmt.Errorf("Timeout") 129 | } 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /service/minify.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/docker/docker/api/types/swarm" 7 | ) 8 | 9 | // MinifyNode minifies `swarm.Node`. 10 | // Only labels prefixed with `com.df.` will be used. 11 | func MinifyNode(n swarm.Node) NodeMini { 12 | engineLabels := map[string]string{} 13 | for k, v := range n.Description.Engine.Labels { 14 | if strings.HasPrefix(k, "com.df.") { 15 | engineLabels[k] = v 16 | } 17 | } 18 | nodeLabels := map[string]string{} 19 | for k, v := range n.Spec.Labels { 20 | if strings.HasPrefix(k, "com.df.") { 21 | nodeLabels[k] = v 22 | } 23 | } 24 | 25 | return NodeMini{ 26 | ID: n.ID, 27 | Hostname: n.Description.Hostname, 28 | VersionIndex: n.Meta.Version.Index, 29 | State: n.Status.State, 30 | Addr: n.Status.Addr, 31 | NodeLabels: nodeLabels, 32 | EngineLabels: engineLabels, 33 | Role: n.Spec.Role, 34 | Availability: n.Spec.Availability, 35 | } 36 | } 37 | 38 | // MinifySwarmService minifies `SwarmService`. 39 | // Only labels prefixed with `com.df.` will be used; 40 | // the label matching `ignoreKey` will be dropped and 41 | // the label matching `includeKey` will always be included. 42 | func MinifySwarmService(ss SwarmService, ignoreKey string, includeKey string) SwarmServiceMini { 43 | filterLabels := map[string]string{} 44 | for k, v := range ss.Spec.Labels { 45 | if k != ignoreKey && strings.HasPrefix(k, "com.df.") || 46 | k == includeKey { 47 | filterLabels[k] = v 48 | } 49 | } 50 | ssm := SwarmServiceMini{ 51 | ID: ss.ID, 52 | Name: ss.Spec.Name, 53 | Labels: filterLabels, 54 | NodeInfo: ss.NodeInfo, 55 | } 56 | 57 | if ss.Spec.TaskTemplate.ContainerSpec != nil { 58 | ssm.ContainerImage = ss.Spec.TaskTemplate.ContainerSpec.Image 59 | } 60 | 61 | if ss.Spec.Mode.Global != nil { 62 | ssm.Global = true 63 | return ssm 64 | } 65 | if ss.Spec.Mode.Replicated != nil && ss.Spec.Mode.Replicated.Replicas != nil { 66 | ssm.Replicas = *ss.Spec.Mode.Replicated.Replicas 67 | } 68 | return ssm 69 | } 70 | -------------------------------------------------------------------------------- /service/minify_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/docker/docker/api/types/swarm" 7 | "github.com/stretchr/testify/suite" 8 | ) 9 | 10 | type MinifyUnitTestSuite struct { 11 | suite.Suite 12 | } 13 | 14 | func TestMinifyUnitTest(t *testing.T) { 15 | suite.Run(t, new(MinifyUnitTestSuite)) 16 | } 17 | 18 | func (s *MinifyUnitTestSuite) Test_MinifyNode() { 19 | meta := swarm.Meta{ 20 | Version: swarm.Version{Index: uint64(10)}, 21 | } 22 | annot := swarm.Annotations{ 23 | Labels: map[string]string{ 24 | "cows": "moo", 25 | "birds": "fly", 26 | "com.df.wow": "yup", 27 | }, 28 | } 29 | spec := swarm.NodeSpec{ 30 | Annotations: annot, 31 | Role: swarm.NodeRoleWorker, 32 | Availability:
swarm.NodeAvailabilityActive, 33 | } 34 | engineDesp := swarm.EngineDescription{ 35 | Labels: map[string]string{ 36 | "squrriels": "climb", 37 | "com.df.world": "round", 38 | }, 39 | } 40 | des := swarm.NodeDescription{ 41 | Hostname: "nodehostname", 42 | Engine: engineDesp, 43 | } 44 | nodeStatus := swarm.NodeStatus{ 45 | State: swarm.NodeStateReady, 46 | Addr: "nodeaddr", 47 | } 48 | 49 | n := swarm.Node{ 50 | ID: "nodeID", 51 | Meta: meta, 52 | Spec: spec, 53 | Description: des, 54 | Status: nodeStatus, 55 | } 56 | expectMini := NodeMini{ 57 | ID: "nodeID", 58 | Hostname: "nodehostname", 59 | VersionIndex: uint64(10), 60 | State: swarm.NodeStateReady, 61 | Addr: "nodeaddr", 62 | NodeLabels: map[string]string{ 63 | "com.df.wow": "yup", 64 | }, 65 | EngineLabels: map[string]string{ 66 | "com.df.world": "round", 67 | }, 68 | Role: swarm.NodeRoleWorker, 69 | Availability: swarm.NodeAvailabilityActive, 70 | } 71 | 72 | nodeMini := MinifyNode(n) 73 | 74 | s.Equal(expectMini, nodeMini) 75 | } 76 | 77 | func (s *MinifyUnitTestSuite) Test_MinifySwarmService_Global() { 78 | annot := swarm.Annotations{ 79 | Name: "serviceName", 80 | Labels: map[string]string{ 81 | "cows": "moo", 82 | "birds": "fly", 83 | "com.df.hello": "nyc", 84 | "com.df.notify": "true", 85 | }, 86 | } 87 | mode := swarm.ServiceMode{ 88 | Global: &swarm.GlobalService{}, 89 | } 90 | 91 | serviceSpec := swarm.ServiceSpec{ 92 | Annotations: annot, 93 | Mode: mode, 94 | } 95 | 96 | nodeSet := NodeIPSet{} 97 | nodeSet.Add("node-1", "1.0.0.1", "id1") 98 | nodeSet.Add("node-2", "1.0.1.1", "id2") 99 | 100 | service := swarm.Service{ 101 | ID: "serviceID", 102 | Spec: serviceSpec, 103 | } 104 | 105 | expectMini := SwarmServiceMini{ 106 | ID: "serviceID", 107 | Name: "serviceName", 108 | Labels: map[string]string{ 109 | "com.df.hello": "nyc", 110 | }, 111 | Global: true, 112 | Replicas: uint64(0), 113 | NodeInfo: nodeSet, 114 | } 115 | 116 | ss := SwarmService{service, nodeSet} 117 | ssMini := MinifySwarmService(ss, "com.df.notify", "com.docker.stack.namespace") 118 | 119 | s.Equal(expectMini, ssMini) 120 | } 121 | 122 | func (s *MinifyUnitTestSuite) Test_MinifySwarmService_Replicas() { 123 | annot := swarm.Annotations{ 124 | Name: "serviceName", 125 | Labels: map[string]string{ 126 | "cows": "moo", 127 | "birds": "fly", 128 | "com.df.hello": "world", 129 | "com.df.notify": "true", 130 | "com.docker.stack.namespace": "really", 131 | }, 132 | } 133 | replicas := uint64(3) 134 | mode := swarm.ServiceMode{ 135 | Replicated: &swarm.ReplicatedService{ 136 | Replicas: &replicas, 137 | }, 138 | } 139 | 140 | serviceSpec := swarm.ServiceSpec{ 141 | Annotations: annot, 142 | Mode: mode, 143 | } 144 | 145 | nodeSet := NodeIPSet{} 146 | nodeSet.Add("node-1", "1.0.0.1", "id1") 147 | 148 | service := swarm.Service{ 149 | ID: "serviceID", 150 | Spec: serviceSpec, 151 | } 152 | 153 | expectMini := SwarmServiceMini{ 154 | ID: "serviceID", 155 | Name: "serviceName", 156 | Labels: map[string]string{ 157 | "com.df.hello": "world", 158 | "com.docker.stack.namespace": "really", 159 | }, 160 | Global: false, 161 | Replicas: uint64(3), 162 | NodeInfo: nodeSet, 163 | } 164 | 165 | ss := SwarmService{service, nodeSet} 166 | ssMini := MinifySwarmService(ss, "com.df.notify", "com.docker.stack.namespace") 167 | 168 | s.Equal(expectMini, ssMini) 169 | } 170 | -------------------------------------------------------------------------------- /service/mocks.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | 
import ( 4 | "context" 5 | 6 | "github.com/docker/docker/api/types/swarm" 7 | "github.com/stretchr/testify/mock" 8 | ) 9 | 10 | type notificationSenderMock struct { 11 | mock.Mock 12 | } 13 | 14 | func (m *notificationSenderMock) Create(ctx context.Context, params string) error { 15 | args := m.Called(ctx, params) 16 | return args.Error(0) 17 | } 18 | 19 | func (m *notificationSenderMock) Remove(ctx context.Context, params string) error { 20 | args := m.Called(ctx, params) 21 | return args.Error(0) 22 | } 23 | 24 | func (m *notificationSenderMock) GetCreateAddr() string { 25 | args := m.Called() 26 | return args.String(0) 27 | } 28 | 29 | func (m *notificationSenderMock) GetRemoveAddr() string { 30 | args := m.Called() 31 | return args.String(0) 32 | } 33 | 34 | type swarmServiceListeningMock struct { 35 | mock.Mock 36 | } 37 | 38 | func (m *swarmServiceListeningMock) ListenForServiceEvents(eventChan chan<- Event) { 39 | m.Called(eventChan) 40 | } 41 | 42 | type swarmServiceInspector struct { 43 | mock.Mock 44 | } 45 | 46 | func (m *swarmServiceInspector) SwarmServiceInspect(ctx context.Context, serviceID string) (*SwarmService, error) { 47 | args := m.Called(ctx, serviceID) 48 | return args.Get(0).(*SwarmService), args.Error(1) 49 | } 50 | 51 | func (m *swarmServiceInspector) SwarmServiceList(ctx context.Context) ([]SwarmService, error) { 52 | args := m.Called(ctx) 53 | return args.Get(0).([]SwarmService), args.Error(1) 54 | } 55 | 56 | func (m *swarmServiceInspector) GetNodeInfo(ctx context.Context, ss SwarmService) (NodeIPSet, error) { 57 | args := m.Called(ctx, ss) 58 | return args.Get(0).(NodeIPSet), args.Error(1) 59 | } 60 | 61 | func (m *swarmServiceInspector) SwarmServiceRunning(ctx context.Context, serviceID string) (bool, error) { 62 | args := m.Called(ctx, serviceID) 63 | return args.Bool(0), args.Error(1) 64 | } 65 | 66 | type swarmServiceCacherMock struct { 67 | mock.Mock 68 | } 69 | 70 | func (m *swarmServiceCacherMock) InsertAndCheck(ss SwarmServiceMini) bool { 71 | args := m.Called(ss) 72 | return args.Bool(0) 73 | } 74 | 75 | func (m *swarmServiceCacherMock) IsNewOrUpdated(ss SwarmServiceMini) bool { 76 | args := m.Called(ss) 77 | return args.Bool(0) 78 | } 79 | 80 | func (m *swarmServiceCacherMock) Delete(ID string) { 81 | m.Called(ID) 82 | } 83 | 84 | func (m *swarmServiceCacherMock) Get(ID string) (SwarmServiceMini, bool) { 85 | args := m.Called(ID) 86 | return args.Get(0).(SwarmServiceMini), args.Bool(1) 87 | } 88 | 89 | func (m *swarmServiceCacherMock) Len() int { 90 | args := m.Called() 91 | return args.Int(0) 92 | } 93 | 94 | func (m *swarmServiceCacherMock) Keys() map[string]struct{} { 95 | args := m.Called() 96 | return args.Get(0).(map[string]struct{}) 97 | } 98 | 99 | type nodeListeningMock struct { 100 | mock.Mock 101 | } 102 | 103 | func (m *nodeListeningMock) ListenForNodeEvents(eventChan chan<- Event) { 104 | m.Called(eventChan) 105 | } 106 | 107 | type nodeInspectorMock struct { 108 | mock.Mock 109 | } 110 | 111 | func (m *nodeInspectorMock) NodeInspect(nodeID string) (swarm.Node, error) { 112 | args := m.Called(nodeID) 113 | return args.Get(0).(swarm.Node), args.Error(1) 114 | } 115 | 116 | func (m *nodeInspectorMock) NodeList(ctx context.Context) ([]swarm.Node, error) { 117 | args := m.Called(ctx) 118 | return args.Get(0).([]swarm.Node), args.Error(1) 119 | } 120 | 121 | type nodeCacherMock struct { 122 | mock.Mock 123 | } 124 | 125 | func (m *nodeCacherMock) InsertAndCheck(n NodeMini) bool { 126 | args := m.Called(n) 127 | return args.Bool(0) 128 | } 129 
| 130 | func (m *nodeCacherMock) Delete(ID string) { 131 | m.Called(ID) 132 | } 133 | 134 | func (m *nodeCacherMock) Get(ID string) (NodeMini, bool) { 135 | args := m.Called(ID) 136 | return args.Get(0).(NodeMini), args.Bool(1) 137 | } 138 | 139 | func (m *nodeCacherMock) IsNewOrUpdated(n NodeMini) bool { 140 | args := m.Called(n) 141 | return args.Bool(0) 142 | } 143 | 144 | func (m *nodeCacherMock) Keys() map[string]struct{} { 145 | args := m.Called() 146 | return args.Get(0).(map[string]struct{}) 147 | } 148 | 149 | type notifyDistributorMock struct { 150 | mock.Mock 151 | } 152 | 153 | func (m *notifyDistributorMock) Run(serviceChan <-chan Notification, nodeChan <-chan Notification) { 154 | m.Called(serviceChan, nodeChan) 155 | } 156 | 157 | func (m *notifyDistributorMock) HasServiceListeners() bool { 158 | return m.Called().Bool(0) 159 | } 160 | 161 | func (m *notifyDistributorMock) HasNodeListeners() bool { 162 | return m.Called().Bool(0) 163 | } 164 | 165 | type swarmServicePollingMock struct { 166 | mock.Mock 167 | } 168 | 169 | func (m *swarmServicePollingMock) Run(eventChan chan<- Event) { 170 | m.Called(eventChan) 171 | } 172 | 173 | type nodePollingMock struct { 174 | mock.Mock 175 | } 176 | 177 | func (m *nodePollingMock) Run(eventChan chan<- Event) { 178 | m.Called(eventChan) 179 | } 180 | -------------------------------------------------------------------------------- /service/node.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/docker/docker/api/types" 7 | "github.com/docker/docker/api/types/swarm" 8 | "github.com/docker/docker/client" 9 | ) 10 | 11 | // NodeInspector is able to inspect a swarm node 12 | type NodeInspector interface { 13 | NodeInspect(nodeID string) (swarm.Node, error) 14 | NodeList(ctx context.Context) ([]swarm.Node, error) 15 | } 16 | 17 | // NodeClient implements `NodeInspector` for Docker 18 | type NodeClient struct { 19 | DockerClient *client.Client 20 | } 21 | 22 | // NewNodeClient creates a `NodeClient` 23 | func NewNodeClient(c *client.Client) *NodeClient { 24 | return &NodeClient{DockerClient: c} 25 | } 26 | 27 | // NodeInspect returns `swarm.Node` from its ID 28 | func (c NodeClient) NodeInspect(nodeID string) (swarm.Node, error) { 29 | node, _, err := c.DockerClient.NodeInspectWithRaw(context.Background(), nodeID) 30 | return node, err 31 | } 32 | 33 | // NodeList returns a list of all nodes 34 | func (c NodeClient) NodeList(ctx context.Context) ([]swarm.Node, error) { 35 | return c.DockerClient.NodeList(ctx, types.NodeListOptions{}) 36 | } 37 | -------------------------------------------------------------------------------- /service/node_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/suite" 9 | ) 10 | 11 | type NodeInspectorTestSuite struct { 12 | suite.Suite 13 | NClient *NodeClient 14 | } 15 | 16 | func TestNodeInspectorTestSuite(t *testing.T) { 17 | suite.Run(t, new(NodeInspectorTestSuite)) 18 | } 19 | 20 | func (s *NodeInspectorTestSuite) SetupSuite() { 21 | c, err := newTestNodeDockerClient("node1") 22 | s.Require().NoError(err) 23 | s.NClient = NewNodeClient(c) 24 | 25 | // Create swarm of two nodes 26 | // Assumes running test with docker-compose.yml 27 | network, err := getNetworkNameWithSuffix("dfsl_network") 28 | s.Require().NoError(err) 29 | 30 | createNode("node1", network)
31 | time.Sleep(time.Second) 32 | 33 | initSwarm("node1") 34 | time.Sleep(time.Second) 35 | 36 | joinToken := getWorkerToken("node1") 37 | 38 | createNode("node2", network) 39 | time.Sleep(time.Second) 40 | joinSwarm("node2", "node1", joinToken) 41 | time.Sleep(time.Second) 42 | } 43 | 44 | func (s *NodeInspectorTestSuite) TearDownSuite() { 45 | destroyNode("node2") 46 | destroyNode("node1") 47 | } 48 | 49 | func (s *NodeInspectorTestSuite) Test_NodeInspect() { 50 | nodeID, err := getNodeID("node2", "node1") 51 | s.Require().NoError(err) 52 | 53 | node, err := s.NClient.NodeInspect("node2") 54 | s.Require().NoError(err) 55 | 56 | s.Equal("node2", node.Description.Hostname) 57 | s.Equal(nodeID, node.ID) 58 | } 59 | 60 | func (s *NodeInspectorTestSuite) Test_NodeInspect_Error() { 61 | 62 | _, err := s.NClient.NodeInspect("node3") 63 | s.Error(err) 64 | } 65 | 66 | func (s *NodeInspectorTestSuite) Test_NodeList() { 67 | nodes, err := s.NClient.NodeList(context.Background()) 68 | s.Require().NoError(err) 69 | s.Len(nodes, 2) 70 | } 71 | -------------------------------------------------------------------------------- /service/nodecache.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import "sync" 4 | 5 | // NodeCacher caches nodes 6 | type NodeCacher interface { 7 | InsertAndCheck(n NodeMini) bool 8 | IsNewOrUpdated(n NodeMini) bool 9 | Delete(ID string) 10 | Get(ID string) (NodeMini, bool) 11 | Keys() map[string]struct{} 12 | } 13 | 14 | // NodeCache implements `NodeCacher`. 15 | // It is safe for concurrent use (access is guarded by an RWMutex). 16 | type NodeCache struct { 17 | cache map[string]NodeMini 18 | mux sync.RWMutex 19 | } 20 | 21 | // NewNodeCache creates a new `NodeCache` 22 | func NewNodeCache() *NodeCache { 23 | return &NodeCache{ 24 | cache: map[string]NodeMini{}, 25 | } 26 | } 27 | 28 | // InsertAndCheck inserts `NodeMini` into the cache. 29 | // If the node is new or updated, `InsertAndCheck` returns true.
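// A minimal usage sketch (illustrative; `node` stands for any `swarm.Node`
// obtained from the Docker API):
//
//	if cache.InsertAndCheck(MinifyNode(node)) {
//		// the node is new or changed, so a notification should be sent
//	}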
30 | func (c *NodeCache) InsertAndCheck(n NodeMini) bool { 31 | c.mux.Lock() 32 | defer c.mux.Unlock() 33 | 34 | cachedNode, ok := c.cache[n.ID] 35 | c.cache[n.ID] = n 36 | 37 | return !ok || !n.Equal(cachedNode) 38 | } 39 | 40 | // Delete removes node from cache 41 | func (c *NodeCache) Delete(ID string) { 42 | c.mux.Lock() 43 | defer c.mux.Unlock() 44 | 45 | delete(c.cache, ID) 46 | } 47 | 48 | // Get gets node from cache 49 | func (c *NodeCache) Get(ID string) (NodeMini, bool) { 50 | c.mux.RLock() 51 | defer c.mux.RUnlock() 52 | 53 | v, ok := c.cache[ID] 54 | return v, ok 55 | } 56 | 57 | // IsNewOrUpdated returns true if node is new or updated 58 | func (c *NodeCache) IsNewOrUpdated(n NodeMini) bool { 59 | c.mux.RLock() 60 | defer c.mux.RUnlock() 61 | 62 | cachedNode, ok := c.cache[n.ID] 63 | return !ok || !n.Equal(cachedNode) 64 | } 65 | 66 | // Keys return the keys of the cache 67 | func (c *NodeCache) Keys() map[string]struct{} { 68 | c.mux.RLock() 69 | defer c.mux.RUnlock() 70 | output := map[string]struct{}{} 71 | for key := range c.cache { 72 | output[key] = struct{}{} 73 | } 74 | return output 75 | } 76 | -------------------------------------------------------------------------------- /service/nodecache_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/docker/docker/api/types/swarm" 7 | "github.com/stretchr/testify/suite" 8 | ) 9 | 10 | type NodeCacheTestSuite struct { 11 | suite.Suite 12 | Cache *NodeCache 13 | NMini NodeMini 14 | } 15 | 16 | func TestNodeCacheUnitTestSuite(t *testing.T) { 17 | suite.Run(t, new(NodeCacheTestSuite)) 18 | } 19 | 20 | func (s *NodeCacheTestSuite) SetupTest() { 21 | s.Cache = NewNodeCache() 22 | s.NMini = getNewNodeMini() 23 | } 24 | 25 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_NewNode_ReturnsTrue() { 26 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 27 | s.True(isUpdated) 28 | s.AssertInCache(s.NMini) 29 | } 30 | 31 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_SameLabel_ReturnsFalse() { 32 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 33 | s.True(isUpdated) 34 | s.AssertInCache(s.NMini) 35 | 36 | isUpdated = s.Cache.InsertAndCheck(s.NMini) 37 | s.False(isUpdated) 38 | s.AssertInCache(s.NMini) 39 | } 40 | 41 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_NewNodeLabel_ReturnsTrue() { 42 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 43 | s.True(isUpdated) 44 | s.AssertInCache(s.NMini) 45 | 46 | newNMini := getNewNodeMini() 47 | newNMini.NodeLabels["com.df.wow2"] = "yup2" 48 | 49 | isUpdated = s.Cache.InsertAndCheck(newNMini) 50 | s.True(isUpdated) 51 | s.AssertInCache(newNMini) 52 | } 53 | 54 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_UpdateNodeLabel_ReturnsTrue() { 55 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 56 | s.True(isUpdated) 57 | s.AssertInCache(s.NMini) 58 | 59 | newNMini := getNewNodeMini() 60 | newNMini.NodeLabels["com.df.wow"] = "yup2" 61 | 62 | isUpdated = s.Cache.InsertAndCheck(newNMini) 63 | s.True(isUpdated) 64 | s.AssertInCache(newNMini) 65 | } 66 | 67 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_NewEngineLabel_ReturnsTrue() { 68 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 69 | s.True(isUpdated) 70 | s.AssertInCache(s.NMini) 71 | 72 | newNMini := getNewNodeMini() 73 | newNMini.NodeLabels["com.df.mars"] = "far" 74 | 75 | isUpdated = s.Cache.InsertAndCheck(newNMini) 76 | s.True(isUpdated) 77 | s.AssertInCache(newNMini) 78 | } 79 | 80 | func (s *NodeCacheTestSuite) 
Test_InsertAndCheck_UpdateEngineLabel_ReturnsTrue() { 81 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 82 | s.True(isUpdated) 83 | s.AssertInCache(s.NMini) 84 | 85 | newNMini := getNewNodeMini() 86 | newNMini.NodeLabels["com.df.world"] = "flat" 87 | 88 | isUpdated = s.Cache.InsertAndCheck(newNMini) 89 | s.True(isUpdated) 90 | s.AssertInCache(newNMini) 91 | } 92 | 93 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_ChangeRole_ReturnsTrue() { 94 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 95 | s.True(isUpdated) 96 | s.AssertInCache(s.NMini) 97 | 98 | newNMini := getNewNodeMini() 99 | newNMini.Role = swarm.NodeRoleManager 100 | 101 | isUpdated = s.Cache.InsertAndCheck(newNMini) 102 | s.True(isUpdated) 103 | s.AssertInCache(newNMini) 104 | } 105 | 106 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_ChangeState_ReturnsTrue() { 107 | 108 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 109 | s.True(isUpdated) 110 | s.AssertInCache(s.NMini) 111 | 112 | newNMini := getNewNodeMini() 113 | newNMini.State = swarm.NodeStateDown 114 | 115 | isUpdated = s.Cache.InsertAndCheck(newNMini) 116 | s.True(isUpdated) 117 | s.AssertInCache(newNMini) 118 | } 119 | 120 | func (s *NodeCacheTestSuite) Test_InsertAndCheck_ChangeAvailability_ReturnsTrue() { 121 | 122 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 123 | s.True(isUpdated) 124 | s.AssertInCache(s.NMini) 125 | 126 | newNMini := getNewNodeMini() 127 | newNMini.Availability = swarm.NodeAvailabilityPause 128 | 129 | isUpdated = s.Cache.InsertAndCheck(newNMini) 130 | s.True(isUpdated) 131 | s.AssertInCache(newNMini) 132 | } 133 | 134 | func (s *NodeCacheTestSuite) Test_GetAndRemove_InCache_ReturnsNodeMini_RemovesFromCache() { 135 | 136 | isUpdated := s.Cache.InsertAndCheck(s.NMini) 137 | s.True(isUpdated) 138 | s.AssertInCache(s.NMini) 139 | 140 | removedNMini, ok := s.Cache.Get(s.NMini.ID) 141 | s.True(ok) 142 | s.Cache.Delete(s.NMini.ID) 143 | s.AssertNotInCache(s.NMini) 144 | s.Equal(s.NMini, removedNMini) 145 | } 146 | 147 | func (s *NodeCacheTestSuite) Test_GetAndRemove_NotInCache_ReturnsFalse() { 148 | _, ok := s.Cache.Get(s.NMini.ID) 149 | s.False(ok) 150 | } 151 | 152 | func (s *NodeCacheTestSuite) Test_IsNewOrUpdated_NodeInCache() { 153 | s.Cache.InsertAndCheck(s.NMini) 154 | s.AssertInCache(s.NMini) 155 | 156 | newOrUpdated := s.Cache.IsNewOrUpdated(s.NMini) 157 | s.False(newOrUpdated) 158 | } 159 | 160 | func (s *NodeCacheTestSuite) Test_IsNewOrUpdated_NodeNotInCache() { 161 | newOrUpdated := s.Cache.IsNewOrUpdated(s.NMini) 162 | s.True(newOrUpdated) 163 | } 164 | 165 | func (s *NodeCacheTestSuite) Test_IsNewOrUpdated_NodeIsDifferentCache() { 166 | 167 | s.Cache.InsertAndCheck(s.NMini) 168 | s.AssertInCache(s.NMini) 169 | 170 | anotherNMini := getNewNodeMini() 171 | anotherNMini.State = swarm.NodeStateDown 172 | 173 | newOrUpdated := s.Cache.IsNewOrUpdated(anotherNMini) 174 | s.True(newOrUpdated) 175 | 176 | } 177 | 178 | func (s *NodeCacheTestSuite) Test_Keys() { 179 | s.Cache.InsertAndCheck(s.NMini) 180 | s.AssertInCache(s.NMini) 181 | 182 | keys := s.Cache.Keys() 183 | 184 | s.Require().Len(keys, 1) 185 | s.Contains(keys, s.NMini.ID) 186 | 187 | } 188 | 189 | func (s *NodeCacheTestSuite) AssertInCache(nm NodeMini) { 190 | ss, ok := s.Cache.Get(nm.ID) 191 | s.True(ok) 192 | s.Equal(nm, ss) 193 | } 194 | 195 | func (s *NodeCacheTestSuite) AssertNotInCache(nm NodeMini) { 196 | _, ok := s.Cache.Get(nm.ID) 197 | s.False(ok) 198 | } 199 | -------------------------------------------------------------------------------- /service/nodepoller.go: 
-------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "time" 7 | 8 | "github.com/docker/docker/api/types/swarm" 9 | ) 10 | 11 | // NodePolling provides an interface for polling node changes 12 | type NodePolling interface { 13 | Run(eventChan chan<- Event) 14 | } 15 | 16 | // NodePoller implements `NodePolling` 17 | type NodePoller struct { 18 | Client NodeInspector 19 | Cache NodeCacher 20 | PollingInterval int 21 | MinifyFunc func(swarm.Node) NodeMini 22 | Log *log.Logger 23 | } 24 | 25 | // NewNodePoller creates a new `NodePoller` 26 | func NewNodePoller( 27 | client NodeInspector, 28 | cache NodeCacher, 29 | pollingInterval int, 30 | minifyFunc func(swarm.Node) NodeMini, 31 | log *log.Logger, 32 | ) *NodePoller { 33 | return &NodePoller{ 34 | Client: client, 35 | Cache: cache, 36 | PollingInterval: pollingInterval, 37 | MinifyFunc: minifyFunc, 38 | Log: log, 39 | } 40 | } 41 | 42 | // Run starts poller and places events onto `eventChan` 43 | func (n NodePoller) Run(eventChan chan<- Event) { 44 | 45 | if n.PollingInterval <= 0 { 46 | return 47 | } 48 | 49 | ctx := context.Background() 50 | 51 | n.Log.Printf("Polling for Node Changes") 52 | time.Sleep(time.Duration(n.PollingInterval) * time.Second) 53 | 54 | for { 55 | nodes, err := n.Client.NodeList(ctx) 56 | if err != nil { 57 | n.Log.Printf("ERROR (NodePoller): %v", err) 58 | } else { 59 | nowTimeNano := time.Now().UTC().UnixNano() 60 | keys := n.Cache.Keys() 61 | for _, node := range nodes { 62 | delete(keys, node.ID) 63 | 64 | nodeMini := n.MinifyFunc(node) 65 | if n.Cache.IsNewOrUpdated(nodeMini) { 66 | eventChan <- Event{ 67 | Type: EventTypeCreate, 68 | ID: node.ID, 69 | TimeNano: nowTimeNano, 70 | ConsultCache: true, 71 | } 72 | } 73 | } 74 | 75 | // Remaining keys are removal events 76 | for k := range keys { 77 | eventChan <- Event{ 78 | Type: EventTypeRemove, 79 | ID: k, 80 | TimeNano: nowTimeNano, 81 | ConsultCache: true, 82 | } 83 | } 84 | } 85 | time.Sleep(time.Duration(n.PollingInterval) * time.Second) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /service/nodepoller_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "testing" 7 | "time" 8 | 9 | "github.com/docker/docker/api/types/swarm" 10 | "github.com/stretchr/testify/mock" 11 | "github.com/stretchr/testify/suite" 12 | ) 13 | 14 | type NodePollerTestSuite struct { 15 | suite.Suite 16 | NodeClientMock *nodeInspectorMock 17 | NodeCacheMock *nodeCacherMock 18 | 19 | NodePoller *NodePoller 20 | Logger *log.Logger 21 | LogBytes *bytes.Buffer 22 | } 23 | 24 | func TestNodePollerUnitTestSuite(t *testing.T) { 25 | suite.Run(t, new(NodePollerTestSuite)) 26 | } 27 | 28 | func (s *NodePollerTestSuite) SetupTest() { 29 | s.NodeClientMock = new(nodeInspectorMock) 30 | s.NodeCacheMock = new(nodeCacherMock) 31 | 32 | s.LogBytes = new(bytes.Buffer) 33 | s.Logger = log.New(s.LogBytes, "", 0) 34 | 35 | s.NodePoller = NewNodePoller( 36 | s.NodeClientMock, 37 | s.NodeCacheMock, 38 | 1, 39 | MinifyNode, 40 | s.Logger, 41 | ) 42 | } 43 | 44 | func (s *NodePollerTestSuite) Test_Run_NoCache() { 45 | 46 | expNodes := []swarm.Node{ 47 | {ID: "nodeID1"}, {ID: "nodeID2"}, 48 | } 49 | keys := map[string]struct{}{} 50 | miniNode1 := NodeMini{ID: "nodeID1", EngineLabels: map[string]string{}, NodeLabels: map[string]string{}} 51 | miniNode2 := NodeMini{ID:
"nodeID2", EngineLabels: map[string]string{}, NodeLabels: map[string]string{}} 52 | 53 | eventChan := make(chan Event) 54 | 55 | s.NodeClientMock. 56 | On("NodeList", mock.AnythingOfType("*context.emptyCtx")).Return(expNodes, nil) 57 | 58 | s.NodeCacheMock. 59 | On("Keys").Return(keys). 60 | On("IsNewOrUpdated", miniNode1).Return(true). 61 | On("IsNewOrUpdated", miniNode2).Return(true) 62 | 63 | go s.NodePoller.Run(eventChan) 64 | 65 | timeout := time.NewTimer(time.Second * 5).C 66 | eventsNum := 0 67 | 68 | for { 69 | if eventsNum == 2 { 70 | break 71 | } 72 | select { 73 | case event := <-eventChan: 74 | s.Require().Equal(EventTypeCreate, event.Type) 75 | eventsNum++ 76 | case <-timeout: 77 | s.FailNow("Timeout") 78 | } 79 | } 80 | 81 | s.Equal(2, eventsNum) 82 | s.NodeClientMock.AssertExpectations(s.T()) 83 | s.NodeCacheMock.AssertExpectations(s.T()) 84 | } 85 | 86 | func (s *NodePollerTestSuite) Test_Run_HalfInCache() { 87 | expNodes := []swarm.Node{ 88 | {ID: "nodeID1"}, {ID: "nodeID2"}, 89 | } 90 | miniNode1 := NodeMini{ID: "nodeID1", EngineLabels: map[string]string{}, NodeLabels: map[string]string{}} 91 | miniNode2 := NodeMini{ID: "nodeID2", EngineLabels: map[string]string{}, NodeLabels: map[string]string{}} 92 | 93 | keys := map[string]struct{}{} 94 | keys["nodeID1"] = struct{}{} 95 | 96 | eventChan := make(chan Event) 97 | 98 | s.NodeClientMock. 99 | On("NodeList", mock.AnythingOfType("*context.emptyCtx")).Return(expNodes, nil) 100 | 101 | s.NodeCacheMock. 102 | On("Keys").Return(keys). 103 | On("IsNewOrUpdated", miniNode1).Return(false). 104 | On("IsNewOrUpdated", miniNode2).Return(true) 105 | 106 | go s.NodePoller.Run(eventChan) 107 | 108 | timeout := time.NewTimer(time.Second * 5).C 109 | var eventCreate *Event 110 | eventsNum := 0 111 | 112 | for { 113 | if eventsNum == 1 { 114 | break 115 | } 116 | select { 117 | case event := <-eventChan: 118 | if event.ID == "nodeID2" { 119 | eventCreate = &event 120 | } 121 | eventsNum++ 122 | case <-timeout: 123 | s.FailNow("Timeout") 124 | } 125 | } 126 | 127 | s.Equal(1, eventsNum) 128 | s.Require().NotNil(eventCreate) 129 | 130 | s.Equal("nodeID2", eventCreate.ID) 131 | s.NodeClientMock.AssertExpectations(s.T()) 132 | s.NodeCacheMock.AssertExpectations(s.T()) 133 | } 134 | 135 | func (s *NodePollerTestSuite) Test_Run_MoreInCache() { 136 | expNodes := []swarm.Node{ 137 | {ID: "nodeID1"}, {ID: "nodeID2"}, 138 | } 139 | miniNode1 := NodeMini{ID: "nodeID1", EngineLabels: map[string]string{}, NodeLabels: map[string]string{}} 140 | miniNode2 := NodeMini{ID: "nodeID2", EngineLabels: map[string]string{}, NodeLabels: map[string]string{}} 141 | 142 | keys := map[string]struct{}{} 143 | keys["nodeID1"] = struct{}{} 144 | keys["nodeID2"] = struct{}{} 145 | keys["nodeID3"] = struct{}{} 146 | 147 | eventChan := make(chan Event) 148 | 149 | s.NodeClientMock. 150 | On("NodeList", mock.AnythingOfType("*context.emptyCtx")).Return(expNodes, nil) 151 | 152 | s.NodeCacheMock. 153 | On("Keys").Return(keys). 154 | On("IsNewOrUpdated", miniNode1).Return(true). 
155 | On("IsNewOrUpdated", miniNode2).Return(false) 156 | 157 | go s.NodePoller.Run(eventChan) 158 | 159 | timeout := time.NewTimer(time.Second * 5).C 160 | var eventCreate *Event 161 | var eventRemove *Event 162 | eventsNum := 0 163 | 164 | for { 165 | if eventsNum == 2 { 166 | break 167 | } 168 | select { 169 | case event := <-eventChan: 170 | if event.ID == "nodeID1" { 171 | eventCreate = &event 172 | } else if event.ID == "nodeID3" { 173 | eventRemove = &event 174 | } 175 | eventsNum++ 176 | case <-timeout: 177 | s.FailNow("Timeout") 178 | } 179 | } 180 | 181 | s.Equal(2, eventsNum) 182 | s.Require().NotNil(eventCreate) 183 | s.Require().NotNil(eventRemove) 184 | 185 | s.Equal("nodeID1", eventCreate.ID) 186 | s.Equal("nodeID3", eventRemove.ID) 187 | s.NodeClientMock.AssertExpectations(s.T()) 188 | s.NodeCacheMock.AssertExpectations(s.T()) 189 | 190 | } 191 | -------------------------------------------------------------------------------- /service/notifier.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | "log" 8 | "net/http" 9 | "net/url" 10 | "strings" 11 | "time" 12 | 13 | "github.com/docker-flow/docker-flow-swarm-listener/metrics" 14 | ) 15 | 16 | // NotifyType is the type of notification to send 17 | type NotifyType string 18 | 19 | // NotificationSender sends notifications to listeners 20 | type NotificationSender interface { 21 | Create(ctx context.Context, params string) error 22 | Remove(ctx context.Context, params string) error 23 | GetCreateAddr() string 24 | GetRemoveAddr() string 25 | } 26 | 27 | // Notifier implements `NotificationSender` 28 | type Notifier struct { 29 | createAddr string 30 | createHTTPMethod string 31 | removeAddr string 32 | removeHTTPMethod string 33 | notifyType string 34 | retries int 35 | interval int 36 | createErrorMetric string 37 | removeErrorMetric string 38 | log *log.Logger 39 | } 40 | 41 | // NewNotifier returns a `Notifier` 42 | func NewNotifier( 43 | createAddr, removeAddr, createHTTPMethod, 44 | removeHTTPMethod, notifyType string, 45 | retries int, interval int, logger *log.Logger) *Notifier { 46 | return &Notifier{ 47 | createAddr: createAddr, 48 | createHTTPMethod: createHTTPMethod, 49 | removeAddr: removeAddr, 50 | removeHTTPMethod: removeHTTPMethod, 51 | notifyType: notifyType, 52 | retries: retries, 53 | interval: interval, 54 | createErrorMetric: fmt.Sprintf("notificationSendCreate%sRequest", notifyType), 55 | removeErrorMetric: fmt.Sprintf("notificationSendRemove%sRequest", notifyType), 56 | log: logger, 57 | } 58 | } 59 | 60 | // GetCreateAddr returns create addresses 61 | func (n Notifier) GetCreateAddr() string { 62 | return n.createAddr 63 | } 64 | 65 | // GetRemoveAddr returns create addresses 66 | func (n Notifier) GetRemoveAddr() string { 67 | return n.removeAddr 68 | } 69 | 70 | // Create sends create notifications to listeners 71 | func (n Notifier) Create(ctx context.Context, params string) error { 72 | if len(n.createAddr) == 0 { 73 | return nil 74 | } 75 | 76 | urlObj, err := url.Parse(n.createAddr) 77 | if err != nil { 78 | n.log.Printf("ERROR: %v", err) 79 | metrics.RecordError(n.createErrorMetric) 80 | return err 81 | } 82 | 83 | if len(params) > 0 { 84 | if currentParams := urlObj.Query().Encode(); len(currentParams) > 0 { 85 | newParams := fmt.Sprintf("%s&%s", currentParams, params) 86 | urlObj.RawQuery = newParams 87 | } else { 88 | urlObj.RawQuery = params 89 | } 90 | } 91 | 92 | fullURL := 
urlObj.String() 93 | req, err := http.NewRequest(n.createHTTPMethod, fullURL, nil) 94 | if err != nil { 95 | n.log.Printf("ERROR: Incorrect fullURL: %s", fullURL) 96 | metrics.RecordError(n.createErrorMetric) 97 | return err 98 | } 99 | req = req.WithContext(ctx) 100 | 101 | n.log.Printf("Sending %s created notification to %s", n.notifyType, fullURL) 102 | retryChan := make(chan int, 1) 103 | retryChan <- 1 104 | for { 105 | select { 106 | case i := <-retryChan: 107 | resp, err := http.DefaultClient.Do(req) 108 | if err != nil { 109 | if strings.Contains(err.Error(), "context") { 110 | n.log.Printf("Canceling %s create notification to %s", n.notifyType, fullURL) 111 | return nil 112 | } 113 | if i <= n.retries && n.interval > 0 { 114 | n.log.Printf("Retrying %s created notification to %s (%d try)", n.notifyType, fullURL, i) 115 | time.Sleep(time.Second * time.Duration(n.interval)) 116 | retryChan <- i + 1 117 | continue 118 | } else { 119 | n.log.Printf("ERROR: %v", err) 120 | metrics.RecordError(n.createErrorMetric) 121 | return err 122 | } 123 | } 124 | defer resp.Body.Close() 125 | 126 | if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { 127 | return nil 128 | } else if i <= n.retries && n.interval > 0 { 129 | n.log.Printf("Retrying %s created notification to %s (%d try)", n.notifyType, fullURL, i) 130 | time.Sleep(time.Second * time.Duration(n.interval)) 131 | retryChan <- i + 1 132 | continue 133 | } else if resp.StatusCode == http.StatusConflict || resp.StatusCode != http.StatusOK { 134 | body, err := ioutil.ReadAll(resp.Body) 135 | if err != nil { 136 | err = fmt.Errorf("Failed at retrying request to %s returned status code %d", fullURL, resp.StatusCode) 137 | n.log.Printf("ERROR: %v", err) 138 | metrics.RecordError(n.createErrorMetric) 139 | return err 140 | } 141 | err = fmt.Errorf("Failed at retrying request to %s returned status code %d\n%s", fullURL, resp.StatusCode, string(body[:])) 142 | n.log.Printf("ERROR: %v", err) 143 | metrics.RecordError(n.createErrorMetric) 144 | return err 145 | } 146 | err = fmt.Errorf("Failed at retrying request to %s returned status code %d", fullURL, resp.StatusCode) 147 | n.log.Printf("ERROR: %v", err) 148 | metrics.RecordError(n.createErrorMetric) 149 | return err 150 | case <-ctx.Done(): 151 | n.log.Printf("Canceling %s create notification to %s", n.notifyType, fullURL) 152 | return nil 153 | } 154 | 155 | } 156 | } 157 | 158 | // Remove sends remove notifications to listeners 159 | func (n Notifier) Remove(ctx context.Context, params string) error { 160 | if len(n.removeAddr) == 0 { 161 | return nil 162 | } 163 | 164 | urlObj, err := url.Parse(n.removeAddr) 165 | if err != nil { 166 | n.log.Printf("ERROR: %v", err) 167 | metrics.RecordError(n.removeErrorMetric) 168 | return err 169 | } 170 | 171 | if len(params) > 0 { 172 | if currentParams := urlObj.Query().Encode(); len(currentParams) > 0 { 173 | newParams := fmt.Sprintf("%s&%s", currentParams, params) 174 | urlObj.RawQuery = newParams 175 | } else { 176 | urlObj.RawQuery = params 177 | } 178 | } 179 | 180 | fullURL := urlObj.String() 181 | req, err := http.NewRequest(n.removeHTTPMethod, fullURL, nil) 182 | if err != nil { 183 | n.log.Printf("ERROR: Incorrect fullURL: %s", fullURL) 184 | metrics.RecordError(n.removeErrorMetric) 185 | return err 186 | } 187 | req = req.WithContext(ctx) 188 | 189 | n.log.Printf("Sending %s removed notification to %s", n.notifyType, fullURL) 190 | retryChan := make(chan int, 1) 191 | retryChan <- 1 192 | for { 193 | select { 194 | 
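// The buffered retryChan carries the attempt counter: each failed try sleeps
// for `interval` seconds and re-queues i+1, while the ctx.Done() case abandons
// the attempt as soon as the request context is canceled. Create (above) and
// Remove share this loop shape.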
case i := <-retryChan: 195 | resp, err := http.DefaultClient.Do(req) 196 | if err != nil { 197 | if strings.Contains(err.Error(), "context") { 198 | n.log.Printf("Canceling %s remove notification to %s", n.notifyType, fullURL) 199 | return nil 200 | } 201 | if i <= n.retries && n.interval > 0 { 202 | n.log.Printf("Retrying %s removed notification to %s (%d try)", n.notifyType, fullURL, i) 203 | time.Sleep(time.Second * time.Duration(n.interval)) 204 | retryChan <- i + 1 205 | continue 206 | } else { 207 | n.log.Printf("ERROR: %v", err) 208 | metrics.RecordError(n.removeErrorMetric) 209 | return err 210 | } 211 | } 212 | defer resp.Body.Close() 213 | 214 | if resp.StatusCode == http.StatusOK { 215 | return nil 216 | } else if i <= n.retries && n.interval > 0 { 217 | n.log.Printf("Retrying %s removed notification to %s (%d try)", n.notifyType, fullURL, i) 218 | time.Sleep(time.Second * time.Duration(n.interval)) 219 | retryChan <- i + 1 220 | continue 221 | } else { 222 | body, err := ioutil.ReadAll(resp.Body) 223 | if err != nil { 224 | err = fmt.Errorf("Failed at retrying request to %s returned status code %d", fullURL, resp.StatusCode) 225 | n.log.Printf("ERROR: %v", err) 226 | metrics.RecordError(n.removeErrorMetric) 227 | return err 228 | } 229 | err = fmt.Errorf("Failed at retrying request to %s returned status code %d\n%s", fullURL, resp.StatusCode, string(body[:])) 230 | n.log.Printf("ERROR: %v", err) 231 | metrics.RecordError(n.removeErrorMetric) 232 | return err 233 | } 234 | case <-ctx.Done(): 235 | n.log.Printf("Canceling %s remove notification to %s", n.notifyType, fullURL) 236 | return nil 237 | } 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /service/notifydistributor.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "net/http" 8 | "net/url" 9 | "os" 10 | "strings" 11 | "sync" 12 | ) 13 | 14 | // Notification is a service or node notification 15 | type Notification struct { 16 | EventType EventType 17 | ID string 18 | Parameters string 19 | TimeNano int64 20 | Context context.Context 21 | ErrorChan chan error 22 | } 23 | 24 | type internalNotification struct { 25 | Notification 26 | Ctx context.Context 27 | } 28 | 29 | // NotifyEndpoint holds Notifiers and channels to watch 30 | type NotifyEndpoint struct { 31 | ServiceNotifier NotificationSender 32 | NodeNotifier NotificationSender 33 | } 34 | 35 | // NotifyDistributing takes a stream of service and 36 | // node `Notification` and distributes them to listeners 37 | type NotifyDistributing interface { 38 | Run(serviceChan <-chan Notification, nodeChan <-chan Notification) 39 | HasServiceListeners() bool 40 | HasNodeListeners() bool 41 | } 42 | 43 | // NotifyDistributor distributes service and node notifications to `NotifyEndpoints` 44 | // `NotifyEndpoints` are keyed by hostname to send notifications to 45 | type NotifyDistributor struct { 46 | NotifyEndpoints map[string]NotifyEndpoint 47 | ServiceCancelManager CancelManaging 48 | NodeCancelManager CancelManaging 49 | log *log.Logger 50 | interval int 51 | } 52 | 53 | func newNotifyDistributor(notifyEndpoints map[string]NotifyEndpoint, 54 | serviceCancelManager CancelManaging, nodeCancelManager CancelManaging, 55 | interval int, logger *log.Logger) *NotifyDistributor { 56 | return &NotifyDistributor{ 57 | NotifyEndpoints: notifyEndpoints, 58 | ServiceCancelManager: serviceCancelManager, 59 | NodeCancelManager:
nodeCancelManager, 60 | interval: interval, 61 | log: logger, 62 | } 63 | } 64 | 65 | func newNotifyDistributorfromStrings( 66 | serviceCreateAddrs, serviceRemoveAddrs, nodeCreateAddrs, nodeRemoveAddrs, 67 | serviceCreateMethods, serviceRemoveMethods string, 68 | retries, interval int, logger *log.Logger) *NotifyDistributor { 69 | tempNotifyEP := map[string]map[string]string{} 70 | 71 | insertAddrStringIntoMap( 72 | tempNotifyEP, "createService", serviceCreateAddrs, 73 | "createServiceMethod", serviceCreateMethods) 74 | insertAddrStringIntoMap( 75 | tempNotifyEP, "removeService", serviceRemoveAddrs, 76 | "removeServiceMethod", serviceRemoveMethods) 77 | insertAddrStringIntoMap( 78 | tempNotifyEP, "createNode", nodeCreateAddrs, 79 | "createNodeMethod", http.MethodGet) 80 | insertAddrStringIntoMap( 81 | tempNotifyEP, "removeNode", nodeRemoveAddrs, 82 | "removeNodeMethod", http.MethodGet) 83 | 84 | notifyEndpoints := map[string]NotifyEndpoint{} 85 | 86 | for hostname, addrMap := range tempNotifyEP { 87 | ep := NotifyEndpoint{} 88 | if len(addrMap["createService"]) > 0 || len(addrMap["removeService"]) > 0 { 89 | ep.ServiceNotifier = NewNotifier( 90 | addrMap["createService"], 91 | addrMap["removeService"], 92 | addrMap["createServiceMethod"], 93 | addrMap["removeServiceMethod"], 94 | "service", 95 | retries, 96 | interval, 97 | logger, 98 | ) 99 | } 100 | if len(addrMap["createNode"]) > 0 || len(addrMap["removeNode"]) > 0 { 101 | ep.NodeNotifier = NewNotifier( 102 | addrMap["createNode"], 103 | addrMap["removeNode"], 104 | addrMap["createNodeMethod"], 105 | addrMap["removeNodeMethod"], 106 | "node", 107 | retries, 108 | interval, 109 | logger, 110 | ) 111 | } 112 | if ep.ServiceNotifier != nil || ep.NodeNotifier != nil { 113 | notifyEndpoints[hostname] = ep 114 | } 115 | } 116 | 117 | return newNotifyDistributor( 118 | notifyEndpoints, 119 | NewCancelManager(), 120 | NewCancelManager(), 121 | interval, 122 | logger) 123 | } 124 | 125 | func insertAddrStringIntoMap( 126 | tempEP map[string]map[string]string, key, addrs, methodsKey, methods string) { 127 | 128 | addrsList := strings.Split(addrs, ",") 129 | methodsList := strings.Split(methods, ",") 130 | maxMethodsIdx := len(methodsList) - 1 131 | 132 | for addrsIdx, v := range addrsList { 133 | urlObj, err := url.Parse(v) 134 | if err != nil { 135 | continue 136 | } 137 | host := urlObj.Host 138 | if len(host) == 0 { 139 | continue 140 | } 141 | if tempEP[host] == nil { 142 | tempEP[host] = map[string]string{} 143 | } 144 | tempEP[host][key] = v 145 | 146 | if addrsIdx <= maxMethodsIdx { 147 | tempEP[host][methodsKey] = methodsList[addrsIdx] 148 | } else { 149 | tempEP[host][methodsKey] = methodsList[maxMethodsIdx] 150 | } 151 | } 152 | } 153 | 154 | // NewNotifyDistributorFromEnv creates `NotifyDistributor` from environment variables 155 | func NewNotifyDistributorFromEnv(retries, interval int, 156 | extraCreateServiceAddr, extraRemoveServiceAddr, 157 | extraCreateNodeAddr, extraRemoveNodeAddr string, 158 | logger *log.Logger) *NotifyDistributor { 159 | var createServiceAddr, removeServiceAddr string 160 | if len(os.Getenv("DF_NOTIF_CREATE_SERVICE_URL")) > 0 { 161 | createServiceAddr = os.Getenv("DF_NOTIF_CREATE_SERVICE_URL") 162 | } else if len(os.Getenv("DF_NOTIFY_CREATE_SERVICE_URL")) > 0 { 163 | createServiceAddr = os.Getenv("DF_NOTIFY_CREATE_SERVICE_URL") 164 | } else { 165 | createServiceAddr = os.Getenv("DF_NOTIFICATION_URL") 166 | } 167 | if len(os.Getenv("DF_NOTIF_REMOVE_SERVICE_URL")) > 0 { 168 | removeServiceAddr = 
os.Getenv("DF_NOTIF_REMOVE_SERVICE_URL") 169 | } else if len(os.Getenv("DF_NOTIFY_REMOVE_SERVICE_URL")) > 0 { 170 | removeServiceAddr = os.Getenv("DF_NOTIFY_REMOVE_SERVICE_URL") 171 | } else { 172 | removeServiceAddr = os.Getenv("DF_NOTIFICATION_URL") 173 | } 174 | createNodeAddr := os.Getenv("DF_NOTIFY_CREATE_NODE_URL") 175 | removeNodeAddr := os.Getenv("DF_NOTIFY_REMOVE_NODE_URL") 176 | 177 | createServiceMethods := strings.ToUpper(os.Getenv("DF_NOTIFY_CREATE_SERVICE_METHOD")) 178 | removeServiceMethods := strings.ToUpper(os.Getenv("DF_NOTIFY_REMOVE_SERVICE_METHOD")) 179 | 180 | if len(extraCreateServiceAddr) > 0 { 181 | createServiceAddr = fmt.Sprintf("%s,%s", createServiceAddr, extraCreateServiceAddr) 182 | } 183 | if len(extraRemoveServiceAddr) > 0 { 184 | removeServiceAddr = fmt.Sprintf("%s,%s", removeServiceAddr, extraRemoveServiceAddr) 185 | } 186 | if len(extraCreateNodeAddr) > 0 { 187 | createNodeAddr = fmt.Sprintf("%s,%s", createNodeAddr, extraCreateNodeAddr) 188 | } 189 | if len(extraRemoveNodeAddr) > 0 { 190 | removeNodeAddr = fmt.Sprintf("%s,%s", removeNodeAddr, extraRemoveNodeAddr) 191 | } 192 | 193 | if len(createServiceMethods) == 0 { 194 | createServiceMethods = http.MethodGet 195 | } 196 | if len(removeServiceMethods) == 0 { 197 | removeServiceMethods = http.MethodGet 198 | } 199 | 200 | return newNotifyDistributorfromStrings( 201 | createServiceAddr, removeServiceAddr, createNodeAddr, removeNodeAddr, 202 | createServiceMethods, removeServiceMethods, retries, interval, logger) 203 | 204 | } 205 | 206 | // Run starts the distributor 207 | func (d NotifyDistributor) Run(serviceChan <-chan Notification, nodeChan <-chan Notification) { 208 | 209 | if serviceChan != nil { 210 | go func() { 211 | for n := range serviceChan { 212 | go d.distributeServiceNotification(n) 213 | } 214 | }() 215 | } 216 | if nodeChan != nil { 217 | go func() { 218 | for n := range nodeChan { 219 | go d.distributeNodeNotification(n) 220 | } 221 | }() 222 | } 223 | } 224 | 225 | func (d NotifyDistributor) distributeServiceNotification(n Notification) { 226 | // Use time as request id 227 | ctx := d.ServiceCancelManager.Add(context.Background(), n.ID, n.TimeNano) 228 | defer d.ServiceCancelManager.Delete(n.ID, n.TimeNano) 229 | 230 | var wg sync.WaitGroup 231 | for _, endpoint := range d.NotifyEndpoints { 232 | wg.Add(1) 233 | go func(endpoint NotifyEndpoint) { 234 | defer wg.Done() 235 | d.processServiceNotification(ctx, n, endpoint) 236 | }(endpoint) 237 | } 238 | wg.Wait() 239 | 240 | if n.ErrorChan != nil { 241 | n.ErrorChan <- nil 242 | } 243 | } 244 | 245 | func (d NotifyDistributor) distributeNodeNotification(n Notification) { 246 | // Use time as request id 247 | ctx := d.NodeCancelManager.Add(context.Background(), n.ID, n.TimeNano) 248 | defer d.NodeCancelManager.Delete(n.ID, n.TimeNano) 249 | 250 | var wg sync.WaitGroup 251 | for _, endpoint := range d.NotifyEndpoints { 252 | wg.Add(1) 253 | go func(endpoint NotifyEndpoint) { 254 | defer wg.Done() 255 | d.processNodeNotification(ctx, n, endpoint) 256 | }(endpoint) 257 | } 258 | wg.Wait() 259 | if n.ErrorChan != nil { 260 | n.ErrorChan <- nil 261 | } 262 | } 263 | 264 | func (d NotifyDistributor) processServiceNotification( 265 | ctx context.Context, n Notification, endpoint NotifyEndpoint) { 266 | 267 | if endpoint.ServiceNotifier == nil { 268 | return 269 | } 270 | 271 | if n.EventType == EventTypeCreate { 272 | err := endpoint.ServiceNotifier.Create(ctx, n.Parameters) 273 | if err != nil && !strings.Contains(err.Error(), "context canceled") 
{ 274 | d.log.Printf("ERROR: Unable to send ServiceCreateNotify to %s, params: %s", endpoint.ServiceNotifier.GetCreateAddr(), n.Parameters) 275 | } 276 | } else if n.EventType == EventTypeRemove { 277 | err := endpoint.ServiceNotifier.Remove(ctx, n.Parameters) 278 | if err != nil && !strings.Contains(err.Error(), "context canceled") { 279 | d.log.Printf("ERROR: Unable to send ServiceRemoveNotify to %s, params: %s", endpoint.ServiceNotifier.GetRemoveAddr(), n.Parameters) 280 | } 281 | } 282 | } 283 | 284 | func (d NotifyDistributor) processNodeNotification( 285 | ctx context.Context, n Notification, endpoint NotifyEndpoint) { 286 | 287 | if endpoint.NodeNotifier == nil { 288 | return 289 | } 290 | 291 | if n.EventType == EventTypeCreate { 292 | err := endpoint.NodeNotifier.Create(ctx, n.Parameters) 293 | if err != nil { 294 | d.log.Printf("ERROR: Unable to send NodeCreateNotify to %s, params: %s", 295 | endpoint.NodeNotifier.GetCreateAddr(), n.Parameters) 296 | } 297 | } else if n.EventType == EventTypeRemove { 298 | err := endpoint.NodeNotifier.Remove(ctx, n.Parameters) 299 | if err != nil { 300 | d.log.Printf("ERROR: Unable to send NodeRemoveNotify to %s, params: %s", 301 | endpoint.NodeNotifier.GetRemoveAddr(), n.Parameters) 302 | } 303 | } 304 | } 305 | 306 | // HasServiceListeners returns true when there are service listeners 307 | func (d NotifyDistributor) HasServiceListeners() bool { 308 | for _, endpoint := range d.NotifyEndpoints { 309 | if endpoint.ServiceNotifier != nil { 310 | return true 311 | } 312 | } 313 | return false 314 | } 315 | 316 | // HasNodeListeners returns true when there are node listeners 317 | func (d NotifyDistributor) HasNodeListeners() bool { 318 | for _, endpoint := range d.NotifyEndpoints { 319 | if endpoint.NodeNotifier != nil { 320 | return true 321 | } 322 | } 323 | return false 324 | } 325 | -------------------------------------------------------------------------------- /service/parameters.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/url" 7 | "strings" 8 | ) 9 | 10 | // GetNodeMiniCreateParameters converts `NodeMini` into parameters 11 | func GetNodeMiniCreateParameters(node NodeMini) map[string]string { 12 | params := map[string]string{} 13 | 14 | for k, v := range node.EngineLabels { 15 | if !strings.HasPrefix(k, "com.df.") { 16 | continue 17 | } 18 | key := strings.TrimPrefix(k, "com.df.") 19 | if len(key) > 0 { 20 | params[key] = v 21 | } 22 | } 23 | 24 | for k, v := range node.NodeLabels { 25 | if !strings.HasPrefix(k, "com.df.") { 26 | continue 27 | } 28 | key := strings.TrimPrefix(k, "com.df.") 29 | if len(key) > 0 { 30 | params[key] = v 31 | } 32 | } 33 | 34 | params["id"] = node.ID 35 | params["hostname"] = node.Hostname 36 | params["address"] = node.Addr 37 | params["versionIndex"] = fmt.Sprintf("%d", node.VersionIndex) 38 | params["state"] = string(node.State) 39 | params["role"] = string(node.Role) 40 | params["availability"] = string(node.Availability) 41 | return params 42 | } 43 | 44 | // GetSwarmServiceMiniCreateParameters converts `SwarmServiceMini` into parameters 45 | func GetSwarmServiceMiniCreateParameters(ssm SwarmServiceMini) map[string]string { 46 | params := map[string]string{} 47 | for k, v := range ssm.Labels { 48 | if !strings.HasPrefix(k, "com.df.") { 49 | continue 50 | } 51 | key := strings.TrimPrefix(k, "com.df.") 52 | if len(key) > 0 { 53 | params[key] = v 54 | } 55 | } 56 | serviceName := ssm.Name 57 | stackName :=
ssm.Labels["com.docker.stack.namespace"] 58 | if len(stackName) > 0 && 59 | strings.EqualFold(ssm.Labels["com.df.shortName"], "true") { 60 | serviceName = strings.TrimPrefix(serviceName, stackName+"_") 61 | } 62 | params["serviceName"] = serviceName 63 | 64 | if !ssm.Global { 65 | params["replicas"] = fmt.Sprintf("%d", ssm.Replicas) 66 | } 67 | 68 | if _, ok := params["distribute"]; !ok { 69 | params["distribute"] = "true" 70 | } 71 | 72 | if ssm.NodeInfo != nil { 73 | b, err := json.Marshal(ssm.NodeInfo) 74 | if err == nil { 75 | params["nodeInfo"] = string(b) 76 | } 77 | } 78 | 79 | return params 80 | } 81 | 82 | // GetNodeMiniRemoveParameters converts `NodeMini` into remove parameters 83 | func GetNodeMiniRemoveParameters(node NodeMini) map[string]string { 84 | params := map[string]string{} 85 | params["id"] = node.ID 86 | params["hostname"] = node.Hostname 87 | params["address"] = node.Addr 88 | 89 | return params 90 | } 91 | 92 | // GetSwarmServiceMiniRemoveParameters converts `SwarmServiceMini` into remove parameters 93 | func GetSwarmServiceMiniRemoveParameters(ssm SwarmServiceMini) map[string]string { 94 | params := map[string]string{} 95 | for k, v := range ssm.Labels { 96 | if !strings.HasPrefix(k, "com.df.") { 97 | continue 98 | } 99 | key := strings.TrimPrefix(k, "com.df.") 100 | if len(key) > 0 { 101 | params[key] = v 102 | } 103 | } 104 | serviceName := ssm.Name 105 | stackName := ssm.Labels["com.docker.stack.namespace"] 106 | if len(stackName) > 0 && 107 | strings.EqualFold(ssm.Labels["com.df.shortName"], "true") { 108 | serviceName = strings.TrimPrefix(serviceName, stackName+"_") 109 | } 110 | params["serviceName"] = serviceName 111 | 112 | if v, ok := ssm.Labels["com.df.distribute"]; ok { 113 | params["distribute"] = v 114 | } 115 | 116 | if _, ok := params["distribute"]; !ok { 117 | params["distribute"] = "true" 118 | } 119 | return params 120 | } 121 | 122 | // ConvertMapStringStringToURLValues converts params to `url.Values` 123 | func ConvertMapStringStringToURLValues(params map[string]string) url.Values { 124 | values := url.Values{} 125 | for k, v := range params { 126 | values.Set(k, v) 127 | } 128 | return values 129 | } 130 | -------------------------------------------------------------------------------- /service/parameters_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "net/url" 6 | "testing" 7 | 8 | "github.com/docker/docker/api/types/swarm" 9 | "github.com/stretchr/testify/suite" 10 | ) 11 | 12 | type ParametersTestSuite struct { 13 | suite.Suite 14 | } 15 | 16 | func TestParametersUnitTestSuite(t *testing.T) { 17 | suite.Run(t, new(ParametersTestSuite)) 18 | } 19 | 20 | func (s *ParametersTestSuite) Test_GetNodeMiniCreateParameters_DistributeUndefined_AddsDistrubte() { 21 | nm := getNewNodeMini() 22 | 23 | expected := map[string]string{ 24 | "id": "nodeID", 25 | "hostname": "nodehostname", 26 | "address": "nodeaddr", 27 | "versionIndex": "10", 28 | "state": "ready", 29 | "role": "worker", 30 | "availability": "active", 31 | "world": "round", 32 | "wow": "yup", 33 | } 34 | 35 | params := GetNodeMiniCreateParameters(nm) 36 | s.Equal(expected, params) 37 | } 38 | 39 | func (s *ParametersTestSuite) Test_GetNodeMiniCreateParameters_LabelsTakeSecondPriority() { 40 | nm := getNewNodeMini() 41 | nm.NodeLabels["com.df.state"] = "cow" 42 | nm.Role = swarm.NodeRoleManager 43 | nm.Availability = swarm.NodeAvailabilityDrain 44 | 45 | expected := map[string]string{ 46 | 
"id": "nodeID", 47 | "hostname": "nodehostname", 48 | "address": "nodeaddr", 49 | "versionIndex": "10", 50 | "state": "ready", 51 | "role": "manager", 52 | "availability": "drain", 53 | "world": "round", 54 | "wow": "yup", 55 | } 56 | params := GetNodeMiniCreateParameters(nm) 57 | s.Equal(expected, params) 58 | 59 | } 60 | 61 | func (s *ParametersTestSuite) Test_GetNodeMiniCreateParameters_NodeLabelsHigherPriority() { 62 | nm := getNewNodeMini() 63 | nm.NodeLabels["com.df.dogs"] = "chase" 64 | nm.EngineLabels["com.df.dogs"] = "cry" 65 | 66 | expected := map[string]string{ 67 | "id": "nodeID", 68 | "hostname": "nodehostname", 69 | "address": "nodeaddr", 70 | "versionIndex": "10", 71 | "state": "ready", 72 | "role": "worker", 73 | "availability": "active", 74 | "world": "round", 75 | "wow": "yup", 76 | "dogs": "chase", 77 | } 78 | 79 | params := GetNodeMiniCreateParameters(nm) 80 | s.Equal(expected, params) 81 | } 82 | 83 | func (s *ParametersTestSuite) Test_GetNodeMiniRemoveParameters() { 84 | nm := getNewNodeMini() 85 | 86 | expected := map[string]string{ 87 | "id": "nodeID", 88 | "hostname": "nodehostname", 89 | "address": "nodeaddr", 90 | } 91 | params := GetNodeMiniRemoveParameters(nm) 92 | s.Equal(expected, params) 93 | } 94 | 95 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_Global() { 96 | ssm := getNewSwarmServiceMini() 97 | ssm.Replicas = uint64(0) 98 | ssm.Global = true 99 | 100 | b, err := json.Marshal(ssm.NodeInfo) 101 | s.Require().NoError(err) 102 | 103 | expected := map[string]string{ 104 | "serviceName": "demo-go", 105 | "hello": "nyc", 106 | "distribute": "true", 107 | "nodeInfo": string(b), 108 | } 109 | 110 | params := GetSwarmServiceMiniCreateParameters(ssm) 111 | s.Equal(expected, params) 112 | } 113 | 114 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_LabelsTakeSecondPriority() { 115 | ssm := getNewSwarmServiceMini() 116 | ssm.Labels["com.df.serviceName"] = "thisisbad" 117 | 118 | b, err := json.Marshal(ssm.NodeInfo) 119 | s.Require().NoError(err) 120 | 121 | expected := map[string]string{ 122 | "serviceName": "demo-go", 123 | "hello": "nyc", 124 | "distribute": "true", 125 | "nodeInfo": string(b), 126 | "replicas": "3", 127 | } 128 | 129 | params := GetSwarmServiceMiniCreateParameters(ssm) 130 | s.Equal(expected, params) 131 | } 132 | 133 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_Replicas() { 134 | ssm := getNewSwarmServiceMini() 135 | 136 | b, err := json.Marshal(ssm.NodeInfo) 137 | s.Require().NoError(err) 138 | 139 | expected := map[string]string{ 140 | "serviceName": "demo-go", 141 | "hello": "nyc", 142 | "distribute": "true", 143 | "nodeInfo": string(b), 144 | "replicas": "3", 145 | } 146 | 147 | params := GetSwarmServiceMiniCreateParameters(ssm) 148 | s.Equal(expected, params) 149 | } 150 | 151 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_DistributeDefined() { 152 | ssm := getNewSwarmServiceMini() 153 | ssm.Labels["com.df.distribute"] = "false" 154 | 155 | b, err := json.Marshal(ssm.NodeInfo) 156 | s.Require().NoError(err) 157 | 158 | expected := map[string]string{ 159 | "serviceName": "demo-go", 160 | "hello": "nyc", 161 | "nodeInfo": string(b), 162 | "distribute": "false", 163 | "replicas": "3", 164 | } 165 | 166 | params := GetSwarmServiceMiniCreateParameters(ssm) 167 | s.Equal(expected, params) 168 | 169 | } 170 | 171 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_NoNodeInfo() { 172 | ssm := getNewSwarmServiceMini() 173 | 
ssm.NodeInfo = nil 174 | 175 | expected := map[string]string{ 176 | "serviceName": "demo-go", 177 | "hello": "nyc", 178 | "distribute": "true", 179 | "replicas": "3", 180 | } 181 | 182 | params := GetSwarmServiceMiniCreateParameters(ssm) 183 | s.Equal(expected, params) 184 | 185 | } 186 | 187 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_StackNamespace_ShortNameTrue_Combines_ServiceName() { 188 | ssm := getNewSwarmServiceMini() 189 | ssm.Name = "stack_demo-go" 190 | ssm.Labels["com.docker.stack.namespace"] = "stack" 191 | ssm.Labels["com.df.shortName"] = "true" 192 | 193 | b, err := json.Marshal(ssm.NodeInfo) 194 | s.Require().NoError(err) 195 | 196 | expected := map[string]string{ 197 | "serviceName": "demo-go", 198 | "hello": "nyc", 199 | "nodeInfo": string(b), 200 | "distribute": "true", 201 | "replicas": "3", 202 | "shortName": "true", 203 | } 204 | 205 | params := GetSwarmServiceMiniCreateParameters(ssm) 206 | s.Equal(expected, params) 207 | } 208 | 209 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_StackNamespace_ShortNameUndefined_DoesNotCombineServiceName() { 210 | ssm := getNewSwarmServiceMini() 211 | ssm.Name = "stack_demo-go" 212 | ssm.Labels["com.docker.stack.namespace"] = "stack" 213 | 214 | b, err := json.Marshal(ssm.NodeInfo) 215 | s.Require().NoError(err) 216 | 217 | expected := map[string]string{ 218 | "serviceName": "stack_demo-go", 219 | "hello": "nyc", 220 | "nodeInfo": string(b), 221 | "distribute": "true", 222 | "replicas": "3", 223 | } 224 | 225 | params := GetSwarmServiceMiniCreateParameters(ssm) 226 | s.Equal(expected, params) 227 | } 228 | 229 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniCreateParameters_StackNamespace_ShortNameFalse_DoesNotCombineServiceName() { 230 | ssm := getNewSwarmServiceMini() 231 | ssm.Name = "stack_demo-go" 232 | ssm.Labels["com.docker.stack.namespace"] = "stack" 233 | ssm.Labels["com.df.shortName"] = "false" 234 | 235 | b, err := json.Marshal(ssm.NodeInfo) 236 | s.Require().NoError(err) 237 | 238 | expected := map[string]string{ 239 | "serviceName": "stack_demo-go", 240 | "hello": "nyc", 241 | "nodeInfo": string(b), 242 | "distribute": "true", 243 | "replicas": "3", 244 | "shortName": "false", 245 | } 246 | 247 | params := GetSwarmServiceMiniCreateParameters(ssm) 248 | s.Equal(expected, params) 249 | } 250 | 251 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniRemoveParameters() { 252 | ssm := getNewSwarmServiceMini() 253 | expected := map[string]string{ 254 | "serviceName": "demo-go", 255 | "distribute": "true", 256 | "hello": "nyc", 257 | } 258 | params := GetSwarmServiceMiniRemoveParameters(ssm) 259 | s.Equal(expected, params) 260 | } 261 | 262 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniRemoveParameters_ShortNameUndefined_DoesNotCombineServiceName() { 263 | ssm := getNewSwarmServiceMini() 264 | ssm.Name = "stack_demo-go" 265 | ssm.Labels["com.docker.stack.namespace"] = "stack" 266 | 267 | expected := map[string]string{ 268 | "serviceName": "stack_demo-go", 269 | "distribute": "true", 270 | "hello": "nyc", 271 | } 272 | params := GetSwarmServiceMiniRemoveParameters(ssm) 273 | s.Equal(expected, params) 274 | } 275 | 276 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniRemoveParameters_ShortNameFalse_DoesNotCombineServiceName() { 277 | ssm := getNewSwarmServiceMini() 278 | ssm.Name = "stack_demo-go" 279 | ssm.Labels["com.docker.stack.namespace"] = "stack" 280 | ssm.Labels["com.df.shortName"] = "false" 281 | 282 | expected := map[string]string{ 283 
| "serviceName": "stack_demo-go", 284 | "distribute": "true", 285 | "hello": "nyc", 286 | "shortName": "false", 287 | } 288 | params := GetSwarmServiceMiniRemoveParameters(ssm) 289 | s.Equal(expected, params) 290 | } 291 | 292 | func (s *ParametersTestSuite) Test_GetSwarmServiceMiniRemoveParameters_ShortNameTrue_CombineServiceName() { 293 | ssm := getNewSwarmServiceMini() 294 | ssm.Name = "stack_demo-go" 295 | ssm.Labels["com.docker.stack.namespace"] = "stack" 296 | ssm.Labels["com.df.shortName"] = "true" 297 | 298 | expected := map[string]string{ 299 | "serviceName": "demo-go", 300 | "distribute": "true", 301 | "hello": "nyc", 302 | "shortName": "true", 303 | } 304 | params := GetSwarmServiceMiniRemoveParameters(ssm) 305 | s.Equal(expected, params) 306 | } 307 | 308 | func (s *ParametersTestSuite) Test_ConvertMapStringStringToURLValues() { 309 | expected := url.Values{} 310 | expected.Add("id", "nodeID") 311 | expected.Add("hostname", "nodehostname") 312 | expected.Add("versionIndex", "10") 313 | expected.Add("state", "ready") 314 | expected.Add("role", "worker") 315 | expected.Add("availability", "active") 316 | 317 | // labels 318 | expected.Add("world", "round") 319 | expected.Add("wow", "yup") 320 | 321 | params := map[string]string{ 322 | "id": "nodeID", 323 | "hostname": "nodehostname", 324 | "versionIndex": "10", 325 | "state": "ready", 326 | "role": "worker", 327 | "availability": "active", 328 | "world": "round", 329 | "wow": "yup", 330 | } 331 | 332 | convertedURLValues := ConvertMapStringStringToURLValues(params) 333 | 334 | for k := range params { 335 | s.Equal(expected.Get(k), 336 | convertedURLValues.Get(k)) 337 | } 338 | 339 | } 340 | -------------------------------------------------------------------------------- /service/service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "strings" 8 | 9 | "github.com/docker/docker/api/types" 10 | "github.com/docker/docker/api/types/filters" 11 | "github.com/docker/docker/api/types/swarm" 12 | "github.com/docker/docker/client" 13 | ) 14 | 15 | // SwarmServiceInspector is able to inspect services 16 | type SwarmServiceInspector interface { 17 | SwarmServiceInspect(ctx context.Context, serviceID string) (*SwarmService, error) 18 | SwarmServiceList(ctx context.Context) ([]SwarmService, error) 19 | GetNodeInfo(ctx context.Context, ss SwarmService) (NodeIPSet, error) 20 | SwarmServiceRunning(ctx context.Context, serviceID string) (bool, error) 21 | } 22 | 23 | // SwarmServiceClient implements `SwarmServiceInspector` for docker 24 | type SwarmServiceClient struct { 25 | DockerClient *client.Client 26 | FilterLabel string 27 | FilterKey string 28 | ScrapeNetLabel string 29 | ServiceNamePrefix string 30 | IncludeTaskAddressInNodeInfo bool 31 | Log *log.Logger 32 | } 33 | 34 | // NewSwarmServiceClient creates a `SwarmServiceClient` 35 | func NewSwarmServiceClient( 36 | c *client.Client, filterLabel, scrapNetLabel string, serviceNamePrefix string, includeAddressInNodeInfo bool, 37 | logger *log.Logger) *SwarmServiceClient { 38 | key := strings.SplitN(filterLabel, "=", 2)[0] 39 | return &SwarmServiceClient{DockerClient: c, 40 | FilterLabel: filterLabel, 41 | FilterKey: key, 42 | ScrapeNetLabel: scrapNetLabel, 43 | ServiceNamePrefix: serviceNamePrefix, 44 | IncludeTaskAddressInNodeInfo: includeAddressInNodeInfo, 45 | Log: logger, 46 | } 47 | } 48 | 49 | // SwarmServiceInspect returns `SwarmService` from its ID 50 | // Returns nil when 
the service does not have the `FilterLabel` 51 | // When `includeNodeIPInfo` is true, return node info as well 52 | func (c SwarmServiceClient) SwarmServiceInspect(ctx context.Context, serviceID string) (*SwarmService, error) { 53 | service, _, err := c.DockerClient.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | // Check if the service has the label set to true 59 | if value, ok := service.Spec.Labels[c.FilterKey]; !ok || !strings.EqualFold(value, "true") { 60 | return nil, nil 61 | } 62 | 63 | if len(c.ServiceNamePrefix) > 0 { 64 | service.Spec.Name = fmt.Sprintf("%s_%s", c.ServiceNamePrefix, service.Spec.Name) 65 | } 66 | 67 | ss := SwarmService{service, nil} 68 | return &ss, nil 69 | } 70 | 71 | // SwarmServiceList returns a list of services 72 | func (c SwarmServiceClient) SwarmServiceList(ctx context.Context) ([]SwarmService, error) { 73 | filter := filters.NewArgs() 74 | filter.Add("label", c.FilterLabel) 75 | services, err := c.DockerClient.ServiceList(ctx, types.ServiceListOptions{Filters: filter}) 76 | if err != nil { 77 | return nil, err 78 | } 79 | swarmServices := []SwarmService{} 80 | for _, s := range services { 81 | if len(c.ServiceNamePrefix) > 0 { 82 | s.Spec.Name = fmt.Sprintf("%s_%s", c.ServiceNamePrefix, s.Spec.Name) 83 | } 84 | ss := SwarmService{s, nil} 85 | swarmServices = append(swarmServices, ss) 86 | } 87 | return swarmServices, nil 88 | } 89 | 90 | // GetNodeInfo returns node info for swarm service 91 | func (c SwarmServiceClient) GetNodeInfo(ctx context.Context, ss SwarmService) (NodeIPSet, error) { 92 | 93 | // Services that do not have `ScrapeNetLabel` exit early, 94 | // avoiding the task list lookup 95 | _, ok := ss.Spec.Labels[c.ScrapeNetLabel] 96 | if !ok { 97 | return nil, nil 98 | } 99 | 100 | taskList, err := GetTaskList(ctx, c.DockerClient, ss.ID) 101 | if err != nil { 102 | return NodeIPSet{}, err 103 | } 104 | return c.getNodeInfo(ctx, taskList, ss.Service) 105 | } 106 | 107 | // SwarmServiceRunning returns true if service is running 108 | func (c SwarmServiceClient) SwarmServiceRunning(ctx context.Context, serviceID string) (bool, error) { 109 | return TasksAllRunning(ctx, c.DockerClient, serviceID) 110 | } 111 | 112 | func (c SwarmServiceClient) getNodeInfo(ctx context.Context, taskList []swarm.Task, ss swarm.Service) (NodeIPSet, error) { 113 | 114 | networkName, ok := ss.Spec.Labels[c.ScrapeNetLabel] 115 | if c.IncludeTaskAddressInNodeInfo && !ok { 116 | return nil, fmt.Errorf("Unable to get NodeInfo: %s label is not defined for service %s", c.ScrapeNetLabel, ss.Spec.Name) 117 | } 118 | 119 | nodeInfo := NodeIPSet{} 120 | nodeIPCache := map[string]string{} 121 | for _, task := range taskList { 122 | 123 | address := "" 124 | if c.IncludeTaskAddressInNodeInfo { 125 | 126 | if len(task.NetworksAttachments) == 0 || len(task.NetworksAttachments[0].Addresses) == 0 { 127 | continue 128 | } 129 | for _, networkAttach := range task.NetworksAttachments { 130 | if networkAttach.Network.Spec.Name == networkName && len(networkAttach.Addresses) > 0 { 131 | address = strings.Split(networkAttach.Addresses[0], "/")[0] 132 | } 133 | } 134 | 135 | if len(address) == 0 { 136 | continue 137 | } 138 | } 139 | 140 | if nodeName, ok := nodeIPCache[task.NodeID]; ok { 141 | nodeInfo.Add(nodeName, address, task.NodeID) 142 | } else { 143 | node, _, err := c.DockerClient.NodeInspectWithRaw(ctx, task.NodeID) 144 | if err != nil { 145 | continue 146 | } 147 | nodeInfo.Add(node.Description.Hostname,
address, task.NodeID) 148 | nodeIPCache[task.NodeID] = node.Description.Hostname 149 | } 150 | } 151 | 152 | if nodeInfo.Cardinality() == 0 { 153 | return nil, nil 154 | } 155 | return nodeInfo, nil 156 | } 157 | -------------------------------------------------------------------------------- /service/service_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "log" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/suite" 11 | ) 12 | 13 | type SwarmServiceClientTestSuite struct { 14 | suite.Suite 15 | SClient *SwarmServiceClient 16 | Util1ID string 17 | Util2ID string 18 | Util3ID string 19 | Util4ID string 20 | Logger *log.Logger 21 | LogBytes *bytes.Buffer 22 | } 23 | 24 | func TestSwarmServiceClientTestSuite(t *testing.T) { 25 | suite.Run(t, new(SwarmServiceClientTestSuite)) 26 | } 27 | 28 | func (s *SwarmServiceClientTestSuite) SetupSuite() { 29 | createTestOverlayNetwork("util-network") 30 | createTestService("util-1", []string{"com.df.notify=true", "com.df.scrapeNetwork=util-network"}, false, "", "util-network") 31 | createTestService("util-2", []string{"com.df.notify=false"}, false, "", "util-network") 32 | createTestService("util-3", []string{"com.df.notify=true"}, true, "", "util-network") 33 | createTestService("util-4", []string{"com.df.notify=true", "com.df.scrapeNetwork=util-network"}, false, "2", "util-network") 34 | createTestService("util-5", []string{}, false, "", "util-network") 35 | 36 | time.Sleep(time.Second) 37 | ID1, err := getServiceID("util-1") 38 | s.Require().NoError(err) 39 | s.Util1ID = ID1 40 | 41 | ID2, err := getServiceID("util-2") 42 | s.Require().NoError(err) 43 | s.Util2ID = ID2 44 | 45 | ID3, err := getServiceID("util-3") 46 | s.Require().NoError(err) 47 | s.Util3ID = ID3 48 | 49 | ID4, err := getServiceID("util-4") 50 | s.Require().NoError(err) 51 | s.Util4ID = ID4 52 | } 53 | 54 | func (s *SwarmServiceClientTestSuite) SetupTest() { 55 | c, err := NewDockerClientFromEnv() 56 | s.Require().NoError(err) 57 | 58 | s.LogBytes = new(bytes.Buffer) 59 | s.Logger = log.New(s.LogBytes, "", 0) 60 | 61 | s.SClient = NewSwarmServiceClient(c, "com.df.notify=true", "com.df.scrapeNetwork", "", true, s.Logger) 62 | } 63 | 64 | func (s *SwarmServiceClientTestSuite) TearDownSuite() { 65 | removeTestService("util-1") 66 | removeTestService("util-2") 67 | removeTestService("util-3") 68 | removeTestService("util-4") 69 | removeTestNetwork("util-network") 70 | } 71 | 72 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceInspect_NodeInfo_UndefinedScrapeNetwork() { 73 | 74 | util3Service, err := s.SClient.SwarmServiceInspect(context.Background(), s.Util3ID) 75 | s.Require().NoError(err) 76 | s.Require().NotNil(util3Service) 77 | 78 | s.Equal(s.Util3ID, util3Service.ID) 79 | 80 | nodeInfo, err := s.SClient.GetNodeInfo(context.Background(), *util3Service) 81 | s.Require().NoError(err) 82 | util3Service.NodeInfo = nodeInfo 83 | s.Nil(util3Service.NodeInfo) 84 | } 85 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceInspect_With_Service_Name_Prefix() { 86 | s.SClient.ServiceNamePrefix = "dev1" 87 | 88 | util1Service, err := s.SClient.SwarmServiceInspect(context.Background(), s.Util1ID) 89 | s.Require().NoError(err) 90 | s.Require().NotNil(util1Service) 91 | 92 | s.Equal(s.Util1ID, util1Service.ID) 93 | s.Require().Nil(util1Service.NodeInfo) 94 | s.Equal("dev1_util-1", util1Service.Spec.Name) 95 | } 96 | 97 | func (s *SwarmServiceClientTestSuite) 
Test_ServiceList_Filtered() { 98 | 99 | util2Service, err := s.SClient.SwarmServiceInspect(context.Background(), s.Util2ID) 100 | s.Require().NoError(err) 101 | s.Nil(util2Service) 102 | 103 | } 104 | 105 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceInspect_NodeInfo_OneReplica() { 106 | util1Service, err := s.SClient.SwarmServiceInspect(context.Background(), s.Util1ID) 107 | s.Require().NoError(err) 108 | s.Require().NotNil(util1Service) 109 | 110 | s.Equal(s.Util1ID, util1Service.ID) 111 | 112 | nodeInfo, err := s.SClient.GetNodeInfo(context.Background(), *util1Service) 113 | s.Require().NoError(err) 114 | util1Service.NodeInfo = nodeInfo 115 | 116 | s.Require().NotNil(util1Service.NodeInfo) 117 | s.Require().Len(nodeInfo, 1) 118 | } 119 | 120 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceInspect_NodeInfo_TwoReplica() { 121 | 122 | util4Service, err := s.SClient.SwarmServiceInspect(context.Background(), s.Util4ID) 123 | s.Require().NoError(err) 124 | s.Require().NotNil(util4Service) 125 | 126 | s.Equal(s.Util4ID, util4Service.ID) 127 | nodeInfo, err := s.SClient.GetNodeInfo(context.Background(), *util4Service) 128 | s.Require().NoError(err) 129 | util4Service.NodeInfo = nodeInfo 130 | 131 | s.Require().NotNil(util4Service.NodeInfo) 132 | 133 | s.Require().Len(nodeInfo, 2) 134 | } 135 | 136 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceInspect_IncorrectName() { 137 | _, err := s.SClient.SwarmServiceInspect(context.Background(), "cowsfly") 138 | s.Error(err) 139 | } 140 | 141 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceList_GetNodeInfo() { 142 | services, err := s.SClient.SwarmServiceList(context.Background()) 143 | s.Require().NoError(err) 144 | s.Len(services, 3) 145 | 146 | for _, ss := range services { 147 | nodeInfo, err := s.SClient.GetNodeInfo(context.Background(), ss) 148 | s.Require().NoError(err) 149 | if ss.Spec.Name == "util-1" || ss.Spec.Name == "util-4" { 150 | s.NotNil(nodeInfo) 151 | } else { 152 | s.Nil(nodeInfo) 153 | } 154 | } 155 | } 156 | func (s *SwarmServiceClientTestSuite) Test_SwarmServiceList_ServiceNamePrefix() { 157 | s.SClient.ServiceNamePrefix = "dev1" 158 | services, err := s.SClient.SwarmServiceList(context.Background()) 159 | s.Require().NoError(err) 160 | s.Len(services, 3) 161 | 162 | expectedNames := []string{"dev1_util-1", "dev1_util-3", "dev1_util-4"} 163 | for _, ss := range services { 164 | s.Contains(expectedNames, ss.Spec.Name) 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /service/servicecache.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import "sync" 4 | 5 | // SwarmServiceCacher caches services 6 | type SwarmServiceCacher interface { 7 | InsertAndCheck(ss SwarmServiceMini) bool 8 | IsNewOrUpdated(ss SwarmServiceMini) bool 9 | Delete(ID string) 10 | Get(ID string) (SwarmServiceMini, bool) 11 | Len() int 12 | Keys() map[string]struct{} 13 | } 14 | 15 | // SwarmServiceCache implements `SwarmServiceCacher` 16 | type SwarmServiceCache struct { 17 | cache map[string]SwarmServiceMini 18 | mux sync.RWMutex 19 | } 20 | 21 | // NewSwarmServiceCache creates a new `SwarmServiceCache` 22 | func NewSwarmServiceCache() *SwarmServiceCache { 23 | return &SwarmServiceCache{ 24 | cache: map[string]SwarmServiceMini{}, 25 | } 26 | } 27 | 28 | // InsertAndCheck inserts `SwarmServiceMini` into cache 29 | // If the service is new or updated `InsertAndCheck` returns true.
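// The read-modify-write sequence below runs under a single write lock, so the
// returned flag stays consistent even when callers insert concurrently.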
30 | func (c *SwarmServiceCache) InsertAndCheck(ss SwarmServiceMini) bool { 31 | c.mux.Lock() 32 | defer c.mux.Unlock() 33 | 34 | cachedService, ok := c.cache[ss.ID] 35 | c.cache[ss.ID] = ss 36 | 37 | return !ok || !ss.Equal(cachedService) 38 | 39 | } 40 | 41 | // IsNewOrUpdated returns true if service is new or updated 42 | func (c *SwarmServiceCache) IsNewOrUpdated(ss SwarmServiceMini) bool { 43 | c.mux.RLock() 44 | defer c.mux.RUnlock() 45 | 46 | cachedService, ok := c.cache[ss.ID] 47 | return !ok || !ss.Equal(cachedService) 48 | } 49 | 50 | // Delete deletes service from cache 51 | func (c *SwarmServiceCache) Delete(ID string) { 52 | c.mux.Lock() 53 | defer c.mux.Unlock() 54 | delete(c.cache, ID) 55 | } 56 | 57 | // Get gets service from cache 58 | func (c *SwarmServiceCache) Get(ID string) (SwarmServiceMini, bool) { 59 | c.mux.RLock() 60 | defer c.mux.RUnlock() 61 | v, ok := c.cache[ID] 62 | return v, ok 63 | } 64 | 65 | // Len returns the number of items in cache 66 | func (c *SwarmServiceCache) Len() int { 67 | c.mux.RLock() 68 | defer c.mux.RUnlock() 69 | return len(c.cache) 70 | } 71 | 72 | // Keys returns the keys of the cache 73 | func (c *SwarmServiceCache) Keys() map[string]struct{} { 74 | c.mux.RLock() 75 | defer c.mux.RUnlock() 76 | output := map[string]struct{}{} 77 | for key := range c.cache { 78 | output[key] = struct{}{} 79 | } 80 | return output 81 | } 82 | -------------------------------------------------------------------------------- /service/servicecache_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/suite" 7 | ) 8 | 9 | type SwarmServiceCacheTestSuite struct { 10 | suite.Suite 11 | Cache *SwarmServiceCache 12 | SSMini SwarmServiceMini 13 | } 14 | 15 | func TestSwarmServiceCacheUnitTestSuite(t *testing.T) { 16 | suite.Run(t, new(SwarmServiceCacheTestSuite)) 17 | } 18 | 19 | func (s *SwarmServiceCacheTestSuite) SetupTest() { 20 | s.Cache = NewSwarmServiceCache() 21 | s.SSMini = getNewSwarmServiceMini() 22 | } 23 | 24 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_NewService_ReturnsTrue() { 25 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 26 | s.True(isUpdated) 27 | 28 | s.AssertInCache(s.SSMini) 29 | s.Equal(1, s.Cache.Len()) 30 | } 31 | 32 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_NewServiceGlobal_ReturnsTrue() { 33 | 34 | s.SSMini.Replicas = uint64(0) 35 | s.SSMini.Global = true 36 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 37 | s.True(isUpdated) 38 | s.AssertInCache(s.SSMini) 39 | } 40 | 41 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_SameService_ReturnsFalse() { 42 | 43 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 44 | s.True(isUpdated) 45 | s.AssertInCache(s.SSMini) 46 | 47 | newSSMini := getNewSwarmServiceMini() 48 | 49 | isUpdated = s.Cache.InsertAndCheck(newSSMini) 50 | s.False(isUpdated) 51 | s.AssertInCache(newSSMini) 52 | } 53 | 54 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_NewLabel_ReturnsTrue() { 55 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 56 | s.True(isUpdated) 57 | s.AssertInCache(s.SSMini) 58 | 59 | newSSMini := getNewSwarmServiceMini() 60 | newSSMini.Labels["com.df.whatisthis"] = "howareyou" 61 | 62 | isUpdated = s.Cache.InsertAndCheck(newSSMini) 63 | s.True(isUpdated) 64 | s.AssertInCache(newSSMini) 65 | } 66 | 67 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_NewLabel_SameKey_ReturnsTrue() { 68 | isUpdated :=
s.Cache.InsertAndCheck(s.SSMini) 69 | s.True(isUpdated) 70 | s.AssertInCache(s.SSMini) 71 | 72 | newSSMini := getNewSwarmServiceMini() 73 | newSSMini.Labels["com.df.hello"] = "sf" 74 | 75 | isUpdated = s.Cache.InsertAndCheck(newSSMini) 76 | s.True(isUpdated) 77 | s.AssertInCache(newSSMini) 78 | } 79 | 80 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_ChangedReplicas_ReturnsTrue() { 81 | 82 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 83 | s.True(isUpdated) 84 | s.AssertInCache(s.SSMini) 85 | 86 | newSSMini := getNewSwarmServiceMini() 87 | newSSMini.Replicas = uint64(4) 88 | 89 | isUpdated = s.Cache.InsertAndCheck(newSSMini) 90 | s.True(isUpdated) 91 | s.AssertInCache(newSSMini) 92 | } 93 | 94 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_ReplicasDescToZero_ReturnsTrue() { 95 | 96 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 97 | s.True(isUpdated) 98 | s.AssertInCache(s.SSMini) 99 | 100 | newSSMini := getNewSwarmServiceMini() 101 | newSSMini.Replicas = uint64(0) 102 | 103 | isUpdated = s.Cache.InsertAndCheck(newSSMini) 104 | s.True(isUpdated) 105 | s.AssertInCache(newSSMini) 106 | } 107 | 108 | func (s *SwarmServiceCacheTestSuite) Test_InsertAndCheck_NewNodeInfo_ReturnsTrue() { 109 | 110 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 111 | s.True(isUpdated) 112 | s.AssertInCache(s.SSMini) 113 | 114 | newSSMini := getNewSwarmServiceMini() 115 | nodeSet := NodeIPSet{} 116 | nodeSet.Add("node-3", "1.0.2.1", "id3") 117 | newSSMini.NodeInfo = nodeSet 118 | 119 | isUpdated = s.Cache.InsertAndCheck(newSSMini) 120 | s.True(isUpdated) 121 | s.AssertInCache(newSSMini) 122 | } 123 | 124 | func (s *SwarmServiceCacheTestSuite) Test_GetAndRemove_InCache_ReturnsSwarmServiceMini_RemovesFromCache() { 125 | 126 | isUpdated := s.Cache.InsertAndCheck(s.SSMini) 127 | s.True(isUpdated) 128 | s.AssertInCache(s.SSMini) 129 | 130 | removedSSMini, ok := s.Cache.Get(s.SSMini.ID) 131 | s.True(ok) 132 | s.Cache.Delete(s.SSMini.ID) 133 | s.AssertNotInCache(s.SSMini) 134 | s.Equal(s.SSMini, removedSSMini) 135 | } 136 | 137 | func (s *SwarmServiceCacheTestSuite) Test_Keys() { 138 | s.Cache.InsertAndCheck(s.SSMini) 139 | s.AssertInCache(s.SSMini) 140 | 141 | keys := s.Cache.Keys() 142 | 143 | s.Require().Len(keys, 1) 144 | s.Contains(keys, s.SSMini.ID) 145 | 146 | } 147 | 148 | func (s *SwarmServiceCacheTestSuite) Test_IsNewOrUpdated_ServiceInCache() { 149 | s.Cache.InsertAndCheck(s.SSMini) 150 | s.AssertInCache(s.SSMini) 151 | 152 | newOrUpdated := s.Cache.IsNewOrUpdated(s.SSMini) 153 | s.False(newOrUpdated) 154 | } 155 | 156 | func (s *SwarmServiceCacheTestSuite) Test_IsNewOrUpdated_ServiceNotInCache() { 157 | newOrUpdated := s.Cache.IsNewOrUpdated(s.SSMini) 158 | s.True(newOrUpdated) 159 | } 160 | 161 | func (s *SwarmServiceCacheTestSuite) Test_IsNewOrUpdated_ServiceIsDifferentCache() { 162 | 163 | s.Cache.InsertAndCheck(s.SSMini) 164 | s.AssertInCache(s.SSMini) 165 | 166 | anotherSSMini := getNewSwarmServiceMini() 167 | anotherSSMini.Name = "anotherName" 168 | 169 | newOrUpdated := s.Cache.IsNewOrUpdated(anotherSSMini) 170 | s.True(newOrUpdated) 171 | 172 | } 173 | func (s *SwarmServiceCacheTestSuite) Test_GetAndRemove_NotInCache_ReturnsFalse() { 174 | 175 | _, ok := s.Cache.Get(s.SSMini.ID) 176 | s.False(ok) 177 | } 178 | 179 | func (s *SwarmServiceCacheTestSuite) AssertInCache(ssm SwarmServiceMini) { 180 | ss, ok := s.Cache.Get(ssm.ID) 181 | s.True(ok) 182 | s.Equal(ssm, ss) 183 | } 184 | 185 | func (s *SwarmServiceCacheTestSuite) AssertNotInCache(ssm SwarmServiceMini) { 186 | _, ok := 
s.Cache.Get(ssm.ID) 187 | s.False(ok) 188 | } 189 | -------------------------------------------------------------------------------- /service/servicepoller.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "time" 7 | ) 8 | 9 | // SwarmServicePolling provides an interface for polling service changes 10 | type SwarmServicePolling interface { 11 | Run(eventChan chan<- Event) 12 | } 13 | 14 | // SwarmServicePoller implements `SwarmServicePolling` 15 | type SwarmServicePoller struct { 16 | SSClient SwarmServiceInspector 17 | SSCache SwarmServiceCacher 18 | PollingInterval int 19 | IncludeNodeInfo bool 20 | MinifyFunc func(SwarmService) SwarmServiceMini 21 | Log *log.Logger 22 | } 23 | 24 | // NewSwarmServicePoller creates a new `SwarmServicePoller` 25 | func NewSwarmServicePoller( 26 | ssClient SwarmServiceInspector, 27 | ssCache SwarmServiceCacher, 28 | pollingInterval int, 29 | includeNodeInfo bool, 30 | minifyFunc func(SwarmService) SwarmServiceMini, 31 | log *log.Logger, 32 | ) *SwarmServicePoller { 33 | return &SwarmServicePoller{ 34 | SSClient: ssClient, 35 | SSCache: ssCache, 36 | PollingInterval: pollingInterval, 37 | IncludeNodeInfo: includeNodeInfo, 38 | MinifyFunc: minifyFunc, 39 | Log: log, 40 | } 41 | } 42 | 43 | // Run starts the poller and places events onto `eventChan` 44 | func (s SwarmServicePoller) Run( 45 | eventChan chan<- Event) { 46 | 47 | if s.PollingInterval <= 0 { 48 | return 49 | } 50 | 51 | ctx := context.Background() 52 | 53 | s.Log.Printf("Polling for Service Changes") 54 | time.Sleep(time.Duration(s.PollingInterval) * time.Second) 55 | 56 | for { 57 | services, err := s.SSClient.SwarmServiceList(ctx) 58 | if err != nil { 59 | s.Log.Printf("ERROR (SwarmServicePolling): %v", err) 60 | } else { 61 | nowTimeNano := time.Now().UTC().UnixNano() 62 | keys := s.SSCache.Keys() 63 | for _, ss := range services { 64 | delete(keys, ss.ID) 65 | 66 | if s.IncludeNodeInfo { 67 | nodeInfo, err := s.SSClient.GetNodeInfo(ctx, ss) 68 | if err != nil { 69 | s.Log.Printf("ERROR (SwarmServicePolling): GetNodeInfo, %v", err) 70 | } else { 71 | ss.NodeInfo = nodeInfo 72 | } 73 | } 74 | 75 | ssMini := s.MinifyFunc(ss) 76 | if s.SSCache.IsNewOrUpdated(ssMini) { 77 | eventChan <- Event{ 78 | Type: EventTypeCreate, 79 | ID: ss.ID, 80 | TimeNano: nowTimeNano, 81 | ConsultCache: true, 82 | } 83 | } 84 | } 85 | 86 | // Remaining keys are removal events 87 | for k := range keys { 88 | eventChan <- Event{ 89 | Type: EventTypeRemove, 90 | ID: k, 91 | TimeNano: nowTimeNano, 92 | ConsultCache: true, 93 | } 94 | } 95 | } 96 | time.Sleep(time.Duration(s.PollingInterval) * time.Second) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /service/servicepoller_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "log" 6 | "testing" 7 | "time" 8 | 9 | "github.com/docker/docker/api/types/swarm" 10 | "github.com/stretchr/testify/mock" 11 | "github.com/stretchr/testify/suite" 12 | ) 13 | 14 | type ServicePollerTestSuite struct { 15 | suite.Suite 16 | SSClientMock *swarmServiceInspector 17 | SSCacheMock *swarmServiceCacherMock 18 | MinifyFunc func(ss SwarmService) SwarmServiceMini 19 | 20 | SSPoller *SwarmServicePoller 21 | Logger *log.Logger 22 | LogBytes *bytes.Buffer 23 | } 24 | 25 | func TestServicePollerUnitTestSuite(t *testing.T) { 26 | suite.Run(t, new(ServicePollerTestSuite)) 27 | }
28 | 29 | func (s *ServicePollerTestSuite) SetupTest() { 30 | s.SSClientMock = new(swarmServiceInspector) 31 | s.SSCacheMock = new(swarmServiceCacherMock) 32 | 33 | s.MinifyFunc = func(ss SwarmService) SwarmServiceMini { 34 | return MinifySwarmService(ss, "com.df.notify", "com.df.scrapeNetwork") 35 | } 36 | s.LogBytes = new(bytes.Buffer) 37 | s.Logger = log.New(s.LogBytes, "", 0) 38 | 39 | s.SSPoller = NewSwarmServicePoller( 40 | s.SSClientMock, 41 | s.SSCacheMock, 42 | 1, 43 | false, 44 | s.MinifyFunc, 45 | s.Logger, 46 | ) 47 | } 48 | 49 | func (s *ServicePollerTestSuite) Test_Run_NoCache() { 50 | 51 | expServices := []SwarmService{ 52 | {swarm.Service{ID: "serviceID1"}, nil}, 53 | {swarm.Service{ID: "serviceID2"}, nil}, 54 | } 55 | keys := map[string]struct{}{} 56 | miniSS1 := SwarmServiceMini{ 57 | ID: "serviceID1", Labels: map[string]string{}} 58 | miniSS2 := SwarmServiceMini{ 59 | ID: "serviceID2", Labels: map[string]string{}} 60 | 61 | eventChan := make(chan Event) 62 | 63 | s.SSClientMock. 64 | On("SwarmServiceList", mock.AnythingOfType("*context.emptyCtx")).Return(expServices, nil) 65 | s.SSCacheMock. 66 | On("Keys").Return(keys). 67 | On("IsNewOrUpdated", miniSS1).Return(true). 68 | On("IsNewOrUpdated", miniSS2).Return(true) 69 | 70 | go s.SSPoller.Run(eventChan) 71 | 72 | timeout := time.NewTimer(time.Second * 5).C 73 | eventsNum := 0 74 | 75 | for { 76 | if eventsNum == 2 { 77 | break 78 | } 79 | select { 80 | case event := <-eventChan: 81 | s.Require().Equal(EventTypeCreate, event.Type) 82 | eventsNum++ 83 | case <-timeout: 84 | s.FailNow("Timeout") 85 | } 86 | } 87 | 88 | s.Equal(2, eventsNum) 89 | s.SSClientMock.AssertExpectations(s.T()) 90 | s.SSCacheMock.AssertExpectations(s.T()) 91 | } 92 | 93 | func (s *ServicePollerTestSuite) Test_Run_HalfInCache() { 94 | 95 | expServices := []SwarmService{ 96 | {swarm.Service{ID: "serviceID1"}, nil}, 97 | {swarm.Service{ID: "serviceID2"}, nil}, 98 | } 99 | miniSS1 := SwarmServiceMini{ 100 | ID: "serviceID1", Labels: map[string]string{}} 101 | miniSS2 := SwarmServiceMini{ 102 | ID: "serviceID2", Labels: map[string]string{}} 103 | 104 | keys := map[string]struct{}{} 105 | keys["serviceID1"] = struct{}{} 106 | 107 | eventChan := make(chan Event) 108 | 109 | s.SSClientMock. 110 | On("SwarmServiceList", mock.AnythingOfType("*context.emptyCtx")).Return(expServices, nil) 111 | s.SSCacheMock. 112 | On("Keys").Return(keys). 113 | On("IsNewOrUpdated", miniSS1).Return(false). 
114 | On("IsNewOrUpdated", miniSS2).Return(true) 115 | 116 | go s.SSPoller.Run(eventChan) 117 | 118 | timeout := time.NewTimer(time.Second * 5).C 119 | var eventCreate *Event 120 | eventsNum := 0 121 | 122 | for { 123 | if eventsNum == 1 { 124 | break 125 | } 126 | select { 127 | case event := <-eventChan: 128 | if event.ID == "serviceID2" { 129 | eventCreate = &event 130 | } 131 | eventsNum++ 132 | case <-timeout: 133 | s.Fail("Timeout") 134 | return 135 | } 136 | } 137 | 138 | s.Equal(1, eventsNum) 139 | s.Require().NotNil(eventCreate) 140 | 141 | s.Equal("serviceID2", eventCreate.ID) 142 | s.SSClientMock.AssertExpectations(s.T()) 143 | s.SSCacheMock.AssertExpectations(s.T()) 144 | } 145 | 146 | func (s *ServicePollerTestSuite) Test_Run_MoreInCache() { 147 | 148 | expServices := []SwarmService{ 149 | {swarm.Service{ID: "serviceID1"}, nil}, 150 | {swarm.Service{ID: "serviceID2"}, nil}, 151 | } 152 | miniSS1 := SwarmServiceMini{ 153 | ID: "serviceID1", Labels: map[string]string{}} 154 | miniSS2 := SwarmServiceMini{ 155 | ID: "serviceID2", Labels: map[string]string{}} 156 | 157 | keys := map[string]struct{}{} 158 | keys["serviceID1"] = struct{}{} 159 | keys["serviceID2"] = struct{}{} 160 | keys["serviceID3"] = struct{}{} 161 | 162 | eventChan := make(chan Event) 163 | 164 | s.SSClientMock. 165 | On("SwarmServiceList", mock.AnythingOfType("*context.emptyCtx")).Return(expServices, nil) 166 | s.SSCacheMock. 167 | On("Keys").Return(keys). 168 | On("IsNewOrUpdated", miniSS1).Return(true). 169 | On("IsNewOrUpdated", miniSS2).Return(false) 170 | 171 | go s.SSPoller.Run(eventChan) 172 | 173 | timeout := time.NewTimer(time.Second * 5).C 174 | var eventCreate *Event 175 | var eventRemove *Event 176 | eventsNum := 0 177 | 178 | for { 179 | if eventsNum == 2 { 180 | break 181 | } 182 | select { 183 | case event := <-eventChan: 184 | if event.ID == "serviceID1" { 185 | eventCreate = &event 186 | } else if event.ID == "serviceID3" { 187 | eventRemove = &event 188 | } 189 | eventsNum++ 190 | case <-timeout: 191 | s.Fail("Timeout") 192 | return 193 | } 194 | } 195 | 196 | s.Equal(2, eventsNum) 197 | s.Require().NotNil(eventCreate) 198 | s.Require().NotNil(eventRemove) 199 | 200 | s.Equal("serviceID1", eventCreate.ID) 201 | s.Equal("serviceID3", eventRemove.ID) 202 | s.SSClientMock.AssertExpectations(s.T()) 203 | s.SSCacheMock.AssertExpectations(s.T()) 204 | } 205 | -------------------------------------------------------------------------------- /service/task.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/docker/docker/api/types" 10 | "github.com/docker/docker/api/types/filters" 11 | "github.com/docker/docker/api/types/swarm" 12 | "github.com/docker/docker/client" 13 | ) 14 | 15 | // https://github.com/docker/cli/blob/master/cli/command/service/progress/progress.go 16 | // The same structure as `progress.go` without stdout 17 | 18 | var ( 19 | numberedStates = map[swarm.TaskState]int64{ 20 | swarm.TaskStateNew: 1, 21 | swarm.TaskStateAllocated: 2, 22 | swarm.TaskStatePending: 3, 23 | swarm.TaskStateAssigned: 4, 24 | swarm.TaskStateAccepted: 5, 25 | swarm.TaskStatePreparing: 6, 26 | swarm.TaskStateReady: 7, 27 | swarm.TaskStateStarting: 8, 28 | swarm.TaskStateRunning: 9, 29 | 30 | // The following states are not actually shown in progress 31 | // output, but are used internally for ordering. 
32 | swarm.TaskStateComplete: 10, 33 | swarm.TaskStateShutdown: 11, 34 | swarm.TaskStateFailed: 12, 35 | swarm.TaskStateRejected: 13, 36 | } 37 | 38 | longestState int 39 | ) 40 | 41 | func init() { 42 | for state := range numberedStates { 43 | if !terminalState(state) && len(state) > longestState { 44 | longestState = len(state) 45 | } 46 | } 47 | } 48 | 49 | func terminalState(state swarm.TaskState) bool { 50 | return numberedStates[state] > numberedStates[swarm.TaskStateRunning] 51 | } 52 | 53 | func stateToProgress(state swarm.TaskState, rollback bool) int64 { 54 | if !rollback { 55 | return numberedStates[state] 56 | } 57 | return numberedStates[swarm.TaskStateRunning] - numberedStates[state] 58 | } 59 | 60 | func getActiveNodes(ctx context.Context, client *client.Client) (map[string]struct{}, error) { 61 | nodes, err := client.NodeList(ctx, types.NodeListOptions{}) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | activeNodes := make(map[string]struct{}) 67 | for _, n := range nodes { 68 | if n.Status.State != swarm.NodeStateDown { 69 | activeNodes[n.ID] = struct{}{} 70 | } 71 | } 72 | return activeNodes, nil 73 | } 74 | 75 | func initializeUpdater(service swarm.Service) (progressUpdater, error) { 76 | if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { 77 | return &replicatedProgressUpdater{}, nil 78 | } 79 | if service.Spec.Mode.Global != nil { 80 | return &globalProgressUpdater{}, nil 81 | } 82 | return nil, errors.New("unrecognized service mode") 83 | } 84 | 85 | type progressUpdater interface { 86 | update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) 87 | } 88 | 89 | // GetTaskList returns the service's tasks once the service has converged 90 | func GetTaskList(ctx context.Context, client *client.Client, serviceID string) ([]swarm.Task, error) { 91 | 92 | taskFilter := filters.NewArgs() 93 | taskFilter.Add("service", serviceID) 94 | taskFilter.Add("_up-to-date", "true") 95 | taskFilter.Add("desired-state", "running") 96 | taskFilter.Add("desired-state", "accepted") 97 | 98 | getUpToDateTasks := func() ([]swarm.Task, error) { 99 | return client.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) 100 | } 101 | 102 | var ( 103 | updater progressUpdater 104 | converged bool 105 | convergedAt time.Time 106 | monitor = 5 * time.Second 107 | rollback bool 108 | ) 109 | 110 | taskList, err := getUpToDateTasks() 111 | if err != nil { 112 | return taskList, err 113 | } 114 | 115 | for { 116 | service, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) 117 | if err != nil { 118 | return taskList, err 119 | } 120 | 121 | if service.Spec.UpdateConfig != nil && service.Spec.UpdateConfig.Monitor != 0 { 122 | monitor = service.Spec.UpdateConfig.Monitor 123 | } 124 | 125 | if updater == nil { 126 | updater, err = initializeUpdater(service) 127 | if err != nil { 128 | return taskList, err 129 | } 130 | } 131 | 132 | if service.UpdateStatus != nil { 133 | switch service.UpdateStatus.State { 134 | case swarm.UpdateStateUpdating: 135 | rollback = false 136 | case swarm.UpdateStateCompleted: 137 | if !converged { 138 | return taskList, nil 139 | } 140 | case swarm.UpdateStatePaused: 141 | return taskList, fmt.Errorf("service update paused: %s", service.UpdateStatus.Message) 142 | case swarm.UpdateStateRollbackStarted: 143 | rollback = true 144 | case swarm.UpdateStateRollbackPaused: 145 | return taskList, fmt.Errorf("service rollback paused: %s",
service.UpdateStatus.Message) 146 | case swarm.UpdateStateRollbackCompleted: 147 | if !converged { 148 | return taskList, fmt.Errorf("service rolled back: %s", service.UpdateStatus.Message) 149 | } 150 | } 151 | } 152 | if converged && time.Since(convergedAt) >= monitor { 153 | return taskList, nil 154 | } 155 | 156 | taskList, err = getUpToDateTasks() 157 | if err != nil { 158 | return taskList, err 159 | } 160 | 161 | activeNodes, err := getActiveNodes(ctx, client) 162 | if err != nil { 163 | return taskList, err 164 | } 165 | 166 | converged, err = updater.update(service, taskList, activeNodes, rollback) 167 | if err != nil { 168 | return taskList, err 169 | } 170 | if converged { 171 | if convergedAt.IsZero() { 172 | convergedAt = time.Now() 173 | } 174 | } else { 175 | convergedAt = time.Time{} 176 | } 177 | 178 | <-time.After(200 * time.Millisecond) 179 | } 180 | 181 | } 182 | 183 | // TasksAllRunning checks if a service is currently up and running 184 | func TasksAllRunning(ctx context.Context, cli *client.Client, serviceID string) (bool, error) { 185 | 186 | service, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{}) 187 | if err != nil { 188 | return false, err 189 | } 190 | updater, err := initializeUpdater(service) 191 | if err != nil { 192 | return false, err 193 | } 194 | 195 | taskFilter := filters.NewArgs() 196 | taskFilter.Add("service", serviceID) 197 | taskFilter.Add("_up-to-date", "true") 198 | taskFilter.Add("desired-state", "running") 199 | taskFilter.Add("desired-state", "accepted") 200 | 201 | tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: taskFilter}) 202 | if err != nil { 203 | return false, err 204 | } 205 | 206 | activeNodes, err := getActiveNodes(ctx, cli) 207 | if err != nil { 208 | return false, err 209 | } 210 | 211 | return updater.update(service, tasks, activeNodes, false) 212 | } 213 | 214 | type replicatedProgressUpdater struct { 215 | initialized bool 216 | done bool 217 | } 218 | 219 | func (u *replicatedProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) { 220 | 221 | if service.Spec.Mode.Replicated == nil || service.Spec.Mode.Replicated.Replicas == nil { 222 | return false, errors.New("no replica count") 223 | } 224 | 225 | replicas := *service.Spec.Mode.Replicated.Replicas 226 | 227 | if !u.initialized { 228 | u.initialized = true 229 | } 230 | 231 | tasksBySlot := u.tasksBySlot(tasks, activeNodes) 232 | 233 | // If we had reached a converged state, check if we are still converged. 234 | if u.done { 235 | for _, task := range tasksBySlot { 236 | if task.Status.State != swarm.TaskStateRunning { 237 | u.done = false 238 | break 239 | } 240 | } 241 | } 242 | 243 | running := uint64(0) 244 | 245 | for _, task := range tasksBySlot { 246 | if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning { 247 | running++ 248 | } 249 | } 250 | 251 | if !u.done && running == replicas { 252 | u.done = true 253 | } 254 | 255 | return u.done, nil 256 | } 257 | 258 | func (u *replicatedProgressUpdater) tasksBySlot(tasks []swarm.Task, activeNodes map[string]struct{}) map[int]swarm.Task { 259 | // If there are multiple tasks with the same slot number, favor the one 260 | // with the *lowest* desired state. This can happen in restart 261 | // scenarios.
262 | tasksBySlot := make(map[int]swarm.Task) 263 | for _, task := range tasks { 264 | if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 { 265 | continue 266 | } 267 | if existingTask, ok := tasksBySlot[task.Slot]; ok { 268 | if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] { 269 | continue 270 | } 271 | // If the desired states match, observed state breaks 272 | // ties. This can happen with the "start first" service 273 | // update mode. 274 | if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] && 275 | numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] { 276 | continue 277 | } 278 | } 279 | if task.NodeID != "" { 280 | if _, nodeActive := activeNodes[task.NodeID]; !nodeActive { 281 | continue 282 | } 283 | } 284 | tasksBySlot[task.Slot] = task 285 | } 286 | 287 | return tasksBySlot 288 | } 289 | 290 | type globalProgressUpdater struct { 291 | initialized bool 292 | done bool 293 | } 294 | 295 | func (u *globalProgressUpdater) update(service swarm.Service, tasks []swarm.Task, activeNodes map[string]struct{}, rollback bool) (bool, error) { 296 | tasksByNode := u.tasksByNode(tasks) 297 | // We don't have perfect knowledge of how many nodes meet the 298 | // constraints for this service. But the orchestrator creates tasks 299 | // for all eligible nodes at the same time, so we should see all those 300 | // nodes represented among the up-to-date tasks. 301 | nodeCount := len(tasksByNode) 302 | 303 | if !u.initialized { 304 | if nodeCount == 0 { 305 | // Two possibilities: either the orchestrator hasn't created 306 | // the tasks yet, or the service doesn't meet constraints for 307 | // any node. Either way, we wait. 308 | return false, nil 309 | } 310 | 311 | u.initialized = true 312 | } 313 | 314 | // If we had reached a converged state, check if we are still converged. 315 | if u.done { 316 | for _, task := range tasksByNode { 317 | if task.Status.State != swarm.TaskStateRunning { 318 | u.done = false 319 | break 320 | } 321 | } 322 | } 323 | 324 | running := 0 325 | 326 | for _, task := range tasksByNode { 327 | if _, nodeActive := activeNodes[task.NodeID]; nodeActive { 328 | if !terminalState(task.DesiredState) && task.Status.State == swarm.TaskStateRunning { 329 | running++ 330 | } 331 | } 332 | } 333 | 334 | if !u.done && running == nodeCount { 335 | u.done = true 336 | } 337 | 338 | return running == nodeCount, nil 339 | } 340 | 341 | func (u *globalProgressUpdater) tasksByNode(tasks []swarm.Task) map[string]swarm.Task { 342 | // If there are multiple tasks with the same node ID, favor the one 343 | // with the *lowest* desired state. This can happen in restart 344 | // scenarios. 345 | tasksByNode := make(map[string]swarm.Task) 346 | for _, task := range tasks { 347 | if numberedStates[task.DesiredState] == 0 || numberedStates[task.Status.State] == 0 { 348 | continue 349 | } 350 | if existingTask, ok := tasksByNode[task.NodeID]; ok { 351 | if numberedStates[existingTask.DesiredState] < numberedStates[task.DesiredState] { 352 | continue 353 | } 354 | 355 | // If the desired states match, observed state breaks 356 | // ties. This can happen with the "start first" service 357 | // update mode. 
358 | if numberedStates[existingTask.DesiredState] == numberedStates[task.DesiredState] && 359 | numberedStates[existingTask.Status.State] <= numberedStates[task.Status.State] { 360 | continue 361 | } 362 | 363 | } 364 | tasksByNode[task.NodeID] = task 365 | } 366 | 367 | return tasksByNode 368 | } 369 | -------------------------------------------------------------------------------- /service/task_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "strconv" 5 | "testing" 6 | 7 | "github.com/docker/docker/api/types/swarm" 8 | "github.com/stretchr/testify/suite" 9 | ) 10 | 11 | // https://github.com/docker/cli/blob/master/cli/command/service/progress/progress_test.go 12 | // Inspired by `progress_test.go` 13 | 14 | type TaskTestSuite struct { 15 | suite.Suite 16 | service swarm.Service 17 | updater progressUpdater 18 | activeNodes map[string]struct{} 19 | rollback bool 20 | } 21 | 22 | func TestTaskUnitTestSuite(t *testing.T) { 23 | suite.Run(t, new(TaskTestSuite)) 24 | } 25 | 26 | func (s *TaskTestSuite) Test_ReplicatedProcessUpdaterOneReplica() { 27 | replicas := uint64(1) 28 | 29 | service := swarm.Service{ 30 | Spec: swarm.ServiceSpec{ 31 | Mode: swarm.ServiceMode{ 32 | Replicated: &swarm.ReplicatedService{ 33 | Replicas: &replicas, 34 | }, 35 | }, 36 | }, 37 | } 38 | 39 | s.service = service 40 | s.updater = new(replicatedProgressUpdater) 41 | s.activeNodes = map[string]struct{}{"a": {}, "b": {}} 42 | 43 | tasks := []swarm.Task{} 44 | 45 | // No tasks 46 | s.AssertConvergence(false, tasks) 47 | 48 | // Tasks with DesiredState beyond running is not updated 49 | tasks = append(tasks, 50 | swarm.Task{ID: "1", 51 | NodeID: "a", 52 | DesiredState: swarm.TaskStateShutdown, 53 | Status: swarm.TaskStatus{State: swarm.TaskStateNew}, 54 | }) 55 | s.AssertConvergence(false, tasks) 56 | 57 | // First time task reaches TaskStateRunning, service has not updated yet 58 | // The task is "new" 59 | tasks[0].DesiredState = swarm.TaskStateRunning 60 | s.AssertConvergence(false, tasks) 61 | 62 | // When an error appears, service is not updated 63 | tasks[0].Status.Err = "something is wrong" 64 | s.AssertConvergence(false, tasks) 65 | 66 | // When the tasks reaches running again, updated is true 67 | tasks[0].Status.Err = "" 68 | tasks[0].Status.State = swarm.TaskStateRunning 69 | s.AssertConvergence(true, tasks) 70 | 71 | // When tasks fails, update is false 72 | tasks[0].Status.Err = "task failed" 73 | tasks[0].Status.State = swarm.TaskStateFailed 74 | s.AssertConvergence(false, tasks) 75 | 76 | // If the task is restarted, update is true 77 | tasks[0].DesiredState = swarm.TaskStateShutdown 78 | tasks = append(tasks, 79 | swarm.Task{ 80 | ID: "2", 81 | NodeID: "b", 82 | DesiredState: swarm.TaskStateRunning, 83 | Status: swarm.TaskStatus{State: swarm.TaskStateRunning}, 84 | }) 85 | s.AssertConvergence(true, tasks) 86 | 87 | // Add a new task while the current one is still running, to simulate 88 | // "start-then-stop" updates. 
89 | tasks = append(tasks, 90 | swarm.Task{ 91 | ID: "3", 92 | NodeID: "b", 93 | DesiredState: swarm.TaskStateRunning, 94 | Status: swarm.TaskStatus{State: swarm.TaskStatePreparing}, 95 | }) 96 | s.AssertConvergence(false, tasks) 97 | 98 | } 99 | 100 | func (s *TaskTestSuite) Test_ReplicatedProgressUpdaterManyReplicas() { 101 | replicas := uint64(50) 102 | service := swarm.Service{ 103 | Spec: swarm.ServiceSpec{ 104 | Mode: swarm.ServiceMode{ 105 | Replicated: &swarm.ReplicatedService{ 106 | Replicas: &replicas, 107 | }, 108 | }, 109 | }, 110 | } 111 | 112 | s.service = service 113 | s.updater = new(replicatedProgressUpdater) 114 | s.activeNodes = map[string]struct{}{"a": {}, "b": {}} 115 | 116 | tasks := []swarm.Task{} 117 | 118 | // No tasks 119 | s.AssertConvergence(false, tasks) 120 | 121 | for i := 0; i != int(replicas); i++ { 122 | tasks = append(tasks, 123 | swarm.Task{ 124 | ID: strconv.Itoa(i), 125 | Slot: i + 1, 126 | NodeID: "a", 127 | DesiredState: swarm.TaskStateRunning, 128 | Status: swarm.TaskStatus{State: swarm.TaskStateNew}, 129 | }) 130 | if i%2 == 1 { 131 | tasks[i].NodeID = "b" 132 | } 133 | s.AssertConvergence(false, tasks) 134 | tasks[i].Status.State = swarm.TaskStateRunning 135 | s.AssertConvergence(uint64(i) == replicas-1, tasks) 136 | } 137 | } 138 | 139 | func (s *TaskTestSuite) Test_GlobalProgressUpdaterOneNode() { 140 | 141 | service := swarm.Service{ 142 | Spec: swarm.ServiceSpec{ 143 | Mode: swarm.ServiceMode{ 144 | Global: &swarm.GlobalService{}, 145 | }, 146 | }, 147 | } 148 | 149 | s.activeNodes = map[string]struct{}{"a": {}, "b": {}} 150 | s.service = service 151 | s.updater = new(globalProgressUpdater) 152 | 153 | tasks := []swarm.Task{} 154 | 155 | // No tasks 156 | s.AssertConvergence(false, tasks) 157 | 158 | // Task with DesiredState beyond Running is ignored 159 | tasks = append(tasks, 160 | swarm.Task{ 161 | ID: "1", 162 | NodeID: "a", 163 | DesiredState: swarm.TaskStateShutdown, 164 | Status: swarm.TaskStatus{State: swarm.TaskStateNew}, 165 | }) 166 | s.AssertConvergence(false, tasks) 167 | 168 | // First time the task reaches TaskStateRunning, the service has not converged yet 169 | // The task is "new" 170 | tasks[0].DesiredState = swarm.TaskStateRunning 171 | s.AssertConvergence(false, tasks) 172 | 173 | // If the task exposes an error, update is false 174 | tasks[0].Status.Err = "something is wrong" 175 | s.AssertConvergence(false, tasks) 176 | 177 | // When the task reaches running, update is true 178 | tasks[0].Status.Err = "" 179 | tasks[0].Status.State = swarm.TaskStateRunning 180 | s.AssertConvergence(true, tasks) 181 | 182 | // If the task fails, update is false 183 | tasks[0].Status.Err = "task failed" 184 | tasks[0].Status.State = swarm.TaskStateFailed 185 | s.AssertConvergence(false, tasks) 186 | 187 | // If the task is restarted, update is true 188 | tasks[0].DesiredState = swarm.TaskStateShutdown 189 | tasks = append(tasks, 190 | swarm.Task{ 191 | ID: "2", 192 | NodeID: "a", 193 | DesiredState: swarm.TaskStateRunning, 194 | Status: swarm.TaskStatus{State: swarm.TaskStateRunning}, 195 | }) 196 | s.AssertConvergence(true, tasks) 197 | 198 | tasks = append(tasks, 199 | swarm.Task{ 200 | ID: "3", 201 | NodeID: "a", 202 | DesiredState: swarm.TaskStateRunning, 203 | Status: swarm.TaskStatus{State: swarm.TaskStatePreparing}, 204 | }) 205 | s.AssertConvergence(false, tasks) 206 | 207 | } 208 | 209 | func (s *TaskTestSuite) Test_GlobalProgressUpdaterManyNodes() { 210 | nodes := 50 211 | 212 | service := swarm.Service{ 213 | Spec: swarm.ServiceSpec{ 214 |
Mode: swarm.ServiceMode{ 215 | Global: &swarm.GlobalService{}, 216 | }, 217 | }, 218 | } 219 | 220 | s.service = service 221 | s.updater = new(globalProgressUpdater) 222 | s.activeNodes = map[string]struct{}{} 223 | 224 | for i := 0; i != nodes; i++ { 225 | s.activeNodes[strconv.Itoa(i)] = struct{}{} 226 | } 227 | 228 | tasks := []swarm.Task{} 229 | 230 | // No tasks 231 | s.AssertConvergence(false, tasks) 232 | 233 | for i := 0; i != nodes; i++ { 234 | tasks = append(tasks, 235 | swarm.Task{ 236 | ID: "task" + strconv.Itoa(i), 237 | NodeID: strconv.Itoa(i), 238 | DesiredState: swarm.TaskStateRunning, 239 | Status: swarm.TaskStatus{State: swarm.TaskStateNew}, 240 | }) 241 | } 242 | // All tasks are in "new" state 243 | s.AssertConvergence(false, tasks) 244 | 245 | for i := 0; i != nodes; i++ { 246 | tasks[i].Status.State = swarm.TaskStateRunning 247 | s.AssertConvergence(i == nodes-1, tasks) 248 | } 249 | } 250 | 251 | func (s *TaskTestSuite) AssertConvergence(expectedConvergence bool, tasks []swarm.Task) { 252 | converged, err := s.updater.update( 253 | s.service, tasks, s.activeNodes, s.rollback) 254 | s.Require().NoError(err) 255 | s.Equal(expectedConvergence, converged) 256 | } 257 | -------------------------------------------------------------------------------- /service/test_utils.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os/exec" 7 | "strings" 8 | 9 | "github.com/docker/docker/api/types/swarm" 10 | "github.com/docker/docker/client" 11 | ) 12 | 13 | // Node utils 14 | func createNode(name string, network string) { 15 | exec.Command("docker", "container", "run", "-d", "--rm", 16 | "--privileged", "--network", network, "--name", name, 17 | "--hostname", name, "docker:17.12.1-ce-dind").Output() 18 | } 19 | 20 | func destroyNode(name string) { 21 | exec.Command("docker", "container", "stop", name).Output() 22 | } 23 | 24 | func newTestNodeDockerClient(nodeName string) (*client.Client, error) { 25 | host := fmt.Sprintf("tcp://%s:2375", nodeName) 26 | defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"} 27 | cli, err := client.NewClient(host, dockerAPIVersion, nil, defaultHeaders) 28 | if err != nil { 29 | return cli, err 30 | } 31 | cli.NegotiateAPIVersion(context.Background()) 32 | return cli, err 33 | } 34 | 35 | func getWorkerToken(nodeName string) string { 36 | args := []string{"swarm", "join-token", "worker", "-q"} 37 | token, _ := runDockerCommandOnNode(args, nodeName) 38 | return strings.TrimRight(string(token), "\n") 39 | } 40 | func initSwarm(nodeName string) { 41 | args := []string{"swarm", "init"} 42 | runDockerCommandOnNode(args, nodeName) 43 | } 44 | 45 | func joinSwarm(nodeName, rootNodeName, token string) { 46 | rootHost := fmt.Sprintf("%s:2377", rootNodeName) 47 | args := []string{"swarm", "join", "--token", token, rootHost} 48 | runDockerCommandOnNode(args, nodeName) 49 | } 50 | 51 | func getNodeID(nodeName, rootNodeName string) (string, error) { 52 | args := []string{"node", "inspect", nodeName, "-f", "{{ .ID }}"} 53 | ID, err := runDockerCommandOnNode(args, rootNodeName) 54 | return strings.TrimRight(string(ID), "\n"), err 55 | } 56 | 57 | func removeNodeFromSwarm(nodeName, rootNodeName string) { 58 | args := []string{"node", "rm", "--force", nodeName} 59 | runDockerCommandOnNode(args, rootNodeName) 60 | } 61 | 62 | func addLabelToNode(nodeName, label, rootNodeName string) { 63 | args := []string{"node", "update", "--label-add", label, 
nodeName} 64 | runDockerCommandOnNode(args, rootNodeName) 65 | } 66 | 67 | func removeLabelFromNode(nodeName, label, rootNodeName string) { 68 | args := []string{"node", "update", "--label-rm", label, nodeName} 69 | runDockerCommandOnNode(args, rootNodeName) 70 | } 71 | 72 | func runDockerCommandOnNode(args []string, nodeName string) (string, error) { 73 | host := fmt.Sprintf("tcp://%s:2375", nodeName) 74 | dockerCmd := []string{"-H", host} 75 | fullCmd := append(dockerCmd, args...) 76 | output, err := exec.Command("docker", fullCmd...).Output() 77 | return string(output), err 78 | } 79 | 80 | // Service Utils 81 | 82 | func createTestOverlayNetwork(name string) { 83 | args := []string{"network", "create", "-d", "overlay", name} 84 | runDockerCommandOnSocket(args) 85 | } 86 | 87 | func removeTestNetwork(name string) { 88 | args := []string{"network", "rm", name} 89 | runDockerCommandOnSocket(args) 90 | } 91 | 92 | func getServiceID(name string) (string, error) { 93 | args := []string{"service", "inspect", name, "-f", "{{ .ID }}"} 94 | ID, err := runDockerCommandOnSocket(args) 95 | return strings.TrimRight(string(ID), "\n"), err 96 | } 97 | 98 | func createTestService(name string, labels []string, global bool, replicas string, network string) { 99 | args := []string{"service", "create", "--name", name} 100 | for _, v := range labels { 101 | args = append(args, "-l", v) 102 | } 103 | if global { 104 | args = append(args, "--mode", "global") 105 | } else if len(replicas) > 0 { 106 | args = append(args, "--replicas", replicas) 107 | } 108 | if len(network) > 0 { 109 | args = append(args, "--network", network) 110 | } 111 | args = append(args, "alpine", "sleep", "1000000000") 112 | runDockerCommandOnSocket(args) 113 | } 114 | 115 | func replicaTestService(name string, count string) { 116 | args := []string{"service", "update", "--replicas", count, name} 117 | runDockerCommandOnSocket(args) 118 | } 119 | 120 | func removeTestService(name string) { 121 | args := []string{"service", "rm", name} 122 | runDockerCommandOnSocket(args) 123 | } 124 | 125 | func addLabelToService(name, label string) { 126 | args := []string{"service", "update", "--label-add", label, name} 127 | runDockerCommandOnSocket(args) 128 | } 129 | 130 | func removeLabelFromService(name, label string) { 131 | args := []string{"service", "update", "--label-rm", label, name} 132 | runDockerCommandOnSocket(args) 133 | } 134 | 135 | func getNetworkNameWithSuffix(suffix string) (string, error) { 136 | filter := fmt.Sprintf("name=%s$", suffix) 137 | args := []string{"network", "ls", "--filter", filter, "--format", "{{ .ID }}"} 138 | output, err := runDockerCommandOnSocket(args) 139 | if err != nil { 140 | return "", err 141 | } 142 | firstNetwork := strings.Split(output, "\n") 143 | return firstNetwork[0], nil 144 | } 145 | 146 | func runDockerCommandOnSocket(args []string) (string, error) { 147 | output, err := exec.Command("docker", args...).Output() 148 | return string(output), err 149 | } 150 | 151 | // SwarmServiceMini Utils 152 | 153 | func getNewSwarmServiceMini() SwarmServiceMini { 154 | nodeSet := NodeIPSet{} 155 | nodeSet.Add("node-1", "1.0.0.1", "id1") 156 | 157 | return SwarmServiceMini{ 158 | ID: "serviceID", 159 | Name: "demo-go", 160 | Labels: map[string]string{ 161 | "com.df.hello": "nyc", 162 | }, 163 | Replicas: uint64(3), 164 | Global: false, 165 | NodeInfo: nodeSet, 166 | } 167 | } 168 | 169 | func getNewNodeMini() NodeMini { 170 | return NodeMini{ 171 | ID: "nodeID", 172 | Hostname: "nodehostname", 173 |
VersionIndex: uint64(10), 174 | State: swarm.NodeStateReady, 175 | Addr: "nodeaddr", 176 | NodeLabels: map[string]string{ 177 | "com.df.wow": "yup", 178 | }, 179 | EngineLabels: map[string]string{ 180 | "com.df.world": "round", 181 | }, 182 | Role: swarm.NodeRoleWorker, 183 | Availability: swarm.NodeAvailabilityActive, 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /service/types.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | 6 | "github.com/docker/docker/api/types/swarm" 7 | ) 8 | 9 | // SwarmServiceMini is an optimized version of `SwarmService` for caching purposes 10 | type SwarmServiceMini struct { 11 | ID string 12 | Name string 13 | Labels map[string]string 14 | Global bool 15 | Replicas uint64 16 | ContainerImage string 17 | NodeInfo NodeIPSet 18 | } 19 | 20 | // Equal returns true when SwarmServiceMini is equal to `other` 21 | func (ssm SwarmServiceMini) Equal(other SwarmServiceMini) bool { 22 | 23 | return (ssm.ID == other.ID) && 24 | (ssm.Name == other.Name) && 25 | EqualMapStringString(ssm.Labels, other.Labels) && 26 | (ssm.Global == other.Global) && 27 | (ssm.Replicas == other.Replicas) && 28 | (ssm.ContainerImage == other.ContainerImage) && 29 | EqualNodeIPSet(ssm.NodeInfo, other.NodeInfo) 30 | } 31 | 32 | // NodeMini is an optimized version of `swarm.Node` for caching purposes 33 | type NodeMini struct { 34 | ID string 35 | Hostname string 36 | VersionIndex uint64 37 | State swarm.NodeState 38 | Addr string 39 | NodeLabels map[string]string 40 | EngineLabels map[string]string 41 | Role swarm.NodeRole 42 | Availability swarm.NodeAvailability 43 | } 44 | 45 | // Equal returns true when NodeMini is equal to `other` 46 | func (ns NodeMini) Equal(other NodeMini) bool { 47 | return (ns.ID == other.ID) && 48 | (ns.Hostname == other.Hostname) && 49 | (ns.State == other.State) && 50 | (ns.Addr == other.Addr) && 51 | EqualMapStringString(ns.NodeLabels, other.NodeLabels) && 52 | EqualMapStringString(ns.EngineLabels, other.EngineLabels) && 53 | (ns.Role == other.Role) && 54 | (ns.Availability == other.Availability) 55 | } 56 | 57 | // EqualMapStringString returns true when the two maps are equal 58 | func EqualMapStringString(l map[string]string, r map[string]string) bool { 59 | if len(l) != len(r) { 60 | return false 61 | } 62 | for lk, lv := range l { 63 | if rv, ok := r[lk]; !ok || lv != rv { 64 | return false 65 | } 66 | } 67 | 68 | return true 69 | } 70 | 71 | // SwarmService defines the internal structure with service information 72 | type SwarmService struct { 73 | swarm.Service 74 | NodeInfo NodeIPSet 75 | } 76 | 77 | // EventType is the type of event from event listeners 78 | type EventType string 79 | 80 | const ( 81 | // EventTypeCreate is for create or update events 82 | EventTypeCreate EventType = "create" 83 | // EventTypeRemove is for remove events 84 | EventTypeRemove EventType = "remove" 85 | ) 86 | 87 | // Event contains information about docker events 88 | type Event struct { 89 | Type EventType 90 | ID string 91 | TimeNano int64 92 | ConsultCache bool 93 | } 94 | 95 | // NodeIP defines a node/addr pair 96 | type NodeIP struct { 97 | Name string `json:"name"` 98 | Addr string `json:"addr"` 99 | ID string `json:"id"` 100 | } 101 | 102 | // NodeIPSet is a set of NodeIPs 103 | type NodeIPSet map[NodeIP]struct{} 104 | 105 | // Add adds a node to the set 106 | func (ns NodeIPSet) Add(name, addr, id string) { 107 | ns[NodeIP{Name: name, Addr:
addr, ID: id}] = struct{}{} 108 | } 109 | 110 | // EqualNodeIPSet returns true when NodeIPSets contain the same elements 111 | func EqualNodeIPSet(l NodeIPSet, r NodeIPSet) bool { 112 | 113 | if l == nil && r == nil { 114 | return true 115 | } else if l == nil && r != nil { 116 | return false 117 | } else if l != nil && r == nil { 118 | return false 119 | } 120 | 121 | if l.Cardinality() != r.Cardinality() { 122 | return false 123 | } 124 | 125 | for ip := range l { 126 | if _, ok := r[ip]; !ok { 127 | return false 128 | } 129 | } 130 | return true 131 | } 132 | 133 | // Cardinality returns the size of the set 134 | func (ns NodeIPSet) Cardinality() int { 135 | return len(ns) 136 | } 137 | 138 | // MarshalJSON creates a JSON array from the NodeIPSet 139 | func (ns NodeIPSet) MarshalJSON() ([]byte, error) { 140 | items := make([][]string, 0, ns.Cardinality()) 141 | 142 | for elem := range ns { 143 | items = append(items, []string{elem.Name, elem.Addr, elem.ID}) 144 | } 145 | return json.Marshal(items) 146 | } 147 | 148 | // UnmarshalJSON recreates a NodeIPSet from a JSON array 149 | func (ns *NodeIPSet) UnmarshalJSON(b []byte) error { 150 | 151 | items := [][]string{} 152 | err := json.Unmarshal(b, &items) 153 | if err != nil { 154 | return err 155 | } 156 | 157 | for _, item := range items { 158 | nodeIP := NodeIP{Name: item[0], Addr: item[1]} 159 | if len(item) == 3 { // entries may omit the ID and contain only [name, addr] 160 | nodeIP.ID = item[2] 161 | } 162 | (*ns)[nodeIP] = struct{}{} 163 | } 164 | 165 | return nil 166 | } 167 | -------------------------------------------------------------------------------- /service/types_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/suite" 8 | ) 9 | 10 | type TypesTestSuite struct { 11 | suite.Suite 12 | } 13 | 14 | func TestTypesUnitTestSuite(t *testing.T) { 15 | suite.Run(t, new(TypesTestSuite)) 16 | } 17 | 18 | func (s *TypesTestSuite) Test_EqualMapStringString_SameKeys_DifferentValue() { 19 | a := map[string]string{"k1": "v1", "k2": "v2"} 20 | b := map[string]string{"k1": "v2", "k2": "v2"} 21 | 22 | s.False(EqualMapStringString(a, b)) 23 | s.False(EqualMapStringString(b, a)) 24 | } 25 | 26 | func (s *TypesTestSuite) Test_EqualMapStringString_SameKeys_SameValue() { 27 | 28 | a := map[string]string{"k1": "v1", "k2": "v2"} 29 | b := map[string]string{"k1": "v1", "k2": "v2"} 30 | 31 | s.True(EqualMapStringString(a, b)) 32 | s.True(EqualMapStringString(b, a)) 33 | } 34 | 35 | func (s *TypesTestSuite) Test_EqualMapStringString_DifferentKeys() { 36 | 37 | a := map[string]string{"k1": "v1", "k2": "v2"} 38 | b := map[string]string{"k1": "v1", "k3": "v2"} 39 | 40 | s.False(EqualMapStringString(a, b)) 41 | s.False(EqualMapStringString(b, a)) 42 | } 43 | 44 | func (s *TypesTestSuite) Test_EqualMapStringString_DifferentNumberOfValues() { 45 | 46 | a := map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"} 47 | b := map[string]string{"k1": "v1", "k2": "v2"} 48 | 49 | s.False(EqualMapStringString(a, b)) 50 | s.False(EqualMapStringString(b, a)) 51 | } 52 | 53 | func (s *TypesTestSuite) Test_Cardinality_DifferentElms() { 54 | a := NodeIPSet{} 55 | a.Add("node-1", "1.0.0.1", "id1") 56 | a.Add("node-2", "1.0.1.1", "id2") 57 | s.Equal(2, a.Cardinality()) 58 | } 59 | 60 | func (s *TypesTestSuite) Test_Cardinality_RepeatElems() { 61 | a := NodeIPSet{} 62 | a.Add("node-1", "1.0.0.1", "id1") 63 | a.Add("node-1", "1.0.0.1", "id1") 64 | s.Equal(1, a.Cardinality()) 65 | } 66 | 67 | func
(s *TypesTestSuite) Test_NodeIPSetEqual_RepeatElems() { 68 | a := NodeIPSet{} 69 | a.Add("node-1", "1.0.0.1", "id1") 70 | a.Add("node-1", "1.0.0.1", "id1") 71 | b := NodeIPSet{} 72 | b.Add("node-1", "1.0.0.1", "id1") 73 | s.True(EqualNodeIPSet(a, b)) 74 | } 75 | 76 | func (s *TypesTestSuite) Test_NodeIPSetEqual_LenUnequal() { 77 | a := NodeIPSet{} 78 | a.Add("node-1", "1.0.0.1", "id1") 79 | a.Add("node-2", "1.0.1.1", "id2") 80 | b := NodeIPSet{} 81 | b.Add("node-1", "1.0.0.1", "id1") 82 | b.Add("node-2", "1.0.1.1", "id2") 83 | b.Add("node-2", "1.0.1.2", "id2") 84 | s.False(EqualNodeIPSet(a, b)) 85 | } 86 | 87 | func (s *TypesTestSuite) Test_NodeIPSetEqual_EqualSets() { 88 | a := NodeIPSet{} 89 | a.Add("node-1", "1.0.0.1", "id1") 90 | a.Add("node-2", "1.0.1.1", "id2") 91 | b := NodeIPSet{} 92 | b.Add("node-1", "1.0.0.1", "id1") 93 | b.Add("node-2", "1.0.1.1", "id2") 94 | s.True(EqualNodeIPSet(a, b)) 95 | } 96 | 97 | func (s *TypesTestSuite) Test_NodeIPSetEqual_AddrNotEqual() { 98 | a := NodeIPSet{} 99 | a.Add("node-1", "1.0.0.1", "id1") 100 | a.Add("node-2", "1.0.1.1", "id2") 101 | b := NodeIPSet{} 102 | b.Add("node-1", "1.0.0.1", "id1") 103 | b.Add("node-2", "1.0.1.2", "id2") 104 | s.False(EqualNodeIPSet(a, b)) 105 | } 106 | 107 | func (s *TypesTestSuite) Test_NodeIPSetEqual_NodeNameNotEqual() { 108 | a := NodeIPSet{} 109 | a.Add("node-1", "1.0.0.1", "id1") 110 | a.Add("node-2", "1.0.1.1", "id2") 111 | b := NodeIPSet{} 112 | b.Add("node-1", "1.0.0.1", "id1") 113 | b.Add("node-1", "1.0.1.1", "id1") 114 | s.False(EqualNodeIPSet(a, b)) 115 | } 116 | 117 | func (s *TypesTestSuite) Test_NodeIPSetEqual_EmptySets() { 118 | a := NodeIPSet{} 119 | b := NodeIPSet{} 120 | s.True(EqualNodeIPSet(a, b)) 121 | } 122 | 123 | func (s *TypesTestSuite) Test_NodeIPSetEqual_OneEmpty() { 124 | a := NodeIPSet{} 125 | b := NodeIPSet{} 126 | b.Add("node-1", "1.0.0.1", "id1") 127 | b.Add("node-1", "1.0.1.1", "id1") 128 | s.False(EqualNodeIPSet(a, b)) 129 | } 130 | 131 | func (s *TypesTestSuite) Test_NodeIPMarshallJSON_EmptySet() { 132 | a := NodeIPSet{} 133 | b, err := json.Marshal(a) 134 | s.Require().NoError(err) 135 | 136 | s.Equal([]byte("[]"), b) 137 | } 138 | 139 | func (s *TypesTestSuite) Test_NodeIP_JSONCycle() { 140 | a := NodeIPSet{} 141 | a.Add("node-1", "1.0.0.1", "id1") 142 | a.Add("node-2", "1.0.1.1", "id2") 143 | by, err := json.Marshal(a) 144 | s.Require().NoError(err) 145 | 146 | i := NodeIPSet{} 147 | err = json.Unmarshal(by, &i) 148 | s.Require().NoError(err) 149 | 150 | s.True(EqualNodeIPSet(a, i)) 151 | } 152 | 153 | func (s *TypesTestSuite) Test_NodeIPSet_Add() { 154 | a := NodeIPSet{} 155 | a.Add("node-1", "1.0.0.1", "id1") 156 | a.Add("node-1", "1.0.1.1", "id1") 157 | b := NodeIPSet{} 158 | b.Add("node-1", "1.0.0.1", "id1") 159 | b.Add("node-1", "1.0.1.1", "id1") 160 | 161 | s.True(EqualNodeIPSet(a, b)) 162 | } 163 | 164 | func (s *TypesTestSuite) Test_NodeIP_WithTwoEntries() { 165 | nodeBytes := []byte("[[\"node-1\", \"1.0.0.1\"], [\"node-2\", \"1.0.1.1\"]]") 166 | ipSet := NodeIPSet{} 167 | err := json.Unmarshal(nodeBytes, &ipSet) 168 | s.Require().NoError(err) 169 | 170 | s.Contains(ipSet, NodeIP{Name: "node-1", Addr: "1.0.0.1"}) 171 | s.Contains(ipSet, NodeIP{Name: "node-2", Addr: "1.0.1.1"}) 172 | } 173 | -------------------------------------------------------------------------------- /stack.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | 5 | swarm-listener: 6 | image: 
dockerflow/docker-flow-swarm-listener:${TAG:-latest} 7 | networks: 8 | - proxy 9 | volumes: 10 | - /var/run/docker.sock:/var/run/docker.sock 11 | environment: 12 | - DF_NOTIFY_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure 13 | - DF_NOTIFY_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove 14 | deploy: 15 | labels: 16 | - com.df.notify=true 17 | - com.df.distribute=true 18 | - com.df.alertName=memlimit 19 | - com.df.alertIf=@service_mem_limit:0.8 20 | - com.df.alertFor=30s 21 | placement: 22 | constraints: [node.role == manager] 23 | resources: 24 | reservations: 25 | memory: 10M 26 | limits: 27 | memory: 20M 28 | 29 | docs: 30 | image: dockerflow/docker-flow-swarm-listener-docs:${TAG:-latest} 31 | networks: 32 | - proxy 33 | deploy: 34 | labels: 35 | - com.df.distribute=true 36 | - com.df.notify=true 37 | - com.df.reschedule=true 38 | - com.df.port=80 39 | - com.df.serviceDomain=swarmlistener.dockerflow.com,sl.dockerflow.com 40 | - com.df.servicePath=/ 41 | - com.df.alertName=memlimit 42 | - com.df.alertIf=@service_mem_limit:0.8 43 | - com.df.alertFor=30s 44 | replicas: 2 45 | resources: 46 | reservations: 47 | memory: 5M 48 | limits: 49 | memory: 10M 50 | 51 | networks: 52 | proxy: 53 | external: true 54 | --------------------------------------------------------------------------------