├── notify ├── endpoint.go ├── factory.go ├── endpoint_smtp.go ├── typedef.go ├── endpoint_api.go ├── events.go ├── notify.go └── template.html ├── api ├── listener_windows.go ├── request │ └── request.go ├── middleware │ └── logger.go ├── utils.go ├── listener_linux.go ├── response │ ├── response.go │ └── respcluster.go ├── router.go ├── server.go └── context.go ├── cluster ├── storage │ ├── entry │ │ └── entry.go │ ├── dao │ │ ├── json.go │ │ └── dao.go │ ├── storage.go │ └── node │ │ └── node.go ├── types │ ├── webhook.go │ ├── placement.go │ ├── responseerror.go │ ├── upgradecontainer.go │ ├── options.go │ ├── removedcontainer.go │ ├── operatedcontainer.go │ ├── createdcontainer.go │ ├── groupcontainer.go │ └── node.go ├── reduce.go ├── errors.go ├── enginespriority.go ├── weighted.go ├── enginesfilter.go ├── notify.go ├── hooks.go ├── utils.go ├── enginespool.go ├── constraint.go ├── client.go ├── container.go └── upgrade.go ├── README.md ├── vendor └── github.com │ └── docker │ └── libkv │ ├── script │ ├── travis_zk.sh │ ├── travis_etcd.sh │ ├── travis_consul.sh │ ├── coverage │ ├── validate-gofmt │ └── .validate │ ├── libkv_test.go │ ├── MAINTAINERS │ ├── libkv.go │ ├── store │ ├── helpers.go │ ├── zookeeper │ │ └── zookeeper_test.go │ ├── etcd │ │ └── etcd_test.go │ ├── consul │ │ └── consul_test.go │ ├── mock │ │ └── mock.go │ ├── boltdb │ │ └── boltdb_test.go │ └── store.go │ ├── .travis.yml │ ├── docs │ ├── examples.md │ └── compatibility.md │ ├── README.md │ └── LICENSE.code ├── .gitignore ├── main.go ├── .editorconfig ├── Dockerfile ├── etc ├── config.yaml ├── configuration.go └── lookupenv.go ├── ctrl ├── controller.go └── cluster.go ├── server └── service.go └── LICENSE /notify/endpoint.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | // IEndPoint is exported 4 | // sender endPoint interface 5 | type IEndPoint interface { 6 | DoEvent(event *Event, data interface{}) 7 | } 8 | -------------------------------------------------------------------------------- /api/listener_windows.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "net" 7 | ) 8 | 9 | func newUnixListener(addr string, tlsConfig *tls.Config) (net.Listener, error) { 10 | 11 | return nil, fmt.Errorf("Windows platform does not support a unix socket") 12 | } 13 | -------------------------------------------------------------------------------- /cluster/storage/entry/entry.go: -------------------------------------------------------------------------------- 1 | package entry 2 | 3 | import "github.com/humpback/humpback-center/cluster/types" 4 | 5 | //Node is exported 6 | type Node struct { 7 | *types.NodeData 8 | NodeLabels map[string]string `json:"nodelabels"` 9 | Availability string `json:"availability"` 10 | } 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## humpback-center 2 | 3 | Humpback Center 主要为 [Humpback](https://github.com/humpback/humpback) 平台提供集群容器调度服务,以集群中心角色实现各个 Group 的容器分配管理。 4 | 5 | ### API Usage 6 | 7 | [API 文档](https://github.com/humpback/humpback-center/wiki/api-usage) 8 | 9 | ### 模式架构 10 | 11 | ![架构图](https://humpback.github.io/humpback/_media/cluster-mode.png) 12 | 13 | ## License 14 | 15 | Apache-2.0 16 | 17 | 18 | -------------------------------------------------------------------------------- 
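The `IEndPoint` interface in `notify/endpoint.go` above is the seam that `endpoint_api.go` and `endpoint_smtp.go` later in this dump plug into. The sketch below is a minimal, standalone illustration of a third sender satisfying the same contract; the `Event` struct here is a simplified stand-in for the repo's `notify.Event`, and the console sink is purely hypothetical, not part of humpback-center.

```go
package main

import "fmt"

// Event is a simplified stand-in for notify.Event (assumption: only the
// fields this sketch needs are included).
type Event struct {
	Name        string
	ContactInfo string
}

// IEndPoint mirrors the sender contract from notify/endpoint.go.
type IEndPoint interface {
	DoEvent(event *Event, data interface{})
}

// ConsoleEndPoint is a hypothetical sink that just prints the rendered body
// instead of delivering it over SMTP or an HTTP API.
type ConsoleEndPoint struct {
	Enabled bool
}

// DoEvent honours the Enabled flag the same way the real endpoints do.
func (endpoint *ConsoleEndPoint) DoEvent(event *Event, data interface{}) {
	if !endpoint.Enabled {
		return
	}
	fmt.Printf("notify %s -> %s: %v\n", event.Name, event.ContactInfo, data)
}

func main() {
	var sink IEndPoint = &ConsoleEndPoint{Enabled: true}
	sink.DoEvent(&Event{Name: "GroupEnginesWatchEvent", ContactInfo: "ops@example.com"}, "<html>…</html>")
}
```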
/vendor/github.com/docker/libkv/script/travis_zk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -gt 0 ] ; then 4 | ZK_VERSION="$1" 5 | else 6 | ZK_VERSION="3.4.7" 7 | fi 8 | 9 | wget "http://apache.cs.utah.edu/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz" 10 | tar -xvf "zookeeper-${ZK_VERSION}.tar.gz" 11 | mv zookeeper-$ZK_VERSION zk 12 | mv ./zk/conf/zoo_sample.cfg ./zk/conf/zoo.cfg 13 | -------------------------------------------------------------------------------- /api/request/request.go: -------------------------------------------------------------------------------- 1 | package request 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | const ( 8 | RequestSuccessed int = 0 9 | RequestInvalid int = -1001 10 | RequestFailure int = -1002 11 | ) 12 | 13 | var ( 14 | ErrRequestSuccessed = errors.New("request successed") 15 | ErrRequestInvalid = errors.New("request resolve error") 16 | ErrRequestFailure = errors.New("request failure error") 17 | ) 18 | -------------------------------------------------------------------------------- /cluster/storage/dao/json.go: -------------------------------------------------------------------------------- 1 | package dao 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | // MarshalObject encodes an object to binary format 8 | func MarshalObject(object interface{}) ([]byte, error) { 9 | return json.Marshal(object) 10 | } 11 | 12 | // UnmarshalObject decodes an object from binary data 13 | func UnmarshalObject(data []byte, object interface{}) error { 14 | return json.Unmarshal(data, object) 15 | } 16 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/travis_etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -gt 0 ] ; then 4 | ETCD_VERSION="$1" 5 | else 6 | ETCD_VERSION="2.2.0" 7 | fi 8 | 9 | curl -L https://github.com/coreos/etcd/releases/download/v$ETCD_VERSION/etcd-v$ETCD_VERSION-linux-amd64.tar.gz -o etcd-v$ETCD_VERSION-linux-amd64.tar.gz 10 | tar xzvf etcd-v$ETCD_VERSION-linux-amd64.tar.gz 11 | mv etcd-v$ETCD_VERSION-linux-amd64 etcd 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | *.exe 22 | *.test 23 | *.prof 24 | *.log 25 | humpback-center 26 | humpback-center.pid 27 | -------------------------------------------------------------------------------- /api/middleware/logger.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import "github.com/humpback/gounits/logger" 4 | 5 | import ( 6 | "net/http" 7 | "time" 8 | ) 9 | 10 | func Logger(inner http.Handler) http.Handler { 11 | 12 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 13 | t := time.Now() 14 | inner.ServeHTTP(w, r) 15 | logger.INFO("[#api#] HTTP %s\t%s\t%s", r.Method, r.RequestURI, time.Since(t)) 16 | }) 17 | } 18 | -------------------------------------------------------------------------------- /api/utils.go: 
-------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "net/http" 4 | 5 | func httpError(w http.ResponseWriter, err string, code int) { 6 | http.Error(w, err, code) 7 | } 8 | 9 | func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { 10 | w.Header().Add("Access-Control-Allow-Origin", "*") 11 | w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") 12 | w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS, HEAD") 13 | } 14 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/travis_consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -gt 0 ] ; then 4 | CONSUL_VERSION="$1" 5 | else 6 | CONSUL_VERSION="0.5.2" 7 | fi 8 | 9 | # install consul 10 | wget "https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip" 11 | unzip "consul_${CONSUL_VERSION}_linux_amd64.zip" 12 | 13 | # make config for minimum ttl 14 | touch config.json 15 | echo "{\"session_ttl_min\": \"1s\"}" >> config.json 16 | 17 | # check 18 | ./consul --version 19 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/libkv_test.go: -------------------------------------------------------------------------------- 1 | package libkv 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv/store" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestNewStoreUnsupported(t *testing.T) { 12 | client := "localhost:9999" 13 | 14 | kv, err := NewStore( 15 | "unsupported", 16 | []string{client}, 17 | &store.Config{ 18 | ConnectionTimeout: 10 * time.Second, 19 | }, 20 | ) 21 | assert.Error(t, err) 22 | assert.Nil(t, kv) 23 | assert.Equal(t, "Backend storage not supported yet, please choose one of ", err.Error()) 24 | } 25 | -------------------------------------------------------------------------------- /api/listener_linux.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "crypto/tls" 5 | "net" 6 | "os" 7 | "syscall" 8 | ) 9 | 10 | func newUnixListener(addr string, tlsConfig *tls.Config) (net.Listener, error) { 11 | 12 | if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { 13 | return nil, err 14 | } 15 | 16 | mask := syscall.Umask(0777) 17 | defer syscall.Umask(mask) 18 | l, err := newListener("unix", addr, tlsConfig) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | if err := os.Chmod(addr, 0600); err != nil { 24 | return nil, err 25 | } 26 | return l, nil 27 | } 28 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/humpback/gounits/system" 4 | import "github.com/humpback/humpback-center/server" 5 | 6 | import ( 7 | "log" 8 | "os" 9 | ) 10 | 11 | func main() { 12 | 13 | service, err := server.NewCenterService() 14 | if err != nil { 15 | log.Printf("service error:%s\n", err.Error()) 16 | os.Exit(system.ErrorExitCode(err)) 17 | } 18 | 19 | defer func() { 20 | service.Stop() 21 | os.Exit(0) 22 | }() 23 | 24 | if err := service.Startup(); err != nil { 25 | log.Printf("service start error:%s\n", err.Error()) 26 | os.Exit(system.ErrorExitCode(err)) 27 | } 28 | system.InitSignal(nil) 29 | } 30 | 
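main.go above delegates signal handling to `system.InitSignal` from gounits and relies on the deferred `service.Stop()` for cleanup. For readers unfamiliar with that helper, the sketch below shows the equivalent wait-for-SIGINT/SIGTERM pattern using only the standard library; it is an illustration of the idea under the assumption that `InitSignal(nil)` blocks until a termination signal arrives, not the gounits implementation itself.

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Startup work would go here (e.g. building the service and calling Startup).

	// Block until the process receives an interrupt or termination signal,
	// which is presumably what system.InitSignal(nil) does for humpback-center.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	s := <-sig
	log.Printf("received %s, shutting down", s)

	// Deferred cleanup (service.Stop in main.go) runs once main returns.
}
```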
-------------------------------------------------------------------------------- /cluster/types/webhook.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // WebHook is exported 4 | type WebHook struct { 5 | URL string `json:"Url"` 6 | SecretToken string `json:"SecretToken"` 7 | } 8 | 9 | // WebHooks is exported 10 | type WebHooks []WebHook 11 | 12 | // BindConfig is exported 13 | type BindConfig struct { 14 | Category string `json:"Category"` 15 | OwnerToken string `json:"OwnerToken"` 16 | Sandbox bool `json:"Sandbox"` 17 | Location string `json:"Location"` 18 | Protocol string `json:"Protocol"` 19 | Port int `json:"Port"` 20 | Health string `json:"Health"` 21 | APIIDs []string `json:"APIIDs"` 22 | } 23 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/coverage: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODE="mode: count" 4 | ROOT=${TRAVIS_BUILD_DIR:-.}/../../.. 5 | 6 | # Grab the list of packages. 7 | # Exclude the API and CLI from coverage as it will be covered by integration tests. 8 | PACKAGES=`go list ./...` 9 | 10 | # Create the empty coverage file. 11 | echo $MODE > goverage.report 12 | 13 | # Run coverage on every package. 14 | for package in $PACKAGES; do 15 | output="$ROOT/$package/coverage.out" 16 | 17 | go test -test.short -covermode=count -coverprofile=$output $package 18 | if [ -f "$output" ] ; then 19 | cat "$output" | grep -v "$MODE" >> goverage.report 20 | fi 21 | done 22 | -------------------------------------------------------------------------------- /cluster/types/placement.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Spread is exported 4 | type Spread struct { 5 | SpreadDescriptor string `json:"SpreadDescriptor"` 6 | } 7 | 8 | // Preference is exported 9 | type Preference struct { 10 | Spread `json:"Spread"` 11 | } 12 | 13 | // Platform is exported 14 | type Platform struct { 15 | Architecture string `json:"Architecture"` 16 | OS string `json:"OS"` 17 | } 18 | 19 | // Placement is exported 20 | // Cluster services placement constraints 21 | type Placement struct { 22 | Constraints []string `json:"Constraints"` 23 | Preferences []Preference `json:"Preferences"` 24 | Platforms []Platform `json:"Platforms"` 25 | } 26 | -------------------------------------------------------------------------------- /notify/factory.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | //INotifyEndPointFactory is exported 4 | type INotifyEndPointFactory interface { 5 | CreateAPIEndPoint(endpoint EndPoint) IEndPoint 6 | CreateSMTPEndPoint(endpoint EndPoint) IEndPoint 7 | } 8 | 9 | //NotifyEndPointFactory is exported 10 | type NotifyEndPointFactory struct { 11 | INotifyEndPointFactory 12 | } 13 | 14 | //CreateAPIEndPoint is exported 15 | func (factory *NotifyEndPointFactory) CreateAPIEndPoint(endpoint EndPoint) IEndPoint { 16 | return NewAPIEndPoint(endpoint) 17 | } 18 | 19 | //CreateSMTPEndPoint is exported 20 | func (factory *NotifyEndPointFactory) CreateSMTPEndPoint(endpoint EndPoint) IEndPoint { 21 | return NewSMTPEndpoint(endpoint) 22 | } 23 | -------------------------------------------------------------------------------- /cluster/types/responseerror.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import 
"github.com/humpback/gounits/httpx" 4 | 5 | import ( 6 | "fmt" 7 | ) 8 | 9 | /* 10 | Humpback api response exception struct 11 | */ 12 | 13 | // ResponseError is exported 14 | type ResponseError struct { 15 | Code int `json:"Code"` 16 | Detail string `json:"Detail"` 17 | Message string `json:"Message"` 18 | } 19 | 20 | // ParseHTTPResponseError is exported 21 | func ParseHTTPResponseError(response *httpx.HttpResponse) string { 22 | 23 | responseError := &ResponseError{} 24 | if err := response.JSON(responseError); err != nil { 25 | return fmt.Sprintf("engine client error, httpcode: %d", response.StatusCode()) 26 | } 27 | return responseError.Detail 28 | } 29 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig coding styles definitions. For more information about the 2 | # properties used in this file, please see the EditorConfig documentation: 3 | # http://editorconfig.org/ 4 | 5 | # indicate this is the root of the project 6 | root = true 7 | 8 | [*] 9 | charset = utf-8 10 | 11 | end_of_line = CRLF 12 | insert_final_newline = true 13 | trim_trailing_whitespace = true 14 | 15 | indent_style = space 16 | indent_size = 4 17 | 18 | [Makefile] 19 | indent_style = tab 20 | 21 | [*.md] 22 | trim_trailing_whitespace = false 23 | 24 | [*.go] 25 | indent_style = tab 26 | 27 | [*.json] 28 | charset = utf-8 29 | 30 | end_of_line = CRLF 31 | insert_final_newline = true 32 | trim_trailing_whitespace = true 33 | 34 | indent_style = space 35 | indent_size = 2 36 | 37 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM frolvlad/alpine-glibc:alpine-3.7 2 | 3 | MAINTAINER bobliu bobliu0909@gmail.com 4 | 5 | RUN apk add --no-cache bash 6 | 7 | RUN mkdir -p /opt/humpback-center/cache 8 | 9 | RUN mkdir -p /opt/humpback-center/etc 10 | 11 | RUN mkdir -p /opt/humpback-center/notify 12 | 13 | COPY etc/config.yaml /opt/humpback-center/etc/config.yaml 14 | 15 | COPY notify/template.html /opt/humpback-center/notify/template.html 16 | 17 | COPY humpback-center /opt/humpback-center/humpback-center 18 | 19 | COPY dumb-init /dumb-init 20 | 21 | ENTRYPOINT ["/dumb-init", "--"] 22 | 23 | WORKDIR /opt/humpback-center 24 | 25 | VOLUME ["/opt/humpback-center/etc"] 26 | 27 | VOLUME ["/opt/humpback-center/cache"] 28 | 29 | VOLUME ["/opt/humpback-center/logs"] 30 | 31 | CMD ["./humpback-center"] 32 | 33 | EXPOSE 8589 34 | -------------------------------------------------------------------------------- /cluster/types/upgradecontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/common/models" 4 | 5 | // UpgradeContainerResponse is exported 6 | type UpgradeContainerResponse struct { 7 | ID string `json:"Id"` 8 | } 9 | 10 | // UpgradeContainer is exported 11 | type UpgradeContainer struct { 12 | IP string `json:"IP"` 13 | HostName string `json:"HostName"` 14 | models.Container 15 | } 16 | 17 | // UpgradeContainers is exported 18 | type UpgradeContainers []*UpgradeContainer 19 | 20 | // SetUpgradePair is exported 21 | func (upgrade UpgradeContainers) SetUpgradePair(ip string, hostname string, container models.Container) UpgradeContainers { 22 | 23 | upgradeContainer := &UpgradeContainer{ 24 | IP: ip, 25 | HostName: hostname, 26 | Container: container, 27 | } 28 | 
upgrade = append(upgrade, upgradeContainer) 29 | return upgrade 30 | } 31 | -------------------------------------------------------------------------------- /api/response/response.go: -------------------------------------------------------------------------------- 1 | package response 2 | 3 | type Response interface { 4 | SetError(code int, err error, content string) 5 | SetResponse(data interface{}) 6 | } 7 | 8 | /* 9 | 消息返回响应结构体 10 | Code: 响应码, == 0 成功, < 0 失败 11 | Error: 失败名称 12 | Content: 成功/失败描述 13 | ResponseID: 14 | Data: 响应数据 15 | */ 16 | type ResponseResult struct { 17 | Response `json:"-,omitempty"` 18 | Code int `json:"Code"` 19 | Error string `json:"Error"` 20 | Content string `json:"Contnet"` 21 | ResponseID string `json:"ResponseID"` 22 | Data interface{} `json:"Data,omitpty"` 23 | } 24 | 25 | func (r *ResponseResult) SetError(code int, err error, content string) { 26 | 27 | r.Code = code 28 | r.Error = err.Error() 29 | r.Content = content 30 | } 31 | 32 | func (r *ResponseResult) SetResponse(data interface{}) { 33 | 34 | r.Data = data 35 | } 36 | -------------------------------------------------------------------------------- /cluster/types/options.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | //CreateOption is exported 4 | //cluster create containers option values. 5 | //`IsReCreate` indicates whether to re-create an existing containers each time it is built. 6 | //`ForceRemove` is an attached property. When `IsReCreate` is true, it means to force delete or directly upgrade an existing containers. 7 | //`IsRemoveDelay` delay (8 minutes) remove unused containers for service debounce. 8 | //`IsRecovery` service containers recovery check enable. 9 | type CreateOption struct { 10 | IsReCreate bool `json:"IsReCreate"` 11 | ForceRemove bool `json:"ForceRemove"` 12 | IsRemoveDelay bool `json:"IsRemoveDelay"` 13 | IsRecovery bool `json:"IsRecovery"` 14 | } 15 | 16 | //UpdateOption is exported 17 | type UpdateOption struct { 18 | IsRemoveDelay bool `json:"IsRemoveDelay"` 19 | IsRecovery bool `json:"IsRecovery"` 20 | } 21 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/validate-gofmt: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "$(dirname "$BASH_SOURCE")/.validate" 4 | 5 | IFS=$'\n' 6 | files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^Godeps/' || true) ) 7 | unset IFS 8 | 9 | badFiles=() 10 | for f in "${files[@]}"; do 11 | # we use "git show" here to validate that what's committed is formatted 12 | if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then 13 | badFiles+=( "$f" ) 14 | fi 15 | done 16 | 17 | if [ ${#badFiles[@]} -eq 0 ]; then 18 | echo 'Congratulations! All Go source files are properly formatted.' 19 | else 20 | { 21 | echo "These files are not properly gofmt'd:" 22 | for f in "${badFiles[@]}"; do 23 | echo " - $f" 24 | done 25 | echo 26 | echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
27 | echo 28 | } >&2 29 | false 30 | fi 31 | -------------------------------------------------------------------------------- /cluster/types/removedcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // RemovedContainer is exported 4 | type RemovedContainer struct { 5 | IP string `json:"IP"` 6 | HostName string `json:"HostName"` 7 | ContainerID string `json:"ContainerId"` 8 | Result string `json:"Result"` 9 | } 10 | 11 | // RemovedContainers is exported 12 | type RemovedContainers []*RemovedContainer 13 | 14 | // SetRemovedPair is exported 15 | func (removed RemovedContainers) SetRemovedPair(ip string, hostname string, containerid string, err error) RemovedContainers { 16 | 17 | result := "remove successed." 18 | if err != nil { 19 | result = "remove failure, " + err.Error() 20 | } 21 | 22 | removedContainer := &RemovedContainer{ 23 | IP: ip, 24 | HostName: hostname, 25 | ContainerID: containerid, 26 | Result: result, 27 | } 28 | removed = append(removed, removedContainer) 29 | return removed 30 | } 31 | -------------------------------------------------------------------------------- /cluster/types/operatedcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // OperatedContainer is exported 4 | type OperatedContainer struct { 5 | IP string `json:"IP"` 6 | HostName string `json:"HostName"` 7 | ContainerID string `json:"ContainerId"` 8 | Result string `json:"Result"` 9 | } 10 | 11 | // OperatedContainers is exported 12 | type OperatedContainers []*OperatedContainer 13 | 14 | // SetOperatedPair is exported 15 | func (operated OperatedContainers) SetOperatedPair(ip string, hostname string, containerid string, action string, err error) OperatedContainers { 16 | 17 | result := action + " successed." 
18 | if err != nil { 19 | result = action + " failure, " + err.Error() 20 | } 21 | 22 | operatedContainer := &OperatedContainer{ 23 | IP: ip, 24 | HostName: hostname, 25 | ContainerID: containerid, 26 | Result: result, 27 | } 28 | operated = append(operated, operatedContainer) 29 | return operated 30 | } 31 | -------------------------------------------------------------------------------- /cluster/types/createdcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/common/models" 4 | 5 | // CreateContainerResponse is exported 6 | type CreateContainerResponse struct { 7 | ID string `json:"Id"` 8 | Name string `json:"Name"` 9 | Warnings []string `json:"Warnings"` 10 | } 11 | 12 | // CreatedContainer is exported 13 | type CreatedContainer struct { 14 | IP string `json:"IP"` 15 | HostName string `json:"HostName"` 16 | models.Container 17 | } 18 | 19 | // CreatedContainers is exported 20 | type CreatedContainers []*CreatedContainer 21 | 22 | // SetCreatedPair is exported 23 | func (created CreatedContainers) SetCreatedPair(ip string, hostname string, container models.Container) CreatedContainers { 24 | 25 | createdContainer := &CreatedContainer{ 26 | IP: ip, 27 | HostName: hostname, 28 | Container: container, 29 | } 30 | created = append(created, createdContainer) 31 | return created 32 | } 33 | -------------------------------------------------------------------------------- /notify/endpoint_smtp.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "gopkg.in/gomail.v1" 5 | 6 | // SMTPEndPoint is exported 7 | type SMTPEndPoint struct { 8 | IEndPoint 9 | EndPoint 10 | mailer *gomail.Mailer 11 | } 12 | 13 | // NewSMTPEndpoint is exported 14 | func NewSMTPEndpoint(endpoint EndPoint) IEndPoint { 15 | 16 | mailer := gomail.NewMailer(endpoint.Host, endpoint.User, endpoint.Password, endpoint.Port) 17 | return &SMTPEndPoint{ 18 | EndPoint: endpoint, 19 | mailer: mailer, 20 | } 21 | } 22 | 23 | // DoEvent is exported 24 | func (endpoint *SMTPEndPoint) DoEvent(event *Event, data interface{}) { 25 | 26 | if !endpoint.Enabled { 27 | return 28 | } 29 | 30 | msg := gomail.NewMessage() 31 | msg.SetHeader("From", endpoint.Sender) 32 | msg.SetHeader("To", event.ContactInfo) 33 | msg.SetHeader("Subject", event.makeSubjectText()) 34 | msg.SetBody("text/html", data.(string)) 35 | if err := endpoint.mailer.Send(msg); err != nil { 36 | logger.ERROR("[#notify#] smtp endpoint post error: %s", err.Error()) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /cluster/types/groupcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/common/models" 4 | 5 | // EngineContainer is exported 6 | type EngineContainer struct { 7 | IP string `json:"IP"` 8 | HostName string `json:"HostName"` 9 | Container models.Container `json:"Container"` 10 | } 11 | 12 | // GroupContainer is exported 13 | type GroupContainer struct { 14 | GroupID string `json:"GroupId"` 15 | MetaID string `json:"MetaId"` 16 | IsRemoveDelay bool `json:"IsRemoveDelay"` 17 | IsRecovery bool `json:"IsRecovery"` 18 | Instances int `json:"Instances"` 19 | Placement Placement `json:"Placement"` 20 | WebHooks WebHooks `json:"WebHooks"` 21 | Config models.Container `json:"Config"` 22 | Containers []*EngineContainer 
`json:"Containers"` 23 | CreateAt int64 `json:"CreateAt"` 24 | LastUpdateAt int64 `json:"LastUpdateAt"` 25 | } 26 | 27 | // GroupContainers is exported 28 | type GroupContainers []*GroupContainer 29 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/.validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$VALIDATE_UPSTREAM" ]; then 4 | # this is kind of an expensive check, so let's not do this twice if we 5 | # are running more than one validate bundlescript 6 | 7 | VALIDATE_REPO='https://github.com/docker/libkv.git' 8 | VALIDATE_BRANCH='master' 9 | 10 | if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then 11 | VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" 12 | VALIDATE_BRANCH="${TRAVIS_BRANCH}" 13 | fi 14 | 15 | VALIDATE_HEAD="$(git rev-parse --verify HEAD)" 16 | 17 | git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" 18 | VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" 19 | 20 | VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" 21 | VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" 22 | 23 | validate_diff() { 24 | if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then 25 | git diff "$VALIDATE_COMMIT_DIFF" "$@" 26 | fi 27 | } 28 | validate_log() { 29 | if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then 30 | git log "$VALIDATE_COMMIT_LOG" "$@" 31 | fi 32 | } 33 | fi 34 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/MAINTAINERS: -------------------------------------------------------------------------------- 1 | # Libkv maintainers file 2 | # 3 | # This file describes who runs the docker/libkv project and how. 4 | # This is a living document - if you see something out of date or missing, speak up! 5 | # 6 | # It is structured to be consumable by both humans and programs. 7 | # To extract its contents programmatically, use any TOML-compliant parser. 8 | # 9 | # This file is compiled into the MAINTAINERS file in docker/opensource. 10 | # 11 | [Org] 12 | [Org."Core maintainers"] 13 | people = [ 14 | "aluzzardi", 15 | "sanimej", 16 | "vieux", 17 | ] 18 | 19 | [people] 20 | 21 | # A reference list of all people associated with the project. 22 | # All other sections should refer to people by their canonical key 23 | # in the people section. 
24 | 25 | # ADD YOURSELF HERE IN ALPHABETICAL ORDER 26 | 27 | [people.aluzzardi] 28 | Name = "Andrea Luzzardi" 29 | Email = "al@docker.com" 30 | GitHub = "aluzzardi" 31 | 32 | [people.sanimej] 33 | Name = "Santhosh Manohar" 34 | Email = "santhosh@docker.com" 35 | GitHub = "sanimej" 36 | 37 | [people.vieux] 38 | Name = "Victor Vieux" 39 | Email = "vieux@docker.com" 40 | GitHub = "vieux" 41 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/libkv.go: -------------------------------------------------------------------------------- 1 | package libkv 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | 8 | "github.com/docker/libkv/store" 9 | ) 10 | 11 | // Initialize creates a new Store object, initializing the client 12 | type Initialize func(addrs []string, options *store.Config) (store.Store, error) 13 | 14 | var ( 15 | // Backend initializers 16 | initializers = make(map[store.Backend]Initialize) 17 | 18 | supportedBackend = func() string { 19 | keys := make([]string, 0, len(initializers)) 20 | for k := range initializers { 21 | keys = append(keys, string(k)) 22 | } 23 | sort.Strings(keys) 24 | return strings.Join(keys, ", ") 25 | }() 26 | ) 27 | 28 | // NewStore creates an instance of store 29 | func NewStore(backend store.Backend, addrs []string, options *store.Config) (store.Store, error) { 30 | if init, exists := initializers[backend]; exists { 31 | return init(addrs, options) 32 | } 33 | 34 | return nil, fmt.Errorf("%s %s", store.ErrBackendNotSupported.Error(), supportedBackend) 35 | } 36 | 37 | // AddStore adds a new store backend to libkv 38 | func AddStore(store store.Backend, init Initialize) { 39 | initializers[store] = init 40 | } 41 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/helpers.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | // CreateEndpoints creates a list of endpoints given the right scheme 8 | func CreateEndpoints(addrs []string, scheme string) (entries []string) { 9 | for _, addr := range addrs { 10 | entries = append(entries, scheme+"://"+addr) 11 | } 12 | return entries 13 | } 14 | 15 | // Normalize the key for each store to the form: 16 | // 17 | // /path/to/key 18 | // 19 | func Normalize(key string) string { 20 | return "/" + join(SplitKey(key)) 21 | } 22 | 23 | // GetDirectory gets the full directory part of 24 | // the key to the form: 25 | // 26 | // /path/to/ 27 | // 28 | func GetDirectory(key string) string { 29 | parts := SplitKey(key) 30 | parts = parts[:len(parts)-1] 31 | return "/" + join(parts) 32 | } 33 | 34 | // SplitKey splits the key to extract path informations 35 | func SplitKey(key string) (path []string) { 36 | if strings.Contains(key, "/") { 37 | path = strings.Split(key, "/") 38 | } else { 39 | path = []string{key} 40 | } 41 | return path 42 | } 43 | 44 | // join the path parts with '/' 45 | func join(parts []string) string { 46 | return strings.Join(parts, "/") 47 | } 48 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.7.1 5 | 6 | # let us have speedy Docker-based Travis workers 7 | sudo: false 8 | 9 | before_install: 10 | # Symlink below is needed for Travis CI to work correctly on personal forks of libkv 11 
| - ln -s $HOME/gopath/src/github.com/${TRAVIS_REPO_SLUG///libkv/} $HOME/gopath/src/github.com/docker 12 | - go get golang.org/x/tools/cmd/cover 13 | - go get github.com/mattn/goveralls 14 | - go get github.com/golang/lint/golint 15 | - go get github.com/GeertJohan/fgt 16 | 17 | before_script: 18 | - script/travis_consul.sh 0.6.3 19 | - script/travis_etcd.sh 3.0.0 20 | - script/travis_zk.sh 3.5.1-alpha 21 | 22 | script: 23 | - ./consul agent -server -bootstrap -advertise=127.0.0.1 -data-dir /tmp/consul -config-file=./config.json 1>/dev/null & 24 | - ./etcd/etcd --listen-client-urls 'http://0.0.0.0:4001' --advertise-client-urls 'http://127.0.0.1:4001' >/dev/null 2>&1 & 25 | - ./zk/bin/zkServer.sh start ./zk/conf/zoo.cfg 1> /dev/null 26 | - script/validate-gofmt 27 | - go vet ./... 28 | - fgt golint ./... 29 | - go test -v -race ./... 30 | - script/coverage 31 | - goveralls -service=travis-ci -coverprofile=goverage.report 32 | -------------------------------------------------------------------------------- /etc/config.yaml: -------------------------------------------------------------------------------- 1 | version: 1.3.7 2 | pidfile: ./humpback-center.pid 3 | retrystartup: true 4 | siteapi: http://192.168.2.80:8012/api 5 | cluster: 6 | opts: [ 7 | #"location=dev", 8 | "datapath=./data", 9 | "cacheroot=./cache", 10 | "overcommit=0.08", 11 | "recoveryinterval=320s", 12 | "createretry=2", 13 | "migratedelay=145s", 14 | "removedelay=500s" 15 | ] 16 | discovery: 17 | uris: etcd://192.168.2.80:2379 18 | cluster: humpback/center 19 | heartbeat: 8s 20 | api: 21 | hosts: [":8589"] 22 | enablecors: true 23 | notifications: 24 | endpoints: 25 | #- name: api 26 | # url: http://192.168.139.1:8009/framework/v1/mail 27 | # headers: 28 | # x-cluster-notify: ["endo"] 29 | # content-type: ["application/json; charset=utf-8"] 30 | # sender: humpback@newegg.com 31 | # enabled: true 32 | #- name: smtp 33 | # host: smtp.example.com 34 | # port: 25 35 | # user: u1 36 | # password: 123456 37 | # sender: humpback@newegg.com 38 | # enabled: true 39 | logger: 40 | logfile: ./logs/humpback-center.log 41 | loglevel: info 42 | logsize: 20971520 43 | ... 
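The `cluster.opts` entries in config.yaml above are flat `key=value` strings (`datapath=./data`, `overcommit=0.08`, and so on). How the cluster driver actually consumes them is defined elsewhere in the repo; the sketch below only illustrates, under that assumption about the format, how such a list can be split into a lookup map.

```go
package main

import (
	"fmt"
	"strings"
)

// parseDriverOpts turns "key=value" strings (the cluster.opts format used in
// config.yaml) into a map; entries without '=' are kept with an empty value.
// This is an illustrative helper, not the parsing humpback-center ships with.
func parseDriverOpts(opts []string) map[string]string {
	parsed := make(map[string]string, len(opts))
	for _, opt := range opts {
		kv := strings.SplitN(opt, "=", 2)
		if len(kv) == 2 {
			parsed[kv[0]] = kv[1]
		} else {
			parsed[kv[0]] = ""
		}
	}
	return parsed
}

func main() {
	opts := []string{"datapath=./data", "cacheroot=./cache", "overcommit=0.08"}
	fmt.Println(parseDriverOpts(opts)["overcommit"]) // 0.08
}
```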
44 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/zookeeper/zookeeper_test.go: -------------------------------------------------------------------------------- 1 | package zookeeper 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv" 8 | "github.com/docker/libkv/store" 9 | "github.com/docker/libkv/testutils" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | client = "localhost:2181" 15 | ) 16 | 17 | func makeZkClient(t *testing.T) store.Store { 18 | kv, err := New( 19 | []string{client}, 20 | &store.Config{ 21 | ConnectionTimeout: 3 * time.Second, 22 | }, 23 | ) 24 | 25 | if err != nil { 26 | t.Fatalf("cannot create store: %v", err) 27 | } 28 | 29 | return kv 30 | } 31 | 32 | func TestRegister(t *testing.T) { 33 | Register() 34 | 35 | kv, err := libkv.NewStore(store.ZK, []string{client}, nil) 36 | assert.NoError(t, err) 37 | assert.NotNil(t, kv) 38 | 39 | if _, ok := kv.(*Zookeeper); !ok { 40 | t.Fatal("Error registering and initializing zookeeper") 41 | } 42 | } 43 | 44 | func TestZkStore(t *testing.T) { 45 | kv := makeZkClient(t) 46 | ttlKV := makeZkClient(t) 47 | 48 | defer testutils.RunCleanup(t, kv) 49 | 50 | testutils.RunTestCommon(t, kv) 51 | testutils.RunTestAtomic(t, kv) 52 | testutils.RunTestWatch(t, kv) 53 | testutils.RunTestLock(t, kv) 54 | testutils.RunTestTTL(t, kv, ttlKV) 55 | } 56 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/etcd/etcd_test.go: -------------------------------------------------------------------------------- 1 | package etcd 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv" 8 | "github.com/docker/libkv/store" 9 | "github.com/docker/libkv/testutils" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | client = "localhost:4001" 15 | ) 16 | 17 | func makeEtcdClient(t *testing.T) store.Store { 18 | kv, err := New( 19 | []string{client}, 20 | &store.Config{ 21 | ConnectionTimeout: 3 * time.Second, 22 | Username: "test", 23 | Password: "very-secure", 24 | }, 25 | ) 26 | 27 | if err != nil { 28 | t.Fatalf("cannot create store: %v", err) 29 | } 30 | 31 | return kv 32 | } 33 | 34 | func TestRegister(t *testing.T) { 35 | Register() 36 | 37 | kv, err := libkv.NewStore(store.ETCD, []string{client}, nil) 38 | assert.NoError(t, err) 39 | assert.NotNil(t, kv) 40 | 41 | if _, ok := kv.(*Etcd); !ok { 42 | t.Fatal("Error registering and initializing etcd") 43 | } 44 | } 45 | 46 | func TestEtcdStore(t *testing.T) { 47 | kv := makeEtcdClient(t) 48 | lockKV := makeEtcdClient(t) 49 | ttlKV := makeEtcdClient(t) 50 | 51 | defer testutils.RunCleanup(t, kv) 52 | 53 | testutils.RunTestCommon(t, kv) 54 | testutils.RunTestAtomic(t, kv) 55 | testutils.RunTestWatch(t, kv) 56 | testutils.RunTestLock(t, kv) 57 | testutils.RunTestLockTTL(t, kv, lockKV) 58 | testutils.RunTestLockWait(t, kv, lockKV) 59 | testutils.RunTestTTL(t, kv, ttlKV) 60 | } 61 | -------------------------------------------------------------------------------- /notify/typedef.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "net/http" 4 | 5 | //EndPoint is exported 6 | type EndPoint struct { 7 | Name string `yaml:"name"` 8 | URL string `yaml:"url"` 9 | Enabled bool `yaml:"enabled"` 10 | Sender string `yaml:"sender"` 11 | Headers http.Header `yaml:"headers"` 12 | Host string `yaml:"host"` 13 | Port int `yaml:"port"` 14 | 
User string `yaml:"user"` 15 | Password string `yaml:"password"` 16 | } 17 | 18 | //Notifications is exported 19 | type Notifications struct { 20 | EndPoints []EndPoint `yaml:"endpoints,omitempty"` 21 | } 22 | 23 | //Engine is exported 24 | type Engine struct { 25 | IP string 26 | Name string 27 | State string 28 | } 29 | 30 | //WatchGroup is exported 31 | type WatchGroup struct { 32 | GroupID string 33 | GroupName string 34 | Location string 35 | ContactInfo string 36 | Engines []*Engine 37 | } 38 | 39 | //WatchGroups is exported 40 | type WatchGroups map[string]*WatchGroup 41 | 42 | //Container is exported 43 | type Container struct { 44 | ID string 45 | Name string 46 | Server string 47 | State string 48 | } 49 | 50 | //GroupMeta is exported 51 | type GroupMeta struct { 52 | MetaID string 53 | MetaName string 54 | Location string 55 | GroupID string 56 | GroupName string 57 | Instances int 58 | Image string 59 | ContactInfo string 60 | Engines []*Engine 61 | Containers []*Container 62 | } 63 | -------------------------------------------------------------------------------- /cluster/reduce.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | // ReduceEngine is exported 4 | type ReduceEngine struct { 5 | metaid string 6 | engine *Engine 7 | container *Container 8 | } 9 | 10 | // Containers is exported 11 | // Return engine's containers of metaid 12 | func (reduce *ReduceEngine) Containers() Containers { 13 | 14 | if reduce.engine != nil { 15 | return reduce.engine.Containers(reduce.metaid) 16 | } 17 | return Containers{} 18 | } 19 | 20 | // ReduceContainer is exported 21 | func (reduce *ReduceEngine) ReduceContainer() *Container { 22 | 23 | return reduce.container 24 | } 25 | 26 | // Engine is exported 27 | func (reduce *ReduceEngine) Engine() *Engine { 28 | 29 | return reduce.engine 30 | } 31 | 32 | type reduceEngines []*ReduceEngine 33 | 34 | func (engines reduceEngines) Len() int { 35 | 36 | return len(engines) 37 | } 38 | 39 | func (engines reduceEngines) Swap(i, j int) { 40 | 41 | engines[i], engines[j] = engines[j], engines[i] 42 | } 43 | 44 | func (engines reduceEngines) Less(i, j int) bool { 45 | 46 | return len(engines[i].Containers()) > len(engines[j].Containers()) 47 | } 48 | 49 | func selectReduceEngines(metaid string, engines []*Engine) reduceEngines { 50 | 51 | out := reduceEngines{} 52 | for _, engine := range engines { 53 | if engine.IsHealthy() { 54 | containers := engine.Containers(metaid) 55 | if len(containers) > 0 { 56 | out = append(out, &ReduceEngine{ 57 | engine: engine, 58 | metaid: metaid, 59 | container: containers[0], 60 | }) 61 | } 62 | } 63 | } 64 | return out 65 | } 66 | -------------------------------------------------------------------------------- /ctrl/controller.go: -------------------------------------------------------------------------------- 1 | package ctrl 2 | 3 | import "github.com/humpback/gounits/httpx" 4 | import "github.com/humpback/gounits/logger" 5 | import "github.com/humpback/humpback-center/cluster" 6 | import "github.com/humpback/humpback-center/etc" 7 | 8 | import ( 9 | "net" 10 | "net/http" 11 | "time" 12 | ) 13 | 14 | // Controller is exprted 15 | type Controller struct { 16 | client *httpx.HttpClient 17 | Configuration *etc.Configuration 18 | Cluster *cluster.Cluster 19 | } 20 | 21 | // NewController is exported 22 | func NewController(configuration *etc.Configuration) (*Controller, error) { 23 | 24 | cluster, err := createCluster(configuration) 25 | if err != nil { 26 | 
return nil, err 27 | } 28 | 29 | client := httpx.NewClient(). 30 | SetTransport(&http.Transport{ 31 | Proxy: http.ProxyFromEnvironment, 32 | DialContext: (&net.Dialer{ 33 | Timeout: 45 * time.Second, 34 | KeepAlive: 90 * time.Second, 35 | }).DialContext, 36 | DisableKeepAlives: false, 37 | MaxIdleConns: 10, 38 | MaxIdleConnsPerHost: 10, 39 | IdleConnTimeout: 90 * time.Second, 40 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 41 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 42 | }) 43 | 44 | return &Controller{ 45 | client: client, 46 | Configuration: configuration, 47 | Cluster: cluster, 48 | }, nil 49 | } 50 | 51 | // Initialize is exported 52 | // init cluster 53 | func (c *Controller) Initialize() error { 54 | 55 | logger.INFO("[#ctrl#] controller initialize.....") 56 | logger.INFO("[#ctrl#] configuration %+v", c.Configuration) 57 | return c.startCluster() 58 | } 59 | 60 | // UnInitialize is exported 61 | // uninit cluster 62 | func (c *Controller) UnInitialize() { 63 | 64 | c.stopCluster() 65 | logger.INFO("[#ctrl#] controller uninitialized.") 66 | } 67 | -------------------------------------------------------------------------------- /cluster/storage/storage.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import "github.com/boltdb/bolt" 4 | import "github.com/humpback/gounits/system" 5 | import "github.com/humpback/humpback-center/cluster/storage/node" 6 | 7 | import ( 8 | "fmt" 9 | "path" 10 | "path/filepath" 11 | "time" 12 | ) 13 | 14 | const ( 15 | databaseFileName = "data.db" 16 | ) 17 | 18 | // DataStorage defines the implementation of datastore using 19 | // BoltDB as the storage system. 20 | type DataStorage struct { 21 | path string 22 | driver *bolt.DB 23 | NodeStorage *node.NodeStorage 24 | } 25 | 26 | // NewDataStorage is exported 27 | func NewDataStorage(storePath string) (*DataStorage, error) { 28 | 29 | var err error 30 | storePath, err = filepath.Abs(storePath) 31 | if err != nil { 32 | return nil, fmt.Errorf("storage driver path invalid, %s", err) 33 | } 34 | 35 | storePath = filepath.Clean(storePath) 36 | if err = system.MakeDirectory(storePath); err != nil { 37 | return nil, fmt.Errorf("storage driver make directory failure, %s", err) 38 | } 39 | 40 | databasePath := path.Join(storePath, databaseFileName) 41 | databasePath = filepath.Clean(databasePath) 42 | return &DataStorage{ 43 | path: databasePath, 44 | }, nil 45 | } 46 | 47 | // Open is exported 48 | // open storage driver file. 49 | func (storage *DataStorage) Open() error { 50 | 51 | if storage.driver == nil { 52 | driver, err := bolt.Open(storage.path, 0600, &bolt.Options{Timeout: 1 * time.Second}) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | nodeStorage, err := node.NewNodeStorage(driver) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | storage.NodeStorage = nodeStorage 63 | storage.driver = driver 64 | } 65 | return nil 66 | } 67 | 68 | // Close is exported 69 | // Close storage driver file. 70 | func (storage *DataStorage) Close() error { 71 | 72 | if storage.driver != nil { 73 | return storage.driver.Close() 74 | } 75 | return nil 76 | } 77 | -------------------------------------------------------------------------------- /cluster/errors.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "errors" 4 | 5 | // cluster errors define 6 | var ( 7 | //cluster discovery is nil. 
8 | ErrClusterDiscoveryInvalid = errors.New("cluster discovery invalid") 9 | //cluster meta not found 10 | ErrClusterMetaDataNotFound = errors.New("cluster metadata not found") 11 | //cluster group not found 12 | ErrClusterGroupNotFound = errors.New("cluster group not found") 13 | //cluster container not found 14 | ErrClusterContainerNotFound = errors.New("cluster container not found") 15 | //cluster server not found 16 | ErrClusterServerNotFound = errors.New("cluster server not found") 17 | //cluster group no docker engine available 18 | ErrClusterNoEngineAvailable = errors.New("cluster no docker-engine available") 19 | //cluster containers instances invalid 20 | ErrClusterContainersInstancesInvalid = errors.New("cluster containers instances invalid") 21 | //cluster containers meta create failure 22 | ErrClusterContainersMetaCreateFailure = errors.New("cluster containers meta create failure") 23 | //cluster create containers name conflict 24 | ErrClusterCreateContainerNameConflict = errors.New("cluster create containers name conflict, this cluster already exists") 25 | //cluster create containers tag already using 26 | ErrClusterCreateContainerTagAlreadyUsing = errors.New("cluster create containers tag is already using") 27 | //cluster create containers all failure 28 | ErrClusterCreateContainerFailure = errors.New("cluster create containers failure") 29 | //cluster containers is upgrading 30 | ErrClusterContainersUpgrading = errors.New("cluster containers state is upgrading") 31 | //cluster containers is migrating 32 | ErrClusterContainersMigrating = errors.New("cluster containers state is migrating") 33 | //cluster containers is setting 34 | ErrClusterContainersSetting = errors.New("cluster containers state is setting") 35 | //cluster containers instances no change 36 | ErrClusterContainersInstancesNoChange = errors.New("cluster containers instances no change") 37 | ) 38 | -------------------------------------------------------------------------------- /notify/endpoint_api.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "github.com/humpback/gounits/httpx" 4 | import "github.com/humpback/gounits/logger" 5 | 6 | import ( 7 | "context" 8 | "time" 9 | "net" 10 | "net/http" 11 | ) 12 | 13 | // APIEndPoint is exported 14 | type APIEndPoint struct { 15 | IEndPoint 16 | EndPoint 17 | client *httpx.HttpClient 18 | } 19 | 20 | // NewAPIEndPoint is exported 21 | func NewAPIEndPoint(endpoint EndPoint) IEndPoint { 22 | 23 | client := httpx.NewClient(). 
24 | SetTransport(&http.Transport{ 25 | Proxy: http.ProxyFromEnvironment, 26 | DialContext: (&net.Dialer{ 27 | Timeout: 45 * time.Second, 28 | KeepAlive: 90 * time.Second, 29 | }).DialContext, 30 | DisableKeepAlives: false, 31 | MaxIdleConns: 10, 32 | MaxIdleConnsPerHost: 10, 33 | IdleConnTimeout: 90 * time.Second, 34 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 35 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 36 | }) 37 | 38 | return &APIEndPoint{ 39 | EndPoint: endpoint, 40 | client: client, 41 | } 42 | } 43 | 44 | // DoEvent is exported 45 | func (endpoint *APIEndPoint) DoEvent(event *Event, data interface{}) { 46 | 47 | if !endpoint.Enabled { 48 | return 49 | } 50 | 51 | value := map[string]interface{}{ 52 | "From": endpoint.Sender, 53 | "To": event.ContactInfo, 54 | "Subject": event.makeSubjectText(), 55 | "Body": data, 56 | "ContentType": "HTML", 57 | "MailType": "Smtp", 58 | "SmtpSetting": map[string]interface{}{}, 59 | } 60 | 61 | response, err := endpoint.client.PostJSON(context.Background(), endpoint.URL, nil, value, endpoint.Headers) 62 | if err != nil { 63 | logger.ERROR("[#notify#] api endpoint error: %s", err.Error()) 64 | return 65 | } 66 | defer response.Close() 67 | if response.StatusCode() >= http.StatusBadRequest { 68 | logger.ERROR("[#notify#] api endpoint response code: %d", response.StatusCode()) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /cluster/enginespriority.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "sync" 4 | 5 | // EnginePriorities is exported 6 | type EnginePriorities struct { 7 | sync.RWMutex 8 | Engines map[string]*Engine 9 | } 10 | 11 | // NewEnginePriorities is exported 12 | func NewEnginePriorities(metaData *MetaData, engines []*Engine) *EnginePriorities { 13 | 14 | enginePriorities := &EnginePriorities{ 15 | Engines: make(map[string]*Engine), 16 | } 17 | 18 | for _, baseConfig := range metaData.BaseConfigs { 19 | for _, engine := range engines { 20 | if engine.IsHealthy() && engine.HasContainer(baseConfig.ID) { 21 | enginePriorities.Add(baseConfig.ID, engine) 22 | break 23 | } 24 | } 25 | } 26 | return enginePriorities 27 | } 28 | 29 | // EngineStrings is exported 30 | func (priorities *EnginePriorities) EngineStrings() []string { 31 | 32 | engines := []string{} 33 | priorities.RLock() 34 | defer priorities.RUnlock() 35 | for _, engine := range priorities.Engines { 36 | engines = append(engines, engine.IP) 37 | } 38 | return engines 39 | } 40 | 41 | // Select is exported 42 | func (priorities *EnginePriorities) Select() *Engine { 43 | 44 | var engine *Engine 45 | priorities.Lock() 46 | defer priorities.Unlock() 47 | if len(priorities.Engines) == 0 { 48 | return nil 49 | } 50 | 51 | for containerid, e := range priorities.Engines { 52 | engine = e 53 | delete(priorities.Engines, containerid) 54 | break 55 | } 56 | return engine 57 | } 58 | 59 | // Size is exported 60 | func (priorities *EnginePriorities) Size() int { 61 | 62 | size := 0 63 | priorities.RLock() 64 | size = len(priorities.Engines) 65 | priorities.RUnlock() 66 | return size 67 | } 68 | 69 | // Add is exported 70 | func (priorities *EnginePriorities) Add(containerid string, engine *Engine) { 71 | 72 | priorities.Lock() 73 | if _, ret := priorities.Engines[containerid]; !ret { 74 | priorities.Engines[containerid] = engine 75 | } 76 | priorities.Unlock() 77 | } 78 | 79 | // Remove is 
exported 80 | func (priorities *EnginePriorities) Remove(containerid string) { 81 | 82 | priorities.Lock() 83 | delete(priorities.Engines, containerid) 84 | priorities.Unlock() 85 | } 86 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/consul/consul_test.go: -------------------------------------------------------------------------------- 1 | package consul 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv" 8 | "github.com/docker/libkv/store" 9 | "github.com/docker/libkv/testutils" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | client = "localhost:8500" 15 | ) 16 | 17 | func makeConsulClient(t *testing.T) store.Store { 18 | 19 | kv, err := New( 20 | []string{client}, 21 | &store.Config{ 22 | ConnectionTimeout: 3 * time.Second, 23 | }, 24 | ) 25 | 26 | if err != nil { 27 | t.Fatalf("cannot create store: %v", err) 28 | } 29 | 30 | return kv 31 | } 32 | 33 | func TestRegister(t *testing.T) { 34 | Register() 35 | 36 | kv, err := libkv.NewStore(store.CONSUL, []string{client}, nil) 37 | assert.NoError(t, err) 38 | assert.NotNil(t, kv) 39 | 40 | if _, ok := kv.(*Consul); !ok { 41 | t.Fatal("Error registering and initializing consul") 42 | } 43 | } 44 | 45 | func TestConsulStore(t *testing.T) { 46 | kv := makeConsulClient(t) 47 | lockKV := makeConsulClient(t) 48 | ttlKV := makeConsulClient(t) 49 | 50 | defer testutils.RunCleanup(t, kv) 51 | 52 | testutils.RunTestCommon(t, kv) 53 | testutils.RunTestAtomic(t, kv) 54 | testutils.RunTestWatch(t, kv) 55 | testutils.RunTestLock(t, kv) 56 | testutils.RunTestLockTTL(t, kv, lockKV) 57 | testutils.RunTestLockWait(t, kv, lockKV) 58 | testutils.RunTestTTL(t, kv, ttlKV) 59 | } 60 | 61 | func TestGetActiveSession(t *testing.T) { 62 | kv := makeConsulClient(t) 63 | 64 | consul := kv.(*Consul) 65 | 66 | key := "foo" 67 | value := []byte("bar") 68 | 69 | // Put the first key with the Ephemeral flag 70 | err := kv.Put(key, value, &store.WriteOptions{TTL: 2 * time.Second}) 71 | assert.NoError(t, err) 72 | 73 | // Session should not be empty 74 | session, err := consul.getActiveSession(key) 75 | assert.NoError(t, err) 76 | assert.NotEqual(t, session, "") 77 | 78 | // Delete the key 79 | err = kv.Delete(key) 80 | assert.NoError(t, err) 81 | 82 | // Check the session again, it should return nothing 83 | session, err = consul.getActiveSession(key) 84 | assert.NoError(t, err) 85 | assert.Equal(t, session, "") 86 | } 87 | -------------------------------------------------------------------------------- /server/service.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import "github.com/humpback/gounits/fprocess" 4 | import "github.com/humpback/gounits/logger" 5 | import "github.com/humpback/humpback-center/api" 6 | import "github.com/humpback/humpback-center/ctrl" 7 | import "github.com/humpback/humpback-center/etc" 8 | 9 | import ( 10 | "flag" 11 | ) 12 | 13 | /* 14 | CenterService is exported 15 | humpback center service 16 | */ 17 | type CenterService struct { 18 | PIDFile *fprocess.PIDFile 19 | APIServer *api.Server 20 | Controller *ctrl.Controller 21 | } 22 | 23 | // NewCenterService exported 24 | func NewCenterService() (*CenterService, error) { 25 | 26 | var conf string 27 | flag.StringVar(&conf, "f", "etc/config.yaml", "humpback center configuration file.") 28 | flag.Parse() 29 | configuration, err := etc.NewConfiguration(conf) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | 
pidfile, err := fprocess.New(configuration.PIDFile) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | largs := configuration.GetLogger() 40 | logger.OPEN(largs) 41 | controller, err := ctrl.NewController(configuration) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | apiserver := api.NewServer(configuration.API.Hosts, nil, controller, configuration.API.EnableCors) 47 | return &CenterService{ 48 | PIDFile: pidfile, 49 | APIServer: apiserver, 50 | Controller: controller, 51 | }, nil 52 | } 53 | 54 | func (service *CenterService) Startup() error { 55 | 56 | logger.INFO("[#service#] service start...") 57 | if err := service.Controller.Initialize(); err != nil { 58 | return err 59 | } 60 | logger.INFO("[#service#] center process %d", service.PIDFile.PID) 61 | //apiserver start. 62 | go func() { 63 | logger.INFO("[#service#] center API listen: %s", service.APIServer.ListenHosts()) 64 | if err := service.APIServer.Startup(); err != nil { 65 | logger.ERROR("[#service#] service API start error:%s", err.Error()) 66 | } 67 | }() 68 | return nil 69 | } 70 | 71 | func (service *CenterService) Stop() error { 72 | 73 | service.Controller.UnInitialize() 74 | service.PIDFile.Remove() 75 | logger.INFO("[#service#] service closed.") 76 | logger.CLOSE() 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /notify/events.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "github.com/humpback/gounits/rand" 4 | 5 | import ( 6 | "bytes" 7 | "html/template" 8 | "time" 9 | ) 10 | 11 | //EventType is exported 12 | type EventType int 13 | 14 | const ( 15 | //GroupEnginesWatchEvent is exported 16 | //cluster discovery watch nodes event 17 | GroupEnginesWatchEvent EventType = 1000 18 | //GroupMetaContainersEvent is exported 19 | //cluster meta containers migrated or recovered to warning event 20 | GroupMetaContainersEvent EventType = 1001 21 | ) 22 | 23 | //eventsTextMap is exported 24 | var eventsTextMap = map[EventType]string{ 25 | GroupEnginesWatchEvent: "GroupEnginesWatchEvent", 26 | GroupMetaContainersEvent: "GroupMetaContainersEvent", 27 | } 28 | 29 | //Event is exported 30 | type Event struct { 31 | ID string 32 | Type EventType 33 | Name string 34 | Error error 35 | ContactInfo string 36 | Endpoints []IEndPoint 37 | data map[string]interface{} 38 | } 39 | 40 | //NewEvent is exported 41 | func NewEvent(eventType EventType, description string, err error, contactInfo string, siteurl string, endpoints []IEndPoint) *Event { 42 | 43 | seed := time.Now() 44 | event := &Event{ 45 | ID: rand.UUID(true), 46 | Type: eventType, 47 | Name: eventsTextMap[eventType], 48 | Error: err, 49 | ContactInfo: contactInfo, 50 | Endpoints: endpoints, 51 | } 52 | 53 | event.data = map[string]interface{}{ 54 | "SiteURL": siteurl, 55 | "ID": event.ID, 56 | "Event": event.Name, 57 | "Description": description, 58 | "Timestamp": seed.UnixNano(), 59 | "Datetime": seed, 60 | } 61 | 62 | if err != nil { 63 | event.data["Exception"] = err.Error() 64 | } 65 | return event 66 | } 67 | 68 | //Dispatch is exported 69 | func (event *Event) dispatch(templateBody string) { 70 | 71 | if len(templateBody) > 0 { 72 | var buf bytes.Buffer 73 | t := template.New("") 74 | t.Parse(templateBody) 75 | t.Execute(&buf, event.data) 76 | for _, endPoint := range event.Endpoints { 77 | endPoint.DoEvent(event, buf.String()) 78 | } 79 | } 80 | } 81 | 82 | //makeSubjectText is exported 83 | func (event *Event) makeSubjectText() 
string { 84 | 85 | subjectPrefix := "(info)" 86 | if event.Error != nil { 87 | subjectPrefix = "(warn)" 88 | } 89 | return subjectPrefix + " Humpback Notification" 90 | } 91 | -------------------------------------------------------------------------------- /etc/configuration.go: -------------------------------------------------------------------------------- 1 | package etc 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "github.com/humpback/humpback-center/notify" 5 | import "gopkg.in/yaml.v2" 6 | 7 | import ( 8 | "io/ioutil" 9 | "os" 10 | ) 11 | 12 | // Configuration is exported 13 | type Configuration struct { 14 | 15 | //base options 16 | Version string `yaml:"version" json:"version"` 17 | PIDFile string `yaml:"pidfile" json:"pidfile"` 18 | RetryStartup bool `yaml:"retrystartup" json:"retrystartup"` 19 | SiteAPI string `yaml:"siteapi" json:"siteapi"` 20 | 21 | Cluster struct { 22 | //driver opts 23 | DriverOpts []string `yaml:"opts" json:"opts"` 24 | //service discovery opts 25 | Discovery struct { 26 | URIs string `yaml:"uris" json:"uris"` 27 | Cluster string `yaml:"cluster" json:"cluster"` 28 | Heartbeat string `yaml:"heartbeat" json:"heartbeat"` 29 | } `yaml:"discovery" json:"discovery"` 30 | } `yaml:"cluster" json:"cluster"` 31 | 32 | //api options 33 | API struct { 34 | Hosts []string `yaml:"hosts" json:"hosts"` 35 | EnableCors bool `yaml:"enablecors" json:"enablecors"` 36 | } `yaml:"api" json:"api"` 37 | 38 | Notifications notify.Notifications `yaml:"notifications,omitempty" json:"notifications,omitempty"` 39 | 40 | //log options 41 | Logger struct { 42 | LogFile string `yaml:"logfile" json:"logfile"` 43 | LogLevel string `yaml:"loglevel" json:"loglevel"` 44 | LogSize int64 `yaml:"logsize" json:"logsize"` 45 | } `yaml:"logger" json:"logger"` 46 | } 47 | 48 | // NewConfiguration is exported 49 | func NewConfiguration(file string) (*Configuration, error) { 50 | 51 | fd, err := os.OpenFile(file, os.O_RDWR, 0777) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | defer fd.Close() 57 | data, err := ioutil.ReadAll(fd) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | conf := &Configuration{} 63 | if err := yaml.Unmarshal([]byte(data), conf); err != nil { 64 | return nil, err 65 | } 66 | 67 | if err := conf.ParseEnv(); err != nil { 68 | return nil, err 69 | } 70 | return conf, nil 71 | } 72 | 73 | // GetNotificationsEndPoints is exported 74 | func (conf *Configuration) GetNotificationsEndPoints() []notify.EndPoint { 75 | 76 | return conf.Notifications.EndPoints 77 | } 78 | 79 | // GetLogger is exported 80 | func (conf *Configuration) GetLogger() *logger.Args { 81 | 82 | return &logger.Args{ 83 | FileName: conf.Logger.LogFile, 84 | Level: conf.Logger.LogLevel, 85 | MaxSize: conf.Logger.LogSize, 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /cluster/weighted.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "github.com/humpback/common/models" 5 | 6 | // WeightedEngine is exported 7 | type WeightedEngine struct { 8 | engine *Engine 9 | weight int64 10 | } 11 | 12 | // Containers is exported 13 | // Return engine's containers 14 | func (weighted *WeightedEngine) Containers() Containers { 15 | 16 | if weighted.engine != nil { 17 | return weighted.engine.Containers("") 18 | } 19 | return Containers{} 20 | } 21 | 22 | // Engine is exported 23 | func (weighted *WeightedEngine) Engine() *Engine 
{ 24 | 25 | return weighted.engine 26 | } 27 | 28 | // Weight is exported 29 | func (weighted *WeightedEngine) Weight() int64 { 30 | 31 | return weighted.weight 32 | } 33 | 34 | type weightedEngines []*WeightedEngine 35 | 36 | func (engines weightedEngines) Len() int { 37 | 38 | return len(engines) 39 | } 40 | 41 | func (engines weightedEngines) Swap(i, j int) { 42 | 43 | engines[i], engines[j] = engines[j], engines[i] 44 | } 45 | 46 | func (engines weightedEngines) Less(i, j int) bool { 47 | 48 | if engines[i].Weight() == engines[j].Weight() { 49 | return len(engines[i].Containers()) < len(engines[j].Containers()) 50 | } 51 | return engines[i].Weight() < engines[j].Weight() 52 | } 53 | 54 | func (engines weightedEngines) Engines() []*Engine { 55 | 56 | out := []*Engine{} 57 | for _, weightedEngine := range engines { 58 | out = append(out, weightedEngine.Engine()) 59 | } 60 | return out 61 | } 62 | 63 | func selectWeightdEngines(engines []*Engine, config models.Container) weightedEngines { 64 | 65 | out := weightedEngines{} 66 | for _, engine := range engines { 67 | totalCpus := engine.TotalCpus() 68 | totalMemory := engine.TotalMemory() 69 | if totalMemory < config.Memory || totalCpus < config.CPUShares { 70 | logger.INFO("[#cluster#] weighted engine %s filter.", engine.IP) 71 | continue 72 | } 73 | 74 | var cpuScore int64 = 100 75 | var memoryScore int64 = 100 76 | 77 | if config.CPUShares > 0 { 78 | cpuScore = (engine.UsedCpus() + config.CPUShares) * 100 / totalCpus 79 | } 80 | 81 | if config.Memory > 0 { 82 | memoryScore = (engine.UsedMemory()/1024/1024 + config.Memory) * 100 / totalMemory 83 | } 84 | 85 | //logger.INFO("[#cluster#] weighted engine %s cpuScore:%d memorySocre:%d weight:%d", engine.IP, cpuScore, memoryScore, cpuScore+memoryScore) 86 | if cpuScore <= 100 && memoryScore <= 100 { 87 | out = append(out, &WeightedEngine{ 88 | engine: engine, 89 | weight: cpuScore + memoryScore, 90 | }) 91 | } 92 | } 93 | return out 94 | } 95 | -------------------------------------------------------------------------------- /cluster/enginesfilter.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // EnginesFilter is exported 8 | type EnginesFilter struct { 9 | sync.RWMutex 10 | allocEngines map[string]*Engine 11 | failEngines map[string]*Engine 12 | } 13 | 14 | // NewEnginesFilter is exported 15 | func NewEnginesFilter() *EnginesFilter { 16 | 17 | return &EnginesFilter{ 18 | allocEngines: make(map[string]*Engine), 19 | failEngines: make(map[string]*Engine), 20 | } 21 | } 22 | 23 | // Size is exported 24 | func (filter *EnginesFilter) Size() int { 25 | 26 | filter.RLock() 27 | defer filter.RUnlock() 28 | return len(filter.allocEngines) + len(filter.failEngines) 29 | } 30 | 31 | // SetAllocEngine is exported 32 | func (filter *EnginesFilter) SetAllocEngine(engine *Engine) { 33 | 34 | filter.Lock() 35 | if engine != nil { 36 | if _, ret := filter.allocEngines[engine.IP]; !ret { 37 | filter.allocEngines[engine.IP] = engine 38 | } 39 | } 40 | filter.Unlock() 41 | } 42 | 43 | // SetFailEngine is exported 44 | func (filter *EnginesFilter) SetFailEngine(engine *Engine) { 45 | 46 | filter.Lock() 47 | if engine != nil { 48 | if _, ret := filter.failEngines[engine.IP]; !ret { 49 | filter.failEngines[engine.IP] = engine 50 | } 51 | } 52 | filter.Unlock() 53 | } 54 | 55 | // AllocEngines is exported 56 | func (filter *EnginesFilter) AllocEngines() []*Engine { 57 | 58 | filter.RLock() 59 | defer filter.RUnlock() 
60 | engines := []*Engine{} 61 | for _, engine := range filter.allocEngines { 62 | engines = append(engines, engine) 63 | } 64 | return engines 65 | } 66 | 67 | // FailEngines is exported 68 | func (filter *EnginesFilter) FailEngines() []*Engine { 69 | 70 | filter.RLock() 71 | defer filter.RUnlock() 72 | engines := []*Engine{} 73 | for _, engine := range filter.failEngines { 74 | engines = append(engines, engine) 75 | } 76 | return engines 77 | } 78 | 79 | // Filter is exported 80 | func (filter *EnginesFilter) Filter(engines []*Engine) []*Engine { 81 | 82 | if filter.Size() == 0 { 83 | return engines 84 | } 85 | 86 | filter.RLock() 87 | filterEngines := make(map[string]*Engine) 88 | for _, engine := range filter.allocEngines { 89 | filterEngines[engine.IP] = engine 90 | } 91 | 92 | for _, engine := range filter.failEngines { 93 | filterEngines[engine.IP] = engine 94 | } 95 | 96 | out := []*Engine{} 97 | for _, engine := range engines { 98 | if _, ret := filterEngines[engine.IP]; !ret { 99 | out = append(out, engine) 100 | } 101 | } 102 | filter.RUnlock() 103 | return out 104 | } 105 | -------------------------------------------------------------------------------- /cluster/storage/dao/dao.go: -------------------------------------------------------------------------------- 1 | package dao 2 | 3 | import "github.com/boltdb/bolt" 4 | 5 | import ( 6 | "encoding/binary" 7 | "errors" 8 | ) 9 | 10 | var ( 11 | ErrStorageObjectNotFound = errors.New("object not found") 12 | ) 13 | 14 | // Itob returns an 8-byte big endian representation of v. 15 | // This function is typically used for encoding integer IDs to byte slices 16 | // so that they can be used as BoltDB keys. 17 | func Itob(v int) []byte { 18 | b := make([]byte, 8) 19 | binary.BigEndian.PutUint64(b, uint64(v)) 20 | return b 21 | } 22 | 23 | // CreateBucket is a generic function used to create a bucket inside a bolt database. 24 | func CreateBucket(db *bolt.DB, bucketName string) error { 25 | return db.Update(func(tx *bolt.Tx) error { 26 | _, err := tx.CreateBucketIfNotExists([]byte(bucketName)) 27 | if err != nil { 28 | return err 29 | } 30 | return nil 31 | }) 32 | } 33 | 34 | // GetObject is a generic function used to retrieve an unmarshalled object from a bolt database. 35 | func GetObject(db *bolt.DB, bucketName string, key []byte, object interface{}) error { 36 | var data []byte 37 | 38 | err := db.View(func(tx *bolt.Tx) error { 39 | bucket := tx.Bucket([]byte(bucketName)) 40 | 41 | value := bucket.Get(key) 42 | if value == nil { 43 | return ErrStorageObjectNotFound 44 | } 45 | 46 | data = make([]byte, len(value)) 47 | copy(data, value) 48 | 49 | return nil 50 | }) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | return UnmarshalObject(data, object) 56 | } 57 | 58 | // UpdateObject is a generic function used to update an object inside a bolt database. 59 | func UpdateObject(db *bolt.DB, bucketName string, key []byte, object interface{}) error { 60 | return db.Update(func(tx *bolt.Tx) error { 61 | bucket := tx.Bucket([]byte(bucketName)) 62 | 63 | data, err := MarshalObject(object) 64 | if err != nil { 65 | return err 66 | } 67 | 68 | err = bucket.Put(key, data) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | return nil 74 | }) 75 | } 76 | 77 | // DeleteObject is a generic function used to delete an object inside a bolt database. 
78 | func DeleteObject(db *bolt.DB, bucketName string, key []byte) error { 79 | return db.Update(func(tx *bolt.Tx) error { 80 | bucket := tx.Bucket([]byte(bucketName)) 81 | return bucket.Delete(key) 82 | }) 83 | } 84 | 85 | // GetNextIdentifier is a generic function that returns the specified bucket identifier incremented by 1. 86 | func GetNextIdentifier(db *bolt.DB, bucketName string) int { 87 | var identifier int 88 | 89 | db.View(func(tx *bolt.Tx) error { 90 | bucket := tx.Bucket([]byte(bucketName)) 91 | id := bucket.Sequence() 92 | identifier = int(id) 93 | return nil 94 | }) 95 | 96 | identifier++ 97 | return identifier 98 | } 99 | -------------------------------------------------------------------------------- /api/router.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "github.com/gorilla/mux" 4 | import "github.com/humpback/humpback-center/ctrl" 5 | 6 | import ( 7 | "net/http" 8 | ) 9 | 10 | type handler func(c *Context) error 11 | 12 | var routes = map[string]map[string]handler{ 13 | "GET": { 14 | "/v1/_ping": ping, 15 | "/v1/configuration": getConfiguration, 16 | "/v1/groups/{groupid}/collections": getGroupAllContainers, 17 | "/v1/groups/{groupid}/engines": getGroupEngines, 18 | "/v1/groups/collections/{metaid}": getGroupContainers, 19 | "/v1/groups/collections/{metaid}/base": getGroupContainersMetaBase, 20 | "/v1/groups/engines/{server}": getGroupEngine, 21 | }, 22 | "POST": { 23 | "/v1/groups/event": postGroupEvent, 24 | "/v1/cluster/event": postClusterEvent, 25 | "/v1/groups/collections": postGroupCreateContainers, 26 | }, 27 | "PUT": { 28 | "/v1/groups/collections": putGroupUpdateContainers, 29 | "/v1/groups/collections/upgrade": putGroupUpgradeContainers, 30 | "/v1/groups/collections/action": putGroupOperateContainers, 31 | "/v1/groups/container/action": putGroupOperateContainer, 32 | "/v1/groups/nodelabels": putGroupServerNodeLabels, 33 | }, 34 | "DELETE": { 35 | "/v1/groups/{groupid}/collections/{metaname}": deleteGroupRemoveContainersOfMetaName, 36 | "/v1/groups/collections/{metaid}": deleteGroupRemoveContainers, 37 | "/v1/groups/container/{containerid}": deleteGroupRemoveContainer, 38 | }, 39 | } 40 | 41 | func NewRouter(controller *ctrl.Controller, enableCors bool) *mux.Router { 42 | 43 | router := mux.NewRouter() 44 | for method, mappings := range routes { 45 | for route, handler := range mappings { 46 | routemethod := method 47 | routepattern := route 48 | routehandler := handler 49 | wrap := func(w http.ResponseWriter, r *http.Request) { 50 | if enableCors { 51 | writeCorsHeaders(w, r) 52 | } 53 | c := NewContext(w, r, controller) 54 | routehandler(c) 55 | } 56 | router.Path(routepattern).Methods(routemethod).HandlerFunc(wrap) 57 | if enableCors { 58 | optionsmethod := "OPTIONS" 59 | optionshandler := optionsHandler 60 | wrap := func(w http.ResponseWriter, r *http.Request) { 61 | if enableCors { 62 | writeCorsHeaders(w, r) 63 | } 64 | c := NewContext(w, r, controller) 65 | optionshandler(c) 66 | } 67 | router.Path(routepattern).Methods(optionsmethod).HandlerFunc(wrap) 68 | } 69 | } 70 | } 71 | return router 72 | } 73 | 74 | func ping(ctx *Context) error { 75 | 76 | return ctx.JSON(http.StatusOK, "PANG") 77 | } 78 | 79 | func getConfiguration(ctx *Context) error { 80 | 81 | return ctx.JSON(http.StatusOK, ctx.Controller.Configuration) 82 | } 83 | 84 | func optionsHandler(ctx *Context) error { 85 | 86 | ctx.WriteHeader(http.StatusOK) 87 | return nil 88 | } 89 | 
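As a quick sanity check of the route table above, any HTTP client can call the `/v1/_ping` route, which the `ping` handler answers with HTTP 200 and the JSON-encoded string "PANG". A minimal sketch follows; the listen address `localhost:8589` is only an illustrative assumption, since the real hosts come from the `api.hosts` entry in the configuration:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// NOTE: the address below is a placeholder; humpback-center listens on
	// whatever hosts are configured under `api.hosts` in config.yaml.
	resp, err := http.Get("http://localhost:8589/v1/_ping")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// The ping handler replies with the JSON string "PANG" and status 200.
	fmt.Printf("status=%d body=%s\n", resp.StatusCode, body)
}
```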
-------------------------------------------------------------------------------- /notify/notify.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import ( 4 | "io/ioutil" 5 | "strings" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | //notify template string 11 | var templateBody string 12 | 13 | //NotifySender is exported 14 | type NotifySender struct { 15 | sync.RWMutex 16 | SiteURL string 17 | initWatch bool 18 | endPoints []IEndPoint 19 | events map[string]*Event 20 | } 21 | 22 | //NewNotifySender is exported 23 | func NewNotifySender(siteurl string, endPoints []EndPoint) *NotifySender { 24 | 25 | sender := &NotifySender{ 26 | SiteURL: siteurl, 27 | initWatch: true, 28 | endPoints: []IEndPoint{}, 29 | events: make(map[string]*Event), 30 | } 31 | 32 | if buf, err := ioutil.ReadFile("./notify/template.html"); err == nil { 33 | templateBody = string(buf) 34 | } 35 | 36 | factory := &NotifyEndPointFactory{} 37 | sender.Lock() 38 | for _, endPoint := range endPoints { 39 | switch strings.ToUpper(endPoint.Name) { 40 | case "API": 41 | apiEndPoint := factory.CreateAPIEndPoint(endPoint) 42 | sender.endPoints = append(sender.endPoints, apiEndPoint) 43 | case "SMTP": 44 | smtpEndPoint := factory.CreateSMTPEndPoint(endPoint) 45 | sender.endPoints = append(sender.endPoints, smtpEndPoint) 46 | } 47 | } 48 | sender.Unlock() 49 | 50 | go func() { 51 | time.Sleep(30 * time.Second) 52 | sender.initWatch = false 53 | }() 54 | return sender 55 | } 56 | 57 | //AddGroupEnginesWatchEvent is exported 58 | func (sender *NotifySender) AddGroupEnginesWatchEvent(description string, watchGroup *WatchGroup) { 59 | 60 | event := NewEvent(GroupEnginesWatchEvent, description, nil, watchGroup.ContactInfo, sender.SiteURL, sender.endPoints) 61 | event.data["WatchGroup"] = watchGroup 62 | sender.Lock() 63 | sender.events[event.ID] = event 64 | sender.Unlock() 65 | go sender.dispatchEvents() 66 | } 67 | 68 | //AddGroupMetaContainersEvent is exported 69 | func (sender *NotifySender) AddGroupMetaContainersEvent(description string, err error, groupMeta *GroupMeta) { 70 | 71 | event := NewEvent(GroupMetaContainersEvent, description, err, groupMeta.ContactInfo, sender.SiteURL, sender.endPoints) 72 | event.data["GroupMeta"] = groupMeta 73 | sender.Lock() 74 | sender.events[event.ID] = event 75 | sender.Unlock() 76 | go sender.dispatchEvents() 77 | } 78 | 79 | //dispatchEvents is exported 80 | //dispatch all events. 
81 | func (sender *NotifySender) dispatchEvents() { 82 | 83 | sender.Lock() 84 | for { 85 | if len(sender.events) == 0 { 86 | break 87 | } 88 | if !sender.initWatch { 89 | wgroup := sync.WaitGroup{} 90 | for _, event := range sender.events { 91 | wgroup.Add(1) 92 | go func(e *Event) { 93 | e.dispatch(templateBody) 94 | wgroup.Done() 95 | }(event) 96 | } 97 | wgroup.Wait() 98 | } 99 | for _, event := range sender.events { 100 | delete(sender.events, event.ID) 101 | } 102 | } 103 | sender.Unlock() 104 | } 105 | -------------------------------------------------------------------------------- /api/server.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "github.com/humpback/humpback-center/api/middleware" 4 | import "github.com/humpback/humpback-center/ctrl" 5 | 6 | import ( 7 | "crypto/tls" 8 | "fmt" 9 | "net" 10 | "net/http" 11 | "strings" 12 | ) 13 | 14 | type Dispatcher struct { 15 | handler http.Handler 16 | } 17 | 18 | func (dispatcher *Dispatcher) SetHandler(handler http.Handler) { 19 | 20 | dispatcher.handler = handler 21 | } 22 | 23 | func (dispatcher *Dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { 24 | 25 | if dispatcher.handler == nil { 26 | httpError(w, "API Dispatcher Invalid.", http.StatusInternalServerError) 27 | return 28 | } 29 | handler := middleware.Logger(dispatcher.handler) 30 | handler.ServeHTTP(w, r) 31 | } 32 | 33 | type Server struct { 34 | hosts []string 35 | tlsConfig *tls.Config 36 | dispatcher *Dispatcher 37 | } 38 | 39 | func NewServer(hosts []string, tlsConfig *tls.Config, controller *ctrl.Controller, enablecors bool) *Server { 40 | 41 | router := NewRouter(controller, enablecors) 42 | return &Server{ 43 | hosts: hosts, 44 | tlsConfig: tlsConfig, 45 | dispatcher: &Dispatcher{ 46 | handler: router, 47 | }, 48 | } 49 | } 50 | 51 | func (server *Server) ListenHosts() []string { 52 | 53 | return server.hosts 54 | } 55 | 56 | func (server *Server) SetHandler(handler http.Handler) { 57 | 58 | server.dispatcher.SetHandler(handler) 59 | } 60 | 61 | func (server *Server) Startup() error { 62 | 63 | errorsCh := make(chan error, len(server.hosts)) 64 | for _, host := range server.hosts { 65 | protoAddrParts := strings.SplitN(host, "://", 2) 66 | if len(protoAddrParts) == 1 { 67 | protoAddrParts = append([]string{"tcp"}, protoAddrParts...) 
68 | } 69 | 70 | go func() { 71 | var ( 72 | err error 73 | l net.Listener 74 | s = http.Server{ 75 | Addr: protoAddrParts[1], 76 | Handler: server.dispatcher, 77 | } 78 | ) 79 | 80 | switch protoAddrParts[0] { 81 | case "unix": 82 | l, err = newUnixListener(protoAddrParts[1], server.tlsConfig) 83 | case "tcp": 84 | l, err = newListener("tcp", protoAddrParts[1], server.tlsConfig) 85 | default: 86 | err = fmt.Errorf("API Unsupported Protocol:%q", protoAddrParts[0]) 87 | } 88 | if err != nil { 89 | errorsCh <- err 90 | } else { 91 | errorsCh <- s.Serve(l) 92 | } 93 | }() 94 | } 95 | 96 | for i := 0; i < len(server.hosts); i++ { 97 | err := <-errorsCh 98 | if err != nil { 99 | return err 100 | } 101 | } 102 | return nil 103 | } 104 | 105 | func newListener(proto string, addr string, tlsConfig *tls.Config) (net.Listener, error) { 106 | 107 | l, err := net.Listen(proto, addr) 108 | if err != nil { 109 | return nil, err 110 | } 111 | 112 | if tlsConfig != nil { 113 | tlsConfig.NextProtos = []string{"http/1.1"} 114 | l = tls.NewListener(l, tlsConfig) 115 | } 116 | return l, nil 117 | } 118 | -------------------------------------------------------------------------------- /cluster/notify.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/humpback-center/notify" 4 | 5 | //WatchEngines is exported 6 | type WatchEngines []*Engine 7 | 8 | //NewWatchEngine is exported 9 | func NewWatchEngine(ip string, name string, state EngineState) *Engine { 10 | 11 | return &Engine{ 12 | IP: ip, 13 | Name: name, 14 | state: state, 15 | } 16 | } 17 | 18 | //NotifyGroupEnginesWatchEvent is exported 19 | func (cluster *Cluster) NotifyGroupEnginesWatchEvent(description string, watchEngines WatchEngines) { 20 | 21 | watchGroups := make(notify.WatchGroups) 22 | for _, engine := range watchEngines { 23 | e := &notify.Engine{ 24 | IP: engine.IP, 25 | Name: engine.Name, 26 | State: stateText[engine.state], 27 | } 28 | groups := cluster.GetEngineGroups(engine) 29 | for _, group := range groups { 30 | if watchGroup, ret := watchGroups[group.ID]; !ret { 31 | watchGroup = &notify.WatchGroup{ 32 | GroupID: group.ID, 33 | GroupName: group.Name, 34 | Location: group.Location, 35 | ContactInfo: group.ContactInfo, 36 | Engines: []*notify.Engine{e}, 37 | } 38 | watchGroups[group.ID] = watchGroup 39 | } else { 40 | watchGroup.Engines = append(watchGroup.Engines, e) 41 | } 42 | } 43 | } 44 | for _, watchGroup := range watchGroups { 45 | cluster.NotifySender.AddGroupEnginesWatchEvent(description, watchGroup) 46 | } 47 | } 48 | 49 | //NotifyGroupMetaContainersEvent is exported 50 | func (cluster *Cluster) NotifyGroupMetaContainersEvent(description string, exception error, metaid string) { 51 | 52 | metaData, engines, err := cluster.GetMetaDataEngines(metaid) 53 | if err != nil { 54 | return 55 | } 56 | 57 | group := cluster.GetGroup(metaData.GroupID) 58 | if group == nil { 59 | return 60 | } 61 | 62 | containers := []*notify.Container{} 63 | for _, baseConfig := range metaData.BaseConfigs { 64 | for _, engine := range engines { 65 | if engine.IsHealthy() && engine.HasContainer(baseConfig.ID) { 66 | state := "Unknown" 67 | if c := engine.Container(baseConfig.ID); c != nil { 68 | state = StateString(c.Info.State) 69 | } 70 | containers = append(containers, &notify.Container{ 71 | ID: ShortContainerID(baseConfig.ID), 72 | Name: baseConfig.Name, 73 | Server: engine.IP, 74 | State: state, 75 | }) 76 | } 77 | } 78 | } 79 | 80 | nEngines := []*notify.Engine{} 81 |
engines = cluster.GetGroupAllEngines(metaData.GroupID) 82 | for _, engine := range engines { 83 | e := &notify.Engine{ 84 | IP: engine.IP, 85 | Name: engine.Name, 86 | State: stateText[engine.state], 87 | } 88 | nEngines = append(nEngines, e) 89 | } 90 | 91 | groupMeta := &notify.GroupMeta{ 92 | MetaID: metaData.MetaID, 93 | MetaName: metaData.Config.Name, 94 | Location: group.Location, 95 | GroupID: group.ID, 96 | GroupName: group.Name, 97 | Instances: metaData.Instances, 98 | Image: metaData.Config.Image, 99 | ContactInfo: group.ContactInfo, 100 | Engines: nEngines, 101 | Containers: containers, 102 | } 103 | cluster.NotifySender.AddGroupMetaContainersEvent(description, exception, groupMeta) 104 | } 105 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/mock/mock.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "github.com/docker/libkv/store" 5 | "github.com/stretchr/testify/mock" 6 | ) 7 | 8 | // Mock store. Mocks all Store functions using testify.Mock 9 | type Mock struct { 10 | mock.Mock 11 | 12 | // Endpoints passed to InitializeMock 13 | Endpoints []string 14 | 15 | // Options passed to InitializeMock 16 | Options *store.Config 17 | } 18 | 19 | // New creates a Mock store 20 | func New(endpoints []string, options *store.Config) (store.Store, error) { 21 | s := &Mock{} 22 | s.Endpoints = endpoints 23 | s.Options = options 24 | return s, nil 25 | } 26 | 27 | // Put mock 28 | func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { 29 | args := s.Mock.Called(key, value, opts) 30 | return args.Error(0) 31 | } 32 | 33 | // Get mock 34 | func (s *Mock) Get(key string) (*store.KVPair, error) { 35 | args := s.Mock.Called(key) 36 | return args.Get(0).(*store.KVPair), args.Error(1) 37 | } 38 | 39 | // Delete mock 40 | func (s *Mock) Delete(key string) error { 41 | args := s.Mock.Called(key) 42 | return args.Error(0) 43 | } 44 | 45 | // Exists mock 46 | func (s *Mock) Exists(key string) (bool, error) { 47 | args := s.Mock.Called(key) 48 | return args.Bool(0), args.Error(1) 49 | } 50 | 51 | // Watch mock 52 | func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { 53 | args := s.Mock.Called(key, stopCh) 54 | return args.Get(0).(<-chan *store.KVPair), args.Error(1) 55 | } 56 | 57 | // WatchTree mock 58 | func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { 59 | args := s.Mock.Called(prefix, stopCh) 60 | return args.Get(0).(chan []*store.KVPair), args.Error(1) 61 | } 62 | 63 | // NewLock mock 64 | func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { 65 | args := s.Mock.Called(key, options) 66 | return args.Get(0).(store.Locker), args.Error(1) 67 | } 68 | 69 | // List mock 70 | func (s *Mock) List(prefix string) ([]*store.KVPair, error) { 71 | args := s.Mock.Called(prefix) 72 | return args.Get(0).([]*store.KVPair), args.Error(1) 73 | } 74 | 75 | // DeleteTree mock 76 | func (s *Mock) DeleteTree(prefix string) error { 77 | args := s.Mock.Called(prefix) 78 | return args.Error(0) 79 | } 80 | 81 | // AtomicPut mock 82 | func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { 83 | args := s.Mock.Called(key, value, previous, opts) 84 | return args.Bool(0), args.Get(1).(*store.KVPair), args.Error(2) 85 | } 86 | 87 | // AtomicDelete mock 88 | func (s *Mock)
AtomicDelete(key string, previous *store.KVPair) (bool, error) { 89 | args := s.Mock.Called(key, previous) 90 | return args.Bool(0), args.Error(1) 91 | } 92 | 93 | // Lock mock implementation of Locker 94 | type Lock struct { 95 | mock.Mock 96 | } 97 | 98 | // Lock mock 99 | func (l *Lock) Lock(stopCh chan struct{}) (<-chan struct{}, error) { 100 | args := l.Mock.Called(stopCh) 101 | return args.Get(0).(<-chan struct{}), args.Error(1) 102 | } 103 | 104 | // Unlock mock 105 | func (l *Lock) Unlock() error { 106 | args := l.Mock.Called() 107 | return args.Error(0) 108 | } 109 | 110 | // Close mock 111 | func (s *Mock) Close() { 112 | return 113 | } 114 | -------------------------------------------------------------------------------- /api/context.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "github.com/humpback/gounits/rand" 4 | import "github.com/humpback/humpback-center/ctrl" 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "net/http" 10 | "net/url" 11 | ) 12 | 13 | type ( 14 | store map[string]interface{} 15 | 16 | Response struct { 17 | writer http.ResponseWriter 18 | status int 19 | size int64 20 | } 21 | 22 | Context struct { 23 | context.Context 24 | ID string 25 | request *http.Request 26 | response *Response 27 | query url.Values 28 | store store 29 | Controller *ctrl.Controller 30 | } 31 | ) 32 | 33 | func NewResponse(w http.ResponseWriter) *Response { 34 | 35 | return &Response{ 36 | writer: w, 37 | } 38 | } 39 | 40 | func (r *Response) SetWriter(w http.ResponseWriter) { 41 | 42 | r.writer = w 43 | } 44 | 45 | func (r *Response) Header() http.Header { 46 | 47 | return r.writer.Header() 48 | } 49 | 50 | func (r *Response) Writer() http.ResponseWriter { 51 | 52 | return r.writer 53 | } 54 | 55 | func (r *Response) WriteHeader(code int) { 56 | 57 | r.status = code 58 | r.writer.WriteHeader(code) 59 | } 60 | 61 | func (r *Response) Write(b []byte) (int, error) { 62 | 63 | n, err := r.writer.Write(b) 64 | if err == nil { 65 | r.size += int64(n) 66 | } 67 | return n, err 68 | } 69 | 70 | func (r *Response) Flush() { 71 | 72 | r.writer.(http.Flusher).Flush() 73 | } 74 | 75 | func (r *Response) Size() int64 { 76 | 77 | return r.size 78 | } 79 | 80 | func (r *Response) Status() int { 81 | 82 | return r.status 83 | } 84 | 85 | func NewContext(w http.ResponseWriter, r *http.Request, controller *ctrl.Controller) *Context { 86 | 87 | return &Context{ 88 | ID: rand.UUID(true), 89 | request: r, 90 | response: NewResponse(w), 91 | store: make(store), 92 | Controller: controller, 93 | } 94 | } 95 | 96 | func (c *Context) Request() *http.Request { 97 | 98 | return c.request 99 | } 100 | 101 | func (c *Context) Response() *Response { 102 | 103 | return c.response 104 | } 105 | 106 | func (c *Context) Get(key string) interface{} { 107 | 108 | return c.store[key] 109 | } 110 | 111 | func (c *Context) Set(key string, v interface{}) { 112 | 113 | if c.store == nil { 114 | c.store = make(store) 115 | } 116 | c.store[key] = v 117 | } 118 | 119 | func (c *Context) WriteHeader(code int) { 120 | 121 | c.response.WriteHeader(code) 122 | } 123 | 124 | func (c *Context) Query(name string) string { 125 | 126 | if c.query == nil { 127 | c.query = c.request.URL.Query() 128 | } 129 | return c.query.Get(name) 130 | } 131 | 132 | func (c *Context) Form(name string) string { 133 | 134 | return c.request.FormValue(name) 135 | } 136 | 137 | func (c *Context) JSON(code int, v interface{}) error { 138 | 139 | data, err := json.Marshal(v) 140 | if 
err != nil { 141 | return err 142 | } 143 | c.response.Header().Set("Content-Type", "application/json; charset=utf-8") 144 | c.response.WriteHeader(code) 145 | if _, err := c.response.Write(data); err != nil { 146 | return err 147 | } 148 | return nil 149 | } 150 | 151 | func (c *Context) JSONP(code int, callback string, v interface{}) error { 152 | 153 | b, err := json.Marshal(v) 154 | if err != nil { 155 | return err 156 | } 157 | c.response.Header().Set("Content-Type", "application/javascript; charset=utf-8") 158 | c.response.WriteHeader(code) 159 | data := []byte(callback + "(") 160 | data = append(data, b...) 161 | data = append(data, []byte(");")...) 162 | if _, err := c.response.Write(data); err != nil { 163 | return err 164 | } 165 | return nil 166 | } 167 | -------------------------------------------------------------------------------- /cluster/storage/node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import "github.com/boltdb/bolt" 4 | import "github.com/humpback/humpback-center/cluster/types" 5 | import "github.com/humpback/humpback-center/cluster/storage/dao" 6 | import "github.com/humpback/humpback-center/cluster/storage/entry" 7 | 8 | import ( 9 | "strings" 10 | ) 11 | 12 | const ( 13 | // BucketName represents the name of the bucket where this stores data. 14 | BucketName = "nodes" 15 | ) 16 | 17 | // NodeStorage is exported 18 | type NodeStorage struct { 19 | driver *bolt.DB 20 | } 21 | 22 | // NewNodeStorage is exported 23 | func NewNodeStorage(driver *bolt.DB) (*NodeStorage, error) { 24 | 25 | err := dao.CreateBucket(driver, BucketName) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | return &NodeStorage{ 31 | driver: driver, 32 | }, nil 33 | } 34 | 35 | // NodeByIP is exported 36 | func (nodeStorage *NodeStorage) NodeByIP(ip string) (*entry.Node, error) { 37 | 38 | var node entry.Node 39 | err := dao.GetObject(nodeStorage.driver, BucketName, []byte(ip), &node) 40 | if err != nil { 41 | return nil, err 42 | } 43 | return &node, nil 44 | } 45 | 46 | // NodeByID is exported 47 | func (nodeStorage *NodeStorage) NodeByID(id string) (*entry.Node, error) { 48 | 49 | var node *entry.Node 50 | err := nodeStorage.driver.View(func(tx *bolt.Tx) error { 51 | bucket := tx.Bucket([]byte(BucketName)) 52 | cursor := bucket.Cursor() 53 | for k, v := cursor.First(); k != nil; k, v = cursor.Next() { 54 | var value entry.Node 55 | err := dao.UnmarshalObject(v, &value) 56 | if err != nil { 57 | return err 58 | } 59 | if strings.ToUpper(value.ID) == strings.ToUpper(id) { 60 | node = &value 61 | break 62 | } 63 | } 64 | if node == nil { 65 | return dao.ErrStorageObjectNotFound 66 | } 67 | return nil 68 | }) 69 | return node, err 70 | } 71 | 72 | // NodeByName is exported 73 | func (nodeStorage *NodeStorage) NodeByName(name string) (*entry.Node, error) { 74 | 75 | var node *entry.Node 76 | err := nodeStorage.driver.View(func(tx *bolt.Tx) error { 77 | bucket := tx.Bucket([]byte(BucketName)) 78 | cursor := bucket.Cursor() 79 | for k, v := cursor.First(); k != nil; k, v = cursor.Next() { 80 | var value entry.Node 81 | err := dao.UnmarshalObject(v, &value) 82 | if err != nil { 83 | return err 84 | } 85 | if strings.ToUpper(value.Name) == strings.ToUpper(name) { 86 | node = &value 87 | break 88 | } 89 | } 90 | if node == nil { 91 | return dao.ErrStorageObjectNotFound 92 | } 93 | return nil 94 | }) 95 | return node, err 96 | } 97 | 98 | // SetNodeData set a node entry. 
99 | func (nodeStorage *NodeStorage) SetNodeData(nodeData *types.NodeData) error { 100 | 101 | var node *entry.Node 102 | node, _ = nodeStorage.NodeByIP(nodeData.IP) 103 | if node == nil { 104 | node = &entry.Node{ 105 | NodeLabels: map[string]string{}, 106 | Availability: "Active", 107 | } 108 | } 109 | 110 | node.NodeData = nodeData 111 | return nodeStorage.driver.Update(func(tx *bolt.Tx) error { 112 | bucket := tx.Bucket([]byte(BucketName)) 113 | data, err := dao.MarshalObject(node) 114 | if err != nil { 115 | return err 116 | } 117 | return bucket.Put([]byte(node.IP), data) 118 | }) 119 | } 120 | 121 | // SetNodeLabels set a node labels. 122 | func (nodeStorage *NodeStorage) SetNodeLabels(ip string, labels map[string]string) error { 123 | 124 | var node *entry.Node 125 | node, err := nodeStorage.NodeByIP(ip) 126 | if err != nil { 127 | return err 128 | } 129 | 130 | node.NodeLabels = labels 131 | return nodeStorage.driver.Update(func(tx *bolt.Tx) error { 132 | bucket := tx.Bucket([]byte(BucketName)) 133 | data, err := dao.MarshalObject(node) 134 | if err != nil { 135 | return err 136 | } 137 | return bucket.Put([]byte(node.IP), data) 138 | }) 139 | } 140 | 141 | // DeleteNode deletes a node entry. 142 | func (nodeStorage *NodeStorage) DeleteNode(ip string) error { 143 | 144 | return dao.DeleteObject(nodeStorage.driver, BucketName, []byte(ip)) 145 | } 146 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/boltdb/boltdb_test.go: -------------------------------------------------------------------------------- 1 | package boltdb 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | 8 | "github.com/docker/libkv" 9 | "github.com/docker/libkv/store" 10 | "github.com/docker/libkv/testutils" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func makeBoltDBClient(t *testing.T) store.Store { 15 | kv, err := New([]string{"/tmp/not_exist_dir/__boltdbtest"}, &store.Config{Bucket: "boltDBTest"}) 16 | 17 | if err != nil { 18 | t.Fatalf("cannot create store: %v", err) 19 | } 20 | 21 | return kv 22 | } 23 | 24 | func TestRegister(t *testing.T) { 25 | Register() 26 | 27 | kv, err := libkv.NewStore( 28 | store.BOLTDB, 29 | []string{"/tmp/not_exist_dir/__boltdbtest"}, 30 | &store.Config{Bucket: "boltDBTest"}, 31 | ) 32 | assert.NoError(t, err) 33 | assert.NotNil(t, kv) 34 | 35 | if _, ok := kv.(*BoltDB); !ok { 36 | t.Fatal("Error registering and initializing boltDB") 37 | } 38 | 39 | _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") 40 | } 41 | 42 | // TestMultiplePersistConnection tests the second connection to a 43 | // BoltDB fails when one is already open with PersistConnection flag 44 | func TestMultiplePersistConnection(t *testing.T) { 45 | kv, err := libkv.NewStore( 46 | store.BOLTDB, 47 | []string{"/tmp/not_exist_dir/__boltdbtest"}, 48 | &store.Config{ 49 | Bucket: "boltDBTest", 50 | ConnectionTimeout: 1 * time.Second, 51 | PersistConnection: true}, 52 | ) 53 | assert.NoError(t, err) 54 | assert.NotNil(t, kv) 55 | 56 | if _, ok := kv.(*BoltDB); !ok { 57 | t.Fatal("Error registering and initializing boltDB") 58 | } 59 | 60 | // Must fail if multiple boltdb requests are made with a valid timeout 61 | kv, err = libkv.NewStore( 62 | store.BOLTDB, 63 | []string{"/tmp/not_exist_dir/__boltdbtest"}, 64 | &store.Config{ 65 | Bucket: "boltDBTest", 66 | ConnectionTimeout: 1 * time.Second, 67 | PersistConnection: true}, 68 | ) 69 | assert.Error(t, err) 70 | 71 | _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") 72 | } 73 | 74 | // 
TestConcurrentConnection tests simultaenous get/put using 75 | // two handles. 76 | func TestConcurrentConnection(t *testing.T) { 77 | var err error 78 | kv1, err1 := libkv.NewStore( 79 | store.BOLTDB, 80 | []string{"/tmp/__boltdbtest"}, 81 | &store.Config{ 82 | Bucket: "boltDBTest", 83 | ConnectionTimeout: 1 * time.Second}, 84 | ) 85 | assert.NoError(t, err1) 86 | assert.NotNil(t, kv1) 87 | 88 | kv2, err2 := libkv.NewStore( 89 | store.BOLTDB, 90 | []string{"/tmp/__boltdbtest"}, 91 | &store.Config{Bucket: "boltDBTest", 92 | ConnectionTimeout: 1 * time.Second}, 93 | ) 94 | assert.NoError(t, err2) 95 | assert.NotNil(t, kv2) 96 | 97 | key1 := "TestKV1" 98 | value1 := []byte("TestVal1") 99 | err = kv1.Put(key1, value1, nil) 100 | assert.NoError(t, err) 101 | 102 | key2 := "TestKV2" 103 | value2 := []byte("TestVal2") 104 | err = kv2.Put(key2, value2, nil) 105 | assert.NoError(t, err) 106 | 107 | pair1, err1 := kv1.Get(key1) 108 | assert.NoError(t, err) 109 | if assert.NotNil(t, pair1) { 110 | assert.NotNil(t, pair1.Value) 111 | } 112 | assert.Equal(t, pair1.Value, value1) 113 | 114 | pair2, err2 := kv2.Get(key2) 115 | assert.NoError(t, err) 116 | if assert.NotNil(t, pair2) { 117 | assert.NotNil(t, pair2.Value) 118 | } 119 | assert.Equal(t, pair2.Value, value2) 120 | 121 | // AtomicPut using kv1 and kv2 should succeed 122 | _, _, err = kv1.AtomicPut(key1, []byte("TestnewVal1"), pair1, nil) 123 | assert.NoError(t, err) 124 | 125 | _, _, err = kv2.AtomicPut(key2, []byte("TestnewVal2"), pair2, nil) 126 | assert.NoError(t, err) 127 | 128 | testutils.RunTestCommon(t, kv1) 129 | testutils.RunTestCommon(t, kv2) 130 | 131 | kv1.Close() 132 | kv2.Close() 133 | 134 | _ = os.Remove("/tmp/__boltdbtest") 135 | } 136 | 137 | func TestBoldDBStore(t *testing.T) { 138 | kv := makeBoltDBClient(t) 139 | 140 | testutils.RunTestCommon(t, kv) 141 | testutils.RunTestAtomic(t, kv) 142 | 143 | _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") 144 | } 145 | -------------------------------------------------------------------------------- /cluster/hooks.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/common/models" 4 | import "github.com/humpback/gounits/container" 5 | import "github.com/humpback/gounits/httpx" 6 | import "github.com/humpback/gounits/logger" 7 | 8 | import ( 9 | "context" 10 | "net" 11 | "net/http" 12 | "strings" 13 | "time" 14 | ) 15 | 16 | // HookEvent is exported 17 | type HookEvent int 18 | 19 | const ( 20 | CreateMetaEvent HookEvent = iota + 1 21 | RemoveMetaEvent 22 | OperateMetaEvent 23 | UpdateMetaEvent 24 | UpgradeMetaEvent 25 | MigrateMetaEvent 26 | RecoveryMetaEvent 27 | ) 28 | 29 | func (event HookEvent) String() string { 30 | 31 | switch event { 32 | case CreateMetaEvent: 33 | return "CreateMetaEvent" 34 | case RemoveMetaEvent: 35 | return "RemoveMetaEvent" 36 | case OperateMetaEvent: 37 | return "OperateMetaEvent" 38 | case UpdateMetaEvent: 39 | return "UpdateMetaEvent" 40 | case UpgradeMetaEvent: 41 | return "UpgradeMetaEvent" 42 | case MigrateMetaEvent: 43 | return "MigrateMetaEvent" 44 | case RecoveryMetaEvent: 45 | return "RecoveryMetaEvent" 46 | } 47 | return "" 48 | } 49 | 50 | // HookContainer is exported 51 | type HookContainer struct { 52 | IP string `json:"IP"` 53 | Name string `json:"Name"` 54 | Container models.Container `json:"Container"` 55 | } 56 | 57 | // HookContainers is exported 58 | type HookContainers []*HookContainer 59 | 60 | // Hook is exported 61 | type Hook struct { 62 | 
Timestamp int64 `json:"Timestamp"` 63 | Event string `json:"Event"` 64 | MetaBase MetaBase `json:"MetaBase"` 65 | HookContainers 66 | client *httpx.HttpClient 67 | } 68 | 69 | // Submit is exported 70 | func (hook *Hook) Submit() { 71 | 72 | webHooks := hook.MetaBase.WebHooks 73 | for _, webHook := range webHooks { 74 | headers := map[string][]string{} 75 | secretToken := strings.TrimSpace(webHook.SecretToken) 76 | if secretToken != "" { 77 | headers["X-Humpback-Token"] = []string{secretToken} 78 | } 79 | hookURL := strings.TrimSpace(webHook.URL) 80 | respWebHook, err := hook.client.PostJSON(context.Background(), hookURL, nil, hook, headers) 81 | if err != nil { 82 | logger.ERROR("[#cluster#] webhook %s post %s to %s, http error:%s", hook.Event, hook.MetaBase.MetaID, hookURL, err.Error()) 83 | continue 84 | } 85 | if respWebHook.StatusCode() >= http.StatusBadRequest { 86 | logger.ERROR("[#cluster#] webhook %s post %s to %s, http code %d", hook.Event, hook.MetaBase.MetaID, hookURL, respWebHook.StatusCode()) 87 | } 88 | respWebHook.Close() 89 | } 90 | } 91 | 92 | // HooksProcessor is exported 93 | type HooksProcessor struct { 94 | bStart bool 95 | client *httpx.HttpClient 96 | hooksQueue *container.SyncQueue 97 | stopCh chan struct{} 98 | } 99 | 100 | // NewHooksProcessor is exported 101 | func NewHooksProcessor() *HooksProcessor { 102 | 103 | client := httpx.NewClient(). 104 | SetTransport(&http.Transport{ 105 | Proxy: http.ProxyFromEnvironment, 106 | DialContext: (&net.Dialer{ 107 | Timeout: 45 * time.Second, 108 | KeepAlive: 90 * time.Second, 109 | }).DialContext, 110 | DisableKeepAlives: false, 111 | MaxIdleConns: 50, 112 | MaxIdleConnsPerHost: 65, 113 | IdleConnTimeout: 90 * time.Second, 114 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 115 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 116 | }) 117 | 118 | return &HooksProcessor{ 119 | bStart: false, 120 | client: client, 121 | hooksQueue: container.NewSyncQueue(), 122 | } 123 | } 124 | 125 | // SubmitHook is exported 126 | func (processor *HooksProcessor) SubmitHook(metaBase MetaBase, hookContainers HookContainers, hookEvent HookEvent) { 127 | 128 | hook := &Hook{ 129 | client: processor.client, 130 | Timestamp: time.Now().UnixNano(), 131 | Event: hookEvent.String(), 132 | MetaBase: metaBase, 133 | HookContainers: hookContainers, 134 | } 135 | processor.hooksQueue.Push(hook) 136 | } 137 | 138 | func (processor *HooksProcessor) Start() { 139 | 140 | if !processor.bStart { 141 | processor.bStart = true 142 | go processor.eventPopLoop() 143 | } 144 | } 145 | 146 | func (processor *HooksProcessor) Close() { 147 | 148 | if processor.bStart { 149 | processor.hooksQueue.Close() 150 | processor.bStart = false 151 | } 152 | } 153 | 154 | func (processor *HooksProcessor) eventPopLoop() { 155 | 156 | for processor.bStart { 157 | value := processor.hooksQueue.Pop() 158 | if value != nil { 159 | hook := value.(*Hook) 160 | go hook.Submit() 161 | } 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/docs/examples.md: -------------------------------------------------------------------------------- 1 | #Examples 2 | 3 | This document contains useful example of usage for `libkv`. It might not be complete but provides with general informations on how to use the client. 
4 | 5 | ##Create a store and use Put/Get/Delete 6 | 7 | ```go 8 | package main 9 | 10 | import ( 11 | "fmt" 12 | "time" 13 | "log" 14 | 15 | "github.com/docker/libkv" 16 | "github.com/docker/libkv/store" 17 | "github.com/docker/libkv/store/consul" 18 | ) 19 | 20 | func init() { 21 | // Register consul store to libkv 22 | consul.Register() 23 | 24 | // We can register as many backends that are supported by libkv 25 | etcd.Register() 26 | zookeeper.Register() 27 | boltdb.Register() 28 | } 29 | 30 | func main() { 31 | client := "localhost:8500" 32 | 33 | // Initialize a new store with consul 34 | kv, err := libkv.NewStore( 35 | store.CONSUL, // or "consul" 36 | []string{client}, 37 | &store.Config{ 38 | ConnectionTimeout: 10*time.Second, 39 | }, 40 | ) 41 | if err != nil { 42 | log.Fatal("Cannot create store consul") 43 | } 44 | 45 | key := "foo" 46 | err = kv.Put(key, []byte("bar"), nil) 47 | if err != nil { 48 | fmt.Errorf("Error trying to put value at key: %v", key) 49 | } 50 | 51 | pair, err := kv.Get(key) 52 | if err != nil { 53 | fmt.Errorf("Error trying accessing value at key: %v", key) 54 | } 55 | 56 | err = kv.Delete(key) 57 | if err != nil { 58 | fmt.Errorf("Error trying to delete key %v", key) 59 | } 60 | 61 | log.Info("value: ", string(pair.Value)) 62 | } 63 | ``` 64 | 65 | ##List keys 66 | 67 | ```go 68 | // List will list all the keys under `key` if it contains a set of child keys/values 69 | entries, err := kv.List(key) 70 | for _, pair := range entries { 71 | fmt.Printf("key=%v - value=%v", pair.Key, string(pair.Value)) 72 | } 73 | 74 | ``` 75 | 76 | ##Watching for events on a single key (Watch) 77 | 78 | You can use watches to watch modifications on a key. First you need to check if the key exists. If this is not the case, we need to create it using the `Put` function. 79 | 80 | ```go 81 | // Checking on the key before watching 82 | if !kv.Exists(key) { 83 | err := kv.Put(key, []byte("bar"), nil) 84 | if err != nil { 85 | fmt.Errorf("Something went wrong when initializing key %v", key) 86 | } 87 | } 88 | 89 | stopCh := make(<-chan struct{}) 90 | events, err := kv.Watch(key, stopCh) 91 | 92 | select { 93 | case pair := <-events: 94 | // Do something with events 95 | fmt.Printf("value changed on key %v: new value=%v", key, pair.Value) 96 | } 97 | 98 | ``` 99 | 100 | ##Watching for events happening on child keys (WatchTree) 101 | 102 | You can use watches to watch modifications on a key. First you need to check if the key exists. If this is not the case, we need to create it using the `Put` function. There is a special step here though if you want your code to work across backends. Because `etcd` is a special case and it makes the distinction between directories and keys, we need to make sure that the created key is considered as a directory by enforcing `IsDir` at `true`. 
103 | 104 | ```go 105 | // Checking on the key before watching 106 | if !kv.Exists(key) { 107 | // Don't forget IsDir:true if the code is used cross-backend 108 | err := kv.Put(key, []byte("bar"), &store.WriteOptions{IsDir:true}) 109 | if err != nil { 110 | fmt.Errorf("Something went wrong when initializing key %v", key) 111 | } 112 | } 113 | 114 | stopCh := make(<-chan struct{}) 115 | events, err := kv.WatchTree(key, stopCh) 116 | 117 | select { 118 | case pairs := <-events: 119 | // Do something with events 120 | for _, pair := range pairs { 121 | fmt.Printf("value changed on key %v: new value=%v", key, pair.Value) 122 | } 123 | } 124 | 125 | ``` 126 | 127 | ## Distributed Locking, using Lock/Unlock 128 | 129 | ```go 130 | key := "lockKey" 131 | value := []byte("bar") 132 | 133 | // Initialize a distributed lock. TTL is optional, it is here to make sure that 134 | // the lock is released after the program that is holding the lock ends or crashes 135 | lock, err := kv.NewLock(key, &store.LockOptions{Value: value, TTL: 2 * time.Second}) 136 | if err != nil { 137 | fmt.Errorf("something went wrong when trying to initialize the Lock") 138 | } 139 | 140 | // Try to lock the key, the call to Lock() is blocking 141 | _, err = lock.Lock(nil) 142 | if err != nil { 143 | fmt.Errorf("something went wrong when trying to lock key %v", key) 144 | } 145 | 146 | // Get should work because we are holding the key 147 | pair, err := kv.Get(key) 148 | if err != nil { 149 | fmt.Errorf("something went wrong when trying to get key %v", key) 150 | } 151 | 152 | // Unlock the key 153 | err = lock.Unlock() 154 | if err != nil { 155 | fmt.Errorf("something went wrong when trying to unlock key %v", key) 156 | } 157 | ``` -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/docs/compatibility.md: -------------------------------------------------------------------------------- 1 | #Cross-Backend Compatibility 2 | 3 | The value of `libkv` is to avoid duplicating code in programs that need to support multiple distributed K/V stores like the classic `Consul`/`etcd`/`zookeeper` trio. 4 | 5 | This document provides general guidelines for users who want to support those backends with the same code using `libkv`. 6 | 7 | Please note that most of those workarounds are going to disappear in the future with `etcd` APIv3. 8 | 9 | ##Etcd directory/key distinction 10 | 11 | `etcd` with APIv2 makes the distinction between keys and directories. The result with `libkv` is that when using the etcd driver: 12 | 13 | - You cannot store values on directories 14 | - You cannot invoke `WatchTree` (watching on child values) on a regular key 15 | 16 | This is fundamentally different from `Consul` and `zookeeper`, which are more permissive and allow the same set of operations on keys and directories (called a Node for zookeeper). 17 | 18 | APIv3 is in the works for `etcd`, which removes this key/directory distinction, but until then you should follow these workarounds to make your `libkv` code work across backends. 19 | 20 | ###Put 21 | 22 | `etcd` cannot put values on directories, so this puts a major restriction compared to `Consul` and `zookeeper`. 23 | 24 | If you want to support all three of those backends, you should make sure to only put data on **leaves**. 25 | 26 | For example: 27 | 28 | ```go 29 | _ = kv.Put("path/to/key/bis", []byte("foo"), nil) 30 | _ = kv.Put("path/to/key", []byte("bar"), nil) 31 | ``` 32 | 33 | Will work on `Consul` and `zookeeper` but fail for `etcd`.
This is because the first `Put` in the case of `etcd` will recursively create the directory hierarchy and `path/to/key` is now considered as a directory. Thus, values should always be stored on leaves if support for the three backends is planned. 34 | 35 | ###WatchTree 36 | 37 | When initializing the `WatchTree`, the natural way to do so is through the following code: 38 | 39 | ```go 40 | key := "path/to/key" 41 | if !kv.Exists(key) { 42 | err := kv.Put(key, []byte("data"), nil) 43 | } 44 | events, err := kv.WatchTree(key, nil) 45 | ``` 46 | 47 | The code above will not work across backends and etcd will fail on the `WatchTree` call. What happens exactly: 48 | 49 | - `Consul` will create a regular `key` because it has no distinction between directories and keys. This is not an issue as we can invoke `WatchTree` on regular keys. 50 | - `zookeeper` is going to create a `node` that can either be a directory or a key during the lifetime of a program but it does not matter as a directory can hold values and be watchable like a regular key. 51 | - `etcd` is going to create a regular `key`. We cannot invoke `WatchTree` on regular keys using etcd. 52 | 53 | To be cross-compatible between those three backends for `WatchTree`, we need to enforce a parameter that is only interpreted with `etcd` and which tells the client to create a `directory` instead of a key. 54 | 55 | ```go 56 | key := "path/to/key" 57 | if !kv.Exists(key) { 58 | // We enforce IsDir = true to make sure etcd creates a directory 59 | err := kv.Put(key, []byte("data"), &store.WriteOptions{IsDir:true}) 60 | } 61 | events, err := kv.WatchTree(key, nil) 62 | ``` 63 | 64 | The code above will work for the three backends but make sure not to try to store any value at that path as the call to `Put` will fail for `etcd` (you can only put at `path/to/key/foo`, `path/to/key/bar` for example). 65 | 66 | ##Etcd distributed locking 67 | 68 | There is no `Lock` mechanism baked into the `coreos/etcd/client` for now. Instead, `libkv` has its own implementation of a `Lock` on top of `etcd`. 69 | 70 | The general workflow for the `Lock` is as follows: 71 | 72 | - Call Lock concurrently on a `key` between threads/programs 73 | - Only one will create that key, others are going to fail because the key has already been created 74 | - The thread locking the key can get the right index to set the value of the key using Compare And Swap and effectively Lock and hold the key 75 | - Other threads are given a wrong index to fail the Compare and Swap and block until the key has been released by the thread holding the Lock 76 | - Lock seekers are setting up a Watch listening on that key and events happening on the key 77 | - When the thread/program stops holding the lock, it deletes the key triggering a `delete` event that will notify all the other threads. In case the program crashes, the key has a TTL attached that will send an `expire` event when this TTL expires. 78 | - Once everyone is notified, back to the first step. First come, first served with the Lock. 79 | 80 | The whole Lock process is highly dependent on the `delete`/`expire` events of `etcd`. So don't expect the key to still be there once the Lock is released. 81 | 82 | For example, if the whole logic is to `Lock` a key and expect the value to still be there after it has been unlocked, it is not going to be cross-backend compatible with `Consul` and `zookeeper`. On the other hand, the `etcd` Lock can still be used to do Leader Election, for example, and still be cross-compatible with other backends.
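To make the Leader Election use mentioned above concrete, here is a minimal sketch built only on the `Store` and `Locker` interfaces from `store.go`. The Consul backend, the `localhost:8500` address, the key name and the TTL are illustrative assumptions, not part of the original document:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/consul"
)

func main() {
	// Register the backend we want to use (any supported backend works the same way).
	consul.Register()

	kv, err := libkv.NewStore(store.CONSUL, []string{"localhost:8500"}, &store.Config{ConnectionTimeout: 10 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer kv.Close()

	// Every candidate competes for the same key; only one holds it at a time.
	lock, err := kv.NewLock("leader/my-service", &store.LockOptions{
		Value: []byte("candidate-1"),
		TTL:   15 * time.Second, // released automatically if the holder crashes
	})
	if err != nil {
		log.Fatal(err)
	}

	// Lock blocks until leadership is acquired; the returned channel signals
	// when the lock is lost (for example when the TTL expires).
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("elected leader, doing leader-only work")

	select {
	case <-lostCh:
		log.Println("leadership lost")
	case <-time.After(30 * time.Second):
		// Step down voluntarily by releasing the key.
		if err := lock.Unlock(); err != nil {
			log.Println("unlock:", err)
		}
	}
}
```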
-------------------------------------------------------------------------------- /cluster/utils.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/humpback-center/cluster/storage/entry" 4 | import "github.com/humpback/humpback-center/cluster/storage/node" 5 | import "github.com/humpback/humpback-center/cluster/types" 6 | import "github.com/humpback/discovery/backends" 7 | import "github.com/humpback/gounits/json" 8 | 9 | import ( 10 | "fmt" 11 | "net" 12 | "sort" 13 | "strings" 14 | ) 15 | 16 | //ParseServer is exported 17 | func ParseServer(ipOrName string) Server { 18 | 19 | server := Server{} 20 | ip := net.ParseIP(ipOrName) 21 | if ip != nil { 22 | server.IP = ipOrName 23 | } else { 24 | server.Name = ipOrName 25 | } 26 | return server 27 | } 28 | 29 | func getImageTag(imageName string) string { 30 | 31 | imageTag := "latest" 32 | values := strings.SplitN(imageName, ":", 2) 33 | if len(values) == 2 { 34 | imageTag = values[1] 35 | } 36 | return imageTag 37 | } 38 | 39 | // searchServerOfEngines is exported 40 | func searchServerOfEngines(server Server, engines map[string]*Engine) *Engine { 41 | 42 | //priority ip 43 | if server.IP != "" { 44 | if engine, ret := engines[server.IP]; ret { 45 | return engine 46 | } 47 | } else if server.Name != "" { 48 | for _, engine := range engines { 49 | if server.Name == engine.Name { 50 | return engine 51 | } 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | func searchServerOfStorage(server Server, nodeStorage *node.NodeStorage) *Engine { 58 | 59 | var node *entry.Node 60 | if server.IP != "" { 61 | node, _ = nodeStorage.NodeByIP(server.IP) 62 | } else if server.Name != "" { 63 | node, _ = nodeStorage.NodeByName(server.Name) 64 | } 65 | 66 | if node != nil { 67 | engine := &Engine{} 68 | engine.Update(node.NodeData) 69 | engine.NodeLabels = node.NodeLabels 70 | engine.AvailabilityText = node.Availability 71 | return engine 72 | } 73 | return nil 74 | } 75 | 76 | // selectIPOrName is exported 77 | func selectIPOrName(ip string, name string) string { 78 | 79 | if ip != "" { 80 | return ip 81 | } 82 | return name 83 | } 84 | 85 | // compareAddServers is exported 86 | func compareAddServers(nodeCache *types.NodeCache, originServer Server, newServer Server) bool { 87 | 88 | nodeData1 := nodeCache.Get(selectIPOrName(originServer.IP, originServer.Name)) 89 | nodeData2 := nodeCache.Get(selectIPOrName(newServer.IP, newServer.Name)) 90 | if nodeData1 != nil && nodeData2 != nil { 91 | if nodeData1 == nodeData2 { 92 | return true 93 | } 94 | } 95 | if nodeData2 == nil { 96 | return true 97 | } 98 | return false 99 | } 100 | 101 | // compareRemoveServers is exported 102 | func compareRemoveServers(nodeCache *types.NodeCache, originServer Server, newServer Server) bool { 103 | 104 | nodeData1 := nodeCache.Get(selectIPOrName(originServer.IP, originServer.Name)) 105 | nodeData2 := nodeCache.Get(selectIPOrName(newServer.IP, newServer.Name)) 106 | if nodeData1 == nil && nodeData2 == nil { 107 | return true 108 | } 109 | if nodeData1 == nil { 110 | return true 111 | } 112 | if nodeData1 == nodeData2 { 113 | return true 114 | } 115 | return false 116 | } 117 | 118 | type rdEngines []*Engine 119 | 120 | func (engines rdEngines) Len() int { 121 | 122 | return len(engines) 123 | } 124 | 125 | func (engines rdEngines) Swap(i, j int) { 126 | 127 | engines[i], engines[j] = engines[j], engines[i] 128 | } 129 | 130 | func (engines rdEngines) Less(i, j int) bool { 131 | 132 | return 
engines[i].IP < engines[j].IP 133 | } 134 | 135 | // removeDuplicatesEngines is exported 136 | func removeDuplicatesEngines(engines []*Engine) []*Engine { 137 | 138 | out := []*Engine{} 139 | pEngines := rdEngines(engines) 140 | sort.Sort(pEngines) 141 | nLen := len(pEngines) 142 | for i := 0; i < nLen; i++ { 143 | if i > 0 && pEngines[i-1].IP == pEngines[i].IP { 144 | continue 145 | } 146 | out = append(out, pEngines[i]) 147 | } 148 | return out 149 | } 150 | 151 | type rdGroups []*Group 152 | 153 | func (groups rdGroups) Len() int { 154 | 155 | return len(groups) 156 | } 157 | 158 | func (groups rdGroups) Swap(i, j int) { 159 | 160 | groups[i], groups[j] = groups[j], groups[i] 161 | } 162 | 163 | func (groups rdGroups) Less(i, j int) bool { 164 | 165 | return groups[i].ID < groups[j].ID 166 | } 167 | 168 | func removeDuplicatesGroups(groups []*Group) []*Group { 169 | 170 | out := []*Group{} 171 | pGroups := rdGroups(groups) 172 | sort.Sort(pGroups) 173 | nLen := len(pGroups) 174 | for i := 0; i < nLen; i++ { 175 | if i > 0 && pGroups[i-1].ID == pGroups[i].ID { 176 | continue 177 | } 178 | out = append(out, pGroups[i]) 179 | } 180 | return out 181 | } 182 | 183 | func deCodeEntry(entry *backends.Entry) (*types.NodeData, error) { 184 | 185 | if entry == nil { 186 | return nil, fmt.Errorf("decode entry invalid") 187 | } 188 | 189 | nodeData := &types.NodeData{} 190 | err := json.DeCodeBufferToObject(entry.Data, nodeData) 191 | if err != nil { 192 | return nil, err 193 | } 194 | 195 | nodeData.Name = strings.ToUpper(nodeData.Name) 196 | return nodeData, nil 197 | } 198 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "time" 7 | ) 8 | 9 | // Backend represents a KV Store Backend 10 | type Backend string 11 | 12 | const ( 13 | // CONSUL backend 14 | CONSUL Backend = "consul" 15 | // ETCD backend 16 | ETCD Backend = "etcd" 17 | // ZK backend 18 | ZK Backend = "zk" 19 | // BOLTDB backend 20 | BOLTDB Backend = "boltdb" 21 | ) 22 | 23 | var ( 24 | // ErrBackendNotSupported is thrown when the backend k/v store is not supported by libkv 25 | ErrBackendNotSupported = errors.New("Backend storage not supported yet, please choose one of") 26 | // ErrCallNotSupported is thrown when a method is not implemented/supported by the current backend 27 | ErrCallNotSupported = errors.New("The current call is not supported with this backend") 28 | // ErrNotReachable is thrown when the API cannot be reached for issuing common store operations 29 | ErrNotReachable = errors.New("Api not reachable") 30 | // ErrCannotLock is thrown when there is an error acquiring a lock on a key 31 | ErrCannotLock = errors.New("Error acquiring the lock") 32 | // ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store 33 | ErrKeyModified = errors.New("Unable to complete atomic operation, key modified") 34 | // ErrKeyNotFound is thrown when the key is not found in the store during a Get operation 35 | ErrKeyNotFound = errors.New("Key not found in store") 36 | // ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation 37 | ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation") 38 | // ErrKeyExists is thrown when the previous value exists in the case of an AtomicPut 39 | 
ErrKeyExists = errors.New("Previous K/V pair exists, cannot complete Atomic operation") 40 | ) 41 | 42 | // Config contains the options for a storage client 43 | type Config struct { 44 | ClientTLS *ClientTLSConfig 45 | TLS *tls.Config 46 | ConnectionTimeout time.Duration 47 | Bucket string 48 | PersistConnection bool 49 | Username string 50 | Password string 51 | } 52 | 53 | // ClientTLSConfig contains data for a Client TLS configuration in the form 54 | // the etcd client wants it. Eventually we'll adapt it for ZK and Consul. 55 | type ClientTLSConfig struct { 56 | CertFile string 57 | KeyFile string 58 | CACertFile string 59 | } 60 | 61 | // Store represents the backend K/V storage 62 | // Each store should support every call listed 63 | // here. Or it couldn't be implemented as a K/V 64 | // backend for libkv 65 | type Store interface { 66 | // Put a value at the specified key 67 | Put(key string, value []byte, options *WriteOptions) error 68 | 69 | // Get a value given its key 70 | Get(key string) (*KVPair, error) 71 | 72 | // Delete the value at the specified key 73 | Delete(key string) error 74 | 75 | // Verify if a Key exists in the store 76 | Exists(key string) (bool, error) 77 | 78 | // Watch for changes on a key 79 | Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) 80 | 81 | // WatchTree watches for changes on child nodes under 82 | // a given directory 83 | WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) 84 | 85 | // NewLock creates a lock for a given key. 86 | // The returned Locker is not held and must be acquired 87 | // with `.Lock`. The Value is optional. 88 | NewLock(key string, options *LockOptions) (Locker, error) 89 | 90 | // List the content of a given prefix 91 | List(directory string) ([]*KVPair, error) 92 | 93 | // DeleteTree deletes a range of keys under a given directory 94 | DeleteTree(directory string) error 95 | 96 | // Atomic CAS operation on a single value. 97 | // Pass previous = nil to create a new key. 98 | AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) 99 | 100 | // Atomic delete of a single value 101 | AtomicDelete(key string, previous *KVPair) (bool, error) 102 | 103 | // Close the store connection 104 | Close() 105 | } 106 | 107 | // KVPair represents {Key, Value, Lastindex} tuple 108 | type KVPair struct { 109 | Key string 110 | Value []byte 111 | LastIndex uint64 112 | } 113 | 114 | // WriteOptions contains optional request parameters 115 | type WriteOptions struct { 116 | IsDir bool 117 | TTL time.Duration 118 | } 119 | 120 | // LockOptions contains optional request parameters 121 | type LockOptions struct { 122 | Value []byte // Optional, value to associate with the lock 123 | TTL time.Duration // Optional, expiration ttl associated with the lock 124 | RenewLock chan struct{} // Optional, chan used to control and stop the session ttl renewal for the lock 125 | } 126 | 127 | // Locker provides locking mechanism on top of the store. 128 | // Similar to `sync.Lock` except it may return errors. 
129 | type Locker interface { 130 | Lock(stopChan chan struct{}) (<-chan struct{}, error) 131 | Unlock() error 132 | } 133 | -------------------------------------------------------------------------------- /cluster/enginespool.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/gounits/logger" 4 | 5 | import ( 6 | "sync" 7 | "time" 8 | ) 9 | 10 | // EnginesPool is exported 11 | type EnginesPool struct { 12 | sync.RWMutex 13 | Cluster *Cluster 14 | poolEngines map[string]*Engine 15 | pendEngines map[string]*Engine 16 | stopCh chan struct{} 17 | } 18 | 19 | // NewEnginesPool is exported 20 | func NewEnginesPool() *EnginesPool { 21 | 22 | pool := &EnginesPool{ 23 | poolEngines: make(map[string]*Engine), 24 | pendEngines: make(map[string]*Engine), 25 | stopCh: make(chan struct{}), 26 | } 27 | go pool.doLoop() 28 | return pool 29 | } 30 | 31 | // SetCluster is exported 32 | func (pool *EnginesPool) SetCluster(cluster *Cluster) { 33 | 34 | pool.Cluster = cluster 35 | } 36 | 37 | // Release is exported 38 | func (pool *EnginesPool) Release() { 39 | 40 | close(pool.stopCh) 41 | pool.Lock() 42 | for _, engine := range pool.pendEngines { 43 | delete(pool.pendEngines, engine.IP) 44 | } 45 | for _, engine := range pool.poolEngines { 46 | delete(pool.poolEngines, engine.IP) 47 | } 48 | pool.Unlock() 49 | } 50 | 51 | // InitEngineNodeLabels is exported 52 | func (pool *EnginesPool) InitEngineNodeLabels(engine *Engine) { 53 | 54 | node, _ := pool.Cluster.storageDriver.NodeStorage.NodeByIP(engine.IP) 55 | if node != nil { 56 | engine.SetNodeLabelsPairs(node.NodeLabels) 57 | } 58 | } 59 | 60 | // AddEngine is exported 61 | func (pool *EnginesPool) AddEngine(ip string, name string) { 62 | 63 | ipOrName := selectIPOrName(ip, name) 64 | nodeData := pool.Cluster.nodeCache.Get(ipOrName) 65 | if nodeData == nil { 66 | return 67 | } 68 | 69 | pool.Cluster.storageDriver.NodeStorage.SetNodeData(nodeData) 70 | engine := pool.Cluster.GetEngine(nodeData.IP) 71 | if engine != nil { 72 | pool.InitEngineNodeLabels(engine) 73 | engine.Update(nodeData) 74 | return 75 | } 76 | 77 | if ret := pool.Cluster.InGroupsContains(nodeData.IP, nodeData.Name); !ret { 78 | return 79 | } 80 | 81 | pool.Lock() 82 | defer pool.Unlock() 83 | if pendEngine, ret := pool.pendEngines[nodeData.IP]; ret { 84 | pool.InitEngineNodeLabels(pendEngine) 85 | if pendEngine.IsHealthy() { 86 | delete(pool.pendEngines, pendEngine.IP) 87 | pendEngine.Update(nodeData) 88 | pool.Cluster.Lock() 89 | pool.Cluster.engines[pendEngine.IP] = pendEngine 90 | pool.Cluster.Unlock() 91 | logger.INFO("[#cluster#] addengine, pool engine reused %s %s %s.", pendEngine.IP, pendEngine.Name, pendEngine.State()) 92 | } else { 93 | logger.INFO("[#cluster#] addengine, pool pending engine %s %s %s is already.", pendEngine.IP, pendEngine.Name, pendEngine.State()) 94 | } 95 | return 96 | } 97 | 98 | poolEngine, ret := pool.poolEngines[nodeData.IP] 99 | if ret { 100 | poolEngine.Update(nodeData) 101 | poolEngine.SetState(StatePending) 102 | logger.INFO("[#cluster#] addengine, pool engine reused %s %s %s.", poolEngine.IP, poolEngine.Name, poolEngine.State()) 103 | } else { 104 | var err error 105 | poolEngine, err = NewEngine(nodeData, pool.Cluster.overcommitRatio, pool.Cluster.removeDelay, pool.Cluster.configCache) 106 | if err != nil { 107 | return 108 | } 109 | pool.poolEngines[poolEngine.IP] = poolEngine 110 | logger.INFO("[#cluster#] addengine, pool engine create %s %s %s.", poolEngine.IP, 
poolEngine.Name, poolEngine.State()) 111 | } 112 | pool.InitEngineNodeLabels(poolEngine) 113 | pool.pendEngines[poolEngine.IP] = poolEngine 114 | } 115 | 116 | // RemoveEngine is exported 117 | func (pool *EnginesPool) RemoveEngine(ip string, name string) { 118 | 119 | ipOrName := selectIPOrName(ip, name) 120 | nodeData := pool.Cluster.nodeCache.Get(ipOrName) 121 | if nodeData == nil { 122 | return 123 | } 124 | 125 | pool.Lock() 126 | if engine := pool.Cluster.GetEngine(nodeData.IP); engine != nil { 127 | pool.Cluster.Lock() 128 | delete(pool.Cluster.engines, engine.IP) 129 | pool.Cluster.Unlock() 130 | pool.pendEngines[engine.IP] = engine 131 | } 132 | pool.Unlock() 133 | } 134 | 135 | func (pool *EnginesPool) doLoop() { 136 | 137 | for { 138 | ticker := time.NewTicker(2 * time.Second) 139 | select { 140 | case <-ticker.C: 141 | { 142 | ticker.Stop() 143 | pool.Lock() 144 | wgroup := sync.WaitGroup{} 145 | for _, pendEngine := range pool.pendEngines { 146 | if pendEngine.IsPending() { 147 | wgroup.Add(1) 148 | go func(engine *Engine) { 149 | if err := engine.RefreshContainers(); err == nil { 150 | engine.Open() 151 | pool.Cluster.migtatorCache.Cancel(engine) 152 | pool.Cluster.Lock() 153 | pool.Cluster.engines[engine.IP] = engine 154 | pool.Cluster.Unlock() 155 | logger.INFO("[#cluster#] engine %s %s %s", engine.IP, engine.Name, engine.State()) 156 | } 157 | wgroup.Done() 158 | }(pendEngine) 159 | } else if pendEngine.IsHealthy() { 160 | wgroup.Add(1) 161 | go func(engine *Engine) { 162 | pool.Cluster.migtatorCache.Start(engine) 163 | engine.Close() 164 | logger.INFO("[#cluster#] engine %s %s %s", engine.IP, engine.Name, engine.State()) 165 | wgroup.Done() 166 | }(pendEngine) 167 | } 168 | } 169 | wgroup.Wait() 170 | for _, pendEngine := range pool.pendEngines { 171 | delete(pool.pendEngines, pendEngine.IP) 172 | } 173 | pool.Unlock() 174 | } 175 | case <-pool.stopCh: 176 | { 177 | ticker.Stop() 178 | return 179 | } 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /cluster/constraint.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "regexp" 7 | "strings" 8 | ) 9 | 10 | const ( 11 | eq = iota 12 | noteq 13 | 14 | // NodeLabelsPrefix is the constraint key prefix for node labels. 15 | NodeLabelsPrefix = "node.labels." 16 | // EngineLabelsPrefix is the constraint key prefix for engine labels. 17 | EngineLabelsPrefix = "engine.labels." 18 | ) 19 | 20 | var ( 21 | alphaNumeric = regexp.MustCompile(`^(?i)[a-z_][a-z0-9\-_.]+$`) 22 | // value can be alphanumeric and some special characters. it shouldn't container 23 | // current or future operators like '>, <, ~', etc. 24 | valuePattern = regexp.MustCompile(`^(?i)[a-z0-9:\-_\s\.\*\(\)\?\+\[\]\\\^\$\|\/]+$`) 25 | // operators defines list of accepted operators 26 | operators = []string{"==", "!="} 27 | ) 28 | 29 | // Constraint defines a constraint. 30 | type Constraint struct { 31 | key string 32 | operator int 33 | exp string 34 | } 35 | 36 | // ParseConstraints parses list of constraints. 
37 | func ParseConstraints(constraints []string) ([]Constraint, error) { 38 | 39 | exprs := []Constraint{} 40 | for _, c := range constraints { 41 | found := false 42 | // each expr is in the form of "key op value" 43 | for i, op := range operators { 44 | if !strings.Contains(c, op) { 45 | continue 46 | } 47 | // split with the op 48 | parts := strings.SplitN(c, op, 2) 49 | 50 | if len(parts) < 2 { 51 | return nil, fmt.Errorf("invalid expr: %s", c) 52 | } 53 | 54 | part0 := strings.TrimSpace(parts[0]) 55 | // validate key 56 | matched := alphaNumeric.MatchString(part0) 57 | if matched == false { 58 | return nil, fmt.Errorf("key '%s' is invalid", part0) 59 | } 60 | 61 | part1 := strings.TrimSpace(parts[1]) 62 | 63 | // validate Value 64 | matched = valuePattern.MatchString(part1) 65 | if matched == false { 66 | return nil, fmt.Errorf("value '%s' is invalid", part1) 67 | } 68 | // TODO(dongluochen): revisit requirements to see if globing or regex are useful 69 | exprs = append(exprs, Constraint{key: part0, operator: i, exp: part1}) 70 | 71 | found = true 72 | break // found an op, move to next entry 73 | } 74 | if !found { 75 | return nil, fmt.Errorf("constraint expected one operator from %s", strings.Join(operators, ", ")) 76 | } 77 | } 78 | return exprs, nil 79 | } 80 | 81 | // Match checks if the Constraint matches the target strings. 82 | func (c *Constraint) Match(whats ...string) bool { 83 | 84 | var match bool 85 | // full string match 86 | for _, what := range whats { 87 | // case insensitive compare 88 | if strings.EqualFold(c.exp, what) { 89 | match = true 90 | break 91 | } 92 | } 93 | 94 | switch c.operator { 95 | case eq: 96 | return match 97 | case noteq: 98 | return !match 99 | } 100 | return false 101 | } 102 | 103 | // MatchConstraints returns true if the node satisfies the given constraints. 
104 | func MatchConstraints(constraints []Constraint, engine *Engine) bool { 105 | 106 | for _, constraint := range constraints { 107 | switch { 108 | case strings.EqualFold(constraint.key, "node.id"): 109 | if !constraint.Match(engine.ID) { 110 | return false 111 | } 112 | case strings.EqualFold(constraint.key, "node.hostname"): 113 | // if this node doesn't have hostname 114 | // it's equivalent to match an empty hostname 115 | // where '==' would fail, '!=' matches 116 | if engine.Name == "" { 117 | if !constraint.Match("") { 118 | return false 119 | } 120 | continue 121 | } 122 | if !constraint.Match(engine.Name) { 123 | return false 124 | } 125 | case strings.EqualFold(constraint.key, "node.ip"): 126 | engineIP := net.ParseIP(engine.IP) 127 | // single IP address, node.ip == 2001:db8::2 128 | if ip := net.ParseIP(constraint.exp); ip != nil { 129 | ipEq := ip.Equal(engineIP) 130 | if (ipEq && constraint.operator != eq) || (!ipEq && constraint.operator == eq) { 131 | return false 132 | } 133 | continue 134 | } 135 | // CIDR subnet, node.ip != 210.8.4.0/24 136 | if _, subnet, err := net.ParseCIDR(constraint.exp); err == nil { 137 | within := subnet.Contains(engineIP) 138 | if (within && constraint.operator != eq) || (!within && constraint.operator == eq) { 139 | return false 140 | } 141 | continue 142 | } 143 | // reject constraint with malformed address/network 144 | return false 145 | /* 146 | case strings.EqualFold(constraint.key, "node.role"): 147 | if !constraint.Match(n.Role.String()) { 148 | return false 149 | } 150 | */ 151 | case strings.EqualFold(constraint.key, "node.platform.os"): 152 | if engine.OSType == "" { 153 | if !constraint.Match("") { 154 | return false 155 | } 156 | continue 157 | } 158 | if !constraint.Match(engine.OSType) { 159 | return false 160 | } 161 | case strings.EqualFold(constraint.key, "node.platform.arch"): 162 | if engine.Architecture == "" { 163 | if !constraint.Match("") { 164 | return false 165 | } 166 | continue 167 | } 168 | if !constraint.Match(engine.Architecture) { 169 | return false 170 | } 171 | // node labels constraint in form like 'node.labels.key==value' 172 | case len(constraint.key) > len(NodeLabelsPrefix) && strings.EqualFold(constraint.key[:len(NodeLabelsPrefix)], NodeLabelsPrefix): 173 | if engine.NodeLabels == nil { 174 | if !constraint.Match("") { 175 | return false 176 | } 177 | continue 178 | } 179 | label := constraint.key[len(NodeLabelsPrefix):] 180 | // label itself is case sensitive 181 | val := engine.NodeLabels[label] 182 | if !constraint.Match(val) { 183 | return false 184 | } 185 | // engine labels constraint in form like 'engine.labels.key!=value' 186 | case len(constraint.key) > len(EngineLabelsPrefix) && strings.EqualFold(constraint.key[:len(EngineLabelsPrefix)], EngineLabelsPrefix): 187 | if engine.EngineLabels == nil { 188 | if !constraint.Match("") { 189 | return false 190 | } 191 | continue 192 | } 193 | label := constraint.key[len(EngineLabelsPrefix):] 194 | val := engine.EngineLabels[label] 195 | if !constraint.Match(val) { 196 | return false 197 | } 198 | default: 199 | // key doesn't match predefined syntax 200 | return false 201 | } 202 | } 203 | return true 204 | } 205 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/README.md: -------------------------------------------------------------------------------- 1 | # libkv 2 | 3 | [![GoDoc](https://godoc.org/github.com/docker/libkv?status.png)](https://godoc.org/github.com/docker/libkv) 4 | [![Build 
Status](https://travis-ci.org/docker/libkv.svg?branch=master)](https://travis-ci.org/docker/libkv) 5 | [![Coverage Status](https://coveralls.io/repos/docker/libkv/badge.svg)](https://coveralls.io/r/docker/libkv) 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/docker/libkv)](https://goreportcard.com/report/github.com/docker/libkv) 7 | 8 | `libkv` provides a `Go` native library to store metadata. 9 | 10 | The goal of `libkv` is to abstract common store operations for multiple distributed and/or local Key/Value store backends. 11 | 12 | For example, you can use it to store your metadata or for service discovery to register machines and endpoints inside your cluster. 13 | 14 | You can also easily implement a generic *Leader Election* on top of it (see the [docker/leadership](https://github.com/docker/leadership) repository). 15 | 16 | As of now, `libkv` offers support for `Consul`, `Etcd`, `Zookeeper` (**Distributed** store) and `BoltDB` (**Local** store). 17 | 18 | ## Usage 19 | 20 | `libkv` is meant to be used as an abstraction layer over existing distributed Key/Value stores. It is especially useful if you plan to support `consul`, `etcd` and `zookeeper` using the same codebase. 21 | 22 | It is ideal if you are writing something in Go that should support: 23 | 24 | - A simple metadata storage, distributed or local 25 | - A lightweight discovery service for your nodes 26 | - A distributed lock mechanism 27 | 28 | You can find examples of usage for `libkv` in `docs/examples.md`. Optionally, you can also take a look at the `docker/swarm` or `docker/libnetwork` repositories, which use `docker/libkv` for all the use cases listed above. 29 | 30 | ## Supported versions 31 | 32 | `libkv` supports: 33 | - Consul versions >= `0.5.1`, because it uses Sessions with `Delete` behavior for the use of `TTLs` (mimicking zookeeper's Ephemeral node support). If you don't plan to use `TTLs`, you can use Consul version `0.4.0+`. 34 | - Etcd versions >= `2.0`, because it uses the new `coreos/etcd/client`; this might change in the future as support for `APIv3` comes along and adds more capabilities. 35 | - Zookeeper versions >= `3.4.5`. Although this might work with previous versions, it remains untested as of now. 36 | - Boltdb, which shouldn't be subject to any version dependencies. 37 | 38 | ## Interface 39 | 40 | A **storage backend** in `libkv` should implement (fully or partially) this interface: 41 | 42 | ```go 43 | type Store interface { 44 | Put(key string, value []byte, options *WriteOptions) error 45 | Get(key string) (*KVPair, error) 46 | Delete(key string) error 47 | Exists(key string) (bool, error) 48 | Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) 49 | WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) 50 | NewLock(key string, options *LockOptions) (Locker, error) 51 | List(directory string) ([]*KVPair, error) 52 | DeleteTree(directory string) error 53 | AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) 54 | AtomicDelete(key string, previous *KVPair) (bool, error) 55 | Close() 56 | } 57 | ``` 58 | 59 | ## Compatibility matrix 60 | 61 | Backend drivers in `libkv` are generally divided between **local drivers** and **distributed drivers**. Distributed backends offer enhanced capabilities like `Watches` and/or distributed `Locks`.
62 | 63 | Local drivers are usually used as a complement to the distributed drivers, to store information that only needs to be available locally. 64 | 65 | | Calls | Consul | Etcd | Zookeeper | BoltDB | 66 | |-----------------------|:----------:|:------:|:-----------:|:--------:| 67 | | Put | X | X | X | X | 68 | | Get | X | X | X | X | 69 | | Delete | X | X | X | X | 70 | | Exists | X | X | X | X | 71 | | Watch | X | X | X | | 72 | | WatchTree | X | X | X | | 73 | | NewLock (Lock/Unlock) | X | X | X | | 74 | | List | X | X | X | X | 75 | | DeleteTree | X | X | X | X | 76 | | AtomicPut | X | X | X | X | 77 | | Close | X | X | X | X | 78 | 79 | ## Limitations 80 | 81 | Distributed Key/Value stores often have different concepts for managing and formatting keys and their associated values. Even though `libkv` tries to abstract those stores aiming for some consistency, in some cases it can't be applied easily. 82 | 83 | Please refer to `docs/compatibility.md` to see what the special cases are for cross-backend compatibility. 84 | 85 | Other than those special cases, you should expect the same experience for basic operations like `Get`/`Put`, etc. 86 | 87 | Calls like `WatchTree` may return a different set (or number) of events depending on the backend (for now, `Etcd` and `Consul` will likely return more events than `Zookeeper`, which you should triage properly). You should nevertheless be able to use it to watch events in an interchangeable way (see the **docker/leadership** repository or the **pkg/discovery/kv** package in **docker/docker**). 88 | 89 | ## TLS 90 | 91 | Only `Consul` and `etcd` have support for TLS, and you should build and provide your own `config.TLS` object to feed the client. Support is planned for `zookeeper`. 92 | 93 | ## Roadmap 94 | 95 | - Make the API nicer to use (using `options`) 96 | - Provide more options (`consistency` for example) 97 | - Improve performance (remove extra `Get`/`List` operations) 98 | - Better key formatting 99 | - New backends? 100 | 101 | ## Contributing 102 | 103 | Want to hack on libkv? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. 104 | 105 | ## Copyright and license 106 | 107 | Copyright © 2014-2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. 108 | -------------------------------------------------------------------------------- /cluster/client.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/common/models" 4 | import "github.com/humpback/gounits/httpx" 5 | import "github.com/docker/docker/api/types" 6 | import ctypes "github.com/humpback/humpback-center/cluster/types" 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "net" 12 | "net/http" 13 | "time" 14 | ) 15 | 16 | // Client is exported 17 | type Client struct { 18 | ApiAddr string 19 | c *httpx.HttpClient 20 | } 21 | 22 | // NewClient is exported 23 | func NewClient(apiAddr string) *Client { 24 | 25 | client := httpx.NewClient().
26 | SetTransport(&http.Transport{ 27 | Proxy: http.ProxyFromEnvironment, 28 | DialContext: (&net.Dialer{ 29 | Timeout: 45 * time.Second, 30 | KeepAlive: 90 * time.Second, 31 | }).DialContext, 32 | DisableKeepAlives: false, 33 | MaxIdleConns: 10, 34 | MaxIdleConnsPerHost: 10, 35 | IdleConnTimeout: 90 * time.Second, 36 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 37 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 38 | }) 39 | 40 | return &Client{ 41 | ApiAddr: apiAddr, 42 | c: client, 43 | } 44 | } 45 | 46 | // Close is exported 47 | // client close 48 | func (client *Client) Close() { 49 | 50 | client.c.Close() 51 | } 52 | 53 | // GetDockerInfoRequest is exported 54 | // get docker node info 55 | func (client *Client) GetDockerInfoRequest(ctx context.Context) (*types.Info, error) { 56 | 57 | respSpecs, err := client.c.Get(ctx, "http://"+client.ApiAddr+"/v1/dockerinfo", nil, nil) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | defer respSpecs.Close() 63 | if respSpecs.StatusCode() >= http.StatusBadRequest { 64 | return nil, fmt.Errorf("dockerinfo request, %s", ctypes.ParseHTTPResponseError(respSpecs)) 65 | } 66 | 67 | dockerInfo := &types.Info{} 68 | if err := respSpecs.JSON(dockerInfo); err != nil { 69 | return nil, err 70 | } 71 | return dockerInfo, nil 72 | } 73 | 74 | // GetContainerRequest is exported 75 | // get a container type info. 76 | func (client *Client) GetContainerRequest(ctx context.Context, containerid string) (*types.ContainerJSON, error) { 77 | 78 | query := map[string][]string{"originaldata": []string{"true"}} 79 | respContainer, err := client.c.Get(ctx, "http://"+client.ApiAddr+"/v1/containers/"+containerid, query, nil) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | defer respContainer.Close() 85 | if respContainer.StatusCode() >= http.StatusBadRequest { 86 | return nil, fmt.Errorf("container %s request, %s", ShortContainerID(containerid), ctypes.ParseHTTPResponseError(respContainer)) 87 | } 88 | 89 | containerJSON := &types.ContainerJSON{} 90 | if err := respContainer.JSON(containerJSON); err != nil { 91 | return nil, err 92 | } 93 | return containerJSON, nil 94 | } 95 | 96 | // GetContainersRequest is exported 97 | // return all containers info. 98 | func (client *Client) GetContainersRequest(ctx context.Context) ([]types.Container, error) { 99 | 100 | query := map[string][]string{"all": []string{"true"}} 101 | respContainers, err := client.c.Get(ctx, "http://"+client.ApiAddr+"/v1/containers", query, nil) 102 | if err != nil { 103 | return nil, err 104 | } 105 | 106 | defer respContainers.Close() 107 | if respContainers.StatusCode() >= http.StatusBadRequest { 108 | return nil, fmt.Errorf("containers request, %s", ctypes.ParseHTTPResponseError(respContainers)) 109 | } 110 | 111 | allContainers := []types.Container{} 112 | if err := respContainers.JSON(&allContainers); err != nil { 113 | return nil, err 114 | } 115 | return allContainers, nil 116 | } 117 | 118 | // CreateContainerRequest is exported 119 | // create a container request. 
120 | func (client *Client) CreateContainerRequest(ctx context.Context, config models.Container) (*ctypes.CreateContainerResponse, error) { 121 | 122 | respCreated, err := client.c.PostJSON(ctx, "http://"+client.ApiAddr+"/v1/containers", nil, config, nil) 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | defer respCreated.Close() 128 | if respCreated.StatusCode() >= http.StatusBadRequest { 129 | return nil, fmt.Errorf("create container %s request, %s", config.Name, ctypes.ParseHTTPResponseError(respCreated)) 130 | } 131 | 132 | createContainerResponse := &ctypes.CreateContainerResponse{} 133 | if err := respCreated.JSON(createContainerResponse); err != nil { 134 | return nil, err 135 | } 136 | return createContainerResponse, nil 137 | } 138 | 139 | // RemoveContainerRequest is exported 140 | // remove a container request. 141 | func (client *Client) RemoveContainerRequest(ctx context.Context, containerid string) error { 142 | 143 | query := map[string][]string{"force": []string{"true"}} 144 | respRemoved, err := client.c.Delete(ctx, "http://"+client.ApiAddr+"/v1/containers/"+containerid, query, nil) 145 | if err != nil { 146 | return err 147 | } 148 | 149 | defer respRemoved.Close() 150 | if respRemoved.StatusCode() >= http.StatusBadRequest { 151 | return fmt.Errorf("remove container %s request, %s", ShortContainerID(containerid), ctypes.ParseHTTPResponseError(respRemoved)) 152 | } 153 | return nil 154 | } 155 | 156 | // OperateContainerRequest is exported 157 | // operate a container request. 158 | func (client *Client) OperateContainerRequest(ctx context.Context, operate models.ContainerOperate) error { 159 | 160 | respOperated, err := client.c.PutJSON(ctx, "http://"+client.ApiAddr+"/v1/containers", nil, operate, nil) 161 | if err != nil { 162 | return err 163 | } 164 | 165 | defer respOperated.Close() 166 | if respOperated.StatusCode() >= http.StatusBadRequest { 167 | return fmt.Errorf("%s container %s request, %s", operate.Action, ShortContainerID(operate.Container), ctypes.ParseHTTPResponseError(respOperated)) 168 | } 169 | return nil 170 | } 171 | 172 | // UpgradeContainerRequest is exported 173 | // upgrade a container request. 
174 | func (client *Client) UpgradeContainerRequest(ctx context.Context, operate models.ContainerOperate) (*ctypes.UpgradeContainerResponse, error) { 175 | 176 | respUpgraded, err := client.c.PutJSON(ctx, "http://"+client.ApiAddr+"/v1/containers", nil, operate, nil) 177 | if err != nil { 178 | return nil, err 179 | } 180 | 181 | defer respUpgraded.Close() 182 | if respUpgraded.StatusCode() >= http.StatusBadRequest { 183 | return nil, fmt.Errorf("upgrate container %s request, %s", ShortContainerID(operate.Container), ctypes.ParseHTTPResponseError(respUpgraded)) 184 | } 185 | 186 | upgradeContainerResponse := &ctypes.UpgradeContainerResponse{} 187 | if err := respUpgraded.JSON(upgradeContainerResponse); err != nil { 188 | return nil, err 189 | } 190 | return upgradeContainerResponse, nil 191 | } 192 | -------------------------------------------------------------------------------- /etc/lookupenv.go: -------------------------------------------------------------------------------- 1 | package etc 2 | 3 | import "github.com/humpback/gounits/convert" 4 | 5 | import ( 6 | "errors" 7 | "fmt" 8 | "net" 9 | "net/url" 10 | "os" 11 | "path/filepath" 12 | "strconv" 13 | "strings" 14 | "time" 15 | ) 16 | 17 | var ( 18 | ERRConfigurationParseEnv = errors.New("configuration parseEnv error") 19 | ) 20 | 21 | // ParseEnv is exported 22 | func (conf *Configuration) ParseEnv() error { 23 | 24 | pidFile := os.Getenv("HUMPBACK_PIDFILE") 25 | if pidFile != "" { 26 | conf.PIDFile = pidFile 27 | } 28 | 29 | retryStartup := os.Getenv("HUMPBACK_RETRYSTARTUP") 30 | if retryStartup != "" { 31 | value, err := strconv.ParseBool(retryStartup) 32 | if err != nil { 33 | return fmt.Errorf("HUMPBACK_RETRYSTARTUP invalid, %s", err.Error()) 34 | } 35 | conf.RetryStartup = value 36 | } 37 | 38 | siteAPI := os.Getenv("HUMPBACK_SITEAPI") 39 | if siteAPI != "" { 40 | if _, err := url.Parse(siteAPI); err != nil { 41 | return fmt.Errorf("%s, HUMPBACK_SITEAPI %s", ERRConfigurationParseEnv.Error(), err.Error()) 42 | } 43 | conf.SiteAPI = siteAPI 44 | } 45 | 46 | if err := parseClusterEnv(conf); err != nil { 47 | return err 48 | } 49 | 50 | if err := parseAPIEnv(conf); err != nil { 51 | return err 52 | } 53 | 54 | if err := parseLogEnv(conf); err != nil { 55 | return err 56 | } 57 | 58 | return nil 59 | } 60 | 61 | func parseClusterEnv(conf *Configuration) error { 62 | 63 | driverOpts := convert.ConvertKVStringSliceToMap(conf.Cluster.DriverOpts) 64 | clusterLocation := os.Getenv("CENTER_CLUSTER_LOCATION") 65 | if clusterLocation != "" { 66 | driverOpts["location"] = clusterLocation 67 | } 68 | 69 | dataPath := os.Getenv("CENTER_CLUSTER_DATAPATH") 70 | if dataPath != "" { 71 | driverOpts["datapath"] = dataPath 72 | } 73 | 74 | cacheRoot := os.Getenv("CENTER_CLUSTER_CACHEROOT") 75 | if cacheRoot != "" { 76 | if _, err := filepath.Abs(cacheRoot); err != nil { 77 | return fmt.Errorf("%s, CENTER_CLUSTER_CACHEROOT %s", ERRConfigurationParseEnv.Error(), err.Error()) 78 | } 79 | driverOpts["cacheroot"] = cacheRoot 80 | } 81 | 82 | overCommit := os.Getenv("CENTER_CLUSTER_OVERCOMMIT") 83 | if overCommit != "" { 84 | if _, err := strconv.ParseFloat(overCommit, 2); err != nil { 85 | return fmt.Errorf("%s, CENTER_CLUSTER_OVERCOMMIT %s", ERRConfigurationParseEnv.Error(), err.Error()) 86 | } 87 | driverOpts["overcommit"] = overCommit 88 | } 89 | 90 | recoveryInterval := os.Getenv("CENTER_CLUSTER_RECOVERYINTERVAL") 91 | if recoveryInterval != "" { 92 | if _, err := time.ParseDuration(recoveryInterval); err != nil { 93 | return fmt.Errorf("%s, 
CENTER_CLUSTER_RECOVERYINTERVAL %s", ERRConfigurationParseEnv.Error(), err.Error()) 94 | } 95 | driverOpts["recoveryinterval"] = recoveryInterval 96 | } 97 | 98 | createRetry := os.Getenv("CENTER_CLUSTER_CREATERETRY") 99 | if createRetry != "" { 100 | if _, err := strconv.Atoi(createRetry); err != nil { 101 | return fmt.Errorf("%s, CENTER_CLUSTER_CREATERETRY %s", ERRConfigurationParseEnv.Error(), err.Error()) 102 | } 103 | driverOpts["createretry"] = createRetry 104 | } 105 | 106 | removeDelay := os.Getenv("CENTER_CLUSTER_REMOVEDELAY") 107 | if removeDelay != "" { 108 | if _, err := time.ParseDuration(removeDelay); err != nil { 109 | return fmt.Errorf("%s, CENTER_CLUSTER_REMOVEDELAY %s", ERRConfigurationParseEnv.Error(), err.Error()) 110 | } 111 | driverOpts["removedelay"] = removeDelay 112 | } 113 | 114 | migrateDelay := os.Getenv("CENTER_CLUSTER_MIGRATEDELAY") 115 | if migrateDelay != "" { 116 | if _, err := time.ParseDuration(migrateDelay); err != nil { 117 | return fmt.Errorf("%s, CENTER_CLUSTER_MIGRATEDELAY %s", ERRConfigurationParseEnv.Error(), err.Error()) 118 | } 119 | driverOpts["migratedelay"] = migrateDelay 120 | } 121 | conf.Cluster.DriverOpts = convert.ConvertMapToKVStringSlice(driverOpts) 122 | 123 | clusterURIs := os.Getenv("DOCKER_CLUSTER_URIS") 124 | if clusterURIs != "" { 125 | conf.Cluster.Discovery.URIs = clusterURIs 126 | } 127 | 128 | clusterName := os.Getenv("DOCKER_CLUSTER_NAME") 129 | if clusterName != "" { 130 | conf.Cluster.Discovery.Cluster = clusterName 131 | } 132 | 133 | clusterHeartBeat := os.Getenv("DOCKER_CLUSTER_HEARTBEAT") 134 | if clusterHeartBeat != "" { 135 | if _, err := time.ParseDuration(clusterHeartBeat); err != nil { 136 | return fmt.Errorf("%s, DOCKER_CLUSTER_HEARTBEAT %s", ERRConfigurationParseEnv.Error(), err.Error()) 137 | } 138 | conf.Cluster.Discovery.Heartbeat = clusterHeartBeat 139 | } 140 | return nil 141 | } 142 | 143 | func parseAPIEnv(conf *Configuration) error { 144 | 145 | listenPort := os.Getenv("CENTER_LISTEN_PORT") 146 | if listenPort != "" { 147 | var ( 148 | bindIPAddr string 149 | bindPort string 150 | ) 151 | bindArray := strings.SplitN(listenPort, ":", 2) 152 | if len(bindArray) == 1 { 153 | bindPort = bindArray[0] 154 | } else { 155 | bindIPAddr := bindArray[0] 156 | bindPort = bindArray[1] 157 | if len(bindIPAddr) > 0 { 158 | if _, err := net.ResolveIPAddr("tcp", bindIPAddr); err != nil { 159 | return fmt.Errorf("%s, CENTER_LISTEN_PORT host ipaddr error, %s", ERRConfigurationParseEnv.Error(), err.Error()) 160 | } 161 | } 162 | } 163 | nPort, err := strconv.Atoi(bindPort) 164 | if err != nil { 165 | return fmt.Errorf("%s, CENTER_LISTEN_PORT %s", ERRConfigurationParseEnv.Error(), err.Error()) 166 | } 167 | if nPort <= 0 || nPort > 65535 { 168 | return fmt.Errorf("%s, CENTER_LISTEN_PORT range invalid", ERRConfigurationParseEnv.Error()) 169 | } 170 | conf.API.Hosts = []string{bindIPAddr + ":" + bindPort} 171 | } 172 | 173 | enableCore := os.Getenv("CENTER_API_ENABLECORS") 174 | if enableCore != "" { 175 | ret, err := strconv.ParseBool(enableCore) 176 | if err != nil { 177 | return fmt.Errorf("%s, CENTER_API_ENABLECORS %s", ERRConfigurationParseEnv.Error(), err.Error()) 178 | } 179 | conf.API.EnableCors = ret 180 | } 181 | return nil 182 | } 183 | 184 | func parseLogEnv(conf *Configuration) error { 185 | 186 | logFile := os.Getenv("CENTER_LOG_FILE") 187 | if logFile != "" { 188 | if _, err := filepath.Abs(logFile); err != nil { 189 | return fmt.Errorf("%s, CENTER_LOG_FILE %s", ERRConfigurationParseEnv.Error(), err.Error()) 190 
| } 191 | conf.Logger.LogFile = logFile 192 | } 193 | 194 | logLevel := os.Getenv("CENTER_LOG_LEVEL") 195 | if logLevel != "" { 196 | conf.Logger.LogLevel = logLevel 197 | } 198 | 199 | logSize := os.Getenv("CENTER_LOG_SIZE") 200 | if logSize != "" { 201 | lSize, err := strconv.Atoi(logSize) 202 | if err != nil { 203 | return fmt.Errorf("%s, CENTER_LOG_SIZE %s", ERRConfigurationParseEnv.Error(), err.Error()) 204 | } 205 | conf.Logger.LogSize = (int64)(lSize) 206 | } 207 | return nil 208 | } 209 | -------------------------------------------------------------------------------- /notify/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Humpback Notify 7 | 30 | 31 | 32 | 33 | 34 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | {{if .Exception}} 62 | 63 | 64 | 65 | 66 | {{end}} 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | {{if .GroupMeta}} 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | {{if .GroupMeta.Location}} 97 | 98 | 99 | 100 | 101 | {{end}} 102 | 103 | 104 | 105 | 106 | 107 | 108 | 118 | 119 | 120 | 121 | 137 | 138 | {{end}} 139 | {{if .WatchGroup}} 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | {{if .WatchGroup.Location}} 149 | 150 | 151 | 152 | 153 | {{end}} 154 | 155 | 156 | 167 | 168 | {{end}} 169 | 170 | 171 | 172 |
[The HTML markup of this template was lost in extraction; only the rendered content survives. The template lays out a "Humpback Notify" e-mail as a table with rows for ID, Event, Description, Exception (only when {{.Exception}} is set), Timestamp and Datetime; an optional {{.GroupMeta}} block with MetaID, MetaName, Image, GroupID, GroupName, Location (when set), Instances, an Engines list rendering {{.IP}} {{.Name}} {{.State}} per engine (styled differently when the state is "Healthy"), and a Containers list rendering {{.ID}} {{.Name}} -> {{.Server}} {{.State}} per container, or a recovery notice when the meta has no valid containers; and an optional {{.WatchGroup}} block with GroupID, GroupName, Location (when set) and an Engines list in the same format.]
173 | 174 | -------------------------------------------------------------------------------- /cluster/container.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/docker/docker/pkg/stringid" 4 | import "github.com/docker/docker/api/types" 5 | import units "github.com/docker/go-units" 6 | import "github.com/humpback/gounits/convert" 7 | import "github.com/humpback/gounits/rand" 8 | import "github.com/humpback/common/models" 9 | 10 | import ( 11 | "fmt" 12 | "strings" 13 | "time" 14 | ) 15 | 16 | // ShortContainerID is exported 17 | // return a short containerid string. 18 | func ShortContainerID(containerid string) string { 19 | return stringid.TruncateID(containerid) 20 | } 21 | 22 | // ContainerConfig is exported 23 | type ContainerConfig struct { 24 | models.Container 25 | } 26 | 27 | // Container is exported 28 | type Container struct { 29 | BaseConfig *ContainerBaseConfig 30 | Config *ContainerConfig 31 | Info types.ContainerJSON 32 | Engine *Engine 33 | } 34 | 35 | // GroupID is exported 36 | // Return Container GroupID 37 | func (c *Container) GroupID() string { 38 | 39 | if c.BaseConfig != nil && c.BaseConfig.MetaData != nil { 40 | return c.BaseConfig.MetaData.GroupID 41 | } 42 | return "" 43 | } 44 | 45 | // MetaID is exported 46 | // Return Container MetaID 47 | func (c *Container) MetaID() string { 48 | 49 | if c.BaseConfig != nil && c.BaseConfig.MetaData != nil { 50 | return c.BaseConfig.MetaData.MetaID 51 | } 52 | return "" 53 | } 54 | 55 | // Index is exported 56 | // Return Container Index 57 | func (c *Container) Index() int { 58 | 59 | if c.BaseConfig != nil { 60 | return c.BaseConfig.Index 61 | } 62 | return -1 63 | } 64 | 65 | // OriginalName is exported 66 | // Return Container OriginalName 67 | func (c *Container) OriginalName() string { 68 | 69 | if c.BaseConfig != nil { 70 | configEnvMap := convert.ConvertKVStringSliceToMap(c.BaseConfig.Env) 71 | if originalName, ret := configEnvMap["HUMPBACK_CLUSTER_CONTAINER_ORIGINALNAME"]; ret { 72 | return originalName 73 | } 74 | } 75 | return "" 76 | } 77 | 78 | // ValidateConfig is exported 79 | func (c *Container) ValidateConfig() bool { 80 | 81 | configEnvMap := convert.ConvertKVStringSliceToMap(c.Info.Config.Env) 82 | groupID := configEnvMap["HUMPBACK_CLUSTER_GROUPID"] 83 | metaID := configEnvMap["HUMPBACK_CLUSTER_METAID"] 84 | if len(groupID) == 0 && len(metaID) == 0 { 85 | return true //true, general container that do not called scheduling of cluster. 
86 | } 87 | 88 | if len(groupID) > 0 || len(metaID) > 0 { 89 | if c.BaseConfig != nil { // valid cluster container 90 | return true 91 | } 92 | } 93 | return false // invalid cluster container 94 | } 95 | 96 | // update is exported 97 | // update container info and config 98 | func (c *Container) update(engine *Engine, containerJSON *types.ContainerJSON) bool { 99 | 100 | config := &models.Container{} 101 | config.Parse(containerJSON) 102 | containerConfig := &ContainerConfig{ 103 | Container: *config, 104 | } 105 | 106 | c.Config = containerConfig 107 | containerJSON.HostConfig.CPUShares = containerJSON.HostConfig.CPUShares * engine.Cpus / 1024.0 108 | //startAt, _ := time.Parse(time.RFC3339Nano, containerJSON.State.StartedAt) 109 | //finishedAt, _ := time.Parse(time.RFC3339Nano, containerJSON.State.FinishedAt) 110 | //containerJSON.State.StartedAt = startAt.Add(engine.DeltaDuration).Format(time.RFC3339Nano) 111 | //containerJSON.State.FinishedAt = finishedAt.Add(engine.DeltaDuration).Format(time.RFC3339Nano) 112 | c.Info = *containerJSON 113 | 114 | configEnvMap := convert.ConvertKVStringSliceToMap(containerJSON.Config.Env) 115 | groupID := configEnvMap["HUMPBACK_CLUSTER_GROUPID"] 116 | metaID := configEnvMap["HUMPBACK_CLUSTER_METAID"] 117 | if len(groupID) > 0 && len(metaID) > 0 { 118 | c.BaseConfig = engine.configCache.GetContainerBaseConfig(metaID, containerJSON.ID) 119 | return true 120 | } 121 | c.BaseConfig = nil 122 | return false 123 | } 124 | 125 | // Containers represents a list of containers 126 | type Containers []*Container 127 | 128 | // StateString returns a single string to describe state 129 | func StateString(state *types.ContainerState) string { 130 | 131 | startedAt, _ := time.Parse(time.RFC3339Nano, state.StartedAt) 132 | if state.Running { 133 | if state.Paused { 134 | return "Paused" 135 | } 136 | if state.Restarting { 137 | return "Restarting" 138 | } 139 | return "Running" 140 | } 141 | 142 | if state.Dead { 143 | return "Dead" 144 | } 145 | 146 | if startedAt.IsZero() { 147 | return "Created" 148 | } 149 | return "Exited" 150 | } 151 | 152 | // FullStateString returns readable description of the state 153 | func FullStateString(state *types.ContainerState) string { 154 | 155 | startedAt, _ := time.Parse(time.RFC3339Nano, state.StartedAt) 156 | finishedAt, _ := time.Parse(time.RFC3339Nano, state.FinishedAt) 157 | if state.Running { 158 | if state.Paused { 159 | return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(startedAt))) 160 | } 161 | if state.Restarting { 162 | return fmt.Sprintf("Restarting (%d) %s ago", state.ExitCode, units.HumanDuration(time.Now().UTC().Sub(finishedAt))) 163 | } 164 | healthText := "" 165 | if h := state.Health; h != nil { 166 | switch h.Status { 167 | case types.Starting: 168 | healthText = "Health: Starting" 169 | default: 170 | healthText = h.Status 171 | } 172 | } 173 | if len(healthText) > 0 { 174 | return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(startedAt)), healthText) 175 | } 176 | return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(startedAt))) 177 | } 178 | 179 | if state.Dead { 180 | return "Dead" 181 | } 182 | 183 | if startedAt.IsZero() { 184 | return "Created" 185 | } 186 | 187 | if finishedAt.IsZero() { 188 | return "" 189 | } 190 | 191 | return fmt.Sprintf("Exited (%d) %s ago", state.ExitCode, units.HumanDuration(time.Now().UTC().Sub(finishedAt))) 192 | } 193 | 194 | // Get returns a container using its ID or Name 195 | func (containers Containers) 
Get(IDOrName string) *Container { 196 | 197 | if len(strings.TrimSpace(IDOrName)) == 0 { 198 | return nil 199 | } 200 | 201 | for _, container := range containers { 202 | if container.Info.ID == IDOrName || rand.TruncateID(container.Info.ID) == IDOrName { 203 | return container 204 | } 205 | } 206 | 207 | candidates := []*Container{} 208 | for _, container := range containers { 209 | name := container.Info.Name 210 | if name == IDOrName || name == "/"+IDOrName || container.Engine.ID+name == IDOrName || container.Engine.Name+name == IDOrName { 211 | candidates = append(candidates, container) 212 | } 213 | } 214 | 215 | if size := len(candidates); size == 1 { 216 | return candidates[0] 217 | } else if size > 1 { 218 | return nil 219 | } 220 | 221 | for _, container := range containers { 222 | if strings.HasPrefix(container.Info.ID, IDOrName) { 223 | candidates = append(candidates, container) 224 | } 225 | } 226 | 227 | if len(candidates) == 1 { 228 | return candidates[0] 229 | } 230 | return nil 231 | } 232 | -------------------------------------------------------------------------------- /ctrl/cluster.go: -------------------------------------------------------------------------------- 1 | package ctrl 2 | 3 | import "github.com/humpback/common/models" 4 | import "github.com/humpback/humpback-center/api/request" 5 | import "github.com/humpback/humpback-center/cluster" 6 | import "github.com/humpback/humpback-center/cluster/types" 7 | import "github.com/humpback/humpback-center/etc" 8 | import "github.com/humpback/humpback-center/notify" 9 | import "github.com/humpback/discovery" 10 | import "github.com/humpback/gounits/logger" 11 | 12 | import ( 13 | "context" 14 | "encoding/base64" 15 | "fmt" 16 | "net/http" 17 | "strings" 18 | "time" 19 | ) 20 | 21 | func createCluster(configuration *etc.Configuration) (*cluster.Cluster, error) { 22 | 23 | clusterOpts := configuration.Cluster 24 | heartbeat, err := time.ParseDuration(clusterOpts.Discovery.Heartbeat) 25 | if err != nil { 26 | return nil, fmt.Errorf("discovery heartbeat invalid.") 27 | } 28 | 29 | if heartbeat < 1*time.Second { 30 | return nil, fmt.Errorf("discovery heartbeat should be at least 1s.") 31 | } 32 | 33 | configopts := map[string]string{"kv.path": strings.TrimSpace(clusterOpts.Discovery.Cluster)} 34 | discovery, err := discovery.New(clusterOpts.Discovery.URIs, heartbeat, 0, configopts) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | siteURL := strings.SplitN(configuration.SiteAPI, "/api", 2) 40 | notifySender := notify.NewNotifySender(siteURL[0], configuration.GetNotificationsEndPoints()) 41 | cluster, err := cluster.NewCluster(clusterOpts.DriverOpts, notifySender, discovery) 42 | if err != nil { 43 | return nil, err 44 | } 45 | return cluster, nil 46 | } 47 | 48 | func (c *Controller) initCluster() { 49 | 50 | if groups := c.getClusterGroupStoreData(""); groups != nil { 51 | logger.INFO("[#ctrl#] init cluster groups:%d", len(groups)) 52 | for _, group := range groups { 53 | if group.IsCluster { 54 | if c.Cluster.Location == "" || strings.ToUpper(group.Location) == strings.ToUpper(c.Cluster.Location) { 55 | c.Cluster.SetGroup(group) 56 | } 57 | } 58 | } 59 | } 60 | } 61 | 62 | func (c *Controller) startCluster() error { 63 | 64 | c.initCluster() 65 | logger.INFO("[#ctrl#] start cluster.") 66 | return c.Cluster.Start() 67 | } 68 | 69 | func (c *Controller) stopCluster() { 70 | 71 | c.Cluster.Stop() 72 | logger.INFO("[#ctrl#] stop cluster.") 73 | } 74 | 75 | func (c *Controller) getClusterGroupStoreData(groupid string) 
[]*cluster.Group { 76 | 77 | query := map[string][]string{} 78 | groupid = strings.TrimSpace(groupid) 79 | if groupid != "" { 80 | query["groupid"] = []string{groupid} 81 | } 82 | 83 | t := time.Now().UnixNano() / int64(time.Millisecond) 84 | value := fmt.Sprintf("HUMPBACK_CENTER%d", t) 85 | code := base64.StdEncoding.EncodeToString([]byte(value)) 86 | headers := map[string][]string{"x-get-cluster": []string{code}} 87 | respGroups, err := c.client.Get(context.Background(), c.Configuration.SiteAPI+"/groups/getclusters", query, headers) 88 | if err != nil { 89 | logger.ERROR("[#ctrl#] get cluster group storedata error:%s", err.Error()) 90 | return nil 91 | } 92 | 93 | defer respGroups.Close() 94 | if respGroups.StatusCode() != http.StatusOK { 95 | logger.ERROR("[#ctrl#] get cluster group storedata error:%d %s", respGroups.StatusCode(), respGroups.String()) 96 | return nil 97 | } 98 | 99 | groups := []*cluster.Group{} 100 | if err := respGroups.JSON(&groups); err != nil { 101 | logger.ERROR("[#ctrl#] get cluster group storedata error:%s", err.Error()) 102 | return nil 103 | } 104 | return groups 105 | } 106 | 107 | func (c *Controller) SetCluster(cluster *cluster.Cluster) { 108 | 109 | if cluster != nil { 110 | logger.INFO("[#ctrl#] set cluster %p.", cluster) 111 | c.Cluster = cluster 112 | } 113 | } 114 | 115 | func (c *Controller) GetClusterGroupAllContainers(groupid string) *types.GroupContainers { 116 | 117 | return c.Cluster.GetGroupAllContainers(groupid) 118 | } 119 | 120 | func (c *Controller) GetClusterGroupContainers(metaid string) *types.GroupContainer { 121 | 122 | return c.Cluster.GetGroupContainers(metaid) 123 | } 124 | 125 | func (c *Controller) GetClusterGroupContainersMetaBase(metaid string) *cluster.MetaBase { 126 | 127 | return c.Cluster.GetMetaBase(metaid) 128 | } 129 | 130 | func (c *Controller) GetClusterGroupAllEngines(groupid string) []*cluster.Engine { 131 | 132 | return c.Cluster.GetGroupAllEngines(groupid) 133 | } 134 | 135 | func (c *Controller) GetClusterEngine(server string) *cluster.Engine { 136 | 137 | s := cluster.ParseServer(server) 138 | return c.Cluster.GetServerOfEngines(s) 139 | } 140 | 141 | func (c *Controller) SetClusterServerNodeLabels(server string, labels map[string]string) error { 142 | 143 | s := cluster.ParseServer(server) 144 | return c.Cluster.SetServerNodeLabels(s, labels) 145 | } 146 | 147 | func (c *Controller) SetClusterEnableEvent(event string) { 148 | 149 | logger.INFO("[#ctrl#] set cluster enable %s.", event) 150 | if event == request.CLUSTER_ENABLE_EVENT { 151 | c.initCluster() 152 | } 153 | } 154 | 155 | func (c *Controller) SetClusterGroupEvent(groupid string, event string) { 156 | 157 | logger.INFO("[#ctrl#] set cluster groupevent %s.", event) 158 | switch event { 159 | case request.GROUP_CREATE_EVENT, request.GROUP_CHANGE_EVENT: 160 | { 161 | if groups := c.getClusterGroupStoreData(groupid); groups != nil { 162 | logger.INFO("[#ctrl#] get cluster groups:%d", len(groups)) 163 | if len(groups) > 0 { 164 | for _, group := range groups { 165 | if group.IsCluster { 166 | if c.Cluster.Location == "" || strings.ToUpper(group.Location) == strings.ToUpper(c.Cluster.Location) { 167 | c.Cluster.SetGroup(group) 168 | } else { // group location changed 169 | if c.Cluster.GetGroup(groupid) != nil { 170 | c.Cluster.RemoveGroup(groupid) 171 | } 172 | } 173 | } 174 | } 175 | } else { // group iscluster change to false 176 | if c.Cluster.GetGroup(groupid) != nil { 177 | c.Cluster.RemoveGroup(groupid) 178 | } 179 | } 180 | } 181 | } 182 | case 
request.GROUP_REMOVE_EVENT: 183 | { // group removed 184 | if c.Cluster.GetGroup(groupid) != nil { 185 | c.Cluster.RemoveGroup(groupid) 186 | } 187 | } 188 | } 189 | } 190 | 191 | func (c *Controller) CreateClusterContainers(groupid string, instances int, webhooks types.WebHooks, placement types.Placement, config models.Container, option types.CreateOption) (string, *types.CreatedContainers, error) { 192 | 193 | return c.Cluster.CreateContainers(groupid, instances, webhooks, placement, config, option) 194 | } 195 | 196 | func (c *Controller) UpdateClusterContainers(metaid string, instances int, webhooks types.WebHooks, placement types.Placement, config models.Container, option types.UpdateOption) (*types.CreatedContainers, error) { 197 | 198 | return c.Cluster.UpdateContainers(metaid, instances, webhooks, placement, config, option) 199 | } 200 | 201 | func (c *Controller) OperateContainers(metaid string, action string) (*types.OperatedContainers, error) { 202 | 203 | return c.Cluster.OperateContainers(metaid, "", action) 204 | } 205 | 206 | func (c *Controller) OperateContainer(containerid string, action string) (string, *types.OperatedContainers, error) { 207 | 208 | return c.Cluster.OperateContainer(containerid, action) 209 | } 210 | 211 | func (c *Controller) UpgradeContainers(metaid string, imagetag string) (*types.UpgradeContainers, error) { 212 | 213 | return c.Cluster.UpgradeContainers(metaid, imagetag) 214 | } 215 | 216 | func (c *Controller) RemoveContainersOfMetaName(groupid string, metaname string) (string, *types.RemovedContainers, error) { 217 | 218 | return c.Cluster.RemoveContainersOfMetaName(groupid, metaname) 219 | } 220 | 221 | func (c *Controller) RemoveContainers(metaid string) (*types.RemovedContainers, error) { 222 | 223 | return c.Cluster.RemoveContainers(metaid, "") 224 | } 225 | 226 | func (c *Controller) RemoveContainer(containerid string) (string, *types.RemovedContainers, error) { 227 | 228 | return c.Cluster.RemoveContainer(containerid) 229 | } 230 | -------------------------------------------------------------------------------- /cluster/upgrade.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "github.com/humpback/common/models" 5 | 6 | import ( 7 | "fmt" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | // UpgradeState is exported 13 | type UpgradeState int 14 | 15 | const ( 16 | // UpgradeReady is exported, engine container upgrade is ready 17 | UpgradeReady = iota + 1 18 | // UpgradeIgnore is exported, engine healthy is false, can't upgrade, migrate container. 19 | UpgradeIgnore 20 | // UpgradeCompleted is exported, engine container upgraded completed. 21 | UpgradeCompleted 22 | // UpgradeFailure is exported, engine container upgraded failure. 23 | UpgradeFailure 24 | // UpgradeRecovery is exported, engine container recovery upgraded. 
25 | UpgradeRecovery 26 | ) 27 | 28 | // UpgradeContainer is exported 29 | type UpgradeContainer struct { 30 | Original *Container 31 | New *Container 32 | State UpgradeState 33 | } 34 | 35 | // Execute is exported 36 | // upgrade originalContainer to image new tag 37 | func (upgradeContainer *UpgradeContainer) Execute(newImageTag string) error { 38 | 39 | engine := upgradeContainer.Original.Engine 40 | if !engine.IsHealthy() { 41 | upgradeContainer.State = UpgradeIgnore 42 | return nil 43 | } 44 | 45 | originalContainer := upgradeContainer.Original 46 | containerOperate := models.ContainerOperate{Action: "upgrade", Container: originalContainer.Config.ID, ImageTag: newImageTag} 47 | newContainer, err := engine.ForceUpgradeContainer(containerOperate) 48 | if err != nil { 49 | upgradeContainer.State = UpgradeFailure 50 | return fmt.Errorf("engine %s %s", engine.IP, err.Error()) 51 | } 52 | upgradeContainer.New = newContainer 53 | upgradeContainer.State = UpgradeCompleted 54 | return nil 55 | } 56 | 57 | // Recovery is exported 58 | // upgrade container failure, recovery completed containers to original image tag 59 | func (upgradeContainer *UpgradeContainer) Recovery(originalImageTag string) error { 60 | 61 | engine := upgradeContainer.Original.Engine 62 | if !engine.IsHealthy() { 63 | return nil 64 | } 65 | 66 | upgradeContainer.State = UpgradeFailure 67 | newContainer := upgradeContainer.New 68 | containerOperate := models.ContainerOperate{Action: "upgrade", Container: newContainer.Config.ID, ImageTag: originalImageTag} 69 | newContainer, err := engine.ForceUpgradeContainer(containerOperate) 70 | if err != nil { 71 | return fmt.Errorf("engine %s %s", engine.IP, err.Error()) 72 | } 73 | upgradeContainer.Original = newContainer 74 | upgradeContainer.State = UpgradeRecovery 75 | return nil 76 | } 77 | 78 | // Upgrader is exported 79 | type Upgrader struct { 80 | sync.RWMutex 81 | MetaID string 82 | OriginalTag string 83 | NewTag string 84 | configCache *ContainersConfigCache 85 | delayInterval time.Duration 86 | callback UpgraderHandleFunc 87 | containers []*UpgradeContainer 88 | } 89 | 90 | // NewUpgrader is exported 91 | func NewUpgrader(metaid string, originalTag string, newTag string, containers Containers, upgradeDelay time.Duration, 92 | configCache *ContainersConfigCache, callback UpgraderHandleFunc) *Upgrader { 93 | 94 | upgradeContainers := []*UpgradeContainer{} 95 | for _, container := range containers { 96 | if container.BaseConfig != nil && container.Engine != nil { 97 | upgradeContainers = append(upgradeContainers, &UpgradeContainer{ 98 | Original: container, 99 | New: nil, 100 | State: UpgradeReady, 101 | }) 102 | } 103 | } 104 | 105 | return &Upgrader{ 106 | MetaID: metaid, 107 | OriginalTag: originalTag, 108 | NewTag: newTag, 109 | configCache: configCache, 110 | delayInterval: upgradeDelay, 111 | callback: callback, 112 | containers: upgradeContainers, 113 | } 114 | } 115 | 116 | // Start is exported 117 | func (upgrader *Upgrader) Start(upgradeCh chan<- bool) { 118 | 119 | var ( 120 | err error 121 | ret bool 122 | errMsgs []string 123 | ) 124 | 125 | ret = true 126 | errMsgs = []string{} 127 | upgrader.Lock() 128 | defer upgrader.Unlock() 129 | upgrader.configCache.SetImageTag(upgrader.MetaID, upgrader.NewTag) 130 | for _, upgradeContainer := range upgrader.containers { 131 | if err = upgradeContainer.Execute(upgrader.NewTag); err != nil { 132 | upgrader.configCache.RemoveContainerBaseConfig(upgrader.MetaID, upgradeContainer.Original.Config.ID) 133 | errMsgs = 
append(errMsgs, "upgrade container execute, "+err.Error()) 134 | logger.ERROR("[#cluster#] upgrade container %s execute %s", ShortContainerID(upgradeContainer.Original.Config.ID), err.Error()) 135 | break 136 | } 137 | } 138 | 139 | if err != nil { //recovery upgrade completed containers 140 | ret = false 141 | upgrader.configCache.SetImageTag(upgrader.MetaID, upgrader.OriginalTag) 142 | for _, upgradeContainer := range upgrader.containers { 143 | if upgradeContainer.State == UpgradeCompleted { 144 | if err := upgradeContainer.Recovery(upgrader.OriginalTag); err != nil { 145 | upgrader.configCache.RemoveContainerBaseConfig(upgrader.MetaID, upgradeContainer.New.Config.ID) 146 | errMsgs = append(errMsgs, "upgrade container recovery, "+err.Error()) 147 | logger.ERROR("[#cluster#] upgrade container %s recovery %s", ShortContainerID(upgradeContainer.New.Config.ID), err.Error()) 148 | } 149 | } 150 | } 151 | } 152 | upgrader.callback(upgrader, errMsgs) 153 | upgradeCh <- ret 154 | } 155 | 156 | // UpgraderHandleFunc exported 157 | type UpgraderHandleFunc func(upgrader *Upgrader, errMsgs []string) 158 | 159 | // UpgradeContainersCache is exported 160 | type UpgradeContainersCache struct { 161 | sync.RWMutex 162 | Cluster *Cluster 163 | delayInterval time.Duration 164 | upgraders map[string]*Upgrader 165 | } 166 | 167 | // NewUpgradeContainersCache is exported 168 | func NewUpgradeContainersCache(upgradeDelay time.Duration) *UpgradeContainersCache { 169 | 170 | return &UpgradeContainersCache{ 171 | delayInterval: upgradeDelay, 172 | upgraders: make(map[string]*Upgrader), 173 | } 174 | } 175 | 176 | // SetCluster is exported 177 | func (cache *UpgradeContainersCache) SetCluster(cluster *Cluster) { 178 | 179 | cache.Cluster = cluster 180 | } 181 | 182 | // Upgrade is exported 183 | func (cache *UpgradeContainersCache) Upgrade(upgradeCh chan<- bool, metaid string, newTag string, containers Containers) { 184 | 185 | if cache.Cluster == nil || cache.Cluster.configCache == nil { 186 | return 187 | } 188 | 189 | configCache := cache.Cluster.configCache 190 | metaData := configCache.GetMetaData(metaid) 191 | if metaData == nil { 192 | return 193 | } 194 | 195 | cache.Lock() 196 | if _, ret := cache.upgraders[metaid]; !ret { 197 | upgrader := NewUpgrader(metaData.MetaID, metaData.ImageTag, newTag, containers, cache.delayInterval, configCache, cache.UpgraderHandleFunc) 198 | if upgrader != nil { 199 | cache.upgraders[metaData.MetaID] = upgrader 200 | logger.INFO("[#cluster#] upgrade start %s > %s", upgrader.MetaID, upgrader.NewTag) 201 | go upgrader.Start(upgradeCh) 202 | } 203 | } 204 | cache.Unlock() 205 | } 206 | 207 | // Contains is exported 208 | func (cache *UpgradeContainersCache) Contains(metaid string) bool { 209 | 210 | cache.RLock() 211 | defer cache.RUnlock() 212 | _, ret := cache.upgraders[metaid] 213 | return ret 214 | } 215 | 216 | // UpgraderHandleFunc is exported 217 | func (cache *UpgradeContainersCache) UpgraderHandleFunc(upgrader *Upgrader, errMsgs []string) { 218 | 219 | cache.Lock() 220 | delete(cache.upgraders, upgrader.MetaID) 221 | cache.Unlock() 222 | if cache.Cluster != nil { 223 | if _, engines, err := cache.Cluster.GetMetaDataEngines(upgrader.MetaID); err == nil { 224 | metaEngines := make(map[string]*Engine) 225 | for _, engine := range engines { 226 | if engine.IsHealthy() && engine.HasMeta(upgrader.MetaID) { 227 | metaEngines[engine.IP] = engine 228 | } 229 | } 230 | cache.Cluster.RefreshEnginesContainers(metaEngines) 231 | } 232 | } 233 | if len(errMsgs) > 0 { 234 | 
logger.ERROR("[#cluster#] upgrade failure %s > %s", upgrader.MetaID, upgrader.NewTag) 235 | } else { 236 | logger.INFO("[#cluster#] upgrade done %s > %s", upgrader.MetaID, upgrader.NewTag) 237 | } 238 | } 239 | -------------------------------------------------------------------------------- /cluster/types/node.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/docker/docker/client" 4 | import "github.com/humpback/common/models" 5 | import "github.com/humpback/discovery" 6 | import "github.com/humpback/discovery/backends" 7 | import "github.com/humpback/gounits/json" 8 | import "github.com/humpback/gounits/rand" 9 | 10 | import ( 11 | "context" 12 | "log" 13 | "net" 14 | "os" 15 | "sort" 16 | "strconv" 17 | "strings" 18 | "sync" 19 | "time" 20 | ) 21 | 22 | //NodeRegisterOptions is exported 23 | type NodeRegisterOptions struct { 24 | IPAddr string 25 | BindPort int 26 | NodeConfig *models.Config 27 | } 28 | 29 | // NewNodeRegisterOptions is exported 30 | func NewNodeRegisterOptions(ipAddr string, bindPort int, nodeConfig *models.Config) *NodeRegisterOptions { 31 | 32 | return &NodeRegisterOptions{ 33 | IPAddr: ipAddr, 34 | BindPort: bindPort, 35 | NodeConfig: nodeConfig, 36 | } 37 | } 38 | 39 | //NodeClusterOptions is exported 40 | type NodeClusterOptions struct { 41 | ClusterName string 42 | ClusterURIs string 43 | ClusterHeartBeat time.Duration 44 | ClusterTTL time.Duration 45 | } 46 | 47 | //NodeData is exported 48 | type NodeData struct { 49 | ID string `json:"id"` 50 | Name string `json:"name"` 51 | IP string `json:"ip"` 52 | APIAddr string `json:"apiaddr"` 53 | Cpus int64 `json:"cpus"` 54 | Memory int64 `json:"memory"` 55 | StorageDirver string `json:"storagedirver"` 56 | KernelVersion string `json:"kernelversion"` 57 | Architecture string `json:"architecture"` 58 | OperatingSystem string `json:"operatingsystem"` 59 | OSType string `json:"ostype"` 60 | EngineLabels []string `json:"lables"` 61 | AppVersion string `json:"appversion"` 62 | DockerVersion string `json:"dockerversion"` 63 | timestamp int64 64 | } 65 | 66 | // MapEngineLabels is exported 67 | // covert nodedata's engine labels []string to map 68 | func (nodeData *NodeData) MapEngineLabels() map[string]string { 69 | 70 | labels := map[string]string{} 71 | for _, label := range nodeData.EngineLabels { 72 | kv := strings.SplitN(label, "=", 2) 73 | if len(kv) == 2 { 74 | labels[kv[0]] = kv[1] 75 | } 76 | } 77 | return labels 78 | } 79 | 80 | // NodeOptions is exported 81 | type NodeOptions struct { 82 | NodeData 83 | NodeClusterOptions 84 | } 85 | 86 | var node *Node 87 | 88 | // Node is exported 89 | type Node struct { 90 | Key string 91 | Cluster string 92 | discovery *discovery.Discovery 93 | data *NodeData 94 | stopCh chan struct{} 95 | quitCh chan struct{} 96 | } 97 | 98 | // GetNodeData is exported 99 | func GetNodeData() *NodeData { 100 | 101 | if node != nil { 102 | return node.data 103 | } 104 | return nil 105 | } 106 | 107 | // NodeRegister is exported 108 | // register to cluster discovery 109 | func NodeRegister(options *NodeRegisterOptions) error { 110 | 111 | nodeOptions, err := createNodeOptions(options) 112 | if err != nil { 113 | return err 114 | } 115 | 116 | if _, err := createNode(nodeOptions); err != nil { 117 | return err 118 | } 119 | 120 | buf, err := json.EnCodeObjectToBuffer(&nodeOptions.NodeData) 121 | if err != nil { 122 | return err 123 | } 124 | 125 | log.Printf("register to cluster - %s %s [addr:%s]\n", node.Cluster, 
node.Key, nodeOptions.NodeData.APIAddr) 126 | node.discovery.Register(node.Key, buf, node.stopCh, func(key string, err error) { 127 | log.Printf("discovery register %s error:%s\n", key, err.Error()) 128 | if err == backends.ErrRegistLoopQuit { 129 | close(node.quitCh) 130 | } 131 | }) 132 | return nil 133 | } 134 | 135 | // NodeClose is exported 136 | // register close 137 | func NodeClose() { 138 | 139 | if node != nil { 140 | close(node.stopCh) //close register loop 141 | <-node.quitCh 142 | node = nil 143 | log.Printf("register closed.\n") 144 | } 145 | } 146 | 147 | // createNode is exported 148 | // create cluster discovery node 149 | func createNode(nodeOptions *NodeOptions) (*Node, error) { 150 | 151 | if node == nil { 152 | key, err := rand.UUIDFile("./humpback-agent.key") 153 | if err != nil { 154 | return nil, err 155 | } 156 | clusterName := nodeOptions.ClusterName 157 | configOpts := map[string]string{"kv.path": clusterName} 158 | d, err := discovery.New(nodeOptions.ClusterURIs, nodeOptions.ClusterHeartBeat, nodeOptions.ClusterTTL, configOpts) 159 | if err != nil { 160 | return nil, err 161 | } 162 | node = &Node{ 163 | Key: key, 164 | Cluster: clusterName, 165 | discovery: d, 166 | data: &nodeOptions.NodeData, 167 | stopCh: make(chan struct{}), 168 | quitCh: make(chan struct{}), 169 | } 170 | } 171 | return node, nil 172 | } 173 | 174 | // createNodeOptions is exported 175 | // create cluster node options 176 | func createNodeOptions(options *NodeRegisterOptions) (*NodeOptions, error) { 177 | 178 | heartbeat, err := time.ParseDuration(options.NodeConfig.DockerClusterHeartBeat) 179 | if err != nil { 180 | return nil, err 181 | } 182 | 183 | ttl, err := time.ParseDuration(options.NodeConfig.DockerClusterTTL) 184 | if err != nil { 185 | return nil, err 186 | } 187 | 188 | defaultHeaders := map[string]string{"User-Agent": "engine-api-cli-1.0"} 189 | dockerClient, err := client.NewClient(options.NodeConfig.DockerEndPoint, options.NodeConfig.DockerAPIVersion, nil, defaultHeaders) 190 | if err != nil { 191 | return nil, err 192 | } 193 | 194 | engineInfo, err := dockerClient.Info(context.Background()) 195 | if err != nil { 196 | return nil, err 197 | } 198 | 199 | hostName, err := os.Hostname() 200 | if err != nil { 201 | hostName = engineInfo.Name 202 | } 203 | 204 | apiAddr := net.JoinHostPort(options.IPAddr, strconv.Itoa(options.BindPort)) 205 | return &NodeOptions{ 206 | NodeData: NodeData{ 207 | ID: engineInfo.ID, 208 | Name: hostName, 209 | IP: options.IPAddr, 210 | APIAddr: apiAddr, 211 | Cpus: (int64)(engineInfo.NCPU), 212 | Memory: engineInfo.MemTotal, 213 | StorageDirver: engineInfo.Driver, 214 | KernelVersion: engineInfo.KernelVersion, 215 | Architecture: engineInfo.Architecture, 216 | OperatingSystem: engineInfo.OperatingSystem, 217 | OSType: engineInfo.OSType, 218 | EngineLabels: engineInfo.Labels, 219 | AppVersion: options.NodeConfig.AppVersion, 220 | DockerVersion: engineInfo.ServerVersion, 221 | }, 222 | NodeClusterOptions: NodeClusterOptions{ 223 | ClusterName: options.NodeConfig.DockerClusterName, 224 | ClusterURIs: options.NodeConfig.DockerClusterURIs, 225 | ClusterHeartBeat: heartbeat, 226 | ClusterTTL: ttl, 227 | }, 228 | }, nil 229 | } 230 | 231 | // NodeCache is exported 232 | type NodeCache struct { 233 | sync.RWMutex 234 | nodes map[string]*NodeData 235 | } 236 | 237 | // NewNodeCache is exported 238 | func NewNodeCache() *NodeCache { 239 | 240 | return &NodeCache{ 241 | nodes: make(map[string]*NodeData), 242 | } 243 | } 244 | 245 | // Add is exported 246 | 
// nodeCache add online nodeData. 247 | func (cache *NodeCache) Add(key string, nodeData *NodeData) { 248 | 249 | cache.Lock() 250 | if _, ret := cache.nodes[key]; !ret { 251 | nodeData.timestamp = time.Now().UnixNano() 252 | cache.nodes[key] = nodeData 253 | } 254 | cache.Unlock() 255 | } 256 | 257 | // Remove is exported 258 | // nodeCache remove offline nodeData. 259 | func (cache *NodeCache) Remove(key string) { 260 | 261 | cache.Lock() 262 | delete(cache.nodes, key) 263 | cache.Unlock() 264 | } 265 | 266 | // Node is exported 267 | // nodeCache get nodedata of key 268 | func (cache *NodeCache) Node(key string) *NodeData { 269 | 270 | cache.RLock() 271 | defer cache.RUnlock() 272 | if nodeData, ret := cache.nodes[key]; ret { 273 | return nodeData 274 | } 275 | return nil 276 | } 277 | 278 | // Get is exported 279 | // nodeCache get nodeData of server ip or server hostname 280 | func (cache *NodeCache) Get(IPOrName string) *NodeData { 281 | 282 | nodes := []*NodeData{} 283 | cache.RLock() 284 | defer cache.RUnlock() 285 | for _, nodeData := range cache.nodes { 286 | if nodeData.IP == IPOrName { 287 | nodes = append(nodes, nodeData) 288 | } 289 | } 290 | 291 | for _, nodeData := range cache.nodes { 292 | if nodeData.Name == IPOrName { 293 | nodes = append(nodes, nodeData) 294 | } 295 | } 296 | 297 | less := func(i, j int) bool { 298 | return nodes[i].timestamp > nodes[j].timestamp 299 | } 300 | 301 | if len(nodes) > 0 { 302 | sort.Slice(nodes, less) 303 | return nodes[0] 304 | } 305 | return nil 306 | } 307 | 308 | // ContainsOtherKey is exported 309 | func (cache *NodeCache) ContainsOtherKey(key string, IPOrName string) bool { 310 | 311 | cache.RLock() 312 | defer cache.RUnlock() 313 | for k, nodeData := range cache.nodes { 314 | if k != key && (nodeData.IP == IPOrName || nodeData.Name == IPOrName) { 315 | return true 316 | } 317 | } 318 | return false 319 | } 320 | -------------------------------------------------------------------------------- /api/response/respcluster.go: -------------------------------------------------------------------------------- 1 | package response 2 | 3 | import "github.com/humpback/humpback-center/cluster" 4 | import "github.com/humpback/humpback-center/cluster/types" 5 | import "github.com/humpback/common/models" 6 | import units "github.com/docker/go-units" 7 | 8 | /* 9 | GroupAllContainersResponse is exported 10 | Method: GET 11 | Route: /v1/groups/{groupid}/collections 12 | */ 13 | type GroupAllContainersResponse struct { 14 | GroupID string `json:"GroupId"` 15 | Containers *types.GroupContainers `json:"Containers"` 16 | } 17 | 18 | // NewGroupAllContainersResponse is exported 19 | func NewGroupAllContainersResponse(groupid string, containers *types.GroupContainers) *GroupAllContainersResponse { 20 | 21 | return &GroupAllContainersResponse{ 22 | GroupID: groupid, 23 | Containers: containers, 24 | } 25 | } 26 | 27 | /* 28 | GroupContainersResponse is exported 29 | Method: GET 30 | Route: /v1/groups/collections/{metaid} 31 | */ 32 | type GroupContainersResponse struct { 33 | Container *types.GroupContainer `json:"Container"` 34 | } 35 | 36 | // NewGroupContainersResponse is exported 37 | func NewGroupContainersResponse(container *types.GroupContainer) *GroupContainersResponse { 38 | 39 | return &GroupContainersResponse{ 40 | Container: container, 41 | } 42 | } 43 | 44 | // ContainersMetaBase is exported 45 | type ContainersMetaBase struct { 46 | GroupID string `json:"GroupId"` 47 | MetaID string `json:"MetaId"` 48 | IsRemoveDelay bool 
`json:"IsRemoveDelay"` 49 | IsRecovery bool `json:"IsRecovery"` 50 | Instances int `json:"Instances"` 51 | Placement types.Placement `json:"Placement"` 52 | WebHooks types.WebHooks `json:"WebHooks"` 53 | ImageTag string `json:"ImageTag"` 54 | models.Container 55 | CreateAt int64 `json:"CreateAt"` 56 | LastUpdateAt int64 `json:"LastUpdateAt"` 57 | } 58 | 59 | /* 60 | GroupContainersMetaBaseResponse is exported 61 | Method: GET 62 | Route: /v1/groups/collections/{metaid}/base 63 | */ 64 | type GroupContainersMetaBaseResponse struct { 65 | MetaBase *ContainersMetaBase `json:"MetaBase"` 66 | } 67 | 68 | // NewGroupContainersMetaBaseResponse is exported 69 | func NewGroupContainersMetaBaseResponse(metaBase *cluster.MetaBase) *GroupContainersMetaBaseResponse { 70 | 71 | if metaBase.Config.DNS == nil { 72 | metaBase.Config.DNS = []string{} 73 | } 74 | 75 | if metaBase.Config.Env == nil { 76 | metaBase.Config.Env = []string{} 77 | } 78 | 79 | if metaBase.Config.ExtraHosts == nil { 80 | metaBase.Config.ExtraHosts = []string{} 81 | } 82 | 83 | if metaBase.Config.Links == nil { 84 | metaBase.Config.Links = []string{} 85 | } 86 | 87 | if metaBase.Config.Labels == nil { 88 | metaBase.Config.Labels = map[string]string{} 89 | } 90 | 91 | if metaBase.Config.Ports == nil { 92 | metaBase.Config.Ports = []models.PortBinding{} 93 | } 94 | 95 | if metaBase.Config.Volumes == nil { 96 | metaBase.Config.Volumes = []models.VolumesBinding{} 97 | } 98 | 99 | if metaBase.Config.Ulimits == nil { 100 | metaBase.Config.Ulimits = []*units.Ulimit{} 101 | } 102 | 103 | containersMetaBase := &ContainersMetaBase{ 104 | GroupID: metaBase.GroupID, 105 | MetaID: metaBase.MetaID, 106 | IsRemoveDelay: metaBase.IsRemoveDelay, 107 | IsRecovery: metaBase.IsRecovery, 108 | Instances: metaBase.Instances, 109 | Placement: metaBase.Placement, 110 | WebHooks: metaBase.WebHooks, 111 | ImageTag: metaBase.ImageTag, 112 | Container: metaBase.Config, 113 | CreateAt: metaBase.CreateAt, 114 | LastUpdateAt: metaBase.LastUpdateAt, 115 | } 116 | 117 | return &GroupContainersMetaBaseResponse{ 118 | MetaBase: containersMetaBase, 119 | } 120 | } 121 | 122 | /* 123 | GroupEnginesResponse is exported 124 | Method: GET 125 | Route: /v1/groups/{groupid}/engines 126 | */ 127 | type GroupEnginesResponse struct { 128 | GroupID string `json:"GroupId"` 129 | Engines []*cluster.Engine `json:"Engines"` 130 | } 131 | 132 | // NewGroupEnginesResponse is exported 133 | func NewGroupEnginesResponse(groupid string, engines []*cluster.Engine) *GroupEnginesResponse { 134 | 135 | return &GroupEnginesResponse{ 136 | GroupID: groupid, 137 | Engines: engines, 138 | } 139 | } 140 | 141 | /* 142 | GroupEngineResponse is exported 143 | Method: GET 144 | Route: /v1/groups/engines/{server} 145 | */ 146 | type GroupEngineResponse struct { 147 | Engine *cluster.Engine `json:"Engine"` 148 | } 149 | 150 | // NewGroupEngineResponse is exported 151 | func NewGroupEngineResponse(engine *cluster.Engine) *GroupEngineResponse { 152 | 153 | return &GroupEngineResponse{ 154 | Engine: engine, 155 | } 156 | } 157 | 158 | /* 159 | ClusterEventResponse is exported 160 | Method: POST 161 | Route: /v1/cluster/event 162 | */ 163 | type ClusterEventResponse struct { 164 | Message string `json:"Message"` 165 | } 166 | 167 | // NewClusterEventResponse is exported 168 | func NewClusterEventResponse(message string) *ClusterEventResponse { 169 | 170 | return &ClusterEventResponse{ 171 | Message: message, 172 | } 173 | } 174 | 175 | /* 176 | GroupEventResponse is exported 177 | Method: POST 178 | 
Route: /v1/groups/event 179 | */ 180 | type GroupEventResponse struct { 181 | Message string `json:"Message"` 182 | } 183 | 184 | // NewGroupEventResponse is exported 185 | func NewGroupEventResponse(message string) *GroupEventResponse { 186 | 187 | return &GroupEventResponse{ 188 | Message: message, 189 | } 190 | } 191 | 192 | /* 193 | GroupCreateContainersResponse is exported 194 | Method: POST 195 | Route: /v1/groups/collections 196 | */ 197 | type GroupCreateContainersResponse struct { 198 | GroupID string `json:"GroupId"` 199 | MetaID string `json:"MetaId"` 200 | Created string `json:"Created"` 201 | Containers *types.CreatedContainers `json:"Containers"` 202 | } 203 | 204 | // NewGroupCreateContainersResponse is exported 205 | func NewGroupCreateContainersResponse(groupid string, metaid string, instances int, containers *types.CreatedContainers) *GroupCreateContainersResponse { 206 | 207 | created := "created all" 208 | if instances > len(*containers) { 209 | created = "created partial" 210 | } 211 | 212 | return &GroupCreateContainersResponse{ 213 | GroupID: groupid, 214 | MetaID: metaid, 215 | Created: created, 216 | Containers: containers, 217 | } 218 | } 219 | 220 | /* 221 | GroupUpdateContainersResponse is exported 222 | Method: PUT 223 | Route: /v1/groups/collections 224 | */ 225 | type GroupUpdateContainersResponse struct { 226 | MetaID string `json:"MetaId"` 227 | Updated string `json:"Updated"` 228 | Containers *types.CreatedContainers `json:"Containers"` 229 | } 230 | 231 | // NewGroupUpdateContainersResponse is exported 232 | func NewGroupUpdateContainersResponse(metaid string, instances int, containers *types.CreatedContainers) *GroupUpdateContainersResponse { 233 | 234 | updated := "updated all" 235 | if instances > len(*containers) { 236 | updated = "updated partial" 237 | } 238 | 239 | return &GroupUpdateContainersResponse{ 240 | MetaID: metaid, 241 | Updated: updated, 242 | Containers: containers, 243 | } 244 | } 245 | 246 | /* 247 | GroupOperateContainersResponse is exported 248 | Method: PUT 249 | Route1: /v1/groups/collections/action 250 | Route2: /v1/groups/container/action 251 | */ 252 | type GroupOperateContainersResponse struct { 253 | MetaID string `json:"MetaId"` 254 | Action string `json:"Action"` 255 | Containers *types.OperatedContainers `json:"Containers"` 256 | } 257 | 258 | // NewGroupOperateContainersResponse is exported 259 | func NewGroupOperateContainersResponse(metaid string, action string, containers *types.OperatedContainers) *GroupOperateContainersResponse { 260 | 261 | return &GroupOperateContainersResponse{ 262 | MetaID: metaid, 263 | Action: action, 264 | Containers: containers, 265 | } 266 | } 267 | 268 | /* 269 | GroupUpgradeContainersResponse is exported 270 | Method: PUT 271 | Route: /v1/groups/collections/upgrade 272 | */ 273 | type GroupUpgradeContainersResponse struct { 274 | MetaID string `json:"MetaId"` 275 | Upgrade string `json:"Upgrade"` 276 | Containers *types.UpgradeContainers `json:"Containers"` 277 | } 278 | 279 | // NewGroupUpgradeContainersResponse is exported 280 | func NewGroupUpgradeContainersResponse(metaid string, upgrade string, containers *types.UpgradeContainers) *GroupUpgradeContainersResponse { 281 | 282 | return &GroupUpgradeContainersResponse{ 283 | MetaID: metaid, 284 | Upgrade: upgrade, 285 | Containers: containers, 286 | } 287 | } 288 | 289 | /* 290 | GroupRemoveContainersResponse is exported 291 | Method: PUT 292 | Route1: /v1/groups/collections/{metaid} 293 | Route2: /v1/groups/container/{containerid} 294 | 
*/ 295 | type GroupRemoveContainersResponse struct { 296 | MetaID string `json:"MetaId"` 297 | Containers *types.RemovedContainers `json:"Containers"` 298 | } 299 | 300 | // NewGroupRemoveContainersResponse is exported 301 | func NewGroupRemoveContainersResponse(metaid string, containers *types.RemovedContainers) *GroupRemoveContainersResponse { 302 | 303 | return &GroupRemoveContainersResponse{ 304 | MetaID: metaid, 305 | Containers: containers, 306 | } 307 | } 308 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/LICENSE.code: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2014-2016 Docker, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | --------------------------------------------------------------------------------
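For readers tracing the upgrade logic in cluster/upgrade.go above: Upgrader.Start walks the meta's containers, stops at the first container whose Execute fails, then rolls every already-completed container back to the original image tag via Recovery before reporting the overall result on a channel. Below is a minimal, self-contained sketch of that control flow only. The item type and the upgrade, rollback, and run functions are hypothetical stand-ins added for illustration; they are not part of the humpback-center cluster package or its Engine/Container APIs.

package main

import (
	"errors"
	"fmt"
)

// state mirrors the idea of UpgradeState: ready, completed, failed, recovered.
type state int

const (
	ready state = iota + 1
	completed
	failed
	recovered
)

func (s state) String() string {
	return [...]string{"unknown", "ready", "completed", "failed", "recovered"}[s]
}

// item is a hypothetical stand-in for an UpgradeContainer.
type item struct {
	name  string
	state state
}

// upgrade pretends to move one container to the new image tag.
// The "bad" item simulates an engine-side failure.
func upgrade(it *item, newTag string) error {
	if it.name == "bad" {
		it.state = failed
		return errors.New("engine rejected tag " + newTag)
	}
	it.state = completed
	return nil
}

// rollback pretends to restore an already-upgraded container to the original tag.
func rollback(it *item, originalTag string) {
	it.state = recovered
}

// run mirrors the shape of Upgrader.Start: stop at the first failure, then roll
// back every item that had already completed, and signal success over done.
func run(items []*item, originalTag, newTag string, done chan<- bool) {
	var firstErr error
	for _, it := range items {
		if err := upgrade(it, newTag); err != nil {
			firstErr = err
			break
		}
	}
	if firstErr != nil {
		for _, it := range items {
			if it.state == completed {
				rollback(it, originalTag)
			}
		}
	}
	done <- firstErr == nil
}

func main() {
	items := []*item{
		{name: "web-1", state: ready},
		{name: "bad", state: ready},
		{name: "web-3", state: ready},
	}
	done := make(chan bool, 1)
	go run(items, "v1.0", "v1.1", done)
	fmt.Println("upgrade succeeded:", <-done) // false: "bad" fails, "web-1" is rolled back
	for _, it := range items {
		fmt.Println(it.name, it.state)
	}
}

The real Upgrader additionally serializes the run with a mutex, updates the ContainersConfigCache image tag before the loop and again on rollback, and reports per-container error messages through the UpgraderHandleFunc callback; the sketch above keeps only the stop-on-first-failure and roll-back-completed behaviour.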