├── notify ├── endpoint.go ├── factory.go ├── endpoint_smtp.go ├── typedef.go ├── endpoint_api.go ├── events.go ├── notify.go └── template.html ├── api ├── listener_windows.go ├── request │ └── request.go ├── middleware │ └── logger.go ├── utils.go ├── listener_linux.go ├── response │ ├── response.go │ └── respcluster.go ├── router.go ├── server.go └── context.go ├── cluster ├── storage │ ├── entry │ │ └── entry.go │ ├── dao │ │ ├── json.go │ │ └── dao.go │ ├── storage.go │ └── node │ │ └── node.go ├── types │ ├── webhook.go │ ├── placement.go │ ├── responseerror.go │ ├── upgradecontainer.go │ ├── options.go │ ├── removedcontainer.go │ ├── operatedcontainer.go │ ├── createdcontainer.go │ ├── groupcontainer.go │ └── node.go ├── reduce.go ├── errors.go ├── enginespriority.go ├── weighted.go ├── enginesfilter.go ├── notify.go ├── hooks.go ├── utils.go ├── enginespool.go ├── constraint.go ├── client.go ├── container.go └── upgrade.go ├── README.md ├── vendor └── github.com │ └── docker │ └── libkv │ ├── script │ ├── travis_zk.sh │ ├── travis_etcd.sh │ ├── travis_consul.sh │ ├── coverage │ ├── validate-gofmt │ └── .validate │ ├── libkv_test.go │ ├── MAINTAINERS │ ├── libkv.go │ ├── store │ ├── helpers.go │ ├── zookeeper │ │ └── zookeeper_test.go │ ├── etcd │ │ └── etcd_test.go │ ├── consul │ │ └── consul_test.go │ ├── mock │ │ └── mock.go │ ├── boltdb │ │ └── boltdb_test.go │ └── store.go │ ├── .travis.yml │ ├── docs │ ├── examples.md │ └── compatibility.md │ ├── README.md │ └── LICENSE.code ├── .gitignore ├── main.go ├── .editorconfig ├── Dockerfile ├── etc ├── config.yaml ├── configuration.go └── lookupenv.go ├── ctrl ├── controller.go └── cluster.go ├── server └── service.go └── LICENSE /notify/endpoint.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | // IEndPoint is exported 4 | // sender endPoint interface 5 | type IEndPoint interface { 6 | DoEvent(event *Event, data interface{}) 7 | } 8 | 
-------------------------------------------------------------------------------- /api/listener_windows.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "crypto/tls" 5 | "fmt" 6 | "net" 7 | ) 8 | 9 | func newUnixListener(addr string, tlsConfig *tls.Config) (net.Listener, error) { 10 | 11 | return nil, fmt.Errorf("Windows platform does not support a unix socket") 12 | } 13 | -------------------------------------------------------------------------------- /cluster/storage/entry/entry.go: -------------------------------------------------------------------------------- 1 | package entry 2 | 3 | import "github.com/humpback/humpback-center/cluster/types" 4 | 5 | //Node is exported 6 | type Node struct { 7 | *types.NodeData 8 | NodeLabels map[string]string `json:"nodelabels"` 9 | Availability string `json:"availability"` 10 | } 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## humpback-center 2 | 3 | Humpback Center 主要为 [Humpback](https://github.com/humpback/humpback) 平台提供集群容器调度服务,以集群中心角色实现各个 Group 的容器分配管理。 4 | 5 | ### API Usage 6 | 7 | [API 文档](https://github.com/humpback/humpback-center/wiki/api-usage) 8 | 9 | ### 模式架构 10 | 11 |  12 | 13 | ## License 14 | 15 | Apache-2.0 16 | 17 | 18 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/travis_zk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -gt 0 ] ; then 4 | ZK_VERSION="$1" 5 | else 6 | ZK_VERSION="3.4.7" 7 | fi 8 | 9 | wget "http://apache.cs.utah.edu/zookeeper/zookeeper-${ZK_VERSION}/zookeeper-${ZK_VERSION}.tar.gz" 10 | tar -xvf "zookeeper-${ZK_VERSION}.tar.gz" 11 | mv zookeeper-$ZK_VERSION zk 12 | mv ./zk/conf/zoo_sample.cfg ./zk/conf/zoo.cfg 13 | 
-------------------------------------------------------------------------------- /api/request/request.go: -------------------------------------------------------------------------------- 1 | package request 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | const ( 8 | RequestSuccessed int = 0 9 | RequestInvalid int = -1001 10 | RequestFailure int = -1002 11 | ) 12 | 13 | var ( 14 | ErrRequestSuccessed = errors.New("request successed") 15 | ErrRequestInvalid = errors.New("request resolve error") 16 | ErrRequestFailure = errors.New("request failure error") 17 | ) 18 | -------------------------------------------------------------------------------- /cluster/storage/dao/json.go: -------------------------------------------------------------------------------- 1 | package dao 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | // MarshalObject encodes an object to binary format 8 | func MarshalObject(object interface{}) ([]byte, error) { 9 | return json.Marshal(object) 10 | } 11 | 12 | // UnmarshalObject decodes an object from binary data 13 | func UnmarshalObject(data []byte, object interface{}) error { 14 | return json.Unmarshal(data, object) 15 | } 16 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/travis_etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -gt 0 ] ; then 4 | ETCD_VERSION="$1" 5 | else 6 | ETCD_VERSION="2.2.0" 7 | fi 8 | 9 | curl -L https://github.com/coreos/etcd/releases/download/v$ETCD_VERSION/etcd-v$ETCD_VERSION-linux-amd64.tar.gz -o etcd-v$ETCD_VERSION-linux-amd64.tar.gz 10 | tar xzvf etcd-v$ETCD_VERSION-linux-amd64.tar.gz 11 | mv etcd-v$ETCD_VERSION-linux-amd64 etcd 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 
| *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | *.exe 22 | *.test 23 | *.prof 24 | *.log 25 | humpback-center 26 | humpback-center.pid 27 | -------------------------------------------------------------------------------- /api/middleware/logger.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import "github.com/humpback/gounits/logger" 4 | 5 | import ( 6 | "net/http" 7 | "time" 8 | ) 9 | 10 | func Logger(inner http.Handler) http.Handler { 11 | 12 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 13 | t := time.Now() 14 | inner.ServeHTTP(w, r) 15 | logger.INFO("[#api#] HTTP %s\t%s\t%s", r.Method, r.RequestURI, time.Since(t)) 16 | }) 17 | } 18 | -------------------------------------------------------------------------------- /api/utils.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "net/http" 4 | 5 | func httpError(w http.ResponseWriter, err string, code int) { 6 | http.Error(w, err, code) 7 | } 8 | 9 | func writeCorsHeaders(w http.ResponseWriter, r *http.Request) { 10 | w.Header().Add("Access-Control-Allow-Origin", "*") 11 | w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept") 12 | w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS, HEAD") 13 | } 14 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/travis_consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ $# -gt 0 ] ; then 4 | CONSUL_VERSION="$1" 5 | else 6 | CONSUL_VERSION="0.5.2" 7 | fi 8 | 9 | # install consul 10 | wget 
"https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip" 11 | unzip "consul_${CONSUL_VERSION}_linux_amd64.zip" 12 | 13 | # make config for minimum ttl 14 | touch config.json 15 | echo "{\"session_ttl_min\": \"1s\"}" >> config.json 16 | 17 | # check 18 | ./consul --version 19 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/libkv_test.go: -------------------------------------------------------------------------------- 1 | package libkv 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv/store" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestNewStoreUnsupported(t *testing.T) { 12 | client := "localhost:9999" 13 | 14 | kv, err := NewStore( 15 | "unsupported", 16 | []string{client}, 17 | &store.Config{ 18 | ConnectionTimeout: 10 * time.Second, 19 | }, 20 | ) 21 | assert.Error(t, err) 22 | assert.Nil(t, kv) 23 | assert.Equal(t, "Backend storage not supported yet, please choose one of ", err.Error()) 24 | } 25 | -------------------------------------------------------------------------------- /api/listener_linux.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "crypto/tls" 5 | "net" 6 | "os" 7 | "syscall" 8 | ) 9 | 10 | func newUnixListener(addr string, tlsConfig *tls.Config) (net.Listener, error) { 11 | 12 | if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { 13 | return nil, err 14 | } 15 | 16 | mask := syscall.Umask(0777) 17 | defer syscall.Umask(mask) 18 | l, err := newListener("unix", addr, tlsConfig) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | if err := os.Chmod(addr, 0600); err != nil { 24 | return nil, err 25 | } 26 | return l, nil 27 | } 28 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | 
package main 2 | 3 | import "github.com/humpback/gounits/system" 4 | import "github.com/humpback/humpback-center/server" 5 | 6 | import ( 7 | "log" 8 | "os" 9 | ) 10 | 11 | func main() { 12 | 13 | service, err := server.NewCenterService() 14 | if err != nil { 15 | log.Printf("service error:%s\n", err.Error()) 16 | os.Exit(system.ErrorExitCode(err)) 17 | } 18 | 19 | defer func() { 20 | service.Stop() 21 | os.Exit(0) 22 | }() 23 | 24 | if err := service.Startup(); err != nil { 25 | log.Printf("service start error:%s\n", err.Error()) 26 | os.Exit(system.ErrorExitCode(err)) 27 | } 28 | system.InitSignal(nil) 29 | } 30 | -------------------------------------------------------------------------------- /cluster/types/webhook.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // WebHook is exported 4 | type WebHook struct { 5 | URL string `json:"Url"` 6 | SecretToken string `json:"SecretToken"` 7 | } 8 | 9 | // WebHooks is exported 10 | type WebHooks []WebHook 11 | 12 | // BindConfig is exported 13 | type BindConfig struct { 14 | Category string `json:"Category"` 15 | OwnerToken string `json:"OwnerToken"` 16 | Sandbox bool `json:"Sandbox"` 17 | Location string `json:"Location"` 18 | Protocol string `json:"Protocol"` 19 | Port int `json:"Port"` 20 | Health string `json:"Health"` 21 | APIIDs []string `json:"APIIDs"` 22 | } 23 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/coverage: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MODE="mode: count" 4 | ROOT=${TRAVIS_BUILD_DIR:-.}/../../.. 5 | 6 | # Grab the list of packages. 7 | # Exclude the API and CLI from coverage as it will be covered by integration tests. 8 | PACKAGES=`go list ./...` 9 | 10 | # Create the empty coverage file. 11 | echo $MODE > goverage.report 12 | 13 | # Run coverage on every package. 
14 | for package in $PACKAGES; do 15 | output="$ROOT/$package/coverage.out" 16 | 17 | go test -test.short -covermode=count -coverprofile=$output $package 18 | if [ -f "$output" ] ; then 19 | cat "$output" | grep -v "$MODE" >> goverage.report 20 | fi 21 | done 22 | -------------------------------------------------------------------------------- /cluster/types/placement.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // Spread is exported 4 | type Spread struct { 5 | SpreadDescriptor string `json:"SpreadDescriptor"` 6 | } 7 | 8 | // Preference is exported 9 | type Preference struct { 10 | Spread `json:"Spread"` 11 | } 12 | 13 | // Platform is exported 14 | type Platform struct { 15 | Architecture string `json:"Architecture"` 16 | OS string `json:"OS"` 17 | } 18 | 19 | // Placement is exported 20 | // Cluster services placement constraints 21 | type Placement struct { 22 | Constraints []string `json:"Constraints"` 23 | Preferences []Preference `json:"Preferences"` 24 | Platforms []Platform `json:"Platforms"` 25 | } 26 | -------------------------------------------------------------------------------- /notify/factory.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | //INotifyEndPointFactory is exported 4 | type INotifyEndPointFactory interface { 5 | CreateAPIEndPoint(endpoint EndPoint) IEndPoint 6 | CreateSMTPEndPoint(endpoint EndPoint) IEndPoint 7 | } 8 | 9 | //NotifyEndPointFactory is exported 10 | type NotifyEndPointFactory struct { 11 | INotifyEndPointFactory 12 | } 13 | 14 | //CreateAPIEndPoint is exported 15 | func (factory *NotifyEndPointFactory) CreateAPIEndPoint(endpoint EndPoint) IEndPoint { 16 | return NewAPIEndPoint(endpoint) 17 | } 18 | 19 | //CreateSMTPEndPoint is exported 20 | func (factory *NotifyEndPointFactory) CreateSMTPEndPoint(endpoint EndPoint) IEndPoint { 21 | return NewSMTPEndpoint(endpoint) 22 | } 23 | 
-------------------------------------------------------------------------------- /cluster/types/responseerror.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/gounits/httpx" 4 | 5 | import ( 6 | "fmt" 7 | ) 8 | 9 | /* 10 | Humpback api response exception struct 11 | */ 12 | 13 | // ResponseError is exported 14 | type ResponseError struct { 15 | Code int `json:"Code"` 16 | Detail string `json:"Detail"` 17 | Message string `json:"Message"` 18 | } 19 | 20 | // ParseHTTPResponseError is exported 21 | func ParseHTTPResponseError(response *httpx.HttpResponse) string { 22 | 23 | responseError := &ResponseError{} 24 | if err := response.JSON(responseError); err != nil { 25 | return fmt.Sprintf("engine client error, httpcode: %d", response.StatusCode()) 26 | } 27 | return responseError.Detail 28 | } 29 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig coding styles definitions. 
For more information about the 2 | # properties used in this file, please see the EditorConfig documentation: 3 | # http://editorconfig.org/ 4 | 5 | # indicate this is the root of the project 6 | root = true 7 | 8 | [*] 9 | charset = utf-8 10 | 11 | end_of_line = CRLF 12 | insert_final_newline = true 13 | trim_trailing_whitespace = true 14 | 15 | indent_style = space 16 | indent_size = 4 17 | 18 | [Makefile] 19 | indent_style = tab 20 | 21 | [*.md] 22 | trim_trailing_whitespace = false 23 | 24 | [*.go] 25 | indent_style = tab 26 | 27 | [*.json] 28 | charset = utf-8 29 | 30 | end_of_line = CRLF 31 | insert_final_newline = true 32 | trim_trailing_whitespace = true 33 | 34 | indent_style = space 35 | indent_size = 2 36 | 37 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM frolvlad/alpine-glibc:alpine-3.7 2 | 3 | MAINTAINER bobliu bobliu0909@gmail.com 4 | 5 | RUN apk add --no-cache bash 6 | 7 | RUN mkdir -p /opt/humpback-center/cache 8 | 9 | RUN mkdir -p /opt/humpback-center/etc 10 | 11 | RUN mkdir -p /opt/humpback-center/notify 12 | 13 | COPY etc/config.yaml /opt/humpback-center/etc/config.yaml 14 | 15 | COPY notify/template.html /opt/humpback-center/notify/template.html 16 | 17 | COPY humpback-center /opt/humpback-center/humpback-center 18 | 19 | COPY dumb-init /dumb-init 20 | 21 | ENTRYPOINT ["/dumb-init", "--"] 22 | 23 | WORKDIR /opt/humpback-center 24 | 25 | VOLUME ["/opt/humpback-center/etc"] 26 | 27 | VOLUME ["/opt/humpback-center/cache"] 28 | 29 | VOLUME ["/opt/humpback-center/logs"] 30 | 31 | CMD ["./humpback-center"] 32 | 33 | EXPOSE 8589 34 | -------------------------------------------------------------------------------- /cluster/types/upgradecontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/common/models" 4 | 5 | // 
UpgradeContainerResponse is exported 6 | type UpgradeContainerResponse struct { 7 | ID string `json:"Id"` 8 | } 9 | 10 | // UpgradeContainer is exported 11 | type UpgradeContainer struct { 12 | IP string `json:"IP"` 13 | HostName string `json:"HostName"` 14 | models.Container 15 | } 16 | 17 | // UpgradeContainers is exported 18 | type UpgradeContainers []*UpgradeContainer 19 | 20 | // SetUpgradePair is exported 21 | func (upgrade UpgradeContainers) SetUpgradePair(ip string, hostname string, container models.Container) UpgradeContainers { 22 | 23 | upgradeContainer := &UpgradeContainer{ 24 | IP: ip, 25 | HostName: hostname, 26 | Container: container, 27 | } 28 | upgrade = append(upgrade, upgradeContainer) 29 | return upgrade 30 | } 31 | -------------------------------------------------------------------------------- /api/response/response.go: -------------------------------------------------------------------------------- 1 | package response 2 | 3 | type Response interface { 4 | SetError(code int, err error, content string) 5 | SetResponse(data interface{}) 6 | } 7 | 8 | /* 9 | 消息返回响应结构体 10 | Code: 响应码, == 0 成功, < 0 失败 11 | Error: 失败名称 12 | Content: 成功/失败描述 13 | ResponseID: 14 | Data: 响应数据 15 | */ 16 | type ResponseResult struct { 17 | Response `json:"-,omitempty"` 18 | Code int `json:"Code"` 19 | Error string `json:"Error"` 20 | Content string `json:"Contnet"` 21 | ResponseID string `json:"ResponseID"` 22 | Data interface{} `json:"Data,omitpty"` 23 | } 24 | 25 | func (r *ResponseResult) SetError(code int, err error, content string) { 26 | 27 | r.Code = code 28 | r.Error = err.Error() 29 | r.Content = content 30 | } 31 | 32 | func (r *ResponseResult) SetResponse(data interface{}) { 33 | 34 | r.Data = data 35 | } 36 | -------------------------------------------------------------------------------- /cluster/types/options.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | //CreateOption is exported 4 | 
//cluster create containers option values. 5 | //`IsReCreate` indicates whether to re-create an existing containers each time it is built. 6 | //`ForceRemove` is an attached property. When `IsReCreate` is true, it means to force delete or directly upgrade an existing containers. 7 | //`IsRemoveDelay` delay (8 minutes) remove unused containers for service debounce. 8 | //`IsRecovery` service containers recovery check enable. 9 | type CreateOption struct { 10 | IsReCreate bool `json:"IsReCreate"` 11 | ForceRemove bool `json:"ForceRemove"` 12 | IsRemoveDelay bool `json:"IsRemoveDelay"` 13 | IsRecovery bool `json:"IsRecovery"` 14 | } 15 | 16 | //UpdateOption is exported 17 | type UpdateOption struct { 18 | IsRemoveDelay bool `json:"IsRemoveDelay"` 19 | IsRecovery bool `json:"IsRecovery"` 20 | } 21 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/validate-gofmt: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "$(dirname "$BASH_SOURCE")/.validate" 4 | 5 | IFS=$'\n' 6 | files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^Godeps/' || true) ) 7 | unset IFS 8 | 9 | badFiles=() 10 | for f in "${files[@]}"; do 11 | # we use "git show" here to validate that what's committed is formatted 12 | if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then 13 | badFiles+=( "$f" ) 14 | fi 15 | done 16 | 17 | if [ ${#badFiles[@]} -eq 0 ]; then 18 | echo 'Congratulations! All Go source files are properly formatted.' 19 | else 20 | { 21 | echo "These files are not properly gofmt'd:" 22 | for f in "${badFiles[@]}"; do 23 | echo " - $f" 24 | done 25 | echo 26 | echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' 
27 | echo 28 | } >&2 29 | false 30 | fi 31 | -------------------------------------------------------------------------------- /cluster/types/removedcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // RemovedContainer is exported 4 | type RemovedContainer struct { 5 | IP string `json:"IP"` 6 | HostName string `json:"HostName"` 7 | ContainerID string `json:"ContainerId"` 8 | Result string `json:"Result"` 9 | } 10 | 11 | // RemovedContainers is exported 12 | type RemovedContainers []*RemovedContainer 13 | 14 | // SetRemovedPair is exported 15 | func (removed RemovedContainers) SetRemovedPair(ip string, hostname string, containerid string, err error) RemovedContainers { 16 | 17 | result := "remove successed." 18 | if err != nil { 19 | result = "remove failure, " + err.Error() 20 | } 21 | 22 | removedContainer := &RemovedContainer{ 23 | IP: ip, 24 | HostName: hostname, 25 | ContainerID: containerid, 26 | Result: result, 27 | } 28 | removed = append(removed, removedContainer) 29 | return removed 30 | } 31 | -------------------------------------------------------------------------------- /cluster/types/operatedcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | // OperatedContainer is exported 4 | type OperatedContainer struct { 5 | IP string `json:"IP"` 6 | HostName string `json:"HostName"` 7 | ContainerID string `json:"ContainerId"` 8 | Result string `json:"Result"` 9 | } 10 | 11 | // OperatedContainers is exported 12 | type OperatedContainers []*OperatedContainer 13 | 14 | // SetOperatedPair is exported 15 | func (operated OperatedContainers) SetOperatedPair(ip string, hostname string, containerid string, action string, err error) OperatedContainers { 16 | 17 | result := action + " successed." 
18 | if err != nil { 19 | result = action + " failure, " + err.Error() 20 | } 21 | 22 | operatedContainer := &OperatedContainer{ 23 | IP: ip, 24 | HostName: hostname, 25 | ContainerID: containerid, 26 | Result: result, 27 | } 28 | operated = append(operated, operatedContainer) 29 | return operated 30 | } 31 | -------------------------------------------------------------------------------- /cluster/types/createdcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/common/models" 4 | 5 | // CreateContainerResponse is exported 6 | type CreateContainerResponse struct { 7 | ID string `json:"Id"` 8 | Name string `json:"Name"` 9 | Warnings []string `json:"Warnings"` 10 | } 11 | 12 | // CreatedContainer is exported 13 | type CreatedContainer struct { 14 | IP string `json:"IP"` 15 | HostName string `json:"HostName"` 16 | models.Container 17 | } 18 | 19 | // CreatedContainers is exported 20 | type CreatedContainers []*CreatedContainer 21 | 22 | // SetCreatedPair is exported 23 | func (created CreatedContainers) SetCreatedPair(ip string, hostname string, container models.Container) CreatedContainers { 24 | 25 | createdContainer := &CreatedContainer{ 26 | IP: ip, 27 | HostName: hostname, 28 | Container: container, 29 | } 30 | created = append(created, createdContainer) 31 | return created 32 | } 33 | -------------------------------------------------------------------------------- /notify/endpoint_smtp.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "gopkg.in/gomail.v1" 5 | 6 | // SMTPEndPoint is exported 7 | type SMTPEndPoint struct { 8 | IEndPoint 9 | EndPoint 10 | mailer *gomail.Mailer 11 | } 12 | 13 | // NewSMTPEndpoint is exported 14 | func NewSMTPEndpoint(endpoint EndPoint) IEndPoint { 15 | 16 | mailer := gomail.NewMailer(endpoint.Host, endpoint.User, 
endpoint.Password, endpoint.Port) 17 | return &SMTPEndPoint{ 18 | EndPoint: endpoint, 19 | mailer: mailer, 20 | } 21 | } 22 | 23 | // DoEvent is exported 24 | func (endpoint *SMTPEndPoint) DoEvent(event *Event, data interface{}) { 25 | 26 | if !endpoint.Enabled { 27 | return 28 | } 29 | 30 | msg := gomail.NewMessage() 31 | msg.SetHeader("From", endpoint.Sender) 32 | msg.SetHeader("To", event.ContactInfo) 33 | msg.SetHeader("Subject", event.makeSubjectText()) 34 | msg.SetBody("text/html", data.(string)) 35 | if err := endpoint.mailer.Send(msg); err != nil { 36 | logger.ERROR("[#notify#] smtp endpoint post error: %s", err.Error()) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /cluster/types/groupcontainer.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import "github.com/humpback/common/models" 4 | 5 | // EngineContainer is exported 6 | type EngineContainer struct { 7 | IP string `json:"IP"` 8 | HostName string `json:"HostName"` 9 | Container models.Container `json:"Container"` 10 | } 11 | 12 | // GroupContainer is exported 13 | type GroupContainer struct { 14 | GroupID string `json:"GroupId"` 15 | MetaID string `json:"MetaId"` 16 | IsRemoveDelay bool `json:"IsRemoveDelay"` 17 | IsRecovery bool `json:"IsRecovery"` 18 | Instances int `json:"Instances"` 19 | Placement Placement `json:"Placement"` 20 | WebHooks WebHooks `json:"WebHooks"` 21 | Config models.Container `json:"Config"` 22 | Containers []*EngineContainer `json:"Containers"` 23 | CreateAt int64 `json:"CreateAt"` 24 | LastUpdateAt int64 `json:"LastUpdateAt"` 25 | } 26 | 27 | // GroupContainers is exported 28 | type GroupContainers []*GroupContainer 29 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/script/.validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z 
"$VALIDATE_UPSTREAM" ]; then 4 | # this is kind of an expensive check, so let's not do this twice if we 5 | # are running more than one validate bundlescript 6 | 7 | VALIDATE_REPO='https://github.com/docker/libkv.git' 8 | VALIDATE_BRANCH='master' 9 | 10 | if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then 11 | VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" 12 | VALIDATE_BRANCH="${TRAVIS_BRANCH}" 13 | fi 14 | 15 | VALIDATE_HEAD="$(git rev-parse --verify HEAD)" 16 | 17 | git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" 18 | VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" 19 | 20 | VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" 21 | VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" 22 | 23 | validate_diff() { 24 | if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then 25 | git diff "$VALIDATE_COMMIT_DIFF" "$@" 26 | fi 27 | } 28 | validate_log() { 29 | if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then 30 | git log "$VALIDATE_COMMIT_LOG" "$@" 31 | fi 32 | } 33 | fi 34 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/MAINTAINERS: -------------------------------------------------------------------------------- 1 | # Libkv maintainers file 2 | # 3 | # This file describes who runs the docker/libkv project and how. 4 | # This is a living document - if you see something out of date or missing, speak up! 5 | # 6 | # It is structured to be consumable by both humans and programs. 7 | # To extract its contents programmatically, use any TOML-compliant parser. 8 | # 9 | # This file is compiled into the MAINTAINERS file in docker/opensource. 10 | # 11 | [Org] 12 | [Org."Core maintainers"] 13 | people = [ 14 | "aluzzardi", 15 | "sanimej", 16 | "vieux", 17 | ] 18 | 19 | [people] 20 | 21 | # A reference list of all people associated with the project. 22 | # All other sections should refer to people by their canonical key 23 | # in the people section. 
24 | 25 | # ADD YOURSELF HERE IN ALPHABETICAL ORDER 26 | 27 | [people.aluzzardi] 28 | Name = "Andrea Luzzardi" 29 | Email = "al@docker.com" 30 | GitHub = "aluzzardi" 31 | 32 | [people.sanimej] 33 | Name = "Santhosh Manohar" 34 | Email = "santhosh@docker.com" 35 | GitHub = "sanimej" 36 | 37 | [people.vieux] 38 | Name = "Victor Vieux" 39 | Email = "vieux@docker.com" 40 | GitHub = "vieux" 41 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/libkv.go: -------------------------------------------------------------------------------- 1 | package libkv 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | 8 | "github.com/docker/libkv/store" 9 | ) 10 | 11 | // Initialize creates a new Store object, initializing the client 12 | type Initialize func(addrs []string, options *store.Config) (store.Store, error) 13 | 14 | var ( 15 | // Backend initializers 16 | initializers = make(map[store.Backend]Initialize) 17 | 18 | supportedBackend = func() string { 19 | keys := make([]string, 0, len(initializers)) 20 | for k := range initializers { 21 | keys = append(keys, string(k)) 22 | } 23 | sort.Strings(keys) 24 | return strings.Join(keys, ", ") 25 | }() 26 | ) 27 | 28 | // NewStore creates an instance of store 29 | func NewStore(backend store.Backend, addrs []string, options *store.Config) (store.Store, error) { 30 | if init, exists := initializers[backend]; exists { 31 | return init(addrs, options) 32 | } 33 | 34 | return nil, fmt.Errorf("%s %s", store.ErrBackendNotSupported.Error(), supportedBackend) 35 | } 36 | 37 | // AddStore adds a new store backend to libkv 38 | func AddStore(store store.Backend, init Initialize) { 39 | initializers[store] = init 40 | } 41 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/helpers.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | 
"strings" 5 | ) 6 | 7 | // CreateEndpoints creates a list of endpoints given the right scheme 8 | func CreateEndpoints(addrs []string, scheme string) (entries []string) { 9 | for _, addr := range addrs { 10 | entries = append(entries, scheme+"://"+addr) 11 | } 12 | return entries 13 | } 14 | 15 | // Normalize the key for each store to the form: 16 | // 17 | // /path/to/key 18 | // 19 | func Normalize(key string) string { 20 | return "/" + join(SplitKey(key)) 21 | } 22 | 23 | // GetDirectory gets the full directory part of 24 | // the key to the form: 25 | // 26 | // /path/to/ 27 | // 28 | func GetDirectory(key string) string { 29 | parts := SplitKey(key) 30 | parts = parts[:len(parts)-1] 31 | return "/" + join(parts) 32 | } 33 | 34 | // SplitKey splits the key to extract path informations 35 | func SplitKey(key string) (path []string) { 36 | if strings.Contains(key, "/") { 37 | path = strings.Split(key, "/") 38 | } else { 39 | path = []string{key} 40 | } 41 | return path 42 | } 43 | 44 | // join the path parts with '/' 45 | func join(parts []string) string { 46 | return strings.Join(parts, "/") 47 | } 48 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.7.1 5 | 6 | # let us have speedy Docker-based Travis workers 7 | sudo: false 8 | 9 | before_install: 10 | # Symlink below is needed for Travis CI to work correctly on personal forks of libkv 11 | - ln -s $HOME/gopath/src/github.com/${TRAVIS_REPO_SLUG///libkv/} $HOME/gopath/src/github.com/docker 12 | - go get golang.org/x/tools/cmd/cover 13 | - go get github.com/mattn/goveralls 14 | - go get github.com/golang/lint/golint 15 | - go get github.com/GeertJohan/fgt 16 | 17 | before_script: 18 | - script/travis_consul.sh 0.6.3 19 | - script/travis_etcd.sh 3.0.0 20 | - script/travis_zk.sh 3.5.1-alpha 21 | 22 | script: 23 | - 
./consul agent -server -bootstrap -advertise=127.0.0.1 -data-dir /tmp/consul -config-file=./config.json 1>/dev/null & 24 | - ./etcd/etcd --listen-client-urls 'http://0.0.0.0:4001' --advertise-client-urls 'http://127.0.0.1:4001' >/dev/null 2>&1 & 25 | - ./zk/bin/zkServer.sh start ./zk/conf/zoo.cfg 1> /dev/null 26 | - script/validate-gofmt 27 | - go vet ./... 28 | - fgt golint ./... 29 | - go test -v -race ./... 30 | - script/coverage 31 | - goveralls -service=travis-ci -coverprofile=goverage.report 32 | -------------------------------------------------------------------------------- /etc/config.yaml: -------------------------------------------------------------------------------- 1 | version: 1.3.7 2 | pidfile: ./humpback-center.pid 3 | retrystartup: true 4 | siteapi: http://192.168.2.80:8012/api 5 | cluster: 6 | opts: [ 7 | #"location=dev", 8 | "datapath=./data", 9 | "cacheroot=./cache", 10 | "overcommit=0.08", 11 | "recoveryinterval=320s", 12 | "createretry=2", 13 | "migratedelay=145s", 14 | "removedelay=500s" 15 | ] 16 | discovery: 17 | uris: etcd://192.168.2.80:2379 18 | cluster: humpback/center 19 | heartbeat: 8s 20 | api: 21 | hosts: [":8589"] 22 | enablecors: true 23 | notifications: 24 | endpoints: 25 | #- name: api 26 | # url: http://192.168.139.1:8009/framework/v1/mail 27 | # headers: 28 | # x-cluster-notify: ["endo"] 29 | # content-type: ["application/json; charset=utf-8"] 30 | # sender: humpback@newegg.com 31 | # enabled: true 32 | #- name: smtp 33 | # host: smtp.example.com 34 | # port: 25 35 | # user: u1 36 | # password: 123456 37 | # sender: humpback@newegg.com 38 | # enabled: true 39 | logger: 40 | logfile: ./logs/humpback-center.log 41 | loglevel: info 42 | logsize: 20971520 43 | ... 
44 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/zookeeper/zookeeper_test.go: -------------------------------------------------------------------------------- 1 | package zookeeper 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv" 8 | "github.com/docker/libkv/store" 9 | "github.com/docker/libkv/testutils" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | client = "localhost:2181" 15 | ) 16 | 17 | func makeZkClient(t *testing.T) store.Store { 18 | kv, err := New( 19 | []string{client}, 20 | &store.Config{ 21 | ConnectionTimeout: 3 * time.Second, 22 | }, 23 | ) 24 | 25 | if err != nil { 26 | t.Fatalf("cannot create store: %v", err) 27 | } 28 | 29 | return kv 30 | } 31 | 32 | func TestRegister(t *testing.T) { 33 | Register() 34 | 35 | kv, err := libkv.NewStore(store.ZK, []string{client}, nil) 36 | assert.NoError(t, err) 37 | assert.NotNil(t, kv) 38 | 39 | if _, ok := kv.(*Zookeeper); !ok { 40 | t.Fatal("Error registering and initializing zookeeper") 41 | } 42 | } 43 | 44 | func TestZkStore(t *testing.T) { 45 | kv := makeZkClient(t) 46 | ttlKV := makeZkClient(t) 47 | 48 | defer testutils.RunCleanup(t, kv) 49 | 50 | testutils.RunTestCommon(t, kv) 51 | testutils.RunTestAtomic(t, kv) 52 | testutils.RunTestWatch(t, kv) 53 | testutils.RunTestLock(t, kv) 54 | testutils.RunTestTTL(t, kv, ttlKV) 55 | } 56 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/etcd/etcd_test.go: -------------------------------------------------------------------------------- 1 | package etcd 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv" 8 | "github.com/docker/libkv/store" 9 | "github.com/docker/libkv/testutils" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | client = "localhost:4001" 15 | ) 16 | 17 | func makeEtcdClient(t *testing.T) store.Store { 18 | kv, err 
:= New( 19 | []string{client}, 20 | &store.Config{ 21 | ConnectionTimeout: 3 * time.Second, 22 | Username: "test", 23 | Password: "very-secure", 24 | }, 25 | ) 26 | 27 | if err != nil { 28 | t.Fatalf("cannot create store: %v", err) 29 | } 30 | 31 | return kv 32 | } 33 | 34 | func TestRegister(t *testing.T) { 35 | Register() 36 | 37 | kv, err := libkv.NewStore(store.ETCD, []string{client}, nil) 38 | assert.NoError(t, err) 39 | assert.NotNil(t, kv) 40 | 41 | if _, ok := kv.(*Etcd); !ok { 42 | t.Fatal("Error registering and initializing etcd") 43 | } 44 | } 45 | 46 | func TestEtcdStore(t *testing.T) { 47 | kv := makeEtcdClient(t) 48 | lockKV := makeEtcdClient(t) 49 | ttlKV := makeEtcdClient(t) 50 | 51 | defer testutils.RunCleanup(t, kv) 52 | 53 | testutils.RunTestCommon(t, kv) 54 | testutils.RunTestAtomic(t, kv) 55 | testutils.RunTestWatch(t, kv) 56 | testutils.RunTestLock(t, kv) 57 | testutils.RunTestLockTTL(t, kv, lockKV) 58 | testutils.RunTestLockWait(t, kv, lockKV) 59 | testutils.RunTestTTL(t, kv, ttlKV) 60 | } 61 | -------------------------------------------------------------------------------- /notify/typedef.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "net/http" 4 | 5 | //EndPoint is exported 6 | type EndPoint struct { 7 | Name string `yaml:"name"` 8 | URL string `yaml:"url"` 9 | Enabled bool `yaml:"enabled"` 10 | Sender string `yaml:"sender"` 11 | Headers http.Header `yaml:"headers"` 12 | Host string `yaml:"host"` 13 | Port int `yaml:"port"` 14 | User string `yaml:"user"` 15 | Password string `yaml:"password"` 16 | } 17 | 18 | //Notifications is exported 19 | type Notifications struct { 20 | EndPoints []EndPoint `yaml:"endpoints,omitempty"` 21 | } 22 | 23 | //Engine is exported 24 | type Engine struct { 25 | IP string 26 | Name string 27 | State string 28 | } 29 | 30 | //WatchGroup is exported 31 | type WatchGroup struct { 32 | GroupID string 33 | GroupName string 34 | Location string 
35 | ContactInfo string 36 | Engines []*Engine 37 | } 38 | 39 | //WatchGroups is exported 40 | type WatchGroups map[string]*WatchGroup 41 | 42 | //Container is exported 43 | type Container struct { 44 | ID string 45 | Name string 46 | Server string 47 | State string 48 | } 49 | 50 | //GroupMeta is exported 51 | type GroupMeta struct { 52 | MetaID string 53 | MetaName string 54 | Location string 55 | GroupID string 56 | GroupName string 57 | Instances int 58 | Image string 59 | ContactInfo string 60 | Engines []*Engine 61 | Containers []*Container 62 | } 63 | -------------------------------------------------------------------------------- /cluster/reduce.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | // ReduceEngine is exported 4 | type ReduceEngine struct { 5 | metaid string 6 | engine *Engine 7 | container *Container 8 | } 9 | 10 | // Containers is exported 11 | // Return engine's containers of metaid 12 | func (reduce *ReduceEngine) Containers() Containers { 13 | 14 | if reduce.engine != nil { 15 | return reduce.engine.Containers(reduce.metaid) 16 | } 17 | return Containers{} 18 | } 19 | 20 | // ReduceContainer is exported 21 | func (reduce *ReduceEngine) ReduceContainer() *Container { 22 | 23 | return reduce.container 24 | } 25 | 26 | // Engine is exported 27 | func (reduce *ReduceEngine) Engine() *Engine { 28 | 29 | return reduce.engine 30 | } 31 | 32 | type reduceEngines []*ReduceEngine 33 | 34 | func (engines reduceEngines) Len() int { 35 | 36 | return len(engines) 37 | } 38 | 39 | func (engines reduceEngines) Swap(i, j int) { 40 | 41 | engines[i], engines[j] = engines[j], engines[i] 42 | } 43 | 44 | func (engines reduceEngines) Less(i, j int) bool { 45 | 46 | return len(engines[i].Containers()) > len(engines[j].Containers()) 47 | } 48 | 49 | func selectReduceEngines(metaid string, engines []*Engine) reduceEngines { 50 | 51 | out := reduceEngines{} 52 | for _, engine := range engines { 53 | if 
engine.IsHealthy() { 54 | containers := engine.Containers(metaid) 55 | if len(containers) > 0 { 56 | out = append(out, &ReduceEngine{ 57 | engine: engine, 58 | metaid: metaid, 59 | container: containers[0], 60 | }) 61 | } 62 | } 63 | } 64 | return out 65 | } 66 | -------------------------------------------------------------------------------- /ctrl/controller.go: -------------------------------------------------------------------------------- 1 | package ctrl 2 | 3 | import "github.com/humpback/gounits/httpx" 4 | import "github.com/humpback/gounits/logger" 5 | import "github.com/humpback/humpback-center/cluster" 6 | import "github.com/humpback/humpback-center/etc" 7 | 8 | import ( 9 | "net" 10 | "net/http" 11 | "time" 12 | ) 13 | 14 | // Controller is exprted 15 | type Controller struct { 16 | client *httpx.HttpClient 17 | Configuration *etc.Configuration 18 | Cluster *cluster.Cluster 19 | } 20 | 21 | // NewController is exported 22 | func NewController(configuration *etc.Configuration) (*Controller, error) { 23 | 24 | cluster, err := createCluster(configuration) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | client := httpx.NewClient(). 
30 | SetTransport(&http.Transport{ 31 | Proxy: http.ProxyFromEnvironment, 32 | DialContext: (&net.Dialer{ 33 | Timeout: 45 * time.Second, 34 | KeepAlive: 90 * time.Second, 35 | }).DialContext, 36 | DisableKeepAlives: false, 37 | MaxIdleConns: 10, 38 | MaxIdleConnsPerHost: 10, 39 | IdleConnTimeout: 90 * time.Second, 40 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 41 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 42 | }) 43 | 44 | return &Controller{ 45 | client: client, 46 | Configuration: configuration, 47 | Cluster: cluster, 48 | }, nil 49 | } 50 | 51 | // Initialize is exported 52 | // init cluster 53 | func (c *Controller) Initialize() error { 54 | 55 | logger.INFO("[#ctrl#] controller initialize.....") 56 | logger.INFO("[#ctrl#] configuration %+v", c.Configuration) 57 | return c.startCluster() 58 | } 59 | 60 | // UnInitialize is exported 61 | // uninit cluster 62 | func (c *Controller) UnInitialize() { 63 | 64 | c.stopCluster() 65 | logger.INFO("[#ctrl#] controller uninitialized.") 66 | } 67 | -------------------------------------------------------------------------------- /cluster/storage/storage.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import "github.com/boltdb/bolt" 4 | import "github.com/humpback/gounits/system" 5 | import "github.com/humpback/humpback-center/cluster/storage/node" 6 | 7 | import ( 8 | "fmt" 9 | "path" 10 | "path/filepath" 11 | "time" 12 | ) 13 | 14 | const ( 15 | databaseFileName = "data.db" 16 | ) 17 | 18 | // DataStorage defines the implementation of datastore using 19 | // BoltDB as the storage system. 
20 | type DataStorage struct { 21 | path string 22 | driver *bolt.DB 23 | NodeStorage *node.NodeStorage 24 | } 25 | 26 | // NewDataStorage is exported 27 | func NewDataStorage(storePath string) (*DataStorage, error) { 28 | 29 | var err error 30 | storePath, err = filepath.Abs(storePath) 31 | if err != nil { 32 | return nil, fmt.Errorf("storage driver path invalid, %s", err) 33 | } 34 | 35 | storePath = filepath.Clean(storePath) 36 | if err = system.MakeDirectory(storePath); err != nil { 37 | return nil, fmt.Errorf("storage driver make directory failure, %s", err) 38 | } 39 | 40 | databasePath := path.Join(storePath, databaseFileName) 41 | databasePath = filepath.Clean(databasePath) 42 | return &DataStorage{ 43 | path: databasePath, 44 | }, nil 45 | } 46 | 47 | // Open is exported 48 | // open storage driver file. 49 | func (storage *DataStorage) Open() error { 50 | 51 | if storage.driver == nil { 52 | driver, err := bolt.Open(storage.path, 0600, &bolt.Options{Timeout: 1 * time.Second}) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | nodeStorage, err := node.NewNodeStorage(driver) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | storage.NodeStorage = nodeStorage 63 | storage.driver = driver 64 | } 65 | return nil 66 | } 67 | 68 | // Close is exported 69 | // Close storage driver file. 70 | func (storage *DataStorage) Close() error { 71 | 72 | if storage.driver != nil { 73 | return storage.driver.Close() 74 | } 75 | return nil 76 | } 77 | -------------------------------------------------------------------------------- /cluster/errors.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "errors" 4 | 5 | // cluster errors define 6 | var ( 7 | //cluster discovery is nil. 
8 | ErrClusterDiscoveryInvalid = errors.New("cluster discovery invalid") 9 | //cluster meta not found 10 | ErrClusterMetaDataNotFound = errors.New("cluster metadata not found") 11 | //cluster group not found 12 | ErrClusterGroupNotFound = errors.New("cluster group not found") 13 | //cluster container not found 14 | ErrClusterContainerNotFound = errors.New("cluster container not found") 15 | //cluster server not found 16 | ErrClusterServerNotFound = errors.New("cluster server not found") 17 | //cluster group no docker engine available 18 | ErrClusterNoEngineAvailable = errors.New("cluster no docker-engine available") 19 | //cluster containers instances invalid 20 | ErrClusterContainersInstancesInvalid = errors.New("cluster containers instances invalid") 21 | //cluster containers meta create failure 22 | ErrClusterContainersMetaCreateFailure = errors.New("cluster containers meta create failure") 23 | //cluster create containers name conflict 24 | ErrClusterCreateContainerNameConflict = errors.New("cluster create containers name conflict, this cluster already exists") 25 | //cluster create containers tag already using 26 | ErrClusterCreateContainerTagAlreadyUsing = errors.New("cluster create containers tag is already using") 27 | //cluster create containers all failure 28 | ErrClusterCreateContainerFailure = errors.New("cluster create containers failure") 29 | //cluster containers is upgrading 30 | ErrClusterContainersUpgrading = errors.New("cluster containers state is upgrading") 31 | //cluster containers is migrating 32 | ErrClusterContainersMigrating = errors.New("cluster containers state is migrating") 33 | //cluster containers is setting 34 | ErrClusterContainersSetting = errors.New("cluster containers state is setting") 35 | //cluster containers instances no change 36 | ErrClusterContainersInstancesNoChange = errors.New("cluster containers instances no change") 37 | ) 38 | -------------------------------------------------------------------------------- 
/notify/endpoint_api.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "github.com/humpback/gounits/httpx" 4 | import "github.com/humpback/gounits/logger" 5 | 6 | import ( 7 | "context" 8 | "time" 9 | "net" 10 | "net/http" 11 | ) 12 | 13 | // APIEndPoint is exported 14 | type APIEndPoint struct { 15 | IEndPoint 16 | EndPoint 17 | client *httpx.HttpClient 18 | } 19 | 20 | // NewAPIEndPoint is exported 21 | func NewAPIEndPoint(endpoint EndPoint) IEndPoint { 22 | 23 | client := httpx.NewClient(). 24 | SetTransport(&http.Transport{ 25 | Proxy: http.ProxyFromEnvironment, 26 | DialContext: (&net.Dialer{ 27 | Timeout: 45 * time.Second, 28 | KeepAlive: 90 * time.Second, 29 | }).DialContext, 30 | DisableKeepAlives: false, 31 | MaxIdleConns: 10, 32 | MaxIdleConnsPerHost: 10, 33 | IdleConnTimeout: 90 * time.Second, 34 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 35 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 36 | }) 37 | 38 | return &APIEndPoint{ 39 | EndPoint: endpoint, 40 | client: client, 41 | } 42 | } 43 | 44 | // DoEvent is exported 45 | func (endpoint *APIEndPoint) DoEvent(event *Event, data interface{}) { 46 | 47 | if !endpoint.Enabled { 48 | return 49 | } 50 | 51 | value := map[string]interface{}{ 52 | "From": endpoint.Sender, 53 | "To": event.ContactInfo, 54 | "Subject": event.makeSubjectText(), 55 | "Body": data, 56 | "ContentType": "HTML", 57 | "MailType": "Smtp", 58 | "SmtpSetting": map[string]interface{}{}, 59 | } 60 | 61 | response, err := endpoint.client.PostJSON(context.Background(), endpoint.URL, nil, value, endpoint.Headers) 62 | if err != nil { 63 | logger.ERROR("[#notify#] api endpoint error: %s", err.Error()) 64 | return 65 | } 66 | defer response.Close() 67 | if response.StatusCode() >= http.StatusBadRequest { 68 | logger.ERROR("[#notify#] api endpoint response code: %d", response.StatusCode()) 69 | } 70 
| } 71 | -------------------------------------------------------------------------------- /cluster/enginespriority.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "sync" 4 | 5 | // EnginePriorities is exported 6 | type EnginePriorities struct { 7 | sync.RWMutex 8 | Engines map[string]*Engine 9 | } 10 | 11 | // NewEnginePriorities is exported 12 | func NewEnginePriorities(metaData *MetaData, engines []*Engine) *EnginePriorities { 13 | 14 | enginePriorities := &EnginePriorities{ 15 | Engines: make(map[string]*Engine), 16 | } 17 | 18 | for _, baseConfig := range metaData.BaseConfigs { 19 | for _, engine := range engines { 20 | if engine.IsHealthy() && engine.HasContainer(baseConfig.ID) { 21 | enginePriorities.Add(baseConfig.ID, engine) 22 | break 23 | } 24 | } 25 | } 26 | return enginePriorities 27 | } 28 | 29 | // EngineStrings is exported 30 | func (priorities *EnginePriorities) EngineStrings() []string { 31 | 32 | engines := []string{} 33 | priorities.RLock() 34 | defer priorities.RUnlock() 35 | for _, engine := range priorities.Engines { 36 | engines = append(engines, engine.IP) 37 | } 38 | return engines 39 | } 40 | 41 | // Select is exported 42 | func (priorities *EnginePriorities) Select() *Engine { 43 | 44 | var engine *Engine 45 | priorities.Lock() 46 | defer priorities.Unlock() 47 | if len(priorities.Engines) == 0 { 48 | return nil 49 | } 50 | 51 | for containerid, e := range priorities.Engines { 52 | engine = e 53 | delete(priorities.Engines, containerid) 54 | break 55 | } 56 | return engine 57 | } 58 | 59 | // Size is exported 60 | func (priorities *EnginePriorities) Size() int { 61 | 62 | size := 0 63 | priorities.RLock() 64 | size = len(priorities.Engines) 65 | priorities.RUnlock() 66 | return size 67 | } 68 | 69 | // Add is exported 70 | func (priorities *EnginePriorities) Add(containerid string, engine *Engine) { 71 | 72 | priorities.Lock() 73 | if _, ret := 
priorities.Engines[containerid]; !ret { 74 | priorities.Engines[containerid] = engine 75 | } 76 | priorities.Unlock() 77 | } 78 | 79 | // Remove is exported 80 | func (priorities *EnginePriorities) Remove(containerid string) { 81 | 82 | priorities.Lock() 83 | delete(priorities.Engines, containerid) 84 | priorities.Unlock() 85 | } 86 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/consul/consul_test.go: -------------------------------------------------------------------------------- 1 | package consul 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/docker/libkv" 8 | "github.com/docker/libkv/store" 9 | "github.com/docker/libkv/testutils" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var ( 14 | client = "localhost:8500" 15 | ) 16 | 17 | func makeConsulClient(t *testing.T) store.Store { 18 | 19 | kv, err := New( 20 | []string{client}, 21 | &store.Config{ 22 | ConnectionTimeout: 3 * time.Second, 23 | }, 24 | ) 25 | 26 | if err != nil { 27 | t.Fatalf("cannot create store: %v", err) 28 | } 29 | 30 | return kv 31 | } 32 | 33 | func TestRegister(t *testing.T) { 34 | Register() 35 | 36 | kv, err := libkv.NewStore(store.CONSUL, []string{client}, nil) 37 | assert.NoError(t, err) 38 | assert.NotNil(t, kv) 39 | 40 | if _, ok := kv.(*Consul); !ok { 41 | t.Fatal("Error registering and initializing consul") 42 | } 43 | } 44 | 45 | func TestConsulStore(t *testing.T) { 46 | kv := makeConsulClient(t) 47 | lockKV := makeConsulClient(t) 48 | ttlKV := makeConsulClient(t) 49 | 50 | defer testutils.RunCleanup(t, kv) 51 | 52 | testutils.RunTestCommon(t, kv) 53 | testutils.RunTestAtomic(t, kv) 54 | testutils.RunTestWatch(t, kv) 55 | testutils.RunTestLock(t, kv) 56 | testutils.RunTestLockTTL(t, kv, lockKV) 57 | testutils.RunTestLockWait(t, kv, lockKV) 58 | testutils.RunTestTTL(t, kv, ttlKV) 59 | } 60 | 61 | func TestGetActiveSession(t *testing.T) { 62 | kv := makeConsulClient(t) 63 | 64 | 
consul := kv.(*Consul) 65 | 66 | key := "foo" 67 | value := []byte("bar") 68 | 69 | // Put the first key with the Ephemeral flag 70 | err := kv.Put(key, value, &store.WriteOptions{TTL: 2 * time.Second}) 71 | assert.NoError(t, err) 72 | 73 | // Session should not be empty 74 | session, err := consul.getActiveSession(key) 75 | assert.NoError(t, err) 76 | assert.NotEqual(t, session, "") 77 | 78 | // Delete the key 79 | err = kv.Delete(key) 80 | assert.NoError(t, err) 81 | 82 | // Check the session again, it should return nothing 83 | session, err = consul.getActiveSession(key) 84 | assert.NoError(t, err) 85 | assert.Equal(t, session, "") 86 | } 87 | -------------------------------------------------------------------------------- /server/service.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import "github.com/humpback/gounits/fprocess" 4 | import "github.com/humpback/gounits/logger" 5 | import "github.com/humpback/humpback-center/api" 6 | import "github.com/humpback/humpback-center/ctrl" 7 | import "github.com/humpback/humpback-center/etc" 8 | 9 | import ( 10 | "flag" 11 | ) 12 | 13 | /* 14 | CenterService is exported 15 | humpback center service 16 | */ 17 | type CenterService struct { 18 | PIDFile *fprocess.PIDFile 19 | APIServer *api.Server 20 | Controller *ctrl.Controller 21 | } 22 | 23 | // NewCenterService exported 24 | func NewCenterService() (*CenterService, error) { 25 | 26 | var conf string 27 | flag.StringVar(&conf, "f", "etc/config.yaml", "humpback center configuration file.") 28 | flag.Parse() 29 | configuration, err := etc.NewConfiguration(conf) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | pidfile, err := fprocess.New(configuration.PIDFile) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | largs := configuration.GetLogger() 40 | logger.OPEN(largs) 41 | controller, err := ctrl.NewController(configuration) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | 
apiserver := api.NewServer(configuration.API.Hosts, nil, controller, configuration.API.EnableCors) 47 | return &CenterService{ 48 | PIDFile: pidfile, 49 | APIServer: apiserver, 50 | Controller: controller, 51 | }, nil 52 | } 53 | 54 | func (service *CenterService) Startup() error { 55 | 56 | logger.INFO("[#service#] service start...") 57 | if err := service.Controller.Initialize(); err != nil { 58 | return err 59 | } 60 | logger.INFO("[#service#] center process %d", service.PIDFile.PID) 61 | //apiserver start. 62 | go func() { 63 | logger.INFO("[#service#] center API listen: %s", service.APIServer.ListenHosts()) 64 | if err := service.APIServer.Startup(); err != nil { 65 | logger.ERROR("[#service#] service API start error:%s", err.Error()) 66 | } 67 | }() 68 | return nil 69 | } 70 | 71 | func (service *CenterService) Stop() error { 72 | 73 | service.Controller.UnInitialize() 74 | service.PIDFile.Remove() 75 | logger.INFO("[#service#] service closed.") 76 | logger.CLOSE() 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /notify/events.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import "github.com/humpback/gounits/rand" 4 | 5 | import ( 6 | "bytes" 7 | "html/template" 8 | "time" 9 | ) 10 | 11 | //EventType is exported 12 | type EventType int 13 | 14 | const ( 15 | //GroupEnginesWatchEvent is exported 16 | //cluster discovery watch nodes event 17 | GroupEnginesWatchEvent EventType = 1000 18 | //GroupMetaContainersEvent is exported 19 | //cluster meta containers migrated or recovered to warning event 20 | GroupMetaContainersEvent EventType = 1001 21 | ) 22 | 23 | //eventsTextMap is exported 24 | var eventsTextMap = map[EventType]string{ 25 | GroupEnginesWatchEvent: "GroupEnginesWatchEvent", 26 | GroupMetaContainersEvent: "GroupMetaContainersEvent", 27 | } 28 | 29 | //Event is exported 30 | type Event struct { 31 | ID string 32 | Type EventType 
33 | Name string 34 | Error error 35 | ContactInfo string 36 | Endpoints []IEndPoint 37 | data map[string]interface{} 38 | } 39 | 40 | //NewEvent is exported 41 | func NewEvent(eventType EventType, description string, err error, contactInfo string, siteurl string, endpoints []IEndPoint) *Event { 42 | 43 | seed := time.Now() 44 | event := &Event{ 45 | ID: rand.UUID(true), 46 | Type: eventType, 47 | Name: eventsTextMap[eventType], 48 | Error: err, 49 | ContactInfo: contactInfo, 50 | Endpoints: endpoints, 51 | } 52 | 53 | event.data = map[string]interface{}{ 54 | "SiteURL": siteurl, 55 | "ID": event.ID, 56 | "Event": event.Name, 57 | "Description": description, 58 | "Timestamp": seed.UnixNano(), 59 | "Datetime": seed, 60 | } 61 | 62 | if err != nil { 63 | event.data["Exception"] = err.Error() 64 | } 65 | return event 66 | } 67 | 68 | //Dispatch is exported 69 | func (event *Event) dispatch(templateBody string) { 70 | 71 | if len(templateBody) > 0 { 72 | var buf bytes.Buffer 73 | t := template.New("") 74 | t.Parse(templateBody) 75 | t.Execute(&buf, event.data) 76 | for _, endPoint := range event.Endpoints { 77 | endPoint.DoEvent(event, buf.String()) 78 | } 79 | } 80 | } 81 | 82 | //makeSubjectText is exported 83 | func (event *Event) makeSubjectText() string { 84 | 85 | subjectPrefix := "(info)" 86 | if event.Error != nil { 87 | subjectPrefix = "(warn)" 88 | } 89 | return subjectPrefix + " Humpback Notification" 90 | } 91 | -------------------------------------------------------------------------------- /etc/configuration.go: -------------------------------------------------------------------------------- 1 | package etc 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "github.com/humpback/humpback-center/notify" 5 | import "gopkg.in/yaml.v2" 6 | 7 | import ( 8 | "io/ioutil" 9 | "os" 10 | ) 11 | 12 | // Configuration is exported 13 | type Configuration struct { 14 | 15 | //base options 16 | Version string `yaml:"version" json:"version"` 17 | PIDFile 
string `yaml:"pidfile" json:"pidfile"` 18 | RetryStartup bool `yaml:"retrystartup" json:"retrystartup"` 19 | SiteAPI string `yaml:"siteapi" json:"siteapi"` 20 | 21 | Cluster struct { 22 | //driver opts 23 | DriverOpts []string `yaml:"opts" json:"opts"` 24 | //service discovery opts 25 | Discovery struct { 26 | URIs string `yaml:"uris" json:"uris"` 27 | Cluster string `yaml:"cluster" json:"cluster"` 28 | Heartbeat string `yaml:"heartbeat" json:"heartbeat"` 29 | } `yaml:"discovery" json:"discovery"` 30 | } `yaml:"cluster" json:"cluster"` 31 | 32 | //api options 33 | API struct { 34 | Hosts []string `yaml:"hosts" json:"hosts"` 35 | EnableCors bool `yaml:"enablecors" json:"enablecors"` 36 | } `yaml:"api" json:"api"` 37 | 38 | Notifications notify.Notifications `yaml:"notifications,omitempty" json:"notifications,omitempty"` 39 | 40 | //log options 41 | Logger struct { 42 | LogFile string `yaml:"logfile" json:"logfile"` 43 | LogLevel string `yaml:"loglevel" json:"loglevel"` 44 | LogSize int64 `yaml:"logsize" json:"logsize"` 45 | } `yaml:"logger" json:"logger"` 46 | } 47 | 48 | // NewConfiguration is exported 49 | func NewConfiguration(file string) (*Configuration, error) { 50 | 51 | fd, err := os.OpenFile(file, os.O_RDWR, 0777) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | defer fd.Close() 57 | data, err := ioutil.ReadAll(fd) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | conf := &Configuration{} 63 | if err := yaml.Unmarshal([]byte(data), conf); err != nil { 64 | return nil, err 65 | } 66 | 67 | if err := conf.ParseEnv(); err != nil { 68 | return nil, err 69 | } 70 | return conf, nil 71 | } 72 | 73 | // GetNotificationsEndPoints is exported 74 | func (conf *Configuration) GetNotificationsEndPoints() []notify.EndPoint { 75 | 76 | return conf.Notifications.EndPoints 77 | } 78 | 79 | // GetLogger is exported 80 | func (conf *Configuration) GetLogger() *logger.Args { 81 | 82 | return &logger.Args{ 83 | FileName: conf.Logger.LogFile, 84 | Level: 
conf.Logger.LogLevel, 85 | MaxSize: conf.Logger.LogSize, 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /cluster/weighted.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/gounits/logger" 4 | import "github.com/humpback/common/models" 5 | 6 | // WeightedEngine is exported 7 | type WeightedEngine struct { 8 | engine *Engine 9 | weight int64 10 | } 11 | 12 | // Containers is exported 13 | // Return engine's containers 14 | func (weighted *WeightedEngine) Containers() Containers { 15 | 16 | if weighted.engine != nil { 17 | return weighted.engine.Containers("") 18 | } 19 | return Containers{} 20 | } 21 | 22 | // Engine is exported 23 | func (weighted *WeightedEngine) Engine() *Engine { 24 | 25 | return weighted.engine 26 | } 27 | 28 | // Weight is exported 29 | func (weighted *WeightedEngine) Weight() int64 { 30 | 31 | return weighted.weight 32 | } 33 | 34 | type weightedEngines []*WeightedEngine 35 | 36 | func (engines weightedEngines) Len() int { 37 | 38 | return len(engines) 39 | } 40 | 41 | func (engines weightedEngines) Swap(i, j int) { 42 | 43 | engines[i], engines[j] = engines[j], engines[i] 44 | } 45 | 46 | func (engines weightedEngines) Less(i, j int) bool { 47 | 48 | if engines[i].Weight() == engines[j].Weight() { 49 | return len(engines[i].Containers()) < len(engines[j].Containers()) 50 | } 51 | return engines[i].Weight() < engines[j].Weight() 52 | } 53 | 54 | func (engines weightedEngines) Engines() []*Engine { 55 | 56 | out := []*Engine{} 57 | for _, weightedEngine := range engines { 58 | out = append(out, weightedEngine.Engine()) 59 | } 60 | return out 61 | } 62 | 63 | func selectWeightdEngines(engines []*Engine, config models.Container) weightedEngines { 64 | 65 | out := weightedEngines{} 66 | for _, engine := range engines { 67 | totalCpus := engine.TotalCpus() 68 | totalMemory := engine.TotalMemory() 69 | if 
totalMemory < config.Memory || totalCpus < config.CPUShares { 70 | logger.INFO("[#cluster#] weighted engine %s filter.", engine.IP) 71 | continue 72 | } 73 | 74 | var cpuScore int64 = 100 75 | var memoryScore int64 = 100 76 | 77 | if config.CPUShares > 0 { 78 | cpuScore = (engine.UsedCpus() + config.CPUShares) * 100 / totalCpus 79 | } 80 | 81 | if config.Memory > 0 { 82 | memoryScore = (engine.UsedMemory()/1024/1024 + config.Memory) * 100 / totalMemory 83 | } 84 | 85 | //logger.INFO("[#cluster#] weighted engine %s cpuScore:%d memorySocre:%d weight:%d", engine.IP, cpuScore, memoryScore, cpuScore+memoryScore) 86 | if cpuScore <= 100 && memoryScore <= 100 { 87 | out = append(out, &WeightedEngine{ 88 | engine: engine, 89 | weight: cpuScore + memoryScore, 90 | }) 91 | } 92 | } 93 | return out 94 | } 95 | -------------------------------------------------------------------------------- /cluster/enginesfilter.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // EnginesFilter is exported 8 | type EnginesFilter struct { 9 | sync.RWMutex 10 | allocEngines map[string]*Engine 11 | failEngines map[string]*Engine 12 | } 13 | 14 | // NewEnginesFilter is exported 15 | func NewEnginesFilter() *EnginesFilter { 16 | 17 | return &EnginesFilter{ 18 | allocEngines: make(map[string]*Engine), 19 | failEngines: make(map[string]*Engine), 20 | } 21 | } 22 | 23 | // Size is exported 24 | func (filter *EnginesFilter) Size() int { 25 | 26 | filter.RLock() 27 | defer filter.RUnlock() 28 | return len(filter.allocEngines) + len(filter.failEngines) 29 | } 30 | 31 | // SetAllocEngine is exported 32 | func (filter *EnginesFilter) SetAllocEngine(engine *Engine) { 33 | 34 | filter.Lock() 35 | if engine != nil { 36 | if _, ret := filter.allocEngines[engine.IP]; !ret { 37 | filter.allocEngines[engine.IP] = engine 38 | } 39 | } 40 | filter.Unlock() 41 | } 42 | 43 | // SetFailEngine is exported 44 | func (filter 
*EnginesFilter) SetFailEngine(engine *Engine) { 45 | 46 | filter.Lock() 47 | if engine != nil { 48 | if _, ret := filter.failEngines[engine.IP]; !ret { 49 | filter.failEngines[engine.IP] = engine 50 | } 51 | } 52 | filter.Unlock() 53 | } 54 | 55 | // AllocEngines is exported 56 | func (filter *EnginesFilter) AllocEngines() []*Engine { 57 | 58 | filter.RLock() 59 | defer filter.RUnlock() 60 | engines := []*Engine{} 61 | for _, engine := range filter.allocEngines { 62 | engines = append(engines, engine) 63 | } 64 | return engines 65 | } 66 | 67 | // FailEngines is exported 68 | func (filter *EnginesFilter) FailEngines() []*Engine { 69 | 70 | filter.RLock() 71 | defer filter.RUnlock() 72 | engines := []*Engine{} 73 | for _, engine := range filter.failEngines { 74 | engines = append(engines, engine) 75 | } 76 | return engines 77 | } 78 | 79 | // Filter is exported 80 | func (filter *EnginesFilter) Filter(engines []*Engine) []*Engine { 81 | 82 | if filter.Size() == 0 { 83 | return engines 84 | } 85 | 86 | filter.RLock() 87 | filterEngines := make(map[string]*Engine) 88 | for _, engine := range filter.allocEngines { 89 | filterEngines[engine.IP] = engine 90 | } 91 | 92 | for _, engine := range filter.failEngines { 93 | filterEngines[engine.IP] = engine 94 | } 95 | 96 | out := []*Engine{} 97 | for _, engine := range engines { 98 | if _, ret := filterEngines[engine.IP]; !ret { 99 | out = append(out, engine) 100 | } 101 | } 102 | filter.RUnlock() 103 | return out 104 | } 105 | -------------------------------------------------------------------------------- /cluster/storage/dao/dao.go: -------------------------------------------------------------------------------- 1 | package dao 2 | 3 | import "github.com/boltdb/bolt" 4 | 5 | import ( 6 | "encoding/binary" 7 | "errors" 8 | ) 9 | 10 | var ( 11 | ErrStorageObjectNotFound = errors.New("object not found") 12 | ) 13 | 14 | // Itob returns an 8-byte big endian representation of v. 
15 | // This function is typically used for encoding integer IDs to byte slices 16 | // so that they can be used as BoltDB keys. 17 | func Itob(v int) []byte { 18 | b := make([]byte, 8) 19 | binary.BigEndian.PutUint64(b, uint64(v)) 20 | return b 21 | } 22 | 23 | // CreateBucket is a generic function used to create a bucket inside a bolt database. 24 | func CreateBucket(db *bolt.DB, bucketName string) error { 25 | return db.Update(func(tx *bolt.Tx) error { 26 | _, err := tx.CreateBucketIfNotExists([]byte(bucketName)) 27 | if err != nil { 28 | return err 29 | } 30 | return nil 31 | }) 32 | } 33 | 34 | // GetObject is a generic function used to retrieve an unmarshalled object from a bolt database. 35 | func GetObject(db *bolt.DB, bucketName string, key []byte, object interface{}) error { 36 | var data []byte 37 | 38 | err := db.View(func(tx *bolt.Tx) error { 39 | bucket := tx.Bucket([]byte(bucketName)) 40 | 41 | value := bucket.Get(key) 42 | if value == nil { 43 | return ErrStorageObjectNotFound 44 | } 45 | 46 | data = make([]byte, len(value)) 47 | copy(data, value) 48 | 49 | return nil 50 | }) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | return UnmarshalObject(data, object) 56 | } 57 | 58 | // UpdateObject is a generic function used to update an object inside a bolt database. 59 | func UpdateObject(db *bolt.DB, bucketName string, key []byte, object interface{}) error { 60 | return db.Update(func(tx *bolt.Tx) error { 61 | bucket := tx.Bucket([]byte(bucketName)) 62 | 63 | data, err := MarshalObject(object) 64 | if err != nil { 65 | return err 66 | } 67 | 68 | err = bucket.Put(key, data) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | return nil 74 | }) 75 | } 76 | 77 | // DeleteObject is a generic function used to delete an object inside a bolt database. 
78 | func DeleteObject(db *bolt.DB, bucketName string, key []byte) error { 79 | return db.Update(func(tx *bolt.Tx) error { 80 | bucket := tx.Bucket([]byte(bucketName)) 81 | return bucket.Delete(key) 82 | }) 83 | } 84 | 85 | // GetNextIdentifier is a generic function that returns the specified bucket identifier incremented by 1. 86 | func GetNextIdentifier(db *bolt.DB, bucketName string) int { 87 | var identifier int 88 | 89 | db.View(func(tx *bolt.Tx) error { 90 | bucket := tx.Bucket([]byte(bucketName)) 91 | id := bucket.Sequence() 92 | identifier = int(id) 93 | return nil 94 | }) 95 | 96 | identifier++ 97 | return identifier 98 | } 99 | -------------------------------------------------------------------------------- /api/router.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "github.com/gorilla/mux" 4 | import "github.com/humpback/humpback-center/ctrl" 5 | 6 | import ( 7 | "net/http" 8 | ) 9 | 10 | type handler func(c *Context) error 11 | 12 | var routes = map[string]map[string]handler{ 13 | "GET": { 14 | "/v1/_ping": ping, 15 | "/v1/configuration": getConfiguration, 16 | "/v1/groups/{groupid}/collections": getGroupAllContainers, 17 | "/v1/groups/{groupid}/engines": getGroupEngines, 18 | "/v1/groups/collections/{metaid}": getGroupContainers, 19 | "/v1/groups/collections/{metaid}/base": getGroupContainersMetaBase, 20 | "/v1/groups/engines/{server}": getGroupEngine, 21 | }, 22 | "POST": { 23 | "/v1/groups/event": postGroupEvent, 24 | "/v1/cluster/event": postClusterEvent, 25 | "/v1/groups/collections": postGroupCreateContainers, 26 | }, 27 | "PUT": { 28 | "/v1/groups/collections": putGroupUpdateContainers, 29 | "/v1/groups/collections/upgrade": putGroupUpgradeContainers, 30 | "/v1/groups/collections/action": putGroupOperateContainers, 31 | "/v1/groups/container/action": putGroupOperateContainer, 32 | "/v1/groups/nodelabels": putGroupServerNodeLabels, 33 | }, 34 | "DELETE": { 35 | 
"/v1/groups/{groupid}/collections/{metaname}": deleteGroupRemoveContainersOfMetaName, 36 | "/v1/groups/collections/{metaid}": deleteGroupRemoveContainers, 37 | "/v1/groups/container/{containerid}": deleteGroupRemoveContainer, 38 | }, 39 | } 40 | 41 | func NewRouter(controller *ctrl.Controller, enableCors bool) *mux.Router { 42 | 43 | router := mux.NewRouter() 44 | for method, mappings := range routes { 45 | for route, handler := range mappings { 46 | routemethod := method 47 | routepattern := route 48 | routehandler := handler 49 | wrap := func(w http.ResponseWriter, r *http.Request) { 50 | if enableCors { 51 | writeCorsHeaders(w, r) 52 | } 53 | c := NewContext(w, r, controller) 54 | routehandler(c) 55 | } 56 | router.Path(routepattern).Methods(routemethod).HandlerFunc(wrap) 57 | if enableCors { 58 | optionsmethod := "OPTIONS" 59 | optionshandler := optionsHandler 60 | wrap := func(w http.ResponseWriter, r *http.Request) { 61 | if enableCors { 62 | writeCorsHeaders(w, r) 63 | } 64 | c := NewContext(w, r, controller) 65 | optionshandler(c) 66 | } 67 | router.Path(routepattern).Methods(optionsmethod).HandlerFunc(wrap) 68 | } 69 | } 70 | } 71 | return router 72 | } 73 | 74 | func ping(ctx *Context) error { 75 | 76 | return ctx.JSON(http.StatusOK, "PANG") 77 | } 78 | 79 | func getConfiguration(ctx *Context) error { 80 | 81 | return ctx.JSON(http.StatusOK, ctx.Controller.Configuration) 82 | } 83 | 84 | func optionsHandler(ctx *Context) error { 85 | 86 | ctx.WriteHeader(http.StatusOK) 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /notify/notify.go: -------------------------------------------------------------------------------- 1 | package notify 2 | 3 | import ( 4 | "io/ioutil" 5 | "strings" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | //notify template string 11 | var templateBody string 12 | 13 | //NotifySender is exported 14 | type NotifySender struct { 15 | sync.RWMutex 16 | SiteURL string 17 | initWatch bool 18 
| endPoints []IEndPoint 19 | events map[string]*Event 20 | } 21 | 22 | //NewNotifySender is exported 23 | func NewNotifySender(siteurl string, endPoints []EndPoint) *NotifySender { 24 | 25 | sender := &NotifySender{ 26 | SiteURL: siteurl, 27 | initWatch: true, 28 | endPoints: []IEndPoint{}, 29 | events: make(map[string]*Event), 30 | } 31 | 32 | if buf, err := ioutil.ReadFile("./notify/template.html"); err == nil { 33 | templateBody = string(buf) 34 | } 35 | 36 | factory := &NotifyEndPointFactory{} 37 | sender.Lock() 38 | for _, endPoint := range endPoints { 39 | switch strings.ToUpper(endPoint.Name) { 40 | case "API": 41 | apiEndPoint := factory.CreateAPIEndPoint(endPoint) 42 | sender.endPoints = append(sender.endPoints, apiEndPoint) 43 | case "SMTP": 44 | smtpEndPoint := factory.CreateSMTPEndPoint(endPoint) 45 | sender.endPoints = append(sender.endPoints, smtpEndPoint) 46 | } 47 | } 48 | sender.Unlock() 49 | 50 | go func() { 51 | time.Sleep(30 * time.Second) 52 | sender.initWatch = false 53 | }() 54 | return sender 55 | } 56 | 57 | //AddGroupEnginesWatchEvent is exported 58 | func (sender *NotifySender) AddGroupEnginesWatchEvent(description string, watchGroup *WatchGroup) { 59 | 60 | event := NewEvent(GroupEnginesWatchEvent, description, nil, watchGroup.ContactInfo, sender.SiteURL, sender.endPoints) 61 | event.data["WatchGroup"] = watchGroup 62 | sender.Lock() 63 | sender.events[event.ID] = event 64 | sender.Unlock() 65 | go sender.dispatchEvents() 66 | } 67 | 68 | //AddGroupMetaContainersEvent is exported 69 | func (sender *NotifySender) AddGroupMetaContainersEvent(description string, err error, groupMeta *GroupMeta) { 70 | 71 | event := NewEvent(GroupMetaContainersEvent, description, err, groupMeta.ContactInfo, sender.SiteURL, sender.endPoints) 72 | event.data["GroupMeta"] = groupMeta 73 | sender.Lock() 74 | sender.events[event.ID] = event 75 | sender.Unlock() 76 | go sender.dispatchEvents() 77 | } 78 | 79 | //dispatchEvents is exported 80 | //dispatch all 
//dispatchEvents is exported
//dispatchEvents drains the pending event map. Each event is dispatched on
//its own goroutine and the batch is awaited before the map is cleared.
//During the initial watch-suppression window (initWatch) events are removed
//WITHOUT being sent. The sender lock is held for the whole drain, so
//concurrent dispatchEvents calls serialize and Add* calls block meanwhile.
func (sender *NotifySender) dispatchEvents() {

	sender.Lock()
	for {
		if len(sender.events) == 0 {
			break
		}
		if !sender.initWatch {
			wgroup := sync.WaitGroup{}
			for _, event := range sender.events {
				wgroup.Add(1)
				go func(e *Event) {
					e.dispatch(templateBody)
					wgroup.Done()
				}(event)
			}
			wgroup.Wait()
		}
		// Clear everything that was just processed (or suppressed).
		for _, event := range sender.events {
			delete(sender.events, event.ID)
		}
	}
	sender.Unlock()
}

// Dispatcher adapts a swappable http.Handler, wrapping every request with
// the request-logging middleware.
type Dispatcher struct {
	handler http.Handler
}

// SetHandler replaces the handler served by the dispatcher.
// NOTE(review): handler is read by ServeHTTP without synchronization, so
// swapping it while serving is racy — confirm callers only set it before
// Startup.
func (dispatcher *Dispatcher) SetHandler(handler http.Handler) {

	dispatcher.handler = handler
}

// ServeHTTP implements http.Handler. It answers 500 when no handler has
// been installed, otherwise delegates through the logging middleware.
func (dispatcher *Dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {

	if dispatcher.handler == nil {
		httpError(w, "API Dispatcher Invalid.", http.StatusInternalServerError)
		return
	}
	handler := middleware.Logger(dispatcher.handler)
	handler.ServeHTTP(w, r)
}

// Server hosts the cluster HTTP API on one or more listen addresses.
type Server struct {
	hosts      []string
	tlsConfig  *tls.Config
	dispatcher *Dispatcher
}

// NewServer wires the API router into a Server for the given hosts.
func NewServer(hosts []string, tlsConfig *tls.Config, controller *ctrl.Controller, enablecors bool) *Server {

	router := NewRouter(controller, enablecors)
	return &Server{
		hosts:     hosts,
		tlsConfig: tlsConfig,
		dispatcher: &Dispatcher{
			handler: router,
		},
	}
}

// ListenHosts returns the configured listen addresses.
func (server *Server) ListenHosts() []string {

	return server.hosts
}

// SetHandler replaces the root HTTP handler.
func (server *Server) SetHandler(handler http.Handler) {

	server.dispatcher.SetHandler(handler)
}

// Startup starts one listener goroutine per configured host and blocks
// until every listener has reported on the error channel. Hosts without an
// explicit "proto://" prefix default to tcp; unix sockets are rejected on
// Windows (see listener_windows.go). The first non-nil error is returned.
func (server *Server) Startup() error {

	errorsCh := make(chan error, len(server.hosts))
	for _, host := range server.hosts {
		protoAddrParts := strings.SplitN(host, "://", 2)
		if len(protoAddrParts) == 1 {
			// No scheme given: assume tcp.
			protoAddrParts = append([]string{"tcp"}, protoAddrParts...)
		}

		go func() {
			var (
				err error
				l   net.Listener
				s   = http.Server{
					Addr:    protoAddrParts[1],
					Handler: server.dispatcher,
				}
			)

			switch protoAddrParts[0] {
			case "unix":
				l, err = newUnixListener(protoAddrParts[1], server.tlsConfig)
			case "tcp":
				l, err = newListener("tcp", protoAddrParts[1], server.tlsConfig)
			default:
				err = fmt.Errorf("API UnSupported Protocol:%q", protoAddrParts[0])
			}
			if err != nil {
				errorsCh <- err
			} else {
				// Serve blocks until the listener fails or is closed.
				errorsCh <- s.Serve(l)
			}
		}()
	}

	// Wait for every listener goroutine; bail out on the first error.
	for i := 0; i < len(server.hosts); i++ {
		err := <-errorsCh
		if err != nil {
			return err
		}
	}
	return nil
}

// newListener opens a TCP listener on addr, wrapping it with TLS when a
// config is supplied. Note that tlsConfig.NextProtos is mutated to pin
// http/1.1.
func newListener(proto string, addr string, tlsConfig *tls.Config) (net.Listener, error) {

	l, err := net.Listen(proto, addr)
	if err != nil {
		return nil, err
	}

	if tlsConfig != nil {
		tlsConfig.NextProtos = []string{"http/1.1"}
		l = tls.NewListener(l, tlsConfig)
	}
	return l, nil
}
state: state, 15 | } 16 | } 17 | 18 | //NotifyGroupEnginesWatchEvent is exported 19 | func (cluster *Cluster) NotifyGroupEnginesWatchEvent(description string, watchEngines WatchEngines) { 20 | 21 | watchGroups := make(notify.WatchGroups) 22 | for _, engine := range watchEngines { 23 | e := ¬ify.Engine{ 24 | IP: engine.IP, 25 | Name: engine.Name, 26 | State: stateText[engine.state], 27 | } 28 | groups := cluster.GetEngineGroups(engine) 29 | for _, group := range groups { 30 | if watchGroup, ret := watchGroups[group.ID]; !ret { 31 | watchGroup = ¬ify.WatchGroup{ 32 | GroupID: group.ID, 33 | GroupName: group.Name, 34 | Location: group.Location, 35 | ContactInfo: group.ContactInfo, 36 | Engines: []*notify.Engine{e}, 37 | } 38 | watchGroups[group.ID] = watchGroup 39 | } else { 40 | watchGroup.Engines = append(watchGroup.Engines, e) 41 | } 42 | } 43 | } 44 | for _, watchGroup := range watchGroups { 45 | cluster.NotifySender.AddGroupEnginesWatchEvent(description, watchGroup) 46 | } 47 | } 48 | 49 | //NotifyGroupMetaContainersEvent is exported 50 | func (cluster *Cluster) NotifyGroupMetaContainersEvent(description string, exception error, metaid string) { 51 | 52 | metaData, engines, err := cluster.GetMetaDataEngines(metaid) 53 | if err != nil { 54 | return 55 | } 56 | 57 | group := cluster.GetGroup(metaData.GroupID) 58 | if group == nil { 59 | return 60 | } 61 | 62 | containers := []*notify.Container{} 63 | for _, baseConfig := range metaData.BaseConfigs { 64 | for _, engine := range engines { 65 | if engine.IsHealthy() && engine.HasContainer(baseConfig.ID) { 66 | state := "Unkonw" 67 | if c := engine.Container(baseConfig.ID); c != nil { 68 | state = StateString(c.Info.State) 69 | } 70 | containers = append(containers, ¬ify.Container{ 71 | ID: ShortContainerID(baseConfig.ID), 72 | Name: baseConfig.Name, 73 | Server: engine.IP, 74 | State: state, 75 | }) 76 | } 77 | } 78 | } 79 | 80 | nEngines := []*notify.Engine{} 81 | engines = 
cluster.GetGroupAllEngines(metaData.GroupID) 82 | for _, engine := range engines { 83 | e := ¬ify.Engine{ 84 | IP: engine.IP, 85 | Name: engine.Name, 86 | State: stateText[engine.state], 87 | } 88 | nEngines = append(nEngines, e) 89 | } 90 | 91 | groupMeta := ¬ify.GroupMeta{ 92 | MetaID: metaData.MetaID, 93 | MetaName: metaData.Config.Name, 94 | Location: group.Location, 95 | GroupID: group.ID, 96 | GroupName: group.Name, 97 | Instances: metaData.Instances, 98 | Image: metaData.Config.Image, 99 | ContactInfo: group.ContactInfo, 100 | Engines: nEngines, 101 | Containers: containers, 102 | } 103 | cluster.NotifySender.AddGroupMetaContainersEvent(description, exception, groupMeta) 104 | } 105 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/mock/mock.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "github.com/docker/libkv/store" 5 | "github.com/stretchr/testify/mock" 6 | ) 7 | 8 | // Mock store. 
Mocks all Store functions using testify.Mock 9 | type Mock struct { 10 | mock.Mock 11 | 12 | // Endpoints passed to InitializeMock 13 | Endpoints []string 14 | 15 | // Options passed to InitializeMock 16 | Options *store.Config 17 | } 18 | 19 | // New creates a Mock store 20 | func New(endpoints []string, options *store.Config) (store.Store, error) { 21 | s := &Mock{} 22 | s.Endpoints = endpoints 23 | s.Options = options 24 | return s, nil 25 | } 26 | 27 | // Put mock 28 | func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { 29 | args := s.Mock.Called(key, value, opts) 30 | return args.Error(0) 31 | } 32 | 33 | // Get mock 34 | func (s *Mock) Get(key string) (*store.KVPair, error) { 35 | args := s.Mock.Called(key) 36 | return args.Get(0).(*store.KVPair), args.Error(1) 37 | } 38 | 39 | // Delete mock 40 | func (s *Mock) Delete(key string) error { 41 | args := s.Mock.Called(key) 42 | return args.Error(0) 43 | } 44 | 45 | // Exists mock 46 | func (s *Mock) Exists(key string) (bool, error) { 47 | args := s.Mock.Called(key) 48 | return args.Bool(0), args.Error(1) 49 | } 50 | 51 | // Watch mock 52 | func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { 53 | args := s.Mock.Called(key, stopCh) 54 | return args.Get(0).(<-chan *store.KVPair), args.Error(1) 55 | } 56 | 57 | // WatchTree mock 58 | func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { 59 | args := s.Mock.Called(prefix, stopCh) 60 | return args.Get(0).(chan []*store.KVPair), args.Error(1) 61 | } 62 | 63 | // NewLock mock 64 | func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { 65 | args := s.Mock.Called(key, options) 66 | return args.Get(0).(store.Locker), args.Error(1) 67 | } 68 | 69 | // List mock 70 | func (s *Mock) List(prefix string) ([]*store.KVPair, error) { 71 | args := s.Mock.Called(prefix) 72 | return args.Get(0).([]*store.KVPair), args.Error(1) 73 | } 74 | 75 | // 
DeleteTree mock 76 | func (s *Mock) DeleteTree(prefix string) error { 77 | args := s.Mock.Called(prefix) 78 | return args.Error(0) 79 | } 80 | 81 | // AtomicPut mock 82 | func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { 83 | args := s.Mock.Called(key, value, previous, opts) 84 | return args.Bool(0), args.Get(1).(*store.KVPair), args.Error(2) 85 | } 86 | 87 | // AtomicDelete mock 88 | func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { 89 | args := s.Mock.Called(key, previous) 90 | return args.Bool(0), args.Error(1) 91 | } 92 | 93 | // Lock mock implementation of Locker 94 | type Lock struct { 95 | mock.Mock 96 | } 97 | 98 | // Lock mock 99 | func (l *Lock) Lock(stopCh chan struct{}) (<-chan struct{}, error) { 100 | args := l.Mock.Called(stopCh) 101 | return args.Get(0).(<-chan struct{}), args.Error(1) 102 | } 103 | 104 | // Unlock mock 105 | func (l *Lock) Unlock() error { 106 | args := l.Mock.Called() 107 | return args.Error(0) 108 | } 109 | 110 | // Close mock 111 | func (s *Mock) Close() { 112 | return 113 | } 114 | -------------------------------------------------------------------------------- /api/context.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import "github.com/humpback/gounits/rand" 4 | import "github.com/humpback/humpback-center/ctrl" 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "net/http" 10 | "net/url" 11 | ) 12 | 13 | type ( 14 | store map[string]interface{} 15 | 16 | Response struct { 17 | writer http.ResponseWriter 18 | status int 19 | size int64 20 | } 21 | 22 | Context struct { 23 | context.Context 24 | ID string 25 | request *http.Request 26 | response *Response 27 | query url.Values 28 | store store 29 | Controller *ctrl.Controller 30 | } 31 | ) 32 | 33 | func NewResponse(w http.ResponseWriter) *Response { 34 | 35 | return &Response{ 36 | writer: w, 37 | } 38 
| } 39 | 40 | func (r *Response) SetWriter(w http.ResponseWriter) { 41 | 42 | r.writer = w 43 | } 44 | 45 | func (r *Response) Header() http.Header { 46 | 47 | return r.writer.Header() 48 | } 49 | 50 | func (r *Response) Writer() http.ResponseWriter { 51 | 52 | return r.writer 53 | } 54 | 55 | func (r *Response) WriteHeader(code int) { 56 | 57 | r.status = code 58 | r.writer.WriteHeader(code) 59 | } 60 | 61 | func (r *Response) Write(b []byte) (int, error) { 62 | 63 | n, err := r.writer.Write(b) 64 | if err == nil { 65 | r.size += int64(n) 66 | } 67 | return n, err 68 | } 69 | 70 | func (r *Response) Flush() { 71 | 72 | r.writer.(http.Flusher).Flush() 73 | } 74 | 75 | func (r *Response) Size() int64 { 76 | 77 | return r.size 78 | } 79 | 80 | func (r *Response) Status() int { 81 | 82 | return r.status 83 | } 84 | 85 | func NewContext(w http.ResponseWriter, r *http.Request, controller *ctrl.Controller) *Context { 86 | 87 | return &Context{ 88 | ID: rand.UUID(true), 89 | request: r, 90 | response: NewResponse(w), 91 | store: make(store), 92 | Controller: controller, 93 | } 94 | } 95 | 96 | func (c *Context) Request() *http.Request { 97 | 98 | return c.request 99 | } 100 | 101 | func (c *Context) Response() *Response { 102 | 103 | return c.response 104 | } 105 | 106 | func (c *Context) Get(key string) interface{} { 107 | 108 | return c.store[key] 109 | } 110 | 111 | func (c *Context) Set(key string, v interface{}) { 112 | 113 | if c.store == nil { 114 | c.store = make(store) 115 | } 116 | c.store[key] = v 117 | } 118 | 119 | func (c *Context) WriteHeader(code int) { 120 | 121 | c.response.WriteHeader(code) 122 | } 123 | 124 | func (c *Context) Query(name string) string { 125 | 126 | if c.query == nil { 127 | c.query = c.request.URL.Query() 128 | } 129 | return c.query.Get(name) 130 | } 131 | 132 | func (c *Context) Form(name string) string { 133 | 134 | return c.request.FormValue(name) 135 | } 136 | 137 | func (c *Context) JSON(code int, v interface{}) error { 138 | 
139 | data, err := json.Marshal(v) 140 | if err != nil { 141 | return err 142 | } 143 | c.response.Header().Set("Content-Type", "application/json; charset=utf-8") 144 | c.response.WriteHeader(code) 145 | if _, err := c.response.Write(data); err != nil { 146 | return err 147 | } 148 | return nil 149 | } 150 | 151 | func (c *Context) JSONP(code int, callback string, v interface{}) error { 152 | 153 | b, err := json.Marshal(v) 154 | if err != nil { 155 | return err 156 | } 157 | c.response.Header().Set("Content-Type", "application/javascript; charset=utf-8") 158 | c.response.WriteHeader(code) 159 | data := []byte(callback + "(") 160 | data = append(data, b...) 161 | data = append(data, []byte(");")...) 162 | if _, err := c.response.Write(data); err != nil { 163 | return err 164 | } 165 | return nil 166 | } 167 | -------------------------------------------------------------------------------- /cluster/storage/node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import "github.com/boltdb/bolt" 4 | import "github.com/humpback/humpback-center/cluster/types" 5 | import "github.com/humpback/humpback-center/cluster/storage/dao" 6 | import "github.com/humpback/humpback-center/cluster/storage/entry" 7 | 8 | import ( 9 | "strings" 10 | ) 11 | 12 | const ( 13 | // BucketName represents the name of the bucket where this stores data. 
14 | BucketName = "nodes" 15 | ) 16 | 17 | // NodeStorage is exported 18 | type NodeStorage struct { 19 | driver *bolt.DB 20 | } 21 | 22 | // NewNodeStorage is exported 23 | func NewNodeStorage(driver *bolt.DB) (*NodeStorage, error) { 24 | 25 | err := dao.CreateBucket(driver, BucketName) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | return &NodeStorage{ 31 | driver: driver, 32 | }, nil 33 | } 34 | 35 | // NodeByIP is exported 36 | func (nodeStorage *NodeStorage) NodeByIP(ip string) (*entry.Node, error) { 37 | 38 | var node entry.Node 39 | err := dao.GetObject(nodeStorage.driver, BucketName, []byte(ip), &node) 40 | if err != nil { 41 | return nil, err 42 | } 43 | return &node, nil 44 | } 45 | 46 | // NodeByID is exported 47 | func (nodeStorage *NodeStorage) NodeByID(id string) (*entry.Node, error) { 48 | 49 | var node *entry.Node 50 | err := nodeStorage.driver.View(func(tx *bolt.Tx) error { 51 | bucket := tx.Bucket([]byte(BucketName)) 52 | cursor := bucket.Cursor() 53 | for k, v := cursor.First(); k != nil; k, v = cursor.Next() { 54 | var value entry.Node 55 | err := dao.UnmarshalObject(v, &value) 56 | if err != nil { 57 | return err 58 | } 59 | if strings.ToUpper(value.ID) == strings.ToUpper(id) { 60 | node = &value 61 | break 62 | } 63 | } 64 | if node == nil { 65 | return dao.ErrStorageObjectNotFound 66 | } 67 | return nil 68 | }) 69 | return node, err 70 | } 71 | 72 | // NodeByName is exported 73 | func (nodeStorage *NodeStorage) NodeByName(name string) (*entry.Node, error) { 74 | 75 | var node *entry.Node 76 | err := nodeStorage.driver.View(func(tx *bolt.Tx) error { 77 | bucket := tx.Bucket([]byte(BucketName)) 78 | cursor := bucket.Cursor() 79 | for k, v := cursor.First(); k != nil; k, v = cursor.Next() { 80 | var value entry.Node 81 | err := dao.UnmarshalObject(v, &value) 82 | if err != nil { 83 | return err 84 | } 85 | if strings.ToUpper(value.Name) == strings.ToUpper(name) { 86 | node = &value 87 | break 88 | } 89 | } 90 | if node == nil { 91 | 
return dao.ErrStorageObjectNotFound 92 | } 93 | return nil 94 | }) 95 | return node, err 96 | } 97 | 98 | // SetNodeData set a node entry. 99 | func (nodeStorage *NodeStorage) SetNodeData(nodeData *types.NodeData) error { 100 | 101 | var node *entry.Node 102 | node, _ = nodeStorage.NodeByIP(nodeData.IP) 103 | if node == nil { 104 | node = &entry.Node{ 105 | NodeLabels: map[string]string{}, 106 | Availability: "Active", 107 | } 108 | } 109 | 110 | node.NodeData = nodeData 111 | return nodeStorage.driver.Update(func(tx *bolt.Tx) error { 112 | bucket := tx.Bucket([]byte(BucketName)) 113 | data, err := dao.MarshalObject(node) 114 | if err != nil { 115 | return err 116 | } 117 | return bucket.Put([]byte(node.IP), data) 118 | }) 119 | } 120 | 121 | // SetNodeLabels set a node labels. 122 | func (nodeStorage *NodeStorage) SetNodeLabels(ip string, labels map[string]string) error { 123 | 124 | var node *entry.Node 125 | node, err := nodeStorage.NodeByIP(ip) 126 | if err != nil { 127 | return err 128 | } 129 | 130 | node.NodeLabels = labels 131 | return nodeStorage.driver.Update(func(tx *bolt.Tx) error { 132 | bucket := tx.Bucket([]byte(BucketName)) 133 | data, err := dao.MarshalObject(node) 134 | if err != nil { 135 | return err 136 | } 137 | return bucket.Put([]byte(node.IP), data) 138 | }) 139 | } 140 | 141 | // DeleteNode deletes a node entry. 
142 | func (nodeStorage *NodeStorage) DeleteNode(ip string) error { 143 | 144 | return dao.DeleteObject(nodeStorage.driver, BucketName, []byte(ip)) 145 | } 146 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/boltdb/boltdb_test.go: -------------------------------------------------------------------------------- 1 | package boltdb 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | "time" 7 | 8 | "github.com/docker/libkv" 9 | "github.com/docker/libkv/store" 10 | "github.com/docker/libkv/testutils" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func makeBoltDBClient(t *testing.T) store.Store { 15 | kv, err := New([]string{"/tmp/not_exist_dir/__boltdbtest"}, &store.Config{Bucket: "boltDBTest"}) 16 | 17 | if err != nil { 18 | t.Fatalf("cannot create store: %v", err) 19 | } 20 | 21 | return kv 22 | } 23 | 24 | func TestRegister(t *testing.T) { 25 | Register() 26 | 27 | kv, err := libkv.NewStore( 28 | store.BOLTDB, 29 | []string{"/tmp/not_exist_dir/__boltdbtest"}, 30 | &store.Config{Bucket: "boltDBTest"}, 31 | ) 32 | assert.NoError(t, err) 33 | assert.NotNil(t, kv) 34 | 35 | if _, ok := kv.(*BoltDB); !ok { 36 | t.Fatal("Error registering and initializing boltDB") 37 | } 38 | 39 | _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") 40 | } 41 | 42 | // TestMultiplePersistConnection tests the second connection to a 43 | // BoltDB fails when one is already open with PersistConnection flag 44 | func TestMultiplePersistConnection(t *testing.T) { 45 | kv, err := libkv.NewStore( 46 | store.BOLTDB, 47 | []string{"/tmp/not_exist_dir/__boltdbtest"}, 48 | &store.Config{ 49 | Bucket: "boltDBTest", 50 | ConnectionTimeout: 1 * time.Second, 51 | PersistConnection: true}, 52 | ) 53 | assert.NoError(t, err) 54 | assert.NotNil(t, kv) 55 | 56 | if _, ok := kv.(*BoltDB); !ok { 57 | t.Fatal("Error registering and initializing boltDB") 58 | } 59 | 60 | // Must fail if multiple boltdb requests are made with a valid timeout 
61 | kv, err = libkv.NewStore( 62 | store.BOLTDB, 63 | []string{"/tmp/not_exist_dir/__boltdbtest"}, 64 | &store.Config{ 65 | Bucket: "boltDBTest", 66 | ConnectionTimeout: 1 * time.Second, 67 | PersistConnection: true}, 68 | ) 69 | assert.Error(t, err) 70 | 71 | _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") 72 | } 73 | 74 | // TestConcurrentConnection tests simultaenous get/put using 75 | // two handles. 76 | func TestConcurrentConnection(t *testing.T) { 77 | var err error 78 | kv1, err1 := libkv.NewStore( 79 | store.BOLTDB, 80 | []string{"/tmp/__boltdbtest"}, 81 | &store.Config{ 82 | Bucket: "boltDBTest", 83 | ConnectionTimeout: 1 * time.Second}, 84 | ) 85 | assert.NoError(t, err1) 86 | assert.NotNil(t, kv1) 87 | 88 | kv2, err2 := libkv.NewStore( 89 | store.BOLTDB, 90 | []string{"/tmp/__boltdbtest"}, 91 | &store.Config{Bucket: "boltDBTest", 92 | ConnectionTimeout: 1 * time.Second}, 93 | ) 94 | assert.NoError(t, err2) 95 | assert.NotNil(t, kv2) 96 | 97 | key1 := "TestKV1" 98 | value1 := []byte("TestVal1") 99 | err = kv1.Put(key1, value1, nil) 100 | assert.NoError(t, err) 101 | 102 | key2 := "TestKV2" 103 | value2 := []byte("TestVal2") 104 | err = kv2.Put(key2, value2, nil) 105 | assert.NoError(t, err) 106 | 107 | pair1, err1 := kv1.Get(key1) 108 | assert.NoError(t, err) 109 | if assert.NotNil(t, pair1) { 110 | assert.NotNil(t, pair1.Value) 111 | } 112 | assert.Equal(t, pair1.Value, value1) 113 | 114 | pair2, err2 := kv2.Get(key2) 115 | assert.NoError(t, err) 116 | if assert.NotNil(t, pair2) { 117 | assert.NotNil(t, pair2.Value) 118 | } 119 | assert.Equal(t, pair2.Value, value2) 120 | 121 | // AtomicPut using kv1 and kv2 should succeed 122 | _, _, err = kv1.AtomicPut(key1, []byte("TestnewVal1"), pair1, nil) 123 | assert.NoError(t, err) 124 | 125 | _, _, err = kv2.AtomicPut(key2, []byte("TestnewVal2"), pair2, nil) 126 | assert.NoError(t, err) 127 | 128 | testutils.RunTestCommon(t, kv1) 129 | testutils.RunTestCommon(t, kv2) 130 | 131 | kv1.Close() 132 | kv2.Close() 
133 | 134 | _ = os.Remove("/tmp/__boltdbtest") 135 | } 136 | 137 | func TestBoldDBStore(t *testing.T) { 138 | kv := makeBoltDBClient(t) 139 | 140 | testutils.RunTestCommon(t, kv) 141 | testutils.RunTestAtomic(t, kv) 142 | 143 | _ = os.Remove("/tmp/not_exist_dir/__boltdbtest") 144 | } 145 | -------------------------------------------------------------------------------- /cluster/hooks.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/common/models" 4 | import "github.com/humpback/gounits/container" 5 | import "github.com/humpback/gounits/httpx" 6 | import "github.com/humpback/gounits/logger" 7 | 8 | import ( 9 | "context" 10 | "net" 11 | "net/http" 12 | "strings" 13 | "time" 14 | ) 15 | 16 | // HookEvent is exported 17 | type HookEvent int 18 | 19 | const ( 20 | CreateMetaEvent HookEvent = iota + 1 21 | RemoveMetaEvent 22 | OperateMetaEvent 23 | UpdateMetaEvent 24 | UpgradeMetaEvent 25 | MigrateMetaEvent 26 | RecoveryMetaEvent 27 | ) 28 | 29 | func (event HookEvent) String() string { 30 | 31 | switch event { 32 | case CreateMetaEvent: 33 | return "CreateMetaEvent" 34 | case RemoveMetaEvent: 35 | return "RemoveMetaEvent" 36 | case OperateMetaEvent: 37 | return "OperateMetaEvent" 38 | case UpdateMetaEvent: 39 | return "UpdateMetaEvent" 40 | case UpgradeMetaEvent: 41 | return "UpgradeMetaEvent" 42 | case MigrateMetaEvent: 43 | return "MigrateMetaEvent" 44 | case RecoveryMetaEvent: 45 | return "RecoveryMetaEvent" 46 | } 47 | return "" 48 | } 49 | 50 | // HookContainer is exported 51 | type HookContainer struct { 52 | IP string `json:"IP"` 53 | Name string `json:"Name"` 54 | Container models.Container `json:"Container"` 55 | } 56 | 57 | // HookContainers is exported 58 | type HookContainers []*HookContainer 59 | 60 | // Hook is exported 61 | type Hook struct { 62 | Timestamp int64 `json:"Timestamp"` 63 | Event string `json:"Event"` 64 | MetaBase MetaBase `json:"MetaBase"` 65 | 
HookContainers 66 | client *httpx.HttpClient 67 | } 68 | 69 | // Submit is exported 70 | func (hook *Hook) Submit() { 71 | 72 | webHooks := hook.MetaBase.WebHooks 73 | for _, webHook := range webHooks { 74 | headers := map[string][]string{} 75 | secretToken := strings.TrimSpace(webHook.SecretToken) 76 | if secretToken != "" { 77 | headers["X-Humpback-Token"] = []string{secretToken} 78 | } 79 | hookURL := strings.TrimSpace(webHook.URL) 80 | respWebHook, err := hook.client.PostJSON(context.Background(), hookURL, nil, hook, headers) 81 | if err != nil { 82 | logger.ERROR("[#cluster#] webhook %s post %s to %s, http error:%s", hook.Event, hook.MetaBase.MetaID, hookURL, err.Error()) 83 | continue 84 | } 85 | if respWebHook.StatusCode() >= http.StatusBadRequest { 86 | logger.ERROR("[#cluster#] webhook %s post %s to %s, http code %d", hook.Event, hook.MetaBase.MetaID, hookURL, respWebHook.StatusCode()) 87 | } 88 | respWebHook.Close() 89 | } 90 | } 91 | 92 | // HooksProcessor is exported 93 | type HooksProcessor struct { 94 | bStart bool 95 | client *httpx.HttpClient 96 | hooksQueue *container.SyncQueue 97 | stopCh chan struct{} 98 | } 99 | 100 | // NewHooksProcessor is exported 101 | func NewHooksProcessor() *HooksProcessor { 102 | 103 | client := httpx.NewClient(). 
104 | SetTransport(&http.Transport{ 105 | Proxy: http.ProxyFromEnvironment, 106 | DialContext: (&net.Dialer{ 107 | Timeout: 45 * time.Second, 108 | KeepAlive: 90 * time.Second, 109 | }).DialContext, 110 | DisableKeepAlives: false, 111 | MaxIdleConns: 50, 112 | MaxIdleConnsPerHost: 65, 113 | IdleConnTimeout: 90 * time.Second, 114 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 115 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 116 | }) 117 | 118 | return &HooksProcessor{ 119 | bStart: false, 120 | client: client, 121 | hooksQueue: container.NewSyncQueue(), 122 | } 123 | } 124 | 125 | // SubmitHook is exported 126 | func (processor *HooksProcessor) SubmitHook(metaBase MetaBase, hookContainers HookContainers, hookEvent HookEvent) { 127 | 128 | hook := &Hook{ 129 | client: processor.client, 130 | Timestamp: time.Now().UnixNano(), 131 | Event: hookEvent.String(), 132 | MetaBase: metaBase, 133 | HookContainers: hookContainers, 134 | } 135 | processor.hooksQueue.Push(hook) 136 | } 137 | 138 | func (processor *HooksProcessor) Start() { 139 | 140 | if !processor.bStart { 141 | processor.bStart = true 142 | go processor.eventPopLoop() 143 | } 144 | } 145 | 146 | func (processor *HooksProcessor) Close() { 147 | 148 | if processor.bStart { 149 | processor.hooksQueue.Close() 150 | processor.bStart = false 151 | } 152 | } 153 | 154 | func (processor *HooksProcessor) eventPopLoop() { 155 | 156 | for processor.bStart { 157 | value := processor.hooksQueue.Pop() 158 | if value != nil { 159 | hook := value.(*Hook) 160 | go hook.Submit() 161 | } 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/docs/examples.md: -------------------------------------------------------------------------------- 1 | #Examples 2 | 3 | This document contains useful example of usage for `libkv`. 
It might not be complete but provides general information on how to use the client. 4 | 5 | ##Create a store and use Put/Get/Delete 6 | 7 | ```go 8 | package main 9 | 10 | import ( 11 | "fmt" 12 | "time" 13 | "log" 14 | 15 | "github.com/docker/libkv" 16 | "github.com/docker/libkv/store" 17 | "github.com/docker/libkv/store/consul" 18 | ) 19 | 20 | func init() { 21 | // Register consul store to libkv 22 | consul.Register() 23 | 24 | // We can register as many backends as are supported by libkv; each backend package (store/etcd, store/zookeeper, store/boltdb) must also be imported 25 | etcd.Register() 26 | zookeeper.Register() 27 | boltdb.Register() 28 | } 29 | 30 | func main() { 31 | client := "localhost:8500" 32 | 33 | // Initialize a new store with consul 34 | kv, err := libkv.NewStore( 35 | store.CONSUL, // or "consul" 36 | []string{client}, 37 | &store.Config{ 38 | ConnectionTimeout: 10*time.Second, 39 | }, 40 | ) 41 | if err != nil { 42 | log.Fatal("Cannot create store consul") 43 | } 44 | 45 | key := "foo" 46 | err = kv.Put(key, []byte("bar"), nil) 47 | if err != nil { 48 | fmt.Errorf("Error trying to put value at key: %v", key) 49 | } 50 | 51 | pair, err := kv.Get(key) 52 | if err != nil { 53 | fmt.Errorf("Error trying to access value at key: %v", key) 54 | } 55 | 56 | err = kv.Delete(key) 57 | if err != nil { 58 | fmt.Errorf("Error trying to delete key %v", key) 59 | } 60 | 61 | log.Print("value: ", string(pair.Value)) 62 | } 63 | ``` 64 | 65 | ##List keys 66 | 67 | ```go 68 | // List will list all the keys under `key` if it contains a set of child keys/values 69 | entries, err := kv.List(key) 70 | for _, pair := range entries { 71 | fmt.Printf("key=%v - value=%v", pair.Key, string(pair.Value)) 72 | } 73 | 74 | ``` 75 | 76 | ##Watching for events on a single key (Watch) 77 | 78 | You can use watches to watch modifications on a key. First you need to check if the key exists. If this is not the case, we need to create it using the `Put` function.
79 | 80 | ```go 81 | // Checking on the key before watching 82 | if !kv.Exists(key) { 83 | err := kv.Put(key, []byte("bar"), nil) 84 | if err != nil { 85 | fmt.Errorf("Something went wrong when initializing key %v", key) 86 | } 87 | } 88 | 89 | stopCh := make(<-chan struct{}) 90 | events, err := kv.Watch(key, stopCh) 91 | 92 | select { 93 | case pair := <-events: 94 | // Do something with events 95 | fmt.Printf("value changed on key %v: new value=%v", key, pair.Value) 96 | } 97 | 98 | ``` 99 | 100 | ##Watching for events happening on child keys (WatchTree) 101 | 102 | You can use watches to watch modifications on a key. First you need to check if the key exists. If this is not the case, we need to create it using the `Put` function. There is a special step here though if you want your code to work across backends. Because `etcd` is a special case and it makes the distinction between directories and keys, we need to make sure that the created key is considered as a directory by enforcing `IsDir` at `true`. 103 | 104 | ```go 105 | // Checking on the key before watching 106 | if !kv.Exists(key) { 107 | // Don't forget IsDir:true if the code is used cross-backend 108 | err := kv.Put(key, []byte("bar"), &store.WriteOptions{IsDir:true}) 109 | if err != nil { 110 | fmt.Errorf("Something went wrong when initializing key %v", key) 111 | } 112 | } 113 | 114 | stopCh := make(<-chan struct{}) 115 | events, err := kv.WatchTree(key, stopCh) 116 | 117 | select { 118 | case pairs := <-events: 119 | // Do something with events 120 | for _, pair := range pairs { 121 | fmt.Printf("value changed on key %v: new value=%v", key, pair.Value) 122 | } 123 | } 124 | 125 | ``` 126 | 127 | ## Distributed Locking, using Lock/Unlock 128 | 129 | ```go 130 | key := "lockKey" 131 | value := []byte("bar") 132 | 133 | // Initialize a distributed lock. 
TTL is optional, it is here to make sure that 134 | // the lock is released after the program that is holding the lock ends or crashes 135 | lock, err := kv.NewLock(key, &store.LockOptions{Value: value, TTL: 2 * time.Second}) 136 | if err != nil { 137 | fmt.Errorf("something went wrong when trying to initialize the Lock") 138 | } 139 | 140 | // Try to lock the key, the call to Lock() is blocking 141 | _, err := lock.Lock(nil) 142 | if err != nil { 143 | fmt.Errorf("something went wrong when trying to lock key %v", key) 144 | } 145 | 146 | // Get should work because we are holding the key 147 | pair, err := kv.Get(key) 148 | if err != nil { 149 | fmt.Errorf("key %v has value %v", key, pair.Value) 150 | } 151 | 152 | // Unlock the key 153 | err = lock.Unlock() 154 | if err != nil { 155 | fmt.Errorf("something went wrong when trying to unlock key %v", key) 156 | } 157 | ``` -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/docs/compatibility.md: -------------------------------------------------------------------------------- 1 | #Cross-Backend Compatibility 2 | 3 | The value of `libkv` is not to duplicate the code for programs that should support multiple distributed K/V stores like the classic `Consul`/`etcd`/`zookeeper` trio. 4 | 5 | This document provides with general guidelines for users willing to support those backends with the same code using `libkv`. 6 | 7 | Please note that most of those workarounds are going to disappear in the future with `etcd` APIv3. 8 | 9 | ##Etcd directory/key distinction 10 | 11 | `etcd` with APIv2 makes the distinction between keys and directories. 
The result with `libkv` is that when using the etcd driver: 12 | 13 | - You cannot store values on directories 14 | - You cannot invoke `WatchTree` (watching on child values), on a regular key 15 | 16 | This is fundamentally different than `Consul` and `zookeeper` which are more permissive and allow the same set of operations on keys and directories (called a Node for zookeeper). 17 | 18 | APIv3 is in the works for `etcd`, which removes this key/directory distinction, but until then you should follow these workarounds to make your `libkv` code work across backends. 19 | 20 | ###Put 21 | 22 | `etcd` cannot put values on directories, so this puts a major restriction compared to `Consul` and `zookeeper`. 23 | 24 | If you want to support all those three backends, you should make sure to only put data on **leaves**. 25 | 26 | For example: 27 | 28 | ```go 29 | _ = kv.Put("path/to/key/bis", []byte("foo"), nil) 30 | _ = kv.Put("path/to/key", []byte("bar"), nil) 31 | ``` 32 | 33 | Will work on `Consul` and `zookeeper` but fail for `etcd`. This is because the first `Put` in the case of `etcd` will recursively create the directory hierarchy and `path/to/key` is now considered as a directory. Thus, values should always be stored on leaves if the support for the three backends is planned. 34 | 35 | ###WatchTree 36 | 37 | When initializing the `WatchTree`, the natural way to do so is through the following code: 38 | 39 | ```go 40 | key := "path/to/key" 41 | if !kv.Exists(key) { 42 | err := kv.Put(key, []byte("data"), nil) 43 | } 44 | events, err := kv.WatchTree(key, nil) 45 | ``` 46 | 47 | The code above will not work across backends and etcd will fail on the `WatchTree` call. What happens exactly: 48 | 49 | - `Consul` will create a regular `key` because it has no distinction between directories and keys. This is not an issue as we can invoke `WatchTree` on regular keys.
50 | - `zookeeper` is going to create a `node` that can either be a directory or a key during the lifetime of a program but it does not matter as a directory can hold values and be watchable like a regular key. 51 | - `etcd` is going to create a regular `key`. We cannot invoke `WatchTree` on regular keys using etcd. 52 | 53 | To be cross-compatible between those three backends for `WatchTree`, we need to enforce a parameter that is only interpreted with `etcd` and which tells the client to create a `directory` instead of a key. 54 | 55 | ```go 56 | key := "path/to/key" 57 | if !kv.Exists(key) { 58 | // We enforce IsDir = true to make sure etcd creates a directory 59 | err := kv.Put(key, []byte("data"), &store.WriteOptions{IsDir:true}) 60 | } 61 | events, err := kv.WatchTree(key, nil) 62 | ``` 63 | 64 | The code above will work for the three backends but make sure to not try to store any value at that path as the call to `Put` will fail for `etcd` (you can only put at `path/to/key/foo`, `path/to/key/bar` for example). 65 | 66 | ##Etcd distributed locking 67 | 68 | There are no `Lock` mechanisms baked into the `coreos/etcd/client` for now. Instead, `libkv` has its own implementation of a `Lock` on top of `etcd`. 69 | 70 | The general workflow for the `Lock` is as follows: 71 | 72 | - Call Lock concurrently on a `key` between threads/programs 73 | - Only one will create that key, others are going to fail because the key has already been created 74 | - The thread locking the key can get the right index to set the value of the key using Compare And Swap and effectively Lock and hold the key 75 | - Other threads are given a wrong index to fail the Compare and Swap and block until the key has been released by the thread holding the Lock 76 | - Lock seekers are setting up a Watch listening on that key and events happening on the key 77 | - When the thread/program stops holding the lock, it deletes the key triggering a `delete` event that will notify all the other threads.
In case the program crashes, the key has a TTL attached that will send an `expire` event when this TTL expires. 78 | - Once everyone is notified, back to the first step. First come, first served with the Lock. 79 | 80 | The whole Lock process is highly dependent on the `delete`/`expire` events of `etcd`. So don't expect the key to be still there once the Lock is released. 81 | 82 | For example if the whole logic is to `Lock` a key and expect the value to still be there after it has been unlocked, it is not going to be cross-backend compatible with `Consul` and `zookeeper`. On the other end the `etcd` Lock can still be used to do Leader Election for example and still be cross-compatible with other backends. -------------------------------------------------------------------------------- /cluster/utils.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/humpback-center/cluster/storage/entry" 4 | import "github.com/humpback/humpback-center/cluster/storage/node" 5 | import "github.com/humpback/humpback-center/cluster/types" 6 | import "github.com/humpback/discovery/backends" 7 | import "github.com/humpback/gounits/json" 8 | 9 | import ( 10 | "fmt" 11 | "net" 12 | "sort" 13 | "strings" 14 | ) 15 | 16 | //ParseServer is exported 17 | func ParseServer(ipOrName string) Server { 18 | 19 | server := Server{} 20 | ip := net.ParseIP(ipOrName) 21 | if ip != nil { 22 | server.IP = ipOrName 23 | } else { 24 | server.Name = ipOrName 25 | } 26 | return server 27 | } 28 | 29 | func getImageTag(imageName string) string { 30 | 31 | imageTag := "latest" 32 | values := strings.SplitN(imageName, ":", 2) 33 | if len(values) == 2 { 34 | imageTag = values[1] 35 | } 36 | return imageTag 37 | } 38 | 39 | // searchServerOfEngines is exported 40 | func searchServerOfEngines(server Server, engines map[string]*Engine) *Engine { 41 | 42 | //priority ip 43 | if server.IP != "" { 44 | if engine, ret := 
engines[server.IP]; ret { 45 | return engine 46 | } 47 | } else if server.Name != "" { 48 | for _, engine := range engines { 49 | if server.Name == engine.Name { 50 | return engine 51 | } 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | func searchServerOfStorage(server Server, nodeStorage *node.NodeStorage) *Engine { 58 | 59 | var node *entry.Node 60 | if server.IP != "" { 61 | node, _ = nodeStorage.NodeByIP(server.IP) 62 | } else if server.Name != "" { 63 | node, _ = nodeStorage.NodeByName(server.Name) 64 | } 65 | 66 | if node != nil { 67 | engine := &Engine{} 68 | engine.Update(node.NodeData) 69 | engine.NodeLabels = node.NodeLabels 70 | engine.AvailabilityText = node.Availability 71 | return engine 72 | } 73 | return nil 74 | } 75 | 76 | // selectIPOrName is exported 77 | func selectIPOrName(ip string, name string) string { 78 | 79 | if ip != "" { 80 | return ip 81 | } 82 | return name 83 | } 84 | 85 | // compareAddServers is exported 86 | func compareAddServers(nodeCache *types.NodeCache, originServer Server, newServer Server) bool { 87 | 88 | nodeData1 := nodeCache.Get(selectIPOrName(originServer.IP, originServer.Name)) 89 | nodeData2 := nodeCache.Get(selectIPOrName(newServer.IP, newServer.Name)) 90 | if nodeData1 != nil && nodeData2 != nil { 91 | if nodeData1 == nodeData2 { 92 | return true 93 | } 94 | } 95 | if nodeData2 == nil { 96 | return true 97 | } 98 | return false 99 | } 100 | 101 | // compareRemoveServers is exported 102 | func compareRemoveServers(nodeCache *types.NodeCache, originServer Server, newServer Server) bool { 103 | 104 | nodeData1 := nodeCache.Get(selectIPOrName(originServer.IP, originServer.Name)) 105 | nodeData2 := nodeCache.Get(selectIPOrName(newServer.IP, newServer.Name)) 106 | if nodeData1 == nil && nodeData2 == nil { 107 | return true 108 | } 109 | if nodeData1 == nil { 110 | return true 111 | } 112 | if nodeData1 == nodeData2 { 113 | return true 114 | } 115 | return false 116 | } 117 | 118 | type rdEngines []*Engine 119 | 120 | func 
(engines rdEngines) Len() int { 121 | 122 | return len(engines) 123 | } 124 | 125 | func (engines rdEngines) Swap(i, j int) { 126 | 127 | engines[i], engines[j] = engines[j], engines[i] 128 | } 129 | 130 | func (engines rdEngines) Less(i, j int) bool { 131 | 132 | return engines[i].IP < engines[j].IP 133 | } 134 | 135 | // removeDuplicatesEngines is exported 136 | func removeDuplicatesEngines(engines []*Engine) []*Engine { 137 | 138 | out := []*Engine{} 139 | pEngines := rdEngines(engines) 140 | sort.Sort(pEngines) 141 | nLen := len(pEngines) 142 | for i := 0; i < nLen; i++ { 143 | if i > 0 && pEngines[i-1].IP == pEngines[i].IP { 144 | continue 145 | } 146 | out = append(out, pEngines[i]) 147 | } 148 | return out 149 | } 150 | 151 | type rdGroups []*Group 152 | 153 | func (groups rdGroups) Len() int { 154 | 155 | return len(groups) 156 | } 157 | 158 | func (groups rdGroups) Swap(i, j int) { 159 | 160 | groups[i], groups[j] = groups[j], groups[i] 161 | } 162 | 163 | func (groups rdGroups) Less(i, j int) bool { 164 | 165 | return groups[i].ID < groups[j].ID 166 | } 167 | 168 | func removeDuplicatesGroups(groups []*Group) []*Group { 169 | 170 | out := []*Group{} 171 | pGroups := rdGroups(groups) 172 | sort.Sort(pGroups) 173 | nLen := len(pGroups) 174 | for i := 0; i < nLen; i++ { 175 | if i > 0 && pGroups[i-1].ID == pGroups[i].ID { 176 | continue 177 | } 178 | out = append(out, pGroups[i]) 179 | } 180 | return out 181 | } 182 | 183 | func deCodeEntry(entry *backends.Entry) (*types.NodeData, error) { 184 | 185 | if entry == nil { 186 | return nil, fmt.Errorf("decode entry invalid") 187 | } 188 | 189 | nodeData := &types.NodeData{} 190 | err := json.DeCodeBufferToObject(entry.Data, nodeData) 191 | if err != nil { 192 | return nil, err 193 | } 194 | 195 | nodeData.Name = strings.ToUpper(nodeData.Name) 196 | return nodeData, nil 197 | } 198 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/store/store.go: 
-------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "time" 7 | ) 8 | 9 | // Backend represents a KV Store Backend 10 | type Backend string 11 | 12 | const ( 13 | // CONSUL backend 14 | CONSUL Backend = "consul" 15 | // ETCD backend 16 | ETCD Backend = "etcd" 17 | // ZK backend 18 | ZK Backend = "zk" 19 | // BOLTDB backend 20 | BOLTDB Backend = "boltdb" 21 | ) 22 | 23 | var ( 24 | // ErrBackendNotSupported is thrown when the backend k/v store is not supported by libkv 25 | ErrBackendNotSupported = errors.New("Backend storage not supported yet, please choose one of") 26 | // ErrCallNotSupported is thrown when a method is not implemented/supported by the current backend 27 | ErrCallNotSupported = errors.New("The current call is not supported with this backend") 28 | // ErrNotReachable is thrown when the API cannot be reached for issuing common store operations 29 | ErrNotReachable = errors.New("Api not reachable") 30 | // ErrCannotLock is thrown when there is an error acquiring a lock on a key 31 | ErrCannotLock = errors.New("Error acquiring the lock") 32 | // ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store 33 | ErrKeyModified = errors.New("Unable to complete atomic operation, key modified") 34 | // ErrKeyNotFound is thrown when the key is not found in the store during a Get operation 35 | ErrKeyNotFound = errors.New("Key not found in store") 36 | // ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation 37 | ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation") 38 | // ErrKeyExists is thrown when the previous value exists in the case of an AtomicPut 39 | ErrKeyExists = errors.New("Previous K/V pair exists, cannot complete Atomic operation") 40 | ) 41 | 42 | // Config contains the options for a storage client 43 | type Config 
struct { 44 | ClientTLS *ClientTLSConfig 45 | TLS *tls.Config 46 | ConnectionTimeout time.Duration 47 | Bucket string 48 | PersistConnection bool 49 | Username string 50 | Password string 51 | } 52 | 53 | // ClientTLSConfig contains data for a Client TLS configuration in the form 54 | // the etcd client wants it. Eventually we'll adapt it for ZK and Consul. 55 | type ClientTLSConfig struct { 56 | CertFile string 57 | KeyFile string 58 | CACertFile string 59 | } 60 | 61 | // Store represents the backend K/V storage 62 | // Each store should support every call listed 63 | // here. Or it couldn't be implemented as a K/V 64 | // backend for libkv 65 | type Store interface { 66 | // Put a value at the specified key 67 | Put(key string, value []byte, options *WriteOptions) error 68 | 69 | // Get a value given its key 70 | Get(key string) (*KVPair, error) 71 | 72 | // Delete the value at the specified key 73 | Delete(key string) error 74 | 75 | // Verify if a Key exists in the store 76 | Exists(key string) (bool, error) 77 | 78 | // Watch for changes on a key 79 | Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) 80 | 81 | // WatchTree watches for changes on child nodes under 82 | // a given directory 83 | WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) 84 | 85 | // NewLock creates a lock for a given key. 86 | // The returned Locker is not held and must be acquired 87 | // with `.Lock`. The Value is optional. 88 | NewLock(key string, options *LockOptions) (Locker, error) 89 | 90 | // List the content of a given prefix 91 | List(directory string) ([]*KVPair, error) 92 | 93 | // DeleteTree deletes a range of keys under a given directory 94 | DeleteTree(directory string) error 95 | 96 | // Atomic CAS operation on a single value. 97 | // Pass previous = nil to create a new key. 
98 | AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) 99 | 100 | // Atomic delete of a single value 101 | AtomicDelete(key string, previous *KVPair) (bool, error) 102 | 103 | // Close the store connection 104 | Close() 105 | } 106 | 107 | // KVPair represents {Key, Value, Lastindex} tuple 108 | type KVPair struct { 109 | Key string 110 | Value []byte 111 | LastIndex uint64 112 | } 113 | 114 | // WriteOptions contains optional request parameters 115 | type WriteOptions struct { 116 | IsDir bool 117 | TTL time.Duration 118 | } 119 | 120 | // LockOptions contains optional request parameters 121 | type LockOptions struct { 122 | Value []byte // Optional, value to associate with the lock 123 | TTL time.Duration // Optional, expiration ttl associated with the lock 124 | RenewLock chan struct{} // Optional, chan used to control and stop the session ttl renewal for the lock 125 | } 126 | 127 | // Locker provides locking mechanism on top of the store. 128 | // Similar to `sync.Lock` except it may return errors. 
129 | type Locker interface { 130 | Lock(stopChan chan struct{}) (<-chan struct{}, error) 131 | Unlock() error 132 | } 133 | -------------------------------------------------------------------------------- /cluster/enginespool.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/gounits/logger" 4 | 5 | import ( 6 | "sync" 7 | "time" 8 | ) 9 | 10 | // EnginesPool is exported 11 | type EnginesPool struct { 12 | sync.RWMutex 13 | Cluster *Cluster 14 | poolEngines map[string]*Engine 15 | pendEngines map[string]*Engine 16 | stopCh chan struct{} 17 | } 18 | 19 | // NewEnginesPool is exported 20 | func NewEnginesPool() *EnginesPool { 21 | 22 | pool := &EnginesPool{ 23 | poolEngines: make(map[string]*Engine), 24 | pendEngines: make(map[string]*Engine), 25 | stopCh: make(chan struct{}), 26 | } 27 | go pool.doLoop() 28 | return pool 29 | } 30 | 31 | // SetCluster is exported 32 | func (pool *EnginesPool) SetCluster(cluster *Cluster) { 33 | 34 | pool.Cluster = cluster 35 | } 36 | 37 | // Release is exported 38 | func (pool *EnginesPool) Release() { 39 | 40 | close(pool.stopCh) 41 | pool.Lock() 42 | for _, engine := range pool.pendEngines { 43 | delete(pool.pendEngines, engine.IP) 44 | } 45 | for _, engine := range pool.poolEngines { 46 | delete(pool.poolEngines, engine.IP) 47 | } 48 | pool.Unlock() 49 | } 50 | 51 | // InitEngineNodeLabels is exported 52 | func (pool *EnginesPool) InitEngineNodeLabels(engine *Engine) { 53 | 54 | node, _ := pool.Cluster.storageDriver.NodeStorage.NodeByIP(engine.IP) 55 | if node != nil { 56 | engine.SetNodeLabelsPairs(node.NodeLabels) 57 | } 58 | } 59 | 60 | // AddEngine is exported 61 | func (pool *EnginesPool) AddEngine(ip string, name string) { 62 | 63 | ipOrName := selectIPOrName(ip, name) 64 | nodeData := pool.Cluster.nodeCache.Get(ipOrName) 65 | if nodeData == nil { 66 | return 67 | } 68 | 69 | 
pool.Cluster.storageDriver.NodeStorage.SetNodeData(nodeData) 70 | engine := pool.Cluster.GetEngine(nodeData.IP) 71 | if engine != nil { 72 | pool.InitEngineNodeLabels(engine) 73 | engine.Update(nodeData) 74 | return 75 | } 76 | 77 | if ret := pool.Cluster.InGroupsContains(nodeData.IP, nodeData.Name); !ret { 78 | return 79 | } 80 | 81 | pool.Lock() 82 | defer pool.Unlock() 83 | if pendEngine, ret := pool.pendEngines[nodeData.IP]; ret { 84 | pool.InitEngineNodeLabels(pendEngine) 85 | if pendEngine.IsHealthy() { 86 | delete(pool.pendEngines, pendEngine.IP) 87 | pendEngine.Update(nodeData) 88 | pool.Cluster.Lock() 89 | pool.Cluster.engines[pendEngine.IP] = pendEngine 90 | pool.Cluster.Unlock() 91 | logger.INFO("[#cluster#] addengine, pool engine reused %s %s %s.", pendEngine.IP, pendEngine.Name, pendEngine.State()) 92 | } else { 93 | logger.INFO("[#cluster#] addengine, pool pending engine %s %s %s is already.", pendEngine.IP, pendEngine.Name, pendEngine.State()) 94 | } 95 | return 96 | } 97 | 98 | poolEngine, ret := pool.poolEngines[nodeData.IP] 99 | if ret { 100 | poolEngine.Update(nodeData) 101 | poolEngine.SetState(StatePending) 102 | logger.INFO("[#cluster#] addengine, pool engine reused %s %s %s.", poolEngine.IP, poolEngine.Name, poolEngine.State()) 103 | } else { 104 | var err error 105 | poolEngine, err = NewEngine(nodeData, pool.Cluster.overcommitRatio, pool.Cluster.removeDelay, pool.Cluster.configCache) 106 | if err != nil { 107 | return 108 | } 109 | pool.poolEngines[poolEngine.IP] = poolEngine 110 | logger.INFO("[#cluster#] addengine, pool engine create %s %s %s.", poolEngine.IP, poolEngine.Name, poolEngine.State()) 111 | } 112 | pool.InitEngineNodeLabels(poolEngine) 113 | pool.pendEngines[poolEngine.IP] = poolEngine 114 | } 115 | 116 | // RemoveEngine is exported 117 | func (pool *EnginesPool) RemoveEngine(ip string, name string) { 118 | 119 | ipOrName := selectIPOrName(ip, name) 120 | nodeData := pool.Cluster.nodeCache.Get(ipOrName) 121 | if nodeData == 
nil { 122 | return 123 | } 124 | 125 | pool.Lock() 126 | if engine := pool.Cluster.GetEngine(nodeData.IP); engine != nil { 127 | pool.Cluster.Lock() 128 | delete(pool.Cluster.engines, engine.IP) 129 | pool.Cluster.Unlock() 130 | pool.pendEngines[engine.IP] = engine 131 | } 132 | pool.Unlock() 133 | } 134 | 135 | func (pool *EnginesPool) doLoop() { 136 | 137 | // Refresh pending engines on a fixed 2s cadence; create the ticker once for the loop's lifetime instead of allocating a new one per iteration. 138 | ticker := time.NewTicker(2 * time.Second) 139 | defer ticker.Stop() 140 | for { 141 | select { 142 | case <-ticker.C: 143 | { 144 | pool.Lock() 145 | wgroup := sync.WaitGroup{} 146 | for _, pendEngine := range pool.pendEngines { 147 | if pendEngine.IsPending() { 148 | wgroup.Add(1) 149 | go func(engine *Engine) { 150 | if err := engine.RefreshContainers(); err == nil { 151 | engine.Open() 152 | pool.Cluster.migtatorCache.Cancel(engine) 153 | pool.Cluster.Lock() 154 | pool.Cluster.engines[engine.IP] = engine 155 | pool.Cluster.Unlock() 156 | logger.INFO("[#cluster#] engine %s %s %s", engine.IP, engine.Name, engine.State()) 157 | } 158 | wgroup.Done() 159 | }(pendEngine) 160 | } else if pendEngine.IsHealthy() { 161 | wgroup.Add(1) 162 | go func(engine *Engine) { 163 | pool.Cluster.migtatorCache.Start(engine) 164 | engine.Close() 165 | logger.INFO("[#cluster#] engine %s %s %s", engine.IP, engine.Name, engine.State()) 166 | wgroup.Done() 167 | }(pendEngine) 168 | } 169 | } 170 | wgroup.Wait() 171 | for _, pendEngine := range pool.pendEngines { 172 | delete(pool.pendEngines, pendEngine.IP) 173 | } 174 | pool.Unlock() 175 | } 176 | case <-pool.stopCh: 177 | { 178 | return 179 | } 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /cluster/constraint.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "regexp" 7 | "strings" 8 | ) 9 | 10 | const ( 11 | eq = iota 12 | noteq 13 | 14 | // NodeLabelsPrefix is the constraint key prefix for node labels.
15 | NodeLabelsPrefix = "node.labels." 16 | // EngineLabelsPrefix is the constraint key prefix for engine labels. 17 | EngineLabelsPrefix = "engine.labels." 18 | ) 19 | 20 | var ( 21 | alphaNumeric = regexp.MustCompile(`^(?i)[a-z_][a-z0-9\-_.]+$`) 22 | // value can be alphanumeric and some special characters. it shouldn't contain 23 | // current or future operators like '>, <, ~', etc. 24 | valuePattern = regexp.MustCompile(`^(?i)[a-z0-9:\-_\s\.\*\(\)\?\+\[\]\\\^\$\|\/]+$`) 25 | // operators defines the list of accepted operators 26 | operators = []string{"==", "!="} 27 | ) 28 | 29 | // Constraint defines a single parsed "key op value" constraint. 30 | type Constraint struct { 31 | key string // left-hand side, e.g. "node.ip" or "node.labels.<name>" 32 | operator int // index into operators: 0 is "==" (eq), 1 is "!=" (noteq) 33 | exp string // right-hand side expression to match against 34 | } 35 | 36 | // ParseConstraints parses a list of "key op value" expressions into Constraints; an entry with an invalid key, invalid value or no recognized operator yields an error. 37 | func ParseConstraints(constraints []string) ([]Constraint, error) { 38 | 39 | exprs := []Constraint{} 40 | for _, c := range constraints { 41 | found := false 42 | // each expr is in the form of "key op value" 43 | for i, op := range operators { 44 | if !strings.Contains(c, op) { 45 | continue 46 | } 47 | // split with the op 48 | parts := strings.SplitN(c, op, 2) 49 | 50 | if len(parts) < 2 { 51 | return nil, fmt.Errorf("invalid expr: %s", c) 52 | } 53 | 54 | part0 := strings.TrimSpace(parts[0]) 55 | // validate key 56 | matched := alphaNumeric.MatchString(part0) 57 | if matched == false { 58 | return nil, fmt.Errorf("key '%s' is invalid", part0) 59 | } 60 | 61 | part1 := strings.TrimSpace(parts[1]) 62 | 63 | // validate Value 64 | matched = valuePattern.MatchString(part1) 65 | if matched == false { 66 | return nil, fmt.Errorf("value '%s' is invalid", part1) 67 | } 68 | // TODO(dongluochen): revisit requirements to see if globing or regex are useful 69 | exprs = append(exprs, Constraint{key: part0, operator: i, exp: part1}) 70 | 71 | found = true 72 | break // found an op, move to next entry 73 | } 74 | if !found { 75 | return nil, fmt.Errorf("constraint expected one operator from %s",
strings.Join(operators, ", ")) 76 | } 77 | } 78 | return exprs, nil 79 | } 80 | 81 | // Match checks if the Constraint matches the target strings. 82 | func (c *Constraint) Match(whats ...string) bool { 83 | 84 | var match bool 85 | // full string match 86 | for _, what := range whats { 87 | // case insensitive compare 88 | if strings.EqualFold(c.exp, what) { 89 | match = true 90 | break 91 | } 92 | } 93 | 94 | switch c.operator { 95 | case eq: 96 | return match 97 | case noteq: 98 | return !match 99 | } 100 | return false 101 | } 102 | 103 | // MatchConstraints returns true if the node satisfies the given constraints. 104 | func MatchConstraints(constraints []Constraint, engine *Engine) bool { 105 | 106 | for _, constraint := range constraints { 107 | switch { 108 | case strings.EqualFold(constraint.key, "node.id"): 109 | if !constraint.Match(engine.ID) { 110 | return false 111 | } 112 | case strings.EqualFold(constraint.key, "node.hostname"): 113 | // if this node doesn't have hostname 114 | // it's equivalent to match an empty hostname 115 | // where '==' would fail, '!=' matches 116 | if engine.Name == "" { 117 | if !constraint.Match("") { 118 | return false 119 | } 120 | continue 121 | } 122 | if !constraint.Match(engine.Name) { 123 | return false 124 | } 125 | case strings.EqualFold(constraint.key, "node.ip"): 126 | engineIP := net.ParseIP(engine.IP) 127 | // single IP address, node.ip == 2001:db8::2 128 | if ip := net.ParseIP(constraint.exp); ip != nil { 129 | ipEq := ip.Equal(engineIP) 130 | if (ipEq && constraint.operator != eq) || (!ipEq && constraint.operator == eq) { 131 | return false 132 | } 133 | continue 134 | } 135 | // CIDR subnet, node.ip != 210.8.4.0/24 136 | if _, subnet, err := net.ParseCIDR(constraint.exp); err == nil { 137 | within := subnet.Contains(engineIP) 138 | if (within && constraint.operator != eq) || (!within && constraint.operator == eq) { 139 | return false 140 | } 141 | continue 142 | } 143 | // reject constraint with malformed 
address/network 144 | return false 145 | /* 146 | case strings.EqualFold(constraint.key, "node.role"): 147 | if !constraint.Match(n.Role.String()) { 148 | return false 149 | } 150 | */ 151 | case strings.EqualFold(constraint.key, "node.platform.os"): 152 | if engine.OSType == "" { 153 | if !constraint.Match("") { 154 | return false 155 | } 156 | continue 157 | } 158 | if !constraint.Match(engine.OSType) { 159 | return false 160 | } 161 | case strings.EqualFold(constraint.key, "node.platform.arch"): 162 | if engine.Architecture == "" { 163 | if !constraint.Match("") { 164 | return false 165 | } 166 | continue 167 | } 168 | if !constraint.Match(engine.Architecture) { 169 | return false 170 | } 171 | // node labels constraint in form like 'node.labels.key==value' 172 | case len(constraint.key) > len(NodeLabelsPrefix) && strings.EqualFold(constraint.key[:len(NodeLabelsPrefix)], NodeLabelsPrefix): 173 | if engine.NodeLabels == nil { 174 | if !constraint.Match("") { 175 | return false 176 | } 177 | continue 178 | } 179 | label := constraint.key[len(NodeLabelsPrefix):] 180 | // label itself is case sensitive 181 | val := engine.NodeLabels[label] 182 | if !constraint.Match(val) { 183 | return false 184 | } 185 | // engine labels constraint in form like 'engine.labels.key!=value' 186 | case len(constraint.key) > len(EngineLabelsPrefix) && strings.EqualFold(constraint.key[:len(EngineLabelsPrefix)], EngineLabelsPrefix): 187 | if engine.EngineLabels == nil { 188 | if !constraint.Match("") { 189 | return false 190 | } 191 | continue 192 | } 193 | label := constraint.key[len(EngineLabelsPrefix):] 194 | val := engine.EngineLabels[label] 195 | if !constraint.Match(val) { 196 | return false 197 | } 198 | default: 199 | // key doesn't match predefined syntax 200 | return false 201 | } 202 | } 203 | return true 204 | } 205 | -------------------------------------------------------------------------------- /vendor/github.com/docker/libkv/README.md: 
-------------------------------------------------------------------------------- 1 | # libkv 2 | 3 | [](https://godoc.org/github.com/docker/libkv) 4 | [](https://travis-ci.org/docker/libkv) 5 | [](https://coveralls.io/r/docker/libkv) 6 | [](https://goreportcard.com/report/github.com/docker/libkv) 7 | 8 | `libkv` provides a `Go` native library to store metadata. 9 | 10 | The goal of `libkv` is to abstract common store operations for multiple distributed and/or local Key/Value store backends. 11 | 12 | For example, you can use it to store your metadata or for service discovery to register machines and endpoints inside your cluster. 13 | 14 | You can also easily implement a generic *Leader Election* on top of it (see the [docker/leadership](https://github.com/docker/leadership) repository). 15 | 16 | As of now, `libkv` offers support for `Consul`, `Etcd`, `Zookeeper` (**Distributed** store) and `BoltDB` (**Local** store). 17 | 18 | ## Usage 19 | 20 | `libkv` is meant to be used as an abstraction layer over existing distributed Key/Value stores. It is especially useful if you plan to support `consul`, `etcd` and `zookeeper` using the same codebase. 21 | 22 | It is ideal if you plan for something written in Go that should support: 23 | 24 | - A simple metadata storage, distributed or local 25 | - A lightweight discovery service for your nodes 26 | - A distributed lock mechanism 27 | 28 | You can find examples of usage for `libkv` under in `docs/examples.go`. Optionally you can also take a look at the `docker/swarm` or `docker/libnetwork` repositories which are using `docker/libkv` for all the use cases listed above. 29 | 30 | ## Supported versions 31 | 32 | `libkv` supports: 33 | - Consul versions >= `0.5.1` because it uses Sessions with `Delete` behavior for the use of `TTLs` (mimics zookeeper's Ephemeral node support), If you don't plan to use `TTLs`: you can use Consul version `0.4.0+`. 
34 | - Etcd versions >= `2.0` because it uses the new `coreos/etcd/client`, this might change in the future as the support for `APIv3` comes along and adds more capabilities. 35 | - Zookeeper versions >= `3.4.5`. Although this might work with previous version but this remains untested as of now. 36 | - Boltdb, which shouldn't be subject to any version dependencies. 37 | 38 | ## Interface 39 | 40 | A **storage backend** in `libkv` should implement (fully or partially) this interface: 41 | 42 | ```go 43 | type Store interface { 44 | Put(key string, value []byte, options *WriteOptions) error 45 | Get(key string) (*KVPair, error) 46 | Delete(key string) error 47 | Exists(key string) (bool, error) 48 | Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) 49 | WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) 50 | NewLock(key string, options *LockOptions) (Locker, error) 51 | List(directory string) ([]*KVPair, error) 52 | DeleteTree(directory string) error 53 | AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) 54 | AtomicDelete(key string, previous *KVPair) (bool, error) 55 | Close() 56 | } 57 | ``` 58 | 59 | ## Compatibility matrix 60 | 61 | Backend drivers in `libkv` are generally divided between **local drivers** and **distributed drivers**. Distributed backends offer enhanced capabilities like `Watches` and/or distributed `Locks`. 62 | 63 | Local drivers are usually used in complement to the distributed drivers to store informations that only needs to be available locally. 
64 | 65 | | Calls | Consul | Etcd | Zookeeper | BoltDB | 66 | |-----------------------|:----------:|:------:|:-----------:|:--------:| 67 | | Put | X | X | X | X | 68 | | Get | X | X | X | X | 69 | | Delete | X | X | X | X | 70 | | Exists | X | X | X | X | 71 | | Watch | X | X | X | | 72 | | WatchTree | X | X | X | | 73 | | NewLock (Lock/Unlock) | X | X | X | | 74 | | List | X | X | X | X | 75 | | DeleteTree | X | X | X | X | 76 | | AtomicPut | X | X | X | X | 77 | | Close | X | X | X | X | 78 | 79 | ## Limitations 80 | 81 | Distributed Key/Value stores often have different concepts for managing and formatting keys and their associated values. Even though `libkv` tries to abstract those stores aiming for some consistency, in some cases it can't be applied easily. 82 | 83 | Please refer to the `docs/compatibility.md` to see what are the special cases for cross-backend compatibility. 84 | 85 | Other than those special cases, you should expect the same experience for basic operations like `Get`/`Put`, etc. 86 | 87 | Calls like `WatchTree` may return different events (or number of events) depending on the backend (for now, `Etcd` and `Consul` will likely return more events than `Zookeeper` that you should triage properly). Although you should be able to use it successfully to watch on events in an interchangeable way (see the **docker/leadership** repository or the **pkg/discovery/kv** package in **docker/docker**). 88 | 89 | ## TLS 90 | 91 | Only `Consul` and `etcd` have support for TLS and you should build and provide your own `config.TLS` object to feed the client. Support is planned for `zookeeper`. 92 | 93 | ## Roadmap 94 | 95 | - Make the API nicer to use (using `options`) 96 | - Provide more options (`consistency` for example) 97 | - Improve performance (remove extras `Get`/`List` operations) 98 | - Better key formatting 99 | - New backends? 100 | 101 | ## Contributing 102 | 103 | Want to hack on libkv? 
[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. 104 | 105 | ## Copyright and license 106 | 107 | Copyright © 2014-2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. 108 | -------------------------------------------------------------------------------- /cluster/client.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import "github.com/humpback/common/models" 4 | import "github.com/humpback/gounits/httpx" 5 | import "github.com/docker/docker/api/types" 6 | import ctypes "github.com/humpback/humpback-center/cluster/types" 7 | 8 | import ( 9 | "context" 10 | "fmt" 11 | "net" 12 | "net/http" 13 | "time" 14 | ) 15 | 16 | // Client is exported 17 | type Client struct { 18 | ApiAddr string 19 | c *httpx.HttpClient 20 | } 21 | 22 | // NewClient is exported 23 | func NewClient(apiAddr string) *Client { 24 | 25 | client := httpx.NewClient(). 
26 | SetTransport(&http.Transport{ 27 | Proxy: http.ProxyFromEnvironment, 28 | DialContext: (&net.Dialer{ 29 | Timeout: 45 * time.Second, 30 | KeepAlive: 90 * time.Second, 31 | }).DialContext, 32 | DisableKeepAlives: false, 33 | MaxIdleConns: 10, 34 | MaxIdleConnsPerHost: 10, 35 | IdleConnTimeout: 90 * time.Second, 36 | TLSHandshakeTimeout: http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout, 37 | ExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout, 38 | }) 39 | 40 | return &Client{ 41 | ApiAddr: apiAddr, 42 | c: client, 43 | } 44 | } 45 | 46 | // Close is exported 47 | // client close 48 | func (client *Client) Close() { 49 | 50 | client.c.Close() 51 | } 52 | 53 | // GetDockerInfoRequest is exported 54 | // get docker node info 55 | func (client *Client) GetDockerInfoRequest(ctx context.Context) (*types.Info, error) { 56 | 57 | respSpecs, err := client.c.Get(ctx, "http://"+client.ApiAddr+"/v1/dockerinfo", nil, nil) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | defer respSpecs.Close() 63 | if respSpecs.StatusCode() >= http.StatusBadRequest { 64 | return nil, fmt.Errorf("dockerinfo request, %s", ctypes.ParseHTTPResponseError(respSpecs)) 65 | } 66 | 67 | dockerInfo := &types.Info{} 68 | if err := respSpecs.JSON(dockerInfo); err != nil { 69 | return nil, err 70 | } 71 | return dockerInfo, nil 72 | } 73 | 74 | // GetContainerRequest is exported 75 | // get a container type info. 
76 | func (client *Client) GetContainerRequest(ctx context.Context, containerid string) (*types.ContainerJSON, error) { 77 | 78 | query := map[string][]string{"originaldata": []string{"true"}} 79 | respContainer, err := client.c.Get(ctx, "http://"+client.ApiAddr+"/v1/containers/"+containerid, query, nil) 80 | if err != nil { 81 | return nil, err 82 | } 83 | 84 | defer respContainer.Close() 85 | if respContainer.StatusCode() >= http.StatusBadRequest { 86 | return nil, fmt.Errorf("container %s request, %s", ShortContainerID(containerid), ctypes.ParseHTTPResponseError(respContainer)) 87 | } 88 | 89 | containerJSON := &types.ContainerJSON{} 90 | if err := respContainer.JSON(containerJSON); err != nil { 91 | return nil, err 92 | } 93 | return containerJSON, nil 94 | } 95 | 96 | // GetContainersRequest is exported 97 | // return all containers info. 98 | func (client *Client) GetContainersRequest(ctx context.Context) ([]types.Container, error) { 99 | 100 | query := map[string][]string{"all": []string{"true"}} 101 | respContainers, err := client.c.Get(ctx, "http://"+client.ApiAddr+"/v1/containers", query, nil) 102 | if err != nil { 103 | return nil, err 104 | } 105 | 106 | defer respContainers.Close() 107 | if respContainers.StatusCode() >= http.StatusBadRequest { 108 | return nil, fmt.Errorf("containers request, %s", ctypes.ParseHTTPResponseError(respContainers)) 109 | } 110 | 111 | allContainers := []types.Container{} 112 | if err := respContainers.JSON(&allContainers); err != nil { 113 | return nil, err 114 | } 115 | return allContainers, nil 116 | } 117 | 118 | // CreateContainerRequest is exported 119 | // create a container request. 
120 | func (client *Client) CreateContainerRequest(ctx context.Context, config models.Container) (*ctypes.CreateContainerResponse, error) { 121 | 122 | respCreated, err := client.c.PostJSON(ctx, "http://"+client.ApiAddr+"/v1/containers", nil, config, nil) 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | defer respCreated.Close() 128 | if respCreated.StatusCode() >= http.StatusBadRequest { 129 | return nil, fmt.Errorf("create container %s request, %s", config.Name, ctypes.ParseHTTPResponseError(respCreated)) 130 | } 131 | 132 | createContainerResponse := &ctypes.CreateContainerResponse{} 133 | if err := respCreated.JSON(createContainerResponse); err != nil { 134 | return nil, err 135 | } 136 | return createContainerResponse, nil 137 | } 138 | 139 | // RemoveContainerRequest is exported 140 | // remove a container request. 141 | func (client *Client) RemoveContainerRequest(ctx context.Context, containerid string) error { 142 | 143 | query := map[string][]string{"force": []string{"true"}} 144 | respRemoved, err := client.c.Delete(ctx, "http://"+client.ApiAddr+"/v1/containers/"+containerid, query, nil) 145 | if err != nil { 146 | return err 147 | } 148 | 149 | defer respRemoved.Close() 150 | if respRemoved.StatusCode() >= http.StatusBadRequest { 151 | return fmt.Errorf("remove container %s request, %s", ShortContainerID(containerid), ctypes.ParseHTTPResponseError(respRemoved)) 152 | } 153 | return nil 154 | } 155 | 156 | // OperateContainerRequest is exported 157 | // operate a container request. 
158 | func (client *Client) OperateContainerRequest(ctx context.Context, operate models.ContainerOperate) error { 159 | 160 | respOperated, err := client.c.PutJSON(ctx, "http://"+client.ApiAddr+"/v1/containers", nil, operate, nil) 161 | if err != nil { 162 | return err 163 | } 164 | 165 | defer respOperated.Close() 166 | if respOperated.StatusCode() >= http.StatusBadRequest { 167 | return fmt.Errorf("%s container %s request, %s", operate.Action, ShortContainerID(operate.Container), ctypes.ParseHTTPResponseError(respOperated)) 168 | } 169 | return nil 170 | } 171 | 172 | // UpgradeContainerRequest is exported 173 | // upgrade a container request. 174 | func (client *Client) UpgradeContainerRequest(ctx context.Context, operate models.ContainerOperate) (*ctypes.UpgradeContainerResponse, error) { 175 | 176 | respUpgraded, err := client.c.PutJSON(ctx, "http://"+client.ApiAddr+"/v1/containers", nil, operate, nil) 177 | if err != nil { 178 | return nil, err 179 | } 180 | 181 | defer respUpgraded.Close() 182 | if respUpgraded.StatusCode() >= http.StatusBadRequest { 183 | return nil, fmt.Errorf("upgrate container %s request, %s", ShortContainerID(operate.Container), ctypes.ParseHTTPResponseError(respUpgraded)) 184 | } 185 | 186 | upgradeContainerResponse := &ctypes.UpgradeContainerResponse{} 187 | if err := respUpgraded.JSON(upgradeContainerResponse); err != nil { 188 | return nil, err 189 | } 190 | return upgradeContainerResponse, nil 191 | } 192 | -------------------------------------------------------------------------------- /etc/lookupenv.go: -------------------------------------------------------------------------------- 1 | package etc 2 | 3 | import "github.com/humpback/gounits/convert" 4 | 5 | import ( 6 | "errors" 7 | "fmt" 8 | "net" 9 | "net/url" 10 | "os" 11 | "path/filepath" 12 | "strconv" 13 | "strings" 14 | "time" 15 | ) 16 | 17 | var ( 18 | ERRConfigurationParseEnv = errors.New("configuration parseEnv error") 19 | ) 20 | 21 | // ParseEnv is exported 22 | 
func (conf *Configuration) ParseEnv() error { 23 | 24 | pidFile := os.Getenv("HUMPBACK_PIDFILE") 25 | if pidFile != "" { 26 | conf.PIDFile = pidFile 27 | } 28 | 29 | retryStartup := os.Getenv("HUMPBACK_RETRYSTARTUP") 30 | if retryStartup != "" { 31 | value, err := strconv.ParseBool(retryStartup) 32 | if err != nil { 33 | return fmt.Errorf("HUMPBACK_RETRYSTARTUP invalid, %s", err.Error()) 34 | } 35 | conf.RetryStartup = value 36 | } 37 | 38 | siteAPI := os.Getenv("HUMPBACK_SITEAPI") 39 | if siteAPI != "" { 40 | if _, err := url.Parse(siteAPI); err != nil { 41 | return fmt.Errorf("%s, HUMPBACK_SITEAPI %s", ERRConfigurationParseEnv.Error(), err.Error()) 42 | } 43 | conf.SiteAPI = siteAPI 44 | } 45 | 46 | if err := parseClusterEnv(conf); err != nil { 47 | return err 48 | } 49 | 50 | if err := parseAPIEnv(conf); err != nil { 51 | return err 52 | } 53 | 54 | if err := parseLogEnv(conf); err != nil { 55 | return err 56 | } 57 | 58 | return nil 59 | } 60 | 61 | func parseClusterEnv(conf *Configuration) error { 62 | 63 | driverOpts := convert.ConvertKVStringSliceToMap(conf.Cluster.DriverOpts) 64 | clusterLocation := os.Getenv("CENTER_CLUSTER_LOCATION") 65 | if clusterLocation != "" { 66 | driverOpts["location"] = clusterLocation 67 | } 68 | 69 | dataPath := os.Getenv("CENTER_CLUSTER_DATAPATH") 70 | if dataPath != "" { 71 | driverOpts["datapath"] = dataPath 72 | } 73 | 74 | cacheRoot := os.Getenv("CENTER_CLUSTER_CACHEROOT") 75 | if cacheRoot != "" { 76 | if _, err := filepath.Abs(cacheRoot); err != nil { 77 | return fmt.Errorf("%s, CENTER_CLUSTER_CACHEROOT %s", ERRConfigurationParseEnv.Error(), err.Error()) 78 | } 79 | driverOpts["cacheroot"] = cacheRoot 80 | } 81 | 82 | overCommit := os.Getenv("CENTER_CLUSTER_OVERCOMMIT") 83 | if overCommit != "" { 84 | if _, err := strconv.ParseFloat(overCommit, 2); err != nil { 85 | return fmt.Errorf("%s, CENTER_CLUSTER_OVERCOMMIT %s", ERRConfigurationParseEnv.Error(), err.Error()) 86 | } 87 | driverOpts["overcommit"] = overCommit 88 | } 
89 | 90 | recoveryInterval := os.Getenv("CENTER_CLUSTER_RECOVERYINTERVAL") 91 | if recoveryInterval != "" { 92 | if _, err := time.ParseDuration(recoveryInterval); err != nil { 93 | return fmt.Errorf("%s, CENTER_CLUSTER_RECOVERYINTERVAL %s", ERRConfigurationParseEnv.Error(), err.Error()) 94 | } 95 | driverOpts["recoveryinterval"] = recoveryInterval 96 | } 97 | 98 | createRetry := os.Getenv("CENTER_CLUSTER_CREATERETRY") 99 | if createRetry != "" { 100 | if _, err := strconv.Atoi(createRetry); err != nil { 101 | return fmt.Errorf("%s, CENTER_CLUSTER_CREATERETRY %s", ERRConfigurationParseEnv.Error(), err.Error()) 102 | } 103 | driverOpts["createretry"] = createRetry 104 | } 105 | 106 | removeDelay := os.Getenv("CENTER_CLUSTER_REMOVEDELAY") 107 | if removeDelay != "" { 108 | if _, err := time.ParseDuration(removeDelay); err != nil { 109 | return fmt.Errorf("%s, CENTER_CLUSTER_REMOVEDELAY %s", ERRConfigurationParseEnv.Error(), err.Error()) 110 | } 111 | driverOpts["removedelay"] = removeDelay 112 | } 113 | 114 | migrateDelay := os.Getenv("CENTER_CLUSTER_MIGRATEDELAY") 115 | if migrateDelay != "" { 116 | if _, err := time.ParseDuration(migrateDelay); err != nil { 117 | return fmt.Errorf("%s, CENTER_CLUSTER_MIGRATEDELAY %s", ERRConfigurationParseEnv.Error(), err.Error()) 118 | } 119 | driverOpts["migratedelay"] = migrateDelay 120 | } 121 | conf.Cluster.DriverOpts = convert.ConvertMapToKVStringSlice(driverOpts) 122 | 123 | clusterURIs := os.Getenv("DOCKER_CLUSTER_URIS") 124 | if clusterURIs != "" { 125 | conf.Cluster.Discovery.URIs = clusterURIs 126 | } 127 | 128 | clusterName := os.Getenv("DOCKER_CLUSTER_NAME") 129 | if clusterName != "" { 130 | conf.Cluster.Discovery.Cluster = clusterName 131 | } 132 | 133 | clusterHeartBeat := os.Getenv("DOCKER_CLUSTER_HEARTBEAT") 134 | if clusterHeartBeat != "" { 135 | if _, err := time.ParseDuration(clusterHeartBeat); err != nil { 136 | return fmt.Errorf("%s, DOCKER_CLUSTER_HEARTBEAT %s", ERRConfigurationParseEnv.Error(), err.Error()) 
137 | } 138 | conf.Cluster.Discovery.Heartbeat = clusterHeartBeat 139 | } 140 | return nil 141 | } 142 | 143 | func parseAPIEnv(conf *Configuration) error { 144 | 145 | listenPort := os.Getenv("CENTER_LISTEN_PORT") 146 | if listenPort != "" { 147 | var ( 148 | bindIPAddr string 149 | bindPort string 150 | ) 151 | bindArray := strings.SplitN(listenPort, ":", 2) 152 | if len(bindArray) == 1 { 153 | bindPort = bindArray[0] 154 | } else { 155 | bindIPAddr := bindArray[0] 156 | bindPort = bindArray[1] 157 | if len(bindIPAddr) > 0 { 158 | if _, err := net.ResolveIPAddr("tcp", bindIPAddr); err != nil { 159 | return fmt.Errorf("%s, CENTER_LISTEN_PORT host ipaddr error, %s", ERRConfigurationParseEnv.Error(), err.Error()) 160 | } 161 | } 162 | } 163 | nPort, err := strconv.Atoi(bindPort) 164 | if err != nil { 165 | return fmt.Errorf("%s, CENTER_LISTEN_PORT %s", ERRConfigurationParseEnv.Error(), err.Error()) 166 | } 167 | if nPort <= 0 || nPort > 65535 { 168 | return fmt.Errorf("%s, CENTER_LISTEN_PORT range invalid", ERRConfigurationParseEnv.Error()) 169 | } 170 | conf.API.Hosts = []string{bindIPAddr + ":" + bindPort} 171 | } 172 | 173 | enableCore := os.Getenv("CENTER_API_ENABLECORS") 174 | if enableCore != "" { 175 | ret, err := strconv.ParseBool(enableCore) 176 | if err != nil { 177 | return fmt.Errorf("%s, CENTER_API_ENABLECORS %s", ERRConfigurationParseEnv.Error(), err.Error()) 178 | } 179 | conf.API.EnableCors = ret 180 | } 181 | return nil 182 | } 183 | 184 | func parseLogEnv(conf *Configuration) error { 185 | 186 | logFile := os.Getenv("CENTER_LOG_FILE") 187 | if logFile != "" { 188 | if _, err := filepath.Abs(logFile); err != nil { 189 | return fmt.Errorf("%s, CENTER_LOG_FILE %s", ERRConfigurationParseEnv.Error(), err.Error()) 190 | } 191 | conf.Logger.LogFile = logFile 192 | } 193 | 194 | logLevel := os.Getenv("CENTER_LOG_LEVEL") 195 | if logLevel != "" { 196 | conf.Logger.LogLevel = logLevel 197 | } 198 | 199 | logSize := os.Getenv("CENTER_LOG_SIZE") 200 | if 
logSize != "" { 201 | lSize, err := strconv.Atoi(logSize) 202 | if err != nil { 203 | return fmt.Errorf("%s, CENTER_LOG_SIZE %s", ERRConfigurationParseEnv.Error(), err.Error()) 204 | } 205 | conf.Logger.LogSize = (int64)(lSize) 206 | } 207 | return nil 208 | } 209 | -------------------------------------------------------------------------------- /notify/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 | 6 ||
35 |
36 |
37 | Humpback
38 | Notify
39 |
40 |
41 | |
42 | |
| 45 | | |
| 48 | | |
| ID | 51 |{{.ID}} | 52 |
| Event | 55 |{{.Event}} | 56 |
| Description | 59 |{{.Description}} | 60 |
| Exception | 64 |{{.Exception}} | 65 |
| Timestamp | 69 |{{.Timestamp}} | 70 |
| Datetime | 73 |{{.Datetime}} | 74 |
| MetaID | 78 |{{.GroupMeta.MetaID}} | 79 |
| MetaName | 82 |{{.GroupMeta.MetaName}} | 83 |
| Image | 86 |{{.GroupMeta.Image}} | 87 |
| GroupID | 90 |{{.GroupMeta.GroupID}} | 91 |
| GroupName | 94 |{{.GroupMeta.GroupName}} | 95 |
| Location | 99 |{{.GroupMeta.Location}} | 100 |
| Instances | 104 |{{.GroupMeta.Instances}} | 105 |
| Engines | 108 |
109 |
110 | {{range .GroupMeta.Engines}}
111 | {{if eq .State "Healthy"}}
112 | {{.IP}} {{.Name}} {{.State}}
113 | {{else}}
114 | {{.IP}} {{.Name}} {{.State}}
115 | {{end}}
116 | {{end}}
117 | |
118 |
| Containers | 121 |
122 |
123 | {{if gt (.GroupMeta.Containers|len) 0}}
124 | {{range .GroupMeta.Containers}}
125 | {{.ID}} {{.Name}}
126 | {{if eq .State "Running" }}
127 | -> {{.Server}} {{.State}}
128 | {{else}}
129 | -> {{.Server}} {{.State}}
130 | {{end}}
131 | 132 | {{end}} 133 | {{else}} 134 | This meta has no valid containers. Please wait for recovery until you start the agent service.135 | {{end}} 136 | |
137 |
| GroupID | 142 |{{.WatchGroup.GroupID}} | 143 |
| GroupName | 146 |{{.WatchGroup.GroupName}} | 147 |
| Location | 151 |{{.WatchGroup.Location}} | 152 |
| Engines | 156 |
157 |
158 | {{range .WatchGroup.Engines}}
159 | {{if eq .State "Healthy"}}
160 | {{.IP}} {{.Name}} {{.State}}
161 | {{else}}
162 | {{.IP}} {{.Name}} {{.State}}
163 | {{end}}
164 | {{end}}
165 |
166 | |
167 |
| 171 | | |