├── proto
│   ├── Makefile
│   ├── katalog-sync.proto
│   └── katalog-sync.pb.go
├── pkg
│   └── daemon
│       ├── .gitignore
│       ├── testfiles
│       │   ├── sidecar
│       │   │   ├── bad_sidecar_name
│       │   │   │   ├── baseline.json
│       │   │   │   └── input.json
│       │   │   ├── not_ready
│       │   │   │   ├── baseline.json
│       │   │   │   └── input.json
│       │   │   ├── sidecar_not_ready
│       │   │   │   ├── baseline.json
│       │   │   │   └── input.json
│       │   │   └── working
│       │   │       ├── baseline.json
│       │   │       └── input.json
│       │   ├── basic
│       │   │   ├── terminating
│       │   │   │   ├── baseline.json
│       │   │   │   └── input.json
│       │   │   ├── working2
│       │   │   │   ├── baseline.json
│       │   │   │   └── input.json
│       │   │   └── working
│       │   │       ├── baseline.json
│       │   │       └── input.json
│       │   ├── basic_blacklist
│       │   │   ├── working
│       │   │   │   ├── baseline.json
│       │   │   │   └── input.json
│       │   │   └── failing
│       │   │       ├── baseline.json
│       │   │       └── input.json
│       │   └── readinessGate
│       │       ├── not_ready
│       │       │   ├── baseline.json
│       │       │   └── input.json
│       │       └── working
│       │           ├── baseline.json
│       │           └── input.json
│       ├── interface.go
│       ├── util_test.go
│       ├── util.go
│       ├── k8s.go
│       ├── struct_test.go
│       ├── struct.go
│       └── daemon.go
├── .gitignore
├── .dockerignore
├── .travis.yml
├── Makefile
├── .github
│   └── workflows
│       └── build.yml
├── Dockerfile
├── LICENSE
├── config
│   ├── example.yaml
│   ├── daemonset.yaml
│   └── example_sidecar.yaml
├── go.mod
├── cmd
│   ├── katalog-sync-daemon
│   │   └── main.go
│   └── katalog-sync-sidecar
│       └── main.go
├── static
│   └── katalog-sync-diagram.svg
└── README.md
/proto/Makefile:
--------------------------------------------------------------------------------
1 | default:
2 | @protoc --gogofaster_out=plugins=grpc:. *.proto
3 |
--------------------------------------------------------------------------------
/pkg/daemon/.gitignore:
--------------------------------------------------------------------------------
1 | # Exclude all integration-test result files
2 | testfiles/*/**/result.json
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | cmd/katalog-sync-daemon/katalog-sync-daemon
2 | cmd/katalog-sync-sidecar/katalog-sync-sidecar
3 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | cmd/katalog-sync-daemon/katalog-sync-daemon
2 | cmd/katalog-sync-sidecar/katalog-sync-sidecar
3 | config/*
4 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/bad_sidecar_name/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": true,
3 | "service_names": null,
4 | "service_ids": {},
5 | "tags": {},
6 | "ports": {},
7 | "ready": {},
8 | "service_meta": {}
9 | }
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | # use container infrastructure
4 | sudo: false
5 |
6 | # cache the go build cache and pkg mod downloads
7 | cache:
8 | directories:
9 | - $HOME/.cache/go-build
10 | - $HOME/gopath/pkg/mod
11 |
12 | go:
13 | - "1.12.x"
14 |
15 | install:
16 | - go get golang.org/x/tools/cmd/goimports
17 |
18 | script:
19 | - make fmt && git diff --exit-code
20 | - make imports && git diff --exit-code
21 | - make test
22 |
--------------------------------------------------------------------------------
/proto/katalog-sync.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package katalogsync;
4 |
5 |
6 | service KatalogSync {
7 | rpc Register(RegisterQuery) returns (RegisterResult);
8 | rpc Deregister(DeregisterQuery) returns (DeregisterResult);
9 | }
10 |
11 | message RegisterQuery {
12 | string Namespace = 1;
13 | string PodName = 2;
14 | string ContainerName = 3;
15 | }
16 |
17 | message RegisterResult {
18 |
19 | }
20 |
21 | message DeregisterQuery {
22 | string Namespace = 1;
23 | string PodName = 2;
24 | string ContainerName = 3;
25 | }
26 |
27 | message DeregisterResult {
28 |
29 | }
30 |
--------------------------------------------------------------------------------
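
The service above is the daemon's gRPC surface; the sidecar calls Register/Deregister against it. As a rough, hedged sketch (not code from this repo), a client built from the generated package in katalog-sync.pb.go might be used as follows; the constructor name NewKatalogSyncClient is the conventional name generated for a service called KatalogSync, and the address and identifiers below are hypothetical (the real sidecar gets them from flags, see config/example_sidecar.yaml).

package main

import (
	"context"
	"log"
	"time"

	grpc "google.golang.org/grpc"

	katalogsync "github.com/wish/katalog-sync/proto"
)

func main() {
	// The example sidecar dials the node-local daemon at $(HOST_IP):8501.
	conn, err := grpc.Dial("10.10.204.182:8501", grpc.WithInsecure()) // hypothetical host IP
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := katalogsync.NewKatalogSyncClient(conn) // assumed generated constructor

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Register this pod so the daemon starts syncing it into Consul.
	if _, err := client.Register(ctx, &katalogsync.RegisterQuery{
		Namespace:     "hw",
		PodName:       "hw-7df6995f69-96wth",
		ContainerName: "katalog-sync-sidecar",
	}); err != nil {
		log.Fatalf("register: %v", err)
	}
}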
/Makefile:
--------------------------------------------------------------------------------
1 | BUILD := build
2 | GO ?= go
3 | GOFILES := $(shell find . -name "*.go" -type f ! -path "./vendor/*")
4 | GOFMT ?= gofmt
5 | GOIMPORTS ?= goimports -local=github.com/wish/katalog-sync
6 |
7 | .PHONY: clean
8 | clean:
9 | $(GO) clean -i ./...
10 | rm -rf $(BUILD)
11 |
12 | .PHONY: fmt
13 | fmt:
14 | $(GOFMT) -w -s $(GOFILES)
15 |
16 | .PHONY: imports
17 | imports:
18 | $(GOIMPORTS) -w $(GOFILES)
19 |
20 | .PHONY: test
21 | test:
22 | $(GO) test -v ./...
23 |
24 | .PHONY: docker
25 | docker:
26 | DOCKER_BUILDKIT=1 docker build .
27 |
28 | testlocal-build:
29 | DOCKER_BUILDKIT=1 docker build -t quay.io/wish/katalog-sync:latest .
30 | kind load docker-image quay.io/wish/katalog-sync:latest
31 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic/terminating/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-7df6995f69-96wth",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-7df6995f69-96wth"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 8080
24 | },
25 | "ready": {
26 | "hw-service-name": null,
27 | "servicename2": null
28 | },
29 | "service_meta": {
30 | "hw-service-name": null,
31 | "servicename2": null
32 | }
33 | }
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/not_ready/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-6f596c7944-5q5t7",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-6f596c7944-5q5t7"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 12345
24 | },
25 | "ready": {
26 | "hw-service-name": null,
27 | "servicename2": null
28 | },
29 | "service_meta": {
30 | "hw-service-name": null,
31 | "servicename2": null
32 | }
33 | }
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/sidecar_not_ready/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-6f596c7944-5q5t7",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-6f596c7944-5q5t7"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 12345
24 | },
25 | "ready": {
26 | "hw-service-name": null,
27 | "servicename2": null
28 | },
29 | "service_meta": {
30 | "hw-service-name": null,
31 | "servicename2": null
32 | }
33 | }
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic/working2/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-7df6995f69-96wth",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-7df6995f69-96wth"
10 | },
11 | "tags": {
12 | "hw-service-name": null,
13 | "servicename2": [
14 | "b",
15 | "c"
16 | ]
17 | },
18 | "ports": {
19 | "hw-service-name": 8080,
20 | "servicename2": 8080
21 | },
22 | "ready": {
23 | "hw-service-name": {
24 | "hw": true
25 | },
26 | "servicename2": {
27 | "hw": true
28 | }
29 | },
30 | "service_meta": {
31 | "hw-service-name": null,
32 | "servicename2": null
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic/working/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-7df6995f69-96wth",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-7df6995f69-96wth"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 8080
24 | },
25 | "ready": {
26 | "hw-service-name": {
27 | "hw": true
28 | },
29 | "servicename2": {
30 | "hw": true
31 | }
32 | },
33 | "service_meta": {
34 | "hw-service-name": null,
35 | "servicename2": null
36 | }
37 | }
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic_blacklist/working/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-7df6995f69-96wth",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-7df6995f69-96wth"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 8080
24 | },
25 | "ready": {
26 | "hw-service-name": {
27 | "hw": true
28 | },
29 | "servicename2": {
30 | "hw": true
31 | }
32 | },
33 | "service_meta": {
34 | "hw-service-name": null,
35 | "servicename2": null
36 | }
37 | }
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic_blacklist/failing/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-7df6995f69-96wth",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-7df6995f69-96wth"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 8080
24 | },
25 | "ready": {
26 | "hw-service-name": {
27 | "hw": false
28 | },
29 | "servicename2": {
30 | "hw": false
31 | }
32 | },
33 | "service_meta": {
34 | "hw-service-name": null,
35 | "servicename2": null
36 | }
37 | }
--------------------------------------------------------------------------------
/pkg/daemon/interface.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | consulApi "github.com/hashicorp/consul/api"
5 | k8sApi "k8s.io/api/core/v1"
6 | )
7 |
8 | // Kubelet encapsulates the interface for kubelet interaction
9 | type Kubelet interface {
10 | GetPodList() (*k8sApi.PodList, error)
11 | }
12 |
13 | // ConsulCatalog encapsulates the interface for interacting with the Catalog API
14 | type ConsulCatalog interface {
15 | Services() (map[string]*consulApi.AgentService, error)
16 | }
17 |
18 | // ConsulAgent encapsulates the interface for interacting with the local agent
19 | // and service API
20 | type ConsulAgent interface {
21 | UpdateTTL(checkID, output, status string) error
22 | Services() (map[string]*consulApi.AgentService, error)
23 | ServiceDeregister(serviceID string) error
24 | ServiceRegister(service *consulApi.AgentServiceRegistration) error
25 | }
26 |
--------------------------------------------------------------------------------
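
These interfaces exist so the daemon can be exercised without a live kubelet or Consul agent. As a minimal sketch (an assumption, not code from this repo), a fake Kubelet satisfying the interface could look like:

package daemon

import k8sApi "k8s.io/api/core/v1"

// fakeKubelet returns a canned pod list, standing in for the kubelet API in tests.
type fakeKubelet struct {
	pods []k8sApi.Pod
}

func (f *fakeKubelet) GetPodList() (*k8sApi.PodList, error) {
	return &k8sApi.PodList{Items: f.pods}, nil
}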
/pkg/daemon/util_test.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | "reflect"
5 | "strconv"
6 | "testing"
7 | )
8 |
9 | func TestParseMap(t *testing.T) {
10 | tests := []struct {
11 | in string
12 | out map[string]string
13 | }{
14 | // Basic well formed
15 | {
16 | in: "a:1,b:2",
17 | out: map[string]string{"a": "1", "b": "2"},
18 | },
19 |
20 | // With some spaces
21 | {
22 | in: "a:1, b:2",
23 | out: map[string]string{"a": "1", "b": "2"},
24 | },
25 |
26 | // With some invalid mappings
27 | {
28 | in: "a:1,b",
29 | out: map[string]string{"a": "1"},
30 | },
31 | }
32 |
33 | for i, test := range tests {
34 | t.Run(strconv.Itoa(i), func(t *testing.T) {
35 | m := ParseMap(test.in)
36 | if !reflect.DeepEqual(m, test.out) {
37 | t.Fatalf("Mismatch expected=%v actual=%v", test.out, m)
38 | }
39 | })
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/readinessGate/not_ready/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-6f596c7944-5q5t7",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-6f596c7944-5q5t7"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 12345
24 | },
25 | "ready": {
26 | "hw-service-name": {
27 | "hw": false
28 | },
29 | "servicename2": {
30 | "hw": false
31 | }
32 | },
33 | "service_meta": {
34 | "hw-service-name": null,
35 | "servicename2": null
36 | },
37 | "outstandingReadinessGate": true
38 | }
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/working/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-6f596c7944-5q5t7",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-6f596c7944-5q5t7"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 12345
24 | },
25 | "ready": {
26 | "hw-service-name": {
27 | "hw": true
28 | },
29 | "servicename2": {
30 | "hw": true
31 | }
32 | },
33 | "service_meta": {
34 | "hw-service-name": {
35 | "a": "1",
36 | "b": "2"
37 | },
38 | "servicename2": {
39 | "b": "1",
40 | "c": "2"
41 | }
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | tags:
8 | - v*
9 |
10 | jobs:
11 | build:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v2
15 | - uses: azure/docker-login@v1
16 | with:
17 | login-server: quay.io
18 | username: '${{ secrets.QUAYIO_USERNAME }}'
19 | password: '${{ secrets.QUAYIO_PASSWORD }}'
20 | - name: Set up Docker Buildx
21 | id: buildx
22 | uses: crazy-max/ghaction-docker-buildx@v1
23 | with:
24 | buildx-version: latest
25 | qemu-version: latest
26 | - name: Available platforms
27 | run: 'echo ${{ steps.buildx.outputs.platforms }}'
28 | - name: Run Buildx
29 | run: |
30 | docker buildx build \
31 | --platform linux/amd64,linux/arm64 \
32 | --push -t quay.io/wish/katalog-sync:${GITHUB_REF##*/} .
33 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/readinessGate/working/baseline.json:
--------------------------------------------------------------------------------
1 | {
2 | "error": false,
3 | "service_names": [
4 | "hw-service-name",
5 | "servicename2"
6 | ],
7 | "service_ids": {
8 | "hw-service-name": "katalog-sync_hw-service-name_hw_hw-6f596c7944-5q5t7",
9 | "servicename2": "katalog-sync_servicename2_hw_hw-6f596c7944-5q5t7"
10 | },
11 | "tags": {
12 | "hw-service-name": [
13 | "a",
14 | "b"
15 | ],
16 | "servicename2": [
17 | "b",
18 | "c"
19 | ]
20 | },
21 | "ports": {
22 | "hw-service-name": 8080,
23 | "servicename2": 12345
24 | },
25 | "ready": {
26 | "hw-service-name": {
27 | "hw": true
28 | },
29 | "servicename2": {
30 | "hw": true
31 | }
32 | },
33 | "service_meta": {
34 | "hw-service-name": {
35 | "a": "1",
36 | "b": "2"
37 | },
38 | "servicename2": {
39 | "b": "1",
40 | "c": "2"
41 | }
42 | },
43 | "outstandingReadinessGate": true
44 | }
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM --platform=$BUILDPLATFORM golang:alpine as builder
2 |
3 | ARG BUILDPLATFORM
4 | ARG TARGETARCH
5 | ARG TARGETOS
6 | ENV GOARCH=${TARGETARCH} GOOS=${TARGETOS}
7 |
8 | WORKDIR /go/src/github.com/wish/katalog-sync
9 |
10 | # Cache dependencies
11 | COPY go.mod .
12 | COPY go.sum .
13 | RUN go mod download
14 |
15 | COPY . /go/src/github.com/wish/katalog-sync
16 | RUN cd /go/src/github.com/wish/katalog-sync/cmd/katalog-sync-daemon && CGO_ENABLED=0 go build
17 | RUN cd /go/src/github.com/wish/katalog-sync/cmd/katalog-sync-sidecar && CGO_ENABLED=0 go build
18 |
19 | FROM golang:alpine
20 |
21 | COPY --from=builder /go/src/github.com/wish/katalog-sync/cmd/katalog-sync-daemon/katalog-sync-daemon /bin/katalog-sync-daemon
22 | COPY --from=builder /go/src/github.com/wish/katalog-sync/cmd/katalog-sync-sidecar/katalog-sync-sidecar /bin/katalog-sync-sidecar
23 |
24 | ARG USER=katalog-sync
25 | ENV HOME /home/$USER
26 | RUN addgroup -S $USER && adduser -S -G $USER -u 12345 $USER
27 | USER $USER
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 ContextLogic Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/config/example.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hw
6 | namespace: hw
7 | labels:
8 | app: hw
9 | spec:
10 | replicas: 2
11 | selector:
12 | matchLabels:
13 | app: hw
14 | template:
15 | metadata:
16 | labels:
17 | app: hw
18 | annotations:
19 | katalog-sync.wish.com/service-names: hw-service-name,servicename2
20 | katalog-sync.wish.com/service-port: '8080'
21 | katalog-sync.wish.com/service-tags: a,b
22 | katalog-sync.wish.com/service-tags-servicename2: b,c
23 | katalog-sync.wish.com/sync-interval: 2s
24 | spec:
25 | terminationGracePeriodSeconds: 1
26 | containers:
27 | - name: hw
28 | image: smcquay/hw:v0.1.5
29 | imagePullPolicy: Always
30 | ports:
31 | - containerPort: 8080
32 | livenessProbe:
33 | httpGet:
34 | path: "/live"
35 | port: 8080
36 | initialDelaySeconds: 5
37 | periodSeconds: 5
38 | readinessProbe:
39 | httpGet:
40 | path: "/ready"
41 | port: 8080
42 | periodSeconds: 5
43 |
44 |
--------------------------------------------------------------------------------
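
For orientation, the annotations in the Deployment above are what katalog-sync turns into Consul registrations. The sketch below is a rough manual equivalent for one of the two service names, using the service-ID format visible in the pkg/daemon/testfiles baselines (katalog-sync_<service>_<namespace>_<pod>); it is illustrative only, not the repo's sync code, and the pod IP is hypothetical.

package main

import (
	"log"

	consulApi "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consulApi.NewClient(consulApi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// One registration per entry in katalog-sync.wish.com/service-names.
	reg := &consulApi.AgentServiceRegistration{
		ID:      "katalog-sync_hw-service-name_hw_hw-7df6995f69-96wth", // ID format from the test baselines
		Name:    "hw-service-name",
		Tags:    []string{"a", "b"}, // katalog-sync.wish.com/service-tags
		Port:    8080,               // katalog-sync.wish.com/service-port
		Address: "10.1.1.140",       // pod IP (hypothetical)
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}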
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/wish/katalog-sync
2 |
3 | go 1.16
4 |
5 | require (
6 | github.com/armon/go-metrics v0.3.10 // indirect
7 | github.com/gogo/protobuf v1.3.2
8 | github.com/google/go-cmp v0.5.6 // indirect
9 | github.com/google/gofuzz v1.2.0 // indirect
10 | github.com/hashicorp/consul/api v1.11.0
11 | github.com/hashicorp/go-hclog v0.14.1 // indirect
12 | github.com/hashicorp/go-immutable-radix v1.3.0 // indirect
13 | github.com/hashicorp/go-msgpack v1.1.5 // indirect
14 | github.com/hashicorp/go-multierror v1.1.1 // indirect
15 | github.com/hashicorp/go-sockaddr v1.0.2 // indirect
16 | github.com/hashicorp/go-uuid v1.0.2 // indirect
17 | github.com/hashicorp/golang-lru v0.5.4 // indirect
18 | github.com/hashicorp/serf v0.10.0 // indirect
19 | github.com/jessevdk/go-flags v1.4.0
20 | github.com/kr/pretty v0.2.1 // indirect
21 | github.com/mitchellh/go-testing-interface v1.14.1 // indirect
22 | github.com/mitchellh/mapstructure v1.4.1-0.20210112042008-8ebf2d61a8b4 // indirect
23 | github.com/pkg/errors v0.9.1
24 | github.com/prometheus/client_golang v1.11.1
25 | github.com/sergi/go-diff v1.0.0
26 | github.com/sirupsen/logrus v1.6.0
27 | github.com/stretchr/testify v1.7.0 // indirect
28 | golang.org/x/net v0.7.0
29 | google.golang.org/grpc v1.27.1
30 | k8s.io/api v0.20.4
31 | k8s.io/apimachinery v0.20.4
32 | k8s.io/client-go v0.20.4
33 | )
34 |
--------------------------------------------------------------------------------
/pkg/daemon/util.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | "encoding/json"
5 | "strings"
6 |
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "k8s.io/apimachinery/pkg/util/strategicpatch"
10 | )
11 |
12 | func ParseMap(s string) map[string]string {
13 | pairs := strings.Split(s, ",")
14 | m := make(map[string]string, len(pairs))
15 | for _, pair := range pairs {
16 | split := strings.Split(pair, ":")
17 | if len(split) == 2 {
18 | m[strings.TrimSpace(split[0])] = strings.TrimSpace(split[1])
19 | }
20 | }
21 | return m
22 | }
23 |
24 | func buildPodConditionPatch(pod *corev1.Pod, condition corev1.PodCondition) ([]byte, error) {
25 | oldData, err := json.Marshal(corev1.Pod{
26 | Status: corev1.PodStatus{
27 | Conditions: nil,
28 | },
29 | })
30 | if err != nil {
31 | return nil, err
32 | }
33 | newData, err := json.Marshal(corev1.Pod{
34 | ObjectMeta: metav1.ObjectMeta{UID: pod.UID}, // only put the uid in the new object to ensure it appears in the patch as a precondition
35 | Status: corev1.PodStatus{
36 | Conditions: []corev1.PodCondition{condition},
37 | },
38 | })
39 | if err != nil {
40 | return nil, err
41 | }
42 | patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, corev1.Pod{})
43 | if err != nil {
44 | return nil, err
45 | }
46 | return patchBytes, nil
47 | }
48 |
--------------------------------------------------------------------------------
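
buildPodConditionPatch produces a strategic-merge patch against the pod's status, which is what the pods/status patch permission in config/daemonset.yaml is for. Below is a hedged sketch of applying such a patch with client-go v0.20, assuming the katalog-sync.wish.com/synced condition type seen in the readinessGate test fixtures; the helper name is hypothetical.

package daemon

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// patchSyncedCondition (hypothetical) builds the condition patch and applies it
// to the pod's status subresource.
func patchSyncedCondition(ctx context.Context, cs kubernetes.Interface, pod *corev1.Pod, status corev1.ConditionStatus) error {
	patch, err := buildPodConditionPatch(pod, corev1.PodCondition{
		Type:   corev1.PodConditionType("katalog-sync.wish.com/synced"),
		Status: status,
	})
	if err != nil {
		return err
	}
	_, err = cs.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
	return err
}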
/config/daemonset.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: katalog-sync
6 | namespace: kube-system
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRoleBinding
10 | metadata:
11 | name: katalog-sync
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: katalog-sync
16 | subjects:
17 | - kind: ServiceAccount
18 | name: katalog-sync
19 | namespace: kube-system
20 | ---
21 | apiVersion: rbac.authorization.k8s.io/v1
22 | kind: ClusterRole
23 | metadata:
24 | name: katalog-sync
25 | rules:
26 | - apiGroups:
27 | - ''
28 | resources:
29 | - nodes/proxy
30 | - nodes/metrics
31 | verbs:
32 | - get
33 | - apiGroups:
34 | - ''
35 | resources:
36 | - pods/status
37 | verbs:
38 | - patch
39 | - apiGroups:
40 | - ''
41 | resources:
42 | - pods
43 | verbs:
44 | - list
45 | - get
46 | ---
47 | apiVersion: apps/v1
48 | kind: DaemonSet
49 | metadata:
50 | labels:
51 | k8s-app: katalog-sync
52 | name: katalog-sync
53 | namespace: kube-system
54 | spec:
55 | selector:
56 | matchLabels:
57 | k8s-app: katalog-sync
58 | template:
59 | metadata:
60 | annotations:
61 | scheduler.alpha.kubernetes.io/critical-pod: ''
62 | labels:
63 | k8s-app: katalog-sync
64 | spec:
65 | serviceAccount: katalog-sync
66 | containers:
67 | - command:
68 | - "/bin/katalog-sync-daemon"
69 | args:
70 | - "--bind-address=:8501"
71 | image: quay.io/wish/katalog-sync:latest
72 | imagePullPolicy: IfNotPresent
73 | name: katalog-sync-daemon
74 | hostNetwork: true
75 | terminationGracePeriodSeconds: 5
76 | tolerations:
77 | - key: CriticalAddonsOnly
78 | operator: Exists
79 | updateStrategy:
80 | type: RollingUpdate
81 |
82 |
--------------------------------------------------------------------------------
/config/example_sidecar.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hw
6 | namespace: hw
7 | labels:
8 | app: hw
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app: hw
14 | template:
15 | metadata:
16 | labels:
17 | app: hw
18 | annotations:
19 | katalog-sync.wish.com/sidecar: katalog-sync-sidecar
20 | katalog-sync.wish.com/service-names: hw-service-name,servicename2
21 | katalog-sync.wish.com/service-port: '8080'
22 | katalog-sync.wish.com/service-port-servicename2: '12345'
23 | katalog-sync.wish.com/service-meta: 'a:1,b:2'
24 | katalog-sync.wish.com/service-meta-servicename2: 'b:1,c:2'
25 | katalog-sync.wish.com/service-tags: a,b
26 | katalog-sync.wish.com/sync-interval: 2s
27 | spec:
28 | terminationGracePeriodSeconds: 1
29 | containers:
30 | - name: hw
31 | image: smcquay/hw:v0.1.5
32 | imagePullPolicy: Always
33 | ports:
34 | - containerPort: 8080
35 | livenessProbe:
36 | httpGet:
37 | path: "/live"
38 | port: 8080
39 | initialDelaySeconds: 5
40 | periodSeconds: 5
41 | readinessProbe:
42 | httpGet:
43 | path: "/ready"
44 | port: 8080
45 | periodSeconds: 5
46 | - command:
47 | - "/bin/katalog-sync-sidecar"
48 | args:
49 | - "--katalog-sync-daemon=$(HOST_IP):8501"
50 | - "--namespace=$(MY_POD_NAMESPACE)"
51 | - "--pod-name=$(MY_POD_NAME)"
52 | - "--container-name=katalog-sync-sidecar"
53 | - "--bind-address=:8888"
54 | env:
55 | - name: HOST_IP
56 | valueFrom:
57 | fieldRef:
58 | fieldPath: status.hostIP
59 | - name: MY_POD_NAMESPACE
60 | valueFrom:
61 | fieldRef:
62 | fieldPath: metadata.namespace
63 | - name: MY_POD_NAME
64 | valueFrom:
65 | fieldRef:
66 | fieldPath: metadata.name
67 | image: quay.io/wish/katalog-sync:latest
68 | imagePullPolicy: Always
69 | name: katalog-sync-sidecar
70 | readinessProbe:
71 | httpGet:
72 | path: "/ready"
73 | port: 8888
74 | initialDelaySeconds: 1
75 | periodSeconds: 5
76 |
77 |
--------------------------------------------------------------------------------
/pkg/daemon/k8s.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | "crypto/tls"
5 | "encoding/json"
6 | "io/ioutil"
7 | "net/http"
8 |
9 | k8sApi "k8s.io/api/core/v1"
10 | "k8s.io/client-go/rest"
11 | )
12 |
13 | // KubeletClientConfig holds the config options for connecting to the kubelet API
14 | type KubeletClientConfig struct {
15 | APIEndpoint string `long:"kubelet-api" env:"KUBELET_API" description:"kubelet API endpoint" default:"http://localhost:10255/pods"`
16 | InsecureSkipVerify bool `long:"kubelet-api-insecure-skip-verify" env:"KUBELET_API_INSECURE_SKIP_VERIFY" description:"skip verification of TLS certificate from kubelet API"`
17 | }
18 |
19 | // NewKubeletClient returns a new KubeletClient based on the given config
20 | func NewKubeletClient(c KubeletClientConfig) (*KubeletClient, error) {
21 | // creates the in-cluster config
22 | config, err := rest.InClusterConfig()
23 | if err != nil {
24 | if err == rest.ErrNotInCluster {
25 | if c.InsecureSkipVerify {
26 | tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
27 | return &KubeletClient{c: c, client: &http.Client{Transport: tr}}, nil
28 | }
29 |
30 | return &KubeletClient{c: c, client: http.DefaultClient}, nil
31 | }
32 | return nil, err
33 | }
34 | if c.InsecureSkipVerify {
35 | config.TLSClientConfig.Insecure = true
36 | config.TLSClientConfig.CAData = nil
37 | config.TLSClientConfig.CAFile = ""
38 | }
39 | transport, err := rest.TransportFor(config)
40 | if err != nil {
41 | return nil, err
42 | }
43 |
44 | return &KubeletClient{c: c, client: &http.Client{Transport: transport}}, nil
45 | }
46 |
47 | // KubeletClient is an HTTP client for kubelet that implements the Kubelet interface
48 | type KubeletClient struct {
49 | c KubeletClientConfig
50 | client *http.Client
51 | }
52 |
53 | // GetPodList returns the list of pods the kubelet is managing
54 | func (k *KubeletClient) GetPodList() (*k8sApi.PodList, error) {
55 | // query the kubelet API for the full pod list
56 | req, err := http.NewRequest("GET", k.c.APIEndpoint, nil)
57 | if err != nil {
58 | return nil, err
59 | }
60 |
61 | resp, err := k.client.Do(req)
62 | if err != nil {
63 | return nil, err
64 | }
65 | defer resp.Body.Close()
66 |
67 | var podList k8sApi.PodList
68 | b, err := ioutil.ReadAll(resp.Body)
69 | if err != nil {
70 | return nil, err
71 | }
72 | if err := json.Unmarshal(b, &podList); err != nil {
73 | return nil, err
74 | }
75 | return &podList, nil
76 | }
77 |
--------------------------------------------------------------------------------
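
Because NewKubeletClient falls back to a plain HTTP client when not running in-cluster, the kubelet client can also be used standalone for quick inspection; a small sketch (an assumption, not part of the repo) against the default read-only kubelet endpoint:

package main

import (
	"log"

	"github.com/wish/katalog-sync/pkg/daemon"
)

func main() {
	kubelet, err := daemon.NewKubeletClient(daemon.KubeletClientConfig{
		APIEndpoint: "http://localhost:10255/pods", // the flag's default endpoint
	})
	if err != nil {
		log.Fatalf("kubelet client: %v", err)
	}

	pods, err := kubelet.GetPodList()
	if err != nil {
		log.Fatalf("pod list: %v", err)
	}
	for _, pod := range pods.Items {
		log.Printf("%s/%s containers=%d", pod.Namespace, pod.Name, len(pod.Status.ContainerStatuses))
	}
}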
/cmd/katalog-sync-daemon/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "net"
5 | "net/http"
6 | _ "net/http/pprof"
7 | "os"
8 |
9 | consulApi "github.com/hashicorp/consul/api"
10 | flags "github.com/jessevdk/go-flags"
11 | "github.com/prometheus/client_golang/prometheus/promhttp"
12 | "github.com/sirupsen/logrus"
13 | grpc "google.golang.org/grpc"
14 |
15 | "github.com/wish/katalog-sync/pkg/daemon"
16 | katalogsync "github.com/wish/katalog-sync/proto"
17 | )
18 |
19 | // TODO: consul flags
20 | var opts struct {
21 | LogLevel string `long:"log-level" env:"LOG_LEVEL" description:"Log level" default:"info"`
22 | BindAddr string `long:"bind-address" env:"BIND_ADDRESS" description:"address for binding RPC interface for sidecar"`
23 | MetricsBindAddr string `long:"metrics-bind-address" env:"METRICS_BIND_ADDRESS" description:"address for binding metrics interface"`
24 | PProfBindAddr string `long:"pprof-bind-address" env:"PPROF_BIND_ADDRESS" description:"address for binding pprof"`
25 | daemon.DaemonConfig
26 | daemon.KubeletClientConfig
27 | }
28 |
29 | func main() {
30 | parser := flags.NewParser(&opts, flags.Default)
31 | if _, err := parser.Parse(); err != nil {
32 | // If the error was from the parser, then we can simply return
33 | // as Parse() prints the error already
34 | if _, ok := err.(*flags.Error); ok {
35 | os.Exit(1)
36 | }
37 | logrus.Fatalf("Error parsing flags: %v", err)
38 | }
39 |
40 | if opts.PProfBindAddr != "" {
41 | l, err := net.Listen("tcp", opts.PProfBindAddr)
42 | if err != nil {
43 | logrus.Fatalf("Error binding: %v", err)
44 | }
45 |
46 | go func() {
47 | http.Serve(l, http.DefaultServeMux)
48 | }()
49 | }
50 |
51 | if opts.MetricsBindAddr != "" {
52 | l, err := net.Listen("tcp", opts.MetricsBindAddr)
53 | if err != nil {
54 | logrus.Fatalf("Error binding: %v", err)
55 | }
56 |
57 | mux := http.NewServeMux()
58 | mux.Handle("/metrics", promhttp.Handler())
59 |
60 | go func() {
61 | http.Serve(l, mux)
62 | }()
63 | }
64 |
65 | // Use log level
66 | level, err := logrus.ParseLevel(opts.LogLevel)
67 | if err != nil {
68 | logrus.Fatalf("Unknown log level %s: %v", opts.LogLevel, err)
69 | }
70 | logrus.SetLevel(level)
71 |
72 | // Set the log format to have a reasonable timestamp
73 | formatter := &logrus.TextFormatter{
74 | FullTimestamp: true,
75 | }
76 | logrus.SetFormatter(formatter)
77 |
78 | kubeletClient, err := daemon.NewKubeletClient(opts.KubeletClientConfig)
79 | if err != nil {
80 | logrus.Fatalf("Unable to create kubelet client: %v", err)
81 | }
82 |
83 | // Create the Consul client from the default (environment-driven) config
84 | consulCfg := consulApi.DefaultConfig()
85 | client, err := consulApi.NewClient(consulCfg)
86 | if err != nil {
87 | panic(err)
88 | }
89 |
90 | d := daemon.NewDaemon(opts.DaemonConfig, kubeletClient, client)
91 |
92 | if opts.BindAddr != "" {
93 | s := grpc.NewServer()
94 | katalogsync.RegisterKatalogSyncServer(s, d)
95 | l, err := net.Listen("tcp", opts.BindAddr)
96 | if err != nil {
97 | logrus.Fatalf("failed to listen: %v", err)
98 | }
99 | go func() {
100 | logrus.Errorf("error serving: %v", s.Serve(l))
101 | }()
102 | }
103 |
104 | // TODO: change to background, and wait on signals to die
105 | d.Run()
106 | }
107 |
--------------------------------------------------------------------------------
/pkg/daemon/struct_test.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "io/ioutil"
7 | "os"
8 | "path"
9 | "path/filepath"
10 | "testing"
11 |
12 | "github.com/sergi/go-diff/diffmatchpatch"
13 | k8sApi "k8s.io/api/core/v1"
14 | )
15 |
16 | var podTestDir = "testfiles"
17 |
18 | type podTestResult struct {
19 | Err bool `json:"error"`
20 | ServiceNames []string `json:"service_names"`
21 | ServiceIDs map[string]string `json:"service_ids"`
22 | Tags map[string][]string `json:"tags"`
23 | Ports map[string]int `json:"ports"`
24 | Ready map[string]map[string]bool `json:"ready"`
25 | ServiceMeta map[string]map[string]string `json:"service_meta"`
26 | OutstandingReadinessGate bool `json:"outstandingReadinessGate,omitempty"`
27 | }
28 |
29 | func TestPod(t *testing.T) {
30 | // Find all tests
31 | files, err := ioutil.ReadDir(podTestDir)
32 | if err != nil {
33 | t.Fatalf("error loading tests: %v", err)
34 | }
35 |
36 | for _, file := range files {
37 | if !file.IsDir() {
38 | continue
39 | }
40 | // TODO: subtest stuff
41 | t.Run(file.Name(), func(t *testing.T) {
42 | runPodIntegrationTest(t, file.Name())
43 | })
44 | }
45 | }
46 |
47 | func runPodIntegrationTest(t *testing.T, testDir string) {
48 | filepath.Walk(path.Join(podTestDir, testDir), func(fpath string, info os.FileInfo, err error) error {
49 | // If its not a directory, skip it
50 | if !info.IsDir() {
51 | return nil
52 | }
53 |
54 | k8sPod := k8sApi.Pod{}
55 | b, err := ioutil.ReadFile(path.Join(fpath, "input.json"))
56 | if err != nil {
57 | if os.IsNotExist(err) {
58 | return nil
59 | }
60 | t.Fatalf("Unable to read input: %v", err)
61 | }
62 |
63 | if err := json.Unmarshal(b, &k8sPod); err != nil {
64 | t.Fatalf("unable to unmarshal input to pod: %v", err)
65 | }
66 |
67 | relFilePath, err := filepath.Rel(podTestDir, fpath)
68 | if err != nil {
69 | t.Fatalf("Error getting relative path? Shouldn't be possible: %v", err)
70 | }
71 |
72 | t.Run(relFilePath, func(t *testing.T) {
73 | result := &podTestResult{
74 | ServiceIDs: make(map[string]string),
75 | Tags: make(map[string][]string),
76 | Ports: make(map[string]int),
77 | Ready: make(map[string]map[string]bool),
78 | ServiceMeta: make(map[string]map[string]string),
79 | }
80 |
81 | pod, err := NewPod(k8sPod, &DaemonConfig{})
82 | result.Err = err != nil
83 | if err == nil {
84 | result.ServiceNames = pod.GetServiceNames()
85 |
86 | for _, name := range result.ServiceNames {
87 | result.ServiceIDs[name] = pod.GetServiceID(name)
88 | result.Tags[name] = pod.GetTags(name)
89 | result.Ports[name] = pod.GetPort(name)
90 | _, result.Ready[name] = pod.Ready()
91 | result.ServiceMeta[name] = pod.GetServiceMeta(name)
92 | }
93 | }
94 |
95 | // Handle the readiness gate (if any) and record whether it is still outstanding
96 | if pod != nil {
97 | pod.HandleReadinessGate()
98 | result.OutstandingReadinessGate = pod.OutstandingReadinessGate
99 | }
100 |
101 | b, err := json.MarshalIndent(result, "", " ")
102 | if err != nil {
103 | panic(err)
104 | }
105 | ioutil.WriteFile(path.Join(fpath, "result.json"), b, 0644)
106 |
107 | baselineResultBytes, err := ioutil.ReadFile(path.Join(fpath, "baseline.json"))
108 | if err != nil {
109 | t.Skip("No baseline.json found, skipping comparison")
110 | } else {
111 | baselineResultBytes = bytes.TrimSpace(baselineResultBytes)
112 | resultBytes := bytes.TrimSpace(b)
113 | if !bytes.Equal(baselineResultBytes, resultBytes) {
114 | dmp := diffmatchpatch.New()
115 | diffs := dmp.DiffMain(string(baselineResultBytes), string(resultBytes), false)
116 | t.Fatalf("Mismatch of results and baseline!\n%s", dmp.DiffPrettyText(diffs))
117 | }
118 | }
119 | })
120 | return nil
121 | })
122 | }
123 |
--------------------------------------------------------------------------------
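
A note on the harness above: every directory under pkg/daemon/testfiles that contains an input.json (a serialized Kubernetes Pod) is a test case. The test writes the computed result.json next to the input (excluded by pkg/daemon/.gitignore) and, when a baseline.json exists, fails on any diff between the two. Adding a fixture is therefore a matter of creating a directory with an input.json, running make test once, and (presumably) committing the generated result.json as the new baseline.json.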
/static/katalog-sync-diagram.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic/working2/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-7df6995f69-96wth",
4 | "generateName": "hw-7df6995f69-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-7df6995f69-96wth",
7 | "uid": "4a6f4de2-2e58-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "7123",
9 | "creationTimestamp": "2019-02-11T23:53:55Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "7df6995f69"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
18 | "katalog-sync.wish.com/sync-interval": "2s",
19 | "kubernetes.io/config.seen": "2019-02-11T15:53:55.238848124-08:00",
20 | "kubernetes.io/config.source": "api"
21 | },
22 | "ownerReferences": [{
23 | "apiVersion": "apps/v1",
24 | "kind": "ReplicaSet",
25 | "name": "hw-7df6995f69",
26 | "uid": "4a6df5fd-2e58-11e9-8f72-54e1ad14ee37",
27 | "controller": true,
28 | "blockOwnerDeletion": true
29 | }]
30 | },
31 | "spec": {
32 | "volumes": [{
33 | "name": "default-token-zwnc6",
34 | "secret": {
35 | "secretName": "default-token-zwnc6",
36 | "defaultMode": 420
37 | }
38 | }],
39 | "containers": [{
40 | "name": "hw",
41 | "image": "smcquay/hw:v0.1.5",
42 | "ports": [{
43 | "containerPort": 8080,
44 | "protocol": "TCP"
45 | }],
46 | "resources": {},
47 | "volumeMounts": [{
48 | "name": "default-token-zwnc6",
49 | "readOnly": true,
50 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
51 | }],
52 | "livenessProbe": {
53 | "httpGet": {
54 | "path": "/live",
55 | "port": 8080,
56 | "scheme": "HTTP"
57 | },
58 | "initialDelaySeconds": 5,
59 | "timeoutSeconds": 1,
60 | "periodSeconds": 5,
61 | "successThreshold": 1,
62 | "failureThreshold": 3
63 | },
64 | "readinessProbe": {
65 | "httpGet": {
66 | "path": "/ready",
67 | "port": 8080,
68 | "scheme": "HTTP"
69 | },
70 | "timeoutSeconds": 1,
71 | "periodSeconds": 5,
72 | "successThreshold": 1,
73 | "failureThreshold": 3
74 | },
75 | "terminationMessagePath": "/dev/termination-log",
76 | "terminationMessagePolicy": "File",
77 | "imagePullPolicy": "Always"
78 | }],
79 | "restartPolicy": "Always",
80 | "terminationGracePeriodSeconds": 1,
81 | "dnsPolicy": "ClusterFirst",
82 | "serviceAccountName": "default",
83 | "serviceAccount": "default",
84 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
85 | "securityContext": {},
86 | "schedulerName": "default-scheduler",
87 | "tolerations": [{
88 | "key": "node.kubernetes.io/not-ready",
89 | "operator": "Exists",
90 | "effect": "NoExecute",
91 | "tolerationSeconds": 300
92 | },
93 | {
94 | "key": "node.kubernetes.io/unreachable",
95 | "operator": "Exists",
96 | "effect": "NoExecute",
97 | "tolerationSeconds": 300
98 | }
99 | ],
100 | "priority": 0,
101 | "enableServiceLinks": true
102 | },
103 | "status": {
104 | "phase": "Running",
105 | "conditions": [{
106 | "type": "Initialized",
107 | "status": "True",
108 | "lastProbeTime": null,
109 | "lastTransitionTime": "2019-02-11T23:53:55Z"
110 | },
111 | {
112 | "type": "Ready",
113 | "status": "True",
114 | "lastProbeTime": null,
115 | "lastTransitionTime": "2019-02-11T23:53:59Z"
116 | },
117 | {
118 | "type": "ContainersReady",
119 | "status": "True",
120 | "lastProbeTime": null,
121 | "lastTransitionTime": "2019-02-11T23:53:59Z"
122 | },
123 | {
124 | "type": "PodScheduled",
125 | "status": "True",
126 | "lastProbeTime": null,
127 | "lastTransitionTime": "2019-02-11T23:53:55Z"
128 | }
129 | ],
130 | "hostIP": "10.10.204.182",
131 | "podIP": "10.1.1.140",
132 | "startTime": "2019-02-11T23:53:55Z",
133 | "containerStatuses": [{
134 | "name": "hw",
135 | "state": {
136 | "running": {
137 | "startedAt": "2019-02-11T23:53:58Z"
138 | }
139 | },
140 | "lastState": {},
141 | "ready": true,
142 | "restartCount": 0,
143 | "image": "smcquay/hw:v0.1.5",
144 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
145 | "containerID": "docker://e22d6e7128d6783579a5d55caf06df33d4a18447d59e61a12f8a95d43375a582"
146 | }],
147 | "qosClass": "BestEffort"
148 | }
149 | }
150 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic/working/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-7df6995f69-96wth",
4 | "generateName": "hw-7df6995f69-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-7df6995f69-96wth",
7 | "uid": "4a6f4de2-2e58-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "7123",
9 | "creationTimestamp": "2019-02-11T23:53:55Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "7df6995f69"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-tags": "a,b",
18 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
19 | "katalog-sync.wish.com/sync-interval": "2s",
20 | "kubernetes.io/config.seen": "2019-02-11T15:53:55.238848124-08:00",
21 | "kubernetes.io/config.source": "api"
22 | },
23 | "ownerReferences": [{
24 | "apiVersion": "apps/v1",
25 | "kind": "ReplicaSet",
26 | "name": "hw-7df6995f69",
27 | "uid": "4a6df5fd-2e58-11e9-8f72-54e1ad14ee37",
28 | "controller": true,
29 | "blockOwnerDeletion": true
30 | }]
31 | },
32 | "spec": {
33 | "volumes": [{
34 | "name": "default-token-zwnc6",
35 | "secret": {
36 | "secretName": "default-token-zwnc6",
37 | "defaultMode": 420
38 | }
39 | }],
40 | "containers": [{
41 | "name": "hw",
42 | "image": "smcquay/hw:v0.1.5",
43 | "ports": [{
44 | "containerPort": 8080,
45 | "protocol": "TCP"
46 | }],
47 | "resources": {},
48 | "volumeMounts": [{
49 | "name": "default-token-zwnc6",
50 | "readOnly": true,
51 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
52 | }],
53 | "livenessProbe": {
54 | "httpGet": {
55 | "path": "/live",
56 | "port": 8080,
57 | "scheme": "HTTP"
58 | },
59 | "initialDelaySeconds": 5,
60 | "timeoutSeconds": 1,
61 | "periodSeconds": 5,
62 | "successThreshold": 1,
63 | "failureThreshold": 3
64 | },
65 | "readinessProbe": {
66 | "httpGet": {
67 | "path": "/ready",
68 | "port": 8080,
69 | "scheme": "HTTP"
70 | },
71 | "timeoutSeconds": 1,
72 | "periodSeconds": 5,
73 | "successThreshold": 1,
74 | "failureThreshold": 3
75 | },
76 | "terminationMessagePath": "/dev/termination-log",
77 | "terminationMessagePolicy": "File",
78 | "imagePullPolicy": "Always"
79 | }],
80 | "restartPolicy": "Always",
81 | "terminationGracePeriodSeconds": 1,
82 | "dnsPolicy": "ClusterFirst",
83 | "serviceAccountName": "default",
84 | "serviceAccount": "default",
85 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
86 | "securityContext": {},
87 | "schedulerName": "default-scheduler",
88 | "tolerations": [{
89 | "key": "node.kubernetes.io/not-ready",
90 | "operator": "Exists",
91 | "effect": "NoExecute",
92 | "tolerationSeconds": 300
93 | },
94 | {
95 | "key": "node.kubernetes.io/unreachable",
96 | "operator": "Exists",
97 | "effect": "NoExecute",
98 | "tolerationSeconds": 300
99 | }
100 | ],
101 | "priority": 0,
102 | "enableServiceLinks": true
103 | },
104 | "status": {
105 | "phase": "Running",
106 | "conditions": [{
107 | "type": "Initialized",
108 | "status": "True",
109 | "lastProbeTime": null,
110 | "lastTransitionTime": "2019-02-11T23:53:55Z"
111 | },
112 | {
113 | "type": "Ready",
114 | "status": "True",
115 | "lastProbeTime": null,
116 | "lastTransitionTime": "2019-02-11T23:53:59Z"
117 | },
118 | {
119 | "type": "ContainersReady",
120 | "status": "True",
121 | "lastProbeTime": null,
122 | "lastTransitionTime": "2019-02-11T23:53:59Z"
123 | },
124 | {
125 | "type": "PodScheduled",
126 | "status": "True",
127 | "lastProbeTime": null,
128 | "lastTransitionTime": "2019-02-11T23:53:55Z"
129 | }
130 | ],
131 | "hostIP": "10.10.204.182",
132 | "podIP": "10.1.1.140",
133 | "startTime": "2019-02-11T23:53:55Z",
134 | "containerStatuses": [{
135 | "name": "hw",
136 | "state": {
137 | "running": {
138 | "startedAt": "2019-02-11T23:53:58Z"
139 | }
140 | },
141 | "lastState": {},
142 | "ready": true,
143 | "restartCount": 0,
144 | "image": "smcquay/hw:v0.1.5",
145 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
146 | "containerID": "docker://e22d6e7128d6783579a5d55caf06df33d4a18447d59e61a12f8a95d43375a582"
147 | }],
148 | "qosClass": "BestEffort"
149 | }
150 | }
151 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic/terminating/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-7df6995f69-96wth",
4 | "generateName": "hw-7df6995f69-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-7df6995f69-96wth",
7 | "uid": "4a6f4de2-2e58-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "7123",
9 | "creationTimestamp": "2019-02-11T23:53:55Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "7df6995f69"
13 | },
14 | "deletionTimestamp": "2021-01-11T15:53:55.238848124-08:00",
15 | "annotations": {
16 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
17 | "katalog-sync.wish.com/service-port": "8080",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
20 | "katalog-sync.wish.com/sync-interval": "2s",
21 | "kubernetes.io/config.seen": "2019-02-11T15:53:55.238848124-08:00",
22 | "kubernetes.io/config.source": "api"
23 | },
24 | "ownerReferences": [{
25 | "apiVersion": "apps/v1",
26 | "kind": "ReplicaSet",
27 | "name": "hw-7df6995f69",
28 | "uid": "4a6df5fd-2e58-11e9-8f72-54e1ad14ee37",
29 | "controller": true,
30 | "blockOwnerDeletion": true
31 | }]
32 | },
33 | "spec": {
34 | "volumes": [{
35 | "name": "default-token-zwnc6",
36 | "secret": {
37 | "secretName": "default-token-zwnc6",
38 | "defaultMode": 420
39 | }
40 | }],
41 | "containers": [{
42 | "name": "hw",
43 | "image": "smcquay/hw:v0.1.5",
44 | "ports": [{
45 | "containerPort": 8080,
46 | "protocol": "TCP"
47 | }],
48 | "resources": {},
49 | "volumeMounts": [{
50 | "name": "default-token-zwnc6",
51 | "readOnly": true,
52 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
53 | }],
54 | "livenessProbe": {
55 | "httpGet": {
56 | "path": "/live",
57 | "port": 8080,
58 | "scheme": "HTTP"
59 | },
60 | "initialDelaySeconds": 5,
61 | "timeoutSeconds": 1,
62 | "periodSeconds": 5,
63 | "successThreshold": 1,
64 | "failureThreshold": 3
65 | },
66 | "readinessProbe": {
67 | "httpGet": {
68 | "path": "/ready",
69 | "port": 8080,
70 | "scheme": "HTTP"
71 | },
72 | "timeoutSeconds": 1,
73 | "periodSeconds": 5,
74 | "successThreshold": 1,
75 | "failureThreshold": 3
76 | },
77 | "terminationMessagePath": "/dev/termination-log",
78 | "terminationMessagePolicy": "File",
79 | "imagePullPolicy": "Always"
80 | }],
81 | "restartPolicy": "Always",
82 | "terminationGracePeriodSeconds": 1,
83 | "dnsPolicy": "ClusterFirst",
84 | "serviceAccountName": "default",
85 | "serviceAccount": "default",
86 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
87 | "securityContext": {},
88 | "schedulerName": "default-scheduler",
89 | "tolerations": [{
90 | "key": "node.kubernetes.io/not-ready",
91 | "operator": "Exists",
92 | "effect": "NoExecute",
93 | "tolerationSeconds": 300
94 | },
95 | {
96 | "key": "node.kubernetes.io/unreachable",
97 | "operator": "Exists",
98 | "effect": "NoExecute",
99 | "tolerationSeconds": 300
100 | }
101 | ],
102 | "priority": 0,
103 | "enableServiceLinks": true
104 | },
105 | "status": {
106 | "phase": "Running",
107 | "conditions": [{
108 | "type": "Initialized",
109 | "status": "True",
110 | "lastProbeTime": null,
111 | "lastTransitionTime": "2019-02-11T23:53:55Z"
112 | },
113 | {
114 | "type": "Ready",
115 | "status": "True",
116 | "lastProbeTime": null,
117 | "lastTransitionTime": "2019-02-11T23:53:59Z"
118 | },
119 | {
120 | "type": "ContainersReady",
121 | "status": "True",
122 | "lastProbeTime": null,
123 | "lastTransitionTime": "2019-02-11T23:53:59Z"
124 | },
125 | {
126 | "type": "PodScheduled",
127 | "status": "True",
128 | "lastProbeTime": null,
129 | "lastTransitionTime": "2019-02-11T23:53:55Z"
130 | }
131 | ],
132 | "hostIP": "10.10.204.182",
133 | "podIP": "10.1.1.140",
134 | "startTime": "2019-02-11T23:53:55Z",
135 | "containerStatuses": [{
136 | "name": "hw",
137 | "state": {
138 | "running": {
139 | "startedAt": "2019-02-11T23:53:58Z"
140 | }
141 | },
142 | "lastState": {},
143 | "ready": true,
144 | "restartCount": 0,
145 | "image": "smcquay/hw:v0.1.5",
146 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
147 | "containerID": "docker://e22d6e7128d6783579a5d55caf06df33d4a18447d59e61a12f8a95d43375a582"
148 | }],
149 | "qosClass": "BestEffort"
150 | }
151 | }
152 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/readinessGate/not_ready/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-6f596c7944-5q5t7",
4 | "generateName": "hw-6f596c7944-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-6f596c7944-5q5t7",
7 | "uid": "e9fedb18-2e56-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "6244",
9 | "creationTimestamp": "2019-02-11T23:44:03Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "6f596c7944"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-port-servicename2": "12345",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
20 | "katalog-sync.wish.com/sync-interval": "2s",
21 | "kubernetes.io/config.seen": "2019-02-11T15:44:03.945239692-08:00",
22 | "kubernetes.io/config.source": "api"
23 | },
24 | "ownerReferences": [{
25 | "apiVersion": "apps/v1",
26 | "kind": "ReplicaSet",
27 | "name": "hw-6f596c7944",
28 | "uid": "e9f5926f-2e56-11e9-8f72-54e1ad14ee37",
29 | "controller": true,
30 | "blockOwnerDeletion": true
31 | }]
32 | },
33 | "spec": {
34 | "readinessGates": [
35 | {"conditionType": "katalog-sync.wish.com/synced"}
36 | ],
37 | "volumes": [{
38 | "name": "default-token-zwnc6",
39 | "secret": {
40 | "secretName": "default-token-zwnc6",
41 | "defaultMode": 420
42 | }
43 | }],
44 | "containers": [{
45 | "name": "hw",
46 | "image": "smcquay/hw:v0.1.5",
47 | "ports": [{
48 | "containerPort": 8080,
49 | "protocol": "TCP"
50 | }],
51 | "resources": {},
52 | "volumeMounts": [{
53 | "name": "default-token-zwnc6",
54 | "readOnly": true,
55 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
56 | }],
57 | "livenessProbe": {
58 | "httpGet": {
59 | "path": "/live",
60 | "port": 8080,
61 | "scheme": "HTTP"
62 | },
63 | "initialDelaySeconds": 5,
64 | "timeoutSeconds": 1,
65 | "periodSeconds": 5,
66 | "successThreshold": 1,
67 | "failureThreshold": 3
68 | },
69 | "readinessProbe": {
70 | "httpGet": {
71 | "path": "/ready",
72 | "port": 8080,
73 | "scheme": "HTTP"
74 | },
75 | "timeoutSeconds": 1,
76 | "periodSeconds": 5,
77 | "successThreshold": 1,
78 | "failureThreshold": 3
79 | },
80 | "terminationMessagePath": "/dev/termination-log",
81 | "terminationMessagePolicy": "File",
82 | "imagePullPolicy": "Always"
83 | }
84 | ],
85 | "restartPolicy": "Always",
86 | "terminationGracePeriodSeconds": 1,
87 | "dnsPolicy": "ClusterFirst",
88 | "serviceAccountName": "default",
89 | "serviceAccount": "default",
90 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
91 | "securityContext": {},
92 | "schedulerName": "default-scheduler",
93 | "tolerations": [{
94 | "key": "node.kubernetes.io/not-ready",
95 | "operator": "Exists",
96 | "effect": "NoExecute",
97 | "tolerationSeconds": 300
98 | },
99 | {
100 | "key": "node.kubernetes.io/unreachable",
101 | "operator": "Exists",
102 | "effect": "NoExecute",
103 | "tolerationSeconds": 300
104 | }
105 | ],
106 | "priority": 0,
107 | "enableServiceLinks": true
108 | },
109 | "status": {
110 | "phase": "Running",
111 | "conditions": [{
112 | "type": "Initialized",
113 | "status": "True",
114 | "lastProbeTime": null,
115 | "lastTransitionTime": "2019-02-11T23:44:03Z"
116 | },
117 | {
118 | "type": "Ready",
119 | "status": "True",
120 | "lastProbeTime": null,
121 | "lastTransitionTime": "2019-02-11T23:44:36Z"
122 | },
123 | {
124 | "type": "ContainersReady",
125 | "status": "True",
126 | "lastProbeTime": null,
127 | "lastTransitionTime": "2019-02-11T23:44:36Z"
128 | },
129 | {
130 | "type": "PodScheduled",
131 | "status": "True",
132 | "lastProbeTime": null,
133 | "lastTransitionTime": "2019-02-11T23:44:03Z"
134 | }
135 | ],
136 | "hostIP": "10.10.204.182",
137 | "podIP": "10.1.1.137",
138 | "startTime": "2019-02-11T23:44:03Z",
139 | "containerStatuses": [{
140 | "name": "hw",
141 | "state": {
142 | "running": {
143 | "startedAt": "2019-02-11T23:44:08Z"
144 | }
145 | },
146 | "lastState": {},
147 | "ready": false,
148 | "restartCount": 0,
149 | "image": "smcquay/hw:v0.1.5",
150 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
151 | "containerID": "docker://1eefaa0d929cabe94e5fb2d958ec1de7bbc9ec1a3033bac5c3ea01c1ad57a80b"
152 | }
153 | ],
154 | "qosClass": "BestEffort"
155 | }
156 | }
157 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic_blacklist/failing/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-7df6995f69-96wth",
4 | "generateName": "hw-7df6995f69-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-7df6995f69-96wth",
7 | "uid": "4a6f4de2-2e58-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "7123",
9 | "creationTimestamp": "2019-02-11T23:53:55Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "7df6995f69"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-tags": "a,b",
18 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
19 | "katalog-sync.wish.com/container-exclude": "ignore-container",
20 | "katalog-sync.wish.com/sync-interval": "2s",
21 | "kubernetes.io/config.seen": "2019-02-11T15:53:55.238848124-08:00",
22 | "kubernetes.io/config.source": "api"
23 | },
24 | "ownerReferences": [{
25 | "apiVersion": "apps/v1",
26 | "kind": "ReplicaSet",
27 | "name": "hw-7df6995f69",
28 | "uid": "4a6df5fd-2e58-11e9-8f72-54e1ad14ee37",
29 | "controller": true,
30 | "blockOwnerDeletion": true
31 | }]
32 | },
33 | "spec": {
34 | "volumes": [{
35 | "name": "default-token-zwnc6",
36 | "secret": {
37 | "secretName": "default-token-zwnc6",
38 | "defaultMode": 420
39 | }
40 | }],
41 | "containers": [{
42 | "name": "hw",
43 | "image": "smcquay/hw:v0.1.5",
44 | "ports": [{
45 | "containerPort": 8080,
46 | "protocol": "TCP"
47 | }],
48 | "resources": {},
49 | "volumeMounts": [{
50 | "name": "default-token-zwnc6",
51 | "readOnly": true,
52 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
53 | }],
54 | "livenessProbe": {
55 | "httpGet": {
56 | "path": "/live",
57 | "port": 8080,
58 | "scheme": "HTTP"
59 | },
60 | "initialDelaySeconds": 5,
61 | "timeoutSeconds": 1,
62 | "periodSeconds": 5,
63 | "successThreshold": 1,
64 | "failureThreshold": 3
65 | },
66 | "readinessProbe": {
67 | "httpGet": {
68 | "path": "/ready",
69 | "port": 8080,
70 | "scheme": "HTTP"
71 | },
72 | "timeoutSeconds": 1,
73 | "periodSeconds": 5,
74 | "successThreshold": 1,
75 | "failureThreshold": 3
76 | },
77 | "terminationMessagePath": "/dev/termination-log",
78 | "terminationMessagePolicy": "File",
79 | "imagePullPolicy": "Always"
80 | }],
81 | "restartPolicy": "Always",
82 | "terminationGracePeriodSeconds": 1,
83 | "dnsPolicy": "ClusterFirst",
84 | "serviceAccountName": "default",
85 | "serviceAccount": "default",
86 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
87 | "securityContext": {},
88 | "schedulerName": "default-scheduler",
89 | "tolerations": [{
90 | "key": "node.kubernetes.io/not-ready",
91 | "operator": "Exists",
92 | "effect": "NoExecute",
93 | "tolerationSeconds": 300
94 | },
95 | {
96 | "key": "node.kubernetes.io/unreachable",
97 | "operator": "Exists",
98 | "effect": "NoExecute",
99 | "tolerationSeconds": 300
100 | }
101 | ],
102 | "priority": 0,
103 | "enableServiceLinks": true
104 | },
105 | "status": {
106 | "phase": "Running",
107 | "conditions": [{
108 | "type": "Initialized",
109 | "status": "True",
110 | "lastProbeTime": null,
111 | "lastTransitionTime": "2019-02-11T23:53:55Z"
112 | },
113 | {
114 | "type": "Ready",
115 | "status": "True",
116 | "lastProbeTime": null,
117 | "lastTransitionTime": "2019-02-11T23:53:59Z"
118 | },
119 | {
120 | "type": "ContainersReady",
121 | "status": "True",
122 | "lastProbeTime": null,
123 | "lastTransitionTime": "2019-02-11T23:53:59Z"
124 | },
125 | {
126 | "type": "PodScheduled",
127 | "status": "True",
128 | "lastProbeTime": null,
129 | "lastTransitionTime": "2019-02-11T23:53:55Z"
130 | }
131 | ],
132 | "hostIP": "10.10.204.182",
133 | "podIP": "10.1.1.140",
134 | "startTime": "2019-02-11T23:53:55Z",
135 | "containerStatuses": [{
136 | "name": "hw",
137 | "state": {
138 | "running": {
139 | "startedAt": "2019-02-11T23:53:58Z"
140 | }
141 | },
142 | "lastState": {},
143 | "ready": false,
144 | "restartCount": 0,
145 | "image": "smcquay/hw:v0.1.5",
146 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
147 | "containerID": "docker://e22d6e7128d6783579a5d55caf06df33d4a18447d59e61a12f8a95d43375a582"
148 | },
149 | {
150 | "name": "ignore-container",
151 | "state": {
152 | "running": {
153 | "startedAt": "2019-02-11T23:53:58Z"
154 | }
155 | },
156 | "lastState": {},
157 | "ready": false,
158 | "restartCount": 0,
159 | "image": "alpine:latest"
160 | }],
161 | "qosClass": "BestEffort"
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/basic_blacklist/working/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-7df6995f69-96wth",
4 | "generateName": "hw-7df6995f69-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-7df6995f69-96wth",
7 | "uid": "4a6f4de2-2e58-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "7123",
9 | "creationTimestamp": "2019-02-11T23:53:55Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "7df6995f69"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-tags": "a,b",
18 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
19 | "katalog-sync.wish.com/container-exclude": "ignore-container",
20 | "katalog-sync.wish.com/sync-interval": "2s",
21 | "kubernetes.io/config.seen": "2019-02-11T15:53:55.238848124-08:00",
22 | "kubernetes.io/config.source": "api"
23 | },
24 | "ownerReferences": [{
25 | "apiVersion": "apps/v1",
26 | "kind": "ReplicaSet",
27 | "name": "hw-7df6995f69",
28 | "uid": "4a6df5fd-2e58-11e9-8f72-54e1ad14ee37",
29 | "controller": true,
30 | "blockOwnerDeletion": true
31 | }]
32 | },
33 | "spec": {
34 | "volumes": [{
35 | "name": "default-token-zwnc6",
36 | "secret": {
37 | "secretName": "default-token-zwnc6",
38 | "defaultMode": 420
39 | }
40 | }],
41 | "containers": [{
42 | "name": "hw",
43 | "image": "smcquay/hw:v0.1.5",
44 | "ports": [{
45 | "containerPort": 8080,
46 | "protocol": "TCP"
47 | }],
48 | "resources": {},
49 | "volumeMounts": [{
50 | "name": "default-token-zwnc6",
51 | "readOnly": true,
52 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
53 | }],
54 | "livenessProbe": {
55 | "httpGet": {
56 | "path": "/live",
57 | "port": 8080,
58 | "scheme": "HTTP"
59 | },
60 | "initialDelaySeconds": 5,
61 | "timeoutSeconds": 1,
62 | "periodSeconds": 5,
63 | "successThreshold": 1,
64 | "failureThreshold": 3
65 | },
66 | "readinessProbe": {
67 | "httpGet": {
68 | "path": "/ready",
69 | "port": 8080,
70 | "scheme": "HTTP"
71 | },
72 | "timeoutSeconds": 1,
73 | "periodSeconds": 5,
74 | "successThreshold": 1,
75 | "failureThreshold": 3
76 | },
77 | "terminationMessagePath": "/dev/termination-log",
78 | "terminationMessagePolicy": "File",
79 | "imagePullPolicy": "Always"
80 | }],
81 | "restartPolicy": "Always",
82 | "terminationGracePeriodSeconds": 1,
83 | "dnsPolicy": "ClusterFirst",
84 | "serviceAccountName": "default",
85 | "serviceAccount": "default",
86 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
87 | "securityContext": {},
88 | "schedulerName": "default-scheduler",
89 | "tolerations": [{
90 | "key": "node.kubernetes.io/not-ready",
91 | "operator": "Exists",
92 | "effect": "NoExecute",
93 | "tolerationSeconds": 300
94 | },
95 | {
96 | "key": "node.kubernetes.io/unreachable",
97 | "operator": "Exists",
98 | "effect": "NoExecute",
99 | "tolerationSeconds": 300
100 | }
101 | ],
102 | "priority": 0,
103 | "enableServiceLinks": true
104 | },
105 | "status": {
106 | "phase": "Running",
107 | "conditions": [{
108 | "type": "Initialized",
109 | "status": "True",
110 | "lastProbeTime": null,
111 | "lastTransitionTime": "2019-02-11T23:53:55Z"
112 | },
113 | {
114 | "type": "Ready",
115 | "status": "True",
116 | "lastProbeTime": null,
117 | "lastTransitionTime": "2019-02-11T23:53:59Z"
118 | },
119 | {
120 | "type": "ContainersReady",
121 | "status": "True",
122 | "lastProbeTime": null,
123 | "lastTransitionTime": "2019-02-11T23:53:59Z"
124 | },
125 | {
126 | "type": "PodScheduled",
127 | "status": "True",
128 | "lastProbeTime": null,
129 | "lastTransitionTime": "2019-02-11T23:53:55Z"
130 | }
131 | ],
132 | "hostIP": "10.10.204.182",
133 | "podIP": "10.1.1.140",
134 | "startTime": "2019-02-11T23:53:55Z",
135 | "containerStatuses": [{
136 | "name": "hw",
137 | "state": {
138 | "running": {
139 | "startedAt": "2019-02-11T23:53:58Z"
140 | }
141 | },
142 | "lastState": {},
143 | "ready": true,
144 | "restartCount": 0,
145 | "image": "smcquay/hw:v0.1.5",
146 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
147 | "containerID": "docker://e22d6e7128d6783579a5d55caf06df33d4a18447d59e61a12f8a95d43375a582"
148 | },
149 | {
150 | "name": "ignore-container",
151 | "state": {
152 | "running": {
153 | "startedAt": "2019-02-11T23:53:58Z"
154 | }
155 | },
156 | "lastState": {},
157 | "ready": false,
158 | "restartCount": 0,
159 | "image": "alpine:latest"
160 | }],
161 | "qosClass": "BestEffort"
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/readinessGate/working/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-6f596c7944-5q5t7",
4 | "generateName": "hw-6f596c7944-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-6f596c7944-5q5t7",
7 | "uid": "e9fedb18-2e56-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "6244",
9 | "creationTimestamp": "2019-02-11T23:44:03Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "6f596c7944"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-port-servicename2": "12345",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
20 | "katalog-sync.wish.com/service-meta": "a:1,b:2",
21 | "katalog-sync.wish.com/service-meta-servicename2": "b:1,c:2",
22 | "katalog-sync.wish.com/sync-interval": "2s",
23 | "kubernetes.io/config.seen": "2019-02-11T15:44:03.945239692-08:00",
24 | "kubernetes.io/config.source": "api"
25 | },
26 | "ownerReferences": [{
27 | "apiVersion": "apps/v1",
28 | "kind": "ReplicaSet",
29 | "name": "hw-6f596c7944",
30 | "uid": "e9f5926f-2e56-11e9-8f72-54e1ad14ee37",
31 | "controller": true,
32 | "blockOwnerDeletion": true
33 | }]
34 | },
35 | "spec": {
36 | "readinessGates": [
37 | {"conditionType": "katalog-sync.wish.com/synced"}
38 | ],
39 | "volumes": [{
40 | "name": "default-token-zwnc6",
41 | "secret": {
42 | "secretName": "default-token-zwnc6",
43 | "defaultMode": 420
44 | }
45 | }],
46 | "containers": [{
47 | "name": "hw",
48 | "image": "smcquay/hw:v0.1.5",
49 | "ports": [{
50 | "containerPort": 8080,
51 | "protocol": "TCP"
52 | }],
53 | "resources": {},
54 | "volumeMounts": [{
55 | "name": "default-token-zwnc6",
56 | "readOnly": true,
57 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
58 | }],
59 | "livenessProbe": {
60 | "httpGet": {
61 | "path": "/live",
62 | "port": 8080,
63 | "scheme": "HTTP"
64 | },
65 | "initialDelaySeconds": 5,
66 | "timeoutSeconds": 1,
67 | "periodSeconds": 5,
68 | "successThreshold": 1,
69 | "failureThreshold": 3
70 | },
71 | "readinessProbe": {
72 | "httpGet": {
73 | "path": "/ready",
74 | "port": 8080,
75 | "scheme": "HTTP"
76 | },
77 | "timeoutSeconds": 1,
78 | "periodSeconds": 5,
79 | "successThreshold": 1,
80 | "failureThreshold": 3
81 | },
82 | "terminationMessagePath": "/dev/termination-log",
83 | "terminationMessagePolicy": "File",
84 | "imagePullPolicy": "Always"
85 | }
86 | ],
87 | "restartPolicy": "Always",
88 | "terminationGracePeriodSeconds": 1,
89 | "dnsPolicy": "ClusterFirst",
90 | "serviceAccountName": "default",
91 | "serviceAccount": "default",
92 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
93 | "securityContext": {},
94 | "schedulerName": "default-scheduler",
95 | "tolerations": [{
96 | "key": "node.kubernetes.io/not-ready",
97 | "operator": "Exists",
98 | "effect": "NoExecute",
99 | "tolerationSeconds": 300
100 | },
101 | {
102 | "key": "node.kubernetes.io/unreachable",
103 | "operator": "Exists",
104 | "effect": "NoExecute",
105 | "tolerationSeconds": 300
106 | }
107 | ],
108 | "priority": 0,
109 | "enableServiceLinks": true
110 | },
111 | "status": {
112 | "phase": "Running",
113 | "conditions": [{
114 | "type": "Initialized",
115 | "status": "True",
116 | "lastProbeTime": null,
117 | "lastTransitionTime": "2019-02-11T23:44:03Z"
118 | },
119 | {
120 | "type": "Ready",
121 | "status": "True",
122 | "lastProbeTime": null,
123 | "lastTransitionTime": "2019-02-11T23:44:36Z"
124 | },
125 | {
126 | "type": "ContainersReady",
127 | "status": "True",
128 | "lastProbeTime": null,
129 | "lastTransitionTime": "2019-02-11T23:44:36Z"
130 | },
131 | {
132 | "type": "PodScheduled",
133 | "status": "True",
134 | "lastProbeTime": null,
135 | "lastTransitionTime": "2019-02-11T23:44:03Z"
136 | }
137 | ],
138 | "hostIP": "10.10.204.182",
139 | "podIP": "10.1.1.137",
140 | "startTime": "2019-02-11T23:44:03Z",
141 | "containerStatuses": [{
142 | "name": "hw",
143 | "state": {
144 | "running": {
145 | "startedAt": "2019-02-11T23:44:08Z"
146 | }
147 | },
148 | "lastState": {},
149 | "ready": true,
150 | "restartCount": 0,
151 | "image": "smcquay/hw:v0.1.5",
152 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
153 | "containerID": "docker://1eefaa0d929cabe94e5fb2d958ec1de7bbc9ec1a3033bac5c3ea01c1ad57a80b"
154 | }
155 | ],
156 | "qosClass": "BestEffort"
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/cmd/katalog-sync-sidecar/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "net"
6 | "net/http"
7 | "os"
8 | "os/signal"
9 | "syscall"
10 | "time"
11 |
12 | flags "github.com/jessevdk/go-flags"
13 | "github.com/sirupsen/logrus"
14 | grpc "google.golang.org/grpc"
15 |
16 | katalogsync "github.com/wish/katalog-sync/proto"
17 | )
18 |
19 | var opts struct {
20 | LogLevel string `long:"log-level" env:"LOG_LEVEL" description:"Log level" default:"info"`
21 | KatalogSyncEndpoint string `long:"katalog-sync-daemon" env:"KATALOG_SYNC_DAEMON" description:"katalog-sync-daemon API endpoint"`
22 | KatalogSyncMaxBackoff time.Duration `long:"katalog-sync-daemon-max-backoff" env:"KATALOG_SYNC_DAEMON_MAX_BACKOFF" description:"katalog-sync-daemon API max backoff" default:"1s"`
23 | BindAddr string `long:"bind-address" env:"BIND_ADDRESS" description:"address for binding checks to"`
24 |
25 | Namespace string `long:"namespace" env:"NAMESPACE" description:"k8s namespace this is running in"`
26 | PodName string `long:"pod-name" env:"POD_NAME" description:"k8s pod this is running in"`
27 | ContainerName string `long:"container-name" env:"CONTAINER_NAME" description:"k8s container this is running in"`
28 | }
29 |
30 | func main() {
31 | parser := flags.NewParser(&opts, flags.Default)
32 | if _, err := parser.Parse(); err != nil {
33 | // If the error was from the parser, then we can simply return
34 | // as Parse() prints the error already
35 | if _, ok := err.(*flags.Error); ok {
36 | os.Exit(1)
37 | }
38 | logrus.Fatalf("Error parsing flags: %v", err)
39 | }
40 |
41 | // Use log level
42 | level, err := logrus.ParseLevel(opts.LogLevel)
43 | if err != nil {
44 | logrus.Fatalf("Unknown log level %s: %v", opts.LogLevel, err)
45 | }
46 | logrus.SetLevel(level)
47 |
48 | // Set the log format to have a reasonable timestamp
49 | formatter := &logrus.TextFormatter{
50 | FullTimestamp: true,
51 | }
52 | logrus.SetFormatter(formatter)
53 |
54 | var ready bool
55 |
56 | l, err := net.Listen("tcp", opts.BindAddr)
57 | if err != nil {
58 | logrus.Fatalf("Error binding: %v", err)
59 | }
60 |
61 | go func() {
62 | http.HandleFunc("/ready", func(w http.ResponseWriter, r *http.Request) {
63 | logrus.Infof("ready? %v", ready)
64 | if !ready {
65 | http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
66 | }
67 | })
68 | // TODO: log error?
69 | http.Serve(l, http.DefaultServeMux)
70 | }()
71 |
72 | conn, err := grpc.Dial(opts.KatalogSyncEndpoint, grpc.WithInsecure(), grpc.WithBackoffMaxDelay(opts.KatalogSyncMaxBackoff))
73 | if err != nil {
74 | logrus.Fatalf("Unable to connect to katalog-sync-daemon: %v", err)
75 | }
76 | defer conn.Close()
77 |
78 | ctx, cancel := context.WithCancel(context.Background())
79 | defer cancel() // TODO: do we even need this?
80 | sigs := make(chan os.Signal, 1)
81 | defer close(sigs)
82 | signal.Notify(sigs, syscall.SIGHUP, syscall.SIGTERM, syscall.SIGINT)
83 |
84 | client := katalogsync.NewKatalogSyncClient(conn)
85 |
86 | // Connect to sidecar and send register request
87 | // We want to retry until we are successful
88 | for {
89 | // If we get a signal to stop; lets gracefully exit
90 | select {
91 | case sig := <-sigs:
92 | switch sig {
93 | case syscall.SIGTERM, syscall.SIGINT:
94 | logrus.Infof("Got signal to stop while registering, exiting")
95 | return
96 | }
97 | default:
98 | }
99 |
100 | if _, err := client.Register(ctx, &katalogsync.RegisterQuery{Namespace: opts.Namespace, PodName: opts.PodName, ContainerName: opts.ContainerName}); err != nil {
101 | logrus.Errorf("error registering with katalog-sync-daemon: %v %v", grpc.Code(err), err)
102 | } else {
103 | break
104 | }
105 |
106 | // TODO: better sleep + backoff based on GRPC error codes
107 | time.Sleep(time.Second)
108 | }
109 | ready = true
110 | logrus.Infof("register complete, waiting for signals")
111 |
112 | // TODO: add option that will do the TTL updates on our own?
113 |
114 | // Wait for kill signal
115 | WAITLOOP:
116 | for {
117 | select {
118 | case sig := <-sigs:
119 | switch sig {
120 | case syscall.SIGTERM, syscall.SIGINT:
121 | logrus.Infof("Got signal to stop, starting deregister")
122 | break WAITLOOP
123 | }
124 | }
125 | }
126 |
127 | go func() {
128 | <-sigs
129 | cancel()
130 | }()
131 |
132 | // Send deregister request
133 | for {
134 | select {
135 | case <-ctx.Done():
136 | return
137 | default:
138 | }
139 | logrus.Infof("deregister attempt")
140 | _, err := client.Deregister(ctx, &katalogsync.DeregisterQuery{Namespace: opts.Namespace, PodName: opts.PodName, ContainerName: opts.ContainerName})
141 | if err == nil {
142 | logrus.Infof("deregister succeeded")
143 | return
144 | }
145 | logrus.Errorf("error deregistering with katalog-sync-daemon: %v %v", grpc.Code(err), err)
146 | // TODO: better sleep + backoff based on GRPC error codes
147 | time.Sleep(time.Second)
148 | }
149 | }
150 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/bad_sidecar_name/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-5cbd9bb7fc-plkvz",
4 | "generateName": "hw-5cbd9bb7fc-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-5cbd9bb7fc-plkvz",
7 | "uid": "281d6bc5-296e-11e9-8d0b-54e1ad14ee37",
8 | "resourceVersion": "1402",
9 | "creationTimestamp": "2019-02-05T17:47:50Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "5cbd9bb7fc"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-port-servicename2": "12345",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/sidecar": "katalog-sync-sidecar",
20 | "katalog-sync.wish.com/sync-interval": "2s",
21 | "kubernetes.io/config.seen": "2019-02-11T14:40:41.498594331-08:00",
22 | "kubernetes.io/config.source": "api"
23 | },
24 | "ownerReferences": [{
25 | "apiVersion": "apps/v1",
26 | "kind": "ReplicaSet",
27 | "name": "hw-5cbd9bb7fc",
28 | "uid": "281c2a9a-296e-11e9-8d0b-54e1ad14ee37",
29 | "controller": true,
30 | "blockOwnerDeletion": true
31 | }]
32 | },
33 | "spec": {
34 | "volumes": [{
35 | "name": "default-token-zwnc6",
36 | "secret": {
37 | "secretName": "default-token-zwnc6",
38 | "defaultMode": 420
39 | }
40 | }],
41 | "containers": [{
42 | "name": "katalog-sync-sidecars",
43 | "image": "quay.io/wish/katalog-sync:latest",
44 | "command": [
45 | "/bin/katalog-sync-sidecar"
46 | ],
47 | "args": [
48 | "--katalog-sync-daemon=$(HOST_IP):8501",
49 | "--namespace=$(MY_POD_NAMESPACE)",
50 | "--pod-name=$(MY_POD_NAME)",
51 | "--container-name=katalog-sync-sidecar",
52 | "--bind-address=:8888"
53 | ],
54 | "env": [{
55 | "name": "HOST_IP",
56 | "valueFrom": {
57 | "fieldRef": {
58 | "apiVersion": "v1",
59 | "fieldPath": "status.hostIP"
60 | }
61 | }
62 | },
63 | {
64 | "name": "MY_POD_NAMESPACE",
65 | "valueFrom": {
66 | "fieldRef": {
67 | "apiVersion": "v1",
68 | "fieldPath": "metadata.namespace"
69 | }
70 | }
71 | },
72 | {
73 | "name": "MY_POD_NAME",
74 | "valueFrom": {
75 | "fieldRef": {
76 | "apiVersion": "v1",
77 | "fieldPath": "metadata.name"
78 | }
79 | }
80 | }
81 | ],
82 | "resources": {},
83 | "volumeMounts": [{
84 | "name": "default-token-zwnc6",
85 | "readOnly": true,
86 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
87 | }],
88 | "terminationMessagePath": "/dev/termination-log",
89 | "terminationMessagePolicy": "File",
90 | "imagePullPolicy": "Always"
91 | }],
92 | "restartPolicy": "Always",
93 | "terminationGracePeriodSeconds": 1,
94 | "dnsPolicy": "ClusterFirst",
95 | "serviceAccountName": "default",
96 | "serviceAccount": "default",
97 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
98 | "securityContext": {},
99 | "schedulerName": "default-scheduler",
100 | "tolerations": [{
101 | "key": "node.kubernetes.io/not-ready",
102 | "operator": "Exists",
103 | "effect": "NoExecute",
104 | "tolerationSeconds": 300
105 | },
106 | {
107 | "key": "node.kubernetes.io/unreachable",
108 | "operator": "Exists",
109 | "effect": "NoExecute",
110 | "tolerationSeconds": 300
111 | }
112 | ],
113 | "priority": 0,
114 | "enableServiceLinks": true
115 | },
116 | "status": {
117 | "phase": "Running",
118 | "conditions": [{
119 | "type": "Initialized",
120 | "status": "True",
121 | "lastProbeTime": null,
122 | "lastTransitionTime": "2019-02-05T17:47:50Z"
123 | },
124 | {
125 | "type": "Ready",
126 | "status": "False",
127 | "lastProbeTime": null,
128 | "lastTransitionTime": "2019-02-05T17:47:56Z",
129 | "reason": "ContainersNotReady",
130 | "message": "containers with unready status: [katalog-sync-sidecars]"
131 | },
132 | {
133 | "type": "ContainersReady",
134 | "status": "False",
135 | "lastProbeTime": null,
136 | "lastTransitionTime": "2019-02-05T17:47:56Z",
137 | "reason": "ContainersNotReady",
138 | "message": "containers with unready status: [katalog-sync-sidecars]"
139 | },
140 | {
141 | "type": "PodScheduled",
142 | "status": "True",
143 | "lastProbeTime": null,
144 | "lastTransitionTime": "2019-02-05T17:47:50Z"
145 | }
146 | ],
147 | "hostIP": "172.18.28.15",
148 | "startTime": "2019-02-05T17:47:50Z",
149 | "containerStatuses": [{
150 | "name": "katalog-sync-sidecars",
151 | "state": {
152 | "terminated": {
153 | "exitCode": 2,
154 | "reason": "Error",
155 | "startedAt": "2019-02-05T17:50:47Z",
156 | "finishedAt": "2019-02-05T17:50:47Z",
157 | "containerID": "docker://e32feae36e13d28c9a0fc6d6fce69fd3665cd2e305eec4159293a1789f4195b7"
158 | }
159 | },
160 | "lastState": {},
161 | "ready": false,
162 | "restartCount": 5,
163 | "image": "quay.io/wish/katalog-sync:latest",
164 | "imageID": "docker-pullable://quay.io/wish/katalog-sync@sha256:63697ed0510ab09f2a6b0042b87d5a7a244ff05475455e07d5da808d6d1495ed",
165 | "containerID": "docker://e32feae36e13d28c9a0fc6d6fce69fd3665cd2e305eec4159293a1789f4195b7"
166 | }],
167 | "qosClass": "BestEffort"
168 | }
169 | }
170 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # katalog-sync [](https://godoc.org/github.com/wish/katalog-sync) [](https://travis-ci.org/wish/katalog-sync) [](https://goreportcard.com/report/github.com/wish/katalog-sync)
2 |
3 | katalog-sync is a node-local mechanism for syncing k8s pods to consul services.
4 |
5 | katalog-sync has:
6 |
7 | - node-local syncing to local consul-agent
8 | - agent-services in consul, meaning health of those endpoints is tied to the node agent
9 | - syncing of k8s readiness state to consul as a service check
10 | - (optional) sidecar service to ensure consul registration before a pod is marked "ready"
11 |
12 | katalog-sync makes the following assumptions:
13 |
14 | - You have a consul-agent running on each node (presumably as a Daemonset)
15 | - You are running a consul-agent which supports ServiceMetadata (>= [1.0.7](https://www.hashicorp.com/blog/consul-1-0-7))
16 | - You want to sync Pods to consul services and have the readiness values reflected
17 | - Your pods can communicate with Daemonsets running on the same node
18 |
19 | ### katalog-sync overview
20 |
21 | ![katalog-sync diagram](static/katalog-sync-diagram.svg)
22 | 1. Kubelet starts the containers on the node
23 | 1. (optional) The katalog-sync-sidecar calls the katalog-sync daemonset and waits until registration with consul is complete
24 | 1. The daemonset pulls pod state from the local kubelet API
25 | 1. The daemonset syncs those changes to the local consul agent
26 |
27 | ### k8s pod annotations
28 | | Annotation | |
29 | |---------------------------------------------------|--------------------------------------------------|
30 | | katalog-sync.wish.com/service-names | Comma-separated list of service names |
31 | | katalog-sync.wish.com/service-port | Port for the consul service |
32 | | katalog-sync.wish.com/service-port-**SERVICE-NAME** | Port override to use for a specific service name |
33 | | katalog-sync.wish.com/service-tags | Tags for the consul service |
34 | | katalog-sync.wish.com/service-tags-**SERVICE-NAME** | Tags override to use for a specific service name |
35 | | katalog-sync.wish.com/service-meta | ServiceMeta for the consul service |
36 | | katalog-sync.wish.com/service-meta-**SERVICE-NAME** | ServiceMeta override to use for a specific service name |
37 | | katalog-sync.wish.com/service-health | Fixed health status to report regardless of pod readiness; valid values: 'passing', 'warning', 'critical' |
38 | | katalog-sync.wish.com/service-health-**SERVICE-NAME** | Health status override to use for a specific service name |
39 | | katalog-sync.wish.com/sidecar | Container name of the katalog-sync-sidecar |
40 | | katalog-sync.wish.com/sync-interval | How frequently to sync this service with consul |
41 | | katalog-sync.wish.com/service-check-ttl | TTL for the service checks put into consul |
42 | | katalog-sync.wish.com/container-exclude | Comma-separated list of containers to exclude from the readiness check |
43 |
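For illustration, a pod that should register under two consul service names, with a per-service port and tag override, might carry annotations like the following (a minimal sketch drawn from the annotations exercised in the test fixtures under `pkg/daemon/testfiles`, not a complete pod spec):

``` yaml
metadata:
  annotations:
    katalog-sync.wish.com/service-names: "hw-service-name,servicename2"
    katalog-sync.wish.com/service-port: "8080"
    katalog-sync.wish.com/service-port-servicename2: "12345"
    katalog-sync.wish.com/service-tags: "a,b"
    katalog-sync.wish.com/service-tags-servicename2: "b,c"
    katalog-sync.wish.com/service-meta: "a:1,b:2"
    katalog-sync.wish.com/sync-interval: "2s"
```

Unqualified annotations apply to all listed service names; the `-SERVICE-NAME` suffixed forms override them for that service only.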
44 | ### katalog-sync-daemon options
45 | ``` console
46 | $ ./katalog-sync-daemon -h
47 | Usage:
48 | katalog-sync-daemon [OPTIONS]
49 |
50 | Application Options:
51 | --log-level= Log level (default: info) [$LOG_LEVEL]
52 | --bind-address= address for binding RPC interface for
53 | sidecar [$BIND_ADDRESS]
54 | --pprof-bind-address= address for binding pprof
55 | [$PPROF_BIND_ADDRESS]
56 | --min-sync-interval= minimum duration allowed for sync
57 | (default: 500ms) [$MIN_SYNC_INTERVAL]
58 | --max-sync-interval= maximum duration allowed for sync
59 | (default: 5s) [$MAX_SYNC_INTERVAL]
60 | --default-sync-interval=
61 | --default-check-ttl=
62 | --sync-ttl-buffer-duration= how much time to ensure is between
63 | sync time and ttl (default: 10s)
64 | [$SYNC_TTL_BUFFER_DURATION]
65 | --kubelet-api= kubelet API endpoint (default:
66 | http://localhost:10255/pods)
67 | [$KUBELET_API]
68 | --kubelet-api-insecure-skip-verify skip verification of TLS certificate
69 | from kubelet API
70 | [$KUBELET_API_INSECURE_SKIP_VERIFY]
71 |
72 | Help Options:
73 | -h, --help Show this help message
74 | ```
75 |
76 | ### katalog-sync-sidecar options
77 | ``` console
78 | $ ./katalog-sync-sidecar -h
79 | Usage:
80 | katalog-sync-sidecar [OPTIONS]
81 |
82 | Application Options:
83 | --log-level= Log level (default: info) [$LOG_LEVEL]
84 | --katalog-sync-daemon= katalog-sync-daemon API endpoint [$KATALOG_SYNC_DAEMON]
85 | --katalog-sync-daemon-max-backoff= katalog-sync-daemon API max backoff (default: 1s) [$KATALOG_SYNC_DAEMON_MAX_BACKOFF]
86 | --bind-address= address for binding checks to [$BIND_ADDRESS]
87 | --namespace= k8s namespace this is running in [$NAMESPACE]
88 | --pod-name= k8s pod this is running in [$POD_NAME]
89 | --container-name= k8s container this is running in [$CONTAINER_NAME]
90 |
91 | Help Options:
92 | -h, --help Show this help message
93 | ```
94 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/not_ready/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-6f596c7944-5q5t7",
4 | "generateName": "hw-6f596c7944-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-6f596c7944-5q5t7",
7 | "uid": "e9fedb18-2e56-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "6244",
9 | "creationTimestamp": "2019-02-11T23:44:03Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "6f596c7944"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-port-servicename2": "12345",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
20 | "katalog-sync.wish.com/sidecar": "katalog-sync-sidecar",
21 | "katalog-sync.wish.com/sync-interval": "2s",
22 | "kubernetes.io/config.seen": "2019-02-11T15:44:03.945239692-08:00",
23 | "kubernetes.io/config.source": "api"
24 | },
25 | "ownerReferences": [{
26 | "apiVersion": "apps/v1",
27 | "kind": "ReplicaSet",
28 | "name": "hw-6f596c7944",
29 | "uid": "e9f5926f-2e56-11e9-8f72-54e1ad14ee37",
30 | "controller": true,
31 | "blockOwnerDeletion": true
32 | }]
33 | },
34 | "spec": {
35 | "volumes": [{
36 | "name": "default-token-zwnc6",
37 | "secret": {
38 | "secretName": "default-token-zwnc6",
39 | "defaultMode": 420
40 | }
41 | }],
42 | "containers": [{
43 | "name": "hw",
44 | "image": "smcquay/hw:v0.1.5",
45 | "ports": [{
46 | "containerPort": 8080,
47 | "protocol": "TCP"
48 | }],
49 | "resources": {},
50 | "volumeMounts": [{
51 | "name": "default-token-zwnc6",
52 | "readOnly": true,
53 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
54 | }],
55 | "livenessProbe": {
56 | "httpGet": {
57 | "path": "/live",
58 | "port": 8080,
59 | "scheme": "HTTP"
60 | },
61 | "initialDelaySeconds": 5,
62 | "timeoutSeconds": 1,
63 | "periodSeconds": 5,
64 | "successThreshold": 1,
65 | "failureThreshold": 3
66 | },
67 | "readinessProbe": {
68 | "httpGet": {
69 | "path": "/ready",
70 | "port": 8080,
71 | "scheme": "HTTP"
72 | },
73 | "timeoutSeconds": 1,
74 | "periodSeconds": 5,
75 | "successThreshold": 1,
76 | "failureThreshold": 3
77 | },
78 | "terminationMessagePath": "/dev/termination-log",
79 | "terminationMessagePolicy": "File",
80 | "imagePullPolicy": "Always"
81 | },
82 | {
83 | "name": "katalog-sync-sidecar",
84 | "image": "quay.io/wish/katalog-sync:latest",
85 | "command": [
86 | "/bin/katalog-sync-sidecar"
87 | ],
88 | "args": [
89 | "--katalog-sync-daemon=$(HOST_IP):8501",
90 | "--namespace=$(MY_POD_NAMESPACE)",
91 | "--pod-name=$(MY_POD_NAME)",
92 | "--container-name=katalog-sync-sidecar",
93 | "--bind-address=:8888"
94 | ],
95 | "env": [{
96 | "name": "HOST_IP",
97 | "valueFrom": {
98 | "fieldRef": {
99 | "apiVersion": "v1",
100 | "fieldPath": "status.hostIP"
101 | }
102 | }
103 | },
104 | {
105 | "name": "MY_POD_NAMESPACE",
106 | "valueFrom": {
107 | "fieldRef": {
108 | "apiVersion": "v1",
109 | "fieldPath": "metadata.namespace"
110 | }
111 | }
112 | },
113 | {
114 | "name": "MY_POD_NAME",
115 | "valueFrom": {
116 | "fieldRef": {
117 | "apiVersion": "v1",
118 | "fieldPath": "metadata.name"
119 | }
120 | }
121 | }
122 | ],
123 | "resources": {},
124 | "volumeMounts": [{
125 | "name": "default-token-zwnc6",
126 | "readOnly": true,
127 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
128 | }],
129 | "readinessProbe": {
130 | "httpGet": {
131 | "path": "/ready",
132 | "port": 8888,
133 | "scheme": "HTTP"
134 | },
135 | "initialDelaySeconds": 1,
136 | "timeoutSeconds": 1,
137 | "periodSeconds": 5,
138 | "successThreshold": 1,
139 | "failureThreshold": 3
140 | },
141 | "terminationMessagePath": "/dev/termination-log",
142 | "terminationMessagePolicy": "File",
143 | "imagePullPolicy": "Always"
144 | }
145 | ],
146 | "restartPolicy": "Always",
147 | "terminationGracePeriodSeconds": 1,
148 | "dnsPolicy": "ClusterFirst",
149 | "serviceAccountName": "default",
150 | "serviceAccount": "default",
151 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
152 | "securityContext": {},
153 | "schedulerName": "default-scheduler",
154 | "tolerations": [{
155 | "key": "node.kubernetes.io/not-ready",
156 | "operator": "Exists",
157 | "effect": "NoExecute",
158 | "tolerationSeconds": 300
159 | },
160 | {
161 | "key": "node.kubernetes.io/unreachable",
162 | "operator": "Exists",
163 | "effect": "NoExecute",
164 | "tolerationSeconds": 300
165 | }
166 | ],
167 | "priority": 0,
168 | "enableServiceLinks": true
169 | },
170 | "status": {
171 | "phase": "Running",
172 | "conditions": [{
173 | "type": "Initialized",
174 | "status": "True",
175 | "lastProbeTime": null,
176 | "lastTransitionTime": "2019-02-11T23:44:03Z"
177 | },
178 | {
179 | "type": "Ready",
180 | "status": "True",
181 | "lastProbeTime": null,
182 | "lastTransitionTime": "2019-02-11T23:44:36Z"
183 | },
184 | {
185 | "type": "ContainersReady",
186 | "status": "True",
187 | "lastProbeTime": null,
188 | "lastTransitionTime": "2019-02-11T23:44:36Z"
189 | },
190 | {
191 | "type": "PodScheduled",
192 | "status": "True",
193 | "lastProbeTime": null,
194 | "lastTransitionTime": "2019-02-11T23:44:03Z"
195 | }
196 | ],
197 | "hostIP": "10.10.204.182",
198 | "podIP": "10.1.1.137",
199 | "startTime": "2019-02-11T23:44:03Z",
200 | "containerStatuses": [{
201 | "name": "hw",
202 | "state": {
203 | "running": {
204 | "startedAt": "2019-02-11T23:44:08Z"
205 | }
206 | },
207 | "lastState": {},
208 | "ready": false,
209 | "restartCount": 0,
210 | "image": "smcquay/hw:v0.1.5",
211 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
212 | "containerID": "docker://1eefaa0d929cabe94e5fb2d958ec1de7bbc9ec1a3033bac5c3ea01c1ad57a80b"
213 | },
214 | {
215 | "name": "katalog-sync-sidecar",
216 | "state": {
217 | "running": {
218 | "startedAt": "2019-02-11T23:44:30Z"
219 | }
220 | },
221 | "lastState": {
222 | "terminated": {
223 | "exitCode": 2,
224 | "reason": "Error",
225 | "startedAt": "2019-02-11T23:44:15Z",
226 | "finishedAt": "2019-02-11T23:44:15Z",
227 | "containerID": "docker://b1367bda03e53ef2603a0fded0674d5a6954f3d1dd1fb3b7d2944079d7b6b907"
228 | }
229 | },
230 | "ready": false,
231 | "restartCount": 2,
232 | "image": "quay.io/wish/katalog-sync:latest",
233 | "imageID": "docker-pullable://quay.io/wish/katalog-sync@sha256:bd51bb5a8add2b2e1412ac2632b7f80309d8147a6bfceaa09f795e7f0d0ea7fc",
234 | "containerID": "docker://20f432830594a531e20d8e712655a1c480094636f1082a453ab056c09ab6da57"
235 | }
236 | ],
237 | "qosClass": "BestEffort"
238 | }
239 | }
240 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/sidecar_not_ready/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-6f596c7944-5q5t7",
4 | "generateName": "hw-6f596c7944-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-6f596c7944-5q5t7",
7 | "uid": "e9fedb18-2e56-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "6244",
9 | "creationTimestamp": "2019-02-11T23:44:03Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "6f596c7944"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-port-servicename2": "12345",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
20 | "katalog-sync.wish.com/sidecar": "katalog-sync-sidecar",
21 | "katalog-sync.wish.com/sync-interval": "2s",
22 | "kubernetes.io/config.seen": "2019-02-11T15:44:03.945239692-08:00",
23 | "kubernetes.io/config.source": "api"
24 | },
25 | "ownerReferences": [{
26 | "apiVersion": "apps/v1",
27 | "kind": "ReplicaSet",
28 | "name": "hw-6f596c7944",
29 | "uid": "e9f5926f-2e56-11e9-8f72-54e1ad14ee37",
30 | "controller": true,
31 | "blockOwnerDeletion": true
32 | }]
33 | },
34 | "spec": {
35 | "volumes": [{
36 | "name": "default-token-zwnc6",
37 | "secret": {
38 | "secretName": "default-token-zwnc6",
39 | "defaultMode": 420
40 | }
41 | }],
42 | "containers": [{
43 | "name": "hw",
44 | "image": "smcquay/hw:v0.1.5",
45 | "ports": [{
46 | "containerPort": 8080,
47 | "protocol": "TCP"
48 | }],
49 | "resources": {},
50 | "volumeMounts": [{
51 | "name": "default-token-zwnc6",
52 | "readOnly": true,
53 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
54 | }],
55 | "livenessProbe": {
56 | "httpGet": {
57 | "path": "/live",
58 | "port": 8080,
59 | "scheme": "HTTP"
60 | },
61 | "initialDelaySeconds": 5,
62 | "timeoutSeconds": 1,
63 | "periodSeconds": 5,
64 | "successThreshold": 1,
65 | "failureThreshold": 3
66 | },
67 | "readinessProbe": {
68 | "httpGet": {
69 | "path": "/ready",
70 | "port": 8080,
71 | "scheme": "HTTP"
72 | },
73 | "timeoutSeconds": 1,
74 | "periodSeconds": 5,
75 | "successThreshold": 1,
76 | "failureThreshold": 3
77 | },
78 | "terminationMessagePath": "/dev/termination-log",
79 | "terminationMessagePolicy": "File",
80 | "imagePullPolicy": "Always"
81 | },
82 | {
83 | "name": "katalog-sync-sidecar",
84 | "image": "quay.io/wish/katalog-sync:latest",
85 | "command": [
86 | "/bin/katalog-sync-sidecar"
87 | ],
88 | "args": [
89 | "--katalog-sync-daemon=$(HOST_IP):8501",
90 | "--namespace=$(MY_POD_NAMESPACE)",
91 | "--pod-name=$(MY_POD_NAME)",
92 | "--container-name=katalog-sync-sidecar",
93 | "--bind-address=:8888"
94 | ],
95 | "env": [{
96 | "name": "HOST_IP",
97 | "valueFrom": {
98 | "fieldRef": {
99 | "apiVersion": "v1",
100 | "fieldPath": "status.hostIP"
101 | }
102 | }
103 | },
104 | {
105 | "name": "MY_POD_NAMESPACE",
106 | "valueFrom": {
107 | "fieldRef": {
108 | "apiVersion": "v1",
109 | "fieldPath": "metadata.namespace"
110 | }
111 | }
112 | },
113 | {
114 | "name": "MY_POD_NAME",
115 | "valueFrom": {
116 | "fieldRef": {
117 | "apiVersion": "v1",
118 | "fieldPath": "metadata.name"
119 | }
120 | }
121 | }
122 | ],
123 | "resources": {},
124 | "volumeMounts": [{
125 | "name": "default-token-zwnc6",
126 | "readOnly": true,
127 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
128 | }],
129 | "readinessProbe": {
130 | "httpGet": {
131 | "path": "/ready",
132 | "port": 8888,
133 | "scheme": "HTTP"
134 | },
135 | "initialDelaySeconds": 1,
136 | "timeoutSeconds": 1,
137 | "periodSeconds": 5,
138 | "successThreshold": 1,
139 | "failureThreshold": 3
140 | },
141 | "terminationMessagePath": "/dev/termination-log",
142 | "terminationMessagePolicy": "File",
143 | "imagePullPolicy": "Always"
144 | }
145 | ],
146 | "restartPolicy": "Always",
147 | "terminationGracePeriodSeconds": 1,
148 | "dnsPolicy": "ClusterFirst",
149 | "serviceAccountName": "default",
150 | "serviceAccount": "default",
151 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
152 | "securityContext": {},
153 | "schedulerName": "default-scheduler",
154 | "tolerations": [{
155 | "key": "node.kubernetes.io/not-ready",
156 | "operator": "Exists",
157 | "effect": "NoExecute",
158 | "tolerationSeconds": 300
159 | },
160 | {
161 | "key": "node.kubernetes.io/unreachable",
162 | "operator": "Exists",
163 | "effect": "NoExecute",
164 | "tolerationSeconds": 300
165 | }
166 | ],
167 | "priority": 0,
168 | "enableServiceLinks": true
169 | },
170 | "status": {
171 | "phase": "Running",
172 | "conditions": [{
173 | "type": "Initialized",
174 | "status": "True",
175 | "lastProbeTime": null,
176 | "lastTransitionTime": "2019-02-11T23:44:03Z"
177 | },
178 | {
179 | "type": "Ready",
180 | "status": "True",
181 | "lastProbeTime": null,
182 | "lastTransitionTime": "2019-02-11T23:44:36Z"
183 | },
184 | {
185 | "type": "ContainersReady",
186 | "status": "True",
187 | "lastProbeTime": null,
188 | "lastTransitionTime": "2019-02-11T23:44:36Z"
189 | },
190 | {
191 | "type": "PodScheduled",
192 | "status": "True",
193 | "lastProbeTime": null,
194 | "lastTransitionTime": "2019-02-11T23:44:03Z"
195 | }
196 | ],
197 | "hostIP": "10.10.204.182",
198 | "podIP": "10.1.1.137",
199 | "startTime": "2019-02-11T23:44:03Z",
200 | "containerStatuses": [{
201 | "name": "hw",
202 | "state": {
203 | "running": {
204 | "startedAt": "2019-02-11T23:44:08Z"
205 | }
206 | },
207 | "lastState": {},
208 | "ready": true,
209 | "restartCount": 0,
210 | "image": "smcquay/hw:v0.1.5",
211 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
212 | "containerID": "docker://1eefaa0d929cabe94e5fb2d958ec1de7bbc9ec1a3033bac5c3ea01c1ad57a80b"
213 | },
214 | {
215 | "name": "katalog-sync-sidecar",
216 | "state": {
217 | "running": {
218 | "startedAt": "2019-02-11T23:44:30Z"
219 | }
220 | },
221 | "lastState": {
222 | "terminated": {
223 | "exitCode": 2,
224 | "reason": "Error",
225 | "startedAt": "2019-02-11T23:44:15Z",
226 | "finishedAt": "2019-02-11T23:44:15Z",
227 | "containerID": "docker://b1367bda03e53ef2603a0fded0674d5a6954f3d1dd1fb3b7d2944079d7b6b907"
228 | }
229 | },
230 | "ready": false,
231 | "restartCount": 2,
232 | "image": "quay.io/wish/katalog-sync:latest",
233 | "imageID": "docker-pullable://quay.io/wish/katalog-sync@sha256:bd51bb5a8add2b2e1412ac2632b7f80309d8147a6bfceaa09f795e7f0d0ea7fc",
234 | "containerID": "docker://20f432830594a531e20d8e712655a1c480094636f1082a453ab056c09ab6da57"
235 | }
236 | ],
237 | "qosClass": "BestEffort"
238 | }
239 | }
240 |
--------------------------------------------------------------------------------
/pkg/daemon/testfiles/sidecar/working/input.json:
--------------------------------------------------------------------------------
1 | {
2 | "metadata": {
3 | "name": "hw-6f596c7944-5q5t7",
4 | "generateName": "hw-6f596c7944-",
5 | "namespace": "hw",
6 | "selfLink": "/api/v1/namespaces/hw/pods/hw-6f596c7944-5q5t7",
7 | "uid": "e9fedb18-2e56-11e9-8f72-54e1ad14ee37",
8 | "resourceVersion": "6244",
9 | "creationTimestamp": "2019-02-11T23:44:03Z",
10 | "labels": {
11 | "app": "hw",
12 | "pod-template-hash": "6f596c7944"
13 | },
14 | "annotations": {
15 | "katalog-sync.wish.com/service-names": "hw-service-name,servicename2",
16 | "katalog-sync.wish.com/service-port": "8080",
17 | "katalog-sync.wish.com/service-port-servicename2": "12345",
18 | "katalog-sync.wish.com/service-tags": "a,b",
19 | "katalog-sync.wish.com/service-tags-servicename2": "b,c",
20 | "katalog-sync.wish.com/service-meta": "a:1,b:2",
21 | "katalog-sync.wish.com/service-meta-servicename2": "b:1,c:2",
22 | "katalog-sync.wish.com/sidecar": "katalog-sync-sidecar",
23 | "katalog-sync.wish.com/sync-interval": "2s",
24 | "kubernetes.io/config.seen": "2019-02-11T15:44:03.945239692-08:00",
25 | "kubernetes.io/config.source": "api"
26 | },
27 | "ownerReferences": [{
28 | "apiVersion": "apps/v1",
29 | "kind": "ReplicaSet",
30 | "name": "hw-6f596c7944",
31 | "uid": "e9f5926f-2e56-11e9-8f72-54e1ad14ee37",
32 | "controller": true,
33 | "blockOwnerDeletion": true
34 | }]
35 | },
36 | "spec": {
37 | "volumes": [{
38 | "name": "default-token-zwnc6",
39 | "secret": {
40 | "secretName": "default-token-zwnc6",
41 | "defaultMode": 420
42 | }
43 | }],
44 | "containers": [{
45 | "name": "hw",
46 | "image": "smcquay/hw:v0.1.5",
47 | "ports": [{
48 | "containerPort": 8080,
49 | "protocol": "TCP"
50 | }],
51 | "resources": {},
52 | "volumeMounts": [{
53 | "name": "default-token-zwnc6",
54 | "readOnly": true,
55 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
56 | }],
57 | "livenessProbe": {
58 | "httpGet": {
59 | "path": "/live",
60 | "port": 8080,
61 | "scheme": "HTTP"
62 | },
63 | "initialDelaySeconds": 5,
64 | "timeoutSeconds": 1,
65 | "periodSeconds": 5,
66 | "successThreshold": 1,
67 | "failureThreshold": 3
68 | },
69 | "readinessProbe": {
70 | "httpGet": {
71 | "path": "/ready",
72 | "port": 8080,
73 | "scheme": "HTTP"
74 | },
75 | "timeoutSeconds": 1,
76 | "periodSeconds": 5,
77 | "successThreshold": 1,
78 | "failureThreshold": 3
79 | },
80 | "terminationMessagePath": "/dev/termination-log",
81 | "terminationMessagePolicy": "File",
82 | "imagePullPolicy": "Always"
83 | },
84 | {
85 | "name": "katalog-sync-sidecar",
86 | "image": "quay.io/wish/katalog-sync:latest",
87 | "command": [
88 | "/bin/katalog-sync-sidecar"
89 | ],
90 | "args": [
91 | "--katalog-sync-daemon=$(HOST_IP):8501",
92 | "--namespace=$(MY_POD_NAMESPACE)",
93 | "--pod-name=$(MY_POD_NAME)",
94 | "--container-name=katalog-sync-sidecar",
95 | "--bind-address=:8888"
96 | ],
97 | "env": [{
98 | "name": "HOST_IP",
99 | "valueFrom": {
100 | "fieldRef": {
101 | "apiVersion": "v1",
102 | "fieldPath": "status.hostIP"
103 | }
104 | }
105 | },
106 | {
107 | "name": "MY_POD_NAMESPACE",
108 | "valueFrom": {
109 | "fieldRef": {
110 | "apiVersion": "v1",
111 | "fieldPath": "metadata.namespace"
112 | }
113 | }
114 | },
115 | {
116 | "name": "MY_POD_NAME",
117 | "valueFrom": {
118 | "fieldRef": {
119 | "apiVersion": "v1",
120 | "fieldPath": "metadata.name"
121 | }
122 | }
123 | }
124 | ],
125 | "resources": {},
126 | "volumeMounts": [{
127 | "name": "default-token-zwnc6",
128 | "readOnly": true,
129 | "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
130 | }],
131 | "readinessProbe": {
132 | "httpGet": {
133 | "path": "/ready",
134 | "port": 8888,
135 | "scheme": "HTTP"
136 | },
137 | "initialDelaySeconds": 1,
138 | "timeoutSeconds": 1,
139 | "periodSeconds": 5,
140 | "successThreshold": 1,
141 | "failureThreshold": 3
142 | },
143 | "terminationMessagePath": "/dev/termination-log",
144 | "terminationMessagePolicy": "File",
145 | "imagePullPolicy": "Always"
146 | }
147 | ],
148 | "restartPolicy": "Always",
149 | "terminationGracePeriodSeconds": 1,
150 | "dnsPolicy": "ClusterFirst",
151 | "serviceAccountName": "default",
152 | "serviceAccount": "default",
153 | "nodeName": "tjackson-thinkpad-x1-carbon-5th",
154 | "securityContext": {},
155 | "schedulerName": "default-scheduler",
156 | "tolerations": [{
157 | "key": "node.kubernetes.io/not-ready",
158 | "operator": "Exists",
159 | "effect": "NoExecute",
160 | "tolerationSeconds": 300
161 | },
162 | {
163 | "key": "node.kubernetes.io/unreachable",
164 | "operator": "Exists",
165 | "effect": "NoExecute",
166 | "tolerationSeconds": 300
167 | }
168 | ],
169 | "priority": 0,
170 | "enableServiceLinks": true
171 | },
172 | "status": {
173 | "phase": "Running",
174 | "conditions": [{
175 | "type": "Initialized",
176 | "status": "True",
177 | "lastProbeTime": null,
178 | "lastTransitionTime": "2019-02-11T23:44:03Z"
179 | },
180 | {
181 | "type": "Ready",
182 | "status": "True",
183 | "lastProbeTime": null,
184 | "lastTransitionTime": "2019-02-11T23:44:36Z"
185 | },
186 | {
187 | "type": "ContainersReady",
188 | "status": "True",
189 | "lastProbeTime": null,
190 | "lastTransitionTime": "2019-02-11T23:44:36Z"
191 | },
192 | {
193 | "type": "PodScheduled",
194 | "status": "True",
195 | "lastProbeTime": null,
196 | "lastTransitionTime": "2019-02-11T23:44:03Z"
197 | }
198 | ],
199 | "hostIP": "10.10.204.182",
200 | "podIP": "10.1.1.137",
201 | "startTime": "2019-02-11T23:44:03Z",
202 | "containerStatuses": [{
203 | "name": "hw",
204 | "state": {
205 | "running": {
206 | "startedAt": "2019-02-11T23:44:08Z"
207 | }
208 | },
209 | "lastState": {},
210 | "ready": true,
211 | "restartCount": 0,
212 | "image": "smcquay/hw:v0.1.5",
213 | "imageID": "docker-pullable://smcquay/hw@sha256:514233b4dfbe7b93b2ac07634dc964ab5b1d8318f0c35afe0882fdde6fb245f1",
214 | "containerID": "docker://1eefaa0d929cabe94e5fb2d958ec1de7bbc9ec1a3033bac5c3ea01c1ad57a80b"
215 | },
216 | {
217 | "name": "katalog-sync-sidecar",
218 | "state": {
219 | "running": {
220 | "startedAt": "2019-02-11T23:44:30Z"
221 | }
222 | },
223 | "lastState": {
224 | "terminated": {
225 | "exitCode": 2,
226 | "reason": "Error",
227 | "startedAt": "2019-02-11T23:44:15Z",
228 | "finishedAt": "2019-02-11T23:44:15Z",
229 | "containerID": "docker://b1367bda03e53ef2603a0fded0674d5a6954f3d1dd1fb3b7d2944079d7b6b907"
230 | }
231 | },
232 | "ready": true,
233 | "restartCount": 2,
234 | "image": "quay.io/wish/katalog-sync:latest",
235 | "imageID": "docker-pullable://quay.io/wish/katalog-sync@sha256:bd51bb5a8add2b2e1412ac2632b7f80309d8147a6bfceaa09f795e7f0d0ea7fc",
236 | "containerID": "docker://20f432830594a531e20d8e712655a1c480094636f1082a453ab056c09ab6da57"
237 | }
238 | ],
239 | "qosClass": "BestEffort"
240 | }
241 | }
242 |
--------------------------------------------------------------------------------
/pkg/daemon/struct.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "strconv"
8 | "strings"
9 | "sync"
10 | "time"
11 |
12 | consulApi "github.com/hashicorp/consul/api"
13 | "github.com/sirupsen/logrus"
14 | corev1 "k8s.io/api/core/v1"
15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
16 | "k8s.io/apimachinery/pkg/types"
17 | "k8s.io/client-go/kubernetes"
18 | "k8s.io/client-go/rest"
19 | )
20 |
21 | // ReadinessGate
22 | const ReadinessGateType = "katalog-sync.wish.com/synced" // name of readiness gate
23 |
24 | var (
25 | // Annotation names
26 | ConsulServiceNames = "katalog-sync.wish.com/service-names" // comma-separated list of service names
27 | ConsulServicePort = "katalog-sync.wish.com/service-port" // port to use for consul entry
28 | ConsulServicePortOverride = "katalog-sync.wish.com/service-port-" // port override to use for a specific service name
29 | ConsulServiceTags = "katalog-sync.wish.com/service-tags" // tags for the service
30 | ConsulServiceTagsOverride = "katalog-sync.wish.com/service-tags-" // tags override to use for a specific service name
31 | ConsulServiceMeta = "katalog-sync.wish.com/service-meta" // meta for the service
32 | ConsulServiceMetaOverride = "katalog-sync.wish.com/service-meta-" // meta override to use for a specific service name
33 | ConsulServiceHealth = "katalog-sync.wish.com/service-health" // health status for the service (passing/warning/critical)
34 | ConsulServiceHealthOverride = "katalog-sync.wish.com/service-health-" // health status override
35 | SidecarName = "katalog-sync.wish.com/sidecar" // Name of sidecar container, only to be set if it exists
36 | SyncInterval = "katalog-sync.wish.com/sync-interval" // How frequently we want to sync this service
37 | ConsulServiceCheckTTL = "katalog-sync.wish.com/service-check-ttl" // TTL for the service checks we put in consul
38 | ContainerExclusion = "katalog-sync.wish.com/container-exclude" // comma-separated list of containers to exclude from ready check
39 | )
40 |
41 | // NewPod returns a daemon pod based on a config and a k8s pod
42 | func NewPod(pod corev1.Pod, dc *DaemonConfig) (*Pod, error) {
43 | var sidecarState *SidecarState
44 | // If we have an annotation saying we have a sidecar, lets load it
45 | if sidecarContainerName, ok := pod.ObjectMeta.Annotations[SidecarName]; ok {
46 | // we want to mark the initial state based on what the sidecar container state
47 | // is, this way if the daemon gets reloaded we don't require a re-negotiation
48 | sidecarReady := false
49 | found := false
50 | for _, containerStatus := range pod.Status.ContainerStatuses {
51 | if containerStatus.Name == sidecarContainerName {
52 | sidecarReady = containerStatus.Ready
53 | found = true
54 | break
55 | }
56 | }
57 | if !found {
58 | return nil, fmt.Errorf("Unable to find sidecar container %s", sidecarContainerName)
59 | }
60 |
61 | sidecarState = &SidecarState{
62 | SidecarName: sidecarContainerName,
63 | Ready: sidecarReady,
64 | }
65 | }
66 |
67 | // Check if we have a readiness gate defined
68 | var ourReadinessGate corev1.PodReadinessGate
69 | for _, gate := range pod.Spec.ReadinessGates {
70 | if gate.ConditionType == ReadinessGateType {
71 | ourReadinessGate = gate
72 | }
73 | }
74 |
75 | // Calculate SyncInterval
76 | syncInterval := dc.DefaultSyncInterval
77 | if interval, ok := pod.ObjectMeta.Annotations[SyncInterval]; ok {
78 | duration, err := time.ParseDuration(interval)
79 | if err != nil {
80 | return nil, err
81 | }
82 | syncInterval = duration
83 | }
84 |
85 | // Calculate CheckTTL
86 | checkTTL := dc.DefaultCheckTTL
87 | if interval, ok := pod.ObjectMeta.Annotations[ConsulServiceCheckTTL]; ok {
88 | duration, err := time.ParseDuration(interval)
89 | if err != nil {
90 | return nil, err
91 | }
92 | checkTTL = duration
93 | }
94 |
95 | // Ensure that the checkTTL is at least SyncTTLBuffer greater than syncTTL
96 | if minCheckTTL := syncInterval + dc.SyncTTLBuffer; checkTTL < minCheckTTL {
97 | checkTTL = minCheckTTL
98 | }
99 |
100 | ctx, cancel := context.WithCancel(context.Background())
101 |
102 | return &Pod{
103 | Pod: pod,
104 | SidecarState: sidecarState,
105 | SyncStatuses: make(map[string]*SyncStatus),
106 | OutstandingReadinessGate: ourReadinessGate.ConditionType == ReadinessGateType,
107 |
108 | CheckTTL: checkTTL,
109 | SyncInterval: syncInterval,
110 | Ctx: ctx,
111 | Cancel: cancel,
112 | }, nil
113 |
114 | }
115 |
116 | // Pod is our representation of a pod in k8s
117 | type Pod struct {
118 | corev1.Pod
119 | *SidecarState
120 | // map servicename -> sync status
121 | SyncStatuses
122 | OutstandingReadinessGate bool // Do we have a ReadinessGate to set
123 | InitialSyncDone bool // Ready and in consul
124 |
125 | CheckTTL time.Duration
126 | SyncInterval time.Duration
127 | Ctx context.Context
128 | Cancel context.CancelFunc
129 |
130 | l sync.Mutex
131 |
132 | waitCh []chan struct{}
133 | }
134 |
135 | func (p *Pod) WaitChanges() chan struct{} {
136 | ch := make(chan struct{}, 5)
137 | p.l.Lock()
138 | p.waitCh = append(p.waitCh, ch)
139 | p.l.Unlock()
140 | return ch
141 | }
142 |
143 | // HasChange will return whether a change has been made that needs a full resync
144 | // if not then a simple TTL update will suffice
145 | func (p *Pod) HasChange(service *consulApi.AgentService) bool {
146 | if service.Port != p.GetPort(service.Service) {
147 | return true
148 | }
149 |
150 | if service.Address != p.Status.PodIP {
151 | return true
152 | }
153 |
154 | return false
155 | }
156 |
157 | // GetServiceID returns an identifier that addresses this pod.
158 | func (p *Pod) GetServiceID(serviceName string) string {
159 | // ServiceID is katalog-sync_service_namespace_pod
160 | return strings.Join([]string{
161 | "katalog-sync",
162 | serviceName,
163 | p.Pod.ObjectMeta.Namespace,
164 | p.Pod.ObjectMeta.Name,
165 | }, "_")
166 | }
167 |
168 | // UpdatePod updates the k8s pod
169 | func (p *Pod) UpdatePod(k8sPod corev1.Pod) {
170 | p.l.Lock()
171 | defer p.l.Unlock()
172 | p.Pod = k8sPod
173 |
174 | // notify waiters
175 | for i, ch := range p.waitCh {
176 | select {
177 | case ch <- struct{}{}:
178 | continue
179 | default:
180 | close(ch)
181 | copy(p.waitCh[i:], p.waitCh[i+1:])
182 | p.waitCh = p.waitCh[:len(p.waitCh)-1]
183 | }
184 | }
185 | }
186 |
187 | // GetServiceNames returns the list of service names defined in the k8s annotations
188 | func (p *Pod) GetServiceNames() []string {
189 | return strings.Split(p.Pod.ObjectMeta.Annotations[ConsulServiceNames], ",")
190 | }
191 |
192 | // HasServiceName returns whether a given name is one of the annotated service names for this pod
193 | func (p *Pod) HasServiceName(n string) bool {
194 | for _, name := range p.GetServiceNames() {
195 | if name == n {
196 | return true
197 | }
198 | }
199 | return false
200 | }
201 |
202 | // GetTags returns the tags for a given service for this pod
203 | // This first checks the service-specific tags, and falls back to the service-level tags
204 | func (p *Pod) GetTags(n string) []string {
205 | if tagStr, ok := p.Pod.ObjectMeta.Annotations[ConsulServiceTagsOverride+n]; ok {
206 | return strings.Split(tagStr, ",")
207 | }
208 |
209 | if tagStr, ok := p.Pod.ObjectMeta.Annotations[ConsulServiceTags]; ok {
210 | return strings.Split(tagStr, ",")
211 | }
212 |
213 | return nil
214 | }
215 |
216 | // GetServiceMeta returns a map of metadata to be added to the ServiceMetadata
217 | func (p *Pod) GetServiceMeta(n string) map[string]string {
218 | if metaStr, ok := p.Pod.ObjectMeta.Annotations[ConsulServiceMetaOverride+n]; ok {
219 | return ParseMap(metaStr)
220 | }
221 |
222 | if metaStr, ok := p.Pod.ObjectMeta.Annotations[ConsulServiceMeta]; ok {
223 | return ParseMap(metaStr)
224 | }
225 |
226 | return nil
227 | }
228 |
229 | // GetServiceHealth returns the service health specified in annotation, or defaultVal if not specified.
230 | func (p *Pod) GetServiceHealth(n string, defaultVal string) string {
231 | healthStr := p.Pod.ObjectMeta.Annotations[ConsulServiceHealthOverride+n]
232 | if healthStr == "" {
233 | healthStr = p.Pod.ObjectMeta.Annotations[ConsulServiceHealth]
234 | }
235 | switch healthStr {
236 | case consulApi.HealthCritical, consulApi.HealthPassing, consulApi.HealthWarning:
237 | return healthStr
238 | case "": // annotation not set
239 | default:
240 | logrus.Errorf("Unknown service health status '%v' ignored", healthStr)
241 | }
242 | return defaultVal
243 | }
244 |
245 | // GetPort returns the port for a given service for this pod
246 | // This first checks the service-specific port, and falls back to the service-level port
247 | func (p *Pod) GetPort(n string) int {
248 | if portStr, ok := p.Pod.ObjectMeta.Annotations[ConsulServicePortOverride+n]; ok {
249 | port, err := strconv.Atoi(portStr)
250 | if err == nil {
251 | return port
252 | }
253 | logrus.Errorf("Unable to parse port from annotation %s: %v", portStr, err)
254 | }
255 |
256 | // Otherwise we look for the pod-wide port annotation
257 | if portStr, ok := p.Pod.ObjectMeta.Annotations[ConsulServicePort]; ok {
258 | port, err := strconv.Atoi(portStr)
259 | if err == nil {
260 | return port
261 | }
262 | logrus.Errorf("Unable to parse port from annotation %s: %v", portStr, err)
263 | }
264 |
265 | // If no port was defined, we find the first port we can in the spec and use that
266 | for _, container := range p.Pod.Spec.Containers {
267 | for _, port := range container.Ports {
268 | return int(port.ContainerPort)
269 | }
270 | }
271 |
272 | // TODO: error?
273 | return -1
274 | }
275 |
276 | // Ready checks the readiness of the containers in the pod
277 | func (p *Pod) Ready() (bool, map[string]bool) {
278 | if p.SidecarState != nil {
279 | if !p.SidecarState.Ready {
280 | // TODO: change return to be a string that describes? here seems odd to not say anything
281 | return false, nil
282 | }
283 | }
284 |
285 | // If pod is terminating we want to mark it as not-ready (for sync status);
286 | // This way we mimic the shutdown behavior of normal services (e.g. pods
287 | // in terminating status are removed as endpoints for services)
288 | // Determining Terminating state as kubectl does (https://github.com/kubernetes/kubernetes/blob/v1.2.0/pkg/kubectl/resource_printer.go#L588)
289 | if p.DeletionTimestamp != nil {
290 | return false, nil
291 | }
292 |
293 | podReady := true
294 | containerReadiness := make(map[string]bool)
295 | excludeContainers := p.ContainerExclusion()
296 | for _, containerStatus := range p.Pod.Status.ContainerStatuses {
297 | if _, ok := excludeContainers[containerStatus.Name]; ok {
298 | delete(excludeContainers, containerStatus.Name)
299 | continue
300 | }
301 | // If we have a sidecar defined, we skip its container -- the sidecar's request arriving means it is up
302 | if p.SidecarState != nil {
303 | if containerStatus.Name == p.SidecarState.SidecarName {
304 | continue
305 | }
306 | }
307 | podReady = podReady && containerStatus.Ready
308 | containerReadiness[containerStatus.Name] = containerStatus.Ready
309 | }
310 | if len(excludeContainers) > 0 {
311 | logrus.Warnf("Some excluded containers for %s were not found in pod: %v", p.ObjectMeta.SelfLink, excludeContainers)
312 | }
313 | return podReady, containerReadiness
314 | }
315 |
316 | // ContainerExclusion returns the containers that should be excluded from a readiness check
317 | func (p *Pod) ContainerExclusion() map[string]struct{} {
318 | str, ok := p.Pod.ObjectMeta.Annotations[ContainerExclusion]
319 | if !ok {
320 | return nil
321 | }
322 | excludeContainers := strings.Split(str, ",")
323 |
324 | m := make(map[string]struct{}, len(excludeContainers))
325 | for _, c := range excludeContainers {
326 | m[c] = struct{}{}
327 | }
328 |
329 | return m
330 | }
331 |
332 | func (p *Pod) HandleReadinessGate() error {
333 | p.l.Lock()
334 | defer p.l.Unlock()
335 | logrus.Debugf("HandleReadinessGate: %v", p.GetServiceNames())
336 | // Fast path for things without a readiness gate or with a completed readiness gate
337 | if !p.OutstandingReadinessGate {
338 | return nil
339 | }
340 |
341 | var ourCondition corev1.PodCondition
342 | for _, condition := range p.Pod.Status.Conditions {
343 | if condition.Type == ReadinessGateType {
344 | ourCondition = condition
345 | }
346 | }
347 |
348 | logrus.Tracef("condition: %v", ourCondition)
349 |
350 | // If the pod is already marked ready; we are done
351 | if ourCondition.Status == corev1.ConditionTrue {
352 | p.OutstandingReadinessGate = false
353 | return nil
354 | }
355 |
356 | // We didn't find it, set it!
357 | if ourCondition.Type != ReadinessGateType {
358 | ourCondition.Type = ReadinessGateType
359 | }
360 |
361 | ready, reasonMap := p.Ready()
362 | if ready {
363 | // Assuming the pod is ready; we need to check sync status
364 | var notSyncedServices []string
365 | for serviceName, status := range p.SyncStatuses {
366 | if status.LastError != nil {
367 | notSyncedServices = append(notSyncedServices, serviceName)
368 | }
369 | }
370 | if len(notSyncedServices) != 0 {
371 | ourCondition.Status = corev1.ConditionFalse
372 | ourCondition.Reason = "Not all services synced to consul"
373 | ourCondition.Message = fmt.Sprintf("The following services haven't been synced to consul yet: %s", notSyncedServices)
374 | } else {
375 | // check that this ended up in consul as well
376 | if p.OutstandingReadinessGate && p.InitialSyncDone {
377 | ourCondition.Status = corev1.ConditionTrue
378 | ourCondition.Reason = "Done"
379 | ourCondition.Message = "Done"
380 | } else {
381 | ourCondition.Status = corev1.ConditionFalse
382 | ourCondition.Reason = "Not synced to remote consul"
383 | ourCondition.Message = "State synced to local consul, waiting on sync to remote consul"
384 | }
385 | }
386 | } else {
387 | ourCondition.Status = corev1.ConditionFalse
388 | ourCondition.Reason = "Not all containers are ready"
389 | notesB, err := json.MarshalIndent(reasonMap, "", " ")
390 | if err != nil {
391 | panic(err)
392 | }
393 | ourCondition.Message = string(notesB)
394 | }
395 |
396 | logrus.Infof("condition to set: %v", ourCondition)
397 |
398 | patch, err := buildPodConditionPatch(&p.Pod, ourCondition)
399 | if err != nil {
400 | return err
401 | }
402 |
403 | // TODO: pass in
404 | config, err := rest.InClusterConfig()
405 | if err != nil {
406 | return err
407 | }
408 | clientset, err := kubernetes.NewForConfig(config)
409 | if err != nil {
410 | return err
411 | }
412 |
413 | podsClient := clientset.CoreV1().Pods(p.Pod.ObjectMeta.Namespace)
414 |
415 | _, err = podsClient.Patch(context.TODO(), p.Pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
416 | if err != nil {
417 | return err
418 | }
419 |
420 | return nil
421 | }
422 |
423 | // State from our sidecar service
424 | type SidecarState struct {
425 | SidecarName string // name of the sidecar container
426 | Ready bool
427 | }
428 |
429 | // SyncStatuses is a map of SyncStatus for each service defined in a pod (serviceName -> *SyncStatus)
430 | type SyncStatuses map[string]*SyncStatus
431 |
432 | // GetStatus returns the SyncStatus for the given serviceName
433 | func (s SyncStatuses) GetStatus(n string) *SyncStatus {
434 | status, ok := s[n]
435 | if !ok {
436 | status = &SyncStatus{}
437 | s[n] = status
438 | }
439 | return status
440 | }
441 |
442 | // GetError returns the first error found in the set of SyncStatuses
443 | func (s SyncStatuses) GetError() error {
444 | for _, status := range s {
445 | if status.LastError != nil {
446 | return status.LastError
447 | }
448 | }
449 |
450 | return nil
451 | }
452 |
453 | // SyncStatus encapsulates the result of the last sync attempt
454 | type SyncStatus struct {
455 | LastUpdated time.Time
456 | LastError error
457 | }
458 |
459 | // SetError sets the error and LastUpdated time for the status
460 | func (s *SyncStatus) SetError(e error) {
461 | s.LastError = e
462 | s.LastUpdated = time.Now()
463 | }
464 |
--------------------------------------------------------------------------------
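The annotation handling in struct.go above (GetPort, GetTags, GetServiceMeta) follows the same precedence everywhere: a per-service override annotation wins, then the pod-wide annotation, then a default. The standalone sketch below illustrates only that lookup order; the annotation key strings are placeholders, since the real constants (ConsulServicePort, ConsulServicePortOverride, etc.) are defined earlier in struct.go and are not reproduced here.

package main

import (
    "fmt"
    "strconv"
)

const (
    servicePortKey         = "example.io/service-port"  // placeholder for ConsulServicePort
    servicePortOverrideKey = "example.io/service-port-" // placeholder for the ConsulServicePortOverride prefix
)

// resolvePort mirrors the order used by Pod.GetPort: service-specific override
// annotation first, then the pod-wide annotation, then a fallback value.
func resolvePort(annotations map[string]string, serviceName string, fallback int) int {
    if s, ok := annotations[servicePortOverrideKey+serviceName]; ok {
        if p, err := strconv.Atoi(s); err == nil {
            return p
        }
    }
    if s, ok := annotations[servicePortKey]; ok {
        if p, err := strconv.Atoi(s); err == nil {
            return p
        }
    }
    return fallback
}

func main() {
    annotations := map[string]string{
        servicePortKey:                 "8080",
        servicePortOverrideKey + "api": "9090",
    }
    fmt.Println(resolvePort(annotations, "api", 80)) // 9090: the per-service override wins
    fmt.Println(resolvePort(annotations, "web", 80)) // 8080: falls back to the pod-wide annotation
    fmt.Println(resolvePort(nil, "web", 80))         // 80: no annotations (the real GetPort would fall back to the first container port in the pod spec)
}

--------------------------------------------------------------------------------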
/pkg/daemon/daemon.go:
--------------------------------------------------------------------------------
1 | package daemon
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "time"
8 |
9 | consulApi "github.com/hashicorp/consul/api"
10 | "github.com/pkg/errors"
11 | "github.com/prometheus/client_golang/prometheus"
12 | "github.com/sirupsen/logrus"
13 |
14 | katalogsync "github.com/wish/katalog-sync/proto"
15 | )
16 |
17 | var (
18 | ConsulSyncSourceName = "external-sync-source"
19 | ConsulSyncSourceValue = "katalog-sync"
20 | ConsulK8sLinkName = "external-k8s-link"
21 | ConsulK8sNamespace = "external-k8s-namespace"
22 | ConsulK8sPod = "external-k8s-pod"
23 | )
24 |
25 | // Metrics
26 | var (
27 | k8sSyncCount = prometheus.NewCounterVec(prometheus.CounterOpts{
28 | Name: "katalog_sync_kubelet_sync_count_total",
29 | Help: "How many syncs completed from kubelet API, partitioned by success",
30 | }, []string{"status"})
31 | k8sSyncSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
32 | Name: "katalog_sync_kubelet_sync_duration_seconds",
33 | Help: "Latency of sync process from kubelet",
34 | }, []string{"status"})
35 | consulSyncCount = prometheus.NewCounterVec(prometheus.CounterOpts{
36 | Name: "katalog_sync_consul_sync_count_total",
37 | Help: "How many syncs completed to consul API, partitioned by success",
38 | }, []string{"status"})
39 | consulSyncSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
40 | Name: "katalog_sync_consul_sync_duration_seconds",
41 | Help: "Latency of sync process to consul",
42 | }, []string{"status"})
43 | )
44 |
45 | func init() {
46 | prometheus.MustRegister(
47 | k8sSyncCount,
48 | k8sSyncSummary,
49 | consulSyncCount,
50 | consulSyncSummary,
51 | )
52 | }
53 |
54 | // DaemonConfig contains the configuration options for a katalog-sync-daemon
55 | type DaemonConfig struct {
56 | MinSyncInterval time.Duration `long:"min-sync-interval" env:"MIN_SYNC_INTERVAL" description:"minimum duration allowed for sync" default:"500ms"`
57 | MaxSyncInterval time.Duration `long:"max-sync-interval" env:"MAX_SYNC_INTERVAL" description:"maximum duration allowed for sync" default:"5s"`
58 | DefaultSyncInterval time.Duration `long:"default-sync-interval" env:"DEFAULT_SYNC_INTERVAL" default:"1s"`
59 | DefaultCheckTTL time.Duration `long:"default-check-ttl" env:"DEFAULT_CHECK_TTL" default:"10s"`
60 | SyncTTLBuffer time.Duration `long:"sync-ttl-buffer-duration" env:"SYNC_TTL_BUFFER_DURATION" description:"how much time to ensure is between sync time and ttl" default:"10s"`
61 | }
62 |
63 | // NewDaemon is a helper function to return a new *Daemon
64 | func NewDaemon(c DaemonConfig, k8sClient Kubelet, consulClient *consulApi.Client) *Daemon {
65 | return &Daemon{
66 | c: c,
67 | k8sClient: k8sClient,
68 | consulClient: consulClient,
69 |
70 | localK8sState: make(map[string]*Pod),
71 | syncCh: make(chan chan error),
72 | }
73 | }
74 |
75 | // Daemon is responsible for syncing state from k8s -> consul
76 | type Daemon struct {
77 | c DaemonConfig
78 |
79 | k8sClient Kubelet
80 | consulClient *consulApi.Client
81 |
82 | // TODO: locks around this? or move everything through a channel
83 | // Our local representation of what pods are running
84 | localK8sState map[string]*Pod
85 |
86 | syncCh chan chan error
87 | }
88 |
89 | func (d *Daemon) doSync(ctx context.Context) error {
90 | ch := make(chan error, 1)
91 |
92 | // Trigger a sync
93 | d.syncCh <- ch
94 | select {
95 | case <-ctx.Done():
96 | return ctx.Err()
97 | case err := <-ch:
98 | return err
99 | }
100 | }
101 |
102 | // Register handles a sidecar request for registration. This will block until
103 | // (1) the pod excluding the sidecar container is ready
104 | // (2) the service has been pushed to the agent services API
105 | // (3) the entry shows up in the catalog API (meaning it synced to the cluster)
106 | func (d *Daemon) Register(ctx context.Context, in *katalogsync.RegisterQuery) (*katalogsync.RegisterResult, error) {
107 | if err := d.doSync(ctx); err != nil {
108 | return nil, err
109 | }
110 |
111 | k := podCacheKey(in.Namespace, in.PodName)
112 | pod, ok := d.localK8sState[k]
113 | if !ok {
114 | return nil, fmt.Errorf("Unable to find pod with katalog-sync annotation (%s): %s", ConsulServiceNames, k)
115 | }
116 |
117 | if pod.SidecarState == nil {
118 | return nil, fmt.Errorf("Pod is missing annotation %s for sidecar", SidecarName)
119 | }
120 |
121 | pod.SidecarState.SidecarName = in.ContainerName
122 | pod.SidecarState.Ready = true
123 |
124 | if err := d.doSync(ctx); err != nil {
125 | return nil, err
126 | }
127 |
128 | if err := pod.SyncStatuses.GetError(); err != nil {
129 | return nil, errors.Wrap(err, "Unable to sync status")
130 | }
131 |
132 | // The goal here is to ensure that the registration has propagated to the rest of the cluster
133 | nodeName, err := d.consulClient.Agent().NodeName()
134 | if err != nil {
135 | return nil, err
136 | }
137 | opts := &consulApi.QueryOptions{AllowStale: true, UseCache: true}
138 | if err := d.ConsulNodeDoUntil(ctx, nodeName, opts, func(node *consulApi.CatalogNode) bool {
139 | synced := true
140 | for _, serviceName := range pod.GetServiceNames() {
141 | // If the service is not yet in the catalog, we are not synced
142 | if _, ok := node.Services[pod.GetServiceID(serviceName)]; !ok {
143 | synced = false
144 | }
145 | }
146 | return synced
147 | }); err != nil {
148 | return nil, err
149 | }
150 |
151 | if ready, _ := pod.Ready(); ready {
152 | return nil, nil
153 | }
154 | return nil, fmt.Errorf("not ready!: %v", pod.SyncStatuses.GetError())
155 | }
156 |
157 | // Deregister handles a sidecar request for deregistration. This will block until
158 | // (1) the service has been removed from the agent services API
159 | // (2) the entry has been removed from the catalog API (meaning the removal synced to the cluster)
160 | func (d *Daemon) Deregister(ctx context.Context, in *katalogsync.DeregisterQuery) (*katalogsync.DeregisterResult, error) {
161 | if err := d.doSync(ctx); err != nil {
162 | return nil, err
163 | }
164 |
165 | k := podCacheKey(in.Namespace, in.PodName)
166 | pod, ok := d.localK8sState[k]
167 | if !ok {
168 | return nil, fmt.Errorf("Unable to find pod with katalog-sync annotation (%s): %s", ConsulServiceNames, k)
169 | }
170 |
171 | if pod.SidecarState == nil {
172 | return nil, fmt.Errorf("Pod is missing annotation %s for sidecar", SidecarName)
173 | }
174 |
175 | pod.SidecarState.Ready = false
176 |
177 | if err := d.doSync(ctx); err != nil {
178 | return nil, err
179 | }
180 |
181 | if err := pod.SyncStatuses.GetError(); err != nil {
182 | return nil, errors.Wrap(err, "Unable to sync status")
183 | }
184 |
185 | // The goal here is to ensure that the deregistration has propagated to the rest of the cluster
186 | nodeName, err := d.consulClient.Agent().NodeName()
187 | if err != nil {
188 | return nil, err
189 | }
190 | opts := &consulApi.QueryOptions{AllowStale: true, UseCache: true}
191 |
192 | if err := d.ConsulNodeDoUntil(ctx, nodeName, opts, func(node *consulApi.CatalogNode) bool {
193 | synced := true
194 | for _, serviceName := range pod.GetServiceNames() {
195 | // If the service still exists in the catalog, check whether its health has gone non-passing
196 | if _, ok := node.Services[pod.GetServiceID(serviceName)]; ok {
197 | status, _, err := d.consulClient.Agent().AgentHealthServiceByID(pod.GetServiceID(serviceName))
198 | if err == nil {
199 | // if the health status is not pinned via annotation and is still passing, the removal hasn't taken effect yet
200 | if pod.GetServiceHealth(serviceName, "") == "" && status == consulApi.HealthPassing {
201 | synced = false
202 | }
203 | } else {
204 | // if we got an error; assume it isn't synced
205 | synced = false
206 | }
207 | }
208 | }
209 | return synced
210 | }); err != nil {
211 | return nil, err
212 | }
213 |
214 | if ready, _ := pod.Ready(); !ready {
215 | return nil, nil
216 | }
217 | return nil, fmt.Errorf("ready!: %v", pod.SyncStatuses.GetError())
218 | }
219 |
220 | func (d *Daemon) calculateSleepTime() time.Duration {
221 | sleepDuration := d.c.MaxSyncInterval
222 | for _, pod := range d.localK8sState {
223 | if pod.SyncInterval < sleepDuration && pod.SyncInterval > d.c.MinSyncInterval {
224 | sleepDuration = pod.SyncInterval
225 | }
226 | }
227 | return sleepDuration
228 | }
229 |
230 | // TODO: refactor into a start/stop/run job (so initial sync is done on start, and the rest in background goroutine)
231 | func (d *Daemon) Run() error {
232 | timer := time.NewTimer(0)
233 | var lastRun time.Time
234 |
235 | retChans := make([]chan error, 0)
236 |
237 | doSync := func() error {
238 | defer func() {
239 | sleepTime := d.calculateSleepTime()
240 | logrus.Infof("sleeping for %s", sleepTime)
241 | timer = time.NewTimer(sleepTime)
242 | lastRun = time.Now()
243 | }()
244 | // Fetch current pod state from k8s
245 | start := time.Now()
246 | if err := d.fetchK8s(); err != nil {
247 | k8sSyncCount.WithLabelValues("error").Inc()
248 | k8sSyncSummary.WithLabelValues("error").Observe(time.Now().Sub(start).Seconds())
249 | logrus.Errorf("Error fetching state from k8s: %v", err)
250 | } else {
251 | k8sSyncCount.WithLabelValues("success").Inc()
252 | k8sSyncSummary.WithLabelValues("success").Observe(time.Now().Sub(start).Seconds())
253 | }
254 |
255 | // Sync local state to consul
256 | start = time.Now()
257 | err := d.syncConsul()
258 | if err != nil {
259 | consulSyncCount.WithLabelValues("error").Inc()
260 | consulSyncSummary.WithLabelValues("error").Observe(time.Now().Sub(start).Seconds())
261 | } else {
262 | consulSyncCount.WithLabelValues("success").Inc()
263 | consulSyncSummary.WithLabelValues("success").Observe(time.Now().Sub(start).Seconds())
264 | }
265 | return err
266 | }
267 |
268 | // Loop forever running the update job
269 | for {
270 | select {
271 | // If the timer went off, then we need to do a sync
272 | case <-timer.C:
273 | start := time.Now()
274 | err := doSync()
275 | logrus.Infof("Sync completed in %s: %v", time.Now().Sub(start), err)
276 | for _, ch := range retChans {
277 | select {
278 | case ch <- err:
279 | default:
280 | }
281 | }
282 | retChans = retChans[:0] // reset the waiter list now that everyone has been notified
283 |
284 | // If we got a channel on the syncCh then we need to add it to our list
285 | case ch := <-d.syncCh:
286 | retChans = append(retChans, ch)
287 | if time.Now().Sub(lastRun) > d.c.MinSyncInterval {
288 | if !timer.Stop() {
289 | <-timer.C
290 | }
291 | timer.Reset(0)
292 | }
293 | }
294 | }
295 | }
296 |
297 | // fetchK8s is responsible for updating the local k8sState with what we pull
298 | // from our k8sClient
299 | func (d *Daemon) fetchK8s() error {
300 | podList, err := d.k8sClient.GetPodList()
301 | if err != nil {
302 | return err
303 | }
304 |
305 | // Add/Update the ones we have
306 | newKeys := make(map[string]struct{})
307 | for _, pod := range podList.Items {
308 | // If the pod doesn't have a service-name defined, we don't touch it
309 | if _, ok := pod.ObjectMeta.Annotations[ConsulServiceNames]; !ok {
310 | continue
311 | }
312 |
313 | // If the pod isn't in the "Running" phase, we skip
314 | if pod.Status.Phase != "Running" {
315 | continue
316 | }
317 |
318 | key := podCacheKey(pod.Namespace, pod.Name)
319 | newKeys[key] = struct{}{}
320 | if existingPod, ok := d.localK8sState[key]; ok {
321 | existingPod.UpdatePod(pod)
322 | existingPod.HandleReadinessGate()
323 | } else {
324 | p, err := NewPod(pod, &d.c)
325 | if err != nil {
326 | logrus.Errorf("error creating local state for pod: %v", err)
327 | } else {
328 | d.localK8sState[key] = p
329 | // If there is an outstanding readinessGate we need to register a wait for remote syncing
330 | if p.OutstandingReadinessGate {
331 | go d.waitPod(p)
332 | }
333 | // Create readiness gate
334 | p.HandleReadinessGate()
335 | }
336 | }
337 | }
338 |
339 | // remove any local ones that don't exist anymore
340 | for k, pod := range d.localK8sState {
341 | if _, ok := newKeys[k]; !ok {
342 | pod.Cancel()
343 | delete(d.localK8sState, k)
344 | }
345 | }
346 |
347 | return nil
348 | }
349 |
350 | // Background goroutine to wait for a pod to be ready in consul; once done set "InitialSyncDone"
351 | func (d *Daemon) waitPod(pod *Pod) {
352 | syncedRemotely := false
353 | changesCh := pod.WaitChanges()
354 | changesCh <- struct{}{} // Seed a single change
355 | for {
356 | // wait for a change in pod state or the pod to stop
357 | select {
358 | case <-pod.Ctx.Done():
359 | return
360 | case _, ok := <-changesCh:
361 | // If the channel closed (we were too slow) we want to re-subscribe
362 | if !ok {
363 | changesCh = pod.WaitChanges()
364 | continue
365 | }
366 |
367 | }
368 | // If we haven't ensured the service is synced remotely; wait on that
369 | if !syncedRemotely {
370 | // The goal here is to ensure that the registration has propagated to the rest of the cluster
371 | nodeName, err := d.consulClient.Agent().NodeName()
372 | if err != nil {
373 | time.Sleep(time.Second) // TODO: exponential backoff
374 | continue // retry
375 | }
376 |
377 | opts := &consulApi.QueryOptions{AllowStale: true, UseCache: true}
378 | if err := d.ConsulNodeDoUntil(pod.Ctx, nodeName, opts, func(node *consulApi.CatalogNode) bool {
379 | synced := true
380 | for _, serviceName := range pod.GetServiceNames() {
381 | // If the service is not yet in the catalog, we are not synced
382 | if _, ok := node.Services[pod.GetServiceID(serviceName)]; !ok {
383 | synced = false
384 | }
385 | }
386 | return synced
387 | }); err != nil {
388 | time.Sleep(time.Second) // TODO: exponential backoff
389 | continue // retry
390 | }
391 | syncedRemotely = true
392 | }
393 | if ready, _ := pod.Ready(); ready {
394 | pod.InitialSyncDone = true
395 | // trigger a handle of readiness gate to avoid the poll delay.
396 | pod.HandleReadinessGate()
397 | return
398 | }
399 | }
400 | }
401 |
402 | // syncConsul is responsible for syncing local state to consul
403 | func (d *Daemon) syncConsul() error {
404 | // Get services from consul
405 | consulServices, err := d.consulClient.Agent().Services()
406 | if err != nil {
407 | return err
408 | }
409 |
410 | // TODO: split out update, for now we'll just re-register it all
411 | // Push/Update from local state
412 | for _, pod := range d.localK8sState {
413 | ready, containerReadiness := pod.Ready()
414 |
415 | status := consulApi.HealthCritical
416 | if ready {
417 | status = consulApi.HealthPassing
418 | }
419 |
420 | notesB, err := json.MarshalIndent(containerReadiness, "", " ")
421 | if err != nil {
422 | panic(err)
423 | }
424 |
425 | for _, serviceName := range pod.GetServiceNames() {
426 | // If the service exists, then we just need to update
427 | if consulService, ok := consulServices[pod.GetServiceID(serviceName)]; ok && !pod.HasChange(consulService) {
428 | // only call update once we are past half of the check TTL since the last update
429 | if pod.SyncStatuses.GetStatus(serviceName).LastUpdated.IsZero() || time.Now().Sub(pod.SyncStatuses.GetStatus(serviceName).LastUpdated) >= (pod.CheckTTL/2) {
430 | // If the service already exists, just update the check
431 | pod.SyncStatuses.GetStatus(serviceName).SetError(
432 | d.consulClient.Agent().UpdateTTL(
433 | pod.GetServiceID(serviceName), string(notesB), pod.GetServiceHealth(serviceName, status)))
434 | }
435 | } else {
436 | // Define the base metadata that katalog-sync requires
437 | meta := map[string]string{
438 | "external-source": "kubernetes", // Define the source of this service; see https://github.com/hashicorp/consul/blob/fc1d9e5d78749edc55249e5e7c1a8f7a24add99d/website/source/docs/platform/k8s/service-sync.html.md#service-meta
439 | ConsulSyncSourceName: ConsulSyncSourceValue, // Mark this as katalog-sync so we know we generated this
440 | ConsulK8sLinkName: podCacheKey(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name), // which includes full path to this (ns, pod name, etc.)
441 | ConsulK8sNamespace: pod.ObjectMeta.Namespace,
442 | ConsulK8sPod: pod.ObjectMeta.Name,
443 | }
444 | // Add in any metadata that the pod annotations define
445 | for k, v := range pod.GetServiceMeta(serviceName) {
446 | if _, ok := meta[k]; !ok {
447 | meta[k] = v
448 | }
449 | }
450 | // Next we actually register the service with consul
451 | pod.SyncStatuses.GetStatus(serviceName).SetError(d.consulClient.Agent().ServiceRegister(&consulApi.AgentServiceRegistration{
452 | ID: pod.GetServiceID(serviceName),
453 | Name: serviceName,
454 | Port: pod.GetPort(serviceName),
455 | Address: pod.Status.PodIP,
456 | Meta: meta,
457 | Tags: pod.GetTags(serviceName),
458 |
459 | Check: &consulApi.AgentServiceCheck{
460 | CheckID: pod.GetServiceID(serviceName), // TODO: better name? -- the name cannot have `/` in it -- it's used in the API query path
461 | TTL: pod.CheckTTL.String(),
462 | Status: pod.GetServiceHealth(serviceName, status), // Current status of check
463 | Notes: string(notesB), // Map of container->ready
464 | },
465 | }))
466 | }
467 | }
468 | }
469 |
470 | // Delete old ones
471 | for _, consulService := range consulServices {
472 | // We skip all services we aren't syncing (in case others are also registering agent services)
473 | if v, ok := consulService.Meta[ConsulSyncSourceName]; !ok || v != ConsulSyncSourceValue {
474 | continue
475 | }
476 |
477 | // If the service exists, skip
478 | if pod, ok := d.localK8sState[consulService.Meta[ConsulK8sLinkName]]; ok && pod.HasServiceName(consulService.Service) {
479 | continue
480 | }
481 |
482 | if err := d.consulClient.Agent().ServiceDeregister(consulService.ID); err != nil {
483 | return err
484 | }
485 | }
486 |
487 | return nil
488 | }
489 |
490 | type consulNodeFunc func(*consulApi.CatalogNode) bool
491 |
492 | // ConsulNodeDoUntil is a helper to wait until a change has propagated into the CatalogAPI
493 | func (d *Daemon) ConsulNodeDoUntil(ctx context.Context, nodeName string, opts *consulApi.QueryOptions, f consulNodeFunc) error {
494 | for {
495 | // If the client is no longer waiting, lets stop checking
496 | select {
497 | case <-ctx.Done():
498 | return ctx.Err()
499 | default:
500 | }
501 | node, m, err := d.consulClient.Catalog().Node(nodeName, opts)
502 | if err != nil {
503 | return err
504 | }
505 | opts.WaitIndex = m.LastIndex
506 | if f(node) {
507 | return nil
508 | }
509 | }
510 | }
511 |
512 | func podCacheKey(namespace, name string) string {
513 | if namespace == "" {
514 | namespace = "default"
515 | }
516 | return fmt.Sprintf("%s/%s", namespace, name)
517 | }
518 |
--------------------------------------------------------------------------------
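Daemon.doSync and Daemon.Run above coordinate through a channel of reply channels: a caller asks for a sync by sending a chan error into syncCh, and the run loop answers every waiting channel once the next sync finishes. The standalone sketch below illustrates only that pattern; it uses a plain ticker instead of the daemon's resettable timer and min/max sync-interval handling.

package main

import (
    "context"
    "fmt"
    "time"
)

type syncer struct {
    syncCh chan chan error
}

// requestSync blocks until the run loop reports the result of the next sync
// or the caller's context is cancelled.
func (s *syncer) requestSync(ctx context.Context) error {
    ch := make(chan error, 1)
    select {
    case s.syncCh <- ch:
    case <-ctx.Done():
        return ctx.Err()
    }
    select {
    case err := <-ch:
        return err
    case <-ctx.Done():
        return ctx.Err()
    }
}

// run collects waiting reply channels and answers all of them after each sync.
func (s *syncer) run(doSync func() error) {
    ticker := time.NewTicker(100 * time.Millisecond)
    defer ticker.Stop()
    var waiting []chan error
    for {
        select {
        case ch := <-s.syncCh:
            waiting = append(waiting, ch)
        case <-ticker.C:
            err := doSync()
            for _, ch := range waiting {
                ch <- err // reply channels are buffered, so this never blocks
            }
            waiting = waiting[:0]
        }
    }
}

func main() {
    s := &syncer{syncCh: make(chan chan error)}
    go s.run(func() error { fmt.Println("sync"); return nil })
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    fmt.Println("result:", s.requestSync(ctx))
}

Buffering each reply channel with capacity one is what lets the run loop notify waiters without blocking, even if a caller has already timed out and stopped reading.

--------------------------------------------------------------------------------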
/proto/katalog-sync.pb.go:
--------------------------------------------------------------------------------
1 | // Code generated by protoc-gen-gogo. DO NOT EDIT.
2 | // source: katalog-sync.proto
3 |
4 | /*
5 | Package katalogsync is a generated protocol buffer package.
6 |
7 | It is generated from these files:
8 | katalog-sync.proto
9 |
10 | It has these top-level messages:
11 | RegisterQuery
12 | RegisterResult
13 | DeregisterQuery
14 | DeregisterResult
15 | */
16 | package katalogsync
17 |
18 | import (
19 | fmt "fmt"
20 |
21 | proto "github.com/gogo/protobuf/proto"
22 |
23 | math "math"
24 |
25 | context "golang.org/x/net/context"
26 |
27 | grpc "google.golang.org/grpc"
28 |
29 | io "io"
30 | )
31 |
32 | // Reference imports to suppress errors if they are not otherwise used.
33 | var _ = proto.Marshal
34 | var _ = fmt.Errorf
35 | var _ = math.Inf
36 |
37 | // This is a compile-time assertion to ensure that this generated file
38 | // is compatible with the proto package it is being compiled against.
39 | // A compilation error at this line likely means your copy of the
40 | // proto package needs to be updated.
41 | const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
42 |
43 | type RegisterQuery struct {
44 | Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"`
45 | PodName string `protobuf:"bytes,2,opt,name=PodName,proto3" json:"PodName,omitempty"`
46 | ContainerName string `protobuf:"bytes,3,opt,name=ContainerName,proto3" json:"ContainerName,omitempty"`
47 | }
48 |
49 | func (m *RegisterQuery) Reset() { *m = RegisterQuery{} }
50 | func (m *RegisterQuery) String() string { return proto.CompactTextString(m) }
51 | func (*RegisterQuery) ProtoMessage() {}
52 | func (*RegisterQuery) Descriptor() ([]byte, []int) { return fileDescriptorKatalogSync, []int{0} }
53 |
54 | func (m *RegisterQuery) GetNamespace() string {
55 | if m != nil {
56 | return m.Namespace
57 | }
58 | return ""
59 | }
60 |
61 | func (m *RegisterQuery) GetPodName() string {
62 | if m != nil {
63 | return m.PodName
64 | }
65 | return ""
66 | }
67 |
68 | func (m *RegisterQuery) GetContainerName() string {
69 | if m != nil {
70 | return m.ContainerName
71 | }
72 | return ""
73 | }
74 |
75 | type RegisterResult struct {
76 | }
77 |
78 | func (m *RegisterResult) Reset() { *m = RegisterResult{} }
79 | func (m *RegisterResult) String() string { return proto.CompactTextString(m) }
80 | func (*RegisterResult) ProtoMessage() {}
81 | func (*RegisterResult) Descriptor() ([]byte, []int) { return fileDescriptorKatalogSync, []int{1} }
82 |
83 | type DeregisterQuery struct {
84 | Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"`
85 | PodName string `protobuf:"bytes,2,opt,name=PodName,proto3" json:"PodName,omitempty"`
86 | ContainerName string `protobuf:"bytes,3,opt,name=ContainerName,proto3" json:"ContainerName,omitempty"`
87 | }
88 |
89 | func (m *DeregisterQuery) Reset() { *m = DeregisterQuery{} }
90 | func (m *DeregisterQuery) String() string { return proto.CompactTextString(m) }
91 | func (*DeregisterQuery) ProtoMessage() {}
92 | func (*DeregisterQuery) Descriptor() ([]byte, []int) { return fileDescriptorKatalogSync, []int{2} }
93 |
94 | func (m *DeregisterQuery) GetNamespace() string {
95 | if m != nil {
96 | return m.Namespace
97 | }
98 | return ""
99 | }
100 |
101 | func (m *DeregisterQuery) GetPodName() string {
102 | if m != nil {
103 | return m.PodName
104 | }
105 | return ""
106 | }
107 |
108 | func (m *DeregisterQuery) GetContainerName() string {
109 | if m != nil {
110 | return m.ContainerName
111 | }
112 | return ""
113 | }
114 |
115 | type DeregisterResult struct {
116 | }
117 |
118 | func (m *DeregisterResult) Reset() { *m = DeregisterResult{} }
119 | func (m *DeregisterResult) String() string { return proto.CompactTextString(m) }
120 | func (*DeregisterResult) ProtoMessage() {}
121 | func (*DeregisterResult) Descriptor() ([]byte, []int) { return fileDescriptorKatalogSync, []int{3} }
122 |
123 | func init() {
124 | proto.RegisterType((*RegisterQuery)(nil), "katalogsync.RegisterQuery")
125 | proto.RegisterType((*RegisterResult)(nil), "katalogsync.RegisterResult")
126 | proto.RegisterType((*DeregisterQuery)(nil), "katalogsync.DeregisterQuery")
127 | proto.RegisterType((*DeregisterResult)(nil), "katalogsync.DeregisterResult")
128 | }
129 |
130 | // Reference imports to suppress errors if they are not otherwise used.
131 | var _ context.Context
132 | var _ grpc.ClientConn
133 |
134 | // This is a compile-time assertion to ensure that this generated file
135 | // is compatible with the grpc package it is being compiled against.
136 | const _ = grpc.SupportPackageIsVersion4
137 |
138 | // Client API for KatalogSync service
139 |
140 | type KatalogSyncClient interface {
141 | Register(ctx context.Context, in *RegisterQuery, opts ...grpc.CallOption) (*RegisterResult, error)
142 | Deregister(ctx context.Context, in *DeregisterQuery, opts ...grpc.CallOption) (*DeregisterResult, error)
143 | }
144 |
145 | type katalogSyncClient struct {
146 | cc *grpc.ClientConn
147 | }
148 |
149 | func NewKatalogSyncClient(cc *grpc.ClientConn) KatalogSyncClient {
150 | return &katalogSyncClient{cc}
151 | }
152 |
153 | func (c *katalogSyncClient) Register(ctx context.Context, in *RegisterQuery, opts ...grpc.CallOption) (*RegisterResult, error) {
154 | out := new(RegisterResult)
155 | err := grpc.Invoke(ctx, "/katalogsync.KatalogSync/Register", in, out, c.cc, opts...)
156 | if err != nil {
157 | return nil, err
158 | }
159 | return out, nil
160 | }
161 |
162 | func (c *katalogSyncClient) Deregister(ctx context.Context, in *DeregisterQuery, opts ...grpc.CallOption) (*DeregisterResult, error) {
163 | out := new(DeregisterResult)
164 | err := grpc.Invoke(ctx, "/katalogsync.KatalogSync/Deregister", in, out, c.cc, opts...)
165 | if err != nil {
166 | return nil, err
167 | }
168 | return out, nil
169 | }
170 |
171 | // Server API for KatalogSync service
172 |
173 | type KatalogSyncServer interface {
174 | Register(context.Context, *RegisterQuery) (*RegisterResult, error)
175 | Deregister(context.Context, *DeregisterQuery) (*DeregisterResult, error)
176 | }
177 |
178 | func RegisterKatalogSyncServer(s *grpc.Server, srv KatalogSyncServer) {
179 | s.RegisterService(&_KatalogSync_serviceDesc, srv)
180 | }
181 |
182 | func _KatalogSync_Register_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
183 | in := new(RegisterQuery)
184 | if err := dec(in); err != nil {
185 | return nil, err
186 | }
187 | if interceptor == nil {
188 | return srv.(KatalogSyncServer).Register(ctx, in)
189 | }
190 | info := &grpc.UnaryServerInfo{
191 | Server: srv,
192 | FullMethod: "/katalogsync.KatalogSync/Register",
193 | }
194 | handler := func(ctx context.Context, req interface{}) (interface{}, error) {
195 | return srv.(KatalogSyncServer).Register(ctx, req.(*RegisterQuery))
196 | }
197 | return interceptor(ctx, in, info, handler)
198 | }
199 |
200 | func _KatalogSync_Deregister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
201 | in := new(DeregisterQuery)
202 | if err := dec(in); err != nil {
203 | return nil, err
204 | }
205 | if interceptor == nil {
206 | return srv.(KatalogSyncServer).Deregister(ctx, in)
207 | }
208 | info := &grpc.UnaryServerInfo{
209 | Server: srv,
210 | FullMethod: "/katalogsync.KatalogSync/Deregister",
211 | }
212 | handler := func(ctx context.Context, req interface{}) (interface{}, error) {
213 | return srv.(KatalogSyncServer).Deregister(ctx, req.(*DeregisterQuery))
214 | }
215 | return interceptor(ctx, in, info, handler)
216 | }
217 |
218 | var _KatalogSync_serviceDesc = grpc.ServiceDesc{
219 | ServiceName: "katalogsync.KatalogSync",
220 | HandlerType: (*KatalogSyncServer)(nil),
221 | Methods: []grpc.MethodDesc{
222 | {
223 | MethodName: "Register",
224 | Handler: _KatalogSync_Register_Handler,
225 | },
226 | {
227 | MethodName: "Deregister",
228 | Handler: _KatalogSync_Deregister_Handler,
229 | },
230 | },
231 | Streams: []grpc.StreamDesc{},
232 | Metadata: "katalog-sync.proto",
233 | }
234 |
235 | func (m *RegisterQuery) Marshal() (dAtA []byte, err error) {
236 | size := m.Size()
237 | dAtA = make([]byte, size)
238 | n, err := m.MarshalTo(dAtA)
239 | if err != nil {
240 | return nil, err
241 | }
242 | return dAtA[:n], nil
243 | }
244 |
245 | func (m *RegisterQuery) MarshalTo(dAtA []byte) (int, error) {
246 | var i int
247 | _ = i
248 | var l int
249 | _ = l
250 | if len(m.Namespace) > 0 {
251 | dAtA[i] = 0xa
252 | i++
253 | i = encodeVarintKatalogSync(dAtA, i, uint64(len(m.Namespace)))
254 | i += copy(dAtA[i:], m.Namespace)
255 | }
256 | if len(m.PodName) > 0 {
257 | dAtA[i] = 0x12
258 | i++
259 | i = encodeVarintKatalogSync(dAtA, i, uint64(len(m.PodName)))
260 | i += copy(dAtA[i:], m.PodName)
261 | }
262 | if len(m.ContainerName) > 0 {
263 | dAtA[i] = 0x1a
264 | i++
265 | i = encodeVarintKatalogSync(dAtA, i, uint64(len(m.ContainerName)))
266 | i += copy(dAtA[i:], m.ContainerName)
267 | }
268 | return i, nil
269 | }
270 |
271 | func (m *RegisterResult) Marshal() (dAtA []byte, err error) {
272 | size := m.Size()
273 | dAtA = make([]byte, size)
274 | n, err := m.MarshalTo(dAtA)
275 | if err != nil {
276 | return nil, err
277 | }
278 | return dAtA[:n], nil
279 | }
280 |
281 | func (m *RegisterResult) MarshalTo(dAtA []byte) (int, error) {
282 | var i int
283 | _ = i
284 | var l int
285 | _ = l
286 | return i, nil
287 | }
288 |
289 | func (m *DeregisterQuery) Marshal() (dAtA []byte, err error) {
290 | size := m.Size()
291 | dAtA = make([]byte, size)
292 | n, err := m.MarshalTo(dAtA)
293 | if err != nil {
294 | return nil, err
295 | }
296 | return dAtA[:n], nil
297 | }
298 |
299 | func (m *DeregisterQuery) MarshalTo(dAtA []byte) (int, error) {
300 | var i int
301 | _ = i
302 | var l int
303 | _ = l
304 | if len(m.Namespace) > 0 {
305 | dAtA[i] = 0xa
306 | i++
307 | i = encodeVarintKatalogSync(dAtA, i, uint64(len(m.Namespace)))
308 | i += copy(dAtA[i:], m.Namespace)
309 | }
310 | if len(m.PodName) > 0 {
311 | dAtA[i] = 0x12
312 | i++
313 | i = encodeVarintKatalogSync(dAtA, i, uint64(len(m.PodName)))
314 | i += copy(dAtA[i:], m.PodName)
315 | }
316 | if len(m.ContainerName) > 0 {
317 | dAtA[i] = 0x1a
318 | i++
319 | i = encodeVarintKatalogSync(dAtA, i, uint64(len(m.ContainerName)))
320 | i += copy(dAtA[i:], m.ContainerName)
321 | }
322 | return i, nil
323 | }
324 |
325 | func (m *DeregisterResult) Marshal() (dAtA []byte, err error) {
326 | size := m.Size()
327 | dAtA = make([]byte, size)
328 | n, err := m.MarshalTo(dAtA)
329 | if err != nil {
330 | return nil, err
331 | }
332 | return dAtA[:n], nil
333 | }
334 |
335 | func (m *DeregisterResult) MarshalTo(dAtA []byte) (int, error) {
336 | var i int
337 | _ = i
338 | var l int
339 | _ = l
340 | return i, nil
341 | }
342 |
343 | func encodeFixed64KatalogSync(dAtA []byte, offset int, v uint64) int {
344 | dAtA[offset] = uint8(v)
345 | dAtA[offset+1] = uint8(v >> 8)
346 | dAtA[offset+2] = uint8(v >> 16)
347 | dAtA[offset+3] = uint8(v >> 24)
348 | dAtA[offset+4] = uint8(v >> 32)
349 | dAtA[offset+5] = uint8(v >> 40)
350 | dAtA[offset+6] = uint8(v >> 48)
351 | dAtA[offset+7] = uint8(v >> 56)
352 | return offset + 8
353 | }
354 | func encodeFixed32KatalogSync(dAtA []byte, offset int, v uint32) int {
355 | dAtA[offset] = uint8(v)
356 | dAtA[offset+1] = uint8(v >> 8)
357 | dAtA[offset+2] = uint8(v >> 16)
358 | dAtA[offset+3] = uint8(v >> 24)
359 | return offset + 4
360 | }
361 | func encodeVarintKatalogSync(dAtA []byte, offset int, v uint64) int {
362 | for v >= 1<<7 {
363 | dAtA[offset] = uint8(v&0x7f | 0x80)
364 | v >>= 7
365 | offset++
366 | }
367 | dAtA[offset] = uint8(v)
368 | return offset + 1
369 | }
370 | func (m *RegisterQuery) Size() (n int) {
371 | var l int
372 | _ = l
373 | l = len(m.Namespace)
374 | if l > 0 {
375 | n += 1 + l + sovKatalogSync(uint64(l))
376 | }
377 | l = len(m.PodName)
378 | if l > 0 {
379 | n += 1 + l + sovKatalogSync(uint64(l))
380 | }
381 | l = len(m.ContainerName)
382 | if l > 0 {
383 | n += 1 + l + sovKatalogSync(uint64(l))
384 | }
385 | return n
386 | }
387 |
388 | func (m *RegisterResult) Size() (n int) {
389 | var l int
390 | _ = l
391 | return n
392 | }
393 |
394 | func (m *DeregisterQuery) Size() (n int) {
395 | var l int
396 | _ = l
397 | l = len(m.Namespace)
398 | if l > 0 {
399 | n += 1 + l + sovKatalogSync(uint64(l))
400 | }
401 | l = len(m.PodName)
402 | if l > 0 {
403 | n += 1 + l + sovKatalogSync(uint64(l))
404 | }
405 | l = len(m.ContainerName)
406 | if l > 0 {
407 | n += 1 + l + sovKatalogSync(uint64(l))
408 | }
409 | return n
410 | }
411 |
412 | func (m *DeregisterResult) Size() (n int) {
413 | var l int
414 | _ = l
415 | return n
416 | }
417 |
418 | func sovKatalogSync(x uint64) (n int) {
419 | for {
420 | n++
421 | x >>= 7
422 | if x == 0 {
423 | break
424 | }
425 | }
426 | return n
427 | }
428 | func sozKatalogSync(x uint64) (n int) {
429 | return sovKatalogSync(uint64((x << 1) ^ uint64((int64(x) >> 63))))
430 | }
431 | func (m *RegisterQuery) Unmarshal(dAtA []byte) error {
432 | l := len(dAtA)
433 | iNdEx := 0
434 | for iNdEx < l {
435 | preIndex := iNdEx
436 | var wire uint64
437 | for shift := uint(0); ; shift += 7 {
438 | if shift >= 64 {
439 | return ErrIntOverflowKatalogSync
440 | }
441 | if iNdEx >= l {
442 | return io.ErrUnexpectedEOF
443 | }
444 | b := dAtA[iNdEx]
445 | iNdEx++
446 | wire |= (uint64(b) & 0x7F) << shift
447 | if b < 0x80 {
448 | break
449 | }
450 | }
451 | fieldNum := int32(wire >> 3)
452 | wireType := int(wire & 0x7)
453 | if wireType == 4 {
454 | return fmt.Errorf("proto: RegisterQuery: wiretype end group for non-group")
455 | }
456 | if fieldNum <= 0 {
457 | return fmt.Errorf("proto: RegisterQuery: illegal tag %d (wire type %d)", fieldNum, wire)
458 | }
459 | switch fieldNum {
460 | case 1:
461 | if wireType != 2 {
462 | return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
463 | }
464 | var stringLen uint64
465 | for shift := uint(0); ; shift += 7 {
466 | if shift >= 64 {
467 | return ErrIntOverflowKatalogSync
468 | }
469 | if iNdEx >= l {
470 | return io.ErrUnexpectedEOF
471 | }
472 | b := dAtA[iNdEx]
473 | iNdEx++
474 | stringLen |= (uint64(b) & 0x7F) << shift
475 | if b < 0x80 {
476 | break
477 | }
478 | }
479 | intStringLen := int(stringLen)
480 | if intStringLen < 0 {
481 | return ErrInvalidLengthKatalogSync
482 | }
483 | postIndex := iNdEx + intStringLen
484 | if postIndex > l {
485 | return io.ErrUnexpectedEOF
486 | }
487 | m.Namespace = string(dAtA[iNdEx:postIndex])
488 | iNdEx = postIndex
489 | case 2:
490 | if wireType != 2 {
491 | return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
492 | }
493 | var stringLen uint64
494 | for shift := uint(0); ; shift += 7 {
495 | if shift >= 64 {
496 | return ErrIntOverflowKatalogSync
497 | }
498 | if iNdEx >= l {
499 | return io.ErrUnexpectedEOF
500 | }
501 | b := dAtA[iNdEx]
502 | iNdEx++
503 | stringLen |= (uint64(b) & 0x7F) << shift
504 | if b < 0x80 {
505 | break
506 | }
507 | }
508 | intStringLen := int(stringLen)
509 | if intStringLen < 0 {
510 | return ErrInvalidLengthKatalogSync
511 | }
512 | postIndex := iNdEx + intStringLen
513 | if postIndex > l {
514 | return io.ErrUnexpectedEOF
515 | }
516 | m.PodName = string(dAtA[iNdEx:postIndex])
517 | iNdEx = postIndex
518 | case 3:
519 | if wireType != 2 {
520 | return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
521 | }
522 | var stringLen uint64
523 | for shift := uint(0); ; shift += 7 {
524 | if shift >= 64 {
525 | return ErrIntOverflowKatalogSync
526 | }
527 | if iNdEx >= l {
528 | return io.ErrUnexpectedEOF
529 | }
530 | b := dAtA[iNdEx]
531 | iNdEx++
532 | stringLen |= (uint64(b) & 0x7F) << shift
533 | if b < 0x80 {
534 | break
535 | }
536 | }
537 | intStringLen := int(stringLen)
538 | if intStringLen < 0 {
539 | return ErrInvalidLengthKatalogSync
540 | }
541 | postIndex := iNdEx + intStringLen
542 | if postIndex > l {
543 | return io.ErrUnexpectedEOF
544 | }
545 | m.ContainerName = string(dAtA[iNdEx:postIndex])
546 | iNdEx = postIndex
547 | default:
548 | iNdEx = preIndex
549 | skippy, err := skipKatalogSync(dAtA[iNdEx:])
550 | if err != nil {
551 | return err
552 | }
553 | if skippy < 0 {
554 | return ErrInvalidLengthKatalogSync
555 | }
556 | if (iNdEx + skippy) > l {
557 | return io.ErrUnexpectedEOF
558 | }
559 | iNdEx += skippy
560 | }
561 | }
562 |
563 | if iNdEx > l {
564 | return io.ErrUnexpectedEOF
565 | }
566 | return nil
567 | }
568 | func (m *RegisterResult) Unmarshal(dAtA []byte) error {
569 | l := len(dAtA)
570 | iNdEx := 0
571 | for iNdEx < l {
572 | preIndex := iNdEx
573 | var wire uint64
574 | for shift := uint(0); ; shift += 7 {
575 | if shift >= 64 {
576 | return ErrIntOverflowKatalogSync
577 | }
578 | if iNdEx >= l {
579 | return io.ErrUnexpectedEOF
580 | }
581 | b := dAtA[iNdEx]
582 | iNdEx++
583 | wire |= (uint64(b) & 0x7F) << shift
584 | if b < 0x80 {
585 | break
586 | }
587 | }
588 | fieldNum := int32(wire >> 3)
589 | wireType := int(wire & 0x7)
590 | if wireType == 4 {
591 | return fmt.Errorf("proto: RegisterResult: wiretype end group for non-group")
592 | }
593 | if fieldNum <= 0 {
594 | return fmt.Errorf("proto: RegisterResult: illegal tag %d (wire type %d)", fieldNum, wire)
595 | }
596 | switch fieldNum {
597 | default:
598 | iNdEx = preIndex
599 | skippy, err := skipKatalogSync(dAtA[iNdEx:])
600 | if err != nil {
601 | return err
602 | }
603 | if skippy < 0 {
604 | return ErrInvalidLengthKatalogSync
605 | }
606 | if (iNdEx + skippy) > l {
607 | return io.ErrUnexpectedEOF
608 | }
609 | iNdEx += skippy
610 | }
611 | }
612 |
613 | if iNdEx > l {
614 | return io.ErrUnexpectedEOF
615 | }
616 | return nil
617 | }
618 | func (m *DeregisterQuery) Unmarshal(dAtA []byte) error {
619 | l := len(dAtA)
620 | iNdEx := 0
621 | for iNdEx < l {
622 | preIndex := iNdEx
623 | var wire uint64
624 | for shift := uint(0); ; shift += 7 {
625 | if shift >= 64 {
626 | return ErrIntOverflowKatalogSync
627 | }
628 | if iNdEx >= l {
629 | return io.ErrUnexpectedEOF
630 | }
631 | b := dAtA[iNdEx]
632 | iNdEx++
633 | wire |= (uint64(b) & 0x7F) << shift
634 | if b < 0x80 {
635 | break
636 | }
637 | }
638 | fieldNum := int32(wire >> 3)
639 | wireType := int(wire & 0x7)
640 | if wireType == 4 {
641 | return fmt.Errorf("proto: DeregisterQuery: wiretype end group for non-group")
642 | }
643 | if fieldNum <= 0 {
644 | return fmt.Errorf("proto: DeregisterQuery: illegal tag %d (wire type %d)", fieldNum, wire)
645 | }
646 | switch fieldNum {
647 | case 1:
648 | if wireType != 2 {
649 | return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
650 | }
651 | var stringLen uint64
652 | for shift := uint(0); ; shift += 7 {
653 | if shift >= 64 {
654 | return ErrIntOverflowKatalogSync
655 | }
656 | if iNdEx >= l {
657 | return io.ErrUnexpectedEOF
658 | }
659 | b := dAtA[iNdEx]
660 | iNdEx++
661 | stringLen |= (uint64(b) & 0x7F) << shift
662 | if b < 0x80 {
663 | break
664 | }
665 | }
666 | intStringLen := int(stringLen)
667 | if intStringLen < 0 {
668 | return ErrInvalidLengthKatalogSync
669 | }
670 | postIndex := iNdEx + intStringLen
671 | if postIndex > l {
672 | return io.ErrUnexpectedEOF
673 | }
674 | m.Namespace = string(dAtA[iNdEx:postIndex])
675 | iNdEx = postIndex
676 | case 2:
677 | if wireType != 2 {
678 | return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType)
679 | }
680 | var stringLen uint64
681 | for shift := uint(0); ; shift += 7 {
682 | if shift >= 64 {
683 | return ErrIntOverflowKatalogSync
684 | }
685 | if iNdEx >= l {
686 | return io.ErrUnexpectedEOF
687 | }
688 | b := dAtA[iNdEx]
689 | iNdEx++
690 | stringLen |= (uint64(b) & 0x7F) << shift
691 | if b < 0x80 {
692 | break
693 | }
694 | }
695 | intStringLen := int(stringLen)
696 | if intStringLen < 0 {
697 | return ErrInvalidLengthKatalogSync
698 | }
699 | postIndex := iNdEx + intStringLen
700 | if postIndex > l {
701 | return io.ErrUnexpectedEOF
702 | }
703 | m.PodName = string(dAtA[iNdEx:postIndex])
704 | iNdEx = postIndex
705 | case 3:
706 | if wireType != 2 {
707 | return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
708 | }
709 | var stringLen uint64
710 | for shift := uint(0); ; shift += 7 {
711 | if shift >= 64 {
712 | return ErrIntOverflowKatalogSync
713 | }
714 | if iNdEx >= l {
715 | return io.ErrUnexpectedEOF
716 | }
717 | b := dAtA[iNdEx]
718 | iNdEx++
719 | stringLen |= (uint64(b) & 0x7F) << shift
720 | if b < 0x80 {
721 | break
722 | }
723 | }
724 | intStringLen := int(stringLen)
725 | if intStringLen < 0 {
726 | return ErrInvalidLengthKatalogSync
727 | }
728 | postIndex := iNdEx + intStringLen
729 | if postIndex > l {
730 | return io.ErrUnexpectedEOF
731 | }
732 | m.ContainerName = string(dAtA[iNdEx:postIndex])
733 | iNdEx = postIndex
734 | default:
735 | iNdEx = preIndex
736 | skippy, err := skipKatalogSync(dAtA[iNdEx:])
737 | if err != nil {
738 | return err
739 | }
740 | if skippy < 0 {
741 | return ErrInvalidLengthKatalogSync
742 | }
743 | if (iNdEx + skippy) > l {
744 | return io.ErrUnexpectedEOF
745 | }
746 | iNdEx += skippy
747 | }
748 | }
749 |
750 | if iNdEx > l {
751 | return io.ErrUnexpectedEOF
752 | }
753 | return nil
754 | }
755 | func (m *DeregisterResult) Unmarshal(dAtA []byte) error {
756 | l := len(dAtA)
757 | iNdEx := 0
758 | for iNdEx < l {
759 | preIndex := iNdEx
760 | var wire uint64
761 | for shift := uint(0); ; shift += 7 {
762 | if shift >= 64 {
763 | return ErrIntOverflowKatalogSync
764 | }
765 | if iNdEx >= l {
766 | return io.ErrUnexpectedEOF
767 | }
768 | b := dAtA[iNdEx]
769 | iNdEx++
770 | wire |= (uint64(b) & 0x7F) << shift
771 | if b < 0x80 {
772 | break
773 | }
774 | }
775 | fieldNum := int32(wire >> 3)
776 | wireType := int(wire & 0x7)
777 | if wireType == 4 {
778 | return fmt.Errorf("proto: DeregisterResult: wiretype end group for non-group")
779 | }
780 | if fieldNum <= 0 {
781 | return fmt.Errorf("proto: DeregisterResult: illegal tag %d (wire type %d)", fieldNum, wire)
782 | }
783 | switch fieldNum {
784 | default:
785 | iNdEx = preIndex
786 | skippy, err := skipKatalogSync(dAtA[iNdEx:])
787 | if err != nil {
788 | return err
789 | }
790 | if skippy < 0 {
791 | return ErrInvalidLengthKatalogSync
792 | }
793 | if (iNdEx + skippy) > l {
794 | return io.ErrUnexpectedEOF
795 | }
796 | iNdEx += skippy
797 | }
798 | }
799 |
800 | if iNdEx > l {
801 | return io.ErrUnexpectedEOF
802 | }
803 | return nil
804 | }
805 | func skipKatalogSync(dAtA []byte) (n int, err error) {
806 | l := len(dAtA)
807 | iNdEx := 0
808 | for iNdEx < l {
809 | var wire uint64
810 | for shift := uint(0); ; shift += 7 {
811 | if shift >= 64 {
812 | return 0, ErrIntOverflowKatalogSync
813 | }
814 | if iNdEx >= l {
815 | return 0, io.ErrUnexpectedEOF
816 | }
817 | b := dAtA[iNdEx]
818 | iNdEx++
819 | wire |= (uint64(b) & 0x7F) << shift
820 | if b < 0x80 {
821 | break
822 | }
823 | }
824 | wireType := int(wire & 0x7)
825 | switch wireType {
826 | case 0:
827 | for shift := uint(0); ; shift += 7 {
828 | if shift >= 64 {
829 | return 0, ErrIntOverflowKatalogSync
830 | }
831 | if iNdEx >= l {
832 | return 0, io.ErrUnexpectedEOF
833 | }
834 | iNdEx++
835 | if dAtA[iNdEx-1] < 0x80 {
836 | break
837 | }
838 | }
839 | return iNdEx, nil
840 | case 1:
841 | iNdEx += 8
842 | return iNdEx, nil
843 | case 2:
844 | var length int
845 | for shift := uint(0); ; shift += 7 {
846 | if shift >= 64 {
847 | return 0, ErrIntOverflowKatalogSync
848 | }
849 | if iNdEx >= l {
850 | return 0, io.ErrUnexpectedEOF
851 | }
852 | b := dAtA[iNdEx]
853 | iNdEx++
854 | length |= (int(b) & 0x7F) << shift
855 | if b < 0x80 {
856 | break
857 | }
858 | }
859 | iNdEx += length
860 | if length < 0 {
861 | return 0, ErrInvalidLengthKatalogSync
862 | }
863 | return iNdEx, nil
864 | case 3:
865 | for {
866 | var innerWire uint64
867 | var start int = iNdEx
868 | for shift := uint(0); ; shift += 7 {
869 | if shift >= 64 {
870 | return 0, ErrIntOverflowKatalogSync
871 | }
872 | if iNdEx >= l {
873 | return 0, io.ErrUnexpectedEOF
874 | }
875 | b := dAtA[iNdEx]
876 | iNdEx++
877 | innerWire |= (uint64(b) & 0x7F) << shift
878 | if b < 0x80 {
879 | break
880 | }
881 | }
882 | innerWireType := int(innerWire & 0x7)
883 | if innerWireType == 4 {
884 | break
885 | }
886 | next, err := skipKatalogSync(dAtA[start:])
887 | if err != nil {
888 | return 0, err
889 | }
890 | iNdEx = start + next
891 | }
892 | return iNdEx, nil
893 | case 4:
894 | return iNdEx, nil
895 | case 5:
896 | iNdEx += 4
897 | return iNdEx, nil
898 | default:
899 | return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
900 | }
901 | }
902 | panic("unreachable")
903 | }
904 |
905 | var (
906 | ErrInvalidLengthKatalogSync = fmt.Errorf("proto: negative length found during unmarshaling")
907 | ErrIntOverflowKatalogSync = fmt.Errorf("proto: integer overflow")
908 | )
909 |
910 | func init() { proto.RegisterFile("katalog-sync.proto", fileDescriptorKatalogSync) }
911 |
912 | var fileDescriptorKatalogSync = []byte{
913 | // 229 bytes of a gzipped FileDescriptorProto
914 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x4e, 0x2c, 0x49,
915 | 0xcc, 0xc9, 0x4f, 0xd7, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2,
916 | 0x86, 0x8a, 0x81, 0x84, 0x94, 0x72, 0xb9, 0x78, 0x83, 0x52, 0xd3, 0x33, 0x8b, 0x4b, 0x52, 0x8b,
917 | 0x02, 0x4b, 0x53, 0x8b, 0x2a, 0x85, 0x64, 0xb8, 0x38, 0xfd, 0x12, 0x73, 0x53, 0x8b, 0x0b, 0x12,
918 | 0x93, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x10, 0x02, 0x42, 0x12, 0x5c, 0xec, 0x01,
919 | 0xf9, 0x29, 0x20, 0xbe, 0x04, 0x13, 0x58, 0x0e, 0xc6, 0x15, 0x52, 0xe1, 0xe2, 0x75, 0xce, 0xcf,
920 | 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, 0x02, 0xcb, 0x33, 0x83, 0xe5, 0x51, 0x05, 0x95, 0x04, 0xb8,
921 | 0xf8, 0x60, 0xd6, 0x05, 0xa5, 0x16, 0x97, 0xe6, 0x94, 0x28, 0xe5, 0x73, 0xf1, 0xbb, 0xa4, 0x16,
922 | 0xd1, 0xd1, 0x09, 0x42, 0x5c, 0x02, 0x08, 0x0b, 0x21, 0x8e, 0x30, 0x9a, 0xcb, 0xc8, 0xc5, 0xed,
923 | 0x0d, 0x09, 0x95, 0xe0, 0xca, 0xbc, 0x64, 0x21, 0x67, 0x2e, 0x0e, 0x98, 0x33, 0x85, 0xa4, 0xf4,
924 | 0x90, 0xc2, 0x4b, 0x0f, 0x25, 0xb0, 0xa4, 0xa4, 0xb1, 0xca, 0x41, 0x0c, 0x15, 0xf2, 0xe4, 0xe2,
925 | 0x42, 0x58, 0x24, 0x24, 0x83, 0xa2, 0x14, 0xcd, 0xcb, 0x52, 0xb2, 0x38, 0x64, 0x21, 0x46, 0x39,
926 | 0x09, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e,
927 | 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xe3, 0xd2, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xe2, 0x98,
928 | 0x1c, 0xe1, 0x01, 0x00, 0x00,
929 | }
930 |
--------------------------------------------------------------------------------
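For reference, a hedged sketch of how a sidecar process might use the generated KatalogSyncClient above to register itself with the node-local daemon. The daemon address, namespace, pod, and container names here are placeholders, which the actual sidecar command (cmd/katalog-sync-sidecar/main.go) would supply from its own configuration.

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"

    katalogsync "github.com/wish/katalog-sync/proto"
)

func main() {
    // Placeholder address; in-cluster this would point at the node-local katalog-sync-daemon.
    conn, err := grpc.Dial("127.0.0.1:8501", grpc.WithInsecure())
    if err != nil {
        log.Fatalf("unable to dial katalog-sync-daemon: %v", err)
    }
    defer conn.Close()

    client := katalogsync.NewKatalogSyncClient(conn)

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // Register blocks until the pod is ready and the service has propagated
    // to the consul catalog (see Daemon.Register in pkg/daemon/daemon.go).
    if _, err := client.Register(ctx, &katalogsync.RegisterQuery{
        Namespace:     "default",
        PodName:       "example-pod",
        ContainerName: "katalog-sync-sidecar",
    }); err != nil {
        log.Fatalf("register failed: %v", err)
    }
    log.Println("registered")
}

--------------------------------------------------------------------------------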