├── .gitignore
├── README.md
├── _config.yml
├── go-client
│   ├── README.md
│   ├── dynamic-client
│   │   ├── Dockerfile
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── app.yaml
│   │   ├── controller-runtime
│   │   ├── go.mod
│   │   ├── go.sum
│   │   ├── main.go
│   │   └── skaffold.yaml
│   └── informer
│       ├── Dockerfile
│       ├── Makefile
│       ├── README.md
│       ├── app.yaml
│       ├── go.mod
│       ├── go.sum
│       ├── main.go
│       └── skaffold.yaml
├── kind
│   ├── README.md
│   ├── contour
│   │   ├── README.md
│   │   └── kind.config.yaml
│   └── metallb
│       ├── README.md
│       ├── kind.config.yaml
│       ├── metallb-config.yaml
│       └── sample-app.yaml
├── kubernetes-101
│   ├── developers
│   │   ├── README.md
│   │   ├── examples
│   │   │   ├── deploy.yaml
│   │   │   ├── pod.yaml
│   │   │   └── rs.yaml
│   │   ├── overview.md
│   │   └── why-kubernetes.md
│   └── operators
│       └── README.md
└── stories
    ├── README.md
    ├── e2e-node-tests
    │   └── README.md
    ├── e2e-tests
    │   ├── README.md
    │   ├── example-e2e-test
    │   │   ├── apps
    │   │   │   ├── deployment.go
    │   │   │   └── framework.go
    │   │   ├── e2e.go
    │   │   ├── e2e_test.go
    │   │   ├── framework
    │   │   │   └── framework.go
    │   │   ├── go.mod
    │   │   └── go.sum
    │   ├── hack
    │   │   └── update-gofmt.sh
    │   └── test-run.yaml
    ├── general
    │   └── README.md
    ├── kk-pr-85968
    │   ├── README.md
    │   ├── example-cobra
    │   │   ├── apiserver.go
    │   │   ├── cmd
    │   │   │   └── app
    │   │   │       ├── options
    │   │   │       │   └── options.go
    │   │   │       └── server.go
    │   │   ├── go.mod
    │   │   ├── go.sum
    │   │   └── pkg
    │   │       ├── kubeapiserver
    │   │       │   └── options
    │   │       │       └── options.go
    │   │       ├── master
    │   │       │   └── services.go
    │   │       └── util
    │   │           └── flag
    │   │               └── flag.go
    │   └── hack
    │       └── update-gofmt.sh
    ├── kk-pr-85993
    │   └── README.md
    ├── kubelet-testing
    │   └── README.md
    └── networking
        ├── README.md
        ├── container-network.png
        ├── go.mod
        └── main.go
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swo
2 | *.swp
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Welcome!
2 | 
3 | This space is intended to host a broad range of tutorials, notes, failure
4 | stories, and anything that may be remotely useful to another operator or
5 | developer who works with or wants to learn to work with Kubernetes.
6 | 
7 | ## Table of Contents
8 | * [kubernetes 101](./kubernetes-101) - a collection of howtos, tips, and
9 |   tutorials for
10 |   * [developers](./kubernetes-101/developers) - people who are building
11 |     things on top of Kubernetes.
12 |   * [operators](./kubernetes-101/operators) - people who are running
13 |     Kubernetes and taking care of it.
14 | * [Stories](./stories/) - a collection of developer stories about Kubernetes.
15 |   Here you will find stories detailing how parts of Kubernetes work. These are
16 |   inspired by real-life issues, bugs, and things that are not very well known.
17 |   These are written for people who want to begin contributing to Kubernetes.
18 | * [go-client](./go-client/) - a collection of handy programs for building
19 |   things on top of Kubernetes using the Go client.
20 | * [kind](./kind/) - [github.com/kubernetes-sigs/kind] related resources,
21 |   tutorials, etc.
22 | 
23 | 
24 | [github.com/kubernetes-sigs/kind]: https://github.com/kubernetes-sigs/kind
25 | 
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-architect
--------------------------------------------------------------------------------
/go-client/README.md:
--------------------------------------------------------------------------------
1 | # Building on Top of Kubernetes with Go
2 | 
3 | ## Table of Contents
4 | * [Building an informer](./informer/)
5 | * [Using the dynamic client](./dynamic-client/)
--------------------------------------------------------------------------------
/go-client/dynamic-client/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.12 AS base
2 | 
3 | ENV GO111MODULE=on
4 | WORKDIR /go/src/app
5 | ADD go.mod go.sum /go/src/app/
6 | RUN go mod download
7 | ADD main.go /go/src/app/
8 | # CGO_ENABLED=0 disables cgo - no cross-compiled dependencies.
9 | # -a forces a rebuild.
10 | # -ldflags -w disables debug.
11 | # -extldflags "-static" statically links external libraries.
12 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go \
13 | 	build -a -ldflags='-w -extldflags "-static"' \
14 | 	-o /go/bin/app
15 | 
16 | 
17 | FROM scratch
18 | COPY --from=base /go/bin/app /go/bin/app
19 | ENTRYPOINT ["/go/bin/app"]
--------------------------------------------------------------------------------
/go-client/dynamic-client/Makefile:
--------------------------------------------------------------------------------
1 | GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
2 | GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
3 | DOCKER_IMAGE := cool-kube$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN))
4 | 
5 | DOCKERFILE ?= Dockerfile
6 | DOCKER_BUILD_ARGS ?= --force-rm -f "$(DOCKERFILE)"
7 | 
8 | DOCKER_BUILD := docker build $(DOCKER_BUILD_ARGS) -t "$(DOCKER_IMAGE)" .
9 | DOCKER_RUN := docker run --rm -it
10 | 
11 | .PHONY: all
12 | all: push
13 | 
14 | .PHONY: build
15 | build:
16 | 	$(DOCKER_BUILD)
17 | 
18 | .PHONY: push
19 | push: build
20 | 	docker tag $(DOCKER_IMAGE) alejandrox1/cool-kube:v1
21 | 	docker push alejandrox1/cool-kube:v1
--------------------------------------------------------------------------------
/go-client/dynamic-client/README.md:
--------------------------------------------------------------------------------
1 | # Dynamic Client
2 | 
3 | In this section we will cover a bit about the dynamic client and why it is
4 | useful.
5 | 
6 | ## Theory
7 | 
8 | Below we discuss some of the pieces that you will often see when working
9 | with Kubernetes.
10 | 
11 | [`runtime.Object`](https://godoc.org/k8s.io/apimachinery/pkg/runtime#Object),
12 | from "k8s.io/apimachinery/pkg/runtime", is something that you will see very often.
13 | A `runtime.Object` is an interface that requires the following methods:
14 | 
15 | ```go
16 | type Object interface {
17 | 	GetObjectKind() schema.ObjectKind
18 | 	DeepCopyObject() Object
19 | }
20 | ```
21 | > Object interface must be supported by all API types registered with Scheme.
22 | > Since objects in a scheme are expected to be serialized to the wire, the
23 | > interface an Object must provide to the Scheme allows serializers to set the
24 | > kind, version, and group the object is represented as. An Object may choose
25 | > to return a no-op ObjectKindAccessor in cases where it is not expected to be
26 | > serialized.
27 | 
28 | [`schema.ObjectKind`](https://godoc.org/k8s.io/apimachinery/pkg/runtime/schema#ObjectKind)
29 | from "k8s.io/apimachinery/pkg/runtime/schema".
30 | `schema.ObjectKind` is another interface that allows for setting and getting an
31 | object's group, version, and kind.
32 | 
33 | ```go
34 | type ObjectKind interface {
35 | 	// SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil
36 | 	// should clear the current setting.
37 | 	SetGroupVersionKind(kind GroupVersionKind)
38 | 	// GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does
39 | 	// not expose or provide these fields.
40 | 	GroupVersionKind() GroupVersionKind
41 | }
42 | ```
43 | > All objects that are serialized from a Scheme encode their type information.
44 | > This interface is used by serialization to set type information from the
45 | > Scheme onto the serialized version of an object. For objects that cannot be
46 | > serialized or have unique requirements, this interface may be a no-op.
47 | 
48 | [`client.Patch`](https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#Patch)
49 | from "sigs.k8s.io/controller-runtime/pkg/client".
50 | 
51 | ```go
52 | type Patch interface {
53 | 	// Type is the PatchType of the patch.
54 | 	Type() types.PatchType
55 | 	// Data is the raw data representing the patch.
56 | 	Data(obj runtime.Object) ([]byte, error)
57 | }
58 | ```
59 | > Patch is a patch that can be applied to a Kubernetes object.
60 | 
61 | [`types.PatchType`](https://godoc.org/k8s.io/apimachinery/pkg/types#PatchType)
62 | from "k8s.io/apimachinery/pkg/types".
63 | ```go
64 | const (
65 | 	JSONPatchType           PatchType = "application/json-patch+json"
66 | 	MergePatchType          PatchType = "application/merge-patch+json"
67 | 	StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
68 | 	ApplyPatchType          PatchType = "application/apply-patch+yaml"
69 | )
70 | ```
71 | > these are constants to support HTTP PATCH utilized by both the client and
72 | > server.
73 | 
74 | ## Lab
75 | 
76 | In our example program, we will try to obtain the API group, version, and
77 | kind of an object.
78 | We will use the "standard" clientset and a dynamic client, which returns
79 | unstructured objects.
80 | 
81 | This example shows an interesting caveat of working with Kubernetes objects:
82 | 
83 | > decoding to go structs drops apiVersion/kind, because the type info is
84 | > inherent in the object. decoding to unstructured objects
85 | > (like the dynamic client does) preserves that information.
86 | > [github.com/kubernetes/client-go/issues/541#issuecomment-452312901](https://github.com/kubernetes/client-go/issues/541#issuecomment-452312901)
87 | 
88 | ### Create a Cluster
89 | ```
90 | $ kind create cluster
91 | ```
92 | 
93 | ### Our Go program
94 | 
95 | We will be using a recent version of the Kubernetes Go client, matching [go.mod](./go.mod) :rocket:
96 | ```
97 | $ export GO111MODULE=on
98 | $ go mod init
99 | $ go get k8s.io/client-go@v0.17.4
100 | ```
101 | 
102 | To run this (make sure you change the `push` target in the
103 | [Makefile](./Makefile) to create your own image):
104 | ```
105 | $ make && kubectl apply -f app.yaml
106 | ```
107 | 
108 | This will create a deployment called `cool-kube`.
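109 | 
110 | Under the hood, the comparison boils down to the following sketch (client-go
111 | v0.17 signatures with error handling elided; `clientset`, `dynamicClient`,
112 | and `ns` are the variables set up in [main.go](./main.go)):
113 | 
114 | ```go
115 | // Typed clientset: decoding into a Go struct drops apiVersion/kind, so the
116 | // returned object's GroupVersionKind is empty.
117 | d, _ := clientset.AppsV1().Deployments(ns).Get("nginx-server", metav1.GetOptions{})
118 | fmt.Println(d.GetObjectKind().GroupVersionKind()) // prints "/, Kind="
119 | 
120 | // Dynamic client: unstructured objects keep their apiVersion/kind.
121 | gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
122 | u, _ := dynamicClient.Resource(gvr).Namespace(ns).Get("nginx-server", metav1.GetOptions{})
123 | fmt.Println(u.GetObjectKind().GroupVersionKind()) // prints "apps/v1, Kind=Deployment"
124 | ```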
125 | To get its logs, try:
126 | ```
127 | $ kubectl logs deploy/cool-kube -f
128 | ```
129 | 
130 | 
131 | [godoc pkg/client]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client
--------------------------------------------------------------------------------
/go-client/dynamic-client/app.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # ServiceAccount for our app.
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 |   name: cool-kube
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: Role
10 | metadata:
11 |   name: cool-kube
12 |   namespace: default
13 | rules:
14 | - apiGroups: ["apps"]
15 |   resources: ["deployments"]
16 |   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
17 | ---
18 | # Bind "cool-kube" Role to "cool-kube" ServiceAccount.
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | kind: RoleBinding
21 | metadata:
22 |   name: cool-kube
23 |   namespace: default # This only grants permissions within the "default" namespace.
24 | subjects:
25 | - kind: ServiceAccount
26 |   name: cool-kube
27 |   apiGroup: ""
28 | roleRef:
29 |   kind: Role # this must be Role or ClusterRole
30 |   name: cool-kube # this must match the name of the Role or ClusterRole you wish to bind to.
31 |   apiGroup: rbac.authorization.k8s.io
32 | ---
33 | apiVersion: apps/v1
34 | kind: Deployment
35 | metadata:
36 |   name: cool-kube
37 |   labels:
38 |     app: cool-kube
39 | spec:
40 |   replicas: 1
41 |   selector:
42 |     matchLabels:
43 |       app: cool-kube
44 |   template:
45 |     metadata:
46 |       labels:
47 |         app: cool-kube
48 |     spec:
49 |       serviceAccount: cool-kube
50 |       containers:
51 |       - name: cool-kube
52 |         image: alejandrox1/cool-kube:v1
53 |         imagePullPolicy: Always
54 | 
--------------------------------------------------------------------------------
/go-client/dynamic-client/controller-runtime:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/contributing-to-kubernetes/gnosis/26f5ba9a60cc4a0c6a14ce7517105e757e97d936/go-client/dynamic-client/controller-runtime
--------------------------------------------------------------------------------
/go-client/dynamic-client/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/contributing-to-kubernetes/gnosis/go-client/dynamic-client
2 | 
3 | go 1.13
4 | 
5 | require (
6 | 	k8s.io/api v0.17.4
7 | 	k8s.io/apimachinery v0.17.4
8 | 	k8s.io/client-go v0.17.4
9 | )
10 | 
--------------------------------------------------------------------------------
/go-client/dynamic-client/go.sum:
--------------------------------------------------------------------------------
1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
3 | cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
4 | github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
5 | github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
6 | github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
7 | github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
8 | github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
9 |
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 10 | github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= 11 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 12 | github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 13 | github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 14 | github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 15 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 16 | github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 17 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 18 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 19 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 20 | github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 21 | github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= 22 | github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= 23 | github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 24 | github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 25 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 26 | github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 27 | github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 28 | github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= 29 | github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= 30 | github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= 31 | github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= 32 | github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= 33 | github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= 34 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 35 | github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 36 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 37 | github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 38 | github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 39 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 40 | github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= 41 | github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 42 | github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 43 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 44 | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 45 | github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 46 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 47 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 48 | github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= 49 | github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= 50 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 51 | github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 52 | github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 53 | github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 54 | github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 55 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= 56 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 57 | github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= 58 | github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 59 | github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 60 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 61 | github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 62 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 63 | github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 64 | github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 65 | github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= 66 | github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 67 | github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= 68 | github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 69 | github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 70 | github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= 71 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 72 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 73 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 74 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 75 | github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 76 | github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 77 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 78 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 79 | github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 80 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 81 | github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= 82 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 83 | github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 84 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= 85 | github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 86 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 87 | github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 88 | github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 89 | github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= 90 | github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 91 | github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 92 | github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 93 | github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 94 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 95 | github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 96 | github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 97 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 98 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 99 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 100 | github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 101 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 102 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 103 | go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= 104 | golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 105 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 106 | golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= 107 | golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 108 | golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= 109 | golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 110 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 111 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 112 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 113 | golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 114 | golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 115 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 116 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 117 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 118 | golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 119 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 120 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 121 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 122 | golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= 123 | golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 124 | golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= 125 | golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 126 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 127 | golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 128 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= 129 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 130 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 131 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 132 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 133 | golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 134 | golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 135 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 136 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 137 | golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 138 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 139 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 140 | golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= 141 | 
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 142 | golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= 143 | golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 144 | golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 145 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 146 | golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 147 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 148 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 149 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= 150 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 151 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= 152 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 153 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 154 | golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 155 | golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 156 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 157 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 158 | golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 159 | google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 160 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 161 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 162 | google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 163 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 164 | google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 165 | google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 166 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 167 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 168 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 169 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 170 | gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o= 171 | gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 172 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 173 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 174 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 175 | gopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 176 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 177 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 178 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 179 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 180 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 181 | honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 182 | k8s.io/api v0.0.0-20190918155943-95b840bb6a1f h1:8FRUST8oUkEI45WYKyD8ed7Ad0Kg5v11zHyPkEVb2xo= 183 | k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= 184 | k8s.io/api v0.17.4 h1:HbwOhDapkguO8lTAE8OX3hdF2qp8GtpC9CW/MQATXXo= 185 | k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= 186 | k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 h1:CS1tBQz3HOXiseWZu6ZicKX361CZLT97UFnnPx0aqBw= 187 | k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= 188 | k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw= 189 | k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= 190 | k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 h1:mLmhKUm1X+pXu0zXMEzNsOF5E2kKFGe5o6BZBIIqA6A= 191 | k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= 192 | k8s.io/client-go v0.17.4 h1:VVdVbpTY70jiNHS1eiFkUt7ZIJX3txd29nDxxXH4en8= 193 | k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= 194 | k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= 195 | k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 196 | k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 197 | k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 198 | k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= 199 | k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= 200 | k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= 201 | k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= 202 | k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= 203 | k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= 204 | k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= 205 | k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= 206 | k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= 207 | k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= 208 | sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= 209 | sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= 210 | sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= 211 | -------------------------------------------------------------------------------- /go-client/dynamic-client/main.go: 
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"fmt"
5 | 	"time"
6 | 
7 | 	appsv1 "k8s.io/api/apps/v1"
8 | 	corev1 "k8s.io/api/core/v1"
9 | 	//"k8s.io/apimachinery/pkg/api/errors"
10 | 	"k8s.io/apimachinery/pkg/api/resource"
11 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | 	"k8s.io/apimachinery/pkg/runtime/schema"
13 | 	"k8s.io/client-go/dynamic"
14 | 	"k8s.io/client-go/kubernetes"
15 | 	"k8s.io/client-go/rest"
16 | )
17 | 
18 | var (
19 | 	ns string = "default"
20 | )
21 | 
22 | func int32Ptr(i int32) *int32 { return &i }
23 | 
24 | func createDeploy(name, ns string) *appsv1.Deployment {
25 | 	labels := make(map[string]string)
26 | 	labels["app"] = name
27 | 
28 | 	return &appsv1.Deployment{
29 | 		ObjectMeta: metav1.ObjectMeta{
30 | 			Name:      name,
31 | 			Namespace: ns,
32 | 			Labels:    labels,
33 | 		},
34 | 		Spec: appsv1.DeploymentSpec{
35 | 			Replicas: int32Ptr(2),
36 | 			Selector: &metav1.LabelSelector{
37 | 				MatchLabels: labels,
38 | 			},
39 | 			Template: corev1.PodTemplateSpec{
40 | 				ObjectMeta: metav1.ObjectMeta{
41 | 					Labels: labels,
42 | 				},
43 | 				Spec: corev1.PodSpec{
44 | 					Containers: []corev1.Container{
45 | 						{
46 | 							Name:  "main",
47 | 							Image: "nginx:1.12",
48 | 							Ports: []corev1.ContainerPort{
49 | 								{
50 | 									Name:          "http",
51 | 									Protocol:      corev1.ProtocolTCP,
52 | 									ContainerPort: 80,
53 | 								},
54 | 							},
55 | 							Resources: corev1.ResourceRequirements{
56 | 								Limits: corev1.ResourceList{
57 | 									"cpu":    resource.MustParse("500m"),
58 | 									"memory": resource.MustParse("64Mi"), // "Mi" = mebibytes; plain "m" would mean milli-bytes
59 | 								},
60 | 								Requests: corev1.ResourceList{
61 | 									"cpu":    resource.MustParse("500m"),
62 | 									"memory": resource.MustParse("64Mi"),
63 | 								},
64 | 							},
65 | 						},
66 | 					},
67 | 				},
68 | 			},
69 | 		},
70 | 	}
71 | }
72 | 
73 | func main() {
74 | 	// creates the in-cluster config
75 | 	config, err := rest.InClusterConfig()
76 | 	if err != nil {
77 | 		panic(err.Error())
78 | 	}
79 | 	// creates the clientset
80 | 	clientset, err := kubernetes.NewForConfig(config)
81 | 	if err != nil {
82 | 		panic(err.Error())
83 | 	}
84 | 	// Create a dynamic client.
85 | 	dynamicClient, err := dynamic.NewForConfig(config)
86 | 	if err != nil {
87 | 		panic(err.Error())
88 | 	}
89 | 
90 | 	// Create Deployment.
91 | 	fmt.Println("Creating deployment...")
92 | 	deploy := createDeploy("nginx-server", ns)
93 | 	result, err := clientset.AppsV1().Deployments(ns).Create(deploy)
94 | 	if err != nil {
95 | 		panic(err)
96 | 	}
97 | 	fmt.Printf("Created deployment %q.\n", result.GetObjectMeta().GetName())
98 | 	time.Sleep(5 * time.Second)
99 | 
100 | 	// List deployments.
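101 | 	// Note: objects decoded into typed Go structs come back with an empty
102 | 	// TypeMeta (apiVersion/kind are dropped), so the GroupVersionKind printed
103 | 	// below will be blank. Compare with the dynamic client further down, which
104 | 	// returns unstructured objects and preserves that information.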
101 | fmt.Printf("Listing deployments in namespace %q:\n", ns) 102 | list, err := clientset.AppsV1().Deployments(ns).List(metav1.ListOptions{}) 103 | if err != nil { 104 | panic(err) 105 | } 106 | for _, d := range list.Items { 107 | fmt.Printf(" * %s (%d replicas)\n", d.Name, *d.Spec.Replicas) 108 | fmt.Printf(" * objectkind: %v\n", d.GetObjectKind()) 109 | fmt.Printf(" * groupversion: %v\n", d.GetObjectKind().GroupVersionKind()) 110 | } 111 | 112 | deployResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} 113 | unstructuredObj, err := dynamicClient.Resource(deployResource).Namespace(ns).Get("nginx-server", metav1.GetOptions{}) 114 | if err != nil { 115 | panic(err) 116 | } 117 | fmt.Println(" * Getting Object kind through a dynamic client...") 118 | fmt.Printf(" * objectkind: %v\n", unstructuredObj.GetObjectKind()) 119 | fmt.Printf(" * groupversion: %v\n", unstructuredObj.GetObjectKind().GroupVersionKind()) 120 | 121 | time.Sleep(10 * time.Minute) 122 | } 123 | -------------------------------------------------------------------------------- /go-client/dynamic-client/skaffold.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: skaffold/v1beta14 2 | kind: Config 3 | build: 4 | artifacts: 5 | - image: alejandrox1/cool-kube 6 | deploy: 7 | kubectl: 8 | manifests: 9 | - app.yaml 10 | -------------------------------------------------------------------------------- /go-client/informer/Dockerfile: -------------------------------------------------------------------------------- 1 | From golang:1.12 AS base 2 | 3 | ENV GO111MODULE=on 4 | WORKDIR /go/src/app 5 | ADD main.go go.mod go.sum /go/src/app/ 6 | # CGO_ENABLED=0 disables cgo - no cross-compiled dependencies. 7 | # -a forces a rebuild. 8 | # -ldflags -w disables debug. 9 | # -extldflags "-static" 10 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go \ 11 | build -a -ldflags='-w -extldflags "-static"' \ 12 | -o /go/bin/app 13 | 14 | 15 | FROM scratch 16 | COPY --from=base /go/bin/app /go/bin/app 17 | ENTRYPOINT ["/go/bin/app"] 18 | -------------------------------------------------------------------------------- /go-client/informer/Makefile: -------------------------------------------------------------------------------- 1 | GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) 2 | GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") 3 | DOCKER_IMAGE := informer$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) 4 | 5 | DOCKERFILE ?= Dockerfile 6 | DOCKER_BUILD_ARGS ?= --force-rm -f "$(DOCKERFILE)" 7 | 8 | DOCKER_BUILD := docker build $(DOCKER_BUILD_ARGS) -t "$(DOCKER_IMAGE)" . 9 | DOCKER_RUN := docker run --rm -it 10 | 11 | .PHONY: all 12 | all: push 13 | 14 | .PHONY: build 15 | build: 16 | $(DOCKER_BUILD) 17 | 18 | .PHONY: push 19 | push: build 20 | docker tag $(DOCKER_IMAGE) alejandrox1/informer:v1 21 | docker push alejandrox1/informer:v1 22 | -------------------------------------------------------------------------------- /go-client/informer/README.md: -------------------------------------------------------------------------------- 1 | # Example Informer 2 | 3 | ## Try It Out! 4 | 5 | You can either use the provided skaffold file and run `skaffold dev` to run the 6 | whole thing or you can do it yourself. 
14 | If you want to do it yourself, you will need to first build and push the
15 | container image for this app (make sure you modify the `push` recipe in the
16 | [Makefile](./Makefile) and the `image` field in the Deployment inside
17 | [app.yaml](./app.yaml)):
18 | ```bash
19 | make
20 | ```
21 | 
22 | Now apply it and watch it work:
23 | ```bash
24 | $ kubectl apply -f app.yaml
25 | ```
26 | 
27 | ```bash
28 | $ kubectl get po -w
29 | NAME                           READY   STATUS              RESTARTS   AGE
30 | demo-pod-0                     0/1     ContainerCreating   0          3s
31 | pod-manager-5d8c84d6d4-dp46v   1/1     Running             0          4s
32 | ```
33 | 
34 | When you are done, clean up:
35 | ```bash
36 | $ kubectl delete -f app.yaml
37 | serviceaccount "pod-manager" deleted
38 | role.rbac.authorization.k8s.io "pod-manager" deleted
39 | rolebinding.rbac.authorization.k8s.io "pod-manager" deleted
40 | deployment.apps "pod-manager" deleted
41 | ```
42 | 
--------------------------------------------------------------------------------
/go-client/informer/app.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # ServiceAccount for our app.
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 |   name: pod-manager
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: Role
10 | metadata:
11 |   name: pod-manager
12 |   namespace: default
13 | rules:
14 | - apiGroups:
15 |   - ""
16 |   resources:
17 |   - "pods"
18 |   verbs:
19 |   - "get"
20 |   - "list"
21 |   - "watch"
22 |   - "create"
23 |   - "update"
24 |   - "patch"
25 |   - "delete"
26 | ---
27 | # Bind "pod-manager" Role to "pod-manager" ServiceAccount.
28 | apiVersion: rbac.authorization.k8s.io/v1
29 | kind: RoleBinding
30 | metadata:
31 |   name: pod-manager
32 |   namespace: default # This only grants permissions within the "default" namespace.
33 | subjects:
34 | - kind: ServiceAccount
35 |   name: pod-manager
36 |   apiGroup: ""
37 | roleRef:
38 |   kind: Role # this must be Role or ClusterRole
39 |   name: pod-manager # this must match the name of the Role or ClusterRole you wish to bind to.
40 | apiGroup: rbac.authorization.k8s.io 41 | --- 42 | apiVersion: apps/v1 43 | kind: Deployment 44 | metadata: 45 | name: pod-manager 46 | labels: 47 | app: pod-manager 48 | spec: 49 | replicas: 1 50 | selector: 51 | matchLabels: 52 | app: pod-manager 53 | template: 54 | metadata: 55 | labels: 56 | app: pod-manager 57 | spec: 58 | serviceAccount: pod-manager 59 | containers: 60 | - name: pod-manager 61 | image: alejandrox1/informer:v1 62 | imagePullPolicy: Always 63 | -------------------------------------------------------------------------------- /go-client/informer/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/contributing-to-kubernetes/gnosis/go-client/informer 2 | 3 | go 1.13 4 | 5 | require ( 6 | k8s.io/api v0.17.4 7 | k8s.io/apimachinery v0.17.4 8 | k8s.io/client-go v0.17.4 9 | ) 10 | -------------------------------------------------------------------------------- /go-client/informer/go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 4 | github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= 5 | github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= 6 | github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= 7 | github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 8 | github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 9 | github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= 10 | github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= 11 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 12 | github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= 13 | github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 14 | github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 15 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 16 | github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 17 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 18 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 19 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 20 | github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 21 | github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= 22 | github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= 23 | github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= 24 | github.com/evanphx/json-patch v4.2.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 25 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 26 | github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 27 | github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 28 | github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= 29 | github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= 30 | github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= 31 | github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= 32 | github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= 33 | github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= 34 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 35 | github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 36 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 37 | github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 38 | github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 39 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 40 | github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 41 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 42 | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 43 | github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 44 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 45 | github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= 46 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 47 | github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= 48 | github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= 49 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 50 | github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 51 | github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 52 | github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 53 | github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 54 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= 55 | github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= 56 | github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= 57 | github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod 
h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 58 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 59 | github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= 60 | github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 61 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 62 | github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 63 | github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 64 | github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= 65 | github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 66 | github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 67 | github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= 68 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 69 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 70 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 71 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 72 | github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 73 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 74 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 75 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 76 | github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 77 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 78 | github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= 79 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 80 | github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 81 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= 82 | github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 83 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 84 | github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 85 | github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= 86 | github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 87 | github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 88 | github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 89 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 90 | github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 91 | github.com/spf13/pflag 
v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 92 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 93 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 94 | github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 95 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 96 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 97 | go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= 98 | golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 99 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 100 | golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= 101 | golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 102 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 103 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 104 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 105 | golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 106 | golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 107 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 108 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 109 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 110 | golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 111 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 112 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 113 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 114 | golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= 115 | golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 116 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 117 | golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 118 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= 119 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 120 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 121 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 122 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 123 | golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 124 | golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 125 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 126 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 127 | golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 128 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 129 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 130 | golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= 131 | golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 132 | golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 133 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 134 | golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 135 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 136 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 137 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 138 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= 139 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 140 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 141 | golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 142 | golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 143 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 144 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 145 | golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 146 | google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 147 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 148 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 149 | google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 150 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 151 | google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 152 | google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 153 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 154 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 155 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 156 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 157 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 158 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 159 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 160 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 161 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 162 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 163 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 164 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 165 | honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 166 | k8s.io/api v0.17.4 h1:HbwOhDapkguO8lTAE8OX3hdF2qp8GtpC9CW/MQATXXo= 167 | k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= 168 | k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw= 169 | k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= 170 | k8s.io/client-go v0.17.4 h1:VVdVbpTY70jiNHS1eiFkUt7ZIJX3txd29nDxxXH4en8= 171 | k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= 172 | k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 173 | k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 174 | k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 175 | k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= 176 | k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= 177 | k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= 178 | k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= 179 | k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= 180 | sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= 181 | sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= 182 | sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= 183 | -------------------------------------------------------------------------------- /go-client/informer/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | //"k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/api/resource" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/rest" 15 | "k8s.io/client-go/tools/cache" 16 | ) 17 | 18 | var ( 19 | ns string = "default" 20 | ) 21 | 22 | func createPod(name, ns string) *corev1.Pod { 23 | labels := make(map[string]string) 24 | labels["app"] = name 25 | 26 | return &corev1.Pod{ 27 | ObjectMeta: metav1.ObjectMeta{ 28 | Name: name, 29 | Namespace: ns, 30 | Labels: labels, 31 | }, 32 | Spec: corev1.PodSpec{ 33 | Containers: []corev1.Container{ 34 | { 35 | Name: "main", 36 | Image: 
"ubuntu:18.04", 37 | Command: []string{"sleep", "3600"}, 38 | Resources: corev1.ResourceRequirements{ 39 | Limits: corev1.ResourceList{ 40 | "cpu": resource.MustParse("500m"), 41 | "memory": resource.MustParse("64m"), 42 | }, 43 | Requests: corev1.ResourceList{ 44 | "cpu": resource.MustParse("500m"), 45 | "memory": resource.MustParse("64m"), 46 | }, 47 | }, 48 | }, 49 | }, 50 | }, 51 | } 52 | } 53 | 54 | func main() { 55 | // creates the in-cluster config 56 | config, err := rest.InClusterConfig() 57 | if err != nil { 58 | panic(err.Error()) 59 | } 60 | // creates the clientset 61 | clientset, err := kubernetes.NewForConfig(config) 62 | if err != nil { 63 | panic(err.Error()) 64 | } 65 | 66 | // Create informer to watch for any new pods that come up. 67 | // See https://godoc.org/k8s.io/client-go/tools/cache#NewInformer 68 | podNamesSeen := []string{} 69 | _, podController := cache.NewInformer( 70 | &cache.ListWatch{ 71 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 72 | obj, err := clientset.CoreV1().Pods(ns).List(options) 73 | return runtime.Object(obj), err 74 | }, 75 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 76 | return clientset.CoreV1().Pods(ns).Watch(options) 77 | }, 78 | }, 79 | &corev1.Pod{}, 80 | time.Millisecond*100, 81 | cache.ResourceEventHandlerFuncs{ 82 | AddFunc: func(obj interface{}) { 83 | if pod, ok := obj.(*corev1.Pod); ok { 84 | fmt.Printf(" - Pod %s %d\n", pod.Name, len(podNamesSeen)) 85 | podNamesSeen = append(podNamesSeen, pod.Name) 86 | } 87 | }, 88 | }, 89 | ) 90 | 91 | stopCh := make(chan struct{}) 92 | go podController.Run(stopCh) 93 | defer close(stopCh) 94 | 95 | i := 0 96 | for { 97 | // Create a Pod. 98 | podName := fmt.Sprintf("demo-pod-%d", i) 99 | pod := createPod(podName, ns) 100 | _, err := clientset.CoreV1().Pods(ns).Create(pod) 101 | fmt.Printf("Created Pod %s\n", podName) 102 | if err != nil { 103 | panic(err) 104 | } 105 | 106 | time.Sleep(30 * time.Second) 107 | i++ 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /go-client/informer/skaffold.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: skaffold/v1beta13 2 | kind: Config 3 | build: 4 | artifacts: 5 | - image: alejandrox1/cool-app 6 | deploy: 7 | kubectl: 8 | manifests: 9 | - app.yaml 10 | -------------------------------------------------------------------------------- /kind/README.md: -------------------------------------------------------------------------------- 1 | # kind Resources 2 | 3 | ## Table of Contents 4 | * [Contour: Kind-ly running Contour](./contour/) 5 | * [MetalLB: Using MetalLb with Kind](./metallb/) 6 | -------------------------------------------------------------------------------- /kind/contour/README.md: -------------------------------------------------------------------------------- 1 | # Kind-ly running Contour 2 | 3 | This is based on the great post by Steve Sloka, 4 | [Kind-ly running Contour](https://projectcontour.io/kindly-running-contour/). 5 | 6 | I used kind v0.6.0-alpha and Contour 0.15.0 for this tutorial. 7 | 8 | ## Creating a Kubernetes cluster 9 | 10 | First off, let's create the cluster: 11 | ``` 12 | $ kind create cluster --config ./kind.config.yaml 13 | ``` 14 | 15 | The kind config file will create 1 worker node and we are mapping ports 80 and 16 | 443 between the worker node and our host. 
17 | 18 | You should see the worker node come up as a Docker container: 19 | ``` 20 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 21 | aef85177177f kindest/node:v1.15.3 "/usr/local/bin/entr…" About a minute ago Up About a minute 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp kind-worker 22 | ``` 23 | 24 | ## [heptio/contour] 25 | 26 | ### Envoy 27 | 28 | Let's take a short detour into Envoy before we proceed with Contour. 29 | Hopefully some details will make more sense... 30 | 31 | So Envoy... 32 | 33 | One of the big features that Envoy provides is a set of management APIs. 34 | These management APIs can very easily be implemented by configuration servers 35 | (i.e., Contour) or control plane services (i.e., Istio). 36 | If the control plane / configuration server implements this set of management APIs 37 | then it is possible to manage Envoy configuration dynamically (no need to 38 | restart Envoy for changes to occur). 39 | 40 | In the Envoy world, specifically looking at the v2 management APIs, these can 41 | do endpoint discovery, cluster discovery, route discovery, listener discovery, 42 | health discovery, aggregated discovery, and secret discovery services. 43 | These APIs are often referred to as xDS (x Discovery Service) APIs. 44 | See [The universal data plane API] for more detail. 45 | 46 | ### Contour 47 | 48 | > Contour is a Kubernetes ingress controller using Lyft's Envoy proxy. https://projectcontour.io 49 | 50 | > Contour is an Ingress controller for Kubernetes that works by deploying the Envoy proxy as a reverse proxy and load balancer. Unlike other Ingress controllers, Contour supports dynamic configuration updates out of the box while maintaining a lightweight profile. 51 | 52 | In this blog post, we will use a ["split" deployment]. 53 | This sort of deployment separates Contour and Envoy resources, versus deploying 54 | them in the same Pod and having them communicate through `localhost`. 55 | 56 | We will use one of the examples from the [heptio/contour] repo: 57 | ``` 58 | # Clone the repo... 59 | $ mkdir ~/go/src/github.com/heptio 60 | $ cd ~/go/src/github.com/heptio 61 | $ git clone https://github.com/heptio/contour.git 62 | 63 | # Deploy Contour 64 | $ kubectl apply -f ~/go/src/github.com/heptio/contour/examples/ds-hostnet-split 65 | ``` 66 | 67 | ## Deploy a Sample Application 68 | 69 | Now that we have Contour deployed, we should have all requests to 70 | `localhost:80` and `localhost:443` be routed to an Envoy pod. 71 | At this point we need an application to which Envoy will route these 72 | requests. 73 | 74 | The [heptio/contour] repo contains the resources necessary to deploy [kuard]: 75 | ``` 76 | $ kubectl apply -f ~/go/src/github.com/heptio/contour/examples/example-workload/kuard-ingressroute.yaml 77 | ``` 78 | 79 | **NOTE**: You'll need to add `127.0.0.1 kuard.local` to `/etc/hosts` in order to 80 | access the sample application through your web browser at 81 | `http://kuard.local/`. 82 | 83 | ### What Just Happened?
84 | 85 | The last kubectl apply command should have created the following Kubernetes 86 | resources: 87 | * `deployment.apps/kuard` 88 | * `service/kuard` - ClusterIP type Service 89 | * `ingressroute.contour.heptio.com/kuard` 90 | 91 | `ingressroute.contour.heptio.com/kuard` looks like this: 92 | ```yaml 93 | apiVersion: contour.heptio.com/v1beta1 94 | kind: IngressRoute 95 | metadata: 96 | labels: 97 | app: kuard 98 | name: kuard 99 | namespace: default 100 | spec: 101 | virtualhost: 102 | fqdn: kuard.local 103 | routes: 104 | - match: / 105 | services: 106 | - name: kuard 107 | port: 80 108 | ``` 109 | 110 | 111 | [heptio/contour]: https://github.com/heptio/contour 112 | [The universal data plane API]: https://blog.envoyproxy.io/the-universal-data-plane-api-d15cec7a 113 | ["split" deployment]: https://projectcontour.io/contour-v014/ 114 | [kuard]: https://github.com/kubernetes-up-and-running/kuard 115 | [Contour IngressRoute]: https://github.com/heptio/contour/blob/v0.14.2/docs/ingressroute.md#ingress-to-ingressroute 116 | -------------------------------------------------------------------------------- /kind/contour/kind.config.yaml: -------------------------------------------------------------------------------- 1 | # Create a kind cluster with 1 control plane and 1 worker node. 2 | apiVersion: kind.sigs.k8s.io/v1alpha3 3 | kind: Cluster 4 | nodes: 5 | - role: control-plane 6 | - role: worker 7 | extraPortMappings: 8 | # Map port 80 on the container (worker node) to port 80 on the host. 9 | - containerPort: 80 10 | hostPort: 80 11 | listenAddress: "0.0.0.0" 12 | # Map port 443 on the container (worker node) to port 443 on the host. 13 | - containerPort: 443 14 | hostPort: 443 15 | listenAddress: "0.0.0.0" 16 | -------------------------------------------------------------------------------- /kind/metallb/README.md: -------------------------------------------------------------------------------- 1 | # Using MetalLb with Kind 2 | 3 | This is my attempt at running step by step through Duffie Cooley's 4 | [Using MetalLb with Kind] blog post. 5 | 6 | I used kind v0.6.0-alpha for this tutorial. 7 | 8 | Create your cluster: 9 | ``` 10 | $ kind create cluster --config=./kind.config.yaml 11 | ``` 12 | 13 | To ping the worker nodes in the cluster, first find their internal IPs: 14 | ``` 15 | $ kubectl get no -o wide 16 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 17 | kind-control-plane Ready master 3m22s v1.15.3 172.17.0.3 <none> Ubuntu Disco Dingo (development branch) 5.0.0-25-generic containerd://1.2.6-0ubuntu1 18 | kind-worker Ready <none> 2m44s v1.15.3 172.17.0.2 <none> Ubuntu Disco Dingo (development branch) 5.0.0-25-generic containerd://1.2.6-0ubuntu1 19 | 20 | ``` 21 | 22 | Ping the worker node: 23 | ``` 24 | $ ping 172.17.0.2 25 | PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
26 | 64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.085 ms 27 | 64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.099 ms 28 | ^C 29 | --- 172.17.0.2 ping statistics --- 30 | 2 packets transmitted, 2 received, 0% packet loss, time 25ms 31 | rtt min/avg/max/mdev = 0.085/0.092/0.099/0.007 ms 32 | ``` 33 | 34 | Inspect the Docker `bridge` network: 35 | ``` 36 | $ docker network inspect bridge 37 | ``` 38 | 39 | Specifically, we are looking for the network's IP address management 40 | configuration: 41 | ``` 42 | $ docker network inspect bridge --format='{{json .IPAM.Config}}' 43 | [{"Subnet":"172.17.0.0/16","Gateway":"172.17.0.1"}] 44 | ``` 45 | 46 | Based on the subnet, Docker's `bridge` network has the CIDR range of 47 | 172.17.0.0 - 172.17.255.255 (the Go sketch at the end of this page shows how this range is computed). 48 | 49 | Create a sample application, an "echo" server based on [InAnimaTe/echo-server]: 50 | ``` 51 | $ kubectl apply -f sample-app.yaml 52 | ``` 53 | 54 | You should see a Deployment and a Service: 55 | ``` 56 | $ kubectl get deploy,svc echo 57 | NAME READY UP-TO-DATE AVAILABLE AGE 58 | deployment.extensions/echo 3/3 3 3 13s 59 | 60 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 61 | service/echo LoadBalancer 10.104.255.80 <pending> 8080:32445/TCP 13s 62 | ``` 63 | 64 | Notice that the Service has the external IP marked as `<pending>`. 65 | 66 | From [MetalLB's installation instructions]: 67 | ``` 68 | kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.8.1/manifests/metallb.yaml 69 | ``` 70 | 71 | The following ConfigMap will give MetalLB control over the IPs from 72 | 172.17.255.1 to 172.17.255.250, and configure Layer 2 mode: 73 | ``` 74 | $ kubectl apply -f metallb-config.yaml 75 | configmap/config created 76 | ``` 77 | 78 | At this point, you should see the echo Service get an external IP: 79 | ``` 80 | $ kubectl get svc echo 81 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 82 | echo LoadBalancer 10.104.255.80 172.17.255.1 8080:32445/TCP 10m 83 | ``` 84 | 85 | Test it out: 86 | ``` 87 | curl http://172.17.255.1:8080 88 | ``` 89 | 90 | or go to http://172.17.255.1:8080/ws in your web browser :smile: 91 | 92 | 93 | ## Notes 94 | 95 | ### Layer 2 96 | 97 | Let's start with some context on what layer 2 mode means: 98 | 99 | > Layer 2, also known as the Data Link Layer, is the second level in the 100 | > seven-layer OSI reference model for network protocol design. Layer 2 is 101 | > equivalent to the link layer (the lowest layer) in the TCP/IP network model. 102 | > Layer2 is the network layer used to transfer data between adjacent network 103 | > nodes in a wide area network or between nodes on the same local area network. 104 | > [layer 2 mode] 105 | 106 | > In layer 2 mode, one machine in the cluster takes ownership of the service, 107 | > and uses standard address discovery protocols (ARP for IPv4, NDP for IPv6) 108 | > to make those IPs reachable on the local network. From the LAN’s point of 109 | > view, the announcing machine simply has multiple IP addresses. 110 | > [MetalLB docs] 111 | 112 | The machine that takes ownership of a service is a "leader". 113 | MetalLB will rely on Kubernetes to figure out the state of pods and nodes 114 | relevant to the service. This information will be used to select a leader and 115 | to act in the case that the leader goes away. 116 | 117 | > layer 2 does not implement a load-balancer. Rather, it implements a failover 118 | > mechanism so that a different node can take over should the current leader 119 | > node fail for some reason. [MetalLB layer 2 docs].
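As promised above, here is a small standalone Go sketch (not part of MetalLB or kind; the only input is the subnet string we got back from `docker network inspect`) that computes the first and last address of the bridge subnet, which is how we know the 172.17.255.1-172.17.255.250 pool fits inside it:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// The subnet reported by `docker network inspect bridge`.
	_, subnet, err := net.ParseCIDR("172.17.0.0/16")
	if err != nil {
		panic(err)
	}

	// The first address is the network number itself.
	first := subnet.IP

	// The last address is the network number with all host bits set,
	// i.e. each address byte OR'd with the inverted mask byte.
	last := make(net.IP, len(subnet.IP))
	for i := range subnet.IP {
		last[i] = subnet.IP[i] | ^subnet.Mask[i]
	}

	fmt.Printf("%s spans %s - %s\n", subnet, first, last)
	// Output: 172.17.0.0/16 spans 172.17.0.0 - 172.17.255.255
}
```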
120 | 121 | 122 | [Using MetalLb with Kind]: https://mauilion.dev/posts/kind-metallb/ 123 | [InAnimaTe/echo-server]: https://github.com/InAnimaTe/echo-server 124 | [MetalLB's installation instructions]: https://metallb.universe.tf/installation/ 125 | [layer 2 mode]: https://www.juniper.net/documentation/en_US/junos/topics/concept/l2-qfx-series-overview.html 126 | [MetalLB docs]: https://metallb.universe.tf/concepts/ 127 | [MetalLB layer 2 docs]: https://metallb.universe.tf/concepts/layer2/ 128 | -------------------------------------------------------------------------------- /kind/metallb/kind.config.yaml: -------------------------------------------------------------------------------- 1 | # Create a kind cluster with 1 control plane and 1 worker node. 2 | apiVersion: kind.sigs.k8s.io/v1alpha3 3 | kind: Cluster 4 | nodes: 5 | - role: control-plane 6 | - role: worker 7 | -------------------------------------------------------------------------------- /kind/metallb/metallb-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: default 10 | protocol: layer2 11 | # Our Docker's bridge network had the subnet listed as 172.17.0.0/16. 12 | # This means that this network can assign IP addresses in the range of 13 | # 172.17.0.0 - 172.17.255.255. 14 | addresses: 15 | - 172.17.255.1-172.17.255.250 16 | -------------------------------------------------------------------------------- /kind/metallb/sample-app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: echo 5 | labels: 6 | app: echo 7 | namespace: default 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: echo 13 | template: 14 | metadata: 15 | labels: 16 | app: echo 17 | spec: 18 | containers: 19 | - image: inanimate/echo-server 20 | imagePullPolicy: Always 21 | name: echo 22 | ports: 23 | - containerPort: 8080 24 | protocol: TCP 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: echo 30 | labels: 31 | app: echo 32 | namespace: default 33 | spec: 34 | selector: 35 | app: echo 36 | type: LoadBalancer 37 | ports: 38 | - port: 8080 39 | targetPort: 8080 40 | protocol: TCP 41 | -------------------------------------------------------------------------------- /kubernetes-101/developers/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes For Developers 2 | 3 | Welcome! 4 | 5 | This section aims to help developers do cooler things on Kubernetes, be 6 | more productive, and at times just have more fun. 7 | Some parts of this guide will run like tutorials or blog posts. 8 | Others will be random stories of things you can do, different design patterns 9 | that you can adopt, or any other tangentially related thing that a developer on 10 | Kubernetes can do or see. 11 | 12 | ## Table of Contents 13 | * [Why Kubernetes](./why-kubernetes.md) brief discussion of why Kubernetes looks 14 | the way that it does and why you may be interested in using it. 15 | 16 | * [Overview of Kubernetes](./overview.md) what is a pod and why do we need deployments?
17 | -------------------------------------------------------------------------------- /kubernetes-101/developers/examples/deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-deploy 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: ubuntu 10 | template: 11 | metadata: 12 | labels: 13 | app: ubuntu 14 | spec: 15 | containers: 16 | - image: ubuntu:18.04 17 | name: bash 18 | args: 19 | - "sleep" 20 | - "999" 21 | -------------------------------------------------------------------------------- /kubernetes-101/developers/examples/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-pod 5 | spec: 6 | containers: 7 | - image: ubuntu:18.04 8 | name: bash 9 | args: 10 | - "sleep" 11 | - "999" 12 | -------------------------------------------------------------------------------- /kubernetes-101/developers/examples/rs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: my-rs 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | app: ubuntu 10 | template: 11 | metadata: 12 | labels: 13 | app: ubuntu 14 | spec: 15 | containers: 16 | - image: ubuntu:18.04 17 | name: bash 18 | args: 19 | - "sleep" 20 | - "999" 21 | -------------------------------------------------------------------------------- /kubernetes-101/developers/overview.md: -------------------------------------------------------------------------------- 1 | # Overview of Kubernetes 2 | 3 | Let's get to it! 4 | 5 | It is possible that you may have heard about Pods, Deployments, Configmaps, and 6 | the like. 7 | If you haven't, then you are about to! :smile: 8 | 9 | **Note**: the examples we reference here can also be found in the 10 | [examples](./examples) subdirectory. 11 | 12 | ## Pods 13 | By now we know that Kubernetes is a "Production-Grade Container Scheduling and 14 | Management" system. 15 | Now, we have to figure out what to put in containers... 16 | 17 | Containers are designed for you to run 1 process in them. 18 | They are not entire machines! 19 | They should not run daemonized applications, you shouldn't have nginx sending 20 | over requests to your web app, or anything of the sort. 21 | You get one process so use it wisely! 22 | 23 | However, you may need to have multiple processes working together. 24 | Have you heard of Istio, Linkerd, or any of the other tens of service meshes? 25 | What they do is run a sidecar container with your "application". 26 | This sidecar container may handle networking, telemetry, what have you. 27 | Thus you get to focus on your app and the other container that runs with your 28 | app will help you out (no need to bake additional logic into your application). 29 | 30 | To enable this pattern, Kubernetes packages one or more containers in a `Pod`. 31 | (If you are familiar with ECS/Fargate you may have worked with tasks). 32 | 33 | The most common way of running things in Kubernetes is by using YAML. 34 | Time for an example! 35 | Let's run a container that uses ubuntu 18.04: 36 | 37 | ```yaml 38 | apiVersion: v1 39 | kind: Pod 40 | metadata: 41 | name: my-pod 42 | spec: 43 | containers: 44 | - image: ubuntu:18.04 45 | name: bash 46 | args: 47 | - "sleep" 48 | - "999" 49 | ``` 50 | 51 | Now, create the pod!
52 | ```bash 53 | kubectl apply -f examples/pod.yaml 54 | ``` 55 | 56 | You can look at the pod by running 57 | ```bash 58 | kubectl get pods 59 | ``` 60 | 61 | and delete it by running 62 | ```bash 63 | kubectl delete -f examples/pod.yaml 64 | ``` 65 | 66 | ## ReplicaSets 67 | 68 | tl;dr: pods package applications. 69 | 70 | Another thing you may see or hear every now and then is `ReplicaSets`. 71 | ReplicaSets help you scale your app. 72 | ReplicaSets have a similar structure to Pods: 73 | ```yaml 74 | apiVersion: ... 75 | kind: ... 76 | metadata: 77 | ... 78 | spec: 79 | replicas: N 80 | selector: 81 | matchLabels: 82 | ... 83 | template: 84 | ``` 85 | 86 | The `apiVersion`, `kind`, `metadata`, and `spec` are in almost all Kubernetes 87 | resources, so this you will see most of the time. 88 | Within the `spec` field, however, you see some new fields: 89 | * `replicas`: specifies the number of replicas of a given pod that you want to 90 | run. 91 | * `selector.matchLabels`: like many objects in Kubernetes, ReplicaSets are 92 | managed by a Kubernetes component (a "controller"). This 93 | controller is an application, and this application finds pods 94 | through "labels" (labels are attached to Kubernetes resources, they are 95 | essentially more metadata). So in the `selector.matchLabels` field you tell 96 | Kubernetes how the Pods for this ReplicaSet will be labeled. 97 | * `template`: this is where you specify the pod template (or pod spec!!) that the replicaset is 98 | to use to create pods! 99 | 100 | Let's now create a replicaset that runs 2 replicas of the pod in the previous 101 | section. 102 | The replicaset definition will now look like this: 103 | ```yaml 104 | apiVersion: apps/v1 105 | kind: ReplicaSet 106 | metadata: 107 | name: my-rs 108 | spec: 109 | replicas: 2 110 | selector: 111 | matchLabels: 112 | app: ubuntu 113 | template: 114 | metadata: 115 | labels: 116 | app: ubuntu 117 | spec: 118 | containers: 119 | - image: ubuntu:18.04 120 | name: bash 121 | args: 122 | - "sleep" 123 | - "999" 124 | ``` 125 | 126 | Create it 127 | ```bash 128 | kubectl apply -f examples/rs.yaml 129 | ``` 130 | 131 | Inspect it 132 | ``` 133 | kubectl get replicasets 134 | ``` 135 | 136 | Inspect its pods 137 | ``` 138 | kubectl get pods --show-labels 139 | ``` 140 | 141 | And delete it 142 | ``` 143 | kubectl delete -f examples/rs.yaml 144 | ``` 145 | 146 | ## Deployments 147 | 148 | tl;dr pods package applications and replicasets allow you to scale your pods. 149 | 150 | Deployments are where it is at! :rocket: 151 | Deployments are an abstraction on top of ReplicaSets; Deployments manage 152 | ReplicaSets! 153 | So for the most part, you want to be writing Deployments instead of 154 | ReplicaSets. 155 | 156 | We previously mentioned that replicasets manage pod replicas. 157 | Well, deployments manage replicasets! 158 | 159 | There are many features that Deployments bring to the table but the number one 160 | is the ability to manage version updates. 161 | What if you want to bump your app to its latest version? 162 | If you already have a deployment running, then that deployment will have a 163 | replicaset, and that replicaset will be "managing" some pods. 164 | If you now update your deployment to use a new container image version then the 165 | deployment will create a new replicaset, and this new replicaset will then create 166 | pods based on the newer version.
167 | As the new pods come to life, the old replicaset will slowly decrease its 168 | desired replica count, thus killing the old pods until we replace all the old 169 | pods with ones running the latest version. 170 | 171 | And now time for another example! 172 | A deployment looks very much like a replicaset. 173 | You can compare this to our previous example: 174 | 175 | ```yaml 176 | apiVersion: apps/v1 177 | kind: Deployment 178 | metadata: 179 | name: my-deploy 180 | spec: 181 | replicas: 1 182 | selector: 183 | matchLabels: 184 | app: ubuntu 185 | template: 186 | metadata: 187 | labels: 188 | app: ubuntu 189 | spec: 190 | containers: 191 | - image: ubuntu:18.04 192 | name: bash 193 | args: 194 | - "sleep" 195 | - "999" 196 | ``` 197 | 198 | You can create the deployment 199 | ```bash 200 | kubectl apply -f examples/deploy.yaml 201 | ``` 202 | 203 | You can inspect the deployment 204 | ```bash 205 | kubectl get deployments 206 | ``` 207 | 208 | You can inspect its replicaset 209 | ```bash 210 | kubectl get replicasets --show-labels 211 | ``` 212 | 213 | Its pods 214 | ```bash 215 | kubectl get pods --show-labels 216 | ``` 217 | 218 | and to clean it up 219 | ``` 220 | kubectl delete -f examples/deploy.yaml 221 | ``` 222 | -------------------------------------------------------------------------------- /kubernetes-101/developers/why-kubernetes.md: -------------------------------------------------------------------------------- 1 | # Why Kubernetes 2 | 3 | If you found yourself in here then you are either interested in getting an 4 | overview of Kubernetes or you clicked some random link on Twitter. 5 | Either way, here, we will discuss why Kubernetes looks the way it does. 6 | 7 | ## Problem Statement 8 | 9 | First thing you see when you land in 10 | [k/k](https://github.com/kubernetes/kubernetes) 11 | (the https://github.com/kubernetes/kubernetes repo) is this line: 12 | 13 | > Production-Grade Container Scheduling and Management 14 | 15 | The big idea is that you can write portable programs (portable in linux-like 16 | systems, at least) by packaging them in a container image 17 | (the thing that comes out of a `docker build`) and subsequently using that 18 | container image to run a container (the thing that happens when you `docker run`). 19 | 20 | Maybe you are familiar with 21 | [docker compose](https://docs.docker.com/compose/) and have some experience 22 | orchestrating containers. 23 | If so, you know that sometimes you have to restart docker compose if your 24 | machine goes down. 25 | Also, you don't get to scale out to multiple machines easily. 26 | This was one of the reasons why [Docker Swarm](https://github.com/docker/swarm) 27 | was invented. 28 | 29 | There are plenty of tools out there to do container orchestration! 30 | One that a lot of people may be familiar with as well is 31 | [Fargate](https://aws.amazon.com/fargate/). 32 | 33 | > AWS Fargate is a compute engine for Amazon ECS that allows you to run 34 | > containers without having to manage servers or clusters. With AWS Fargate, 35 | > you no longer have to provision, configure, and scale clusters of virtual 36 | > machines to run containers. This removes the need to choose server types, 37 | > decide when to scale your clusters, or optimize cluster packing. AWS Fargate 38 | > removes the need for you to interact with or think about servers or clusters. 39 | > Fargate lets you focus on designing and building your applications instead of 40 | > managing the infrastructure that runs them.
41 | 42 | Sounds pretty damn good, doesn't it? 43 | The big thing that we want to offer people is the capacity to develop and 44 | deploy their applications in a reliable and scalable platform that doesn't 45 | require the developer to know squat about the machines, networking, or anything 46 | of the sort. 47 | The motivation is that developers should be free to do what they need, when 48 | they need. 49 | 50 | And like all the container orchestration platforms, Kubernetes aims to do this. 51 | However, here is where the design of Kubernetes really shines. 52 | In Kubernetes, container orchestration doesn't solely imply that your 53 | containers will be kept alive and running. 54 | It also means that any service that your application needs will be available 55 | and managed by the same sort of APIs that manage your application! 56 | 57 | This may not sound like much but let's take an example. 58 | If you want to horizontally scale your application (increase the number of 59 | replicas), in Fargate, you need to set up an "app autoscaling target", a couple of 60 | "autoscaling policies", along with some "cloudwatch alarms"; see 61 | [`bradford-hamilton/terraform-ecs-fargate`](https://github.com/bradford-hamilton/terraform-ecs-fargate/blob/master/terraform/auto_scaling.tf) 62 | for an example. 63 | 64 | In Kubernetes, there is an API that is designed to help you out. 65 | 66 | ```yaml 67 | apiVersion: autoscaling/v1 68 | kind: HorizontalPodAutoscaler 69 | metadata: 70 | name: php-apache 71 | namespace: default 72 | spec: 73 | scaleTargetRef: 74 | apiVersion: apps/v1 75 | kind: Deployment 76 | name: php-apache 77 | minReplicas: 1 78 | maxReplicas: 10 79 | targetCPUUtilizationPercentage: 50 80 | ``` 81 | 82 | This holds for the most part: a lot of container orchestration solutions rely 83 | on the cloud provider to work, and you have to twiddle with those bits to get 84 | your container to do what you want. 85 | In Kubernetes, applications, infrastructure, and cloud providers are kept away 86 | from one another (unless you build an operator to manage your cloud provider's 87 | infrastructure!). 88 | This is where the thing really shines, and this is why so many people even 89 | bother. 90 | 91 | Most container orchestration platforms orchestrate the container, period. 92 | Kubernetes orchestrates everything that your application may need to be useful. 93 | -------------------------------------------------------------------------------- /kubernetes-101/operators/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/contributing-to-kubernetes/gnosis/26f5ba9a60cc4a0c6a14ce7517105e757e97d936/kubernetes-101/operators/README.md -------------------------------------------------------------------------------- /stories/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Contributing to Kubernetes 4 | 5 | ## Welcome! 6 | 7 | Here, you will find lots of examples of work that has been done in Kubernetes. 8 | These examples are made for new contributors (even those who may not be new to 9 | Go). 10 | 11 | The aim of this is to, by revisiting work that has been completed or is in progress, 12 | help you gain the hands-on experience necessary for you to become a Kubernetes maintainer!
13 | We believe that through thorough analysis and understanding of the code going into Kubernetes, 14 | we can tap into a valuable and readily available resource that helps both experienced and 15 | inexperienced contributors become more effective. 16 | 17 | ## Table of Contents 18 | * [General](./general): description of how the Kubernetes community is laid out 19 | and general information you may need to get started. 20 | * Hands On Examples 21 | * [API Machinery: Fix bug in apiserver service cidr split](./kk-pr-85968) 22 | * [Network: Intro to container networking](./networking) 23 | * [Network: Kubenet fetches gateway from CNI result instead of calculating gateway from pod cidr](./kk-pr-85993) 24 | * [Testing: Understanding E2E Tests](./e2e-tests) 25 | * [Testing: Running Node E2E Tests](./e2e-node-tests/) 26 | * [Testing: Testing Kubelets](./kubelet-testing/) 27 | 28 | ## How can I make a PR? 29 | - Create an issue, if there isn't already one. Follow the guidelines of this template to make an issue: 30 | 31 | ``` 32 | PR 33 | kubernetes/kubernetes#85898 34 | 35 | SIG Labels 36 | /sig node 37 | /sig network 38 | /area kubelet 39 | ``` 40 | 41 | - Create a feature branch (on the upstream repo) with the issue and its number, so that it matches the issue you intend to write documentation on. If the issue is `"issue #23"`, your branch name should be `issue-23`. 42 | 43 | - Make iterative changes to the branch by making PRs against that branch. 44 | 45 | - When a PR is done being reviewed, merge it via the **squash and merge** github button, into the issue-X branch (to easily avoid any merge commit messages). 46 | 47 | - Once `"issue-23"` is ready to be merged, clean it up and merge the final result into master. 48 | -------------------------------------------------------------------------------- /stories/e2e-node-tests/README.md: -------------------------------------------------------------------------------- 1 | # Running E2E Node Tests 2 | 3 | Welcome to another adventure down the Kubernetes test directory. 4 | Last time we covered 5 | [how to run Kubernetes e2e tests with kind](../e2e-tests/), this time we will 6 | cover node e2e tests. 7 | These tests are owned by 8 | [SIG Node](https://github.com/kubernetes/community/tree/master/sig-node). 9 | 10 | Node e2e tests are interesting because they test components such as the Kubelet 11 | and they do so with little more than etcd. 12 | This post will be the first in a series of SIG Node tests as we figure out a 13 | reliable and easy-ish way to run these tests locally. 14 | For this post, we will cover how to run them remotely in GCP. 15 | 16 | ## Getting Started 17 | 18 | ### Configuring Your CLI 19 | As we go down this rabbit hole, we will begin by setting up our environment for 20 | remote tests. 21 | 22 | To get started, please go through 23 | https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/e2e-node-tests.md 24 | and make sure to install gcloud, for example 25 | https://cloud.google.com/deployment-manager/docs/step-by-step-guide/installation-and-setup 26 | 27 | To see if your account is all set, do a 28 | ``` 29 | gcloud auth list 30 | ``` 31 | 32 | Else, run a `gcloud config set account <account>`. 33 | To choose a project, do a `gcloud config set project <project>`. 34 | 35 | GCP, like other cloud providers, has regions and zones. 36 | You can browse and choose any one you want but what matters is that you choose 37 | one.
38 | To do so, you will need to run a command that looks like this 39 | 40 | ``` 41 | gcloud config set compute/region us-central1 42 | gcloud config set compute/zone us-central1-f 43 | ``` 44 | 45 | ### Preparing Your VMs 46 | 47 | In GCP, in order to SSH into a machine one can add a public SSH key as part of 48 | the metadata associated with VMs. 49 | The "metadata" page lives in the Google cloud console under the "Compute 50 | Engine" section. 51 | 52 | So we will now go and add an SSH key to be able to SSH into our VMs - 53 | this is used by the official e2e test node runner in order to "upload" binaries 54 | and to "download" test results and logs. 55 | 56 | To add an SSH public key please see this guide: 57 | https://cloud.google.com/compute/docs/instances/adding-removing-ssh-keys 58 | 59 | You can add your own key (you will need to format it as stated in the link 60 | above) or you can use this gcloud command which will generate a private and a 61 | public SSH key in your machine and will add the public one as metadata for 62 | your VMs 63 | 64 | ``` 65 | gcloud compute config-ssh 66 | ``` 67 | 68 | Last step before we execute an actual test is to obtain the credentials for a 69 | service account for the node test runner to use. 70 | For that, please follow this guide: 71 | https://cloud.google.com/docs/authentication/production#cloud-console 72 | 73 | Once you have it, download it and put it somewhere. 74 | You will need to set the following environment variable for the node test 75 | runner to use: 76 | ``` 77 | export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service-account-credentials.json 78 | ``` 79 | 80 | ## Running Node Tests 81 | 82 | At this point, you should have a terminal open and be at the root of the 83 | Kubernetes repo. 84 | If you are there and you have gone through the steps above then you should be 85 | able to execute this command 86 | 87 | ``` 88 | time make test-e2e-node FOCUS="\[Flaky\]" SKIP="" PARALLELISM=1 REMOTE=true DELETE_INSTANCES=true 89 | ``` 90 | 91 | This will run Ginkgo tests labeled as "Flaky", it will run them serially, and 92 | it will delete the VM instance after the e2e tests finish. 93 | (These tests will also probably fail but we will tell you why and how to make 94 | them pass later on :microscope:). 95 | 96 | But, if you wanna see the entire set of arguments and all, run a 97 | ``` 98 | make test-e2e-node PRINT_HELP=y 99 | ``` 100 | 101 | ## Into the Rabbit Hole 102 | 103 | Ight, so you just ran a node e2e test. 104 | Running any other e2e test is just a matter of figuring out what to `FOCUS` on 105 | and what to `SKIP`. 106 | 107 | So now we will try and see what all is going on behind the scenes - this is the 108 | long section :smile:. 109 | 110 | Like normal e2e tests, the make command will compile Ginkgo from the vendor 111 | directory (along with some other dependencies) and it will compile a test 112 | binary from the source code at 113 | https://github.com/kubernetes/kubernetes/tree/master/test/e2e_node. 114 | The bash that does all this for us can be found in 115 | https://github.com/kubernetes/kubernetes/blob/master/hack/make-rules/test-e2e-node.sh. 116 | In that file you will see how parallelism is determined, where the runtime is 117 | specified, ginkgo flags, etc. 118 | 119 | The thing you should notice is this command 120 | ```bash 121 | go run test/e2e_node/runner/remote/run_remote.go 122 | ``` 123 | 124 | The e2e node test runner has a directory for local tests and another one for 125 | remote tests.
126 | For the sake of experimentation we will only focus here on the remote test 127 | runner. 128 | 129 | The test runner begins by checking the value of the test suite. 130 | The test suite is a utility interface created for remote tests, 131 | https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/remote/types.go. 132 | It sets up the environment and it runs the actual tests. 133 | 134 | In the case of node e2e tests we can use the "default" test suite, the 135 | "cadvisor" test suite, or the "conformance" one. 136 | 137 | The default test suite can be found here 138 | https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/remote/node_e2e.go 139 | and that is all that happened during our previous e2e tests. 140 | But before diving in there, we will first see what else the test runner does. 141 | 142 | One of the details we will cover here is the node configuration files. 143 | If you did run the node e2e tests in the previous example, you should have seen 144 | three tests fail. 145 | The reason why they fail is that by default, the test runner will run tests in 146 | an n1-standard-1 machine, which has 1 vCPU and about 4.5 GB of memory, while the tests we 147 | ran require at least 15 vCPUs and 18 or more GB. 148 | You can see https://github.com/kubernetes/kubernetes/issues/91263 for more 149 | details. 150 | 151 | One of the things we can tune in the test runner is the machine 152 | type, and that can be done with a configuration file that looks a bit like 153 | this: 154 | 155 | ```yaml 156 | images: 157 | cosbeta-resource1: 158 | image: cos-beta-81-12871-44-0 159 | project: cos-cloud 160 | machine: n1-standard-1 161 | metadata: "user-data 257 | if len(serviceClusterIPRangeList) > 0 { 258 | _, primaryServiceClusterCIDR, err := net.ParseCIDR(serviceClusterIPRangeList[0]) 259 | if err != nil { 260 | return options, fmt.Errorf("service-cluster-ip-range[0] is not a valid cidr") 261 | } 262 | 263 | serviceIPRange, apiServerServiceIP, err = master.ServiceIPRange(*(primaryServiceClusterCIDR)) 264 | if err != nil { 265 | return options, fmt.Errorf("error determining service IP ranges for primary service cidr: %v", err) 266 | } 267 | s.PrimaryServiceClusterIPRange = serviceIPRange 268 | } 269 | ``` 270 | 271 | Notice how `s.ServiceClusterIPRanges` is being split! 272 | The problem with this is that if there is no value provided by the user through 273 | `--service-cluster-ip-range` then `ServiceClusterIPRanges` will default to its 274 | zero value which is an empty string. 275 | 276 | **Note**: Zero values are a Go thing. From [A Tour of Go: Zero 277 | Values](https://tour.golang.org/basics/12) 278 | 279 | > Variables declared without an explicit initial value are given their zero value. 280 | > 281 | > The zero value is: 282 | > 283 | > 0 for numeric types, 284 | > false for the boolean type, and 285 | > "" (the empty string) for strings. 286 | 287 | So if the user doesn't provide a value for the service cluster IP ranges, then 288 | we will end up splitting an empty string. 289 | This will result in `serviceClusterIPRangeList` having length 1 and only 290 | containing an empty string. 291 | 292 | However, if you read through the rest, the code expects 293 | `serviceClusterIPRangeList` to have a length of 0 when the user did not specify 294 | a value!!!
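You can see this zero-value pitfall in isolation with a tiny standalone Go program (a sketch for illustration, not code from the PR):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Splitting the empty string does NOT yield an empty slice;
	// it yields a slice holding a single empty string.
	unguarded := strings.Split("", ",")
	fmt.Println(len(unguarded), unguarded) // 1 [""]

	// Guarding the split, the way the fix does, keeps the list empty
	// when the user provided no --service-cluster-ip-range value.
	serviceClusterIPRanges := "" // the flag's zero value
	serviceClusterIPRangeList := []string{}
	if serviceClusterIPRanges != "" {
		serviceClusterIPRangeList = strings.Split(serviceClusterIPRanges, ",")
	}
	fmt.Println(len(serviceClusterIPRangeList)) // 0
}
```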
295 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/apiserver.go: -------------------------------------------------------------------------------- 1 | /* 2 | Entrypoint into our command-line app. 3 | 4 | Inspired by k/k/cmd/kube-apiserver/apiserver.go 5 | */ 6 | package main 7 | 8 | import ( 9 | "os" 10 | 11 | "github.com/contributing-to-kubernetes/gnosis/stories/kk-pr-85968/example-cobra/cmd/app" 12 | ) 13 | 14 | func main() { 15 | command := app.NewAPIServerCommand() 16 | 17 | if err := command.Execute(); err != nil { 18 | os.Exit(1) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/cmd/app/options/options.go: -------------------------------------------------------------------------------- 1 | /* 2 | Inspired by k/k/cmd/kube-apiserver/app/options/options.go. 3 | */ 4 | package options 5 | 6 | import ( 7 | "net" 8 | 9 | cliflag "k8s.io/component-base/cli/flag" 10 | ) 11 | 12 | // ServerRunOptions runs a kubernetes api server. 13 | type ServerRunOptions struct { 14 | // ServiceClusterIPRange is mapped to input provided by user 15 | ServiceClusterIPRanges string 16 | // PrimaryServiceClusterIPRange and SecondaryServiceClusterIPRange are the results 17 | // of parsing ServiceClusterIPRange into actual values 18 | PrimaryServiceClusterIPRange net.IPNet 19 | SecondaryServiceClusterIPRange net.IPNet 20 | 21 | // Adding this one just for kicks. 22 | APIServerServiceIP net.IP 23 | } 24 | 25 | // NewServerRunOptions creates a new ServerRunOptions object with default parameters 26 | func NewServerRunOptions() *ServerRunOptions { 27 | s := ServerRunOptions{} 28 | 29 | return &s 30 | } 31 | 32 | // Flags returns flags for a specific APIServer by section name 33 | func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) { 34 | fs := fss.FlagSet("misc") 35 | 36 | // TODO (khenidak) change documentation as we move IPv6DualStack feature from ALPHA to BETA 37 | fs.StringVar(&s.ServiceClusterIPRanges, "service-cluster-ip-range", s.ServiceClusterIPRanges, ""+ 38 | "A CIDR notation IP range from which to assign service cluster IPs. This must not "+ 39 | "overlap with any IP ranges assigned to nodes for pods.") 40 | 41 | return fss 42 | } 43 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/cmd/app/server.go: -------------------------------------------------------------------------------- 1 | /* 2 | This is where the main startup logic of the API server lives. 3 | The flow is that we first define a cobra command in NewAPIServerCommand. 4 | This command will parse and define command-line flags (we only care about the 5 | --service-cluster-ip-range flag). 6 | Then we will mimic the real kube-apiserver's logic to parse and use this 7 | value. 8 | 9 | Inspired by k/k/cmd/kube-apiserver/app/server.go.
10 | */ 11 | package app 12 | 13 | import ( 14 | "fmt" 15 | "net" 16 | "strings" 17 | 18 | "github.com/spf13/cobra" 19 | 20 | "k8s.io/apiserver/pkg/util/term" 21 | cliflag "k8s.io/component-base/cli/flag" 22 | "k8s.io/component-base/cli/globalflag" 23 | "k8s.io/component-base/version/verflag" 24 | "k8s.io/klog" 25 | 26 | "github.com/contributing-to-kubernetes/gnosis/stories/kk-pr-85968/example-cobra/cmd/app/options" 27 | "github.com/contributing-to-kubernetes/gnosis/stories/kk-pr-85968/example-cobra/pkg/master" 28 | utilflag "github.com/contributing-to-kubernetes/gnosis/stories/kk-pr-85968/example-cobra/pkg/util/flag" 29 | ) 30 | 31 | // NewAPIServerCommand creates a *cobra.Command object with default parameters 32 | func NewAPIServerCommand() *cobra.Command { 33 | s := options.NewServerRunOptions() 34 | cmd := &cobra.Command{ 35 | Use: "kube-apiserver", 36 | Long: `The Kubernetes API server validates and configures data 37 | for the api objects which include pods, services, replicationcontrollers, and 38 | others. The API Server services REST operations and provides the frontend to the 39 | cluster's shared state through which all other components interact.`, 40 | // Run but returns an error. 41 | RunE: func(cmd *cobra.Command, args []string) error { 42 | utilflag.PrintFlags(cmd.Flags()) 43 | 44 | // set default options 45 | completedOptions, err := Complete(s) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | return Run(completedOptions) 51 | }, 52 | } 53 | 54 | fs := cmd.Flags() 55 | namedFlagSets := s.Flags() 56 | verflag.AddFlags(namedFlagSets.FlagSet("global")) 57 | globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name()) 58 | //options.AddCustomGlobalFlags(namedFlagSets.FlagSet("generic")) 59 | for _, f := range namedFlagSets.FlagSets { 60 | fs.AddFlagSet(f) 61 | } 62 | 63 | usageFmt := "Usage:\n %s\n" 64 | cols, _, _ := term.TerminalSize(cmd.OutOrStdout()) 65 | cmd.SetUsageFunc(func(cmd *cobra.Command) error { 66 | fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine()) 67 | cliflag.PrintSections(cmd.OutOrStderr(), namedFlagSets, cols) 68 | return nil 69 | }) 70 | cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) { 71 | fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine()) 72 | cliflag.PrintSections(cmd.OutOrStdout(), namedFlagSets, cols) 73 | }) 74 | 75 | return cmd 76 | } 77 | 78 | // Run runs the specified APIServer. 79 | func Run(completeOptions completedServerRunOptions) error { 80 | klog.Infof("Server run options %#v\n", completeOptions.ServerRunOptions) 81 | return nil 82 | } 83 | 84 | // completedServerRunOptions is a private wrapper that enforces a call of Complete() before Run can be invoked. 85 | type completedServerRunOptions struct { 86 | *options.ServerRunOptions 87 | } 88 | 89 | // Complete set default ServerRunOptions. 90 | // Should be called after kube-apiserver flags parsed. 91 | func Complete(s *options.ServerRunOptions) (completedServerRunOptions, error) { 92 | var options completedServerRunOptions 93 | 94 | // process s.ServiceClusterIPRange from list to Primary and Secondary 95 | // we process secondary only if provided by user 96 | apiServerServiceIP, primaryServiceIPRange, secondaryServiceIPRange, err := getServiceIPAndRanges(s.ServiceClusterIPRanges) 97 | if err != nil { 98 | return options, err 99 | } 100 | s.PrimaryServiceClusterIPRange = primaryServiceIPRange 101 | s.SecondaryServiceClusterIPRange = secondaryServiceIPRange 102 | // Just for kicks. 
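// apiServerServiceIP here is the first usable IP of the primary service CIDR
// (e.g. 10.0.0.1 for 10.0.0.0/24), as computed by master.ServiceIPRange; the
// real kube-apiserver uses that IP as the cluster IP of the `kubernetes`
// Service in the default namespace.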
103 | s.APIServerServiceIP = apiServerServiceIP 104 | 105 | options.ServerRunOptions = s 106 | return options, nil 107 | } 108 | 109 | // This is where the main action happens and where the bug in 110 | // https://github.com/kubernetes/kubernetes/pull/85937 was resolved. 111 | func getServiceIPAndRanges(serviceClusterIPRanges string) (net.IP, net.IPNet, net.IPNet, error) { 112 | serviceClusterIPRangeList := []string{} 113 | // Here is where the bug discovered in 114 | // https://github.com/kubernetes/kubernetes/pull/85937 is prevented! 115 | if serviceClusterIPRanges != "" { 116 | serviceClusterIPRangeList = strings.Split(serviceClusterIPRanges, ",") 117 | } 118 | 119 | var apiServerServiceIP net.IP 120 | var primaryServiceIPRange net.IPNet 121 | var secondaryServiceIPRange net.IPNet 122 | var err error 123 | // nothing provided by user, use default range (only applies to the Primary) 124 | if len(serviceClusterIPRangeList) == 0 { 125 | var primaryServiceClusterCIDR net.IPNet 126 | primaryServiceIPRange, apiServerServiceIP, err = master.ServiceIPRange(primaryServiceClusterCIDR) 127 | if err != nil { 128 | return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("error determining service IP ranges: %v", err) 129 | } 130 | return apiServerServiceIP, primaryServiceIPRange, net.IPNet{}, nil 131 | } 132 | 133 | if len(serviceClusterIPRangeList) > 0 { 134 | _, primaryServiceClusterCIDR, err := net.ParseCIDR(serviceClusterIPRangeList[0]) 135 | if err != nil { 136 | return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("service-cluster-ip-range[0] is not a valid cidr") 137 | } 138 | 139 | primaryServiceIPRange, apiServerServiceIP, err = master.ServiceIPRange(*(primaryServiceClusterCIDR)) 140 | if err != nil { 141 | return net.IP{}, net.IPNet{}, net.IPNet{}, fmt.Errorf("error determining service IP ranges for primary service cidr: %v", err) 142 | } 143 | } 144 | 145 | return apiServerServiceIP, primaryServiceIPRange, secondaryServiceIPRange, nil 146 | } 147 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/contributing-to-kubernetes/gnosis/stories/kk-pr-85968/example-cobra 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/spf13/cobra v0.0.7 7 | github.com/spf13/pflag v1.0.5 8 | k8s.io/apiserver v0.18.1 9 | k8s.io/component-base v0.18.1 10 | k8s.io/klog v1.0.0 11 | k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 12 | ) 13 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/pkg/kubeapiserver/options/options.go: -------------------------------------------------------------------------------- 1 | /* 2 | Inspired by k/k/pkg/kubeapiserver/options/options.go. 3 | */ 4 | package options 5 | 6 | import ( 7 | "net" 8 | ) 9 | 10 | // DefaultServiceIPCIDR is a CIDR notation of IP range from which to allocate service cluster IPs 11 | var DefaultServiceIPCIDR net.IPNet = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(24, 32)} 12 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/pkg/master/services.go: -------------------------------------------------------------------------------- 1 | /* 2 | Inspired by k/k/pkg/master/services.go. 
3 | */ 4 | package master 5 | 6 | import ( 7 | "fmt" 8 | "net" 9 | 10 | "k8s.io/klog" 11 | "k8s.io/utils/integer" 12 | utilnet "k8s.io/utils/net" 13 | 14 | kubeoptions "github.com/contributing-to-kubernetes/gnosis/stories/kk-pr-85968/example-cobra/pkg/kubeapiserver/options" 15 | ) 16 | 17 | // ServiceIPRange checks if the serviceClusterIPRange flag is nil, raising a warning if so and 18 | // setting service ip range to the default value in kubeoptions.DefaultServiceIPCIDR 19 | // for now until the default is removed per the deprecation timeline guidelines. 20 | // Returns service ip range, api server service IP, and an error 21 | func ServiceIPRange(passedServiceClusterIPRange net.IPNet) (net.IPNet, net.IP, error) { 22 | serviceClusterIPRange := passedServiceClusterIPRange 23 | if passedServiceClusterIPRange.IP == nil { 24 | klog.Warningf("No CIDR for service cluster IPs specified. Default value which was %s is deprecated and will be removed in future releases. Please specify it using --service-cluster-ip-range on kube-apiserver.", kubeoptions.DefaultServiceIPCIDR.String()) 25 | serviceClusterIPRange = kubeoptions.DefaultServiceIPCIDR 26 | } 27 | 28 | size := integer.Int64Min(utilnet.RangeSize(&serviceClusterIPRange), 1<<16) 29 | if size < 8 { 30 | return net.IPNet{}, net.IP{}, fmt.Errorf("the service cluster IP range must be at least %d IP addresses", 8) 31 | } 32 | 33 | // Select the first valid IP from ServiceClusterIPRange to use as the GenericAPIServer service IP. 34 | apiServerServiceIP, err := utilnet.GetIndexedIP(&serviceClusterIPRange, 1) 35 | if err != nil { 36 | return net.IPNet{}, net.IP{}, err 37 | } 38 | klog.V(4).Infof("Setting service IP to %q (read-write).", apiServerServiceIP) 39 | 40 | return serviceClusterIPRange, apiServerServiceIP, nil 41 | } 42 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/example-cobra/pkg/util/flag/flag.go: -------------------------------------------------------------------------------- 1 | /* 2 | Inspired by k/k/pkg/util/flag/flags.go. 3 | */ 4 | package flag 5 | 6 | import ( 7 | "github.com/spf13/pflag" 8 | "k8s.io/klog" 9 | ) 10 | 11 | // PrintFlags logs the flags in the flagset 12 | func PrintFlags(flags *pflag.FlagSet) { 13 | flags.VisitAll(func(flag *pflag.Flag) { 14 | klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value) 15 | }) 16 | } 17 | -------------------------------------------------------------------------------- /stories/kk-pr-85968/hack/update-gofmt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | find_files() { 4 | find . -not \( \ 5 | \( \ 6 | -wholename './output' \ 7 | -o -wholename './.git' \ 8 | -o -wholename './_output' \ 9 | -o -wholename './_gopath' \ 10 | \) -prune \ 11 | \) -name '*.go' 12 | } 13 | 14 | find_files | xargs gofmt -s -w 15 | -------------------------------------------------------------------------------- /stories/kk-pr-85993/README.md: -------------------------------------------------------------------------------- 1 | # Kubenet fetches gateway from CNI result instead of calculating gateway from pod cidr #85993 2 | 3 | ## Description 4 | 5 | This write up is on [PR #85993](https://github.com/kubernetes/kubernetes/pull/85993). This PR fixed a regression in kubenet that prevented pods from obtaining IP addresses. 6 | 7 | The goal of this PR was to refactor the way that kubenet fetches the gateway. 
The problem was that kubenet was having issues allocating IP addresses for pods on nodes running 1.16, as described in [Issue #84541](https://github.com/kubernetes/kubernetes/issues/84541).
8 | 
9 | ### Brief Overview of Kubenet
10 | 
11 | Kubenet is a Linux-only network plugin, meant to be basic and simple. It's not expected to implement things such as `cross-node networking` or `network policy` and is typically used in conjunction with a cloud provider that sets up routing rules for node(s).
12 | 
13 | Here are some things that `kubenet` will do:
14 | - Create a `Linux bridge` named `cbr0`
15 | - Create a `veth pair` for each `pod` connected to `cbr0`
16 | - Assign an `IP address` to the `pod` end of the `veth pair`
17 |   - This `IP address` comes from a `range` that has been assigned to the `node` through configuration or by the `controller-manager`
18 | - Assign an `MTU` to the `cbr0`
19 |   - This `MTU` matches the smallest `MTU` of an `enabled normal interface` on the `host`
20 | 
21 | If this doesn't make a lot of sense to you check out
22 | [Network: Intro to container networking](./networking) before continuing.
23 | More information can be located within the [k8s.io docs](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/). This is also the source from which this overview was derived.
24 | 
25 | ## Old Logic Breakdown
26 | 
27 | Before the PR, the gateway was derived from the pod cidr by ranging over the list of current pod cidrs:
28 | 
29 | ```go
30 | for idx, currentPodCIDR := range podCIDRs {
31 | 	_, cidr, err := net.ParseCIDR(currentPodCIDR)
32 | 	if nil != err {
33 | 		klog.Warningf("Failed to generate CNI network config with cidr %s at index:%v: %v", currentPodCIDR, idx, err)
34 | 		return
35 | 	}
36 | 	// create list of ips and gateways
37 | 	cidr.IP[len(cidr.IP)-1] += 1 // Set bridge address to first address in IPNet
38 | 	plugin.podCIDRs = append(plugin.podCIDRs, cidr)
39 | 	plugin.podGateways = append(plugin.podGateways, cidr.IP)
40 | }
41 | ```
42 | 
43 | Notice how, in the above logic, we are creating lists of `ips` and `gateways`: for each `pod cidr` we compute the bridge address and then append the resulting `cidr` and `gateway` to the plugin's lists. Let's break it down a bit.
44 | 
45 | First, we range over the `pod cidrs` to get their `IP network` value, a.k.a. [IPNet](https://golang.org/pkg/net/#IPNet), but in this context we call it the `cidr`.
46 | ```go
47 | for idx, currentPodCIDR := range podCIDRs {
48 | 	_, cidr, err := net.ParseCIDR(currentPodCIDR)
49 | ```
50 | 
51 | Now that we have the `cidr` value, let's create the list of `ips` and `gateways`. We first need to set the `bridge address`. This is done by setting it to the first address in the [IPNet](https://golang.org/pkg/net/#IPNet), a.k.a. that `cidr` value we've just mentioned:
52 | ```go
53 | cidr.IP[len(cidr.IP)-1] += 1
54 | ```
55 | 
56 | What that line above does is take an [IPNet.IP](https://golang.org/pkg/net/#IP) value (e.g. `10.0.0.0`) and increment that 32-bit address by 1 (e.g. `10.0.0.1`). Notice that we are mutating the network number from the [IPNet](https://golang.org/pkg/net/#IPNet) when we say `cidr.IP`. If you look at how [IPNet](https://golang.org/pkg/net/#IPNet) works, it's a struct containing the [IP](https://golang.org/pkg/net/#IP) (`network number`) and the [IPMask](https://golang.org/pkg/net/#IPMask). This is how that struct looks in the Go standard library:
57 | ```go
58 | type IPNet struct {
59 | 	IP   IP     // network number
60 | 	Mask IPMask // network mask
61 | }
62 | ```
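To make that increment concrete, here is a small, runnable sketch that mirrors the old derivation (the pod CIDR `10.244.1.0/24` is made up for illustration):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Parse a sample pod CIDR the way the old Event logic did.
	_, cidr, err := net.ParseCIDR("10.244.1.0/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(cidr.IP) // prints: 10.244.1.0 (the network number)

	// The old logic assumed the gateway is always the first address
	// in the range and derived it by bumping the last byte.
	cidr.IP[len(cidr.IP)-1] += 1
	fmt.Println(cidr.IP) // prints: 10.244.1.1
}
```

Note how this bakes in the assumption that the gateway is the first address of the pod CIDR; that assumption is exactly what the PR removes.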
63 | 
64 | Now we move on to this line of code:
65 | ```go
66 | plugin.podCIDRs = append(plugin.podCIDRs, cidr)
67 | ```
68 | 
69 | The `plugin` part is the method receiver of the `Event` function that we are currently in, referencing the `kubenetNetworkPlugin` struct defined earlier in the file as:
70 | ```go
71 | type kubenetNetworkPlugin struct {
72 | 	network.NoopNetworkPlugin
73 | 
74 | 	host            network.Host
75 | 	netConfig       *libcni.NetworkConfig
76 | 	loConfig        *libcni.NetworkConfig
77 | 	cniConfig       libcni.CNI
78 | 	bandwidthShaper bandwidth.Shaper
79 | 	mu              sync.Mutex //Mutex for protecting podIPs map, netConfig, and shaper initialization
80 | 	podIPs          map[kubecontainer.ContainerID]utilsets.String
81 | 	mtu             int
82 | 	execer          utilexec.Interface
83 | 	nsenterPath     string
84 | 	hairpinMode     kubeletconfig.HairpinMode
85 | 	// kubenet can use either hostportSyncer and hostportManager to implement hostports
86 | 	// Currently, if network host supports legacy features, hostportSyncer will be used,
87 | 	// otherwise, hostportManager will be used.
88 | 	hostportSyncer    hostport.HostportSyncer
89 | 	hostportSyncerv6  hostport.HostportSyncer
90 | 	hostportManager   hostport.HostPortManager
91 | 	hostportManagerv6 hostport.HostPortManager
92 | 	iptables          utiliptables.Interface
93 | 	iptablesv6        utiliptables.Interface
94 | 	sysctl            utilsysctl.Interface
95 | 	ebtables          utilebtables.Interface
96 | 	// binDirs is passed by kubelet cni-bin-dir parameter.
97 | 	// kubenet will search for CNI binaries in DefaultCNIDir first, then continue to binDirs.
98 | 	binDirs           []string
99 | 	nonMasqueradeCIDR string
100 | 	cacheDir          string
101 | 	podCIDRs          []*net.IPNet
102 | 	podGateways       []net.IP
103 | }
104 | ```
105 | 
106 | So, when we want to append the updated `cidr` to `plugin.podCIDRs` we are appending to a list of `*net.IPNet` values.
107 | 
108 | The next line also does some appending,
109 | ```go
110 | plugin.podGateways = append(plugin.podGateways, cidr.IP)
111 | ```
112 | But, instead of the whole `IPNet`, we are appending `cidr.IP` to a list of `net.IP` values.
113 | 
114 | ## New Logic Breakdown
115 | 
116 | The logic described above was replaced with changes to the same `kubenet_linux.go` file. Changes were made to the `kubenetNetworkPlugin` struct and to some of the methods on that struct:
117 | - `Event`
118 | - `setup`
119 | - `syncEbtablesDedupRules`
120 | - `getRangesConfig`
121 | 
122 | First, we removed `podGateways`:
123 | ```go
124 | podGateways []net.IP
125 | ```
126 | from the `kubenetNetworkPlugin` struct.
127 | 
128 | Next, we updated the `Event` method's logic:
129 | 
130 | ```go
131 | for idx, currentPodCIDR := range podCIDRs {
132 | 	_, cidr, err := net.ParseCIDR(currentPodCIDR)
133 | 	if nil != err {
134 | 		klog.Warningf("Failed to generate CNI network config with cidr %s at index:%v: %v", currentPodCIDR, idx, err)
135 | 		return
136 | 	}
137 | 	// create list of ips
138 | 	plugin.podCIDRs = append(plugin.podCIDRs, cidr)
139 | }
140 | ```
141 | 
142 | What should stand out is the fact that we are no longer setting up the `bridge address` here or appending `gateway` values to `podGateways` in that `kubenetNetworkPlugin` struct. We've removed that altogether. This essentially removed the dependency on `pod cidrs` to derive a `gateway` value. So, how do we get the `gateway` now?
143 | 
144 | That's where the next steps come in.
145 | 
146 | Now, let's update the `setup` method. This method is responsible for setting up networking through [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) using the given ns/name and sandbox ID. Let's start off by declaring two local slices, `podGateways` and `podCIDRs`:
147 | ```go
148 | var podGateways []net.IP
149 | var podCIDRs []net.IPNet
150 | ```
151 | 
152 | We can update these lists based on whether it is an IPv4 or an IPv6 address we're adding:
153 | ```go
154 | //TODO: v1.16 (khenidak) update NET_CONFIG_TEMPLATE to CNI version 0.3.0 or later so
155 | // that we get multiple IP addresses in the returned Result structure
156 | if res.IP4 != nil {
157 | 	ipv4 = res.IP4.IP.IP.To4()
158 | 	podGateways = append(podGateways, res.IP4.Gateway)
159 | 	podCIDRs = append(podCIDRs, net.IPNet{IP: ipv4.Mask(res.IP4.IP.Mask), Mask: res.IP4.IP.Mask})
160 | }
161 | 
162 | if res.IP6 != nil {
163 | 	ipv6 = res.IP6.IP.IP
164 | 	podGateways = append(podGateways, res.IP6.Gateway)
165 | 	podCIDRs = append(podCIDRs, net.IPNet{IP: ipv6.Mask(res.IP6.IP.Mask), Mask: res.IP6.IP.Mask})
166 | }
167 | ```
168 | 
169 | For reference, `res` is a variable defined earlier in this `setup` method as:
170 | ```go
171 | // Coerce the CNI result version
172 | res, err := cnitypes020.GetResult(resT)
173 | ```
174 | 
175 | So, when we write
176 | ```go
177 | if res.IP4 != nil
178 | ```
179 | _or_
180 | ```go
181 | if res.IP6 != nil
182 | ```
183 | we are checking the CNI result to see which IP address types were returned.
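If it helps to see this shape in isolation, here is a tiny, self-contained sketch using toy stand-ins for the CNI 0.2.0 result types (these structs are made up for illustration; the real definitions live in the CNI library):

```go
package main

import (
	"fmt"
	"net"
)

// Toy stand-ins loosely mimicking the shape of the CNI 0.2.0 result.
type ipConfig struct {
	IP      net.IPNet
	Gateway net.IP
}

type result struct {
	IP4 *ipConfig
	IP6 *ipConfig
}

func main() {
	// Pretend this came back from the CNI plugin.
	res := &result{
		IP4: &ipConfig{
			IP:      net.IPNet{IP: net.ParseIP("10.244.1.7").To4(), Mask: net.CIDRMask(24, 32)},
			Gateway: net.ParseIP("10.244.1.1").To4(),
		},
	}

	var podGateways []net.IP
	var podCIDRs []net.IPNet

	// Mirror the new logic: take the gateway straight from the result.
	if res.IP4 != nil {
		ipv4 := res.IP4.IP.IP.To4()
		podGateways = append(podGateways, res.IP4.Gateway)
		podCIDRs = append(podCIDRs, net.IPNet{IP: ipv4.Mask(res.IP4.IP.Mask), Mask: res.IP4.IP.Mask})
	}

	fmt.Println(podGateways[0], podCIDRs[0].String()) // prints: 10.244.1.1 10.244.1.0/24
}
```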
184 | 
185 | Then, at the bottom of this `setup` method, we make a call to a method that eliminates duplicate packets by configuring the rules for [ebtables](https://ebtables.netfilter.org/):
186 | ```go
187 | // configure the ebtables rules to eliminate duplicate packets by best effort
188 | plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr, podCIDRs, podGateways)
189 | ```
190 | 
191 | If you have a sharp eye, you may have noticed the change in the `syncEbtablesDedupRules` method signature. Here's how it was done previously:
192 | ```go
193 | plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr)
194 | ```
195 | 
196 | That is because we need `podCIDRs` and `podGateways` for this `syncEbtablesDedupRules` method when we do the following:
197 | ```go
198 | // per gateway rule
199 | for idx, gw := range podGateways {
200 | 	klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), gw.String(), podCIDRs[idx].String())
201 | 
202 | 	bIsV6 := netutils.IsIPv6(gw)
203 | 	IPFamily := "IPv4"
204 | 	ipSrc := "--ip-src"
205 | 	if bIsV6 {
206 | 		IPFamily = "IPv6"
207 | 		ipSrc = "--ip6-src"
208 | 	}
209 | 	commonArgs := []string{"-p", IPFamily, "-s", macAddr.String(), "-o", "veth+"}
210 | 	_, err = plugin.ebtables.EnsureRule(utilebtables.Prepend, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, gw.String(), "-j", "ACCEPT")...)
211 | 	if err != nil {
212 | 		klog.Errorf("Failed to ensure packets from cbr0 gateway:%v to be accepted with error:%v", gw.String(), err)
213 | 		return
214 | 
215 | 	}
216 | 	_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, podCIDRs[idx].String(), "-j", "DROP")...)
217 | 	if err != nil {
218 | 		klog.Errorf("Failed to ensure packets from podCidr[%v] but has mac address of cbr0 to get dropped. err:%v", podCIDRs[idx].String(), err)
219 | 		return
220 | 	}
221 | }
222 | ```
223 | 
224 | The changes made above reflect the change in `setup` where we defined the local `podCIDRs`, as opposed to appending to `plugin.podCIDRs`, the `podCIDRs` field on the `kubenetNetworkPlugin` struct. Here's the diff for that change:
225 | ```go
226 | klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), gw.String(), plugin.podCIDRs[idx].String())
227 | ```
228 | _to_
229 | ```go
230 | klog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), gw.String(), podCIDRs[idx].String())
231 | ```
232 | And similarly, a change was made in how we error. We've gone from:
233 | ```go
234 | _, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, plugin.podCIDRs[idx].String(), "-j", "DROP")...)
235 | if err != nil {
236 | 	klog.Errorf("Failed to ensure packets from podCidr[%v] but has mac address of cbr0 to get dropped. err:%v", plugin.podCIDRs[idx].String(), err)
237 | 	return
238 | }
239 | ```
240 | _to_
241 | ```go
242 | _, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, ipSrc, podCIDRs[idx].String(), "-j", "DROP")...)
243 | if err != nil {
244 | 	klog.Errorf("Failed to ensure packets from podCidr[%v] but has mac address of cbr0 to get dropped. err:%v", podCIDRs[idx].String(), err)
245 | 	return
246 | }
247 | ```
248 | 
249 | Again, updating `plugin.podCIDRs` to be `podCIDRs` instead.
250 | 
251 | Finally, let's hop out of the `setup` method and jump to `getRangesConfig`. This was a small method that was updated as well. It gets referenced in the `Event` method that we talked about earlier. In `Event`, it is used to build the JSON for the CNI network config:
252 | ```go
253 | json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), plugin.getRoutesConfig())
254 | klog.V(4).Infof("CNI network config set to %v", json)
255 | plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
256 | ```
257 | 
258 | That `getRangesConfig` method went from this:
259 | ```go
260 | // given a n cidrs assigned to nodes,
261 | // create bridge configuration that conforms to them
262 | func (plugin *kubenetNetworkPlugin) getRangesConfig() string {
263 | 	createRange := func(thisNet *net.IPNet) string {
264 | 		template := `
265 | [{
266 | "subnet": "%s",
267 | "gateway": "%s"
268 | }]`
269 | 		return fmt.Sprintf(template, thisNet.String(), thisNet.IP.String())
270 | 	}
271 | 
272 | 	ranges := make([]string, len(plugin.podCIDRs))
273 | 	for idx, thisCIDR := range plugin.podCIDRs {
274 | 		ranges[idx] = createRange(thisCIDR)
275 | 	}
276 | 	//[{range}], [{range}]
277 | 	// each range is a subnet and a gateway
278 | 	return strings.Join(ranges[:], ",")
279 | }
280 | ```
281 | _to_
282 | ```go
283 | func (plugin *kubenetNetworkPlugin) getRangesConfig() string {
284 | 	createRange := func(thisNet *net.IPNet) string {
285 | 		template := `
286 | [{
287 | "subnet": "%s"
288 | }]`
289 | 		return fmt.Sprintf(template, thisNet.String())
290 | 	}
291 | 
292 | 	ranges := make([]string, len(plugin.podCIDRs))
293 | 	for idx, thisCIDR := range plugin.podCIDRs {
294 | 		ranges[idx] = createRange(thisCIDR)
295 | 	}
296 | 	//[{range}], [{range}]
297 | 	// each range contains a subnet. gateway will be fetched from cni result
298 | 	return strings.Join(ranges[:], ",")
299 | }
300 | ```
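If you are curious what that ranges string actually looks like on the wire, here is a small standalone sketch (with made-up pod CIDRs) that mimics the new `getRangesConfig`:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// buildRanges mimics the new getRangesConfig: one subnet per range,
// no gateway (that now comes from the CNI result).
func buildRanges(podCIDRs []*net.IPNet) string {
	createRange := func(thisNet *net.IPNet) string {
		template := `
[{
"subnet": "%s"
}]`
		return fmt.Sprintf(template, thisNet.String())
	}

	ranges := make([]string, len(podCIDRs))
	for idx, thisCIDR := range podCIDRs {
		ranges[idx] = createRange(thisCIDR)
	}
	return strings.Join(ranges, ",")
}

func main() {
	var cidrs []*net.IPNet
	for _, s := range []string{"10.244.1.0/24", "fd00:10:244::/64"} {
		_, cidr, err := net.ParseCIDR(s)
		if err != nil {
			panic(err)
		}
		cidrs = append(cidrs, cidr)
	}
	// Prints one [{ "subnet": ... }] entry per CIDR, joined by commas.
	fmt.Println(buildRanges(cidrs))
}
```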
301 | 
302 | ## Conclusion
303 | 
304 | We've just learned a bit about how kubenet works, how it fetched the gateway in the past, and how it fetches it as of this PR. Bugs can be very interesting and can lead to clever solutions and new insight. The hope is that you walk away from this PR breakdown with a little more knowledge of the inner workings of Kubernetes, Go, and networking. As a community, it is important to keep on learning and sharing the knowledge we gain from our experiences.
305 | 
306 | ```
307 | "We are only as strong as we are united, as weak as we are divided."
308 | 
309 | ― J.K. Rowling, Harry Potter and the Goblet of Fire
310 | ```
311 | 
--------------------------------------------------------------------------------
/stories/kubelet-testing/README.md:
--------------------------------------------------------------------------------
1 | # Testing Kubelets
2 | 
3 | ## Table Of Contents
4 | 
5 | * [Context](#context)
6 | * [Run Services Mode: The Tales of ETCD, API Server, and Namespace Controller](#run-services-mode-the-tales-of-etcd-api-server-and-namespace-controller)
7 | * [Run Kubelet Mode: Getting a Kubelet Up and Running](#run-kubelet-mode-getting-a-kubelet-up-and-running)
8 | * [Running It All](#running-it-all)
9 | * [In Case of Failure](#in-case-of-failure)
10 | 
11 | ## Context
12 | 
13 | Welcome back!
14 | This post is a follow-up to [Running Node E2E Tests](../e2e-node-tests/).
15 | The previous post just went through the motions of how to work with node e2e
16 | tests, which is enough for contributors interested in working with SIG Node.
17 | 
18 | However, for the sake of curiosity, we will now go deeper and try to figure
19 | out exactly what needs to happen to be able to test a Kubelet.
20 | 
21 | This time we will grab some information from the local test runner over at
22 | https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/runner/local/run_local.go
23 | 
24 | This program is way smaller than the remote test runner, and at its center is
25 | this command:
26 | ```
27 | ginkgo e2e_node.test --
28 | ```
29 | 
30 | If you went through [how to run Kubernetes e2e tests with kind](../e2e-tests/),
31 | then this structure should look familiar.
32 | For Kubernetes e2e tests we compile all tests into an executable called
33 | `e2e.test` and its contents come from
34 | https://github.com/kubernetes/kubernetes/tree/master/test/e2e.
35 | In the same manner, we have a test executable whose contents now come from
36 | https://github.com/kubernetes/kubernetes/tree/master/test/e2e_node.
37 | 
38 | And if you want to compile the node e2e test executable you can do so in the
39 | same manner
40 | ```
41 | make all WHAT='test/e2e_node/e2e_node.test'
42 | ```
43 | 
44 | In the case of standard e2e tests, the test executable assumes that you have a
45 | cluster all set up and just executes the tests.
46 | Node tests are particularly interesting because on top of running tests they
47 | also get some Kubelets (and sometimes an API server or an ETCD) configured and
48 | working.
49 | 
50 | ---
51 | 
52 | ## Run Services Mode: The Tales of ETCD, API Server, and Namespace Controller
53 | 
54 | The first thing we will look at is the `--run-services-mode` flag.
55 | This flag
56 | 
57 | > If true, only run services (etcd, apiserver) in current process, and not run test.
58 | 
59 | To execute our test binary (make sure to run `make WHAT='ginkgo e2e_node.test'`
60 | beforehand):
61 | 
62 | ```
63 | OUTPUTDIR=/path/to/make/artifacts/something/like/go/src/k8s.io/kubernetes/_output/local/go/bin
64 | 
65 | sudo ${OUTPUTDIR}/ginkgo -nodes=1 -untilItFails=false ${OUTPUTDIR}/e2e_node.test -- \
66 |     --container-runtime=docker --alsologtostderr --v 4 --report-dir=/tmp/_artifacts/200524T140146 \
67 |     --node-name cool-node \
68 |     --kubelet-flags="--container-runtime=docker" \
69 |     --kubelet-flags="--network-plugin= --cni-bin-dir=" \
70 |     --run-services-mode=true
71 | ```
72 | 
73 | 
74 | We can check that our test binary is actually taking over some of our ports
75 | (for our API server and our ETCD instance)
76 | ```
77 | sudo lsof -i -P -n | grep LISTEN
78 | ```
79 | 
80 | and we can even send requests to it!!!
81 | ```
82 | curl -H 'Content-Type: application/yaml' 'http://127.0.0.1:8080/api/v1/namespaces'
83 | ```
84 | ```json
85 | {
86 |   "kind": "NamespaceList",
87 |   "apiVersion": "v1",
88 |   "metadata": {
89 |     "selfLink": "/api/v1/namespaces",
90 |     "resourceVersion": "49"
91 |   },
92 |   "items": [
93 |     {
94 |       "metadata": {
95 |         "name": "default",
96 |         "selfLink": "/api/v1/namespaces/default",
97 |         "uid": "e1db06cf-c208-4f91-811f-6fb636c01786",
98 |         "resourceVersion": "40",
99 |         "creationTimestamp": "2020-05-24T18:22:24Z",
100 |         "managedFields": [
101 |           {
102 |             "manager": "e2e_node.test",
103 |             "operation": "Update",
104 |             "apiVersion": "v1",
105 |             "time": "2020-05-24T18:22:24Z",
106 |             "fieldsType": "FieldsV1",
107 |             "fieldsV1": {"f:status":{"f:phase":{}}}
108 |           }
109 |         ]
110 |       },
111 |       "spec": {
112 |         "finalizers": [
113 |           "kubernetes"
114 |         ]
115 |       },
116 |       "status": {
117 |         "phase": "Active"
118 |       }
119 |     },
120 |     {
121 |       "metadata": {
122 |         "name": "kube-node-lease",
123 |         "selfLink": "/api/v1/namespaces/kube-node-lease",
124 |         "uid": "2e352367-afbf-4ff6-88fa-e02a3e216903",
125 |         "resourceVersion": "6",
126 |         "creationTimestamp": "2020-05-24T18:22:23Z",
127 |         "managedFields": [
128 |           {
129 |             "manager": "e2e_node.test",
130 |             "operation": "Update",
131 |             "apiVersion": "v1",
132 |             "time": "2020-05-24T18:22:23Z",
133 |             "fieldsType": "FieldsV1",
134 |             "fieldsV1": {"f:status":{"f:phase":{}}}
135 |           }
136 |         ]
137 |       },
138 |       "spec": {
139 |         "finalizers": [
140 |           "kubernetes"
141 |         ]
142 |       },
143 |       "status": {
144 |         "phase": "Active"
145 |       }
146 |     },
147 |     {
148 |       "metadata": {
149 |         "name": "kube-public",
150 |         "selfLink": "/api/v1/namespaces/kube-public",
151 |         "uid": "18076030-0f32-46f3-87c6-e522bf8ade8d",
152 |         "resourceVersion": "5",
153 |         "creationTimestamp": "2020-05-24T18:22:23Z",
154 |         "managedFields": [
155 |           {
156 |             "manager": "e2e_node.test",
157 |             "operation": "Update",
158 |             "apiVersion": "v1",
159 |             "time": "2020-05-24T18:22:23Z",
160 |             "fieldsType": "FieldsV1",
161 |             "fieldsV1": {"f:status":{"f:phase":{}}}
162 |           }
163 |         ]
164 |       },
165 |       "spec": {
166 |         "finalizers": [
167 |           "kubernetes"
168 |         ]
169 |       },
170 |       "status": {
171 |         "phase": "Active"
172 |       }
173 |     },
174 |     {
175 |       "metadata": {
176 |         "name": "kube-system",
177 |         "selfLink": "/api/v1/namespaces/kube-system",
178 |         "uid": "6a3fa478-707a-462d-a8b2-ce00ecde1eea",
179 |         "resourceVersion": "4",
180 |         "creationTimestamp": "2020-05-24T18:22:23Z",
181 |         "managedFields": [
182 |           {
183 |             "manager": "e2e_node.test",
184 |             "operation": "Update",
185 |             "apiVersion": "v1",
186 |             "time": "2020-05-24T18:22:23Z",
187 |             "fieldsType": "FieldsV1",
188 |             "fieldsV1": {"f:status":{"f:phase":{}}}
189 |           }
190 |         ]
191 |       },
192 |       "spec": {
"spec": { 193 | "finalizers": [ 194 | "kubernetes" 195 | ] 196 | }, 197 | "status": { 198 | "phase": "Active" 199 | } 200 | } 201 | ] 202 | } 203 | ``` 204 | 205 | Again, time to look under the hood. 206 | 207 | If we call our test with `--run-services-mode=true` then we are using an 208 | internal object called `e2eServices`. 209 | Let's walk the code through. 210 | 211 | [k/k/test/e2e_node/e2e_node_suite_test.go](https://github.com/kubernetes/kubernetes/blob/4e8b56e6671893757d40e2001a3c615acebc13a2/test/e2e_node/e2e_node_suite_test.go#L120-L125) 212 | is essentially the first place where our test code begins. 213 | Here, we check if the `--run-services-mode` flag is set to true - in this mode 214 | we will run the Kubernetes API server and an ETCD in the current process 215 | without executing any tests. 216 | 217 | ```go 218 | func TestE2eNode(t *testing.T) { 219 | if *runServicesMode { 220 | // If run-services-mode is specified, only run services in current process. 221 | services.RunE2EServices(t) 222 | return 223 | } 224 | ... 225 | ``` 226 | 227 | [`services.RunE2eServices()`](https://github.com/kubernetes/kubernetes/blob/4e8b56e6671893757d40e2001a3c615acebc13a2/test/e2e_node/services/services.go#L106-L118) 228 | we set the feature gates for our Kubelet and then instantiate an `e2eServices` 229 | services object and proceed to run it. 230 | ```go 231 | // Populate global DefaultFeatureGate with value from TestContext.FeatureGates. 232 | // This way, statically-linked components see the same feature gate config as the test context. 233 | if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(framework.TestContext.FeatureGates); err != nil { 234 | t.Fatal(err) 235 | } 236 | e := newE2EServices() 237 | if err := e.run(t); err != nil { 238 | klog.Fatalf("Failed to run e2e services: %v", err) 239 | } 240 | ``` 241 | 242 | From the structure of `e2eServices`, we can coroborate that it inded manages an 243 | ETCD and an API server 244 | ```go 245 | // e2eService manages e2e services in current process. 246 | type e2eServices struct { 247 | rmDirs []string 248 | // statically linked e2e services 249 | etcdServer *etcd3testing.EtcdTestServer 250 | etcdStorage *storagebackend.Config 251 | apiServer *APIServer 252 | nsController *NamespaceController 253 | } 254 | ``` 255 | See 256 | [k/k/test/e2e_node/services/internal_services.go#L30-L38](https://github.com/kubernetes/kubernetes/blob/4e8b56e6671893757d40e2001a3c615acebc13a2/test/e2e_node/services/internal_services.go#L30-L38). 257 | 258 | The first step is to start ETCD. 259 | For this we will use a handy Kubernetes library 260 | ```go 261 | import ( 262 | etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing" 263 | ) 264 | ``` 265 | [k/k/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing). 266 | 267 | 268 | After that, we start an API Server locally. 269 | This part configures the options and configuration for the Kubernetes API 270 | Server and proceeds to call the code for the API Server directly. 271 | 272 | And our last step is to run a namespace controller! 273 | The namespace controller is the component that manages the lifecycle of 274 | Kubernetes namespaces and it can be found here 275 | https://github.com/kubernetes/kubernetes/tree/master/pkg/controller/namespace. 276 | (You should definetely checkout the code for the namespace controller, 277 | specifically how the `Run()` function starts a pool of workers). 
278 | 
279 | Once all these components are running, we just wait for a termination
280 | signal.
281 | Meanwhile, you will have an API Server up and running.
282 | 
283 | ---
284 | 
285 | ## Run Kubelet Mode: Getting a Kubelet Up and Running
286 | 
287 | Same test binary, different flag for this section!
288 | We will now look at `--run-kubelet-mode=true`.
289 | 
290 | Again, the entrypoint is the
291 | [`TestE2eNode` function](https://github.com/kubernetes/kubernetes/blob/4e8b56e6671893757d40e2001a3c615acebc13a2/test/e2e_node/e2e_node_suite_test.go#L126-L130):
292 | ```go
293 | func TestE2eNode(t *testing.T) {
294 | 	...
295 | 	if *runKubeletMode {
296 | 		// If run-kubelet-mode is specified, only start kubelet.
297 | 		services.RunKubelet()
298 | 		return
299 | 	}
300 | 	...
301 | ```
302 | 
303 | In this case, the
304 | [`services.RunKubelet()`](https://github.com/kubernetes/kubernetes/blob/4e8b56e6671893757d40e2001a3c615acebc13a2/test/e2e_node/services/kubelet.go#L74-L87)
305 | function will use a public object:
306 | ```go
307 | func RunKubelet() {
308 | 	var err error
309 | 	// Enable monitorParent to make sure kubelet will receive termination signal
310 | 	// when test process exits.
311 | 	e := NewE2EServices(true /* monitorParent */)
312 | 	defer e.Stop()
313 | 	e.kubelet, err = e.startKubelet()
314 | 	if err != nil {
315 | 		klog.Fatalf("Failed to start kubelet: %v", err)
316 | 	}
317 | 	// Wait until receiving a termination signal.
318 | 	waitForTerminationSignal()
319 | }
320 | ```
321 | 
322 | This `NewE2EServices()` function creates an
323 | [`E2EServices` object](https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/services/services.go#L33-L43)
324 | ```go
325 | // E2EServices starts and stops e2e services in a separate process. The test
326 | // uses it to start and stop all e2e services.
327 | type E2EServices struct {
328 | 	// monitorParent determines whether the sub-processes should watch and die with the current
329 | 	// process.
330 | 	rmDirs        []string
331 | 	monitorParent bool
332 | 	services      *server
333 | 	kubelet       *server
334 | 	logs          logFiles
335 | }
336 | ```
337 | 
338 | This public `E2EServices` object builds on top of the internal one.
339 | One can proceed with the "run kubelet mode" and start a kubelet
340 | ```go
341 | var err error
342 | // Enable monitorParent to make sure kubelet will receive termination signal
343 | // when test process exits.
344 | e := NewE2EServices(true /* monitorParent */)
345 | defer e.Stop()
346 | e.kubelet, err = e.startKubelet()
347 | if err != nil {
348 | 	klog.Fatalf("Failed to start kubelet: %v", err)
349 | }
350 | // Wait until receiving a termination signal.
351 | waitForTerminationSignal()
352 | ```
353 | 
354 | Which comes in handy, but one can also run the API Server, ETCD, and Namespace
355 | controller, AND a Kubelet!
356 | ```go
357 | // Enable monitorParent to make sure kubelet will receive termination signal
358 | // when test process exits.
359 | e := NewE2EServices(true /* monitorParent */)
360 | defer e.Stop()
361 | 
362 | if err := e.Start(); err != nil {
363 | 	// Do something...
364 | }
365 | ```
366 | 
367 | The `Start()` function will
368 | 
369 | > Start starts the e2e services in another process by calling back into the
370 | > test binary. Returns when all e2e services are ready or an error.
371 | >
372 | > We want to statically link e2e services into the test binary, but we don't
373 | > want their glog output to pollute the test result. So we run the binary in
374 | > run-services-mode to start e2e services in another process.
375 | > The function starts 2 processes:
376 | > * internal e2e services: services which statically linked in the test binary - apiserver, etcd and namespace controller.
377 | > * kubelet: kubelet binary is outside. (We plan to move main kubelet start logic out when we have standard kubelet launcher)
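That "calling back into the test binary" trick is a classic re-exec pattern: a process re-executes its own binary with a special flag so that part of its functionality runs in a separate child process. Here is a minimal, self-contained sketch of the idea (the `-service-mode` flag name is made up; the real binary uses `--run-services-mode`):

```go
package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
)

var serviceMode = flag.Bool("service-mode", false, "run as the services child process")

func main() {
	flag.Parse()

	if *serviceMode {
		// In the real test binary this is where etcd, the API server,
		// and the namespace controller would be started.
		fmt.Println("child: running services...")
		return
	}

	// Re-execute ourselves with the special flag, like Start() does.
	cmd := exec.Command(os.Args[0], "-service-mode")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "child failed:", err)
		os.Exit(1)
	}
}
```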
378 | 
379 | And we will now go and run the API Server, ETCD, a Namespace controller, and a
380 | Kubelet using the above logic!
381 | 
382 | ---
383 | 
384 | ## Running It All
385 | 
386 | As previously stated, there is a way to run a Kubelet alongside a Kubernetes API
387 | Server, an ETCD instance, and a Namespace controller.
388 | For that we just have to trigger the part of the code that calls the
389 | `E2EServices` `Start()` function.
390 | Luckily for us, there is a flag for us to do just that without modifying
391 | any of the code.
392 | 
393 | The flag we will use is `--stop-services=false`.
394 | This flag is defined in
395 | [k/k/test/e2e_node/util.go](https://github.com/kubernetes/kubernetes/blob/4e8b56e6671893757d40e2001a3c615acebc13a2/test/e2e_node/util.go#L65)
396 | and
397 | 
398 | > If true, stop local node services after running tests
399 | 
400 | We know to use it because the entrypoint references it here
401 | [k/k/test/e2e_node/e2e_node_suite_test.go](https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/e2e_node_suite_test.go#L192-L197)
402 | ```go
403 | if *startServices {
404 | 	// If the services are expected to stop after test, they should monitor the test process.
405 | 	// If the services are expected to keep running after test, they should not monitor the test process.
406 | 	e2es = services.NewE2EServices(*stopServices)
407 | 	gomega.Expect(e2es.Start()).To(gomega.Succeed(), "should be able to start node services.")
408 | 	klog.Infof("Node services started. Running tests...")
409 | }
410 | ```
411 | 
412 | `startServices` is another flag but it is true by default, so we only need to
413 | set `stopServices` to false in order to run our test executable and leave an
414 | instance of the Kubernetes API Server, an ETCD, a Namespace controller, and a
415 | Kubelet running for us to test.
416 | 
417 | Now here comes the command that will do all this for us (keep in mind that it
418 | may fail, but below the command we will tell you at least one reason why it may
419 | fail :smile:)
420 | 
421 | ```
422 | sudo ${OUTPUTDIR}/ginkgo -nodes=1 -untilItFails=false ${OUTPUTDIR}/e2e_node.test -- \
423 |     --container-runtime=docker --alsologtostderr --v 4 --report-dir=/tmp/_artifacts/200524T140146 \
424 |     --node-name cool-node --kubelet-flags="--container-runtime=docker" \
425 |     --kubelet-flags="--network-plugin= --cni-bin-dir=" \
426 |     --stop-services=false
427 | ```
428 | 
429 | Exactly the same command as before but with `--stop-services` set to false.
430 | 
431 | ### In Case of Failure
432 | 
433 | If the above command didn't work for you and you saw a message about a failed
434 | health/readiness check, then you might have also seen the command our program
435 | used to start the kubelet.
436 | 
437 | In our case it was something like this
438 | ```
439 | sudo /usr/bin/systemd-run --unit=kubelet-20200524T194401.service --slice=runtime.slice \
440 |     --remain-after-exit /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/kubelet \
441 |     --kubeconfig /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/kubeconfig \
442 |     --root-dir /var/lib/kubelet --v 4 --logtostderr \
443 |     --dynamic-config-dir /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/dynamic-kubelet-config \
444 |     --network-plugin=kubenet --cni-bin-dir /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/cni/bin \
445 |     --cni-conf-dir /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/cni/net.d \
446 |     --cni-cache-dir /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/cni/cache \
447 |     --hostname-override cool-node --container-runtime docker \
448 |     --container-runtime-endpoint unix:///var/run/dockershim.sock \
449 |     --config /home/user/go/src/k8s.io/kubernetes/_output/local/go/bin/kubelet-config \
450 |     --container-runtime=docker --network-plugin= --cni-bin-dir=
451 | ```
452 | 
453 | In our case, the kubelet is getting started with systemd.
454 | So we tried copying and pasting that command into another terminal and then
455 | executing it.
456 | 
457 | Notice that the name of the systemd unit is passed as a value to the `--unit`
458 | flag.
459 | And since we have that info we can check its status:
460 | ```
461 | systemctl -l status kubelet-20200524T194401.service
462 | ...
463 | server.go:274] failed to run Kubelet: running with swap on is not supported, please disable swap! or set --fail-swap-on fl
464 | ```
465 | 
466 | With that complaint about swap, we proceeded to delete the current failed
467 | Kubelet systemd unit
468 | ```
469 | systemctl reset-failed kubelet-20200524T194401.service
470 | ```
471 | 
472 | We turned off swap
473 | ```
474 | sudo swapoff -a
475 | ```
476 | 
477 | (You can turn it back on with `sudo swapon -a`).
478 | 
479 | And we got it to work! :tada:
480 | 
--------------------------------------------------------------------------------
/stories/networking/README.md:
--------------------------------------------------------------------------------
1 | # Intro to Container Networking
2 | 
3 | Howdy!
4 | If you are here then you probably want to hear all about networking.
5 | 
6 | You probably have heard by now that containers are implemented through Linux
7 | namespaces.
8 | For example, one way that containers are made to feel like they are running in
9 | their own machine is that they get their own networking namespace.
10 | 
11 | You have probably then heard how network namespaces are leveraged by Docker or
12 | when running containers in Kubernetes or something of the sort.
13 | 
14 | Taking the example of Kubernetes, you may have heard that pod-to-pod
15 | communication works by creating a network bridge on the host, creating a
16 | veth pair per container, and attaching one end to the bridge and the other to
17 | the container's network namespace.
18 | 
19 | **Note:** If this sounds new to you or if you want a refresher please take a
20 | look at these resources beforehand
21 | * [An illustrated guide to Kubernetes Networking Part 1](https://itnext.io/an-illustrated-guide-to-kubernetes-networking-part-1-d1ede3322727)
22 | * [Understanding kubernetes networking: pods](https://medium.com/google-cloud/understanding-kubernetes-networking-pods-7117dd28727)
23 | 
24 | This sort of explanation is a great one, and one you may be able to live
25 | with, unless you are interested in becoming a contributor to SIG Network
26 | and actually have to write code.
27 | Here, we want to actually go through the steps and motions of setting up a
28 | container network.
29 | 
30 | With that, let's get started!
31 | 
32 | **P.S.,** this is a living document: if you have any feedback or suggestions please
33 | feel free to open up an issue or to even submit a PR to modify this doc.
34 | 
35 | ## Namespaces In The Wild
36 | 
37 | Let's start by playing around with namespaces, specifically with the network
38 | namespace functionality of Linux.
39 | 
40 | Most Linux distributions will come with the `ip` command.
41 | Turns out that we can use this command to manage network namespaces!
42 | So first off, take a look at the man page for `ip netns` and do a
43 | 
44 | `man ip netns`
45 | > IP-NETNS(8)
46 | >
47 | > NAME
48 | >        ip-netns - process network namespace management
49 | >
50 | > DESCRIPTION
51 | >        A network namespace is logically another copy of the network stack, with
52 | >        its own routes, firewall rules, and network devices.
53 | 
54 | 
55 | The first thing we can do is list the existing named network namespaces with
56 | ```
57 | ip netns list
58 | ```
59 | 
60 | At this point, you probably won't have any network namespaces.
61 | 
62 | If we explore a little bit:
63 | ```
64 | docker run --rm -it --name ubuntu ubuntu:19.04 bash
65 | ```
66 | 
67 | A simple `ip netns list` will not show you anything.
68 | If you continue reading on the man page for `ip netns` you will see that it
69 | expects the file descriptors for a network namespace to be in
70 | `/var/run/netns/$NS_NAME`.
71 | Docker doesn't do this.
72 | 
73 | The file descriptor for our container's network namespace is actually in
74 | `/proc/$CONTAINER_PID/ns/net`.
75 | By the way, you can get the container PID by running
76 | 
77 | ```
78 | docker inspect -f '{{.State.Pid}}' ubuntu
79 | ```
80 | The last argument can be the container ID or its name.
81 | In my case, my ubuntu container had the PID `23773`.
82 | 
83 | First off, we can see that this container does have its very own network
84 | namespace!
85 | To verify, check the network devices that are in your current namespace by
86 | doing an
87 | 
88 | ```
89 | ip link show
90 | ```
91 | 
92 | You may see your loopback device, `lo` (localhost :tree:), your wifi, and a
93 | docker bridge.
94 | At this point you may also see a network device called something like `vethxxx`
95 | which does correspond to a virtual ethernet device and it is probably related
96 | to the container that we are running.
97 | On my computer I see
98 | 
99 | ```
100 | $ ip link show
101 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
102 |     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
103 | ...
104 | 3: docker0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default
105 | ...
106 | 196: vethf167fbb@if195: mtu 1500 qdisc noqueue master docker0 state UP mode DEFAULT group default
107 | ...
108 | ```
109 | 
110 | In particular notice how the `vethf167fbb` network device has `master docker0`
111 | in its description, but we'll come back to bridges and veths later.
112 | For now, let's compare the network devices in the network namespace of our
113 | container.
114 | 
115 | Again, our container PID is 23773.
116 | Let's get into it!
117 | 
118 | ```
119 | $ sudo nsenter --target 23773 --net ip link show
120 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
121 |     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
122 | 195: eth0@if196: mtu 1500 qdisc noqueue state UP mode DEFAULT group default
123 | ...
124 | ```
125 | 
126 | The `--target` flag of nsenter specifies the PID and `--net` tells it that we
127 | want to use that PID's network namespace.
128 | 
129 | Also, if we wanted to have the container's network namespace show through the
130 | `ip netns` command we could create a symbolic link to it in a place where
131 | `ip netns` will be able to find it
132 | 
133 | ```
134 | sudo ln -s /proc/23773/ns/net /var/run/netns/ubuntu
135 | ```
136 | 
137 | We will now see the container's namespace!
138 | 
139 | ```
140 | $ ip netns list
141 | ubuntu
142 | ```
143 | 
144 | And we can even replicate `nsenter` with this command
145 | ```
146 | sudo ip netns exec ubuntu ip link show
147 | ```
148 | Here, we are essentially telling our system to execute the command `ip link
149 | show` in the network namespace `ubuntu`.
150 | The symbolic link may persist after you stop your container so make sure to
151 | remove it
152 | 
153 | ```
154 | sudo rm /var/run/netns/ubuntu
155 | ```
156 | 
157 | Also, if you stopped your container, you should see that the veth network
158 | device in your network namespace disappeared! :ghost:
159 | 
160 | ## Creating Your Own Container (kinda)
161 | 
162 | ### Creating Your Own Namespace
163 | 
164 | Alright, so we got to verify all those things we have heard in the wild: Docker
165 | does indeed use network namespaces, uses virtual network devices (veth pairs)
166 | to simulate real ethernet connections, and it uses a network bridge (to connect
167 | all of those veths together).
168 | 
169 | Let's see how much functionality we can get out of a network namespace.
170 | Let's begin by creating one named :cool:
171 | 
172 | ```
173 | sudo ip netns add cool
174 | ```
175 | 
176 | You should be able to list it now.
177 | ```
178 | $ ip netns list
179 | cool
180 | ```
181 | 
182 | Let's see what all is included with a network namespace.
183 | We will again use the command `ip netns exec $NAMESPACE_NAME` to execute a
184 | command inside of our `cool` namespace
185 | 
186 | ```
187 | $ sudo ip netns exec cool ip link show
188 | 1: lo: mtu 65536 qdisc noop state DOWN mode DEFAULT group default qlen 1000
189 |     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
190 | ```
191 | 
192 | Compare it with your default namespace by running a simple `ip link show`.
193 | 
194 | Let's try and run an application now, how about it?
195 | Included in this directory is a simple Go webserver that you can run by doing
196 | 
197 | ```
198 | $ go build && ./networking
199 | 2020/04/11 18:53:09 booting up server...
200 | ```
201 | 
202 | If you send a request to its home
203 | ```
204 | $ curl http://localhost:8080/
205 | hello! the time right now is 2020-04-11 18:53:40.12163246 -0400 EDT m=+30.936893278
206 | ```
207 | 
208 | It will tell you what time it is.
209 | 
210 | Now, what do you think would happen if we run our server in our `cool`
211 | namespace?
212 | Hopefully you are thinking that it would more or less happen the same as if you 213 | were running the webserver inside of a container but let's verify. 214 | 215 | To run the webserver in our `cool` namespace we can do it by running 216 | ``` 217 | $ sudo ip netns exec cool ./networking 218 | 2020/04/11 18:57:19 booting up server... 219 | ``` 220 | 221 | If you didn't stop the webserver we just ran (I didn't) you should see that we 222 | are able to run it both "normally" and inside of our `cool` namespace despite 223 | the fact that both use the same port! Same as with a container - so the media 224 | hype has been real. 225 | 226 | Now, make sure that you only have the webserver version running in your `cool` 227 | namespace. 228 | What happens if you try to curl it as normal? 229 | Well I'll tell you what happened when I did 230 | ``` 231 | $ curl http://localhost:8080/ 232 | curl: (7) Failed to connect to localhost port 8080: Connection refused 233 | ``` 234 | 235 | Which is cool, thats what we would expect if we tried to curl an application 236 | running inside of a container. 237 | So let's try the curl inside of the `cool` namespace 238 | ``` 239 | $ sudo ip netns exec cool curl http://localhost:8080/ 240 | curl: (7) Couldn't connect to server 241 | ``` 242 | 243 | Even inside of the same network namespace our curl did not work but it did 244 | return a different error. 245 | This is another feature of network namespaces, that is that you really get to 246 | confiure all of it! 247 | 248 | In our case, if you tried to run an `ip link show` to list the network devices 249 | inside of the `cool` namespace you should have seen that the loopback devices 250 | is actually turned off 251 | 252 | ``` 253 | $ sudo ip netns exec cool ip link show 254 | 1: lo: mtu 65536 qdisc noop state DOWN mode DEFAULT group default qlen 1000 255 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 256 | ``` 257 | 258 | However, we can turn it on easy enough 259 | ``` 260 | sudo ip netns exec cool ip link set up dev lo 261 | ``` 262 | 263 | And now, our lo device in our `cool` network namespace is no longer in the 264 | state "DOWN" 265 | ``` 266 | $ sudo ip netns exec cool ip link show 267 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 268 | link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 269 | ``` 270 | 271 | And a curl inside of the `cool` namespace will now work 272 | ``` 273 | $ sudo ip netns exec cool curl http://localhost:8080/ 274 | hello! the time right now is 2020-04-11 19:06:39.038751559 -0400 EDT 275 | m=+559.343944704 276 | ``` 277 | :tada: 278 | 279 | 280 | For now, let's terminate our webserver in a fancy way 281 | ``` 282 | sudo ip netns pids cool | sudo xargs kill 283 | ``` 284 | 285 | Cool party trick, huh?! 286 | 287 | Now, let's try and reach out to the internet. 288 | Let's do a simple ping to Google 289 | ``` 290 | $ sudo ip netns exec cool ping 8.8.8.8 291 | ping: connect: Network is unreachable 292 | ``` 293 | 294 | You might have guessed that I'm proposed to ping the internete because I knew 295 | it wouldn't work and it will make for a cool story to fix it and you will be 296 | correct! 297 | Didn't think that setting up something container-is would be that easy, did 298 | you? :smile: 299 | 300 | 301 | 302 | ### More About Networking 303 | 304 | NOW WE WILL START TALKING ABOUT BRIDGES AND VETHS!!! :grinning: 305 | 306 | **Creating VETH pairs** 307 | 308 | Here is where all that talk will finally begin to happen. 
309 | So, the overall idea: we want to connect our network namespace with the actual
310 | physical device in our host so that processes living inside of our network
311 | namespace can communicate with the rest of the world.
312 | 
313 | The plan is to begin by creating a virtual ethernet device pair because, as the
314 | man pages say
315 | 
316 | 
317 | `man veth`
318 | > The veth devices are virtual Ethernet devices. They can act as tunnels
319 | > between network namespaces to create a bridge to a physical network device
320 | > in another namespace, but can also be used as standalone network devices.
321 | 
322 | Which is exactly what we want to do!
323 | 
324 | So, let's create a veth pair
325 | ```
326 | sudo ip link add veth0 type veth peer name br-veth0
327 | ```
328 | Make sure to run a quick `ip link show` to see your new devices.
329 | 
330 | We will keep the `br-veth0` device for the bridge we will build and will move
331 | `veth0` to our `cool` namespace.
332 | ```
333 | sudo ip link set veth0 netns cool
334 | ```
335 | 
336 | A quick `ip link show` will only show the `br-veth0` end of your pair but in
337 | the `cool` namespace you should now be able to see the `veth0` end
338 | ```
339 | $ sudo ip netns exec cool ip link show
340 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000
341 |     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
342 | 202: veth0@if201: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000
343 | ...
344 | ```
345 | 
346 | ---
347 | 
348 | **Building Bridges**
349 | 
350 | And thus we are on our way to figuring out how to set up some sort of
351 | network connection with our network namespace.
352 | 
353 | Next step: creating a bridge
354 | ```
355 | sudo ip link add name br0 type bridge
356 | ```
357 | and to turn it on
358 | ```
359 | sudo ip link set br0 up
360 | ```
361 | 
362 | Another `ip link show` should show you that our bridge is no longer in a DOWN state.
363 | 
364 | Let's give the veth side in the namespace an IP (we want to later on
365 | configure a gateway and all those things that will allow for a request to go
366 | out into the internets) and turn it on.
367 | Before we do anything in the namespace, check out its current state with
368 | ```
369 | sudo ip netns exec cool ip address show
370 | ```
371 | Now, we will assign `veth0` an address
372 | ```
373 | sudo ip netns exec cool ip addr add 192.168.1.11/24 dev veth0
374 | ```
375 | 
376 | There should be an IPv4 address in there...
377 | ```
378 | $ sudo ip netns exec cool ip address show
379 | 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
380 |     link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
381 |     inet 127.0.0.1/8 scope host lo
382 |        valid_lft forever preferred_lft forever
383 |     inet6 ::1/128 scope host
384 |        valid_lft forever preferred_lft forever
385 | 202: veth0@if201: mtu 1500 qdisc noop state DOWN group default qlen 1000
386 |     inet 192.168.1.11/24 scope global veth0
387 |        valid_lft forever preferred_lft forever
388 | ```
389 | 
390 | And turn it on!
391 | ```
392 | sudo ip netns exec cool ip link set veth0 up
393 | ```
394 | 
395 | Next, let's move the bridge end of our veth pair to our bridge - the other end
396 | is already in the `cool` namespace.
397 | 
398 | First, let's bring the bridge side of the veth pair up
399 | ```
400 | sudo ip link set br-veth0 up
401 | ```
402 | 
403 | In order to add the bridge side of the veth pair to the bridge we have to set
404 | `br0` as its "master"
405 | ```
406 | sudo ip link set br-veth0 master br0
407 | ```
408 | 
409 | And another `ip link show` should output something like this
410 | ```
411 | $ ip link show
412 | ...
413 | 201: br-veth0@if202: mtu 1500 qdisc noqueue master br0 state UP mode DEFAULT group default qlen 1000
414 | ...
415 | 203: br0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000
416 | ...
417 | ```
418 | 
419 | Notice the "master br0 state UP" status of the `br-veth0` device.
420 | 
421 | ---
422 | 
423 | **Plugging In Our Namespace**
424 | 
425 | Next up, we need to figure out how to connect to our namespace - allow traffic
426 | in and out.
427 | What we need is to give the bridge an IP so that we can create an entry in the
428 | host's routing table.
429 | 
430 | 
431 | For this, we will give our `br0` bridge device an IP and also give it a
432 | broadcast address.
433 | For context, a broadcast address is a
434 | 
435 | > network address at which all devices connected to a multiple-access
436 | > communications network are enabled to receive datagrams, which comprise UDP
437 | > and TCP/IP packets, for instance. https://en.wikipedia.org/wiki/Broadcast_address
438 | 
439 | **NOTE:** We will begin talking about host bits and subnets and all those
440 | things, if you are not 100 percent familiar with those terms take a quick look
441 | at these resources
442 | * [How do you calculate the prefix, network, subnet, and host numbers?](https://networkengineering.stackexchange.com/questions/7106/how-do-you-calculate-the-prefix-network-subnet-and-host-numbers)
443 | 
444 | The address of the `br0` bridge will be 192.168.1.10/24 and the broadcast
445 | address will be 192.168.1.255
446 | ```
447 | sudo ip addr add 192.168.1.10/24 brd + dev br0
448 | ```
449 | The "+" symbol sets the host bits to all ones (i.e. 255).
450 | Double check with `ip address show` that it did what you expected.
451 | 
452 | 
453 | Let's see what our routing table has to say now about our bridge
454 | ```
455 | $ ip route show
456 | ...
457 | 192.168.1.0/24 dev br0 proto kernel scope link src 192.168.1.10
458 | ```
459 | 
460 | This entry tells us that any packet sent to any IP address within the subnet
461 | 192.168.1.0/24 must be sent through the network interface `br0` with
462 | 192.168.1.10 as its source IP.
463 | 
464 | "proto kernel" means that this entry was created by the kernel during autoconfiguration.
465 | "scope link" means the destination IP addresses within 192.168.1.0/24 are valid
466 | only on the device `br0`.
467 | 
468 | See
469 | [Getting and interpreting routing table information](https://diego.assencio.com/?index=d71346b8737ee449bb09496784c9b344)
470 | for more details.
471 | 
472 | Let's test our understanding of the above rule with a ping
473 | ```
474 | $ ping 192.168.1.10
475 | PING 192.168.1.10 (192.168.1.10) 56(84) bytes of data.
476 | 64 bytes from 192.168.1.10: icmp_seq=1 ttl=64 time=0.072 ms
477 | 64 bytes from 192.168.1.10: icmp_seq=2 ttl=64 time=0.048 ms
478 | ...
479 | --- 192.168.1.10 ping statistics ---
480 | 10 packets transmitted, 10 received, 0% packet loss, time 9211ms
481 | rtt min/avg/max/mdev = 0.025/0.057/0.109/0.020 ms
482 | ```
483 | 
484 | Also, it's interesting to check out what happened with our `veth0` pair
485 | ```
486 | $ sudo ip netns exec cool ip route show
487 | 192.168.1.0/24 dev veth0 proto kernel scope link src 192.168.1.11
488 | ```
489 | 
490 | And we can ping it as well!
491 | ```
492 | $ sudo ip netns exec cool ping 192.168.1.11
493 | PING 192.168.1.11 (192.168.1.11) 56(84) bytes of data.
494 | 64 bytes from 192.168.1.11: icmp_seq=1 ttl=64 time=0.026 ms
495 | 64 bytes from 192.168.1.11: icmp_seq=2 ttl=64 time=0.037 ms
496 | 64 bytes from 192.168.1.11: icmp_seq=3 ttl=64 time=0.038 ms
497 | ^C
498 | --- 192.168.1.11 ping statistics ---
499 | 3 packets transmitted, 3 received, 0% packet loss, time 2036ms
500 | rtt min/avg/max/mdev = 0.026/0.033/0.038/0.005 ms
501 | ```
502 | 
503 | However, since we only have 1 entry in our routing table for our `cool`
504 | namespace and that one is only for 192.168.1.0/24, we should still have no
505 | internet access
506 | ```
507 | $ sudo ip netns exec cool ping 8.8.8.8
508 | ping: connect: Network is unreachable
509 | ```
510 | 
511 | So let's give it a default gateway.
512 | The default gateway will match any connections that cannot be matched by any
513 | other routes (check in your default namespace to see what you use, `ip route show`).
514 | We will use the 192.168.1.10 IP address since it is the address assigned to our
515 | bridge interface and since this one is reachable from the default network
516 | namespace and from our `cool` network namespace.
517 | ```
518 | sudo ip netns exec cool ip route add default via 192.168.1.10
519 | ```
520 | 
521 | If we look at our routing table in `cool` we should now see this
522 | ```
523 | $ sudo ip netns exec cool ip route show
524 | default via 192.168.1.10 dev veth0
525 | 192.168.1.0/24 dev veth0 proto kernel scope link src 192.168.1.11
526 | ```
527 | 
528 | If we ping again we will see that we are a bit closer to our goal
529 | ```
530 | $ sudo ip netns exec cool ping 8.8.8.8
531 | PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
532 | ^C
533 | --- 8.8.8.8 ping statistics ---
534 | 19 packets transmitted, 0 received, 100% packet loss, time 18415ms
535 | ```
536 | 
537 | The hypothesis right now is that even though we gave our namespace a way to
538 | reach out into the world, there is no way for responses to be able to come back
539 | to the 192.168.1.0/24 subnet, because these packets carry a private
540 | 192.168.1.0/24 source address that the outside world does not know how to route back to.
541 | 
542 | We can test our hypothesis by using the utility traceroute.
543 | ```
544 | $ sudo ip netns exec cool traceroute -n 8.8.8.8
545 | traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 60 byte packets
546 |  1  192.168.1.10  0.050 ms  0.009 ms  0.006 ms
547 |  2  * * *
548 |  3  * * *
549 |  4  * * *
550 |  5  * * *
551 |  6  * * *
552 |  7  *^C
553 | ```
554 | As expected, packets can go out but they don't know how to come back in.
555 | 
556 | The solution for this involves first of all some IPv4 forwarding
557 | ```
558 | sudo sysctl -w net.ipv4.ip_forward=1
559 | ```
560 | 
561 | Then we have to modify some iptables :smile:
562 | We will masquerade packets leaving our 192.168.1.0/24 subnet (rewriting their
563 | source address so replies from the internets can find their way back) by
564 | adding an iptables rule in the `POSTROUTING` chain of the `nat` table
565 | 
566 | ```
567 | sudo iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -j MASQUERADE
568 | ```
569 | 
570 | And now to explicitly allow forwarding to and from our bridge interface (here
571 | `$BR` is `br0`)
572 | ```
573 | sudo iptables -A FORWARD -i $BR -j ACCEPT
574 | sudo iptables -A FORWARD -o $BR -j ACCEPT
575 | ```
576 | 
577 | And finally, time for another ping (or traceroute) to 8.8.8.8.
578 | Please give it a try :palm_tree:
579 | 
580 | ```
581 | sudo ip netns exec cool traceroute -n 8.8.8.8
582 | ```
583 | 
584 | ## Summary
585 | 
586 | In case all the commands and descriptions made you lose sight of what we were
587 | trying to achieve, this is what we have more or less built:
588 | A bridge network in the default network namespace that provides connectivity to
589 | the world through a VETH pair where one end is attached to the bridge and
590 | another to a network namespace (container).
591 | 
592 | 
593 | ![](./container-network.png)
594 | 
595 | 
596 | ## Tl;DR
597 | 
598 | ```
599 | NS="cool"
600 | VETH="veth0"
601 | PVETH="br-veth0"
602 | BR="br0"
603 | VETH_ADDR="192.168.1.11/24"
604 | BR_ADDR="192.168.1.10/24"
605 | GW="192.168.1.10"
606 | 
607 | sudo ip netns add $NS
608 | 
609 | sudo ip netns exec $NS ip link set up dev lo
610 | 
611 | sudo ip link add $PVETH type veth peer name $VETH
612 | sudo ip link set $VETH netns $NS
613 | 
614 | sudo ip link add name $BR type bridge
615 | sudo ip link set $BR up
616 | 
617 | sudo ip netns exec $NS ip addr add $VETH_ADDR dev $VETH
618 | sudo ip netns exec $NS ip link set $VETH up
619 | sudo ip link set $PVETH up
620 | 
621 | sudo ip link set $PVETH master $BR
622 | 
623 | sudo ip addr add $BR_ADDR brd + dev $BR
624 | 
625 | sudo ip netns exec $NS ip route add default via $GW
626 | 
627 | sudo sysctl -w net.ipv4.ip_forward=1
628 | sudo iptables -P FORWARD DROP
629 | sudo iptables -F FORWARD
630 | sudo iptables -t nat -F
631 | sudo iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -j MASQUERADE
632 | sudo iptables -A FORWARD -i $BR -j ACCEPT
633 | sudo iptables -A FORWARD -o $BR -j ACCEPT
634 | ```
635 | 
636 | **References:**
637 | * [Linux Switching – Interconnecting Namespaces](https://www.opencloudblog.com/?p=66)
638 | * [Using network namespaces and a virtual switch to isolate servers](https://ops.tips/blog/using-network-namespaces-and-bridge-to-isolate-servers/)
639 | * [Setup a network namespace with Internet access](https://gist.github.com/dpino/6c0dca1742093346461e11aa8f608a99)
640 | * [Deep Dive Into Docker Overlay Networks: Part 1](https://blog.d2si.io/2017/04/25/deep-dive-into-docker-overlay-networks-part-1/)
641 | * [Connect network namespace to internet](https://unix.stackexchange.com/questions/287140/connect-network-namespace-to-internet)
642 | 
643 | 
644 | ## Cleanup
645 | 
646 | ```
647 | sudo ip link del dev br-veth0
648 | ```
649 | 
650 | ```
651 | sudo ip link set br0 down
652 | sudo brctl delbr br0
653 | ```
654 | 
655 | ```
656 | sudo ip netns pids cool | sudo xargs kill
657 | sudo ip netns del cool
658 | ```
--------------------------------------------------------------------------------
/stories/networking/container-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/contributing-to-kubernetes/gnosis/26f5ba9a60cc4a0c6a14ce7517105e757e97d936/stories/networking/container-network.png -------------------------------------------------------------------------------- /stories/networking/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/contributing-to-kubernetes/gnosis/stories/networking 2 | 3 | go 1.13 4 | -------------------------------------------------------------------------------- /stories/networking/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net/http" 7 | "time" 8 | ) 9 | 10 | func handler(w http.ResponseWriter, r *http.Request) { 11 | fmt.Fprintf(w, "hello! the time right now is %v\n", time.Now()) 12 | } 13 | 14 | func main() { 15 | http.HandleFunc("/", handler) 16 | 17 | log.Println("booting up server...") 18 | log.Fatal(http.ListenAndServe("0.0.0.0:8080", nil)) 19 | } 20 | --------------------------------------------------------------------------------