34 | ```
35 |
--------------------------------------------------------------------------------
/Documentation/deploy-addons.md:
--------------------------------------------------------------------------------
1 | ## Deploy the DNS Add-on
2 |
  3 | 
  4 | This repo is not in alignment with current versions of Kubernetes and will no longer be actively maintained. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
  5 | 
  6 | For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes, and can be easily customized.
  7 | 
  8 | 
9 | The DNS add-on allows your services to have a DNS name in addition to an IP address. This is helpful for simplified service discovery between applications. More info can be found in the [Kubernetes DNS documentation][k8s-dns].
10 |
11 | Add-ons are built on the same Kubernetes components as user-submitted jobs — Pods, Replication Controllers and Services. We're going to install the DNS add-on with `kubectl`.
12 |
13 | First create `dns-addon.yml` on your local machine and replace the variable. There is a lot going on in there, so let's break it down after you create it.
14 |
15 | [k8s-dns]: https://kubernetes.io/docs/admin/dns/
16 |
17 | * Replace `${DNS_SERVICE_IP}`
18 |
19 | **dns-addon.yml**
20 |
21 | ```yaml
22 | apiVersion: v1
23 | kind: Service
24 | metadata:
25 | name: kube-dns
26 | namespace: kube-system
27 | labels:
28 | k8s-app: kube-dns
29 | kubernetes.io/cluster-service: "true"
30 | kubernetes.io/name: "KubeDNS"
31 | spec:
32 | selector:
33 | k8s-app: kube-dns
34 | clusterIP: ${DNS_SERVICE_IP}
35 | ports:
36 | - name: dns
37 | port: 53
38 | protocol: UDP
39 | - name: dns-tcp
40 | port: 53
41 | protocol: TCP
42 |
43 |
44 | ---
45 |
46 |
47 | apiVersion: v1
48 | kind: ReplicationController
49 | metadata:
50 | name: kube-dns-v20
51 | namespace: kube-system
52 | labels:
53 | k8s-app: kube-dns
54 | version: v20
55 | kubernetes.io/cluster-service: "true"
56 | spec:
57 | replicas: 1
58 | selector:
59 | k8s-app: kube-dns
60 | version: v20
61 | template:
62 | metadata:
63 | labels:
64 | k8s-app: kube-dns
65 | version: v20
66 | annotations:
67 | scheduler.alpha.kubernetes.io/critical-pod: ''
68 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
69 | spec:
70 | containers:
71 | - name: kubedns
72 | image: gcr.io/google_containers/kubedns-amd64:1.8
73 | resources:
74 | limits:
75 | memory: 170Mi
76 | requests:
77 | cpu: 100m
78 | memory: 70Mi
79 | livenessProbe:
80 | httpGet:
81 | path: /healthz-kubedns
82 | port: 8080
83 | scheme: HTTP
84 | initialDelaySeconds: 60
85 | timeoutSeconds: 5
86 | successThreshold: 1
87 | failureThreshold: 5
88 | readinessProbe:
89 | httpGet:
90 | path: /readiness
91 | port: 8081
92 | scheme: HTTP
93 | initialDelaySeconds: 3
94 | timeoutSeconds: 5
95 | args:
96 | - --domain=cluster.local.
97 | - --dns-port=10053
98 | ports:
99 | - containerPort: 10053
100 | name: dns-local
101 | protocol: UDP
102 | - containerPort: 10053
103 | name: dns-tcp-local
104 | protocol: TCP
105 | - name: dnsmasq
106 | image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
107 | livenessProbe:
108 | httpGet:
109 | path: /healthz-dnsmasq
110 | port: 8080
111 | scheme: HTTP
112 | initialDelaySeconds: 60
113 | timeoutSeconds: 5
114 | successThreshold: 1
115 | failureThreshold: 5
116 | args:
117 | - --cache-size=1000
118 | - --no-resolv
119 | - --server=127.0.0.1#10053
120 | - --log-facility=-
121 | ports:
122 | - containerPort: 53
123 | name: dns
124 | protocol: UDP
125 | - containerPort: 53
126 | name: dns-tcp
127 | protocol: TCP
128 | - name: healthz
129 | image: gcr.io/google_containers/exechealthz-amd64:1.2
130 | resources:
131 | limits:
132 | memory: 50Mi
133 | requests:
134 | cpu: 10m
135 | memory: 50Mi
136 | args:
137 | - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
138 | - --url=/healthz-dnsmasq
139 | - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
140 | - --url=/healthz-kubedns
141 | - --port=8080
142 | - --quiet
143 | ports:
144 | - containerPort: 8080
145 | protocol: TCP
146 | dnsPolicy: Default
147 | ```
148 |
149 | *Note:* The above YAML definition is based on the upstream DNS addon in the [Kubernetes addon folder][k8s-dns-addon].
150 |
151 | [k8s-dns-addon]: https://github.com/kubernetes/kubernetes/tree/v1.5.2/cluster/addons/dns
152 |
153 | This single YAML file creates two different Kubernetes objects, separated by `---`.
154 |
155 | The first object is a service that provides DNS lookups over port 53 for any service that requires it.
156 |
157 | The second object is a Replication Controller, whose pod runs several containers that work together to provide DNS lookups. There is a lot going on, but in short it uses health checks, resource limits, and intra-pod networking over multiple ports.
158 |
159 | Next, start the DNS add-on:
160 |
161 | ```sh
162 | $ kubectl create -f dns-addon.yml
163 | ```
164 |
165 | Then check that the `kube-dns-v20-*` pod is up and running:
166 |
167 | ```sh
168 | $ kubectl get pods --namespace=kube-system | grep kube-dns-v20
169 | ```
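
Once the pod is running, you can optionally verify cluster DNS from inside the cluster. The sketch below assumes a throwaway pod named `busybox` running the public `busybox` image; adjust the name and image to your environment.

```sh
# Start a throwaway pod (name and image are illustrative).
$ kubectl run busybox --image=busybox --restart=Never --command -- sleep 3600

# Resolve the Kubernetes API service by name from inside the pod.
$ kubectl exec busybox -- nslookup kubernetes.default

# Clean up.
$ kubectl delete pod busybox
```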
170 |
171 | ## Deploy the kube Dashboard Add-on
172 |
173 | Create `kube-dashboard-rc.yaml` and `kube-dashboard-svc.yaml` on your local machine.
174 |
175 | **kube-dashboard-rc.yaml**
176 |
177 |
178 | ```yaml
179 | apiVersion: v1
180 | kind: ReplicationController
181 | metadata:
182 | name: kubernetes-dashboard-v1.4.1
183 | namespace: kube-system
184 | labels:
185 | k8s-app: kubernetes-dashboard
186 | version: v1.4.1
187 | kubernetes.io/cluster-service: "true"
188 | spec:
189 | replicas: 1
190 | selector:
191 | k8s-app: kubernetes-dashboard
192 | template:
193 | metadata:
194 | labels:
195 | k8s-app: kubernetes-dashboard
196 | version: v1.4.1
197 | kubernetes.io/cluster-service: "true"
198 | annotations:
199 | scheduler.alpha.kubernetes.io/critical-pod: ''
200 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
201 | spec:
202 | containers:
203 | - name: kubernetes-dashboard
204 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.1
205 | resources:
206 | limits:
207 | cpu: 100m
208 | memory: 50Mi
209 | requests:
210 | cpu: 100m
211 | memory: 50Mi
212 | ports:
213 | - containerPort: 9090
214 | livenessProbe:
215 | httpGet:
216 | path: /
217 | port: 9090
218 | initialDelaySeconds: 30
219 | timeoutSeconds: 30
220 | ```
221 |
222 | **kube-dashboard-svc.yaml**
223 |
224 |
225 | ```yaml
226 | apiVersion: v1
227 | kind: Service
228 | metadata:
229 | name: kubernetes-dashboard
230 | namespace: kube-system
231 | labels:
232 | k8s-app: kubernetes-dashboard
233 | kubernetes.io/cluster-service: "true"
234 | spec:
235 | selector:
236 | k8s-app: kubernetes-dashboard
237 | ports:
238 | - port: 80
239 | targetPort: 9090
240 | ```
241 |
242 | Create the Replication Controller and Service.
243 |
244 | ```sh
245 | $ kubectl create -f kube-dashboard-rc.yaml
246 | $ kubectl create -f kube-dashboard-svc.yaml
247 | ```
248 |
249 | Access the dashboard by port forwarding with `kubectl`.
250 |
251 |
252 | ```sh
253 | $ kubectl get pods --namespace=kube-system
254 | $ kubectl port-forward kubernetes-dashboard-v1.4.1-SOME-ID 9090 --namespace=kube-system
255 | ```
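
Rather than copying the pod name by hand, you can capture it with a label selector and JSONPath. This is a convenience sketch; the `k8s-app=kubernetes-dashboard` label comes from the Replication Controller above.

```sh
$ DASHBOARD_POD=$(kubectl get pods --namespace=kube-system \
    -l k8s-app=kubernetes-dashboard \
    -o jsonpath='{.items[0].metadata.name}')
$ kubectl port-forward "$DASHBOARD_POD" 9090 --namespace=kube-system
```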
256 |
257 | Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/) in your browser.
258 |
259 |
260 | Now that you have a working Kubernetes cluster with a functional CLI tool, you are free to deploy Kubernetes-ready applications.
261 | 
262 | Start with a multi-tier web application (Guestbook) from the official Kubernetes documentation to visualize how the various Kubernetes components fit together: deploy the Guestbook sample app.
263 | 
264 | 
--------------------------------------------------------------------------------
/Documentation/deploy-master.md:
--------------------------------------------------------------------------------
1 | # Deploy Kubernetes Master Node(s)
2 |
  3 | 
  4 | This repo is not in alignment with current versions of Kubernetes and will no longer be actively maintained. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
  5 | 
  6 | For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes, and can be easily customized.
  7 | 
  8 | 
  9 | Boot a single CoreOS machine which will be used as the Kubernetes master node. You must use CoreOS version 962.0.0+ for the `/usr/lib/coreos/kubelet-wrapper` script to be present in the image. See [kubelet-wrapper](kubelet-wrapper.md) for more information.
10 |
11 | See the [CoreOS Documentation](https://coreos.com/os/docs/latest/) for guides on launching nodes on supported platforms.
12 |
13 | Manual configuration of the required master node services is explained below, but most of the configuration could also be done with cloud-config, aside from placing the TLS assets on disk. For security reasons, these secrets should not be stored in cloud-config.
14 |
15 | The instructions below configure the required master node components using manifests stored in `/etc/kubernetes/manifests`. The kubelet will watch this location for new or modified manifests and run them automatically.
16 |
17 | High-availability is achieved by repeating these instructions for each master node. Each of the master components is safe to run on multiple nodes.
18 |
19 | The apiserver is stateless, but handles recording the results of leader elections to etcd on behalf of other master components. The controller-manager and scheduler use the leader election mechanism to ensure only one of each is active, leaving the inactive master components ready to assume responsibility in case of failure.
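
Once `kubectl` is configured against the cluster, you can see which instance currently holds a leader lease. In this Kubernetes release the lease is typically recorded as an annotation on an `Endpoints` object in `kube-system`; the exact object and annotation names can vary by version, so treat this as a sketch.

```sh
$ kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
$ kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
```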
20 |
21 | ## Configure Service Components
22 |
23 | ### TLS Assets
24 |
25 | Create the required directory and place the keys generated previously in the following locations:
26 |
27 | ```sh
28 | $ sudo mkdir -p /etc/kubernetes/ssl
29 | ```
30 |
31 | * File: `/etc/kubernetes/ssl/ca.pem`
32 | * File: `/etc/kubernetes/ssl/apiserver.pem`
33 | * File: `/etc/kubernetes/ssl/apiserver-key.pem`
34 |
 35 | And make sure you've set proper permissions on the private key:
36 |
37 | ```sh
38 | $ sudo chmod 600 /etc/kubernetes/ssl/*-key.pem
39 | $ sudo chown root:root /etc/kubernetes/ssl/*-key.pem
40 | ```
41 |
42 | ### Network Configuration
43 |
44 | Networking is provided by Flannel and Calico.
45 |
46 | * [flannel][flannel-docs] provides a software-defined overlay network for routing traffic to/from the [pods][pod-overview]
47 | * [Calico][calico-docs] secures the overlay network by restricting traffic to/from the pods based on fine-grained network policy.
48 |
49 | *Note:* If the pod-network is being managed independently of flannel, then the flannel parts of this guide can be skipped. In this case, Calico may still be used for providing network policy. See [Kubernetes networking](kubernetes-networking.md) for more detail.
50 |
51 | We will configure flannel to source its local configuration in `/etc/flannel/options.env` and cluster-level configuration in etcd. Create this file and edit the contents:
52 |
53 | * Replace `${ADVERTISE_IP}` with this machine's publicly routable IP.
54 | * Replace `${ETCD_ENDPOINTS}`
55 |
56 | **/etc/flannel/options.env**
57 |
58 | ```sh
59 | FLANNELD_IFACE=${ADVERTISE_IP}
60 | FLANNELD_ETCD_ENDPOINTS=${ETCD_ENDPOINTS}
61 | ```
 62 | Next create a [systemd drop-in][dropins], which is a method for appending or overriding parameters of a systemd unit. In this case we're adding an `ExecStartPre` command that symlinks our configuration file into place. Create the following drop-in, which will use the above configuration when flannel starts:
63 |
64 | **/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf**
65 |
 66 | ```ini
67 | [Service]
68 | ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
69 | ```
70 |
71 | [calico-docs]: http://docs.projectcalico.org/v2.0/getting-started/kubernetes/
72 | [flannel-docs]: https://coreos.com/flannel/docs/latest/
73 | [pod-overview]: https://coreos.com/kubernetes/docs/latest/pods.html
74 | [service-overview]: https://coreos.com/kubernetes/docs/latest/services.html
75 |
76 | ### Docker Configuration
77 |
78 | In order for flannel to manage the pod network in the cluster, Docker needs to be configured to use it. All we need to do is require that flanneld is running prior to Docker starting.
79 |
80 | *Note:* If the pod-network is being managed independently, this step can be skipped. See [kubernetes networking](kubernetes-networking.md) for more detail.
81 |
82 | Again, we will use a [systemd drop-in][dropins]:
83 |
84 | **/etc/systemd/system/docker.service.d/40-flannel.conf**
85 |
 86 | ```ini
87 | [Unit]
88 | Requires=flanneld.service
89 | After=flanneld.service
90 | [Service]
91 | EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
92 | ```
93 |
94 | Create the Docker CNI Options file:
95 |
96 | **/etc/kubernetes/cni/docker_opts_cni.env**
97 |
 98 | ```sh
99 | DOCKER_OPT_BIP=""
100 | DOCKER_OPT_IPMASQ=""
101 | ```
102 |
103 | If using Flannel for networking, set up the Flannel CNI configuration as shown below. If you intend to use Calico for networking, follow the guide to [Set Up Calico For Network Policy](#set-up-calico-for-network-policy-optional) instead.
104 |
105 | **/etc/kubernetes/cni/net.d/10-flannel.conf**
106 |
107 | ```yaml
108 | {
109 | "name": "podnet",
110 | "type": "flannel",
111 | "delegate": {
112 | "isDefaultGateway": true
113 | }
114 | }
115 |
116 | ```
117 |
118 | [dropins]: https://coreos.com/os/docs/latest/using-systemd-drop-in-units.html
119 |
120 | ### Create the kubelet Unit
121 |
122 | The [kubelet][kubelet-admin] is the agent on each machine that starts and stops Pods and other machine-level tasks. The kubelet communicates with the API server (also running on the master nodes) with the TLS certificates we placed on disk earlier.
123 |
124 | On the master node, the kubelet is configured to communicate with the API server, but not register for cluster work, as shown by the `--register-schedulable=false` flag in the unit file below. This prevents user pods from being scheduled on the master nodes, and ensures cluster work is routed only to task-specific worker nodes.
125 |
126 | When using Calico, the kubelet is configured to use the Container Networking Interface (CNI) standard for networking. This makes Calico aware of each pod that is created and allows it to network the pods into the flannel overlay. Both flannel and Calico communicate via CNI interfaces to ensure the correct IP range (managed by flannel) is used for each node.
127 |
128 | Note that the kubelet running on a master node may log repeated attempts to post its status to the API server. These warnings are expected behavior and can be ignored. Future Kubernetes releases plan to [handle this common deployment consideration more gracefully](https://github.com/kubernetes/kubernetes/issues/14140#issuecomment-142126864).
129 |
130 | * Replace `${ADVERTISE_IP}` with this node's publicly routable IP.
131 | * Replace `${DNS_SERVICE_IP}`
132 | * Replace `${K8S_VER}` This will map to: `quay.io/coreos/hyperkube:${K8S_VER}` release, e.g. `v1.5.4_coreos.0`.
133 | * If using Calico for network policy
134 | - Replace `${NETWORK_PLUGIN}` with `cni`
135 | - Add the following to `RKT_RUN_ARGS=`
136 | ```
137 | --volume cni-bin,kind=host,source=/opt/cni/bin \
138 | --mount volume=cni-bin,target=/opt/cni/bin
139 | ```
140 | - Add `ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin`
141 | * Decide if you will use [additional features][rkt-opts-examples] such as:
142 | - [mounting ephemeral disks][mount-disks]
143 |     - [allow pods to mount RBD][rdb] or [iSCSI volumes][iscsi]
144 | - [allowing access to insecure container registries][insecure-registry]
145 | - [changing your CoreOS auto-update settings][update]
146 |
147 | **Note**: Anyone with access to port 10250 on a node can execute arbitrary code in a pod on the node. Information, including logs and metadata, is also disclosed on port 10255. See [securing the Kubelet API][securing-kubelet-api] for more information.
148 |
149 | **/etc/systemd/system/kubelet.service**
150 |
151 | ```ini
152 | [Service]
153 | Environment=KUBELET_IMAGE_TAG=${K8S_VER}
154 | Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
155 | --volume var-log,kind=host,source=/var/log \
156 | --mount volume=var-log,target=/var/log \
157 | --volume dns,kind=host,source=/etc/resolv.conf \
158 | --mount volume=dns,target=/etc/resolv.conf"
159 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
160 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers
161 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
162 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
163 | --api-servers=http://127.0.0.1:8080 \
164 | --register-schedulable=false \
165 | --cni-conf-dir=/etc/kubernetes/cni/net.d \
166 | --network-plugin=${NETWORK_PLUGIN} \
167 | --container-runtime=docker \
168 | --allow-privileged=true \
169 | --pod-manifest-path=/etc/kubernetes/manifests \
170 | --hostname-override=${ADVERTISE_IP} \
171 | --cluster_dns=${DNS_SERVICE_IP} \
172 | --cluster_domain=cluster.local
173 | ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
174 | Restart=always
175 | RestartSec=10
176 |
177 | [Install]
178 | WantedBy=multi-user.target
179 | ```
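
If you opted for Calico in the list above, only a few lines of the unit change. The sketch below combines the unit above with the Calico-related additions from that list (everything not shown stays the same):

```ini
# Calico-enabled kubelet (excerpt; see the list above for the full set of changes).
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
  --volume var-log,kind=host,source=/var/log \
  --mount volume=var-log,target=/var/log \
  --volume dns,kind=host,source=/etc/resolv.conf \
  --mount volume=dns,target=/etc/resolv.conf \
  --volume cni-bin,kind=host,source=/opt/cni/bin \
  --mount volume=cni-bin,target=/opt/cni/bin"
ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin
# ...and in ExecStart, pass --network-plugin=cni instead of ${NETWORK_PLUGIN}.
```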
180 |
181 | ### Set Up the kube-apiserver Pod
182 |
183 | The API server is where most of the magic happens. It is stateless by design and takes in API requests, processes them and stores the result in etcd if needed, and then returns the result of the request.
184 |
185 | We're going to use a unique feature of the kubelet to launch a Pod that runs the API server. Above we configured the kubelet to watch a local directory for pods to run with the `--pod-manifest-path=/etc/kubernetes/manifests` flag. All we need to do is place our Pod manifest in that location, and the kubelet will make sure it stays running, just as if the Pod was submitted via the API. The cool trick here is that we don't have an API running yet, but the Pod will function the exact same way, which simplifies troubleshooting later on.
186 |
187 | If this is your first time looking at a Pod manifest, don't worry, they aren't all this complicated. But, this shows off the power and flexibility of the Pod concept. Create `/etc/kubernetes/manifests/kube-apiserver.yaml` with the following settings:
188 |
189 | * Replace `${ETCD_ENDPOINTS}`
190 | * Replace `${SERVICE_IP_RANGE}`
191 | * Replace `${ADVERTISE_IP}` with this node's publicly routable IP.
192 |
193 | **/etc/kubernetes/manifests/kube-apiserver.yaml**
194 |
195 | ```yaml
196 | apiVersion: v1
197 | kind: Pod
198 | metadata:
199 | name: kube-apiserver
200 | namespace: kube-system
201 | spec:
202 | hostNetwork: true
203 | containers:
204 | - name: kube-apiserver
205 | image: quay.io/coreos/hyperkube:v1.5.4_coreos.0
206 | command:
207 | - /hyperkube
208 | - apiserver
209 | - --bind-address=0.0.0.0
210 | - --etcd-servers=${ETCD_ENDPOINTS}
211 | - --allow-privileged=true
212 | - --service-cluster-ip-range=${SERVICE_IP_RANGE}
213 | - --secure-port=443
214 | - --advertise-address=${ADVERTISE_IP}
215 | - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
216 | - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
217 | - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
218 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem
219 | - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
220 | - --runtime-config=extensions/v1beta1/networkpolicies=true
221 | - --anonymous-auth=false
222 | livenessProbe:
223 | httpGet:
224 | host: 127.0.0.1
225 | port: 8080
226 | path: /healthz
227 | initialDelaySeconds: 15
228 | timeoutSeconds: 15
229 | ports:
230 | - containerPort: 443
231 | hostPort: 443
232 | name: https
233 | - containerPort: 8080
234 | hostPort: 8080
235 | name: local
236 | volumeMounts:
237 | - mountPath: /etc/kubernetes/ssl
238 | name: ssl-certs-kubernetes
239 | readOnly: true
240 | - mountPath: /etc/ssl/certs
241 | name: ssl-certs-host
242 | readOnly: true
243 | volumes:
244 | - hostPath:
245 | path: /etc/kubernetes/ssl
246 | name: ssl-certs-kubernetes
247 | - hostPath:
248 | path: /usr/share/ca-certificates
249 | name: ssl-certs-host
250 | ```
251 |
252 | ### Set Up the kube-proxy Pod
253 |
254 | We're going to run the proxy just like we did the API server. The proxy is responsible for directing traffic destined for specific services and pods to the correct location. The proxy communicates with the API server periodically to keep up to date.
255 |
256 | Both the master and worker nodes in your cluster will run the proxy.
257 |
258 | All you have to do is create `/etc/kubernetes/manifests/kube-proxy.yaml`; there are no settings that need to be configured.
259 |
260 | **/etc/kubernetes/manifests/kube-proxy.yaml**
261 |
262 | ```yaml
263 | apiVersion: v1
264 | kind: Pod
265 | metadata:
266 | name: kube-proxy
267 | namespace: kube-system
268 | spec:
269 | hostNetwork: true
270 | containers:
271 | - name: kube-proxy
272 | image: quay.io/coreos/hyperkube:v1.5.4_coreos.0
273 | command:
274 | - /hyperkube
275 | - proxy
276 | - --master=http://127.0.0.1:8080
277 | securityContext:
278 | privileged: true
279 | volumeMounts:
280 | - mountPath: /etc/ssl/certs
281 | name: ssl-certs-host
282 | readOnly: true
283 | volumes:
284 | - hostPath:
285 | path: /usr/share/ca-certificates
286 | name: ssl-certs-host
287 | ```
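
Once the cluster is up, a quick sanity check is to look for the iptables chains that kube-proxy programs on the host. The chain names are an implementation detail and may differ between versions, so treat this as a rough check.

```sh
$ sudo iptables-save | grep KUBE- | head
```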
288 |
289 | ### Set Up the kube-controller-manager Pod
290 |
291 | The controller manager is responsible for reconciling any required actions based on changes to [Replication Controllers][rc-overview].
292 |
293 | For example, if you increased the replica count, the controller manager would generate a scale up event, which would cause a new Pod to get scheduled in the cluster. The controller manager communicates with the API to submit these events.
294 |
295 | Create `/etc/kubernetes/manifests/kube-controller-manager.yaml`. It will use the TLS assets placed on disk earlier.
296 |
297 | [rc-overview]: https://coreos.com/kubernetes/docs/latest/replication-controller.html
298 |
299 | **/etc/kubernetes/manifests/kube-controller-manager.yaml**
300 |
301 | ```yaml
302 | apiVersion: v1
303 | kind: Pod
304 | metadata:
305 | name: kube-controller-manager
306 | namespace: kube-system
307 | spec:
308 | hostNetwork: true
309 | containers:
310 | - name: kube-controller-manager
311 | image: quay.io/coreos/hyperkube:v1.5.4_coreos.0
312 | command:
313 | - /hyperkube
314 | - controller-manager
315 | - --master=http://127.0.0.1:8080
316 | - --leader-elect=true
317 | - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
318 | - --root-ca-file=/etc/kubernetes/ssl/ca.pem
319 | resources:
320 | requests:
321 | cpu: 200m
322 | livenessProbe:
323 | httpGet:
324 | host: 127.0.0.1
325 | path: /healthz
326 | port: 10252
327 | initialDelaySeconds: 15
328 | timeoutSeconds: 15
329 | volumeMounts:
330 | - mountPath: /etc/kubernetes/ssl
331 | name: ssl-certs-kubernetes
332 | readOnly: true
333 | - mountPath: /etc/ssl/certs
334 | name: ssl-certs-host
335 | readOnly: true
336 | volumes:
337 | - hostPath:
338 | path: /etc/kubernetes/ssl
339 | name: ssl-certs-kubernetes
340 | - hostPath:
341 | path: /usr/share/ca-certificates
342 | name: ssl-certs-host
343 | ```
344 |
345 | ### Set Up the kube-scheduler Pod
346 |
347 | The scheduler monitors the API for unscheduled pods, finds them a machine to run on, and communicates the decision back to the API.
348 |
349 | Create the file `/etc/kubernetes/manifests/kube-scheduler.yaml`:
350 |
351 | **/etc/kubernetes/manifests/kube-scheduler.yaml**
352 |
353 | ```yaml
354 | apiVersion: v1
355 | kind: Pod
356 | metadata:
357 | name: kube-scheduler
358 | namespace: kube-system
359 | spec:
360 | hostNetwork: true
361 | containers:
362 | - name: kube-scheduler
363 | image: quay.io/coreos/hyperkube:v1.5.4_coreos.0
364 | command:
365 | - /hyperkube
366 | - scheduler
367 | - --master=http://127.0.0.1:8080
368 | - --leader-elect=true
369 | resources:
370 | requests:
371 | cpu: 100m
372 | livenessProbe:
373 | httpGet:
374 | host: 127.0.0.1
375 | path: /healthz
376 | port: 10251
377 | initialDelaySeconds: 15
378 | timeoutSeconds: 15
379 | ```
380 |
381 | ### Set Up Calico For Network Policy (optional)
382 |
383 | This step can be skipped if you do not wish to provide network policy to your cluster using Calico.
384 |
385 | Several things happen here. First, the `ConfigMap` provides configuration needed by Calico and the kubelet.
386 | 
387 | Second, the `DaemonSet` runs on all hosts, including the master node. It performs several functions:
388 | * The `install-cni` container drops the necessary CNI binaries into `/opt/cni/bin/`, the directory that was set up in the kubelet step
389 | * Connects containers to the flannel overlay network, which enables the "one IP per pod" concept.
390 | * Enforces network policy created through the Kubernetes policy API, ensuring pods talk to authorized resources only.
391 |
392 | The policy controller is the last major piece of the calico.yaml. It monitors the API for changes related to network policy and configures Calico to implement that policy.
393 |
394 | When creating `/etc/kubernetes/manifests/calico.yaml`:
395 |
396 | * Replace `${ETCD_ENDPOINTS}`
397 |
398 | **/etc/kubernetes/manifests/calico.yaml**
399 |
400 | ```yaml
401 | # This ConfigMap is used to configure a self-hosted Calico installation.
402 | kind: ConfigMap
403 | apiVersion: v1
404 | metadata:
405 | name: calico-config
406 | namespace: kube-system
407 | data:
408 | # Configure this with the location of your etcd cluster.
409 | etcd_endpoints: "${ETCD_ENDPOINTS}"
410 |
411 | # The CNI network configuration to install on each node. The special
412 | # values in this config will be automatically populated.
413 | cni_network_config: |-
414 | {
415 | "name": "calico",
416 | "type": "flannel",
417 | "delegate": {
418 | "type": "calico",
419 | "etcd_endpoints": "__ETCD_ENDPOINTS__",
420 | "log_level": "info",
421 | "policy": {
422 | "type": "k8s",
423 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
424 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
425 | },
426 | "kubernetes": {
427 | "kubeconfig": "/etc/kubernetes/cni/net.d/__KUBECONFIG_FILENAME__"
428 | }
429 | }
430 | }
431 |
432 | ---
433 |
434 | # This manifest installs the calico/node container, as well
435 | # as the Calico CNI plugins and network config on
436 | # each master and worker node in a Kubernetes cluster.
437 | kind: DaemonSet
438 | apiVersion: extensions/v1beta1
439 | metadata:
440 | name: calico-node
441 | namespace: kube-system
442 | labels:
443 | k8s-app: calico-node
444 | spec:
445 | selector:
446 | matchLabels:
447 | k8s-app: calico-node
448 | template:
449 | metadata:
450 | labels:
451 | k8s-app: calico-node
452 | annotations:
453 | scheduler.alpha.kubernetes.io/critical-pod: ''
454 | scheduler.alpha.kubernetes.io/tolerations: |
455 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
456 | {"key":"CriticalAddonsOnly", "operator":"Exists"}]
457 | spec:
458 | hostNetwork: true
459 | containers:
460 | # Runs calico/node container on each Kubernetes node. This
461 | # container programs network policy and routes on each
462 | # host.
463 | - name: calico-node
464 | image: quay.io/calico/node:v0.23.0
465 | env:
466 | # The location of the Calico etcd cluster.
467 | - name: ETCD_ENDPOINTS
468 | valueFrom:
469 | configMapKeyRef:
470 | name: calico-config
471 | key: etcd_endpoints
472 | # Choose the backend to use.
473 | - name: CALICO_NETWORKING_BACKEND
474 | value: "none"
475 | # Disable file logging so `kubectl logs` works.
476 | - name: CALICO_DISABLE_FILE_LOGGING
477 | value: "true"
478 | - name: NO_DEFAULT_POOLS
479 | value: "true"
480 | securityContext:
481 | privileged: true
482 | volumeMounts:
483 | - mountPath: /lib/modules
484 | name: lib-modules
485 | readOnly: false
486 | - mountPath: /var/run/calico
487 | name: var-run-calico
488 | readOnly: false
489 | - mountPath: /etc/resolv.conf
490 | name: dns
491 | readOnly: true
492 | # This container installs the Calico CNI binaries
493 | # and CNI network config file on each node.
494 | - name: install-cni
495 | image: quay.io/calico/cni:v1.5.2
496 | imagePullPolicy: Always
497 | command: ["/install-cni.sh"]
498 | env:
499 | # CNI configuration filename
500 | - name: CNI_CONF_NAME
501 | value: "10-calico.conf"
502 | # The location of the Calico etcd cluster.
503 | - name: ETCD_ENDPOINTS
504 | valueFrom:
505 | configMapKeyRef:
506 | name: calico-config
507 | key: etcd_endpoints
508 | # The CNI network config to install on each node.
509 | - name: CNI_NETWORK_CONFIG
510 | valueFrom:
511 | configMapKeyRef:
512 | name: calico-config
513 | key: cni_network_config
514 | volumeMounts:
515 | - mountPath: /host/opt/cni/bin
516 | name: cni-bin-dir
517 | - mountPath: /host/etc/cni/net.d
518 | name: cni-net-dir
519 | volumes:
520 | # Used by calico/node.
521 | - name: lib-modules
522 | hostPath:
523 | path: /lib/modules
524 | - name: var-run-calico
525 | hostPath:
526 | path: /var/run/calico
527 | # Used to install CNI.
528 | - name: cni-bin-dir
529 | hostPath:
530 | path: /opt/cni/bin
531 | - name: cni-net-dir
532 | hostPath:
533 | path: /etc/kubernetes/cni/net.d
534 | - name: dns
535 | hostPath:
536 | path: /etc/resolv.conf
537 |
538 | ---
539 |
540 | # This manifest deploys the Calico policy controller on Kubernetes.
541 | # See https://github.com/projectcalico/k8s-policy
542 | apiVersion: extensions/v1beta1
543 | kind: ReplicaSet
544 | metadata:
545 | name: calico-policy-controller
546 | namespace: kube-system
547 | labels:
548 | k8s-app: calico-policy
549 | spec:
550 | # The policy controller can only have a single active instance.
551 | replicas: 1
552 | template:
553 | metadata:
554 | name: calico-policy-controller
555 | namespace: kube-system
556 | labels:
557 | k8s-app: calico-policy
558 | annotations:
559 | scheduler.alpha.kubernetes.io/critical-pod: ''
560 | scheduler.alpha.kubernetes.io/tolerations: |
561 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
562 | {"key":"CriticalAddonsOnly", "operator":"Exists"}]
563 | spec:
564 | # The policy controller must run in the host network namespace so that
565 | # it isn't governed by policy that would prevent it from working.
566 | hostNetwork: true
567 | containers:
568 | - name: calico-policy-controller
569 | image: calico/kube-policy-controller:v0.4.0
570 | env:
571 | # The location of the Calico etcd cluster.
572 | - name: ETCD_ENDPOINTS
573 | valueFrom:
574 | configMapKeyRef:
575 | name: calico-config
576 | key: etcd_endpoints
577 | # The location of the Kubernetes API. Use the default Kubernetes
578 | # service for API access.
579 | - name: K8S_API
580 | value: "https://kubernetes.default:443"
581 | # Since we're running in the host namespace and might not have KubeDNS
582 | # access, configure the container's /etc/hosts to resolve
583 | # kubernetes.default to the correct service clusterIP.
584 | - name: CONFIGURE_ETC_HOSTS
585 | value: "true"
586 | ```
587 |
588 | ## Start Services
589 |
590 | Now that we've defined all of our units and written our TLS certificates to disk, we're ready to start the master components.
591 |
592 | ### Load Changed Units
593 |
594 | First, we need to tell systemd that we've changed units on disk and it needs to rescan everything:
595 |
596 | ```sh
597 | $ sudo systemctl daemon-reload
598 | ```
599 |
600 | ### Configure flannel Network
601 |
602 | Earlier it was mentioned that flannel stores cluster-level configuration in etcd. We need to configure our Pod network IP range. Since etcd was started earlier, we can set this configuration now. If you don't have etcd running, start it before proceeding.
603 |
604 | * Replace `$POD_NETWORK`
605 | * Replace `$ETCD_SERVER` with one url (`http://ip:port`) from `$ETCD_ENDPOINTS`
606 |
607 | ```sh
608 | $ curl -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ETCD_SERVER/v2/keys/coreos.com/network/config"
609 | ```
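
You can read the key back to confirm the network configuration was stored where flannel expects it:

```sh
$ curl "$ETCD_SERVER/v2/keys/coreos.com/network/config"
```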
610 |
611 | After configuring flannel, start it so the configuration takes effect. Note that starting flannel will also restart the docker daemon and could impact running containers.
612 |
613 | ```sh
614 | $ sudo systemctl start flanneld
615 | $ sudo systemctl enable flanneld
616 | ```
617 |
618 | ### Start kubelet
619 |
620 | Now that everything is configured, we can start the kubelet, which will also start the Pod manifests for the API server, the controller manager, proxy and scheduler.
621 |
622 | ```sh
623 | $ sudo systemctl start kubelet
624 | ```
625 |
626 | Ensure that the kubelet will start after a reboot:
627 |
628 | ```sh
629 | $ sudo systemctl enable kubelet
630 | Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
631 | ```
632 |
633 | ### Basic Health Checks
634 |
635 | First, we need to make sure the Kubernetes API is available (this could take a few minutes after starting `kubelet.service`):
636 |
637 | ```sh
638 | $ curl http://127.0.0.1:8080/version
639 | ```
640 |
641 | A successful response should look something like:
642 |
643 | ```
644 | {
645 | "major": "1",
646 |   "minor": "5",
647 | "gitVersion": "v1.5.2+coreos.0",
648 | "gitCommit": "ec2b52fabadf824a42b66b6729fe4cff2c62af8c",
649 | "gitTreeState": "clean",
650 | "buildDate": "2016-11-14T19:42:00Z",
651 | "goVersion": "go1.6.3",
652 | "compiler": "gc",
653 | "platform": "linux/amd64"
654 | }
655 | ```
656 |
657 | To check the health of the kubelet systemd unit that we created, run `systemctl status kubelet.service`.
658 |
659 | Our Pods should now be starting up and downloading their containers. Once the kubelet has started, you can check that it's creating its pods via the metadata API:
660 |
661 | ```sh
662 | $ curl -s localhost:10255/pods | jq -r '.items[].metadata.name'
663 | kube-scheduler-$node
664 | kube-apiserver-$node
665 | kube-controller-$node
666 | kube-proxy-$node
667 | ```
668 |
669 |
670 | Did the containers start downloading? As long as the kubelet knows about them, everything is working properly.
671 | 
672 | If so, you are ready to deploy the worker nodes.
673 | 
674 | [rkt-opts-examples]: kubelet-wrapper.md#customizing-rkt-options
675 | [rdb]: kubelet-wrapper.md#allow-pods-to-use-rbd-volumes
676 | [iscsi]: kubelet-wrapper.md#allow-pods-to-use-iscsi-mounts
677 | [host-dns]: kubelet-wrapper.md#use-the-hosts-dns-configuration
678 | [mount-disks]: https://coreos.com/os/docs/latest/mounting-storage.html
679 | [insecure-registry]: https://coreos.com/os/docs/latest/registry-authentication.html#using-a-registry-without-ssl-configured
680 | [update]: https://coreos.com/os/docs/latest/switching-channels.html
681 | [securing-kubelet-api]: kubelet-wrapper.md#Securing-the-Kubelet-API
682 | [kubelet-admin]: https://kubernetes.io/docs/admin/kubelet/
683 |
--------------------------------------------------------------------------------
/Documentation/deploy-workers.md:
--------------------------------------------------------------------------------
1 | # Deploy Kubernetes Worker Node(s)
2 |
  3 | 
  4 | This repo is not in alignment with current versions of Kubernetes and will no longer be actively maintained. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
  5 | 
  6 | For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes, and can be easily customized.
  7 | 
  8 | 
  9 | Boot one or more CoreOS nodes which will be used as Kubernetes Workers. You must use CoreOS version 962.0.0+ for the `/usr/lib/coreos/kubelet-wrapper` script to be present in the image. See [kubelet-wrapper](kubelet-wrapper.md) for more information.
10 |
11 | See the [CoreOS Documentation](https://coreos.com/os/docs/latest/) for guides on launching nodes on supported platforms.
12 |
13 | ## Configure Service Components
14 |
15 | ### TLS Assets
16 |
17 | Place the TLS keypairs generated previously in the following locations. Note that each keypair is unique and should be installed on the worker node it was generated for:
18 |
19 | * File: `/etc/kubernetes/ssl/ca.pem`
20 | * File: `/etc/kubernetes/ssl/${WORKER_FQDN}-worker.pem`
21 | * File: `/etc/kubernetes/ssl/${WORKER_FQDN}-worker-key.pem`
22 |
 23 | And make sure you've set proper permissions on the private key:
24 |
25 | ```sh
26 | $ sudo chmod 600 /etc/kubernetes/ssl/*-key.pem
27 | $ sudo chown root:root /etc/kubernetes/ssl/*-key.pem
28 | ```
29 |
30 | Create symlinks to the worker-specific certificate and key so that the remaining configurations on the workers do not have to be unique per worker.
31 |
32 | ```sh
33 | $ cd /etc/kubernetes/ssl/
34 | $ sudo ln -s ${WORKER_FQDN}-worker.pem worker.pem
35 | $ sudo ln -s ${WORKER_FQDN}-worker-key.pem worker-key.pem
36 | ```
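
You can confirm that the links point at this node's specific assets:

```sh
$ ls -l /etc/kubernetes/ssl/worker.pem /etc/kubernetes/ssl/worker-key.pem
```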
37 |
38 |
39 | ### Networking Configuration
40 |
 41 | *Note:* If the pod-network is being managed independently of flannel, then the flannel parts of this guide can be skipped. It's recommended that Calico still be used for providing network policy. See [kubernetes networking](kubernetes-networking.md) for more detail.
42 |
43 | Just like earlier, create `/etc/flannel/options.env` and modify these values:
44 |
45 | * Replace `${ADVERTISE_IP}` with this node's publicly routable IP.
46 | * Replace `${ETCD_ENDPOINTS}`
47 |
48 | **/etc/flannel/options.env**
49 |
 50 | ```sh
51 | FLANNELD_IFACE=${ADVERTISE_IP}
52 | FLANNELD_ETCD_ENDPOINTS=${ETCD_ENDPOINTS}
53 | ```
54 |
 55 | Next create a [systemd drop-in][dropins], which will use the above configuration when flannel starts:
56 |
57 | **/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf**
58 |
 59 | ```ini
60 | [Service]
61 | ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
62 | ```
63 |
64 | [dropins]: https://coreos.com/os/docs/latest/using-systemd-drop-in-units.html
65 |
66 | ### Docker Configuration
67 |
68 | *Note:* If the pod-network is being managed independently, this step can be skipped. See [kubernetes networking](kubernetes-networking.md) for more detail.
69 |
70 | Require that flanneld is running prior to Docker start.
71 |
72 | Create `/etc/systemd/system/docker.service.d/40-flannel.conf`
73 |
74 | **/etc/systemd/system/docker.service.d/40-flannel.conf**
75 |
 76 | ```ini
77 | [Unit]
78 | Requires=flanneld.service
79 | After=flanneld.service
80 | [Service]
81 | EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
82 | ```
83 |
84 | Create the Docker CNI Options file:
85 |
86 | **/etc/kubernetes/cni/docker_opts_cni.env**
87 |
 88 | ```sh
89 | DOCKER_OPT_BIP=""
90 | DOCKER_OPT_IPMASQ=""
91 | ```
92 |
 93 | If using Flannel for networking, set up the Flannel CNI configuration as shown below. If you intend to use Calico for networking, follow [Set Up the CNI config (optional)](#set-up-the-cni-config-optional) instead.
94 |
95 | **/etc/kubernetes/cni/net.d/10-flannel.conf**
96 |
97 | ```yaml
98 | {
99 | "name": "podnet",
100 | "type": "flannel",
101 | "delegate": {
102 | "isDefaultGateway": true
103 | }
104 | }
105 | ```
106 |
107 | ### Create the kubelet Unit
108 |
109 | Create `/etc/systemd/system/kubelet.service` and substitute the following variables:
110 |
111 | * Replace `${MASTER_HOST}`
112 | * Replace `${ADVERTISE_IP}` with this node's publicly routable IP.
113 | * Replace `${DNS_SERVICE_IP}`
114 | * Replace `${K8S_VER}` This will map to: `quay.io/coreos/hyperkube:${K8S_VER}` release, e.g. `v1.5.4_coreos.0`.
115 | * If using Calico for network policy
116 | - Replace `${NETWORK_PLUGIN}` with `cni`
117 | - Add the following to `RKT_RUN_ARGS=`
118 | ```
119 | --volume cni-bin,kind=host,source=/opt/cni/bin \
120 | --mount volume=cni-bin,target=/opt/cni/bin
121 | ```
122 | - Add `ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin`
123 | * Decide if you will use [additional features][rkt-opts-examples] such as:
124 | - [mounting ephemeral disks][mount-disks]
125 |     - [allow pods to mount RBD][rdb] or [iSCSI volumes][iscsi]
126 | - [allowing access to insecure container registries][insecure-registry]
127 | - [changing your CoreOS auto-update settings][update]
128 |
129 | **Note**: Anyone with access to port 10250 on a node can execute arbitrary code in a pod on the node. Information, including logs and metadata, is also disclosed on port 10255. See [securing the Kubelet API][securing-kubelet-api] for more information.
130 |
131 | **/etc/systemd/system/kubelet.service**
132 |
133 | ```ini
134 | [Service]
135 | Environment=KUBELET_IMAGE_TAG=${K8S_VER}
136 | Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
137 | --volume dns,kind=host,source=/etc/resolv.conf \
138 | --mount volume=dns,target=/etc/resolv.conf \
139 | --volume var-log,kind=host,source=/var/log \
140 | --mount volume=var-log,target=/var/log"
141 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
142 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers
143 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
144 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
145 | --api-servers=https://${MASTER_HOST} \
146 | --cni-conf-dir=/etc/kubernetes/cni/net.d \
147 | --network-plugin=${NETWORK_PLUGIN} \
148 | --container-runtime=docker \
149 | --register-node=true \
150 | --allow-privileged=true \
151 | --pod-manifest-path=/etc/kubernetes/manifests \
152 | --hostname-override=${ADVERTISE_IP} \
153 | --cluster_dns=${DNS_SERVICE_IP} \
154 | --cluster_domain=cluster.local \
155 | --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
156 | --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
157 | --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem
158 | ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
159 | Restart=always
160 | RestartSec=10
161 |
162 | [Install]
163 | WantedBy=multi-user.target
164 | ```
165 |
166 | ### Set Up the kube-proxy Pod
167 |
168 | Create `/etc/kubernetes/manifests/kube-proxy.yaml`:
169 |
170 | * Replace `${MASTER_HOST}`
171 |
172 | **/etc/kubernetes/manifests/kube-proxy.yaml**
173 |
174 | ```yaml
175 | apiVersion: v1
176 | kind: Pod
177 | metadata:
178 | name: kube-proxy
179 | namespace: kube-system
180 | spec:
181 | hostNetwork: true
182 | containers:
183 | - name: kube-proxy
184 | image: quay.io/coreos/hyperkube:v1.5.4_coreos.0
185 | command:
186 | - /hyperkube
187 | - proxy
188 | - --master=${MASTER_HOST}
189 | - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
190 | securityContext:
191 | privileged: true
192 | volumeMounts:
193 | - mountPath: /etc/ssl/certs
194 | name: "ssl-certs"
195 | - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
196 | name: "kubeconfig"
197 | readOnly: true
198 | - mountPath: /etc/kubernetes/ssl
199 | name: "etc-kube-ssl"
200 | readOnly: true
201 | volumes:
202 | - name: "ssl-certs"
203 | hostPath:
204 | path: "/usr/share/ca-certificates"
205 | - name: "kubeconfig"
206 | hostPath:
207 | path: "/etc/kubernetes/worker-kubeconfig.yaml"
208 | - name: "etc-kube-ssl"
209 | hostPath:
210 | path: "/etc/kubernetes/ssl"
211 | ```
212 |
213 | ### Set Up kubeconfig
214 |
215 | In order to facilitate secure communication between Kubernetes components, kubeconfig can be used to define authentication settings. In this case, the kubelet and proxy are reading this configuration to communicate with the API.
216 |
217 | Create `/etc/kubernetes/worker-kubeconfig.yaml`:
218 |
219 | **/etc/kubernetes/worker-kubeconfig.yaml**
220 |
221 | ```yaml
222 | apiVersion: v1
223 | kind: Config
224 | clusters:
225 | - name: local
226 | cluster:
227 | certificate-authority: /etc/kubernetes/ssl/ca.pem
228 | users:
229 | - name: kubelet
230 | user:
231 | client-certificate: /etc/kubernetes/ssl/worker.pem
232 | client-key: /etc/kubernetes/ssl/worker-key.pem
233 | contexts:
234 | - context:
235 | cluster: local
236 | user: kubelet
237 | name: kubelet-context
238 | current-context: kubelet-context
239 | ```
240 |
241 | ## Start Services
242 |
243 | Now we can start the Worker services.
244 |
245 | ### Load Changed Units
246 |
247 | Tell systemd to rescan the units on disk:
248 |
249 | ```sh
250 | $ sudo systemctl daemon-reload
251 | ```
252 |
253 | ### Start flannel and kubelet
254 |
255 | Start flanneld and the kubelet. The kubelet will also start the proxy.
256 |
257 | ```sh
258 | $ sudo systemctl start flanneld
259 | $ sudo systemctl start kubelet
260 | ```
261 |
262 | Ensure that the services start on each boot:
263 |
264 | ```sh
265 | $ sudo systemctl enable flanneld
266 | Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /etc/systemd/system/flanneld.service.
267 | $ sudo systemctl enable kubelet
268 | Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
269 | ```
270 |
271 | To check the health of the kubelet systemd unit that we created, run `systemctl status kubelet.service`.
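
After a minute or two, you can confirm from the master (or any machine with `kubectl` configured for this cluster) that the worker has registered itself:

```sh
$ kubectl get nodes
```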
272 |
273 |
277 |
278 | [rkt-opts-examples]: kubelet-wrapper.md#customizing-rkt-options
279 | [rdb]: kubelet-wrapper.md#allow-pods-to-use-rbd-volumes
280 | [iscsi]: kubelet-wrapper.md#allow-pods-to-use-iscsi-mounts
281 | [host-dns]: kubelet-wrapper.md#use-the-hosts-dns-configuration
282 | [mount-disks]: https://coreos.com/os/docs/latest/mounting-storage.html
283 | [insecure-registry]: https://coreos.com/os/docs/latest/registry-authentication.html#using-a-registry-without-ssl-configured
284 | [update]: https://coreos.com/os/docs/latest/switching-channels.html
285 | [securing-kubelet-api]: kubelet-wrapper.md#Securing-the-Kubelet-API
286 |
--------------------------------------------------------------------------------
/Documentation/getting-started.md:
--------------------------------------------------------------------------------
1 | # CoreOS + Kubernetes Step By Step
2 |
  3 | 
  4 | This repo is not in alignment with current versions of Kubernetes and will no longer be actively maintained. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
  5 | 
  6 | For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes, and can be easily customized.
  7 | 
  8 | 
9 | This guide walks through deploying a Kubernetes cluster of CoreOS nodes, with a single controller and multiple workers. This guide enumerates the multiple steps and stages of a Kubernetes deployment. To quickly deploy a Kubernetes cluster without engaging component-level details, check out the [free tier of the CoreOS Tectonic][tectonic-free] Kubernetes distribution, or the [open-source Tectonic Installer][tectonic-installer] that drives Tectonic's automation of cluster deployments.
10 |
11 | The primary goals of this guide are:
12 |
13 | - Configure an etcd cluster for Kubernetes to use
14 | - Generate the required certificates for communication between Kubernetes components
15 | - Deploy a master node
16 | - Deploy worker nodes
17 | - Configure `kubectl` to work with our cluster
18 | - Deploy the DNS add-on
19 | - Deploy the network policy add-on
20 |
 21 | Working through this guide may take you a few hours, but it will give you a good understanding of the moving pieces of your cluster and set you up for success in the long run. For a shortcut, you can utilize [these generic user-data scripts][generic-userdata]. Let's get started.
22 |
23 | ## Deployment Options
24 |
25 | The following variables will be used throughout this guide. Most of the provided defaults can safely be used, however some values such as `ETCD_ENDPOINTS` and `MASTER_HOST` will need to be customized to your infrastructure.
26 |
27 | **MASTER_HOST**=_no default_
28 |
29 | The address of the master node. In most cases this will be the publicly routable IP of the node. Worker nodes must be able to reach the master node(s) via this address on port 443. Additionally, external clients (such as an administrator using `kubectl`) will also need access, since this will run the Kubernetes API endpoint.
30 |
31 | If you will be running a high-availability control-plane consisting of multiple master nodes, then `MASTER_HOST` will ideally be a network load balancer that sits in front of them. Alternatively, a DNS name can be configured which will resolve to the master IPs. How requests are routed to the master nodes will be an important consideration when creating the TLS certificates.
32 |
33 |
34 |
35 | **ETCD_ENDPOINTS**=_no default_
36 |
37 | List of etcd machines (`http://ip:port`), comma separated. If you're running a cluster of 5 machines, list them all here.
38 |
39 |
40 |
41 | **POD_NETWORK**=10.2.0.0/16
42 |
43 | The CIDR network to use for pod IPs.
44 | Each pod launched in the cluster will be assigned an IP out of this range.
45 | This network must be routable between all hosts in the cluster. In a default installation, the flannel overlay network will provide routing to this network.
46 |
47 |
48 |
49 | **SERVICE_IP_RANGE**=10.3.0.0/24
50 |
 51 | The CIDR network to use for service cluster VIPs (Virtual IPs). Each service will be assigned a cluster IP out of this range. This must not overlap with any IP ranges assigned to the `POD_NETWORK`, or other existing network infrastructure. Routing to these VIPs is handled by a kube-proxy service local to each host, and they are not required to be routable between hosts.
52 |
53 |
54 |
55 | **K8S_SERVICE_IP**=10.3.0.1
56 |
57 | The VIP (Virtual IP) address of the Kubernetes API Service. If the `SERVICE_IP_RANGE` is changed above, this must be set to the first IP in that range.
58 |
59 |
60 |
61 | **DNS_SERVICE_IP**=10.3.0.10
62 |
63 | The VIP (Virtual IP) address of the cluster DNS service. This IP must be in the range of the `SERVICE_IP_RANGE` and cannot be the first IP in the range. This same IP must be configured on all worker nodes to enable DNS service discovery.
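
For reference, a consolidated view of these settings using the defaults above might look like the following. This is purely illustrative; the file is not consumed by any tool in this guide, and the `MASTER_HOST` and `ETCD_ENDPOINTS` values are placeholder examples you must replace with your own.

```sh
MASTER_HOST=172.17.4.101                  # example only; use your master address
ETCD_ENDPOINTS=http://172.17.4.51:2379    # example only; comma-separate multiple members
POD_NETWORK=10.2.0.0/16
SERVICE_IP_RANGE=10.3.0.0/24
K8S_SERVICE_IP=10.3.0.1
DNS_SERVICE_IP=10.3.0.10
```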
64 |
65 | ## Deploy etcd Cluster
66 |
 67 | Kubernetes uses etcd for data storage and for cluster consensus between different software components. Your etcd cluster will be heavily utilized since all cluster objects are stored within it and every scheduling decision is recorded. It's recommended that you run a multi-machine cluster on dedicated hardware (with fast disks) to gain maximum performance and reliability of this important part of your cluster. For development environments, a single etcd node is fine.
68 |
69 | ### Single-Node/Development
70 |
71 | You can simply start etcd via [cloud-config][cloud-config-etcd] when you create your CoreOS machine or start it manually.
72 |
 73 | If you are starting etcd manually, first configure it to listen on all interfaces:
74 |
 75 | * Replace `${PUBLIC_IP}` with the etcd machine's publicly routable IP address.
76 |
77 | **/etc/systemd/system/etcd2.service.d/40-listen-address.conf**
78 |
79 | ```
80 | [Service]
81 | Environment=ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379
82 | Environment=ETCD_ADVERTISE_CLIENT_URLS=http://${PUBLIC_IP}:2379
83 | ```
84 |
85 | Use the value of `ETCD_ADVERTISE_CLIENT_URLS` as the value of `ETCD_ENDPOINTS` in the rest of this guide.
86 |
 87 | Next, start etcd:
88 |
89 | ```
90 | $ sudo systemctl start etcd2
91 | ```
92 |
93 | To ensure etcd starts after a reboot, enable it too:
94 |
95 | ```sh
96 | $ sudo systemctl enable etcd2
97 | Created symlink from /etc/systemd/system/multi-user.target.wants/etcd2.service to /usr/lib64/systemd/system/etcd2.service.
98 | ```
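
Before moving on, you can confirm that etcd is healthy and reachable at the advertised address (assuming the single-node setup above):

```sh
$ curl http://${PUBLIC_IP}:2379/health
```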
99 |
100 | [cloud-config-etcd]: https://coreos.com/os/docs/latest/cloud-config.html#etcd2
101 |
102 | ### Multi-Node/Production
103 |
104 | It is highly recommended that etcd is run as a dedicated cluster separately from Kubernetes components.
105 |
106 | Use the [official etcd clustering guide](https://coreos.com/etcd/docs/latest/docker_guide.html) to decide how best to deploy etcd into your environment.
107 |
108 | ## Generate Kubernetes TLS Assets
109 |
110 | The Kubernetes API has various methods for validating clients — this guide will configure the API server to use client certificate authentication.
111 |
112 | This means it is necessary to have a Certificate Authority and generate the proper credentials. Generate the necessary assets from existing PKI infrastructure, or by following [these OpenSSL-based instructions](openssl.md) to create the needed certificates and keys.
113 |
114 | In the following steps, it is assumed that you will have generated the following TLS assets:
115 |
116 | **Root CA Public Key**
117 |
118 | ca.pem
119 |
120 |
121 |
122 | **API Server Public & Private Keys**
123 |
124 | apiserver.pem
125 |
126 | apiserver-key.pem
127 |
128 |
129 |
130 | **Worker Node Public & Private Keys**
131 |
132 | _You should have one certificate/key set for every worker node in the planned cluster._
133 |
134 | ${WORKER_FQDN}-worker.pem
135 |
136 | ${WORKER_FQDN}-worker-key.pem
137 |
138 |
139 |
140 | **Cluster Admin Public & Private Keys**
141 |
142 | admin.pem
143 |
144 | admin-key.pem
145 |
146 |
147 | Is your etcd cluster up and running? You will need its IPs for the next step.
148 | 
149 | Did you generate all of the certificates? You will place these on disk next. If so, you are ready to deploy the master node.
150 | 
151 |
152 | [generic-userdata]: kubernetes-on-generic-platforms.md
153 | [tectonic-free]: https://coreos.com/tectonic/
154 | [tectonic-installer]: https://github.com/coreos/tectonic-installer
155 |
--------------------------------------------------------------------------------
/Documentation/kubelet-wrapper.md:
--------------------------------------------------------------------------------
1 | # Kubelet Wrapper Script
2 |
  3 | 
  4 | This repo is not in alignment with current versions of Kubernetes and will no longer be actively maintained. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
  5 | 
  6 | For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes, and can be easily customized.
  7 | 
  8 | 
9 | The kubelet is the orchestrator of containers on each host in the Kubernetes cluster — it starts and stops containers, configures pod mounts, and other low-level, essential tasks. In order to accomplish these tasks, the kubelet requires special permissions on the host.
10 |
11 | CoreOS recommends running the kubelet using the rkt container engine, because it has the correct set of features to enable these special permissions, while taking advantage of all that container packaging has to offer: image discovery, signing/verification, and simplified management.
12 |
13 | CoreOS ships a wrapper script, `/usr/lib/coreos/kubelet-wrapper`, which makes it very easy to run the kubelet under rkt. This script accomplishes two things:
14 |
 15 | 1. It lets future releases of CoreOS tweak the system-related parameters of the kubelet, such as mounting in `/etc/ssl/certs`.
 16 | 1. It allows user-specified flags and the desired version of the kubelet to be passed to rkt. This gives each cluster admin control to enable newer API features and easily tweak settings, independent of CoreOS releases.
17 |
18 | This script is currently shipping in CoreOS 962.0.0+ and will be included in all channels in the near future.
19 |
20 | ## Using the kubelet-wrapper
21 |
22 | An example systemd kubelet.service file which takes advantage of the kubelet-wrapper script:
23 |
24 | **/etc/systemd/system/kubelet.service**
25 |
26 | ```ini
27 | [Service]
28 | Environment=KUBELET_IMAGE_TAG=v1.5.4_coreos.0
29 | Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid"
30 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
31 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
32 | --api-servers=http://127.0.0.1:8080 \
33 | --pod-manifest-path=/etc/kubernetes/manifests
34 | ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
35 | ```
36 |
37 | In the example above we set the `KUBELET_IMAGE_TAG` and the kubelet-wrapper script takes care of running the correct container image with our desired API server address and manifest location.
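
After the unit is started, you can confirm which hyperkube image the wrapper actually launched. This is a quick check; the exact output format depends on your rkt version.

```sh
$ rkt list | grep hyperkube
```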
38 |
39 | ## Customizing rkt Options
40 |
41 | Passing customized options or flags to rkt can be accomplished with the RKT_RUN_ARGS environment variable. Referencing it in a unit file is straightforward.
42 |
43 | ### Use the host's DNS configuration
44 |
45 | Mount the host's `/etc/resolv.conf` file directly into the container to inherit DNS settings, allowing you to address workers by hostname in addition to IP address.
46 |
47 | ```ini
48 | [Service]
49 | Environment=KUBELET_IMAGE_TAG=v1.5.4_coreos.0
50 | Environment="RKT_RUN_ARGS=--volume=resolv,kind=host,source=/etc/resolv.conf \
51 | --mount volume=resolv,target=/etc/resolv.conf \
52 | --uuid-file-save=/var/run/kubelet-pod.uuid"
53 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
54 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
55 | --api-servers=http://127.0.0.1:8080 \
56 |     --pod-manifest-path=/etc/kubernetes/manifests \
57 | ...other flags...
58 | ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
59 | ```
60 |
61 | ### Allow pods to use iSCSI mounts
62 |
63 | Pods running in your cluster can reference remote storage volumes located on an iSCSI target.
64 |
65 | ```ini
66 | [Service]
67 | Environment=KUBELET_IMAGE_TAG=v1.5.4_coreos.0
68 | Environment="RKT_RUN_ARGS=--volume iscsiadm,kind=host,source=/usr/sbin/iscsiadm \
69 | --mount volume=iscsiadm,target=/usr/sbin/iscsiadm \
70 | --uuid-file-save=/var/run/kubelet-pod.uuid"
71 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
72 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
73 | --api-servers=http://127.0.0.1:8080 \
74 |     --pod-manifest-path=/etc/kubernetes/manifests \
75 | ...other flags...
76 | ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
77 | ```
78 |
79 | ### Allow pods to use rbd volumes
80 |
81 | Pods using the [rbd volume plugin][rbd-example] to consume data from Ceph must ensure that the kubelet has access to modprobe. Add the following options to the `RKT_RUN_ARGS` environment variable before launching the kubelet via kubelet-wrapper:
82 |
83 | ```ini
84 | [Service]
85 | Environment=KUBELET_IMAGE_TAG=v1.5.4_coreos.0
86 | Environment="RKT_RUN_ARGS=--volume modprobe,kind=host,source=/usr/sbin/modprobe \
87 | --mount volume=modprobe,target=/usr/sbin/modprobe \
88 | --volume lib-modules,kind=host,source=/lib/modules \
89 | --mount volume=lib-modules,target=/lib/modules \
90 | --uuid-file-save=/var/run/kubelet-pod.uuid"
91 | ...
92 | ```
93 |
94 | Note that the kubelet also requires access to the userspace `rbd` tool that is included only in hyperkube images tagged `v1.3.6_coreos.0` or later.
95 |
96 | ### Securing the Kubelet API
97 |
98 | By default, the Kubelet allows unauthenticated access to its [API ports][kubernetes-ports].
99 |
100 | In order to secure your Kubernetes cluster, you **must** either:
101 |
102 | 1. Avoid exposing the Kubelet API to the internet, and trust all software with access to it (including every pod run on your cluster), or
103 | 2. Turn on [Kubelet authentication][kubelet-authn-authz].
104 |
105 | The Kubernetes documentation on [Master -> Cluster communication][master-cluster-communication] provides more information and describes possible solutions.
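As a quick, illustrative check, you can probe a worker's Kubelet ports from a machine that should not have access. The commands below are only a sketch: `${WORKER_IP}` is a placeholder for one of your worker addresses, and the exact response depends on how your cluster is configured.

```sh
# Probe the Kubelet's secure API port (10250) and read-only port (10255) from a
# host that should NOT have access. ${WORKER_IP} is a placeholder. If these
# return pod listings without credentials, the Kubelet API is exposed and should
# be locked down as described above.
curl -sk "https://${WORKER_IP}:10250/pods" | head -c 200
curl -s  "http://${WORKER_IP}:10255/pods" | head -c 200
```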
106 |
107 | ## Manual deployment
108 |
109 | If you wish to use the kubelet-wrapper on a CoreOS version prior to 962.0.0, you can manually place the script on the host. Please note that this requires rkt version 0.15.0+.
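A quick way to confirm the host meets that requirement:

```sh
# Print the rkt version installed on the host; it should report 0.15.0 or newer.
rkt version
```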
110 |
111 | For example:
112 |
113 | - Retrieve a copy of the [kubelet-wrapper script][kubelet-wrapper]
114 | - Place on the host: `/opt/bin/kubelet-wrapper`
115 | - Make the script executable: `chmod +x /opt/bin/kubelet-wrapper`
116 | - Reference from your kubelet service file:
117 |
118 | ```ini
119 | [Service]
120 | Environment=KUBELET_IMAGE_TAG=v1.5.4_coreos.0
121 | ...
122 | ExecStart=/opt/bin/kubelet-wrapper \
123 | --api-servers=http://127.0.0.1:8080 \
124 | --pod-manifest-path=/etc/kubernetes/manifests
125 | ...
126 | ```
127 |
129 | [kubelet-wrapper]: https://github.com/coreos/coreos-overlay/blob/master/app-admin/kubelet-wrapper/files/kubelet-wrapper
130 | [rbd-example]: https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/rbd
131 | [kubernetes-ports]: kubernetes-networking.md#port-allocation
132 | [kubelet-authn-authz]: https://kubernetes.io/docs/admin/kubelet-authentication-authorization/
133 | [master-cluster-communication]: https://kubernetes.io/docs/admin/master-node-communication/#master---cluster
134 |
--------------------------------------------------------------------------------
/Documentation/kubernetes-networking.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Networking
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | ## Network Model
10 |
11 | The Kubernetes network model outlines three methods of component communication:
12 |
13 | * Pod-to-Pod Communication
14 | * Each Pod in a Kubernetes cluster is assigned an IP in a flat shared networking namespace. This allows for a clean network model where Pods, from a networking perspective, can be treated much like VMs or physical hosts.
15 |
16 | * Pod-to-Service Communication
17 | * Services are implemented by assigning Virtual IPs which clients can access and are transparently proxied to the Pods grouped by that service. Requests to the Service IPs are intercepted by a kube-proxy process running on all hosts, which is then responsible for routing to the correct Pod.
18 |
19 | * External-to-Internal Communication
20 |   * Accessing services from outside the cluster is generally implemented by configuring external load balancers which target all nodes in the cluster. Once traffic arrives at a node, it is routed to the correct Service backends via the kube-proxy.
21 |
22 | See [Kubernetes Networking][kubernetes-network] for more detailed information on the Kubernetes network model and motivation.
23 |
24 | [kubernetes-network]: https://kubernetes.io/docs/admin/networking/
25 |
26 | ## Port allocation
27 |
28 | The information below describes a minimum set of port allocations used by Kubernetes components. Some of these allocations will be optional depending on the deployment (e.g. if flannel or Calico is being used). A deployer will also likely need to open other ports on their infrastructure (e.g. 22/SSH).
29 |
30 | Master Node Inbound
31 |
32 | | Protocol | Port Range | Source | Purpose |
33 | |-----------|------------|-------------------------------------------|------------------------|
34 | | TCP | 443 | Worker Nodes, API Requests, and End-Users | Kubernetes API server. |
35 | | UDP | 8285 | Master & Worker Nodes | flannel overlay network - *udp backend*. This is the default network configuration (only required if using flannel) |
36 | | UDP | 8472 | Master & Worker Nodes | flannel overlay network - *vxlan backend* (only required if using flannel) |
37 |
38 | Worker Node Inbound
39 |
40 | | Protocol | Port Range | Source | Purpose |
41 | |-----------|-------------|--------------------------------|------------------------------------------------------------------------|
42 | | TCP | 10250 | Master Nodes | Worker node Kubelet API for exec and logs. |
43 | | TCP | 10255 | Heapster | Worker node read-only Kubelet API. |
44 | | TCP | 30000-32767 | External Application Consumers | Default port range for [external service][external-service] ports. Typically, these ports would need to be exposed to external load-balancers, or other external consumers of the application itself. |
45 | | TCP | ALL | Master & Worker Nodes | Intra-cluster communication (unnecessary if `vxlan` is used for networking) |
46 | | UDP | 8285 | Master & Worker Nodes | flannel overlay network - *udp backend*. This is the default network configuration (only required if using flannel) |
47 | | UDP | 8472 | Master & Worker Nodes | flannel overlay network - *vxlan backend* (only required if using flannel) |
48 | | TCP | 179 | Worker Nodes | Calico BGP network (only required if the BGP backend is used) |
49 |
50 | etcd Node Inbound
51 |
52 | | Protocol | Port Range | Source | Purpose |
53 | |-----------|------------|---------------|----------------------------------------------------------|
54 | | TCP | 2379-2380 | Master Nodes | etcd server client API |
55 | | TCP | 2379-2380 | Worker Nodes | etcd server client API (only required if using flannel or Calico). |
56 |
57 | [external-service]: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
58 |
59 | ## Advanced Configuration
60 |
61 | The CoreOS Kubernetes documentation describes a software-defined overlay network (i.e. [flannel][coreos-flannel]) to manage the Kubernetes Pod Network. However, in some cases a deployer may want to use existing network infrastructure to manage the Kubernetes network themselves, e.g. using [Calico][calico].
62 |
63 | The following requirements must be met by your existing infrastructure to run Kubernetes with a self-managed network.
64 |
65 | [coreos-flannel]: https://coreos.com/flannel/docs/latest/flannel-config.html
66 | [calico]: http://docs.projectcalico.org/v2.0/getting-started/kubernetes/
67 |
68 | ### Pod-to-Pod Communication
69 |
70 | Each pod in the Kubernetes cluster will be assigned an IP that is expected to be routable from all other hosts and pods in the Kubernetes cluster.
71 |
72 | An easy way to achieve this is to use Calico. The Calico agent is already running on each node to enforce network policy. Starting it with the `CALICO_NETWORKING` environment variable set to `true` will cause it to run a BGP agent inside the Calico agent pod. These BGP agents will automatically form a full mesh network to exchange routing information. This allows a single large IP range to be used across your whole cluster and IP addresses to be efficiently assigned from it. To peer with your existing BGP infrastructure follow this [guide][calico-bgp]. If your Kubernetes cluster is hosted on an [L2 network][calico-l2] (e.g. in your own datacenter or on AWS) there is no need to peer with your routers.
73 |
74 | An alternative way to achieve this is to first assign an IP range to each host in your cluster.
75 | Requests to IPs in an assigned range would need to be routed to that host via your network infrastructure.
76 | Next, the host is configured such that each pod launched on the host is assigned an IP from the host range.
77 |
78 | For example:
79 |
80 | * Node A assigned IP range 10.0.1.0/24
81 | * Node B assigned IP range 10.0.2.0/24.
82 |
83 | When a Pod is launched on `Node A` it might be assigned `10.0.1.33` and on `Node B` a pod could be assigned `10.0.2.144`.
84 | It would then be expected that both pods would be able to reach each other via those IPs, as if they were on a flat network.
85 |
86 | The actual allocation of Pod IPs on the host can be achieved by configuring Docker to use a linux bridge device configured with the correct IP range.
87 | When a new Kubernetes Pod is launched, it will be assigned an IP from the range assigned to the linux bridge device.
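As a rough sketch of that model, the commands below show how a bridge could be created on `Node A` (assigned `10.0.1.0/24` in the example above) and how Docker could be pointed at it. The bridge name `cbr0` and the exact daemon flags are illustrative assumptions; in practice this wiring is usually handled by your provisioning tooling rather than run by hand.

```sh
# Create a bridge on Node A and give it the first address of the node's range.
sudo ip link add name cbr0 type bridge
sudo ip addr add 10.0.1.1/24 dev cbr0
sudo ip link set dev cbr0 up

# Start Docker against that bridge; containers (and therefore pods) will then be
# assigned addresses from 10.0.1.0/24. Masquerading and iptables handling are
# left to your own network infrastructure in this model.
sudo dockerd --bridge=cbr0 --iptables=false --ip-masq=false
```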
88 |
89 | To achieve this network model, there are various methods that can be used. See the [Kubernetes Networking][how-to-achieve] documentation for more detail.
90 |
91 | [how-to-achieve]: https://kubernetes.io/docs/admin/networking/#how-to-achieve-this
92 | [calico-bgp]: https://github.com/projectcalico/calico-containers/blob/v0.19.0/docs/bgp.md
93 | [calico-l2]: http://docs.projectcalico.org/v2.0/reference/private-cloud/l2-interconnect-fabric
94 |
95 | ### Pod-to-Service Communication
96 |
97 | The service IPs are assigned from a range configured in the Kubernetes API Server via the `--service-cluster-ip-range` flag. These are virtual IPs which are intercepted by a kube-proxy process running locally on each node. These IPs do not need to be routable off-host, because IPTables rules will intercept the traffic and route it to the proper backend (usually the pod network).
98 |
99 | A requirement of a manually configured network is that the service IP range does not conflict with existing network infrastructure. The CoreOS Kubernetes guides default to a service IP range of `10.3.0.0/24`, but that can easily be changed if it conflicts with existing infrastructure.
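For reference, the range is passed to the API server, so on a cluster that follows these guides you could inspect it in the API server's static manifest. The path and default value below assume the conventions used elsewhere in these documents:

```sh
# Look up the configured service IP range in the API server's static manifest.
grep -- --service-cluster-ip-range /etc/kubernetes/manifests/kube-apiserver.yaml
# Expected output with the default range used in these guides:
#   - --service-cluster-ip-range=10.3.0.0/24
```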
100 |
101 | ### External-to-Internal Communication
102 |
103 | IP addresses assigned on the pod network are typically not routable outside of the cluster unless you're using Calico and have [peered with your routers][calico-external]. This isn't an issue since most communication between your applications stays within the cluster, as described above. Allowing external traffic into the cluster is generally accomplished by mapping external load-balancers to specifically exposed services in the cluster. This mapping allows the kube-proxy process to route the external requests to the proper pods using the cluster's pod-network.
104 |
105 | In a manually configured network, it may be necessary to open a range of ports to outside clients (default 30000-32767) for use with "external services". See the [Kubernetes Service][kube-service] documentation for more information on external services.
106 |
107 | [calico-external]: https://github.com/projectcalico/calico-containers/blob/v0.19.0/docs/ExternalConnectivity.md
108 | [kube-service]: http://kubernetes.io/docs/user-guide/services/#publishing-services---service-types
109 |
--------------------------------------------------------------------------------
/Documentation/kubernetes-on-baremetal.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Installation on Bare Metal & CoreOS
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This guide walks a deployer through launching a multi-node Kubernetes cluster on bare metal servers running CoreOS. After completing this guide, a deployer will be able to interact with the Kubernetes API from their workstation using the `kubectl` CLI tool.
10 |
11 | ## Deployment requirements
12 |
13 | ### CoreOS version
14 |
15 | All Kubernetes controllers and nodes must use CoreOS version 962.0.0 or greater for the `kubelet-wrapper` script to be present in the image. If you wish to use an earlier version (e.g. from the 'stable' channel) see [kubelet-wrapper](kubelet-wrapper.md) for more information.
16 |
17 | ### Kubernetes pod network
18 |
19 | This configuration uses the [flannel][coreos-flannel] overlay network to manage the [pod network][pod-network]. Many bare metal configurations may instead have an existing self-managed network. In this scenario, it is common to use [Calico][calico-networking] to manage pod network policy while omitting the overlay network, and interoperating with existing physical network gear over BGP.
20 |
21 | See the [Kubernetes networking](kubernetes-networking.md) documentation for more information on self-managed networking options.
22 |
23 | [coreos-flannel]: https://coreos.com/flannel/docs/latest/flannel-config.html
24 | [calico-networking]: https://github.com/projectcalico/calico-containers
25 | [pod-network]: https://github.com/kubernetes/kubernetes/blob/release-1.4/docs/design/networking.md#pod-to-pod
26 |
27 | ## Provisioning
28 |
29 | The CoreOS [Matchbox][matchbox-gh] project can automate network booting and provisioning of Container Linux clusters. It provides:
30 |
31 | * The Matchbox HTTP/gRPC service, which matches machines to configs by hardware attributes and can be installed as a binary, RPM, or container image, or deployed on Kubernetes itself.
32 | * Guides for creating network boot environments with iPXE/GRUB
33 | * Support for Terraform to allow teams to manage and version bare-metal resources
34 | * Example clusters including an [etcd cluster][etcd-cluster-example] and multi-node [Kubernetes cluster][kubernetes-cluster-example].
35 |
36 | [Get started][matchbox-intro-doc] provisioning machines into clusters or read the [docs][matchbox-docs].
37 |
38 | Container Linux bare metal installation documents provide low-level background details about the boot mechanisms:
39 |
40 | * [Booting with iPXE][coreos-ipxe]
41 | * [Booting with PXE][coreos-pxe]
42 | * [Installing to Disk][coreos-ondisk]
43 |
44 | Mixing multiple methods is possible. For example, you might install to disk on the machines running the etcd cluster and Kubernetes master nodes, but PXE-boot the worker machines.
45 |
46 | [coreos-ipxe]: https://coreos.com/os/docs/latest/booting-with-ipxe.html
47 | [coreos-pxe]: https://coreos.com/os/docs/latest/booting-with-pxe.html
48 | [coreos-ondisk]: https://coreos.com/os/docs/latest/installing-to-disk.html
50 | [matchbox-gh]: https://github.com/coreos/matchbox
51 | [matchbox-docs]: https://coreos.com/matchbox/docs/latest/
52 | [matchbox-intro-doc]: https://coreos.com/matchbox/docs/latest/getting-started.html
53 | [etcd-cluster-example]: https://github.com/coreos/matchbox/blob/master/Documentation/getting-started-rkt.md
54 | [kubernetes-cluster-example]: https://coreos.com/matchbox/docs/latest/terraform/bootkube-install/README.html
55 |
56 |
57 |
Did you install CoreOS on your machines? An SSH connection to each machine is all that's needed. We'll start the configuration next.
58 |
I'm ready to get started
59 |
60 |
--------------------------------------------------------------------------------
/Documentation/kubernetes-on-generic-platforms.md:
--------------------------------------------------------------------------------
1 | # Kubernetes on CoreOS with Generic Install Scripts
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This guide will set up Kubernetes on CoreOS in a similar way to the other tools in this repo. The main goal of these scripts is to be generic and work on many different cloud providers or platforms. The notable difference is that these scripts are intended to be platform agnostic and thus don't automatically set up the TLS assets on each host beforehand.
10 |
11 | While we provide these scripts and test them through the multi-node Vagrant setup, we recommend using a platform specific install method if available. If you are installing to bare-metal, you might find our [baremetal repo](https://github.com/coreos/coreos-baremetal) more appropriate.
12 |
13 | ## Generate TLS Assets
14 |
15 | Review the [OpenSSL-based TLS instructions][openssl] for generating your TLS assets for each of the Kubernetes nodes.
16 |
17 | Place the files in the following locations:
18 |
19 | | Controller Files | Location |
20 | |------------------|----------|
21 | | API Certificate | `/etc/kubernetes/ssl/apiserver.pem` |
22 | | API Private Key | `/etc/kubernetes/ssl/apiserver-key.pem` |
23 | | CA Certificate | `/etc/kubernetes/ssl/ca.pem` |
24 |
25 | | Worker Files | Location |
26 | |------------------|----------|
27 | | Worker Certificate | `/etc/kubernetes/ssl/worker.pem` |
28 | | Worker Private Key | `/etc/kubernetes/ssl/worker-key.pem` |
29 | | CA Certificate | `/etc/kubernetes/ssl/ca.pem` |
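One possible way to stage the controller assets over SSH is sketched below; the hostname is a placeholder and the file names assume the output of the OpenSSL guide. Workers are handled the same way with their per-node certificates.

```sh
# Copy the controller's TLS assets into place (hostname and file names are
# placeholders; adjust them to your environment).
ssh core@controller.example.com 'sudo mkdir -p /etc/kubernetes/ssl'
scp apiserver.pem apiserver-key.pem ca.pem core@controller.example.com:/tmp/
ssh core@controller.example.com \
  'sudo mv /tmp/apiserver.pem /tmp/apiserver-key.pem /tmp/ca.pem /etc/kubernetes/ssl/'
```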
30 |
31 | ## Network Requirements
32 |
33 | This cluster must adhere to the [Kubernetes networking model][networking]. Nodes created by the generic scripts, by default, listen on and identify themselves by the `ADVERTISE_IP` environment variable. If this isn't set, the scripts will source it from `/etc/environment`, specifically using the value of `COREOS_PUBLIC_IPV4`.
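To see which address a node would fall back to, you can inspect that file directly (a trivial check, shown only for orientation):

```sh
# The install scripts fall back to this value when ADVERTISE_IP is not set.
grep COREOS_PUBLIC_IPV4 /etc/environment
```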
34 |
35 | ### Controller Requirements
36 |
37 | Each controller node must set its `ADVERTISE_IP` to an IP that accepts connections on port 443 from the workers. If using a load balancer, it must accept connections on 443 and pass that to the pool of controllers.
38 |
39 | To view the complete list of environment variables, view the top of the `controller-install.sh` script.
40 |
41 | ### Worker Requirements
42 |
43 | In addition to identifying itself with `ADVERTISE_IP`, each worker must be configured with the `CONTROLLER_ENDPOINT` variable, which tells them where to contact the Kubernetes API. For a single controller, this is the `ADVERTISE_IP` mentioned above. For multiple controllers, this is the IP of the load balancer.
44 |
45 | To view the complete list of environment variables, view the top of the `worker-install.sh` script.
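As an illustration, a worker install might be kicked off as shown below. The values are placeholders, and the etcd endpoint variable is included here as an assumption; check the top of `worker-install.sh` for the authoritative list of variables it reads.

```sh
# Hypothetical worker bootstrap; replace all addresses with your own values.
sudo env ADVERTISE_IP=192.0.2.21 \
         CONTROLLER_ENDPOINT=https://192.0.2.10 \
         ETCD_ENDPOINTS=https://192.0.2.50:2379 \
         bash worker-install.sh
```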
46 |
47 | ## Optional Configuration
48 |
49 | You may modify the kubelet's unit file to use [additional features][rkt-opts-examples] such as:
50 |
51 | - [mounting ephemeral disks][mount-disks]
52 | - [allowing pods to mount RBD][rdb] or [iSCSI volumes][iscsi]
53 | - [allowing access to insecure container registries][insecure-registry]
54 | - [using the host's DNS configuration instead of a public DNS server][host-dns]
55 | - [enabling the cluster logging add-on][cluster-logging]
56 | - [changing your CoreOS auto-update settings][update]
57 |
58 | ## Boot etcd Cluster
59 |
60 | It is highly recommended that etcd is run as a dedicated cluster separately from Kubernetes components.
61 |
62 | Use the [official etcd clustering guide](https://coreos.com/etcd/docs/latest/docker_guide.html) to decide how best to deploy etcd into your environment.
63 |
64 | ## Boot Controllers
65 |
66 | Follow these instructions for each controller you wish to boot:
67 |
68 | 1. Boot CoreOS
69 | 1. [Download][controller-script] and copy `controller-install.sh` onto disk.
70 | 1. Copy TLS assets onto disk.
71 | 1. Execute `controller-install.sh` with environment variables set.
72 | 1. Wait for the script to complete. About 300 MB of containers will be downloaded before the cluster is running.
73 |
74 | ## Boot Workers
75 |
76 | Follow these instructions for each worker you wish to boot:
77 |
78 | 1. Boot CoreOS
79 | 1. [Download][worker-script] and copy `worker-install.sh` onto disk.
80 | 1. Copy TLS assets onto disk.
81 | 1. Execute `worker-install.sh` with environment variables set.
82 | 1. Wait for the script to complete. About 300 MB of containers will be downloaded before the cluster is running.
83 |
84 | ## Monitor Progress
85 |
86 | The Kubernetes cluster will be up and running after the scripts complete and the containers are downloaded. To take a closer look, SSH to one of the machines and monitor the container downloads:
87 |
88 | ```
89 | $ docker ps
90 | ```
91 |
92 | You can also watch the kubelet's logs with journalctl:
93 |
94 | ```
95 | $ journalctl -u kubelet -f
96 | ```
97 |
98 |
102 |
103 | [openssl]: openssl.md
104 | [networking]: kubernetes-networking.md
105 | [rkt-opts-examples]: kubelet-wrapper.md#customizing-rkt-options
106 | [rdb]: kubelet-wrapper.md#allow-pods-to-use-rbd-volumes
107 | [iscsi]: kubelet-wrapper.md#allow-pods-to-use-iscsi-mounts
108 | [host-dns]: kubelet-wrapper.md#use-the-hosts-dns-configuration
109 | [cluster-logging]: kubelet-wrapper.md#use-the-cluster-logging-add-on
110 | [mount-disks]: https://coreos.com/os/docs/latest/mounting-storage.html
111 | [insecure-registry]: https://coreos.com/os/docs/latest/registry-authentication.html#using-a-registry-without-ssl-configured
112 | [update]: https://coreos.com/os/docs/latest/switching-channels.html
113 | [controller-script]: https://github.com/coreos/coreos-kubernetes/blob/master/multi-node/generic/controller-install.sh
114 | [worker-script]: https://github.com/coreos/coreos-kubernetes/blob/master/multi-node/generic/worker-install.sh
115 |
--------------------------------------------------------------------------------
/Documentation/kubernetes-on-vagrant-single.md:
--------------------------------------------------------------------------------
1 | # Single-Node Kubernetes Installation with Vagrant & CoreOS
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | While Kubernetes is designed to run across large clusters, it can be useful to have Kubernetes available on a single machine.
10 | This guide walks a deployer through this process using Vagrant and CoreOS.
11 | After completing this guide, a deployer will be able to interact with the Kubernetes API from their workstation using the kubectl CLI tool.
12 |
13 | ## Install Prerequisites
14 |
15 | ### Vagrant
16 |
17 | Navigate to the [Vagrant downloads page][vagrant-downloads] and grab the appropriate package for your system. Install the downloaded software before continuing.
18 |
19 | [vagrant-downloads]: https://www.vagrantup.com/downloads.html
20 |
21 | ### kubectl
22 |
23 | `kubectl` is the main program for interacting with the Kubernetes API. Download `kubectl` from the Kubernetes release artifact site with the `curl` tool.
24 |
25 | The linux `kubectl` binary can be fetched with a command like:
26 |
27 | ```sh
28 | $ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.4/bin/linux/amd64/kubectl
29 | ```
30 |
31 | On an OS X workstation, replace `linux` in the URL above with `darwin`:
32 |
33 | ```sh
34 | $ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.4/bin/darwin/amd64/kubectl
35 | ```
36 |
37 | After downloading the binary, ensure it is executable and move it into your PATH:
38 |
39 | ```sh
40 | $ chmod +x kubectl
41 | $ sudo mv kubectl /usr/local/bin/kubectl
42 | ```
43 |
44 | ## Clone the Repository
45 |
46 | The following commands will clone a repository that contains a "Vagrantfile", which describes the set of virtual machines that will run Kubernetes on top of CoreOS.
47 |
48 | ```sh
49 | $ git clone https://github.com/coreos/coreos-kubernetes.git
50 | $ cd coreos-kubernetes/single-node/
51 | ```
52 |
53 | ## Choose Container Runtime (optional)
54 |
55 | The runtime defaults to docker. If you wish to use rkt, edit the user-data file and change the line beginning with `export CONTAINER_RUNTIME` to:
56 |
57 | `export CONTAINER_RUNTIME=rkt`
58 |
59 | ## Enable Network Policy (Optional)
60 |
61 | To enable network policy, edit the user-data file and set `USE_CALICO=true`.
62 |
63 | ## Start the Machine
64 |
65 | Ensure the latest CoreOS vagrant image will be used by running `vagrant box update`.
66 |
67 | Simply run `vagrant up` and wait for the command to succeed.
68 | Once Vagrant is finished booting and provisioning your machine, your cluster is good to go.
69 |
70 | ## Configure kubectl
71 |
72 | Once in the `coreos-kubernetes/single-node/` directory, configure your local Kubernetes client.
73 | 
74 | Choose one of the two following options.
75 |
76 | 1. **Use a custom KUBECONFIG path**
77 |
78 | ```sh
79 | $ export KUBECONFIG="${KUBECONFIG}:$(pwd)/kubeconfig"
80 | $ kubectl config use-context vagrant-single
81 | ```
82 |
83 | 1. **Update the local-user kubeconfig**
84 |
85 | ```sh
86 | $ kubectl config set-cluster vagrant-single-cluster --server=https://172.17.4.99:443 --certificate-authority=${PWD}/ssl/ca.pem
87 | $ kubectl config set-credentials vagrant-single-admin --certificate-authority=${PWD}/ssl/ca.pem --client-key=${PWD}/ssl/admin-key.pem --client-certificate=${PWD}/ssl/admin.pem
88 | $ kubectl config set-context vagrant-single --cluster=vagrant-single-cluster --user=vagrant-single-admin
89 | $ kubectl config use-context vagrant-single
90 | ```
91 |
92 | Check that your client is configured properly by using `kubectl` to inspect your cluster:
93 |
94 | ```sh
95 | $ kubectl get nodes
96 | NAME LABELS STATUS
97 | 172.17.4.99 kubernetes.io/hostname=172.17.4.99 Ready
98 | ```
99 |
100 | **NOTE:** When the cluster is first launched, it must download all container images for the cluster components (Kubernetes, dns, heapster, etc.). Depending on the speed of your connection, it can take a few minutes before the Kubernetes api-server is available. Before the api-server is running, the kubectl command above may show output similar to:
101 |
102 | `The connection to the server 172.17.4.99:443 was refused - did you specify the right host or port?`
103 |
104 |
105 |
Is kubectl working correctly?
106 |
Now that you've got a working Kubernetes cluster with a functional CLI tool, you are free to deploy Kubernetes-ready applications.
107 | Start with a multi-tier web application from the official Kubernetes documentation to visualize how the various Kubernetes components fit together.
108 |
View the Guestbook example app
109 |
110 |
--------------------------------------------------------------------------------
/Documentation/kubernetes-on-vagrant.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Installation with Vagrant & CoreOS
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This guide walks a deployer through launching a multi-node Kubernetes cluster using Vagrant and CoreOS.
10 | After completing this guide, a deployer will be able to interact with the Kubernetes API from their workstation using the kubectl CLI tool.
11 |
12 | ## Install Prerequisites
13 |
14 | ### Vagrant
15 |
16 | Navigate to the [Vagrant downloads page][vagrant-downloads] and grab the appropriate package for your system. Install the Vagrant software before continuing.
17 |
18 | [vagrant-downloads]: https://www.vagrantup.com/downloads.html
19 |
20 | ### kubectl
21 |
22 | `kubectl` is the main program for interacting with the Kubernetes API. Download `kubectl` from the Kubernetes release artifact site with the `curl` tool.
23 |
24 | The linux `kubectl` binary can be fetched with a command like:
25 |
26 | ```sh
27 | $ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.4/bin/linux/amd64/kubectl
28 | ```
29 |
30 | On an OS X workstation, replace `linux` in the URL above with `darwin`:
31 |
32 | ```sh
33 | $ curl -O https://storage.googleapis.com/kubernetes-release/release/v1.5.4/bin/darwin/amd64/kubectl
34 | ```
35 |
36 | After downloading the binary, ensure it is executable and move it into your PATH:
37 |
38 | ```sh
39 | $ chmod +x kubectl
40 | $ sudo mv kubectl /usr/local/bin/kubectl
41 | ```
42 |
43 | ## Clone the Repository
44 |
45 | The following commands will clone a repository that contains a "Vagrantfile", which describes the set of virtual machines that will run Kubernetes on top of CoreOS.
46 |
47 | ```sh
48 | $ git clone https://github.com/coreos/coreos-kubernetes.git
49 | $ cd coreos-kubernetes/multi-node/vagrant
50 | ```
51 |
52 | ## Choose Container Runtime (Optional)
53 |
54 | The runtime defaults to docker. To use rkt instead, edit the following files:
55 |
56 | ```
57 | ../generic/controller-install.sh
58 | ../generic/worker-install.sh
59 | ```
60 |
61 | And change the line beginning with `export CONTAINER_RUNTIME` to:
62 |
63 | `export CONTAINER_RUNTIME=rkt`
64 |
65 | ## Enable Network Policy (Optional)
66 |
67 | To enable network policy, edit the following files:
68 |
69 | ```
70 | ../generic/controller-install.sh
71 | ../generic/worker-install.sh
72 | ```
73 |
74 | And set `USE_CALICO=true`.
75 |
76 | ## Start the Machines
77 |
78 | The default cluster configuration is to start a virtual machine for each role — master node, worker node, and etcd server. However, you can modify the default cluster settings by copying `config.rb.sample` to `config.rb` and modifying configuration values.
79 |
80 | ```
81 | #$update_channel="alpha"
82 |
83 | #$controller_count=1
84 | #$controller_vm_memory=1024
85 |
86 | #$worker_count=1
87 | #$worker_vm_memory=1024
88 |
89 | #$etcd_count=1
90 | #$etcd_vm_memory=512
91 | ```
92 |
93 | Ensure the latest CoreOS vagrant image will be used by running `vagrant box update`.
94 |
95 | Then run `vagrant up` and wait for Vagrant to provision and boot the virtual machines.
96 |
97 | ## Configure kubectl
98 |
99 | Choose one of the two following ways to configure `kubectl` to connect to the new cluster:
100 |
101 | ### Use a custom KUBECONFIG path
102 |
103 | ```sh
104 | $ export KUBECONFIG="${KUBECONFIG}:$(pwd)/kubeconfig"
105 | $ kubectl config use-context vagrant-multi
106 | ```
107 |
108 | ### Update the local-user kubeconfig
109 |
110 | Configure your local Kubernetes client using the following commands:
111 |
112 | ```sh
113 | $ kubectl config set-cluster vagrant-multi-cluster --server=https://172.17.4.101:443 --certificate-authority=${PWD}/ssl/ca.pem
114 | $ kubectl config set-credentials vagrant-multi-admin --certificate-authority=${PWD}/ssl/ca.pem --client-key=${PWD}/ssl/admin-key.pem --client-certificate=${PWD}/ssl/admin.pem
115 | $ kubectl config set-context vagrant-multi --cluster=vagrant-multi-cluster --user=vagrant-multi-admin
116 | $ kubectl config use-context vagrant-multi
117 | ```
118 |
119 | Check that `kubectl` is configured properly by inspecting the cluster:
120 |
121 | ```sh
122 | $ kubectl get nodes
123 | NAME LABELS STATUS
124 | 172.17.4.201 kubernetes.io/hostname=172.17.4.201 Ready
125 | ```
126 |
127 | **NOTE:** When the cluster is first launched, it must download all container images for the cluster components (Kubernetes, dns, heapster, etc.). Depending on the speed of your connection, it can take a few minutes before the Kubernetes api-server is available. Before the api-server is running, the kubectl command above may show output similar to:
128 |
129 | `The connection to the server 172.17.4.101:443 was refused - did you specify the right host or port?`
130 |
131 |
132 |
Is kubectl working correctly?
133 |
Now that you've got a working Kubernetes cluster with a functional CLI tool, you are free to deploy Kubernetes-ready applications.
134 | Start with a multi-tier web application from the official Kubernetes documentation to visualize how the various Kubernetes components fit together.
135 |
View the Guestbook example app
136 |
137 |
--------------------------------------------------------------------------------
/Documentation/kubernetes-upgrade.md:
--------------------------------------------------------------------------------
1 | # Upgrading Kubernetes
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This document describes upgrading the Kubernetes components on a cluster's master and worker nodes. For general information on Kubernetes cluster management and upgrades (including more advanced topics such as major API version upgrades), see the [Kubernetes upstream documentation](https://kubernetes.io/docs/admin/cluster-management/) and the [version upgrade notes](https://github.com/kubernetes/kubernetes/blob/release-1.4/docs/design/versioning.md#upgrades).
10 |
11 | **NOTE:** The following upgrade documentation is for installations based on the CoreOS + Kubernetes step-by-step [installation guide](https://coreos.com/kubernetes/docs/latest/getting-started.html).
12 |
13 | ## Upgrading the Kubelet
14 |
15 | The Kubelet runs on both master and worker nodes, and is distributed as a hyperkube container image. The image version is usually set as an environment variable in the `kubelet.service` file, which is then passed to the [kubelet-wrapper](kubelet-wrapper.md) script.
16 |
17 | To update the image version, modify the kubelet service file on each node (`/etc/systemd/system/kubelet.service`) to reference the new hyperkube image.
18 |
19 | For example, modifying the `KUBELET_IMAGE_TAG` environment variable in the following service file would change the container image version used when launching the kubelet via the [kubelet-wrapper](kubelet-wrapper.md) script.
20 |
21 | **/etc/systemd/system/kubelet.service**
22 |
23 | ```
24 | Environment=KUBELET_IMAGE_TAG=v1.5.4_coreos.0
25 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
26 | --api-servers=https://master [...]
27 | ```
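After editing the unit, reload systemd and restart the kubelet so the new image tag takes effect; the kubelet-wrapper will fetch the new hyperkube image on the next start.

```sh
sudo systemctl daemon-reload
sudo systemctl restart kubelet
```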
28 |
29 | ## Upgrading Calico
30 |
31 | The Calico agent runs on both master and worker nodes, and is distributed as a container image. It runs self-hosted under Kubernetes.
32 | 
33 | To upgrade Calico, follow the documentation [here](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/upgrade).
34 | 
35 | **Note:** If you are running Calico as a systemd service, you will first need to change to a self-hosted install by following [this guide](https://coreos.com/kubernetes/docs/latest/deploy-master.html).
36 |
37 | ## Upgrading Master Nodes
38 |
39 | Master nodes consist of the following Kubernetes components:
40 |
41 | * kube-proxy
42 | * kube-apiserver
43 | * kube-controller-manager
44 | * kube-scheduler
45 | * policy-controller
46 |
47 | While upgrading the master components, user pods on worker nodes will continue to run normally.
48 |
49 | ### Upgrading Master Node Components
50 |
51 | The master node components (kube-controller-manager, kube-scheduler, kube-apiserver, and kube-proxy) are run as "static pods". This means the pod definition is a file on disk (default location: `/etc/kubernetes/manifests`). To update these components, you simply need to update the static manifest file. When the manifest changes on disk, the kubelet will pick up the changes and restart the local pod.
52 |
53 | For example, to upgrade the kube-apiserver version you could update the pod image tag in `/etc/kubernetes/manifests/kube-apiserver.yaml`:
54 |
55 | From: `image: quay.io/coreos/hyperkube:v1.0.6_coreos.0`
56 |
57 | To: `image: quay.io/coreos/hyperkube:v1.0.7_coreos.0`
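For example, that change could be applied in place with `sed`; the version tags shown are the ones from this example, so substitute your own.

```sh
# Bump the apiserver image tag in the static manifest; the kubelet will notice
# the change on disk and restart the pod with the new image.
sudo sed -i 's|hyperkube:v1.0.6_coreos.0|hyperkube:v1.0.7_coreos.0|' \
  /etc/kubernetes/manifests/kube-apiserver.yaml
```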
58 |
59 | In high-availability deployments, the control-plane components (apiserver, scheduler, and controller-manager) are deployed to all master nodes. Upgrading these components requires updating them on each master node.
60 |
61 | **NOTE:** Because a particular master node may not be elected to run a particular component (e.g. kube-scheduler), updating the local manifest may not update the currently active instance of the Pod. You should update the manifests on all master nodes to ensure that no matter which is active, all will reflect the updated manifest.
62 |
63 | ### Upgrading Worker Nodes
64 |
65 | Worker nodes consist of the following Kubernetes components:
66 |
67 | * kube-proxy
68 |
69 | ### Upgrading the kube-proxy
70 |
71 | The kube-proxy is run as a "static pod". To upgrade the pod definition, simply modify the pod manifest located in `/etc/kubernetes/manifests/kube-proxy.yaml`. The kubelet will pick up the changes and re-launch the kube-proxy pod.
72 |
73 | ## Example Upgrade Process
74 |
75 | 1. Prepare new pod manifests for master nodes
76 | 1. Prepare new pod manifests for worker nodes
77 | 1. For each master node:
78 | 1. Back up existing manifests
79 | 1. Update manifests
80 | 1. Repeat item 3 for each worker node
81 |
--------------------------------------------------------------------------------
/Documentation/openssl.md:
--------------------------------------------------------------------------------
1 | # Cluster TLS using OpenSSL
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This guide will walk you through generating Kubernetes TLS assets using OpenSSL.
10 |
11 | This is provided as a proof-of-concept guide to get started with Kubernetes client certificate authentication.
12 |
13 | ## Deployment Options
14 |
15 | The following variables will be used throughout this guide. The default for `K8S_SERVICE_IP` can safely be used; however, `MASTER_HOST` will need to be customized to your infrastructure.
16 |
17 | **MASTER_HOST**=_no default_
18 |
19 | The address of the master node. In most cases this will be the publicly routable IP or hostname of the node. Worker nodes must be able to reach the master node(s) via this address on port 443. Additionally, external clients (such as an administrator using `kubectl`) will also need access, since this will run the Kubernetes API endpoint.
20 |
21 | If you will be running a highly-available control-plane consisting of multiple master nodes, then `MASTER_HOST` will ideally be a network load balancer that sits in front of them. Alternatively, a DNS name can be configured to resolve to the master IPs. In either case, the certificates generated below must have the appropriate Common Name and/or Subject Alternative Names.
22 |
23 | ---
24 |
25 | **K8S_SERVICE_IP**=10.3.0.1
26 |
27 | The IP address of the Kubernetes API Service. The `K8S_SERVICE_IP` will be the first IP in the `SERVICE_IP_RANGE` discussed in the [deployment guide][deployment-guide]. The first IP in the default range of 10.3.0.0/24 will be 10.3.0.1. If the SERVICE_IP_RANGE was changed from the default, this value must be updated as well.
28 |
29 | ---
30 |
31 | **WORKER_IP**=_no default_
32 |
33 | **WORKER_FQDN**=_no default_
34 |
35 | The IP addresses and fully qualified hostnames of all worker nodes will be needed. The certificates generated for the worker nodes will need to reflect how requests will be routed to those nodes. In most cases this will be a routable IP and/or a routable hostname. These will be unique per worker; when you see them used below, consider it a loop and do that step for _each_ worker.
36 |
37 | ## Create a Cluster Root CA
38 |
39 | First, we need to create a new certificate authority which will be used to sign the rest of our certificates.
40 |
41 | ```sh
42 | $ openssl genrsa -out ca-key.pem 2048
43 | $ openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca"
44 | ```
45 |
46 | **You need to store the CA keypair in a secure location for future use.**
47 |
48 | ## Kubernetes API Server Keypair
49 |
50 | ### OpenSSL Config
51 |
52 | This is a minimal openssl config which will be used when creating the api-server certificate. We need to create a configuration file since some of the options we need to use can't be specified as flags. Create `openssl.cnf` on your local machine and replace the following values:
53 |
54 | * Replace `${K8S_SERVICE_IP}`
55 | * Replace `${MASTER_HOST}`
56 |
57 | **openssl.cnf**
58 |
59 | ```
60 | [req]
61 | req_extensions = v3_req
62 | distinguished_name = req_distinguished_name
63 | [req_distinguished_name]
64 | [ v3_req ]
65 | basicConstraints = CA:FALSE
66 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment
67 | subjectAltName = @alt_names
68 | [alt_names]
69 | DNS.1 = kubernetes
70 | DNS.2 = kubernetes.default
71 | DNS.3 = kubernetes.default.svc
72 | DNS.4 = kubernetes.default.svc.cluster.local
73 | IP.1 = ${K8S_SERVICE_IP}
74 | IP.2 = ${MASTER_HOST}
75 | ```
76 |
77 | If deploying multiple master nodes in an HA configuration, you may need to add more TLS `subjectAltName`s (SANs). Proper configuration of SANs in each certificate depends on how worker nodes and `kubectl` users contact the master nodes: directly by IP address, via load balancer, or by resolving a DNS name.
78 |
79 | Example:
80 |
81 | ```
82 | DNS.5 = ${MASTER_DNS_NAME}
83 | IP.3 = ${MASTER_IP}
84 | IP.4 = ${MASTER_LOADBALANCER_IP}
85 | ```
86 |
87 | ### Generate the API Server Keypair
88 |
89 | Using the above `openssl.cnf`, create the api-server keypair:
90 |
91 | ```sh
92 | $ openssl genrsa -out apiserver-key.pem 2048
93 | $ openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config openssl.cnf
94 | $ openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile openssl.cnf
95 | ```
96 |
97 | ## Kubernetes Worker Keypairs
98 |
99 | This procedure generates a unique TLS certificate for every Kubernetes worker node in your cluster. While unique certificates are less convenient to generate and deploy, they do provide stronger security assurances and the most portable installation experience across multiple cloud-based and on-premises Kubernetes deployments.
100 |
101 | ### OpenSSL Config
102 |
103 | We will use a common openssl configuration file for all workers. The certificate output will be customized per worker based on environment variables used in conjunction with the configuration file. Create the file `worker-openssl.cnf` on your local machine with the following contents.
104 |
105 | **worker-openssl.cnf**
106 |
107 | ```
108 | [req]
109 | req_extensions = v3_req
110 | distinguished_name = req_distinguished_name
111 | [req_distinguished_name]
112 | [ v3_req ]
113 | basicConstraints = CA:FALSE
114 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment
115 | subjectAltName = @alt_names
116 | [alt_names]
117 | IP.1 = $ENV::WORKER_IP
118 | ```
119 |
120 | ### Generate the Kubernetes Worker Keypairs
121 |
122 | Run the following set of commands once for every worker node in the planned cluster. Replace `WORKER_FQDN` and `WORKER_IP` in the following commands with the correct values for each node. If the node does not have a routable hostname, set `WORKER_FQDN` to a unique, per-node placeholder name like `kube-worker-1`, `kube-worker-2` and so on.
123 |
124 | ```sh
125 | $ openssl genrsa -out ${WORKER_FQDN}-worker-key.pem 2048
126 | $ WORKER_IP=${WORKER_IP} openssl req -new -key ${WORKER_FQDN}-worker-key.pem -out ${WORKER_FQDN}-worker.csr -subj "/CN=${WORKER_FQDN}" -config worker-openssl.cnf
127 | $ WORKER_IP=${WORKER_IP} openssl x509 -req -in ${WORKER_FQDN}-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${WORKER_FQDN}-worker.pem -days 365 -extensions v3_req -extfile worker-openssl.cnf
128 | ```
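If you are generating certificates for several workers, the three commands above can be wrapped in a small loop. The hostname/IP pairs below are placeholders for your own nodes:

```sh
# Generate a keypair for each worker; replace the name=IP pairs with your nodes.
for entry in "kube-worker-1=10.0.0.11" "kube-worker-2=10.0.0.12"; do
  WORKER_FQDN="${entry%%=*}"
  WORKER_IP="${entry##*=}"
  openssl genrsa -out ${WORKER_FQDN}-worker-key.pem 2048
  WORKER_IP=${WORKER_IP} openssl req -new -key ${WORKER_FQDN}-worker-key.pem \
    -out ${WORKER_FQDN}-worker.csr -subj "/CN=${WORKER_FQDN}" -config worker-openssl.cnf
  WORKER_IP=${WORKER_IP} openssl x509 -req -in ${WORKER_FQDN}-worker.csr -CA ca.pem \
    -CAkey ca-key.pem -CAcreateserial -out ${WORKER_FQDN}-worker.pem -days 365 \
    -extensions v3_req -extfile worker-openssl.cnf
done
```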
129 |
130 | ## Generate the Cluster Administrator Keypair
131 |
132 | ```sh
133 | $ openssl genrsa -out admin-key.pem 2048
134 | $ openssl req -new -key admin-key.pem -out admin.csr -subj "/CN=kube-admin"
135 | $ openssl x509 -req -in admin.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin.pem -days 365
136 | ```
137 |
138 | You are now ready to return to the [deployment guide][deployment-guide] and configure your master node(s), worker nodes, and `kubectl` on your local machine.
139 |
140 | [deployment-guide]: getting-started.md
141 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/MAINTAINERS:
--------------------------------------------------------------------------------
1 |
2 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
3 |
4 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
5 |
6 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | CoreOS Project
2 | Copyright 2014 CoreOS, Inc
3 |
4 | This product includes software developed at CoreOS, Inc.
5 | (http://www.coreos.com/).
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes on CoreOS Container Linux
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This repo contains tooling and documentation around deploying Kubernetes using CoreOS Container Linux.
10 | Initial setup of a Kubernetes cluster is covered, but ongoing maintenance and updates of the cluster are not addressed.
11 |
12 | *Notice: kube-aws has moved!*
13 |
14 | If you're looking for kube-aws, it has been moved to a new [dedicated repository](https://github.com/coreos/kube-aws). All outstanding AWS-related issues and PRs should be moved there. This repository will continue to host development on the single- and multi-node Vagrant distributions.
15 |
16 | ## The CoreOS Way
17 |
18 | When designing these guides and tools, the following considerations apply:
19 |
20 | * We always set up TLS
21 | * An individual node can reboot and the cluster will still function
22 | * Internal cluster DNS is available
23 | * Service accounts are enabled
24 | * Follow Kubernetes guidelines for AdmissionControllers and other suggested configuration
25 |
26 | ## Kubernetes Topics
27 |
28 | Follow the Kubernetes guides on the CoreOS website:
29 |
30 | https://coreos.com/kubernetes/docs/latest/
31 |
32 | - [Intro to Pods](https://coreos.com/kubernetes/docs/latest/pods.html)
33 | - [Intro to Services](https://coreos.com/kubernetes/docs/latest/services.html)
34 | - [Intro to Replication Controllers](https://coreos.com/kubernetes/docs/latest/replication-controller.html)
35 |
36 | ## Deploying on Container Linux
37 |
38 | - [Step-by-Step for Any Platform](Documentation/getting-started.md)
39 | - [Single-Node Vagrant Stack](single-node/README.md)
40 | - [Multi-Node Vagrant Cluster](multi-node/vagrant/README.md)
41 | - [Multi-Node Bare Metal Cluster](Documentation/kubernetes-on-baremetal.md)
42 |
43 | ## Running Kubernetes Conformance Tests
44 |
45 | - [Conformance Tests](Documentation/conformance-tests.md)
46 |
--------------------------------------------------------------------------------
/contrib/bump-version.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This script will go through each of the tracked files in this repo and update
4 | # the CURRENT_VERSION to the TARGET_VERSION. This is meant as a helper - you
5 | # should still double-check that the changes are correct.
6 |
7 | if [ $# -ne 1 ] || [ `expr $1 : ".*_.*"` == 0 ]; then
8 | echo "USAGE: $0 "
9 | echo " example: $0 'v1.5.4_coreos.0'"
10 | exit 1
11 | fi
12 |
13 | CURRENT_VERSION=${CURRENT_VERSION:-"v1.5.4_coreos.0"}
14 | TARGET_VERSION=${1}
15 |
16 | CURRENT_VERSION_BASE=${CURRENT_VERSION%%_*}
17 | TARGET_VERSION_BASE=${TARGET_VERSION%%_*}
18 |
19 | CURRENT_VERSION_SEMVER=${CURRENT_VERSION/_/+}
20 | TARGET_VERSION_SEMVER=${TARGET_VERSION/_/+}
21 |
22 | GIT_ROOT=$(git rev-parse --show-toplevel)
23 |
24 | cd $GIT_ROOT
25 | TRACKED=($(git grep -F "${CURRENT_VERSION_BASE}"| awk -F : '{print $1}' | sort -u))
26 | for i in "${TRACKED[@]}"; do
27 | echo Updating $i
28 | if [ "$(uname -s)" == "Darwin" ]; then
29 | sed -i "" "s/${CURRENT_VERSION}/${TARGET_VERSION}/g" $i
30 | sed -i "" "s/${CURRENT_VERSION_SEMVER}/${TARGET_VERSION_SEMVER}/g" $i
31 | sed -i "" "s/${CURRENT_VERSION_BASE}/${TARGET_VERSION_BASE}/g" $i
32 | else
33 | sed -i "s/${CURRENT_VERSION}/${TARGET_VERSION}/g" $i
34 | sed -i "s/${CURRENT_VERSION_SEMVER}/${TARGET_VERSION_SEMVER}/g" $i
35 | sed -i "s/${CURRENT_VERSION_BASE}/${TARGET_VERSION_BASE}/g" $i
36 | fi
37 | done
38 |
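A typical invocation of the helper above might look like the following; the target release shown is hypothetical, and the diff should be reviewed afterwards as the header comment advises:

```sh
# Bump every tracked reference from the current pinned release to a new one.
# CURRENT_VERSION defaults to v1.5.4_coreos.0 if not set explicitly.
CURRENT_VERSION="v1.5.4_coreos.0" ./contrib/bump-version.sh 'v1.6.1_coreos.0'

# Review the rewritten files before committing.
git diff
```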
--------------------------------------------------------------------------------
/contrib/conformance-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 |
4 | CHECK_NODE_COUNT=${CHECK_NODE_COUNT:-true}
5 | CONFORMANCE_REPO=${CONFORMANCE_REPO:-github.com/coreos/kubernetes}
6 | CONFORMANCE_VERSION=${CONFORMANCE_VERSION:-v1.5.4+coreos.0}
7 | SSH_OPTS=${SSH_OPTS:-}
8 |
9 | usage() {
10 | echo "USAGE:"
11 | echo " $0 "
12 | echo
13 | exit 1
14 | }
15 |
16 | if [ $# -ne 3 ]; then
17 | usage
18 | exit 1
19 | fi
20 |
21 | ssh_host=$1
22 | ssh_port=$2
23 | ssh_key=$3
24 |
25 | kubeconfig=$(cat < /home/core/kubeconfig"
40 |
41 | # Init steps necessary to run conformance in docker://golang:1.7.4 container
42 | INIT="apt-get update && apt-get install -y rsync && go get -u github.com/jteeuwen/go-bindata/go-bindata"
43 |
44 | TEST_FLAGS="-v --test -check_version_skew=false --test_args=\"ginkgo.focus='\[Conformance\]'\""
45 |
46 | CONFORMANCE=$(echo \
47 | "cd /go/src/k8s.io/kubernetes && " \
48 | "git checkout ${CONFORMANCE_VERSION} && " \
49 | "make all WHAT=cmd/kubectl && " \
50 | "make all WHAT=vendor/github.com/onsi/ginkgo/ginkgo && " \
51 | "make all WHAT=test/e2e/e2e.test && " \
52 | "KUBECONFIG=/kubeconfig KUBERNETES_PROVIDER=skeleton KUBERNETES_CONFORMANCE_TEST=Y go run hack/e2e.go ${TEST_FLAGS}")
53 |
54 | RKT_OPTS=$(echo \
55 | "--volume=kc,kind=host,source=/home/core/kubeconfig "\
56 | "--volume=k8s,kind=host,source=${K8S_SRC} " \
57 | "--mount volume=kc,target=/kubeconfig " \
58 | "--mount volume=k8s,target=/go/src/k8s.io/kubernetes")
59 |
60 | CMD="sudo rkt run --net=host --insecure-options=image ${RKT_OPTS} docker://golang:1.7.4 --exec /bin/bash -- -c \"${INIT} && ${CONFORMANCE}\""
61 |
62 | ssh ${SSH_OPTS} -i ${ssh_key} -p ${ssh_port} core@${ssh_host} "${CMD}"
63 |
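The wrapper scripts under single-node/ and multi-node/vagrant/ call the script above with SSH details pulled from Vagrant; a hand-driven run against an arbitrary controller might look like this sketch, where the host, port, and key path are placeholders:

```sh
# ssh host, ssh port, and ssh private key are positional arguments.
SSH_OPTS='-q -o StrictHostKeyChecking=no' \
  ./contrib/conformance-test.sh 192.0.2.10 22 ~/.ssh/id_rsa
```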
--------------------------------------------------------------------------------
/lib/init-ssl:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | # define location of openssl binary manually since running this
5 | # script under Vagrant fails on some systems without it
6 | OPENSSL=$(which openssl)
7 |
8 | function usage {
9 | echo "USAGE: $0 [SAN,SAN,SAN]"
10 | echo " example: $0 ./ssl/ worker kube-worker IP.1=127.0.0.1,IP.2=10.0.0.1"
11 | }
12 |
13 | if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
14 | usage
15 | exit 1
16 | fi
17 |
18 | OUTDIR="$1"
19 | CERTBASE="$2"
20 | CN="$3"
21 | SANS="$4"
22 |
23 | if [ ! -d $OUTDIR ]; then
24 | echo "ERROR: output directory does not exist: $OUTDIR"
25 | exit 1
26 | fi
27 |
28 | OUTFILE="$OUTDIR/$CN.tar"
29 |
30 | if [ -f "$OUTFILE" ];then
31 | exit 0
32 | fi
33 |
34 | CNF_TEMPLATE="
35 | [req]
36 | req_extensions = v3_req
37 | distinguished_name = req_distinguished_name
38 |
39 | [req_distinguished_name]
40 |
41 | [ v3_req ]
42 | basicConstraints = CA:FALSE
43 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment
44 | subjectAltName = @alt_names
45 |
46 | [alt_names]
47 | DNS.1 = kubernetes
48 | DNS.2 = kubernetes.default
49 | DNS.3 = kubernetes.default.svc
50 | DNS.4 = kubernetes.default.svc.cluster.local
51 | "
52 | echo "Generating SSL artifacts in $OUTDIR"
53 |
54 |
55 | CONFIGFILE="$OUTDIR/$CERTBASE-req.cnf"
56 | CAFILE="$OUTDIR/ca.pem"
57 | CAKEYFILE="$OUTDIR/ca-key.pem"
58 | KEYFILE="$OUTDIR/$CERTBASE-key.pem"
59 | CSRFILE="$OUTDIR/$CERTBASE.csr"
60 | PEMFILE="$OUTDIR/$CERTBASE.pem"
61 |
62 | CONTENTS="${CAFILE} ${KEYFILE} ${PEMFILE}"
63 |
64 |
65 | # Add SANs to openssl config
66 | echo "$CNF_TEMPLATE$(echo $SANS | tr ',' '\n')" > "$CONFIGFILE"
67 |
68 | $OPENSSL genrsa -out "$KEYFILE" 2048
69 | $OPENSSL req -new -key "$KEYFILE" -out "$CSRFILE" -subj "/CN=$CN" -config "$CONFIGFILE"
70 | $OPENSSL x509 -req -in "$CSRFILE" -CA "$CAFILE" -CAkey "$CAKEYFILE" -CAcreateserial -out "$PEMFILE" -days 365 -extensions v3_req -extfile "$CONFIGFILE"
71 |
72 | tar -cf $OUTFILE -C $OUTDIR $(for f in $CONTENTS;do printf "$(basename $f) ";done)
73 |
74 | echo "Bundled SSL artifacts into $OUTFILE"
75 | echo "$CONTENTS"
76 |
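A quick way to exercise the script above end to end, assuming a CA has first been generated into ./ssl with lib/init-ssl-ca (shown next); the certificate name and IPs mirror the usage example inside the script:

```sh
mkdir -p ./ssl
./lib/init-ssl-ca ./ssl
./lib/init-ssl ./ssl worker kube-worker IP.1=127.0.0.1,IP.2=10.0.0.1

# Confirm the SANs made it into the signed certificate
openssl x509 -in ./ssl/worker.pem -noout -text | grep -A1 'Subject Alternative Name'
```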
--------------------------------------------------------------------------------
/lib/init-ssl-ca:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | # define location of openssl binary manually since running this
5 | # script under Vagrant fails on some systems without it
6 | OPENSSL=$(which openssl)
7 |
8 | function usage {
9 | echo "USAGE: $0 "
10 | echo " example: $0 ./ssl/ca.pem"
11 | }
12 |
13 | if [ -z "$1" ]; then
14 | usage
15 | exit 1
16 | fi
17 |
18 | OUTDIR="$1"
19 |
20 | if [ ! -d $OUTDIR ]; then
21 | echo "ERROR: output directory does not exist: $OUTDIR"
22 | exit 1
23 | fi
24 |
25 | OUTFILE="$OUTDIR/ca.pem"
26 |
27 | if [ -f "$OUTFILE" ];then
28 | exit 0
29 | fi
30 |
31 | # establish cluster CA and self-sign a cert
32 | openssl genrsa -out "$OUTDIR/ca-key.pem" 2048
33 | openssl req -x509 -new -nodes -key "$OUTDIR/ca-key.pem" -days 10000 -out "$OUTFILE" -subj "/CN=kube-ca"
34 |
35 |
36 |
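To check what the self-signed CA above looks like, the generated ca.pem can be inspected with openssl; the CN=kube-ca subject and 10000-day lifetime come from the `openssl req` invocation in the script:

```sh
mkdir -p ./ssl
./lib/init-ssl-ca ./ssl
openssl x509 -in ./ssl/ca.pem -noout -subject -dates
```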
--------------------------------------------------------------------------------
/multi-node/aws/README.md:
--------------------------------------------------------------------------------
1 | # kube-aws has moved!
2 |
3 | https://github.com/coreos/kube-aws
4 |
--------------------------------------------------------------------------------
/multi-node/generic/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes on CoreOS with Generic Install Scripts
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
9 | This guide will set up Kubernetes on CoreOS in a similar way to the other tools in this repo. The main goal of these scripts is to be generic and work on many different cloud providers or platforms. The notable difference is that these scripts are intended to be platform agnostic and thus don't automatically set up the TLS assets on each host beforehand.
10 |
11 | [Read the documentation to boot a cluster][docs]
12 |
13 | [docs]: ../../Documentation/kubernetes-on-generic-platforms.md
14 |
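Because these scripts don't distribute TLS assets, each host needs its certificate bundle placed under /etc/kubernetes/ssl before the install script runs. A minimal sketch, assuming a bundle generated with lib/init-ssl on an admin machine and a hypothetical worker at 10.0.0.5:

```sh
# On the admin machine: generate a worker bundle and copy it to the host
./lib/init-ssl ./ssl worker kube-worker-10.0.0.5 IP.1=10.0.0.5
scp ./ssl/kube-worker-10.0.0.5.tar core@10.0.0.5:/tmp/ssl.tar

# On the host: unpack into the path the install scripts expect
sudo mkdir -p /etc/kubernetes/ssl
sudo tar -C /etc/kubernetes/ssl -xf /tmp/ssl.tar
```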
--------------------------------------------------------------------------------
/multi-node/generic/worker-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # List of etcd servers (http://ip:port), comma separated
5 | export ETCD_ENDPOINTS=
6 |
7 | # The endpoint the worker node should use to contact controller nodes (https://ip:port)
8 | # In HA configurations this should be an external DNS record or load balancer in front of the control nodes.
9 | # However, it is also possible to point directly to a single control node.
10 | export CONTROLLER_ENDPOINT=
11 |
12 | # Specify the version (vX.Y.Z) of Kubernetes assets to deploy
13 | export K8S_VER=v1.5.4_coreos.0
14 |
15 | # Hyperkube image repository to use.
16 | export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube
17 |
18 | # The CIDR network to use for pod IPs.
19 | # Each pod launched in the cluster will be assigned an IP out of this range.
20 | # Each node will be configured such that these IPs will be routable using the flannel overlay network.
21 | export POD_NETWORK=10.2.0.0/16
22 |
23 | # The IP address of the cluster DNS service.
24 | # This must be the same DNS_SERVICE_IP used when configuring the controller nodes.
25 | export DNS_SERVICE_IP=10.3.0.10
26 |
27 | # Whether to use Calico for Kubernetes network policy.
28 | export USE_CALICO=false
29 |
30 | # Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
31 | export CONTAINER_RUNTIME=docker
32 |
33 | # The above settings can optionally be overridden using an environment file:
34 | ENV_FILE=/run/coreos-kubernetes/options.env
35 |
36 | # To run a self hosted Calico install it needs to be able to write to the CNI dir
37 | if [ "${USE_CALICO}" = "true" ]; then
38 | export CALICO_OPTS="--volume cni-bin,kind=host,source=/opt/cni/bin \
39 | --mount volume=cni-bin,target=/opt/cni/bin"
40 | else
41 | export CALICO_OPTS=""
42 | fi
43 |
44 | # -------------
45 |
46 | function init_config {
47 | local REQUIRED=( 'ADVERTISE_IP' 'ETCD_ENDPOINTS' 'CONTROLLER_ENDPOINT' 'DNS_SERVICE_IP' 'K8S_VER' 'HYPERKUBE_IMAGE_REPO' 'USE_CALICO' )
48 |
49 | if [ -f $ENV_FILE ]; then
50 | export $(cat $ENV_FILE | xargs)
51 | fi
52 |
53 | if [ -z $ADVERTISE_IP ]; then
54 | export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment)
55 | fi
56 |
57 | for REQ in "${REQUIRED[@]}"; do
58 | if [ -z "$(eval echo \$$REQ)" ]; then
59 | echo "Missing required config value: ${REQ}"
60 | exit 1
61 | fi
62 | done
63 | }
64 |
65 | function init_templates {
66 | local TEMPLATE=/etc/systemd/system/kubelet.service
67 | local uuid_file="/var/run/kubelet-pod.uuid"
68 | if [ ! -f $TEMPLATE ]; then
69 | echo "TEMPLATE: $TEMPLATE"
70 | mkdir -p $(dirname $TEMPLATE)
71 | cat << EOF > $TEMPLATE
72 | [Service]
73 | Environment=KUBELET_IMAGE_TAG=${K8S_VER}
74 | Environment=KUBELET_IMAGE_URL=${HYPERKUBE_IMAGE_REPO}
75 | Environment="RKT_RUN_ARGS=--uuid-file-save=${uuid_file} \
76 | --volume dns,kind=host,source=/etc/resolv.conf \
77 | --mount volume=dns,target=/etc/resolv.conf \
78 | --volume rkt,kind=host,source=/opt/bin/host-rkt \
79 | --mount volume=rkt,target=/usr/bin/rkt \
80 | --volume var-lib-rkt,kind=host,source=/var/lib/rkt \
81 | --mount volume=var-lib-rkt,target=/var/lib/rkt \
82 | --volume stage,kind=host,source=/tmp \
83 | --mount volume=stage,target=/tmp \
84 | --volume var-log,kind=host,source=/var/log \
85 | --mount volume=var-log,target=/var/log \
86 | ${CALICO_OPTS}"
87 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
88 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers
89 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
90 | ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin
91 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
92 | --api-servers=${CONTROLLER_ENDPOINT} \
93 | --cni-conf-dir=/etc/kubernetes/cni/net.d \
94 | --network-plugin=cni \
95 | --container-runtime=${CONTAINER_RUNTIME} \
96 | --rkt-path=/usr/bin/rkt \
97 | --rkt-stage1-image=coreos.com/rkt/stage1-coreos \
98 | --register-node=true \
99 | --allow-privileged=true \
100 | --pod-manifest-path=/etc/kubernetes/manifests \
101 | --hostname-override=${ADVERTISE_IP} \
102 | --cluster_dns=${DNS_SERVICE_IP} \
103 | --cluster_domain=cluster.local \
104 | --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \
105 | --tls-cert-file=/etc/kubernetes/ssl/worker.pem \
106 | --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem
107 | ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file}
108 | Restart=always
109 | RestartSec=10
110 |
111 | [Install]
112 | WantedBy=multi-user.target
113 | EOF
114 | fi
115 |
116 | local TEMPLATE=/opt/bin/host-rkt
117 | if [ ! -f $TEMPLATE ]; then
118 | echo "TEMPLATE: $TEMPLATE"
119 | mkdir -p $(dirname $TEMPLATE)
120 | cat << EOF > $TEMPLATE
121 | #!/bin/sh
122 | # This is bind mounted into the kubelet rootfs and all rkt shell-outs go
123 | # through this rkt wrapper. It essentially enters the host mount namespace
124 | # (which it is already in) only for the purpose of breaking out of the chroot
125 | # before calling rkt. It makes things like rkt gc work and avoids bind mounting
126 | # in certain rkt filesystem dependencies into the kubelet rootfs. This can
127 | # eventually be obviated when the write-api stuff gets upstream and rkt gc is
128 | # through the api-server. Related issue:
129 | # https://github.com/coreos/rkt/issues/2878
130 | exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
131 | EOF
132 | fi
133 |
134 | local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
135 | if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
136 | echo "TEMPLATE: $TEMPLATE"
137 | mkdir -p $(dirname $TEMPLATE)
138 | cat << EOF > $TEMPLATE
139 | [Unit]
140 | Description=Load rkt stage1 images
141 | Documentation=http://github.com/coreos/rkt
142 | Requires=network-online.target
143 | After=network-online.target
144 | Before=rkt-api.service
145 |
146 | [Service]
147 | Type=oneshot
148 | RemainAfterExit=yes
149 | ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image
150 |
151 | [Install]
152 | RequiredBy=rkt-api.service
153 | EOF
154 | fi
155 |
156 | local TEMPLATE=/etc/systemd/system/rkt-api.service
157 | if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
158 | echo "TEMPLATE: $TEMPLATE"
159 | mkdir -p $(dirname $TEMPLATE)
160 | cat << EOF > $TEMPLATE
161 | [Unit]
162 | Before=kubelet.service
163 |
164 | [Service]
165 | ExecStart=/usr/bin/rkt api-service
166 | Restart=always
167 | RestartSec=10
168 |
169 | [Install]
170 | RequiredBy=kubelet.service
171 | EOF
172 | fi
173 |
174 | local TEMPLATE=/etc/kubernetes/worker-kubeconfig.yaml
175 | if [ ! -f $TEMPLATE ]; then
176 | echo "TEMPLATE: $TEMPLATE"
177 | mkdir -p $(dirname $TEMPLATE)
178 | cat << EOF > $TEMPLATE
179 | apiVersion: v1
180 | kind: Config
181 | clusters:
182 | - name: local
183 | cluster:
184 | certificate-authority: /etc/kubernetes/ssl/ca.pem
185 | users:
186 | - name: kubelet
187 | user:
188 | client-certificate: /etc/kubernetes/ssl/worker.pem
189 | client-key: /etc/kubernetes/ssl/worker-key.pem
190 | contexts:
191 | - context:
192 | cluster: local
193 | user: kubelet
194 | name: kubelet-context
195 | current-context: kubelet-context
196 | EOF
197 | fi
198 |
199 | local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
200 | if [ ! -f $TEMPLATE ]; then
201 | echo "TEMPLATE: $TEMPLATE"
202 | mkdir -p $(dirname $TEMPLATE)
203 | cat << EOF > $TEMPLATE
204 | apiVersion: v1
205 | kind: Pod
206 | metadata:
207 | name: kube-proxy
208 | namespace: kube-system
209 | annotations:
210 | rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
211 | spec:
212 | hostNetwork: true
213 | containers:
214 | - name: kube-proxy
215 | image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
216 | command:
217 | - /hyperkube
218 | - proxy
219 | - --master=${CONTROLLER_ENDPOINT}
220 | - --cluster-cidr=${POD_NETWORK}
221 | - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml
222 | securityContext:
223 | privileged: true
224 | volumeMounts:
225 | - mountPath: /etc/ssl/certs
226 | name: "ssl-certs"
227 | - mountPath: /etc/kubernetes/worker-kubeconfig.yaml
228 | name: "kubeconfig"
229 | readOnly: true
230 | - mountPath: /etc/kubernetes/ssl
231 | name: "etc-kube-ssl"
232 | readOnly: true
233 | - mountPath: /var/run/dbus
234 | name: dbus
235 | readOnly: false
236 | volumes:
237 | - name: "ssl-certs"
238 | hostPath:
239 | path: "/usr/share/ca-certificates"
240 | - name: "kubeconfig"
241 | hostPath:
242 | path: "/etc/kubernetes/worker-kubeconfig.yaml"
243 | - name: "etc-kube-ssl"
244 | hostPath:
245 | path: "/etc/kubernetes/ssl"
246 | - hostPath:
247 | path: /var/run/dbus
248 | name: dbus
249 | EOF
250 | fi
251 |
252 | local TEMPLATE=/etc/flannel/options.env
253 | if [ ! -f $TEMPLATE ]; then
254 | echo "TEMPLATE: $TEMPLATE"
255 | mkdir -p $(dirname $TEMPLATE)
256 | cat << EOF > $TEMPLATE
257 | FLANNELD_IFACE=$ADVERTISE_IP
258 | FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS
259 | EOF
260 | fi
261 |
262 | local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf.conf
263 | if [ ! -f $TEMPLATE ]; then
264 | echo "TEMPLATE: $TEMPLATE"
265 | mkdir -p $(dirname $TEMPLATE)
266 | cat << EOF > $TEMPLATE
267 | [Service]
268 | ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
269 | EOF
270 | fi
271 |
272 | local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
273 | if [ ! -f $TEMPLATE ]; then
274 | echo "TEMPLATE: $TEMPLATE"
275 | mkdir -p $(dirname $TEMPLATE)
276 | cat << EOF > $TEMPLATE
277 | [Unit]
278 | Requires=flanneld.service
279 | After=flanneld.service
280 | [Service]
281 | EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
282 | EOF
283 | fi
284 |
285 | local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
286 | if [ ! -f $TEMPLATE ]; then
287 | echo "TEMPLATE: $TEMPLATE"
288 | mkdir -p $(dirname $TEMPLATE)
289 | cat << EOF > $TEMPLATE
290 | DOCKER_OPT_BIP=""
291 | DOCKER_OPT_IPMASQ=""
292 | EOF
293 |
294 | fi
295 |
296 | local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
297 | if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
298 | echo "TEMPLATE: $TEMPLATE"
299 | mkdir -p $(dirname $TEMPLATE)
300 | cat << EOF > $TEMPLATE
301 | {
302 | "name": "podnet",
303 | "type": "flannel",
304 | "delegate": {
305 | "isDefaultGateway": true
306 | }
307 | }
308 | EOF
309 | fi
310 |
311 | }
312 |
313 | init_config
314 | init_templates
315 |
316 | chmod +x /opt/bin/host-rkt
317 |
318 | systemctl stop update-engine; systemctl mask update-engine
319 |
320 | systemctl daemon-reload
321 |
322 | if [ $CONTAINER_RUNTIME = "rkt" ]; then
323 | systemctl enable load-rkt-stage1
324 | systemctl enable rkt-api
325 | fi
326 |
327 | systemctl enable flanneld; systemctl start flanneld
328 |
329 |
330 | systemctl enable kubelet; systemctl start kubelet
331 |
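The worker script above reads optional overrides from /run/coreos-kubernetes/options.env before validating its required settings; a minimal sketch of that file, using placeholder addresses in the same style as this repo's Vagrant setup:

```sh
# /run/coreos-kubernetes/options.env -- plain KEY=value lines, no quotes
ETCD_ENDPOINTS=http://172.17.4.51:2379
CONTROLLER_ENDPOINT=https://172.17.4.101
```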
--------------------------------------------------------------------------------
/multi-node/vagrant/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | config.rb
3 | ssl/
4 |
--------------------------------------------------------------------------------
/multi-node/vagrant/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Cluster with Vagrant on CoreOS
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
--------------------------------------------------------------------------------
/multi-node/vagrant/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # # vi: set ft=ruby :
3 |
4 | require 'fileutils'
5 | require 'open-uri'
6 | require 'tempfile'
7 | require 'yaml'
8 |
9 | Vagrant.require_version ">= 1.6.0"
10 |
11 | $update_channel = "alpha"
12 | $controller_count = 1
13 | $controller_vm_memory = 1024
14 | $worker_count = 1
15 | $worker_vm_memory = 1024
16 | $etcd_count = 1
17 | $etcd_vm_memory = 512
18 |
19 | CONFIG = File.expand_path("config.rb")
20 | if File.exist?(CONFIG)
21 | require CONFIG
22 | end
23 |
24 | if $worker_vm_memory < 1024
25 | puts "Workers should have at least 1024 MB of memory"
26 | end
27 |
28 | CONTROLLER_CLUSTER_IP="10.3.0.1"
29 |
30 | ETCD_CLOUD_CONFIG_PATH = File.expand_path("etcd-cloud-config.yaml")
31 |
32 | CONTROLLER_CLOUD_CONFIG_PATH = File.expand_path("../generic/controller-install.sh")
33 | WORKER_CLOUD_CONFIG_PATH = File.expand_path("../generic/worker-install.sh")
34 |
35 | def etcdIP(num)
36 | return "172.17.4.#{num+50}"
37 | end
38 |
39 | def controllerIP(num)
40 | return "172.17.4.#{num+100}"
41 | end
42 |
43 | def workerIP(num)
44 | return "172.17.4.#{num+200}"
45 | end
46 |
47 | controllerIPs = [*1..$controller_count].map{ |i| controllerIP(i) } << CONTROLLER_CLUSTER_IP
48 | etcdIPs = [*1..$etcd_count].map{ |i| etcdIP(i) }
49 | initial_etcd_cluster = etcdIPs.map.with_index{ |ip, i| "e#{i+1}=http://#{ip}:2380" }.join(",")
50 | etcd_endpoints = etcdIPs.map.with_index{ |ip, i| "http://#{ip}:2379" }.join(",")
51 |
52 | # Generate root CA
53 | system("mkdir -p ssl && ./../../lib/init-ssl-ca ssl") or abort ("failed generating SSL artifacts")
54 |
55 | # Generate admin key/cert
56 | system("./../../lib/init-ssl ssl admin kube-admin") or abort("failed generating admin SSL artifacts")
57 |
58 | def provisionMachineSSL(machine,certBaseName,cn,ipAddrs)
59 | tarFile = "ssl/#{cn}.tar"
60 | ipString = ipAddrs.map.with_index { |ip, i| "IP.#{i+1}=#{ip}"}.join(",")
61 | system("./../../lib/init-ssl ssl #{certBaseName} #{cn} #{ipString}") or abort("failed generating #{cn} SSL artifacts")
62 | machine.vm.provision :file, :source => tarFile, :destination => "/tmp/ssl.tar"
63 | machine.vm.provision :shell, :inline => "mkdir -p /etc/kubernetes/ssl && tar -C /etc/kubernetes/ssl -xf /tmp/ssl.tar", :privileged => true
64 | end
65 |
66 | Vagrant.configure("2") do |config|
67 | # always use Vagrant's insecure key
68 | config.ssh.insert_key = false
69 |
70 | config.vm.box = "coreos-%s" % $update_channel
71 | config.vm.box_version = ">= 1151.0.0"
72 | config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % $update_channel
73 |
74 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
75 | config.vm.provider vmware do |v, override|
76 | override.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant_vmware_fusion.json" % $update_channel
77 | end
78 | end
79 |
80 | config.vm.provider :virtualbox do |v|
81 | # On VirtualBox, we don't have guest additions or a functional vboxsf
82 | # in CoreOS, so tell Vagrant that so it can be smarter.
83 | v.check_guest_additions = false
84 | v.functional_vboxsf = false
85 | end
86 |
87 | # plugin conflict
88 | if Vagrant.has_plugin?("vagrant-vbguest") then
89 | config.vbguest.auto_update = false
90 | end
91 |
92 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
93 | config.vm.provider vmware do |v|
94 | v.vmx['numvcpus'] = 1
95 | v.gui = false
96 | end
97 | end
98 |
99 | config.vm.provider :virtualbox do |vb|
100 | vb.cpus = 1
101 | vb.gui = false
102 | end
103 |
104 | (1..$etcd_count).each do |i|
105 | config.vm.define vm_name = "e%d" % i do |etcd|
106 |
107 | data = YAML.load(IO.readlines(ETCD_CLOUD_CONFIG_PATH)[1..-1].join)
108 | data['coreos']['etcd2']['initial-cluster'] = initial_etcd_cluster
109 | data['coreos']['etcd2']['name'] = vm_name
110 | etcd_config_file = Tempfile.new('etcd_config', :binmode => true)
111 | etcd_config_file.write("#cloud-config\n#{data.to_yaml}")
112 | etcd_config_file.close
113 |
114 | etcd.vm.hostname = vm_name
115 |
116 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
117 | etcd.vm.provider vmware do |v|
118 | v.vmx['memsize'] = $etcd_vm_memory
119 | end
120 | end
121 |
122 | etcd.vm.provider :virtualbox do |vb|
123 | vb.memory = $etcd_vm_memory
124 | end
125 |
126 | etcd.vm.network :private_network, ip: etcdIP(i)
127 |
128 | etcd.vm.provision :file, :source => etcd_config_file.path, :destination => "/tmp/vagrantfile-user-data"
129 | etcd.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
130 | end
131 | end
132 |
133 |
134 | (1..$controller_count).each do |i|
135 | config.vm.define vm_name = "c%d" % i do |controller|
136 |
137 | env_file = Tempfile.new('env_file', :binmode => true)
138 | env_file.write("ETCD_ENDPOINTS=#{etcd_endpoints}\n")
139 | env_file.close
140 |
141 | controller.vm.hostname = vm_name
142 |
143 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
144 | controller.vm.provider vmware do |v|
145 | v.vmx['memsize'] = $controller_vm_memory
146 | end
147 | end
148 |
149 | controller.vm.provider :virtualbox do |vb|
150 | vb.memory = $controller_vm_memory
151 | end
152 |
153 | controllerIP = controllerIP(i)
154 | controller.vm.network :private_network, ip: controllerIP
155 |
156 | # Each controller gets the same cert
157 | provisionMachineSSL(controller,"apiserver","kube-apiserver-#{controllerIP}",controllerIPs)
158 |
159 | controller.vm.provision :file, :source => env_file, :destination => "/tmp/coreos-kube-options.env"
160 | controller.vm.provision :shell, :inline => "mkdir -p /run/coreos-kubernetes && mv /tmp/coreos-kube-options.env /run/coreos-kubernetes/options.env", :privileged => true
161 |
162 | controller.vm.provision :file, :source => CONTROLLER_CLOUD_CONFIG_PATH, :destination => "/tmp/vagrantfile-user-data"
163 | controller.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
164 | end
165 | end
166 |
167 | (1..$worker_count).each do |i|
168 | config.vm.define vm_name = "w%d" % i do |worker|
169 | worker.vm.hostname = vm_name
170 |
171 | env_file = Tempfile.new('env_file', :binmode => true)
172 | env_file.write("ETCD_ENDPOINTS=#{etcd_endpoints}\n")
173 | env_file.write("CONTROLLER_ENDPOINT=https://#{controllerIPs[0]}\n") #TODO(aaron): LB or DNS across control nodes
174 | env_file.close
175 |
176 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
177 | worker.vm.provider vmware do |v|
178 | v.vmx['memsize'] = $worker_vm_memory
179 | end
180 | end
181 |
182 | worker.vm.provider :virtualbox do |vb|
183 | vb.memory = $worker_vm_memory
184 | end
185 |
186 | workerIP = workerIP(i)
187 | worker.vm.network :private_network, ip: workerIP
188 |
189 | provisionMachineSSL(worker,"worker","kube-worker-#{workerIP}",[workerIP])
190 |
191 | worker.vm.provision :file, :source => env_file, :destination => "/tmp/coreos-kube-options.env"
192 | worker.vm.provision :shell, :inline => "mkdir -p /run/coreos-kubernetes && mv /tmp/coreos-kube-options.env /run/coreos-kubernetes/options.env", :privileged => true
193 |
194 | worker.vm.provision :file, :source => WORKER_CLOUD_CONFIG_PATH, :destination => "/tmp/vagrantfile-user-data"
195 | worker.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
196 | end
197 | end
198 |
199 | end
200 |
--------------------------------------------------------------------------------
/multi-node/vagrant/config.rb.sample:
--------------------------------------------------------------------------------
1 | #$update_channel="alpha"
2 |
3 | #$controller_count=1
4 | #$controller_vm_memory=512
5 |
6 | #$worker_count=1
7 | #$worker_vm_memory=1024
8 |
9 | #$etcd_count=1
10 | #$etcd_vm_memory=512
11 |
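The Vagrantfile above loads config.rb when it exists, so one common way to use this sample is to copy it and uncomment the values to override; for example, to run two workers with more memory:

```sh
cd multi-node/vagrant
cp config.rb.sample config.rb
# edit config.rb and uncomment/adjust, e.g.:
#   $worker_count=2
#   $worker_vm_memory=2048
vagrant up
```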
--------------------------------------------------------------------------------
/multi-node/vagrant/conformance-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 |
4 | ssh_key="$(vagrant ssh-config c1 | awk '/IdentityFile/ {print $2}' | tr -d '"')"
5 | ssh_port="$(vagrant ssh-config c1 | awk '/Port [0-9]+/ {print $2}')"
6 |
7 | SSH_OPTS='-q -o stricthostkeychecking=no' ../../contrib/conformance-test.sh "127.0.0.1" "${ssh_port}" "${ssh_key}"
8 |
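The wrapper above derives the SSH port and key for controller c1 from `vagrant ssh-config`, so a typical run (with the cluster already provisioned) is simply:

```sh
cd multi-node/vagrant
vagrant up
./conformance-test.sh
```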
--------------------------------------------------------------------------------
/multi-node/vagrant/etcd-cloud-config.yaml:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | coreos:
4 | update:
5 | reboot-strategy: "off"
6 |
7 | etcd2:
8 | name: {{ETCD_NODE_NAME}}
9 | initial-cluster: {{ETCD_INITIAL_CLUSTER}}
10 | advertise-client-urls: http://$private_ipv4:2379
11 | listen-client-urls: http://0.0.0.0:2379
12 | initial-advertise-peer-urls: http://$private_ipv4:2380
13 | listen-peer-urls: http://$private_ipv4:2380
14 |
15 | units:
16 |
17 | - name: etcd2.service
18 | command: start
19 |
20 |
--------------------------------------------------------------------------------
/multi-node/vagrant/kubeconfig:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Config
3 | clusters:
4 | - cluster:
5 | certificate-authority: ssl/ca.pem
6 | server: https://172.17.4.101:443
7 | name: vagrant-multi-cluster
8 | contexts:
9 | - context:
10 | cluster: vagrant-multi-cluster
11 | namespace: default
12 | user: vagrant-multi-admin
13 | name: vagrant-multi
14 | users:
15 | - name: vagrant-multi-admin
16 | user:
17 | client-certificate: ssl/admin.pem
18 | client-key: ssl/admin-key.pem
19 | current-context: vagrant-multi
20 |
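Once the cluster is up and the ssl/ directory has been generated, this kubeconfig can be passed straight to kubectl; the certificate-authority path is relative, so run from this directory:

```sh
cd multi-node/vagrant
kubectl --kubeconfig=kubeconfig get nodes
kubectl --kubeconfig=kubeconfig get pods --all-namespaces
```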
--------------------------------------------------------------------------------
/multi-node/vagrant/kubectl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coreos/coreos-kubernetes/f01c2f71d494ddd0753ae6a733cc00e61af08de5/multi-node/vagrant/kubectl
--------------------------------------------------------------------------------
/single-node/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | ssl/
3 |
--------------------------------------------------------------------------------
/single-node/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Cluster with Vagrant on CoreOS
2 |
3 |
4 |
This repo is not in alignment with current versions of Kubernetes, and will not be active in the future. The CoreOS Kubernetes documentation has been moved to the tectonic-docs repo, where it will be published and updated.
5 |
6 |
For tested, maintained, and production-ready Kubernetes instructions, see our Tectonic Installer documentation. The Tectonic Installer provides a Terraform-based Kubernetes installation. It is open source, uses upstream Kubernetes and can be easily customized.
7 |
8 |
--------------------------------------------------------------------------------
/single-node/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # # vi: set ft=ruby :
3 |
4 | require 'fileutils'
5 | require 'open-uri'
6 | require 'tempfile'
7 | require 'yaml'
8 |
9 | Vagrant.require_version ">= 1.6.0"
10 |
11 | $update_channel = "alpha"
12 |
13 | CLUSTER_IP="10.3.0.1"
14 | NODE_IP = "172.17.4.99"
15 | NODE_VCPUS = 1
16 | NODE_MEMORY_SIZE = 2048
17 | USER_DATA_PATH = File.expand_path("user-data")
18 | SSL_TARBALL_PATH = File.expand_path("ssl/controller.tar")
19 |
20 | system("mkdir -p ssl && ./../lib/init-ssl-ca ssl") or abort ("failed generating SSL CA artifacts")
21 | system("./../lib/init-ssl ssl apiserver controller IP.1=#{NODE_IP},IP.2=#{CLUSTER_IP}") or abort ("failed generating SSL certificate artifacts")
22 | system("./../lib/init-ssl ssl admin kube-admin") or abort("failed generating admin SSL artifacts")
23 |
24 | Vagrant.configure("2") do |config|
25 | # always use Vagrant's insecure key
26 | config.ssh.insert_key = false
27 |
28 | config.vm.box = "coreos-%s" % $update_channel
29 | config.vm.box_version = ">= 1151.0.0"
30 | config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % $update_channel
31 |
32 | ["vmware_fusion", "vmware_workstation"].each do |vmware|
33 | config.vm.provider vmware do |v, override|
34 | v.vmx['numvcpus'] = NODE_VCPUS
35 | v.vmx['memsize'] = NODE_MEMORY_SIZE
36 | v.gui = false
37 |
38 | override.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant_vmware_fusion.json" % $update_channel
39 | end
40 | end
41 |
42 | config.vm.provider :virtualbox do |v|
43 | v.cpus = NODE_VCPUS
44 | v.gui = false
45 | v.memory = NODE_MEMORY_SIZE
46 |
47 | # On VirtualBox, we don't have guest additions or a functional vboxsf
48 | # in CoreOS, so tell Vagrant that so it can be smarter.
49 | v.check_guest_additions = false
50 | v.functional_vboxsf = false
51 | end
52 |
53 | # plugin conflict
54 | if Vagrant.has_plugin?("vagrant-vbguest") then
55 | config.vbguest.auto_update = false
56 | end
57 |
58 | config.vm.network :private_network, ip: NODE_IP
59 |
60 | config.vm.provision :file, :source => SSL_TARBALL_PATH, :destination => "/tmp/ssl.tar"
61 | config.vm.provision :shell, :inline => "mkdir -p /etc/kubernetes/ssl && tar -C /etc/kubernetes/ssl -xf /tmp/ssl.tar", :privileged => true
62 |
63 | config.vm.provision :file, :source => USER_DATA_PATH, :destination => "/tmp/vagrantfile-user-data"
64 | config.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true
65 |
66 | end
67 |
--------------------------------------------------------------------------------
/single-node/conformance-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 |
4 | ssh_key="$(vagrant ssh-config | awk '/IdentityFile/ {print $2}' | tr -d '"')"
5 | ssh_port="$(vagrant ssh-config | awk '/Port [0-9]+/ {print $2}')"
6 |
7 | export CHECK_NODE_COUNT=false
8 | SSH_OPTS='-q -o stricthostkeychecking=no' ../contrib/conformance-test.sh "127.0.0.1" "${ssh_port}" "${ssh_key}"
9 |
--------------------------------------------------------------------------------
/single-node/kubeconfig:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Config
3 | clusters:
4 | - cluster:
5 | certificate-authority: ssl/ca.pem
6 | server: https://172.17.4.99:443
7 | name: vagrant-single-cluster
8 | contexts:
9 | - context:
10 | cluster: vagrant-single-cluster
11 | namespace: default
12 | user: vagrant-single-admin
13 | name: vagrant-single
14 | users:
15 | - name: vagrant-single-admin
16 | user:
17 | client-certificate: ssl/admin.pem
18 | client-key: ssl/admin-key.pem
19 | current-context: vagrant-single
20 |
21 |
--------------------------------------------------------------------------------
/single-node/user-data:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # List of etcd servers (http://ip:port), comma separated
5 | export ETCD_ENDPOINTS="http://127.0.0.1:2379"
6 |
7 | # Specify the version (vX.Y.Z) of Kubernetes assets to deploy
8 | export K8S_VER=v1.5.4_coreos.0
9 |
10 | # Hyperkube image repository to use.
11 | export HYPERKUBE_IMAGE_REPO=quay.io/coreos/hyperkube
12 |
13 | # The CIDR network to use for pod IPs.
14 | # Each pod launched in the cluster will be assigned an IP out of this range.
15 | # Each node will be configured such that these IPs will be routable using the flannel overlay network.
16 | export POD_NETWORK=10.2.0.0/16
17 |
18 | # The CIDR network to use for service cluster IPs.
19 | # Each service will be assigned a cluster IP out of this range.
20 | # This must not overlap with any IP ranges assigned to the POD_NETWORK, or other existing network infrastructure.
21 | # Routing to these IPs is handled by a proxy service local to each node, and they are not required to be routable between nodes.
22 | export SERVICE_IP_RANGE=10.3.0.0/24
23 |
24 | # The IP address of the Kubernetes API Service
25 | # If the SERVICE_IP_RANGE is changed above, this must be set to the first IP in that range.
26 | export K8S_SERVICE_IP=10.3.0.1
27 |
28 | # The IP address of the cluster DNS service.
29 | # This IP must be in the range of the SERVICE_IP_RANGE and cannot be the first IP in the range.
30 | # This same IP must be configured on all worker nodes to enable DNS service discovery.
31 | export DNS_SERVICE_IP=10.3.0.10
32 |
33 | # Whether to use Calico for Kubernetes network policy.
34 | export USE_CALICO=false
35 |
36 | # Determines the container runtime for kubernetes to use. Accepts 'docker' or 'rkt'.
37 | export CONTAINER_RUNTIME=docker
38 |
39 | # We need to overwrite this for a hosted Calico install
40 | if [ "${USE_CALICO}" = "true" ]; then
41 | export CALICO_OPTS="--volume cni-bin,kind=host,source=/opt/cni/bin \
42 | --mount volume=cni-bin,target=/opt/cni/bin"
43 | else
44 | export CALICO_OPTS=""
45 | fi
46 |
47 | # -------------
48 |
49 | function init_config {
50 | local REQUIRED=('ADVERTISE_IP' 'POD_NETWORK' 'ETCD_ENDPOINTS' 'SERVICE_IP_RANGE' 'K8S_SERVICE_IP' 'DNS_SERVICE_IP' 'K8S_VER' 'USE_CALICO')
51 |
52 | if [ -z $ADVERTISE_IP ]; then
53 | export ADVERTISE_IP=$(awk -F= '/COREOS_PUBLIC_IPV4/ {print $2}' /etc/environment)
54 | fi
55 |
56 | for REQ in "${REQUIRED[@]}"; do
57 | if [ -z "$(eval echo \$$REQ)" ]; then
58 | echo "Missing required config value: ${REQ}"
59 | exit 1
60 | fi
61 | done
62 | }
63 |
64 | function init_flannel {
65 | echo "Waiting for etcd..."
66 | while true
67 | do
68 | IFS=',' read -ra ES <<< "$ETCD_ENDPOINTS"
69 | for ETCD in "${ES[@]}"; do
70 | echo "Trying: $ETCD"
71 | if [ -n "$(curl --silent "$ETCD/v2/machines")" ]; then
72 | local ACTIVE_ETCD=$ETCD
73 | break
74 | fi
75 | sleep 1
76 | done
77 | if [ -n "$ACTIVE_ETCD" ]; then
78 | break
79 | fi
80 | done
81 | RES=$(curl --silent -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false")
82 | if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then
83 | echo "Unexpected error configuring flannel pod network: $RES"
84 | fi
85 | }
86 |
87 | function init_templates {
88 | local TEMPLATE=/etc/systemd/system/kubelet.service
89 | local uuid_file="/var/run/kubelet-pod.uuid"
90 | if [ ! -f $TEMPLATE ]; then
91 | echo "TEMPLATE: $TEMPLATE"
92 | mkdir -p $(dirname $TEMPLATE)
93 | cat << EOF > $TEMPLATE
94 | [Service]
95 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
96 | ExecStartPre=/usr/bin/mkdir -p /opt/cni/bin
97 | Environment=KUBELET_IMAGE_TAG=${K8S_VER}
98 | Environment=KUBELET_IMAGE_URL=${HYPERKUBE_IMAGE_REPO}
99 | Environment="RKT_RUN_ARGS=--uuid-file-save=${uuid_file} \
100 | --volume dns,kind=host,source=/etc/resolv.conf \
101 | --mount volume=dns,target=/etc/resolv.conf \
102 | --volume rkt,kind=host,source=/opt/bin/host-rkt \
103 | --mount volume=rkt,target=/usr/bin/rkt \
104 | --volume var-lib-rkt,kind=host,source=/var/lib/rkt \
105 | --mount volume=var-lib-rkt,target=/var/lib/rkt \
106 | --volume stage,kind=host,source=/tmp \
107 | --mount volume=stage,target=/tmp \
108 | --volume var-log,kind=host,source=/var/log \
109 | --mount volume=var-log,target=/var/log \
110 | ${CALICO_OPTS}"
111 | ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests
112 | ExecStartPre=/usr/bin/mkdir -p /var/log/containers
113 | ExecStartPre=-/usr/bin/rkt rm --uuid-file=${uuid_file}
114 | ExecStart=/usr/lib/coreos/kubelet-wrapper \
115 | --api-servers=http://127.0.0.1:8080 \
116 | --cni-conf-dir=/etc/kubernetes/cni/net.d \
117 | --network-plugin=cni \
118 | --container-runtime=${CONTAINER_RUNTIME} \
119 | --rkt-path=/usr/bin/rkt \
120 | --rkt-stage1-image=coreos.com/rkt/stage1-coreos \
121 | --register-node=true \
122 | --allow-privileged=true \
123 | --pod-manifest-path=/etc/kubernetes/manifests \
124 | --hostname-override=${ADVERTISE_IP} \
125 | --cluster_dns=${DNS_SERVICE_IP} \
126 | --cluster_domain=cluster.local
127 | ExecStop=-/usr/bin/rkt stop --uuid-file=${uuid_file}
128 | Restart=always
129 | RestartSec=10
130 | KillMode=process
131 |
132 | [Install]
133 | WantedBy=multi-user.target
134 | EOF
135 | fi
136 |
137 | local TEMPLATE=/opt/bin/host-rkt
138 | if [ ! -f $TEMPLATE ]; then
139 | echo "TEMPLATE: $TEMPLATE"
140 | mkdir -p $(dirname $TEMPLATE)
141 | cat << EOF > $TEMPLATE
142 | #!/bin/sh
143 | # This is bind mounted into the kubelet rootfs and all rkt shell-outs go
144 | # through this rkt wrapper. It essentially enters the host mount namespace
145 | # (which it is already in) only for the purpose of breaking out of the chroot
146 | # before calling rkt. It makes things like rkt gc work and avoids bind mounting
147 | # in certain rkt filesystem dependencies into the kubelet rootfs. This can
148 | # eventually be obviated when the write-api stuff gets upstream and rkt gc is
149 | # through the api-server. Related issue:
150 | # https://github.com/coreos/rkt/issues/2878
151 | exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "\$@"
152 | EOF
153 | fi
154 |
155 | local TEMPLATE=/etc/systemd/system/load-rkt-stage1.service
156 | if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
157 | echo "TEMPLATE: $TEMPLATE"
158 | mkdir -p $(dirname $TEMPLATE)
159 | cat << EOF > $TEMPLATE
160 | [Unit]
161 | Requires=network-online.target
162 | After=network-online.target
163 | Before=rkt-api.service
164 |
165 | [Service]
166 | Type=oneshot
167 | RemainAfterExit=yes
168 | ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image
169 |
170 | [Install]
171 | RequiredBy=rkt-api.service
172 | EOF
173 | fi
174 |
175 | local TEMPLATE=/etc/systemd/system/rkt-api.service
176 | if [ ${CONTAINER_RUNTIME} = "rkt" ] && [ ! -f $TEMPLATE ]; then
177 | echo "TEMPLATE: $TEMPLATE"
178 | mkdir -p $(dirname $TEMPLATE)
179 | cat << EOF > $TEMPLATE
180 | [Unit]
181 | Before=kubelet.service
182 |
183 | [Service]
184 | ExecStart=/usr/bin/rkt api-service
185 | Restart=always
186 | RestartSec=10
187 |
188 | [Install]
189 | RequiredBy=kubelet.service
190 | EOF
191 | fi
192 |
193 | local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml
194 | if [ ! -f $TEMPLATE ]; then
195 | echo "TEMPLATE: $TEMPLATE"
196 | mkdir -p $(dirname $TEMPLATE)
197 | cat << EOF > $TEMPLATE
198 | apiVersion: v1
199 | kind: Pod
200 | metadata:
201 | name: kube-proxy
202 | namespace: kube-system
203 | annotations:
204 | rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly
205 | spec:
206 | hostNetwork: true
207 | containers:
208 | - name: kube-proxy
209 | image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
210 | command:
211 | - /hyperkube
212 | - proxy
213 | - --master=http://127.0.0.1:8080
214 | securityContext:
215 | privileged: true
216 | volumeMounts:
217 | - mountPath: /etc/ssl/certs
218 | name: ssl-certs-host
219 | readOnly: true
220 | - mountPath: /var/run/dbus
221 | name: dbus
222 | readOnly: false
223 | volumes:
224 | - hostPath:
225 | path: /usr/share/ca-certificates
226 | name: ssl-certs-host
227 | - hostPath:
228 | path: /var/run/dbus
229 | name: dbus
230 | EOF
231 | fi
232 |
233 | local TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml
234 | if [ ! -f $TEMPLATE ]; then
235 | echo "TEMPLATE: $TEMPLATE"
236 | mkdir -p $(dirname $TEMPLATE)
237 | cat << EOF > $TEMPLATE
238 | apiVersion: v1
239 | kind: Pod
240 | metadata:
241 | name: kube-apiserver
242 | namespace: kube-system
243 | spec:
244 | hostNetwork: true
245 | containers:
246 | - name: kube-apiserver
247 | image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
248 | command:
249 | - /hyperkube
250 | - apiserver
251 | - --bind-address=0.0.0.0
252 | - --etcd-servers=${ETCD_ENDPOINTS}
253 | - --allow-privileged=true
254 | - --service-cluster-ip-range=${SERVICE_IP_RANGE}
255 | - --secure-port=443
256 | - --advertise-address=${ADVERTISE_IP}
257 | - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
258 | - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem
259 | - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
260 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem
261 | - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem
262 | - --runtime-config=extensions/v1beta1/networkpolicies=true
263 | - --anonymous-auth=false
264 | livenessProbe:
265 | httpGet:
266 | host: 127.0.0.1
267 | port: 8080
268 | path: /healthz
269 | initialDelaySeconds: 15
270 | timeoutSeconds: 15
271 | ports:
272 | - containerPort: 443
273 | hostPort: 443
274 | name: https
275 | - containerPort: 8080
276 | hostPort: 8080
277 | name: local
278 | volumeMounts:
279 | - mountPath: /etc/kubernetes/ssl
280 | name: ssl-certs-kubernetes
281 | readOnly: true
282 | - mountPath: /etc/ssl/certs
283 | name: ssl-certs-host
284 | readOnly: true
285 | volumes:
286 | - hostPath:
287 | path: /etc/kubernetes/ssl
288 | name: ssl-certs-kubernetes
289 | - hostPath:
290 | path: /usr/share/ca-certificates
291 | name: ssl-certs-host
292 | EOF
293 | fi
294 |
295 | local TEMPLATE=/etc/kubernetes/manifests/kube-controller-manager.yaml
296 | if [ ! -f $TEMPLATE ]; then
297 | echo "TEMPLATE: $TEMPLATE"
298 | mkdir -p $(dirname $TEMPLATE)
299 | cat << EOF > $TEMPLATE
300 | apiVersion: v1
301 | kind: Pod
302 | metadata:
303 | name: kube-controller-manager
304 | namespace: kube-system
305 | spec:
306 | containers:
307 | - name: kube-controller-manager
308 | image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
309 | command:
310 | - /hyperkube
311 | - controller-manager
312 | - --master=http://127.0.0.1:8080
313 | - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
314 | - --root-ca-file=/etc/kubernetes/ssl/ca.pem
315 | resources:
316 | requests:
317 | cpu: 200m
318 | livenessProbe:
319 | httpGet:
320 | host: 127.0.0.1
321 | path: /healthz
322 | port: 10252
323 | initialDelaySeconds: 15
324 | timeoutSeconds: 15
325 | volumeMounts:
326 | - mountPath: /etc/kubernetes/ssl
327 | name: ssl-certs-kubernetes
328 | readOnly: true
329 | - mountPath: /etc/ssl/certs
330 | name: ssl-certs-host
331 | readOnly: true
332 | hostNetwork: true
333 | volumes:
334 | - hostPath:
335 | path: /etc/kubernetes/ssl
336 | name: ssl-certs-kubernetes
337 | - hostPath:
338 | path: /usr/share/ca-certificates
339 | name: ssl-certs-host
340 | EOF
341 | fi
342 |
343 | local TEMPLATE=/etc/kubernetes/manifests/kube-scheduler.yaml
344 | if [ ! -f $TEMPLATE ]; then
345 | echo "TEMPLATE: $TEMPLATE"
346 | mkdir -p $(dirname $TEMPLATE)
347 | cat << EOF > $TEMPLATE
348 | apiVersion: v1
349 | kind: Pod
350 | metadata:
351 | name: kube-scheduler
352 | namespace: kube-system
353 | spec:
354 | hostNetwork: true
355 | containers:
356 | - name: kube-scheduler
357 | image: ${HYPERKUBE_IMAGE_REPO}:$K8S_VER
358 | command:
359 | - /hyperkube
360 | - scheduler
361 | - --master=http://127.0.0.1:8080
362 | resources:
363 | requests:
364 | cpu: 100m
365 | livenessProbe:
366 | httpGet:
367 | host: 127.0.0.1
368 | path: /healthz
369 | port: 10251
370 | initialDelaySeconds: 15
371 | timeoutSeconds: 15
372 | EOF
373 | fi
374 |
375 | local TEMPLATE=/srv/kubernetes/manifests/kube-dns-de.yaml
376 | if [ ! -f $TEMPLATE ]; then
377 | echo "TEMPLATE: $TEMPLATE"
378 | mkdir -p $(dirname $TEMPLATE)
379 | cat << EOF > $TEMPLATE
380 | apiVersion: extensions/v1beta1
381 | kind: Deployment
382 | metadata:
383 | name: kube-dns
384 | namespace: kube-system
385 | labels:
386 | k8s-app: kube-dns
387 | kubernetes.io/cluster-service: "true"
388 | spec:
389 | strategy:
390 | rollingUpdate:
391 | maxSurge: 10%
392 | maxUnavailable: 0
393 | selector:
394 | matchLabels:
395 | k8s-app: kube-dns
396 | template:
397 | metadata:
398 | labels:
399 | k8s-app: kube-dns
400 | annotations:
401 | scheduler.alpha.kubernetes.io/critical-pod: ''
402 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
403 | spec:
404 | containers:
405 | - name: kubedns
406 | image: gcr.io/google_containers/kubedns-amd64:1.9
407 | resources:
408 | limits:
409 | memory: 170Mi
410 | requests:
411 | cpu: 100m
412 | memory: 70Mi
413 | livenessProbe:
414 | httpGet:
415 | path: /healthz-kubedns
416 | port: 8080
417 | scheme: HTTP
418 | initialDelaySeconds: 60
419 | timeoutSeconds: 5
420 | successThreshold: 1
421 | failureThreshold: 5
422 | readinessProbe:
423 | httpGet:
424 | path: /readiness
425 | port: 8081
426 | scheme: HTTP
427 | initialDelaySeconds: 3
428 | timeoutSeconds: 5
429 | args:
430 | - --domain=cluster.local.
431 | - --dns-port=10053
432 | - --config-map=kube-dns
433 | # This should be set to v=2 only after the new image (cut from 1.5) has
434 | # been released, otherwise we will flood the logs.
435 | - --v=2
436 | env:
437 | - name: PROMETHEUS_PORT
438 | value: "10055"
439 | ports:
440 | - containerPort: 10053
441 | name: dns-local
442 | protocol: UDP
443 | - containerPort: 10053
444 | name: dns-tcp-local
445 | protocol: TCP
446 | - containerPort: 10055
447 | name: metrics
448 | protocol: TCP
449 | - name: dnsmasq
450 | image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
451 | livenessProbe:
452 | httpGet:
453 | path: /healthz-dnsmasq
454 | port: 8080
455 | scheme: HTTP
456 | initialDelaySeconds: 60
457 | timeoutSeconds: 5
458 | successThreshold: 1
459 | failureThreshold: 5
460 | args:
461 | - --cache-size=1000
462 | - --no-resolv
463 | - --server=127.0.0.1#10053
464 | - --log-facility=-
465 | ports:
466 | - containerPort: 53
467 | name: dns
468 | protocol: UDP
469 | - containerPort: 53
470 | name: dns-tcp
471 | protocol: TCP
472 | # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
473 | resources:
474 | requests:
475 | cpu: 150m
476 | memory: 10Mi
477 | - name: dnsmasq-metrics
478 | image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
479 | livenessProbe:
480 | httpGet:
481 | path: /metrics
482 | port: 10054
483 | scheme: HTTP
484 | initialDelaySeconds: 60
485 | timeoutSeconds: 5
486 | successThreshold: 1
487 | failureThreshold: 5
488 | args:
489 | - --v=2
490 | - --logtostderr
491 | ports:
492 | - containerPort: 10054
493 | name: metrics
494 | protocol: TCP
495 | resources:
496 | requests:
497 | memory: 10Mi
498 | - name: healthz
499 | image: gcr.io/google_containers/exechealthz-amd64:1.2
500 | resources:
501 | limits:
502 | memory: 50Mi
503 | requests:
504 | cpu: 10m
505 | memory: 50Mi
506 | args:
507 | - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
508 | - --url=/healthz-dnsmasq
509 | - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
510 | - --url=/healthz-kubedns
511 | - --port=8080
512 | - --quiet
513 | ports:
514 | - containerPort: 8080
515 | protocol: TCP
516 | dnsPolicy: Default
517 | EOF
518 | fi
519 |
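# kube-dns autoscaler: cluster-proportional-autoscaler scales the kube-dns Deployment linearly with the
# number of cluster cores and nodes.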
520 | local TEMPLATE=/srv/kubernetes/manifests/kube-dns-autoscaler-de.yaml
521 | if [ ! -f $TEMPLATE ]; then
522 | echo "TEMPLATE: $TEMPLATE"
523 | mkdir -p $(dirname $TEMPLATE)
524 | cat << EOF > $TEMPLATE
525 | apiVersion: extensions/v1beta1
526 | kind: Deployment
527 | metadata:
528 | name: kube-dns-autoscaler
529 | namespace: kube-system
530 | labels:
531 | k8s-app: kube-dns-autoscaler
532 | kubernetes.io/cluster-service: "true"
533 | spec:
534 | template:
535 | metadata:
536 | labels:
537 | k8s-app: kube-dns-autoscaler
538 | annotations:
539 | scheduler.alpha.kubernetes.io/critical-pod: ''
540 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
541 | spec:
542 | containers:
543 | - name: autoscaler
544 | image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
545 | resources:
546 | requests:
547 | cpu: "20m"
548 | memory: "10Mi"
549 | command:
550 | - /cluster-proportional-autoscaler
551 | - --namespace=kube-system
552 | - --configmap=kube-dns-autoscaler
553 | - --mode=linear
554 | - --target=Deployment/kube-dns
555 | - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}
556 | - --logtostderr=true
557 | - --v=2
558 | EOF
559 | fi
560 |
561 | local TEMPLATE=/srv/kubernetes/manifests/kube-dns-svc.yaml
562 | if [ ! -f $TEMPLATE ]; then
563 | echo "TEMPLATE: $TEMPLATE"
564 | mkdir -p $(dirname $TEMPLATE)
565 | cat << EOF > $TEMPLATE
566 | apiVersion: v1
567 | kind: Service
568 | metadata:
569 | name: kube-dns
570 | namespace: kube-system
571 | labels:
572 | k8s-app: kube-dns
573 | kubernetes.io/cluster-service: "true"
574 | kubernetes.io/name: "KubeDNS"
575 | spec:
576 | selector:
577 | k8s-app: kube-dns
578 | clusterIP: ${DNS_SERVICE_IP}
579 | ports:
580 | - name: dns
581 | port: 53
582 | protocol: UDP
583 | - name: dns-tcp
584 | port: 53
585 | protocol: TCP
586 | EOF
587 | fi
588 |
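# Heapster (cluster metrics) with the addon-resizer nanny, which adjusts Heapster's resource requests
# and limits as the cluster grows.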
589 | local TEMPLATE=/srv/kubernetes/manifests/heapster-de.yaml
590 | if [ ! -f $TEMPLATE ]; then
591 | echo "TEMPLATE: $TEMPLATE"
592 | mkdir -p $(dirname $TEMPLATE)
593 | cat << EOF > $TEMPLATE
594 | apiVersion: extensions/v1beta1
595 | kind: Deployment
596 | metadata:
597 | name: heapster-v1.2.0
598 | namespace: kube-system
599 | labels:
600 | k8s-app: heapster
601 | kubernetes.io/cluster-service: "true"
602 | version: v1.2.0
603 | spec:
604 | replicas: 1
605 | selector:
606 | matchLabels:
607 | k8s-app: heapster
608 | version: v1.2.0
609 | template:
610 | metadata:
611 | labels:
612 | k8s-app: heapster
613 | version: v1.2.0
614 | annotations:
615 | scheduler.alpha.kubernetes.io/critical-pod: ''
616 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
617 | spec:
618 | containers:
619 | - image: gcr.io/google_containers/heapster:v1.2.0
620 | name: heapster
621 | livenessProbe:
622 | httpGet:
623 | path: /healthz
624 | port: 8082
625 | scheme: HTTP
626 | initialDelaySeconds: 180
627 | timeoutSeconds: 5
628 | command:
629 | - /heapster
630 | - --source=kubernetes.summary_api:''
631 | - image: gcr.io/google_containers/addon-resizer:1.6
632 | name: heapster-nanny
633 | resources:
634 | limits:
635 | cpu: 50m
636 | memory: 90Mi
637 | requests:
638 | cpu: 50m
639 | memory: 90Mi
640 | env:
641 | - name: MY_POD_NAME
642 | valueFrom:
643 | fieldRef:
644 | fieldPath: metadata.name
645 | - name: MY_POD_NAMESPACE
646 | valueFrom:
647 | fieldRef:
648 | fieldPath: metadata.namespace
649 | command:
650 | - /pod_nanny
651 | - --cpu=80m
652 | - --extra-cpu=4m
653 | - --memory=200Mi
654 | - --extra-memory=4Mi
655 | - --threshold=5
656 | - --deployment=heapster-v1.2.0
657 | - --container=heapster
658 | - --poll-period=300000
659 | - --estimator=exponential
660 | EOF
661 | fi
662 |
663 | local TEMPLATE=/srv/kubernetes/manifests/heapster-svc.yaml
664 | if [ ! -f $TEMPLATE ]; then
665 | echo "TEMPLATE: $TEMPLATE"
666 | mkdir -p $(dirname $TEMPLATE)
667 | cat << EOF > $TEMPLATE
668 | kind: Service
669 | apiVersion: v1
670 | metadata:
671 | name: heapster
672 | namespace: kube-system
673 | labels:
674 | kubernetes.io/cluster-service: "true"
675 | kubernetes.io/name: "Heapster"
676 | spec:
677 | ports:
678 | - port: 80
679 | targetPort: 8082
680 | selector:
681 | k8s-app: heapster
682 | EOF
683 | fi
684 |
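# Kubernetes Dashboard UI, listening on container port 9090; exposed in-cluster by the Service rendered below.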
685 | local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-de.yaml
686 | if [ ! -f $TEMPLATE ]; then
687 | echo "TEMPLATE: $TEMPLATE"
688 | mkdir -p $(dirname $TEMPLATE)
689 | cat << EOF > $TEMPLATE
690 | apiVersion: extensions/v1beta1
691 | kind: Deployment
692 | metadata:
693 | name: kubernetes-dashboard
694 | namespace: kube-system
695 | labels:
696 | k8s-app: kubernetes-dashboard
697 | kubernetes.io/cluster-service: "true"
698 | spec:
699 | selector:
700 | matchLabels:
701 | k8s-app: kubernetes-dashboard
702 | template:
703 | metadata:
704 | labels:
705 | k8s-app: kubernetes-dashboard
706 | annotations:
707 | scheduler.alpha.kubernetes.io/critical-pod: ''
708 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
709 | spec:
710 | containers:
711 | - name: kubernetes-dashboard
712 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0
713 | resources:
714 | # keep request = limit to keep this container in guaranteed class
715 | limits:
716 | cpu: 100m
717 | memory: 50Mi
718 | requests:
719 | cpu: 100m
720 | memory: 50Mi
721 | ports:
722 | - containerPort: 9090
723 | livenessProbe:
724 | httpGet:
725 | path: /
726 | port: 9090
727 | initialDelaySeconds: 30
728 | timeoutSeconds: 30
729 | EOF
730 | fi
731 |
732 | local TEMPLATE=/srv/kubernetes/manifests/kube-dashboard-svc.yaml
733 | if [ ! -f $TEMPLATE ]; then
734 | echo "TEMPLATE: $TEMPLATE"
735 | mkdir -p $(dirname $TEMPLATE)
736 | cat << EOF > $TEMPLATE
737 | apiVersion: v1
738 | kind: Service
739 | metadata:
740 | name: kubernetes-dashboard
741 | namespace: kube-system
742 | labels:
743 | k8s-app: kubernetes-dashboard
744 | kubernetes.io/cluster-service: "true"
745 | spec:
746 | selector:
747 | k8s-app: kubernetes-dashboard
748 | ports:
749 | - port: 80
750 | targetPort: 9090
751 | EOF
752 | fi
753 |
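# flannel options: use the node's advertise IP for inter-host traffic and read network config from the etcd cluster.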
754 | local TEMPLATE=/etc/flannel/options.env
755 | if [ ! -f $TEMPLATE ]; then
756 | echo "TEMPLATE: $TEMPLATE"
757 | mkdir -p $(dirname $TEMPLATE)
758 | cat << EOF > $TEMPLATE
759 | FLANNELD_IFACE=$ADVERTISE_IP
760 | FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS
761 | EOF
762 | fi
763 |
764 | local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf
765 | if [ ! -f $TEMPLATE ]; then
766 | echo "TEMPLATE: $TEMPLATE"
767 | mkdir -p $(dirname $TEMPLATE)
768 | cat << EOF > $TEMPLATE
769 | [Service]
770 | ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env
771 | EOF
772 | fi
773 |
774 | local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf
775 | if [ ! -f $TEMPLATE ]; then
776 | echo "TEMPLATE: $TEMPLATE"
777 | mkdir -p $(dirname $TEMPLATE)
778 | cat << EOF > $TEMPLATE
779 | [Unit]
780 | Requires=flanneld.service
781 | After=flanneld.service
782 | [Service]
783 | EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env
784 | EOF
785 | fi
786 |
787 | local TEMPLATE=/etc/kubernetes/cni/docker_opts_cni.env
788 | if [ ! -f $TEMPLATE ]; then
789 | echo "TEMPLATE: $TEMPLATE"
790 | mkdir -p $(dirname $TEMPLATE)
791 | cat << EOF > $TEMPLATE
792 | DOCKER_OPT_BIP=""
793 | DOCKER_OPT_IPMASQ=""
794 | EOF
795 | fi
796 |
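# When Calico is disabled, install a CNI config that delegates pod networking to flannel.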
797 | local TEMPLATE=/etc/kubernetes/cni/net.d/10-flannel.conf
798 | if [ "${USE_CALICO}" = "false" ] && [ ! -f "${TEMPLATE}" ]; then
799 | echo "TEMPLATE: $TEMPLATE"
800 | mkdir -p $(dirname $TEMPLATE)
801 | cat << EOF > $TEMPLATE
802 | {
803 | "name": "podnet",
804 | "type": "flannel",
805 | "delegate": {
806 | "isDefaultGateway": true
807 | }
808 | }
809 | EOF
810 | fi
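# When Calico is enabled, render the self-hosted Calico manifest (calico-config ConfigMap, calico-node
# DaemonSet, and the policy controller); enable_calico applies it once the API server is up.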
811 | local TEMPLATE=/srv/kubernetes/manifests/calico.yaml
812 | if [ "${USE_CALICO}" = "true" ]; then
813 | echo "TEMPLATE: $TEMPLATE"
814 | mkdir -p $(dirname $TEMPLATE)
815 | cat << EOF > $TEMPLATE
816 | # This ConfigMap is used to configure a self-hosted Calico installation.
817 | kind: ConfigMap
818 | apiVersion: v1
819 | metadata:
820 | name: calico-config
821 | namespace: kube-system
822 | data:
823 | # Configure this with the location of your etcd cluster.
824 | etcd_endpoints: "${ETCD_ENDPOINTS}"
825 |
826 | # The CNI network configuration to install on each node. The special
827 | # values in this config will be automatically populated.
828 | cni_network_config: |-
829 | {
830 | "name": "calico",
831 | "type": "flannel",
832 | "delegate": {
833 | "type": "calico",
834 | "etcd_endpoints": "__ETCD_ENDPOINTS__",
835 | "log_level": "info",
836 | "policy": {
837 | "type": "k8s",
838 | "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
839 | "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
840 | },
841 | "kubernetes": {
842 | "kubeconfig": "/etc/kubernetes/cni/net.d/__KUBECONFIG_FILENAME__"
843 | }
844 | }
845 | }
846 |
847 | ---
848 |
849 | # This manifest installs the calico/node container, as well
850 | # as the Calico CNI plugins and network config on
851 | # each master and worker node in a Kubernetes cluster.
852 | kind: DaemonSet
853 | apiVersion: extensions/v1beta1
854 | metadata:
855 | name: calico-node
856 | namespace: kube-system
857 | labels:
858 | k8s-app: calico-node
859 | spec:
860 | selector:
861 | matchLabels:
862 | k8s-app: calico-node
863 | template:
864 | metadata:
865 | labels:
866 | k8s-app: calico-node
867 | annotations:
868 | scheduler.alpha.kubernetes.io/critical-pod: ''
869 | scheduler.alpha.kubernetes.io/tolerations: |
870 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
871 | {"key":"CriticalAddonsOnly", "operator":"Exists"}]
872 | spec:
873 | hostNetwork: true
874 | containers:
875 | # Runs calico/node container on each Kubernetes node. This
876 | # container programs network policy and routes on each
877 | # host.
878 | - name: calico-node
879 | image: quay.io/calico/node:v0.23.0
880 | env:
881 | # The location of the Calico etcd cluster.
882 | - name: ETCD_ENDPOINTS
883 | valueFrom:
884 | configMapKeyRef:
885 | name: calico-config
886 | key: etcd_endpoints
887 | # Choose the backend to use.
888 | - name: CALICO_NETWORKING_BACKEND
889 | value: "none"
890 | # Disable file logging so `kubectl logs` works.
891 | - name: CALICO_DISABLE_FILE_LOGGING
892 | value: "true"
893 | - name: NO_DEFAULT_POOLS
894 | value: "true"
895 | securityContext:
896 | privileged: true
897 | volumeMounts:
898 | - mountPath: /lib/modules
899 | name: lib-modules
900 | readOnly: false
901 | - mountPath: /var/run/calico
902 | name: var-run-calico
903 | readOnly: false
904 | - mountPath: /etc/resolv.conf
905 | name: dns
906 | readOnly: true
907 | # This container installs the Calico CNI binaries
908 | # and CNI network config file on each node.
909 | - name: install-cni
910 | image: quay.io/calico/cni:v1.5.2
911 | imagePullPolicy: Always
912 | command: ["/install-cni.sh"]
913 | env:
914 | # CNI configuration filename
915 | - name: CNI_CONF_NAME
916 | value: "10-calico.conf"
917 | # The location of the Calico etcd cluster.
918 | - name: ETCD_ENDPOINTS
919 | valueFrom:
920 | configMapKeyRef:
921 | name: calico-config
922 | key: etcd_endpoints
923 | # The CNI network config to install on each node.
924 | - name: CNI_NETWORK_CONFIG
925 | valueFrom:
926 | configMapKeyRef:
927 | name: calico-config
928 | key: cni_network_config
929 | volumeMounts:
930 | - mountPath: /host/opt/cni/bin
931 | name: cni-bin-dir
932 | - mountPath: /host/etc/cni/net.d
933 | name: cni-net-dir
934 | volumes:
935 | # Used by calico/node.
936 | - name: lib-modules
937 | hostPath:
938 | path: /lib/modules
939 | - name: var-run-calico
940 | hostPath:
941 | path: /var/run/calico
942 | # Used to install CNI.
943 | - name: cni-bin-dir
944 | hostPath:
945 | path: /opt/cni/bin
946 | - name: cni-net-dir
947 | hostPath:
948 | path: /etc/kubernetes/cni/net.d
949 | - name: dns
950 | hostPath:
951 | path: /etc/resolv.conf
952 |
953 | ---
954 |
955 | # This manifest deploys the Calico policy controller on Kubernetes.
956 | # See https://github.com/projectcalico/k8s-policy
957 | apiVersion: extensions/v1beta1
958 | kind: ReplicaSet
959 | metadata:
960 | name: calico-policy-controller
961 | namespace: kube-system
962 | labels:
963 | k8s-app: calico-policy
964 | spec:
965 | # The policy controller can only have a single active instance.
966 | replicas: 1
967 | template:
968 | metadata:
969 | name: calico-policy-controller
970 | namespace: kube-system
971 | labels:
972 | k8s-app: calico-policy
973 | annotations:
974 | scheduler.alpha.kubernetes.io/critical-pod: ''
975 | scheduler.alpha.kubernetes.io/tolerations: |
976 | [{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
977 | {"key":"CriticalAddonsOnly", "operator":"Exists"}]
978 | spec:
979 | # The policy controller must run in the host network namespace so that
980 | # it isn't governed by policy that would prevent it from working.
981 | hostNetwork: true
982 | containers:
983 | - name: calico-policy-controller
984 | image: calico/kube-policy-controller:v0.4.0
985 | env:
986 | # The location of the Calico etcd cluster.
987 | - name: ETCD_ENDPOINTS
988 | valueFrom:
989 | configMapKeyRef:
990 | name: calico-config
991 | key: etcd_endpoints
992 | # The location of the Kubernetes API. Use the default Kubernetes
993 | # service for API access.
994 | - name: K8S_API
995 | value: "https://kubernetes.default:443"
996 | # Since we're running in the host namespace and might not have KubeDNS
997 | # access, configure the container's /etc/hosts to resolve
998 | # kubernetes.default to the correct service clusterIP.
999 | - name: CONFIGURE_ETC_HOSTS
1000 | value: "true"
1001 | EOF
1002 | fi
1003 | }
1004 |
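# start_addons waits for the API server's insecure local port (8080), then POSTs the add-on manifests
# (kube-dns, DNS autoscaler, Heapster, dashboard) to the API.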
1005 | function start_addons {
1006 | echo "Waiting for Kubernetes API..."
1007 | until curl --silent "http://127.0.0.1:8080/version"
1008 | do
1009 | sleep 5
1010 | done
1011 | echo
1012 | echo "K8S: DNS addon"
1013 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null
1014 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null
1015 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-autoscaler-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null
1016 | echo "K8S: Heapster addon"
1017 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null
1018 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null
1019 | echo "K8S: Dashboard addon"
1020 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-de.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" > /dev/null
1021 | curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null
1022 | }
1023 |
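# enable_calico waits for the API server and applies the rendered calico.yaml with kubectl from the
# hyperkube image (run via docker for now, per the TODO below).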
1024 | function enable_calico {
1025 | echo "Waiting for Kubernetes API..."
1026 | until curl --silent "http://127.0.0.1:8080/version"
1027 | do
1028 | sleep 5
1029 | done
1030 | echo "Deploying Calico"
1031 | #TODO: change to rkt once this is resolved (https://github.com/coreos/rkt/issues/3181)
1032 | docker run --rm --net=host -v /srv/kubernetes/manifests:/host/manifests $HYPERKUBE_IMAGE_REPO:$K8S_VER /hyperkube kubectl apply -f /host/manifests/calico.yaml
1033 | }
1034 |
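# Main sequence: render configuration and templates, then bring up etcd, flannel, and the kubelet.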
1035 | init_config
1036 | init_templates
1037 | systemctl enable etcd2; systemctl start etcd2
1038 |
1039 | chmod +x /opt/bin/host-rkt
1040 |
1041 | init_flannel
1042 |
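# Mask the Container Linux update engine so automatic OS updates (and the reboots they trigger) do not
# interfere with the node.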
1043 | systemctl stop update-engine; systemctl mask update-engine
1044 |
1045 | systemctl daemon-reload
1046 |
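# With rkt as the container runtime, also enable the stage1 image loader and the rkt API service that
# the kubelet's rkt integration uses.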
1047 | if [ "$CONTAINER_RUNTIME" = "rkt" ]; then
1048 | systemctl enable load-rkt-stage1
1049 | systemctl enable rkt-api
1050 | fi
1051 |
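# Start pod networking first, then the kubelet, which begins running the static manifests written above.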
1052 | systemctl enable flanneld; systemctl start flanneld
1053 | systemctl enable kubelet; systemctl start kubelet
1054 |
1055 | if [ "$USE_CALICO" = "true" ]; then
1056 | enable_calico
1057 | fi
1058 |
1059 | start_addons
1060 | echo "DONE"
1061 |
--------------------------------------------------------------------------------