├── .gitignore ├── LICENSE ├── README.md ├── calico ├── calico.example.yaml ├── conf │ └── calicoctl.cfg └── install.sh ├── certs ├── clean.sh ├── etcd │ ├── copy.sh │ ├── create.sh │ ├── etcd-csr.json │ ├── etcd-gencert.json │ └── etcd-root-ca-csr.json ├── install_cfssl.sh └── k8s │ ├── admin-csr.json │ ├── clean.sh │ ├── copy.sh │ ├── create.sh │ ├── k8s-gencert.json │ ├── k8s-root-ca-csr.json │ ├── kube-apiserver-csr.json │ ├── kube-controller-manager-csr.json │ ├── kube-proxy-csr.json │ ├── kube-scheduler-csr.json │ └── kubelet-api-admin-csr.json ├── etcd ├── conf │ ├── etcd.conf │ ├── etcd.conf.cluster.example │ └── etcd.conf.single.example ├── install.sh ├── systemd │ └── etcd.service └── uninstall.sh ├── fixPermissions.sh ├── k8s ├── addons │ ├── coredns │ │ ├── coredns.yaml │ │ ├── coredns.yaml.sed │ │ ├── create.sh │ │ └── deploy.sh │ ├── dashborad │ │ ├── create_dashboard_sa.sh │ │ └── kubernetes-dashboard.yaml │ ├── dns-horizontal-autoscaler │ │ └── dns-horizontal-autoscaler.yaml │ └── heapster │ │ ├── grafana.yaml │ │ ├── heapster-rbac.yaml │ │ ├── heapster.yaml │ │ └── influxdb.yaml ├── bootstrapping │ └── bootstrapping.sh ├── conf │ └── example │ │ ├── 1.10.1 │ │ ├── apiserver │ │ ├── config │ │ ├── controller-manager │ │ ├── kubelet │ │ ├── proxy │ │ └── scheduler │ │ ├── 1.11.2 │ │ ├── apiserver │ │ ├── controller-manager │ │ ├── kubelet │ │ ├── kubeletconfig.yaml │ │ ├── proxy │ │ └── scheduler │ │ ├── 1.13.4 │ │ ├── apiserver │ │ ├── controller-manager │ │ ├── kubelet │ │ ├── proxy │ │ └── scheduler │ │ └── 1.8 │ │ ├── add_cni_config.sh │ │ ├── apiserver │ │ ├── config │ │ ├── controller-manager │ │ ├── kubelet │ │ ├── proxy │ │ └── scheduler ├── install.sh ├── ipvs.sh ├── systemd │ ├── kube-apiserver.service │ ├── kube-controller-manager.service │ ├── kube-proxy.service │ ├── kube-scheduler.service │ └── kubelet.service ├── test.yaml └── uninstall.sh └── nginx ├── install.sh ├── nginx-proxy.service ├── nginx.conf └── uninstall.sh /.gitignore: 
-------------------------------------------------------------------------------- 1 | k8s/hyperkube* 2 | k8s/conf/*.yaml 3 | certs/**/*.yaml 4 | calico/calico.yaml 5 | *.tar.gz 6 | *.pem 7 | *.csr 8 | *.csv 9 | *.kubeconfig 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 mritd 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ktool 2 | 3 | **本项目停止维护,目前已经切换到 kubeadm 部署,详情请参考以下文章:** 4 | 5 | - [kubeadm 搭建 HA kubernetes 集群](https://mritd.me/2020/01/21/set-up-kuberntes-ha-cluster-by-kubeadm/) 6 | - [kubeadm 集群升级](https://mritd.me/2020/01/21/how-to-upgrade-kubeadm-cluster/) 7 | - [kubeadm 证书期限调整](https://mritd.me/2020/01/21/how-to-extend-the-validity-of-your-kubeadm-certificate/) 8 | 9 | **相关文章在 mritd.com 会同步发布,文章中配置文件已经打包为 deb 文件,请参考 [kubeadm-config](https://github.com/mritd/kubeadm-config) 仓库** 10 | -------------------------------------------------------------------------------- /calico/calico.example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-etcd-secrets.yaml 3 | # The following contains k8s Secrets for use with a TLS enabled etcd cluster. 4 | # For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ 5 | apiVersion: v1 6 | kind: Secret 7 | type: Opaque 8 | metadata: 9 | name: calico-etcd-secrets 10 | namespace: kube-system 11 | data: 12 | # Populate the following with etcd TLS configuration if desired, but leave blank if 13 | # not using TLS for etcd. 14 | # The keys below should be uncommented and the values populated with the base64 15 | # encoded contents of each file that would be associated with the TLS data. 16 | # Example command for encoding a file contents: cat | base64 -w 0 17 | # etcd-key: null 18 | # etcd-cert: null 19 | # etcd-ca: null 20 | --- 21 | # Source: calico/templates/calico-config.yaml 22 | # This ConfigMap is used to configure a self-hosted Calico installation. 23 | kind: ConfigMap 24 | apiVersion: v1 25 | metadata: 26 | name: calico-config 27 | namespace: kube-system 28 | data: 29 | # Configure this with the location of your etcd cluster. 
30 | etcd_endpoints: "http://:" 31 | 32 | # If you're using TLS enabled etcd uncomment the following. 33 | # You must also populate the Secret below with these files. 34 | etcd_ca: "" # "/calico-secrets/etcd-ca" 35 | etcd_cert: "" # "/calico-secrets/etcd-cert" 36 | etcd_key: "" # "/calico-secrets/etcd-key" 37 | # Typha is disabled. 38 | typha_service_name: "none" 39 | # Configure the Calico backend to use. 40 | calico_backend: "bird" 41 | 42 | # Configure the MTU to use 43 | veth_mtu: "1440" 44 | 45 | # The CNI network configuration to install on each node. The special 46 | # values in this config will be automatically populated. 47 | cni_network_config: |- 48 | { 49 | "name": "k8s-pod-network", 50 | "cniVersion": "0.3.0", 51 | "plugins": [ 52 | { 53 | "type": "calico", 54 | "log_level": "info", 55 | "etcd_endpoints": "__ETCD_ENDPOINTS__", 56 | "etcd_key_file": "__ETCD_KEY_FILE__", 57 | "etcd_cert_file": "__ETCD_CERT_FILE__", 58 | "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", 59 | "mtu": __CNI_MTU__, 60 | "ipam": { 61 | "type": "calico-ipam" 62 | }, 63 | "policy": { 64 | "type": "k8s" 65 | }, 66 | "kubernetes": { 67 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 68 | } 69 | }, 70 | { 71 | "type": "portmap", 72 | "snat": true, 73 | "capabilities": {"portMappings": true} 74 | } 75 | ] 76 | } 77 | 78 | --- 79 | # Source: calico/templates/rbac.yaml 80 | 81 | # Include a clusterrole for the kube-controllers component, 82 | # and bind it to the calico-kube-controllers serviceaccount. 83 | kind: ClusterRole 84 | apiVersion: rbac.authorization.k8s.io/v1beta1 85 | metadata: 86 | name: calico-kube-controllers 87 | rules: 88 | # Pods are monitored for changing labels. 89 | # The node controller monitors Kubernetes nodes. 90 | # Namespace and serviceaccount labels are used for policy. 91 | - apiGroups: [""] 92 | resources: 93 | - pods 94 | - nodes 95 | - namespaces 96 | - serviceaccounts 97 | verbs: 98 | - watch 99 | - list 100 | # Watch for changes to Kubernetes NetworkPolicies. 
101 | - apiGroups: ["networking.k8s.io"] 102 | resources: 103 | - networkpolicies 104 | verbs: 105 | - watch 106 | - list 107 | --- 108 | kind: ClusterRoleBinding 109 | apiVersion: rbac.authorization.k8s.io/v1beta1 110 | metadata: 111 | name: calico-kube-controllers 112 | roleRef: 113 | apiGroup: rbac.authorization.k8s.io 114 | kind: ClusterRole 115 | name: calico-kube-controllers 116 | subjects: 117 | - kind: ServiceAccount 118 | name: calico-kube-controllers 119 | namespace: kube-system 120 | --- 121 | # Include a clusterrole for the calico-node DaemonSet, 122 | # and bind it to the calico-node serviceaccount. 123 | kind: ClusterRole 124 | apiVersion: rbac.authorization.k8s.io/v1beta1 125 | metadata: 126 | name: calico-node 127 | rules: 128 | # The CNI plugin needs to get pods, nodes, and namespaces. 129 | - apiGroups: [""] 130 | resources: 131 | - pods 132 | - nodes 133 | - namespaces 134 | verbs: 135 | - get 136 | - apiGroups: [""] 137 | resources: 138 | - endpoints 139 | - services 140 | verbs: 141 | # Used to discover service IPs for advertisement. 142 | - watch 143 | - list 144 | - apiGroups: [""] 145 | resources: 146 | - nodes/status 147 | verbs: 148 | # Needed for clearing NodeNetworkUnavailable flag. 149 | - patch 150 | --- 151 | apiVersion: rbac.authorization.k8s.io/v1beta1 152 | kind: ClusterRoleBinding 153 | metadata: 154 | name: calico-node 155 | roleRef: 156 | apiGroup: rbac.authorization.k8s.io 157 | kind: ClusterRole 158 | name: calico-node 159 | subjects: 160 | - kind: ServiceAccount 161 | name: calico-node 162 | namespace: kube-system 163 | --- 164 | 165 | --- 166 | # Source: calico/templates/calico-node.yaml 167 | # This manifest installs the calico/node container, as well 168 | # as the Calico CNI plugins and network config on 169 | # each master and worker node in a Kubernetes cluster. 
170 | kind: DaemonSet 171 | apiVersion: extensions/v1beta1 172 | metadata: 173 | name: calico-node 174 | namespace: kube-system 175 | labels: 176 | k8s-app: calico-node 177 | spec: 178 | selector: 179 | matchLabels: 180 | k8s-app: calico-node 181 | updateStrategy: 182 | type: RollingUpdate 183 | rollingUpdate: 184 | maxUnavailable: 1 185 | template: 186 | metadata: 187 | labels: 188 | k8s-app: calico-node 189 | annotations: 190 | # This, along with the CriticalAddonsOnly toleration below, 191 | # marks the pod as a critical add-on, ensuring it gets 192 | # priority scheduling and that its resources are reserved 193 | # if it ever gets evicted. 194 | scheduler.alpha.kubernetes.io/critical-pod: '' 195 | spec: 196 | nodeSelector: 197 | beta.kubernetes.io/os: linux 198 | hostNetwork: true 199 | tolerations: 200 | # Make sure calico-node gets scheduled on all nodes. 201 | - effect: NoSchedule 202 | operator: Exists 203 | # Mark the pod as a critical add-on for rescheduling. 204 | - key: CriticalAddonsOnly 205 | operator: Exists 206 | - effect: NoExecute 207 | operator: Exists 208 | serviceAccountName: calico-node 209 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 210 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 211 | terminationGracePeriodSeconds: 0 212 | initContainers: 213 | # This container installs the Calico CNI binaries 214 | # and CNI network config file on each node. 215 | - name: install-cni 216 | image: calico/cni:v3.6.0 217 | command: ["/install-cni.sh"] 218 | env: 219 | # Name of the CNI config file to create. 220 | - name: CNI_CONF_NAME 221 | value: "10-calico.conflist" 222 | # The CNI network config to install on each node. 223 | - name: CNI_NETWORK_CONFIG 224 | valueFrom: 225 | configMapKeyRef: 226 | name: calico-config 227 | key: cni_network_config 228 | # The location of the Calico etcd cluster. 
229 | - name: ETCD_ENDPOINTS 230 | valueFrom: 231 | configMapKeyRef: 232 | name: calico-config 233 | key: etcd_endpoints 234 | # CNI MTU Config variable 235 | - name: CNI_MTU 236 | valueFrom: 237 | configMapKeyRef: 238 | name: calico-config 239 | key: veth_mtu 240 | # Prevents the container from sleeping forever. 241 | - name: SLEEP 242 | value: "false" 243 | volumeMounts: 244 | - mountPath: /host/opt/cni/bin 245 | name: cni-bin-dir 246 | - mountPath: /host/etc/cni/net.d 247 | name: cni-net-dir 248 | - mountPath: /calico-secrets 249 | name: etcd-certs 250 | containers: 251 | # Runs calico/node container on each Kubernetes node. This 252 | # container programs network policy and routes on each 253 | # host. 254 | - name: calico-node 255 | image: calico/node:v3.6.0 256 | env: 257 | # The location of the Calico etcd cluster. 258 | - name: ETCD_ENDPOINTS 259 | valueFrom: 260 | configMapKeyRef: 261 | name: calico-config 262 | key: etcd_endpoints 263 | # Location of the CA certificate for etcd. 264 | - name: ETCD_CA_CERT_FILE 265 | valueFrom: 266 | configMapKeyRef: 267 | name: calico-config 268 | key: etcd_ca 269 | # Location of the client key for etcd. 270 | - name: ETCD_KEY_FILE 271 | valueFrom: 272 | configMapKeyRef: 273 | name: calico-config 274 | key: etcd_key 275 | # Location of the client certificate for etcd. 276 | - name: ETCD_CERT_FILE 277 | valueFrom: 278 | configMapKeyRef: 279 | name: calico-config 280 | key: etcd_cert 281 | # Set noderef for node controller. 282 | - name: CALICO_K8S_NODE_REF 283 | valueFrom: 284 | fieldRef: 285 | fieldPath: spec.nodeName 286 | # Choose the backend to use. 287 | - name: CALICO_NETWORKING_BACKEND 288 | valueFrom: 289 | configMapKeyRef: 290 | name: calico-config 291 | key: calico_backend 292 | # Cluster type to identify the deployment type 293 | - name: CLUSTER_TYPE 294 | value: "k8s,bgp" 295 | # Auto-detect the BGP IP address. 
296 | - name: IP 297 | value: "autodetect" 298 | # Enable IPIP 299 | - name: CALICO_IPV4POOL_IPIP 300 | value: "Always" 301 | # Set MTU for tunnel device used if ipip is enabled 302 | - name: FELIX_IPINIPMTU 303 | valueFrom: 304 | configMapKeyRef: 305 | name: calico-config 306 | key: veth_mtu 307 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 308 | # chosen from this range. Changing this value after installation will have 309 | # no effect. This should fall within `--cluster-cidr`. 310 | - name: CALICO_IPV4POOL_CIDR 311 | value: "10.20.0.0/16" 312 | # Disable file logging so `kubectl logs` works. 313 | - name: CALICO_DISABLE_FILE_LOGGING 314 | value: "true" 315 | # Set Felix endpoint to host default action to ACCEPT. 316 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 317 | value: "ACCEPT" 318 | # Disable IPv6 on Kubernetes. 319 | - name: FELIX_IPV6SUPPORT 320 | value: "false" 321 | # Set Felix logging to "info" 322 | - name: FELIX_LOGSEVERITYSCREEN 323 | value: "info" 324 | - name: FELIX_HEALTHENABLED 325 | value: "true" 326 | - name: IP_AUTODETECTION_METHOD 327 | value: can-reach=192.168.1.51 328 | securityContext: 329 | privileged: true 330 | resources: 331 | requests: 332 | cpu: 250m 333 | livenessProbe: 334 | httpGet: 335 | path: /liveness 336 | port: 9099 337 | host: localhost 338 | periodSeconds: 10 339 | initialDelaySeconds: 10 340 | failureThreshold: 6 341 | readinessProbe: 342 | exec: 343 | command: 344 | - /bin/calico-node 345 | - -bird-ready 346 | - -felix-ready 347 | periodSeconds: 10 348 | volumeMounts: 349 | - mountPath: /lib/modules 350 | name: lib-modules 351 | readOnly: true 352 | - mountPath: /run/xtables.lock 353 | name: xtables-lock 354 | readOnly: false 355 | - mountPath: /var/run/calico 356 | name: var-run-calico 357 | readOnly: false 358 | - mountPath: /var/lib/calico 359 | name: var-lib-calico 360 | readOnly: false 361 | - mountPath: /calico-secrets 362 | name: etcd-certs 363 | volumes: 364 | # Used by 
calico/node. 365 | - name: lib-modules 366 | hostPath: 367 | path: /lib/modules 368 | - name: var-run-calico 369 | hostPath: 370 | path: /var/run/calico 371 | - name: var-lib-calico 372 | hostPath: 373 | path: /var/lib/calico 374 | - name: xtables-lock 375 | hostPath: 376 | path: /run/xtables.lock 377 | type: FileOrCreate 378 | # Used to install CNI. 379 | - name: cni-bin-dir 380 | hostPath: 381 | path: /opt/cni/bin 382 | - name: cni-net-dir 383 | hostPath: 384 | path: /etc/cni/net.d 385 | # Mount in the etcd TLS secrets with mode 400. 386 | # See https://kubernetes.io/docs/concepts/configuration/secret/ 387 | - name: etcd-certs 388 | secret: 389 | secretName: calico-etcd-secrets 390 | defaultMode: 0400 391 | --- 392 | 393 | apiVersion: v1 394 | kind: ServiceAccount 395 | metadata: 396 | name: calico-node 397 | namespace: kube-system 398 | 399 | --- 400 | # Source: calico/templates/calico-kube-controllers.yaml 401 | # This manifest deploys the Calico Kubernetes controllers. 402 | # See https://github.com/projectcalico/kube-controllers 403 | apiVersion: extensions/v1beta1 404 | kind: Deployment 405 | metadata: 406 | name: calico-kube-controllers 407 | namespace: kube-system 408 | labels: 409 | k8s-app: calico-kube-controllers 410 | annotations: 411 | scheduler.alpha.kubernetes.io/critical-pod: '' 412 | spec: 413 | # The controllers can only have a single active instance. 414 | replicas: 1 415 | strategy: 416 | type: Recreate 417 | template: 418 | metadata: 419 | name: calico-kube-controllers 420 | namespace: kube-system 421 | labels: 422 | k8s-app: calico-kube-controllers 423 | spec: 424 | nodeSelector: 425 | beta.kubernetes.io/os: linux 426 | # The controllers must run in the host network namespace so that 427 | # it isn't governed by policy that would prevent it from working. 428 | hostNetwork: true 429 | tolerations: 430 | # Mark the pod as a critical add-on for rescheduling. 
431 | - key: CriticalAddonsOnly 432 | operator: Exists 433 | - key: node-role.kubernetes.io/master 434 | effect: NoSchedule 435 | serviceAccountName: calico-kube-controllers 436 | containers: 437 | - name: calico-kube-controllers 438 | image: calico/kube-controllers:v3.6.0 439 | env: 440 | # The location of the Calico etcd cluster. 441 | - name: ETCD_ENDPOINTS 442 | valueFrom: 443 | configMapKeyRef: 444 | name: calico-config 445 | key: etcd_endpoints 446 | # Location of the CA certificate for etcd. 447 | - name: ETCD_CA_CERT_FILE 448 | valueFrom: 449 | configMapKeyRef: 450 | name: calico-config 451 | key: etcd_ca 452 | # Location of the client key for etcd. 453 | - name: ETCD_KEY_FILE 454 | valueFrom: 455 | configMapKeyRef: 456 | name: calico-config 457 | key: etcd_key 458 | # Location of the client certificate for etcd. 459 | - name: ETCD_CERT_FILE 460 | valueFrom: 461 | configMapKeyRef: 462 | name: calico-config 463 | key: etcd_cert 464 | # Choose which controllers to run. 465 | - name: ENABLED_CONTROLLERS 466 | value: policy,namespace,serviceaccount,workloadendpoint,node 467 | volumeMounts: 468 | # Mount in the etcd TLS secrets. 469 | - mountPath: /calico-secrets 470 | name: etcd-certs 471 | readinessProbe: 472 | exec: 473 | command: 474 | - /usr/bin/check-status 475 | - -r 476 | volumes: 477 | # Mount in the etcd TLS secrets with mode 400. 
478 | # See https://kubernetes.io/docs/concepts/configuration/secret/ 479 | - name: etcd-certs 480 | secret: 481 | secretName: calico-etcd-secrets 482 | defaultMode: 0400 483 | 484 | --- 485 | 486 | apiVersion: v1 487 | kind: ServiceAccount 488 | metadata: 489 | name: calico-kube-controllers 490 | namespace: kube-system 491 | --- 492 | # Source: calico/templates/calico-typha.yaml 493 | 494 | --- 495 | # Source: calico/templates/configure-canal.yaml 496 | 497 | --- 498 | # Source: calico/templates/kdd-crds.yaml 499 | 500 | 501 | -------------------------------------------------------------------------------- /calico/conf/calicoctl.cfg: -------------------------------------------------------------------------------- 1 | apiVersion: projectcalico.org/v3 2 | kind: CalicoAPIConfig 3 | metadata: 4 | spec: 5 | datastoreType: etcdv3 6 | etcdEndpoints: https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379 7 | etcdKeyFile: /etc/calico/etcd-key.pem 8 | etcdCertFile: /etc/calico/etcd.pem 9 | etcdCACertFile: /etc/calico/etcd-root-ca.pem 10 | -------------------------------------------------------------------------------- /calico/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | K8S_MASTER_IP=$1 6 | ETCD_ENDPOINTS=$2 7 | CALICO_VERSION="3.1.0" 8 | 9 | if [ "" == "${K8S_MASTER_IP}" ]; then 10 | echo -e "\033[33mWARNING: K8S_MASTER_IP is blank,use default value: 192.168.1.51\033[0m" 11 | K8S_MASTER_IP="192.168.1.51" 12 | fi 13 | 14 | if [ "" == "${ETCD_ENDPOINTS}" ]; then 15 | echo -e "\033[33mWARNING: ETCD_ENDPOINTS is blank,use default value: https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379\033[0m" 16 | ETCD_ENDPOINTS="https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379" 17 | fi 18 | 19 | ETCD_CERT=`cat conf/etcd.pem | base64 | tr -d '\n'` 20 | ETCD_KEY=`cat conf/etcd-key.pem | base64 | tr -d '\n'` 21 | ETCD_CA=`cat 
conf/etcd-root-ca.pem | base64 | tr -d '\n'` 22 | 23 | 24 | cp calico.example.yaml calico.yaml 25 | 26 | sed -i "s@.*etcd_endpoints:.*@\ \ etcd_endpoints:\ \"${ETCD_ENDPOINTS}\"@gi" calico.yaml 27 | 28 | sed -i "s@.*etcd-cert:.*@\ \ etcd-cert:\ ${ETCD_CERT}@gi" calico.yaml 29 | sed -i "s@.*etcd-key:.*@\ \ etcd-key:\ ${ETCD_KEY}@gi" calico.yaml 30 | sed -i "s@.*etcd-ca:.*@\ \ etcd-ca:\ ${ETCD_CA}@gi" calico.yaml 31 | 32 | sed -i 's@.*etcd_ca:.*@\ \ etcd_ca:\ "/calico-secrets/etcd-ca"@gi' calico.yaml 33 | sed -i 's@.*etcd_cert:.*@\ \ etcd_cert:\ "/calico-secrets/etcd-cert"@gi' calico.yaml 34 | sed -i 's@.*etcd_key:.*@\ \ etcd_key:\ "/calico-secrets/etcd-key"@gi' calico.yaml 35 | 36 | sed -i "s@K8S_MASTER_IP@${K8S_MASTER_IP}@gi" calico.yaml 37 | 38 | wget https://github.com/projectcalico/calicoctl/releases/download/v3.2.6/calicoctl-linux-amd64 -O /usr/bin/calicoctl 39 | chmod +x /usr/bin/calicoctl 40 | 41 | sed -i "s@.*etcdEndpoints:.*@\ \ etcdEndpoints:\ ${ETCD_ENDPOINTS}@gi" conf/calicoctl.cfg 42 | 43 | rm -rf /etc/calico && cp -r conf /etc/calico 44 | 45 | echo -e "\033[32m\nGenerate the configuration file done! 
Next:\n\033[0m" 46 | echo -e "\033[32mUse \"kubectl create -f calico.yaml\" to create calico-kube-controllers.\033[0m" 47 | echo -e "\033[32m\nFinally, do not forget to add an \"--network-plugin=cni\" option to kubelet.\033[0m" 48 | -------------------------------------------------------------------------------- /certs/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -f k8s/{*.pem,*.csr,*.yaml,*.kubeconfig,token.csv} 4 | rm -f etcd/{*.pem,*.csr} 5 | 6 | rm -rf ../k8s/conf/{*.kubeconfig,*.yaml,token.csv,ssl} 7 | rm -rf ../etcd/conf/ssl 8 | 9 | rm -rf ../calico/conf/*.pem 10 | -------------------------------------------------------------------------------- /certs/etcd/copy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf ../../etcd/conf/ssl 4 | mkdir ../../etcd/conf/ssl 5 | cp *.pem ../../etcd/conf/ssl 6 | cp *.pem ../../calico/conf 7 | -------------------------------------------------------------------------------- /certs/etcd/create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cfssl gencert --initca=true etcd-root-ca-csr.json | cfssljson --bare etcd-root-ca 4 | cfssl gencert --ca etcd-root-ca.pem --ca-key etcd-root-ca-key.pem --config etcd-gencert.json etcd-csr.json | cfssljson --bare etcd 5 | -------------------------------------------------------------------------------- /certs/etcd/etcd-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "key": { 3 | "algo": "rsa", 4 | "size": 2048 5 | }, 6 | "names": [ 7 | { 8 | "O": "etcd", 9 | "OU": "etcd Security", 10 | "L": "Beijing", 11 | "ST": "Beijing", 12 | "C": "CN" 13 | } 14 | ], 15 | "CN": "etcd", 16 | "hosts": [ 17 | "127.0.0.1", 18 | "localhost", 19 | "192.168.1.51", 20 | "192.168.1.52", 21 | "192.168.1.53" 22 | ] 23 | } 24 | 
-------------------------------------------------------------------------------- /certs/etcd/etcd-gencert.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "usages": [ 5 | "signing", 6 | "key encipherment", 7 | "server auth", 8 | "client auth" 9 | ], 10 | "expiry": "87600h" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /certs/etcd/etcd-root-ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "etcd-root-ca", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 4096 6 | }, 7 | "names": [ 8 | { 9 | "O": "etcd", 10 | "OU": "etcd Security", 11 | "L": "Beijing", 12 | "ST": "Beijing", 13 | "C": "CN" 14 | } 15 | ], 16 | "ca": { 17 | "expiry": "87600h" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /certs/install_cfssl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | wget https://oss.link/files/cfssl.tar.gz 6 | tar -zxvf cfssl.tar.gz -C /usr/bin 7 | rm -f cfssl.tar.gz 8 | -------------------------------------------------------------------------------- /certs/k8s/admin-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:masters", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "system:masters", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /certs/k8s/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -f *.pem *.csr *.yaml *.kubeconfig token.csv 4 | rm -rf ../conf/{*.kubeconfig,*.yaml,token.csv,ssl} 5 | 
-------------------------------------------------------------------------------- /certs/k8s/copy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf ../../k8s/conf/ssl 4 | rm -f ../../k8s/conf/*.csv 5 | rm -f ../../k8s/conf/*.yaml 6 | rm -f ../../k8s/conf/*.kubeconfig 7 | 8 | mkdir ../../k8s/conf/ssl 9 | cp *.pem ../../k8s/conf/ssl 10 | cp *.yaml ../../k8s/conf 11 | cp *.kubeconfig ../../k8s/conf 12 | -------------------------------------------------------------------------------- /certs/k8s/create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cfssl gencert --initca=true k8s-root-ca-csr.json | cfssljson --bare k8s-root-ca 6 | 7 | for targetName in kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet-api-admin admin; do 8 | cfssl gencert --ca k8s-root-ca.pem --ca-key k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes $targetName-csr.json | cfssljson --bare $targetName 9 | done 10 | 11 | KUBE_APISERVER="https://127.0.0.1:6443" 12 | BOOTSTRAP_TOKEN_ID=$(head -c 6 /dev/urandom | md5sum | head -c 6) 13 | BOOTSTRAP_TOKEN_SECRET=$(head -c 16 /dev/urandom | md5sum | head -c 16) 14 | BOOTSTRAP_TOKEN="${BOOTSTRAP_TOKEN_ID}.${BOOTSTRAP_TOKEN_SECRET}" 15 | 16 | echo "Bootstrap Tokne: ${BOOTSTRAP_TOKEN}" 17 | 18 | echo "Create kubelet bootstrapping kubeconfig..." 
19 | 20 | # 设置集群参数 21 | kubectl config set-cluster kubernetes \ 22 | --certificate-authority=k8s-root-ca.pem \ 23 | --embed-certs=true \ 24 | --server=${KUBE_APISERVER} \ 25 | --kubeconfig=bootstrap.kubeconfig 26 | # 设置客户端认证参数 27 | kubectl config set-credentials "system:bootstrap:${BOOTSTRAP_TOKEN_ID}" \ 28 | --token=${BOOTSTRAP_TOKEN} \ 29 | --kubeconfig=bootstrap.kubeconfig 30 | # 设置上下文参数 31 | kubectl config set-context default \ 32 | --cluster=kubernetes \ 33 | --user="system:bootstrap:${BOOTSTRAP_TOKEN_ID}" \ 34 | --kubeconfig=bootstrap.kubeconfig 35 | # 设置默认上下文 36 | kubectl config use-context default --kubeconfig=bootstrap.kubeconfig 37 | 38 | echo "Create kube-controller-manager kubeconfig..." 39 | 40 | kubectl config set-cluster kubernetes \ 41 | --certificate-authority=k8s-root-ca.pem \ 42 | --embed-certs=true \ 43 | --server=${KUBE_APISERVER} \ 44 | --kubeconfig=kube-controller-manager.kubeconfig 45 | # 设置客户端认证参数 46 | kubectl config set-credentials "system:kube-controller-manager" \ 47 | --client-certificate=kube-controller-manager.pem \ 48 | --client-key=kube-controller-manager-key.pem \ 49 | --embed-certs=true \ 50 | --kubeconfig=kube-controller-manager.kubeconfig 51 | # 设置上下文参数 52 | kubectl config set-context default \ 53 | --cluster=kubernetes \ 54 | --user=system:kube-controller-manager \ 55 | --kubeconfig=kube-controller-manager.kubeconfig 56 | # 设置默认上下文 57 | kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig 58 | 59 | echo "Create kube-scheduler kubeconfig..." 
60 | 61 | kubectl config set-cluster kubernetes \ 62 | --certificate-authority=k8s-root-ca.pem \ 63 | --embed-certs=true \ 64 | --server=${KUBE_APISERVER} \ 65 | --kubeconfig=kube-scheduler.kubeconfig 66 | # 设置客户端认证参数 67 | kubectl config set-credentials "system:kube-scheduler" \ 68 | --client-certificate=kube-scheduler.pem \ 69 | --client-key=kube-scheduler-key.pem \ 70 | --embed-certs=true \ 71 | --kubeconfig=kube-scheduler.kubeconfig 72 | # 设置上下文参数 73 | kubectl config set-context default \ 74 | --cluster=kubernetes \ 75 | --user=system:kube-scheduler \ 76 | --kubeconfig=kube-scheduler.kubeconfig 77 | # 设置默认上下文 78 | kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig 79 | 80 | echo "Create kube-proxy kubeconfig..." 81 | 82 | kubectl config set-cluster kubernetes \ 83 | --certificate-authority=k8s-root-ca.pem \ 84 | --embed-certs=true \ 85 | --server=${KUBE_APISERVER} \ 86 | --kubeconfig=kube-proxy.kubeconfig 87 | # 设置客户端认证参数 88 | kubectl config set-credentials "system:kube-proxy" \ 89 | --client-certificate=kube-proxy.pem \ 90 | --client-key=kube-proxy-key.pem \ 91 | --embed-certs=true \ 92 | --kubeconfig=kube-proxy.kubeconfig 93 | # 设置上下文参数 94 | kubectl config set-context default \ 95 | --cluster=kubernetes \ 96 | --user=system:kube-proxy \ 97 | --kubeconfig=kube-proxy.kubeconfig 98 | # 设置默认上下文 99 | kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig 100 | 101 | cat >> audit-policy.yaml <> bootstrap.secret.yaml <" 114 | name: bootstrap-token-${BOOTSTRAP_TOKEN_ID} 115 | namespace: kube-system 116 | 117 | # Type MUST be 'bootstrap.kubernetes.io/token' 118 | type: bootstrap.kubernetes.io/token 119 | stringData: 120 | # Human readable description. Optional. 121 | description: "The default bootstrap token." 122 | 123 | # Token ID and secret. Required. 124 | token-id: ${BOOTSTRAP_TOKEN_ID} 125 | token-secret: ${BOOTSTRAP_TOKEN_SECRET} 126 | 127 | # Expiration. Optional. 
128 | expiration: $(date -d'+1 day' -u +"%Y-%m-%dT%H:%M:%SZ") 129 | 130 | # Allowed usages. 131 | usage-bootstrap-authentication: "true" 132 | usage-bootstrap-signing: "true" 133 | 134 | # Extra groups to authenticate the token as. Must start with "system:bootstrappers:" 135 | # auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress 136 | EOF 137 | -------------------------------------------------------------------------------- /certs/k8s/k8s-gencert.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "87600h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth" 13 | ], 14 | "expiry": "87600h" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /certs/k8s/k8s-root-ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 4096 6 | }, 7 | "names": [ 8 | { 9 | "C": "CN", 10 | "ST": "BeiJing", 11 | "L": "BeiJing", 12 | "O": "kubernetes", 13 | "OU": "System" 14 | } 15 | ], 16 | "ca": { 17 | "expiry": "87600h" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /certs/k8s/kube-apiserver-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "10.254.0.1", 6 | "localhost", 7 | "*.master.kubernetes.node", 8 | "kubernetes", 9 | "kubernetes.default", 10 | "kubernetes.default.svc", 11 | "kubernetes.default.svc.cluster", 12 | "kubernetes.default.svc.cluster.local" 13 | ], 14 | "key": { 15 | "algo": "rsa", 16 | "size": 2048 17 | }, 18 | "names": [ 19 | { 20 | "C": "CN", 21 | "ST": "BeiJing", 22 | "L": "BeiJing", 23 | "O": "kubernetes", 24 | "OU": "System" 25 | } 26 | ] 
27 | } 28 | -------------------------------------------------------------------------------- /certs/k8s/kube-controller-manager-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-controller-manager", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "localhost", 6 | "*.master.kubernetes.node" 7 | ], 8 | "key": { 9 | "algo": "rsa", 10 | "size": 2048 11 | }, 12 | "names": [ 13 | { 14 | "C": "CN", 15 | "ST": "BeiJing", 16 | "L": "BeiJing", 17 | "O": "system:kube-controller-manager", 18 | "OU": "System" 19 | } 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /certs/k8s/kube-proxy-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-proxy", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | "O": "system:kube-proxy", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /certs/k8s/kube-scheduler-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-scheduler", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "localhost", 6 | "*.master.kubernetes.node" 7 | ], 8 | "key": { 9 | "algo": "rsa", 10 | "size": 2048 11 | }, 12 | "names": [ 13 | { 14 | "C": "CN", 15 | "ST": "BeiJing", 16 | "L": "BeiJing", 17 | "O": "system:kube-scheduler", 18 | "OU": "System" 19 | } 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /certs/k8s/kubelet-api-admin-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kubelet-api-admin", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "BeiJing", 12 | "L": "BeiJing", 13 | 
"O": "system:kubelet-api-admin", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /etcd/conf/etcd.conf: -------------------------------------------------------------------------------- 1 | # [member] 2 | ETCD_NAME=etcd1 3 | ETCD_DATA_DIR="/var/lib/etcd/data" 4 | ETCD_WAL_DIR="/var/lib/etcd/wal" 5 | ETCD_SNAPSHOT_COUNT="100" 6 | ETCD_HEARTBEAT_INTERVAL="100" 7 | ETCD_ELECTION_TIMEOUT="1000" 8 | ETCD_LISTEN_PEER_URLS="https://192.168.1.51:2380" 9 | ETCD_LISTEN_CLIENT_URLS="https://192.168.1.51:2379,http://127.0.0.1:2379" 10 | ETCD_MAX_SNAPSHOTS="5" 11 | ETCD_MAX_WALS="5" 12 | #ETCD_CORS="" 13 | 14 | # [cluster] 15 | ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.51:2380" 16 | # if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." 17 | ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.51:2380,etcd2=https://192.168.1.52:2380,etcd3=https://192.168.1.53:2380" 18 | ETCD_INITIAL_CLUSTER_STATE="new" 19 | ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" 20 | ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.51:2379" 21 | #ETCD_DISCOVERY="" 22 | #ETCD_DISCOVERY_SRV="" 23 | #ETCD_DISCOVERY_FALLBACK="proxy" 24 | #ETCD_DISCOVERY_PROXY="" 25 | #ETCD_STRICT_RECONFIG_CHECK="false" 26 | #ETCD_AUTO_COMPACTION_RETENTION="0" 27 | 28 | # [proxy] 29 | #ETCD_PROXY="off" 30 | #ETCD_PROXY_FAILURE_WAIT="5000" 31 | #ETCD_PROXY_REFRESH_INTERVAL="30000" 32 | #ETCD_PROXY_DIAL_TIMEOUT="1000" 33 | #ETCD_PROXY_WRITE_TIMEOUT="5000" 34 | #ETCD_PROXY_READ_TIMEOUT="0" 35 | 36 | # [security] 37 | ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem" 38 | ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem" 39 | ETCD_CLIENT_CERT_AUTH="true" 40 | ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem" 41 | ETCD_AUTO_TLS="true" 42 | ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem" 43 | ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem" 44 | ETCD_PEER_CLIENT_CERT_AUTH="true" 45 | 
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem" 46 | ETCD_PEER_AUTO_TLS="true" 47 | 48 | # [logging] 49 | #ETCD_DEBUG="false" 50 | # examples for -log-package-levels etcdserver=WARNING,security=DEBUG 51 | #ETCD_LOG_PACKAGE_LEVELS="" 52 | -------------------------------------------------------------------------------- /etcd/conf/etcd.conf.cluster.example: -------------------------------------------------------------------------------- 1 | # [member] 2 | ETCD_NAME=etcd1 3 | ETCD_DATA_DIR="/var/lib/etcd/data" 4 | ETCD_WAL_DIR="/var/lib/etcd/wal" 5 | ETCD_SNAPSHOT_COUNT="100" 6 | ETCD_HEARTBEAT_INTERVAL="100" 7 | ETCD_ELECTION_TIMEOUT="1000" 8 | ETCD_LISTEN_PEER_URLS="https://192.168.1.51:2380" 9 | ETCD_LISTEN_CLIENT_URLS="https://192.168.1.51:2379,http://127.0.0.1:2379" 10 | ETCD_MAX_SNAPSHOTS="5" 11 | ETCD_MAX_WALS="5" 12 | #ETCD_CORS="" 13 | 14 | # [cluster] 15 | ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.51:2380" 16 | # if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." 
17 | ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.51:2380,etcd2=https://192.168.1.52:2380,etcd3=https://192.168.1.53:2380" 18 | ETCD_INITIAL_CLUSTER_STATE="new" 19 | ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" 20 | ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.51:2379" 21 | #ETCD_DISCOVERY="" 22 | #ETCD_DISCOVERY_SRV="" 23 | #ETCD_DISCOVERY_FALLBACK="proxy" 24 | #ETCD_DISCOVERY_PROXY="" 25 | #ETCD_STRICT_RECONFIG_CHECK="false" 26 | #ETCD_AUTO_COMPACTION_RETENTION="0" 27 | 28 | # [proxy] 29 | #ETCD_PROXY="off" 30 | #ETCD_PROXY_FAILURE_WAIT="5000" 31 | #ETCD_PROXY_REFRESH_INTERVAL="30000" 32 | #ETCD_PROXY_DIAL_TIMEOUT="1000" 33 | #ETCD_PROXY_WRITE_TIMEOUT="5000" 34 | #ETCD_PROXY_READ_TIMEOUT="0" 35 | 36 | # [security] 37 | ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem" 38 | ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem" 39 | ETCD_CLIENT_CERT_AUTH="true" 40 | ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem" 41 | ETCD_AUTO_TLS="true" 42 | ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem" 43 | ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem" 44 | ETCD_PEER_CLIENT_CERT_AUTH="true" 45 | ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem" 46 | ETCD_PEER_AUTO_TLS="true" 47 | 48 | # [logging] 49 | #ETCD_DEBUG="false" 50 | # examples for -log-package-levels etcdserver=WARNING,security=DEBUG 51 | #ETCD_LOG_PACKAGE_LEVELS="" 52 | -------------------------------------------------------------------------------- /etcd/conf/etcd.conf.single.example: -------------------------------------------------------------------------------- 1 | # [member] 2 | ETCD_NAME=etcd 3 | ETCD_DATA_DIR="/var/lib/etcd/data" 4 | ETCD_WAL_DIR="/var/lib/etcd/wal" 5 | ETCD_SNAPSHOT_COUNT="100" 6 | ETCD_HEARTBEAT_INTERVAL="100" 7 | ETCD_ELECTION_TIMEOUT="1000" 8 | ETCD_LISTEN_PEER_URLS="https://192.168.1.51:2380" 9 | ETCD_LISTEN_CLIENT_URLS="https://192.168.1.51:2379,http://127.0.0.1:2379" 10 | ETCD_MAX_SNAPSHOTS="5" 11 | ETCD_MAX_WALS="5" 12 | #ETCD_CORS="" 13 | 14 | # [cluster] 15 | 
#ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.51:2380" 16 | # if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." 17 | #ETCD_INITIAL_CLUSTER="etcd1=https://192.168.1.51:2380,etcd2=https://192.168.1.52:2380,etcd3=https://192.168.1.53:2380" 18 | #ETCD_INITIAL_CLUSTER_STATE="new" 19 | #ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" 20 | ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.51:2379" 21 | #ETCD_DISCOVERY="" 22 | #ETCD_DISCOVERY_SRV="" 23 | #ETCD_DISCOVERY_FALLBACK="proxy" 24 | #ETCD_DISCOVERY_PROXY="" 25 | #ETCD_STRICT_RECONFIG_CHECK="false" 26 | #ETCD_AUTO_COMPACTION_RETENTION="0" 27 | 28 | # [proxy] 29 | #ETCD_PROXY="off" 30 | #ETCD_PROXY_FAILURE_WAIT="5000" 31 | #ETCD_PROXY_REFRESH_INTERVAL="30000" 32 | #ETCD_PROXY_DIAL_TIMEOUT="1000" 33 | #ETCD_PROXY_WRITE_TIMEOUT="5000" 34 | #ETCD_PROXY_READ_TIMEOUT="0" 35 | 36 | # [security] 37 | ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem" 38 | ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem" 39 | ETCD_CLIENT_CERT_AUTH="true" 40 | ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem" 41 | ETCD_AUTO_TLS="true" 42 | ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem" 43 | ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem" 44 | ETCD_PEER_CLIENT_CERT_AUTH="true" 45 | ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-root-ca.pem" 46 | ETCD_PEER_AUTO_TLS="true" 47 | 48 | # [logging] 49 | #ETCD_DEBUG="false" 50 | # examples for -log-package-levels etcdserver=WARNING,security=DEBUG 51 | #ETCD_LOG_PACKAGE_LEVELS="" 52 | -------------------------------------------------------------------------------- /etcd/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ETCD_DEFAULT_VERSION="3.3.12" 6 | 7 | if [ "$1" != "" ]; then 8 | ETCD_VERSION=$1 9 | else 10 | echo -e "\033[33mWARNING: ETCD_VERSION is blank,use default version: ${ETCD_DEFAULT_VERSION}\033[0m" 11 | ETCD_VERSION=${ETCD_DEFAULT_VERSION} 12 | fi 13 | 14 | 
function download(){ 15 | if [ ! -f "etcd-v${ETCD_VERSION}-linux-amd64.tar.gz" ]; then 16 | wget https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz 17 | tar -zxvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz 18 | fi 19 | } 20 | 21 | function preinstall(){ 22 | getent group etcd >/dev/null || groupadd -r etcd 23 | getent passwd etcd >/dev/null || useradd -r -g etcd -d /var/lib/etcd -s /sbin/nologin -c "etcd user" etcd 24 | } 25 | 26 | function install(){ 27 | echo -e "\033[32mINFO: Copy etcd...\033[0m" 28 | tar -zxvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz 29 | cp etcd-v${ETCD_VERSION}-linux-amd64/etcd* /usr/local/bin 30 | rm -rf etcd-v${ETCD_VERSION}-linux-amd64 31 | 32 | echo -e "\033[32mINFO: Copy etcd config...\033[0m" 33 | cp -r conf /etc/etcd 34 | chown -R etcd:etcd /etc/etcd 35 | chmod -R 755 /etc/etcd/ssl 36 | 37 | echo -e "\033[32mINFO: Copy etcd systemd config...\033[0m" 38 | cp systemd/*.service /lib/systemd/system 39 | systemctl daemon-reload 40 | } 41 | 42 | function postinstall(){ 43 | if [ ! 
-d "/var/lib/etcd" ]; then 44 | mkdir /var/lib/etcd 45 | chown -R etcd:etcd /var/lib/etcd 46 | fi 47 | 48 | } 49 | 50 | 51 | download 52 | preinstall 53 | install 54 | postinstall 55 | -------------------------------------------------------------------------------- /etcd/systemd/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Etcd Server 3 | After=network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | Type=notify 9 | WorkingDirectory=/var/lib/etcd/ 10 | EnvironmentFile=-/etc/etcd/etcd.conf 11 | User=etcd 12 | # set GOMAXPROCS to number of processors 13 | ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\"" 14 | Restart=on-failure 15 | LimitNOFILE=65536 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /etcd/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo -e "\033[33mWARNING: Delete etcd!\033[0m" 6 | rm -f /usr/local/bin/etcd /usr/local/bin/etcdctl 7 | 8 | echo -e "\033[33mWARNING: Delete etcd config!\033[0m" 9 | rm -rf /etc/etcd /var/lib/etcd 10 | 11 | echo -e "\033[33mWARNING: Delete etcd systemd config!\033[0m" 12 | if [ -z "/lib/systemd/system/etcd.service" ]; then 13 | systemctl disable etcd 14 | systemctl stop etcd 15 | rm -f /lib/systemd/system/etcd.service 16 | fi 17 | systemctl daemon-reload 18 | -------------------------------------------------------------------------------- /fixPermissions.sh: -------------------------------------------------------------------------------- 1 | #/!bin/bash 2 | 3 | [ -d "/etc/etcd/ssl" ] && chmod -R 755 /etc/etcd/ssl 4 | [ -d "/etc/etcd/ssl" ] && chown -R etcd:etcd /etc/etcd/ssl 5 | [ -d "/var/lib/etcd" ] && 
chown -R etcd:etcd /var/lib/etcd 6 | [ -d "/var/log/kube-audit" ] && chown -R kube:kube /var/log/kube-audit 7 | [ -d "/var/lib/kubelet" ] && chown -R kube:kube /var/lib/kubelet 8 | [ -d "/etc/kubernetes/ssl" ] && chown -R kube:kube /etc/kubernetes/ssl 9 | -------------------------------------------------------------------------------- /k8s/addons/coredns/coredns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | labels: 11 | kubernetes.io/bootstrapping: rbac-defaults 12 | name: system:coredns 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | - services 19 | - pods 20 | - namespaces 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - nodes 28 | verbs: 29 | - get 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRoleBinding 33 | metadata: 34 | annotations: 35 | rbac.authorization.kubernetes.io/autoupdate: "true" 36 | labels: 37 | kubernetes.io/bootstrapping: rbac-defaults 38 | name: system:coredns 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: system:coredns 43 | subjects: 44 | - kind: ServiceAccount 45 | name: coredns 46 | namespace: kube-system 47 | --- 48 | apiVersion: v1 49 | kind: ConfigMap 50 | metadata: 51 | name: coredns 52 | namespace: kube-system 53 | data: 54 | Corefile: | 55 | .:53 { 56 | errors 57 | health 58 | kubernetes cluster.local in-addr.arpa ip6.arpa { 59 | pods insecure 60 | upstream 61 | fallthrough in-addr.arpa ip6.arpa 62 | } 63 | prometheus :9153 64 | forward . 
/etc/resolv.conf 65 | cache 30 66 | loop 67 | reload 68 | loadbalance 69 | } 70 | --- 71 | apiVersion: apps/v1 72 | kind: Deployment 73 | metadata: 74 | name: coredns 75 | namespace: kube-system 76 | labels: 77 | k8s-app: kube-dns 78 | kubernetes.io/name: "CoreDNS" 79 | spec: 80 | replicas: 2 81 | strategy: 82 | type: RollingUpdate 83 | rollingUpdate: 84 | maxUnavailable: 1 85 | selector: 86 | matchLabels: 87 | k8s-app: kube-dns 88 | template: 89 | metadata: 90 | labels: 91 | k8s-app: kube-dns 92 | spec: 93 | priorityClassName: system-cluster-critical 94 | serviceAccountName: coredns 95 | tolerations: 96 | - key: "CriticalAddonsOnly" 97 | operator: "Exists" 98 | nodeSelector: 99 | beta.kubernetes.io/os: linux 100 | containers: 101 | - name: coredns 102 | image: coredns/coredns:1.3.1 103 | imagePullPolicy: IfNotPresent 104 | resources: 105 | limits: 106 | memory: 170Mi 107 | requests: 108 | cpu: 100m 109 | memory: 70Mi 110 | args: [ "-conf", "/etc/coredns/Corefile" ] 111 | volumeMounts: 112 | - name: config-volume 113 | mountPath: /etc/coredns 114 | readOnly: true 115 | ports: 116 | - containerPort: 53 117 | name: dns 118 | protocol: UDP 119 | - containerPort: 53 120 | name: dns-tcp 121 | protocol: TCP 122 | - containerPort: 9153 123 | name: metrics 124 | protocol: TCP 125 | securityContext: 126 | allowPrivilegeEscalation: false 127 | capabilities: 128 | add: 129 | - NET_BIND_SERVICE 130 | drop: 131 | - all 132 | readOnlyRootFilesystem: true 133 | livenessProbe: 134 | httpGet: 135 | path: /health 136 | port: 8080 137 | scheme: HTTP 138 | initialDelaySeconds: 60 139 | timeoutSeconds: 5 140 | successThreshold: 1 141 | failureThreshold: 5 142 | readinessProbe: 143 | httpGet: 144 | path: /health 145 | port: 8080 146 | scheme: HTTP 147 | dnsPolicy: Default 148 | volumes: 149 | - name: config-volume 150 | configMap: 151 | name: coredns 152 | items: 153 | - key: Corefile 154 | path: Corefile 155 | --- 156 | apiVersion: v1 157 | kind: Service 158 | metadata: 159 | name: 
kube-dns 160 | namespace: kube-system 161 | annotations: 162 | prometheus.io/port: "9153" 163 | prometheus.io/scrape: "true" 164 | labels: 165 | k8s-app: kube-dns 166 | kubernetes.io/cluster-service: "true" 167 | kubernetes.io/name: "CoreDNS" 168 | spec: 169 | selector: 170 | k8s-app: kube-dns 171 | clusterIP: 10.254.0.2 172 | ports: 173 | - name: dns 174 | port: 53 175 | protocol: UDP 176 | - name: dns-tcp 177 | port: 53 178 | protocol: TCP 179 | - name: metrics 180 | port: 9153 181 | protocol: TCP 182 | -------------------------------------------------------------------------------- /k8s/addons/coredns/coredns.yaml.sed: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | labels: 11 | kubernetes.io/bootstrapping: rbac-defaults 12 | name: system:coredns 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | - services 19 | - pods 20 | - namespaces 21 | verbs: 22 | - list 23 | - watch 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - nodes 28 | verbs: 29 | - get 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRoleBinding 33 | metadata: 34 | annotations: 35 | rbac.authorization.kubernetes.io/autoupdate: "true" 36 | labels: 37 | kubernetes.io/bootstrapping: rbac-defaults 38 | name: system:coredns 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: system:coredns 43 | subjects: 44 | - kind: ServiceAccount 45 | name: coredns 46 | namespace: kube-system 47 | --- 48 | apiVersion: v1 49 | kind: ConfigMap 50 | metadata: 51 | name: coredns 52 | namespace: kube-system 53 | data: 54 | Corefile: | 55 | .:53 { 56 | errors 57 | health 58 | kubernetes CLUSTER_DOMAIN REVERSE_CIDRS { 59 | pods insecure 60 | upstream 61 | fallthrough in-addr.arpa ip6.arpa 62 | }FEDERATIONS 63 | 
prometheus :9153 64 | forward . UPSTREAMNAMESERVER 65 | cache 30 66 | loop 67 | reload 68 | loadbalance 69 | }STUBDOMAINS 70 | --- 71 | apiVersion: apps/v1 72 | kind: Deployment 73 | metadata: 74 | name: coredns 75 | namespace: kube-system 76 | labels: 77 | k8s-app: kube-dns 78 | kubernetes.io/name: "CoreDNS" 79 | spec: 80 | replicas: 2 81 | strategy: 82 | type: RollingUpdate 83 | rollingUpdate: 84 | maxUnavailable: 1 85 | selector: 86 | matchLabels: 87 | k8s-app: kube-dns 88 | template: 89 | metadata: 90 | labels: 91 | k8s-app: kube-dns 92 | spec: 93 | priorityClassName: system-cluster-critical 94 | serviceAccountName: coredns 95 | tolerations: 96 | - key: "CriticalAddonsOnly" 97 | operator: "Exists" 98 | nodeSelector: 99 | beta.kubernetes.io/os: linux 100 | containers: 101 | - name: coredns 102 | image: coredns/coredns:1.3.1 103 | imagePullPolicy: IfNotPresent 104 | resources: 105 | limits: 106 | memory: 170Mi 107 | requests: 108 | cpu: 100m 109 | memory: 70Mi 110 | args: [ "-conf", "/etc/coredns/Corefile" ] 111 | volumeMounts: 112 | - name: config-volume 113 | mountPath: /etc/coredns 114 | readOnly: true 115 | ports: 116 | - containerPort: 53 117 | name: dns 118 | protocol: UDP 119 | - containerPort: 53 120 | name: dns-tcp 121 | protocol: TCP 122 | - containerPort: 9153 123 | name: metrics 124 | protocol: TCP 125 | securityContext: 126 | allowPrivilegeEscalation: false 127 | capabilities: 128 | add: 129 | - NET_BIND_SERVICE 130 | drop: 131 | - all 132 | readOnlyRootFilesystem: true 133 | livenessProbe: 134 | httpGet: 135 | path: /health 136 | port: 8080 137 | scheme: HTTP 138 | initialDelaySeconds: 60 139 | timeoutSeconds: 5 140 | successThreshold: 1 141 | failureThreshold: 5 142 | readinessProbe: 143 | httpGet: 144 | path: /health 145 | port: 8080 146 | scheme: HTTP 147 | dnsPolicy: Default 148 | volumes: 149 | - name: config-volume 150 | configMap: 151 | name: coredns 152 | items: 153 | - key: Corefile 154 | path: Corefile 155 | --- 156 | apiVersion: v1 157 | 
kind: Service 158 | metadata: 159 | name: kube-dns 160 | namespace: kube-system 161 | annotations: 162 | prometheus.io/port: "9153" 163 | prometheus.io/scrape: "true" 164 | labels: 165 | k8s-app: kube-dns 166 | kubernetes.io/cluster-service: "true" 167 | kubernetes.io/name: "CoreDNS" 168 | spec: 169 | selector: 170 | k8s-app: kube-dns 171 | clusterIP: CLUSTER_DNS_IP 172 | ports: 173 | - name: dns 174 | port: 53 175 | protocol: UDP 176 | - name: dns-tcp 177 | port: 53 178 | protocol: TCP 179 | - name: metrics 180 | port: 9153 181 | protocol: TCP 182 | -------------------------------------------------------------------------------- /k8s/addons/coredns/create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ./deploy.sh -s -i 10.254.0.2 -d cluster.local -t coredns.yaml.sed > coredns.yaml 6 | -------------------------------------------------------------------------------- /k8s/addons/coredns/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Deploys CoreDNS to a cluster currently running Kube-DNS. 4 | 5 | set -eo pipefail 6 | 7 | show_help () { 8 | cat << USAGE 9 | usage: $0 [ -r REVERSE-CIDR ] [ -i DNS-IP ] [ -d CLUSTER-DOMAIN ] [ -t YAML-TEMPLATE ] 10 | 11 | -r : Define a reverse zone for the given CIDR. You may specifcy this option more 12 | than once to add multiple reverse zones. If no reverse CIDRs are defined, 13 | then the default is to handle all reverse zones (i.e. in-addr.arpa and ip6.arpa) 14 | -i : Specify the cluster DNS IP address. If not specificed, the IP address of 15 | the existing "kube-dns" service is used, if present. 16 | -s : Skips the translation of kube-dns configmap to the corresponding CoreDNS Corefile configuration. 
17 | 18 | USAGE 19 | exit 0 20 | } 21 | 22 | # Simple Defaults 23 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" 24 | CLUSTER_DOMAIN=cluster.local 25 | YAML_TEMPLATE="$DIR/coredns.yaml.sed" 26 | STUBDOMAINS="" 27 | UPSTREAM=\\/etc\\/resolv\.conf 28 | FEDERATIONS="" 29 | 30 | 31 | # Translates the kube-dns ConfigMap to equivalent CoreDNS Configuration. 32 | function translate-kube-dns-configmap { 33 | kube-dns-federation-to-coredns 34 | kube-dns-upstreamnameserver-to-coredns 35 | kube-dns-stubdomains-to-coredns 36 | } 37 | 38 | function kube-dns-federation-to-coredns { 39 | fed=$(kubectl -n kube-system get configmap kube-dns -ojsonpath='{.data.federations}' 2> /dev/null | jq . | tr -d '":,') 40 | if [[ ! -z ${fed} ]]; then 41 | FEDERATIONS=$(sed -e '1s/^/federation /' -e 's/^/ /' -e '1i\\' <<< "${fed}") # add federation to the stanza 42 | fi 43 | } 44 | 45 | function kube-dns-upstreamnameserver-to-coredns { 46 | up=$(kubectl -n kube-system get configmap kube-dns -ojsonpath='{.data.upstreamNameservers}' 2> /dev/null | tr -d '[",]') 47 | if [[ ! -z ${up} ]]; then 48 | UPSTREAM=${up} 49 | fi 50 | } 51 | 52 | function kube-dns-stubdomains-to-coredns { 53 | STUBDOMAIN_TEMPLATE=' 54 | SD_DOMAIN:53 { 55 | errors 56 | cache 30 57 | loop 58 | forward . SD_DESTINATION 59 | }' 60 | 61 | function dequote { 62 | str=${1#\"} # delete leading quote 63 | str=${str%\"} # delete trailing quote 64 | echo ${str} 65 | } 66 | 67 | function parse_stub_domains() { 68 | sd=$1 69 | 70 | # get keys - each key is a domain 71 | sd_keys=$(echo -n $sd | jq keys[]) 72 | 73 | # For each domain ... 
74 | for dom in $sd_keys; do 75 | dst=$(echo -n $sd | jq '.['$dom'][0]') # get the destination 76 | 77 | dom=$(dequote $dom) 78 | dst=$(dequote $dst) 79 | 80 | sd_stanza=${STUBDOMAIN_TEMPLATE/SD_DOMAIN/$dom} # replace SD_DOMAIN 81 | sd_stanza=${sd_stanza/SD_DESTINATION/$dst} # replace SD_DESTINATION 82 | echo "$sd_stanza" 83 | done 84 | } 85 | 86 | sd=$(kubectl -n kube-system get configmap kube-dns -ojsonpath='{.data.stubDomains}' 2> /dev/null) 87 | STUBDOMAINS=$(parse_stub_domains "$sd") 88 | } 89 | 90 | 91 | # Get Opts 92 | while getopts "hsr:i:d:t:k:" opt; do 93 | case "$opt" in 94 | h) show_help 95 | ;; 96 | s) SKIP=1 97 | ;; 98 | r) REVERSE_CIDRS="$REVERSE_CIDRS $OPTARG" 99 | ;; 100 | i) CLUSTER_DNS_IP=$OPTARG 101 | ;; 102 | d) CLUSTER_DOMAIN=$OPTARG 103 | ;; 104 | t) YAML_TEMPLATE=$OPTARG 105 | ;; 106 | esac 107 | done 108 | 109 | # Conditional Defaults 110 | if [[ -z $REVERSE_CIDRS ]]; then 111 | REVERSE_CIDRS="in-addr.arpa ip6.arpa" 112 | fi 113 | if [[ -z $CLUSTER_DNS_IP ]]; then 114 | # Default IP to kube-dns IP; test the assignment directly — under 'set -e' a separate $? check is never reached when the command fails 115 | if ! CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}"); then 116 | 117 | >&2 echo "Error! The IP address for DNS service couldn't be determined automatically. Please specify the DNS-IP with the '-i' option."
118 | exit 2 119 | fi 120 | fi 121 | 122 | if [[ "${SKIP}" -ne 1 ]] ; then 123 | translate-kube-dns-configmap 124 | fi 125 | 126 | orig=$'\n' 127 | replace=$'\\\n' 128 | sed -e "s/CLUSTER_DNS_IP/$CLUSTER_DNS_IP/g" \ 129 | -e "s/CLUSTER_DOMAIN/$CLUSTER_DOMAIN/g" \ 130 | -e "s?REVERSE_CIDRS?$REVERSE_CIDRS?g" \ 131 | -e "s@STUBDOMAINS@${STUBDOMAINS//$orig/$replace}@g" \ 132 | -e "s@FEDERATIONS@${FEDERATIONS//$orig/$replace}@g" \ 133 | -e "s/UPSTREAMNAMESERVER/$UPSTREAM/g" \ 134 | "${YAML_TEMPLATE}" 135 | -------------------------------------------------------------------------------- /k8s/addons/dashborad/create_dashboard_sa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | if kubectl get sa dashboard-admin -n kube-system &> /dev/null;then 5 | echo -e "\033[33mWARNING: ServiceAccount dashboard-admin exist!\033[0m" 6 | else 7 | kubectl create sa dashboard-admin -n kube-system 8 | kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin 9 | fi 10 | 11 | kubectl describe secret -n kube-system $(kubectl get secrets -n kube-system | grep dashboard-admin | cut -f1 -d ' ') | grep -E '^token' 12 | -------------------------------------------------------------------------------- /k8s/addons/dashborad/kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # ------------------- Dashboard Secret ------------------- # 16 | 17 | apiVersion: v1 18 | kind: Secret 19 | metadata: 20 | labels: 21 | k8s-app: kubernetes-dashboard 22 | name: kubernetes-dashboard-certs 23 | namespace: kube-system 24 | type: Opaque 25 | 26 | --- 27 | # ------------------- Dashboard Service Account ------------------- # 28 | 29 | apiVersion: v1 30 | kind: ServiceAccount 31 | metadata: 32 | labels: 33 | k8s-app: kubernetes-dashboard 34 | name: kubernetes-dashboard 35 | namespace: kube-system 36 | 37 | --- 38 | # ------------------- Dashboard Role & Role Binding ------------------- # 39 | 40 | kind: Role 41 | apiVersion: rbac.authorization.k8s.io/v1 42 | metadata: 43 | name: kubernetes-dashboard-minimal 44 | namespace: kube-system 45 | rules: 46 | # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. 47 | - apiGroups: [""] 48 | resources: ["secrets"] 49 | verbs: ["create"] 50 | # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. 51 | - apiGroups: [""] 52 | resources: ["configmaps"] 53 | verbs: ["create"] 54 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 55 | - apiGroups: [""] 56 | resources: ["secrets"] 57 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] 58 | verbs: ["get", "update", "delete"] 59 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 60 | - apiGroups: [""] 61 | resources: ["configmaps"] 62 | resourceNames: ["kubernetes-dashboard-settings"] 63 | verbs: ["get", "update"] 64 | # Allow Dashboard to get metrics from heapster. 
65 | - apiGroups: [""] 66 | resources: ["services"] 67 | resourceNames: ["heapster"] 68 | verbs: ["proxy"] 69 | - apiGroups: [""] 70 | resources: ["services/proxy"] 71 | resourceNames: ["heapster", "http:heapster:", "https:heapster:"] 72 | verbs: ["get"] 73 | 74 | --- 75 | apiVersion: rbac.authorization.k8s.io/v1 76 | kind: RoleBinding 77 | metadata: 78 | name: kubernetes-dashboard-minimal 79 | namespace: kube-system 80 | roleRef: 81 | apiGroup: rbac.authorization.k8s.io 82 | kind: Role 83 | name: kubernetes-dashboard-minimal 84 | subjects: 85 | - kind: ServiceAccount 86 | name: kubernetes-dashboard 87 | namespace: kube-system 88 | 89 | --- 90 | # ------------------- Dashboard Deployment ------------------- # 91 | 92 | kind: Deployment 93 | apiVersion: apps/v1 94 | metadata: 95 | labels: 96 | k8s-app: kubernetes-dashboard 97 | name: kubernetes-dashboard 98 | namespace: kube-system 99 | spec: 100 | replicas: 1 101 | revisionHistoryLimit: 10 102 | selector: 103 | matchLabels: 104 | k8s-app: kubernetes-dashboard 105 | template: 106 | metadata: 107 | labels: 108 | k8s-app: kubernetes-dashboard 109 | spec: 110 | containers: 111 | - name: kubernetes-dashboard 112 | image: gcr.azk8s.cn/google_containers/kubernetes-dashboard-amd64:v1.10.1 113 | ports: 114 | - containerPort: 8443 115 | protocol: TCP 116 | args: 117 | - --auto-generate-certificates 118 | # Uncomment the following line to manually specify Kubernetes API server Host 119 | # If not specified, Dashboard will attempt to auto discover the API server and connect 120 | # to it. Uncomment only if the default does not work. 
121 | # - --apiserver-host=http://my-address:port 122 | volumeMounts: 123 | - name: kubernetes-dashboard-certs 124 | mountPath: /certs 125 | # Create on-disk volume to store exec logs 126 | - mountPath: /tmp 127 | name: tmp-volume 128 | livenessProbe: 129 | httpGet: 130 | scheme: HTTPS 131 | path: / 132 | port: 8443 133 | initialDelaySeconds: 30 134 | timeoutSeconds: 30 135 | volumes: 136 | - name: kubernetes-dashboard-certs 137 | secret: 138 | secretName: kubernetes-dashboard-certs 139 | - name: tmp-volume 140 | emptyDir: {} 141 | serviceAccountName: kubernetes-dashboard 142 | # Comment the following tolerations if Dashboard must not be deployed on master 143 | tolerations: 144 | - key: node-role.kubernetes.io/master 145 | effect: NoSchedule 146 | 147 | --- 148 | # ------------------- Dashboard Service ------------------- # 149 | 150 | kind: Service 151 | apiVersion: v1 152 | metadata: 153 | labels: 154 | k8s-app: kubernetes-dashboard 155 | name: kubernetes-dashboard 156 | namespace: kube-system 157 | spec: 158 | ports: 159 | - port: 443 160 | targetPort: 8443 161 | nodePort: 30000 162 | type: NodePort 163 | selector: 164 | k8s-app: kubernetes-dashboard 165 | -------------------------------------------------------------------------------- /k8s/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | kind: ServiceAccount 16 | apiVersion: v1 17 | metadata: 18 | name: kube-dns-autoscaler 19 | namespace: kube-system 20 | labels: 21 | addonmanager.kubernetes.io/mode: Reconcile 22 | --- 23 | kind: ClusterRole 24 | apiVersion: rbac.authorization.k8s.io/v1 25 | metadata: 26 | name: system:kube-dns-autoscaler 27 | labels: 28 | addonmanager.kubernetes.io/mode: Reconcile 29 | rules: 30 | - apiGroups: [""] 31 | resources: ["nodes"] 32 | verbs: ["list"] 33 | - apiGroups: [""] 34 | resources: ["replicationcontrollers/scale"] 35 | verbs: ["get", "update"] 36 | - apiGroups: ["extensions"] 37 | resources: ["deployments/scale", "replicasets/scale"] 38 | verbs: ["get", "update"] 39 | # Remove the configmaps rule once below issue is fixed: 40 | # kubernetes-incubator/cluster-proportional-autoscaler#16 41 | - apiGroups: [""] 42 | resources: ["configmaps"] 43 | verbs: ["get", "create"] 44 | --- 45 | kind: ClusterRoleBinding 46 | apiVersion: rbac.authorization.k8s.io/v1 47 | metadata: 48 | name: system:kube-dns-autoscaler 49 | labels: 50 | addonmanager.kubernetes.io/mode: Reconcile 51 | subjects: 52 | - kind: ServiceAccount 53 | name: kube-dns-autoscaler 54 | namespace: kube-system 55 | roleRef: 56 | kind: ClusterRole 57 | name: system:kube-dns-autoscaler 58 | apiGroup: rbac.authorization.k8s.io 59 | 60 | --- 61 | apiVersion: apps/v1 62 | kind: Deployment 63 | metadata: 64 | name: kube-dns-autoscaler 65 | namespace: kube-system 66 | labels: 67 | k8s-app: kube-dns-autoscaler 68 | kubernetes.io/cluster-service: "true" 69 | addonmanager.kubernetes.io/mode: Reconcile 70 | spec: 71 | selector: 72 | matchLabels: 73 | k8s-app: kube-dns-autoscaler 74 | template: 75 | metadata: 76 | labels: 77 | k8s-app: kube-dns-autoscaler 78 | annotations: 79 | scheduler.alpha.kubernetes.io/critical-pod: '' 80 | spec: 81 | priorityClassName: system-cluster-critical 82 | containers: 83 
| - name: autoscaler 84 | image: gcr.azk8s.cn/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2 85 | resources: 86 | requests: 87 | cpu: "20m" 88 | memory: "10Mi" 89 | command: 90 | - /cluster-proportional-autoscaler 91 | - --namespace=kube-system 92 | - --configmap=kube-dns-autoscaler 93 | # Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base 94 | - --target=Deployment/coredns 95 | # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. 96 | # If using small nodes, "nodesPerReplica" should dominate. 97 | - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} 98 | - --logtostderr=true 99 | - --v=2 100 | tolerations: 101 | - key: "CriticalAddonsOnly" 102 | operator: "Exists" 103 | serviceAccountName: kube-dns-autoscaler 104 | -------------------------------------------------------------------------------- /k8s/addons/heapster/grafana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: monitoring-grafana 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | task: monitoring 12 | k8s-app: grafana 13 | spec: 14 | containers: 15 | - name: grafana 16 | image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3 17 | ports: 18 | - containerPort: 3000 19 | protocol: TCP 20 | volumeMounts: 21 | - mountPath: /etc/ssl/certs 22 | name: ca-certificates 23 | readOnly: true 24 | - mountPath: /var 25 | name: grafana-storage 26 | env: 27 | - name: INFLUXDB_HOST 28 | value: monitoring-influxdb 29 | - name: GF_SERVER_HTTP_PORT 30 | value: "3000" 31 | # The following env variables are required to make Grafana accessible via 32 | # the kubernetes api-server proxy. 
On production clusters, we recommend 33 | # removing these env variables, setup auth for grafana, and expose the grafana 34 | # service using a LoadBalancer or a public IP. 35 | - name: GF_AUTH_BASIC_ENABLED 36 | value: "false" 37 | - name: GF_AUTH_ANONYMOUS_ENABLED 38 | value: "true" 39 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 40 | value: Admin 41 | - name: GF_SERVER_ROOT_URL 42 | # If you're only using the API Server proxy, set this value instead: 43 | # value: /api/v1/namespaces/kube-system/services/monitoring-grafana/proxy 44 | value: / 45 | volumes: 46 | - name: ca-certificates 47 | hostPath: 48 | path: /etc/ssl/certs 49 | - name: grafana-storage 50 | emptyDir: {} 51 | --- 52 | apiVersion: v1 53 | kind: Service 54 | metadata: 55 | labels: 56 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 57 | # If you are NOT using this as an addon, you should comment out this line. 58 | kubernetes.io/cluster-service: 'true' 59 | kubernetes.io/name: monitoring-grafana 60 | name: monitoring-grafana 61 | namespace: kube-system 62 | spec: 63 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer 64 | # or through a public IP. 
65 | # type: LoadBalancer 66 | # You could also use NodePort to expose the service at a randomly-generated port 67 | # type: NodePort 68 | ports: 69 | - port: 80 70 | targetPort: 3000 71 | selector: 72 | k8s-app: grafana 73 | -------------------------------------------------------------------------------- /k8s/addons/heapster/heapster-rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: heapster 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:heapster 9 | subjects: 10 | - kind: ServiceAccount 11 | name: heapster 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /k8s/addons/heapster/heapster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: heapster 5 | namespace: kube-system 6 | --- 7 | apiVersion: extensions/v1beta1 8 | kind: Deployment 9 | metadata: 10 | name: heapster 11 | namespace: kube-system 12 | spec: 13 | replicas: 1 14 | template: 15 | metadata: 16 | labels: 17 | task: monitoring 18 | k8s-app: heapster 19 | spec: 20 | serviceAccountName: heapster 21 | containers: 22 | - name: heapster 23 | image: k8s.gcr.io/heapster-amd64:v1.4.2 24 | imagePullPolicy: IfNotPresent 25 | command: 26 | - /heapster 27 | - --source=kubernetes:https://kubernetes.default 28 | - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086 29 | --- 30 | apiVersion: v1 31 | kind: Service 32 | metadata: 33 | labels: 34 | task: monitoring 35 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 36 | # If you are NOT using this as an addon, you should comment out this line. 
37 | kubernetes.io/cluster-service: 'true' 38 | kubernetes.io/name: Heapster 39 | name: heapster 40 | namespace: kube-system 41 | spec: 42 | ports: 43 | - port: 80 44 | targetPort: 8082 45 | selector: 46 | k8s-app: heapster 47 | -------------------------------------------------------------------------------- /k8s/addons/heapster/influxdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: monitoring-influxdb 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | task: monitoring 12 | k8s-app: influxdb 13 | spec: 14 | containers: 15 | - name: influxdb 16 | image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3 17 | volumeMounts: 18 | - mountPath: /data 19 | name: influxdb-storage 20 | volumes: 21 | - name: influxdb-storage 22 | emptyDir: {} 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | labels: 28 | task: monitoring 29 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 30 | # If you are NOT using this as an addon, you should comment out this line. 
31 | kubernetes.io/cluster-service: 'true' 32 | kubernetes.io/name: monitoring-influxdb 33 | name: monitoring-influxdb 34 | namespace: kube-system 35 | spec: 36 | ports: 37 | - port: 8086 38 | targetPort: 8086 39 | selector: 40 | k8s-app: influxdb 41 | -------------------------------------------------------------------------------- /k8s/bootstrapping/bootstrapping.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # 允许 kubelet tls bootstrap 创建 csr 请求 6 | kubectl create clusterrolebinding create-csrs-for-bootstrapping \ 7 | --clusterrole=system:node-bootstrapper \ 8 | --group=system:bootstrappers 9 | 10 | # 自动批准 system:bootstrappers 组用户 TLS bootstrapping 首次申请证书的 CSR 请求 11 | kubectl create clusterrolebinding auto-approve-csrs-for-group \ 12 | --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \ 13 | --group=system:bootstrappers 14 | 15 | # 自动批准 system:nodes 组用户更新 kubelet 自身与 apiserver 通讯证书的 CSR 请求 16 | kubectl create clusterrolebinding auto-approve-renewals-for-nodes \ 17 | --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient \ 18 | --group=system:nodes 19 | 20 | # 在 kubelet server 开启 api 认证的情况下,apiserver 反向访问 kubelet 10250 需要此授权(eg: kubectl logs) 21 | kubectl create clusterrolebinding system:kubelet-api-admin \ 22 | --clusterrole=system:kubelet-api-admin \ 23 | --user=system:kubelet-api-admin 24 | -------------------------------------------------------------------------------- /k8s/conf/example/1.10.1/apiserver: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure the kube-apiserver 5 | # 6 | 7 | # The address on the local server to listen to. 8 | KUBE_API_ADDRESS="--advertise-address=172.16.0.31 --bind-address=172.16.0.31" 9 | 10 | # The port on the local server to listen on. 
11 | KUBE_API_PORT="--secure-port=6443" 12 | 13 | # Port minions listen on 14 | # KUBELET_PORT="--kubelet-port=10250" 15 | 16 | # Comma separated list of nodes in the etcd cluster 17 | KUBE_ETCD_SERVERS="--etcd-servers=https://172.16.0.36:2379,https://172.16.0.37:2379,https://172.16.0.33:2379" 18 | 19 | # Address range to use for services 20 | KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" 21 | 22 | # default admission control policies 23 | KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction" 24 | 25 | # Add your own! 26 | KUBE_API_ARGS=" --anonymous-auth=false \ 27 | --apiserver-count=3 \ 28 | --audit-log-maxage=30 \ 29 | --audit-log-maxbackup=3 \ 30 | --audit-log-maxsize=100 \ 31 | --audit-log-path=/var/log/kube-audit/audit.log \ 32 | --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ 33 | --authorization-mode=Node,RBAC \ 34 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 35 | --enable-bootstrap-token-auth \ 36 | --enable-garbage-collector \ 37 | --enable-logs-handler \ 38 | --enable-swagger-ui \ 39 | --etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \ 40 | --etcd-certfile=/etc/etcd/ssl/etcd.pem \ 41 | --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ 42 | --etcd-compaction-interval=5m0s \ 43 | --etcd-count-metric-poll-period=1m0s \ 44 | --event-ttl=48h0m0s \ 45 | --kubelet-https=true \ 46 | --kubelet-timeout=3s \ 47 | --log-flush-frequency=5s \ 48 | --token-auth-file=/etc/kubernetes/token.csv \ 49 | --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ 50 | --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ 51 | --service-node-port-range=30000-50000 \ 52 | --service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 53 | --storage-backend=etcd3 \ 54 | --enable-swagger-ui=true" 55 | 
-------------------------------------------------------------------------------- /k8s/conf/example/1.10.1/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=2" 17 | 18 | # Should this cluster be allowed to run privileged docker containers 19 | KUBE_ALLOW_PRIV="--allow-privileged=true" 20 | 21 | # How the controller-manager, scheduler, and proxy find the apiserver 22 | KUBE_MASTER="--master=http://127.0.0.1:8080" 23 | -------------------------------------------------------------------------------- /k8s/conf/example/1.10.1/controller-manager: -------------------------------------------------------------------------------- 1 | ### 2 | # The following values are used to configure the kubernetes controller-manager 3 | 4 | # defaults from config and apiserver should be adequate 5 | 6 | # Add your own! 
7 | KUBE_CONTROLLER_MANAGER_ARGS=" --bind-address=0.0.0.0 \ 8 | --cluster-name=kubernetes \ 9 | --cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 10 | --cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 11 | --controllers=*,bootstrapsigner,tokencleaner \ 12 | --deployment-controller-sync-period=10s \ 13 | --experimental-cluster-signing-duration=86700h0m0s \ 14 | --leader-elect=true \ 15 | --node-monitor-grace-period=40s \ 16 | --node-monitor-period=5s \ 17 | --pod-eviction-timeout=5m0s \ 18 | --terminated-pod-gc-threshold=50 \ 19 | --root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 20 | --service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 21 | --feature-gates=RotateKubeletServerCertificate=true" 22 | -------------------------------------------------------------------------------- /k8s/conf/example/1.10.1/kubelet: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes kubelet (minion) config 3 | 4 | # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) 5 | KUBELET_ADDRESS="--node-ip=172.16.0.36" 6 | 7 | # The port for the info server to serve on 8 | # KUBELET_PORT="--port=10250" 9 | 10 | # You may leave this blank to use the actual hostname 11 | KUBELET_HOSTNAME="--hostname-override=test36.node" 12 | 13 | # location of the api-server 14 | # KUBELET_API_SERVER="" 15 | 16 | # Add your own! 17 | KUBELET_ARGS=" --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ 18 | --cert-dir=/etc/kubernetes/ssl \ 19 | --cgroup-driver=cgroupfs \ 20 | --cluster-dns=10.254.0.2 \ 21 | --cluster-domain=cluster.local. 
\ 22 | --fail-swap-on=false \ 23 | --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \ 24 | --node-labels=node-role.kubernetes.io/k8s-master=true \ 25 | --image-gc-high-threshold=70 \ 26 | --image-gc-low-threshold=50 \ 27 | --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \ 28 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 29 | --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \ 30 | --serialize-image-pulls=false \ 31 | --sync-frequency=30s \ 32 | --pod-infra-container-image=k8s.gcr.io/pause-amd64:3.0 \ 33 | --resolv-conf=/etc/resolv.conf \ 34 | --rotate-certificates" 35 | -------------------------------------------------------------------------------- /k8s/conf/example/1.10.1/proxy: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes proxy config 3 | # default config should be adequate 4 | # Add your own! 5 | KUBE_PROXY_ARGS="--bind-address=0.0.0.0 \ 6 | --hostname-override=test36.node \ 7 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ 8 | --cluster-cidr=10.254.0.0/16" 9 | -------------------------------------------------------------------------------- /k8s/conf/example/1.10.1/scheduler: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes scheduler config 3 | 4 | # default config should be adequate 5 | 6 | # Add your own! 7 | KUBE_SCHEDULER_ARGS=" --address=0.0.0.0 \ 8 | --leader-elect=true \ 9 | --algorithm-provider=DefaultProvider" 10 | -------------------------------------------------------------------------------- /k8s/conf/example/1.11.2/apiserver: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure the kube-apiserver 5 | # 6 | 7 | # The address on the local server to listen to. 
8 | KUBE_API_ADDRESS="--advertise-address=192.168.1.61 --bind-address=192.168.1.61" 9 | 10 | # The port on the local server to listen on. 11 | KUBE_API_PORT="--secure-port=6443" 12 | 13 | # Port minions listen on 14 | # KUBELET_PORT="--kubelet-port=10250" 15 | 16 | # Comma separated list of nodes in the etcd cluster 17 | KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.1.61:2379,https://192.168.1.62:2379,https://192.168.1.63:2379" 18 | 19 | # Address range to use for services 20 | KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" 21 | 22 | # default admission control policies 23 | KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" 24 | 25 | # Add your own! 26 | KUBE_API_ARGS=" --allow-privileged=true \ 27 | --anonymous-auth=false \ 28 | --apiserver-count=3 \ 29 | --audit-log-maxage=30 \ 30 | --audit-log-maxbackup=3 \ 31 | --audit-log-maxsize=100 \ 32 | --audit-log-path=/var/log/kube-audit/audit.log \ 33 | --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ 34 | --authorization-mode=Node,RBAC \ 35 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 36 | --enable-bootstrap-token-auth \ 37 | --enable-garbage-collector \ 38 | --enable-logs-handler \ 39 | --endpoint-reconciler-type=lease \ 40 | --etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \ 41 | --etcd-certfile=/etc/etcd/ssl/etcd.pem \ 42 | --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ 43 | --etcd-compaction-interval=5m0s \ 44 | --etcd-count-metric-poll-period=1m0s \ 45 | --event-ttl=168h0m0s \ 46 | --kubelet-https=true \ 47 | --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \ 48 | --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \ 49 | --kubelet-timeout=3s \ 50 | --log-flush-frequency=5s \ 51 | --logtostderr=true \ 52 | --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ 53 | 
--tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ 54 | --service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 55 | --service-node-port-range=30000-50000 \ 56 | --storage-backend=etcd3 \ 57 | --v=2" 58 | -------------------------------------------------------------------------------- /k8s/conf/example/1.11.2/controller-manager: -------------------------------------------------------------------------------- 1 | ### 2 | # The following values are used to configure the kubernetes controller-manager 3 | 4 | # defaults from config and apiserver should be adequate 5 | 6 | # Add your own! 7 | KUBE_CONTROLLER_MANAGER_ARGS=" --address=127.0.0.1 \ 8 | --bind-address=192.168.1.61 \ 9 | --port=10252 \ 10 | --secure-port=10258 \ 11 | --cluster-cidr=10.20.0.0/16 \ 12 | --allocate-node-cidrs=true \ 13 | --service-cluster-ip-range=10.254.0.0/16 \ 14 | --cluster-name=kubernetes \ 15 | --cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 16 | --cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 17 | --controllers=*,bootstrapsigner,tokencleaner \ 18 | --deployment-controller-sync-period=10s \ 19 | --experimental-cluster-signing-duration=86700h0m0s \ 20 | --enable-garbage-collector=true \ 21 | --leader-elect=true \ 22 | --master=http://127.0.0.1:8080 \ 23 | --node-monitor-grace-period=40s \ 24 | --node-monitor-period=5s \ 25 | --pod-eviction-timeout=5m0s \ 26 | --terminated-pod-gc-threshold=50 \ 27 | --root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 28 | --service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 29 | --feature-gates=RotateKubeletServerCertificate=true \ 30 | --logtostderr=true \ 31 | --v=2" 32 | -------------------------------------------------------------------------------- /k8s/conf/example/1.11.2/kubelet: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes kubelet (minion) config 3 | 4 | # The address for the info server to serve on 
(set to 0.0.0.0 or "" for all interfaces) 5 | KUBELET_ADDRESS="--node-ip=192.168.1.64" 6 | 7 | # The port for the info server to serve on 8 | # KUBELET_PORT="--port=10250" 9 | 10 | # You may leave this blank to use the actual hostname 11 | KUBELET_HOSTNAME="--hostname-override=docker4.node" 12 | 13 | # location of the api-server 14 | # KUBELET_API_SERVER="" 15 | 16 | # Add your own! 17 | KUBELET_ARGS=" --alsologtostderr \ 18 | --logtostderr=true \ 19 | --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ 20 | --network-plugin=cni \ 21 | --cni-conf-dir=/etc/cni/net.d \ 22 | --cert-dir=/etc/kubernetes/ssl \ 23 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 24 | --node-labels=node-role.kubernetes.io/k8s-node=true \ 25 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 26 | --pod-infra-container-image=gcrxio/pause:3.1 \ 27 | --config=/etc/kubernetes/kubeletconfig.yaml \ 28 | --v=2" 29 | -------------------------------------------------------------------------------- /k8s/conf/example/1.11.2/kubeletconfig.yaml: -------------------------------------------------------------------------------- 1 | kind: KubeletConfiguration 2 | apiVersion: kubelet.config.k8s.io/v1beta1 3 | address: 192.168.1.64 4 | authentication.anonymous: fasle 5 | cgroupDriver: cgroupfs 6 | clusterDNS: 7 | - "10.254.0.2" 8 | clusterDomain: "cluster.local." 
9 | failSwapOn: false 10 | healthzPort: 10248 11 | healthzBindAddress: 192.168.1.64 12 | featureGates: 13 | RotateKubeletClientCertificate: true 14 | RotateKubeletServerCertificate: true 15 | imageGCHighThresholdPercent: 70 16 | imageGCLowThresholdPercent: 50 17 | kubeReserved: 18 | cpu: "500m" 19 | memory: "512Mi" 20 | ephemeral-storage: "1Gi" 21 | systemReserved: 22 | cpu: "1000m" 23 | memory: "1024Mi" 24 | ephemeral-storage: "1Gi" 25 | serializeImagePulls: true 26 | syncFrequency: 30s 27 | resolvConf: "/etc/resolv.conf" 28 | rotateCertificates: true 29 | -------------------------------------------------------------------------------- /k8s/conf/example/1.11.2/proxy: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes proxy config 3 | # default config should be adequate 4 | # Add your own! 5 | KUBE_PROXY_ARGS="--bind-address=192.168.1.64 \ 6 | --master=https://127.0.0.1:6443 \ 7 | --proxy-mode=ipvs \ 8 | --masquerade-all \ 9 | --ipvs-min-sync-period=5s \ 10 | --ipvs-sync-period=5s \ 11 | --ipvs-scheduler=wrr \ 12 | --cleanup-ipvs=true \ 13 | --hostname-override=docker4.node \ 14 | --healthz-bind-address=192.168.1.64 \ 15 | --healthz-port=10256 \ 16 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ 17 | --cluster-cidr=10.254.0.0/16 \ 18 | --logtostderr=true \ 19 | --v=2" 20 | -------------------------------------------------------------------------------- /k8s/conf/example/1.11.2/scheduler: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes scheduler config 3 | 4 | # default config should be adequate 5 | 6 | # Add your own! 
7 | KUBE_SCHEDULER_ARGS=" --address=127.0.0.1 \ 8 | --port=10251 \ 9 | --master=http://127.0.0.1:8080 \ 10 | --leader-elect=true \ 11 | --algorithm-provider=DefaultProvider \ 12 | --logtostderr=true \ 13 | --v=2" 14 | -------------------------------------------------------------------------------- /k8s/conf/example/1.13.4/apiserver: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure the kube-apiserver 5 | # 6 | 7 | # The address on the local server to listen to. 8 | KUBE_API_ADDRESS="--advertise-address=192.168.1.51 --bind-address=0.0.0.0" 9 | 10 | # The port on the local server to listen on. 11 | KUBE_API_PORT="--secure-port=6443" 12 | 13 | # Port minions listen on 14 | # KUBELET_PORT="--kubelet-port=10250" 15 | 16 | # Comma separated list of nodes in the etcd cluster 17 | KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.1.51:2379,https://192.168.1.52:2379,https://192.168.1.53:2379" 18 | 19 | # Address range to use for services 20 | KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" 21 | 22 | # default admission control policies 23 | KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,Priority,ResourceQuota" 24 | 25 | # Add your own! 
26 | KUBE_API_ARGS=" --allow-privileged=true \ 27 | --anonymous-auth=false \ 28 | --alsologtostderr \ 29 | --apiserver-count=3 \ 30 | --audit-log-maxage=30 \ 31 | --audit-log-maxbackup=3 \ 32 | --audit-log-maxsize=100 \ 33 | --audit-log-path=/var/log/kube-audit/audit.log \ 34 | --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ 35 | --authorization-mode=Node,RBAC \ 36 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 37 | --enable-bootstrap-token-auth \ 38 | --enable-garbage-collector \ 39 | --enable-logs-handler \ 40 | --endpoint-reconciler-type=lease \ 41 | --etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \ 42 | --etcd-certfile=/etc/etcd/ssl/etcd.pem \ 43 | --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ 44 | --etcd-compaction-interval=0s \ 45 | --event-ttl=168h0m0s \ 46 | --kubelet-https=true \ 47 | --kubelet-certificate-authority=/etc/kubernetes/ssl/k8s-root-ca.pem \ 48 | --kubelet-client-certificate=/etc/kubernetes/ssl/kubelet-api-admin.pem \ 49 | --kubelet-client-key=/etc/kubernetes/ssl/kubelet-api-admin-key.pem \ 50 | --kubelet-timeout=3s \ 51 | --runtime-config=api/all=true \ 52 | --service-node-port-range=30000-50000 \ 53 | --service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 54 | --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ 55 | --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ 56 | --v=2" 57 | -------------------------------------------------------------------------------- /k8s/conf/example/1.13.4/controller-manager: -------------------------------------------------------------------------------- 1 | ### 2 | # The following values are used to configure the kubernetes controller-manager 3 | 4 | # defaults from config and apiserver should be adequate 5 | 6 | # Add your own! 
7 | KUBE_CONTROLLER_MANAGER_ARGS=" --address=127.0.0.1 \ 8 | --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ 9 | --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ 10 | --bind-address=192.168.1.51 \ 11 | --cluster-name=kubernetes \ 12 | --cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 13 | --cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 14 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 15 | --controllers=*,bootstrapsigner,tokencleaner \ 16 | --deployment-controller-sync-period=10s \ 17 | --experimental-cluster-signing-duration=87600h0m0s \ 18 | --enable-garbage-collector=true \ 19 | --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ 20 | --leader-elect=true \ 21 | --node-monitor-grace-period=20s \ 22 | --node-monitor-period=5s \ 23 | --port=10252 \ 24 | --pod-eviction-timeout=2m0s \ 25 | --requestheader-client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 26 | --terminated-pod-gc-threshold=50 \ 27 | --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \ 28 | --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \ 29 | --root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 30 | --secure-port=10257 \ 31 | --service-cluster-ip-range=10.254.0.0/16 \ 32 | --service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 33 | --use-service-account-credentials=true \ 34 | --v=2" 35 | -------------------------------------------------------------------------------- /k8s/conf/example/1.13.4/kubelet: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes kubelet (minion) config 3 | 4 | # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) 5 | KUBELET_ADDRESS="--node-ip=192.168.1.51" 6 | 7 | # The port for the info server to serve on 8 | # KUBELET_PORT="--port=10250" 9 | 10 | # You may leave this blank to use the actual hostname 
11 | KUBELET_HOSTNAME="--hostname-override=docker1.node" 12 | 13 | # location of the api-server 14 | # KUBELET_API_SERVER="" 15 | 16 | # Add your own! 17 | KUBELET_ARGS=" --address=0.0.0.0 \ 18 | --allow-privileged \ 19 | --anonymous-auth=false \ 20 | --authorization-mode=Webhook \ 21 | --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ 22 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 23 | --network-plugin=cni \ 24 | --cgroup-driver=cgroupfs \ 25 | --cert-dir=/etc/kubernetes/ssl \ 26 | --cluster-dns=10.254.0.2 \ 27 | --cluster-domain=cluster.local \ 28 | --cni-conf-dir=/etc/cni/net.d \ 29 | --eviction-soft=imagefs.available<15%,memory.available<512Mi,nodefs.available<15%,nodefs.inodesFree<10% \ 30 | --eviction-soft-grace-period=imagefs.available=3m,memory.available=1m,nodefs.available=3m,nodefs.inodesFree=1m \ 31 | --eviction-hard=imagefs.available<10%,memory.available<256Mi,nodefs.available<10%,nodefs.inodesFree<5% \ 32 | --eviction-max-pod-grace-period=30 \ 33 | --image-gc-high-threshold=80 \ 34 | --image-gc-low-threshold=70 \ 35 | --image-pull-progress-deadline=30s \ 36 | --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \ 37 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 38 | --max-pods=100 \ 39 | --minimum-image-ttl-duration=720h0m0s \ 40 | --node-labels=node.kubernetes.io/k8s-node=true \ 41 | --pod-infra-container-image=gcr.azk8s.cn/google_containers/pause-amd64:3.1 \ 42 | --port=10250 \ 43 | --read-only-port=0 \ 44 | --rotate-certificates \ 45 | --rotate-server-certificates \ 46 | --resolv-conf=/run/systemd/resolve/resolv.conf \ 47 | --system-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \ 48 | --fail-swap-on=false \ 49 | --v=2" 50 | -------------------------------------------------------------------------------- /k8s/conf/example/1.13.4/proxy: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes proxy config 3 | # default config should be adequate 4 | # Add your 
own! 5 | KUBE_PROXY_ARGS=" --bind-address=0.0.0.0 \ 6 | --cleanup-ipvs=true \ 7 | --cluster-cidr=10.254.0.0/16 \ 8 | --hostname-override=docker1.node \ 9 | --healthz-bind-address=0.0.0.0 \ 10 | --healthz-port=10256 \ 11 | --masquerade-all=true \ 12 | --proxy-mode=ipvs \ 13 | --ipvs-min-sync-period=5s \ 14 | --ipvs-sync-period=5s \ 15 | --ipvs-scheduler=wrr \ 16 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ 17 | --logtostderr=true \ 18 | --v=2" 19 | -------------------------------------------------------------------------------- /k8s/conf/example/1.13.4/scheduler: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes scheduler config 3 | 4 | # default config should be adequate 5 | 6 | # Add your own! 7 | KUBE_SCHEDULER_ARGS=" --address=127.0.0.1 \ 8 | --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ 9 | --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ 10 | --bind-address=0.0.0.0 \ 11 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 12 | --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ 13 | --requestheader-client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 14 | --secure-port=10259 \ 15 | --leader-elect=true \ 16 | --port=10251 \ 17 | --tls-cert-file=/etc/kubernetes/ssl/kube-scheduler.pem \ 18 | --tls-private-key-file=/etc/kubernetes/ssl/kube-scheduler-key.pem \ 19 | --v=2" 20 | -------------------------------------------------------------------------------- /k8s/conf/example/1.8/add_cni_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sed -i 's@\(--cluster-dns.*\)@\1\n --network-plugin=cni \\@gi' /etc/kubernetes/kubelet 4 | systemctl daemon-reload 5 | systemctl restart kubelet 6 | -------------------------------------------------------------------------------- /k8s/conf/example/1.8/apiserver: -------------------------------------------------------------------------------- 1 | ### 
2 | # kubernetes system config 3 | # 4 | # The following values are used to configure the kube-apiserver 5 | # 6 | 7 | # The address on the local server to listen to. (NOTE(review): the insecure bind stays on loopback while the secure port binds the node IP — see KUBE_API_PORT below.) 8 | KUBE_API_ADDRESS="--advertise-address=172.16.0.81 --insecure-bind-address=127.0.0.1 --bind-address=172.16.0.81" 9 | 10 | # The port on the local server to listen on. 11 | KUBE_API_PORT="--insecure-port=8080 --secure-port=6443" 12 | 13 | # Port minions listen on 14 | # KUBELET_PORT="--kubelet-port=10250" 15 | 16 | # Comma separated list of nodes in the etcd cluster 17 | KUBE_ETCD_SERVERS="--etcd-servers=https://172.16.0.81:2379,https://172.16.0.82:2379,https://172.16.0.83:2379" 18 | 19 | # Address range to use for services 20 | KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" 21 | 22 | # default admission control policies (NOTE(review): --admission-control is the 1.8-era spelling; later releases renamed it to --enable-admission-plugins — confirm before reusing this example on a newer cluster) 23 | KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction" 24 | 25 | # Add your own! 26 | KUBE_API_ARGS="--authorization-mode=Node,RBAC \ 27 | --anonymous-auth=false \ 28 | --kubelet-https=true \ 29 | --enable-bootstrap-token-auth \ 30 | --token-auth-file=/etc/kubernetes/token.csv \ 31 | --service-node-port-range=30000-50000 \ 32 | --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \ 33 | --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \ 34 | --client-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 35 | --service-account-key-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 36 | --etcd-quorum-read=true \ 37 | --storage-backend=etcd3 \ 38 | --etcd-cafile=/etc/etcd/ssl/etcd-root-ca.pem \ 39 | --etcd-certfile=/etc/etcd/ssl/etcd.pem \ 40 | --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \ 41 | --enable-swagger-ui=true \ 42 | --apiserver-count=3 \ 43 | --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ 44 | --audit-log-maxage=30 \ 45 | --audit-log-maxbackup=3 \ 46 | --audit-log-maxsize=100 \ 47 | --audit-log-path=/var/log/kube-audit/audit.log \ 48 | --event-ttl=1h" 49 | 
-------------------------------------------------------------------------------- /k8s/conf/example/1.8/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=2" 17 | 18 | # Should this cluster be allowed to run privileged docker containers (NOTE(review): --allow-privileged was removed from later Kubernetes releases; valid for this 1.8 example only — confirm) 19 | KUBE_ALLOW_PRIV="--allow-privileged=true" 20 | 21 | # How the controller-manager, scheduler, and proxy find the apiserver (plain HTTP on the loopback insecure port 8080, matching KUBE_API_PORT in the apiserver file) 22 | KUBE_MASTER="--master=http://127.0.0.1:8080" 23 | -------------------------------------------------------------------------------- /k8s/conf/example/1.8/controller-manager: -------------------------------------------------------------------------------- 1 | ### 2 | # The following values are used to configure the kubernetes controller-manager 3 | 4 | # defaults from config and apiserver should be adequate 5 | 6 | # Add your own!
7 | KUBE_CONTROLLER_MANAGER_ARGS="--address=0.0.0.0 \ 8 | --service-cluster-ip-range=10.254.0.0/16 \ 9 | --cluster-name=kubernetes \ 10 | --cluster-signing-cert-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 11 | --cluster-signing-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 12 | --experimental-cluster-signing-duration=87600h0m0s \ 13 | --service-account-private-key-file=/etc/kubernetes/ssl/k8s-root-ca-key.pem \ 14 | --feature-gates=RotateKubeletServerCertificate=true \ 15 | --root-ca-file=/etc/kubernetes/ssl/k8s-root-ca.pem \ 16 | --leader-elect=true \ 17 | --node-monitor-grace-period=40s \ 18 | --node-monitor-period=5s \ 19 | --pod-eviction-timeout=5m0s" 20 | -------------------------------------------------------------------------------- /k8s/conf/example/1.8/kubelet: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes kubelet (minion) config 3 | 4 | # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) 5 | KUBELET_ADDRESS="--address=172.16.0.81" 6 | 7 | # The port for the info server to serve on 8 | # KUBELET_PORT="--port=10250" 9 | 10 | # You may leave this blank to use the actual hostname 11 | KUBELET_HOSTNAME="--hostname-override=k1.node" 12 | 13 | # location of the api-server 14 | # KUBELET_API_SERVER="" 15 | 16 | # Add your own! (NOTE(review): --experimental-bootstrap-kubeconfig and the RotateKubelet* feature gates are 1.8-era spellings that later releases renamed or graduated — confirm against the target kubelet version) 17 | KUBELET_ARGS="--cgroup-driver=cgroupfs \ 18 | --cluster-dns=10.254.0.2 \ 19 | --resolv-conf=/etc/resolv.conf \ 20 | --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ 21 | --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \ 22 | --rotate-certificates \ 23 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 24 | --fail-swap-on=false \ 25 | --image-gc-high-threshold=70 \ 26 | --image-gc-low-threshold=60 \ 27 | --kube-reserved=cpu=1000m,memory=2048Mi,ephemeral-storage=1Gi \ 28 | --cert-dir=/etc/kubernetes/ssl \ 29 | --cluster-domain=cluster.local.
\ 30 | --hairpin-mode=promiscuous-bridge \ 31 | --serialize-image-pulls=false \ 32 | --pod-infra-container-image=k8s.gcr.io/pause-amd64:3.0" 33 | -------------------------------------------------------------------------------- /k8s/conf/example/1.8/proxy: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes proxy config 3 | # default config should be adequate 4 | # Add your own! 5 | KUBE_PROXY_ARGS="--bind-address=172.16.0.81 \ 6 | --hostname-override=k1.node \ 7 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ 8 | --cluster-cidr=10.254.0.0/16" 9 | -------------------------------------------------------------------------------- /k8s/conf/example/1.8/scheduler: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes scheduler config 3 | 4 | # default config should be adequate 5 | 6 | # Add your own! 7 | KUBE_SCHEDULER_ARGS="--leader-elect=true --address=0.0.0.0" 8 | -------------------------------------------------------------------------------- /k8s/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install a specific Kubernetes hyperkube release plus its config tree and systemd units. 4 | # Usage: ./install.sh [version]   (defaults to KUBE_DEFAULT_VERSION) 5 | set -e 6 | 7 | KUBE_DEFAULT_VERSION="1.13.4" 8 | 9 | if [ "$1" != "" ]; then 10 | KUBE_VERSION="$1" 11 | else 12 | echo -e "\033[33mWARNING: KUBE_VERSION is blank,use default version: ${KUBE_DEFAULT_VERSION}\033[0m" 13 | KUBE_VERSION=${KUBE_DEFAULT_VERSION} 14 | fi 15 | 16 | # Download hyperkube once per version. Download to a temp name and mv into place:
17 | # wget -O creates the output file even when the transfer fails, so the old code could leave a partial "hyperkube_v*" that made every later run skip the download and install a corrupt binary. 18 | function download_k8s(){ 19 | if [ ! -f "hyperkube_v${KUBE_VERSION}" ]; then 20 | wget "https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/hyperkube" -O "hyperkube_v${KUBE_VERSION}.tmp" 21 | mv "hyperkube_v${KUBE_VERSION}.tmp" "hyperkube_v${KUBE_VERSION}" 22 | chmod +x "hyperkube_v${KUBE_VERSION}" 23 | fi 24 | } 25 | 26 | # Create the kube system group/user if missing (idempotent). 27 | function preinstall(){ 28 | getent group kube >/dev/null || groupadd -r kube 29 | getent passwd kube >/dev/null || useradd -r -g kube -d / -s /sbin/nologin -c "Kubernetes user" kube 30 | } 31 | 32 | # Copy the binary, the config tree and the systemd units into place. 33 | function install_k8s(){ 34 | echo -e "\033[32mINFO: Copy hyperkube...\033[0m" 35 | cp "hyperkube_v${KUBE_VERSION}" /usr/bin/hyperkube 36 | 37 | echo -e "\033[32mINFO: Create symbolic link...\033[0m" 38 | (cd /usr/bin && hyperkube --make-symlinks) 39 | 40 | echo -e "\033[32mINFO: Copy kubernetes config...\033[0m" 41 | # Copy the *contents* of conf/ — the old "cp -r conf /etc/kubernetes" nested a conf/ directory inside an existing /etc/kubernetes on re-runs. 42 | mkdir -p /etc/kubernetes 43 | cp -r conf/. /etc/kubernetes/ 44 | if [ -d "/etc/kubernetes/ssl" ]; then 45 | chown -R kube:kube /etc/kubernetes/ssl 46 | fi 47 | 48 | echo -e "\033[32mINFO: Copy kubernetes systemd config...\033[0m" 49 | cp systemd/*.service /lib/systemd/system 50 | systemctl daemon-reload 51 | } 52 | 53 | # Create runtime directories (race-free, idempotent) and hand ownership to the kube user. 54 | function postinstall(){ 55 | mkdir -p /var/log/kube-audit /var/lib/kubelet /usr/libexec 56 | chown -R kube:kube /etc/kubernetes /var/log/kube-audit /var/lib/kubelet /usr/libexec 57 | } 58 | 59 | download_k8s 60 | preinstall 61 | install_k8s 62 | postinstall 63 | -------------------------------------------------------------------------------- /k8s/ipvs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cat >> /etc/sysctl.conf <> /etc/modules <