├── .gitignore
├── .idea
└── k8s-tutorial-cn.iml
├── Dockerfile
├── License.md
├── README.md
├── calico.yaml
├── cert-manager.yaml
├── configmap-hellok8s.yaml
├── customize_object.yaml
├── customize_resource_define.yaml
├── daemonset.yaml
├── deployment-use-configmap.yaml
├── deployment-use-secret.yaml
├── deployment.yaml
├── deployment_httpd_svc.yaml
├── deployment_liveness.yaml
├── deployment_logtest.yaml
├── deployment_logtest_fast.yaml
├── deployment_logtest_json.yaml
├── deployment_python_http_svc_nodeport.yaml
├── deployment_readiness.yaml
├── doc_es_quickstart.md
├── doc_helm.md
├── doc_install_ohmyzsh.md
├── doc_k8s_actions_guide.md
├── doc_log_collection.md
├── doc_maintaintion.md
├── doc_security.md
├── doc_skills.md
├── doc_tutorial.md
├── doc_tutorial_senior.md
├── efk-arch
├── .env
├── docker-compose.yml
├── es-master.yml
├── filebeat-configmap.yaml
├── filebeat-daemonset.yaml
└── filebeat-sidecar-example.yaml
├── example_job
├── cronjob.yaml
└── job.yaml
├── example_pod
├── debug-network.yaml
├── dns-config.yaml
├── dns-debug.yaml
├── dns-policy.yaml
├── env.yaml
├── host-aliases.yaml
├── init-container.yaml
├── lifecycle.yaml
├── node-affinity.yaml
├── node-selector.yaml
├── privileged-namespace.yaml
├── privileged-simple.yaml
├── probe-liveness.yaml
├── probe-readiness.yaml
├── projected.yaml
├── resources-limit.yaml
├── sa-token.yaml
├── simple.yaml
├── terminate-grace-period-sec.yaml
├── toleration.yaml
├── topology-spread-constraints-with-node-affinity.yaml
├── topology-spread-constraints.yaml
├── volume-emptydir.yaml
├── volume-hostdir.yaml
├── volume-hostpath-file-or-create.yaml
├── volume-persistent.yaml
├── volume-subpath.yaml
└── volume-subpathexpr.yaml
├── helm
├── elasticsearch
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── Makefile
│ ├── README.md
│ ├── examples
│ │ ├── config
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── test
│ │ │ │ └── goss.yaml
│ │ │ ├── values.yaml
│ │ │ └── watcher_encryption_key
│ │ ├── default
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── rolling_upgrade.sh
│ │ │ └── test
│ │ │ │ └── goss.yaml
│ │ ├── docker-for-mac
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ └── values.yaml
│ │ ├── kubernetes-kind
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── values-local-path.yaml
│ │ │ └── values.yaml
│ │ ├── microk8s
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ └── values.yaml
│ │ ├── migration
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── client.yaml
│ │ │ ├── data.yaml
│ │ │ └── master.yaml
│ │ ├── minikube
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ └── values.yaml
│ │ ├── multi
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── client.yaml
│ │ │ ├── data.yaml
│ │ │ ├── master.yaml
│ │ │ └── test
│ │ │ │ └── goss.yaml
│ │ ├── networkpolicy
│ │ │ ├── Makefile
│ │ │ └── values.yaml
│ │ ├── openshift
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── test
│ │ │ │ └── goss.yaml
│ │ │ └── values.yaml
│ │ ├── security
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── test
│ │ │ │ └── goss.yaml
│ │ │ └── values.yaml
│ │ └── upgrade
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── test
│ │ │ └── goss.yaml
│ │ │ └── values.yaml
│ ├── templates
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── configmap.yaml
│ │ ├── ingress.yaml
│ │ ├── networkpolicy.yaml
│ │ ├── poddisruptionbudget.yaml
│ │ ├── podsecuritypolicy.yaml
│ │ ├── role.yaml
│ │ ├── rolebinding.yaml
│ │ ├── secret-cert.yaml
│ │ ├── secret.yaml
│ │ ├── service.yaml
│ │ ├── serviceaccount.yaml
│ │ ├── statefulset.yaml
│ │ └── test
│ │ │ └── test-elasticsearch-health.yaml
│ ├── values-master.yaml
│ └── values.yaml
├── example-chart
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── templates
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── deployment.yaml
│ │ ├── hpa.yaml
│ │ ├── ingress.yaml
│ │ ├── service.yaml
│ │ ├── serviceaccount.yaml
│ │ └── tests
│ │ │ └── test-connection.yaml
│ └── values.yaml
└── kibana
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── Makefile
│ ├── README.md
│ ├── examples
│ ├── default
│ │ ├── Makefile
│ │ ├── README.md
│ │ └── test
│ │ │ └── goss.yaml
│ ├── openshift
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── test
│ │ │ └── goss.yaml
│ │ └── values.yml
│ ├── security
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── test
│ │ │ └── goss.yaml
│ │ └── values.yaml
│ └── upgrade
│ │ ├── Makefile
│ │ ├── README.md
│ │ ├── test
│ │ └── goss.yaml
│ │ └── values.yaml
│ ├── templates
│ ├── NOTES.txt
│ ├── _helpers.tpl
│ ├── configmap-helm-scripts.yaml
│ ├── configmap.yaml
│ ├── deployment.yaml
│ ├── ingress.yaml
│ ├── post-delete-job.yaml
│ ├── post-delete-role.yaml
│ ├── post-delete-rolebinding.yaml
│ ├── post-delete-serviceaccount.yaml
│ ├── pre-install-job.yaml
│ ├── pre-install-role.yaml
│ ├── pre-install-rolebinding.yaml
│ ├── pre-install-serviceaccount.yaml
│ └── service.yaml
│ ├── values-deploy.yaml
│ └── values.yaml
├── hpa_nginx.yaml
├── hpa_nginx_behavior.yaml
├── hpa_nginx_behavior_default.yaml
├── img
├── es_recv_filebeat_data.jpg
├── filebeat.png
├── filebeat_log.jpg
├── istio-architecture.png
├── k8s-arch.webp
├── k8s-loadbalancer.png
├── k8s-object-model.jpg
├── k9s.png
├── kibana-backing-index.jpg
├── kibana-check-json.jpg
├── kibana-dataview-use-guide.jpg
├── kibana-dataview.jpg
├── kibana-discover.jpg
├── kibana-ds-index-detail.jpg
├── kibana-ds.jpg
├── kibana-new-dataview.jpg
├── prometheus_architecture.png
├── service-mesh.jpg
└── toc.jpg
├── ingress-hellok8s-cert.yaml
├── ingress-hellok8s-defaultbackend.yaml
├── ingress-hellok8s-host.yaml
├── ingress-hellok8s-regex.yaml
├── ingress-hellok8s.yaml
├── ingress-nginx-daemonset-hostnetwork.yaml
├── ingress-nginx-deployment-nodeport.yaml
├── install_by_kind
├── kind-config.yaml
└── readme.md
├── install_by_kubeadm
├── containerd.config.toml
├── install.md
└── k8s-cluster-init.log
├── install_by_minikube
├── install.md
└── nginx.yaml
├── istio
├── istio-manifest.yaml
└── istio-operator.yaml
├── k8s_account_tokens.csv
├── k8s_actions_guide
└── version1
│ ├── base_manifest
│ ├── configmap.yaml
│ ├── deployment-v2.yaml
│ ├── deployment.yaml
│ └── secret.yaml
│ ├── expose_manifest
│ ├── ingress.yaml
│ └── service.yaml
│ ├── go_code
│ ├── Dockerfile
│ ├── client
│ │ ├── go.mod
│ │ ├── go.sum
│ │ └── main_client.go
│ ├── client2
│ │ ├── go.mod
│ │ ├── go.sum
│ │ └── main_client2.go
│ └── go_multiroute
│ │ ├── go.mod
│ │ ├── go.sum
│ │ └── main_multiroute.go
│ ├── istio_manifest
│ ├── authz-accept-only-ingress.yaml
│ ├── authz-allow-nothing.yaml
│ ├── authz-allow-to-go-multiroute.yaml
│ ├── authz-deny-emptyid.yaml
│ ├── authz-recommend.yaml
│ ├── default_svc_route_rule.yaml
│ ├── egressgwy-proxy-http2http.yaml
│ ├── egressgwy-proxy-http2https.yaml
│ ├── egressgwy-proxy-https2https.yaml
│ ├── external-access-control.yaml
│ ├── get_client_ip.yaml
│ ├── ingress-gwy.yaml
│ ├── ingress-virtualsvc.yaml
│ ├── istio_client_test.yaml
│ ├── peer_authn.yaml
│ ├── peer_authn_default.yaml
│ ├── pod_two_port_svc.yaml
│ ├── route-destinationrule.yaml
│ ├── route-virtualservice.yaml
│ ├── unmergeable-vs.yaml
│ └── virtualservice-in-order.yaml
│ ├── other_manifest
│ └── network-policy.yaml
│ └── script
│ ├── del_user.sh
│ ├── new_user.sh
│ └── setup_kubeconfig.sh
├── kubeadm-kuberouter.yaml
├── kubeconfig.yaml
├── kubernetes-dashboard-role.yaml
├── kubernetes-dashboard.yaml
├── kubeschduler-config.yaml
├── limitrange-for-container.yaml
├── limitrange-for-pod.yaml
├── limitrange-for-pvc.yaml
├── main.go
├── main2.go
├── main_hostname.go
├── main_liveness.go
├── main_log.go
├── main_log_json.go
├── main_nginxingress.go
├── main_panic.go
├── main_read_configmap.go
├── main_read_secret.go
├── main_readiness.go
├── metrics-server.yaml
├── namespace.yaml
├── nginx-ingress.yaml
├── pod.yaml
├── pod_affinityNode.yaml
├── pod_affinityPod.yaml
├── pod_associate_serviceaccount.yaml
├── pod_busybox.yaml
├── pod_curl.yaml
├── pod_diff_images.yaml
├── pod_initContainer.yaml
├── pod_limitResource.yaml
├── pod_nginx.yaml
├── pod_nginx_svc.yaml
├── pod_nodeLabel.yaml
├── pod_nodeName.yaml
├── pod_tolerance.yaml
├── pod_two_sidecar_container.yaml
├── pod_use_downwardAPI.yaml
├── pod_use_priorityClass.yaml
├── pod_use_pvc.yaml
├── pod_use_storageclass.yaml
├── pod_volume_emptydir.yaml
├── pod_volume_hostpath.yaml
├── pod_volume_nfs.yaml
├── pods_diff_labels.yaml
├── pvc_hostpath.yaml
├── rbac_aggregate_clusterrole.yaml
├── rbac_clusterrole.yaml
├── rbac_clusterrolebinding.yaml
├── rbac_role.yaml
├── rbac_role_granter.yaml
├── rbac_rolebinding.yaml
├── rbac_rolebinding_clusterrole.yaml
├── replicaset.yaml
├── resource-quota-scope-invalid.yaml
├── resource-quota-scope.yaml
├── resource-quota.yaml
├── secret-hellok8s-cert.yaml
├── secret-hellok8s-misc.yaml
├── secret-serviceaccount.yaml
├── service-clusterip-externalip.yaml
├── service-clusterip-headless.yaml
├── service-clusterip.yaml
├── service-externalname.yaml
├── service-headless-endpoints.yaml
├── service-loadbalancer.yaml
├── service-nodeport.yaml
├── serviceaccount.yaml
├── stateful-nginx.yaml
├── stateful-svc.yaml
└── troubleshotting
└── t1.unexpected_cluster_boom.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
--------------------------------------------------------------------------------
/.idea/k8s-tutorial-cn.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.20-alpine AS builder
2 |
3 | # Cache dependencies
4 | WORKDIR /go/cache
5 | COPY go.mod .
6 | COPY go.sum .
7 | RUN GOPROXY=https://goproxy.cn,direct go mod tidy
8 |
9 | WORKDIR /build
10 | COPY . .
11 |
12 | # Why cgo is disabled: this is a multi-stage build, so the Go program is compiled and run in different environments; leaving cgo on would make the binary fail to run
13 | RUN GOOS=linux CGO_ENABLED=0 GOARCH=amd64 GO111MODULE=auto go build -o main -ldflags "-w -extldflags -static"
14 |
15 | #FROM scratch as prod
16 | FROM alpine as prod
17 | # See http://www.asznl.com/post/48 for an overview of Docker base images: scratch, busybox, alpine
18 | # Even smaller is distroless, provided by Google; see: https://github.com/GoogleContainerTools/distroless
19 |
20 | # Set the timezone in alpine
21 | RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
22 | apk add -U tzdata && cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && apk del tzdata && date
23 |
24 | COPY --from=builder /build/main .
25 |
26 | EXPOSE 3000
27 | ENTRYPOINT ["/main"]
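28 |
29 | # A hedged usage sketch (assumption: this Dockerfile is what produces the leigg/hellok8s image referenced by the manifests):
30 | #   docker build -t leigg/hellok8s:v1 .
31 | #   docker run --rm -p 3000:3000 leigg/hellok8s:v1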
--------------------------------------------------------------------------------
/configmap-hellok8s.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: hellok8s-configmap
5 | data: # holds UTF-8 strings
6 | DB_URL1: "http://mydb.example123.com"
7 | binaryData: # holds binary data as base64-encoded strings
8 | app-config.json: eyJkYl91cmwiOiJteXNxbC5leGFtcGxlLmNvbSJ9Cg== # echo '{"db_url":"mysql.example.com"}' |base64
9 |
10 | # For a cluster that uses configmaps heavily, disabling configmap modification brings the following benefits:
11 | # 1. Protects applications from the negative impact of accidental (unwanted) updates.
12 | # 2. Improves cluster performance by greatly reducing load on kube-apiserver, because watches on configmaps marked immutable are closed.
13 | # Once marked immutable the change is irreversible; to modify it you can only delete and recreate the configmap.
14 | immutable: true
15 |
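16 | # A hedged sketch of how the immutable flag behaves (kubectl commands assumed, not part of the original file):
17 | #   kubectl apply -f configmap-hellok8s.yaml
18 | #   # edit DB_URL1 above and re-apply; the update is expected to be rejected with a "field is immutable" error
19 | #   kubectl delete configmap hellok8s-configmap && kubectl apply -f configmap-hellok8s.yaml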
--------------------------------------------------------------------------------
/customize_object.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stable.example.com/v1
2 | kind: CronTab
3 | metadata:
4 | name: my-new-cron-object
5 | namespace: default
6 | spec:
7 | cronSpec: "* * * * */5"
8 | image: my-awesome-cron-image
--------------------------------------------------------------------------------
/customize_resource_define.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | # the name must match the spec fields below, in the form '<plural>.<group>'
5 | name: crontabs.stable.example.com
6 | spec:
7 | # group name, used in the REST API: /apis/<group>/<version>
8 | group: stable.example.com
9 | # list of versions supported by this CustomResourceDefinition
10 | versions:
11 | - name: v1
12 | # each version can be independently enabled or disabled via the served flag
13 | served: true
14 | # one and only one version must be marked as the storage version
15 | storage: true
16 | schema:
17 | openAPIV3Schema:
18 | type: object
19 | properties:
20 | spec:
21 | type: object
22 | properties:
23 | cronSpec:
24 | type: string
25 | image:
26 | type: string
27 | replicas:
28 | type: integer
29 | # can be either Namespaced or Cluster
30 | scope: Namespaced
31 | names:
32 | # plural name, used in the URL: /apis/<group>/<version>/<plural>
33 | plural: crontabs
34 | # singular name, used as an alias on the command line and in display
35 | singular: crontab
36 | # kind is usually the CamelCased singular form; your resource manifests use this form
37 | kind: CronTab
38 | # shortNames let you match the resource with a shorter string on the command line
39 | shortNames:
40 | - ct
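41 |
42 | # A hedged usage sketch (kubectl commands assumed, not part of the original file): after applying this CRD,
43 | # the custom object in customize_object.yaml can be created and listed via the plural name or the short name:
44 | #   kubectl apply -f customize_resource_define.yaml
45 | #   kubectl apply -f customize_object.yaml
46 | #   kubectl get crontabs   # or: kubectl get ct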
--------------------------------------------------------------------------------
/daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: daemonset-hellok8s-go-http
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: hellok8s
9 | template:
10 | metadata:
11 | labels:
12 | app: hellok8s
13 | spec:
14 | tolerations:
15 | # these tolerations let the daemonset run on control-plane nodes
16 | # remove them if you do not want your control-plane nodes to run Pods
17 | - key: node-role.kubernetes.io/master
18 | effect: NoSchedule
19 | containers:
20 | - image: leigg/hellok8s:v1
21 | name: hellok8s
--------------------------------------------------------------------------------
/deployment-use-configmap.yaml:
--------------------------------------------------------------------------------
1 | # This manifest demonstrates two ways to consume a configmap:
2 | # - 1. as environment variables
3 | # - 2. as a mounted volume
4 | apiVersion: apps/v1
5 | kind: Deployment
6 | metadata:
7 | name: hellok8s-go-http
8 | spec:
9 | replicas: 1 # with hostPort, each node can run only one such pod
10 | strategy:
11 | type: Recreate # hostPort is used for testing below, so an update must destroy the old pod before creating the new one
12 | selector:
13 | matchLabels:
14 | app: hellok8s
15 | template:
16 | metadata:
17 | labels:
18 | app: hellok8s
19 | spec:
20 | containers:
21 | - image: leigg/hellok8s:v4_configmap
22 | name: hellok8s
23 | ports:
24 | - containerPort: 3000
25 | hostPort: 3000
26 | env: # read data as environment variables
27 | - name: DB_URL
28 | valueFrom:
29 | configMapKeyRef:
30 | name: hellok8s-configmap
31 | key: DB_URL
32 | volumeMounts: # read the binary data via a mounted volume
33 | - name: configmap-volume
34 | mountPath: "/etc/configmap_vol"
35 | volumes:
36 | - name: configmap-volume
37 | configMap:
38 | name: hellok8s-configmap
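39 |
40 | # A hedged verification sketch (commands assumed, not part of the original file):
41 | #   kubectl exec deploy/hellok8s-go-http -- env | grep DB_URL
42 | #   kubectl exec deploy/hellok8s-go-http -- cat /etc/configmap_vol/app-config.json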
--------------------------------------------------------------------------------
/deployment-use-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hellok8s-go-http
5 | spec:
6 | strategy:
7 | type: Recreate # hostPort is used for testing below, so an update must destroy the old pod before creating the new one
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: hellok8s
12 | template:
13 | metadata:
14 | labels:
15 | app: hellok8s
16 | spec:
17 | containers:
18 | - image: leigg/hellok8s:v4_secret
19 | name: hellok8s
20 | ports:
21 | - containerPort: 3000
22 | hostPort: 3000
23 | env:
24 | - name: DB_PASSWD
25 | valueFrom:
26 | secretKeyRef:
27 | name: hellok8s-secret
28 | key: DB_PASSWD
29 | - name: LOG_LEVEL
30 | valueFrom:
31 | secretKeyRef:
32 | name: hellok8s-secret # the name must be valid and must exist
33 | key: not_found_key
34 | optional: true # allow the key to be missing, so a missing key does not prevent the Pod from starting
35 | volumeMounts:
36 | - name: secret-volume
37 | mountPath: "/etc/secret_vol"
38 | volumes:
39 | - name: secret-volume
40 | secret:
41 | secretName: hellok8s-secret
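42 |
43 | # A hedged verification sketch (commands assumed, not part of the original file):
44 | #   kubectl exec deploy/hellok8s-go-http -- env | grep DB_PASSWD
45 | #   kubectl exec deploy/hellok8s-go-http -- ls /etc/secret_vol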
--------------------------------------------------------------------------------
/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | # unique name of the deployment
5 | name: hellok8s-go-http
6 | spec:
7 | replicas: 2 # number of replicas
8 | selector:
9 | matchLabels:
10 | app: hellok8s # manages all pods under template with app=hellok8s (must exactly match template.metadata.labels, otherwise the deployment cannot be created)
11 | template: # template defines a group of containers
12 | metadata:
13 | labels:
14 | app: hellok8s
15 | spec:
16 | containers:
17 | - image: leigg/hellok8s:v1
18 | name: hellok8s
--------------------------------------------------------------------------------
/deployment_httpd_svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: httpd
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: httpd
9 | template:
10 | metadata:
11 | labels:
12 | app: httpd
13 | spec:
14 | containers:
15 | - name: httpd-container
16 | image: httpd
17 | ports:
18 | - containerPort: 80
19 |
20 | ---
21 |
22 | apiVersion: v1
23 | kind: Service
24 | metadata:
25 | name: service-httpd
26 | spec:
27 | type: ClusterIP
28 | selector:
29 | app: httpd
30 | ports:
31 | - port: 8080
32 | targetPort: 80
--------------------------------------------------------------------------------
/deployment_liveness.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | # unique name of the deployment
5 | name: hellok8s-go-http
6 | spec:
7 | replicas: 2 # number of replicas
8 | selector:
9 | matchLabels:
10 | app: hellok8s # manages all pods under template with app=hellok8s (must exactly match template.metadata.labels, otherwise the deployment cannot be created)
11 | template: # template defines a group of pods
12 | metadata:
13 | labels:
14 | app: hellok8s
15 | spec:
16 | containers:
17 | - image: leigg/hellok8s:v1
18 | name: hellok8s
19 | # liveness probe
20 | livenessProbe:
21 | # http get probes the path and port of the HTTP service exposed by the pod
22 | httpGet:
23 | path: /healthz
24 | port: 3000
25 | initialDelaySeconds: 3 # start probing 3s after the container starts
26 | periodSeconds: 3 # probe every 3s
27 | timeoutSeconds: 1 # timeout for a single probe, default 1
28 | failureThreshold: 3 # number of consecutive failures tolerated, default 3; once reached, the container in the pod is restarted
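29 |
30 | # A hedged observation sketch (commands assumed, not part of the original file): once /healthz starts failing,
31 | # the pods' RESTARTS count is expected to increase:
32 | #   kubectl get pods -l app=hellok8s -w
33 | #   kubectl describe pod POD_NAME   # the Events section should show the failed liveness probes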
--------------------------------------------------------------------------------
/deployment_logtest.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hellok8s-logtest
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hellok8s
10 | template:
11 | metadata:
12 | labels:
13 | app: hellok8s
14 | spec:
15 | containers:
16 | - image: leigg/hellok8s:log_test
17 | name: hellok8s
--------------------------------------------------------------------------------
/deployment_logtest_fast.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hellok8s-logtest-fast
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hellok8s
10 | template:
11 | metadata:
12 | labels:
13 | app: hellok8s
14 | spec:
15 | containers:
16 | - image: leigg/hellok8s:log_test_fast
17 | name: hellok8s
--------------------------------------------------------------------------------
/deployment_logtest_json.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hellok8s-logtest-json
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hellok8s
10 | template:
11 | metadata:
12 | labels:
13 | app: hellok8s
14 | spec:
15 | containers:
16 | - image: leigg/hellok8s:log_test_json
17 | name: hellok8s
--------------------------------------------------------------------------------
/deployment_python_http_svc_nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: python-http-serv
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: http
9 | template:
10 | metadata:
11 | labels:
12 | app: http
13 | spec:
14 | containers:
15 | - name: http-container
16 | image: python:3.9-alpine
17 | command: [ "python3", "-mhttp.server", "8080" ]
18 | ports:
19 | - containerPort: 8080
20 |
21 | ---
22 |
23 | apiVersion: v1
24 | kind: Service
25 | metadata:
26 | name: python-http-serv
27 | spec:
28 | type: NodePort
29 | selector:
30 | app: http
31 | ports:
32 | - port: 80
33 | targetPort: 8080
34 | nodePort: 30080
--------------------------------------------------------------------------------
/deployment_readiness.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | # unique name of the deployment
5 | name: hellok8s-go-http
6 | spec:
7 | strategy:
8 | rollingUpdate:
9 | maxSurge: 1
10 | maxUnavailable: 1
11 | replicas: 3 # number of replicas
12 | selector:
13 | matchLabels:
14 | app: hellok8s # manages all pods under template with app=hellok8s (must exactly match template.metadata.labels, otherwise the deployment cannot be created)
15 | template: # template defines a group of pods
16 | metadata:
17 | labels:
18 | app: hellok8s
19 | spec:
20 | containers:
21 | - image: leigg/hellok8s:v1
22 | name: hellok8s
23 | # readiness probe
24 | readinessProbe:
25 | # http get probes the path and port of the HTTP service exposed by the pod
26 | httpGet:
27 | path: /healthz
28 | port: 3000
29 | initialDelaySeconds: 1 # start probing 1s after the container starts
30 | periodSeconds: 5 # probe every 5s
31 | timeoutSeconds: 1 # timeout for a single probe, default 1
32 | failureThreshold: 3 # number of consecutive failures tolerated, default 3; once reached, the pod is marked not ready
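33 |
34 | # A hedged observation sketch (commands assumed; the Service name is a placeholder): when /healthz fails,
35 | # the pod stays Running but is reported as not ready and is removed from the Service endpoints:
36 | #   kubectl get pods -l app=hellok8s
37 | #   kubectl get endpoints SERVICE_NAME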
--------------------------------------------------------------------------------
/doc_install_ohmyzsh.md:
--------------------------------------------------------------------------------
1 | ## Quick install of oh-my-zsh
2 |
3 | [What is oh-my-zsh?](https://www.google.com.hk/search?q=什么是ohmyzsh)
4 |
5 | ```shell
6 | yum install -y git zsh wget
7 |
8 | wget https://gitee.com/mirrors/oh-my-zsh/raw/master/tools/install.sh
9 | vi install.sh
10 | # change the following two lines
11 | # REPO=${REPO:-ohmyzsh/ohmyzsh}
12 | # REMOTE=${REMOTE:-https://github.com/${REPO}.git}
13 | # to
14 | # REPO=${REPO:-mirrors/oh-my-zsh}
15 | # REMOTE=${REMOTE:-https://gitee.com/${REPO}.git}
16 | # save and run
17 | chmod +x install.sh && ./install.sh
18 |
19 | # change the theme
20 | ls ~/.oh-my-zsh/themes
21 | vi ~/.zshrc
22 | # find the ZSH_THEME line and set it to the theme you want
23 |
24 | # install plugins
25 | git clone https://gitee.com/jsharkc/zsh-autosuggestions.git $ZSH_CUSTOM/plugins/zsh-autosuggestions
26 | git clone https://gitee.com/jsharkc/zsh-syntax-highlighting.git $ZSH_CUSTOM/plugins/zsh-syntax-highlighting
27 |
28 | # enable the plugins
29 | sed -i 's/plugins=(git)/plugins=(git zsh-autosuggestions zsh-syntax-highlighting)/' ~/.zshrc
30 | # set aliases
31 | echo 'alias kk="kubectl"' >> ~/.zshrc
32 | echo 'alias m="minikube"' >> ~/.zshrc # if minikube is installed
33 |
34 | # apply the changes
35 | source ~/.zshrc
36 | ```
--------------------------------------------------------------------------------
/efk-arch/.env:
--------------------------------------------------------------------------------
1 | # Password for the 'elastic' user (at least 6 characters)
2 | ELASTIC_PASSWORD=123456
3 |
4 | # Password for the 'kibana_system' user (at least 6 characters)
5 | KIBANA_PASSWORD=123456
6 |
7 | # Version of Elastic products
8 | STACK_VERSION=8.11.3
9 |
10 | # Set the cluster name
11 | CLUSTER_NAME=docker-cluster
12 |
13 | # Set to 'basic' or 'trial' to automatically start the 30-day trial
14 | LICENSE=basic
15 | #LICENSE=trial
16 |
17 | # Port to expose Elasticsearch HTTP API to the host
18 | ES_PORT=9200
19 | #ES_PORT=127.0.0.1:9200
20 |
21 | # Port to expose Kibana to the host
22 | KIBANA_PORT=5601
23 | #KIBANA_PORT=80
24 |
25 | # Increase or decrease based on the available host memory (in bytes)
26 | # Recommended to keep at least 1GB, otherwise the cluster will very likely misbehave (verified firsthand)
27 | MEM_LIMIT=1073741824
28 |
29 | # Project namespace (defaults to the current folder name if not set)
30 | COMPOSE_PROJECT_NAME=test-es
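31 |
32 | # Note: docker compose loads this .env automatically from the project directory, so the stack defined in the
33 | # adjacent docker-compose.yml can be brought up with, e.g.: docker compose up -d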
--------------------------------------------------------------------------------
/example_job/cronjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: pods-cronjob
5 | spec:
6 | schedule: "*/1 * * * *" # minute-level granularity at most; this means once per minute
7 | startingDeadlineSeconds: 3 # maximum delay allowed for starting the job; the run counts as failed if exceeded
8 | concurrencyPolicy: Forbid # Allow/Forbid/Replace; whether the next run may start while the previous Job is still running, default Allow
9 | suspend: false # whether to pause the cronjob; usually toggled via kubectl edit
10 | successfulJobsHistoryLimit: 3 # how many successful Job records to keep, default 3
11 | failedJobsHistoryLimit: 1 # how many failed Job records to keep, default 1
12 | jobTemplate:
13 | spec:
14 | template:
15 | spec:
16 | restartPolicy: Never
17 | containers:
18 | - command: [ 'sh', '-c', 'echo "Start Job!"; sleep 30; echo "Job Done!"' ]
19 | image: busybox
20 | name: pods-cronjob-container
21 |
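22 | # A hedged observation sketch (commands assumed, not part of the original file): after applying this manifest,
23 | # a new Job should appear roughly every minute, with old records pruned per the history limits:
24 | #   kubectl get cronjob pods-cronjob
25 | #   kubectl get jobs --watch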
--------------------------------------------------------------------------------
/example_job/job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pods-job
5 | spec:
6 | # completions: 3 # if enabled, run the job to 3 completions, one after another
7 | # parallelism: 3 # if enabled, the number of pods run in parallel; completions sets the total count
8 | # backoffLimit: 3 # limit on retries, default 6; once exceeded no new pods are started
9 | # activeDeadlineSeconds: 10 # limit on job run time; the job is force-terminated on timeout (and auto-deleted later if configured), regardless of restartPolicy
10 | ttlSecondsAfterFinished: 10 # seconds after which a finished job is automatically deleted, so unneeded jobs don't pile up
11 | template:
12 | spec:
13 | restartPolicy: Never # or OnFailure, no other values allowed; Never is recommended because the controller then starts new pods and keeps failed pods around, which helps troubleshooting; OnFailure keeps restarting the old pod
14 | containers:
15 | - command: ['sh', '-c', 'echo "Start Job!"; sleep 30; echo "Job Done!"']
16 | image: busybox
17 | name: pods-job-container
--------------------------------------------------------------------------------
/example_pod/debug-network.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: debug-network-pod
6 | spec:
7 | containers:
8 | - command:
9 | - sleep
10 | - "3600"
11 | image: praqma/network-multitool
12 | name: debug-network-container
--------------------------------------------------------------------------------
/example_pod/dns-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: dns-config-dns-config-pod
6 | spec:
7 | containers:
8 | - name: test
9 | image: nginx
10 | dnsPolicy: "None"
11 | dnsConfig:
12 | nameservers:
13 | - 1.2.3.4
14 | searches:
15 | - ns1.svc.cluster-domain.example
16 | - my.dns.search.suffix
17 | options:
18 | - name: ndots
19 | value: "2"
20 | - name: edns0
21 |
--------------------------------------------------------------------------------
/example_pod/dns-debug.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: pod-dns-debug
7 | spec:
8 | containers:
9 | - command:
10 | - sleep
11 | - "3600"
12 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
13 | name: dnsutils
--------------------------------------------------------------------------------
/example_pod/dns-policy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Adapted from: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
3 | # "Default": The Pod inherits the name resolution configuration from the node that the pods run on. See related discussion for more details.
4 | # "ClusterFirst": Any DNS query that does not match the configured cluster domain suffix, such as "www.kubernetes.io", is forwarded to the upstream nameserver inherited from the node. Cluster administrators may have extra stub-domain and upstream DNS servers configured. See related discussion for details on how DNS queries are handled in those cases.
5 | # "ClusterFirstWithHostNet": For Pods running with hostNetwork, you should explicitly set its DNS policy "ClusterFirstWithHostNet".
6 | # "None": It allows a Pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided using the dnsConfig field in the Pod Spec. See Pod's DNS config subsection below.
7 |
8 | apiVersion: v1
9 | kind: Pod
10 | metadata:
11 | name: dns-config-policy-pod
12 | spec:
13 | containers:
14 | - command:
15 | - sleep
16 | - "3600"
17 | image: busybox
18 | name: dns-config-policy-container
19 | hostNetwork: true
20 | dnsPolicy: ClusterFirstWithHostNet
21 |
--------------------------------------------------------------------------------
/example_pod/env.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: pods-env-pod
6 | spec:
7 | containers:
8 | - command:
9 | - sleep
10 | - "3600"
11 | image: busybox
12 | name: pods-simple-container
13 | env:
14 | - name: CONFIG_MAP_KEY
15 | valueFrom:
16 | configMapKeyRef:
17 | name: my-config-map
18 | key: my-key
19 | - name: CPU_LIMIT
20 | valueFrom:
21 | resourceFieldRef:
22 | resource: limits.cpu
23 | - name: SECRET_KEY
24 | valueFrom:
25 | secretKeyRef:
26 | name: my-secret
27 | key: secret-key
28 | - name: NODE_NAME
29 | valueFrom:
30 | fieldRef:
31 | fieldPath: spec.nodeName
--------------------------------------------------------------------------------
/example_pod/host-aliases.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/#adding-additional-entries-with-hostaliases
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: pods-host-aliases-pod
7 | spec:
8 | hostAliases:
9 | - ip: "127.0.0.1"
10 | hostnames:
11 | - "foo.local"
12 | - "bar.local"
13 | - ip: "10.1.2.3"
14 | hostnames:
15 | - "foo.remote"
16 | - "bar.remote"
17 | containers:
18 | - name: cat-hosts
19 | image: busybox
20 | command:
21 | - cat
22 | args:
23 | - "/etc/hosts"
24 |
--------------------------------------------------------------------------------
/example_pod/init-container.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: init-container-pod
6 | spec:
7 | containers:
8 | - name: init-container-container
9 | image: busybox
10 | command: ['sh', '-c', 'echo The app is running! && sleep 3600']
11 | initContainers:
12 | - name: init-container-init-container
13 | image: busybox
14 | command: ['sh', '-c', "until nslookup pods-init-container-service.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
15 |
--------------------------------------------------------------------------------
/example_pod/lifecycle.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: lifecycle-pod
6 | spec:
7 | containers:
8 | - image: nginx
9 | lifecycle:
10 | postStart:
11 | exec:
12 | command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
13 | preStop:
14 | exec:
15 | command: ["/bin/sh", "-c", "nginx -s quit; while killall -0 nginx; do sleep 1; done"]
16 | name: lifecycle-container
17 |
--------------------------------------------------------------------------------
/example_pod/node-affinity.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: pod-node-affinity
6 | spec:
7 | affinity:
8 | nodeAffinity:
9 | requiredDuringSchedulingIgnoredDuringExecution:
10 | nodeSelectorTerms:
11 | - matchExpressions:
12 | - key: kubernetes.io/hostname
13 | operator: Exists
14 | containers:
15 | - command: ["sleep", "3600"]
16 | name: pod-node-affinity-container
17 | image: busybox
18 |
--------------------------------------------------------------------------------
/example_pod/node-selector.yaml:
--------------------------------------------------------------------------------
1 | # Assumes the existence of the label: node-role.kubernetes.io/master, and tries to assign the pod to the labelled node.
2 | ---
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: pod-node-selector-simple
7 | spec:
8 | containers:
9 | - command: ["sleep", "3600"]
10 | image: busybox
11 | name: pod-node-selector-simple-container
12 | nodeSelector:
13 | node-role.kubernetes.io/master: ""
14 |
--------------------------------------------------------------------------------
/example_pod/privileged-namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Namespace here refers to the container namespaces, not kubernetes
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: privileged-namespace-pod
7 | spec:
8 | hostPID: true
9 | hostIPC: true
10 | hostNetwork: true
11 | containers:
12 | - command:
13 | - sleep
14 | - "3600"
15 | image: busybox
16 | name: privileged-namespace-container
17 | securityContext:
18 | privileged: true
19 |
--------------------------------------------------------------------------------
/example_pod/privileged-simple.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: privileged-simple-pod
6 | spec:
7 | containers:
8 | - command:
9 | - sleep
10 | - "3600"
11 | image: busybox
12 | name: privileged-simple-pod
13 | securityContext:
14 | privileged: true
15 | runAsNonRoot: false
16 | runAsUser: 1000 # uid
17 | runAsGroup: 1000 # gid
18 | procMount: "Unmasked" # can be set to "Default" or "Unmasked"
19 | readOnlyRootFilesystem: true
--------------------------------------------------------------------------------
/example_pod/probe-liveness.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | labels:
6 | test: liveness
7 | name: liveness-http
8 | spec:
9 | containers:
10 | - args:
11 | - /server
12 | image: k8s.gcr.io/liveness
13 | livenessProbe:
14 | exec:
15 | command: [ "sh", "-c", "echo liveness" ]
16 | tcpSocket:
17 | port: 80
18 | httpGet:
19 | httpHeaders:
20 | - name: X-Custom-Header
21 | value: Awesome
22 | # when "host" is not defined, "PodIP" will be used
23 | # host: my-host
24 | # when "scheme" is not defined, "HTTP" scheme will be used. Only "HTTP" and "HTTPS" are allowed
25 | # scheme: HTTPS
26 | path: /healthz
27 | port: 8080
28 | initialDelaySeconds: 15
29 | timeoutSeconds: 1
30 | successThreshold: 1 # consecutive successes required before the probe is considered successful
31 | failureThreshold: 5 # consecutive failures required before the probe is considered failed
32 | name: liveness
33 |
--------------------------------------------------------------------------------
/example_pod/probe-readiness.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: pods-readiness-exec-pod
6 | spec:
7 | containers:
8 | - args:
9 | - /bin/sh
10 | - -c
11 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
12 | image: busybox
13 | readinessProbe:
14 | exec:
15 | command:
16 | - cat
17 | - /tmp/healthy
18 | initialDelaySeconds: 5
19 | name: pods-readiness-exec-container
20 |
--------------------------------------------------------------------------------
/example_pod/projected.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-with-a-secret-a-downward-api-and-a-configmap
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: volumes-projected-pod
7 | spec:
8 | containers:
9 | - command:
10 | - sleep
11 | - "3600"
12 | image: busybox
13 | name: volumes-projected-container
14 | volumeMounts:
15 | - name: volumes-projected-volume-mount
16 | mountPath: "/volumes-projected-volume-path"
17 | readOnly: true
18 | volumes:
19 | - name: volumes-projected-volume-mount
20 | projected:
21 | sources:
22 | - secret:
23 | items:
24 | - key: username
25 | path: my-group/my-username
26 | mode: 511
27 | name: volumes-projected-secret
28 | - downwardAPI:
29 | items:
30 | - path: "labels"
31 | fieldRef:
32 | fieldPath: metadata.labels
33 | - path: "cpu_limit"
34 | resourceFieldRef:
35 | containerName: container-test
36 | resource: limits.cpu
37 | - configMap:
38 | items:
39 | - key: config
40 | path: my-group/my-config
41 | name: volumes-projected-configmap
42 |
--------------------------------------------------------------------------------
/example_pod/resources-limit.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: memory-request-limit-pod
6 | spec:
7 | containers:
8 | - command: ["sleep", "3600"]
9 | image: busybox
10 | name: resources-limit-container
11 | resources:
12 | limits: # maximum resources the container may use; exceeding them may get the container terminated or heavily throttled
13 | cpu: "0.1" # default unit is CPU cores; 0.1 is equivalent to 100m (milli-CPU)
14 | memory: "128Mi" # supported units include MiB, GiB, MB, GB (MiB=1024*1024B, MB=1000*1000B)
15 | requests: # minimum required for scheduling; the pod cannot be scheduled if no node has enough free resources
16 | cpu: "0.1"
17 | memory: "128Mi"
18 |
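19 | # A small worked example of the units above (for illustration):
20 | #   cpu "0.1" = 100m = one tenth of a CPU core
21 | #   "128Mi"   = 128 * 1024 * 1024 bytes = 134,217,728 bytes
22 | #   "128M"    = 128 * 1000 * 1000 bytes = 128,000,000 bytes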
--------------------------------------------------------------------------------
/example_pod/sa-token.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-with-a-secret-a-downward-api-and-a-configmap
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: volumes-sa-token-pod
7 | spec:
8 | containers:
9 | - name: container-test
10 | image: busybox
11 | volumeMounts:
12 | - mountPath: "/service-account"
13 | name: volumes-sa-token-volume
14 | readOnly: true
15 | volumes:
16 | - name: volumes-sa-token-volume
17 | projected:
18 | sources:
19 | - serviceAccountToken:
20 | audience: api
21 | expirationSeconds: 3600
22 | path: token
23 |
--------------------------------------------------------------------------------
/example_pod/simple.yaml:
--------------------------------------------------------------------------------
1 | # kubectl explain pod shows the meaning of each field
2 | # kubectl explain pod.spec
3 | ---
4 | apiVersion: v1
5 | kind: Pod
6 | metadata:
7 | name: pods-simple-pod
8 | spec:
9 | imagePullSecrets: # a secret may be needed to pull the image, typically holding docker registry credentials
10 | - name: regcred # the secret must be created in advance
11 | containers:
12 | - command:
13 | - sleep
14 | - "3600"
15 | image: busybox
16 | imagePullPolicy: IfNotPresent # three pull policies are supported: Always/Never/IfNotPresent (default)
17 | name: pods-simple-container
18 | workingDir: /data
19 |
20 |
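21 | # A hedged sketch of creating the pull secret referenced above (all values are placeholders):
22 | #   kubectl create secret docker-registry regcred \
23 | #     --docker-server=REGISTRY_HOST --docker-username=USER \
24 | #     --docker-password=PASSWORD --docker-email=EMAIL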
--------------------------------------------------------------------------------
/example_pod/terminate-grace-period-sec.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: pods-termination-grace-period-seconds
6 | spec:
7 | containers:
8 | - command:
9 | - sleep
10 | - "3600"
11 | image: busybox
12 | name: pods-termination-grace-period-seconds
13 | terminationGracePeriodSeconds: 5 # Time to wait after sending a TERM signal to the pod's main process before sending a KILL signal.
14 |
--------------------------------------------------------------------------------
/example_pod/toleration.yaml:
--------------------------------------------------------------------------------
1 | # IMPORTANT:
2 | #
3 | # This example makes some assumptions:
4 | #
5 | # - There is one single node that is also a master (called 'master')
6 | # - The following command has been run: `kubectl taint nodes master pod-toleration:NoSchedule`
7 | #
8 | # Once the master node is tainted, a pod will not be scheduled on there (you can try the below yaml _without_ the spec.toleration to test this).
9 | #
10 | # CLEANUP:
11 | #
12 | # kubectl delete pod pod-toleration # delete pod
13 | # kubectl taint nodes master pod-toleration:NoSchedule- # delete taint
14 | ---
15 | apiVersion: v1
16 | kind: Pod
17 | metadata:
18 | name: pod-toleration
19 | spec:
20 | containers:
21 | - command: ["sleep", "3600"]
22 | image: busybox
23 | name: pod-toleration-container
24 | tolerations:
25 | - key: "" # empty means match all taint keys
26 | operator: Exists
27 |
--------------------------------------------------------------------------------
/example_pod/topology-spread-constraints-with-node-affinity.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
3 | kind: Pod
4 | apiVersion: v1
5 | metadata:
6 | name: topology-spread-constraints-with-node-affinity-pod
7 | labels:
8 | label1: value1
9 | spec:
10 | topologySpreadConstraints:
11 | - labelSelector:
12 | matchLabels:
13 | label1: value1
14 | maxSkew: 1
15 | topologyKey: zone
16 | whenUnsatisfiable: DoNotSchedule
17 | affinity:
18 | nodeAffinity:
19 | requiredDuringSchedulingIgnoredDuringExecution:
20 | nodeSelectorTerms:
21 | - matchExpressions:
22 | - key: zone
23 | operator: NotIn
24 | values:
25 | - zoneC
26 | containers:
27 | - name: pause
28 | image: k8s.gcr.io/pause:3.1
29 |
--------------------------------------------------------------------------------
/example_pod/topology-spread-constraints.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
3 | kind: Pod
4 | apiVersion: v1
5 | metadata:
6 | name: topology-spread-constraints-pod
7 | labels:
8 | label1: value1
9 | spec:
10 | topologySpreadConstraints:
11 | - maxSkew: 1
12 | topologyKey: zone
13 | whenUnsatisfiable: DoNotSchedule
14 | labelSelector:
15 | matchLabels:
16 | label1: value1
17 | containers:
18 | - name: pause
19 | image: k8s.gcr.io/pause:3.1
20 |
--------------------------------------------------------------------------------
/example_pod/volume-emptydir.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: volumes-emptydir-pod
6 | spec:
7 | containers:
8 | - command:
9 | - sleep
10 | - "3600"
11 | image: busybox
12 | name: volumes-emptydir-container
13 | volumeMounts:
14 | - mountPath: /volumes-emptydir-mount-path
15 | name: volumes-emptydir-volume
16 | # readOnly: true
17 | volumes:
18 | - name: volumes-emptydir-volume
19 | emptyDir: {}
20 |
--------------------------------------------------------------------------------
/example_pod/volume-hostdir.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: volumes-hostdir-pod
6 | spec:
7 | containers:
8 | - command:
9 | - sleep
10 | - "3600"
11 | image: busybox
12 | name: volumes-hostdir-container
13 | volumeMounts:
14 | - mountPath: /volumes-hostdir-mount-path
15 | name: volumes-hostdir-volume
16 | volumes:
17 | - hostPath:
18 | # directory location on host
19 | path: /tmp
20 | name: volumes-hostdir-volume
21 |
--------------------------------------------------------------------------------
/example_pod/volume-hostpath-file-or-create.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-fileorcreate
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: volumes-file-or-create-pod
7 | spec:
8 | containers:
9 | - command:
10 | - sleep
11 | - "3600"
12 | name: busybox
13 | image: busybox
14 | volumeMounts:
15 | - mountPath: /var/local/aaa
16 | name: volumes-file-or-create-dir
17 | - mountPath: /var/local/aaa/1.txt
18 | name: volumes-file-or-create-file
19 | volumes:
20 | - name: volumes-file-or-create-dir
21 | hostPath:
22 | # Ensure the file directory is created.
23 | path: /var/local/aaa
24 | type: DirectoryOrCreate
25 | - name: volumes-file-or-create-file
26 | hostPath:
27 | path: /var/local/aaa/1.txt
28 | type: FileOrCreate
29 |
--------------------------------------------------------------------------------
/example_pod/volume-persistent.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: volume-pvc
6 | spec:
7 | containers:
8 | - name: frontend
9 | image: nginx
10 | volumeMounts:
11 | - mountPath: /usr/share/nginx/html
12 | name: volume-pvc
13 | volumes:
14 | - name: volume-pvc
15 | persistentVolumeClaim:
16 | claimName: persistent-volume-claim
17 |
--------------------------------------------------------------------------------
/example_pod/volume-subpath.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath
3 | # Sometimes, it is useful to share one volume for multiple uses in a single Pod.
4 | # The volumeMounts.subPath property can be used to specify a sub-path inside the
5 | # referenced volume instead of its root.
6 | apiVersion: v1
7 | kind: Pod
8 | metadata:
9 | name: volumes-subpath-pod
10 | spec:
11 | containers:
12 | - env:
13 | - name: MYSQL_ROOT_PASSWORD
14 | value: "rootpasswd"
15 | image: mysql
16 | name: mysql
17 | volumeMounts:
18 | - mountPath: /var/lib/mysql
19 | name: site-data
20 | subPath: mysql
21 | - image: php:7.0-apache
22 | name: php
23 | volumeMounts:
24 | - mountPath: /var/www/html
25 | name: site-data
26 | subPath: html
27 | volumes:
28 | - name: site-data
29 | persistentVolumeClaim:
30 | claimName: my-lamp-site-data
31 |
--------------------------------------------------------------------------------
/example_pod/volume-subpathexpr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-with-expanded-environment-variables
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: volumes-subpathexpr-pod
7 | spec:
8 | containers:
9 | - command: ["sleep", "3600"]
10 | env:
11 | - name: POD_NAME
12 | valueFrom:
13 | fieldRef:
14 | apiVersion: v1
15 | fieldPath: metadata.name
16 | image: busybox
17 | name: volumes-subpathexpr-container
18 | volumeMounts:
19 | - name: volumes-subpathexpr-volume
20 | mountPath: /logs
21 | subPathExpr: $(POD_NAME)
22 | restartPolicy: Never
23 | volumes:
24 | - name: volumes-subpathexpr-volume
25 | hostPath:
26 | path: /var/log/pods
27 |
--------------------------------------------------------------------------------
/helm/elasticsearch/.helmignore:
--------------------------------------------------------------------------------
1 | tests/
2 | .pytest_cache/
3 |
--------------------------------------------------------------------------------
/helm/elasticsearch/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: 8.5.1
3 | description: Official Elastic helm chart for Elasticsearch
4 | home: https://github.com/elastic/helm-charts
5 | icon: https://helm.elastic.co/icons/elasticsearch.png
6 | maintainers:
7 | - email: helm-charts@elastic.co
8 | name: Elastic
9 | name: elasticsearch
10 | sources:
11 | - https://github.com/elastic/elasticsearch
12 | version: 8.5.1
13 |
--------------------------------------------------------------------------------
/helm/elasticsearch/Makefile:
--------------------------------------------------------------------------------
1 | include ../helpers/common.mk
2 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/config/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := helm-es-config
6 | TIMEOUT := 1200s
7 |
8 | install:
9 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
10 |
11 | secrets:
12 | kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
13 | kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
14 | kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
15 | kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
16 | kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test
17 |
18 | test: secrets install goss
19 |
20 | purge:
21 | helm del $(RELEASE)
22 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/config/README.md:
--------------------------------------------------------------------------------
1 | # Config
2 |
3 | This example deploys a single-node Elasticsearch 8.5.1 with authentication and
4 | custom [values][].
5 |
6 |
7 | ## Usage
8 |
9 | * Create the required secrets: `make secrets`
10 |
11 | * Deploy Elasticsearch chart with the default values: `make install`
12 |
13 | * You can now setup a port forward to query Elasticsearch API:
14 |
15 | ```
16 | kubectl port-forward svc/config-master 9200
17 | curl -u elastic:changeme http://localhost:9200/_cat/indices
18 | ```
19 |
20 |
21 | ## Testing
22 |
23 | You can also run [goss integration tests][] using `make test`
24 |
25 |
26 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/config/test/goss.yaml
27 | [values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/config/values.yaml
28 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/config/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | https://localhost:9200/_cluster/health:
3 | status: 200
4 | timeout: 2000
5 | allow-insecure: true
6 | username: elastic
7 | password: "{{ .Env.ELASTIC_PASSWORD }}"
8 | body:
9 | - "green"
10 | - '"number_of_nodes":1'
11 | - '"number_of_data_nodes":1'
12 |
13 | https://localhost:9200:
14 | status: 200
15 | timeout: 2000
16 | username: elastic
17 | allow-insecure: true
18 | password: "{{ .Env.ELASTIC_PASSWORD }}"
19 | body:
20 | - '"cluster_name" : "config"'
21 | - "You Know, for Search"
22 |
23 | command:
24 | "elasticsearch-keystore list":
25 | exit-status: 0
26 | stdout:
27 | - keystore.seed
28 | - bootstrap.password
29 | - xpack.notification.slack.account.monitoring.secure_url
30 | - xpack.notification.slack.account.otheraccount.secure_url
31 | - xpack.watcher.encryption_key
32 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/config/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | clusterName: "config"
3 | replicas: 1
4 |
5 | extraEnvs:
6 | - name: ELASTIC_PASSWORD
7 | valueFrom:
8 | secretKeyRef:
9 | name: elastic-config-credentials
10 | key: password
11 |
12 | # This is just a dummy file to make sure that
13 | # the keystore can be mounted at the same time
14 | # as a custom elasticsearch.yml
15 | esConfig:
16 | elasticsearch.yml: |
17 | xpack.security.enabled: true
18 | path.data: /usr/share/elasticsearch/data
19 |
20 | keystore:
21 | - secretName: elastic-config-secret
22 | - secretName: elastic-config-slack
23 | - secretName: elastic-config-custom-path
24 | items:
25 | - key: slack_url
26 | path: xpack.notification.slack.account.otheraccount.secure_url
27 |
28 | secret:
29 | enabled: false
30 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/config/watcher_encryption_key:
--------------------------------------------------------------------------------
1 | supersecret
2 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/default/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := helm-es-default
6 | TIMEOUT := 1200s
7 |
8 | install:
9 | helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../
10 |
11 | test: install goss
12 |
13 | purge:
14 | helm del $(RELEASE)
15 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/default/README.md:
--------------------------------------------------------------------------------
1 | # Default
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster using
4 | [default values][].
5 |
6 |
7 | ## Usage
8 |
9 | * Deploy Elasticsearch chart with the default values: `make install`
10 |
11 | * You can now setup a port forward to query Elasticsearch API:
12 |
13 | ```
14 | kubectl port-forward svc/elasticsearch-master 9200
15 | curl localhost:9200/_cat/indices
16 | ```
17 |
18 |
19 | ## Testing
20 |
21 | You can also run [goss integration tests][] using `make test`
22 |
23 |
24 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/default/test/goss.yaml
25 | [default values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/values.yaml
26 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/default/rolling_upgrade.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash -x
2 |
3 | kubectl proxy || true &
4 |
5 | make &
6 | PROC_ID=$!
7 |
8 | while kill -0 "$PROC_ID" >/dev/null 2>&1; do
9 | echo "PROCESS IS RUNNING"
10 | if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then
11 | echo "cluster is healthy"
12 | else
13 | echo "cluster not healthy!"
14 | exit 1
15 | fi
16 | sleep 1
17 | done
18 | echo "PROCESS TERMINATED"
19 | exit 0
20 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/default/test/goss.yaml:
--------------------------------------------------------------------------------
1 | kernel-param:
2 | vm.max_map_count:
3 | value: "262144"
4 |
5 | http:
6 | https://elasticsearch-master:9200/_cluster/health:
7 | status: 200
8 | timeout: 2000
9 | username: elastic
10 | allow-insecure: true
11 | password: "{{ .Env.ELASTIC_PASSWORD }}"
12 | body:
13 | - "green"
14 | - '"number_of_nodes":3'
15 | - '"number_of_data_nodes":3'
16 |
17 | https://localhost:9200:
18 | status: 200
19 | timeout: 2000
20 | allow-insecure: true
21 | username: elastic
22 | password: "{{ .Env.ELASTIC_PASSWORD }}"
23 | body:
24 | - '"number" : "8.5.1"'
25 | - '"cluster_name" : "elasticsearch"'
26 | - "You Know, for Search"
27 |
28 | file:
29 | /usr/share/elasticsearch/data:
30 | exists: true
31 | mode: "2775"
32 | owner: root
33 | group: elasticsearch
34 | filetype: directory
35 |
36 | mount:
37 | /usr/share/elasticsearch/data:
38 | exists: true
39 |
40 | user:
41 | elasticsearch:
42 | exists: true
43 | uid: 1000
44 | gid: 1000
45 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/docker-for-mac/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | RELEASE := helm-es-docker-for-mac
4 | TIMEOUT := 1200s
5 |
6 | install:
7 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
8 |
9 | test: install
10 | helm test $(RELEASE)
11 |
12 | purge:
13 | helm del $(RELEASE)
14 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/docker-for-mac/README.md:
--------------------------------------------------------------------------------
1 | # Docker for Mac
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster on [Docker for Mac][]
4 | using [custom values][].
5 |
6 | Note that this configuration should be used for testing only and isn't recommended
7 | for production.
8 |
9 |
10 | ## Usage
11 |
12 | * Deploy Elasticsearch chart with the default values: `make install`
13 |
14 | * You can now setup a port forward to query Elasticsearch API:
15 |
16 | ```
17 | kubectl port-forward svc/elasticsearch-master 9200
18 | curl localhost:9200/_cat/indices
19 | ```
20 |
21 |
22 | [custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/docker-for-mac/values.yaml
23 | [docker for mac]: https://docs.docker.com/docker-for-mac/kubernetes/
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/docker-for-mac/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Permit co-located instances for solitary minikube virtual machines.
3 | antiAffinity: "soft"
4 |
5 | # Shrink default JVM heap.
6 | esJavaOpts: "-Xmx128m -Xms128m"
7 |
8 | # Allocate smaller chunks of memory per pod.
9 | resources:
10 | requests:
11 | cpu: "100m"
12 | memory: "512M"
13 | limits:
14 | cpu: "1000m"
15 | memory: "512M"
16 |
17 | # Request smaller persistent volumes.
18 | volumeClaimTemplate:
19 | accessModes: [ "ReadWriteOnce" ]
20 | storageClassName: "hostpath"
21 | resources:
22 | requests:
23 | storage: 100M
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/kubernetes-kind/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | RELEASE := helm-es-kind
4 | TIMEOUT := 1200s
5 |
6 | install:
7 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
8 |
9 | install-local-path:
10 | kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
11 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values-local-path.yaml $(RELEASE) ../../
12 |
13 | test: install
14 | helm test $(RELEASE)
15 |
16 | purge:
17 | helm del $(RELEASE)
18 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/kubernetes-kind/README.md:
--------------------------------------------------------------------------------
1 | # KIND
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster on [Kind][]
4 | using [custom values][].
5 |
6 | Note that this configuration should be used for testing only and isn't recommended
7 | for production.
8 |
9 | Note that Kind versions older than 0.7.0 are affected by a [kind issue][] where mount
10 | points created from PVCs are not writable by non-root users. [kubernetes-sigs/kind#1157][]
11 | fixed this in Kind 0.7.0.
12 |
13 | The workaround for Kind < 0.7.0 is to manually install
14 | [Rancher Local Path Provisioner][] and use the `local-path` storage class for
15 | Elasticsearch volumes (see [Makefile][] instructions).
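
A sketch of that workaround, mirroring the `install-local-path` target in the
[Makefile][] (the release name `helm-es-kind`, the timeout, and the relative chart
path are taken from there):

```
kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
helm upgrade --wait --timeout=1200s --install --values values-local-path.yaml helm-es-kind ../../
```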
16 |
17 |
18 | ## Usage
19 |
20 | * For Kind >= 0.7.0: Deploy Elasticsearch chart with the default values: `make install`
21 | * For Kind < 0.7.0: Deploy Elasticsearch chart with `local-path` storage class: `make install-local-path`
22 |
23 | * You can now set up a port forward to query the Elasticsearch API:
24 |
25 | ```
26 | kubectl port-forward svc/elasticsearch-master 9200
27 | curl localhost:9200/_cat/indices
28 | ```
29 |
30 |
31 | [custom values]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/kubernetes-kind/values.yaml
32 | [kind]: https://kind.sigs.k8s.io/
33 | [kind issue]: https://github.com/kubernetes-sigs/kind/issues/830
34 | [kubernetes-sigs/kind#1157]: https://github.com/kubernetes-sigs/kind/pull/1157
35 | [rancher local path provisioner]: https://github.com/rancher/local-path-provisioner
36 | [Makefile]: https://github.com/elastic/helm-charts/blob/main/elasticsearch/examples/kubernetes-kind/Makefile
37 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/kubernetes-kind/values-local-path.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Permit co-located instances on a single-node Kind cluster.
3 | antiAffinity: "soft"
4 |
5 | # Shrink default JVM heap.
6 | esJavaOpts: "-Xmx128m -Xms128m"
7 |
8 | # Allocate smaller chunks of memory per pod.
9 | resources:
10 | requests:
11 | cpu: "100m"
12 | memory: "512M"
13 | limits:
14 | cpu: "1000m"
15 | memory: "512M"
16 |
17 | # Request smaller persistent volumes.
18 | volumeClaimTemplate:
19 | accessModes: [ "ReadWriteOnce" ]
20 | storageClassName: "local-path"
21 | resources:
22 | requests:
23 | storage: 100M
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/kubernetes-kind/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Permit co-located instances on a single-node Kind cluster.
3 | antiAffinity: "soft"
4 |
5 | # Shrink default JVM heap.
6 | esJavaOpts: "-Xmx128m -Xms128m"
7 |
8 | # Allocate smaller chunks of memory per pod.
9 | resources:
10 | requests:
11 | cpu: "100m"
12 | memory: "512M"
13 | limits:
14 | cpu: "1000m"
15 | memory: "512M"
16 |
17 | # Request smaller persistent volumes.
18 | volumeClaimTemplate:
19 | accessModes: [ "ReadWriteOnce" ]
20 | storageClassName: "local-path"
21 | resources:
22 | requests:
23 | storage: 100M
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/microk8s/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | RELEASE := helm-es-microk8s
4 | TIMEOUT := 1200s
5 |
6 | install:
7 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
8 |
9 | test: install
10 | helm test $(RELEASE)
11 |
12 | purge:
13 | helm del $(RELEASE)
14 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/microk8s/README.md:
--------------------------------------------------------------------------------
1 | # MicroK8S
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster on [MicroK8S][]
4 | using [custom values][].
5 |
6 | Note that this configuration should be used for testing only and isn't recommended
7 | for production.
8 |
9 |
10 | ## Requirements
11 |
12 | The following MicroK8S [addons][] need to be enabled:
13 | - `dns`
14 | - `helm`
15 | - `storage`
16 |
17 |
18 | ## Usage
19 |
20 | * Deploy Elasticsearch chart with the default values: `make install`
21 |
22 | * You can now set up a port forward to query the Elasticsearch API:
23 |
24 | ```
25 | kubectl port-forward svc/elasticsearch-master 9200
26 | curl localhost:9200/_cat/indices
27 | ```
28 |
29 |
30 | [addons]: https://microk8s.io/docs/addons
31 | [custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/microk8s/values.yaml
32 | [MicroK8S]: https://microk8s.io
33 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/microk8s/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Disable privileged init Container creation.
3 | sysctlInitContainer:
4 | enabled: false
5 |
6 | # Restrict the use of memory mapping when sysctlInitContainer is disabled.
7 | esConfig:
8 | elasticsearch.yml: |
9 | node.store.allow_mmap: false
10 |
11 | # Permit co-located instances on a single-node MicroK8S cluster.
12 | antiAffinity: "soft"
13 |
14 | # Shrink default JVM heap.
15 | esJavaOpts: "-Xmx128m -Xms128m"
16 |
17 | # Allocate smaller chunks of memory per pod.
18 | resources:
19 | requests:
20 | cpu: "100m"
21 | memory: "512M"
22 | limits:
23 | cpu: "1000m"
24 | memory: "512M"
25 |
26 | # Request smaller persistent volumes.
27 | volumeClaimTemplate:
28 | accessModes: [ "ReadWriteOnce" ]
29 | storageClassName: "microk8s-hostpath"
30 | resources:
31 | requests:
32 | storage: 100M
33 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/migration/Makefile:
--------------------------------------------------------------------------------
1 | PREFIX := helm-es-migration
2 |
3 | data:
4 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../
5 |
6 | master:
7 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../
8 |
9 | client:
10 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../
11 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/migration/client.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | replicas: 2
3 |
4 | clusterName: "elasticsearch"
5 | nodeGroup: "client"
6 |
7 | esMajorVersion: 6
8 |
9 | roles: []
10 |
11 | volumeClaimTemplate:
12 | accessModes: ["ReadWriteOnce"]
13 | storageClassName: "standard"
14 | resources:
15 | requests:
16 | storage: 1Gi # Currently needed till pvcs are made optional
17 |
18 | persistence:
19 | enabled: false
20 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/migration/data.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | replicas: 2
3 |
4 | esMajorVersion: 6
5 |
6 | extraEnvs:
7 | - name: discovery.zen.ping.unicast.hosts
8 | value: "my-release-elasticsearch-discovery"
9 |
10 | clusterName: "elasticsearch"
11 | nodeGroup: "data"
12 |
13 | roles:
14 | - data
15 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/migration/master.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Temporarily set to 3 so we can scale up/down the old and new clusters
3 | # one at a time whilst always keeping 3 masters running
4 | replicas: 1
5 |
6 | esMajorVersion: 6
7 |
8 | extraEnvs:
9 | - name: discovery.zen.ping.unicast.hosts
10 | value: "my-release-elasticsearch-discovery"
11 |
12 | clusterName: "elasticsearch"
13 | nodeGroup: "master"
14 |
15 | roles:
16 | - master
17 |
18 | volumeClaimTemplate:
19 | accessModes: ["ReadWriteOnce"]
20 | storageClassName: "standard"
21 | resources:
22 | requests:
23 | storage: 4Gi
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/minikube/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | RELEASE := helm-es-minikube
4 | TIMEOUT := 1200s
5 |
6 | install:
7 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
8 |
9 | test: install
10 | helm test $(RELEASE)
11 |
12 | purge:
13 | helm del $(RELEASE)
14 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/minikube/README.md:
--------------------------------------------------------------------------------
1 | # Minikube
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster on [Minikube][]
4 | using [custom values][].
5 |
6 | If helm or kubectl timeouts occur, you may consider creating a minikube VM with
7 | more CPU cores or memory allocated.
8 |
9 | Note that this configuration should be used for testing only and isn't recommended
10 | for production.
11 |
12 |
13 | ## Requirements
14 |
15 | In order to properly support the required persistent volume claims for the
16 | Elasticsearch StatefulSet, the `default-storageclass` and `storage-provisioner`
17 | minikube addons must be enabled.
18 |
19 | ```
20 | minikube addons enable default-storageclass
21 | minikube addons enable storage-provisioner
22 | ```
23 |
24 |
25 | ## Usage
26 |
27 | * Deploy Elasticsearch chart with the default values: `make install`
28 |
29 | * You can now set up a port forward to query the Elasticsearch API:
30 |
31 | ```
32 | kubectl port-forward svc/elasticsearch-master 9200
33 | curl localhost:9200/_cat/indices
34 | ```
35 |
36 |
37 | [custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/minikube/values.yaml
38 | [minikube]: https://minikube.sigs.k8s.io/docs/
39 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/minikube/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Permit co-located instances for solitary minikube virtual machines.
3 | antiAffinity: "soft"
4 |
5 | # Shrink default JVM heap.
6 | esJavaOpts: "-Xmx128m -Xms128m"
7 |
8 | # Allocate smaller chunks of memory per pod.
9 | resources:
10 | requests:
11 | cpu: "100m"
12 | memory: "512M"
13 | limits:
14 | cpu: "1000m"
15 | memory: "512M"
16 |
17 | # Request smaller persistent volumes.
18 | volumeClaimTemplate:
19 | accessModes: [ "ReadWriteOnce" ]
20 | storageClassName: "standard"
21 | resources:
22 | requests:
23 | storage: 100M
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/multi/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | PREFIX := helm-es-multi
6 | RELEASE := helm-es-multi-master
7 | TIMEOUT := 1200s
8 |
9 | install:
10 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../
11 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../
12 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../
13 |
14 | test: install goss
15 |
16 | purge:
17 | helm del $(PREFIX)-master
18 | helm del $(PREFIX)-data
19 | helm del $(PREFIX)-client
20 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/multi/README.md:
--------------------------------------------------------------------------------
1 | # Multi
2 |
3 | This example deploys an Elasticsearch 8.5.1 cluster composed of 3 different Helm
4 | releases:
5 |
6 | - `helm-es-multi-master` for the 3 master nodes using [master values][]
7 | - `helm-es-multi-data` for the 3 data nodes using [data values][]
8 | - `helm-es-multi-client` for the 3 client nodes using [client values][]
9 |
10 | ## Usage
11 |
12 | * Deploy the 3 Elasticsearch releases: `make install`
13 |
14 | * You can now set up a port forward to query the Elasticsearch API:
15 |
16 | ```
17 | kubectl port-forward svc/multi-master 9200
18 | curl -u elastic:changeme http://localhost:9200/_cat/indices
19 | ```
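
To confirm that the three releases formed a single cluster, you can also list the
nodes; the [goss integration tests][] below expect `"number_of_nodes":9` and
`"number_of_data_nodes":3`. A sketch (switch to `https` and add `-k` if TLS is
enabled in your values):

```
curl -u elastic:changeme http://localhost:9200/_cat/nodes?v
curl -u elastic:changeme http://localhost:9200/_cluster/health?pretty
```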
20 |
21 | ## Testing
22 |
23 | You can also run [goss integration tests][] using `make test`.
24 |
25 |
26 | [client values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/client.yaml
27 | [data values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/data.yaml
28 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/test/goss.yaml
29 | [master values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/multi/master.yaml
30 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/multi/client.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | clusterName: "multi"
3 | nodeGroup: "client"
4 |
5 | extraEnvs:
6 | - name: ELASTIC_PASSWORD
7 | valueFrom:
8 | secretKeyRef:
9 | name: multi-master-credentials
10 | key: password
11 | - name: xpack.security.enabled
12 | value: "true"
13 | - name: xpack.security.transport.ssl.enabled
14 | value: "true"
15 | - name: xpack.security.http.ssl.enabled
16 | value: "true"
17 | - name: xpack.security.transport.ssl.verification_mode
18 | value: "certificate"
19 | - name: xpack.security.transport.ssl.key
20 | value: "/usr/share/elasticsearch/config/certs/tls.key"
21 | - name: xpack.security.transport.ssl.certificate
22 | value: "/usr/share/elasticsearch/config/certs/tls.crt"
23 | - name: xpack.security.transport.ssl.certificate_authorities
24 | value: "/usr/share/elasticsearch/config/certs/ca.crt"
25 | - name: xpack.security.http.ssl.key
26 | value: "/usr/share/elasticsearch/config/certs/tls.key"
27 | - name: xpack.security.http.ssl.certificate
28 | value: "/usr/share/elasticsearch/config/certs/tls.crt"
29 | - name: xpack.security.http.ssl.certificate_authorities
30 | value: "/usr/share/elasticsearch/config/certs/ca.crt"
31 |
32 | roles: []
33 |
34 | persistence:
35 | enabled: false
36 |
37 | # For client nodes, we also need to add an empty node.roles in elasticsearch.yml
38 | # This is due to https://github.com/elastic/helm-charts/pull/1186#discussion_r631225687
39 | esConfig:
40 | elasticsearch.yml: |
41 | node.roles: []
42 |
43 | secret:
44 | enabled: false
45 |
46 | createCert: false
47 | secretMounts:
48 | - name: elastic-certificates
49 | secretName: multi-master-certs
50 | path: /usr/share/elasticsearch/config/certs
51 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/multi/data.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | clusterName: "multi"
3 | nodeGroup: "data"
4 |
5 | extraEnvs:
6 | - name: ELASTIC_PASSWORD
7 | valueFrom:
8 | secretKeyRef:
9 | name: multi-master-credentials
10 | key: password
11 | - name: xpack.security.enabled
12 | value: "true"
13 | - name: xpack.security.transport.ssl.enabled
14 | value: "true"
15 | - name: xpack.security.http.ssl.enabled
16 | value: "true"
17 | - name: xpack.security.transport.ssl.verification_mode
18 | value: "certificate"
19 | - name: xpack.security.transport.ssl.key
20 | value: "/usr/share/elasticsearch/config/certs/tls.key"
21 | - name: xpack.security.transport.ssl.certificate
22 | value: "/usr/share/elasticsearch/config/certs/tls.crt"
23 | - name: xpack.security.transport.ssl.certificate_authorities
24 | value: "/usr/share/elasticsearch/config/certs/ca.crt"
25 | - name: xpack.security.http.ssl.key
26 | value: "/usr/share/elasticsearch/config/certs/tls.key"
27 | - name: xpack.security.http.ssl.certificate
28 | value: "/usr/share/elasticsearch/config/certs/tls.crt"
29 | - name: xpack.security.http.ssl.certificate_authorities
30 | value: "/usr/share/elasticsearch/config/certs/ca.crt"
31 |
32 | roles:
33 | - data
34 | - data_content
35 | - data_hot
36 | - data_warm
37 | - data_cold
38 | - data_frozen
39 | - ingest
40 |
41 | secret:
42 | enabled: false
43 |
44 | createCert: false
45 | secretMounts:
46 | - name: elastic-certificates
47 | secretName: multi-master-certs
48 | path: /usr/share/elasticsearch/config/certs
49 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/multi/master.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | clusterName: "multi"
3 | nodeGroup: "master"
4 |
5 | roles:
6 | - master
7 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/multi/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | https://localhost:9200/_cluster/health:
3 | status: 200
4 | timeout: 2000
5 | allow-insecure: true
6 | username: elastic
7 | password: "{{ .Env.ELASTIC_PASSWORD }}"
8 | body:
9 | - "green"
10 | - '"cluster_name":"multi"'
11 | - '"number_of_nodes":9'
12 | - '"number_of_data_nodes":3'
13 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/networkpolicy/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := helm-es-networkpolicy
6 | TIMEOUT := 1200s
7 |
8 | install:
9 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
10 |
11 | test: install goss
12 |
13 | purge:
14 | helm del $(RELEASE)
15 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/networkpolicy/values.yaml:
--------------------------------------------------------------------------------
1 | networkPolicy:
2 | http:
3 | enabled: true
4 | explicitNamespacesSelector:
5 | # Accept connections from whitelisted Pods in namespaces matching all of these rules
6 | matchLabels:
7 | role: frontend-http
8 | matchExpressions:
9 | - {key: role, operator: In, values: [frontend-http]}
10 | additionalRules:
11 | - podSelector:
12 | matchLabels:
13 | role: frontend-http
14 | - podSelector:
15 | matchExpressions:
16 | - key: role
17 | operator: In
18 | values:
19 | - frontend-http
20 | transport:
21 | enabled: true
22 | allowExternal: true
23 | explicitNamespacesSelector:
24 | matchLabels:
25 | role: frontend-transport
26 | matchExpressions:
27 | - {key: role, operator: In, values: [frontend-transport]}
28 | additionalRules:
29 | - podSelector:
30 | matchLabels:
31 | role: frontend-transport
32 | - podSelector:
33 | matchExpressions:
34 | - key: role
35 | operator: In
36 | values:
37 | - frontend-transport
38 |
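# Note: with these policies enabled, a client Pod must also carry the chart's client
# label to reach Elasticsearch (see templates/networkpolicy.yaml). A sketch of the
# label needed for HTTP access, assuming the default "elasticsearch-master" name:
#
#   metadata:
#     labels:
#       elasticsearch-master-http-client: "true"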
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/openshift/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := elasticsearch
6 |
7 | install:
8 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
9 |
10 | test: install goss
11 |
12 | purge:
13 | helm del $(RELEASE)
14 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/openshift/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster on [OpenShift][]
4 | using [custom values][].
5 |
6 | ## Usage
7 |
8 | * Deploy Elasticsearch chart with the default values: `make install`
9 |
10 | * You can now set up a port forward to query the Elasticsearch API:
11 |
12 | ```
13 | kubectl port-forward svc/elasticsearch-master 9200
14 | curl localhost:9200/_cat/indices
15 | ```
16 |
17 | ## Testing
18 |
19 | You can also run [goss integration tests][] using `make test`.
20 |
21 |
22 | [custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/values.yaml
23 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/test/goss.yaml
24 | [openshift]: https://www.openshift.com/
25 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/openshift/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | https://localhost:9200/_cluster/health:
3 | status: 200
4 | timeout: 2000
5 | username: elastic
6 | password: "{{ .Env.ELASTIC_PASSWORD }}"
7 | body:
8 | - "green"
9 | - '"number_of_nodes":3'
10 | - '"number_of_data_nodes":3'
11 |
12 | https://localhost:9200:
13 | status: 200
14 | timeout: 2000
15 | username: elastic
16 | password: "{{ .Env.ELASTIC_PASSWORD }}"
17 | body:
18 | - '"number" : "8.5.1"'
19 | - '"cluster_name" : "elasticsearch"'
20 | - "You Know, for Search"
21 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/openshift/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | securityContext:
4 | runAsUser: null
5 |
6 | podSecurityContext:
7 | fsGroup: null
8 | runAsUser: null
9 |
10 | sysctlInitContainer:
11 | enabled: false
12 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/security/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := helm-es-security
6 | ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION)
7 | TIMEOUT := 1200s
8 |
9 | install:
10 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
11 |
12 | test: secrets install goss
13 |
14 | purge:
15 | kubectl delete secrets elastic-certificates elastic-certificate-pem elastic-certificate-crt || true
16 | helm del $(RELEASE)
17 |
18 | pull-elasticsearch-image:
19 | docker pull $(ELASTICSEARCH_IMAGE)
20 |
21 | secrets:
22 | docker rm -f elastic-helm-charts-certs || true
23 | rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 || true
24 | docker run --name elastic-helm-charts-certs -i -w /tmp \
25 | $(ELASTICSEARCH_IMAGE) \
26 | /bin/sh -c " \
27 | elasticsearch-certutil ca --out /tmp/elastic-stack-ca.p12 --pass '' && \
28 | elasticsearch-certutil cert --name security-master --dns security-master --ca /tmp/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /tmp/elastic-certificates.p12" && \
29 | docker cp elastic-helm-charts-certs:/tmp/elastic-certificates.p12 ./ && \
30 | docker rm -f elastic-helm-charts-certs && \
31 | openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \
32 | openssl x509 -outform der -in elastic-certificate.pem -out elastic-certificate.crt && \
33 | kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \
34 | kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \
35 | kubectl create secret generic elastic-certificate-crt --from-file=elastic-certificate.crt && \
36 | rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12
37 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/security/README.md:
--------------------------------------------------------------------------------
1 | # Security
2 |
3 | This example deploys a 3-node Elasticsearch 8.5.1 cluster with authentication and
4 | autogenerated certificates for TLS (see [values][]).
5 |
6 | Note that this configuration should be used for testing only. For a production
7 | deployment you should generate SSL certificates following the [official docs][].
8 |
9 | ## Usage
10 |
11 | * Create the required secrets: `make secrets`
12 |
13 | * Deploy Elasticsearch chart with the default values: `make install`
14 |
15 | * You can now set up a port forward to query the Elasticsearch API:
16 |
17 | ```
18 | kubectl port-forward svc/security-master 9200
19 | curl -u elastic:changeme https://localhost:9200/_cat/indices
20 | ```
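
The password above (`changeme`) is only a placeholder: the chart autogenerates the
`elastic` password unless it is set explicitly in the values. A sketch for retrieving
it, assuming the secret follows the chart's `<clusterName>-<nodeGroup>-credentials`
naming (here `security-master-credentials`), with `-k` to skip the self-signed
certificate check:

```
PASSWORD=$(kubectl get secrets security-master-credentials -ojsonpath='{.data.password}' | base64 -d)
curl -k -u "elastic:$PASSWORD" https://localhost:9200/_cat/indices
```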
21 |
22 | ## Testing
23 |
24 | You can also run [goss integration tests][] using `make test`.
25 |
26 |
27 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/test/goss.yaml
28 | [official docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-tls.html#node-certificates
29 | [values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/values.yaml
30 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/security/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | https://security-master:9200/_cluster/health:
3 | status: 200
4 | timeout: 2000
5 | allow-insecure: true
6 | username: elastic
7 | password: "{{ .Env.ELASTIC_PASSWORD }}"
8 | body:
9 | - "green"
10 | - '"number_of_nodes":3'
11 | - '"number_of_data_nodes":3'
12 |
13 | https://localhost:9200/:
14 | status: 200
15 | timeout: 2000
16 | allow-insecure: true
17 | username: elastic
18 | password: "{{ .Env.ELASTIC_PASSWORD }}"
19 | body:
20 | - '"cluster_name" : "security"'
21 | - "You Know, for Search"
22 |
23 | https://localhost:9200/_license:
24 | status: 200
25 | timeout: 2000
26 | allow-insecure: true
27 | username: elastic
28 | password: "{{ .Env.ELASTIC_PASSWORD }}"
29 | body:
30 | - "active"
31 | - "basic"
32 |
33 | file:
34 | /usr/share/elasticsearch/config/elasticsearch.yml:
35 | exists: true
36 | contains:
37 | - "xpack.security.enabled: true"
38 | - "xpack.security.transport.ssl.enabled: true"
39 | - "xpack.security.transport.ssl.verification_mode: certificate"
40 | - "xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
41 | - "xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
42 | - "xpack.security.http.ssl.enabled: true"
43 | - "xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
44 | - "xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
45 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/security/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | clusterName: "security"
3 | nodeGroup: "master"
4 |
5 | createCert: false
6 |
7 | roles:
8 | - master
9 | - ingest
10 | - data
11 |
12 | protocol: https
13 |
14 | esConfig:
15 | elasticsearch.yml: |
16 | xpack.security.enabled: true
17 | xpack.security.transport.ssl.enabled: true
18 | xpack.security.transport.ssl.verification_mode: certificate
19 | xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
20 | xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
21 | xpack.security.http.ssl.enabled: true
22 | xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
23 | xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
24 |
25 | secretMounts:
26 | - name: elastic-certificates
27 | secretName: elastic-certificates
28 | path: /usr/share/elasticsearch/config/certs
29 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/upgrade/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | CHART := elasticsearch
6 | RELEASE := helm-es-upgrade
7 | FROM := 7.17.1 # upgrading from versions before 7.17.1 isn't compatible with 8.x
8 |
9 | install:
10 | ../../../helpers/upgrade.sh --chart $(CHART) --release $(RELEASE) --from $(FROM)
11 | # Rolling upgrade doesn't work when upgrading from clusters with security disabled.
12 | # This is because nodes with security enabled can't join a cluster with security disabled.
13 | # Every node needs to be recreated at the same time so they can form a new cluster with security enabled
14 | kubectl delete pod --selector=app=upgrade-master
15 |
16 | test: install goss
17 |
18 | purge:
19 | helm del $(RELEASE)
20 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/upgrade/README.md:
--------------------------------------------------------------------------------
1 | # Upgrade
2 |
3 | This example deploys a 3-node Elasticsearch cluster using an old chart
4 | version, then upgrades it.
5 |
6 |
7 | ## Usage
8 |
9 | * Deploy and upgrade Elasticsearch chart with the default values: `make install`
10 |
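What `make install` does is implemented in `../../../helpers/upgrade.sh` (not shown
here); a rough manual approximation, not the exact script, is to install chart version
7.17.1 from the public Elastic Helm repository, upgrade to the local chart, and then
recreate the pods because this example uses the `OnDelete` update strategy:

```
helm repo add elastic https://helm.elastic.co
helm install helm-es-upgrade elastic/elasticsearch --version 7.17.1 --values values.yaml
helm upgrade --wait helm-es-upgrade ../../ --values values.yaml
# updateStrategy is OnDelete, so pods must be recreated to pick up the new version
kubectl delete pod --selector=app=upgrade-master
```
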
11 |
12 | ## Testing
13 |
14 | You can also run [goss integration tests][] using `make test`.
15 |
16 |
17 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/upgrade/test/goss.yaml
18 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/upgrade/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | https://localhost:9200/_cluster/health:
3 | status: 200
4 | username: elastic
5 | password: "{{ .Env.ELASTIC_PASSWORD }}"
6 | allow-insecure: true
7 | timeout: 2000
8 | body:
9 | - "green"
10 | - '"number_of_nodes":3'
11 | - '"number_of_data_nodes":3'
12 |
13 | https://localhost:9200:
14 | status: 200
15 | username: elastic
16 | password: "{{ .Env.ELASTIC_PASSWORD }}"
17 | allow-insecure: true
18 | timeout: 2000
19 | body:
20 | - '"number" : "8.5.1"'
21 | - '"cluster_name" : "upgrade"'
22 | - "You Know, for Search"
23 |
--------------------------------------------------------------------------------
/helm/elasticsearch/examples/upgrade/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | clusterName: upgrade
3 | # Rolling upgrade doesn't work when upgrading from clusters with security disabled.
4 | # This is because nodes with security enabled can't join a cluster with security disabled.
5 | # Every node needs to be recreated at the same time so they can form a new cluster with security enabled
6 | updateStrategy: OnDelete
7 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Watch all cluster members come up.
2 | $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w
3 | 2. Retrieve elastic user's password.
4 | $ kubectl get secrets --namespace={{ .Release.Namespace }} {{ template "elasticsearch.uname" . }}-credentials -ojsonpath='{.data.password}' | base64 -d
5 | {{- if .Values.tests.enabled }}
6 | 3. Test cluster health using Helm test.
7 | $ helm --namespace={{ .Release.Namespace }} test {{ .Release.Name }}
8 | {{- end -}}
9 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.esConfig }}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: {{ template "elasticsearch.uname" . }}-config
7 | labels:
8 | heritage: {{ .Release.Service | quote }}
9 | release: {{ .Release.Name | quote }}
10 | chart: "{{ .Chart.Name }}"
11 | app: "{{ template "elasticsearch.uname" . }}"
12 | data:
13 | {{- range $path, $config := .Values.esConfig }}
14 | {{ $path }}: |
15 | {{ $config | indent 4 -}}
16 | {{- end -}}
17 | {{- end -}}
18 | {{- if .Values.esJvmOptions }}
19 | ---
20 | apiVersion: v1
21 | kind: ConfigMap
22 | metadata:
23 | name: {{ template "elasticsearch.uname" . }}-jvm-options
24 | labels:
25 | heritage: {{ .Release.Service | quote }}
26 | release: {{ .Release.Name | quote }}
27 | chart: "{{ .Chart.Name }}"
28 | app: "{{ template "elasticsearch.uname" . }}"
29 | data:
30 | {{- range $path, $config := .Values.esJvmOptions }}
31 | {{ $path }}: |
32 | {{ $config | indent 4 -}}
33 | {{- end -}}
34 | {{- end -}}
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := include "elasticsearch.uname" . -}}
3 | {{- $httpPort := .Values.httpPort -}}
4 | {{- $pathtype := .Values.ingress.pathtype -}}
5 | {{- $ingressPath := .Values.ingress.path -}}
6 | apiVersion: networking.k8s.io/v1
7 | kind: Ingress
8 | metadata:
9 | name: {{ $fullName }}
10 | labels:
11 | app: {{ .Chart.Name }}
12 | release: {{ .Release.Name }}
13 | heritage: {{ .Release.Service }}
14 | {{- with .Values.ingress.annotations }}
15 | annotations:
16 | {{ toYaml . | indent 4 }}
17 | {{- end }}
18 | spec:
19 | {{- if .Values.ingress.className }}
20 | ingressClassName: {{ .Values.ingress.className | quote }}
21 | {{- end }}
22 | {{- if .Values.ingress.tls }}
23 | tls:
24 | {{- if .ingressPath }}
25 | {{- range .Values.ingress.tls }}
26 | - hosts:
27 | {{- range .hosts }}
28 | - {{ . }}
29 | {{- end }}
30 | secretName: {{ .secretName }}
31 | {{- end }}
32 | {{- else }}
33 | {{ toYaml .Values.ingress.tls | indent 4 }}
34 | {{- end }}
35 | {{- end}}
36 | rules:
37 | {{- range .Values.ingress.hosts }}
38 | {{- if $ingressPath }}
39 | - host: {{ . }}
40 | http:
41 | paths:
42 | - path: {{ $ingressPath }}
43 | pathType: {{ $pathtype }}
44 | backend:
45 | service:
46 | name: {{ $fullName }}
47 | port:
48 | number: {{ $httpPort }}
49 | {{- else }}
50 | - host: {{ .host }}
51 | http:
52 | paths:
53 | {{- range .paths }}
54 | - path: {{ .path }}
55 | pathType: {{ $pathtype }}
56 | backend:
57 | service:
58 | name: {{ $fullName }}
59 | port:
60 | number: {{ .servicePort | default $httpPort }}
61 | {{- end }}
62 | {{- end }}
63 | {{- end }}
64 | {{- end }}
65 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/networkpolicy.yaml:
--------------------------------------------------------------------------------
1 | {{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }}
2 | kind: NetworkPolicy
3 | apiVersion: networking.k8s.io/v1
4 | metadata:
5 | name: {{ template "elasticsearch.uname" . }}
6 | labels:
7 | heritage: {{ .Release.Service | quote }}
8 | release: {{ .Release.Name | quote }}
9 | chart: "{{ .Chart.Name }}"
10 | app: "{{ template "elasticsearch.uname" . }}"
11 | spec:
12 | podSelector:
13 | matchLabels:
14 | app: "{{ template "elasticsearch.uname" . }}"
15 | ingress: # Allow inbound connections
16 |
17 | {{- if .Values.networkPolicy.http.enabled }}
18 | # For HTTP access
19 | - ports:
20 | - port: {{ .Values.httpPort }}
21 | from:
22 | # From authorized Pods (having the correct label)
23 | - podSelector:
24 | matchLabels:
25 | {{ template "elasticsearch.uname" . }}-http-client: "true"
26 | {{- with .Values.networkPolicy.http.explicitNamespacesSelector }}
27 | # From authorized namespaces
28 | namespaceSelector:
29 | {{ toYaml . | indent 12 }}
30 | {{- end }}
31 | {{- with .Values.networkPolicy.http.additionalRules }}
32 | # Or from custom additional rules
33 | {{ toYaml . | indent 8 }}
34 | {{- end }}
35 | {{- end }}
36 |
37 | {{- if .Values.networkPolicy.transport.enabled }}
38 | # For transport access
39 | - ports:
40 | - port: {{ .Values.transportPort }}
41 | from:
42 | # From authorized Pods (having the correct label)
43 | - podSelector:
44 | matchLabels:
45 | {{ template "elasticsearch.uname" . }}-transport-client: "true"
46 | {{- with .Values.networkPolicy.transport.explicitNamespacesSelector }}
47 | # From authorized namespaces
48 | namespaceSelector:
49 | {{ toYaml . | indent 12 }}
50 | {{- end }}
51 | {{- with .Values.networkPolicy.transport.additionalRules }}
52 | # Or from custom additional rules
53 | {{ toYaml . | indent 8 }}
54 | {{- end }}
55 | # Or from other Elasticsearch Pods
56 | - podSelector:
57 | matchLabels:
58 | app: "{{ template "elasticsearch.uname" . }}"
59 | {{- end }}
60 |
61 | {{- end }}
62 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/poddisruptionbudget.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.maxUnavailable }}
2 | {{- if .Capabilities.APIVersions.Has "policy/v1" -}}
3 | apiVersion: policy/v1
4 | {{- else}}
5 | apiVersion: policy/v1beta1
6 | {{- end }}
7 | kind: PodDisruptionBudget
8 | metadata:
9 | name: "{{ template "elasticsearch.uname" . }}-pdb"
10 | spec:
11 | maxUnavailable: {{ .Values.maxUnavailable }}
12 | selector:
13 | matchLabels:
14 | app: "{{ template "elasticsearch.uname" . }}"
15 | {{- end }}
16 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/podsecuritypolicy.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.podSecurityPolicy.create -}}
2 | {{- $fullName := include "elasticsearch.uname" . -}}
3 | apiVersion: policy/v1beta1
4 | kind: PodSecurityPolicy
5 | metadata:
6 | name: {{ default $fullName .Values.podSecurityPolicy.name | quote }}
7 | labels:
8 | heritage: {{ .Release.Service | quote }}
9 | release: {{ .Release.Name | quote }}
10 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
11 | app: {{ $fullName | quote }}
12 | spec:
13 | {{ toYaml .Values.podSecurityPolicy.spec | indent 2 }}
14 | {{- end -}}
15 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/role.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create -}}
2 | {{- $fullName := include "elasticsearch.uname" . -}}
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: Role
5 | metadata:
6 | name: {{ $fullName | quote }}
7 | labels:
8 | heritage: {{ .Release.Service | quote }}
9 | release: {{ .Release.Name | quote }}
10 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
11 | app: {{ $fullName | quote }}
12 | rules:
13 | - apiGroups:
14 | - extensions
15 | resources:
16 | - podsecuritypolicies
17 | resourceNames:
18 | {{- if eq .Values.podSecurityPolicy.name "" }}
19 | - {{ $fullName | quote }}
20 | {{- else }}
21 | - {{ .Values.podSecurityPolicy.name | quote }}
22 | {{- end }}
23 | verbs:
24 | - use
25 | {{- end -}}
26 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/rolebinding.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create -}}
2 | {{- $fullName := include "elasticsearch.uname" . -}}
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: RoleBinding
5 | metadata:
6 | name: {{ $fullName | quote }}
7 | labels:
8 | heritage: {{ .Release.Service | quote }}
9 | release: {{ .Release.Name | quote }}
10 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
11 | app: {{ $fullName | quote }}
12 | subjects:
13 | - kind: ServiceAccount
14 | name: "{{ template "elasticsearch.serviceAccount" . }}"
15 | namespace: {{ .Release.Namespace | quote }}
16 | roleRef:
17 | kind: Role
18 | name: {{ $fullName | quote }}
19 | apiGroup: rbac.authorization.k8s.io
20 | {{- end -}}
21 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/secret-cert.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.createCert }}
2 | apiVersion: v1
3 | kind: Secret
4 | type: kubernetes.io/tls
5 | metadata:
6 | name: {{ template "elasticsearch.uname" . }}-certs
7 | labels:
8 | app: {{ template "elasticsearch.uname" . }}
9 | chart: "{{ .Chart.Name }}"
10 | heritage: {{ .Release.Service }}
11 | release: {{ .Release.Name }}
12 | data:
13 | {{ ( include "elasticsearch.gen-certs" . ) | indent 2 }}
14 | {{- end }}
15 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.secret.enabled -}}
2 | {{- $passwordValue := (randAlphaNum 16) | b64enc | quote }}
3 | apiVersion: v1
4 | kind: Secret
5 | metadata:
6 | name: {{ template "elasticsearch.uname" . }}-credentials
7 | labels:
8 | heritage: {{ .Release.Service | quote }}
9 | release: {{ .Release.Name | quote }}
10 | chart: "{{ .Chart.Name }}"
11 | app: "{{ template "elasticsearch.uname" . }}"
12 | {{- range $key, $value := .Values.labels }}
13 | {{ $key }}: {{ $value | quote }}
14 | {{- end }}
15 | type: Opaque
16 | data:
17 | username: {{ "elastic" | b64enc }}
18 | {{- if .Values.secret.password }}
19 | password: {{ .Values.secret.password | b64enc }}
20 | {{- else }}
21 | password: {{ $passwordValue }}
22 | {{- end }}
23 | {{- end }}
24 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.rbac.create -}}
2 | {{- $fullName := include "elasticsearch.uname" . -}}
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | name: "{{ template "elasticsearch.serviceAccount" . }}"
7 | annotations:
8 | {{- with .Values.rbac.serviceAccountAnnotations }}
9 | {{- toYaml . | nindent 4 }}
10 | {{- end }}
11 | labels:
12 | heritage: {{ .Release.Service | quote }}
13 | release: {{ .Release.Name | quote }}
14 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
15 | app: {{ $fullName | quote }}
16 | {{- end -}}
17 |
--------------------------------------------------------------------------------
/helm/elasticsearch/templates/test/test-elasticsearch-health.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.tests.enabled -}}
2 | ---
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | {{- if .Values.healthNameOverride }}
7 | name: {{ .Values.healthNameOverride | quote }}
8 | {{- else }}
9 | name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test"
10 | {{- end }}
11 | annotations:
12 | "helm.sh/hook": test
13 | "helm.sh/hook-delete-policy": hook-succeeded
14 | spec:
15 | securityContext:
16 | {{ toYaml .Values.podSecurityContext | indent 4 }}
17 | containers:
18 | {{- if .Values.healthNameOverride }}
19 | - name: {{ .Values.healthNameOverride | quote }}
20 | {{- else }}
21 | - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test"
22 | {{- end }}
23 | env:
24 | - name: ELASTIC_PASSWORD
25 | valueFrom:
26 | secretKeyRef:
27 | name: {{ template "elasticsearch.uname" . }}-credentials
28 | key: password
29 | image: "{{ .Values.image }}:{{ .Values.imageTag }}"
30 | imagePullPolicy: "{{ .Values.imagePullPolicy }}"
31 | command:
32 | - "sh"
33 | - "-c"
34 | - |
35 | #!/usr/bin/env bash -e
36 | curl -XGET --fail --cacert /usr/share/elasticsearch/config/certs/tls.crt -u "elastic:${ELASTIC_PASSWORD}" https://'{{ template "elasticsearch.uname" . }}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}'
37 | volumeMounts:
38 | - name: elasticsearch-certs
39 | mountPath: /usr/share/elasticsearch/config/certs
40 | readOnly: true
41 | {{- if .Values.imagePullSecrets }}
42 | imagePullSecrets:
43 | {{ toYaml .Values.imagePullSecrets | indent 4 }}
44 | {{- end }}
45 | restartPolicy: Never
46 | volumes:
47 | - name: elasticsearch-certs
48 | secret:
49 | secretName: {{ template "elasticsearch.uname" . }}-certs
50 | {{- end -}}
51 |
--------------------------------------------------------------------------------
/helm/elasticsearch/values-master.yaml:
--------------------------------------------------------------------------------
1 | # A single-node cluster is sufficient for local development and testing
2 | replicas: 1
3 | #minimumMasterNodes: 1
4 |
5 | multiNode: false
6 |
7 | # All nodes share the same roles (a common small/medium-scale architecture); do not set roles for a single-node cluster!
8 | #roles:
9 | # - master
10 | # - data
11 | # - remote_cluster_client
12 |
13 | #image: "docker.io/library/elasticsearch"
14 | image: "elasticsearch"
15 | imageTag: "8.11.3"
16 |
17 | # Production usually sets a nodeSelector to deploy onto specific nodes; tests don't persist data, so there's no requirement here
18 | #nodeSelector:
19 | # kubernetes.io/hostname: k8s-node1
20 |
21 | #esConfig:
22 | # elasticsearch.yml: |
23 | # xpack.security.enabled: false
24 |
25 | # Lower resource requests for testing
26 | resources:
27 | requests:
28 | cpu: "100m"
29 | memory: "512Mi"
30 | limits:
31 | cpu: "1000m"
32 | memory: "2Gi"
33 |
34 | # Set the ES password (leave empty to auto-generate)
35 | secret:
36 | enabled: true
37 | password: "123456"
38 |
39 | # Enable SSL for ES transport and HTTP communication
40 | createCert: true
41 |
42 | # Protocol used to access ES (https here, since TLS is enabled above; production should also use https)
43 | protocol: https
44 |
45 | # To schedule pods onto master (control-plane) nodes, tolerations are required
46 | #tolerations:
47 | # - key: node-role.kubernetes.io/control-plane
48 | # effect: NoSchedule
49 |
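# A minimal way to apply this values file (a sketch; the release name "es-master" and the
# chart path are illustrative and depend on where you run helm from):
#   helm upgrade --install es-master ./helm/elasticsearch -f ./helm/elasticsearch/values-master.yaml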
--------------------------------------------------------------------------------
/helm/example-chart/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/helm/example-chart/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: example-chart
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.1.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.1"
25 |
--------------------------------------------------------------------------------
/helm/example-chart/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Get the application URL by running these commands:
2 | {{- if .Values.ingress.enabled }}
3 | {{- range $host := .Values.ingress.hosts }}
4 | {{- range .paths }}
5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
6 | {{- end }}
7 | {{- end }}
8 | {{- else if contains "NodePort" .Values.service.type }}
9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "example-chart.fullname" . }})
10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
11 | echo http://$NODE_IP:$NODE_PORT
12 | {{- else if contains "LoadBalancer" .Values.service.type }}
13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
14 | You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "example-chart.fullname" . }}'
15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "example-chart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
16 | echo http://$SERVICE_IP:{{ .Values.service.port }}
17 | {{- else if contains "ClusterIP" .Values.service.type }}
18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "example-chart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
20 | echo "Visit http://127.0.0.1:8080 to use your application"
21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/helm/example-chart/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "example-chart.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "example-chart.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "example-chart.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "example-chart.labels" -}}
37 | helm.sh/chart: {{ include "example-chart.chart" . }}
38 | {{ include "example-chart.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "example-chart.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "example-chart.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "example-chart.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "example-chart.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
--------------------------------------------------------------------------------
/helm/example-chart/templates/hpa.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.autoscaling.enabled }}
2 | apiVersion: autoscaling/v2
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 | name: {{ include "example-chart.fullname" . }}
6 | labels:
7 | {{- include "example-chart.labels" . | nindent 4 }}
8 | spec:
9 | scaleTargetRef:
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | name: {{ include "example-chart.fullname" . }}
13 | minReplicas: {{ .Values.autoscaling.minReplicas }}
14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }}
15 | metrics:
16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
17 | - type: Resource
18 | resource:
19 | name: cpu
20 | target:
21 | type: Utilization
22 | averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
23 | {{- end }}
24 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
25 | - type: Resource
26 | resource:
27 | name: memory
28 | target:
29 | type: Utilization
30 | averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
31 | {{- end }}
32 | {{- end }}
33 |
--------------------------------------------------------------------------------
/helm/example-chart/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "example-chart.fullname" . }}
5 | labels:
6 | {{- include "example-chart.labels" . | nindent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | {{- include "example-chart.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/helm/example-chart/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "example-chart.serviceAccountName" . }}
6 | labels:
7 | {{- include "example-chart.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
13 | {{- end }}
14 |
--------------------------------------------------------------------------------
/helm/example-chart/templates/tests/test-connection.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ include "example-chart.fullname" . }}-test-connection"
5 | labels:
6 | {{- include "example-chart.labels" . | nindent 4 }}
7 | annotations:
8 | "helm.sh/hook": test
9 | spec:
10 | containers:
11 | - name: wget
12 | image: busybox
13 | command: ['wget']
14 | args: ['{{ include "example-chart.fullname" . }}:{{ .Values.service.port }}']
15 | restartPolicy: Never
16 |
--------------------------------------------------------------------------------
/helm/kibana/.helmignore:
--------------------------------------------------------------------------------
1 | tests/
2 | .pytest_cache/
3 |
--------------------------------------------------------------------------------
/helm/kibana/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: 8.5.1
3 | description: Official Elastic helm chart for Kibana
4 | home: https://github.com/elastic/helm-charts
5 | icon: https://helm.elastic.co/icons/kibana.png
6 | maintainers:
7 | - email: helm-charts@elastic.co
8 | name: Elastic
9 | name: kibana
10 | sources:
11 | - https://github.com/elastic/kibana
12 | version: 8.5.1
13 |
--------------------------------------------------------------------------------
/helm/kibana/Makefile:
--------------------------------------------------------------------------------
1 | include ../helpers/common.mk
2 |
--------------------------------------------------------------------------------
/helm/kibana/examples/default/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := helm-kibana-default
6 |
7 | install:
8 | helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../
9 |
10 | test: install goss
11 |
12 | purge:
13 | helm del $(RELEASE)
14 |
--------------------------------------------------------------------------------
/helm/kibana/examples/default/README.md:
--------------------------------------------------------------------------------
1 | # Default
2 |
3 | This example deploys Kibana 8.5.1 using [default values][].
4 |
5 |
6 | ## Usage
7 |
8 | * Deploy [Elasticsearch Helm chart][].
9 |
10 | * Deploy Kibana chart with the default values: `make install`
11 |
12 | * You can now retrieve the `elastic` user password and set up a port forward to connect to Kibana:
13 |
14 | ```
15 | # Get elastic user password:
16 | kubectl get secrets --namespace=default elasticsearch-master-credentials -ojsonpath='{.data.password}' | base64 -d
17 | # Setup port forward
18 | kubectl port-forward svc/helm-kibana-default-kibana 5601
19 | ```
20 |
21 |
22 | ## Testing
23 |
24 | You can also run [goss integration tests][] using `make test`
25 |
26 |
27 | [elasticsearch helm chart]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/default/
28 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/kibana/examples/default/test/goss.yaml
29 | [default values]: https://github.com/elastic/helm-charts/tree/main/kibana/values.yaml
30 |
--------------------------------------------------------------------------------
/helm/kibana/examples/default/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | http://localhost:5601/api/status:
3 | status: 200
4 | timeout: 2000
5 | request-headers:
6 | - "Authorization: Bearer {{ .Env.ELASTICSEARCH_SERVICEACCOUNTTOKEN}}"
7 | body:
8 | - '"number":"8.5.1"'
9 |
10 | http://localhost:5601/app/kibana:
11 | status: 200
12 | timeout: 2000
13 |
14 | http://helm-kibana-default-kibana:5601/app/kibana:
15 | status: 200
16 | timeout: 2000
17 |
--------------------------------------------------------------------------------
/helm/kibana/examples/openshift/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 | include ../../../helpers/examples.mk
3 |
4 | RELEASE := kibana
5 |
6 | template:
7 | helm template --values values.yaml ../../
8 |
9 | install:
10 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
11 |
12 | test: install goss
13 |
14 | purge:
15 | helm del $(RELEASE)
16 |
--------------------------------------------------------------------------------
/helm/kibana/examples/openshift/README.md:
--------------------------------------------------------------------------------
1 | # OpenShift
2 |
3 | This example deploys Kibana 8.5.1 on [OpenShift][] using [custom values][].
4 |
5 | ## Usage
6 |
7 | * Deploy [Elasticsearch Helm chart][].
8 |
9 | * Deploy Kibana chart with the custom values: `make install`
10 |
11 | * You can now set up a port forward to query the Elasticsearch API:
12 |
13 | ```
14 | kubectl port-forward svc/elasticsearch-master 9200
15 | curl localhost:9200/_cat/indices
16 | ```
17 |
18 | ## Testing
19 |
20 | You can also run [goss integration tests][] using `make test`
21 |
22 |
23 | [custom values]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/values.yaml
24 | [elasticsearch helm chart]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/
25 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/openshift/test/goss.yaml
26 | [openshift]: https://www.openshift.com/
27 |
--------------------------------------------------------------------------------
/helm/kibana/examples/openshift/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | http://localhost:5601/app/kibana:
3 | status: 200
4 | timeout: 2000
5 |
--------------------------------------------------------------------------------
/helm/kibana/examples/openshift/values.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | podSecurityContext:
4 | fsGroup: null
5 |
6 | securityContext:
7 | runAsUser: null
8 |
--------------------------------------------------------------------------------
/helm/kibana/examples/security/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | RELEASE := helm-kibana-security
6 |
7 | install:
8 | helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
9 |
10 | test: secrets install goss
11 |
12 | purge:
13 | kubectl delete secret kibana || true
14 | helm del $(RELEASE)
15 |
16 | secrets:
17 | encryptionkey=$$(docker run --rm docker.elastic.co/kibana/kibana:$(STACK_VERSION) /bin/sh -c "< /dev/urandom tr -dc _A-Za-z0-9 | head -c50") && \
18 | kubectl create secret generic kibana --from-literal=encryptionkey=$$encryptionkey
19 |
--------------------------------------------------------------------------------
/helm/kibana/examples/security/README.md:
--------------------------------------------------------------------------------
1 | # Security
2 |
3 | This example deploys Kibana 8.5.1 using authentication and TLS to connect to
4 | Elasticsearch (see [values][]).
5 |
6 |
7 | ## Usage
8 |
9 | * Deploy [Elasticsearch Helm chart][].
10 |
11 | * Deploy Kibana chart with security: `make secrets install`
12 |
13 | * You can now retrieve the `elastic` user password and set up a port forward to connect to Kibana:
14 |
15 | ```
16 | # Get elastic user password:
17 | kubectl get secrets --namespace=default security-master-credentials -ojsonpath='{.data.password}' | base64 -d
18 | # Setup port forward
19 | kubectl port-forward svc/helm-kibana-security-kibana 5601
20 | ```
21 |
22 |
23 | ## Testing
24 |
25 | You can also run [goss integration tests][] using `make test`
26 |
27 |
28 | [elasticsearch helm chart]: https://github.com/elastic/helm-charts/tree/main/elasticsearch/examples/security/
29 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/kibana/examples/security/test/goss.yaml
30 | [values]: https://github.com/elastic/helm-charts/tree/main/kibana/examples/security/values.yaml
31 |
--------------------------------------------------------------------------------
/helm/kibana/examples/security/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | https://localhost:5601/api/status:
3 | status: 200
4 | timeout: 2000
5 | allow-insecure: true
6 | request-headers:
7 | - "Authorization: Bearer {{ .Env.ELASTICSEARCH_SERVICEACCOUNTTOKEN}}"
8 | body:
9 | - '"number":"8.5.1"'
10 |
11 | file:
12 | /usr/share/kibana/config/kibana.yml:
13 | exists: true
14 | contains:
15 | - "server.ssl.certificate: /usr/share/kibana/config/certs/elastic-certificate.pem"
16 | - "server.ssl.enabled: true"
17 | - "server.ssl.key: /usr/share/kibana/config/certs/elastic-certificate.pem"
18 | - "xpack.security.encryptionKey:"
19 |
--------------------------------------------------------------------------------
/helm/kibana/examples/security/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | elasticsearchHosts: "https://security-master:9200"
3 | elasticsearchCertificateSecret: elastic-certificate-pem
4 | elasticsearchCertificateAuthoritiesFile: elastic-certificate.pem
5 | elasticsearchCredentialSecret: security-master-credentials
6 |
7 | extraEnvs:
8 | - name: "KIBANA_ENCRYPTION_KEY"
9 | valueFrom:
10 | secretKeyRef:
11 | name: kibana
12 | key: encryptionkey
13 |
14 | kibanaConfig:
15 | kibana.yml: |
16 | server.ssl.certificate: /usr/share/kibana/config/certs/elastic-certificate.pem
17 | server.ssl.enabled: true
18 | server.ssl.key: /usr/share/kibana/config/certs/elastic-certificate.pem
19 | xpack.security.encryptionKey: ${KIBANA_ENCRYPTION_KEY}
20 |
21 | protocol: https
22 |
--------------------------------------------------------------------------------
/helm/kibana/examples/upgrade/Makefile:
--------------------------------------------------------------------------------
1 | default: test
2 |
3 | include ../../../helpers/examples.mk
4 |
5 | CHART := kibana
6 | ES_CLUSTER := kibana-upgrade
7 | ES_RELEASE := helm-kibana-upgrade-es
8 | RELEASE := helm-kibana-upgrade-kb
9 | FROM := 7.17.1
10 |
11 | repo:
12 | helm repo add elastic https://helm.elastic.co
13 |
14 | install-es:
15 | helm install $(ES_RELEASE) elastic/elasticsearch --wait --version $(FROM) --set clusterName=$(ES_CLUSTER)
16 |
17 | install-kb:
18 | helm install $(RELEASE) elastic/kibana --wait --version $(FROM) --set elasticsearchHosts="http://$(ES_CLUSTER)-master:9200"
19 |
20 | upgrade-es:
21 | helm upgrade $(ES_RELEASE) ../../../elasticsearch/ --wait --set clusterName=$(ES_CLUSTER) --set updateStrategy=OnDelete
22 | # Rollout ES pods
23 | kubectl delete pod --selector=app=$(ES_CLUSTER)-master
24 |
25 | upgrade-kb:
26 | helm upgrade $(RELEASE) ../../ --wait --values values.yaml
27 | kubectl rollout status deployment $(RELEASE)-kibana
28 |
29 | install: repo install-es install-kb upgrade-es upgrade-kb
30 |
31 | test: install goss
32 |
33 | purge:
34 | helm delete $(RELEASE)
35 | helm delete $(ES_RELEASE)
36 | kubectl delete $$(kubectl get pvc -o name | grep $(ES_CLUSTER))
37 |
--------------------------------------------------------------------------------
/helm/kibana/examples/upgrade/README.md:
--------------------------------------------------------------------------------
1 | # Upgrade
2 |
3 | This example will deploy the Kibana chart using an old chart version,
4 | then upgrade it.
5 |
6 |
7 | ## Usage
8 |
9 | * Add the Elastic Helm charts repo: `helm repo add elastic https://helm.elastic.co`
10 |
11 | * Deploy [Elasticsearch Helm chart][]: `helm install elasticsearch elastic/elasticsearch`
12 |
13 | * Deploy and upgrade Kibana chart with the default values: `make install`
14 |
15 |
16 | ## Testing
17 |
18 | You can also run [goss integration tests][] using `make test`.
19 |
20 |
21 | [goss integration tests]: https://github.com/elastic/helm-charts/tree/main/kibana/examples/upgrade/test/goss.yaml
22 |
--------------------------------------------------------------------------------
/helm/kibana/examples/upgrade/test/goss.yaml:
--------------------------------------------------------------------------------
1 | http:
2 | http://localhost:5601/api/status:
3 | status: 200
4 | timeout: 2000
5 | request-headers:
6 | - "Authorization: Bearer {{ .Env.ELASTICSEARCH_SERVICEACCOUNTTOKEN}}"
7 | body:
8 | - '"number":"8.5.1"'
9 |
10 | http://localhost:5601/app/kibana:
11 | status: 200
12 | timeout: 2000
13 |
14 | http://helm-kibana-upgrade-kb-kibana:5601/app/kibana:
15 | status: 200
16 | timeout: 2000
17 |
--------------------------------------------------------------------------------
/helm/kibana/examples/upgrade/values.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | elasticsearchHosts: "https://kibana-upgrade-master:9200"
3 | elasticsearchCertificateSecret: kibana-upgrade-master-certs
4 | elasticsearchCertificateAuthoritiesFile: ca.crt
5 | elasticsearchCredentialSecret: kibana-upgrade-master-credentials
6 |
--------------------------------------------------------------------------------
/helm/kibana/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Watch all containers come up.
2 | $ kubectl get pods --namespace={{ .Release.Namespace }} -l release={{ .Release.Name }} -w
3 | 2. Retrieve the elastic user's password.
4 | $ kubectl get secrets --namespace={{ .Release.Namespace }} {{ .Values.elasticsearchCredentialSecret }} -ojsonpath='{.data.password}' | base64 -d
5 | 3. Retrieve the kibana service account token.
6 | $ kubectl get secrets --namespace={{ .Release.Namespace }} {{ template "kibana.fullname" . }}-es-token -ojsonpath='{.data.token}' | base64 -d
7 |
--------------------------------------------------------------------------------
/helm/kibana/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "kibana.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified app name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | */}}
13 | {{- define "kibana.fullname" -}}
14 | {{- if .Values.fullnameOverride -}}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
16 | {{- else -}}
17 | {{- $name := default .Release.Name .Values.nameOverride -}}
18 | {{- printf "%s-%s" $name .Chart.Name | trunc 63 | trimSuffix "-" -}}
19 | {{- end -}}
20 | {{- end -}}
21 |
22 | {{/*
23 | Common labels
24 | */}}
25 | {{- define "kibana.labels" -}}
26 | app: {{ .Chart.Name }}
27 | release: {{ .Release.Name | quote }}
28 | heritage: {{ .Release.Service }}
29 | {{- if .Values.labels }}
30 | {{ toYaml .Values.labels }}
31 | {{- end }}
32 | {{- end -}}
33 |
34 | {{- define "kibana.home_dir" -}}
35 | /usr/share/kibana
36 | {{- end -}}
37 |
--------------------------------------------------------------------------------
/helm/kibana/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.kibanaConfig }}
2 | ---
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: {{ template "kibana.fullname" . }}-config
7 | labels: {{ include "kibana.labels" . | nindent 4 }}
8 | data:
9 | {{- range $path, $config := .Values.kibanaConfig }}
10 | {{ $path }}: |
11 | {{ tpl $config $ | indent 4 -}}
12 | {{- end -}}
13 | {{- end -}}
14 |
--------------------------------------------------------------------------------
/helm/kibana/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := include "kibana.fullname" . -}}
3 | {{- $httpPort := .Values.httpPort -}}
4 | {{- $pathtype := .Values.ingress.pathtype -}}
5 | {{- $ingressPath := .Values.ingress.path -}}
6 | apiVersion: networking.k8s.io/v1
7 | kind: Ingress
8 | metadata:
9 | name: {{ $fullName }}
10 | labels: {{ include "kibana.labels" . | nindent 4 }}
11 | {{- with .Values.ingress.annotations }}
12 | annotations:
13 | {{ toYaml . | indent 4 }}
14 | {{- end }}
15 | spec:
16 | {{- if .Values.ingress.className }}
17 | ingressClassName: {{ .Values.ingress.className | quote }}
18 | {{- end }}
19 | {{- if .Values.ingress.tls }}
20 | tls:
21 | {{- if .ingressPath }}
22 | {{- range .Values.ingress.tls }}
23 | - hosts:
24 | {{- range .hosts }}
25 | - {{ . }}
26 | {{- end }}
27 | secretName: {{ .secretName }}
28 | {{- end }}
29 | {{- else }}
30 | {{ toYaml .Values.ingress.tls | indent 4 }}
31 | {{- end }}
32 | {{- end }}
33 | rules:
34 | {{- range .Values.ingress.hosts }}
35 | {{- if $ingressPath }}
36 | - host: {{ . }}
37 | http:
38 | paths:
39 | - path: {{ $ingressPath }}
40 | pathType: {{ $pathtype }}
41 | backend:
42 | service:
43 | name: {{ $fullName }}
44 | port:
45 | number: {{ $httpPort }}
46 | {{- else }}
47 | - host: {{ .host }}
48 | http:
49 | paths:
50 | {{- range .paths }}
51 | - path: {{ .path }}
52 | pathType: {{ $pathtype }}
53 | backend:
54 | service:
55 | name: {{ $fullName }}
56 | port:
57 | number: {{ .servicePort | default $httpPort }}
58 | {{- end }}
59 | {{- end }}
60 | {{- end }}
61 | {{- end }}
62 |
--------------------------------------------------------------------------------
/helm/kibana/templates/post-delete-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | name: post-delete-{{ template "kibana.fullname" . }}
5 | labels: {{ include "kibana.labels" . | nindent 4 }}
6 | annotations:
7 | "helm.sh/hook": post-delete
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | {{- if .Values.annotations }}
10 | {{- range $key, $value := .Values.annotations }}
11 | {{ $key }}: {{ $value | quote }}
12 | {{- end }}
13 | {{- end }}
14 | rules:
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - secrets
19 | verbs:
20 | - delete
21 |
--------------------------------------------------------------------------------
/helm/kibana/templates/post-delete-rolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: post-delete-{{ template "kibana.fullname" . }}
5 | labels: {{ include "kibana.labels" . | nindent 4 }}
6 | annotations:
7 | "helm.sh/hook": post-delete
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | {{- if .Values.annotations }}
10 | {{- range $key, $value := .Values.annotations }}
11 | {{ $key }}: {{ $value | quote }}
12 | {{- end }}
13 | {{- end }}
14 | subjects:
15 | - kind: ServiceAccount
16 | name: post-delete-{{ template "kibana.fullname" . }}
17 | namespace: {{ .Release.Namespace | quote }}
18 | roleRef:
19 | kind: Role
20 | name: post-delete-{{ template "kibana.fullname" . }}
21 | apiGroup: rbac.authorization.k8s.io
22 |
--------------------------------------------------------------------------------
/helm/kibana/templates/post-delete-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: post-delete-{{ template "kibana.fullname" . }}
5 | labels: {{ include "kibana.labels" . | nindent 4 }}
6 | annotations:
7 | "helm.sh/hook": post-delete
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | {{- if .Values.annotations }}
10 | {{- range $key, $value := .Values.annotations }}
11 | {{ $key }}: {{ $value | quote }}
12 | {{- end }}
13 | {{- end }}
14 |
--------------------------------------------------------------------------------
/helm/kibana/templates/pre-install-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | name: pre-install-{{ template "kibana.fullname" . }}
5 | labels: {{ include "kibana.labels" . | nindent 4 }}
6 | annotations:
7 | "helm.sh/hook": pre-install,pre-upgrade
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | {{- if .Values.annotations }}
10 | {{- range $key, $value := .Values.annotations }}
11 | {{ $key }}: {{ $value | quote }}
12 | {{- end }}
13 | {{- end }}
14 | rules:
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - secrets
19 | verbs:
20 | - create
21 | - update
22 |
--------------------------------------------------------------------------------
/helm/kibana/templates/pre-install-rolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: pre-install-{{ template "kibana.fullname" . }}
5 | labels: {{ include "kibana.labels" . | nindent 4 }}
6 | annotations:
7 | "helm.sh/hook": pre-install,pre-upgrade
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | {{- if .Values.annotations }}
10 | {{- range $key, $value := .Values.annotations }}
11 | {{ $key }}: {{ $value | quote }}
12 | {{- end }}
13 | {{- end }}
14 | subjects:
15 | - kind: ServiceAccount
16 | name: pre-install-{{ template "kibana.fullname" . }}
17 | namespace: {{ .Release.Namespace | quote }}
18 | roleRef:
19 | kind: Role
20 | name: pre-install-{{ template "kibana.fullname" . }}
21 | apiGroup: rbac.authorization.k8s.io
22 |
--------------------------------------------------------------------------------
/helm/kibana/templates/pre-install-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: pre-install-{{ template "kibana.fullname" . }}
5 | labels: {{ include "kibana.labels" . | nindent 4 }}
6 | annotations:
7 | "helm.sh/hook": pre-install,pre-upgrade
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | {{- if .Values.annotations }}
10 | {{- range $key, $value := .Values.annotations }}
11 | {{ $key }}: {{ $value | quote }}
12 | {{- end }}
13 | {{- end }}
14 |
--------------------------------------------------------------------------------
/helm/kibana/templates/service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: {{ template "kibana.fullname" . }}
6 | labels: {{ include "kibana.labels" . | nindent 4 }}
7 | {{- if .Values.service.labels }}
8 | {{ toYaml .Values.service.labels | indent 4}}
9 | {{- end }}
10 | {{- with .Values.service.annotations }}
11 | annotations:
12 | {{ toYaml . | indent 4 }}
13 | {{- end }}
14 | spec:
15 | type: {{ .Values.service.type }}
16 | {{- if .Values.service.loadBalancerIP }}
17 | loadBalancerIP: {{ .Values.service.loadBalancerIP }}
18 | {{- end }}
19 | {{- with .Values.service.loadBalancerSourceRanges }}
20 | loadBalancerSourceRanges:
21 | {{ toYaml . | indent 4 }}
22 | {{- end }}
23 | ports:
24 | - port: {{ .Values.service.port }}
25 | {{- if .Values.service.nodePort }}
26 | nodePort: {{ .Values.service.nodePort }}
27 | {{- end }}
28 | protocol: TCP
29 | name: {{ .Values.service.httpPortName | default "http" }}
30 | targetPort: {{ .Values.httpPort }}
31 | selector:
32 | app: {{ .Chart.Name }}
33 | release: {{ .Release.Name | quote }}
34 |
--------------------------------------------------------------------------------
/helm/kibana/values-deploy.yaml:
--------------------------------------------------------------------------------
1 | elasticsearchHosts: "https://elasticsearch-master:9200"
2 | elasticsearchCertificateSecret: elasticsearch-master-certs
3 | elasticsearchCertificateAuthoritiesFile: ca.crt
4 | elasticsearchCredentialSecret: elasticsearch-master-credentials
5 |
6 | # The author's test environment is short on memory, so resource usage is reduced a bit
7 | resources:
8 | requests:
9 | cpu: "100m"
10 | memory: "200Mi"
11 | limits:
12 | cpu: "1000m"
13 | memory: "2Gi"
14 |
15 | # Use the image from Docker Hub (the official registry) to speed up the download
16 | image: "kibana"
17 | imageTag: "8.11.3"
18 |
19 |
20 | # Web UI port, defaults to 5601
21 | httpPort: 5601
22 |
23 | # Set the Kibana UI language to Chinese
24 | kibanaConfig:
25 | kibana.yml: |
26 | i18n.locale: "zh-CN"
27 |
28 | # For easier testing (skipping ingress), the Service uses the NodePort type
29 | service:
30 | type: NodePort
31 | loadBalancerIP: ""
32 | port: 5601
33 | nodePort: 30080
34 |
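35 | # A minimal install sketch using this values file (the release name "kibana" and the
36 | # local chart path ./helm/kibana are assumptions, adjust to your setup):
37 | #   helm install kibana ./helm/kibana -f values-deploy.yaml
38 | #   kubectl get svc | grep kibana   # the NodePort 30080 configured above should show up here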
--------------------------------------------------------------------------------
/hpa_nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: nginx-hpa-test
5 | spec:
6 |   # Bind the resource object to autoscale, e.g. a Deployment or StatefulSet
7 | scaleTargetRef:
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | name: nginx-hpa-test
11 |   # Set the minimum and maximum replica counts for scaling
12 | minReplicas: 1
13 | maxReplicas: 5
14 |   # Set the metrics that scaling decisions are based on (multiple metrics may be configured)
15 |   # This metric targets an average CPU utilization of about 50% across the Pods; above that the HPA considers adding Pods, below it removing Pods
16 | metrics:
17 | - type: Resource
18 | resource:
19 |         name: cpu # or memory (note: the Pods of the managed Deployment must set requests.cpu or requests.memory, otherwise the HPA reports an error)
20 | target:
21 |           type: Utilization # the other allowed value is AverageValue, which targets an absolute value
22 | averageUtilization: 50
23 | # averageValue: 200m # required when target.type=AverageValue; the value is a plain number or one with the "m" (milli) suffix, i.e. 1000m = 1. For the memory resource it is usually a plain number of bytes
24 | # - type: ContainerResource # since K8s 1.27, metrics can be scoped to an individual container
25 | # containerResource:
26 | # name: cpu
27 | # container: application
28 | # target:
29 | # type: Utilization
30 | # averageUtilization: 60
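31 | # A minimal sketch (illustrative, not part of this manifest) of what the target Deployment's
32 | # Pod template must declare, since Utilization is computed against the requested amount:
33 | #   resources:
34 | #     requests:
35 | #       cpu: 100m
36 | # Watch the autoscaler react with: kubectl get hpa nginx-hpa-test -w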
--------------------------------------------------------------------------------
/hpa_nginx_behavior.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: nginx-hpa-behavior-test
5 | spec:
6 |   # Bind the resource object to autoscale, e.g. a Deployment or StatefulSet
7 | scaleTargetRef:
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | name: nginx-hpa-test
11 |   # Set the minimum and maximum replica counts for scaling
12 | minReplicas: 1
13 | maxReplicas: 5
14 |   # Set the metrics that scaling decisions are based on (multiple metrics may be configured)
15 |   # This metric targets an average CPU utilization of about 50% across the Pods; above that the HPA considers adding Pods, below it removing Pods
16 | metrics:
17 | - type: Resource
18 | resource:
19 | name: cpu
20 | target:
21 | type: Utilization
22 | averageUtilization: 50
23 |   behavior: # optional field that fine-tunes the scaling behavior
24 |     scaleDown: # scaleDown controls the behavior when scaling in
25 |       policies: # when multiple policies exist, the one allowing the largest change is selected by default
26 |         # the first policy allows removing at most 4 Pod replicas within 60s
27 | - type: Pods
28 | value: 4
29 |           periodSeconds: 60 # maximum 1800 (half an hour)
30 |         # the second policy allows removing at most 10% of the current replicas within 60s
31 | - type: Percent
32 | value: 10
33 | periodSeconds: 60
34 |       selectPolicy: Disabled # disables the scaleDown policies entirely (the default behavior is not applied either, so scale-in never happens). Other values: Max / Min, which pick the policy whose computed replica change is the largest / smallest on each scaling step
35 |       # this field is the stabilization window (in seconds) for scale-in
36 |       # when the metric keeps fluctuating, the stabilization window limits replica-count churn (i.e. avoids Pods being created/deleted in quick succession). Default 300 (5 min)
37 |       # the default can be changed with the kube-controller-manager flag --horizontal-pod-autoscaler-downscale-stabilization
38 | stabilizationWindowSeconds: 10
39 |     scaleUp: # scaleUp controls the behavior when scaling out
40 | policies:
41 | - periodSeconds: 15
42 | type: Pods
43 | value: 4
44 | - periodSeconds: 15
45 | type: Percent
46 | value: 100
47 | selectPolicy: Max
48 | stabilizationWindowSeconds: 10
49 |
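50 | # A worked reading of the scaleUp block above (illustrative): with 4 current replicas, the Pods
51 | # policy allows adding 4 replicas per 15s and the Percent policy allows +100% (also 4);
52 | # selectPolicy: Max picks the larger change, so one 15s period could go from 4 to 8 replicas,
53 | # capped here by maxReplicas: 5.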
--------------------------------------------------------------------------------
/hpa_nginx_behavior_default.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: nginx-hpa-behavior-default-test
5 | spec:
6 |   # Bind the resource object to autoscale, e.g. a Deployment or StatefulSet
7 | scaleTargetRef:
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | name: nginx-hpa-test
11 |   # Set the minimum and maximum replica counts for scaling
12 | minReplicas: 1
13 | maxReplicas: 5
14 |   # Set the metrics that scaling decisions are based on (multiple metrics may be configured)
15 |   # This metric targets an average CPU utilization of about 50% across the Pods; above that the HPA considers adding Pods, below it removing Pods
16 | metrics:
17 | - type: Resource
18 | resource:
19 | name: cpu
20 | target:
21 | type: Utilization
22 | averageUtilization: 50
23 |   behavior: # the following is the default scaling behavior in K8s
24 | scaleDown:
25 | stabilizationWindowSeconds: 300
26 | policies:
27 | - type: Percent
28 | value: 100
29 | periodSeconds: 15
30 | scaleUp:
31 | stabilizationWindowSeconds: 0
32 | policies:
33 | - type: Percent
34 | value: 100
35 | periodSeconds: 15
36 | - type: Pods
37 | value: 4
38 | periodSeconds: 15
39 | selectPolicy: Max
--------------------------------------------------------------------------------
/img/es_recv_filebeat_data.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/es_recv_filebeat_data.jpg
--------------------------------------------------------------------------------
/img/filebeat.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/filebeat.png
--------------------------------------------------------------------------------
/img/filebeat_log.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/filebeat_log.jpg
--------------------------------------------------------------------------------
/img/istio-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/istio-architecture.png
--------------------------------------------------------------------------------
/img/k8s-arch.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/k8s-arch.webp
--------------------------------------------------------------------------------
/img/k8s-loadbalancer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/k8s-loadbalancer.png
--------------------------------------------------------------------------------
/img/k8s-object-model.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/k8s-object-model.jpg
--------------------------------------------------------------------------------
/img/k9s.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/k9s.png
--------------------------------------------------------------------------------
/img/kibana-backing-index.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-backing-index.jpg
--------------------------------------------------------------------------------
/img/kibana-check-json.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-check-json.jpg
--------------------------------------------------------------------------------
/img/kibana-dataview-use-guide.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-dataview-use-guide.jpg
--------------------------------------------------------------------------------
/img/kibana-dataview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-dataview.jpg
--------------------------------------------------------------------------------
/img/kibana-discover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-discover.jpg
--------------------------------------------------------------------------------
/img/kibana-ds-index-detail.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-ds-index-detail.jpg
--------------------------------------------------------------------------------
/img/kibana-ds.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-ds.jpg
--------------------------------------------------------------------------------
/img/kibana-new-dataview.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/kibana-new-dataview.jpg
--------------------------------------------------------------------------------
/img/prometheus_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/prometheus_architecture.png
--------------------------------------------------------------------------------
/img/service-mesh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/service-mesh.jpg
--------------------------------------------------------------------------------
/img/toc.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chaseSpace/k8s-tutorial-cn/d18d86d82c96ab92914bc8aaa3c58bf44eda6a15/img/toc.jpg
--------------------------------------------------------------------------------
/ingress-hellok8s-cert.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: hellok8s-ingress
5 |   annotations: # key-value annotations that further configure the controller's behavior, e.g. route rewriting, forcing HTTPS, etc.
6 |     # annotation options provided by nginx ingress: https://kubernetes.github.io/ingress-nginx/examples/rewrite/#deployment
7 |     nginx.ingress.kubernetes.io/ssl-redirect: "false" # disable the HTTP-to-HTTPS redirect; it defaults to true when the ingress has a certificate configured
8 | spec:
9 |   ingressClassName: nginx # specify the ingress controller
10 | tls:
11 | - hosts:
12 | - hellok8s.foo.com
13 |       secretName: hellok8s-tls # references the secret name configured in secret-hellok8s-cert.yaml
14 | rules:
15 | - host: hellok8s.foo.com
16 |       http:
17 | paths:
18 | - path: /hello
19 | pathType: Prefix
20 | backend:
21 | service:
22 | name: service-hellok8s-clusterip
23 | port:
24 | number: 3000
25 |
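26 | # A hedged test sketch (the node IP and the HTTPS NodePort of the ingress controller are assumptions):
27 | #   curl -k --resolve hellok8s.foo.com:<https-nodeport>:<node-ip> https://hellok8s.foo.com/hello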
--------------------------------------------------------------------------------
/ingress-hellok8s-defaultbackend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: hellok8s-ingress
5 | spec:
6 |   ingressClassName: nginx # specify the ingress controller
7 |   defaultBackend: # the "default backend" receives traffic that is not matched by any rule
8 | service:
9 | name: service-hellok8s-clusterip
10 | port:
11 | number: 3000
12 | rules:
13 | - http:
14 | paths:
15 | - path: /hello
16 | pathType: Prefix
17 | backend:
18 | service:
19 | name: service-hellok8s-clusterip
20 | port:
21 | number: 3000
--------------------------------------------------------------------------------
/ingress-hellok8s-host.yaml:
--------------------------------------------------------------------------------
1 | #$ echo '127.0.0.1 hellok8s.foo.com' >> /etc/hosts
2 | #$ curl hellok8s.foo.com:31504/hello
3 | # [v3] Hello, Kubernetes!, From host: hellok8s-go-http-6df8b5c5d7-ll82f
4 | apiVersion: networking.k8s.io/v1
5 | kind: Ingress
6 | metadata:
7 | name: hellok8s-ingress
8 | spec:
9 |   ingressClassName: nginx # specify the ingress controller
10 | rules:
11 |     - host: hellok8s.foo.com # once a host is configured, the service can no longer be accessed via IP (a 404 is returned)
12 | http:
13 | paths:
14 | - path: /hello
15 | pathType: Prefix
16 | backend:
17 | service:
18 | name: service-hellok8s-clusterip
19 | port:
20 | number: 3000
--------------------------------------------------------------------------------
/ingress-hellok8s-regex.yaml:
--------------------------------------------------------------------------------
1 | # $ curl 127.0.0.1:31504/hello/now_time
2 | # [v3] Hello, Kubernetes!, now time: 2023-10-29 14:42:58.419522481 +0800 CST m=+36.879122145
3 | # $ curl 127.0.0.1:31504/hello/
4 | # [v3] Hello, Kubernetes!, From host: hellok8s-go-http-6df8b5c5d7-ll82f
5 |
6 | apiVersion: networking.k8s.io/v1
7 | kind: Ingress
8 | metadata:
9 | name: hellok8s-ingress
10 |   annotations: # key-value annotations that further configure the controller's behavior, e.g. route rewriting, forcing HTTPS, etc.
11 |     # annotation options provided by nginx ingress: https://kubernetes.github.io/ingress-nginx/examples/rewrite/#deployment
12 |     # nginx.ingress.kubernetes.io/ssl-redirect: "false" # disable the HTTP-to-HTTPS redirect; it defaults to true when the ingress has a certificate configured
13 |
14 |     # to support regex matching in paths, the following two annotations must be configured
15 | nginx.ingress.kubernetes.io/use-regex: "true"
16 |     nginx.ingress.kubernetes.io/rewrite-target: /$1 # optional; if the path matches a regex with a capture group, the forwarded path becomes the first captured group; if it doesn't match, no rewrite happens
17 | spec:
18 |   ingressClassName: nginx # specify the ingress controller
19 | rules:
20 | - http:
21 | paths:
22 |           - path: /hello/(.*) # regex match; the parenthesized part is a capture group
23 | pathType: Prefix
24 | backend:
25 | service:
26 | name: service-hellok8s-clusterip
27 | port:
28 | number: 3000
--------------------------------------------------------------------------------
/ingress-hellok8s.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: hellok8s-ingress
5 |   annotations: # annotations further control the nginx controller's behavior, see https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/
6 |     nginx.ingress.kubernetes.io/rewrite-target: / # after a route matches, rewrite the path to /; e.g. /hello is rewritten to /
7 | spec:
8 |   ingressClassName: nginx # specify the ingress controller
9 | rules:
10 | - http:
11 | paths:
12 |           - path: /hello # matches /hello/123 but not /hello123
13 |             # path matching types
14 |             # - Prefix: prefix match; the path is passed through as-is, not truncated
15 |             # - Exact: matches the URL path exactly, case-sensitively
16 |             # - ImplementationSpecific: the default matching mode; behavior is decided by the ingressClass
17 | pathType: Prefix
18 | backend:
19 | service:
20 | name: service-hellok8s-clusterip
21 | port:
22 | number: 3000
23 |
24 | - path: /httpd
25 | pathType: Prefix
26 | backend:
27 | service:
28 | name: service-httpd
29 | port:
30 | number: 8080
31 |
--------------------------------------------------------------------------------
/install_by_minikube/nginx.yaml:
--------------------------------------------------------------------------------
1 | # nginx.yaml
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: nginx-pod
6 | spec:
7 | containers:
8 | - name: nginx-container
9 | image: nginx
--------------------------------------------------------------------------------
/k8s_account_tokens.csv:
--------------------------------------------------------------------------------
1 | nlZtQeHoS8k0Pvbe,user3,3
2 | nxdt123445k0P21d,user4,4
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/base_manifest/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: go-multiroute-v1
5 | data:
6 | config.yml: |
7 | routes:
8 | /route1:
9 | response: "route1's content"
10 | /route2:
11 | response: "route2's content"
12 | /test_timeout:
13 | response: "test_timeout's content"
14 | duration: 3
15 | /test_limiter:
16 | response: "test_limiter's content\n"
17 | duration: 1
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/base_manifest/deployment-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: go-multiroute-v2
5 | labels:
6 |     app: web-backend # multiple Deployments may share the same app name; for example, a backend service usually consists of several applications.
7 |     app/env: prod # updated by the CD script
8 | spec:
9 | replicas: 2
10 | selector:
11 | matchLabels:
12 | app: go-multiroute
13 | version: v2
14 | template:
15 | metadata:
16 | labels:
17 | app: go-multiroute
18 | version: v2
19 | spec:
20 | containers:
21 | - image: docker.io/leigg/go_multiroute:v1
22 | name: go-multiroute
23 | command: [ "/main","-config", "/config/config.yml" ]
24 | resources:
25 | requests:
26 | cpu: 100m
27 | memory: 10Mi
28 | limits:
29 | cpu: 100m
30 | memory: 15Mi
31 | volumeMounts:
32 | - name: config
33 | mountPath: "/config"
34 | env:
35 | - name: VERSION
36 | value: v2
37 | - name: POD_IP
38 | valueFrom:
39 | fieldRef:
40 | fieldPath: status.podIP
41 | - name: DB_PASS
42 | valueFrom:
43 | secretKeyRef:
44 | key: db_pass
45 | name: go-multiroute-v1
46 | volumes:
47 | - name: config
48 | configMap:
49 | name: go-multiroute-v1
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/base_manifest/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: go-multiroute
5 | labels:
6 |     app: web-backend # multiple Deployments may share the same app name; for example, a backend service usually consists of several applications.
7 |     app/env: prod # updated by the CD script
8 | spec:
9 | replicas: 2
10 | selector:
11 | matchLabels:
12 | app: go-multiroute
13 | version: v1
14 | template:
15 | metadata:
16 | labels:
17 | app: go-multiroute
18 | version: v1
19 | spec:
20 | containers:
21 | - image: docker.io/leigg/go_multiroute:v1
22 | name: go-multiroute
23 | command: [ "/main","-config", "/config/config.yml" ]
24 | resources:
25 | requests:
26 | cpu: 100m
27 | memory: 10Mi
28 | limits:
29 | cpu: 100m
30 | memory: 15Mi
31 | volumeMounts:
32 | - name: config
33 | mountPath: "/config"
34 | env:
35 | - name: VERSION
36 | value: v1
37 | - name: POD_IP
38 | valueFrom:
39 | fieldRef:
40 | fieldPath: status.podIP
41 | - name: DB_PASS
42 | valueFrom:
43 | secretKeyRef:
44 | key: db_pass
45 | name: go-multiroute-v1
46 | volumes:
47 | - name: config
48 | configMap:
49 | name: go-multiroute-v1
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/base_manifest/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: go-multiroute-v1
5 | # the data field must contain base64-encoded values (so they are not displayed as plain text)
6 | data:
7 | db_pass: cGFzczEyMwo= # echo pass123 |base64
8 |
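9 | # A small read-back sketch (namespace flag omitted, adjust as needed):
10 | #   kubectl get secret go-multiroute-v1 -o jsonpath='{.data.db_pass}' | base64 -d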
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/expose_manifest/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: go-multiroute
5 | spec:
6 | ingressClassName: nginx
7 |   # note: HTTPS is not used here
8 | rules:
9 | - http:
10 | paths:
11 | - path: /route1
12 |             # path matching types
13 |             # - Prefix: prefix match; the path is passed through as-is, not truncated
14 |             # - Exact: matches the URL path exactly, case-sensitively
15 |             # - ImplementationSpecific: the default matching mode; behavior is decided by the ingressClass
16 | pathType: Prefix
17 | backend:
18 | service:
19 | name: go-multiroute
20 | port:
21 | number: 3000
22 | - path: /connect_db
23 | pathType: Exact
24 | backend:
25 | service:
26 | name: go-multiroute
27 | port:
28 | number: 3000
29 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/expose_manifest/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: go-multiroute
5 | spec:
6 | type: ClusterIP
7 | selector:
8 | app: go-multiroute
9 | ports:
10 | - name: http
11 |       port: 3000 # Service port
12 |       targetPort: 3000 # backend Pod port
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/Dockerfile:
--------------------------------------------------------------------------------
1 | # docker build --build-arg CODE_SRC=client . -t $_IMAGE_
2 | FROM golang:1.20-alpine AS builder
3 |
4 | ARG CODE_SRC
5 |
6 | WORKDIR /go/cache
7 |
8 | COPY $CODE_SRC/go.mod .
9 | COPY $CODE_SRC/go.sum .
10 |
11 | RUN GOPROXY=https://goproxy.cn,direct go mod tidy
12 |
13 | WORKDIR /build
14 |
15 | # Copy the project code into the container (take care not to include non-code files, to avoid wasting space on the build machine)
16 | COPY $CODE_SRC .
17 |
18 | # cgo is disabled because this is a multi-stage build: the Go program is compiled in a different environment from the one it runs in, and with cgo enabled the binary would not run
19 | RUN GOPROXY=https://goproxy.cn,direct GOOS=linux CGO_ENABLED=0 GOARCH=amd64 GO111MODULE=auto go build -o main -ldflags "-w -extldflags -static"
20 |
21 | FROM alpine:3.19.0 as prod
22 |
23 | # set the timezone in the alpine image
24 | RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
25 | apk add -U tzdata && cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && apk del tzdata && date
26 |
27 | COPY --from=builder /build/main .
28 |
29 | ENTRYPOINT ["/main"]
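30 | # A hedged build/push sketch for the service image referenced by the Deployments (the
31 | # registry account "leigg" comes from the manifests; substitute your own registry/tag):
32 | #   docker build --build-arg CODE_SRC=go_multiroute -t docker.io/leigg/go_multiroute:v1 .
33 | #   docker push docker.io/leigg/go_multiroute:v1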
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/client/go.mod:
--------------------------------------------------------------------------------
1 | module k8s_action
2 |
3 | go 1.20
4 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/client/go.sum:
--------------------------------------------------------------------------------
1 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
2 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
3 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/client/main_client.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "net/http"
7 | )
8 |
9 | func main() {
10 | go func() {
11 | 		// make the HTTP call
12 | rsp, err := http.Get("http://go-multiroute:3000/route1")
13 | if err != nil {
14 | 			log.Printf("Call err: %s", err)
15 | return
16 | }
17 |
18 | text, _ := io.ReadAll(rsp.Body)
19 |
20 | 		// print the response
21 | log.Printf("status:%d text: %s\n", rsp.StatusCode, text)
22 | }()
23 | select {}
24 | }
25 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/client2/go.mod:
--------------------------------------------------------------------------------
1 | module k8s_action
2 |
3 | go 1.20
4 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/client2/go.sum:
--------------------------------------------------------------------------------
1 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
2 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
3 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/client2/main_client2.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "net/http"
5 | )
6 |
7 | func main() {
8 | go func() {
9 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
10 | w.Write([]byte("server1: Hello, World!"))
11 | })
12 | println("listening on :3100")
13 | http.ListenAndServe(":3100", nil)
14 | }()
15 |
16 | h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
17 | w.Write([]byte("server2: Hello, World!"))
18 | })
19 | println("listening on :3200")
20 | http.ListenAndServe(":3200", h)
21 | }
22 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/go_multiroute/go.mod:
--------------------------------------------------------------------------------
1 | module k8s_action
2 |
3 | go 1.20
4 |
5 | require (
6 | golang.org/x/time v0.5.0
7 | gopkg.in/yaml.v2 v2.4.0
8 | )
9 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/go_code/go_multiroute/go.sum:
--------------------------------------------------------------------------------
1 | golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
2 | golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
3 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
4 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
5 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
6 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
7 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/authz-accept-only-ingress.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # This policy denies all requests that are neither from the foo namespace nor from the ingress gateway.
3 | # - In other words, it isolates the foo namespace from everything except the ingress gateway
4 | # - The benefit of a DENY policy is that you don't have to worry about other ALLOW policies overriding it
5 | apiVersion: security.istio.io/v1beta1
6 | kind: AuthorizationPolicy
7 | metadata:
8 | name: ns-isolation-except-ingress
9 | namespace: foo
10 | spec:
11 | action: DENY
12 | rules:
13 | - from:
14 | - source:
15 | notNamespaces: ["foo"]
16 | notPrincipals: ["cluster.local/ns/istio-system/sa/istio-ingressgateway-service-account"]
17 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/authz-allow-nothing.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | apiVersion: security.istio.io/v1
3 | kind: AuthorizationPolicy
4 | metadata:
5 | name: allow-nothing
6 | namespace: default
7 | spec:
8 |   action: ALLOW # replacing this line with `{}` has the same effect
9 |   # Not specifying a selector means deny-by-default, applied to every service in this namespace.
10 |   # - Note: you cannot use action: DENY as the "deny by default" policy here, because the authorization engine treats any DENY policy as taking precedence over any ALLOW policy,
11 |   # - so once a blanket DENY policy exists, every other ALLOW policy is ignored; only do this if you really intend to deny traffic to all services.
12 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/authz-allow-to-go-multiroute.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | apiVersion: security.istio.io/v1
3 | kind: AuthorizationPolicy
4 | metadata:
5 | name: allow-to-go-multiroute
6 |   namespace: default # this namespace only scopes the targets matched by the selector; it does not restrict the request sources
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: go-multiroute
11 | action: ALLOW
12 |   rules: # the from, to and when blocks below are all optional
13 |     - from: # elements of a list are ORed together; sub-fields within a single element are ANDed
14 | - source:
15 | namespaces: [ "other_ns" ]
16 | # principals: [ "cluster.local/ns/default/sa/default" ]
17 | # ipBlocks: ["203.0.113.4"]
18 |             # remoteIpBlocks: [ ... ] # read from X-Forwarded-For, provided the ingress gateway is configured accordingly
19 |             # notIpBlocks: ["203.0.113.4"] # negative match
20 | # notPrincipals, notNamespaces, notRemoteIpBlocks
21 | - source:
22 | principals: [ "cluster.local/ns/default/sa/default" ]
23 | to:
24 | - operation:
25 | methods: [ "GET", "POST" ]
26 |             paths: [ "/route*" ] # most fields in this resource support exact, prefix, suffix and non-empty ("*") matching, except when.key, source.ipBlocks and to.ports
27 | # ports: [ 3000 ]
28 |             # hosts: [ "foo.example.com" ] # taken from the Host header of the HTTP request, case-insensitive
29 | # notMethods, notPaths, notPorts, notHosts
30 | when:
31 |       - key: request.headers[version] # other attribute keys are also supported
32 | values: [ "v1" ]
33 | # notValues
34 |
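35 | # An illustrative reading of the rule above (not part of the policy): a request to the
36 | # go-multiroute workload is allowed only if it comes from namespace "other_ns" OR from the
37 | # cluster.local/ns/default/sa/default service account (the two source blocks are ORed),
38 | # AND uses GET/POST on a /route* path, AND carries the request header "version: v1".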
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/authz-deny-emptyid.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | apiVersion: security.istio.io/v1beta1
3 | kind: AuthorizationPolicy
4 | metadata:
5 | name: require-mtls
6 | namespace: foo
7 | spec:
8 | action: DENY
9 | rules:
10 | - from:
11 | - source:
12 | notPrincipals: ["*"]
13 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/authz-recommend.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # ALLOW-with-positive-matching: define what should be allowed via positive matches
3 | apiVersion: security.istio.io/v1
4 | kind: AuthorizationPolicy
5 | metadata:
6 | name: foo
7 | spec:
8 | action: ALLOW
9 | rules:
10 | - to:
11 | - operation:
12 | paths: [ "/public" ]
13 | ---
14 | # DENY-with-negative-match: define what should be denied via negative matches
15 | apiVersion: security.istio.io/v1
16 | kind: AuthorizationPolicy
17 | metadata:
18 | name: foo
19 | spec:
20 | action: DENY
21 | rules:
22 | - to:
23 | - operation:
24 | notPaths: [ "/public" ]
25 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/default_svc_route_rule.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | apiVersion: networking.istio.io/v1beta1
3 | kind: DestinationRule
4 | metadata:
5 | name: reviews
6 | spec:
7 | host: reviews
8 | trafficPolicy:
9 | connectionPool:
10 | tcp:
11 | maxConnections: 100
12 | subsets:
13 | - name: v1
14 | labels:
15 | version: v1
16 | ---
17 | apiVersion: networking.istio.io/v1beta1
18 | kind: VirtualService
19 | metadata:
20 | name: reviews
21 | spec:
22 | hosts:
23 | - reviews
24 | http:
25 | - route:
26 | - destination:
27 | host: reviews
28 | subset: v1
29 |
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/egressgwy-proxy-http2http.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # ServiceEntry spec: https://istio.io/latest/docs/reference/config/networking/service-entry/
3 | # 1. First define the external domain istio.io with a ServiceEntry (only then can it be used as a destination in a VirtualService)
4 | apiVersion: networking.istio.io/v1beta1
5 | kind: ServiceEntry
6 | metadata:
7 | name: istio-io
8 | spec:
9 | hosts:
10 | - istio.io
11 | ports:
12 | - number: 80
13 | name: http
14 | protocol: HTTP
15 | resolution: DNS
16 | location: MESH_EXTERNAL
17 | ---
18 | # 2. Define an egressGateway for host istio.io
19 | apiVersion: networking.istio.io/v1beta1
20 | kind: Gateway
21 | metadata:
22 | name: egress-istio-io
23 | spec:
24 | selector:
25 | istio: egressgateway
26 | servers:
27 | - port:
28 |         # the port on which this gateway listens for traffic going to host istio.io (it must be one of the ports already exposed by the egress gateway Service)
29 |         # a wrong port number causes the application's requests to fail with 404/503
30 | number: 80
31 | name: http
32 | protocol: HTTP
33 |       # traffic on ports not defined here is not forwarded through the gateway
34 | hosts:
35 | - istio.io
36 | ---
37 | # 3. Define a VirtualService that forwards istio.io traffic to the egressGateway, and from the egressGateway on to istio.io
38 | apiVersion: networking.istio.io/v1beta1
39 | kind: VirtualService
40 | metadata:
41 | name: egressgateway-proxy-http-istio-io
42 | spec:
43 | hosts:
44 | - istio.io
45 |   gateways: # the gateways this routing rule applies to
46 | - mesh
47 | - egress-istio-io
48 | http:
49 |     # rule-1: requests from anywhere inside the mesh (excluding gateways) to host istio.io:80 are forwarded to istio-egressgateway.istio-system.svc.cluster.local:80
50 | - match:
51 | - gateways:
52 | - mesh
53 | port: 80
54 | route:
55 |         # the destination is port 80 of the egressGateway Service (that Service exposes ports 80 and 443)
56 | - destination:
57 | host: istio-egressgateway.istio-system.svc.cluster.local
58 |             port: # may be omitted (because the target Service defines ports[0].name as the readable "http2")
59 | number: 80
60 |     # rule-2: requests leaving the egress-istio-io gateway for host istio.io:80 are forwarded to istio.io:80
61 | - match:
62 | - gateways:
63 | - egress-istio-io
64 | port: 80
65 | route:
66 | - destination:
67 | host: istio.io
68 | port:
69 | number: 80
70 |
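71 | # A hedged verification sketch (pod name is an assumption; curl must exist in the app image):
72 | #   kubectl exec <app-pod> -- curl -sI http://istio.io
73 | # then confirm the request shows up in the egress gateway's access log:
74 | #   kubectl logs -n istio-system deploy/istio-egressgateway | grep istio.io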
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/egressgwy-proxy-https2https.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # ServiceEntry spec: https://istio.io/latest/docs/reference/config/networking/service-entry/
3 | # 1. First define the external domain istio.io with a ServiceEntry (only then can it be used as a destination in a VirtualService)
4 | apiVersion: networking.istio.io/v1beta1
5 | kind: ServiceEntry
6 | metadata:
7 | name: istio-io-https
8 | spec:
9 | hosts:
10 | - istio.io
11 | ports:
12 | # - number: 80
13 | # name: http
14 | # protocol: HTTP
15 | - number: 443
16 | name: tls
17 | protocol: TLS
18 | resolution: DNS
19 | location: MESH_EXTERNAL
20 |
21 | ---
22 | # 2. Define an egressGateway for host istio.io
23 | apiVersion: networking.istio.io/v1beta1
24 | kind: Gateway
25 | metadata:
26 | name: egress-istio-io-https
27 | spec:
28 | selector:
29 | istio: egressgateway
30 | servers:
31 | - port:
32 |         # the port on which this gateway listens for traffic going to host istio.io (it must be one of the ports already exposed by the egress gateway Service)
33 |         # a wrong port number causes the application's requests to fail with 404/503
34 | number: 443
35 | name: tls
36 | protocol: TLS
37 | hosts:
38 | - istio.io
39 | tls:
40 |         mode: PASSTHROUGH # pass the TLS traffic through without terminating it
41 | ---
42 | # 3. Define a VirtualService that forwards istio.io traffic to the egressGateway, and from the egressGateway on to istio.io
43 | apiVersion: networking.istio.io/v1beta1
44 | kind: VirtualService
45 | metadata:
46 | name: egressgateway-proxy-https-istio-io
47 | spec:
48 | hosts:
49 | - istio.io
50 | gateways:
51 | - mesh
52 | - egress-istio-io-https
53 |   tls: # matches HTTPS or TLS traffic
54 | - match:
55 | - gateways:
56 | - mesh
57 | port: 443
58 | sniHosts:
59 | - istio.io
60 | route:
61 | - destination:
62 | host: istio-egressgateway.istio-system.svc.cluster.local
63 |         port: # may be omitted (because the target Service defines ports[0].name as the readable "https")
64 | number: 443
65 | - match:
66 | - gateways:
67 | - egress-istio-io-https
68 | port: 443
69 | sniHosts:
70 | - istio.io
71 | route:
72 | - destination:
73 | host: istio.io
74 | port:
75 | number: 443
76 |
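Note (verification sketch, not part of the manifest): same idea as the HTTP variant, but over TLS. Because the gateway uses PASSTHROUGH it only routes on the SNI value and never terminates TLS. Assumes a sidecar-injected, curl-capable client pod:

kubectl exec deploy/istio-client-test -c istio-client-test -- \
  curl -sS -o /dev/null -w '%{http_code}\n' https://istio.io
# Confirm the connection was relayed by the egress gateway.
kubectl logs -n istio-system -l istio=egressgateway --tail=5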
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/external-access-control.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # ServiceEntry规范:https://istio.io/latest/docs/reference/config/networking/service-entry/
3 | # 1. 首先使用ServiceEntry对象定义外部域名:httpbin.org(定义后才能在VirtualService中作为dst)
4 | apiVersion: networking.istio.io/v1beta1
5 | kind: ServiceEntry
6 | metadata:
7 | name: httpbin
8 | spec:
9 | hosts:
10 |     # The host defined here must be a valid external domain name; otherwise the application container cannot resolve it (it is not a cluster Service either).
11 | - httpbin.org
12 | ports:
13 | - number: 80
14 | name: http
15 | protocol: HTTP
16 | resolution: DNS
17 | location: MESH_EXTERNAL
18 | ---
19 | apiVersion: networking.istio.io/v1beta1
20 | kind: VirtualService
21 | metadata:
22 | name: httpbin
23 | spec:
24 | hosts:
25 | - httpbin.org
26 | http:
27 | # rule-1:将 / 重定向到 /ip
28 | - match:
29 | - uri:
30 | exact: /
31 | redirect:
32 | uri: /ip
33 | # rule-2:无条件转发至 httpbin.org,但限制超时2s
34 | - timeout: 2s
35 | route:
36 | - destination:
37 | host: httpbin.org # 这里的host要么是集群service,要么是通过ServiceEntry定义的host,其他任何地址都将导致访问503
38 |
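Note (verification sketch, not part of the manifest): both rules can be exercised from any sidecar-injected, curl-capable pod (outbound internet access required):

# rule-1: "/" is answered with a redirect (301) to /ip by the sidecar.
kubectl exec deploy/istio-client-test -c istio-client-test -- \
  curl -sS -o /dev/null -w '%{http_code}\n' http://httpbin.org/
# rule-2: a slow endpoint trips the 2s timeout, so the sidecar returns 504.
kubectl exec deploy/istio-client-test -c istio-client-test -- \
  curl -sS -o /dev/null -w '%{http_code}\n' http://httpbin.org/delay/5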
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/ingress-gwy.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # 规范:https://istio.io/latest/docs/reference/config/networking/gateway/
3 | # ingress网关根据下面定义的Host接收外部请求,再结合VirtualService实现流量转发(强依赖)
4 | # ingress网关定义要对外暴露的端口&协议&主机名&证书相关
5 | apiVersion: networking.istio.io/v1beta1
6 | kind: Gateway
7 | metadata:
8 | # 命名携带 ingress前缀,方便与egress网关策略进行区分
9 | name: ingress-go-multiroute
10 | namespace: istio-system # 建议将网关对象部署在istio-system命名空间中,以便可以通过istioctl管理它
11 | spec:
12 | selector: # 关联ingress网关的pod(默认在所有命名空间中匹配,除非istiod中的 PILOT_SCOPE_GATEWAY_TO_NAMESPACE 变量设置true)
13 | app: istio-ingressgateway
14 | servers:
15 | - name: go-multiroute # 为这个server起个备注,可省略
16 | port: # 网关(本地Pod或公有云的LoadBalancer)对外监听的端口&协议
17 | name: http
18 | number: 8080 # 此端口号必须在 service:istio-ingressgateway 定义的Port端口映射中存在(http:80/8080,https:443/8443),且协议能够对应。
19 | protocol: HTTP # HTTP|HTTPS|GRPC|GRPC-WEB|HTTP2|MONGO|TCP|TLS
20 | # 允许VirtualService绑定的域名或集群服务名,"*"表示不限制或用作通配符如 "*.foo.com" "ns/*"
21 | # - 注意:这里定义host只是步骤一,还需要有一个VirtualService来匹配网关中的host来进行转发
22 | # - 若下面定义的host不能匹配到任何VS策略,则404处理
23 | hosts:
24 | # HTTP请求必须携带 "Host: 下面任何一个名称" 这个Header字段,否则网关返回404
25 | - "*.foobar.com"
26 | - name: go-multiroute-https
27 | port:
28 | name: https
29 | number: 8443
30 | protocol: HTTPS
31 | hosts:
32 | - "*.foobar.com"
33 | tls:
34 | # tls.mode=SIMPLE表示网关不会要求外部客户端提供证书,并且网关会进行TLS终止!即网关转发到集群内的流量是HTTP或原始TCP的,而不是TLS的
35 | # - ingress网关的tls.mode一般选择SIMPLE,即手动配置host证书(证书一般由权威机构签发,client才能信任)
36 | mode: SIMPLE
37 | credentialName: cert-foobar
38 |
39 | # 可以在同一个gateway中配置多个server;但若存在较多server,最好将它们分类到不同模板下进行管理。
40 | #- port:
41 | # number: 2379
42 | # name: mongo
43 | # protocol: MONGO
44 | # hosts:
45 | # - "*"
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/ingress-virtualsvc.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # 定义从ingress网关进入的流量 下一步 去往哪儿
3 | apiVersion: networking.istio.io/v1beta1
4 | kind: VirtualService
5 | metadata:
6 | name: ingress-go-multiroute
7 | namespace: default
8 | spec:
9 | # 指定最终要接收流量的目标主机列表,可以是(携带通配符)域名/ip/集群服务名
10 | # - 若网关那边定义了hosts,则此处hosts必须匹配其中一个,未匹配任何VS策略的主机名的请求将统一被网关以404处理
11 | hosts:
12 | - "*.foobar.com"
13 | gateways: # 指定策略要应用的网关
14 | - istio-system/ingress-go-multiroute # 需要指定ns前缀,否则无法正常工作
15 | #- mesh # 应用于整个网格
16 | http:
17 | - route:
18 | - destination:
19 | port:
20 | number: 3000
21 | host: go-multiroute.default.svc.cluster.local
22 |
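Note (verification sketch, not part of the manifest): an end-to-end test through the ingress gateway. It assumes the Gateway above is applied, the go-multiroute Service exists on port 3000, and the istio-ingressgateway Service is of type LoadBalancer with its port 80 mapped to the 8080 listener (as noted in the Gateway file); adjust the address lookup for NodePort setups:

INGRESS_IP=$(kubectl -n istio-system get svc istio-ingressgateway \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
# The Host header must match a host declared on the Gateway, otherwise the gateway answers 404.
curl -sS -H 'Host: test.foobar.com' "http://${INGRESS_IP}/"
curl -sS -o /dev/null -w '%{http_code}\n' "http://${INGRESS_IP}/"   # no matching Host header: expect 404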
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/istio_client_test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: istio-client-test
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: istio-client-test
9 | template:
10 | metadata:
11 | labels:
12 | app: istio-client-test
13 | spec:
14 | containers:
15 | - name: istio-client-test
16 | image: docker.io/leigg/istio_client_test_pod:v1
17 | - name: tcpdump # 可以使用此容器抓取 Pod 上流经 sidecar 容器的流量
18 | image: docker.io/kaazing/tcpdump:latest
19 | command: [ "sleep", "1d" ]
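Note (usage sketch, not part of the manifest): the tcpdump sidecar shares the Pod's network namespace, so it can observe the traffic entering and leaving the Envoy sidecar (assuming the kaazing/tcpdump image provides the tcpdump binary):

kubectl exec -it deploy/istio-client-test -c tcpdump -- \
  tcpdump -i eth0 -nn 'port 80 or port 443'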
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/peer_authn.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # 创建 go-multiroute 服务的对等认证策略。
3 | apiVersion: security.istio.io/v1beta1
4 | kind: PeerAuthentication
5 | metadata:
6 | # 一个网格或一个命名空间范围只能有一个泛域对等认证(mTLS)策略,若有多个,以旧的为准。
7 | # - 泛域指的是不带或带有空的selector字段
8 | name: go-multiroute
9 | namespace: default
10 | spec:
11 | # 0. 此selector仅在对应工作负载定义了K8s Service对象时生效。
12 | # 1. 不带或带有空的selector意味着它适用于它所在的命名空间中的所有服务。
13 | # 2. 若有多个针对非空且相同标签的策略,以旧的为准。
14 | selector:
15 | matchLabels:
16 | app: go-multiroute
17 | mtls:
18 | # 支持三种模式
19 | # PERMISSIVE: 接收mTLS和明文流量,一般在刚为服务开启mTLS时使用,或者作为DISABLE到STRICT的过渡模式。
20 | # STRICT: 只接收mTLS流量,拒绝明文流量。
21 | # DISABLE: 禁用mTLS。
22 | mode: STRICT
23 |   # Disable mTLS for port 3000 only; the workload's Service must have a targetPort of 3000 for this to take effect
24 | # portLevelMtls:
25 | # 3000:
26 | # mode: DISABLE
27 |
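Note (verification sketch, not part of the manifest): once STRICT is in effect, plaintext traffic to go-multiroute is rejected while mTLS traffic keeps working. Assumes the go-multiroute Service listens on port 3000, the curl Pod defined later in this repo runs without a sidecar, and istio-client-test runs with one:

# From a pod WITHOUT a sidecar the plaintext request is reset.
kubectl exec curl -- curl -sS -m 5 http://go-multiroute.default:3000/ \
  || echo "plaintext rejected as expected"
# From a sidecar-injected pod the request still succeeds over mTLS.
kubectl exec deploy/istio-client-test -c istio-client-test -- \
  curl -sS -o /dev/null -w '%{http_code}\n' http://go-multiroute.default:3000/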
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/peer_authn_default.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # 在istio安装的根命名空间中创建默认的PeerAuthentication策略,作为整个网格的默认配置。
3 | apiVersion: security.istio.io/v1beta1
4 | kind: PeerAuthentication
5 | metadata:
6 | name: default
7 | namespace: istio-system # 或者改为需要单独生效的命名空间名称
8 | spec:
9 | # 没有selector字段
10 | mtls:
11 | mode: STRICT
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/pod_two_port_svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: two-port-svc
5 | labels:
6 | app: two-port-svc
7 | version: v1
8 | spec:
9 | containers:
10 | - image: docker.io/leigg/two_port_svc:v1
11 | name: two-port-svc
12 | # resources:
13 | # requests:
14 | # cpu: 100m
15 | # memory: 10Mi
16 | # limits:
17 | # cpu: 100m
18 | # memory: 15Mi
19 | # volumeMounts:
20 | # - name: config
21 | # mountPath: "/config"
22 | # env:
23 | # - name: VERSION
24 | # value: v1
25 | # - name: POD_IP
26 | # valueFrom:
27 | # fieldRef:
28 | # fieldPath: status.podIP
29 | # - name: DB_PASS
30 | # valueFrom:
31 | # secretKeyRef:
32 | # key: db_pass
33 | # name: go-multiroute-v1
34 | # volumes:
35 | # - name: config
36 | # configMap:
37 | # name: go-multiroute-v1
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/route-destinationrule.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # 规范:https://istio.io/latest/zh/docs/reference/config/networking/destination-rule/
3 | # - 下面的示例远远没有列出所有选项,请参考上面的规范以获取更多信息。
4 | apiVersion: networking.istio.io/v1beta1
5 | kind: DestinationRule
6 | metadata:
7 | name: go-multiroute
8 | spec:
9 | host: go-multiroute # 与k8s service名称一致
10 | trafficPolicy: # 定义默认流量策略(还可以在subset下的每个元素中单独配置)
11 | tls:
12 | # 若负载启用了mTLS,则必须配置 ISTIO_MUTUAL,否则503,默认 DISABLE。若host是网格服务,则这里可以直接启用,因为istio默认为网格服务间通信开启mTLS
13 | mode: ISTIO_MUTUAL
14 | loadBalancer: # 定义负载均衡策略
15 | simple: ROUND_ROBIN # 其他:LEAST_CONN,LEAST_REQUEST,RANDOM,PASSTHROUGH
16 | connectionPool:
17 | tcp:
18 | maxConnections: 5 # 定义tcp连接最大并发数,这里的值仅用于测试
19 | # 若来源是网关,可以指定如下字段来使用相同协议转发流量至目标
20 | #http:
21 | # useClientProtocol: true
22 | # outlierDetection: # 定义熔断策略
23 | # consecutive5xxErrors: 3 # 指定连续多少个 5xx 错误会导致端点被剔除,默认5,0表示禁用(但是时间窗口未知)
24 | # interval: 1s # 熔断检测间隔,默认10s,要求>=1ms
25 | # baseEjectionTime: 10s # 初始的端点剔除时间,支持单位 ms s m h,默认30s,要求>=1ms
26 | # maxEjectionPercent: 100 # 端点最大剔除百分比,例如一共3个端点(pod),100%表示可以全部剔除,若允许全部可剔除,届时可能将无端点可用
27 | subsets: # 定义service下的不同子集(通过标签区分)
28 | - name: v1
29 | labels: # 匹配Deployment中Pod的标签
30 | version: v1
31 | - name: v2
32 | labels:
33 | version: v2
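Note (verification sketch, not part of the manifest): each subset becomes its own outbound cluster on every sidecar, which can be confirmed with istioctl (the pod label is an assumption; any mesh pod works):

POD=$(kubectl get pod -l app=istio-client-test -o jsonpath='{.items[0].metadata.name}')
istioctl proxy-config cluster "$POD" --fqdn go-multiroute.default.svc.cluster.local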
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/unmergeable-vs.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | # 下面两个 VS 针对同一个Host,它们无法合并,只有 go-multiroute-1 有效!!!
3 | apiVersion: networking.istio.io/v1beta1
4 | kind: VirtualService
5 | metadata:
6 | name: go-multiroute-1
7 | spec:
8 | hosts:
9 | - go-multiroute
10 | # gateways:
11 | # - gateway
12 | http:
13 | - match:
14 | - uri:
15 | exact: /route1
16 | route:
17 | - destination:
18 | host: go-multiroute
19 | subset: v1
20 | ---
21 | apiVersion: networking.istio.io/v1beta1
22 | kind: VirtualService
23 | metadata:
24 | name: go-multiroute-2
25 | spec:
26 | hosts:
27 | - go-multiroute
28 | # gateways:
29 | # - gateway
30 | http:
31 | - match:
32 | - uri:
33 | exact: /route2
34 | route:
35 | - destination:
36 | host: go-multiroute
37 | subset: v2
38 | ---
39 | apiVersion: networking.istio.io/v1beta1
40 | kind: DestinationRule
41 | metadata:
42 | name: go-multiroute
43 | spec:
44 | host: go-multiroute
45 | subsets:
46 | - name: v1
47 | labels:
48 | version: v1
49 | - name: v2
50 | labels:
51 | version: v2
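Note (verification sketch, not part of the manifest): the configuration analyzer points out the duplicate host, which is an easy way to catch this class of mistake:

istioctl analyze -n default
# Expect a warning that multiple VirtualServices declare the same host; only one of them is actually served.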
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/istio_manifest/virtualservice-in-order.yaml:
--------------------------------------------------------------------------------
1 | # nonk8s
2 | apiVersion: networking.istio.io/v1alpha3
3 | kind: VirtualService
4 | metadata:
5 | name: myapp
6 | spec:
7 | hosts:
8 | - myapp.com
9 | gateways:
10 | - myapp-gateway
11 |   http: # The two match rules below overlap, so they must be kept in a single VirtualService to guarantee the evaluation order
12 | - match:
13 |     - headers:
14 | version:
15 | exact: "s1"
16 | route:
17 | - destination:
18 | host: service1.default.svc.cluster.local
19 | - match:
20 | - uri:
21 | prefix: /service2
22 | route:
23 | - destination:
24 | host: service2.default.svc.cluster.local
--------------------------------------------------------------------------------
/k8s_actions_guide/version1/script/del_user.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e # 出现任何错误则停止继续执行
3 |
4 | <" # 用于验证API Server证书的CA根证书的base64
17 | server: https://192.168.31.2:6443 # 集群访问地址
18 | name: kubernetes
19 | contexts:
20 | - context:
21 | cluster: kubernetes
22 | user: kubernetes-admin
23 | name: kubernetes-admin@kubernetes
24 | current-context: kubernetes-admin@kubernetes
25 | preferences: { }
26 | users:
27 | - name: kubernetes-admin
28 | user:
29 | client-certificate-data: ""
30 | client-key-data: ""
--------------------------------------------------------------------------------
/kubernetes-dashboard-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: dashboard-admin # 删除它会自动删除同一空间下同名的Secret
5 | namespace: kubernetes-dashboard
6 | ---
7 |
8 | apiVersion: v1
9 | kind: Secret
10 | metadata:
11 | name: dashboard-admin
12 | namespace: kubernetes-dashboard
13 | annotations:
14 | kubernetes.io/service-account.name: dashboard-admin
15 | type: kubernetes.io/service-account-token
16 |
17 | ---
18 | kind: ClusterRoleBinding
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | metadata:
21 | name: dashboard-admin
22 | labels:
23 | app.kubernetes.io/part-of: kubernetes-dashboard
24 | roleRef:
25 | apiGroup: ""
26 | kind: ClusterRole
27 | name: cluster-admin # 超级管理员角色应该仅分配给集群核心管理人员。在生产环境中,不应该随意使用此角色,而应该定义新的有限制的角色
28 | subjects:
29 | - kind: ServiceAccount
30 | name: dashboard-admin
31 | namespace: kubernetes-dashboard
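Note (usage sketch, not part of the manifest): after applying, the dashboard login token can be read from the Secret, or minted on demand (Kubernetes v1.24+):

kubectl -n kubernetes-dashboard get secret dashboard-admin -o jsonpath='{.data.token}' | base64 -d; echo
kubectl -n kubernetes-dashboard create token dashboard-admin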
--------------------------------------------------------------------------------
/kubeschduler-config.yaml:
--------------------------------------------------------------------------------
1 | # v1版本适用于 Kubernetes v1.25+
2 | apiVersion: kubescheduler.config.k8s.io/v1
3 | kind: KubeSchedulerConfiguration
4 | profiles:
5 | - plugins:
6 | score:
7 | disabled:
8 | - name: ImageLocality
9 | - name: PodTopologySpread
10 | enabled:
11 | - name: CinderLimits
12 | weight: 2
13 |
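Note (usage sketch, not part of the manifest): this profile is consumed by kube-scheduler through its --config flag; on a kubeadm cluster that means mounting the file into the kube-scheduler static Pod and adding the flag (the path below is only an example):

kube-scheduler --config=/etc/kubernetes/kubescheduler-config.yaml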
--------------------------------------------------------------------------------
/limitrange-for-container.yaml:
--------------------------------------------------------------------------------
1 | #➜ kk describe limits
2 | #Name: limitrange-for-container
3 | #Namespace: default
4 | # Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio
5 | # ---- -------- --- --- --------------- ------------- -----------------------
6 | # Container cpu 100m 200m 100m 150m -
7 | # Container memory 100Mi 300Mi 100Mi 200Mi -
8 | apiVersion: v1
9 | kind: LimitRange
10 | metadata:
11 | name: limitrange-for-container
12 | namespace: default
13 | spec:
14 | limits:
15 | - type: Container # 限制容器的计算资源
16 | # 一旦设置 max/min项,再配置Pod内容器的计算资源时,就不能超过这个范围,否则无法创建
17 | max:
18 | cpu: "200m"
19 | memory: "300Mi"
20 | min:
21 | cpu: "100m"
22 | memory: "100Mi"
23 | default:
24 | cpu: "150m"
25 | memory: "200Mi"
26 | defaultRequest:
27 | cpu: "100m"
28 | memory: "100Mi"
29 |     # You can also cap the ratio of limits to requests.
30 |     # For type Container the ratio (taking cpu as an example) is checked per container at admission time:
31 |     #   the container's `resources.limits.cpu` / the container's `resources.requests.cpu`
32 |     #   must not exceed the value below (memory works the same way).
33 |     #   Since a limit is never below its request, the value must be >= 1.
34 | maxLimitRequestRatio:
35 | memory: "2"
36 | cpu: "2"
37 |
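Note (verification sketch, not part of the manifest): the defaulting behaviour is easy to observe: a container created without resources picks up defaultRequest/default from the LimitRange.

kubectl apply -f limitrange-for-container.yaml
kubectl run lr-demo --image=nginx --restart=Never
kubectl get pod lr-demo -o jsonpath='{.spec.containers[0].resources}'; echo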
--------------------------------------------------------------------------------
/limitrange-for-pod.yaml:
--------------------------------------------------------------------------------
1 | #➜ kk describe limits
2 | #Name: limitrange-for-pod
3 | #Namespace: default
4 | # Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio
5 | # ---- -------- --- --- --------------- ------------- -----------------------
6 | # Pod memory 100Mi 300Mi - - -
7 | # Pod cpu 100m 200m - - -
8 | apiVersion: v1
9 | kind: LimitRange
10 | metadata:
11 | name: limitrange-for-pod
12 | namespace: default
13 | spec:
14 | limits:
15 | - type: Pod # 限制Pod的计算资源(Pod内容器的资源总和)
16 | # 一旦设置 max/min项,再配置Pod的计算资源时,就不能超过这个范围,否则无法创建
17 | max:
18 | cpu: "200m"
19 | memory: "300Mi"
20 | min:
21 | cpu: "100m"
22 | memory: "100Mi"
23 |     # You can also cap the ratio of limits to requests.
24 |     # For type Pod the ratio (taking cpu as an example) is checked per Pod at admission time:
25 |     #   the sum of `resources.limits.cpu` over the Pod's containers / the sum of `resources.requests.cpu` over the Pod's containers
26 |     #   must not exceed the value below (memory works the same way).
27 |     #   Since a limit is never below its request, the value must be >= 1.
28 | maxLimitRequestRatio:
29 | memory: "2"
30 | cpu: "2"
31 |
--------------------------------------------------------------------------------
/limitrange-for-pvc.yaml:
--------------------------------------------------------------------------------
1 | #➜ kk describe limits
2 | #Name: limitrange-for-pvc
3 | #Namespace: default
4 | # Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio
5 | # ---- -------- --- --- --------------- ------------- -----------------------
6 | # PersistentVolumeClaim storage 100Mi 1Gi - - -
7 | apiVersion: v1
8 | kind: LimitRange
9 | metadata:
10 | name: limitrange-for-pvc
11 | namespace: default
12 | spec:
13 | limits:
14 |     - type: PersistentVolumeClaim  # limits the storage request of each PVC (not Pod compute resources)
15 |       # Once max/min are set, a PVC whose storage request falls outside this range cannot be created
16 | max:
17 | storage: 1Gi
18 | min:
19 | storage: 100Mi
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "net/http"
7 | )
8 |
9 | func main() {
10 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
11 | io.WriteString(w, "[v1] Hello, Kubernetes!")
12 | })
13 |
14 | log.Printf("v1 access http://localhost:3000\n")
15 | panic(http.ListenAndServe(":3000", nil))
16 | //panic("something went wrong")
17 | }
18 |
--------------------------------------------------------------------------------
/main2.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "net/http"
7 | )
8 |
9 | func main() {
10 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
11 | io.WriteString(w, "[v2] Hello, Kubernetes!")
12 | })
13 |
14 | log.Printf("access http://localhost:3000\n")
15 | panic(http.ListenAndServe(":3000", nil))
16 | }
17 |
--------------------------------------------------------------------------------
/main_hostname.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net/http"
7 | "os"
8 | "time"
9 | )
10 |
11 | func main() {
12 | http.HandleFunc("/now_time", func(w http.ResponseWriter, r *http.Request) {
13 | io.WriteString(w, fmt.Sprintf("[v3] Hello, Kubernetes!, now time: %s\n", time.Now()))
14 | })
15 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
16 | host, _ := os.Hostname()
17 | io.WriteString(w, fmt.Sprintf("[v3] Hello, Kubernetes!, From host: %s\n", host))
18 | })
19 | http.ListenAndServe(":3000", nil)
20 | }
21 |
--------------------------------------------------------------------------------
/main_liveness.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net/http"
7 | "time"
8 | )
9 |
10 | func main() {
11 | started := time.Now()
12 | http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
13 | duration := time.Since(started)
14 | if duration.Seconds() > 15 {
15 | w.WriteHeader(500)
16 | w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
17 | } else {
18 | w.WriteHeader(200)
19 | w.Write([]byte("ok"))
20 | }
21 | })
22 |
23 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
24 | io.WriteString(w, "[v2] Hello, Kubernetes!")
25 | })
26 | http.ListenAndServe(":3000", nil)
27 | }
28 |
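Note (local test sketch, not part of the source file): the /healthz handler flips from 200 to 500 about 15 seconds after start-up, which is what eventually trips a liveness probe pointed at it:

go run main_liveness.go &
sleep 1;  curl -i http://localhost:3000/healthz   # 200 within the first 15 seconds
sleep 16; curl -i http://localhost:3000/healthz   # 500 afterwards
kill %1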
--------------------------------------------------------------------------------
/main_log.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "log"
5 | "net/http"
6 | "time"
7 | )
8 |
9 | func main() {
10 | go func() {
11 | i := 0
12 | for {
13 | i++
14 | time.Sleep(time.Second)
15 | log.Println("log test", i)
16 | }
17 | }()
18 | http.ListenAndServe(":3000", nil)
19 | }
20 |
--------------------------------------------------------------------------------
/main_log_json.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | "time"
7 | )
8 |
9 | func main() {
10 | go func() {
11 | i := 0
12 | for {
13 | i++
14 | time.Sleep(time.Second)
15 | // 实际项目中最好在日志中使用毫秒级的时间,这里仅精确到秒
16 | fmt.Println(fmt.Sprintf(`{"time": "%s", "number": %d, "field1":"abcdefghijklmn","field2":"0123456789","field3":"Golang","field4":"Kubernetes"}`, time.Now().Format(time.DateTime), i))
17 | }
18 | }()
19 | http.ListenAndServe(":3000", nil)
20 | }
21 |
--------------------------------------------------------------------------------
/main_nginxingress.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net/http"
7 | "os"
8 | )
9 |
10 | func main() {
11 | host, _ := os.Hostname()
12 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
13 | io.WriteString(w, fmt.Sprintf("[v3] Hello, Kubernetes!, this is ingress test, host:%s\n", host))
14 | })
15 | http.ListenAndServe(":3000", nil)
16 | }
17 |
--------------------------------------------------------------------------------
/main_panic.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "log"
6 | "net/http"
7 | )
8 |
9 | func main() {
10 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
11 | io.WriteString(w, "[v1] Hello, Kubernetes!")
12 | })
13 |
14 | log.Printf("v1 access http://localhost:3000\n")
15 | //panic(http.ListenAndServe(":3000", nil))
16 | panic("something went wrong")
17 | }
18 |
--------------------------------------------------------------------------------
/main_read_configmap.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net/http"
7 | "os"
8 | )
9 |
10 | func main() {
11 | readFile := func(f string) string {
12 | nbytes, err := os.ReadFile("/etc/configmap_vol/" + f)
13 | if err != nil {
14 | panic(err)
15 | }
16 | return string(nbytes)
17 | }
18 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
19 | host, _ := os.Hostname()
20 | dbURL := os.Getenv("DB_URL")
21 | io.WriteString(w, fmt.Sprintf("[v4] Hello, Kubernetes! From host: %s\n"+
22 | "Get Database Connect URL: %s\n"+
23 | "app-config.json:%s", host, dbURL, readFile("app-config.json")))
24 | })
25 | http.ListenAndServe(":3000", nil)
26 | }
27 |
--------------------------------------------------------------------------------
/main_read_secret.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "net/http"
7 | "os"
8 | )
9 |
10 | func main() {
11 | readFile := func(f string) string {
12 | nbytes, err := os.ReadFile("/etc/secret_vol/" + f)
13 | if err != nil {
14 | panic(err)
15 | }
16 | return string(nbytes)
17 | }
18 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
19 | host, _ := os.Hostname()
20 | io.WriteString(w, fmt.Sprintf("[v4] Hello, Kubernetes! From host: %s, Get Database Passwd: %s\n"+
21 | "some.txt:%s\ncert.key:%s\nconfig.yaml:%s",
22 | host, os.Getenv("DB_PASSWD"), readFile("some.txt"), readFile("cert.key"), readFile("config.yaml")))
23 | })
24 | http.ListenAndServe(":3000", nil)
25 | }
26 |
--------------------------------------------------------------------------------
/main_readiness.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "io"
5 | "net/http"
6 | )
7 |
8 | func hello(w http.ResponseWriter, r *http.Request) {
9 | io.WriteString(w, "[v2] Hello, Kubernetes!")
10 | }
11 |
12 | func main() {
13 | http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
14 | w.WriteHeader(500)
15 | })
16 |
17 | http.HandleFunc("/", hello)
18 | http.ListenAndServe(":3000", nil)
19 | }
20 |
--------------------------------------------------------------------------------
/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: dev
5 |
6 | --- # 分割线 隔离两个配置区
7 |
8 | apiVersion: v1
9 | kind: Namespace
10 | metadata:
11 | name: test
--------------------------------------------------------------------------------
/pod.yaml:
--------------------------------------------------------------------------------
1 | # Pod名称必须是一个合法DNS子域名
2 | # 具体要求:https://kubernetes.io/zh-cn/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
3 | apiVersion: v1
4 | kind: Pod
5 | metadata:
6 | name: go-http
7 | labels:
8 | app: go
9 | version: v1
10 | spec:
11 | containers:
12 | - name: go-http
13 | image: leigg/hellok8s:v1
--------------------------------------------------------------------------------
/pod_affinityNode.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
 4 |   name: go-http-nodeaffinity  # Pod names must be lowercase RFC 1123 subdomains, so no uppercase letters
5 | labels:
6 | app: go
7 | version: v1
8 | spec:
9 | containers:
10 | - name: go-http
11 | image: leigg/hellok8s:v1
12 | # nodeSelector: 如果和亲和性同时配置,则必须都满足
13 | affinity:
14 | nodeAffinity: # 不会绕过污点机制
15 | # 下面两项可以二选一,也可以同时存在
16 | # - requiredDuringSchedulingIgnoredDuringExecution
17 | # - preferredDuringSchedulingIgnoredDuringExecution
18 | requiredDuringSchedulingIgnoredDuringExecution: # 硬性调度
19 | nodeSelectorTerms: # 多个 matchExpressions 之间的关系是【或】关系
20 | - matchExpressions: # 单个 matchExpressions 中的多个表达式是【且】关系
21 | # operator 支持 In、NotIn、Exists、DoesNotExist、Gt 和 Lt,其中NotIn和DoesNotExist可用于实现节点反亲和性
22 | - {key: disktype, operator: In, values: ["ssd"]}
23 | # preferredDuringSchedulingIgnoredDuringExecution: # 软性调度
24 | # - weight: 1 # 相比其他 软性调度 策略的权重,范围是1-100
25 | # preference:
26 | # - matchExpressions:
27 | # - { key: disktype, operator: In, values: [ "ssd" ] }
28 | # - weight: 5
29 | # preference:
30 | # - matchExpressions:
31 | # - { key: cpu, operator: In, values: [ "4core" ] }
32 |
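Note (usage sketch, not part of the manifest): the required rule above only matches nodes carrying disktype=ssd, so label a node first:

kubectl label node k8s-node1 disktype=ssd
kubectl apply -f pod_affinityNode.yaml
kubectl get pod go-http-nodeaffinity -o wide   # should be scheduled onto the labelled node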
--------------------------------------------------------------------------------
/pod_affinityPod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: go-http-podaffinity
5 | spec:
6 | containers:
7 | - name: go-http
8 | image: leigg/hellok8s:v1
9 | # nodeSelector: 如果和亲和性同时配置,则必须都满足
10 | affinity:
11 | # podAffinity 可以和 podAntiAffinity 同时存在
12 | podAffinity: # pod亲和性
13 | requiredDuringSchedulingIgnoredDuringExecution:
14 | - labelSelector:
15 | matchExpressions:
16 | - { key: app, operator: In, values: [ "go" ] }
17 | topologyKey: kubernetes.io/os # 必须指定,它是你希望调度Pod的目标节点群共有的标签,亲和性规则也仅会在此节点群生效
18 | # namespaces: ["dev","test"] 允许指定命名空间,不指定则是当前Pod所在的空间
19 |       # namespaceSelector: # or select namespaces by label
20 | # matchExpressions:
21 | # - key:
22 | # operator:
23 | # matchLabels:
24 | # - environment: production
25 | # preferredDuringSchedulingIgnoredDuringExecution:
26 | # - podAffinityTerm:
27 | # topologyKey:
28 | # weight:
29 | podAntiAffinity: # pod反亲和性
30 | requiredDuringSchedulingIgnoredDuringExecution:
31 | - labelSelector:
32 | matchExpressions:
33 | - { key: highcpu, operator: In, values: [ "true" ] }
34 |         topologyKey: kubernetes.io/hostname # for anti-affinity this is usually kubernetes.io/hostname (spread Pods across nodes), but other topology keys are allowed
35 |
--------------------------------------------------------------------------------
/pod_associate_serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-sa-longtime
5 | labels:
6 | app: nginx
7 | spec:
8 | # automountServiceAccountToken: true # 默认true,token会自动挂载到 /var/run/secrets/kubernetes.io/serviceaccount/token
9 | serviceAccountName: sa1
10 | containers:
11 | - name: nginx-container
12 | image: nginx
13 | # 启用下面的配置来自定义映射
14 | # volumeMounts:
15 | # - mountPath: /var/run/secrets/kubernetes.io/serviceaccount/
16 | # name: token
17 | # volumes:
18 | # - name: token
19 | # projected:
20 | # sources:
21 | # - serviceAccountToken:
22 | # path: token
23 | # expirationSeconds: 600
24 | # audience: vault
--------------------------------------------------------------------------------
/pod_busybox.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busybox
5 | labels:
6 | app: busybox
7 | spec:
8 | containers:
9 | - name: busybox-container
10 | image: busybox
11 | command: [ "sleep", "infinity" ]
--------------------------------------------------------------------------------
/pod_curl.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: curl
5 | labels:
6 | app: curl
7 | annotations:
8 | key1: "value1"
9 | description: The `curl` command is a powerful tool used to make HTTP requests from the command line.
10 | It is versatile and supports various protocols, including HTTP, HTTPS, FTP, FTPS, and more.
11 | spec:
12 | containers:
13 | - name: curl-container
14 | image: curlimages/curl
15 | command: [ "sh","-c", "sleep 1h" ]
--------------------------------------------------------------------------------
/pod_diff_images.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: diff-images
5 | spec:
6 | containers:
7 | - name: busybox
8 | image: busybox
9 | command: [ "sleep", "3600" ]
10 | - name: centos
11 | image: centos:6
12 | command: [ "sleep", "3600" ]
13 |
--------------------------------------------------------------------------------
/pod_initContainer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: myapp-pod
5 | labels:
6 | app.kubernetes.io/name: MyApp
7 | spec:
8 | initContainers:
9 | - name: init-myservice
10 | image: busybox:1.28
11 | command: [ 'sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done" ]
12 | - name: init-mydb
13 | image: busybox:1.28
14 | command: [ 'sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done" ]
15 | containers:
16 | - name: myapp-container
17 | image: busybox:1.28
18 | command: [ 'sh', '-c', 'echo The app is running! && sleep 3600' ]
--------------------------------------------------------------------------------
/pod_limitResource.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-limit-resource
5 | spec:
6 | containers:
7 | - name: busybox-container
8 | image: vish/stress
9 | # -mem-total 表示容器启动一会儿后占用的内存总量,-mem-alloc-size 表示每次分配的内存大小,-mem-alloc-sleep 表示每次分配内存后休眠的时间
10 | # 启动大概20s多后,内存占用量便会超过下面的限额100Mi,Pod被终止
11 | args: ['-mem-total', '150Mi', '-mem-alloc-size', '5Mi', '-mem-alloc-sleep', '1s']
12 | resources:
13 |       limits: # the ceiling: exceeding the memory limit gets the container OOM-killed, while CPU over the limit is only throttled
14 | cpu: "0.1" # 也可以使用 “100m”,1=1000m=1个cpu内核,可以大于1
15 | memory: "100Mi"
16 | requests: # 最少需要的额度
17 | cpu: "0.05" # 也可以使用 “100m”,1=1000m
18 | memory: "50Mi"
--------------------------------------------------------------------------------
/pod_nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | containers:
9 | - name: nginx-container
10 | image: nginx
11 |
--------------------------------------------------------------------------------
/pod_nginx_svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-hpa-test
5 | labels:
6 | app: nginx
7 | spec:
8 | # 如果一开始就要对deployment设计hpa对象,则不要设置replicas(默认1),而是由hpa对象来动态控制
9 | # -- 若显式设置replica为0,则会禁用绑定的hpa策略。直到你修改为大于0的值
10 | # replicas: 2
11 | selector:
12 | matchLabels:
13 | app: nginx
14 | template:
15 | metadata:
16 | labels:
17 | app: nginx
18 | spec:
19 | containers:
20 | - name: nginx-container
21 | image: nginx
22 | ports:
23 | - containerPort: 80
24 | resources:
25 | requests:
26 | cpu: 50m
27 | memory: 128Mi
28 | ---
29 | apiVersion: v1
30 | kind: Service
31 | metadata:
32 | name: nginx-hpa-test
33 | labels:
34 | app: nginx
35 | spec:
36 | ports:
37 | - port: 80
38 | selector:
39 | app: nginx
40 |
--------------------------------------------------------------------------------
/pod_nodeLabel.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: go-http
5 | labels:
6 | app: go
7 | version: v1
8 | spec:
9 | containers:
10 | - name: go-http
11 | image: leigg/hellok8s:v1
12 | nodeSelector:
13 | disktype: ssd
--------------------------------------------------------------------------------
/pod_nodeName.yaml:
--------------------------------------------------------------------------------
 1 | # Note: every name here may only contain lowercase letters, digits, '-' and '.'
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: go-http
6 | labels:
7 | app: go
8 | version: v1
9 | spec:
10 | containers:
11 | - name: go-http
12 | image: leigg/hellok8s:v1
13 | nodeName: k8s-master
--------------------------------------------------------------------------------
/pod_tolerance.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: go-http-tolerance
5 | spec:
6 | containers:
7 | - name: go-http
8 | image: leigg/hellok8s:v1
9 | affinity: # 使Pod尽量调度到master上
10 | nodeAffinity:
11 | preferredDuringSchedulingIgnoredDuringExecution:
12 | - preference:
13 | matchExpressions:
14 | - key: kubernetes.io/hostname
15 | operator: In
16 | values:
17 | - "k8s-master"
18 | weight: 1
19 | tolerations:
20 | - key: "role/log"
21 | operator: "Equal"
22 | effect: "NoExecute"
--------------------------------------------------------------------------------
/pod_two_sidecar_container.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: counter
5 | spec:
6 | containers:
7 | - name: count
8 | image: busybox:1.28
9 | args:
10 | - /bin/sh
11 | - -c
12 | - >
13 | i=0;
14 | while true;
15 | do
16 | echo "$i: $(date)" >> /var/log/1.log;
17 | echo "$(date) INFO $i" >> /var/log/2.log;
18 | i=$((i+1));
19 | sleep 1;
20 | done
21 | volumeMounts:
22 | - name: varlog
23 | mountPath: /var/log
24 | - name: count-log-1
25 | image: busybox:1.28
26 | args: [ /bin/sh, -c, 'tail -n+1 -F /var/log/1.log' ]
27 | volumeMounts:
28 | - name: varlog
29 | mountPath: /var/log
30 | - name: count-log-2
31 | image: busybox:1.28
32 | args: [ /bin/sh, -c, 'tail -n+1 -F /var/log/2.log' ]
33 | volumeMounts:
34 | - name: varlog
35 | mountPath: /var/log
36 | volumes:
37 | - name: varlog
38 | emptyDir: { }
39 |
--------------------------------------------------------------------------------
/pod_use_downwardAPI.yaml:
--------------------------------------------------------------------------------
1 | # 此模板演示了如何使用 环境变量和存储卷 方式注入Pod信息到Pod内部
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: busybox-use-downwardapi
6 | labels:
7 | app: busybox
8 | label_test: some_value
9 | spec:
10 | containers:
11 | - name: write
12 | image: busybox
13 | command: [ "sh", "-c" ]
14 | # 通过命令行可以读取环境变量
15 | args: [ 'echo "hellok8s, downwardAPI! PodName=${POD_NAME} LIMITS_CPU=${LIMITS_CPU} POD_IP=${POD_IP}"; sleep infinity' ]
16 | resources:
17 | limits:
18 | cpu: "0.1"
19 | env:
20 | - name: POD_NAME
21 | valueFrom:
22 | fieldRef:
23 | fieldPath: metadata.name
24 | - name: LIMITS_CPU
25 | valueFrom:
26 | resourceFieldRef:
27 | resource: limits.cpu
28 | - name: POD_IP
29 | valueFrom:
30 | fieldRef:
31 | fieldPath: status.podIP
32 | volumeMounts:
33 | - mountPath: /config/downward_api_info
34 | name: volume-downward
35 | volumes:
36 | - name: volume-downward
37 | downwardAPI:
38 | items:
39 | - path: "POD_NAME"
40 | fieldRef:
41 | fieldPath: metadata.name
42 | - path: "LABELS"
43 | fieldRef:
44 | fieldPath: metadata.labels
--------------------------------------------------------------------------------
/pod_use_priorityClass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: scheduling.k8s.io/v1
2 | kind: PriorityClass
3 | metadata:
4 | name: high-priority
5 | value: 1000000
6 | globalDefault: false
7 | #preemptionPolicy: Never
8 | description: "此优先级类应仅用于 XYZ 服务 Pod。"
9 | ---
10 | apiVersion: v1
11 | kind: Pod
12 | metadata:
13 | name: nginx
14 | labels:
15 | env: test
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx
20 | imagePullPolicy: IfNotPresent
21 | priorityClassName: high-priority
--------------------------------------------------------------------------------
/pod_use_pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: dev
5 |
6 | ---
7 | # 按顺序定义 pv,pvc,pod
8 | apiVersion: v1
9 | kind: PersistentVolume
10 | metadata:
11 | name: pv-hostpath
12 |   namespace: dev # a PV is a cluster-scoped resource, so this namespace field is ignored; PVCs from any namespace can bind it
13 | spec:
14 | capacity:
15 | storage: 1Ti # 此卷容量,单位支持 Ti T Gi G Mi M Ki K,可以改小,但强烈不建议
16 | accessModes:
17 |     - ReadWriteMany # many nodes may read and write; also ReadWriteOnce (a single node may read/write) and ReadOnlyMany (many nodes read-only); one node can host several Pods
18 | persistentVolumeReclaimPolicy: Retain # 删除pvc时,pv的回收策略,这里为保留。还有 Delete(删除)
19 | storageClassName: node-local # 存储分类定义,会被pvc引用
20 | hostPath: # 可换为 nfs 等其他存储
21 | path: /home/host-pv-dir
22 |
23 | ---
24 | apiVersion: v1
25 | kind: PersistentVolumeClaim
26 | metadata:
27 | name: pvc-hostpath
28 | spec:
29 | accessModes:
30 | - ReadWriteMany # 必须和PV一致才能匹配
31 | storageClassName: node-local # 存储分类定义,对应pv定义
32 | resources:
33 | requests:
34 | storage: 500Mi # pvc一旦创建,若要修改申请的空间大小,只能增加不能减少!
35 |
36 | ---
37 | apiVersion: v1
38 | kind: Pod
39 | metadata:
40 | name: busybox-use-pvc
41 | labels:
42 | app: busybox
43 | spec:
44 | containers:
45 | - name: write
46 | image: busybox
47 | command: [ "sh", "-c" ]
48 | args: [ "echo 'hellok8s, pvc used!' > /write_dir/data; sleep infinity" ]
49 | volumeMounts:
50 | - mountPath: /write_dir
51 | name: pvc-dir
52 | volumes:
53 | - name: pvc-dir
54 | persistentVolumeClaim:
55 | claimName: pvc-hostpath # 对应pvc名称
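Note (verification sketch, not part of the manifest): check that the claim is bound and the data reached the host path:

kubectl get pv,pvc        # both should report STATUS=Bound
kubectl exec busybox-use-pvc -- cat /write_dir/data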
--------------------------------------------------------------------------------
/pod_use_storageclass.yaml:
--------------------------------------------------------------------------------
1 | # 按顺序定义 sc,pvc,pod
2 | # https://kubernetes.io/zh-cn/docs/concepts/storage/storage-classes/#local
3 | apiVersion: storage.k8s.io/v1
4 | kind: StorageClass
5 | metadata:
6 | name: sc-hostpath
 7 | provisioner: kubernetes.io/no-provisioner # no dynamic provisioning; PVs (here backed by local/hostpath storage) have to be created manually
8 | # volumeBindingMode: WaitForFirstConsumer 表示等待Pod调度到节点后才会申领实际的卷空间;
9 | # - 默认是Immediate,表示直接在创建PVC时直接使用后端存储空间
10 | volumeBindingMode: WaitForFirstConsumer
11 | allowVolumeExpansion: true # 允许PV扩容(不允许缩容)
12 | reclaimPolicy: Delete # 默认Delete
13 |
14 | ---
15 | # 注意:一个PV只能绑定一个PVC。也就是说,使用hostpath搭配StorageClass时,当这个PV被任何一个Pod占用时(生成一个PVC),则这个PV无法再被其他Pod占用
16 | # - 所以在下面的 matchExpressions 处一般只会匹配一个节点;若要使用其他节点,则要再创建同storageClassName而不同名的PV资源
17 | apiVersion: v1
18 | kind: PersistentVolume
19 | metadata:
20 | name: sc-pv-hostpath
21 | spec:
22 | capacity:
23 | storage: 500Gi # 创建时并不会验证实际有这么多空间
24 | accessModes:
25 | - ReadWriteOnce
26 | persistentVolumeReclaimPolicy: Retain # 手动创建本地卷时,仅支持Retain
27 | storageClassName: sc-hostpath # 匹配上面的metadata.name
28 | local:
29 | path: /home/host-sc-pv-dir # 必须在节点手动创建
30 | nodeAffinity: # 创建本地卷的必须字段,k8s会在匹配下面条件的节点上创建PV,如果没有匹配的节点,则引用的Pod和PV都会一直pending(通过describe可以查看原因)
31 | required:
32 | nodeSelectorTerms:
33 | - matchExpressions:
34 | - key: kubernetes.io/hostname
35 | operator: In
36 | values:
37 | - k8s-node1
38 | ---
39 | apiVersion: v1
40 | kind: PersistentVolumeClaim
41 | metadata:
42 | name: sc-pvc-hostpath
43 | spec:
44 | accessModes:
45 | - ReadWriteOnce
46 | storageClassName: sc-hostpath
47 | resources:
48 | requests:
49 | storage: 500Gi
50 |
51 | ---
52 | apiVersion: v1
53 | kind: Pod
54 | metadata:
55 | name: busybox-use-sc-pvc
56 | labels:
57 | app: busybox
58 | spec:
59 | containers:
60 | - name: write
61 | image: busybox
62 | command: [ "sh", "-c" ]
63 | args: [ "echo 'hellok8s, pvc used!' > /write_dir/data; sleep infinity" ]
64 | volumeMounts:
65 | - mountPath: /write_dir
66 | name: pvc-dir
67 | volumes:
68 | - name: pvc-dir
69 | persistentVolumeClaim:
70 | claimName: sc-pvc-hostpath
--------------------------------------------------------------------------------
/pod_volume_emptydir.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busybox-emptydir
5 | labels:
6 | app: busybox
7 | spec:
8 | containers:
9 | - name: write # 负责写
10 | image: busybox
11 | command: [ "sh", "-c" ]
12 | args: [ "echo 'hellok8s!' > /write_dir/data; sleep infinity" ]
13 | volumeMounts:
14 | - mountPath: /write_dir
15 | name: temp-dir
16 | - name: read # 负责读
17 | image: busybox
18 | command: [ "sh", "-c" ]
19 | args: [ "cat /read_dir/data; sleep infinity" ]
20 | volumeMounts:
21 | - mountPath: /read_dir
22 | name: temp-dir
23 | readOnly: true # 可选
24 | volumes:
25 | - name: temp-dir
26 | emptyDir: {}
27 |
--------------------------------------------------------------------------------
/pod_volume_hostpath.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busybox-hostpath
5 | labels:
6 | app: busybox
7 | spec:
8 | containers:
9 | - name: write
10 | image: busybox
11 | command: [ "sh", "-c" ]
12 | args: [ "echo 'hellok8s!' > /write_dir/data; sleep infinity" ]
13 | volumeMounts:
14 | - mountPath: /write_dir
15 | name: temp-dir
16 | volumes:
17 | - name: temp-dir
18 | hostPath:
19 |       path: /home/middle/host-temp-dir # '~' is not allowed (the directory would not be found); with type DirectoryOrCreate below the kubelet creates the directory, otherwise it must already exist on the node or the Pod cannot start
20 | type: DirectoryOrCreate
--------------------------------------------------------------------------------
/pod_volume_nfs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: busybox-nfs
5 | labels:
6 | app: busybox
7 | spec:
8 | containers:
9 | - name: write
10 | image: busybox
11 | command: [ "sh", "-c" ]
12 | args: [ "echo 'hellok8s!' > /write_dir/data; sleep infinity" ]
13 | volumeMounts:
14 | - mountPath: /write_dir
15 | name: nfs-dir
16 | volumes:
17 | - name: nfs-dir
18 | nfs:
19 | path: /data/k8s-nfs
20 | server: my-nfs-server.example.com # 或者一个IP地址
21 |
--------------------------------------------------------------------------------
/pods_diff_labels.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: go-http-master
5 | labels:
6 | app: "go"
7 | spec:
8 | nodeName: k8s-master
9 | containers:
10 | - name: go-http
11 | image: leigg/hellok8s:v1
12 | ---
13 | apiVersion: v1
14 | kind: Pod
15 | metadata:
16 | name: go-http-node1
17 | labels:
18 | app: "go"
19 | highcpu: "true"
20 | spec:
21 | nodeName: k8s-node1
22 | containers:
23 | - name: go-http
24 | image: leigg/hellok8s:v1
--------------------------------------------------------------------------------
/pvc_hostpath.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: pvc-hostpath-2
5 | spec:
6 | accessModes:
7 | - ReadWriteMany
8 | storageClassName: "node-local" # 存储分类定义,对应pv定义
9 | resources:
10 | requests:
11 | storage: 100Mi # pvc一旦创建,若要修改申请的空间大小,只能增加不能减少!
--------------------------------------------------------------------------------
/rbac_aggregate_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: monitoring
5 | aggregationRule:
6 | clusterRoleSelectors: # 匹配其他的 ClusterRole
7 | - matchLabels:
8 | rbac.example.com/aggregate-to-monitoring: "true"
9 | rules: [ ] # api-server 自动填充这里的规则(聚合匹配的 ClusterRole 的规则)
10 |
11 | ---
12 | apiVersion: rbac.authorization.k8s.io/v1
13 | kind: ClusterRole
14 | metadata:
15 | name: secret-reader
16 | labels:
17 | rbac.example.com/aggregate-to-monitoring: "true"
18 | rules:
19 | - apiGroups: [ "" ]
20 | resources: [ "secrets" ]
21 | verbs: [ "get", "watch", "list" ]
22 |
23 | ---
24 | apiVersion: rbac.authorization.k8s.io/v1
25 | kind: ClusterRole
26 | metadata:
27 | name: deployment-reader
28 | labels:
29 | rbac.example.com/aggregate-to-monitoring: "true"
30 | rules:
31 |   - apiGroups: [ "apps" ] # Deployments live in the "apps" API group, not the core group
32 | resources: [ "deployments" ]
33 | verbs: [ "get", "watch", "list" ]
--------------------------------------------------------------------------------
/rbac_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: secret-reader
5 | rules:
6 | - apiGroups: [ "" ]
7 | resources: [ "secrets" ]
8 | verbs: [ "get", "watch", "list" ]
--------------------------------------------------------------------------------
/rbac_clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | # ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: read-secrets-global
6 | subjects:
7 | - kind: User
8 | name: user2
9 | namespace: default
10 | roleRef:
11 | kind: ClusterRole # 不支持 Role
12 | name: secret-reader
13 | apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/rbac_role.yaml:
--------------------------------------------------------------------------------
1 | # 这是一个位于 default 名字空间的 Role 的示例,可用来授予对 Pod 的读访问权限
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | namespace: default
6 | name: pod-reader
7 | rules:
8 | # 下面几个属性可使用 ["*"] 表所有
9 | # apiGroups 表示要访问API组
10 | # "" 表示核心 API 组(/api/v1),查看v1.28所有API组 https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#api-groups
11 | - apiGroups: [ "" ]
12 | # resources 表示要访问的资源名称列表,例如 pods、deployments、services 等
13 | # 通过访问公开API获取所有的资源名称,如下
14 | # kubectl proxy --port 8080
15 | # curl http://localhost:8080/api/v1 |jq '.resources[].name'
16 | resources: [ "pods" ] # 某些资源拥有子资源,通过斜杠分割,例如 "pods/log" (默认不能访问子资源)
17 | # verbs 请求动词,支持create、【watch/get/list】、patch、update、【delete/deletecollection】等
18 | # 通过访问公开API获取指定资源的请求动词,如下
19 | # kubectl proxy --port 8080
20 | # 查看所有API资源列表:kubectl api-resources (结果不含子资源)
21 | # 查看某个API的子资源以及支持的verbs(JSON):kubectl get --raw="/api/v1" (注意/api/v1指的是v1)
22 | # 查看某个API的子资源以及支持的verbs(JSON):kubectl get --raw="/apis/storage.k8s.io/v1"(除了v1组,查看其他API都需要加上`/apis`的前缀)
23 | # 快速查看某个API支持的verbs,以pods为例:
24 | # kubectl get --raw="/api/v1" |jq '.resources[] | select(.name == "pods") | .verbs'
25 | # 还有一些针对某些资源的特殊动词,https://kubernetes.io/zh-cn/docs/reference/access-authn-authz/authorization/#determine-the-request-verb
26 | verbs: [ "get", "watch", "list" ]
27 | resourceNames: [ "nginx", "busybox" ] # 可选,若忽略则不限制资源名称,等同 ["*"]
--------------------------------------------------------------------------------
/rbac_role_granter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: role-grantor
5 | rules:
6 | - apiGroups: [ "rbac.authorization.k8s.io" ]
7 | resources: [ "rolebindings" ]
8 | verbs: [ "create" ]
9 | - apiGroups: [ "rbac.authorization.k8s.io" ]
10 | resources: [ "clusterroles" ]
11 | verbs: [ "bind" ]
12 | # 忽略 resourceNames 意味着允许绑定任何 ClusterRole
13 | resourceNames: [ "admin","edit","view" ]
14 | ---
15 | apiVersion: rbac.authorization.k8s.io/v1
16 | kind: RoleBinding
17 | metadata:
18 | name: role-grantor-binding
19 | namespace: user-1-namespace
20 | subjects:
21 | - apiGroup: rbac.authorization.k8s.io
22 | kind: User
23 | name: user-1
24 | roleRef:
25 | apiGroup: rbac.authorization.k8s.io
26 | kind: ClusterRole
27 | name: role-grantor
--------------------------------------------------------------------------------
/rbac_rolebinding.yaml:
--------------------------------------------------------------------------------
1 | # RoleBinding 与 Role 绑定到 特定的普通/服务账户(组)
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: RoleBinding
4 | metadata:
5 | name: read-pods
6 | namespace: default # 授权范围限定在此空间内,而不是下面用户所属空间或Role定义的空间
7 | subjects: # 你可以指定不止一个subject(主体),包含用户、组或服务账户
8 | - kind: User
 9 |     name: user2 # "name" is case-sensitive; the user's existence is not checked at creation time, and it must not be a system user (prefixed with system:)
10 | namespace: default # default可省略
11 | - kind: User
12 | name: user3
13 | - kind: ServiceAccount
14 | name: sa1 # 不能是系统服务用户(以system:serviceaccount:作为前缀)
15 | namespace: default # default可省略
16 |   # When using Group subjects, besides custom user groups note the naming of the built-in service-account groups:
17 |   #  - system:serviceaccounts:qa      all service accounts in the qa namespace
18 |   #  - system:serviceaccounts         all service accounts in any namespace
19 |   #  - system:authenticated           any authenticated user
20 |   #  - system:unauthenticated         any UNauthenticated user; combined with system:authenticated it matches every user
21 | - kind: Group
22 | name: group
23 | roleRef:
24 | # 指定与当前命名空间中的某 Role 或 ClusterRole 的绑定关系
25 | # roleRef 不可修改,只能重建RoleBinding
26 | kind: Role # 此字段必须是 Role 或 ClusterRole
27 | name: pod-reader # 此字段必须与你要绑定的 Role 或 ClusterRole 的名称匹配
28 | apiGroup: rbac.authorization.k8s.io # 可为 ""
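Note (verification sketch, not part of the manifest): impersonation makes the effect of the binding easy to check (the examples assume the pod-reader Role from rbac_role.yaml, which limits resourceNames to nginx/busybox):

kubectl auth can-i get pods/nginx --as user2 -n default      # yes
kubectl auth can-i delete pods/nginx --as user2 -n default   # no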
--------------------------------------------------------------------------------
/rbac_rolebinding_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | # ClusterRole + RoleBinding
2 | # 用户权限范围限制在 RoleBinding 的命名空间内(只能访问default空间内的configmap)
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: ClusterRole
5 | metadata:
6 | name: configmap-reader
7 | rules:
8 | - apiGroups: [ "" ]
 9 |     resources: [ "configmaps" ] # resource names are lowercase and plural
10 | verbs: [ "get", "watch", "list" ]
11 |
12 | ---
13 | apiVersion: rbac.authorization.k8s.io/v1
14 | kind: RoleBinding
15 | metadata:
16 | name: read-configmap
17 | namespace: default
18 | subjects:
19 | - kind: User
20 | name: user2
21 | namespace: default
22 | roleRef:
23 | kind: ClusterRole
24 | name: configmap-reader
25 | apiGroup: rbac.authorization.k8s.io
26 |
27 |
--------------------------------------------------------------------------------
/replicaset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: ReplicaSet
3 | metadata:
4 | name: frontend
5 | labels:
6 | app: guestbook
7 | tier: frontend
8 | spec:
9 | # 按你的实际情况修改副本数
10 | replicas: 3
11 | selector:
12 | matchLabels:
13 | tier: frontend
14 | template:
15 | metadata:
16 | labels:
17 | tier: frontend
18 | spec:
19 | containers:
20 | - name: php-redis
21 | image: gcr.io/google_samples/gb-frontend:v3
22 |
--------------------------------------------------------------------------------
/resource-quota-scope-invalid.yaml:
--------------------------------------------------------------------------------
1 | #此模板无法通过验证
2 | # kk apply -f resource-quota-scope-invalid.yaml
 3 | #The ResourceQuota "quota-default-scope-invalid" is invalid: spec.scopeSelector.matchExpressions:
4 | # Invalid value:
5 | # core.ScopeSelector{MatchExpressions:[]core.ScopedResourceSelectorRequirement{core.ScopedResourceSelectorRequirement{ScopeName:"Terminating", Operator:"Exists", Values:[]string(nil)}}}:
6 | # unsupported scope applied to resource
7 |
8 | apiVersion: v1
9 | kind: ResourceQuota
10 | metadata:
11 | name: quota-default-scope-invalid
12 | # namespace: default # 管理哪个命名空间下的资源配额
13 | spec:
14 | hard:
15 | pods: "1"
16 | requests.storage: "10Gi" # 不能被 Terminating 跟踪,所以此模板无法通过验证
17 |
18 | # 配额作用域
19 | scopeSelector:
20 | matchExpressions:
21 | - scopeName: Terminating # 匹配所有 spec.activeDeadlineSeconds 不小于 0 的 Pod
22 | operator: Exists
23 |
--------------------------------------------------------------------------------
/resource-quota-scope.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: quota-default-scope
5 | # namespace: default # 管理哪个命名空间下的资源配额
6 | spec:
7 | hard:
8 | limits.cpu: "10" # 所有非终止状态的 Pod,其 CPU 限额总量不能超过该值
9 | limits.memory: "1Gi" # 所有非终止状态的 Pod,其内存限额总量不能超过该值
10 | requests.cpu: "10" # 所有非终止状态的 Pod,其 CPU 需求总量不能超过该值
11 | requests.memory: "500Mi" # 所有非终止状态的 Pod,其内存需求总量不能超过该值
12 |     # hugepages-<size>: "10Gi" # for all non-terminal Pods, total requests for huge pages of the given size cannot exceed this value
13 | cpu: "10" # 同 requests.cpu
14 | memory: "500Mi" # 同 requests.memory
15 | pods: "1"
16 |
17 | # 配额作用域
18 | scopeSelector:
19 | matchExpressions:
20 | - scopeName: Terminating # 匹配所有 spec.activeDeadlineSeconds 不小于 0 的 Pod
21 | operator: Exists
22 | - scopeName: BestEffort # 匹配所有 Qos 是 BestEffort 的 Pod
23 | operator: Exists
--------------------------------------------------------------------------------
/resource-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: quota-default
5 | # namespace: default # 管理哪个命名空间下的资源配额
6 | spec:
7 | hard:
8 | # 计算资源配额
9 | limits.cpu: "10" # 所有非终止状态的 Pod,其 CPU 限额总量不能超过该值
10 | limits.memory: "1Gi" # 所有非终止状态的 Pod,其内存限额总量不能超过该值
11 | requests.cpu: "10" # 所有非终止状态的 Pod,其 CPU 需求总量不能超过该值
12 | requests.memory: "500Mi" # 所有非终止状态的 Pod,其内存需求总量不能超过该值
13 |     # hugepages-<size>: "10Gi" # for all non-terminal Pods, total requests for huge pages of the given size cannot exceed this value
14 | cpu: "10" # 同 requests.cpu
15 | memory: "500Mi" # 同 requests.memory
16 |
17 | # 存储资源配额
18 | requests.storage: "10Gi" #所有 PVC,存储资源的需求总量不能超过该值
19 | persistentvolumeclaims: "10" # 在该命名空间中所允许的 PVC 总量
20 |     # <storage-class-name>.storageclass.storage.k8s.io/requests.storage: 10Gi   total storage requested by PVCs of that storage class cannot exceed this value
21 |     # <storage-class-name>.storageclass.storage.k8s.io/persistentvolumeclaims: 10   max number of PVCs of that storage class allowed in the namespace
22 |
23 | # 对象数量配额
24 | configmaps: "10" # 在该命名空间中允许存在的 ConfigMap 总数上限
25 |     pods: "50" # max number of non-terminal Pods in the namespace; a Pod counts as terminal when .status.phase in (Failed, Succeeded) is true (too many Pods can exhaust the IP range)
26 | replicationcontrollers: "20" # 在该命名空间中允许存在的 ReplicationController 总数上限
27 | resourcequotas: "5" # 在该命名空间中允许存在的 ResourceQuota 总数上限
28 | services: "10" # 在该命名空间中允许存在的 Service 总数上限
29 | services.loadbalancers: "5" # 在该命名空间中允许存在的 LoadBalancer 类型的 Service 总数上限
30 | services.nodeports: "5" # 在该命名空间中允许存在的 NodePort 类型的 Service 总数上限
31 | secrets: "10" # 在该命名空间中允许存在的 Secret 总数上限
32 |
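Note (usage sketch, not part of the manifest): current consumption against the quota is reported per tracked resource:

kubectl apply -f resource-quota.yaml
kubectl describe resourcequota quota-default -n default   # compares "Used" with "Hard"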
--------------------------------------------------------------------------------
/secret-hellok8s-misc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: hellok8s-secret
5 | data:
6 | DB_PASSWD: cGFzczEyMwo= # echo pass123 |base64
7 |
8 | stringData:
9 | some.txt: "hello world"
10 |
11 | cert.key: |-
12 | -----BEGIN OPENSSH PRIVATE KEY-----
13 | J1a9V50zOAl0k2Fpmy+RDvCy/2LeCZHyWY9MR248Ah2Ko3VywDrevdPIz8bxg9zxqy0+xy
14 | jbu09sNix9b0IZuZQbbGkw4C4RcAN5HZ4UnWWRfzv2KgtXSdJCPp38hsWH2j9hmlNXLZz0
15 | EqqtXGJpxjV67NAAAACkxlaWdnQEx1eWk=
16 | -----END OPENSSH PRIVATE KEY-----
17 |
18 | config.yaml: |-
19 | username: hellok8s
20 | password: pass123
21 |
22 |
23 | # 对于一个大量使用 secret 的集群,禁用修改会带来以下好处
24 | # 1. 保护应用,使之免受意外(不想要的)更新所带来的负面影响。
25 | # 2. 通过大幅降低对 kube-apiserver 的压力提升集群性能, 这是因为系统会关闭对已标记为不可变更的 secret 的监视操作。
26 | # 一旦标记为不可更改,这个操作就不可逆,再想要修改就只能删除并重建 secret
27 | immutable: true
28 |
--------------------------------------------------------------------------------
/secret-serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | # 需要提前创建sa
2 | # 创建secret后,可通过 kk describe secret secret-sa-sample 查看token
3 | # 若要删除,直接删除sa即可(自动删除绑定的secret)
4 | apiVersion: v1
5 | kind: Secret
6 | metadata:
7 | name: secret-sa-sample
8 | namespace: default
9 | annotations:
10 | kubernetes.io/service-account.name: sa1 # 关联已存在的sa(若sa不存在,则无法创建secret)
11 | type: kubernetes.io/service-account-token # 自动为secret创建token
12 | # 创建后,secret会自动拥有 ca.crt, token数据 (通过kk get secret secret-sa-sample -o yaml 查看)
--------------------------------------------------------------------------------
/service-clusterip-externalip.yaml:
--------------------------------------------------------------------------------
1 | # 在任一节点上访问
2 | #$ curl 192.168.31.100:3000
3 | #[v3] Hello, Kubernetes!, From host: hellok8s-go-http-6bb87f8cb5-57r86
4 | apiVersion: v1
5 | kind: Service
6 | metadata:
7 | name: service-hellok8s-clusterip-externalip
8 | spec:
9 | type: ClusterIP
10 | selector:
11 | app: hellok8s
12 | ports:
13 | - port: 3000
14 | targetPort: 3000
15 | externalIPs:
16 | - 10.10.10.10 # 任意局域网IP都可
--------------------------------------------------------------------------------
/service-clusterip-headless.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: service-hellok8s-clusterip-headless
5 | spec:
6 | type: ClusterIP # 这行是默认的,可省略
7 | # sessionAffinity: ClientIP # or None, 设置会话亲和性(ClientIP表示同一客户端ip的请求会路由到同个Pod)
8 | # sessionAffinityConfig:
9 | # clientIP:
10 | # timeoutSeconds: 3600 # 范围 0~86400,默认10800(3h)
11 | clusterIP: None # None 表示不分配集群IP
12 | selector:
13 | app: hellok8s # 通过selector 选择映射的pod
14 | ports: # 可选字段。若不配置,则 kube-proxy 转发时不做任何处理,请求直达DNS解析后的某个Pod
15 | - port: 3000 # service端口
16 | targetPort: 3000 # 后端pod端口
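Note (verification sketch, not part of the manifest): a headless Service resolves directly to the Pod IPs (one A record per ready Pod) instead of a single ClusterIP. Assumes the busybox Pod from pod_busybox.yaml is running and Pods labelled app=hellok8s exist:

kubectl exec busybox -- nslookup service-hellok8s-clusterip-headless.default.svc.cluster.local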
--------------------------------------------------------------------------------
/service-clusterip.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: service-hellok8s-clusterip
5 | spec:
6 | type: ClusterIP # this is the default and can be omitted
7 | # sessionAffinity: ClientIP # or None; session affinity (ClientIP routes requests from the same client IP to the same Pod)
8 | # sessionAffinityConfig:
9 | # clientIP:
10 | # timeoutSeconds: 3600 # range 0~86400, default 10800 (3h)
11 | selector:
12 | app: hellok8s # associate the group of Pods via the selector
13 | ports:
14 | - port: 3000 # Service port
15 | targetPort: 3000 # backend Pod port
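A quick in-cluster check of the ClusterIP Service, assuming the hellok8s Deployment from the tutorial is running and listens on port 3000 (the busybox tag is an assumption):

$ kubectl get svc service-hellok8s-clusterip   # note the CLUSTER-IP column
$ kubectl run curl-test --rm -it --restart=Never --image=busybox:1.36 -- \
    wget -qO- http://service-hellok8s-clusterip:3000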
--------------------------------------------------------------------------------
/service-externalname.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: service-hellok8s-externalname # this name is used as the host for in-cluster access
5 | namespace: default # can be omitted
6 | spec:
7 | type: ExternalName
8 | externalName: www.baidu.com # must be a valid DNS name; it cannot contain / and cannot be an IP (an IP can be set, but it will not resolve properly)
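Inside the cluster the ExternalName Service behaves as a DNS CNAME; a hedged check from a throwaway Pod (the busybox tag is an assumption):

$ kubectl run dns-test --rm -it --restart=Never --image=busybox:1.36 -- \
    nslookup service-hellok8s-externalname.default.svc.cluster.local
# the answer should point at www.baidu.com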
--------------------------------------------------------------------------------
/service-headless-endpoints.yaml:
--------------------------------------------------------------------------------
1 | # From inside a curl container
2 | # ping service-headless-and-endpoint.default.svc.cluster.local
3 | #PING service-headless-and-endpoint.default.svc.cluster.local (14.119.104.189): 56 data bytes
4 | #64 bytes from 14.119.104.189: seq=0 ttl=54 time=12.529 ms
5 | #64 bytes from 14.119.104.189: seq=1 ttl=54 time=12.661 ms
6 | # -
7 | apiVersion: v1
8 | kind: Service
9 | metadata:
10 | name: service-headless-and-endpoint # must match the metadata.name of the Endpoints object below
11 | spec:
12 | clusterIP: None # headless service
13 | # selector: {} # no selector is set
14 | ports:
15 | - protocol: TCP
16 | port: 80
17 | targetPort: 80 # must match the port in the Endpoints below, otherwise traffic cannot be forwarded
18 | ---
19 | apiVersion: v1
20 | kind: Endpoints
21 | metadata:
22 | name: service-headless-and-endpoint
23 | subsets:
24 | - addresses:
25 | - ip: 14.119.104.189 # baidu.com ip
26 | ports:
27 | - port: 80
--------------------------------------------------------------------------------
/service-loadbalancer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: service-hellok8s-loadbalancer
5 | annotations: # example configuration for an Alibaba Cloud intranet SLB; SLB behavior is usually controlled via annotations
6 | # specify that the SLB instance address type is intranet (private network).
7 | service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: intranet
8 | # replace with your intranet SLB instance ID.
9 | service.beta.kubernetes.io/alibaba-cloud-loadbalancer-id:
10 | # whether to automatically create SLB listeners (this overwrites existing listeners); listeners can also be created manually.
11 | service.beta.kubernetes.io/alibaba-cloud-loadbalancer-force-override-listeners: 'true'
12 | spec:
13 | type: LoadBalancer
14 | selector:
15 | app: hellok8s
16 | ports:
17 | - port: 80
18 | name: http
19 | targetPort: 80
20 | - port: 443
21 | name: https
22 | targetPort: 443
23 | # externalTrafficPolicy: Local # or Cluster; Local preserves the client source IP, Cluster (default) does not preserve the source IP but gives better load balancing across the cluster
24 | # healthCheckNodePort: 80 # optional; sets the node health-check port (a port is chosen automatically if unset)
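Once the cloud controller has provisioned the SLB, its address appears on the Service; a minimal check (the address below is a placeholder):

$ kubectl get svc service-hellok8s-loadbalancer   # EXTERNAL-IP is filled in when the SLB is ready
$ curl http://<EXTERNAL-IP>/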
--------------------------------------------------------------------------------
/service-nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: service-hellok8s-nodeport
5 | spec:
6 | type: NodePort
7 | selector:
8 | app: hellok8s
9 | ports:
10 | - port: 3000 # Service port; with targetPort omitted it defaults to the same value, which is also the Pod port here
11 | nodePort: 30000 # fixed port on every node; for NodePort Services, Kubernetes requires it to be in the 30000-32767 range, otherwise apply fails
12 | # to expose multiple ports, use the form below
13 | # - name: http
14 | # protocol: TCP
15 | # port: 80
16 | # targetPort: 9376
17 | # - name: https
18 | # protocol: TCP
19 | # port: 443
20 | # targetPort: 9377
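With the NodePort in place, the application is reachable on port 30000 of every node; <node-ip> below is a placeholder for any node's address:

$ kubectl get svc service-hellok8s-nodeport
$ curl <node-ip>:30000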
--------------------------------------------------------------------------------
/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | # The sa can also be created with a command: kubectl create serviceaccount sa1
2 | # After the sa is created, request a token for it with kubectl create token sa1 --duration 3600s, and it is ready to use
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | name: sa1
7 | namespace: default
8 |
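A sketch of requesting and using a short-lived token for sa1; the API server address is a placeholder, and in a default cluster the /api discovery endpoint is readable by any authenticated identity:

$ kubectl apply -f serviceaccount.yaml
$ TOKEN=$(kubectl create token sa1 --duration 3600s)
$ curl -sk -H "Authorization: Bearer $TOKEN" https://<apiserver>:6443/api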
--------------------------------------------------------------------------------
/stateful-nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | ports:
9 | - port: 80
10 | name: web-sts-svc
11 | clusterIP: None
12 | selector:
13 | app: nginx
14 | ---
15 | apiVersion: apps/v1
16 | kind: StatefulSet
17 | metadata:
18 | name: stateful-nginx
19 | spec:
20 | serviceName: "nginx"
21 | replicas: 2
22 | podManagementPolicy: Parallel # controls Pod scale-up/down behavior; Parallel starts/stops multiple Pods at once instead of one by one (the default, OrderedReady)
23 | updateStrategy:
24 | rollingUpdate:
25 | partition: 0
26 | type: RollingUpdate
27 | selector:
28 | matchLabels:
29 | app: nginx
30 | template:
31 | metadata:
32 | labels:
33 | app: nginx
34 | spec:
35 | containers:
36 | - name: nginx
37 | image: nginx:1.7.9
38 | ports:
39 | - containerPort: 80
40 | name: web
41 |
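Because the StatefulSet is governed by the headless Service nginx, each replica gets a stable DNS name of the form <pod-name>.nginx.default.svc.cluster.local; a quick check (the busybox tag is an assumption):

$ kubectl get pods -l app=nginx   # stateful-nginx-0, stateful-nginx-1
$ kubectl run dns-test --rm -it --restart=Never --image=busybox:1.36 -- \
    nslookup stateful-nginx-0.nginx.default.svc.cluster.local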
--------------------------------------------------------------------------------