├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── api ├── google.golang.org │ └── grpc │ │ └── examples │ │ └── helloworld │ │ └── helloworld │ │ ├── helloworld.pb.go │ │ └── helloworld_grpc.pb.go └── helloworld.proto ├── deployment ├── deployment-istio.yaml ├── deployment-service-2.yaml ├── deployment-service.yaml ├── deployment.yaml ├── ingress-nginx.yaml ├── ingress.yaml ├── istio-ingress-gateway.yaml ├── istio-mesh.yaml ├── probe │ └── probe.yaml └── telemetry │ ├── deployment-log.yaml │ └── elasticsearch.yaml ├── docs └── title.png ├── go.mod └── main.go /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # 第一阶段:编译 Go 程序 2 | FROM golang:1.19 AS dependencies 3 | ENV GOPROXY=https://goproxy.cn,direct 4 | WORKDIR /go/src/app 5 | COPY go.mod . 6 | #COPY ../../go.sum . 7 | RUN --mount=type=ssh go mod download 8 | 9 | # 第二阶段:构建可执行文件 10 | FROM golang:1.19 AS builder 11 | WORKDIR /go/src/app 12 | COPY . . 13 | #COPY --from=dependencies /go/pkg /go/pkg 14 | RUN go build 15 | 16 | # 第三阶段:部署 17 | FROM debian:stable-slim 18 | #RUN apt-get update && apt-get install -y curl 19 | COPY --from=builder /go/src/app/k8s-combat /go/bin/k8s-combat 20 | ENV PATH="/go/bin:${PATH}" 21 | 22 | # 启动 Go 程序 23 | CMD ["k8s-combat"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | clean: 3 | go clean 4 | 5 | build: 6 | go build 7 | 8 | docker: 9 | @echo "Docker Build..." 10 | docker build . -t crossoverjie/k8s-combat:log && docker image push crossoverjie/k8s-combat:log -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ![](docs/title.png) 3 | 4 | # 背景 5 | 最近这这段时间更新了一些 k8s 相关的博客和视频,也收到了一些反馈;大概分为这几类: 6 | - 公司已经经历过服务化改造了,但还未接触过云原生。 7 | - 公司部分应用进行了云原生改造,但大部分工作是由基础架构和运维部门推动的,自己只是作为开发并不了解其中的细节,甚至 k8s 也接触不到。 8 | - 还处于比较传统的以虚拟机部署的传统运维为主。 9 | 10 | 其中以第二种占大多数,虽然公司进行了云原生改造,但似乎和纯业务研发同学来说没有太大关系,自己工作也没有什么变化。 11 | 12 | 恰好我之前正好从业务研发的角度转换到了基础架构部门,两个角色我都接触过,也帮助过一些业务研发了解公司的云原生架构; 13 | 14 | 为此所以我想系统性的带大家以**研发**的角度对 k8s 进行实践。 15 | 16 | 因为 k8s 部分功能其实是偏运维的,对研发来说优先级并不太高; 17 | 所以我不太会涉及一些 k8s 运维的知识点,比如安装、组件等模块;主要以我们日常开发会使用到的组件为主。 18 | 19 | ![](https://s2.loli.net/2023/11/25/aG8djFcgHse1ykb.png) 20 | 21 | 22 | # 入门 23 | - [部署应用到 k8s](#部署应用到-k8s) 24 | - [跨服务调用](#跨服务调用) 25 | - [集群外部访问-Ingress](#集群外部访问) 26 | 27 | # 进阶 28 | - [如何使用配置](#如何使用配置) 29 | - [服务网格实战](#istio-入门) 30 | - [配置Mesh](#配置mesh) 31 | - [配置网关](#配置网关) 32 | 33 | # 运维你的应用 34 | - [应用探针](#应用探针) 35 | - 滚动更新与回滚 36 | - 优雅采集日志 37 | - 应用可观测性 38 | - 指标可视化 39 | 40 | ---- 41 | 42 | # 部署应用到 k8s 43 | 44 | 首先从第一章【部署应用到 k8s】开始,我会用 Go 写一个简单的 Web 应用,然后打包为一个 Docker 镜像,之后部署到 k8s 中,并完成其中的接口调用。 45 | 46 | ## 编写应用 47 | 48 | ```go 49 | func main() { 50 | http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { 51 | log.Println("ping") 52 | fmt.Fprint(w, "pong") 53 | }) 54 | 55 | http.ListenAndServe(":8081", nil) 56 | } 57 | ``` 58 | 59 | 应用非常简单就是提供了一个 `ping` 接口,然后返回了一个 `pong`. 60 | 61 | ## Dockerfile 62 | 63 | ```dockerfile 64 | # 第一阶段:编译 Go 程序 65 | FROM golang:1.19 AS dependencies 66 | ENV GOPROXY=https://goproxy.cn,direct 67 | WORKDIR /go/src/app 68 | COPY go.mod . 69 | #COPY ../../go.sum . 70 | RUN --mount=type=ssh go mod download 71 | 72 | # 第二阶段:构建可执行文件 73 | FROM golang:1.19 AS builder 74 | WORKDIR /go/src/app 75 | COPY . . 76 | #COPY --from=dependencies /go/pkg /go/pkg 77 | RUN go build 78 | 79 | # 第三阶段:部署 80 | FROM debian:stable-slim 81 | RUN apt-get update && apt-get install -y curl 82 | COPY --from=builder /go/src/app/k8s-combat /go/bin/k8s-combat 83 | ENV PATH="/go/bin:${PATH}" 84 | 85 | # 启动 Go 程序 86 | CMD ["k8s-combat"] 87 | ``` 88 | 89 | 之后编写了一个 `dockerfile` 用于构建 `docker` 镜像。 90 | 91 | ```makefile 92 | docker: 93 | @echo "Docker Build..." 94 | docker build . 
-t crossoverjie/k8s-combat:v1 && docker image push crossoverjie/k8s-combat:v1 95 | ``` 96 | 97 | 使用 `make docker` 会在本地构建镜像并上传到 `dockerhub` 98 | 99 | ## 编写 deployment 100 | 下一步便是整个过程中最重要的环节了,也是唯一和 k8s 打交道的地方,那就是编写 deployment。 101 | 102 | 103 | 在之前的视频[《一分钟了解 k8s》](【一分钟带你了解 k8s】 https://www.bilibili.com/video/BV1Cm4y1n7yG/?share_source=copy_web&vd_source=358858ab808efe832b0dda9dbc4701da) 104 | 中讲过常见的组件: 105 | ![image.png](https://s2.loli.net/2023/09/04/hrOUSVsmP2KkNlC.png) 106 | 107 | 其中我们最常见的就是 deployment,通常用于部署无状态应用;现在还不太需要了解其他的组件,先看看 deployment 如何编写: 108 | ```yaml 109 | apiVersion: apps/v1 110 | kind: Deployment 111 | metadata: 112 | labels: 113 | app: k8s-combat 114 | name: k8s-combat 115 | spec: 116 | replicas: 1 117 | selector: 118 | matchLabels: 119 | app: k8s-combat 120 | template: 121 | metadata: 122 | labels: 123 | app: k8s-combat 124 | spec: 125 | containers: 126 | - name: k8s-combat 127 | image: crossoverjie/k8s-combat:v1 128 | imagePullPolicy: Always 129 | resources: 130 | limits: 131 | cpu: "1" 132 | memory: 300Mi 133 | requests: 134 | cpu: "0.1" 135 | memory: 30Mi 136 | ``` 137 | 138 | 开头两行的 `apiVersion` 和 `kind` 可以暂时不要关注,就理解为 deployment 的固定写法即可。 139 | 140 | metadata:顾名思义就是定义元数据的地方,告诉 `Pod` 我们这个 `deployment` 叫什么名字,这里定义为:`k8s-combat` 141 | 142 | 中间的: 143 | ```yaml 144 | metadata: 145 | labels: 146 | app: k8s-combat 147 | ``` 148 | 149 | 也很容易理解,就是给这个 `deployment` 打上标签,通常是将这个标签和其他的组件进行关联使用才有意义,不然就只是一个标签而已。 150 | > 标签是键值对的格式,key, value 都可以自定义。 151 | 152 | 而这里的 `app: k8s-combat` 便是和下面的 spec 下的 selector 选择器匹配,表明都使用 `app: k8s-combat` 进行关联。 153 | 154 | 而 template 中所定义的标签也是为了让选择器和 template 中的定义的 Pod 进行关联。 155 | 156 | > Pod 是 k8s 中相同功能容器的分组,一个 Pod 可以绑定多个容器,这里就只有我们应用容器一个了;后续在讲到 istio 和日志采集时便可以看到其他的容器。 157 | 158 | template 中定义的内容就很容易理解了,指定了我们的容器拉取地址,以及所占用的资源(`cpu/ memory`)。 159 | 160 | `replicas: 1`:表示只部署一个副本,也就是只有一个节点的意思。 161 | 162 | ## 部署应用 163 | 164 | 之后我们使用命令: 165 | 166 | ```shell 167 | kubectl apply -f deployment/deployment.yaml 168 | ``` 169 | 170 | > 生产环境中往往会使用云厂商所提供的 k8s 环境,我们本地可以使用 [https://minikube.sigs.k8s.io/docs/start/](https://minikube.sigs.k8s.io/docs/start/) minikube 来模拟。 171 | 172 | 就会应用这个 deployment 同时将容器部署到 k8s 中,之后使用: 173 | ```shell 174 | kubectl get pod 175 | ``` 176 | > 在后台 k8s 会根据我们填写的资源选择一个合适的节点,将当前这个 Pod 部署过去。 177 | 178 | 就会列出我们刚才部署的 Pod: 179 | ```shell 180 | ❯ kubectl get pod 181 | NAME READY STATUS RESTARTS AGE 182 | k8s-combat-57f794c59b-7k58n 1/1 Running 0 17h 183 | ``` 184 | 185 | 我们使用命令: 186 | ```shell 187 | kubectl exec -it k8s-combat-57f794c59b-7k58n bash 188 | ``` 189 | 就会进入我们的容器,这个和使用 docker 类似。 190 | 191 | 之后执行 curl 命令便可以访问我们的接口了: 192 | ```shell 193 | root@k8s-combat-57f794c59b-7k58n:/# curl http://127.0.0.1:8081/ping 194 | pong 195 | root@k8s-combat-57f794c59b-7k58n:/# 196 | ``` 197 | 198 | 这时候我们再开一个终端执行: 199 | ``` 200 | ❯ kubectl logs -f k8s-combat-57f794c59b-7k58n 201 | 2023/09/03 09:28:07 ping 202 | ``` 203 | 便可以打印容器中的日志,当然前提是应用的日志是写入到了标准输出中。 204 | 205 | 206 | 207 | 208 | # 跨服务调用 209 | 210 | 在做传统业务开发的时候,当我们的服务提供方有多个实例时,往往我们需要将对方的服务列表保存在本地,然后采用一定的算法进行调用;当服务提供方的列表变化时还得及时通知调用方。 211 | 212 | ```yaml 213 | student: 214 | url: 215 | - 192.168.1.1:8081 216 | - 192.168.1.2:8081 217 | ``` 218 | 219 | 这样自然是对双方都带来不少的负担,所以后续推出的服务调用框架都会想办法解决这个问题。 220 | 221 | 以 `spring cloud` 为例: 222 | ![image.png](https://s2.loli.net/2023/09/06/IW1jaidQ25Xk9u4.png) 223 | 224 | 服务提供方会向一个服务注册中心注册自己的服务(名称、IP等信息),客户端每次调用的时候会向服务注册中心获取一个节点信息,然后发起调用。 225 | 226 | 但当我们切换到 `k8s` 后,这些基础设施都交给了 `k8s` 处理了,所以 `k8s` 自然得有一个组件来解决服务注册和调用的问题。 227 | 228 | 也就是我们今天重点介绍的 `service`。 229 | 230 | 231 | # 
service 232 | 233 | 在介绍 `service` 之前我先调整了源码: 234 | ```go 235 | func main() { 236 | http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { 237 | name, _ := os.Hostname() 238 | log.Printf("%s ping", name) 239 | fmt.Fprint(w, "pong") 240 | }) 241 | http.HandleFunc("/service", func(w http.ResponseWriter, r *http.Request) { 242 | resp, err := http.Get("http://k8s-combat-service:8081/ping") 243 | if err != nil { 244 | log.Println(err) 245 | fmt.Fprint(w, err) 246 | return 247 | } 248 | fmt.Fprint(w, resp.Status) 249 | }) 250 | 251 | http.ListenAndServe(":8081", nil) 252 | } 253 | ``` 254 | 新增了一个 `/service` 的接口,这个接口会通过 service 的方式调用服务提供者的服务,然后重新打包。 255 | 256 | ```shell 257 | make docker 258 | ``` 259 | 260 | 同时也新增了一个 `deployment-service.yaml`: 261 | ```yaml 262 | apiVersion: apps/v1 263 | kind: Deployment 264 | metadata: 265 | labels: 266 | app: k8s-combat-service # 通过标签选择关联 267 | name: k8s-combat-service 268 | spec: 269 | replicas: 1 270 | selector: 271 | matchLabels: 272 | app: k8s-combat-service 273 | template: 274 | metadata: 275 | labels: 276 | app: k8s-combat-service 277 | spec: 278 | containers: 279 | - name: k8s-combat-service 280 | image: crossoverjie/k8s-combat:v1 281 | imagePullPolicy: Always 282 | resources: 283 | limits: 284 | cpu: "1" 285 | memory: 100Mi 286 | requests: 287 | cpu: "0.1" 288 | memory: 10Mi 289 | --- 290 | apiVersion: v1 291 | kind: Service 292 | metadata: 293 | name: k8s-combat-service 294 | spec: 295 | selector: 296 | app: k8s-combat-service # 通过标签选择关联 297 | type: ClusterIP 298 | ports: 299 | - port: 8081 # 本 Service 的端口 300 | targetPort: 8081 # 容器端口 301 | name: app 302 | ``` 303 | 304 | 使用相同的镜像部署一个新的 deployment,名称为 `k8s-combat-service`,重点是新增了一个`kind: Service` 的对象。 305 | 306 | 这个就是用于声明 `service` 的组件,在这个组件中也是使用 `selector` 标签和 `deployment` 进行了关联。 307 | 308 | 也就是说这个 `service` 用于服务于名称等于 `k8s-combat-service` 的 `deployment`。 309 | 310 | 下面的两个端口也很好理解,一个是代理的端口, 另一个是 service 自身提供出去的端口。 311 | 312 | 至于 `type: ClusterIP` 是用于声明不同类型的 `service`,除此之外的类型还有: 313 | - [`NodePort`](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) 314 | - [`LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) 315 | - [`ExternalName`](https://kubernetes.io/docs/concepts/services-networking/service/#externalname) 316 | 等类型,默认是 `ClusterIP`,现在不用纠结这几种类型的作用,后续我们在讲到 `Ingress` 的时候会具体介绍。 317 | 318 | ## 负载测试 319 | 我们先分别将这两个 `deployment` 部署好: 320 | ```shell 321 | k apply -f deployment/deployment.yaml 322 | k apply -f deployment/deployment-service.yaml 323 | 324 | ❯ k get pod 325 | NAME READY STATUS RESTARTS AGE 326 | k8s-combat-7867bfb596-67p5m 1/1 Running 0 3h22m 327 | k8s-combat-service-5b77f59bf7-zpqwt 1/1 Running 0 3h22m 328 | ``` 329 | 330 | 由于我新增了一个 `/service` 的接口,用于在 `k8s-combat` 中通过 `service` 调用 `k8s-combat-service` 的接口。 331 | 332 | ```go 333 | resp, err := http.Get("http://k8s-combat-service:8081/ping") 334 | ``` 335 | 336 | 其中 `k8s-combat-service` 服务的域名就是他的服务名称。 337 | > 如果是跨 namespace 调用时,需要指定一个完整名称,在后续的章节会演示。 338 | 339 | 340 | 341 | 我们整个的调用流程如下: 342 | ![image.png](https://s2.loli.net/2023/09/06/i12pR3DjC6wnIXQ.png) 343 | 344 | 相信大家也看得出来相对于 `spring cloud` 这类微服务框架提供的客户端负载方式,`service` 是一种服务端负载,有点类似于 `Nginx` 的反向代理。 345 | 346 | 为了更直观的验证这个流程,此时我将 `k8s-combat-service` 的副本数增加到 2: 347 | ```yaml 348 | spec: 349 | replicas: 2 350 | ``` 351 | 352 | 只需要再次执行: 353 | ```shell 354 | ❯ k apply -f deployment/deployment-service.yaml 355 | deployment.apps/k8s-combat-service configured 356 | service/k8s-combat-service unchanged 357 | ``` 
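顺带一提,调整副本数也可以不改 yaml,直接使用 `kubectl scale`(以下为示意命令,效果等同于修改 replicas 后再 apply;但这种改动不会同步回 yaml 文件,声明式管理时仍推荐修改 yaml):

```shell
# 将 k8s-combat-service 扩容到 2 个副本
kubectl scale deployment k8s-combat-service --replicas=2

# 持续观察该 service 的 Endpoints 变化(新 Pod 就绪后会自动加入)
kubectl get endpoints k8s-combat-service -w
```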
358 | 359 | ![image.png](https://s2.loli.net/2023/09/06/ZC8UrjEz6ia1Qgo.png) 360 | 361 | > 不管我们对 `deployment` 的做了什么变更,都只需要 `apply` 这个 `yaml` 文件即可, k8s 会自动将当前的 `deployment` 调整为我们预期的状态(比如这里的副本数量增加为 2);这也就是 `k8s` 中常说的**声明式 API**。 362 | 363 | 364 | 可以看到此时 `k8s-combat-service` 的副本数已经变为两个了。 365 | 如果我们此时查看这个 `service` 的描述时: 366 | 367 | ```shell 368 | ❯ k describe svc k8s-combat-service |grep Endpoints 369 | Endpoints: 192.168.130.133:8081,192.168.130.29:8081 370 | ``` 371 | 会发现它已经代理了这两个 `Pod` 的 IP。 372 | 373 | 374 | ![image.png](https://s2.loli.net/2023/09/06/HbjyEcnaeCK6uMJ.png) 375 | 此时我进入了 `k8s-combat-7867bfb596-67p5m` 的容器: 376 | 377 | ```shell 378 | k exec -it k8s-combat-7867bfb596-67p5m bash 379 | curl http://127.0.0.1:8081/service 380 | ``` 381 | 382 | 并执行两次 `/service` 接口,发现请求会轮训进入 `k8s-combat-service` 的代理的 IP 中。 383 | 384 | 由于 `k8s service` 是基于 `TCP/UDP` 的四层负载,所以在 `http1.1` 中是可以做到请求级的负载均衡,但如果是类似于 `gRPC` 这类长链接就无法做到请求级的负载均衡。 385 | 386 | 换句话说 `service` 只支持连接级别的负载。 387 | 388 | 如果要支持 `gRPC`,就得使用 Istio 这类服务网格,相关内容会在后续章节详解。 389 | 390 | 总的来说 `k8s service` 提供了简易的服务注册发现和负载均衡功能,当我们只提供 http 服务时是完全够用的。 391 | 392 | 393 | # 集群外部访问 394 | 395 | 前两章中我们将应用[部署](https://crossoverjie.top/2023/08/31/ob/k8s-0-start/)到了 k8s 中,同时不同的服务之间也可以通过 [service](https://crossoverjie.top/2023/09/05/ob/k8s-service/) 进行调用,现在还有一个步骤就是将我们的应用暴露到公网,并提供域名的访问。 396 | 397 | 这一步类似于我们以前配置 Nginx 和绑定域名,提供这个能力的服务在 k8s 中成为 Ingress。 398 | 399 | 通过这个描述其实也能看出 Ingress 是偏运维的工作,但也不妨碍我们作为研发去了解这部分的内容;了解整个系统是如何运转的也是研发应该掌握的技能。 400 | 401 | # 安装 Ingress 控制器 402 | 在正式使用 Ingress 之前需要给 k8s 安装一个 Ingress 控制器,我们这里安装官方提供的 Ingress-nginx 控制器。 403 | 404 | 当然还有社区或者企业提供的各种控制器: 405 | ![image.png](https://s2.loli.net/2023/09/14/i1ebXQNUjxPkLEZ.png) 406 | 407 | 408 | 有两种安装方式: helm 或者是直接 apply 一个资源文件。 409 | 410 | 关于 `helm` 我们会在后面的章节单独讲解。 411 | 412 | 这里就直接使用资源文件安装即可,我已经上传到 GitHub 可以在这里访问: 413 | [https://github.com/crossoverJie/k8s-combat/blob/main/deployment/ingress-nginx.yaml](https://github.com/crossoverJie/k8s-combat/blob/main/deployment/ingress-nginx.yaml) 414 | 415 | 其实这个文件也是直接从官方提供的复制过来的,也可以直接使用这个路径进行安装: 416 | ```yaml 417 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.2/deploy/static/provider/cloud/deploy.yaml 418 | ``` 419 | 420 | > yaml 文件的内容是一样的。 421 | 422 | 不过要注意安装之后可能容器状态一直处于 Pending 状态,查看容器的事件时会发现镜像拉取失败。 423 | 424 | ```shell 425 | k describe pod ingress-nginx-controller-7cdfb9988c-lbcst -n ingress-nginx 426 | ``` 427 | 428 | > describe 是一个用于查看 k8s 对象详细信息的命令。 429 | 430 | 在刚才那份 yaml 文件中可以看到有几个镜像需要拉取,我们可以先在本地手动拉取镜像: 431 | ![image.png](https://s2.loli.net/2023/09/14/3IsRe2QWcmjTY41.png) 432 | ```shell 433 | docker pull registry.k8s.io/ingress-nginx/controller:v1.8.2 434 | ``` 435 | 436 | 如果依然无法拉取,可以尝试配置几个国内镜像源镜像拉取: 437 | 438 | ![image.png](https://s2.loli.net/2023/09/14/uTNDACSWdPp7BVt.png) 439 | 440 | > 我这里使用的 docker-desktop 自带的 k8s,推荐读者朋友也使用这个工具。 441 | 442 | # 创建 Ingress 443 | 使用刚才的 yaml 安装成功之后会在 `ingress-nginx` 命名空间下创建一个 Pod,通过 get 命令查看状态为 Running 即为安装成功。 444 | ```shell 445 | $ k get pod -n ingress-nginx 446 | NAME READY STATUS RESTARTS AGE 447 | ingress-nginx-controller-7cdf 1/1 Running 2 (35h ago) 3d 448 | ``` 449 | 450 | > Namespace 也是 k8s 内置的一个对象,可以简单理解为对资源进行分组管理,我们通常可以使用它来区分各个不同的环境,比如 dev/test/prod 等,不同命名空间下的资源不会互相干扰,且相互独立。 451 | 452 | 453 | 之后便可以创建 Ingress 资源了: 454 | ```yaml 455 | apiVersion: networking.k8s.io/v1 456 | kind: Ingress 457 | metadata: 458 | name: k8s-combat-ingress 459 | spec: 460 | ingressClassName: nginx 461 | rules: 462 | - host: www.service1.io 463 | http: 464 | paths: 465 | - backend: 466 | 
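            # 示意:把 www.service1.io 的请求转发到下面这个 service(即上文创建的 k8s-combat-service)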
service: 467 | name: k8s-combat-service 468 | port: 469 | number: 8081 470 | path: / 471 | pathType: Prefix 472 | - host: www.service2.io 473 | http: 474 | paths: 475 | - backend: 476 | service: 477 | name: k8s-combat-service-2 478 | port: 479 | number: 8081 480 | path: / 481 | pathType: Prefix 482 | ``` 483 | 484 | 看这个内容也很容易理解,创建了一个 `Ingress` 的对象,其中的重点就是这里的规则是如何定义的。 485 | 486 | > 在 k8s 中今后还会接触到各种不同的 Kind 487 | 488 | 这里的 `ingressClassName: nginx` 也是在刚开始安装的控制器里定义的名字,由这个资源定义。 489 | 490 | ```yaml 491 | apiVersion: networking.k8s.io/v1 492 | kind: IngressClass 493 | metadata: 494 | labels: 495 | app.kubernetes.io/component: controller 496 | app.kubernetes.io/instance: ingress-nginx 497 | app.kubernetes.io/name: ingress-nginx 498 | app.kubernetes.io/part-of: ingress-nginx 499 | app.kubernetes.io/version: 1.8.2 500 | name: nginx 501 | ``` 502 | 503 | 咱们这个规则很简单,就是将两个不同的域名路由到两个不同的 service。 504 | 505 | > 这里为了方便测试又创建了一个 `k8s-combat-service-2` 的 service,和 `k8s-combat-service` 是一样的,只是改了个名字而已。 506 | 507 | # 测试 508 | 也是为了方便测试,我在应用镜像中新增了一个接口,用于返回当前 Pod 的 hostname。 509 | ```go 510 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 511 | name, _ := os.Hostname() 512 | fmt.Fprint(w, name) 513 | }) 514 | ``` 515 | 516 | 517 | 由于我实际并没有 `www.service1.io/www.service2.io` 这两个域名,所以只能在本地配置 host 进行模拟。 518 | 519 | ``` 520 | 10.0.0.37 www.service1.io 521 | 10.0.0.37 www.service2.io 522 | ``` 523 | 524 | > 我测试所使用的 k8s 部署在我家里一台限制的 Mac 上,所以这里的 IP 它的地址。 525 | 526 | 527 | 当我们反复请求两次这个接口,会拿到两个不同的 hostname,也就是将我们的请求轮训负载到了这两个 service 所代理的两个 Pod 中。 528 | 529 | ```shell 530 | ❯ curl http://www.service1.io/ 531 | k8s-combat-service-79c5579587-b6nlj% 532 | ❯ curl http://www.service1.io/ 533 | k8s-combat-service-79c5579587-bk7nw% 534 | ❯ curl http://www.service2.io/ 535 | k8s-combat-service-2-7bbf56b4d9-dkj9b% 536 | ❯ curl http://www.service2.io/ 537 | k8s-combat-service-2-7bbf56b4d9-t5l4g 538 | ``` 539 | 540 | 我们也可以直接使用 describe 查看我们的 ingress 定义以及路由规则: 541 | ![image.png](https://s2.loli.net/2023/09/14/pgZzVb1L4aQTMwn.png) 542 | 543 | ```shell 544 | $ k describe ingress k8s-combat-ingress 545 | Name: k8s-combat-ingress 546 | Labels: 547 | Namespace: default 548 | Address: localhost 549 | Ingress Class: nginx 550 | Default backend: 551 | Rules: 552 | Host Path Backends 553 | ---- ---- -------- 554 | www.service1.io 555 | / k8s-combat-service:8081 (10.1.0.65:8081,10.1.0.67:8081) 556 | www.service2.io 557 | / k8s-combat-service-2:8081 (10.1.0.63:8081,10.1.0.64:8081) 558 | Annotations: 559 | Events: 560 | ``` 561 | 562 | 如果我们手动新增一个域名解析: 563 | ```shell 564 | 10.0.0.37 www.service3.io 565 | ❯ curl http://www.service3.io/ 566 | 567 | 404 Not Found 568 | 569 |

<html>
<head><title>404 Not Found</title></head>
<body>
<center><h1>404 Not Found</h1></center>
<hr><center>nginx</center>
</body>
</html>
571 | 572 | 573 | ``` 574 | 会直接 404,这是因为没有找到这个域名的规则。 575 | 576 | # 访问原理 577 | ![image.png](https://s2.loli.net/2023/09/14/9JTfp6GP24VmzAK.png) 578 | 整个的请求路径如上图所示,其实我们的 Ingress 本质上也是一个 service(所以它也可以启动多个副本来进行负载),只是他的类型是 `LoadBalancer`,通常这种类型的 service 会由云厂商绑定一个外部 IP,这样就可以通过这个外部 IP 访问 Ingress 了。 579 | 580 | > 而我们应用的 service 是 ClusterIP,只能在应用内部访问 581 | 582 | ![image.png](https://s2.loli.net/2023/09/14/Bu67SlMLak1hirc.png) 583 | 584 | 通过 service 的信息也可以看到,我们 ingress 的 service 绑定的外部 IP 是 `localhost`(本地的原因) 585 | 586 | # 总结 587 | Ingress 通常是充当网关的作用,后续我们在使用 Istio 时,也可以使用 Istio 所提供的控制器来替换掉 Ingress-nginx,可以更方便的管理内外网流量。 588 | 589 | 本文的所有源码在这里可以访问: 590 | [https://github.com/crossoverJie/k8s-combat](https://github.com/crossoverJie/k8s-combat) 591 | 592 | 593 | # 如何使用配置 594 | 在前面[三节中](https://crossoverjie.top/categories/k8s/)已经讲到如何将我们的应用部署到 k8s 集群并提供对外访问的能力,x现在可以满足基本的应用开发需求了。 595 | 596 | 现在我们需要更进一步,使用 k8s 提供的一些其他对象来标准化我的应用开发。 597 | 首先就是 `ConfigMap`,从它的名字也可以看出这是用于管理配置的对象。 598 | 599 | # ConfigMap 600 | 601 | 不管我们之前是做 `Java`、`Go` 还是 `Python` 开发都会使用到配置文件,而 `ConfigMap` 的作用可以将我们原本写在配置文件里的内容转存到 `k8s` 中,然后和我们的 `Container` 进行绑定。 602 | 603 | ## 存储到环境变量 604 | 绑定的第一种方式就是将配置直接写入到环境变量,这里我先定义一个 `ConfigMap`: 605 | ```yaml 606 | apiVersion: v1 607 | kind: ConfigMap 608 | metadata: 609 | name: k8s-combat-configmap 610 | data: 611 | PG_URL: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" 612 | ``` 613 | 614 | 重点是 `data` 部分,存储的是一个 `KV` 结构的数据,这里存储的是一个数据库连接。 615 | > 需要注意,KV 的大小不能超过 1MB 616 | 617 | 接着可以在容器定义中绑定这个 `ConfigMap` 的所有 `KV` 到容器的环境变量: 618 | ```yaml 619 | # Define all the ConfigMap's data as container environment variables 620 | envFrom: 621 | - configMapRef: 622 | name: k8s-combat-configmap 623 | ``` 624 | 625 | 我将 `ConfigMap` 的定义也放在了同一个 [deployment](https://github.com/crossoverJie/k8s-combat/blob/main/deployment/deployment.yaml) 中,直接 apply: 626 | ```shell 627 | ❯ k apply -f deployment/deployment.yaml 628 | deployment.apps/k8s-combat created 629 | configmap/k8s-combat-configmap created 630 | ``` 631 | 632 | 此时 `ConfigMap` 也会被创建,我们可以使用 633 | ```shell 634 | ❯ k get configmap 635 | NAME DATA AGE 636 | k8s-combat-configmap 1 3m17s 637 | 638 | ❯ k describe configmap k8s-combat-configmap 639 | Data 640 | ==== 641 | PG_URL: 642 | ---- 643 | postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable 644 | ``` 645 | 拿到刚才声明的配置信息。 646 | 647 | ---- 648 | 同时我在代码中也读取了这个环境变量: 649 | 650 | ```go 651 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 652 | name, _ := os.Hostname() 653 | url := os.Getenv("PG_URL") 654 | fmt.Fprint(w, fmt.Sprintf("%s-%s", name, url)) 655 | }) 656 | ``` 657 | 658 | 访问这个接口便能拿到这个环境变量: 659 | ```shell 660 | root@k8s-combat-7b987bb496-pqt9s:/# curl http://127.0.0.1:8081 661 | k8s-combat-7b987bb496-pqt9s-postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable 662 | 663 | root@k8s-combat-7b987bb496-pqt9s:/# echo $PG_URL 664 | postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable 665 | ``` 666 | 667 | ## 存储到文件 668 | 有些时候我们也需要将这些配置存储到一个文件中,比如在 Java 中可以使用 `spring` 读取,`Go` 也可以使用 `configor` 这些第三方库来读取,所有配置都在一个文件中也更方便维护。 669 | 670 | ![image.png](https://s2.loli.net/2023/09/26/g2IhktH7iwWb8LT.png) 671 | 在 `ConfigMap` 中新增了一个 `key:APP` 存放了一个 `yaml` 格式的数据,然后在容器中使用 `volumes` 和 `volumeMounts` 将数据挂载到容器中的指定路径`/go/bin/app.yaml` 672 | 673 | apply 之后我们可以在容器中查看这个文件是否存在: 674 | ```shell 675 | root@k8s-combat-7b987bb496-pqt9s:/# cat /go/bin/app.yaml 676 | name: k8s-combat 677 | pulsar: 678 | url: "pulsar://localhost:6650" 679 | token: "abc" 680 | ``` 681 
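上图中 `volumes` 与 `volumeMounts` 的写法大致如下(示意片段,volume 名称 `app-config` 为演示自拟;用 `subPath` 挂载单个文件可以避免整个 `/go/bin` 目录被覆盖):

```yaml
# 示意:将 ConfigMap 中 key 为 APP 的内容挂载为 /go/bin/app.yaml
spec:
  volumes:
    - name: app-config
      configMap:
        name: k8s-combat-configmap
        items:
          - key: APP
            path: app.yaml
  containers:
    - name: k8s-combat
      volumeMounts:
        - name: app-config
          mountPath: /go/bin/app.yaml
          subPath: app.yaml
```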
| 配置已经成功挂载到了这个路径,我们便可以在代码中读取这些数据。 682 | 683 | # Secret 684 | 可以看到 `ConfigMap` 中是明文存储数据的; 685 | ```shell 686 | k describe configmap k8s-combat-configmap 687 | ``` 688 | 可以直接查看。 689 | 690 | 对一些敏感数据就不够用了,这时我们可以使用 `Secret`: 691 | ```yaml 692 | apiVersion: v1 693 | kind: Secret 694 | metadata: 695 | name: k8s-combat-secret 696 | type: Opaque 697 | data: 698 | PWD: YWJjCg== 699 | 700 | --- 701 | env: 702 | - name: PG_PWD 703 | valueFrom: 704 | secretKeyRef: 705 | name: k8s-combat-secret 706 | key: PWD 707 | ``` 708 | 709 | 这里我新增了一个 `Secret` 用于存储密码,并在 `container` 中也将这个 `key` 写入到环境变量中。 710 | 711 | ```shell 712 | ❯ echo 'abc' | base64 713 | YWJjCg== 714 | ``` 715 | `Secret` 中的数据需要使用 `base64` 进行编码,所以我这里存储的是 abc. 716 | 717 | apply 之后我们再查看这个 `Secret` 是不能直接查看原始数据的。 718 | ```shell 719 | ❯ k describe secret k8s-combat-secret 720 | Name: k8s-combat-secret 721 | Type: Opaque 722 | 723 | Data 724 | ==== 725 | PWD: 4 bytes 726 | ``` 727 | 728 | `Secret` 相比 `ConfigMap` 多了一个 `Type` 选项。 729 | ![](https://s2.loli.net/2023/09/26/G25TRcSzCbIVDQ3.png) 730 | 731 | 我们现阶段在应用中用的最多的就是这里的 `Opaque`,其他的暂时还用不上。 732 | 733 | 734 | # 总结 735 | 736 | 737 | 在实际开发过程中研发人员基本上是不会直接接触 `ConfigMap`,一般会给开发者在管理台提供维护配置的页面进行 CRUD。 738 | 739 | 由于 `ConfigMap` 依赖于 k8s 与我们应用的语言无关,所以一些高级特性,比如实时更新就无法实现,每次修改后都得重启应用才能生效。 740 | 741 | 类似于 Java 中常见的配置中心:`Apollo,Nacos` 使用上会有不小的区别,但这些是应用语言强绑定的,如果业务对这些配置中心特性有强烈需求的话也是可以使用的。 742 | 743 | 但如果团队本身就是多语言研发,想要降低运维复杂度 `ConfigMap` 还是不二的选择。 744 | 745 | 746 | # Istio 入门 747 | 终于进入大家都比较感兴趣的服务网格系列了,在前面已经讲解了: 748 | - 如何部署应用到 `kubernetes` 749 | - 服务之间如何调用 750 | - 如何通过域名访问我们的服务 751 | - 如何使用 `kubernetes` 自带的配置 `ConfigMap` 752 | 753 | 基本上已经够我们开发一般规模的 web 应用了;但在企业中往往有着复杂的应用调用关系,应用与应用之间的请求也需要进行管理。 754 | 比如常见的限流、降级、trace、监控、负载均衡等功能。 755 | 756 | 在我们使用 `kubernetes` 之前往往都是由微服务框架来解决这些问题,比如 Dubbo、SpringCloud 都有对应的功能。 757 | 758 | 但当我们上了 `kubernetes` 之后这些事情就应该交给一个专门的云原生组件来解决,也就是本次会讲到的 `Istio`,它是目前使用最为广泛的服务网格解决方案。 759 | 760 | 761 | ![image.png](https://s2.loli.net/2023/10/31/CtJsogSyPD7cjEW.png) 762 | 官方对于 Istio 的解释比较简洁,落到具体的功能点也就是刚才提到的: 763 | - 限流降级 764 | - 路由转发、负载均衡 765 | - 入口网关、`TLS安全认证` 766 | - 灰度发布等 767 | 768 | ![image.png](https://s2.loli.net/2023/10/31/aXnNZhu91m7V2Tw.png) 769 | 770 | 再结合官方的架构图可知:Istio 分为控制面 `control plane` 和数据面 `data plane`。 771 | 772 | 控制面可以理解为 Istio 自身的管理功能: 773 | - 比如服务注册发现 774 | - 管理配置数据面所需要的网络规则等 775 | 776 | 而数据面可以简单的把他理解为由 `Envoy` 代理的我们的业务应用,我们应用中所有的流量进出都会经过 `Envoy` 代理。 777 | 778 | 所以它可以实现负载均衡、熔断保护、认证授权等功能。 779 | # 安装 780 | 首先安装 Istio 命令行工具 781 | > 这里的前提是有一个 kubernetes 运行环境 782 | 783 | Linux 使用: 784 | ```shell 785 | curl -L https://istio.io/downloadIstio | sh - 786 | ``` 787 | 788 | Mac 可以使用 brew: 789 | ```shell 790 | brew install istioctl 791 | ``` 792 | 793 | 其他环境可以下载 Istio 后配置环境变量: 794 | ```shell 795 | export PATH=$PWD/bin:$PATH 796 | ``` 797 | 798 | 之后我们可以使用 `install` 命令安装控制面。 799 | > 这里默认使用的是 `kubectl` 所配置的 `kubernetes` 集群 800 | ```bash 801 | istioctl install --set profile=demo -y 802 | ``` 803 | ![](https://s2.loli.net/2023/10/30/DLOeRGrA7gNC1Xa.png) 804 | 这个的 `profile` 还有以下不同的值,为了演示我们使用 `demo` 即可。 805 | ![image.png](https://s2.loli.net/2023/10/26/3JXneYvyqI4WTgt.png) 806 | # 使用 807 | ```bash 808 | # 开启 default 命名空间自动注入 809 | $ k label namespace default istio-injection=enabled 810 | 811 | $ k describe ns default 812 | Name: default 813 | Labels: istio-injection=enabled 814 | kubernetes.io/metadata.name=default 815 | Annotations: 816 | Status: Active 817 | No resource quota. 818 | No LimitRange resource. 
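# 如果后续想关闭自动注入,在 label 名后加 - 删除该 label 即可(示意)
$ k label namespace default istio-injection-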
819 | ``` 820 | 之后我们为 `namespace` 打上 `label`,使得 Istio 控制面知道哪个 `namespace` 下的 `Pod` 会自动注入 `sidecar`。 821 | 822 | 这里我们为 default 这个命名空间打开自动注入 `sidecar`,然后在这里部署我们之前使用到的 [deployment-istio.yaml](https://github.com/crossoverJie/k8s-combat/blob/main/deployment/deployment-istio.yaml) 823 | ```bash 824 | $ k apply -f deployment/deployment-istio.yaml 825 | 826 | $ k get pod 827 | NAME READY STATUS RESTARTS 828 | k8s-combat-service-5bfd78856f-8zjjf 2/2 Running 0 829 | k8s-combat-service-5bfd78856f-mblqd 2/2 Running 0 830 | k8s-combat-service-5bfd78856f-wlc8z 2/2 Running 0 831 | ``` 832 | 此时会看到每个Pod 有两个 container(其中一个就是 istio-proxy sidecar),也就是之前做 [gRPC 负载均衡](https://crossoverjie.top/2023/10/16/ob/k8s-grpc-lb/)测试时的代码。 833 | 834 | ![image.png](https://s2.loli.net/2023/10/31/js1Gz5yVCNLep9W.png) 835 | 还是进行负载均衡测试,效果是一样的,说明 `Istio` 起作用了。 836 | 837 | 此时我们再观察 `sidecar` 的日志时,会看到刚才我们所发出和接受到的流量: 838 | ```bash 839 | $ k logs -f k8s-combat-service-5bfd78856f-wlc8z -c istio-proxy 840 | 841 | [2023-10-31T14:52:14.279Z] "POST /helloworld.Greeter/SayHello HTTP/2" 200 - via_upstream - "-" 12 61 14 9 "-" "grpc-go/1.58.3" "6d293d32-af96-9f87-a8e4-6665632f7236" "k8s-combat-service:50051" "172.17.0.9:50051" inbound|50051|| 127.0.0.6:42051 172.17.0.9:50051 172.17.0.9:40804 outbound_.50051_._.k8s-combat-service.default.svc.cluster.local default 842 | [2023-10-31T14:52:14.246Z] "POST /helloworld.Greeter/SayHello HTTP/2" 200 - via_upstream - "-" 12 61 58 39 "-" "grpc-go/1.58.3" "6d293d32-af96-9f87-a8e4-6665632f7236" "k8s-combat-service:50051" "172.17.0.9:50051" outbound|50051||k8s-combat-service.default.svc.cluster.local 172.17.0.9:40804 10.101.204.13:50051 172.17.0.9:54012 - default 843 | [2023-10-31T14:52:15.659Z] "POST /helloworld.Greeter/SayHello HTTP/2" 200 - via_upstream - "-" 12 61 35 34 "-" "grpc-go/1.58.3" "ed8ab4f2-384d-98da-81b7-d4466eaf0207" "k8s-combat-service:50051" "172.17.0.10:50051" outbound|50051||k8s-combat-service.default.svc.cluster.local 172.17.0.9:39800 10.101.204.13:50051 172.17.0.9:54012 - default 844 | [2023-10-31T14:52:16.524Z] "POST /helloworld.Greeter/SayHello HTTP/2" 200 - via_upstream - "-" 12 61 28 26 "-" "grpc-go/1.58.3" "67a22028-dfb3-92ca-aa23-573660b30dd4" "k8s-combat-service:50051" "172.17.0.8:50051" outbound|50051||k8s-combat-service.default.svc.cluster.local 172.17.0.9:44580 10.101.204.13:50051 172.17.0.9:54012 - default 845 | [2023-10-31T14:52:16.680Z] "POST /helloworld.Greeter/SayHello HTTP/2" 200 - via_upstream - "-" 12 61 2 2 "-" "grpc-go/1.58.3" "b4761d9f-7e4c-9f2c-b06f-64a028faa5bc" "k8s-combat-service:50051" "172.17.0.10:50051" outbound|50051||k8s-combat-service.default.svc.cluster.local 172.17.0.9:39800 10.101.204.13:50051 172.17.0.9:54012 - default 846 | ``` 847 | 848 | # 总结 849 | 本期的内容比较简单,主要和安装配置相关,下一期更新如何配置内部服务调用的超时、限流等功能。 850 | 851 | 其实目前大部分操作都是偏运维的,即便是后续的超时配置等功能都只是编写 yaml 资源。 852 | 853 | 但在生产使用时,我们会给开发者提供一个管理台的可视化页面,可供他们自己灵活配置这些原本需要在 `yaml` 中配置的功能。 854 | 855 | ![image.png](https://s2.loli.net/2023/10/31/B3TiC9rJwPbGVHQ.png) 856 | 其实各大云平台厂商都有提供类似的能力,比如阿里云的 EDAS 等。 857 | 858 | # 配置Mesh 859 | 在上一篇 [k8s-服务网格实战-入门Istio](https://crossoverjie.top/2023/10/31/ob/k8s-Istio01/)中分享了如何安装部署 `Istio`,同时可以利用 `Istio` 实现 `gRPC` 的负载均衡。 860 | 861 | 今天我们更进一步,深入了解使用 Istio 的功能。 862 | ![image.png](https://s2.loli.net/2023/11/07/jKIeEH7ir9uqDUd.png) 863 | 从 Istio 的流量模型中可以看出:Istio 支持管理集群的出入口请求(gateway),同时也支持管理集群内的 mesh 流量,也就是集群内服务之间的请求。 864 | 865 | 本次先讲解集群内部的请求,配合实现以下两个功能: 866 | - 灰度发布(对指定的请求分别路由到不同的 service 中) 867 | - 配置 service 的请求权重 868 | 869 | ## 灰度发布 870 | 在开始之前会部署两个 `deployment` 和一个 
`service`,同时这两个 `deployment` 所关联的 `Pod` 分别对应了两个不同的 `label`,由于在灰度的时候进行分组。 871 | ![image.png](https://s2.loli.net/2023/11/07/tLOYQiNg5HEe2ry.png) 872 | 873 | 使用这个 `yaml` 会部署所需要的 `deployment` 和 `service`。 874 | ```bash 875 | kubectl apply -f https://raw.githubusercontent.com/crossoverJie/k8s-combat/main/deployment/istio-mesh.yaml 876 | ``` 877 | 878 | --- 879 | 首先设想下什么情况下我们需要灰度发布,一般是某个重大功能的测试,只对部分进入内测的用户开放入口。 880 | 881 | 假设我们做的是一个 `App`,我们可以对拿到了内测包用户的所有请求头中加入一个版本号。 882 | 883 | 比如 `version=200` 表示新版本,`version=100` 表示老版本。 884 | 同时在服务端会将这个版本号打印出来,用于区分请求是否进入了预期的 Pod。 885 | 886 | ```go 887 | // Client 888 | version := r.URL.Query().Get("version") 889 | name := "world" 890 | ctx, cancel := context.WithTimeout(context.Background(), time.Second) 891 | md := metadata.New(map[string]string{ 892 | "version": version, 893 | }) 894 | ctx = metadata.NewOutgoingContext(ctx, md) 895 | defer cancel() 896 | g, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) 897 | 898 | // Server 899 | func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { 900 | md, ok := metadata.FromIncomingContext(ctx) 901 | var version string 902 | if ok { 903 | version = md.Get("version")[0] 904 | } log.Printf("Received: %v, version: %s", in.GetName(), version) 905 | name, _ := os.Hostname() 906 | return &pb.HelloReply{Message: fmt.Sprintf("hostname:%s, in:%s, version:%s", name, in.Name, version)}, nil 907 | } 908 | ``` 909 | 910 | ### 对 service 分组 911 | 进行灰度测试时往往需要新增部署一个灰度服务,这里我们称为 v2(也就是上图中的 Pod2)。 912 | 913 | 同时需要将 v1 和 v2 分组: 914 | 915 | ```yaml 916 | apiVersion: networking.istio.io/v1alpha3 917 | kind: DestinationRule 918 | metadata: 919 | name: k8s-combat-service-ds 920 | spec: 921 | host: k8s-combat-service-istio-mesh 922 | subsets: 923 | - name: v1 924 | labels: 925 | app: k8s-combat-service-v1 926 | - name: v2 927 | labels: 928 | app: k8s-combat-service-v2 929 | ``` 930 | 这里我们使用 Istio 的 `DestinationRule` 定义 `subset`,也就是将我们的 `service` 下的 Pod 分为 v1/v2。 931 | 932 | > 使用 标签 `app` 进行分组 933 | 934 | 注意这里的 `host: k8s-combat-service-istio-mesh` 通常配置的是 `service` 名称。 935 | 936 | ```yaml 937 | apiVersion: v1 938 | kind: Service 939 | metadata: 940 | name: k8s-combat-service-istio-mesh 941 | spec: 942 | selector: 943 | appId: "12345" 944 | type: ClusterIP 945 | ports: 946 | - port: 8081 947 | targetPort: 8081 948 | name: app 949 | - name: grpc 950 | port: 50051 951 | targetPort: 50051 952 | ``` 953 | 也就是这里 service 的名称,同时也支持配置为 `host: k8s-combat-service-istio-mesh.default.svc.cluster.local`,如果使用的简写`Istio` 会根据当前指定的 `namespace` 进行解析。 954 | > Istio 更推荐使用全限定名替代我们这里的简写,从而避免误操作。 955 | 956 | 当然我们也可以在 `DestinationRule` 中配置负载均衡的策略,这里我们先略过: 957 | 958 | ```yaml 959 | apiVersion: networking.istio.io/v1alpha3 960 | kind: DestinationRule 961 | metadata: 962 | name: k8s-combat-service-ds 963 | spec: 964 | host: k8s-combat-service-istio-mesh 965 | trafficPolicy: 966 | loadBalancer: 967 | simple: ROUND_ROBIN 968 | ``` 969 | ![image.png](https://s2.loli.net/2023/11/07/TJyEV6eIiCcapSH.png) 970 | 971 | 972 | --- 973 | 这样我们就定义好了两个分组: 974 | - v1:app: k8s-combat-service-v1 975 | - v2:app: k8s-combat-service-v2 976 | 977 | 之后就可以配置路由规则将流量分别指定到两个不同的组中,这里我们使用 `VirtualService` 进行配置。 978 | 979 | ```yaml 980 | apiVersion: networking.istio.io/v1alpha3 981 | kind: VirtualService 982 | metadata: 983 | name: k8s-combat-service-vs 984 | spec: 985 | gateways: 986 | - mesh 987 | hosts: 988 | - k8s-combat-service-istio-mesh # match this host 989 | http: 990 | - name: v1 991 | match: 992 | - headers: 993 | version: 994 | exact: '100' 995 | 
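            # 示意:header 中 version=100 的请求命中本条规则,路由到下面的 subset v1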
route: 996 | - destination: 997 | host: k8s-combat-service-istio-mesh 998 | subset: v1 999 | - name: v2 1000 | match: 1001 | - headers: 1002 | version: 1003 | exact: '200' 1004 | route: 1005 | - destination: 1006 | host: k8s-combat-service-istio-mesh 1007 | subset: v2 1008 | - name: default 1009 | route: 1010 | - destination: 1011 | host: k8s-combat-service-istio-mesh 1012 | subset: v1 1013 | ``` 1014 | 1015 | 这个规则很简单,会检测 http 协议的 `header` 中的 `version` 字段值,如果为 100 这路由到 `subset=v1` 这个分组的 Pod 中,同理为 200 时则路由到 `subset=v2` 这个分组的 Pod 中。 1016 | 1017 | 当没有匹配到 header 时则进入默认的 `subset:v1` 1018 | 1019 | > `gRPC` 也是基于 http 协议,它的 `metadata` 也就对应了 `http` 协议中的 `header`。 1020 | 1021 | 1022 | ### header=100 1023 | ```bash 1024 | Greeting: hostname:k8s-combat-service-v1-5b998dc8c8-hkb72, in:world, version:100istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=100" 1025 | Greeting: hostname:k8s-combat-service-v1-5b998dc8c8-hkb72, in:world, version:100istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=100" 1026 | Greeting: hostname:k8s-combat-service-v1-5b998dc8c8-hkb72, in:world, version:100istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=100" 1027 | Greeting: hostname:k8s-combat-service-v1-5b998dc8c8-hkb72, in:world, version:100istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=100" 1028 | ``` 1029 | 1030 | ### header=200 1031 | ```bash 1032 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1033 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1034 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1035 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1036 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1037 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1038 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1039 | ``` 1040 | 1041 | 根据以上的上面的测试请求来看,只要我们请求头里带上指定的 `version` 就会被路由到指定的 `Pod` 中。 1042 | 1043 | 利用这个特性我们就可以在灰度验证的时候单独发一个灰度版本的 `Deployment`,同时配合客户端指定版本就可以实现灰度功能了。 1044 | 1045 | ## 配置权重 1046 | 1047 | 同样基于 `VirtualService` 我们还可以对不同的 `subset` 分组进行权重配置。 1048 | 1049 | 
```yaml 1050 | apiVersion: networking.istio.io/v1alpha3 1051 | kind: VirtualService 1052 | metadata: 1053 | name: k8s-combat-service-vs 1054 | spec: 1055 | gateways: 1056 | - mesh 1057 | hosts: 1058 | - k8s-combat-service-istio-mesh # match this host 1059 | http: 1060 | - match: 1061 | - uri: 1062 | exact: /helloworld.Greeter/SayHello 1063 | route: 1064 | - destination: 1065 | host: k8s-combat-service-istio-mesh 1066 | subset: v1 1067 | weight: 10 1068 | - destination: 1069 | host: k8s-combat-service-istio-mesh 1070 | subset: v2 1071 | weight: 90 1072 | timeout: 5000ms 1073 | ``` 1074 | 1075 | 这里演示的是针对 `SayHello` 接口进行权重配置(当然还有多种匹配规则),90% 的流量会进入 v2 这个 subset,也就是在 `k8s-combat-service-istio-mesh` service 下的 `app: k8s-combat-service-v2` Pod。 1076 | 1077 | ```bash 1078 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1079 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1080 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1081 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1082 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1083 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1084 | Greeting: hostname:k8s-combat-service-**v1**-5b998dc8c8-hkb72, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ curl "http://127.0.0.1:8081/grpc_client?name=k8s-combat-service-istio-mesh&version=200" 1085 | Greeting: hostname:k8s-combat-service-v2-5db566fb76-xj7j6, in:world, version:200istio-proxy@k8s-combat-service-v1-5b998dc8c8-hkb72:/$ 1086 | ``` 1087 | 经过测试会发现大部分的请求都会按照我们的预期进入 v2 这个分组。 1088 | 1089 | 当然除之外之外我们还可以: 1090 | - 超时时间 1091 | - 故障注入 1092 | - 重试 1093 | 具体的配置可以参考 [Istio](https://istio.io/latest/docs/reference/config/networking/virtual-service/#HTTPMatchRequest) 官方文档: 1094 | ![image.png](https://s2.loli.net/2023/11/07/LBjEtd1MP9VcAgl.png) 1095 | 当然在一些云平台也提供了可视化的页面,可以更直观的使用。 1096 | ![image.png](https://s2.loli.net/2023/11/07/2LVTgeiSK9HyxQJ.png) 1097 | 1098 | > 以上是 阿里云的截图 1099 | 1100 | 1101 | # 配置网关 1102 | 但他们的管理的资源都偏 `kubernetes`,一般是由运维或者是 DevOps 来配置,不方便开发使用,所以还需要一个介于云厂商和开发者之间的管理发布平台,可以由开发者以项目维度管理维护这些功能。 1103 | 1104 | 1105 | 在上一期 [k8s-服务网格实战-配置 Mesh](https://crossoverjie.top/2023/11/07/ob/k8s-Istio02/) 中讲解了如何配置集群内的 Mesh 请求,Istio 同样也可以处理集群外部流量,也就是我们常见的网关。 1106 | ![image.png](https://s2.loli.net/2023/11/14/TSCmnecrjHKfLzi.png) 1107 | 1108 | 1109 | 其实和之前讲到的[k8s入门到实战-使用Ingress](https://crossoverjie.top/2023/09/15/ob/k8s-Ingress/) `Ingress` 作用类似,都是将内部服务暴露出去的方法。 1110 | 1111 | 只是使用 `Istio-gateway` 会更加灵活。 1112 | 
![image.png](https://s2.loli.net/2023/11/14/hVFUTLB2CHjeRuM.png) 1113 | 1114 | 这里有一张功能对比图,可以明显的看出 `Istio-gateway` 支持的功能会更多,如果是一个中大型企业并且已经用上 Istio 后还是更推荐是有 `Istio-gateway`,使用同一个控制面就可以管理内外网流量。 1115 | 1116 | 1117 | ## 创建 Gateway 1118 | 开始之前首先是创建一个 `Istio-Gateway` 的资源: 1119 | 1120 | ```yaml 1121 | apiVersion: networking.istio.io/v1alpha3 1122 | kind: Gateway 1123 | metadata: 1124 | name: istio-ingress-gateway 1125 | namespace: default 1126 | spec: 1127 | servers: 1128 | - port: 1129 | number: 80 1130 | name: http 1131 | protocol: HTTP 1132 | hosts: 1133 | - 'www.service1.io' 1134 | selector: 1135 | app: istio-ingressgateway #与现有的 gateway 关联 1136 | istio: ingressgateway 1137 | ``` 1138 | 1139 | 其中的 `selector` 选择器中匹配的 label 与我们安装 `Istio` 时候自带的 `gateway` 关联即可。 1140 | 1141 | ```shell 1142 | # 查看 gateway 的 label 1143 | k get pod -n istio-system 1144 | NAME READY STATUS 1145 | istio-ingressgateway-649f75b6b9-klljw 1/1 Running 1146 | 1147 | k describe pod istio-ingressgateway-649f75b6b9-klljw -n istio-system |grep Labels 1148 | Labels: app=istio-ingressgateway 1149 | ``` 1150 | 1151 | ![image.png](https://s2.loli.net/2023/10/26/3JXneYvyqI4WTgt.png) 1152 | 1153 | > 这个 `Gateway` 在我们第一次安装 `Istio` 的时候就会安装这个组件。 1154 | 1155 | --- 1156 | 这个配置的含义是网关会代理通过 `www.service1.io` 这个域名访问的所有请求。 1157 | 1158 | 之后需要使用刚才的 gateway 与我们的服务的 service 进行绑定,这时就需要使用到 `VirtualService`: 1159 | 1160 | ```yaml 1161 | apiVersion: networking.istio.io/v1alpha3 1162 | kind: VirtualService 1163 | metadata: 1164 | name: k8s-combat-istio-http-vs 1165 | spec: 1166 | gateways: 1167 | - istio-ingress-gateway # 绑定刚才创建的 gateway 名称 1168 | hosts: 1169 | - www.service1.io 1170 | http: 1171 | - name: default 1172 | route: 1173 | - destination: 1174 | host: k8s-combat-service-istio-mesh #service 名称 1175 | port: 1176 | number: 8081 1177 | subset: v1 1178 | ``` 1179 | 这个和我们之前讲到的 Mesh 内部流量时所使用到的 `VirtualService` 配置是一样的。 1180 | 1181 | 这里的含义也是通过 `www.service1.io` 以及 `istio-ingress-gateway` 网关的流量会进入这个虚拟服务,但所有的请求都会进入 `subset: v1` 这个分组。 1182 | 1183 | 这个的分组信息在上一节可以查询到: 1184 | ```yaml 1185 | apiVersion: networking.istio.io/v1alpha3 1186 | kind: DestinationRule 1187 | metadata: 1188 | name: k8s-combat-service-ds 1189 | spec: 1190 | host: k8s-combat-service-istio-mesh 1191 | subsets: 1192 | - name: v1 1193 | labels: 1194 | app: k8s-combat-service-v1 1195 | - name: v2 1196 | labels: 1197 | app: k8s-combat-service-v2 1198 | ``` 1199 | 1200 | 之后我们访问这个域名即可拿到响应,同时我们打开 `k8s-combat-service-istio-mesh` service 的 Pod 查看日志,会发现所有的请求都进入了 v1, 如果不需要这个限制条件,将 `subset: v1` 删除即可。 1201 | 1202 | ```bash 1203 | curl http://www.service1.io/ping 1204 | ``` 1205 | 1206 | > 本地需要配置下 host: `127.0.0.1 www.service1.io` 1207 | 1208 | ![image.png](https://s2.loli.net/2023/11/13/ksR9FbdWMEhlLBQ.png) 1209 | 1210 | 还有一点,我们需要拿到 `gateway` 的外部IP,才能将 IP 和刚才的域名`www.service1.io` 进行绑定(host,或者是域名管理台)。 1211 | 1212 | 如果使用的是 `docker-desktop` 自带的 `kubernetes` 集群时候直接使用 `127.0.0.1` 即可,默认就会绑定上。 1213 | 1214 | 如果使用的是 `minikube` 安装的,那需要使用 `minikube tunnel` 手动为 service 为`LoadBalancer` 类型的绑定一个本地 IP,具体可以参考文档: 1215 | https://minikube.sigs.k8s.io/docs/tasks/loadbalancer 1216 | 1217 | > 如果是生产环境使用,云服务厂商会自动绑定一个外网 IP。 1218 | 1219 | ## 原理 1220 | ![image.png](https://s2.loli.net/2023/11/14/4yBEDZOcsWKxLpg.png) 1221 | 1222 | 这个的访问请求的流程和之前讲到的 `kubernetes Ingress` 流程是类似的,只是 gateway 是通过 `VirtualService` 来路由的 service,同时在这个 `VirtualService` 中可以自定义许多的路由规则。 1223 | 1224 | # 总结 1225 | 服务网格 `Istio` 基本上讲完了,后续还有关于 `Telemetry` 相关的 `trace`、`log`、`metrics` 会在运维章节更新,也会和 Istio 有所关联。 1226 | 感兴趣的朋友可以持续关注。 1227 | 1228 | # 应用探针 1229 | 1230 | 
今天进入 `kubernetes` 的运维部分(并不是运维 `kubernetes`,而是运维应用),其实日常我们大部分使用 `kubernetes` 的功能就是以往运维的工作,现在云原生将运维和研发关系变得更紧密了。 1231 | 1232 | 1233 | 今天主要讲解 `Probe` 探针相关的功能,探针最实用的功能就是可以控制应用优雅上线。 1234 | 1235 | # 就绪探针 1236 | 举个例子,当我们的 service 关联了多个 Pod 的时候,其中一个 Pod 正在重启但还没达到可以对外提供服务的状态,这时候如果有流量进入。 1237 | 1238 | 那这个请求肯定就会出现异常,从而导致问题,所以我们需要一个和 `kubernetes` 沟通的渠道,告诉它什么时候可以将流量放进来。 1239 | ![image.png](https://s2.loli.net/2023/11/26/StHngQR4K9vCxjf.png) 1240 | 比如如图所示的情况,红色 `Pod` 在未就绪的时候就不会有流量。 1241 | 1242 | 使用就绪探针就可以达到类似的效果: 1243 | ```yaml 1244 | livenessProbe: 1245 | failureThreshold: 3 1246 | httpGet: 1247 | path: /ping 1248 | port: 8081 1249 | scheme: HTTP 1250 | periodSeconds: 3 1251 | successThreshold: 1 1252 | timeoutSeconds: 1 1253 | ``` 1254 | 这个配置也很直接: 1255 | - 配置一个 HTTP 的 ping 接口 1256 | - 每三秒检测一次 1257 | - 失败 3 次则认为检测失败 1258 | - 成功一次就认为检测成功 1259 | 1260 | > 但没有配置就绪探针时,一旦 Pod 的 `Endpoint` 加入到 service 中(Pod 进入 `Running` 状态),请求就有可能被转发过来,所以配置就绪探针是非常有必要的。 1261 | 1262 | # 启动探针 1263 | 而启动探针往往是和就绪探针搭配干活的,如果我们一个 Pod 启动时间过长,比如超过上面配置的失败检测次数,此时 Pod 就会被 kubernetes 重启,这样可能会进入无限重启的循环。 1264 | 1265 | 所以启动探针可以先检测一次是否已经启动,直到启动成功后才会做后续的检测。 1266 | ```yaml 1267 | startupProbe: 1268 | failureThreshold: 30 1269 | httpGet: 1270 | path: /ping 1271 | port: 8081 1272 | scheme: HTTP 1273 | periodSeconds: 5 1274 | successThreshold: 1 1275 | timeoutSeconds: 1 1276 | ``` 1277 | 1278 | > 我这里两个检测接口是同一个,具体得根据自己是实际业务进行配置; 1279 | > 比如应用端口启动之后并不代表业务已经就绪了,可能某些基础数据还没加载到内存中,这个时候就需要自己写其他的接口来配置就绪探针了。 1280 | 1281 | 1282 | ![image.png](https://s2.loli.net/2023/11/26/AskpbIJiBovPGZ7.png) 1283 | 1284 | 所有关于探针相关的日志都可以在 Pod 的事件中查看,比如如果一个应用在启动的过程中频繁重启,那就可以看看是不是某个探针检测失败了。 1285 | 1286 | # 存活探针 1287 | 1288 | 存活探针往往是用于保证应用高可用的,虽然 kubernetes 可以在 Pod 退出后自动重启,比如 `Pod OOM`;但应用假死他是检测不出来的。 1289 | 1290 | 为了保证这种情况下 Pod 也能被自动重启,就可以配合存活探针使用: 1291 | ```yaml 1292 | livenessProbe: 1293 | failureThreshold: 3 1294 | httpGet: 1295 | path: /ping 1296 | port: 8081 1297 | scheme: HTTP 1298 | periodSeconds: 3 1299 | successThreshold: 1 1300 | timeoutSeconds: 1 1301 | ``` 1302 | 1303 | 一旦接口响应失败,kubernetes 就会尝试重启。 1304 | 1305 | ![image.png](https://s2.loli.net/2023/11/26/khZlsDHLyX2WOxT.png) 1306 | 1307 | # 总结 1308 | ![image.png](https://s2.loli.net/2023/11/26/jRqSIbk4HmnsTWl.png) 1309 | 1310 | 以上探针配置最好是可以在研效平台可视化配置,这样维护起来也比较简单。 1311 | 1312 | 探针是维护应用健康的必要手段,强烈推荐大家都进行配置。 1313 | 1314 | # 滚动更新与回滚 1315 | 1316 | 当我们在生产环境发布应用时,必须要考虑到当前系统还有用户正在使用的情况,所以尽量需要做到不停机发版。 1317 | 1318 | 1319 | 所以在发布过程中理论上之前的 v1 版本依然存在,必须得等待 v2 版本启动成功后再删除历史的 v1 版本。 1320 | > 如果 v2 版本启动失败 v1 版本不会做任何操作,依然能对外提供服务。 1321 | 1322 | # 滚动更新 1323 | ![image.png](https://s2.loli.net/2023/11/29/stqYlaFwecvhouS.png) 1324 | 1325 | 这是我们预期中的发布流程,要在 kubernetes 使用该功能也非常简单,只需要在 spec 下配置相关策略即可: 1326 | 1327 | ```yaml 1328 | spec: 1329 | strategy: 1330 | rollingUpdate: 1331 | maxSurge: 25% 1332 | maxUnavailable: 25% 1333 | type: RollingUpdate 1334 | ``` 1335 | 这个配置的含义是: 1336 | - 使用滚动更新,当然还有 **Recreate** 用于删除旧版本的 Pod,我们基本不会用这个策略。 1337 | - `maxSurge`:滚动更新过程中可以最多超过预期 Pod 数量的百分比,当然也可以填整数。 1338 | - `maxUnavailable`:滚动更新过程中最大不可用 Pod 数量超过预期的百分比。 1339 | 1340 | 这样一旦我们更新了 Pod 的镜像时,kubernetes 就会先创建一个新版本的 Pod 等待他启动成功后再逐步更新剩下的 Pod。 1341 | ![](https://s2.loli.net/2023/11/29/s52LOSvECPReUnT.png) 1342 | 1343 | # 优雅停机 1344 | 滚动升级过程中不可避免的又会碰到一个优雅停机的问题,毕竟是需要停掉老的 Pod。 1345 | 1346 | 这时我们需要注意两种情况: 1347 | - 停机过程中,已经进入 Pod 的请求需要执行完毕才能退出。 1348 | - 停机之后不能再有请求路由到已经停机的 Pod 1349 | 1350 | 第一个问题如果我们使用的是 `Go`,可以使用一个钩子来监听 `kubernetes` 发出的退出信号: 1351 | ```go 1352 | quit := make(chan os.Signal) 1353 | signal.Notify(quit, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, 
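	// 注:kubernetes 终止 Pod 时发送的是 SIGTERM(宽限期结束后才是无法捕获的 SIGKILL)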
1365 | 1366 | # Graceful shutdown 1367 | A rolling upgrade inevitably runs into the graceful-shutdown problem, since the old Pods do have to be stopped. 1368 | 1369 | Two situations need attention: 1370 | - while a Pod is shutting down, requests already inside it must be allowed to finish; 1371 | - once a Pod has shut down, no further requests may be routed to it. 1372 | 1373 | For the first point, if we are using `Go`, we can register a hook that listens for the shutdown signals `kubernetes` sends: 1374 | ```go 1375 | quit := make(chan os.Signal, 1) // signal.Notify requires a buffered channel 1376 | signal.Notify(quit, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, 1377 | 	syscall.SIGQUIT, syscall.SIGPIPE) 1378 | go func() { 1379 | 	<-quit 1380 | 	log.Printf("quit signal received, exit \n") 1381 | 	os.Exit(0) 1382 | }() 1383 | ``` 1384 | Release the relevant resources in this handler. 1385 | 1386 | If you are on `spring boot` there is an equivalent configuration: 1387 | ```yaml 1388 | server: 1389 | shutdown: "graceful" 1390 | spring: 1391 | lifecycle: 1392 | timeout-per-shutdown-phase: "20s" 1393 | ``` 1394 | Once the application receives the shutdown signal, spring boot stops accepting new requests and waits for in-flight ones to complete. 1395 | 1396 | kubernetes will not wait forever for the application to finish its work, though. We can configure 1397 | ```yaml 1398 | terminationGracePeriodSeconds: 30 1399 | ``` 1400 | on the Pod to define how long to wait; here the Pod is force-killed after 30s. 1401 | > Tune the value to your actual workload. 1402 | 1403 | --- 1404 | ```yaml 1405 | spec: 1406 | containers: 1407 | - name: example-container 1408 | image: example-image 1409 | lifecycle: 1410 | preStop: 1411 | exec: 1412 | command: ["sh", "-c", "sleep 10"] 1413 | ``` 1414 | We can also configure a `preStop` hook that sleeps, which gives `kubernetes` time to remove the terminating Pod from the `iptables` rules before the `Pod` is actually deleted. 1415 | 1416 | That avoids the second situation: requests still being routed to a `Pod` that has already been removed. 1417 | See the `spring boot` docs for details: 1418 | [https://docs.spring.io/spring-boot/docs/2.4.4/reference/htmlsingle/#cloud-deployment-kubernetes-container-lifecycle](https://docs.spring.io/spring-boot/docs/2.4.4/reference/htmlsingle/#cloud-deployment-kubernetes-container-lifecycle) 1419 | 1420 | # Rollback 1421 | A rollback can be seen as just another upgrade, only to a historical version, and rolling an application back in `kubernetes` is very simple. 1422 | ```shell 1423 | # roll back to the previous revision 1424 | k rollout undo deployment/abc 1425 | # roll back to a specific revision 1426 | k rollout undo daemonset/abc --to-revision=3 1427 | ``` 1428 | kubernetes also guarantees that the rollback itself is rolling. 1429 | # Graceful restart 1430 | As covered in the earlier post [如何优雅重启 kubernetes 的 Pod](https://crossoverjie.top/2023/10/19/ob/k8s-restart-pod/), the rollout command can also restart Pods gracefully, and it guarantees the restart is rolling as well. 1431 | ```shell 1432 | k rollout restart deployment/nginx 1433 | ``` 1434 | 1435 | Rolling updates in `kubernetes` really are much simpler than the traditional ops approach; what used to require elaborate ops scripts now comes down to a handful of commands. -------------------------------------------------------------------------------- /api/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 gRPC authors. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | // Code generated by protoc-gen-go. DO NOT EDIT. 16 | // versions: 17 | // protoc-gen-go v1.27.1 18 | // protoc v3.5.1 19 | // source: helloworld.proto 20 | 21 | package helloworld 22 | 23 | import ( 24 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 25 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 26 | reflect "reflect" 27 | sync "sync" 28 | ) 29 | 30 | const ( 31 | // Verify that this generated code is sufficiently up-to-date. 32 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 33 | // Verify that runtime/protoimpl is sufficiently up-to-date. 34 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 35 | ) 36 | 37 | // The request message containing the user's name.
38 | type HelloRequest struct { 39 | state protoimpl.MessageState 40 | sizeCache protoimpl.SizeCache 41 | unknownFields protoimpl.UnknownFields 42 | 43 | Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` 44 | } 45 | 46 | func (x *HelloRequest) Reset() { 47 | *x = HelloRequest{} 48 | if protoimpl.UnsafeEnabled { 49 | mi := &file_helloworld_proto_msgTypes[0] 50 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 51 | ms.StoreMessageInfo(mi) 52 | } 53 | } 54 | 55 | func (x *HelloRequest) String() string { 56 | return protoimpl.X.MessageStringOf(x) 57 | } 58 | 59 | func (*HelloRequest) ProtoMessage() {} 60 | 61 | func (x *HelloRequest) ProtoReflect() protoreflect.Message { 62 | mi := &file_helloworld_proto_msgTypes[0] 63 | if protoimpl.UnsafeEnabled && x != nil { 64 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 65 | if ms.LoadMessageInfo() == nil { 66 | ms.StoreMessageInfo(mi) 67 | } 68 | return ms 69 | } 70 | return mi.MessageOf(x) 71 | } 72 | 73 | // Deprecated: Use HelloRequest.ProtoReflect.Descriptor instead. 74 | func (*HelloRequest) Descriptor() ([]byte, []int) { 75 | return file_helloworld_proto_rawDescGZIP(), []int{0} 76 | } 77 | 78 | func (x *HelloRequest) GetName() string { 79 | if x != nil { 80 | return x.Name 81 | } 82 | return "" 83 | } 84 | 85 | // The response message containing the greetings 86 | type HelloReply struct { 87 | state protoimpl.MessageState 88 | sizeCache protoimpl.SizeCache 89 | unknownFields protoimpl.UnknownFields 90 | 91 | Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` 92 | } 93 | 94 | func (x *HelloReply) Reset() { 95 | *x = HelloReply{} 96 | if protoimpl.UnsafeEnabled { 97 | mi := &file_helloworld_proto_msgTypes[1] 98 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 99 | ms.StoreMessageInfo(mi) 100 | } 101 | } 102 | 103 | func (x *HelloReply) String() string { 104 | return protoimpl.X.MessageStringOf(x) 105 | } 106 | 107 | func (*HelloReply) ProtoMessage() {} 108 | 109 | func (x *HelloReply) ProtoReflect() protoreflect.Message { 110 | mi := &file_helloworld_proto_msgTypes[1] 111 | if protoimpl.UnsafeEnabled && x != nil { 112 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 113 | if ms.LoadMessageInfo() == nil { 114 | ms.StoreMessageInfo(mi) 115 | } 116 | return ms 117 | } 118 | return mi.MessageOf(x) 119 | } 120 | 121 | // Deprecated: Use HelloReply.ProtoReflect.Descriptor instead. 
122 | func (*HelloReply) Descriptor() ([]byte, []int) { 123 | return file_helloworld_proto_rawDescGZIP(), []int{1} 124 | } 125 | 126 | func (x *HelloReply) GetMessage() string { 127 | if x != nil { 128 | return x.Message 129 | } 130 | return "" 131 | } 132 | 133 | var File_helloworld_proto protoreflect.FileDescriptor 134 | 135 | var file_helloworld_proto_rawDesc = []byte{ 136 | 0x0a, 0x10, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x70, 0x72, 0x6f, 137 | 0x74, 0x6f, 0x12, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x22, 0x22, 138 | 0x0a, 0x0c, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 139 | 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 140 | 0x6d, 0x65, 0x22, 0x26, 0x0a, 0x0a, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 141 | 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 142 | 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0x49, 0x0a, 0x07, 0x47, 0x72, 143 | 0x65, 0x65, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 144 | 0x6f, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 145 | 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 146 | 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 147 | 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x67, 0x0a, 0x1b, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 148 | 0x2e, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 149 | 0x6f, 0x72, 0x6c, 0x64, 0x42, 0x0f, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x57, 0x6f, 0x72, 0x6c, 0x64, 150 | 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 151 | 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 152 | 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2f, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 153 | 0x72, 0x6c, 0x64, 0x2f, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x62, 0x06, 154 | 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 155 | } 156 | 157 | var ( 158 | file_helloworld_proto_rawDescOnce sync.Once 159 | file_helloworld_proto_rawDescData = file_helloworld_proto_rawDesc 160 | ) 161 | 162 | func file_helloworld_proto_rawDescGZIP() []byte { 163 | file_helloworld_proto_rawDescOnce.Do(func() { 164 | file_helloworld_proto_rawDescData = protoimpl.X.CompressGZIP(file_helloworld_proto_rawDescData) 165 | }) 166 | return file_helloworld_proto_rawDescData 167 | } 168 | 169 | var file_helloworld_proto_msgTypes = make([]protoimpl.MessageInfo, 2) 170 | var file_helloworld_proto_goTypes = []interface{}{ 171 | (*HelloRequest)(nil), // 0: helloworld.HelloRequest 172 | (*HelloReply)(nil), // 1: helloworld.HelloReply 173 | } 174 | var file_helloworld_proto_depIdxs = []int32{ 175 | 0, // 0: helloworld.Greeter.SayHello:input_type -> helloworld.HelloRequest 176 | 1, // 1: helloworld.Greeter.SayHello:output_type -> helloworld.HelloReply 177 | 1, // [1:2] is the sub-list for method output_type 178 | 0, // [0:1] is the sub-list for method input_type 179 | 0, // [0:0] is the sub-list for extension type_name 180 | 0, // [0:0] is the sub-list for extension extendee 181 | 0, // [0:0] is the sub-list for field type_name 182 | } 183 | 184 | func init() { file_helloworld_proto_init() } 185 | func file_helloworld_proto_init() { 186 | 
if File_helloworld_proto != nil { 187 | return 188 | } 189 | if !protoimpl.UnsafeEnabled { 190 | file_helloworld_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 191 | switch v := v.(*HelloRequest); i { 192 | case 0: 193 | return &v.state 194 | case 1: 195 | return &v.sizeCache 196 | case 2: 197 | return &v.unknownFields 198 | default: 199 | return nil 200 | } 201 | } 202 | file_helloworld_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 203 | switch v := v.(*HelloReply); i { 204 | case 0: 205 | return &v.state 206 | case 1: 207 | return &v.sizeCache 208 | case 2: 209 | return &v.unknownFields 210 | default: 211 | return nil 212 | } 213 | } 214 | } 215 | type x struct{} 216 | out := protoimpl.TypeBuilder{ 217 | File: protoimpl.DescBuilder{ 218 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 219 | RawDescriptor: file_helloworld_proto_rawDesc, 220 | NumEnums: 0, 221 | NumMessages: 2, 222 | NumExtensions: 0, 223 | NumServices: 1, 224 | }, 225 | GoTypes: file_helloworld_proto_goTypes, 226 | DependencyIndexes: file_helloworld_proto_depIdxs, 227 | MessageInfos: file_helloworld_proto_msgTypes, 228 | }.Build() 229 | File_helloworld_proto = out.File 230 | file_helloworld_proto_rawDesc = nil 231 | file_helloworld_proto_goTypes = nil 232 | file_helloworld_proto_depIdxs = nil 233 | } 234 | -------------------------------------------------------------------------------- /api/google.golang.org/grpc/examples/helloworld/helloworld/helloworld_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 2 | 3 | package helloworld 4 | 5 | import ( 6 | context "context" 7 | grpc "google.golang.org/grpc" 8 | codes "google.golang.org/grpc/codes" 9 | status "google.golang.org/grpc/status" 10 | ) 11 | 12 | // This is a compile-time assertion to ensure that this generated file 13 | // is compatible with the grpc package it is being compiled against. 14 | // Requires gRPC-Go v1.32.0 or later. 15 | const _ = grpc.SupportPackageIsVersion7 16 | 17 | // GreeterClient is the client API for Greeter service. 18 | // 19 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 20 | type GreeterClient interface { 21 | // Sends a greeting 22 | SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) 23 | } 24 | 25 | type greeterClient struct { 26 | cc grpc.ClientConnInterface 27 | } 28 | 29 | func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient { 30 | return &greeterClient{cc} 31 | } 32 | 33 | func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { 34 | out := new(HelloReply) 35 | err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...) 36 | if err != nil { 37 | return nil, err 38 | } 39 | return out, nil 40 | } 41 | 42 | // GreeterServer is the server API for Greeter service. 43 | // All implementations must embed UnimplementedGreeterServer 44 | // for forward compatibility 45 | type GreeterServer interface { 46 | // Sends a greeting 47 | SayHello(context.Context, *HelloRequest) (*HelloReply, error) 48 | mustEmbedUnimplementedGreeterServer() 49 | } 50 | 51 | // UnimplementedGreeterServer must be embedded to have forward compatible implementations. 
52 | type UnimplementedGreeterServer struct { 53 | } 54 | 55 | func (UnimplementedGreeterServer) SayHello(context.Context, *HelloRequest) (*HelloReply, error) { 56 | return nil, status.Errorf(codes.Unimplemented, "method SayHello not implemented") 57 | } 58 | func (UnimplementedGreeterServer) mustEmbedUnimplementedGreeterServer() {} 59 | 60 | // UnsafeGreeterServer may be embedded to opt out of forward compatibility for this service. 61 | // Use of this interface is not recommended, as added methods to GreeterServer will 62 | // result in compilation errors. 63 | type UnsafeGreeterServer interface { 64 | mustEmbedUnimplementedGreeterServer() 65 | } 66 | 67 | func RegisterGreeterServer(s grpc.ServiceRegistrar, srv GreeterServer) { 68 | s.RegisterService(&Greeter_ServiceDesc, srv) 69 | } 70 | 71 | func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 72 | in := new(HelloRequest) 73 | if err := dec(in); err != nil { 74 | return nil, err 75 | } 76 | if interceptor == nil { 77 | return srv.(GreeterServer).SayHello(ctx, in) 78 | } 79 | info := &grpc.UnaryServerInfo{ 80 | Server: srv, 81 | FullMethod: "/helloworld.Greeter/SayHello", 82 | } 83 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 84 | return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) 85 | } 86 | return interceptor(ctx, in, info, handler) 87 | } 88 | 89 | // Greeter_ServiceDesc is the grpc.ServiceDesc for Greeter service. 90 | // It's only intended for direct use with grpc.RegisterService, 91 | // and not to be introspected or modified (even as a copy) 92 | var Greeter_ServiceDesc = grpc.ServiceDesc{ 93 | ServiceName: "helloworld.Greeter", 94 | HandlerType: (*GreeterServer)(nil), 95 | Methods: []grpc.MethodDesc{ 96 | { 97 | MethodName: "SayHello", 98 | Handler: _Greeter_SayHello_Handler, 99 | }, 100 | }, 101 | Streams: []grpc.StreamDesc{}, 102 | Metadata: "helloworld.proto", 103 | } 104 | -------------------------------------------------------------------------------- /api/helloworld.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2015 gRPC authors. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | syntax = "proto3"; 16 | 17 | option go_package = "google.golang.org/grpc/examples/helloworld/helloworld"; 18 | option java_multiple_files = true; 19 | option java_package = "io.grpc.examples.helloworld"; 20 | option java_outer_classname = "HelloWorldProto"; 21 | 22 | package helloworld; 23 | 24 | // The greeting service definition. 25 | service Greeter { 26 | // Sends a greeting 27 | rpc SayHello (HelloRequest) returns (HelloReply) {} 28 | } 29 | 30 | // The request message containing the user's name. 
31 | message HelloRequest { 32 | string name = 1; 33 | } 34 | 35 | // The response message containing the greetings 36 | message HelloReply { 37 | string message = 1; 38 | } -------------------------------------------------------------------------------- /deployment/deployment-istio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: k8s-combat-service # 通过标签选择关联 6 | name: k8s-combat-service 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: k8s-combat-service 12 | template: 13 | metadata: 14 | labels: 15 | app: k8s-combat-service 16 | spec: 17 | containers: 18 | - name: k8s-combat-service 19 | image: crossoverjie/k8s-combat:istio 20 | imagePullPolicy: Always 21 | resources: 22 | limits: 23 | cpu: "1" 24 | memory: 100Mi 25 | requests: 26 | cpu: "0.1" 27 | memory: 10Mi 28 | ports: 29 | - containerPort: 8081 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: k8s-combat-service 35 | spec: 36 | selector: 37 | app: k8s-combat-service # 通过标签选择关联 38 | type: ClusterIP 39 | ports: 40 | - port: 8081 # 本 Service 的端口 41 | targetPort: 8081 # 容器端口 42 | name: app 43 | - name: grpc 44 | port: 50051 45 | targetPort: 50051 46 | appProtocol: grpc -------------------------------------------------------------------------------- /deployment/deployment-service-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: k8s-combat-service-2 # 通过标签选择关联 6 | name: k8s-combat-service-2 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: k8s-combat-service-2 12 | template: 13 | metadata: 14 | labels: 15 | app: k8s-combat-service-2 16 | spec: 17 | containers: 18 | - name: k8s-combat-service-2 19 | image: crossoverjie/k8s-combat:v1 20 | imagePullPolicy: Always 21 | resources: 22 | limits: 23 | cpu: "1" 24 | memory: 100Mi 25 | requests: 26 | cpu: "0.1" 27 | memory: 10Mi 28 | ports: 29 | - containerPort: 8081 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: k8s-combat-service-2 35 | spec: 36 | selector: 37 | app: k8s-combat-service-2 # 通过标签选择关联 38 | type: ClusterIP 39 | ports: 40 | - port: 8081 # 本 Service 的端口 41 | targetPort: 8081 # 容器端口 42 | name: app -------------------------------------------------------------------------------- /deployment/deployment-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: k8s-combat-service # 通过标签选择关联 6 | name: k8s-combat-service 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: k8s-combat-service 12 | template: 13 | metadata: 14 | labels: 15 | app: k8s-combat-service 16 | spec: 17 | containers: 18 | - name: k8s-combat-service 19 | image: crossoverjie/k8s-combat:v1 20 | imagePullPolicy: Always 21 | resources: 22 | limits: 23 | cpu: "1" 24 | memory: 100Mi 25 | requests: 26 | cpu: "0.1" 27 | memory: 10Mi 28 | ports: 29 | - containerPort: 8081 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: k8s-combat-service 35 | spec: 36 | selector: 37 | app: k8s-combat-service # 通过标签选择关联 38 | type: ClusterIP 39 | ports: 40 | - port: 8081 # 本 Service 的端口 41 | targetPort: 8081 # 容器端口 42 | name: app -------------------------------------------------------------------------------- /deployment/deployment.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: k8s-combat 6 | name: k8s-combat 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: k8s-combat 12 | template: 13 | metadata: 14 | labels: 15 | app: k8s-combat 16 | spec: 17 | containers: 18 | - name: k8s-combat 19 | image: crossoverjie/k8s-combat:v1 20 | imagePullPolicy: Always 21 | volumeMounts: 22 | - name: app 23 | mountPath: "/go/bin/app.yaml" 24 | # configmap's key 25 | subPath: APP 26 | # Define all the ConfigMap's data as container environment variables 27 | envFrom: 28 | - configMapRef: 29 | name: k8s-combat-configmap 30 | env: 31 | - name: PG_PWD 32 | valueFrom: 33 | secretKeyRef: 34 | name: k8s-combat-secret 35 | key: PWD 36 | resources: 37 | limits: 38 | cpu: "1" 39 | memory: 300Mi 40 | requests: 41 | cpu: "0.1" 42 | memory: 100Mi 43 | volumes: 44 | - name: app 45 | configMap: 46 | # configmap's name 47 | name: k8s-combat-configmap 48 | --- 49 | apiVersion: v1 50 | kind: ConfigMap 51 | metadata: 52 | name: k8s-combat-configmap 53 | data: 54 | PG_URL: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" 55 | APP: | 56 | name: k8s-combat 57 | pulsar: 58 | url: "pulsar://localhost:6650" 59 | token: "abc" 60 | 61 | --- 62 | apiVersion: v1 63 | kind: Secret 64 | metadata: 65 | name: k8s-combat-secret 66 | type: Opaque 67 | data: 68 | PWD: YWJjCg== 69 | -------------------------------------------------------------------------------- /deployment/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: ingress-nginx 6 | app.kubernetes.io/name: ingress-nginx 7 | name: ingress-nginx 8 | --- 9 | apiVersion: v1 10 | automountServiceAccountToken: true 11 | kind: ServiceAccount 12 | metadata: 13 | labels: 14 | app.kubernetes.io/component: controller 15 | app.kubernetes.io/instance: ingress-nginx 16 | app.kubernetes.io/name: ingress-nginx 17 | app.kubernetes.io/part-of: ingress-nginx 18 | app.kubernetes.io/version: 1.8.2 19 | name: ingress-nginx 20 | namespace: ingress-nginx 21 | --- 22 | apiVersion: v1 23 | kind: ServiceAccount 24 | metadata: 25 | labels: 26 | app.kubernetes.io/component: admission-webhook 27 | app.kubernetes.io/instance: ingress-nginx 28 | app.kubernetes.io/name: ingress-nginx 29 | app.kubernetes.io/part-of: ingress-nginx 30 | app.kubernetes.io/version: 1.8.2 31 | name: ingress-nginx-admission 32 | namespace: ingress-nginx 33 | --- 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | kind: Role 36 | metadata: 37 | labels: 38 | app.kubernetes.io/component: controller 39 | app.kubernetes.io/instance: ingress-nginx 40 | app.kubernetes.io/name: ingress-nginx 41 | app.kubernetes.io/part-of: ingress-nginx 42 | app.kubernetes.io/version: 1.8.2 43 | name: ingress-nginx 44 | namespace: ingress-nginx 45 | rules: 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - namespaces 50 | verbs: 51 | - get 52 | - apiGroups: 53 | - "" 54 | resources: 55 | - configmaps 56 | - pods 57 | - secrets 58 | - endpoints 59 | verbs: 60 | - get 61 | - list 62 | - watch 63 | - apiGroups: 64 | - "" 65 | resources: 66 | - services 67 | verbs: 68 | - get 69 | - list 70 | - watch 71 | - apiGroups: 72 | - networking.k8s.io 73 | resources: 74 | - ingresses 75 | verbs: 76 | - get 77 | - list 78 | - watch 79 | - apiGroups: 80 | - networking.k8s.io 81 | resources: 82 | - ingresses/status 
83 | verbs: 84 | - update 85 | - apiGroups: 86 | - networking.k8s.io 87 | resources: 88 | - ingressclasses 89 | verbs: 90 | - get 91 | - list 92 | - watch 93 | - apiGroups: 94 | - coordination.k8s.io 95 | resourceNames: 96 | - ingress-nginx-leader 97 | resources: 98 | - leases 99 | verbs: 100 | - get 101 | - update 102 | - apiGroups: 103 | - coordination.k8s.io 104 | resources: 105 | - leases 106 | verbs: 107 | - create 108 | - apiGroups: 109 | - "" 110 | resources: 111 | - events 112 | verbs: 113 | - create 114 | - patch 115 | - apiGroups: 116 | - discovery.k8s.io 117 | resources: 118 | - endpointslices 119 | verbs: 120 | - list 121 | - watch 122 | - get 123 | --- 124 | apiVersion: rbac.authorization.k8s.io/v1 125 | kind: Role 126 | metadata: 127 | labels: 128 | app.kubernetes.io/component: admission-webhook 129 | app.kubernetes.io/instance: ingress-nginx 130 | app.kubernetes.io/name: ingress-nginx 131 | app.kubernetes.io/part-of: ingress-nginx 132 | app.kubernetes.io/version: 1.8.2 133 | name: ingress-nginx-admission 134 | namespace: ingress-nginx 135 | rules: 136 | - apiGroups: 137 | - "" 138 | resources: 139 | - secrets 140 | verbs: 141 | - get 142 | - create 143 | --- 144 | apiVersion: rbac.authorization.k8s.io/v1 145 | kind: ClusterRole 146 | metadata: 147 | labels: 148 | app.kubernetes.io/instance: ingress-nginx 149 | app.kubernetes.io/name: ingress-nginx 150 | app.kubernetes.io/part-of: ingress-nginx 151 | app.kubernetes.io/version: 1.8.2 152 | name: ingress-nginx 153 | rules: 154 | - apiGroups: 155 | - "" 156 | resources: 157 | - configmaps 158 | - endpoints 159 | - nodes 160 | - pods 161 | - secrets 162 | - namespaces 163 | verbs: 164 | - list 165 | - watch 166 | - apiGroups: 167 | - coordination.k8s.io 168 | resources: 169 | - leases 170 | verbs: 171 | - list 172 | - watch 173 | - apiGroups: 174 | - "" 175 | resources: 176 | - nodes 177 | verbs: 178 | - get 179 | - apiGroups: 180 | - "" 181 | resources: 182 | - services 183 | verbs: 184 | - get 185 | - list 186 | - watch 187 | - apiGroups: 188 | - networking.k8s.io 189 | resources: 190 | - ingresses 191 | verbs: 192 | - get 193 | - list 194 | - watch 195 | - apiGroups: 196 | - "" 197 | resources: 198 | - events 199 | verbs: 200 | - create 201 | - patch 202 | - apiGroups: 203 | - networking.k8s.io 204 | resources: 205 | - ingresses/status 206 | verbs: 207 | - update 208 | - apiGroups: 209 | - networking.k8s.io 210 | resources: 211 | - ingressclasses 212 | verbs: 213 | - get 214 | - list 215 | - watch 216 | - apiGroups: 217 | - discovery.k8s.io 218 | resources: 219 | - endpointslices 220 | verbs: 221 | - list 222 | - watch 223 | - get 224 | --- 225 | apiVersion: rbac.authorization.k8s.io/v1 226 | kind: ClusterRole 227 | metadata: 228 | labels: 229 | app.kubernetes.io/component: admission-webhook 230 | app.kubernetes.io/instance: ingress-nginx 231 | app.kubernetes.io/name: ingress-nginx 232 | app.kubernetes.io/part-of: ingress-nginx 233 | app.kubernetes.io/version: 1.8.2 234 | name: ingress-nginx-admission 235 | rules: 236 | - apiGroups: 237 | - admissionregistration.k8s.io 238 | resources: 239 | - validatingwebhookconfigurations 240 | verbs: 241 | - get 242 | - update 243 | --- 244 | apiVersion: rbac.authorization.k8s.io/v1 245 | kind: RoleBinding 246 | metadata: 247 | labels: 248 | app.kubernetes.io/component: controller 249 | app.kubernetes.io/instance: ingress-nginx 250 | app.kubernetes.io/name: ingress-nginx 251 | app.kubernetes.io/part-of: ingress-nginx 252 | app.kubernetes.io/version: 1.8.2 253 | name: ingress-nginx 254 | 
namespace: ingress-nginx 255 | roleRef: 256 | apiGroup: rbac.authorization.k8s.io 257 | kind: Role 258 | name: ingress-nginx 259 | subjects: 260 | - kind: ServiceAccount 261 | name: ingress-nginx 262 | namespace: ingress-nginx 263 | --- 264 | apiVersion: rbac.authorization.k8s.io/v1 265 | kind: RoleBinding 266 | metadata: 267 | labels: 268 | app.kubernetes.io/component: admission-webhook 269 | app.kubernetes.io/instance: ingress-nginx 270 | app.kubernetes.io/name: ingress-nginx 271 | app.kubernetes.io/part-of: ingress-nginx 272 | app.kubernetes.io/version: 1.8.2 273 | name: ingress-nginx-admission 274 | namespace: ingress-nginx 275 | roleRef: 276 | apiGroup: rbac.authorization.k8s.io 277 | kind: Role 278 | name: ingress-nginx-admission 279 | subjects: 280 | - kind: ServiceAccount 281 | name: ingress-nginx-admission 282 | namespace: ingress-nginx 283 | --- 284 | apiVersion: rbac.authorization.k8s.io/v1 285 | kind: ClusterRoleBinding 286 | metadata: 287 | labels: 288 | app.kubernetes.io/instance: ingress-nginx 289 | app.kubernetes.io/name: ingress-nginx 290 | app.kubernetes.io/part-of: ingress-nginx 291 | app.kubernetes.io/version: 1.8.2 292 | name: ingress-nginx 293 | roleRef: 294 | apiGroup: rbac.authorization.k8s.io 295 | kind: ClusterRole 296 | name: ingress-nginx 297 | subjects: 298 | - kind: ServiceAccount 299 | name: ingress-nginx 300 | namespace: ingress-nginx 301 | --- 302 | apiVersion: rbac.authorization.k8s.io/v1 303 | kind: ClusterRoleBinding 304 | metadata: 305 | labels: 306 | app.kubernetes.io/component: admission-webhook 307 | app.kubernetes.io/instance: ingress-nginx 308 | app.kubernetes.io/name: ingress-nginx 309 | app.kubernetes.io/part-of: ingress-nginx 310 | app.kubernetes.io/version: 1.8.2 311 | name: ingress-nginx-admission 312 | roleRef: 313 | apiGroup: rbac.authorization.k8s.io 314 | kind: ClusterRole 315 | name: ingress-nginx-admission 316 | subjects: 317 | - kind: ServiceAccount 318 | name: ingress-nginx-admission 319 | namespace: ingress-nginx 320 | --- 321 | apiVersion: v1 322 | data: 323 | allow-snippet-annotations: "true" 324 | kind: ConfigMap 325 | metadata: 326 | labels: 327 | app.kubernetes.io/component: controller 328 | app.kubernetes.io/instance: ingress-nginx 329 | app.kubernetes.io/name: ingress-nginx 330 | app.kubernetes.io/part-of: ingress-nginx 331 | app.kubernetes.io/version: 1.8.2 332 | name: ingress-nginx-controller 333 | namespace: ingress-nginx 334 | --- 335 | apiVersion: v1 336 | kind: Service 337 | metadata: 338 | labels: 339 | app.kubernetes.io/component: controller 340 | app.kubernetes.io/instance: ingress-nginx 341 | app.kubernetes.io/name: ingress-nginx 342 | app.kubernetes.io/part-of: ingress-nginx 343 | app.kubernetes.io/version: 1.8.2 344 | name: ingress-nginx-controller 345 | namespace: ingress-nginx 346 | spec: 347 | externalTrafficPolicy: Local 348 | ipFamilies: 349 | - IPv4 350 | ipFamilyPolicy: SingleStack 351 | ports: 352 | - appProtocol: http 353 | name: http 354 | port: 80 355 | protocol: TCP 356 | targetPort: http 357 | - appProtocol: https 358 | name: https 359 | port: 443 360 | protocol: TCP 361 | targetPort: https 362 | selector: 363 | app.kubernetes.io/component: controller 364 | app.kubernetes.io/instance: ingress-nginx 365 | app.kubernetes.io/name: ingress-nginx 366 | type: LoadBalancer 367 | --- 368 | apiVersion: v1 369 | kind: Service 370 | metadata: 371 | labels: 372 | app.kubernetes.io/component: controller 373 | app.kubernetes.io/instance: ingress-nginx 374 | app.kubernetes.io/name: ingress-nginx 375 | 
app.kubernetes.io/part-of: ingress-nginx 376 | app.kubernetes.io/version: 1.8.2 377 | name: ingress-nginx-controller-admission 378 | namespace: ingress-nginx 379 | spec: 380 | ports: 381 | - appProtocol: https 382 | name: https-webhook 383 | port: 443 384 | targetPort: webhook 385 | selector: 386 | app.kubernetes.io/component: controller 387 | app.kubernetes.io/instance: ingress-nginx 388 | app.kubernetes.io/name: ingress-nginx 389 | type: ClusterIP 390 | --- 391 | apiVersion: apps/v1 392 | kind: Deployment 393 | metadata: 394 | labels: 395 | app.kubernetes.io/component: controller 396 | app.kubernetes.io/instance: ingress-nginx 397 | app.kubernetes.io/name: ingress-nginx 398 | app.kubernetes.io/part-of: ingress-nginx 399 | app.kubernetes.io/version: 1.8.2 400 | name: ingress-nginx-controller 401 | namespace: ingress-nginx 402 | spec: 403 | minReadySeconds: 0 404 | revisionHistoryLimit: 10 405 | selector: 406 | matchLabels: 407 | app.kubernetes.io/component: controller 408 | app.kubernetes.io/instance: ingress-nginx 409 | app.kubernetes.io/name: ingress-nginx 410 | strategy: 411 | rollingUpdate: 412 | maxUnavailable: 1 413 | type: RollingUpdate 414 | template: 415 | metadata: 416 | labels: 417 | app.kubernetes.io/component: controller 418 | app.kubernetes.io/instance: ingress-nginx 419 | app.kubernetes.io/name: ingress-nginx 420 | app.kubernetes.io/part-of: ingress-nginx 421 | app.kubernetes.io/version: 1.8.2 422 | spec: 423 | containers: 424 | - args: 425 | - /nginx-ingress-controller 426 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller 427 | - --election-id=ingress-nginx-leader 428 | - --controller-class=k8s.io/ingress-nginx 429 | - --ingress-class=nginx 430 | - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller 431 | - --validating-webhook=:8443 432 | - --validating-webhook-certificate=/usr/local/certificates/cert 433 | - --validating-webhook-key=/usr/local/certificates/key 434 | env: 435 | - name: POD_NAME 436 | valueFrom: 437 | fieldRef: 438 | fieldPath: metadata.name 439 | - name: POD_NAMESPACE 440 | valueFrom: 441 | fieldRef: 442 | fieldPath: metadata.namespace 443 | - name: LD_PRELOAD 444 | value: /usr/local/lib/libmimalloc.so 445 | image: registry.k8s.io/ingress-nginx/controller:v1.8.2@sha256:74834d3d25b336b62cabeb8bf7f1d788706e2cf1cfd64022de4137ade8881ff2 446 | imagePullPolicy: IfNotPresent 447 | lifecycle: 448 | preStop: 449 | exec: 450 | command: 451 | - /wait-shutdown 452 | livenessProbe: 453 | failureThreshold: 5 454 | httpGet: 455 | path: /healthz 456 | port: 10254 457 | scheme: HTTP 458 | initialDelaySeconds: 10 459 | periodSeconds: 10 460 | successThreshold: 1 461 | timeoutSeconds: 1 462 | name: controller 463 | ports: 464 | - containerPort: 80 465 | name: http 466 | protocol: TCP 467 | - containerPort: 443 468 | name: https 469 | protocol: TCP 470 | - containerPort: 8443 471 | name: webhook 472 | protocol: TCP 473 | readinessProbe: 474 | failureThreshold: 3 475 | httpGet: 476 | path: /healthz 477 | port: 10254 478 | scheme: HTTP 479 | initialDelaySeconds: 10 480 | periodSeconds: 10 481 | successThreshold: 1 482 | timeoutSeconds: 1 483 | resources: 484 | requests: 485 | cpu: 100m 486 | memory: 90Mi 487 | securityContext: 488 | allowPrivilegeEscalation: true 489 | capabilities: 490 | add: 491 | - NET_BIND_SERVICE 492 | drop: 493 | - ALL 494 | runAsUser: 101 495 | volumeMounts: 496 | - mountPath: /usr/local/certificates/ 497 | name: webhook-cert 498 | readOnly: true 499 | dnsPolicy: ClusterFirst 500 | nodeSelector: 501 | kubernetes.io/os: linux 502 | 
serviceAccountName: ingress-nginx 503 | terminationGracePeriodSeconds: 300 504 | volumes: 505 | - name: webhook-cert 506 | secret: 507 | secretName: ingress-nginx-admission 508 | --- 509 | apiVersion: batch/v1 510 | kind: Job 511 | metadata: 512 | labels: 513 | app.kubernetes.io/component: admission-webhook 514 | app.kubernetes.io/instance: ingress-nginx 515 | app.kubernetes.io/name: ingress-nginx 516 | app.kubernetes.io/part-of: ingress-nginx 517 | app.kubernetes.io/version: 1.8.2 518 | name: ingress-nginx-admission-create 519 | namespace: ingress-nginx 520 | spec: 521 | template: 522 | metadata: 523 | labels: 524 | app.kubernetes.io/component: admission-webhook 525 | app.kubernetes.io/instance: ingress-nginx 526 | app.kubernetes.io/name: ingress-nginx 527 | app.kubernetes.io/part-of: ingress-nginx 528 | app.kubernetes.io/version: 1.8.2 529 | name: ingress-nginx-admission-create 530 | spec: 531 | containers: 532 | - args: 533 | - create 534 | - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc 535 | - --namespace=$(POD_NAMESPACE) 536 | - --secret-name=ingress-nginx-admission 537 | env: 538 | - name: POD_NAMESPACE 539 | valueFrom: 540 | fieldRef: 541 | fieldPath: metadata.namespace 542 | image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b 543 | imagePullPolicy: IfNotPresent 544 | name: create 545 | securityContext: 546 | allowPrivilegeEscalation: false 547 | nodeSelector: 548 | kubernetes.io/os: linux 549 | restartPolicy: OnFailure 550 | securityContext: 551 | fsGroup: 2000 552 | runAsNonRoot: true 553 | runAsUser: 2000 554 | serviceAccountName: ingress-nginx-admission 555 | --- 556 | apiVersion: batch/v1 557 | kind: Job 558 | metadata: 559 | labels: 560 | app.kubernetes.io/component: admission-webhook 561 | app.kubernetes.io/instance: ingress-nginx 562 | app.kubernetes.io/name: ingress-nginx 563 | app.kubernetes.io/part-of: ingress-nginx 564 | app.kubernetes.io/version: 1.8.2 565 | name: ingress-nginx-admission-patch 566 | namespace: ingress-nginx 567 | spec: 568 | template: 569 | metadata: 570 | labels: 571 | app.kubernetes.io/component: admission-webhook 572 | app.kubernetes.io/instance: ingress-nginx 573 | app.kubernetes.io/name: ingress-nginx 574 | app.kubernetes.io/part-of: ingress-nginx 575 | app.kubernetes.io/version: 1.8.2 576 | name: ingress-nginx-admission-patch 577 | spec: 578 | containers: 579 | - args: 580 | - patch 581 | - --webhook-name=ingress-nginx-admission 582 | - --namespace=$(POD_NAMESPACE) 583 | - --patch-mutating=false 584 | - --secret-name=ingress-nginx-admission 585 | - --patch-failure-policy=Fail 586 | env: 587 | - name: POD_NAMESPACE 588 | valueFrom: 589 | fieldRef: 590 | fieldPath: metadata.namespace 591 | image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20230407@sha256:543c40fd093964bc9ab509d3e791f9989963021f1e9e4c9c7b6700b02bfb227b 592 | imagePullPolicy: IfNotPresent 593 | name: patch 594 | securityContext: 595 | allowPrivilegeEscalation: false 596 | nodeSelector: 597 | kubernetes.io/os: linux 598 | restartPolicy: OnFailure 599 | securityContext: 600 | fsGroup: 2000 601 | runAsNonRoot: true 602 | runAsUser: 2000 603 | serviceAccountName: ingress-nginx-admission 604 | --- 605 | apiVersion: networking.k8s.io/v1 606 | kind: IngressClass 607 | metadata: 608 | labels: 609 | app.kubernetes.io/component: controller 610 | app.kubernetes.io/instance: ingress-nginx 611 | app.kubernetes.io/name: ingress-nginx 612 | 
app.kubernetes.io/part-of: ingress-nginx 613 | app.kubernetes.io/version: 1.8.2 614 | name: nginx 615 | spec: 616 | controller: k8s.io/ingress-nginx 617 | --- 618 | apiVersion: admissionregistration.k8s.io/v1 619 | kind: ValidatingWebhookConfiguration 620 | metadata: 621 | labels: 622 | app.kubernetes.io/component: admission-webhook 623 | app.kubernetes.io/instance: ingress-nginx 624 | app.kubernetes.io/name: ingress-nginx 625 | app.kubernetes.io/part-of: ingress-nginx 626 | app.kubernetes.io/version: 1.8.2 627 | name: ingress-nginx-admission 628 | webhooks: 629 | - admissionReviewVersions: 630 | - v1 631 | clientConfig: 632 | service: 633 | name: ingress-nginx-controller-admission 634 | namespace: ingress-nginx 635 | path: /networking/v1/ingresses 636 | failurePolicy: Fail 637 | matchPolicy: Equivalent 638 | name: validate.nginx.ingress.kubernetes.io 639 | rules: 640 | - apiGroups: 641 | - networking.k8s.io 642 | apiVersions: 643 | - v1 644 | operations: 645 | - CREATE 646 | - UPDATE 647 | resources: 648 | - ingresses 649 | sideEffects: None -------------------------------------------------------------------------------- /deployment/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: k8s-combat-ingress 5 | spec: 6 | ingressClassName: nginx 7 | rules: 8 | - host: www.service1.io 9 | http: 10 | paths: 11 | - backend: 12 | service: 13 | name: k8s-combat-service 14 | port: 15 | number: 8081 16 | path: / 17 | pathType: Prefix 18 | - host: www.service2.io 19 | http: 20 | paths: 21 | - backend: 22 | service: 23 | name: k8s-combat-service-2 24 | port: 25 | number: 8081 26 | path: / 27 | pathType: Prefix 28 | -------------------------------------------------------------------------------- /deployment/istio-ingress-gateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: istio-ingress-gateway 5 | namespace: default 6 | spec: 7 | servers: 8 | - port: 9 | number: 80 10 | name: http 11 | protocol: HTTP 12 | hosts: 13 | - 'www.service1.io' 14 | selector: 15 | app: istio-ingressgateway #与现有的 gateway 关联 16 | istio: ingressgateway 17 | 18 | --- 19 | apiVersion: networking.istio.io/v1alpha3 20 | kind: VirtualService 21 | metadata: 22 | name: k8s-combat-istio-http-vs 23 | spec: 24 | gateways: 25 | - istio-ingress-gateway 26 | hosts: 27 | - www.service1.io 28 | # match version to other version 29 | http: 30 | # - name: v1 31 | # match: 32 | # - headers: 33 | # version: 34 | # exact: '100' 35 | # route: 36 | # - destination: 37 | # host: k8s-combat-service-istio-mesh 38 | # subset: v1 39 | # - name: v2 40 | # match: 41 | # - headers: 42 | # version: 43 | # exact: '200' 44 | # route: 45 | # - destination: 46 | # host: k8s-combat-service-istio-mesh 47 | # subset: v2 48 | - name: default 49 | route: 50 | - destination: 51 | host: k8s-combat-service-istio-mesh 52 | port: 53 | number: 8081 54 | subset: v1 -------------------------------------------------------------------------------- /deployment/istio-mesh.yaml: -------------------------------------------------------------------------------- 1 | # # deployment-v1 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: k8s-combat-service-v1 7 | name: k8s-combat-service-v1 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: k8s-combat-service-v1 13 | template: 14 | metadata: 15 | 
labels: 16 | app: k8s-combat-service-v1 17 | appId: "12345" 18 | spec: 19 | containers: 20 | - name: k8s-combat-service-v1 21 | image: crossoverjie/k8s-combat:istio 22 | imagePullPolicy: Always 23 | resources: 24 | limits: 25 | cpu: "1" 26 | memory: 100Mi 27 | requests: 28 | cpu: "0.1" 29 | memory: 10Mi 30 | ports: 31 | - containerPort: 8081 32 | 33 | 34 | --- 35 | # deployment-v2 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | labels: 40 | app: k8s-combat-service-v2 41 | name: k8s-combat-service-v2 42 | spec: 43 | replicas: 1 44 | selector: 45 | matchLabels: 46 | app: k8s-combat-service-v2 47 | template: 48 | metadata: 49 | labels: 50 | app: k8s-combat-service-v2 51 | appId: "12345" 52 | spec: 53 | containers: 54 | - name: k8s-combat-service-v2 55 | image: crossoverjie/k8s-combat:istio 56 | imagePullPolicy: Always 57 | resources: 58 | limits: 59 | cpu: "1" 60 | memory: 100Mi 61 | requests: 62 | cpu: "0.1" 63 | memory: 10Mi 64 | ports: 65 | - containerPort: 8081 66 | 67 | 68 | --- 69 | apiVersion: v1 70 | kind: Service 71 | metadata: 72 | name: k8s-combat-service-istio-mesh 73 | spec: 74 | selector: 75 | appId: "12345" 76 | type: ClusterIP 77 | ports: 78 | - port: 8081 79 | targetPort: 8081 80 | name: app 81 | - name: grpc 82 | port: 50051 83 | targetPort: 50051 84 | appProtocol: grpc 85 | 86 | --- 87 | # wight rule 88 | apiVersion: networking.istio.io/v1alpha3 89 | kind: DestinationRule 90 | metadata: 91 | name: k8s-combat-service-ds 92 | spec: 93 | host: k8s-combat-service-istio-mesh 94 | subsets: 95 | - name: v1 96 | labels: 97 | app: k8s-combat-service-v1 98 | - name: v2 99 | labels: 100 | app: k8s-combat-service-v2 101 | 102 | --- 103 | apiVersion: networking.istio.io/v1alpha3 104 | kind: VirtualService 105 | metadata: 106 | name: k8s-combat-service-vs 107 | spec: 108 | gateways: 109 | - mesh 110 | hosts: 111 | - k8s-combat-service-istio-mesh # match this host 112 | # http: 113 | # - match: 114 | # - uri: 115 | # exact: /helloworld.Greeter/SayHello 116 | # route: 117 | # - destination: 118 | # host: k8s-combat-service-istio-mesh 119 | # subset: v1 120 | # weight: 10 121 | # - destination: 122 | # host: k8s-combat-service-istio-mesh 123 | # subset: v2 124 | # weight: 90 125 | # timeout: 5000ms 126 | 127 | # match version to other version 128 | http: 129 | - name: v1 130 | match: 131 | - headers: 132 | version: 133 | exact: '100' 134 | route: 135 | - destination: 136 | host: k8s-combat-service-istio-mesh 137 | subset: v1 138 | - name: v2 139 | match: 140 | - headers: 141 | version: 142 | exact: '200' 143 | route: 144 | - destination: 145 | host: k8s-combat-service-istio-mesh 146 | subset: v2 147 | - name: default 148 | route: 149 | - destination: 150 | host: k8s-combat-service-istio-mesh 151 | subset: v1 152 | -------------------------------------------------------------------------------- /deployment/probe/probe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: probe 5 | labels: 6 | app: k8s-combat-service # 通过标签选择关联 7 | name: k8s-combat-service 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: k8s-combat-service 13 | strategy: 14 | rollingUpdate: 15 | maxSurge: 25% 16 | maxUnavailable: 25% 17 | type: RollingUpdate 18 | template: 19 | metadata: 20 | labels: 21 | app: k8s-combat-service 22 | spec: 23 | containers: 24 | - name: k8s-combat-service 25 | image: crossoverjie/k8s-combat:v1 26 | imagePullPolicy: Always 27 | startupProbe: 28 | 
failureThreshold: 30 29 | httpGet: 30 | path: /ping 31 | port: 8081 32 | scheme: HTTP 33 | periodSeconds: 5 34 | successThreshold: 1 35 | timeoutSeconds: 1 36 | readinessProbe: 37 | failureThreshold: 3 38 | httpGet: 39 | path: /ping 40 | port: 8081 41 | scheme: HTTP 42 | periodSeconds: 3 43 | successThreshold: 1 44 | timeoutSeconds: 1 45 | livenessProbe: 46 | failureThreshold: 3 47 | httpGet: 48 | path: /ping 49 | port: 8081 50 | scheme: HTTP 51 | periodSeconds: 3 52 | successThreshold: 1 53 | timeoutSeconds: 1 54 | resources: 55 | limits: 56 | cpu: "1" 57 | memory: 100Mi 58 | requests: 59 | cpu: "0.1" 60 | memory: 10Mi 61 | ports: 62 | - containerPort: 8081 63 | --- 64 | apiVersion: v1 65 | kind: Service 66 | metadata: 67 | name: k8s-combat-service 68 | namespace: probe 69 | spec: 70 | selector: 71 | app: k8s-combat-service # 通过标签选择关联 72 | type: ClusterIP 73 | ports: 74 | - port: 8081 # 本 Service 的端口 75 | targetPort: 8081 # 容器端口 76 | name: app -------------------------------------------------------------------------------- /deployment/telemetry/deployment-log.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: k8s-combat 6 | name: k8s-combat 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: k8s-combat 12 | template: 13 | metadata: 14 | labels: 15 | app: k8s-combat 16 | spec: 17 | containers: 18 | - name: k8s-combat 19 | image: crossoverjie/k8s-combat:log 20 | imagePullPolicy: Always 21 | resources: 22 | limits: 23 | cpu: "1" 24 | memory: 100Mi 25 | requests: 26 | cpu: "0.1" 27 | memory: 100Mi 28 | - args: 29 | - -c 30 | - /etc/filebeat/filebeat.yml 31 | - -e 32 | image: elastic/filebeat:8.11.3 33 | imagePullPolicy: IfNotPresent 34 | name: log-beat 35 | resources: 36 | limits: 37 | cpu: 500m 38 | memory: 256Mi 39 | requests: 40 | cpu: 1m 41 | memory: 64Mi 42 | securityContext: 43 | runAsUser: 0 44 | terminationMessagePath: /dev/termination-log 45 | terminationMessagePolicy: File 46 | volumeMounts: 47 | - mountPath: /home/admin/logs 48 | name: logs 49 | readOnly: true 50 | - mountPath: /etc/filebeat/ 51 | name: log-config 52 | readOnly: true 53 | volumes: 54 | - emptyDir: {} 55 | name: logs 56 | - configMap: 57 | defaultMode: 416 58 | name: biz-log-k8s-combat 59 | name: log-config 60 | 61 | 62 | --- 63 | apiVersion: v1 64 | data: 65 | filebeat.yml: |- 66 | setup.template.enabled: false 67 | setup.ilm.enabled: false 68 | 69 | filebeat.inputs: 70 | - type: container 71 | paths: 72 | - '/var/log/containers/*.log' 73 | max_bytes: 20480 74 | json.keys_under_root: true 75 | json.overwrite_keys: true 76 | ignore_decoding_error: true 77 | 78 | 79 | output.elasticsearch: 80 | hosts: ["http://elasticsearch:9200"] 81 | username: "elastic" 82 | password: "y9=gMoRL88squ3nHuC1V" 83 | kind: ConfigMap 84 | metadata: 85 | name: biz-log-k8s-combat 86 | -------------------------------------------------------------------------------- /deployment/telemetry/elasticsearch.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: elasticsearch 6 | 7 | spec: 8 | serviceName: elasticsearch 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: elasticsearch 13 | template: 14 | metadata: 15 | labels: 16 | app: elasticsearch 17 | spec: 18 | containers: 19 | - name: elasticsearch 20 | image: docker.elastic.co/elasticsearch/elasticsearch:8.11.3 21 | ports: 22 | - containerPort: 9200 
23 | resources: 24 | limits: 25 | cpu: "2" 26 | memory: 2048Mi 27 | requests: 28 | cpu: "0.1" 29 | memory: 512Mi 30 | env: 31 | - name: discovery.type 32 | value: single-node 33 | 34 | --- 35 | 36 | apiVersion: v1 37 | kind: Service 38 | metadata: 39 | name: elasticsearch 40 | spec: 41 | ports: 42 | - port: 9200 43 | targetPort: 9200 44 | selector: 45 | app: elasticsearch 46 | 47 | --- 48 | 49 | apiVersion: apps/v1 50 | kind: Deployment 51 | metadata: 52 | name: kibana 53 | 54 | spec: 55 | replicas: 1 56 | selector: 57 | matchLabels: 58 | app: kibana 59 | template: 60 | metadata: 61 | labels: 62 | app: kibana 63 | spec: 64 | containers: 65 | - name: kibana 66 | image: docker.elastic.co/kibana/kibana:8.11.3 67 | ports: 68 | - containerPort: 5601 69 | resources: 70 | limits: 71 | cpu: "1" 72 | memory: 2048Mi 73 | requests: 74 | cpu: "0.1" 75 | memory: 1024Mi 76 | env: 77 | - name: ELASTICSEARCH_URL 78 | value: http://elasticsearch:9200 79 | 80 | --- 81 | 82 | apiVersion: v1 83 | kind: Service 84 | metadata: 85 | name: kibana 86 | 87 | spec: 88 | ports: 89 | - port: 5601 90 | targetPort: 5601 91 | selector: 92 | app: kibana -------------------------------------------------------------------------------- /docs/title.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/crossoverJie/k8s-combat/ff93021830b781b004b90146cbe6f19d585a1678/docs/title.png -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module k8s-combat 2 | 3 | go 1.22 4 | 5 | require ( 6 | github.com/open-feature/go-sdk v1.12.0 7 | github.com/open-feature/go-sdk-contrib/hooks/open-telemetry v0.3.3 8 | github.com/open-feature/go-sdk-contrib/providers/flagd v0.2.2 9 | github.com/rs/zerolog v1.31.0 10 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 11 | go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 12 | go.opentelemetry.io/otel v1.28.0 13 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 14 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 15 | go.opentelemetry.io/otel/sdk v1.28.0 16 | go.opentelemetry.io/otel/sdk/metric v1.28.0 17 | go.opentelemetry.io/otel/trace v1.28.0 18 | google.golang.org/grpc v1.65.0 19 | google.golang.org/protobuf v1.34.2 20 | ) 21 | 22 | require ( 23 | buf.build/gen/go/open-feature/flagd/connectrpc/go v1.16.1-20240215170432-1e611e2999cc.1 // indirect 24 | buf.build/gen/go/open-feature/flagd/grpc/go v1.3.0-20240215170432-1e611e2999cc.2 // indirect 25 | buf.build/gen/go/open-feature/flagd/protocolbuffers/go v1.33.0-20240215170432-1e611e2999cc.1 // indirect 26 | connectrpc.com/connect v1.16.1 // indirect 27 | connectrpc.com/otelconnect v0.7.0 // indirect 28 | github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect 29 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect 30 | github.com/diegoholiveira/jsonlogic/v3 v3.5.2 // indirect 31 | github.com/fsnotify/fsnotify v1.7.0 // indirect 32 | github.com/go-logr/logr v1.4.2 // indirect 33 | github.com/go-logr/stdr v1.2.2 // indirect 34 | github.com/go-logr/zapr v1.3.0 // indirect 35 | github.com/gogo/protobuf v1.3.2 // indirect 36 | github.com/google/gofuzz v1.2.0 // indirect 37 | github.com/google/uuid v1.6.0 // indirect 38 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect 39 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 40 | 
github.com/json-iterator/go v1.1.12 // indirect 41 | github.com/klauspost/cpuid/v2 v2.2.7 // indirect 42 | github.com/mattn/go-colorable v0.1.13 // indirect 43 | github.com/mattn/go-isatty v0.0.19 // indirect 44 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 45 | github.com/modern-go/reflect2 v1.0.2 // indirect 46 | github.com/open-feature/flagd-schemas v0.2.9-0.20240408192555-ea4f119d2bd7 // indirect 47 | github.com/open-feature/flagd/core v0.9.1 // indirect 48 | github.com/twmb/murmur3 v1.1.8 // indirect 49 | github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect 50 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect 51 | github.com/xeipuuv/gojsonschema v1.2.0 // indirect 52 | github.com/zeebo/xxh3 v1.0.2 // indirect 53 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect 54 | go.opentelemetry.io/otel/metric v1.28.0 // indirect 55 | go.opentelemetry.io/proto/otlp v1.3.1 // indirect 56 | go.uber.org/multierr v1.11.0 // indirect 57 | go.uber.org/zap v1.27.0 // indirect 58 | golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect 59 | golang.org/x/mod v0.17.0 // indirect 60 | golang.org/x/net v0.26.0 // indirect 61 | golang.org/x/sys v0.21.0 // indirect 62 | golang.org/x/text v0.16.0 // indirect 63 | google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect 64 | google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect 65 | gopkg.in/inf.v0 v0.9.1 // indirect 66 | gopkg.in/yaml.v2 v2.4.0 // indirect 67 | gopkg.in/yaml.v3 v3.0.1 // indirect 68 | k8s.io/apimachinery v0.29.3 // indirect 69 | k8s.io/klog/v2 v2.120.1 // indirect 70 | k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect 71 | sigs.k8s.io/controller-runtime v0.17.3 // indirect 72 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 73 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 74 | ) 75 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/rs/zerolog/log" 7 | "go.opentelemetry.io/otel/attribute" 8 | "go.opentelemetry.io/otel/metric" 9 | "go.opentelemetry.io/otel/trace" 10 | "google.golang.org/grpc" 11 | "google.golang.org/grpc/metadata" 12 | pb "k8s-combat/api/google.golang.org/grpc/examples/helloworld/helloworld" 13 | 14 | otelhooks "github.com/open-feature/go-sdk-contrib/hooks/open-telemetry/pkg" 15 | flagd "github.com/open-feature/go-sdk-contrib/providers/flagd/pkg" 16 | "github.com/open-feature/go-sdk/openfeature" 17 | "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" 18 | "go.opentelemetry.io/contrib/instrumentation/runtime" 19 | "go.opentelemetry.io/otel" 20 | "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" 21 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" 22 | "go.opentelemetry.io/otel/propagation" 23 | sdkmetric "go.opentelemetry.io/otel/sdk/metric" 24 | sdkresource "go.opentelemetry.io/otel/sdk/resource" 25 | sdktrace "go.opentelemetry.io/otel/sdk/trace" 26 | 27 | "net" 28 | "net/http" 29 | "os" 30 | "os/signal" 31 | "sync" 32 | "syscall" 33 | "time" 34 | ) 35 | 36 | func main() { 37 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 38 | name, _ := os.Hostname() 39 | url := os.Getenv("PG_URL") 40 | pwd := os.Getenv("PG_PWD") 41 | 
fmt.Fprintf(w, "%s-%s-%s", name, url, pwd) 42 | }) 43 | http.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { 44 | name, _ := os.Hostname() 45 | log.Info().Msgf("%s ping", name) 46 | // this /ping endpoint is also the target of the probes configured in deployment/probe/probe.yaml 47 | fmt.Fprint(w, "pong") 48 | }) 49 | http.HandleFunc("/service", func(w http.ResponseWriter, r *http.Request) { 50 | resp, err := http.Get("http://k8s-combat-service:8081/ping") 51 | if err != nil { 52 | log.Err(err).Msg("get http://k8s-combat-service:8081/ping error") 53 | fmt.Fprint(w, err) 54 | return 55 | } 56 | fmt.Fprint(w, resp.Status) 57 | }) 58 | var ( 59 | once sync.Once 60 | c pb.GreeterClient 61 | ) 62 | http.HandleFunc("/grpc_client", func(w http.ResponseWriter, r *http.Request) { 63 | once.Do(func() { 64 | service := r.URL.Query().Get("name") 65 | conn, err := grpc.Dial(fmt.Sprintf("%s:50051", service), grpc.WithInsecure(), grpc.WithBlock()) 66 | if err != nil { 67 | log.Fatal().Msgf("did not connect: %v", err) 68 | } 69 | c = pb.NewGreeterClient(conn) 70 | }) 71 | version := r.URL.Query().Get("version") 72 | 73 | // Contact the server and print out its response. 74 | name := "world" 75 | ctx, cancel := context.WithTimeout(context.Background(), time.Second) 76 | md := metadata.New(map[string]string{ 77 | "version": version, 78 | }) 79 | ctx = metadata.NewOutgoingContext(ctx, md) 80 | defer cancel() 81 | g, err := c.SayHello(ctx, &pb.HelloRequest{Name: name}) 82 | if err != nil { 83 | log.Fatal().Msgf("could not greet: %v", err) 84 | } 85 | fmt.Fprintf(w, "Greeting: %s", g.GetMessage()) 86 | }) 87 | 88 | // Init OpenTelemetry start 89 | tp := initTracerProvider() 90 | defer func() { 91 | if err := tp.Shutdown(context.Background()); err != nil { 92 | log.Printf("Error shutting down tracer provider: %v", err) 93 | } 94 | }() 95 | 96 | mp := initMeterProvider() 97 | defer func() { 98 | if err := mp.Shutdown(context.Background()); err != nil { 99 | log.Printf("Error shutting down meter provider: %v", err) 100 | } 101 | }() 102 | 103 | err := runtime.Start(runtime.WithMinimumReadMemStatsInterval(time.Second)) 104 | if err != nil { 105 | log.Err(err).Msg("start runtime metrics failed") // zerolog events must be closed with Msg/Send to be emitted 106 | } 107 | 108 | var meter = otel.Meter("test.io/k8s/combat") 109 | apiCounter, err = meter.Int64Counter( 110 | "api.counter", 111 | metric.WithDescription("Number of API calls."), 112 | metric.WithUnit("{call}"), 113 | ) 114 | if err != nil { 115 | log.Err(err).Msg("create api.counter failed") 116 | } 117 | 118 | openfeature.SetProvider(flagd.NewProvider()) 119 | openfeature.AddHooks(otelhooks.NewTracesHook()) 120 | 121 | tracer = tp.Tracer("k8s-combat") 122 | // Init OpenTelemetry end 123 | 124 | go func() { 125 | var port = ":50051" 126 | lis, err := net.Listen("tcp", port) 127 | if err != nil { 128 | log.Fatal().Msgf("failed to listen: %v", err) 129 | } 130 | s := grpc.NewServer( 131 | grpc.StatsHandler(otelgrpc.NewServerHandler()), 132 | ) 133 | pb.RegisterGreeterServer(s, &server{}) 134 | if err := s.Serve(lis); err != nil { 135 | log.Fatal().Msgf("failed to serve: %v", err) 136 | } else { 137 | log.Printf("served on %s \n", port) 138 | } 139 | }() 140 | quit := make(chan os.Signal, 1) // signal.Notify requires a buffered channel so the signal is not dropped 141 | signal.Notify(quit, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE) 142 | go func() { 143 | <-quit 144 | log.Printf("quit signal received, exit \n") 145 | os.Exit(0) 146 | }() 147 | http.ListenAndServe(":8081", nil) 148 | } 149 | 150 | // server is used to implement helloworld.GreeterServer.
151 | type server struct { 152 | pb.UnimplementedGreeterServer 153 | } 154 | 155 | // SayHello implements helloworld.GreeterServer 156 | func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { 157 | defer apiCounter.Add(ctx, 1) 158 | md, _ := metadata.FromIncomingContext(ctx) 159 | log.Printf("Received: %v, md: %v", in.GetName(), md) 160 | name, _ := os.Hostname() 161 | span := trace.SpanFromContext(ctx) 162 | span.SetAttributes(attribute.String("request.name", in.Name)) 163 | s.span(ctx) 164 | return &pb.HelloReply{Message: fmt.Sprintf("hostname:%s, in:%s, md:%v", name, in.Name, md)}, nil 165 | } 166 | 167 | func (s *server) span(ctx context.Context) { 168 | ctx, span := tracer.Start(ctx, "hello-span") 169 | defer span.End() 170 | // do some work 171 | log.Printf("create span") 172 | } 173 | 174 | var tracer trace.Tracer 175 | var resource *sdkresource.Resource 176 | var initResourcesOnce sync.Once 177 | 178 | var apiCounter metric.Int64Counter 179 | 180 | func initResource() *sdkresource.Resource { 181 | initResourcesOnce.Do(func() { 182 | extraResources, _ := sdkresource.New( 183 | context.Background(), 184 | sdkresource.WithOS(), 185 | sdkresource.WithProcess(), 186 | sdkresource.WithContainer(), 187 | sdkresource.WithHost(), 188 | ) 189 | resource, _ = sdkresource.Merge( 190 | sdkresource.Default(), 191 | extraResources, 192 | ) 193 | }) 194 | return resource 195 | } 196 | 197 | func initTracerProvider() *sdktrace.TracerProvider { 198 | ctx := context.Background() 199 | 200 | exporter, err := otlptracegrpc.New(ctx) 201 | if err != nil { 202 | log.Printf("new otlp trace grpc exporter failed: %v", err) 203 | } 204 | tp := sdktrace.NewTracerProvider( 205 | sdktrace.WithBatcher(exporter), 206 | sdktrace.WithResource(initResource()), 207 | ) 208 | otel.SetTracerProvider(tp) 209 | otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) 210 | return tp 211 | } 212 | 213 | func initMeterProvider() *sdkmetric.MeterProvider { 214 | ctx := context.Background() 215 | 216 | exporter, err := otlpmetricgrpc.New(ctx) 217 | if err != nil { 218 | log.Printf("new otlp metric grpc exporter failed: %v", err) 219 | } 220 | 221 | mp := sdkmetric.NewMeterProvider( 222 | sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)), 223 | sdkmetric.WithResource(initResource()), 224 | ) 225 | otel.SetMeterProvider(mp) 226 | return mp 227 | } 228 | --------------------------------------------------------------------------------