├── .gitignore ├── docs ├── images │ ├── arch.jpg │ ├── github-wp.png │ └── ikukantai_wp.jpg └── publication │ └── CloudNet_2024.pdf ├── manifest ├── prometheus │ └── values.yaml ├── demo │ ├── alpha.yaml │ ├── beta.yaml │ └── hello.yaml ├── miporin │ ├── configmap.yaml │ ├── miporin.yaml │ └── rbac.yaml ├── 4-serving-default-domain.yaml └── 3-kourier.yaml ├── LICENSE ├── hack ├── fix-otel-collector-cfg.sh └── replace-image.sh └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | images/circle.png 2 | -------------------------------------------------------------------------------- /docs/images/arch.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bonavadeur/ikukantai/HEAD/docs/images/arch.jpg -------------------------------------------------------------------------------- /docs/images/github-wp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bonavadeur/ikukantai/HEAD/docs/images/github-wp.png -------------------------------------------------------------------------------- /docs/images/ikukantai_wp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bonavadeur/ikukantai/HEAD/docs/images/ikukantai_wp.jpg -------------------------------------------------------------------------------- /docs/publication/CloudNet_2024.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bonavadeur/ikukantai/HEAD/docs/publication/CloudNet_2024.pdf -------------------------------------------------------------------------------- /manifest/prometheus/values.yaml: -------------------------------------------------------------------------------- 1 | kube-state-metrics: 2 | metricLabelsAllowlist: 3 | - pods=[*] 4 | - 
deployments=[app.kubernetes.io/name,app.kubernetes.io/component,app.kubernetes.io/instance] 5 | prometheus: 6 | prometheusSpec: 7 | serviceMonitorSelectorNilUsesHelmValues: false 8 | podMonitorSelectorNilUsesHelmValues: false 9 | grafana: 10 | sidecar: 11 | dashboards: 12 | enabled: true 13 | searchNamespace: ALL 14 | -------------------------------------------------------------------------------- /manifest/demo/alpha.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: alpha 5 | namespace: default 6 | spec: 7 | template: 8 | metadata: 9 | annotations: 10 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "0s" 11 | autoscaling.knative.dev/target-burst-capacity: "-1" 12 | autoscaling.knative.dev/window: "12s" 13 | # DO NOT set the following two lines 14 | # autoscaling.knative.dev/min-scale: "3" 15 | # autoscaling.knative.dev/max-scale: "3" 16 | autoscaling.knative.dev/target: "10" 17 | spec: 18 | containers: 19 | - image: docker.io/bonavadeur/shuka:v1.3 20 | resources: 21 | limits: 22 | cpu: 500m 23 | memory: 640M 24 | env: 25 | - name: TARGET 26 | value: "alpha" 27 | -------------------------------------------------------------------------------- /manifest/demo/beta.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: beta 5 | namespace: default 6 | spec: 7 | template: 8 | metadata: 9 | annotations: 10 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "0s" 11 | autoscaling.knative.dev/target-burst-capacity: "-1" 12 | autoscaling.knative.dev/window: "12s" 13 | # DO NOT set the following two lines 14 | # autoscaling.knative.dev/min-scale: "3" 15 | # autoscaling.knative.dev/max-scale: "3" 16 | autoscaling.knative.dev/target: "10" 17 | spec: 18 | containers: 19 | - image: docker.io/bonavadeur/shuka:v1.3 20 | resources: 
21 | limits: 22 | cpu: 500m 23 | memory: 640M 24 | env: 25 | - name: TARGET 26 | value: "beta" 27 | -------------------------------------------------------------------------------- /manifest/demo/hello.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1 2 | kind: Service 3 | metadata: 4 | name: hello 5 | namespace: default 6 | spec: 7 | template: 8 | metadata: 9 | annotations: 10 | autoscaling.knative.dev/scale-to-zero-pod-retention-period: "0s" 11 | autoscaling.knative.dev/target-burst-capacity: "-1" 12 | autoscaling.knative.dev/window: "12s" 13 | # DO NOT set the following two lines 14 | # autoscaling.knative.dev/min-scale: "3" 15 | # autoscaling.knative.dev/max-scale: "3" 16 | autoscaling.knative.dev/target: "10" 17 | spec: 18 | containers: 19 | - image: docker.io/bonavadeur/shuka:v1.3 20 | resources: 21 | limits: 22 | cpu: 500m 23 | memory: 640M 24 | env: 25 | - name: TARGET 26 | value: "Konnichiwa" 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | All Rights Reserved 2 | 3 | Copyright (c) 2024 Bonavadeur 4 | 5 | Created by Bonavadeur 6 | 7 | The software is provided "as is", without warranty of any kind, express or 8 | implied, including but not limited to the warranties of merchantability, 9 | fitness for a particular purpose and noninfringement. In no event shall the 10 | authors or copyright holders be liable for any claim, damages or other 11 | liability, whether in an action of contract, tort or otherwise, arising from, 12 | out of or in connection with the software or the use or other dealings in the 13 | software. 14 | 15 | The software is provided as closed-source with binary and installation guide 16 | only, not including source code. ALL usage of the software must be with the 17 | author's approval. 
18 | 19 | Please contact the author via email daodaihiep22ussr@gmail.com for approval. 20 | -------------------------------------------------------------------------------- /hack/fix-otel-collector-cfg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl patch configmap otel-collector-config -n metrics --type='merge' -p='{ 4 | "metadata": { 5 | "annotations": { 6 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"collector.yaml\":\"receivers:\\n opencensus:\\n endpoint: \\\"0.0.0.0:55678\\\"\\n\\nexporters:\\n debug:\\n prometheus:\\n endpoint: \\\"0.0.0.0:8889\\\"\\nextensions:\\n health_check:\\n pprof:\\n zpages:\\nservice:\\n extensions: [health_check, pprof, zpages]\\n pipelines:\\n metrics:\\n receivers: [opencensus]\\n processors: []\\n exporters: [prometheus]\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"name\":\"otel-collector-config\",\"namespace\":\"metrics\"}}" 7 | } 8 | } 9 | }' 10 | 11 | kubectl patch configmap otel-collector-config -n metrics --type='merge' -p='{ 12 | "data": { 13 | "collector.yaml": "receivers:\n opencensus:\n endpoint: \"0.0.0.0:55678\"\n\nexporters:\n debug:\n prometheus:\n endpoint: \"0.0.0.0:8889\"\nextensions:\n health_check:\n pprof:\n zpages:\nservice:\n extensions: [health_check, pprof, zpages]\n pipelines:\n metrics:\n receivers: [opencensus]\n processors: []\n exporters: [prometheus]" 14 | } 15 | }' -------------------------------------------------------------------------------- /manifest/miporin/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: config-ikukantai 5 | namespace: knative-serving 6 | data: 7 | autoscaler-test: autoscaler-tesuto 8 | rein-greeting: konnichiwa 9 | activator-serving-local: "disabled" 10 | ikukantai-miporin-mode: "careful" 11 | ikukantai-autoscaler-enablekpa: "false" 12 | 
ikukantai-ignore-container-concur-kpa: "true" 13 | ikukantai-miporin-weighted: | 14 | "100,0,0" 15 | "0,100,0" 16 | "0,0,100" 17 | queue-meter-exporter-scan-frequency: "2" 18 | queue-max-meter-exporter-age: "10" 19 | # v2.1 20 | ikukantai-enable-nonna: "false" 21 | nonna-threads: "10" 22 | # v2.2 23 | yuza: "qFBZgYUShZU0CaGjmdUY822yuR8WFiKN" 24 | kagi: "7iADCcbwhoyrGcTmrCZt4JrK8jUYwxgCxp2o7rpKxdyvN_VomcV82s1D8lYAg2Db" 25 | ikukantai-enable-katyusha: "true" 26 | katyusha-enable-fukabunsan: "true" 27 | katyusha-enable-junbanmachi: "true" 28 | katyusha-enable-outoushuugou: "true" 29 | katyusha-junbanmachi-concurrent-request: "10" 30 | katyusha-threads: "10" 31 | --- 32 | apiVersion: v1 33 | kind: ConfigMap 34 | metadata: 35 | name: config-ikukantai 36 | namespace: default 37 | data: 38 | queue-tesuto: ohayou 39 | queue-meter-exporter-scan-frequency: "2" 40 | queue-max-meter-exporter-age: "10" 41 | ikukantai-miporin-enable-yukari: "true" 42 | --- 43 | -------------------------------------------------------------------------------- /manifest/miporin/miporin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: miporin 5 | namespace: knative-serving 6 | spec: 7 | selector: 8 | app: miporin 9 | ports: 10 | - port: 80 11 | targetPort: 18080 12 | --- 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | metadata: 16 | name: miporin 17 | namespace: knative-serving 18 | labels: 19 | app: miporin 20 | spec: 21 | replicas: 1 22 | selector: 23 | matchLabels: 24 | app: miporin 25 | template: 26 | metadata: 27 | labels: 28 | app: miporin 29 | spec: 30 | serviceAccount: miporin 31 | hostname: miporin 32 | containers: 33 | - name: miporin 34 | image: docker.io/bonavadeur/miporin:v1.2-cnsm-15nov24 35 | imagePullPolicy: IfNotPresent 36 | ports: 37 | - containerPort: 18080 38 | env: 39 | - name: MIPORIN_ENVIRONMENT 40 | value: "container" 41 | envFrom: 42 | - configMapRef: 43 | name: 
config-ikukantai 44 | affinity: 45 | nodeAffinity: 46 | requiredDuringSchedulingIgnoredDuringExecution: 47 | nodeSelectorTerms: 48 | - matchExpressions: 49 | - key: kubernetes.io/hostname 50 | operator: In 51 | values: 52 | - node1 53 | --- 54 | -------------------------------------------------------------------------------- /hack/replace-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TAG="v2.2" 4 | 5 | # replace image of net-kourier, controller, activator, autoscaler 6 | kubectl -n knative-serving patch deploy net-kourier-controller --patch \ 7 | '{"spec":{"template":{"spec":{"containers":[{"name":"controller","image":"docker.io/bonavadeur/ikukantai-kourier:'${TAG}'"}]}}}}' 8 | kubectl -n knative-serving patch deploy controller --patch \ 9 | '{"spec":{"template":{"spec":{"containers":[{"name":"controller","image":"docker.io/bonavadeur/ikukantai-controller:'${TAG}'"}]}}}}' 10 | kubectl -n knative-serving patch daemonset activator --patch \ 11 | '{"spec":{"template":{"spec":{"containers":[{"name":"activator","image":"docker.io/bonavadeur/ikukantai-activator:'${TAG}'"}]}}}}' 12 | kubectl -n knative-serving patch deploy autoscaler --patch \ 13 | '{"spec":{"template":{"spec":{"containers":[{"name":"autoscaler","image":"docker.io/bonavadeur/ikukantai-autoscaler:'${TAG}'"}]}}}}' 14 | 15 | # replace image of queue-proxy 16 | kubectl -n knative-serving patch image queue-proxy --type=merge --patch \ 17 | '{"spec":{"image":"docker.io/bonavadeur/ikukantai-queue:'${TAG}'"}}' 18 | kubectl -n knative-serving patch configmap config-deployment --patch \ 19 | '{"data":{"queue-sidecar-image":"docker.io/bonavadeur/ikukantai-queue:'${TAG}'"}}' 20 | 21 | # replace image of miporin 22 | kubectl -n knative-serving patch deploy miporin --patch \ 23 | '{"spec":{"template":{"spec":{"containers":[{"name":"miporin","image":"docker.io/bonavadeur/miporin:'${TAG}'"}]}}}}' 24 | 
-------------------------------------------------------------------------------- /manifest/miporin/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: miporin 5 | namespace: knative-serving 6 | labels: 7 | app.kubernetes.io/component: miporin 8 | app.kubernetes.io/name: miporin 9 | app.kubernetes.io/version: "1.12.1" 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: Role 13 | metadata: 14 | name: miporin 15 | namespace: default 16 | rules: 17 | - apiGroups: [""] 18 | resources: ["pods", "configmaps"] 19 | verbs: ["get", "watch", "list"] 20 | - apiGroups: ["apps"] 21 | resources: ["deployments"] 22 | verbs: ["get", "watch", "list"] 23 | - apiGroups: ["serving.knative.dev"] 24 | resources: ["services"] 25 | verbs: ["get", "watch", "list"] 26 | - apiGroups: ["batch.bonavadeur.io"] 27 | resources: ["seikas"] 28 | verbs: ["get", "watch", "list", "patch", "delete", "create"] 29 | - apiGroups: ["monitoring.coreos.com"] 30 | resources: ["servicemonitors"] 31 | verbs: ["get", "watch", "list", "patch", "delete", "create"] 32 | --- 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | kind: RoleBinding 35 | metadata: 36 | name: miporin 37 | namespace: default 38 | subjects: 39 | - kind: ServiceAccount 40 | name: miporin 41 | namespace: knative-serving 42 | - kind: ServiceAccount 43 | name: activator 44 | namespace: knative-serving 45 | - kind: ServiceAccount 46 | name: default 47 | namespace: default 48 | roleRef: 49 | kind: Role 50 | name: miporin 51 | apiGroup: rbac.authorization.k8s.io 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1 54 | kind: ClusterRole 55 | metadata: 56 | name: miporin 57 | rules: 58 | - apiGroups: [""] 59 | resources: ["nodes"] 60 | verbs: ["list"] 61 | --- 62 | apiVersion: rbac.authorization.k8s.io/v1 63 | kind: ClusterRoleBinding 64 | metadata: 65 | name: miporin 66 | subjects: 67 | - kind: ServiceAccount 68 | name: miporin 
69 | namespace: knative-serving 70 | - kind: ServiceAccount 71 | name: default 72 | namespace: default 73 | roleRef: 74 | kind: ClusterRole 75 | name: miporin 76 | apiGroup: rbac.authorization.k8s.io 77 | --- 78 | -------------------------------------------------------------------------------- /manifest/4-serving-default-domain.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The Knative Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | apiVersion: batch/v1 16 | kind: Job 17 | metadata: 18 | name: default-domain 19 | namespace: knative-serving 20 | labels: 21 | app: "default-domain" 22 | app.kubernetes.io/component: default-domain-job 23 | app.kubernetes.io/name: knative-serving 24 | app.kubernetes.io/version: "1.12.1" 25 | spec: 26 | template: 27 | metadata: 28 | annotations: 29 | sidecar.istio.io/inject: "false" 30 | labels: 31 | app: "default-domain" 32 | app.kubernetes.io/component: default-domain-job 33 | app.kubernetes.io/name: knative-serving 34 | app.kubernetes.io/version: "1.12.1" 35 | spec: 36 | serviceAccountName: controller 37 | containers: 38 | - name: default-domain 39 | # This is the Go import path for the binary that is containerized 40 | # and substituted here. 
41 | image: gcr.io/knative-releases/knative.dev/serving/cmd/default-domain@sha256:f12eb412fe620067cf383e2aaa5e32be3db423146681a78184c135f56be1dfdb 42 | args: ["-magic-dns=sslip.io"] 43 | ports: 44 | - name: http 45 | containerPort: 8080 46 | readinessProbe: 47 | httpGet: 48 | port: 8080 49 | livenessProbe: 50 | httpGet: 51 | port: 8080 52 | failureThreshold: 6 53 | resources: 54 | requests: 55 | cpu: 100m 56 | memory: 100Mi 57 | limits: 58 | cpu: 1000m 59 | memory: 1000Mi 60 | securityContext: 61 | allowPrivilegeEscalation: false 62 | readOnlyRootFilesystem: true 63 | runAsNonRoot: true 64 | capabilities: 65 | drop: 66 | - ALL 67 | seccompProfile: 68 | type: RuntimeDefault 69 | env: 70 | - name: POD_NAME 71 | valueFrom: 72 | fieldRef: 73 | fieldPath: metadata.name 74 | - name: SYSTEM_NAMESPACE 75 | valueFrom: 76 | fieldRef: 77 | fieldPath: metadata.namespace 78 | restartPolicy: Never 79 | backoffLimit: 10 80 | --- 81 | apiVersion: v1 82 | kind: Service 83 | metadata: 84 | name: default-domain-service 85 | namespace: knative-serving 86 | labels: 87 | app: default-domain 88 | app.kubernetes.io/component: default-domain-job 89 | app.kubernetes.io/name: knative-serving 90 | app.kubernetes.io/version: "1.12.1" 91 | spec: 92 | selector: 93 | app: default-domain 94 | ports: 95 | - name: http 96 | port: 80 97 | targetPort: 8080 98 | type: ClusterIP 99 | 100 | --- 101 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ikukantai - 行く艦隊 2 | 3 | ### (The Iku Fleet - Hạm Đội Ikư - 行く艦隊) 4 | 5 | [![release](https://img.shields.io/badge/ikukantai--v2.1-log?style=flat&label=release&color=crimson)]() 6 | [![license](https://img.shields.io/badge/closed--source-log?style=flat&label=license&color=darkred)](LICENSE) 7 | 
[![CloudNet2024](https://img.shields.io/badge/IEEE--CloudNet--2024-log?style=flat&label=publication&color=dodgerblue)](https://cloudnet2024.ieee-cloudnet.org) 8 | 9 | [![Kubernetes](https://img.shields.io/badge/kubernetes-%23326ce5.svg?style=for-the-badge&logo=kubernetes&logoColor=white&link=https%3A%2F%2Fkubernetes.io)](https://kubernetes.io/) 10 | [![Linux](https://img.shields.io/badge/Linux-FCC624?style=for-the-badge&logo=linux&logoColor=black)]() 11 | [![Knative](https://img.shields.io/badge/knative-log?style=for-the-badge&logo=knative&logoColor=white&labelColor=%230865AD&color=%230865AD)](https://knative.dev/docs/) 12 | [![Go](https://img.shields.io/badge/go-%2300ADD8.svg?style=for-the-badge&logo=go&logoColor=white)](https://go.dev/) 13 | [![Prometheus](https://img.shields.io/badge/Prometheus-E6522C?style=for-the-badge&logo=Prometheus&logoColor=white)](https://prometheus.io/) 14 | [![EnvoyProxy](https://img.shields.io/badge/envoy-log?style=for-the-badge&logo=envoyproxy&logoColor=white&labelColor=%23AC6199&color=%23AC6199)](https://www.envoyproxy.io/) 15 | [![AWS](https://img.shields.io/badge/AWS-%23FF9900.svg?style=for-the-badge&logo=amazon-aws&logoColor=white)](https://aws.amazon.com/) 16 | [![Protobuf](https://img.shields.io/badge/Protobuf-log?style=for-the-badge&logo=nani&logoColor=green&labelColor=red&color=darkgreen)](https://protobuf.dev/) 17 | 18 | 19 | `ikukantai` is a Knative Serving based Serverless Platform designed for Distributed Computing Infrastructure. `ikukantai` is easy-to-develop platform supports research and the integration of Load-Balancing, Scheduling and Queuing algorithms in Container Virtualization environment. 20 | 21 | `ANNOUNCEMENT`: **`ikukantai`'s publication has been accepted at IEEE CloudNet Conference 2024 (International Conference on Cloud Networking)** 22 | 23 | ![ikukantai](docs/images/ikukantai_wp.jpg) 24 | 25 | ## 1. 
Motivation 26 | 27 | Container virtualization is becoming an inevitable trend in modern computing and infrastructure technology. Since its release in 2014, **Kubernetes** has established itself as the de facto standard for application deployment and system operations in the industry. Numerous projects under the CNCF umbrella have further strengthened **Kubernetes**'s position in cloud computing. Knative, a serverless project nurtured within the CNCF ecosystem, has introduced scale-to-zero capabilities to **Kubernetes**, offering new opportunities for resource optimization and sustainable energy solutions in the scientific community. 28 | 29 | The built-in Load-Balancing, Scheduling, Queuing algorithms implemented in both Vanilla **Kubernetes** and Vanilla Knative are designed for very general purposes but require significant improvements for specialized use cases. During [our research](docs/publication/CloudNet_2024.pdf) on deploying and evaluating Serverless technology in an Edge-Cloud environment, we discovered that the Load-Balancing and Scheduling mechanism of **Kubernetes**/Knative is inadequate for such heterogeneous setups. `ikukantai` was born not only to address this issue but also to serve as a programmable platform that allows solving classic problems related to container Load-Balancing, Scheduling and Queuing in a real testbed implementation approach while removing the complex programming barrier for scientists. 30 | 31 | In this project, we propose an approach that improves Knative from the inside, a Unified Serverless Platform for Distributed Systems. It is `ikukantai` (行く艦隊 - The iku Fleet - Hạm Đội Ikư - translated from Japanese). 32 | 33 | ## 2. Architecture 34 | 35 | ![Arch](docs/images/arch.jpg) 36 | 37 | To understand the functions of the modules `monlat`, `Katyusha`, `Nonna`, `Seika` and `Miporin`, please jump to [5.`ikukantai` ecosystem](#ecosystem) 38 | 39 | ## 3. Installation 40 | 41 | ### 3.1. 
System requirements 42 | 43 | + Nodes are Physical Machines or Virtual Machines, with at least 4 CPUs and 16GB RAM for the master-node and 3 CPUs and 6GB RAM for each worker-node 44 | + Ubuntu-Server or Ubuntu Desktop version 20.04 45 | + Kubernetes version 1.26.3 46 | + Calico installed on Kubernetes cluster 47 | + MetalLB installed on Kubernetes cluster (for laboratory experiments, we deploy the system on a bare-metal cluster) 48 | + Helm is installed 49 | 50 | ### 3.2. Install support mechanisms 51 | 52 | #### 3.2.1. Monlat - the network latency monitoring system for Kubernetes 53 | 54 | We develop a network latency monitoring system named `monlat`; for more details and installation please visit [monlat](https://github.com/bonavadeur/monlat). First, let's install the Prometheus Stack on the Kubernetes Cluster, then install `monlat` later. The network latency metrics will be collected by Prometheus. 55 | 56 | #### Install Prometheus Stack 57 | 58 | We follow the Prometheus Stack installation guide from [Knative's Docs](https://knative.dev/docs/serving/observability/metrics/collecting-metrics/) 59 | 60 | ```bash 61 | $ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 62 | $ helm repo update 63 | $ helm install prometheus prometheus-community/kube-prometheus-stack -n default -f manifest/prometheus/values.yaml 64 | 65 | $ kubectl apply -f https://raw.githubusercontent.com/knative-extensions/monitoring/main/grafana/dashboards.yaml 66 | 67 | $ kubectl create namespace metrics 68 | $ kubectl apply -f https://raw.githubusercontent.com/knative/docs/main/docs/serving/observability/metrics/collector.yaml 69 | ``` 70 | 71 | Note: [The OpenTelemetry Collector](https://grafana.com/docs/alloy/latest/reference/components/otelcol/otelcol.exporter.logging/#:~:text=The%20OpenTelemetry%20Collector%20logging%20exporter,Collector%20repository%20in%20September%202024.) 
logging exporter is deprecated and was removed from the upstream Collector repository in September 2024, so you have to change all `logging` fields to `debug` using the following commands: 72 | 73 | ```bash 74 | chmod +x ./hack/fix-otel-collector-cfg.sh 75 | ./hack/fix-otel-collector-cfg.sh 76 | ``` 77 | 78 | #### Install monlat 79 | 80 | Follow the [monlat installation guide](https://github.com/bonavadeur/monlat) to install `monlat` correctly. `monlat` is released under the Apache License. 81 | 82 | #### 3.2.2. Seika - a Kubernetes Custom Resource that maintains the quantity of Pods in each Node 83 | 84 | To precisely control the creation and deletion of Functions in each Node, we develop a Kubernetes Custom Resource named [Seika](https://github.com/bonavadeur/seika). Seika operates like a bunch of Deployments, where each Deployment controls the number of Pods in only one Node. By using Seika, we can create more Functions in Nodes that have more traffic and delete Functions in Nodes that have less traffic. To install, please visit [Seika](https://github.com/bonavadeur/seika). Seika's documentation includes guides for installation, usage, and development. `Seika` is released under the Apache License. 85 | 86 | ### 3.3. Install Knative Serving with Kourier as the networking option and Our Extra-Controller 87 | 88 | In this step we install Knative Serving's components (CRDs, Knative's Pods) by applying .yaml files. Note that the applied manifests are modified by us; we do not use the original images and configurations. 
Our images are developed base on [Knative-Serving](https://github.com/knative/serving/tree/release-1.12) version 1.12.1 and [Kourier](https://github.com/knative-extensions/net-kourier/tree/release-1.12) version 1.12.1 89 | 90 | ```bash 91 | # Install CRD 92 | kubectl apply -f manifest/1-serving-crd.yaml 93 | # Install Knative's Pod 94 | kubectl apply -f manifest/2-serving-core.yaml 95 | # Extra configmap and RBAC 96 | kubectl apply -f manifest/miporin/configmap.yaml 97 | kubectl apply -f manifest/miporin/rbac.yaml 98 | # Install Networking Plugin 99 | kubectl apply -f manifest/3-kourier.yaml 100 | # Run domain config job 101 | kubectl apply -f manifest/4-serving-default-domain.yaml 102 | ``` 103 | 104 | Wait until job/default-domain is success 105 | 106 | ```bash 107 | # check if default-domain job is success 108 | kubectl -n knative-serving get job | grep default-domain 109 | NAME COMPLETIONS DURATION AGE 110 | default-domain 1/1 13s 71s 111 | # delete config job 112 | kubectl delete -f manifest/4-serving-default-domain.yaml 113 | ``` 114 | 115 | Install extra-controller `miporin` 116 | 117 | ```bash 118 | kubectl apply -f manifest/miporin/miporin.yaml 119 | ``` 120 | 121 | `miporin` is the extra-controller working alongside and is independently of Knative's controller. For more information about `miporin`, please visit [bonavadeur/miporin](https://github.com/bonavadeur/miporin). Miporin is released under Apache License. 122 | 123 | Install correct images by version 124 | 125 | ```bash 126 | # Replace Knative's images by Ikukantai's images 127 | chmod +x -R hack/* 128 | ./hack/replace-image.sh 129 | ``` 130 | 131 | ### 3.4. Making some changes 132 | 133 | #### 3.4.1. 
Kourier Gateway 134 | 135 | ```bash 136 | # use local 3scale-kourier-gateway pod for every request 137 | kubectl -n kourier-system patch service kourier --patch '{"spec":{"internalTrafficPolicy":"Local","externalTrafficPolicy":"Local"}}' 138 | kubectl -n kourier-system patch service kourier-internal --patch '{"spec":{"internalTrafficPolicy":"Local"}}' 139 | ``` 140 | 141 | ### 3.5. Check your setup 142 | 143 | You must see **3scale-kourier-gateway** and **activator** present in all nodes, each node has one **activator** and one **3scale-kourier-gateway** 144 | 145 | ```bash 146 | $ kubectl -n knative-serving get pod -o wide | grep activator 147 | activator-5cd6cb5f45-5nnnb 1/1 Running 0 156m 10.233.75.29 node2 148 | activator-5cd6cb5f45-fkp2r 1/1 Running 0 156m 10.233.102.181 node1 149 | activator-5cd6cb5f45-j6bqq 1/1 Running 0 156m 10.233.71.47 node3 150 | 151 | $ kubectl -n kourier-system get pod -o wide 152 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 153 | 3scale-kourier-gateway-864554589-5dgxl 1/1 Running 11 (5h26m ago) 2d5h 10.233.75.28 node2 154 | 3scale-kourier-gateway-864554589-btfqf 1/1 Running 12 (5h21m ago) 2d5h 10.233.71.29 node3 155 | 3scale-kourier-gateway-864554589-p7q56 1/1 Running 13 (5h29m ago) 2d5h 10.233.102.176 node1 156 | 157 | $ kubectl -n knative-serving get pod | grep miporin 158 | miporin-597dcddbc-qvlc6 1/1 Running 0 143m 159 | ``` 160 | 161 | ## 4. Try it out 162 | 163 | Each time you deploy a **ksvc** (in API service.serving.knative.dev), `ikukantai` will create two custom resources automatically: one Seika and one ServiceMonitor. 164 | 165 | + **Seika** (in API seika.batch.bonavadeur.io) is used for ability of controlling Function creation and deletion precisely in each Node 166 | + **ServiceMonitor** (in API servicemonitor.monitoring.coreos.com) is used for scraping metrics of each Function to Prometheus 167 | 168 | First, apply a simple web application named *hello*. 
The annotation *autoscaling.knative.dev/window: "12s"* means that if there is not traffic come to system in 12s, the Function will be scaled down. Immediately after you apply *hello* Function, the first Pod created is not under your control *hello*. You need to wait until this pod deleted after 12s, the system is now under your control. 169 | 170 | ```bash 171 | # install a demo app 172 | $ kubectl apply -f manifest/demo/hello.yaml 173 | Warning: Kubernetes default value is insecure, Knative may default this to secure in a future release: spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation, spec.template.spec.containers[0].securityContext.capabilities, spec.template.spec.containers[0].securityContext.runAsNonRoot, spec.template.spec.containers[0].securityContext.seccompProfile 174 | service.serving.knative.dev/hello created 175 | ``` 176 | 177 | Get all relevant resources: 178 | 179 | ```bash 180 | $ kubectl get ksvc,pod,seika,servicemonitor | grep hello 181 | service.serving.knative.dev/hello http://hello.default.192.168.133.2.sslip.io hello-00001 hello-00001 True 182 | pod/hello-00001-deployment-7df54dc57f-rx7xr 2/2 Running 0 11s 183 | seika.batch.bonavadeur.io/hello ["node1","node2","node3"] 0-0-0/0-0-0 184 | servicemonitor.monitoring.coreos.com/hello 185 | ``` 186 | 187 | Wait until the first Pod is deleted (after the period set by annotation autoscaling.knative.dev/window: "12s") 188 | 189 | ```bash 190 | $ kubectl get ksvc,pod,seika,servicemonitor | grep hello 191 | service.serving.knative.dev/hello http://hello.default.192.168.133.2.sslip.io hello-00001 hello-00001 True 192 | seika.batch.bonavadeur.io/hello ["node1","node2","node3"] 0-0-0/0-0-0 193 | servicemonitor.monitoring.coreos.com/hello 194 | ``` 195 | 196 | Use `netem` setup latency between nodes. In this experiment, I setup latency from **node1**, **node2** to **node3** is 50ms. 
So, when make request from node3, a new Pod is prefer scheduled on **node3** instead of the remain nodes. 197 | 198 | ```bash 199 | # make request from node3 200 | root@node3:~$ curl hello.default.svc.cluster.local 201 | Konnichiwa from hello-node3-xgvq5 in node3 202 | 203 | # list all resources 204 | root@node1:~$ kubectl get ksvc,pod,seika,servicemonitor | grep hello 205 | service.serving.knative.dev/hello http://hello.default.192.168.133.2.sslip.io hello-00001 hello-00001 True 206 | pod/hello-node3-xgvq5 2/2 Running 0 40s 207 | seika.batch.bonavadeur.io/hello ["node1","node2","node3"] 0-0-1/0-0-1 208 | servicemonitor.monitoring.coreos.com/hello 209 | ``` 210 | 211 | The Scheduling Algorithm is implemented in [miporin](https://github.com/bonavadeur/miporin), package `github.com/bonavadeur/miporin/pkg/yukari`. To enable Scheduling Feature of `ikukantai` Fleet, set config `ikukantai-miporin-enable-yukari: "true"` in `configmap/config-ikukantai`, namespace `default` 212 | 213 | ## 5. `ikukantai` ecosystem 214 | 215 | ### 5.1. Support tools 216 | 217 | The following tools support `ikukantai` Fleet operation and can work independently from `ikukantai` in any Kubernetes Cluster. 218 | 219 | [Monlat](https://github.com/bonavadeur/monlat) - the latency monitoring system for Kubernetes 220 | 221 | [Seika](https://github.com/bonavadeur/seika) - the Kubernetes Custom Resource that maintains quantity of Pods in each Node 222 | 223 | ### 5.2. The tanks on the Fleet 224 | 225 | `ikukantai` is closed-source, but you can exploit all extra power by using tanks deployed on the flight deck of the Fleet. We have a plan for developing 4 extra-components that make algorithm implementation easier in the near future. 226 | 227 | [Miporin](https://github.com/bonavadeur/miporin) - tank commander, the extra-controller working alongside and is independently of Knative's controller. `miporin` also act as Scheduler of the Fleet. `miporin` is written in Go and release under Apache-2.0 License. 
228 | 229 | [Nonna](https://github.com/bonavadeur/nonna) - Queue Modifier Module on the Fleet, written in Go, released under Apache-2.0 License. 230 | 231 | [Katyusha](https://github.com/bonavadeur/katyusha) - Load Balancing Algorithm Implementation Module on the Fleet, written in Go, released under Apache-2.0 License. 232 | 233 | Panzer vor! 234 | 235 | ## 6. Author 236 | 237 | Đào Hiệp - Bonavadeur - ボナちゃん 238 | The Future Internet Laboratory, Room E711, C7 Building, Hanoi University of Science and Technology, Vietnam. 239 | 未来のインターネット研究室, C7 の E 711、ハノイ百科大学、ベトナム。 240 | 241 | ![](docs/images/github-wp.png) 242 | -------------------------------------------------------------------------------- /manifest/3-kourier.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 The Knative Authors 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # https://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | apiVersion: v1 16 | kind: Namespace 17 | metadata: 18 | name: kourier-system 19 | labels: 20 | networking.knative.dev/ingress-provider: kourier 21 | app.kubernetes.io/name: knative-serving 22 | app.kubernetes.io/component: net-kourier 23 | app.kubernetes.io/version: "1.12.1" 24 | 25 | --- 26 | # Copyright 2020 The Knative Authors 27 | # 28 | # Licensed under the Apache License, Version 2.0 (the "License"); 29 | # you may not use this file except in compliance with the License. 
30 | # You may obtain a copy of the License at 31 | # 32 | # https://www.apache.org/licenses/LICENSE-2.0 33 | # 34 | # Unless required by applicable law or agreed to in writing, software 35 | # distributed under the License is distributed on an "AS IS" BASIS, 36 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 37 | # See the License for the specific language governing permissions and 38 | # limitations under the License. 39 | 40 | apiVersion: v1 41 | kind: ConfigMap 42 | metadata: 43 | name: kourier-bootstrap 44 | namespace: kourier-system 45 | labels: 46 | networking.knative.dev/ingress-provider: kourier 47 | app.kubernetes.io/component: net-kourier 48 | app.kubernetes.io/version: "1.12.1" 49 | app.kubernetes.io/name: knative-serving 50 | data: 51 | envoy-bootstrap.yaml: | 52 | dynamic_resources: 53 | ads_config: 54 | transport_api_version: V3 55 | api_type: GRPC 56 | rate_limit_settings: {} 57 | grpc_services: 58 | - envoy_grpc: {cluster_name: xds_cluster} 59 | cds_config: 60 | resource_api_version: V3 61 | ads: {} 62 | lds_config: 63 | resource_api_version: V3 64 | ads: {} 65 | node: 66 | cluster: kourier-knative 67 | id: 3scale-kourier-gateway 68 | static_resources: 69 | listeners: 70 | - name: stats_listener 71 | address: 72 | socket_address: 73 | address: 0.0.0.0 74 | port_value: 9000 75 | filter_chains: 76 | - filters: 77 | - name: envoy.filters.network.http_connection_manager 78 | typed_config: 79 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager 80 | stat_prefix: stats_server 81 | http_filters: 82 | - name: envoy.filters.http.router 83 | typed_config: 84 | "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router 85 | route_config: 86 | virtual_hosts: 87 | - name: admin_interface 88 | domains: 89 | - "*" 90 | routes: 91 | - match: 92 | safe_regex: 93 | regex: '/(certs|stats(/prometheus)?|server_info|clusters|listeners|ready)?' 
94 | headers: 95 | - name: ':method' 96 | string_match: 97 | exact: GET 98 | route: 99 | cluster: service_stats 100 | clusters: 101 | - name: service_stats 102 | connect_timeout: 0.250s 103 | type: static 104 | load_assignment: 105 | cluster_name: service_stats 106 | endpoints: 107 | lb_endpoints: 108 | endpoint: 109 | address: 110 | pipe: 111 | path: /tmp/envoy.admin 112 | - name: xds_cluster 113 | # This keepalive is recommended by envoy docs. 114 | # https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol 115 | typed_extension_protocol_options: 116 | envoy.extensions.upstreams.http.v3.HttpProtocolOptions: 117 | "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions 118 | explicit_http_config: 119 | http2_protocol_options: 120 | connection_keepalive: 121 | interval: 30s 122 | timeout: 5s 123 | connect_timeout: 1s 124 | load_assignment: 125 | cluster_name: xds_cluster 126 | endpoints: 127 | lb_endpoints: 128 | endpoint: 129 | address: 130 | socket_address: 131 | address: "net-kourier-controller.knative-serving" 132 | port_value: 18000 133 | type: STRICT_DNS 134 | admin: 135 | access_log: 136 | - name: envoy.access_loggers.stdout 137 | typed_config: 138 | "@type": type.googleapis.com/envoy.extensions.access_loggers.stream.v3.StdoutAccessLog 139 | address: 140 | pipe: 141 | path: /tmp/envoy.admin 142 | layered_runtime: 143 | layers: 144 | - name: static-layer 145 | static_layer: 146 | envoy.reloadable_features.override_request_timeout_by_gateway_timeout: false 147 | 148 | --- 149 | # Copyright 2021 The Knative Authors 150 | # 151 | # Licensed under the Apache License, Version 2.0 (the "License"); 152 | # you may not use this file except in compliance with the License. 
153 | # You may obtain a copy of the License at 154 | # 155 | # https://www.apache.org/licenses/LICENSE-2.0 156 | # 157 | # Unless required by applicable law or agreed to in writing, software 158 | # distributed under the License is distributed on an "AS IS" BASIS, 159 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 160 | # See the License for the specific language governing permissions and 161 | # limitations under the License. 162 | 163 | apiVersion: v1 164 | kind: ConfigMap 165 | metadata: 166 | name: config-kourier 167 | namespace: knative-serving 168 | labels: 169 | networking.knative.dev/ingress-provider: kourier 170 | app.kubernetes.io/component: net-kourier 171 | app.kubernetes.io/version: "1.12.1" 172 | app.kubernetes.io/name: knative-serving 173 | data: 174 | _example: | 175 | ################################ 176 | # # 177 | # EXAMPLE CONFIGURATION # 178 | # # 179 | ################################ 180 | 181 | # This block is not actually functional configuration, 182 | # but serves to illustrate the available configuration 183 | # options and document them in a way that is accessible 184 | # to users that `kubectl edit` this config map. 185 | # 186 | # These sample configuration options may be copied out of 187 | # this example block and unindented to be in the data block 188 | # to actually change the configuration. 189 | 190 | # Specifies whether requests reaching the Kourier gateway 191 | # in the context of services should be logged. Readiness 192 | # probes etc. must be configured via the bootstrap config. 193 | enable-service-access-logging: "true" 194 | 195 | # Specifies whether to use proxy-protocol in order to safely 196 | # transport connection information such as a client's address 197 | # across multiple layers of TCP proxies. 198 | # NOTE THAT THIS IS AN EXPERIMENTAL / ALPHA FEATURE 199 | enable-proxy-protocol: "false" 200 | 201 | # The server certificates to serve the internal TLS traffic for Kourier Gateway. 
# It is specified by the secret name in controller namespace, which has 203 | # the "tls.crt" and "tls.key" data field. 204 | # Use an empty value to disable the feature (default). 205 | # 206 | # NOTE: This flag is in an alpha state and is mostly here to enable internal testing 207 | # for now. Use with caution. 208 | cluster-cert-secret: "" 209 | 210 | # Specifies the amount of time that Kourier waits for the incoming requests. 211 | # The default, 0s, imposes no timeout at all. 212 | stream-idle-timeout: "0s" 213 | 214 | # Specifies whether to use CryptoMB private key provider in order to 215 | # accelerate the TLS handshake. 216 | # NOTE THAT THIS IS AN EXPERIMENTAL / ALPHA FEATURE. 217 | enable-cryptomb: "false" 218 | 219 | # Configures the number of additional ingress proxy hops from the 220 | # right side of the x-forwarded-for HTTP header to trust. 221 | trusted-hops-count: "0" 222 | 223 | # Specifies the cipher suites for TLS external listener. 224 | # Use ',' separated values like "ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-ECDSA-CHACHA20-POLY1305" 225 | # The default uses the default cipher suites of the envoy version. 226 | cipher-suites: "" 227 | 228 | --- 229 | # Copyright 2020 The Knative Authors 230 | # 231 | # Licensed under the Apache License, Version 2.0 (the "License"); 232 | # you may not use this file except in compliance with the License. 233 | # You may obtain a copy of the License at 234 | # 235 | # https://www.apache.org/licenses/LICENSE-2.0 236 | # 237 | # Unless required by applicable law or agreed to in writing, software 238 | # distributed under the License is distributed on an "AS IS" BASIS, 239 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 240 | # See the License for the specific language governing permissions and 241 | # limitations under the License. 
242 | 243 | apiVersion: v1 244 | kind: ServiceAccount 245 | metadata: 246 | name: net-kourier 247 | namespace: knative-serving 248 | labels: 249 | networking.knative.dev/ingress-provider: kourier 250 | app.kubernetes.io/component: net-kourier 251 | app.kubernetes.io/version: "1.12.1" 252 | app.kubernetes.io/name: knative-serving 253 | --- 254 | apiVersion: rbac.authorization.k8s.io/v1 255 | kind: ClusterRole 256 | metadata: 257 | name: net-kourier 258 | labels: 259 | networking.knative.dev/ingress-provider: kourier 260 | app.kubernetes.io/component: net-kourier 261 | app.kubernetes.io/version: "1.12.1" 262 | app.kubernetes.io/name: knative-serving 263 | rules: 264 | - apiGroups: [""] 265 | resources: ["events"] 266 | verbs: ["create", "update", "patch"] 267 | - apiGroups: [""] 268 | resources: ["pods", "endpoints", "services", "secrets"] 269 | verbs: ["get", "list", "watch"] 270 | - apiGroups: [""] 271 | resources: ["configmaps"] 272 | verbs: ["get", "list", "watch"] 273 | - apiGroups: ["coordination.k8s.io"] 274 | resources: ["leases"] 275 | verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] 276 | - apiGroups: ["networking.internal.knative.dev"] 277 | resources: ["ingresses"] 278 | verbs: ["get", "list", "watch", "patch"] 279 | - apiGroups: ["networking.internal.knative.dev"] 280 | resources: ["ingresses/status"] 281 | verbs: ["update"] 282 | - apiGroups: ["crd.projectcalico.org"] 283 | resources: ["ipamblocks"] 284 | verbs: ["list", "get"] 285 | - apiGroups: [""] 286 | resources: ["nodes"] 287 | verbs: ["list", "get"] 288 | --- 289 | apiVersion: rbac.authorization.k8s.io/v1 290 | kind: ClusterRoleBinding 291 | metadata: 292 | name: net-kourier 293 | labels: 294 | networking.knative.dev/ingress-provider: kourier 295 | app.kubernetes.io/component: net-kourier 296 | app.kubernetes.io/version: "1.12.1" 297 | app.kubernetes.io/name: knative-serving 298 | roleRef: 299 | apiGroup: rbac.authorization.k8s.io 300 | kind: ClusterRole 301 | name: 
net-kourier 302 | subjects: 303 | - kind: ServiceAccount 304 | name: net-kourier 305 | namespace: knative-serving 306 | 307 | --- 308 | # Copyright 2020 The Knative Authors 309 | # 310 | # Licensed under the Apache License, Version 2.0 (the "License"); 311 | # you may not use this file except in compliance with the License. 312 | # You may obtain a copy of the License at 313 | # 314 | # https://www.apache.org/licenses/LICENSE-2.0 315 | # 316 | # Unless required by applicable law or agreed to in writing, software 317 | # distributed under the License is distributed on an "AS IS" BASIS, 318 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 319 | # See the License for the specific language governing permissions and 320 | # limitations under the License. 321 | 322 | apiVersion: apps/v1 323 | kind: Deployment 324 | metadata: 325 | name: net-kourier-controller 326 | namespace: knative-serving 327 | labels: 328 | networking.knative.dev/ingress-provider: kourier 329 | app.kubernetes.io/component: net-kourier 330 | app.kubernetes.io/version: "1.12.1" 331 | app.kubernetes.io/name: knative-serving 332 | spec: 333 | strategy: 334 | type: RollingUpdate 335 | rollingUpdate: 336 | maxUnavailable: 0 337 | maxSurge: 100% 338 | replicas: 1 339 | selector: 340 | matchLabels: 341 | app: net-kourier-controller 342 | template: 343 | metadata: 344 | annotations: 345 | prometheus.io/scrape: "true" 346 | prometheus.io/port: "9090" 347 | prometheus.io/path: "/metrics" 348 | labels: 349 | app: net-kourier-controller 350 | spec: 351 | nodeSelector: 352 | kubernetes.io/hostname: node1 353 | containers: 354 | - name: controller 355 | image: gcr.io/knative-releases/knative.dev/net-kourier/cmd/kourier@sha256:9cd4d69a708a8cf8e597efe3f511494d71cf8eab1b2fd85545097069ad47d3f6 356 | # image: docker.io/bonavadeur/ikukantai-kourier:v1.2-cnsm-15nov24 357 | envFrom: 358 | - configMapRef: 359 | name: config-ikukantai 360 | env: 361 | - name: CERTS_SECRET_NAMESPACE 362 | value: "" 
363 | - name: CERTS_SECRET_NAME 364 | value: "" 365 | - name: SYSTEM_NAMESPACE 366 | valueFrom: 367 | fieldRef: 368 | fieldPath: metadata.namespace 369 | - name: METRICS_DOMAIN 370 | value: "knative.dev/samples" 371 | - name: KOURIER_GATEWAY_NAMESPACE 372 | value: "kourier-system" 373 | - name: ENABLE_SECRET_INFORMER_FILTERING_BY_CERT_UID 374 | value: "false" 375 | # KUBE_API_BURST and KUBE_API_QPS allows to configure maximum burst for throttle and maximum QPS to the server from the client. 376 | # Setting these values using env vars is possible since https://github.com/knative/pkg/pull/2755. 377 | # 200 is an arbitrary value, but it speeds up kourier startup duration, and the whole ingress reconciliation process as a whole. 378 | - name: KUBE_API_BURST 379 | value: "200" 380 | - name: KUBE_API_QPS 381 | value: "200" 382 | ports: 383 | - name: http2-xds 384 | containerPort: 18000 385 | protocol: TCP 386 | readinessProbe: 387 | grpc: 388 | port: 18000 389 | periodSeconds: 10 390 | failureThreshold: 3 391 | livenessProbe: 392 | grpc: 393 | port: 18000 394 | periodSeconds: 10 395 | failureThreshold: 6 396 | securityContext: 397 | allowPrivilegeEscalation: false 398 | readOnlyRootFilesystem: true 399 | runAsNonRoot: true 400 | capabilities: 401 | drop: 402 | - ALL 403 | seccompProfile: 404 | type: RuntimeDefault 405 | resources: 406 | requests: 407 | cpu: 200m 408 | memory: 200Mi 409 | limits: 410 | cpu: "1" 411 | memory: 500Mi 412 | restartPolicy: Always 413 | serviceAccountName: net-kourier 414 | --- 415 | apiVersion: v1 416 | kind: Service 417 | metadata: 418 | name: net-kourier-controller 419 | namespace: knative-serving 420 | labels: 421 | networking.knative.dev/ingress-provider: kourier 422 | app.kubernetes.io/component: net-kourier 423 | app.kubernetes.io/version: "1.12.1" 424 | app.kubernetes.io/name: knative-serving 425 | spec: 426 | ports: 427 | - name: grpc-xds 428 | port: 18000 429 | protocol: TCP 430 | targetPort: 18000 431 | selector: 432 | app: 
net-kourier-controller 433 | type: ClusterIP 434 | 435 | --- 436 | # Copyright 2020 The Knative Authors 437 | # 438 | # Licensed under the Apache License, Version 2.0 (the "License"); 439 | # you may not use this file except in compliance with the License. 440 | # You may obtain a copy of the License at 441 | # 442 | # https://www.apache.org/licenses/LICENSE-2.0 443 | # 444 | # Unless required by applicable law or agreed to in writing, software 445 | # distributed under the License is distributed on an "AS IS" BASIS, 446 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 447 | # See the License for the specific language governing permissions and 448 | # limitations under the License. 449 | 450 | apiVersion: apps/v1 451 | kind: DaemonSet 452 | metadata: 453 | name: 3scale-kourier-gateway 454 | namespace: kourier-system 455 | labels: 456 | networking.knative.dev/ingress-provider: kourier 457 | app.kubernetes.io/component: net-kourier 458 | app.kubernetes.io/version: "1.12.1" 459 | app.kubernetes.io/name: knative-serving 460 | spec: 461 | # strategy: 462 | # type: RollingUpdate 463 | # rollingUpdate: 464 | # maxUnavailable: 0 465 | # maxSurge: 100% 466 | selector: 467 | matchLabels: 468 | app: 3scale-kourier-gateway 469 | template: 470 | metadata: 471 | labels: 472 | app: 3scale-kourier-gateway 473 | annotations: 474 | # v0.26 supports envoy v3 API, so 475 | # adding this label to restart pod. 
476 | networking.knative.dev/poke: "v0.26" 477 | prometheus.io/scrape: "true" 478 | prometheus.io/port: "9000" 479 | prometheus.io/path: "/stats/prometheus" 480 | spec: 481 | # nodeSelector: 482 | # kubernetes.io/hostname: node1 483 | containers: 484 | - args: 485 | - --base-id 1 486 | - -c /tmp/config/envoy-bootstrap.yaml 487 | - --log-level info 488 | command: 489 | - /usr/local/bin/envoy 490 | image: docker.io/envoyproxy/envoy:v1.25-latest 491 | name: kourier-gateway 492 | ports: 493 | - name: http2-external 494 | containerPort: 8080 495 | protocol: TCP 496 | - name: http2-internal 497 | containerPort: 8081 498 | protocol: TCP 499 | - name: https-external 500 | containerPort: 8443 501 | protocol: TCP 502 | - name: http-probe 503 | containerPort: 8090 504 | protocol: TCP 505 | - name: https-probe 506 | containerPort: 9443 507 | protocol: TCP 508 | securityContext: 509 | allowPrivilegeEscalation: false 510 | readOnlyRootFilesystem: false 511 | runAsNonRoot: true 512 | runAsUser: 65534 513 | runAsGroup: 65534 514 | capabilities: 515 | drop: 516 | - ALL 517 | seccompProfile: 518 | type: RuntimeDefault 519 | volumeMounts: 520 | - name: config-volume 521 | mountPath: /tmp/config 522 | lifecycle: 523 | preStop: 524 | exec: 525 | command: ["/bin/sh", "-c", "curl -X POST --unix /tmp/envoy.admin http://localhost/healthcheck/fail; sleep 15"] 526 | readinessProbe: 527 | httpGet: 528 | httpHeaders: 529 | - name: Host 530 | value: internalkourier 531 | path: /ready 532 | port: 8081 533 | scheme: HTTP 534 | initialDelaySeconds: 10 535 | periodSeconds: 5 536 | failureThreshold: 3 537 | livenessProbe: 538 | httpGet: 539 | httpHeaders: 540 | - name: Host 541 | value: internalkourier 542 | path: /ready 543 | port: 8081 544 | scheme: HTTP 545 | initialDelaySeconds: 10 546 | periodSeconds: 5 547 | failureThreshold: 6 548 | resources: 549 | requests: 550 | cpu: 200m 551 | memory: 200Mi 552 | limits: 553 | cpu: 500m 554 | memory: 500Mi 555 | volumes: 556 | - name: config-volume 557 | 
configMap: 558 | name: kourier-bootstrap 559 | restartPolicy: Always 560 | --- 561 | apiVersion: v1 562 | kind: Service 563 | metadata: 564 | name: kourier 565 | namespace: kourier-system 566 | labels: 567 | networking.knative.dev/ingress-provider: kourier 568 | app.kubernetes.io/component: net-kourier 569 | app.kubernetes.io/version: "1.12.1" 570 | app.kubernetes.io/name: knative-serving 571 | spec: 572 | ports: 573 | - name: http2 574 | port: 80 575 | protocol: TCP 576 | targetPort: 8080 577 | - name: https 578 | port: 443 579 | protocol: TCP 580 | targetPort: 8443 581 | selector: 582 | app: 3scale-kourier-gateway 583 | type: LoadBalancer 584 | --- 585 | apiVersion: v1 586 | kind: Service 587 | metadata: 588 | name: kourier-internal 589 | namespace: kourier-system 590 | labels: 591 | networking.knative.dev/ingress-provider: kourier 592 | app.kubernetes.io/component: net-kourier 593 | app.kubernetes.io/version: "1.12.1" 594 | app.kubernetes.io/name: knative-serving 595 | spec: 596 | ports: 597 | - name: http2 598 | port: 80 599 | protocol: TCP 600 | targetPort: 8081 601 | - name: https 602 | port: 443 603 | protocol: TCP 604 | targetPort: 8444 605 | selector: 606 | app: 3scale-kourier-gateway 607 | type: ClusterIP 608 | --- 609 | # apiVersion: autoscaling/v2 610 | # kind: HorizontalPodAutoscaler 611 | # metadata: 612 | # name: 3scale-kourier-gateway 613 | # namespace: kourier-system 614 | # labels: 615 | # networking.knative.dev/ingress-provider: kourier 616 | # app.kubernetes.io/component: net-kourier 617 | # app.kubernetes.io/version: "1.12.1" 618 | # app.kubernetes.io/name: knative-serving 619 | # spec: 620 | # minReplicas: 1 621 | # maxReplicas: 10 622 | # scaleTargetRef: 623 | # apiVersion: apps/v1 624 | # kind: Deployment 625 | # name: 3scale-kourier-gateway 626 | # metrics: 627 | # - type: Resource 628 | # resource: 629 | # name: cpu 630 | # target: 631 | # type: Utilization 632 | # # Percentage of the requested CPU 633 | # averageUtilization: 100 634 | --- 
635 | apiVersion: policy/v1 636 | kind: PodDisruptionBudget 637 | metadata: 638 | name: 3scale-kourier-gateway-pdb 639 | namespace: kourier-system 640 | labels: 641 | networking.knative.dev/ingress-provider: kourier 642 | app.kubernetes.io/component: net-kourier 643 | app.kubernetes.io/version: "1.12.1" 644 | app.kubernetes.io/name: knative-serving 645 | spec: 646 | minAvailable: 80% 647 | selector: 648 | matchLabels: 649 | app: 3scale-kourier-gateway 650 | 651 | --- 652 | --------------------------------------------------------------------------------