├── .gitmodules
├── .hugo_build.lock
├── content
└── en
│ ├── docs
│ ├── 10
│ │ ├── _index.md
│ │ ├── 01
│ │ │ ├── ccwnp.yaml
│ │ │ └── _index.md
│ │ └── 02
│ │ │ └── _index.md
│ ├── 11
│ │ ├── default.json
│ │ ├── ingress.yaml
│ │ ├── cnp-l7-sm.yaml
│ │ ├── backend2.yaml
│ │ ├── envoyconfig.yaml
│ │ └── _index.md
│ ├── 12
│ │ ├── helloworld
│ │ │ ├── helloworld.bpf.c
│ │ │ └── helloworld.go
│ │ └── _index.md
│ ├── 13
│ │ ├── hubble-ui-policy.png
│ │ ├── hubble-ui-dashboard.png
│ │ ├── hubble-ui-edit-policy.png
│ │ ├── hubble-ui-servicemap.png
│ │ ├── hubble-ui-empty-policy.png
│ │ ├── hubble-ui-process-tree.png
│ │ ├── hubble-ui-network-policies.png
│ │ ├── hubble-ui-observe-dropped-flow.png
│ │ ├── 02
│ │ │ ├── backend-allow-ingress-frontend.yaml
│ │ │ └── _index.md
│ │ ├── 04
│ │ │ └── _index.md
│ │ ├── 03
│ │ │ └── _index.md
│ │ ├── _index.md
│ │ └── 01
│ │ │ └── _index.md
│ ├── 03
│ │ ├── _index.md
│ │ ├── cilium_choose_ns.png
│ │ ├── hubble_ui_flows.png
│ │ ├── hubble_ui_servicemap.png
│ │ ├── hubble_ui_visual_options.png
│ │ ├── cilium_hubble_connectivity_test.png
│ │ ├── 01
│ │ │ ├── simple-app.yaml
│ │ │ └── _index.md
│ │ └── 02
│ │ │ └── _index.md
│ ├── 02
│ │ ├── _index.md
│ │ └── 02
│ │ │ └── _index.md
│ ├── 09
│ │ ├── _index.md
│ │ ├── 02
│ │ │ ├── svc.yaml
│ │ │ ├── cluster1.yaml
│ │ │ ├── cluster2.yaml
│ │ │ └── _index.md
│ │ ├── 03
│ │ │ ├── cnp-cm.yaml
│ │ │ └── _index.md
│ │ └── 01
│ │ │ └── _index.md
│ ├── 01
│ │ ├── 01
│ │ │ ├── overview.png
│ │ │ ├── intro_tracing.png
│ │ │ ├── intro_networking.png
│ │ │ ├── intro_security.png
│ │ │ ├── intro_observability.png
│ │ │ └── _index.md
│ │ └── _index.md
│ ├── 08
│ │ ├── patch.yaml
│ │ └── _index.md
│ ├── 06
│ │ ├── cilium_editor_1.png
│ │ ├── cilium_editor_add.png
│ │ ├── cilium_editor_edit_name.png
│ │ ├── cilium_editor_backend-allow-ingress.png
│ │ ├── backend-ingress-deny.yaml
│ │ ├── backend-allow-ingress-frontend.yaml
│ │ └── _index.md
│ ├── 07
│ │ ├── 01
│ │ │ ├── cilium_dns_policy.png
│ │ │ ├── backend-egress-allow-fqdn.yaml
│ │ │ └── _index.md
│ │ ├── _index.md
│ │ └── 02
│ │ │ ├── cnp.yaml
│ │ │ ├── cnp-l7.yaml
│ │ │ ├── sw-app.yaml
│ │ │ └── _index.md
│ ├── _index.md
│ ├── 04
│ │ └── _index.md
│ └── 05
│ │ └── _index.md
│ ├── pdf
│ ├── _index.md
│ ├── footer.md
│ └── header.md
│ ├── acend_background.jpg
│ ├── puzzle_background.jpg
│ ├── setup
│ ├── cleanup.md
│ └── _index.md
│ ├── slides
│ └── _index.md
│ └── _index.html
├── .husky
└── pre-commit
├── renovate.json
├── config
├── techlab
│ └── config.toml
└── _default
│ └── config.toml
├── INSTALL.md
├── .gitignore
├── object-count-quota.yaml
├── .markdownlint.json
├── go.mod
├── package.json
├── helm-chart
└── values.yaml
├── CONTRIBUTING.md
├── Dockerfile
├── .github
└── workflows
│ ├── pr-cleanup.yaml
│ ├── push-main.yaml
│ └── build.yaml
├── README.md
└── go.sum
/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.hugo_build.lock:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/content/en/docs/03/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Hubble"
3 | weight: 3
4 | ---
5 |
--------------------------------------------------------------------------------
/content/en/pdf/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "PDF"
3 | weight: 10
4 | ---
5 |
6 |
--------------------------------------------------------------------------------
/content/en/docs/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Install Cilium"
3 | weight: 2
4 | ---
5 |
--------------------------------------------------------------------------------
/.husky/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | . "$(dirname "$0")/_/husky.sh"
3 |
4 | #npm run mdlint
5 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "github>acend/hugo-training-template"
4 | ]
5 | }
6 |
--------------------------------------------------------------------------------
/content/en/docs/09/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Cluster Mesh"
3 | weight: 9
4 | OnlyWhenNot: techlab
5 | ---
6 |
--------------------------------------------------------------------------------
/content/en/docs/10/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Advanced Networking"
3 | weight: 10
4 | OnlyWhenNot: techlab
5 | ---
6 |
--------------------------------------------------------------------------------
/content/en/acend_background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/acend_background.jpg
--------------------------------------------------------------------------------
/content/en/puzzle_background.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/puzzle_background.jpg
--------------------------------------------------------------------------------
/content/en/docs/01/01/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/01/01/overview.png
--------------------------------------------------------------------------------
/config/techlab/config.toml:
--------------------------------------------------------------------------------
1 | [params]
2 | enabledModule = "base techlab"
3 |
4 | [Languages]
5 | [Languages.en]
6 | title = "Cilium Techlab"
--------------------------------------------------------------------------------
/content/en/docs/08/patch.yaml:
--------------------------------------------------------------------------------
1 | spec:
2 | template:
3 | spec:
4 | nodeSelector:
5 | kubernetes.io/hostname: cluster1-m02
--------------------------------------------------------------------------------
/content/en/docs/01/01/intro_tracing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/01/01/intro_tracing.png
--------------------------------------------------------------------------------
/content/en/docs/03/cilium_choose_ns.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/03/cilium_choose_ns.png
--------------------------------------------------------------------------------
/content/en/docs/03/hubble_ui_flows.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/03/hubble_ui_flows.png
--------------------------------------------------------------------------------
/content/en/docs/06/cilium_editor_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/06/cilium_editor_1.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-policy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-policy.png
--------------------------------------------------------------------------------
/content/en/docs/01/01/intro_networking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/01/01/intro_networking.png
--------------------------------------------------------------------------------
/content/en/docs/01/01/intro_security.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/01/01/intro_security.png
--------------------------------------------------------------------------------
/content/en/docs/06/cilium_editor_add.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/06/cilium_editor_add.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-dashboard.png
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
1 | # CHANGEME Training
2 |
 3 | This file explains how to set up the infrastructure on which the labs run.
4 |
5 |
6 | ## Prerequisites
7 |
--------------------------------------------------------------------------------
/content/en/docs/03/hubble_ui_servicemap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/03/hubble_ui_servicemap.png
--------------------------------------------------------------------------------
/content/en/docs/07/01/cilium_dns_policy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/07/01/cilium_dns_policy.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-edit-policy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-edit-policy.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-servicemap.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-servicemap.png
--------------------------------------------------------------------------------
/content/en/docs/01/01/intro_observability.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/01/01/intro_observability.png
--------------------------------------------------------------------------------
/content/en/docs/03/hubble_ui_visual_options.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/03/hubble_ui_visual_options.png
--------------------------------------------------------------------------------
/content/en/docs/06/cilium_editor_edit_name.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/06/cilium_editor_edit_name.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-empty-policy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-empty-policy.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-process-tree.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-process-tree.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-network-policies.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-network-policies.png
--------------------------------------------------------------------------------
/content/en/docs/03/cilium_hubble_connectivity_test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/03/cilium_hubble_connectivity_test.png
--------------------------------------------------------------------------------
/content/en/docs/13/hubble-ui-observe-dropped-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/13/hubble-ui-observe-dropped-flow.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | Thumbs.db
2 | .DS_Store
3 | .dist
4 | .tmp
5 | .sass-cache
6 | npm-debug.log
7 | node_modules
8 | builds
9 | public
10 | .env
11 | resources
12 | .idea/
13 |
--------------------------------------------------------------------------------
/content/en/docs/06/cilium_editor_backend-allow-ingress.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/acend/cilium-basics-training/HEAD/content/en/docs/06/cilium_editor_backend-allow-ingress.png
--------------------------------------------------------------------------------
/object-count-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: object-counts
5 | spec:
6 | hard:
7 | count/deployments.apps: "20"
8 | pods: "50"
9 | configmaps: "50"
10 | secrets: "50"
--------------------------------------------------------------------------------
/content/en/docs/11/default.json:
--------------------------------------------------------------------------------
1 |
2 |
3 | {
4 | "private": [
5 | { "id": 1, "body": "another secret information" }
6 | ],
7 | "public": [
8 | { "id": 1, "body": " another public information" }
9 | ]
10 | }
11 |
--------------------------------------------------------------------------------
/content/en/docs/06/backend-ingress-deny.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: NetworkPolicy
3 | apiVersion: networking.k8s.io/v1
4 | metadata:
5 | name: backend-ingress-deny
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: backend
10 | policyTypes:
11 | - Ingress
--------------------------------------------------------------------------------
/content/en/docs/09/02/svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: rebel-base
6 | annotations:
7 | io.cilium/global-service: "true"
8 | spec:
9 | type: ClusterIP
10 | ports:
11 | - port: 80
12 | selector:
13 | name: rebel-base
--------------------------------------------------------------------------------
/content/en/pdf/footer.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Footer"
3 | weight: 10
4 | ---
5 |
6 |
7 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/content/en/docs/07/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Cilium Network Policies"
3 | weight: 7
4 | ---
5 |
6 | ## Cilium Network Policies
7 |
 8 | On top of the default Kubernetes network policies, Cilium provides extended policy enforcement capabilities (such as identity-aware, HTTP-aware and DNS-aware rules) via Cilium Network Policies.
9 |
10 |
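11 | Cilium Network Policies are implemented as Kubernetes custom resources, so you can list and inspect them with `kubectl` like any other resource (`cnp` is the short name for `ciliumnetworkpolicies`):
12 | 
13 | ```bash
14 | # list CiliumNetworkPolicies in all namespaces
15 | kubectl get cnp -A
16 | 
17 | # show the full spec and status of a single policy
18 | kubectl describe cnp <policy-name> -n <namespace>
19 | ```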
--------------------------------------------------------------------------------
/content/en/pdf/header.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Header"
3 | weight: 10
4 | ---
5 |
6 |
7 |
10 |
11 |
--------------------------------------------------------------------------------
/content/en/setup/cleanup.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Cleanup"
3 | weight: 11
4 | type: docs
5 | ---
6 |
7 | ## Remove Kubernetes Cluster
8 |
 9 | You can list and then remove the minikube Kubernetes clusters with the following commands:
10 |
11 | ```bash
12 | minikube profile list
13 | minikube delete -p cluster1
14 | minikube delete -p cluster2
15 | ```
16 |
--------------------------------------------------------------------------------
/content/en/docs/10/01/ccwnp.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: "cilium.io/v2"
3 | kind: CiliumClusterwideNetworkPolicy
4 | metadata:
5 | name: "demo-host-policy"
6 | spec:
7 | description: ""
8 | nodeSelector:
9 | matchLabels:
10 | node-access: ssh
11 | ingress:
12 | - toPorts:
13 | - ports:
14 | - port: "22"
15 | protocol: TCP
--------------------------------------------------------------------------------
/content/en/docs/11/ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: backend
6 | spec:
7 | ingressClassName: cilium
8 | rules:
9 | - http:
10 | paths:
11 | - backend:
12 | service:
13 | name: backend
14 | port:
15 | number: 8080
16 | path: /
17 | pathType: Prefix
--------------------------------------------------------------------------------
/content/en/docs/06/backend-allow-ingress-frontend.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: NetworkPolicy
4 | metadata:
5 | name: "backend-allow-ingress-frontend"
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: backend
10 | policyTypes:
11 | - Ingress
12 | ingress:
13 | - from:
14 | - podSelector:
15 | matchLabels:
16 | app: frontend
17 |
--------------------------------------------------------------------------------
/content/en/docs/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Labs"
3 | weight: 2
4 | menu:
5 | main:
6 | weight: 2
7 | ---
8 |
 9 | The purpose of these labs is to convey Cilium basics through hands-on tasks.
10 |
11 | Goals of these labs:
12 |
13 | * Help you get started with this modern technology
14 | * Explain the basic concepts to you
15 |
16 |
17 | ## Additional Docs
18 |
19 | * [Cilium documentation](https://docs.cilium.io/en/v1.10/)
20 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": true,
3 | "MD003": {
4 | "style": "atx"
5 | },
6 | "MD004": {
7 | "style": "asterisk"
8 | },
9 | "MD012": {
10 | "maximum": 2
11 | },
12 | "MD013": false,
13 | "MD022": {
14 | "lines_above": 2
15 | },
16 | "MD024": false,
17 | "MD031": false,
18 | "MD034": false,
19 | "MD035": {
20 | "style": "---"
21 | },
22 | "MD040": false,
23 | "MD048": {
24 | "style": "backtick"
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/content/en/docs/07/02/cnp.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: "cilium.io/v2"
3 | kind: CiliumNetworkPolicy
4 | metadata:
5 | name: "rule1"
6 | spec:
7 | description: "L3-L4 policy to restrict deathstar access to empire ships only"
8 | endpointSelector:
9 | matchLabels:
10 | org: empire
11 | class: deathstar
12 | ingress:
13 | - fromEndpoints:
14 | - matchLabels:
15 | org: empire
16 | toPorts:
17 | - ports:
18 | - port: "80"
19 | protocol: TCP
--------------------------------------------------------------------------------
/content/en/slides/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Slides"
3 | weight: 3
4 | menu:
5 | main:
6 | weight: 3
7 | ---
8 |
9 | {{< blocks/section color="light">}}
10 |
11 | {{% blocks/feature icon="fa-chalkboard-teacher" url="https://drive.google.com/uc?export=download&id=1Zrse75mWHxFtqYn88RsBiK_BoeDL_5uq" title="Cilium Basics" %}}
12 | {{% /blocks/feature %}}
13 |
14 | {{% blocks/feature icon="fa-chalkboard-teacher" url="../pdf/pdf.pdf" title="Lab PDF" %}}
15 | {{% /blocks/feature %}}
16 |
17 |
18 | {{< /blocks/section >}}
19 |
--------------------------------------------------------------------------------
/content/en/docs/07/02/cnp-l7.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: "cilium.io/v2"
3 | kind: CiliumNetworkPolicy
4 | metadata:
5 | name: "rule1"
6 | spec:
7 | description: "L7 policy to restrict access to specific HTTP call"
8 | endpointSelector:
9 | matchLabels:
10 | org: empire
11 | class: deathstar
12 | ingress:
13 | - fromEndpoints:
14 | - matchLabels:
15 | org: empire
16 | toPorts:
17 | - ports:
18 | - port: "80"
19 | protocol: TCP
20 | rules:
21 | http:
22 | - method: "POST"
23 | path: "/v1/request-landing"
--------------------------------------------------------------------------------
/content/en/docs/13/02/backend-allow-ingress-frontend.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cilium.io/v2
3 | kind: CiliumNetworkPolicy
4 | metadata:
5 | name: backend-allow-ingress-frontend
6 | namespace: default
7 | spec:
8 | endpointSelector:
9 | matchLabels:
10 | app: backend
11 | ingress:
12 | - fromEndpoints:
13 | - matchLabels:
14 | k8s:app: frontend
15 | k8s:io.cilium.k8s.namespace.labels.kubernetes.io/metadata.name: default
16 | k8s:io.kubernetes.pod.namespace: default
17 | toPorts:
18 | - ports:
19 | - port: "8080"
20 |
--------------------------------------------------------------------------------
/content/en/docs/12/helloworld/helloworld.bpf.c:
--------------------------------------------------------------------------------
1 |
2 | #include "common.h"
3 |
4 | // SEC is a macro that expands to create an ELF section which bpf loaders parse.
5 | // we want our function to be executed whenever syscall execve (program execution) is called
6 | SEC("tracepoint/syscalls/sys_enter_execve")
7 | int bpf_prog(void *ctx) {
8 | char msg[] = "Hello world";
9 | // bpf_printk is a bpf helper function which writes strings to /sys/kernel/debug/tracing/trace_pipe (good for debugging purposes)
10 | bpf_printk("%s", msg);
11 | // bpf programs need to return an int
12 | return 0;
13 | }
14 |
15 | char LICENSE[] SEC("license") = "GPL";
--------------------------------------------------------------------------------
/content/en/docs/07/01/backend-egress-allow-fqdn.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: CiliumNetworkPolicy
3 | apiVersion: cilium.io/v2
4 | metadata:
5 | name: backend-egress-allow-fqdn
6 | spec:
7 | endpointSelector:
8 | matchLabels:
9 | app: backend
10 | egress:
11 | - toEndpoints:
12 | - matchLabels:
13 | "k8s:io.kubernetes.pod.namespace": kube-system
14 | "k8s:k8s-app": kube-dns
15 | toPorts:
16 | - ports:
17 | - port: "53"
18 | protocol: ANY
19 | rules:
20 | dns:
21 | - matchPattern: "*"
22 | - toFQDNs:
23 | - matchName: kubernetes.io
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/acend/cilium-basics-training
2 |
3 | go 1.19
4 |
5 | require github.com/cilium/ebpf v0.12.0
6 |
7 | require (
8 | github.com/FortAwesome/Font-Awesome v0.0.0-20240716171331-37eff7fa00de // indirect
9 | github.com/acend/docsy-acend v1.0.0 // indirect
10 | github.com/acend/docsy-plus v1.2.0 // indirect
11 | github.com/google/docsy v0.11.0 // indirect
12 | github.com/google/docsy/dependencies v0.7.2 // indirect
13 | github.com/puzzle/docsy-puzzle v0.0.0-20230123144731-757054047a02 // indirect
14 | github.com/twbs/bootstrap v5.3.7+incompatible // indirect
15 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect
16 | golang.org/x/sys v0.27.0 // indirect
17 | )
18 |
--------------------------------------------------------------------------------
/content/en/docs/09/03/cnp-cm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "cilium.io/v2"
2 | kind: CiliumNetworkPolicy
3 | metadata:
4 | name: "allow-cross-cluster"
5 | spec:
6 | description: "Allow x-wing in cluster1 to only contact rebel-base in cluster1"
7 | endpointSelector:
8 | matchLabels:
9 | name: x-wing
10 | io.cilium.k8s.policy.cluster: cluster1
11 | egress:
12 | - toEndpoints:
13 | - matchLabels:
14 | "k8s:io.kubernetes.pod.namespace": kube-system
15 | "k8s:k8s-app": kube-dns
16 | toPorts:
17 | - ports:
18 | - port: "53"
19 | protocol: ANY
20 | rules:
21 | dns:
22 | - matchPattern: "*"
23 | - toEndpoints:
24 | - matchLabels:
25 | name: rebel-base
26 | io.cilium.k8s.policy.cluster: cluster1
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "changeme-training",
3 | "version": "0.0.9",
4 | "description": "changeme Training",
5 | "repository": {
6 | "type": "git",
7 | "url": "git+https://github.com/changeme/changeme-training.git"
8 | },
9 | "author": "changeme",
10 | "scripts": {
11 | "start": "bash -c \"docker run --rm --interactive --publish 8080:8080 -v $(pwd):/src:Z klakegg/hugo:$(grep \"FROM klakegg/hugo\" Dockerfile | sed 's/FROM klakegg\\/hugo://g' | sed 's/ AS builder//g') server -p 8080 --bind 0.0.0.0\"",
12 | "mdlint": "markdownlint --config .markdownlint.json content *.md",
13 | "prepare": "husky install"
14 | },
15 | "bugs": {
16 | "url": "https://github.com/changeme/changeme-training/issues"
17 | },
18 | "homepage": "https://github.com/changeme/changeme-training#readme",
19 | "devDependencies": {
20 | "husky": "9.1.7",
21 | "markdownlint-cli": "0.43.0"
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/content/en/docs/12/helloworld/helloworld.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "log"
5 |
6 | "github.com/cilium/ebpf/link"
7 | "github.com/cilium/ebpf/rlimit"
8 | )
9 |
10 | func main() {
11 |
12 | // Allow the current process to lock memory for eBPF resources.
13 | if err := rlimit.RemoveMemlock(); err != nil {
14 | log.Fatal(err)
15 | }
16 |
17 | // Here we load our bpf code into the kernel, these functions are in the
18 | // .go file created by bpf2go
19 | objs := bpfObjects{}
20 | if err := loadBpfObjects(&objs, nil); err != nil {
21 | log.Fatalf("loading objects: %s", err)
22 | }
23 | defer objs.Close()
24 |
25 | //SEC("tracepoint/syscalls/sys_enter_execve")
26 | kp, err := link.Tracepoint("syscalls", "sys_enter_execve", objs.BpfProg, nil)
27 | if err != nil {
28 | log.Fatalf("opening tracepoint: %s", err)
29 | }
30 | defer kp.Close()
31 |
32 | for {
33 | }
34 |
35 | log.Println("Received signal, exiting program..")
36 | }
37 |
--------------------------------------------------------------------------------
/content/en/docs/11/cnp-l7-sm.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: "cilium.io/v2"
3 | kind: CiliumNetworkPolicy
4 | metadata:
5 | name: "rule1"
6 | spec:
7 | description: "enable L7 without blocking"
8 | endpointSelector:
9 | matchLabels:
10 | app: backend
11 | ingress:
12 | - fromEntities:
13 | - "all"
14 | toPorts:
15 | - ports:
16 | - port: "8080"
17 | protocol: TCP
18 | rules:
19 | http:
20 | - method: "GET"
21 | path: "/private"
22 | ---
23 | apiVersion: "cilium.io/v2"
24 | kind: CiliumNetworkPolicy
25 | metadata:
26 | name: "rule2"
27 | spec:
28 | description: "enable L7 without blocking"
29 | endpointSelector:
30 | matchLabels:
31 | app: backend-2
32 | ingress:
33 | - fromEntities:
34 | - "all"
35 | toPorts:
36 | - ports:
37 | - port: "8080"
38 | protocol: TCP
39 | rules:
40 | http:
41 | - method: "GET"
42 | path: "/private"
--------------------------------------------------------------------------------
/helm-chart/values.yaml:
--------------------------------------------------------------------------------
1 | # CHANGEME
2 |
3 |
4 | acendTraining:
5 | servicePort: 8080
6 | deployments:
7 | -
8 | name: acend
9 | replicaCount: 2
10 | image:
11 | repository: quay.io/acend/cilium-basics-training
12 | pullPolicy: Always
13 | tag: ""
14 | tagsuffix: ""
15 | ingress:
16 | secretOverride: acend-wildcard
17 | appname: cilium-basics
18 | domain: training.acend.ch
19 | -
20 | name: techlab
21 | replicaCount: 2
22 | image:
23 | repository: quay.io/acend/cilium-basics-training
24 | pullPolicy: Always
25 | tag: ""
26 | tagsuffix: "-techlab"
27 | ingress:
28 | secretOverride: acend-wildcard
29 | appname: cilium-techlab
30 | domain: training.acend.ch
31 |
32 |
33 |
34 | nameOverride: "cilium-basics-training"
35 | fullnameOverride: ""
36 |
37 | serviceAccount:
38 | # Specifies whether a service account should be created
39 | create: true
40 | # Annotations to add to the service account
41 | annotations: {}
42 | # The name of the service account to use.
43 | # If not set and create is true, a name is generated using the fullname template
44 | name: ""
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to contribute to our labs
2 |
3 | :+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
4 |
5 |
6 | ## Did you find a bug
7 |
8 | * **Ensure the bug was not already reported** by searching on GitHub under [Issues](/../../issues).
9 |
10 | * If you're unable to find an open issue addressing the problem, [open a new one](/../../issues/new). Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring.
11 |
12 |
13 | ## Did you write a patch that fixes a bug
14 |
15 | * Open a new GitHub pull request with the patch.
16 |
17 | * Ensure the PR description clearly describes the problem and solution. Include the relevant issue number if applicable.
18 |
19 |
20 | ## Do you intend to add a new feature or change an existing one
21 |
22 | * **Feature Request**: open an issue on GitHub and describe your feature.
23 |
24 | * **New Feature**: Implement your Feature on a fork and create a pull request. The core team will gladly check and eventually merge your pull request.
25 |
26 |
27 | ## Do you have questions about the training
28 |
29 | * Ask your question as an issue on GitHub.
30 |
31 | Thanks!
32 |
--------------------------------------------------------------------------------
/content/en/docs/07/02/sw-app.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: deathstar
6 | labels:
7 | app.kubernetes.io/name: deathstar
8 | spec:
9 | type: ClusterIP
10 | ports:
11 | - port: 80
12 | selector:
13 | org: empire
14 | class: deathstar
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: deathstar
20 | labels:
21 | app.kubernetes.io/name: deathstar
22 | spec:
23 | replicas: 2
24 | selector:
25 | matchLabels:
26 | org: empire
27 | class: deathstar
28 | template:
29 | metadata:
30 | labels:
31 | org: empire
32 | class: deathstar
33 | app.kubernetes.io/name: deathstar
34 | spec:
35 | containers:
36 | - name: deathstar
37 | image: docker.io/cilium/starwars
38 | ---
39 | apiVersion: v1
40 | kind: Pod
41 | metadata:
42 | name: tiefighter
43 | labels:
44 | org: empire
45 | class: tiefighter
46 | app.kubernetes.io/name: tiefighter
47 | spec:
48 | containers:
49 | - name: spaceship
50 | image: docker.io/tgraf/netperf
51 | ---
52 | apiVersion: v1
53 | kind: Pod
54 | metadata:
55 | name: xwing
56 | labels:
57 | app.kubernetes.io/name: xwing
58 | org: alliance
59 | class: xwing
60 | spec:
61 | containers:
62 | - name: spaceship
63 | image: docker.io/tgraf/netperf
--------------------------------------------------------------------------------
/content/en/_index.html:
--------------------------------------------------------------------------------
1 | +++
2 | title = "Cilium Basics Training"
3 | linkTitle = "Cilium Basics Training"
4 |
5 | +++
6 | {{< onlyWhenNot techlab >}}
7 | {{< blocks/cover title="Welcome to the Cilium Basics Training" image_anchor="top" height="full" color="primary" >}}
8 |
19 | {{< /blocks/cover >}}
20 | {{< /onlyWhenNot >}}
21 | {{< onlyWhen techlab >}}
22 | {{< blocks/cover title="Welcome to the Cilium Techlab" image_anchor="top" height="full" color="primary" >}}
23 |
34 | {{< /blocks/cover >}}
35 | {{< /onlyWhen >}}
36 |
--------------------------------------------------------------------------------
/content/en/docs/11/backend2.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | data:
4 | default.json: |
5 | {
6 | "private": [
7 | { "id": 1, "body": "another secret information from a different backend" }
8 | ],
9 | "public": [
10 | { "id": 1, "body": "another public information from a different backend" }
11 | ]
12 | }
13 | kind: ConfigMap
14 | metadata:
15 | name: default-json
16 | ---
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | metadata:
20 | name: backend-2
21 | labels:
22 | app: backend-2
23 | spec:
24 | replicas: 1
25 | selector:
26 | matchLabels:
27 | app: backend-2
28 | template:
29 | metadata:
30 | labels:
31 | app: backend-2
32 | spec:
33 | volumes:
34 | - name: default-json
35 | configMap:
36 | name: default-json
37 | containers:
38 | - name: backend-container
39 | env:
40 | - name: PORT
41 | value: "8080"
42 | ports:
43 | - containerPort: 8080
44 | image: docker.io/cilium/json-mock:1.2
45 | imagePullPolicy: IfNotPresent
46 | volumeMounts:
47 | - name: default-json
48 | mountPath: /default.json
49 | subPath: default.json
50 | ---
51 | apiVersion: v1
52 | kind: Service
53 | metadata:
54 | name: backend-2
55 | labels:
56 | app: backend-2
57 | spec:
58 | type: ClusterIP
59 | selector:
60 | app: backend-2
61 | ports:
62 | - name: http
63 | port: 8080
--------------------------------------------------------------------------------
/content/en/docs/09/02/cluster1.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: rebel-base
6 | spec:
7 | selector:
8 | matchLabels:
9 | name: rebel-base
10 | replicas: 2
11 | template:
12 | metadata:
13 | labels:
14 | name: rebel-base
15 | spec:
16 | containers:
17 | - name: rebel-base
18 | image: docker.io/nginx:1.15.8
19 | volumeMounts:
20 | - name: html
21 | mountPath: /usr/share/nginx/html/
22 | livenessProbe:
23 | httpGet:
24 | path: /
25 | port: 80
26 | periodSeconds: 1
27 | readinessProbe:
28 | httpGet:
29 | path: /
30 | port: 80
31 | volumes:
32 | - name: html
33 | configMap:
34 | name: rebel-base-response
35 | items:
36 | - key: message
37 | path: index.html
38 | ---
39 | apiVersion: v1
40 | kind: ConfigMap
41 | metadata:
42 | name: rebel-base-response
43 | data:
44 | message: "{\"Galaxy\": \"Alderaan\", \"Cluster\": \"Cluster-1\"}\n"
45 | ---
46 | apiVersion: apps/v1
47 | kind: Deployment
48 | metadata:
49 | name: x-wing
50 | spec:
51 | selector:
52 | matchLabels:
53 | name: x-wing
54 | replicas: 2
55 | template:
56 | metadata:
57 | labels:
58 | name: x-wing
59 | spec:
60 | containers:
61 | - name: x-wing-container
62 | image: docker.io/cilium/json-mock:1.2
63 | livenessProbe:
64 | exec:
65 | command:
66 | - curl
67 | - -sS
68 | - -o
69 | - /dev/null
70 | - localhost
71 | readinessProbe:
72 | exec:
73 | command:
74 | - curl
75 | - -sS
76 | - -o
77 | - /dev/null
78 | - localhost
--------------------------------------------------------------------------------
/content/en/docs/09/02/cluster2.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: rebel-base
6 | spec:
7 | selector:
8 | matchLabels:
9 | name: rebel-base
10 | replicas: 2
11 | template:
12 | metadata:
13 | labels:
14 | name: rebel-base
15 | spec:
16 | containers:
17 | - name: rebel-base
18 | image: docker.io/nginx:1.15.8
19 | volumeMounts:
20 | - name: html
21 | mountPath: /usr/share/nginx/html/
22 | livenessProbe:
23 | httpGet:
24 | path: /
25 | port: 80
26 | periodSeconds: 1
27 | readinessProbe:
28 | httpGet:
29 | path: /
30 | port: 80
31 | volumes:
32 | - name: html
33 | configMap:
34 | name: rebel-base-response
35 | items:
36 | - key: message
37 | path: index.html
38 | ---
39 | apiVersion: v1
40 | kind: ConfigMap
41 | metadata:
42 | name: rebel-base-response
43 | data:
44 | message: "{\"Galaxy\": \"Alderaan\", \"Cluster\": \"Cluster-2\"}\n"
45 | ---
46 | apiVersion: apps/v1
47 | kind: Deployment
48 | metadata:
49 | name: x-wing
50 | spec:
51 | selector:
52 | matchLabels:
53 | name: x-wing
54 | replicas: 2
55 | template:
56 | metadata:
57 | labels:
58 | name: x-wing
59 | spec:
60 | containers:
61 | - name: x-wing-container
62 | image: docker.io/cilium/json-mock:1.2
63 | livenessProbe:
64 | exec:
65 | command:
66 | - curl
67 | - -sS
68 | - -o
69 | - /dev/null
70 | - localhost
71 | readinessProbe:
72 | exec:
73 | command:
74 | - curl
75 | - -sS
76 | - -o
77 | - /dev/null
78 | - localhost
--------------------------------------------------------------------------------
/content/en/docs/03/01/simple-app.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: frontend
6 | labels:
7 | app: frontend
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: frontend
13 | template:
14 | metadata:
15 | labels:
16 | app: frontend
17 | spec:
18 | containers:
19 | - name: frontend-container
20 | image: docker.io/byrnedo/alpine-curl:0.1.8
21 | imagePullPolicy: IfNotPresent
22 | command: [ "/bin/ash", "-c", "sleep 1000000000" ]
23 | ---
24 | apiVersion: apps/v1
25 | kind: Deployment
26 | metadata:
27 | name: not-frontend
28 | labels:
29 | app: not-frontend
30 | spec:
31 | replicas: 1
32 | selector:
33 | matchLabels:
34 | app: not-frontend
35 | template:
36 | metadata:
37 | labels:
38 | app: not-frontend
39 | spec:
40 | containers:
41 | - name: not-frontend-container
42 | image: docker.io/byrnedo/alpine-curl:0.1.8
43 | imagePullPolicy: IfNotPresent
44 | command: [ "/bin/ash", "-c", "sleep 1000000000" ]
45 | ---
46 | apiVersion: apps/v1
47 | kind: Deployment
48 | metadata:
49 | name: backend
50 | labels:
51 | app: backend
52 | spec:
53 | replicas: 1
54 | selector:
55 | matchLabels:
56 | app: backend
57 | template:
58 | metadata:
59 | labels:
60 | app: backend
61 | spec:
62 | containers:
63 | - name: backend-container
64 | env:
65 | - name: PORT
66 | value: "8080"
67 | ports:
68 | - containerPort: 8080
69 | image: docker.io/cilium/json-mock:1.2
70 | imagePullPolicy: IfNotPresent
71 | ---
72 | apiVersion: v1
73 | kind: Service
74 | metadata:
75 | name: backend
76 | labels:
77 | app: backend
78 | spec:
79 | type: ClusterIP
80 | selector:
81 | app: backend
82 | ports:
83 | - name: http
84 | port: 8080
--------------------------------------------------------------------------------
/content/en/docs/13/04/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Exporting Events"
3 | weight: 134
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 |
8 | ## {{% task %}} Export Network Events
9 |
10 | Edit your `cilium-enterprise-values.yaml` file and include the `export-file-path` field to export network events:
11 |
12 | ```yaml
13 | cilium:
14 | (...)
15 | extraConfig:
16 | # Enable network event export
17 | export-file-path: "/var/run/cilium/hubble/hubble.log"
18 | (...)
19 | hubble-enterprise:
20 | enabled: true
21 |
22 | ```
23 |
24 | Then, run the `helm upgrade` command to apply the new configuration:
25 |
26 | ```bash
27 | helm upgrade cilium-enterprise isovalent/cilium-enterprise --version {{% param "ciliumVersion.enterprise" %}} \
28 | --namespace kube-system -f cilium-enterprise-values.yaml --wait
29 | ```
30 |
31 | and restart the Cilium DaemonSet for the new configuration to take effect:
32 |
33 | ```bash
34 | kubectl rollout restart -n kube-system ds/cilium
35 | ```
36 |
37 |
38 | ## {{% task %}} Export Process Events
39 |
40 |
41 | Edit your `cilium-enterprise-values.yaml` file and include the `exportFilename` field to export process events:
42 |
43 | ```yaml
44 | cilium:
45 | (...)
46 | hubble-enterprise:
47 | enabled: true
48 | enterprise:
49 | # Enable process event export
50 | exportFilename: "fgs.log"
51 | (...)
52 | ```
53 |
54 | Then, run the `helm upgrade` command to apply the new configuration:
55 |
56 | ```bash
57 | helm upgrade cilium-enterprise isovalent/cilium-enterprise --version {{% param "ciliumVersion.enterprise" %}} \
58 | --namespace kube-system -f cilium-enterprise-values.yaml --wait
59 | ```
60 |
61 |
62 | ## {{% task %}} Observe Exported Events
63 |
64 | Run the following command to observe exported events in `export-stdout` container logs:
65 |
66 | ```bash
67 | kubectl logs -n kube-system -l app.kubernetes.io/name=hubble-enterprise -c export-stdout -f
68 | ```
69 |
70 | Those exported events can now be sent to Splunk, Elasticsearch or similar.
71 |
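72 | Since each exported event is a JSON object, you can also inspect the export locally before shipping it to an external system. A small sketch, assuming `jq` is installed on your machine:
73 | 
74 | ```bash
75 | # pretty-print the last few exported events from the export-stdout container
76 | kubectl logs -n kube-system -l app.kubernetes.io/name=hubble-enterprise -c export-stdout --tail=20 | jq .
77 | ```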
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/floryn90/hugo:0.143.1-ext-ubuntu AS builder
2 |
3 | ARG TRAINING_HUGO_ENV=default
4 | USER root
5 | COPY . /src
6 |
7 | RUN hugo --environment ${TRAINING_HUGO_ENV} --minify
8 |
9 | RUN apt-get update \
10 | && apt-get install -y imagemagick
11 |
12 | RUN find /src/public/docs/ -regex '.*\(jpg\|jpeg\|png\|gif\)' -exec mogrify -path /src/public/pdf -resize 800\> -unsharp 0.25x0.25+8+0.065 "{}" \;
13 | RUN find /src/public/docs/ -regex '.*\(jpg\|jpeg\|png\|gif\)' -exec mogrify -path /src/public -resize 800\> -unsharp 0.25x0.25+8+0.065 "{}" \;
14 |
15 | FROM ubuntu:noble AS wkhtmltopdf
16 | RUN apt-get update \
17 | && apt-get install -y curl \
18 | && curl -L https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6.1-2/wkhtmltox_0.12.6.1-2.jammy_amd64.deb --output wkhtmltox_0.12.6.1-2.jammy_amd64.deb \
19 | && ls -la \
20 | && apt-get install -y /wkhtmltox_0.12.6.1-2.jammy_amd64.deb \
21 | && rm -rf /var/lib/apt/lists/* \
22 | && rm -rf /wkhtmltox_0.12.6.1-2.jammy_amd64.deb
23 |
24 | COPY --from=builder /src/public /
25 |
26 | RUN wkhtmltopdf --enable-internal-links --enable-local-file-access \
27 | --margin-top 35mm --margin-bottom 22mm --margin-left 15mm --margin-right 10mm \
28 | --enable-internal-links --enable-local-file-access \
29 | --header-html /pdf/header/index.html --footer-html /pdf/footer/index.html \
30 | --dpi 600 \
31 | /pdf/index.html /pdf.pdf
32 |
33 | FROM nginxinc/nginx-unprivileged:1.27-alpine
34 |
35 | LABEL maintainer acend.ch
36 | LABEL org.opencontainers.image.title "acend.ch's Cilium Basics Training"
37 | LABEL org.opencontainers.image.description "Container with the Training Content for acend.ch's Cilium Basics Training"
38 | LABEL org.opencontainers.image.authors acend.ch
39 | LABEL org.opencontainers.image.source https://github.com/acend/cilium-basics-training/
40 | LABEL org.opencontainers.image.licenses CC-BY-SA-4.0
41 |
42 | EXPOSE 8080
43 |
44 | COPY --from=builder /src/public /usr/share/nginx/html
45 | COPY --from=wkhtmltopdf /pdf.pdf /usr/share/nginx/html/pdf/pdf.pdf
46 |
--------------------------------------------------------------------------------
/content/en/setup/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Setup"
3 | weight: 1
4 | type: docs
5 | menu:
6 | main:
7 | weight: 1
8 | ---
9 |
10 | This training can be done in two ways:
11 |
12 | * on a local machine (proceed with [Local Machine Setup](#local-machine-setup))
13 | * on a provided virtual machine using the webshell (proceed with [Webshell access](#webshell-access))
14 |
15 |
16 | ## Local machine setup
17 |
18 |
19 | ### Technical prerequisites
20 |
21 | To run this training on your local machine please make sure the following requirements are met:
22 |
23 | * Operating System: Linux with Kernel >= 4.9.17 or macOS
24 | * Docker [installed](https://docs.docker.com/get-docker/)
25 | * kubectl >= 1.24 [installed](https://kubernetes.io/docs/tasks/tools/#kubectl)
26 | * minikube >= 1.26 installed
27 | * helm installed
28 | * Minimum 8GB RAM
29 |
30 | A note on Windows with WSL2: As of August 2022 the default kernel in WSL is missing some Netfilter modules. You can compile it [yourself](https://github.com/cilium/cilium/issues/17745#issuecomment-1004299480), but the training staff cannot give you any support with cluster-related issues.
31 |
32 |
33 | ## Install minikube
34 |
35 | This training uses [minikube](https://minikube.sigs.k8s.io/docs/) to provide a Kubernetes Cluster.
36 |
37 | Check the [minikube start Guide](https://minikube.sigs.k8s.io/docs/start/) for instructions on how to install minikube on your system. If you are using the provided virtual machine, minikube is already installed.
38 |
39 |
40 | ## Install helm
41 |
42 | For a complete overview refer to the helm installation [website](https://helm.sh/docs/intro/install/). If you have helm 3 already installed you can skip this step.
43 |
44 | Use your package manager (`apt`, `yum`, `brew` etc.), download the [latest Release](https://github.com/helm/helm/releases) or use the following command to install [helm](https://helm.sh/docs/intro/install/):
45 |
46 | ```bash
47 | curl -s https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
48 | ```
49 |
50 |
51 | ## Webshell access
52 |
53 | Your trainer will give you the necessary details.
54 |
--------------------------------------------------------------------------------
/content/en/docs/11/envoyconfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cilium.io/v2
2 | kind: CiliumEnvoyConfig
3 | metadata:
4 | name: envoy-lb-listener
5 | spec:
6 | services:
7 | - name: backend
8 | namespace: default
9 | - name: backend-2
10 | namespace: default
11 | resources:
12 | - "@type": type.googleapis.com/envoy.config.listener.v3.Listener
13 | name: envoy-lb-listener
14 | filter_chains:
15 | - filters:
16 | - name: envoy.filters.network.http_connection_manager
17 | typed_config:
18 | "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
19 | stat_prefix: envoy-lb-listener
20 | rds:
21 | route_config_name: lb_route
22 | http_filters:
23 | - name: envoy.filters.http.router
24 | typed_config:
25 | "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
26 | - "@type": type.googleapis.com/envoy.config.route.v3.RouteConfiguration
27 | name: lb_route
28 | virtual_hosts:
29 | - name: "lb_route"
30 | domains: ["*"]
31 | routes:
32 | - match:
33 | prefix: "/private"
34 | route:
35 | weighted_clusters:
36 | clusters:
37 | - name: "default/backend"
38 | weight: 50
39 | - name: "default/backend-2"
40 | weight: 50
41 | retry_policy:
42 | retry_on: 5xx
43 | num_retries: 3
44 | per_try_timeout: 1s
45 | - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
46 | name: "default/backend"
47 | connect_timeout: 5s
48 | lb_policy: ROUND_ROBIN
49 | type: EDS
50 | outlier_detection:
51 | split_external_local_origin_errors: true
52 | consecutive_local_origin_failure: 2
53 | - "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster
54 | name: "default/backend-2"
55 | connect_timeout: 3s
56 | lb_policy: ROUND_ROBIN
57 | type: EDS
58 | outlier_detection:
59 | split_external_local_origin_errors: true
60 | consecutive_local_origin_failure: 2
--------------------------------------------------------------------------------
/content/en/docs/13/03/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Process Visibility"
3 | weight: 133
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 |
8 | ## {{% task %}} Enable Process Visibility
9 |
10 |
11 | Edit your `cilium-enterprise-values.yaml` file so that it reads:
12 |
13 | ```yaml
14 | cilium:
15 | (...)
16 | hubble-enterprise:
17 | enabled: true
18 | enterprise:
19 | enabled: true
20 | (...)
21 | ```
22 |
23 | Then, run the `helm upgrade` command to apply the new configuration:
24 |
25 | ```bash
26 | helm upgrade cilium-enterprise isovalent/cilium-enterprise --version {{% param "ciliumVersion.enterprise" %}} \
27 | --namespace kube-system -f cilium-enterprise-values.yaml --wait
28 | ```
29 |
30 |
31 | ## {{% task %}} Validate the Installation
32 |
33 |
34 | First, please run:
35 |
36 | ```bash
37 | kubectl get ds -n kube-system hubble-enterprise
38 | ```
39 |
40 | and ensure that all pods of the `hubble-enterprise` DaemonSet are in the `READY` state.
41 |
42 |
43 | Run the `hubble-enterprise` command to validate that Cilium Enterprise is configured with process visibility enabled:
44 |
45 | ```bash
46 | kubectl exec -n kube-system ds/hubble-enterprise -c enterprise -- hubble-enterprise getevents
47 | ```
48 |
49 | and you will see process events from one of the `hubble-enterprise` pods in JSON format.
50 |
51 |
52 | ## {{% task %}} Export logs and visualize in Hubble UI Process Tree
53 |
54 | Execute the connectivity test from `frontend` to `backend` again to make sure we have some data to visualize:
55 |
56 | ```bash
57 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
58 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
59 | ```
60 |
61 | Then, use the following command to export process events from `hubble-enterprise`:
62 |
63 | ```bash
64 | kubectl logs -n kube-system ds/hubble-enterprise -c export-stdout --since=1h > export.log
65 | ```
66 |
67 | In the Hubble UI, open the Process Tree and click the `Upload` button. Upload the previously created `export.log`. Now you can select the `default` namespace and one of the pods, e.g. `frontend-xxxxx-xxx`.
68 |
69 | *(screenshot: process tree of the selected pod in the Hubble UI)*
70 |
71 | We see our previously executed `curl` command and that the process opened a connection to an IP on port 8080.
72 |
73 | By clicking on one of the events, e.g. the `Connect` event for the `curl` command, you get some more details about the selected event.
74 |
--------------------------------------------------------------------------------
/content/en/docs/09/03/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Network Policies"
3 | weight: 93
4 | ---
5 |
6 | ## {{% task %}} Allowing Specific Communication Between Clusters
7 |
8 |
9 | The following policy illustrates how to allow particular pods to communicate between two clusters.
10 |
11 | {{< readfile file="/content/en/docs/09/03/cnp-cm.yaml" code="true" lang="yaml" >}}
12 |
13 | {{% alert title="Note" color="primary" %}}
14 | For the pods to resolve the `rebel-base` service name, they still need connectivity to the Kubernetes DNS service. Therefore, access to it is also allowed.
15 | {{% /alert %}}
16 |
17 | Kubernetes security policies are not automatically distributed across clusters; it is your responsibility to apply `CiliumNetworkPolicy` or `NetworkPolicy` in all clusters.
18 |
19 | Create a file `cnp-cm.yaml` with the above content and apply the `CiliumNetworkPolicy` to both clusters:
20 |
21 | ```bash
22 | kubectl --context cluster1 apply -f cnp-cm.yaml
23 | kubectl --context cluster2 apply -f cnp-cm.yaml
24 | ```
25 |
26 | Let us run our `curl` `for` loop again:
27 |
28 | ```bash
29 | XWINGPOD=$(kubectl --context cluster1 get pod -l name=x-wing -o jsonpath="{.items[0].metadata.name}")
30 | for i in {1..10}; do
31 | kubectl --context cluster1 exec -it $XWINGPOD -- curl -m 1 rebel-base
32 | done
33 | ```
34 |
35 | and as a result you see:
36 |
37 | ```
38 | curl: (28) Connection timed out after 1001 milliseconds
39 | command terminated with exit code 28
40 | curl: (28) Connection timed out after 1000 milliseconds
41 | command terminated with exit code 28
42 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
43 | curl: (28) Connection timed out after 1000 milliseconds
44 | command terminated with exit code 28
45 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
46 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
47 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
48 | curl: (28) Connection timed out after 1000 milliseconds
49 | command terminated with exit code 28
50 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
51 | curl: (28) Connection timed out after 1000 milliseconds
52 | command terminated with exit code 28
53 | ```
54 |
55 | All connections to `cluster2` are dropped while the ones to `cluster1` are still working.
56 |
57 |
58 | ## {{% task %}} Cleanup
59 |
60 | We will disconnect our cluster mesh again and delete the second cluster:
61 |
62 | ```bash
63 | cilium clustermesh disconnect --context cluster1 --destination-context cluster2
64 | minikube delete --profile cluster2
65 | minikube profile cluster1
66 |
67 | ```
68 |
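69 | You can check that only `cluster1` is left with:
70 | 
71 | ```bash
72 | # lists the remaining minikube profiles; cluster2 should be gone
73 | minikube profile list
74 | ```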
--------------------------------------------------------------------------------
/content/en/docs/13/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Network Policies"
3 | weight: 132
4 |
5 | OnlyWhenNot: techlab
6 | ---
7 |
8 |
9 | ## {{% task %}} Create a network policy with the Hubble UI
10 |
11 | The Enterprise Hubble UI has an integrated Network Policy Editor similar to the one we already know from the Cilium Network Policy lab. The Enterprise Network Policy Editor allows you to use knowledge of the current flows to easily create new policies.
12 |
13 | Go to `Network Policies`:
14 |
15 |
16 | 
17 |
18 | Then create a new empty policy:
19 |
20 | *(screenshot: creating an empty policy in the Hubble UI)*
21 |
22 | We now want to allow traffic from the `frontend` pod to the `backend` pod while traffic from `not-frontend` to `backend` is blocked. In the right panel you see the existing flows. Select the flow from `frontend` to `backend` and then click the `Add rule to policy` button. The Network Policy Editor now visualizes the policy.
23 |
24 | *(screenshot: the policy visualized in the Network Policy Editor)*
25 |
26 | Edit the Policy Name to `backend-allow-ingress-frontend` and also add `app=backend` as the endpoint selector:
27 |
28 | *(screenshot: editing the policy name and endpoint selector)*
29 |
30 | Afterwards, download the CiliumNetworkPolicy, which should look like this:
31 |
32 | {{< readfile file="/content/en/docs/13/02/backend-allow-ingress-frontend.yaml" code="true" lang="yaml" >}}
33 |
34 |
35 | ## {{% task %}} Apply Network Policy
36 |
37 | Apply the file with:
38 |
39 | ```bash
40 | kubectl apply -f backend-allow-ingress-frontend.yaml
41 | ```
42 |
43 | and then execute the connectivity test again:
44 |
45 | ```bash
46 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
47 | ```
48 |
49 | and
50 |
51 | ```bash
52 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
53 | ```
54 |
55 | And you see that the `frontend` application is able to connect to the `backend`, but the `not-frontend` application cannot connect to the `backend`:
56 |
57 | ```
58 | # Frontend
59 | HTTP/1.1 200 OK
60 | X-Powered-By: Express
61 | Vary: Origin, Accept-Encoding
62 | Access-Control-Allow-Credentials: true
63 | Accept-Ranges: bytes
64 | Cache-Control: public, max-age=0
65 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
66 | ETag: W/"83d-7438674ba0"
67 | Content-Type: text/html; charset=UTF-8
68 | Content-Length: 2109
69 | Date: Tue, 23 Nov 2021 13:08:27 GMT
70 | Connection: keep-alive
71 |
72 | # Not Frontend
73 | curl: (28) Connection timed out after 5001 milliseconds
74 | command terminated with exit code 28
75 |
76 | ```
77 |
78 |
79 | ## {{% task %}} Observe the Network Flows
80 |
81 | In the Hubble UI Service Map you now see some `dropped` flows.
82 |
83 | .
84 |
85 | By clicking on the `Review` button, the Enterprise Hubble UI shows you which network policy caused the `dropped` verdict.
86 |
--------------------------------------------------------------------------------
/content/en/docs/12/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "eBPF"
3 | weight: 12
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 | To deepen our understanding of eBPF we will write and compile a small eBPF app:
8 |
9 |
10 | ## {{% task %}} Hello World
11 |
12 | ebpf-go is a pure Go library from the Cilium project that provides utilities for loading, compiling, and debugging eBPF programs.
13 |
14 | We will use this library and add our own hello world app as an example to it:
15 |
16 | ```bash
17 | git clone https://github.com/cilium/ebpf.git
18 | cd ebpf/
19 | git checkout v0.9.3
20 | cd examples
21 | mkdir helloworld
22 | cd helloworld
23 | ```
24 |
25 | In the `helloworld` directory create two files named `helloworld.bpf.c` (eBPF code) and `helloworld.go` (loading, user side):
26 |
27 | helloworld.bpf.c:
28 | {{< readfile file="/content/en/docs/12/helloworld/helloworld.bpf.c" code="true" lang="c" >}}
29 |
30 | helloworld.go:
31 | {{< readfile file="/content/en/docs/12/helloworld/helloworld.go" code="true" lang="go" >}}
32 |
33 |
34 | To compile the C code into eBPF bytecode and generate the corresponding Go source files, we use a tool named bpf2go along with clang.
35 | For a reproducible build we use the toolchain inside a Docker container:
36 |
37 | ```bash
38 | docker pull "ghcr.io/cilium/ebpf-builder:1666886595"
39 | docker run -it --rm -v "$(pwd)/../..":/ebpf \
40 | -w /ebpf/examples/helloworld \
41 | --env MAKEFLAGS \
42 | --env CFLAGS="-fdebug-prefix-map=/ebpf=." \
43 | --env HOME="/tmp" \
44 | "ghcr.io/cilium/ebpf-builder:1666886595" /bin/bash
45 | ```
46 | Now, inside the container, we generate the ELF and Go files:
47 | ```bash
48 | GOPACKAGE=main go run github.com/cilium/ebpf/cmd/bpf2go -cc clang-14 -cflags '-O2 -g -Wall -Werror' bpf helloworld.bpf.c -- -I../headers
49 | ```
50 |
51 | Let us examine the newly created files: `bpf_bpfel.go`/`bpf_bpfeb.go` contain the Go code for the user-space side of our app.
52 | The `bpf_bpfel.o`/`bpf_bpfeb.o` files are ELF files and can be examined using readelf:
53 |
54 | ```bash
55 | readelf --section-details --headers bpf_bpfel.o
56 | ```
57 |
58 | We see two things:
59 |
60 | * the `Machine` field reads "Linux BPF", and
61 | * our tracepoint `sys_enter_execve` shows up in the section listing (`tracepoint/syscalls/sys_enter_execve`).
62 |
63 |
64 | {{% alert title="Note" color="primary" %}}
65 | There are always two files created: `bpf_bpfel.o` for little-endian systems (like x86) and `bpf_bpfeb.o` for big-endian systems.
66 | {{% /alert %}}
67 |
68 | Now we have everything in place to build our app:
69 |
70 |
71 | ```bash
72 | go mod tidy
73 | go build helloworld.go bpf_bpfel.go
74 | exit #exit container
75 | ```
76 |
77 | Let us first cat the trace pipe in a second terminal (webshell: don't forget to connect to the VM first):
78 |
79 | ```bash
80 | sudo cat /sys/kernel/debug/tracing/trace_pipe
81 | ```
82 |
83 | and in the first terminal execute our eBPF app:
84 |
85 | ```bash
86 | sudo ./helloworld
87 | ```
88 |
89 | Now we can see that for each program called in Linux, our code is executed and writes "Hello world" to the trace pipe.
90 |
91 | Now close both apps by hitting Ctrl+C; you can also close the second terminal.
92 |
--------------------------------------------------------------------------------
/content/en/docs/13/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Cilium Enterprise"
3 | weight: 13
4 |
5 | OnlyWhenNot: techlab
6 | ---
7 |
8 | So far, we used the Cilium CNI in the Open Source Software (OSS) version. Cilium OSS has [joined the CNCF as an incubating project](https://www.cncf.io/blog/2021/10/13/cilium-joins-cncf-as-an-incubating-project/) and only recently during KubeCon 2022 NA [applied to become a CNCF graduated project](https://cilium.io/blog/2022/10/27/cilium-applies-for-graduation/). [Isovalent](https://isovalent.com/), the company behind Cilium also offers enterprise support for the Cilium CNI. In this lab, we are going to look at some of the enterprise features.
9 |
10 |
11 | ## {{% task %}} Create a Kubernetes Cluster and install Cilium Enterprise
12 |
13 | We are going to spin up a new Kubernetes cluster with the following command:
14 |
15 |
16 | ```bash
17 | minikube start --network-plugin=cni --cni=false --kubernetes-version={{% param "kubernetesVersion" %}} -p cilium-enterprise
18 | ```
19 |
20 | Now check that everything is up and running:
21 |
22 | ```bash
23 | kubectl get node
24 | ```
25 |
26 | This should produce a similar output:
27 |
28 | ```
29 | NAME STATUS ROLES AGE VERSION
30 | cilium-enterprise Ready control-plane,master 86s v{{% param "kubernetesVersion" %}}
31 | ```
32 |
33 | Alright, everything is up and running and we can continue with the Cilium Enterprise installation. First, we need to add the Helm chart repository:
34 |
35 | ```bash
36 | helm repo add isovalent https://....
37 | ```
38 |
39 | {{% alert title="Note" color="primary" %}}
40 | Your trainer will provide you with the Helm chart url.
41 | {{% /alert %}}
42 |
43 |
44 | Next, create a `cilium-enterprise-values.yaml` file with the following content:
45 |
46 | ```yaml
47 | cilium:
48 | hubble:
49 | enabled: false
50 | relay:
51 | enabled: false
52 | nodeinit:
53 | enabled: true
54 | ipam:
55 | mode: cluster-pool
56 | hubble-enterprise:
57 | enabled: false
58 | enterprise:
59 | enabled: false
60 | hubble-ui:
61 | enabled: false
62 | ```
63 |
64 | And then install Cilium enterprise with Helm:
65 |
66 | ```bash
67 | helm install cilium-enterprise isovalent/cilium-enterprise --version {{% param "ciliumVersion.enterprise" %}} \
68 | --namespace kube-system -f cilium-enterprise-values.yaml
69 | ```
70 |
71 |
72 | To confirm that the cilium daemonset is running Cilium Enterprise, execute the following command and verify that the container image for `cilium-agent` comes from `quay.io/isovalent/cilium`:
73 |
74 | ```bash
75 | kubectl get ds -n kube-system cilium -o jsonpath='{.spec.template.spec.containers[0].image}' | cut -d: -f1
76 | ```
77 |
78 | Run the following command and validate that cilium daemonset is up and running:
79 |
80 | ```bash
81 | kubectl get ds -n kube-system cilium
82 | ```
83 |
84 | This should give you an output similar to this:
85 |
86 | ```
87 | NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
88 | cilium 1 1 1 1 1 91s
89 | ```
90 |
91 |
--------------------------------------------------------------------------------
/content/en/docs/09/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Load-balancing with Global Services"
3 | weight: 92
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 | This lab will guide you through load-balancing and service discovery across multiple Kubernetes clusters.
8 |
9 |
10 | ## {{% task %}} Load-balancing with Global Services
11 |
12 | Establishing load-balancing between clusters is achieved by defining a Kubernetes Service with an identical name and Namespace in each cluster and adding the annotation `io.cilium/global-service: "true"` to declare it global. Cilium will then automatically load-balance requests to pods in both clusters.
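
For illustration, the only thing that distinguishes a global Service from a regular one is that annotation. A minimal sketch could look like this (name, port and selector are illustrative; the exact manifest used in this lab is the `svc.yaml` below):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: rebel-base
  annotations:
    io.cilium/global-service: "true"  # tells Cilium to load-balance across all connected clusters
spec:
  type: ClusterIP
  ports:
    - port: 80
  selector:
    name: rebel-base  # illustrative; must match the labels of the rebel-base pods
```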
13 |
14 | We are going to deploy a global service and a sample application on both of our connected clusters.
15 |
16 | First the Kubernetes service. Create a file `svc.yaml` with the following content:
17 |
18 | {{< readfile file="/content/en/docs/09/02/svc.yaml" code="true" lang="yaml" >}}
19 |
20 | Apply this with:
21 |
22 | ```bash
23 | kubectl --context cluster1 apply -f svc.yaml
24 | kubectl --context cluster2 apply -f svc.yaml
25 | ```
26 |
27 | Then deploy our sample application on both clusters.
28 |
29 | `cluster1.yaml`:
30 |
31 | {{< readfile file="/content/en/docs/09/02/cluster1.yaml" code="true" lang="yaml" >}}
32 |
33 | ```bash
34 | kubectl --context cluster1 apply -f cluster1.yaml
35 | ```
36 |
37 | `cluster2.yaml`:
38 |
39 | {{< readfile file="/content/en/docs/09/02/cluster2.yaml" code="true" lang="yaml" >}}
40 |
41 | ```bash
42 | kubectl --context cluster2 apply -f cluster2.yaml
43 | ```
44 |
45 | Now you can execute the following command from either cluster (there are two x-wing pods, simply select one):
46 |
47 | ```bash
48 | XWINGPOD=$(kubectl --context cluster1 get pod -l name=x-wing -o jsonpath="{.items[0].metadata.name}")
49 | for i in {1..10}; do
50 | kubectl --context cluster1 exec -it $XWINGPOD -- curl -m 1 rebel-base
51 | done
52 | ```
53 |
54 | as a result you get the following output:
55 |
56 | ```
57 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
58 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
59 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
60 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
61 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
62 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
63 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
64 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
65 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"}
66 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
67 | ```
68 |
69 | and as you see, you get results from both clusters. Even if you scale down your `rebel-base` Deployment on `cluster1` with
70 |
71 | ```bash
72 | kubectl --context cluster1 scale deployment rebel-base --replicas=0
73 | ```
74 |
75 | and then execute the `curl` `for` loop again, you still get answers, this time only from `cluster2`:
76 |
77 | ```
78 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
79 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
80 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
81 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
82 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
83 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
84 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
85 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
86 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
87 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"}
88 |
89 | ```
90 |
91 | Scale your `rebel-base` Deployment back to one replica:
92 |
93 | ```bash
94 | kubectl --context cluster1 scale deployment rebel-base --replicas=1
95 | ```
96 |
--------------------------------------------------------------------------------
/content/en/docs/07/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "DNS-aware Network Policy"
3 | weight: 71
4 | ---
5 |
6 |
7 | ## {{% task %}} Create and use a DNS-aware Network Policy
8 |
9 | In this task, we want to keep our backend pods from reaching anything except the FQDN `kubernetes.io`.
10 |
11 | First we store the `backend` Pod name into an environment variable:
12 |
13 | ```bash
14 | BACKEND=$(kubectl get pods -l app=backend -o jsonpath='{.items[0].metadata.name}')
15 | echo ${BACKEND}
16 | ```
17 |
18 | and then let us check if we can reach `https://kubernetes.io` and `https://cilium.io`:
19 |
20 | ```bash
21 | kubectl exec -ti ${BACKEND} -- curl -Ik --connect-timeout 5 https://kubernetes.io | head -1
22 | ```
23 |
24 | ```bash
25 | kubectl exec -ti ${BACKEND} -- curl -Ik --connect-timeout 5 https://cilium.io | head -1
26 | ```
27 |
28 | ```
29 | # Call to https://kubernetes.io
30 | HTTP/2 200
31 | # Call to https://cilium.io
32 | HTTP/2 200
33 | ```
34 |
35 | Again, in Kubernetes, all traffic is allowed by default, and since we have not applied any egress network policy so far, connections from the backend pods are not blocked.
36 |
37 | Let us have a look at the following `CiliumNetworkPolicy`:
38 |
39 | {{< readfile file="/content/en/docs/07/01/backend-egress-allow-fqdn.yaml" code="true" lang="yaml" >}}
40 |
41 | The policy will deny all egress traffic from pods labeled `app=backend` except when the traffic is destined for `kubernetes.io` or is a DNS request (necessary for resolving `kubernetes.io` via CoreDNS).
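
In essence, the policy consists of two egress rules: a `toFQDNs` rule for `kubernetes.io` and a DNS rule towards kube-dns, which lets Cilium observe the lookups and learn the FQDN-to-IP mapping. A sketch of that structure could look like this (the listing above remains the authoritative file for this lab):

```yaml
apiVersion: cilium.io/v2
kind: CiliumNetworkPolicy
metadata:
  name: backend-egress-allow-fqdn
spec:
  endpointSelector:
    matchLabels:
      app: backend
  egress:
    # allow traffic to the resolved IP addresses of kubernetes.io
    - toFQDNs:
        - matchName: kubernetes.io
    # allow DNS lookups towards kube-dns so the FQDN rule can be enforced
    - toEndpoints:
        - matchLabels:
            k8s:io.kubernetes.pod.namespace: kube-system
            k8s-app: kube-dns
      toPorts:
        - ports:
            - port: "53"
              protocol: ANY
          rules:
            dns:
              - matchPattern: "*"
```

In the policy editor this looks as follows: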
42 |
43 | 
44 |
45 | Create the file `backend-egress-allow-fqdn.yaml` with the above content and apply the network policy:
46 |
47 | ```bash
48 | kubectl apply -f backend-egress-allow-fqdn.yaml
49 | ```
50 |
51 | and check if the `CiliumNetworkPolicy` was created:
52 |
53 | ```bash
54 | kubectl get cnp
55 | ```
56 |
57 | ```
58 | NAME AGE
59 | backend-egress-allow-fqdn 2s
60 | ```
61 |
62 | Note the usage of `cnp` (standing for `CiliumNetworkPolicy`) instead of the default netpol since we are using custom Cilium resources.
63 |
64 | And check that the traffic is now only authorized when destined for `kubernetes.io`:
65 |
66 | ```bash
67 | kubectl exec -ti ${BACKEND} -- curl -Ik --connect-timeout 5 https://kubernetes.io | head -1
68 | ```
69 |
70 | ```bash
71 | kubectl exec -ti ${BACKEND} -- curl -Ik --connect-timeout 5 https://cilium.io | head -1
72 | ```
73 |
74 | ```
75 | # Call to https://kubernetes.io
76 | HTTP/2 200
77 | # Call to https://cilium.io
78 | curl: (28) Connection timed out after 5001 milliseconds
79 | command terminated with exit code 28
80 |
81 | ```
82 | {{% alert title="Note" color="primary" %}}
83 | You can now check the `Hubble Metrics` dashboard in Grafana again. The graphs under DNS should soon show some data as well. This is because, with a Layer 7 policy, we have enabled the Envoy proxy in the Cilium agent.
84 | {{% /alert %}}
85 |
86 | With the ingress and egress policies in place on `app=backend` pods, we have implemented a simple zero-trust model for all traffic to and from our backend. In a real-world scenario, cluster administrators may leverage network policies and overlay them at all levels and for all kinds of traffic.
87 |
88 |
89 | ## {{% task %}} Cleanup
90 |
91 | To not interfere with the following labs, we are going to delete the `CiliumNetworkPolicy` again and therefore allow all egress traffic again:
92 |
93 | ```bash
94 | kubectl delete cnp backend-egress-allow-fqdn
95 | ```
96 |
--------------------------------------------------------------------------------
/.github/workflows/pr-cleanup.yaml:
--------------------------------------------------------------------------------
1 | # changeme
2 | # Change the following parts to your current installation
3 | # - Helm Target Namespace
4 | # - Repository: you might need to change the name of your repository, depending on the chosen name
5 | # - Secrets
6 | # - QUAYIO_API_TOKEN is needed for housekeeping and removing old tagged images
7 | # - KUBECONFIG_TEST content of the Kubeconfig File, Account must have access to the k8s namespace. It's used in the helm steps
8 | name: PRCleanup
9 | on:
10 | pull_request:
11 | types: [closed]
12 |
13 | jobs:
14 | pr-cleanup:
15 | runs-on: 'ubuntu-latest'
16 | steps:
17 | -
18 | name: Checkout
19 | uses: actions/checkout@v4
20 | with:
21 | submodules: recursive
22 | -
23 | name: 'Install Helm'
24 | uses: azure/setup-helm@v4
25 | with:
26 | version: v3.6.2
27 | -
28 | name: Install Kubectl
29 | uses: azure/setup-kubectl@v4
30 | with:
31 | version: v1.21.2
32 | -
33 | name: Create KUBECONFIG
34 | env:
35 | KUBE_CONFIG: '${{ secrets.KUBECONFIG_K8S_ACEND_TEST }}'
36 | run: |
37 | mkdir -p $HOME/.kube
38 | echo "$KUBE_CONFIG" > $HOME/.kube/config
39 | -
40 | name: Remove PR Environment Helm Release
41 | env:
42 | TRAINING_HELM_RELEASE: 'pr-${{ github.event.pull_request.number }}'
43 | TRAINING_NAMESPACE: 'acend-cilium-basics-training-test'
44 | TRAINING_VERSION: '${{ github.sha }}'
45 | run: |
46 | helm uninstall $TRAINING_HELM_RELEASE --kubeconfig $HOME/.kube/config --namespace=$TRAINING_NAMESPACE
47 | -
48 | name: Delete Tags on Quay
49 | id: delete_tags
50 | env:
51 | PR_NUMBER: '${{ github.event.pull_request.number }}'
52 | REPOSITORY: '${{ github.repository }}'
53 | run: |
54 | curl -X DELETE -H "Authorization: Bearer ${{ secrets.QUAYIO_API_TOKEN }}" https://quay.io/api/v1/repository/${REPOSITORY}/tag/pr-${PR_NUMBER}{,-puzzle,-techlab}
55 | -
56 | name: Delete PR Tag on ghcr.io
57 | env:
58 | PR_NUMBER: '${{ github.event.pull_request.number }}'
59 | REPOSITORY: '${{ github.repository }}'
60 | run : |
61 | ORG=$(echo $REPOSITORY | cut -d "/" -f1)
62 | REPO=$(echo $REPOSITORY | cut -d "/" -f2)
63 | for PACKAGE_VERSION_ID in $(curl -s -H "Authorization: Bearer ${{ secrets.GH_PAT_DELETE_PACKAGES }}" https://api.github.com/orgs/${ORG}/packages/container/${REPO}/versions | jq --arg PR pr-$PR_NUMBER '.[] | select (.metadata.container.tags[] | contains ($PR)) | .id')
64 | do
65 | echo "Delete package with id ${PACKAGE_VERSION_ID}"
66 | curl -X DELETE -H "Authorization: Bearer ${{ secrets.GH_PAT_DELETE_PACKAGES }}" https://api.github.com/orgs/${ORG}/packages/container/${REPO}/versions/${PACKAGE_VERSION_ID}
67 | done
68 | -
69 | name: Delete untagged on ghcr.io
70 | env:
71 | PR_NUMBER: '${{ github.event.pull_request.number }}'
72 | REPOSITORY: '${{ github.repository }}'
73 | run : |
74 | ORG=$(echo $REPOSITORY | cut -d "/" -f1)
75 | REPO=$(echo $REPOSITORY | cut -d "/" -f2)
76 | for PACKAGE_VERSION_ID in $(curl -s -H "Authorization: Bearer ${{ secrets.GH_PAT_DELETE_PACKAGES }}" https://api.github.com/orgs/${ORG}/packages/container/${REPO}/versions | jq '.[] | select( (.metadata.container.tags | length) == 0) | .id')
77 | do
78 | echo "Delete untagged package with id ${PACKAGE_VERSION_ID}"
79 | curl -X DELETE -H "Authorization: Bearer ${{ secrets.GH_PAT_DELETE_PACKAGES }}" https://api.github.com/orgs/${ORG}/packages/container/${REPO}/versions/${PACKAGE_VERSION_ID}
80 | done
81 |
--------------------------------------------------------------------------------
/content/en/docs/13/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Network Visibility with Hubble"
3 | weight: 131
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 |
8 | ## {{% task %}} Enable Hubble, Hubble Relay, and Hubble UI
9 |
10 | Edit your `cilium-enterprise-values.yaml` file so that it reads:
11 |
12 | ```yaml
13 | cilium:
14 | (...)
15 | extraConfig:
16 | # Disable Hubble flow export.
17 | export-file-path: ""
18 | hubble:
19 | enabled: true
20 | tls:
21 | enabled: true
22 | relay:
23 | enabled: true
24 | (...)
25 | hubble-ui:
26 | enabled: true
27 | ```
28 |
29 | Then, run the helm upgrade command to apply the new configuration:
30 |
31 | {{% alert title="Note" color="primary" %}}
32 | Running the helm upgrade command below will restart the Cilium DaemonSet.
33 | {{% /alert %}}
34 |
35 | ```bash
36 | helm upgrade cilium-enterprise isovalent/cilium-enterprise --version {{% param "ciliumVersion.enterprise" %}} \
37 | --namespace kube-system -f cilium-enterprise-values.yaml --wait
38 | ```
39 |
40 |
41 | ## {{% task %}} Deploy a simple application
42 |
43 | To actually see something with Hubble, we first deploy our `simple-app.yaml` from lab 03 again to run some experiments. Run the following command using that file:
44 |
45 | ```bash
46 | kubectl apply -f simple-app.yaml
47 | ```
48 |
49 | Now let us redo the task from lab 03.
50 |
51 | Let's make life a bit easier again by storing the Pod names in environment variables so we can reuse them later:
52 |
53 | ```bash
54 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
55 | echo ${FRONTEND}
56 | NOT_FRONTEND=$(kubectl get pods -l app=not-frontend -o jsonpath='{.items[0].metadata.name}')
57 | echo ${NOT_FRONTEND}
58 | ```
59 |
60 | Then execute
61 |
62 | ```bash
63 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
64 | ```
65 |
66 | and
67 |
68 | ```bash
69 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
70 | ```
71 |
72 | You see that although we have no `kube-proxy` running, the backend service can still be reached.
73 |
74 | ```
75 | HTTP/1.1 200 OK
76 | X-Powered-By: Express
77 | Vary: Origin, Accept-Encoding
78 | Access-Control-Allow-Credentials: true
79 | Accept-Ranges: bytes
80 | Cache-Control: public, max-age=0
81 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
82 | ETag: W/"83d-7438674ba0"
83 | Content-Type: text/html; charset=UTF-8
84 | Content-Length: 2109
85 | Date: Tue, 14 Dec 2021 10:01:16 GMT
86 | Connection: keep-alive
87 |
88 | HTTP/1.1 200 OK
89 | X-Powered-By: Express
90 | Vary: Origin, Accept-Encoding
91 | Access-Control-Allow-Credentials: true
92 | Accept-Ranges: bytes
93 | Cache-Control: public, max-age=0
94 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
95 | ETag: W/"83d-7438674ba0"
96 | Content-Type: text/html; charset=UTF-8
97 | Content-Length: 2109
98 | Date: Tue, 14 Dec 2021 10:01:16 GMT
99 | Connection: keep-alive
100 | ```
101 |
102 |
103 | ## {{% task %}} Access Hubble UI
104 |
105 | To access Hubble UI, forward a local port to the Hubble UI service:
106 |
107 | ```bash
108 | kubectl port-forward -n kube-system svc/hubble-ui 12000:80 &
109 | ```
110 |
111 | In our Webshell environment you can use the public IP of the VM to access Hubble. A simple way is to execute
112 |
113 | ```bash
114 | echo "http://$(curl -s ifconfig.me):12000"
115 | ```
116 | and copy the output in a new browser tab. If you are working locally, open your browser and go to http://localhost:12000/.
117 |
118 | 
119 |
120 | Select the `default` Namespace and go to Service Map:
121 |
122 | 
123 |
124 | You should see our already deployed simple app with the `frontend`, `not-frontend` and `backend` Pods.
125 |
--------------------------------------------------------------------------------
/config/_default/config.toml:
--------------------------------------------------------------------------------
1 | baseurl = "/"
2 | title = "Cilium Basics Training"
3 |
4 | enableGitInfo = true
5 | enableEmoji = true
6 |
7 | languageCode = "en-us"
8 |
9 | contentDir = "content/en"
10 |
11 | [minify]
12 | disableSVG = true
13 |
14 | # default content language
15 | defaultContentLanguage = "en"
16 | defaultContentLanguageInSubdir = false
17 |
18 | disableKinds = ["taxonomy", "taxonomyTerm"]
19 |
20 | # Highlighting config
21 | pygmentsCodeFences = true
22 | pygmentsUseClasses = false
23 | # Use the new Chroma Go highlighter in Hugo.
24 | pygmentsUseClassic = false
25 | #pygmentsOptions = "linenos=table"
26 | # See https://help.farbox.com/pygments.html
27 | pygmentsStyle = "tango"
28 |
29 | # Configure how URLs look like per section.
30 | [permalinks]
31 | blog = "/:section/:year/:month/:day/:slug/"
32 |
33 | ## Configuration for BlackFriday markdown parser: https://github.com/russross/blackfriday
34 | [blackfriday]
35 | plainIDAnchors = true
36 | hrefTargetBlank = true
37 | angledQuotes = false
38 | latexDashes = true
39 |
40 | # Image processing configuration.
41 | [imaging]
42 | resampleFilter = "CatmullRom"
43 | quality = 75
44 | anchor = "smart"
45 |
46 | [Languages]
47 | [Languages.en]
48 | title = "Cilium Basic Training"
49 | languageName = "English"
50 | weight = 1
51 |
52 | # navigation
53 | [Languages.en.menu]
54 |
55 | [[Languages.en.menu.main]]
56 | weight = 10
57 | name = "acend gmbh"
58 | url = "https://www.acend.ch"
59 |
60 | [markup]
61 | [markup.goldmark]
62 | [markup.goldmark.renderer]
63 | unsafe = true
64 | [markup.highlight]
65 | # See a complete list of available styles at https://xyproto.github.io/splash/docs/all.html
66 | style = "tango"
67 | # Uncomment if you want your chosen highlight style used for code blocks without a specified language
68 | # guessSyntax = "true"
69 |
70 | [params]
71 | automaticSectionNumbers = true
72 | copyright = "acend gmbh"
73 | github_repo = "https://github.com/acend/cilium-basics-training"
74 | github_branch = "main"
75 |
76 | enabledModule = "base"
77 |
78 | kubernetesVersion = "1.24.3"
79 | hubbleVersion = "0.11.1"
80 |
81 | [params.ciliumVersion]
82 | preUpgrade = "1.11.7"
83 | postUpgrade = "1.12.10"
84 | cli = "0.12.12"
85 | enterprise = "1.12.7"
86 |
87 | # Enable Lunr.js offline search
88 | offlineSearch = true
89 |
90 | [params.ui]
91 | # Enable to show the side bar menu in its compact state.
92 | sidebar_menu_compact = false
93 | # Set to true to disable breadcrumb navigation.
94 | breadcrumb_disable = false
95 | # Set to true to hide the sidebar search box (the top nav search box will still be displayed if search is enabled)
96 | sidebar_search_disable = false
97 | # Set to false if you don't want to display a logo (/assets/icons/logo.svg) in the top nav bar
98 | navbar_logo = true
99 | # Set to true to disable the About link in the site footer
100 | footer_about_enable = false
101 |
102 | ############################## social links ##############################
103 | [params.links]
104 | [[params.links.developer]]
105 | name = "GitHub"
106 | icon = "fab fa-github"
107 | url = "https://github.com/acend/cilium-basics-training"
108 |
109 | [[params.links.user]]
110 | name = "Twitter"
111 | icon = "fab fa-twitter"
112 | url = "https://twitter.com/acendch"
113 |
114 | [[params.links.user]]
115 | name = "LinkedIn"
116 | icon = "fab fa-linkedin-in"
117 | url = "https://linkedin.com/company/acendch/"
118 |
119 | [module]
120 | [module.hugoVersion]
121 | extended = true
122 | min = "0.100.0"
123 | [[module.imports]]
124 | path = "github.com/acend/docsy-acend"
125 | disable = false
126 | [[module.imports]]
127 | path = "github.com/acend/docsy-plus"
128 | disable = false
129 | [[module.imports]]
130 | path = "github.com/google/docsy"
131 | disable = false
132 | [[module.imports]]
133 | path = "github.com/google/docsy/dependencies"
134 | disable = false
135 |
--------------------------------------------------------------------------------
/.github/workflows/push-main.yaml:
--------------------------------------------------------------------------------
1 | # changeme
2 | # Change the following parts to your current installation
3 | # - entire Docker Build steps according to the number of different versions or tags you are building (TRAINING_HUGO_ENV)
4 | # - Tags
5 | # - Helm Target Namespace
6 | # - Name of K8S Deployment to trigger and namespace
7 | # - Secrets
8 | # - QUAYIO_USERNAME and QUAYIO_TOKEN must be set accordingly to your registry, existing users must have access to the repos
9 | # - KUBECONFIG content of the Kubeconfig File, Account must have access to the k8s namespace. It's used in the helm steps
10 |
11 | name: Publish Main Version
12 |
13 | on:
14 | push:
15 | branches:
16 | - main
17 | - master
18 |
19 | jobs:
20 | build:
21 | runs-on: ubuntu-latest
22 | steps:
23 | -
24 | name: Checkout
25 | uses: actions/checkout@v4
26 | with:
27 | submodules: recursive
28 | -
29 | name: Set up npm for linting
30 | uses: actions/setup-node@v4
31 | with:
32 | node-version: '18.x'
33 | -
34 | name: Lint Markdown
35 | run: npm ci && npm run mdlint
36 | -
37 | name: Set up QEMU
38 | uses: docker/setup-qemu-action@v3
39 | -
40 | name: Set up Docker Buildx
41 | uses: docker/setup-buildx-action@v3
42 | -
43 | name: Login to Quay.io Container Registry
44 | uses: docker/login-action@v3
45 | with:
46 | registry: quay.io
47 | username: ${{ secrets.QUAYIO_USERNAME }}
48 | password: ${{ secrets.QUAYIO_TOKEN }}
49 | -
50 | name: Log in to the ghcr.io Container registry
51 | uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1
52 | with:
53 | registry: ghcr.io
54 | username: ${{ github.actor }}
55 | password: ${{ secrets.GITHUB_TOKEN }}
56 | -
57 | name: Build Latest Version
58 | uses: docker/build-push-action@v6
59 | with:
60 | context: .
61 | file: ./Dockerfile
62 | push: true
63 | tags: |
64 | quay.io/acend/cilium-basics-training:latest
65 | ghcr.io/acend/cilium-basics-training:latest
66 | -
67 | name: Build Techlab Version
68 | uses: docker/build-push-action@v6
69 | with:
70 | context: .
71 | file: ./Dockerfile
72 | build-args: |
73 | TRAINING_HUGO_ENV=techlab
74 | push: true
75 | tags: |
76 | quay.io/acend/cilium-basics-training:latest-techlab
77 | ghcr.io/acend/cilium-basics-training:latest-techlab
78 | -
79 | name: 'Install Helm'
80 | uses: azure/setup-helm@v4
81 | with:
82 | version: v3.6.2
83 | -
84 | name: Install Kubectl
85 | uses: azure/setup-kubectl@v4
86 | with:
87 | version: v1.21.2
88 | -
89 | name: Create KUBECONFIG
90 | env:
91 | KUBE_CONFIG: '${{ secrets.KUBECONFIG_K8S_ACEND }}'
92 | run: |
93 | mkdir -p $HOME/.kube
94 | echo "$KUBE_CONFIG" > $HOME/.kube/config
95 | -
96 | name: Deploy Helm Release
97 | env:
98 | TRAINING_HELM_RELEASE: 'latest'
99 | TRAINING_NAMESPACE: 'acend-cilium-basics-training-prod'
100 | TRAINING_VERSION: '${{ github.sha }}'
101 | run: |
102 | helm upgrade $TRAINING_HELM_RELEASE acend-training-chart --install --wait --kubeconfig $HOME/.kube/config --namespace=$TRAINING_NAMESPACE --set=app.name=$TRAINING_HELM_RELEASE --set=app.version=$TRAINING_VERSION --repo=https://acend.github.io/helm-charts/ --values=helm-chart/values.yaml --atomic
103 | -
104 | name: Redeploy Deployments
105 | env:
106 | TRAINING_HELM_RELEASE: 'latest'
107 | TRAINING_HELM_NAME: 'cilium-basics-training'
108 | TRAINING_NAMESPACE: 'acend-cilium-basics-training-prod'
109 | run: |
110 | kubectl rollout restart deployment/${TRAINING_HELM_RELEASE}-${TRAINING_HELM_NAME}-acend --kubeconfig $HOME/.kube/config --namespace $TRAINING_NAMESPACE
111 | kubectl rollout restart deployment/${TRAINING_HELM_RELEASE}-${TRAINING_HELM_NAME}-techlab --kubeconfig $HOME/.kube/config --namespace $TRAINING_NAMESPACE
112 |
--------------------------------------------------------------------------------
/content/en/docs/01/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "eBPF"
3 | weight: 11
4 | ---
5 |
6 | ## What is eBPF
7 |
8 | (from https://ebpf.io/)
9 |
10 | eBPF is a revolutionary technology with origins in the Linux kernel that can run sandboxed programs in an operating system kernel. It is used to safely and efficiently extend the capabilities of the kernel without requiring changes to kernel source code or loading kernel modules.
11 |
12 | Historically, the operating system has always been an ideal place to implement observability, security, and networking functionality due to the kernel’s privileged ability to oversee and control the entire system. At the same time, an operating system kernel is hard to evolve due to its central role and high requirement towards stability and security. The rate of innovation at the operating system level has thus traditionally been lower compared to functionality implemented outside of the operating system.
13 |
14 | 
15 |
16 | eBPF changes this formula fundamentally. By allowing sandboxed programs to run within the operating system, application developers can run eBPF programs to add additional capabilities to the operating system at runtime. The operating system then guarantees safety and execution efficiency as if natively compiled with the aid of a Just-In-Time (JIT) compiler and verification engine. This has led to a wave of eBPF-based projects covering a wide array of use cases, including next-generation networking, observability, and security functionality.
17 |
18 | Today, eBPF is used extensively to drive a wide variety of use cases: Providing high-performance networking and load-balancing in modern data centers and cloud native environments, extracting fine-grained security observability data at low overhead, helping application developers trace applications, providing insights for performance troubleshooting, preventive application and container runtime security enforcement, and much more. The possibilities are endless, and the innovation that eBPF is unlocking has only just begun.
19 |
20 |
21 | ### Security
22 |
23 | Building on the foundation of seeing and understanding all system calls and combining that with a packet and socket-level view of all networking operations allows for revolutionary new approaches to securing systems. While aspects of system call filtering, network-level filtering, and process context tracing have typically been handled by completely independent systems, eBPF allows for combining the visibility and control of all aspects to create security systems operating on more context with a better level of control.
24 | 
25 |
26 |
27 | ### Tracing & Profiling
28 |
29 | The ability to attach eBPF programs to tracepoints as well as kernel and user application probe points allows unprecedented visibility into the runtime behavior of applications and the system itself. By giving introspection abilities to both the application and system side, both views can be combined, allowing powerful and unique insights to troubleshoot system performance problems. Advanced statistical data structures allow extracting meaningful visibility data efficiently, without requiring the export of vast amounts of sampling data as typically done by similar systems.
30 |
31 | 
32 |
33 |
34 | ### Networking
35 |
36 | The combination of programmability and efficiency makes eBPF a natural fit for all packet processing requirements of networking solutions. The programmability of eBPF enables adding additional protocol parsers and easily programming any forwarding logic to meet changing requirements without ever leaving the packet processing context of the Linux kernel. The efficiency provided by the JIT compiler provides execution performance close to that of natively compiled in-kernel code.
37 |
38 | 
39 |
40 |
41 | ### Observability & Monitoring
42 |
43 | Instead of relying on static counters and gauges exposed by the operating system, eBPF enables the collection & in-kernel aggregation of custom metrics and generation of visibility events based on a wide range of possible sources. This extends the depth of visibility that can be achieved as well as reduces the overall system overhead significantly by only collecting the visibility data required and by generating histograms and similar data structures at the source of the event instead of relying on the export of samples.
44 |
45 | 
46 |
47 |
48 | ## Featured eBPF Talks
49 |
50 | {{< youtube 6N30Yp5f9c4 >}}
51 |
--------------------------------------------------------------------------------
/content/en/docs/07/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "HTTP-aware L7 Policy"
3 | weight: 72
4 | ---
5 |
6 |
7 | ## {{% task %}} Deploy a new Demo Application
8 |
9 | In this Star Wars inspired example, there are three microservices applications: deathstar, tiefighter, and xwing. The deathstar runs an HTTP webservice on port 80, which is exposed as a Kubernetes Service to load balance requests to deathstar across two Pod replicas. The deathstar service provides landing services to the empire’s spaceships so that they can request a landing port. The tiefighter Pod represents a landing-request client service on a typical empire ship and xwing represents a similar service on an alliance ship. They exist so that we can test different security policies for access control to deathstar landing services.
10 |
11 | The file `sw-app.yaml` contains a Kubernetes Deployment for each of the three services. Each deployment is identified using the Kubernetes labels (`org=empire`, `class=deathstar`), (`org=empire`, `class=tiefighter`), and (`org=alliance`, `class=xwing`). It also includes a deathstar-service, which load balances traffic to all pods with labels `org=empire` and `class=deathstar`.
12 |
13 | {{< readfile file="/content/en/docs/07/02/sw-app.yaml" code="true" lang="yaml" >}}
14 |
15 | Create and apply the file with:
16 |
17 | ```bash
18 | kubectl apply -f sw-app.yaml
19 | ```
20 |
21 | As we already have some network policies in our Namespace, the ingress behavior is default deny. Therefore we need a new network policy to access services on `deathstar`:
22 |
23 | Create a file `cnp.yaml` with the following content:
24 |
25 | {{< readfile file="/content/en/docs/07/02/cnp.yaml" code="true" lang="yaml" >}}
26 |
27 | Apply the `CiliumNetworkPolicy` with:
28 |
29 | ```bash
30 | kubectl apply -f cnp.yaml
31 | ```
32 |
33 | With this policy, our `tiefighter` has access to the `deathstar` application. You can verify this with:
34 |
35 | ```bash
36 | kubectl exec tiefighter -- curl -m 2 -s -XPOST deathstar.default.svc.cluster.local/v1/request-landing
37 | ```
38 |
39 | ```
40 | Ship landed
41 | ```
42 |
43 | but the `xwing` does not have access:
44 |
45 | ```bash
46 | kubectl exec xwing -- curl -m 2 -s -XPOST deathstar.default.svc.cluster.local/v1/request-landing
47 | ```
48 |
49 | ```
50 | command terminated with exit code 28
51 | ```
52 |
53 |
54 | ## {{% task %}} Apply and Test HTTP-aware L7 Policy
55 |
56 | In the simple scenario above, it was sufficient to either give tiefighter / xwing full access to deathstar’s API or no access at all. But to provide the strongest security (i.e., enforce least-privilege isolation) between microservices, each service that calls deathstar’s API should be limited to making only the set of HTTP requests it requires for legitimate operation.
57 |
58 | For example, consider that the deathstar service exposes some maintenance APIs that should not be called by random empire ships. To see this run:
59 |
60 | ```bash
61 | kubectl exec tiefighter -- curl -s -XPUT deathstar.default.svc.cluster.local/v1/exhaust-port
62 | ```
63 |
64 | ```
65 | Panic: deathstar exploded
66 |
67 | goroutine 1 [running]:
68 | main.HandleGarbage(0x2080c3f50, 0x2, 0x4, 0x425c0, 0x5, 0xa)
69 | /code/src/github.com/empire/deathstar/
70 | temp/main.go:9 +0x64
71 | main.main()
72 | /code/src/github.com/empire/deathstar/
73 | temp/main.go:5 +0x85
74 | ```
75 |
76 | Cilium is capable of enforcing HTTP-layer (i.e., L7) policies to limit what URLs the tiefighter is allowed to reach. Here is an example policy file that extends our original policy by limiting tiefighter to making only a POST /v1/request-landing API call, but disallowing all other calls (including PUT /v1/exhaust-port).
77 |
78 | Create a file `cnp-l7.yaml` with the following content:
79 |
80 | {{< readfile file="/content/en/docs/07/02/cnp-l7.yaml" code="true" lang="yaml" >}}
81 |
82 | Update the existing rule to apply the L7-aware policy to protect `deathstar` with:
83 |
84 | ```bash
85 | kubectl apply -f cnp-l7.yaml
86 | ```
87 |
88 | We can now re-run the same test as above, but we will see a different outcome:
89 |
90 | ```bash
91 | kubectl exec tiefighter -- curl -s -XPOST deathstar.default.svc.cluster.local/v1/request-landing
92 | ```
93 |
94 | ```
95 | Ship landed
96 | ```
97 |
98 | and
99 |
100 | ```bash
101 | kubectl exec tiefighter -- curl -s -XPUT deathstar.default.svc.cluster.local/v1/exhaust-port
102 | ```
103 |
104 | ```
105 | Access denied
106 | ```
107 |
108 | {{% alert title="Note" color="primary" %}}
109 | You can now check the `Hubble` dashboard in Grafana again. The graphs under HTTP should soon show some data as well. To generate more data, just request landing on `deathstar` a few times with the `tiefighter`.
110 | {{% /alert %}}
111 |
--------------------------------------------------------------------------------
/.github/workflows/build.yaml:
--------------------------------------------------------------------------------
1 | # changeme
2 | # Change the following parts to your current installation
3 | # - entire Docker Build steps according to the number of different versions or tags you are building (TRAINING_HUGO_ENV)
4 | # - Tags
5 | # - Helm Target Namespace
6 | # - Name of K8S Deployment to trigger and namespace
7 | # - URL that gets commented on the PR
8 | # - Secrets
9 | # - QUAYIO_USERNAME and QUAYIO_TOKEN must be set accordingly to your registry, existing users must have access to the repos
10 | # - KUBECONFIG_TEST content of the Kubeconfig File, Account must have access to the k8s namespace. It's used in the helm steps
11 | name: Build Training and Publish
12 |
13 | on:
14 | pull_request:
15 |
16 | jobs:
17 | build:
18 | runs-on: ubuntu-latest
19 | steps:
20 | -
21 | name: Checkout
22 | uses: actions/checkout@v4
23 | with:
24 | submodules: recursive
25 | -
26 | name: Set up npm for linting
27 | uses: actions/setup-node@v4
28 | with:
29 | node-version: '18.x'
30 | -
31 | name: Lint Markdown
32 | run: npm ci && npm run mdlint
33 | -
34 | name: Set up QEMU
35 | uses: docker/setup-qemu-action@v3
36 | -
37 | name: Set up Docker Buildx
38 | uses: docker/setup-buildx-action@v3
39 | -
40 | name: Login to Quay.io Container Registry
41 | uses: docker/login-action@v3
42 | with:
43 | registry: quay.io
44 | username: ${{ secrets.QUAYIO_USERNAME }}
45 | password: ${{ secrets.QUAYIO_TOKEN }}
46 | -
47 | name: Log in to the ghcr.io Container registry
48 | uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1
49 | with:
50 | registry: ghcr.io
51 | username: ${{ github.actor }}
52 | password: ${{ secrets.GITHUB_TOKEN }}
53 |
54 | -
55 | name: Build Latest Version
56 | uses: docker/build-push-action@v6
57 | with:
58 | context: .
59 | file: ./Dockerfile
60 | push: true
61 | tags: |
62 | quay.io/acend/cilium-basics-training:pr-${{ github.event.pull_request.number }}
63 | ghcr.io/acend/cilium-basics-training:pr-${{ github.event.pull_request.number }}
64 | -
65 | name: Build Techlab Version
66 | uses: docker/build-push-action@v6
67 | with:
68 | context: .
69 | file: ./Dockerfile
70 | build-args: |
71 | TRAINING_HUGO_ENV=techlab
72 | push: true
73 | tags: |
74 | quay.io/acend/cilium-basics-training:pr-${{ github.event.pull_request.number }}-techlab
75 | ghcr.io/acend/cilium-basics-training:pr-${{ github.event.pull_request.number }}-techlab
76 | -
77 | name: 'Install Helm'
78 | uses: azure/setup-helm@v4
79 | with:
80 | version: v3.6.2
81 | -
82 | name: Install Kubectl
83 | uses: azure/setup-kubectl@v4
84 | with:
85 | version: v1.21.2
86 | -
87 | name: Create KUBECONFIG
88 | env:
89 | KUBE_CONFIG: '${{ secrets.KUBECONFIG_K8S_ACEND_TEST }}'
90 | run: |
91 | mkdir -p $HOME/.kube
92 | echo "$KUBE_CONFIG" > $HOME/.kube/config
93 | -
94 | name: Deploy Helm Release
95 | env:
96 | TRAINING_HELM_RELEASE: 'pr-${{ github.event.pull_request.number }}'
97 | TRAINING_NAMESPACE: 'acend-cilium-basics-training-test'
98 | TRAINING_VERSION: '${{ github.sha }}'
99 | run: |
100 | helm upgrade $TRAINING_HELM_RELEASE acend-training-chart --install --wait --kubeconfig $HOME/.kube/config --namespace=$TRAINING_NAMESPACE --set=app.name=$TRAINING_HELM_RELEASE --set=app.version=$TRAINING_VERSION --repo=https://acend.github.io/helm-charts/ --values=helm-chart/values.yaml --atomic
101 | -
102 | name: Redeploy Deployments
103 | env:
104 | TRAINING_HELM_RELEASE: 'pr-${{ github.event.pull_request.number }}'
105 | TRAINING_HELM_NAME: 'cilium-basics-training'
106 | TRAINING_NAMESPACE: 'acend-cilium-basics-training-test'
107 | run: |
108 | kubectl rollout restart deployment/${TRAINING_HELM_RELEASE}-${TRAINING_HELM_NAME}-acend --kubeconfig $HOME/.kube/config --namespace $TRAINING_NAMESPACE
109 | kubectl rollout restart deployment/${TRAINING_HELM_RELEASE}-${TRAINING_HELM_NAME}-techlab --kubeconfig $HOME/.kube/config --namespace $TRAINING_NAMESPACE
110 | -
111 | name: Comment PR Environments in PR
112 | uses: marocchino/sticky-pull-request-comment@v2
113 | with:
114 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
115 | message: |
116 | PR Environments:
117 | * acend version
118 | * techlab version
119 |
--------------------------------------------------------------------------------
/content/en/docs/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Introduction"
3 | weight: 1
4 | ---
5 |
6 |
7 | (from https://docs.cilium.io/en/v1.9/intro/)
8 |
9 |
10 | ## What is Cilium?
11 |
12 | Cilium is open source software for transparently securing the network connectivity between application services deployed using Linux container management platforms like Docker and Kubernetes.
13 |
14 | At the foundation of Cilium is a new kernel technology called [eBPF](https://ebpf.io/), which enables the dynamic insertion of powerful security visibility and control logic within Linux itself. Because eBPF runs inside the Linux kernel, Cilium security policies can be applied and updated without any changes to the application code or container configuration.
15 |
16 |
17 | ## What is Hubble?
18 |
19 | Hubble is a fully distributed networking and security observability platform. It is built on top of Cilium and eBPF to enable deep visibility into the communication and behavior of services as well as the networking infrastructure in a completely transparent manner.
20 |
21 | By building on top of Cilium, Hubble can leverage eBPF for visibility. By relying on eBPF, all visibility is programmable and allows for a dynamic approach that minimizes overhead while providing deep and detailed visibility as required by users. Hubble has been created and specifically designed to make best use of these new eBPF powers.
22 |
23 | Hubble can answer questions such as:
24 |
25 |
26 | ### Service dependencies & communication map
27 |
28 | * What services are communicating with each other? How frequently? What does the service dependency graph look like?
29 | * What HTTP calls are being made? What Kafka topics does a service consume from or produce to?
30 |
31 |
32 | ### Network monitoring & alerting
33 |
34 | * Is any network communication failing? Why is communication failing? Is it DNS? Is it an application or network problem? Is the communication broken on layer 4 (TCP) or layer 7 (HTTP)?
35 | * Which services have experienced a DNS resolution problem in the last 5 minutes? Which services have experienced an interrupted TCP connection recently or have seen connections timing out? What is the rate of unanswered TCP SYN requests?
36 |
37 |
38 | ### Application monitoring
39 |
40 | * What is the rate of 5xx or 4xx HTTP response codes for a particular service or across all clusters?
41 | * What is the 95th and 99th percentile latency between HTTP requests and responses in my cluster? Which services are performing the worst? What is the latency between two services?
42 |
43 |
44 | ### Security observability
45 |
46 | * Which services had connections blocked due to network policy? What services have been accessed from outside the cluster? Which services have resolved a particular DNS name?
47 |
48 |
49 | ## Why Cilium & Hubble?
50 |
51 | eBPF is enabling visibility into and control over systems and applications at a granularity and efficiency that was not possible before. It does so in a completely transparent way, without requiring the application to change in any way. eBPF is equally well-equipped to handle modern containerized workloads as well as more traditional workloads such as virtual machines and standard Linux processes.
52 |
53 | The development of modern datacenter applications has shifted to a service-oriented architecture often referred to as microservices, wherein a large application is split into small independent services that communicate with each other via APIs using lightweight protocols like HTTP. Microservices applications tend to be highly dynamic, with individual containers getting started or destroyed as the application scales out / in to adapt to load changes and during rolling updates that are deployed as part of continuous delivery.
54 |
55 | This shift toward highly dynamic microservices presents both a challenge and an opportunity in terms of securing connectivity between microservices. Traditional Linux network security approaches (e.g., iptables) filter on IP address and TCP/UDP ports, but IP addresses frequently churn in dynamic microservices environments. The highly volatile life cycle of containers causes these approaches to struggle to scale side by side with the application, as load-balancing tables and access control lists carrying hundreds of thousands of rules need to be updated with a continuously growing frequency. Protocol ports (e.g. TCP port 80 for HTTP traffic) can no longer be used to differentiate between application traffic for security purposes as the port is utilized for a wide range of messages across services.
56 |
57 | An additional challenge is the ability to provide accurate visibility as traditional systems are using IP addresses as primary identification vehicles which may have a drastically reduced lifetime of just a few seconds in microservices architectures.
58 |
59 | By leveraging Linux eBPF, Cilium retains the ability to transparently insert security visibility + enforcement but does so in a way that is based on service/pod/container identity (in contrast to IP address identification in traditional systems) and can filter on application-layer (e.g. HTTP). As a result, Cilium not only makes it simple to apply security policies in a highly dynamic environment by decoupling security from addressing but can also provide stronger security isolation by operating at the HTTP layer in addition to providing traditional Layer 3 and Layer 4 segmentation.
60 |
61 | The use of eBPF enables Cilium to achieve all of this in a way that is highly scalable even for large-scale environments.
62 |
--------------------------------------------------------------------------------
/content/en/docs/04/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Metrics"
3 | weight: 4
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 | With metrics displayed in Grafana or another UI, we can get a quick overview of our cluster state and its traffic.
8 |
9 | Both Cilium and Hubble can be configured to serve Prometheus metrics independently of each other. Cilium metrics show us the state of Cilium itself, namely of the `cilium-agent`, `cilium-envoy`, and `cilium-operator` processes.
10 | Hubble metrics on the other hand give us information about the traffic of our applications.
11 |
12 |
13 | ## {{% task %}} Enable metrics
14 |
15 | We start by enabling different metrics; for dropped and HTTP traffic we also want the metrics broken down by pod.
16 |
17 | ```bash
18 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
19 | --namespace kube-system \
20 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.1.0.0/16} \
21 | --set cluster.name=cluster1 \
22 | --set cluster.id=1 \
23 | --set operator.replicas=1 \
24 | --set upgradeCompatibility=1.11 \
25 | --set kubeProxyReplacement=disabled \
26 | --set hubble.enabled=true \
27 | --set hubble.relay.enabled=true \
28 | --set hubble.ui.enabled=true \
29 | `# enable metrics:` \
30 | --set prometheus.enabled=true \
31 | --set operator.prometheus.enabled=true \
32 | --set hubble.metrics.enabled="{dns,drop:destinationContext=pod;sourceContext=pod,tcp,flow,port-distribution,icmp,http:destinationContext=pod}"
33 | ```
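
If you prefer a Helm values file over `--set` flags, the metrics-related part of the command above could be expressed roughly like this (a sketch of the equivalent values, not a file used elsewhere in this training):

```yaml
# enable Prometheus endpoints on the agent and the operator
prometheus:
  enabled: true
operator:
  prometheus:
    enabled: true
# Hubble metrics, with drop and HTTP metrics broken down by pod
hubble:
  metrics:
    enabled:
      - dns
      - drop:destinationContext=pod;sourceContext=pod
      - tcp
      - flow
      - port-distribution
      - icmp
      - http:destinationContext=pod
```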
34 |
35 |
36 | ### Verify Cilium metrics
37 |
38 | We now verify that the Cilium agent has different metric endpoints exposed and list some of them:
39 |
40 | * hubble port 9965
41 | * cilium agent port 9962
42 | * cilium envoy port 9095
43 |
44 | ```bash
45 | CILIUM_AGENT_IP=$(kubectl get pod -n kube-system -l k8s-app=cilium -o jsonpath="{.items[0].status.hostIP}")
46 | kubectl run -n kube-system -it --env="CILIUM_AGENT_IP=${CILIUM_AGENT_IP}" --rm curl --image=curlimages/curl -- sh
47 | ```
48 | ```bash
49 | echo ${CILIUM_AGENT_IP}
50 | curl -s ${CILIUM_AGENT_IP}:9962/metrics | grep cilium_nodes_all_num #show total number of cilium nodes
51 | curl -s ${CILIUM_AGENT_IP}:9965/metrics | grep hubble_tcp_flags_total # show total number of TCP flags
52 | exit
53 | ```
54 | You should now see an output like this:
55 | ```
56 | If you don't see a command prompt, try pressing enter.
57 | echo ${CILIUM_AGENT_IP}
58 | 192.168.49.2
59 | / $ curl -s ${CILIUM_AGENT_IP}:9962/metrics | grep cilium_nodes_all_num #show total number of cilium nodes
60 | # HELP cilium_nodes_all_num Number of nodes managed
61 | # TYPE cilium_nodes_all_num gauge
62 | cilium_nodes_all_num 1
63 | / $ curl -s ${CILIUM_AGENT_IP}:9965/metrics | grep hubble_tcp_flags_total # show total number of TCP flags
64 | # HELP hubble_tcp_flags_total TCP flag occurrences
65 | # TYPE hubble_tcp_flags_total counter
66 | hubble_tcp_flags_total{family="IPv4",flag="FIN"} 2704
67 | hubble_tcp_flags_total{family="IPv4",flag="RST"} 388
68 | hubble_tcp_flags_total{family="IPv4",flag="SYN"} 1609
69 | hubble_tcp_flags_total{family="IPv4",flag="SYN-ACK"} 1549
70 | ```
71 | {{% alert title="Note" color="primary" %}}
72 | The Cilium agent pods run as DaemonSet on the HostNetwork. This means you could also directly call a node.
73 | ```bash
74 | NODE=$(kubectl get nodes --selector=kubernetes.io/role!=master -o jsonpath={.items[*].status.addresses[?\(@.type==\"InternalIP\"\)].address})
75 | curl -s $NODE:9962/metrics | grep cilium_nodes_all_num
76 | ```
77 | {{% /alert %}}
78 |
79 | {{% alert title="Note" color="primary" %}}
80 | It is not yet possible to get metrics from Cilium Envoy (port 9095). Envoy only starts on a node if there is at least one Pod with a layer 7 network policy.
81 | {{% /alert %}}
82 |
83 |
84 | ## {{% task %}} Store and visualize metrics
85 |
86 | To make sense of metrics, we store them in Prometheus and visualize them with Grafana dashboards.
87 | Install both into `cilium-monitoring` Namespace to store and visualize Cilium and Hubble metrics.
88 | ```bash
89 | kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/v1.12/examples/kubernetes/addons/prometheus/monitoring-example.yaml
90 | ```
91 |
92 | Make sure Prometheus and Grafana pods are up and running before continuing with the next step.
93 |
94 | ```bash
95 | kubectl -n cilium-monitoring get pod
96 | ```
97 | you should see both Pods in state `Running`:
98 |
99 | ```
100 | NAME READY STATUS RESTARTS AGE
101 | grafana-6c7d4c9fd8-2xdp2 1/1 Running 0 41s
102 | prometheus-55777f54d9-hkpkq 1/1 Running 0 41s
103 | ```
104 |
105 |
106 | Generate some traffic for a few minutes in the background:
107 | ```bash
108 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
109 | i=0; while [ $i -le 300 ]; do kubectl exec -ti ${FRONTEND} -- curl -Is backend:8080; sleep 1; ((i++)); done &
110 | ```
111 |
112 |
113 | In a second terminal, access Grafana with kubectl port-forward (for those in the webshell: don't forget to connect to the VM first):
114 | ```bash
115 | kubectl -n cilium-monitoring port-forward service/grafana --address ::,0.0.0.0 3000:3000 &
116 | echo "http://$(curl -s ifconfig.me):3000/dashboards"
117 | ```
118 |
119 | Now open a new tab in your browser and go to the URL from the output (for those working on their local machine, use http://localhost:3000/dashboards). In Grafana, use the left side menu: `Dashboard`, click on `Manage`, then click on `Hubble`. For a better view, you can change the timespan to the last 5 minutes.
120 |
121 | Verify that you see the generated traffic under Network, Forwarded vs Dropped Traffic. Not all graphs will have data available. This is because we have not yet used network policies or any layer 7 components. This will be done in the later chapters.
122 |
123 | Change to the `Cilium Metrics` Dashboard. Here we see information about Cilium itself. Again not all graphs contain data as we have not used all features of Cilium yet.
124 |
125 | Try to find the number of IPs allocated and the number of Cilium endpoints.
126 |
127 | Leave the Grafana Tab open, we will use it in the later chapters.
128 |
--------------------------------------------------------------------------------
/content/en/docs/11/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Cilium Service Mesh"
3 | weight: 11
4 | OnlyWhenNot: techlab
5 | ---
6 | With release 1.12, Cilium introduced direct Ingress support and service mesh features like layer 7 load balancing.
7 |
8 |
9 | ## {{% task %}} Installation
10 |
11 |
12 | ```bash
13 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
14 | --namespace kube-system \
15 | --reuse-values \
16 | --set ingressController.enabled=true \
17 | --wait
18 | ```
19 |
20 | For Kubernetes Ingress to work, `kubeProxyReplacement` needs to be set to `strict` or `partial`. This is why we stay on the `kubeless` cluster.
21 |
22 | Wait until Cilium is ready (check with `cilium status`). For Ingress to work, it is necessary to restart the agent and the operator:
23 |
24 | ```bash
25 | kubectl -n kube-system rollout restart deployment/cilium-operator
26 | kubectl -n kube-system rollout restart ds/cilium
27 | ```
28 |
29 |
30 | ## {{% task %}} Create Ingress
31 |
32 | Cilium Service Mesh can handle ingress traffic with its Envoy proxy.
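
What turns a regular Kubernetes Ingress into one handled by Cilium's Envoy is the `ingressClassName: cilium` field. A hedged sketch of such an Ingress (the actual `ingress.yaml` for this lab follows below):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: backend
spec:
  ingressClassName: cilium      # hands this Ingress to Cilium's embedded Envoy
  rules:
    - http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: backend   # the simple-app backend Service
                port:
                  number: 8080
```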
33 |
34 | We will use this feature to allow traffic to our simple app from outside the cluster. Create a file named `ingress.yaml` with the text below inside:
35 |
36 | {{< readfile file="/content/en/docs/11/ingress.yaml" code="true" lang="yaml" >}}
37 |
38 | Apply it with:
39 |
40 | ```bash
41 | kubectl apply -f ingress.yaml
42 | ```
43 |
44 | Check the ingress and the service:
45 |
46 | ```bash
47 | kubectl describe ingress backend
48 | kubectl get svc cilium-ingress-backend
49 | ```
50 | We see that Cilium created a Service of type `LoadBalancer` for our Ingress. Unfortunately, Minikube has no load balancer deployed, so in our setup the external IP will stay pending.
51 |
52 | As a workaround, we can test the service from inside Kubernetes.
53 |
54 | ```bash
55 | SERVICE_IP=$(kubectl get svc cilium-ingress-backend -ojsonpath={.spec.clusterIP})
56 | kubectl run --rm=true -it --restart=Never --image=curlimages/curl -- curl --connect-timeout 5 http://${SERVICE_IP}/public
57 | ```
58 |
59 | You should get the following output:
60 |
61 | ```
62 | [
63 | {
64 | "id": 1,
65 | "body": "public information"
66 | }
67 | ]pod "curl" deleted
68 | ```
69 |
70 |
71 | ## {{% task %}} Layer 7 Loadbalancing
72 |
73 | Ingress alone is not really a service mesh feature. Let us test a traffic control example by load balancing a service inside the proxy.
74 |
75 | Start by creating the second service. Create a file named `backend2.yaml` and put in the text below:
76 |
77 | {{< readfile file="/content/en/docs/11/backend2.yaml" code="true" lang="yaml" >}}
78 |
79 | Apply it:
80 | ```bash
81 | kubectl apply -f backend2.yaml
82 | ```
83 |
84 | Call it:
85 | ```bash
86 | kubectl run --rm=true -it --restart=Never --image=curlimages/curl -- curl --connect-timeout 3 http://backend-2:8080/public
87 | ```
88 |
89 | We see output very similar to our simple application backend, but with a different text.
90 |
91 | As layer 7 load balancing requires traffic to be routed through the proxy, we will enable this for our backend Pods using a `CiliumNetworkPolicy` with HTTP rules. We will block access to `/public` and allow requests to `/private`:
92 |
93 | Create a file `cnp-l7-sm.yaml` with the following content:
94 |
95 | {{< readfile file="/content/en/docs/11/cnp-l7-sm.yaml" code="true" lang="yaml" >}}
96 |
97 | And apply the `CiliumNetworkPolicy` with:
98 |
99 | ```bash
100 | kubectl apply -f cnp-l7-sm.yaml
101 | ```
102 |
103 | Until now, only the backend Service replies to Ingress traffic. Now we configure Envoy to load balance the traffic 50/50 between `backend` and `backend-2`, with retries.
104 | We are using a custom resource called `CiliumEnvoyConfig` for this. Create a file `envoyconfig.yaml` with the following content:
105 |
106 | {{< readfile file="/content/en/docs/11/envoyconfig.yaml" code="true" lang="yaml" >}}
107 |
108 | {{% alert title="Note" color="primary" %}}
109 | If you want to read more about Envoy configuration, the [Envoy Architectural Overview](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/http/http) is a good place to start.
110 | {{% /alert %}}
111 |
112 | Apply the `CiliumEnvoyConfig` with:
113 |
114 | ```bash
115 | kubectl apply -f envoyconfig.yaml
116 | ```
117 |
118 | Test it by running `curl` a few times -- different backends should respond:
119 |
120 | ```bash
121 | for i in {1..10}; do
122 | kubectl run --rm=true -it --image=curlimages/curl --restart=Never curl -- curl --connect-timeout 5 http://backend:8080/private
123 | done
124 | ```
125 |
126 | We see both backends replying. If you call it many times, the distribution should be roughly equal.
127 |
128 | ```
129 | [
130 | {
131 | "id": 1,
132 | "body": "another secret information from a different backend"
133 | }
134 | ]pod "curl" deleted
135 | [
136 | {
137 | "id": 1,
138 | "body": "secret information"
139 | }
140 | ]pod "curl" deleted
141 | ```
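 | 
 | If you want a rough, informal check of the 50/50 split, you can count how many responses come from the second backend (a sketch that assumes the two response bodies shown above):
 | 
 | ```bash
 | # Out of 20 requests, roughly 10 should contain "different backend"
 | for i in {1..20}; do
 |   kubectl run --rm=true -i --restart=Never --image=curlimages/curl curl -- \
 |     curl -s --connect-timeout 5 http://backend:8080/private
 | done | grep -c "different backend"
 | ```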
142 | This basic traffic control example shows only one function of Cilium Service Mesh; other features include TLS termination, tracing support and canary rollouts.
143 |
144 |
145 | ## {{% task %}} Cleanup
146 |
147 | We don't need this cluster anymore, so you can delete it with:
148 |
149 | ```bash
150 | minikube delete --profile kubeless
151 | ```
152 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Cilium Basics Training
2 |
3 | Cilium Basics Training Description
4 |
5 |
6 | ## Content Sections
7 |
8 | The training content resides within the [content](content) directory.
9 |
10 | The main part are the labs, which can be found at [content/en/docs](content/en/docs).
11 |
12 |
13 | ## Hugo
14 |
15 | This site is built using the static page generator [Hugo](https://gohugo.io/).
16 |
17 | The page uses the [docsy theme](https://github.com/google/docsy) which is included as a Hugo Module.
18 | Docsy is being enhanced using [docsy-plus](https://github.com/acend/docsy-plus/) as well as
19 | [docsy-acend](https://github.com/acend/docsy-acend/) and [docsy-puzzle](https://github.com/puzzle/docsy-puzzle/)
20 | for brand specific settings.
21 |
22 | The default configuration uses the acend setup from [config/_default](config/_default/config.toml).
23 | Alternatively you can use the Puzzle setup from [config/puzzle](config/puzzle/config.toml), which is enabled with
24 | `--environment puzzle`.
25 |
26 |
27 | ### Docsy theme usage
28 |
29 | * [Official docsy documentation](https://www.docsy.dev/docs/)
30 | * [Docsy Plus](https://github.com/acend/docsy-plus/)
31 |
32 |
33 | ### Update hugo modules for theme updates
34 |
35 | Run the following command to update all hugo modules with their newest upstream version:
36 |
37 | ```bash
38 | hugo mod get -u
39 | ```
40 |
41 | Command without hugo installation:
42 |
43 | ```bash
44 | export HUGO_VERSION=$(grep "FROM docker.io/floryn90/hugo" Dockerfile | sed 's/FROM docker.io\/floryn90\/hugo://g' | sed 's/ AS builder//g')
45 | docker run --rm --interactive -v $(pwd):/src docker.io/floryn90/hugo:${HUGO_VERSION} mod get -u
46 | ```
47 |
48 |
49 | ### Shortcode usage
50 |
51 |
52 | #### `onlyWhen` and `onlyWhenNot`
53 |
54 | The `onlyWhen` and `onlyWhenNot` shortcodes allow text to be rendered if certain conditions apply.
55 |
56 | * `{{% onlyWhen variant1 %}}`: This is only rendered when `enabledModule` in `config.toml` contains `variant1`
57 | * `{{% onlyWhen variant1 variant2 %}}`: This is only rendered when `enabledModule` in `config.toml` contains `variant1` **or** `variant2`
58 | * `{{% onlyWhenNot variant1 %}}`: This is only rendered when `enabledModule` in `config.toml` **does not** contain `variant1`
59 | * `{{% onlyWhenNot variant1 variant2 %}}`: This is only rendered when `enabledModule` in `config.toml` **does not** contain `variant1` **or** `variant2`
60 |
61 | In order to only render text if **all** of multiple conditions do not apply simply chain several `onlyWhenNot` shortcodes:
62 |
63 | ```
64 | {{% onlyWhenNot variant1 %}}
65 | {{% onlyWhenNot variant2 %}}
66 | This is only rendered when `enabledModule` in `config.toml` **does not** contain `variant1` **nor** `variant2`.
67 | {{% /onlyWhen %}}
68 | {{% /onlyWhen %}}
69 | ```
70 |
71 |
72 | ## Build using Docker
73 |
74 | Build the image:
75 |
76 | ```bash
77 | docker build <--build-arg TRAINING_HUGO_ENV=...> -t quay.io/acend/cilium-basics-training .
78 | ```
79 |
80 | Run it locally:
81 |
82 | ```bash
83 | docker run -i -p 8080:8080 quay.io/acend/cilium-basics-training
84 | ```
85 |
86 |
87 | ## How to develop locally
88 |
89 | To develop locally we don't want to rebuild the entire container image every time something changes, and it is also important to use the same Hugo version as in production.
90 | We simply mount the working directory into a running container, where Hugo is started in server mode.
91 |
92 | ```bash
93 | export HUGO_VERSION=$(grep "FROM floryn90/hugo" Dockerfile | sed 's/FROM floryn90\/hugo://g' | sed 's/ AS builder//g')
94 | docker run --rm --interactive --publish 8080:8080 -v $(pwd):/src floryn90/hugo:${HUGO_VERSION} server -p 8080 --bind 0.0.0.0 --enableGitInfo=false
95 | ```
96 |
97 | Use the following command to set `techlab` as the Hugo environment:
98 |
99 | ```bash
100 | export HUGO_VERSION=$(grep "FROM floryn90/hugo" Dockerfile | sed 's/FROM floryn90\/hugo://g' | sed 's/ AS builder//g')
101 | docker run --rm --interactive --publish 8080:8080 -v $(pwd):/src floryn90/hugo:${HUGO_VERSION} server --environment=techlab -p 8080 --bind 0.0.0.0 --enableGitInfo=false
102 | ```
103 |
104 |
105 | ## Linting of Markdown content
106 |
107 | Markdown files are linted with markdownlint.
108 | Custom rules are in `.markdownlint.json`.
109 | There's a GitHub Action `.github/workflows/markdownlint.yaml` for CI.
110 | For local checks, you can either use Visual Studio Code with the corresponding extension, or the command line like this:
111 |
112 | ```shell script
113 | npm install
114 | npm run mdlint
115 | ```
116 |
117 | npm not installed? No problem:
118 |
119 | ```bash
120 | export HUGO_VERSION=$(grep "FROM floryn90/hugo" Dockerfile | sed 's/FROM floryn90\/hugo://g' | sed 's/ AS builder//g')
121 | docker run --rm --interactive -v $(pwd):/src floryn90/hugo:${HUGO_VERSION}-ci /bin/bash -c "set -euo pipefail;npm install; npm run mdlint;"
122 | ```
123 |
124 |
125 | ## Github Actions
126 |
127 |
128 | ### Build
129 |
130 | The [build action](.github/workflows/build.yaml) is fired on Pull Requests and does the following:
131 |
132 | * builds all PR Versions (Linting and Docker build)
133 | * deploys the built container images to the container registry
134 | * Deploys a PR environment in a k8s test namespace with helm
135 | * Triggers a redeployment
136 | * Comments in the PR where the PR Environments can be found
137 |
138 |
139 | ### PR Cleanup
140 |
141 | The [pr-cleanup action](.github/workflows/pr-cleanup.yaml) is fired when Pull Requests are closed and does the following:
142 |
143 | * Uninstalls PR Helm Release
144 |
145 |
146 | ### Push Main
147 |
148 | The [push main action](.github/workflows/push-main.yaml) is fired when a commit is pushed to the main branch (e.g. a PR is merged). It is very similar to the build action and does the following:
149 |
150 | * builds main Versions (Linting and Docker build)
151 | * deploys the built container images to the container registry
152 | * Deploys the main Version on k8s using helm
153 | * Triggers a redeployment
154 |
155 |
156 | ## Helm
157 |
158 | Manually deploy the training Release using the following command:
159 |
160 | ```bash
161 | helm install --repo https://acend.github.io/helm-charts/ acend-training-chart --values helm-chart/values.yaml -n
162 | ```
163 |
164 | For debugging purposes, use the `--dry-run` parameter:
165 |
166 | ```bash
167 | helm install --dry-run --repo https://acend.github.io/helm-charts/ acend-training-chart --values helm-chart/values.yaml -n
168 | ```
169 |
170 |
171 | ## Contributions
172 |
173 | If you find errors, bugs or missing information please help us improve and have a look at the [Contribution Guide](CONTRIBUTING.md).
174 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/FortAwesome/Font-Awesome v0.0.0-20210804190922-7d3d774145ac/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
2 | github.com/FortAwesome/Font-Awesome v0.0.0-20220831210243-d3a7818c253f/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
3 | github.com/FortAwesome/Font-Awesome v0.0.0-20230327165841-0698449d50f2/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
4 | github.com/FortAwesome/Font-Awesome v0.0.0-20240108205627-a1232e345536 h1:LFS9LpoSZYhxQ6clU0NIVbaGR08BlxAs4b+9W+7IGVQ=
5 | github.com/FortAwesome/Font-Awesome v0.0.0-20240108205627-a1232e345536/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
6 | github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
7 | github.com/FortAwesome/Font-Awesome v0.0.0-20240716171331-37eff7fa00de/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo=
8 | github.com/acend/docsy-acend v0.0.0-20220406070448-8027986336dc h1:kNDPVcZCXsbJxqDstPoesa9YqWx84BVowj9cgxG6dnE=
9 | github.com/acend/docsy-acend v0.0.0-20220406070448-8027986336dc/go.mod h1:92hTJB3aPssEooTK+gv0i84vwTjah30HKaLGdupJaPA=
10 | github.com/acend/docsy-acend v0.0.0-20220803144727-ae91bbd8a950 h1:U9lxN8KbS2BMDINtOh3GiIdgXl/5lse+TjxsUr0gcNs=
11 | github.com/acend/docsy-acend v0.0.0-20220803144727-ae91bbd8a950/go.mod h1:h8XZkPe1VufdOQfFXcLVQ7FvOJyIMKr8rJcSvWStG2g=
12 | github.com/acend/docsy-acend v0.0.0-20230301101952-9253f0a31c68 h1:trqoLzVYYvjHqexqpuqNzObHdJ3oSP6EqWfl3w/nPPE=
13 | github.com/acend/docsy-acend v0.0.0-20230301101952-9253f0a31c68/go.mod h1:h8XZkPe1VufdOQfFXcLVQ7FvOJyIMKr8rJcSvWStG2g=
14 | github.com/acend/docsy-acend v1.0.0 h1:TwmHoH3z6lh5zcNj6zUpMP4lYOhQ+OOgcbBwr7AqVoo=
15 | github.com/acend/docsy-acend v1.0.0/go.mod h1:h8XZkPe1VufdOQfFXcLVQ7FvOJyIMKr8rJcSvWStG2g=
16 | github.com/acend/docsy-plus v0.0.0-20220428195954-da462686a1f4 h1:NH8RTlmPMcTPxfZYlqYWWcqoQ5STebCQikKByJVRnAA=
17 | github.com/acend/docsy-plus v0.0.0-20220428195954-da462686a1f4/go.mod h1:FUTTPmi3S92rVMbCYqXdGNxixdyqACBrFTK7dOuMttQ=
18 | github.com/acend/docsy-plus v0.0.0-20220803122230-63e7228e737f h1:RMe3ZZ9KHUImhufaGViI6gnSTraEk+xvlTu9ae1lbys=
19 | github.com/acend/docsy-plus v0.0.0-20220803122230-63e7228e737f/go.mod h1:YDHqf+DCZcx5HvKGzaBluPmLfgHQ2GKkYjggvF98jR4=
20 | github.com/acend/docsy-plus v0.0.0-20221007192802-3b68a39bfb09 h1:6L6dny1gmnYw6sEZBh7gSCzbmdpOVdhN5sR1ZFt1Kj0=
21 | github.com/acend/docsy-plus v0.0.0-20221007192802-3b68a39bfb09/go.mod h1:YDHqf+DCZcx5HvKGzaBluPmLfgHQ2GKkYjggvF98jR4=
22 | github.com/acend/docsy-plus v0.0.0-20221124133822-edd51fecdbeb h1:CmN6u5xQRSF5QwnCOCS9zf5VKQO5sZ3SEfwbEG/Tuqc=
23 | github.com/acend/docsy-plus v0.0.0-20221124133822-edd51fecdbeb/go.mod h1:YDHqf+DCZcx5HvKGzaBluPmLfgHQ2GKkYjggvF98jR4=
24 | github.com/acend/docsy-plus v0.0.0-20221209092845-53bb58a32d13 h1:F/3yfoHP+4ljnpRnVZN1bBzgizvhCbj5WDtcj75RAFE=
25 | github.com/acend/docsy-plus v0.0.0-20221209092845-53bb58a32d13/go.mod h1:YDHqf+DCZcx5HvKGzaBluPmLfgHQ2GKkYjggvF98jR4=
26 | github.com/acend/docsy-plus v0.0.0-20230303095323-1af8d88aadc7 h1:gqzupsXEHLIk9fHmglX6Ob2gCXV/zf0ZYU9ksvqsWAo=
27 | github.com/acend/docsy-plus v0.0.0-20230303095323-1af8d88aadc7/go.mod h1:YDHqf+DCZcx5HvKGzaBluPmLfgHQ2GKkYjggvF98jR4=
28 | github.com/acend/docsy-plus v1.1.0 h1:MgHPR3YRPrJSWtMS3eQKJivdCEwFHCIKD0jChsZS3SM=
29 | github.com/acend/docsy-plus v1.1.0/go.mod h1:LPbI0Ljrhzt0YHUg8qozWVUXjrMVI1cFVPn3TyQxbcY=
30 | github.com/acend/docsy-plus v1.2.0/go.mod h1:LPbI0Ljrhzt0YHUg8qozWVUXjrMVI1cFVPn3TyQxbcY=
31 | github.com/cilium/ebpf v0.12.0 h1:oQEuIQIXgYhe1v7sYUG0P9vtJTYZLLdA6tiQmrOB1mo=
32 | github.com/cilium/ebpf v0.12.0/go.mod h1:u9H29/Iq+8cy70YqI6p5pfADkFl3vdnV2qXDg5JL0Zo=
33 | github.com/google/docsy v0.4.0 h1:Eyt2aiDC1fnw/Qq/9xnIqUU5n5Yyk4c8gX3nBDdTv/4=
34 | github.com/google/docsy v0.4.0/go.mod h1:vJjGkHNaw9bO42gpFTWwAUzHZWZEVlK46Kx7ikY5c7Y=
35 | github.com/google/docsy v0.6.0 h1:43bVF18t2JihAamelQjjGzx1vO2ljCilVrBgetCA8oI=
36 | github.com/google/docsy v0.6.0/go.mod h1:VKKLqD8PQ7AglJc98yBorATfW7GrNVsn0kGXVYF6G+M=
37 | github.com/google/docsy v0.9.1 h1:+jqges1YCd+yHeuZ1BUvD8V8mEGVtPxULg5j/vaJ984=
38 | github.com/google/docsy v0.9.1/go.mod h1:saOqKEUOn07Bc0orM/JdIF3VkOanHta9LU5Y53bwN2U=
39 | github.com/google/docsy v0.10.0/go.mod h1:c0nIAqmRTOuJ01F85U/wJPQtc3Zj9N58Kea9bOT2AJc=
40 | github.com/google/docsy v0.11.0/go.mod h1:hGGW0OjNuG5ZbH5JRtALY3yvN8ybbEP/v2iaK4bwOUI=
41 | github.com/google/docsy/dependencies v0.4.0 h1:FXwyjtuFfPIPBauU2t7uIAgS6VYfJf+OD5pzxGvkQsQ=
42 | github.com/google/docsy/dependencies v0.4.0/go.mod h1:2zZxHF+2qvkyXhLZtsbnqMotxMukJXLaf8fAZER48oo=
43 | github.com/google/docsy/dependencies v0.6.0 h1:BFXDCINbp8ZuUGl/mrHjMfhCg+b1YX+hVLAA5fGW7Pc=
44 | github.com/google/docsy/dependencies v0.6.0/go.mod h1:EDGc2znMbGUw0RW5kWwy2oGgLt0iVXBmoq4UOqstuNE=
45 | github.com/google/docsy/dependencies v0.7.2 h1:+t5ufoADQAj4XneFphz4A+UU0ICAxmNaRHVWtMYXPSI=
46 | github.com/google/docsy/dependencies v0.7.2/go.mod h1:gihhs5gmgeO+wuoay4FwOzob+jYJVyQbNaQOh788lD4=
47 | github.com/puzzle/docsy-puzzle v0.0.0-20220406081603-2cd9f7c8d79a h1:ivuXhwliGTmfp4Zn9dqHiIHPUbniLhsbSYKrsQIoFKM=
48 | github.com/puzzle/docsy-puzzle v0.0.0-20220406081603-2cd9f7c8d79a/go.mod h1:FHtQEgHYfsiO5d1XXaF/mD5C51PQw1kea8JwTGBs93o=
49 | github.com/puzzle/docsy-puzzle v0.0.0-20230123144731-757054047a02 h1:80gTlzoKpnRjr4F70KAXmNs6UsTAkPgYEyyVguDwheg=
50 | github.com/puzzle/docsy-puzzle v0.0.0-20230123144731-757054047a02/go.mod h1:q4bPnnpLaz5IDdFmQFxCHr85uwAsK9ayut5NNmC4w3I=
51 | github.com/twbs/bootstrap v4.6.1+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
52 | github.com/twbs/bootstrap v4.6.2+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
53 | github.com/twbs/bootstrap v5.2.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
54 | github.com/twbs/bootstrap v5.3.3+incompatible h1:goFoqinzdHfkeegpFP7pvhbd0g+A3O2hbU3XCjuNrEQ=
55 | github.com/twbs/bootstrap v5.3.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
56 | github.com/twbs/bootstrap v5.3.7+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0=
57 | golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI=
58 | golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
59 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=
60 | golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
61 | golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
62 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
63 | golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
64 | golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
65 | golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
66 | golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
67 |
--------------------------------------------------------------------------------
/content/en/docs/02/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Upgrade Cilium"
3 | weight: 22
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 | In the previous lab, we intentionally installed version `v{{% param "ciliumVersion.preUpgrade" %}}` of Cilium. In this lab, we show you how to upgrade this installation.
8 |
9 |
10 | ## {{% task %}} Running pre-flight check
11 |
12 | When rolling out an upgrade with Kubernetes, Kubernetes will first terminate the Pod followed by pulling the new image version and then finally spin up the new image. In order to reduce the downtime of the agent and to prevent `ErrImagePull` errors during the upgrade, the pre-flight check pre-pulls the new image version. If you are running in "Kubernetes Without kube-proxy" mode you must also pass on the Kubernetes API Server IP and/or the Kubernetes API Server Port when generating the `cilium-preflight.yaml` file.
13 |
14 | ```bash
15 | helm install cilium-preflight cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
16 | --namespace=kube-system \
17 | --set preflight.enabled=true \
18 | --set agent=false \
19 | --set operator.enabled=false \
20 | --wait
21 | ```
22 |
23 |
24 | ## {{% task %}} Clean up pre-flight check
25 |
26 |
27 | To check the pre-flight Pods, verify that they are `READY` using:
28 |
29 | ```bash
30 | kubectl get pod -A | grep cilium-pre-flight
31 | ```
32 |
33 | and you should get an output like this:
34 |
35 | ```
36 | kube-system cilium-pre-flight-check-84f67b54f6-hz57g 1/1 Running 0 63s
37 | kube-system cilium-pre-flight-check-skglp 1/1 Running 0 63s
38 | ```
39 |
40 | The Pods are `READY` with a value of `1/1`, and therefore we can delete the `cilium-preflight` release again with:
41 |
42 | ```bash
43 | helm delete cilium-preflight --namespace=kube-system
44 | ```
45 |
46 |
47 | ## {{% task %}} Upgrading Cilium
48 |
49 | During normal cluster operations, all Cilium components should run the same version. Upgrading just one of them (e.g., upgrading the agent without upgrading the operator) could result in unexpected cluster behavior. The following steps will describe how to upgrade all of the components from one stable release to a later stable release.
50 |
51 | When upgrading from one minor release to another minor release, for example 1.x to 1.y, it is recommended to upgrade to the latest patch release for a Cilium release series first. The latest patch releases for each supported version of Cilium are [here](https://github.com/cilium/cilium#stable-releases). Upgrading to the latest patch release ensures the most seamless experience if a rollback is required following the minor release upgrade. The upgrade guides for previous versions can be found for each minor version at the bottom left corner.
52 |
53 | Helm can be used to either upgrade Cilium directly or to generate a new set of YAML files that can be used to upgrade an existing deployment via kubectl. By default, Helm will generate the new templates using the default values files packaged with each new release. You still need to ensure that you are specifying the equivalent options as used for the initial deployment, either by specifying them at the command line or by committing the values to a YAML file.
54 |
55 | To minimize datapath disruption during the upgrade, the `upgradeCompatibility` option should be set to the initial Cilium version which was installed in this cluster.
56 |
57 | ```bash
58 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
59 | --namespace kube-system \
60 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.1.0.0/16} \
61 | --set cluster.name=cluster1 \
62 | --set cluster.id=1 \
63 | --set operator.replicas=1 \
64 | --set kubeProxyReplacement=disabled \
65 | --set upgradeCompatibility=1.11 \
66 | --wait
67 | ```
68 | {{% alert title="Note" color="primary" %}}
69 | When upgrading from one minor release to another minor release using `helm upgrade`, do not use Helm’s `--reuse-values` flag. The `--reuse-values` flag ignores any newly introduced values present in the new release and thus may cause the Helm template to render incorrectly. Instead, if you want to reuse the values from your existing installation, save the old values in a values file, check the file for any renamed or deprecated values, and then pass it to the `helm upgrade` command as described above. You can retrieve and save the values from an existing installation with the following command:
70 |
71 | ```bash
72 | helm get values cilium --namespace=kube-system -o yaml > old-values.yaml
73 | ```
74 |
75 | The `--reuse-values` flag may only be safely used if the Cilium chart version remains unchanged, for example when `helm upgrade` is used to apply configuration changes without upgrading Cilium.
76 | {{% /alert %}}
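 | 
 | A minimal sketch of such an upgrade with the saved values file (after reviewing `old-values.yaml` for renamed or deprecated options) could look like this:
 | 
 | ```bash
 | helm upgrade cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
 |   --namespace kube-system \
 |   -f old-values.yaml \
 |   --wait
 | ```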
77 |
78 |
79 | ## {{% task %}} Explore your installation after the upgrade
80 |
81 | We can run:
82 |
83 | ```bash
84 | cilium status --wait
85 | ```
86 |
87 | again to verify that the upgrade to the new version succeeded:
88 |
89 | ```
90 | /¯¯\
91 | /¯¯\__/¯¯\ Cilium: OK
92 | \__/¯¯\__/ Operator: OK
93 | /¯¯\__/¯¯\ Hubble: disabled
94 | \__/¯¯\__/ ClusterMesh: disabled
95 | \__/
96 |
97 | DaemonSet cilium Desired: 1, Ready: 1/1, Available: 1/1
98 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1
99 | Containers: cilium-operator Running: 1
100 | cilium Running: 1
101 | Cluster Pods: 1/1 managed by Cilium
102 | Image versions cilium quay.io/cilium/cilium:v{{% param "ciliumVersion.postUpgrade" %}}:: 1
103 | cilium-operator quay.io/cilium/operator-generic:v{{% param "ciliumVersion.postUpgrade" %}}@: 1
104 | ```
105 |
106 | And we see the right version in the `cilium` and `cilium-operator` images.
107 |
108 |
109 | ### Nice to know
110 |
111 | In Cilium release 1.11.0, the automatic mount of eBPF maps in the host filesystem was enabled. These eBPF maps are basically very efficient key-value stores used by Cilium. Having them mounted in the filesystem allows the datapath to continue operating even if the `cilium-agent` is restarting. We can verify that Cilium created global traffic control eBPF maps on the node in `/sys/fs/bpf/tc/globals/`:
112 |
113 | ```bash
114 | docker exec cluster1 ls /sys/fs/bpf/tc/globals/
115 | ```
116 |
117 |
118 | ## Rolling Back
119 |
120 | Occasionally, it may be necessary to undo the rollout because a step was missed or something went wrong during the upgrade. To undo the rollout run:
121 |
122 | ```bash
123 | helm history cilium --namespace=kube-system
124 | # helm rollback cilium [REVISION] --namespace=kube-system
125 | ```
126 |
127 | This will revert the latest changes to the Cilium DaemonSet and return Cilium to the state it was in prior to the upgrade.
128 |
--------------------------------------------------------------------------------
/content/en/docs/03/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Hubble UI"
3 | weight: 32
4 | ---
5 |
6 | Not only does Hubble allow us to inspect flows from the command line, but it also allows us to see them in real-time on a graphical service map via Hubble UI. Again, this also is an optional component that is disabled by default.
7 |
8 |
9 | ## {{% task %}} Enable the Hubble UI component
10 |
11 | Enabling the optional Hubble UI component with Helm looks like this:
12 |
13 | ```bash
14 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
15 | --namespace kube-system \
16 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.1.0.0/16} \
17 | --set cluster.name=cluster1 \
18 | --set cluster.id=1 \
19 | --set operator.replicas=1 \
20 | --set upgradeCompatibility=1.11 \
21 | --set kubeProxyReplacement=disabled \
22 | --set hubble.enabled=true \
23 | --set hubble.relay.enabled=true \
24 | `# enable hubble ui variable:` \
25 | --set hubble.ui.enabled=true \
26 | --wait
27 | ```
28 |
29 | {{% alert title="Note" color="primary" %}}
30 | When using the `cilium` CLI, you can execute the following command to enable the Hubble UI:
31 |
32 | ```
33 | # cilium hubble enable --ui
34 | ```
35 | {{% /alert %}}
36 |
37 | Take a look at the pods again to see what happened under the hood:
38 |
39 | ```bash
40 | kubectl get pods -A
41 | ```
42 |
43 | We see, there is again a new Pod running for the `hubble-ui` component.
44 |
45 | ```
46 | NAMESPACE NAME READY STATUS RESTARTS AGE
47 | default backend-6f884b6495-v7bvt 1/1 Running 0 94m
48 | default frontend-77d99ffc5d-lcsph 1/1 Running 0 94m
49 | default not-frontend-7db9747986-snjwp 1/1 Running 0 94m
50 | kube-system cilium-ksr7h 1/1 Running 0 102m
51 | kube-system cilium-operator-6f5c6f768d-r2qgn 1/1 Running 0 102m
52 | kube-system coredns-6d4b75cb6d-nf8wz 1/1 Running 0 115m
53 | kube-system etcd-cluster1 1/1 Running 0 115m
54 | kube-system hubble-relay-84b4ddb556-nr7c8 1/1 Running 0 93m
55 | kube-system hubble-ui-579fdfbc58-578g9 2/2 Running 0 19s
56 | kube-system kube-apiserver-cluster1 1/1 Running 0 115m
57 | kube-system kube-controller-manager-cluster1 1/1 Running 0 115m
58 | kube-system kube-proxy-7l6qk 1/1 Running 0 115m
59 | kube-system kube-scheduler-cluster1 1/1 Running 0 115m
60 | kube-system storage-provisioner 1/1 Running 1 (115m ago) 115m
61 | ```
62 |
63 | The Cilium agents are restarting, and a new Hubble UI Pod is now present in addition to the Hubble Relay Pod. As above, we can wait for Cilium and Hubble to be ready by running:
64 |
65 | ```bash
66 | cilium status --wait
67 | ```
68 |
69 | ```
70 | cilium status --wait
71 | /¯¯\
72 | /¯¯\__/¯¯\ Cilium: OK
73 | \__/¯¯\__/ Operator: OK
74 | /¯¯\__/¯¯\ Hubble: OK
75 | \__/¯¯\__/ ClusterMesh: disabled
76 | \__/
77 |
78 | Deployment hubble-relay Desired: 1, Ready: 1/1, Available: 1/1
79 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1
80 | Deployment hubble-ui Desired: 1, Ready: 1/1, Available: 1/1
81 | DaemonSet cilium Desired: 1, Ready: 1/1, Available: 1/1
82 | Containers: cilium Running: 1
83 | hubble-ui Running: 1
84 | hubble-relay Running: 1
85 | cilium-operator Running: 1
86 | Cluster Pods: 6/6 managed by Cilium
87 | Image versions cilium quay.io/cilium/cilium:v1.12.1:: 1
88 | hubble-ui quay.io/cilium/hubble-ui:v0.9.1: 1
89 | hubble-ui quay.io/cilium/hubble-ui-backend:v0.9.1: 1
90 | hubble-relay quay.io/cilium/hubble-relay:v1.12.1: 1
91 | cilium-operator quay.io/cilium/operator-generic:v1.12.1: 1
92 | ```
93 |
94 |
95 | And then check Hubble status:
96 |
97 | ```bash
98 | hubble status
99 | ```
100 |
101 | {{% alert title="Note" color="primary" %}}
102 | Our earlier `kubectl port-forward` command should still be running (this can be checked by running `jobs` or `ps aux | grep "port-forward"`). If it is not, `hubble status` will fail and we have to run the port-forward again:
103 |
104 | ```bash
105 | cilium hubble port-forward&
106 | hubble status
107 | ```
108 |
109 | {{% /alert %}}
110 |
111 |
112 | To start the Hubble UI, execute:
113 |
114 | ```bash
115 | kubectl port-forward -n kube-system --address ::,0.0.0.0 svc/hubble-ui 12000:80 &
116 | ```
117 |
118 | In our Webshell environment, you can use the public IP of the VM to access Hubble. A simple way is to execute:
119 |
120 | ```bash
121 | echo "http://$(curl -s ifconfig.me):12000"
122 | ```
123 | and copy the output into a new browser tab. If you are working locally, the browser should open http://localhost:12000/ (open it manually if not).
124 |
125 | We can then access the graphical service map by selecting our `default` Namespace:
126 |
127 | 
128 |
129 | If you see a spinning circle and the message "Waiting for service map data..." you can generate some network activity again:
130 |
131 | ```bash
132 | for i in {1..10}; do
133 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
134 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
135 | done
136 | ```
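 | 
 | If the `FRONTEND` and `NOT_FRONTEND` environment variables are no longer set in your shell (for example because you opened a new terminal), re-export them first with the same commands used in the earlier lab and rerun the loop:
 | 
 | ```bash
 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
 | NOT_FRONTEND=$(kubectl get pods -l app=not-frontend -o jsonpath='{.items[0].metadata.name}')
 | ```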
137 |
138 | After generating the traffic you should see a service map in the Hubble UI:
139 |
140 | 
141 |
142 | and also a table with the already familiar flow output previously seen in the `hubble observe` command:
143 |
144 | 
145 |
146 | Hubble flows are displayed in real-time at the bottom, with a visualization of the namespace objects in the center. Click on any flow, and click on any property from the right-side panel: notice that the filters at the top of the UI have been updated accordingly.
147 |
148 | Let's run a connectivity test again and see what happens in Hubble UI in the `cilium-test` namespace. In the Hubble UI dropdown change to `cilium-test`. Since this test runs for a few minutes this could be a good time to grab a :coffee:.
149 |
150 | ```bash
151 | cilium connectivity test --test 'client-egress-to-echo-service-account' --test to-entities-world --test to-fqdns
152 | ```
153 |
154 | We can see that Hubble UI is not only capable of displaying flows within a Namespace, it also helps visualize flows going in or out of it.
155 |
156 | 
157 |
158 | And there are also several visual options in the Hubble UI:
159 |
160 | 
161 |
162 | Once done, clean up the connectivity test Namespace again:
163 |
164 | ```bash
165 | kubectl delete ns cilium-test --wait=false
166 | ```
167 |
--------------------------------------------------------------------------------
/content/en/docs/08/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Transparent Encryption"
3 | weight: 8
4 | OnlyWhenNot: techlab
5 | ---
6 | ## Host traffic/endpoint traffic encryption
7 |
8 | To secure communication inside a Kubernetes cluster Cilium supports transparent encryption of traffic between Cilium-managed endpoints either using IPsec or [WireGuard®](https://www.wireguard.com/).
9 |
10 |
11 | ## {{% task %}} Increase cluster size
12 |
13 | By default Minikube creates single-node clusters. Add a second node to the cluster:
14 |
15 | ```bash
16 | minikube -p cluster1 node add
17 | ```
18 |
19 |
20 | ## {{% task %}} Move frontend app to a different node
21 |
22 | To see traffic between nodes, we move the frontend pod from Chapter 3 to the newly created node:
23 |
24 | Create a file `patch.yaml` with the following content:
25 |
26 | {{< readfile file="/content/en/docs/08/patch.yaml" code="true" lang="yaml" >}}
27 |
28 | You can patch the frontend deployment now:
29 |
30 | ```bash
31 | kubectl patch deployments.apps frontend --type merge --patch-file patch.yaml
32 | ```
33 | We should see the frontend now running on the new node `cluster1-m02`:
34 |
35 | ```bash
36 | kubectl get pods -o wide
37 | ```
38 |
39 | ```
40 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
41 | backend-65f7c794cc-hh6pw 1/1 Running 0 22m 10.1.0.39 cluster1
42 | deathstar-6c94dcc57b-6chpk 1/1 Running 1 (10m ago) 11m 10.1.0.207 cluster1
43 | deathstar-6c94dcc57b-vtt8b 1/1 Running 0 11m 10.1.0.220 cluster1
44 | frontend-6db4b77ff6-kznfl 1/1 Running 0 35s 10.1.1.7 cluster1-m02
45 | not-frontend-8f467ccbd-4jl6z 1/1 Running 0 22m 10.1.0.115 cluster1
46 | tiefighter 1/1 Running 0 11m 10.1.0.185 cluster1
47 | xwing 1/1 Running 0 11m 10.1.0.205 cluster1
48 |
49 | ```
50 |
51 |
52 | ## {{% task %}} Sniff traffic between nodes
53 |
54 | To check whether we see unencrypted traffic between the nodes, we will use tcpdump.
55 | Let us filter on the host interface for all packets containing the string `password`:
56 |
57 | ```bash
58 | CILIUM_AGENT=$(kubectl get pod -n kube-system -l k8s-app=cilium -o jsonpath="{.items[0].metadata.name}")
59 | kubectl debug -n kube-system -i ${CILIUM_AGENT} --image=nicolaka/netshoot -- tcpdump -ni eth0 -vv | grep password
60 | ```
61 |
62 | In a second terminal, we will call our backend service with a password. For those using the Webshell, a second terminal can be opened using the menu `Terminal` and then `Split Terminal`; don't forget to ssh into the VM again. Now run the following in this second terminal:
63 |
64 | ```bash
65 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
66 | for i in {1..10}; do
67 | kubectl exec -ti ${FRONTEND} -- curl -Is backend:8080?password=secret
68 | done
69 | ```
70 |
71 | You should now see our string `password` sniffed in the network traffic. Hit `Ctrl+C` to stop sniffing but keep the second terminal open.
72 |
73 |
74 | ## {{% task %}} Enable node traffic encryption with WireGuard
75 |
76 | Enabling WireGuard based encryption with Helm is simple:
77 |
78 | ```bash
79 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
80 | --namespace kube-system \
81 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.1.0.0/16} \
82 | --set cluster.name=cluster1 \
83 | --set cluster.id=1 \
84 | --set operator.replicas=1 \
85 | --set upgradeCompatibility=1.11 \
86 | --set kubeProxyReplacement=disabled \
87 | --set hubble.enabled=true \
88 | --set hubble.relay.enabled=true \
89 | --set hubble.ui.enabled=true \
90 | --set prometheus.enabled=true \
91 | --set operator.prometheus.enabled=true \
92 | --set hubble.enabled=true \
93 | --set hubble.metrics.enabled="{dns,drop:destinationContext=pod;sourceContext=pod,tcp,flow,port-distribution,icmp,http:destinationContext=pod}" \
94 | `# enable wireguard:` \
95 | --set l7Proxy=false \
96 | --set encryption.enabled=true \
97 | --set encryption.type=wireguard \
98 | --set encryption.wireguard.userspaceFallback=true \
99 | --wait
100 | ```
101 |
102 | Afterwards restart the Cilium DaemonSet:
103 |
104 |
105 | ```bash
106 | kubectl -n kube-system rollout restart ds cilium
107 | ```
108 |
109 | Currently, L7 policy enforcement and visibility are [not supported](https://github.com/cilium/cilium/issues/15462) with WireGuard, which is why we have to disable the L7 proxy.
110 |
111 |
112 | ## {{% task %}} Verify encryption is working
113 |
114 |
115 | Verify that the number of encryption peers is 1 (this can take a while; the number is the number of nodes minus 1):
116 | ```bash
117 | kubectl -n kube-system exec -ti ds/cilium -- cilium status | grep Encryption
118 | ```
119 |
120 | You should see something similar to this (in this example we have a two-node cluster):
121 |
122 | ```
123 | Encryption: Wireguard [cilium_wg0 (Pubkey: XbTJd5Gnp7F8cG2Ymj6q11dBx8OtP1J5ZOAhswPiYAc=, Port: 51871, Peers: 1)]
124 | ```
125 |
126 | We now check if the traffic is really encrypted, we start sniffing again:
127 |
128 | ```bash
129 | CILIUM_AGENT=$(kubectl get pod -n kube-system -l k8s-app=cilium -o jsonpath="{.items[0].metadata.name}")
130 | kubectl debug -n kube-system -i ${CILIUM_AGENT} --image=nicolaka/netshoot -- tcpdump -ni eth0 -vv | grep password
131 | ```
132 |
133 | Now in the other terminal generate traffic:
134 |
135 | ```bash
136 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
137 | for i in {1..10}; do
138 | kubectl exec -ti ${FRONTEND} -- curl -Is backend:8080?password=secret
139 | done
140 | ```
141 | As you should see, the traffic is now encrypted and we can no longer find our string in plaintext on `eth0`. To sniff the traffic before it is encrypted, replace the interface `eth0` with the WireGuard interface `cilium_wg0`.
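 | 
 | For example, the sniffing command from above with the interface swapped looks like this (a sketch; run it instead of the `eth0` variant and generate traffic again):
 | 
 | ```bash
 | CILIUM_AGENT=$(kubectl get pod -n kube-system -l k8s-app=cilium -o jsonpath="{.items[0].metadata.name}")
 | kubectl debug -n kube-system -i ${CILIUM_AGENT} --image=nicolaka/netshoot -- tcpdump -ni cilium_wg0 -vv | grep password
 | ```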
142 |
143 | Hit `Ctrl+C` to stop sniffing. You can close the second terminal with `exit`.
144 |
145 |
146 | ## {{% task %}} Cleanup
147 |
148 | To not mess up the next ClusterMesh lab, we are going to disable WireGuard encryption again:
149 |
150 | ```bash
151 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}}\
152 | --namespace kube-system \
153 | --reuse-values \
154 | --set l7Proxy=true \
155 | --set encryption.enabled=false \
156 | --wait
157 | ```
158 |
159 | and then restart the Cilium DaemonSet:
160 |
161 | ```bash
162 | kubectl -n kube-system rollout restart ds cilium
163 | ```
164 |
165 | Verify that it is disabled again:
166 |
167 | ```bash
168 | kubectl -n kube-system exec -ti ds/cilium -- cilium status | grep Encryption
169 | ```
170 |
171 | ```
172 | Encryption: Disabled
173 | ```
174 |
175 |
176 | Remove the second node and move the backend application back to the first node:
177 |
178 | ```bash
179 | kubectl delete -f simple-app.yaml
180 | minikube node delete cluster1-m02 --profile cluster1
181 | kubectl apply -f simple-app.yaml
182 |
183 | ```
184 |
185 |
--------------------------------------------------------------------------------
/content/en/docs/10/02/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Kubernetes without kube-proxy"
3 | weight: 102
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 | In this lab, we are going to provision a new Kubernetes cluster without `kube-proxy` to use Cilium as a full replacement for it.
8 |
9 |
10 | ## {{% task %}} Deploy a new Kubernetes Cluster without `kube-proxy`
11 |
12 |
13 | Create a new Kubernetes cluster using `minikube`. As `minikube` uses `kubeadm` we can skip the phase where `kubeadm` installs the `kube-proxy` addon. Execute the following command to create a third cluster:
14 |
15 | ```bash
16 | minikube start --network-plugin=cni --cni=false --kubernetes-version={{% param "kubernetesVersion" %}} --extra-config=kubeadm.skip-phases=addon/kube-proxy -p kubeless
17 | ```
18 |
19 | ```
20 | 😄 [cluster3] minikube v{{% param "kubernetesVersion" %}} on Ubuntu 20.04
21 | ✨ Automatically selected the docker driver. Other choices: virtualbox, ssh
22 | ❗ With --network-plugin=cni, you will need to provide your own CNI. See --cni flag as a user-friendly alternative
23 | 👍 Starting control plane node cluster3 in cluster cluster3
24 | 🚜 Pulling base image ...
25 | 🔥 Creating docker container (CPUs=2, Memory=8000MB) ...
26 | 🐳 Preparing Kubernetes v{{% param "kubernetesVersion" %}} on Docker 20.10.8 ...
27 | ▪ kubeadm.skip-phases=addon/kube-proxy
28 | ▪ Generating certificates and keys ...
29 | ▪ Booting up control plane ...
30 | ▪ Configuring RBAC rules ...
31 | 🔎 Verifying Kubernetes components...
32 | ▪ Using image gcr.io/k8s-minikube/storage-provisioner:v5
33 | 🌟 Enabled addons: storage-provisioner, default-storageclass
34 | 🏄 Done! kubectl is now configured to use "cluster3" cluster and "default" namespace by default
35 | ```
36 |
37 |
38 | ## {{% task %}} Deploy Cilium and enable the Kube Proxy replacement
39 |
40 | As the `cilium` and `cilium-operator` Pods by default try to communicate with the Kubernetes API using the default `kubernetes` service IP, they cannot do this when `kube-proxy` is disabled. We therefore need to set the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables to tell the two Pods how to connect to the Kubernetes API.
41 |
42 | To find the correct IP address execute the following command:
43 |
44 | ```bash
45 | API_SERVER_IP=$(kubectl config view -o jsonpath='{.clusters[?(@.name == "kubeless")].cluster.server}' | cut -f 3 -d / | cut -f1 -d:)
46 | API_SERVER_PORT=$(kubectl config view -o jsonpath='{.clusters[?(@.name == "kubeless")].cluster.server}' | cut -f 3 -d / | cut -f2 -d:)
47 | echo "$API_SERVER_IP:$API_SERVER_PORT"
48 | ```
49 |
50 | Use the shown IP address and port in the next Helm command to install Cilium:
51 |
52 | ```bash
53 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
54 | --namespace kube-system \
55 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.3.0.0/16} \
56 | --set cluster.name=kubeless \
57 | --set cluster.id=3 \
58 | --set operator.replicas=1 \
59 | --set kubeProxyReplacement=strict \
60 | --set k8sServiceHost=$API_SERVER_IP \
61 | --set k8sServicePort=$API_SERVER_PORT \
62 | --wait
63 | ```
64 |
65 | {{% alert title="Note" color="primary" %}}
66 | Having a cluster running with `kubeProxyReplacement` enabled breaks other Minikube clusters running on the same host. If you still want to play around with `cluster1` after this chapter, you need to reboot your machine and start only `cluster1` with `minikube start --profile cluster1`.
67 | {{% /alert %}}
68 |
69 | We can now compare the running Pods on `cluster1` and `kubeless` in the `kube-system` Namespace.
70 |
71 | ```bash
72 | kubectl --context cluster1 -n kube-system get pod
73 | ```
74 |
75 | Here we see the running `kube-proxy` pod:
76 |
77 | ```
78 | NAME READY STATUS RESTARTS AGE
79 | cilium-operator-cb65bcb9b-cnxnq 1/1 Running 0 19m
80 | cilium-tq9kk 1/1 Running 0 8m42s
81 | clustermesh-apiserver-67fd99fd9b-x2svr 2/2 Running 0 61m
82 | coredns-6d4b75cb6d-fd6vk 1/1 Running 1 (82m ago) 97m
83 | etcd-cluster1 1/1 Running 1 (82m ago) 98m
84 | hubble-relay-84b4ddb556-nvftg 1/1 Running 0 19m
85 | hubble-ui-579fdfbc58-t6xst 2/2 Running 0 19m
86 | kube-apiserver-cluster1 1/1 Running 1 (81m ago) 98m
87 | kube-controller-manager-cluster1 1/1 Running 1 (82m ago) 98m
88 | kube-proxy-5j84l 1/1 Running 1 (82m ago) 97m
89 | kube-scheduler-cluster1 1/1 Running 1 (81m ago) 98m
90 | storage-provisioner 1/1 Running 2 (82m ago) 98m
91 | ```
92 |
93 | On `kubeless` there is no `kube-proxy` Pod anymore:
94 |
95 | ```bash
96 | kubectl --context kubeless -n kube-system get pod
97 | ```
98 |
99 | ```
100 | NAME READY STATUS RESTARTS AGE
101 | cilium-operator-68bfb94678-785dk 1/1 Running 0 17m
102 | cilium-vrqms 1/1 Running 0 17m
103 | coredns-64897985d-fk5lj 1/1 Running 0 59m
104 | etcd-cluster3 1/1 Running 0 59m
105 | kube-apiserver-cluster3 1/1 Running 0 59m
106 | kube-controller-manager-cluster3 1/1 Running 0 59m
107 | kube-scheduler-cluster3 1/1 Running 0 59m
108 | storage-provisioner 1/1 Running 13 (17m ago) 59m
109 | ```
110 |
111 |
112 | ## {{% task %}} Deploy our simple app again to the new cluster
113 |
114 | As this is a new cluster, we want to deploy our `simple-app.yaml` from lab 03 again to run some experiments. Apply it with:
115 |
116 | ```bash
117 | kubectl apply -f simple-app.yaml
118 | ```
119 |
120 | Now let us redo the task from lab 03.
121 |
122 | Let's make life a bit easier again by storing the Pods' names in environment variables so we can reuse them later:
123 |
124 | ```bash
125 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
126 | echo ${FRONTEND}
127 | NOT_FRONTEND=$(kubectl get pods -l app=not-frontend -o jsonpath='{.items[0].metadata.name}')
128 | echo ${NOT_FRONTEND}
129 | ```
130 |
131 | Then execute:
132 |
133 | ```bash
134 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
135 | ```
136 |
137 | and
138 |
139 | ```bash
140 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
141 | ```
142 |
143 | You see that although we have no `kube-proxy` running, the backend Service can still be reached.
144 |
145 | ```
146 | HTTP/1.1 200 OK
147 | X-Powered-By: Express
148 | Vary: Origin, Accept-Encoding
149 | Access-Control-Allow-Credentials: true
150 | Accept-Ranges: bytes
151 | Cache-Control: public, max-age=0
152 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
153 | ETag: W/"83d-7438674ba0"
154 | Content-Type: text/html; charset=UTF-8
155 | Content-Length: 2109
156 | Date: Tue, 14 Dec 2021 10:01:16 GMT
157 | Connection: keep-alive
158 |
159 | HTTP/1.1 200 OK
160 | X-Powered-By: Express
161 | Vary: Origin, Accept-Encoding
162 | Access-Control-Allow-Credentials: true
163 | Accept-Ranges: bytes
164 | Cache-Control: public, max-age=0
165 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
166 | ETag: W/"83d-7438674ba0"
167 | Content-Type: text/html; charset=UTF-8
168 | Content-Length: 2109
169 | Date: Tue, 14 Dec 2021 10:01:16 GMT
170 | Connection: keep-alive
171 | ```
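 | 
 | Since service load balancing is now handled by Cilium's eBPF datapath instead of `kube-proxy`, you can inspect the service translation table directly in the agent (a quick check; the output format may vary between versions):
 | 
 | ```bash
 | kubectl -n kube-system exec ds/cilium -- cilium service list
 | ```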
172 |
--------------------------------------------------------------------------------
/content/en/docs/05/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Troubleshooting"
3 | weight: 5
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 |
8 | For more details on Troubleshooting, have a look into [Cilium's Troubleshooting Documentation](https://docs.cilium.io/en/stable/operations/troubleshooting/).
9 |
10 |
11 | ## Component & Cluster Health
12 |
13 | An initial overview of Cilium can be retrieved by listing all pods to verify whether all pods have the status `Running`:
14 |
15 | ```bash
16 | kubectl -n kube-system get pods -l k8s-app=cilium
17 | ```
18 | In our single-node cluster there is only one Cilium Pod running:
19 | ```
20 | NAME READY STATUS RESTARTS AGE
21 | cilium-ksr7h 1/1 Running 0 12m16
22 | ```
23 |
24 | If Cilium encounters a problem that it cannot recover from, it will automatically report the failure state via `cilium status`, which is regularly queried by the Kubernetes liveness probe to automatically restart Cilium Pods. If a Cilium Pod is in the state `CrashLoopBackOff`, this indicates a permanent failure scenario.
25 |
26 | If a particular Cilium Pod is not in a running state, the status and health of the agent on that node can be retrieved by running `cilium status` in the context of that pod:
27 |
28 | ```bash
29 | kubectl -n kube-system exec ds/cilium -- cilium status
30 | ```
31 |
32 | The output looks similar to this:
33 |
34 | ```
35 | Defaulted container "cilium-agent" out of: cilium-agent, mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init)
36 | KVStore: Ok Disabled
37 | Kubernetes: Ok 1.24 (v1.24.3) [linux/amd64]
38 | Kubernetes APIs: ["cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "core/v1::Namespace", "core/v1::Node", "core/v1::Pods", "core/v1::Service", "discovery/v1::EndpointSlice", "networking.k8s.io/v1::NetworkPolicy"]
39 | KubeProxyReplacement: Disabled
40 | Host firewall: Disabled
41 | CNI Chaining: none
42 | Cilium: Ok 1.12.5 (v1.12.5-701acde)
43 | NodeMonitor: Listening for events on 8 CPUs with 64x4096 of shared memory
44 | Cilium health daemon: Ok
45 | IPAM: IPv4: 10/254 allocated from 10.1.0.0/24,
46 | ClusterMesh: 0/0 clusters ready, 0 global-services
47 | BandwidthManager: Disabled
48 | Host Routing: Legacy
49 | Masquerading: IPTables [IPv4: Enabled, IPv6: Disabled]
50 | Controller Status: 50/50 healthy
51 | Proxy Status: OK, ip 10.1.0.182, 0 redirects active on ports 10000-20000
52 | Global Identity Range: min 256, max 65535
53 | Hubble: Ok Current/Max Flows: 4095/4095 (100.00%), Flows/s: 8.71 Metrics: Ok
54 | Encryption: Disabled
55 | Cluster health: 1/1 reachable (2023-01-26T08:23:50Z)
56 | ```
57 |
58 | More detailed information about the status of Cilium can be inspected with:
59 |
60 |
61 | ```bash
62 | kubectl -n kube-system exec ds/cilium -- cilium status --verbose
63 | ```
64 |
65 | Verbose output includes detailed IPAM state (allocated addresses), Cilium controller status, and details of the Proxy status.
66 |
67 |
68 | ## Logs
69 |
70 | To retrieve log files of a cilium pod, run:
71 |
72 | ```bash
73 | kubectl -n kube-system logs <pod-name> --timestamps
74 | ```
75 |
76 | The `<pod-name>` can be determined with the following command by selecting the name of one of the Pods:
77 |
78 | ```bash
79 | kubectl -n kube-system get pods -l k8s-app=cilium
80 | ```
81 |
82 | If the Cilium Pod was already restarted due to the liveness probe after encountering an issue, it can be useful to retrieve the logs of the Pod from before the last restart:
83 |
84 | ```bash
85 | kubectl -n kube-system logs <pod-name> --timestamps -p
86 | ```
87 |
88 |
89 | ## Policy Troubleshooting - Ensure Pod is managed by Cilium
90 |
91 | A potential cause for policy enforcement not functioning as expected is that the networking of the Pod selected by the policy is not being managed by Cilium. The following situations result in unmanaged pods:
92 |
93 | * The Pod is running in host networking and will use the host’s IP address directly. Such pods have full network connectivity but Cilium will not provide security policy enforcement for such pods.
94 | * The Pod was started before Cilium was deployed. Cilium only manages pods that have been deployed after Cilium itself was started. Cilium will not provide security policy enforcement for such pods.
95 |
96 | If Pod networking is not managed by Cilium, ingress and egress policy rules selecting the respective pods will not be applied. See the section Network Policy for more details.
97 |
98 | For a quick assessment of whether any pods are not managed by Cilium, the Cilium CLI will print the number of managed pods. If this prints that all of the pods are managed by Cilium, then there is no problem:
99 |
100 | ```bash
101 | cilium status
102 | ```
103 |
104 | ```
105 | /¯¯\
106 | /¯¯\__/¯¯\ Cilium: OK
107 | \__/¯¯\__/ Operator: OK
108 | /¯¯\__/¯¯\ Hubble: OK
109 | \__/¯¯\__/ ClusterMesh: disabled
110 | \__/
111 |
112 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2
113 | Deployment hubble-relay Desired: 1, Ready: 1/1, Available: 1/1
114 | Deployment hubble-ui Desired: 1, Ready: 1/1, Available: 1/1
115 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2
116 | Containers: cilium-operator Running: 2
117 | hubble-relay Running: 1
118 | hubble-ui Running: 1
119 | cilium Running: 2
120 | Cluster Pods: 5/5 managed by Cilium
121 | ```
122 |
123 | You can run the following script to list the pods which are not managed by Cilium:
124 |
125 | ```bash
126 | curl -sLO https://raw.githubusercontent.com/cilium/cilium/master/contrib/k8s/k8s-unmanaged.sh
127 | chmod +x k8s-unmanaged.sh
128 | ./k8s-unmanaged.sh
129 | ```
130 |
131 | {{% alert title="Note" color="primary" %}}
132 | It's ok if you don't see any Pods listed with the above command. We don't have any unmanaged Pods in our setup.
133 | {{% /alert %}}
134 |
135 |
136 | ## Reporting a problem - Automatic log & state collection
137 |
138 | Before you report a problem, make sure to retrieve the necessary information from your cluster before the failure state is lost.
139 |
140 | Execute the `cilium sysdump` command to collect troubleshooting information from your Kubernetes cluster:
141 |
142 | ```bash
143 | cilium sysdump
144 | ```
145 |
146 | Note that by default `cilium sysdump` will attempt to collect as many logs as possible for all the nodes in the cluster. If your cluster size is above 20 nodes, consider setting the following options to limit the size of the sysdump. This is not required, but is useful for those who have a constraint on bandwidth or upload size.
147 |
148 | * set the `--node-list` option to pick only a few nodes in case the cluster has many of them.
149 | * set the `--logs-since-time` option to go back in time to when the issues started.
150 | * set the `--logs-limit-bytes` option to limit the size of the log files (note: passed on to `kubectl logs`; does not apply to the entire collection archive).
 | 
151 | Ideally, a sysdump that has a full history of a few selected nodes, rather than a brief history of all the nodes, is preferred (use `--node-list`). The second recommended option is `--logs-since-time` if you are able to narrow down when the issues started. Lastly, if the Cilium agent and operator logs are too large, consider `--logs-limit-bytes`.
152 |
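 | For example, a limited sysdump could look like this (the node names and values are placeholders for illustration):
 | 
 | ```bash
 | cilium sysdump \
 |   --node-list cluster1,cluster1-m02 \
 |   --logs-since-time "2023-01-26T08:00:00Z" \
 |   --logs-limit-bytes 1048576
 | ```
 | 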
153 | Use `--help` to see more options:
154 |
155 | ```bash
156 | cilium sysdump --help
157 | ```
158 |
--------------------------------------------------------------------------------
/content/en/docs/10/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Host Firewall"
3 | weight: 101
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 |
8 | Cilium is capable of acting as a host firewall to enforce security policies for Kubernetes nodes. In this lab, we are going to show you briefly how this works.
9 |
10 |
11 | ## {{% task %}} Enable the Host Firewall in Cilium
12 |
13 | We need to enable the host firewall in the Cilium config. This can be done using Helm:
14 |
15 |
16 | ```bash
17 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
18 | --namespace kube-system \
19 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.1.0.0/16} \
20 | --set cluster.name=cluster1 \
21 | --set cluster.id=1 \
22 | --set operator.replicas=1 \
23 | --set upgradeCompatibility=1.11 \
24 | --set kubeProxyReplacement=disabled \
25 | --set hubble.enabled=true \
26 | --set hubble.relay.enabled=true \
27 | --set hubble.ui.enabled=true \
28 | --set prometheus.enabled=true \
29 | --set operator.prometheus.enabled=true \
30 | --set hubble.enabled=true \
31 | --set hubble.metrics.enabled="{dns,drop:destinationContext=pod;sourceContext=pod,tcp,flow,port-distribution,icmp,http:destinationContext=pod}" \
32 | `# enable host firewall:` \
33 | --set hostFirewall.enabled=true \
34 | --set devices='{eth0}' \
35 | --wait
36 | ```
37 |
38 | The `devices` flag refers to the network devices Cilium is configured on, such as `eth0`. Omitting this option causes Cilium to auto-detect the interfaces the host firewall applies to.
39 |
40 | Make sure to restart the `cilium` Pods with:
41 |
42 | ```bash
43 | kubectl -n kube-system rollout restart ds/cilium
44 | ```
45 |
46 | At this point, the Cilium-managed nodes are ready to enforce Network Policies.
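 | 
 | You can verify that the host firewall is now active in the agent status (a quick check; the exact wording may differ between Cilium versions):
 | 
 | ```bash
 | kubectl -n kube-system exec ds/cilium -- cilium status | grep "Host firewall"
 | ```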
47 |
48 |
49 | ## {{% task %}} Attach a Label to the Node
50 |
51 | In this lab, we will apply host policies only to nodes with the label `node-access=ssh`. We thus first need to attach that label to a node in the cluster.
52 |
53 | ```bash
54 | kubectl label node cluster1 node-access=ssh
55 | ```
56 |
57 |
58 | ## {{% task %}} Enable Policy Audit Mode for the Host Endpoint
59 |
60 | [Host Policies](https://docs.cilium.io/en/latest/policy/language/#hostpolicies) enforce access control over connectivity to and from nodes. Particular care must be taken to ensure that when host policies are imported, Cilium does not block access to the nodes or break the cluster’s normal behavior (for example by blocking communication with kube-apiserver).
61 |
62 | To avoid such issues, we can switch the host firewall into audit mode to validate the impact of host policies before enforcing them. When Policy Audit Mode is enabled, no network policy is enforced, so this setting is not recommended for production deployments.
63 |
64 | ```bash
65 | CILIUM_POD_NAME=$(kubectl -n kube-system get pods -l "k8s-app=cilium" -o jsonpath="{.items[?(@.spec.nodeName=='cluster1')].metadata.name}")
66 | HOST_EP_ID=$(kubectl -n kube-system exec $CILIUM_POD_NAME -- cilium endpoint list -o jsonpath='{[?(@.status.identity.id==1)].id}')
67 | kubectl -n kube-system exec $CILIUM_POD_NAME -- cilium endpoint config $HOST_EP_ID PolicyAuditMode=Enabled
68 | ```
69 |
70 | Verification:
71 |
72 | ```bash
73 | kubectl -n kube-system exec $CILIUM_POD_NAME -- cilium endpoint config $HOST_EP_ID | grep PolicyAuditMode
74 | ```
75 |
76 | The output should show you:
77 |
78 | ```
79 | PolicyAuditMode Enabled
80 | ```
81 |
82 |
83 | ## {{% task %}} Apply a Host Network Policy
84 |
85 | Host Policies match on node labels using a Node Selector to identify the nodes to which the policy applies. The following policy applies to the nodes carrying the `node-access=ssh` label we attached above. It allows communications from outside the cluster only on port TCP/22. All communications from the cluster to the hosts are allowed.
86 |
87 | Host policies don’t apply to communications between pods or between pods and the outside of the cluster, except if those pods are host-networking pods.
88 |
89 | Create a file `ccwnp.yaml` with the following content:
90 |
91 | {{< readfile file="/content/en/docs/10/01/ccwnp.yaml" code="true" lang="yaml" >}}
92 |
93 | And then apply this `CiliumClusterwideNetworkPolicy` with:
94 |
95 | ```bash
96 | kubectl apply -f ccwnp.yaml
97 | ```
98 |
99 | The host is represented as a special endpoint, with label `reserved:host`, in the output of the command `cilium endpoint list`. You can therefore inspect the status of the policy using that command:
100 |
101 | ```bash
102 | kubectl -n kube-system exec $(kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[0].metadata.name}') -- cilium endpoint list
103 | ```
104 | You will see that the ingress policy enforcement for the `reserved:host` endpoint is `Disabled` but with `Audit` enabled:
105 |
106 | ```
107 | Defaulted container "cilium-agent" out of: cilium-agent, mount-cgroup (init), clean-cilium-state (init)
108 | ENDPOINT POLICY (ingress) POLICY (egress) IDENTITY LABELS (source:key[=value]) IPv6 IPv4 STATUS
109 | ENFORCEMENT ENFORCEMENT
110 | 671 Disabled (Audit) Disabled 1 k8s:minikube.k8s.io/commit=3e64b11ed75e56e4898ea85f96b2e4af0301f43d ready
111 | k8s:minikube.k8s.io/name=cluster1
112 | k8s:minikube.k8s.io/updated_at=2022_02_14T13_45_35_0700
113 | k8s:minikube.k8s.io/version=v1.25.1
114 | k8s:node-access=ssh
115 | k8s:node-role.kubernetes.io/control-plane
116 | k8s:node-role.kubernetes.io/master
117 | k8s:node.kubernetes.io/exclude-from-external-load-balancers
118 | reserved:host
119 | 810 Disabled Disabled 129160 k8s:io.cilium.k8s.namespace.labels.kubernetes.io/metadata.name=kube-system 10.1.0.249 ready
120 | k8s:io.cilium.k8s.policy.cluster=cluster1
121 | k8s:io.cilium.k8s.policy.serviceaccount=coredns
122 | k8s:io.kubernetes.pod.namespace=kube-system
123 | k8s:k8s-app=kube-dns
124 | 4081 Disabled Disabled 4 reserved:health
125 | ```
126 |
127 |
128 | As long as the host endpoint is running in audit mode, communications disallowed by the policy won’t be dropped. They will however be reported by `cilium monitor` as `action audit`. The audit mode thus allows you to adjust the host policy to your environment, to avoid unexpected connection breakages.
129 |
130 | You can monitor the policy verdicts with:
131 |
132 | ```bash
133 | kubectl -n kube-system exec $(kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[0].metadata.name}') -- cilium monitor -t policy-verdict --related-to $HOST_EP_ID
134 | ```
135 |
136 | Open a second terminal to produce some traffic:
137 |
138 | {{% alert title="Note" color="primary" %}}
139 | If you are working in our Webshell environment, make sure to first log in again to your VM after opening the second terminal.
140 | {{% /alert %}}
141 |
142 | ```bash
143 | curl -k https://192.168.49.2:8443
144 | ```
145 |
146 | Also try to start an SSH session (you can cancel the command when the password prompt is shown):
147 |
148 | ```bash
149 | ssh 192.168.49.2
150 | ```
151 |
152 | In the verdict log you should see an output similar to the following one. For the `curl` request you see that the action is set to `audit`:
153 |
154 | ```
155 | Policy verdict log: flow 0xfd71ed86 local EP ID 671, remote ID world, proto 6, ingress, action audit, match none, 192.168.49.1:50760 -> 192.168.49.2:8443 tcp SYN
156 | Policy verdict log: flow 0xfd71ed86 local EP ID 671, remote ID world, proto 6, ingress, action audit, match none, 192.168.49.1:50760 -> 192.168.49.2:8443 tcp SYN
157 | ```
158 |
159 | The request to the SSH port has action `allow`:
160 |
161 | ```
162 | Policy verdict log: flow 0x6b5b1b60 local EP ID 671, remote ID world, proto 6, ingress, action allow, match L4-Only, 192.168.49.1:48254 -> 192.168.49.2:22 tcp SYN
163 | Policy verdict log: flow 0x6b5b1b60 local EP ID 671, remote ID world, proto 6, ingress, action allow, match L4-Only, 192.168.49.1:48254 -> 192.168.49.2:22 tcp SYN
164 | ```
165 |
166 |
167 | ## {{% task %}} Clean Up
168 |
169 | Once you are confident all required communication to the host from outside the cluster is allowed, you can disable policy audit mode to enforce the host policy.
170 |
171 | {{% alert title="Note" color="primary" %}}
172 | When enforcing the host policy, make sure that none of the communications required to access the cluster or for the cluster to work properly are denied. They should appear as `action allow`.
173 | {{% /alert %}}
174 |
175 | We are not going to do this extended task (as it would require some more rules for the cluster to continue working). But the command to disable the audit mode looks like this:
176 |
177 | ```
178 | # kubectl -n kube-system exec $CILIUM_POD_NAME -- cilium endpoint config $HOST_EP_ID PolicyAuditMode=Disabled
179 | ```
180 |
181 | Simply clean up and continue:
182 |
183 | ```bash
184 | kubectl delete ccnp demo-host-policy
185 | kubectl label node cluster1 node-access-
186 | ```
187 |
--------------------------------------------------------------------------------
/content/en/docs/09/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Enable Cluster Mesh"
3 | weight: 91
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 |
8 | ## {{% task %}} Create a second Kubernetes Cluster
9 |
10 | To create a Cluster Mesh, we need a second Kubernetes cluster. For the Cluster Mesh to work, the PodCIDR ranges in all clusters and nodes must be non-conflicting and have unique IP addresses. The nodes in all clusters must have IP connectivity between each other and the network between the clusters must allow inter-cluster communication.
11 |
12 | {{% alert title="Note" color="primary" %}}
13 | The exact ports are documented in the [Firewall Rules](https://docs.cilium.io/en/v1.12/operations/system_requirements/#firewall-requirements) section.
14 | {{% /alert %}}
15 |
16 | To start a second cluster run the following command:
17 |
18 | ```bash
19 | minikube start --network-plugin=cni --cni=false --kubernetes-version={{% param "kubernetesVersion" %}} -p cluster2
20 | ```
21 |
22 | As Minikube with the Docker driver uses separate Docker networks, we need to make sure that your system forwards traffic between the two networks. To enable this forwarding, execute:
23 |
24 | ```bash
25 | sudo iptables -I DOCKER-USER -j ACCEPT
26 | ```
27 |
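To verify that the rule was actually inserted at the top of the `DOCKER-USER` chain, you can list the chain (purely a sanity check, not required for the lab):

```bash
# The ACCEPT rule we just inserted should show up as rule number 1
sudo iptables -L DOCKER-USER -n --line-numbers
```
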
28 | Then install Cilium using Helm. Remember, we need a different PodCIDR for the second cluster, therefore while installing Cilium, we have to change this config:
29 |
30 | ```bash
31 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
32 | --namespace kube-system \
33 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.2.0.0/16} \
34 | --set cluster.name=cluster2 \
35 | --set cluster.id=2 \
36 | --set operator.replicas=1 \
37 | --set kubeProxyReplacement=disabled \
38 | --wait
39 | ```
40 |
41 | Then wait until the cluster and Cilium are ready:
42 |
43 | ```bash
44 | cilium status --wait
45 | ```
46 |
47 | ```
48 | /¯¯\
49 | /¯¯\__/¯¯\ Cilium: OK
50 | \__/¯¯\__/ Operator: OK
51 | /¯¯\__/¯¯\ Hubble: disabled
52 | \__/¯¯\__/ ClusterMesh: disabled
53 | \__/
54 |
55 | DaemonSet cilium Desired: 1, Ready: 1/1, Available: 1/1
56 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1
57 | Containers: cilium-operator Running: 1
58 | cilium Running: 1
59 | Cluster Pods: 1/1 managed by Cilium
60 | Image versions cilium quay.io/cilium/cilium:v{{% param "ciliumVersion.postUpgrade" %}}: 1
61 | cilium-operator quay.io/cilium/operator-generic:v{{% param "ciliumVersion.postUpgrade" %}}: 1
62 | ```
63 |
64 | You can verify the correct PodCIDR using:
65 |
66 | ```bash
67 | kubectl get pod -A -o wide
68 | ```
69 |
70 | Have a look at the `coredns-` Pod and verify that its IP is from your defined `10.2.0.0/16` range.
71 |
72 | ```
73 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
74 | kube-system cilium-operator-776958f5bb-m5hww 1/1 Running 0 29s 192.168.58.2 cluster2
75 | kube-system cilium-qg9xj 1/1 Running 0 29s 192.168.58.2 cluster2
76 | kube-system coredns-558bd4d5db-z6cxh 1/1 Running 0 38s 10.2.0.240 cluster2
77 | kube-system etcd-cluster2 1/1 Running 0 44s 192.168.58.2 cluster2
78 | kube-system kube-apiserver-cluster2 1/1 Running 0 44s 192.168.58.2 cluster2
79 | kube-system kube-controller-manager-cluster2 1/1 Running 0 44s 192.168.58.2 cluster2
80 | kube-system kube-proxy-bqk4r 1/1 Running 0 38s 192.168.58.2 cluster2
81 | kube-system kube-scheduler-cluster2 1/1 Running 0 44s 192.168.58.2 cluster2
82 | kube-system storage-provisioner 1/1 Running 1 49s 192.168.58.2 cluster2
83 | ```
84 |
85 | The second cluster and Cilium are ready to use.
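
If you want to double-check that the two clusters really use distinct pod CIDR pools, you can also compare the Cilium configuration of both clusters directly. This is a small sketch; it simply greps for CIDR-related keys in the `cilium-config` ConfigMap, since the exact key name may differ between chart versions:

```bash
# Compare the configured pod CIDR pools of cluster1 (10.1.0.0/16) and cluster2 (10.2.0.0/16)
kubectl --context cluster1 -n kube-system get configmap cilium-config -o yaml | grep -i cidr
kubectl --context cluster2 -n kube-system get configmap cilium-config -o yaml | grep -i cidr
```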
86 |
87 |
88 | ## {{% task %}} Enable Cluster Mesh on both Clusters
89 |
90 | Now let us enable the Cluster Mesh using the `cilium` CLI on both clusters:
91 |
92 |
93 | {{% alert title="Note" color="primary" %}}
94 | Although we have so far used Helm to install and update Cilium, enabling Cilium Cluster Mesh using Helm is currently [undocumented](https://github.com/cilium/cilium/issues/19057). We therefore make an exception to the rule of never mixing Helm and CLI installations and enable Cluster Mesh with the CLI.
95 | {{% /alert %}}
96 |
97 | ```bash
98 | cilium clustermesh enable --context cluster1 --service-type NodePort
99 | cilium clustermesh enable --context cluster2 --service-type NodePort
100 | ```
101 |
102 | You can now verify the Cluster Mesh status using:
103 |
104 | ```bash
105 | cilium clustermesh status --context cluster1 --wait
106 | ```
107 |
108 | ```
109 | ⚠️ Service type NodePort detected! Service may fail when nodes are removed from the cluster!
110 | ✅ Cluster access information is available:
111 | - 192.168.49.2:31839
112 | ✅ Service "clustermesh-apiserver" of type "NodePort" found
113 | ⌛ [cluster1] Waiting for deployment clustermesh-apiserver to become ready...
114 | 🔌 Cluster Connections:
115 | 🔀 Global services: [ min:0 / avg:0.0 / max:0 ]
116 | ```
117 |
118 | To connect the two clusters, the following step needs to be done in one direction only. The connection will automatically be established in both directions:
119 |
120 | ```bash
121 | cilium clustermesh connect --context cluster1 --destination-context cluster2
122 | ```
123 |
124 | The output should look something like this:
125 |
126 | ```
127 | ✨ Extracting access information of cluster cluster2...
128 | 🔑 Extracting secrets from cluster cluster2...
129 | ⚠️ Service type NodePort detected! Service may fail when nodes are removed from the cluster!
130 | ℹ️ Found ClusterMesh service IPs: [192.168.58.2]
131 | ✨ Extracting access information of cluster cluster1...
132 | 🔑 Extracting secrets from cluster cluster1...
133 | ⚠️ Service type NodePort detected! Service may fail when nodes are removed from the cluster!
134 | ℹ️ Found ClusterMesh service IPs: [192.168.49.2]
135 | ✨ Connecting cluster cluster1 -> cluster2...
136 | 🔑 Secret cilium-clustermesh does not exist yet, creating it...
137 | 🔑 Patching existing secret cilium-clustermesh...
138 | ✨ Patching DaemonSet with IP aliases cilium-clustermesh...
139 | ✨ Connecting cluster cluster2 -> cluster1...
140 | 🔑 Secret cilium-clustermesh does not exist yet, creating it...
141 | 🔑 Patching existing secret cilium-clustermesh...
142 | ✨ Patching DaemonSet with IP aliases cilium-clustermesh...
143 | ✅ Connected cluster cluster1 and cluster2!
144 | ```
145 |
146 | It may take a moment for the clusters to be connected. You can wait for the connection to be established with:
147 |
148 | ```bash
149 | cilium clustermesh status --context cluster1 --wait
150 | ```
151 | The output should be:
152 |
153 | ```
154 | ⚠️ Service type NodePort detected! Service may fail when nodes are removed from the cluster!
155 | ✅ Cluster access information is available:
156 | - 192.168.58.2:32117
157 | ✅ Service "clustermesh-apiserver" of type "NodePort" found
158 | ⌛ [cluster2] Waiting for deployment clustermesh-apiserver to become ready...
159 | ✅ All 1 nodes are connected to all clusters [min:1 / avg:1.0 / max:1]
160 | 🔌 Cluster Connections:
161 | - cluster1: 1/1 configured, 1/1 connected
162 | 🔀 Global services: [ min:3 / avg:3.0 / max:3 ]
163 | ```
164 |
165 | The two clusters are now connected.
166 |
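As a quick additional check, you can ask a Cilium agent which nodes it knows about. Once the mesh is established, the agent should list the nodes of both clusters (a sketch; the exact output format of `cilium node list` can differ between Cilium versions):

```bash
# The node of cluster2 should now appear in cluster1's agent node list (and vice versa)
kubectl --context cluster1 -n kube-system exec ds/cilium -- cilium node list
```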
167 |
168 | ## {{% task %}} Cluster Mesh Troubleshooting
169 |
170 | Use the following commands to troubleshoot issues with Cluster Mesh:
171 |
172 | ```bash
173 | cilium status --context cluster1
174 | ```
175 |
176 | or
177 |
178 | ```bash
179 | cilium status --context cluster2
180 | ```
181 |
182 | which gives you an output similar to this:
183 |
184 | ```
185 | /¯¯\
186 | /¯¯\__/¯¯\ Cilium: OK
187 | \__/¯¯\__/ Operator: OK
188 | /¯¯\__/¯¯\ Hubble: OK
189 | \__/¯¯\__/ ClusterMesh: OK
190 | \__/
191 |
192 | DaemonSet cilium Desired: 1, Ready: 1/1, Available: 1/1
193 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1
194 | Deployment hubble-relay Desired: 1, Ready: 1/1, Available: 1/1
195 | Deployment clustermesh-apiserver Desired: 1, Ready: 1/1, Available: 1/1
196 | Containers: cilium Running: 1
197 | cilium-operator Running: 1
198 | hubble-relay Running: 1
199 | clustermesh-apiserver Running: 1
200 | Cluster Pods: 6/6 managed by Cilium
201 | Image versions cilium quay.io/cilium/cilium:v{{% param "ciliumVersion.postUpgrade" %}}: 1
202 | cilium-operator quay.io/cilium/operator-generic:v{{% param "ciliumVersion.postUpgrade" %}}: 1
203 | hubble-relay quay.io/cilium/hubble-relay:v{{% param "ciliumVersion.postUpgrade" %}}: 1
204 | clustermesh-apiserver quay.io/coreos/etcd:v3.4.13: 1
205 | clustermesh-apiserver quay.io/cilium/clustermesh-apiserver:v{{% param "ciliumVersion.postUpgrade" %}}: 1
206 |
207 | ```
208 |
209 |
210 | If you cannot resolve the issue with the above commands, follow the steps in [Cilium's Cluster Mesh Troubleshooting Guide](https://docs.cilium.io/en/v1.12/operations/troubleshooting/#troubleshooting-clustermesh).
211 |
--------------------------------------------------------------------------------
/content/en/docs/06/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Network Policies"
3 | weight: 6
4 | OnlyWhenNot: techlab
5 | ---
6 |
7 | ## Network Policies
8 |
9 | One CNI function is the ability to enforce network policies and implement an in-cluster zero-trust container strategy. Network policies are a default Kubernetes object for controlling network traffic, but a CNI such as Cilium is required to enforce them. We will demonstrate traffic blocking with our simple app.
10 |
11 | {{% alert title="Note" color="primary" %}}
12 | If you are not yet familiar with Kubernetes Network Policies, we suggest reading the [Kubernetes Documentation](https://kubernetes.io/docs/concepts/services-networking/network-policies/).
13 | {{% /alert %}}
14 |
15 |
16 | ## {{% task %}} Cilium Endpoints and Identities
17 |
18 | Each Pod from our simple application is represented in Cilium as an [Endpoint](https://docs.cilium.io/en/stable/gettingstarted/terminology/#endpoint). We can use the `cilium` tool inside a Cilium Pod to list them.
19 |
20 | First get all Cilium pods with:
21 |
22 | ```bash
23 | kubectl -n kube-system get pods -l k8s-app=cilium
24 | ```
25 |
26 | ```
27 | NAME READY STATUS RESTARTS AGE
28 | cilium-ksr7h   1/1     Running   0          13m16s
29 | ```
30 |
31 | and then run the following, replacing `<cilium-pod-name>` with one of the Pod names from the output:
32 |
33 | ```bash
34 | kubectl -n kube-system exec <cilium-pod-name> -- cilium endpoint list
35 | ```
36 |
37 | {{% alert title="Note" color="primary" %}}
38 | Or we just execute the first Pod of the DaemonSet:
39 |
40 | ```bash
41 | kubectl -n kube-system exec ds/cilium -- cilium endpoint list
42 | ```
43 | {{% /alert %}}
44 |
45 | Cilium matches these endpoints with their labels and generates identities from them. The identity is what is used to enforce basic connectivity between endpoints. We can observe how an endpoint's identity changes when its labels change:
46 |
47 | ```bash
48 | kubectl run test-identity --image=nginx
49 | sleep 5 # just wait for the pod to get ready
50 | kubectl -n kube-system exec daemonset/cilium -- cilium endpoint list | grep -E -B4 -A1 'IDENTITY|run'
51 | kubectl label pod test-identity this=that
52 | sleep 5 # give some time to process
53 | kubectl -n kube-system exec daemonset/cilium -- cilium endpoint list | grep -E -B4 -A1 'IDENTITY|run'
54 | kubectl delete pod test-identity
55 | ```
56 |
57 | We see that the number in the IDENTITY column for this Pod has changed after we added another label. If you run `endpoint list` right after labeling the Pod, you might also see `waiting-for-identity` as the status of the endpoint.
58 |
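The identities themselves are also stored as cluster-wide custom resources. If you want to inspect them, you can list them and look at the labels behind a specific identity. This is a small sketch; `<IDENTITY-ID>` is a placeholder for one of the numbers from the IDENTITY column:

```bash
# List all identities Cilium has allocated for the current label sets
kubectl get ciliumidentities
# Inspect the labels behind one identity (replace <IDENTITY-ID> with a real number)
kubectl get ciliumidentity <IDENTITY-ID> -o yaml
```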
59 |
60 | ## {{% task %}} Verify connectivity
61 |
62 | Make sure your `FRONTEND` and `NOT_FRONTEND` environment variables are still set. Otherwise set them again:
63 |
64 | ```bash
65 | FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
66 | echo ${FRONTEND}
67 | NOT_FRONTEND=$(kubectl get pods -l app=not-frontend -o jsonpath='{.items[0].metadata.name}')
68 | echo ${NOT_FRONTEND}
69 | ```
70 |
71 | Now we generate some traffic as a baseline test.
72 |
73 | ```bash
74 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
75 | ```
76 |
77 | and
78 |
79 |
80 | ```bash
81 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
82 | ```
83 |
84 | This will execute a simple `curl` call from the `frontend` and `not-frontend` applications to the `backend` application:
85 |
86 | ```
87 | # Frontend
88 | HTTP/1.1 200 OK
89 | X-Powered-By: Express
90 | Vary: Origin, Accept-Encoding
91 | Access-Control-Allow-Credentials: true
92 | Accept-Ranges: bytes
93 | Cache-Control: public, max-age=0
94 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
95 | ETag: W/"83d-7438674ba0"
96 | Content-Type: text/html; charset=UTF-8
97 | Content-Length: 2109
98 | Date: Tue, 23 Nov 2021 12:50:44 GMT
99 | Connection: keep-alive
100 |
101 | # Not Frontend
102 | HTTP/1.1 200 OK
103 | X-Powered-By: Express
104 | Vary: Origin, Accept-Encoding
105 | Access-Control-Allow-Credentials: true
106 | Accept-Ranges: bytes
107 | Cache-Control: public, max-age=0
108 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
109 | ETag: W/"83d-7438674ba0"
110 | Content-Type: text/html; charset=UTF-8
111 | Content-Length: 2109
112 | Date: Tue, 23 Nov 2021 12:50:44 GMT
113 | Connection: keep-alive
114 | ```
115 |
116 | and we see, both applications can connect to the `backend` application.
117 |
118 | Until now, ingress and egress policy enforcement has been disabled on all of our pods because no network policy selecting any of them has been imported yet. Let us change this.
119 |
120 |
121 | ## {{% task %}} Deny traffic with a Network Policy
122 |
123 | We block traffic by applying a network policy. Create a file `backend-ingress-deny.yaml` with the following content:
124 |
125 | {{< readfile file="/content/en/docs/06/backend-ingress-deny.yaml" code="true" lang="yaml" >}}
126 |
127 | The policy will deny all ingress traffic as it is of type Ingress but specifies no allow rule, and will be applied to all pods with the `app=backend` label thanks to the podSelector.
128 |
129 | Ok, then let's create the policy with:
130 |
131 | ```bash
132 | kubectl apply -f backend-ingress-deny.yaml
133 | ```
134 |
135 | and you can verify the created `NetworkPolicy` with:
136 |
137 | ```bash
138 | kubectl get netpol
139 | ```
140 |
141 | which gives you an output similar to this:
142 |
143 | ```
144 |
145 | NAME POD-SELECTOR AGE
146 | backend-ingress-deny app=backend 2s
147 |
148 | ```
149 |
150 |
151 | ## {{% task %}} Verify connectivity again
152 |
153 | We can now execute the connectivity check again:
154 |
155 | ```bash
156 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
157 | ```
158 |
159 | and
160 |
161 | ```bash
162 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
163 | ```
164 |
165 | but this time you see that the `frontend` and `not-frontend` application cannot connect anymore to the `backend`:
166 |
167 | ```
168 | # Frontend
169 | curl: (28) Connection timed out after 5001 milliseconds
170 | command terminated with exit code 28
171 | # Not Frontend
172 | curl: (28) Connection timed out after 5001 milliseconds
173 | command terminated with exit code 28
174 | ```
175 |
176 | The network policy correctly switched the ingress behavior for the selected pods from default allow to default deny. We can also check this in Grafana.
177 |
178 | {{% alert title="Note" color="primary" %}}
179 | Our earlier Grafana port-forward should still be running (check with `jobs` or `ps aux | grep "grafana"`). If it is not, run it again and open the URL from the command output below (or http://localhost:3000/dashboards with a local setup).
180 |
181 | ```bash
182 | kubectl -n cilium-monitoring port-forward service/grafana --address 0.0.0.0 --address :: 3000:3000 &
183 | echo "http://$(curl -s ifconfig.me):3000/dashboards"
184 | ```
185 | {{% /alert %}}
186 |
187 |
188 | In Grafana, browse to the `Hubble` dashboard. You should now see data in more graphs. Check the graphs `Drop Reason` and `Forwarded vs Dropped`. In `Top 10 Source Pods with Denied Packets` you should find the names of the pods from our simple application.
189 |
190 | Let's now selectively re-allow traffic again, but only from frontend to backend.
191 |
192 |
193 | ## {{% task %}} Allow traffic from frontend to backend
194 |
195 | We can do it by crafting a new network policy manually, but we can also use the Network Policy Editor to help us out:
196 |
197 | 
198 |
199 | Above you see our original policy; we now create a new one with the editor.
200 |
201 | * Go to https://networkpolicy.io/editor.
202 | * Name the network policy `backend-allow-ingress-frontend` (using the Edit button in the center).
203 | * Add `app=backend` as Pod Selector.
204 | * Set Ingress to default deny.
205 |
206 | 
207 |
208 | * On the ingress side, add `app=frontend` as podSelector for pods in the same Namespace.
209 |
210 | 
211 |
212 | * Inspect the ingress flow colors: the policy will deny all ingress traffic to pods labeled `app=backend`, except for traffic coming from pods labeled `app=frontend`.
213 |
214 | 
215 |
216 |
217 | * Copy the policy YAML into a file named `backend-allow-ingress-frontend.yaml`. Make sure to use the `NetworkPolicy` and not the `CiliumNetworkPolicy`.
218 |
219 | The file should look like this:
220 |
221 | {{< readfile file="/content/en/docs/06/backend-allow-ingress-frontend.yaml" code="true" lang="yaml" >}}
222 |
223 | Apply the new policy:
224 |
225 | ```bash
226 | kubectl apply -f backend-allow-ingress-frontend.yaml
227 | ```
228 |
229 | and then execute the connectivity test again:
230 |
231 | ```bash
232 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
233 | ```
234 |
235 | and
236 |
237 | ```bash
238 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
239 | ```
240 |
241 | This time, the `frontend` application is able to connect to the `backend` but the `not-frontend` application still cannot connect to the `backend`:
242 |
243 | ```
244 | # Frontend
245 | HTTP/1.1 200 OK
246 | X-Powered-By: Express
247 | Vary: Origin, Accept-Encoding
248 | Access-Control-Allow-Credentials: true
249 | Accept-Ranges: bytes
250 | Cache-Control: public, max-age=0
251 | Last-Modified: Sat, 26 Oct 1985 08:15:00 GMT
252 | ETag: W/"83d-7438674ba0"
253 | Content-Type: text/html; charset=UTF-8
254 | Content-Length: 2109
255 | Date: Tue, 23 Nov 2021 13:08:27 GMT
256 | Connection: keep-alive
257 |
258 | # Not Frontend
259 | curl: (28) Connection timed out after 5001 milliseconds
260 | command terminated with exit code 28
261 |
262 | ```
263 |
264 | Note that this is working despite the fact that we did not delete the previous `backend-ingress-deny` policy:
265 |
266 | ```bash
267 | kubectl get netpol
268 | ```
269 |
270 | ```
271 | NAME POD-SELECTOR AGE
272 | backend-allow-ingress-frontend app=backend 2m7s
273 | backend-ingress-deny app=backend 12m
274 |
275 | ```
276 |
277 | Network policies are additive. Just like with firewalls, it is thus a good idea to have default DENY policies and then add more specific ALLOW policies as needed.
278 |
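Since both policies select the same pods, it can be handy to look at their rules side by side; `kubectl describe` accepts several names at once:

```bash
# Compare the deny-all policy with the more specific allow policy
kubectl describe netpol backend-ingress-deny backend-allow-ingress-frontend
```
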
279 | We can verify our connection being blocked with Hubble.
280 |
281 | Generate some traffic.
282 |
283 | ```bash
284 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
285 | ```
286 |
287 | With `hubble observe` you can now check the packet being dropped as well as the reason why (Policy denied).
288 |
289 | {{% alert title="Note" color="primary" %}}
290 | Our earlier port-forward should still be running (check with `jobs` or `ps aux | grep "port-forward svc/hubble-relay"`). If it is not, `hubble status` will fail and we have to run it again:
291 |
292 | ```bash
293 | kubectl -n kube-system port-forward svc/hubble-relay 4245:80 &
294 | hubble status
295 | ```
296 |
297 | {{% /alert %}}
298 |
299 | ```bash
300 | hubble observe --from-label app=not-frontend --to-label app=backend
301 | ```
302 |
303 | And the output should look like this:
304 | ```
305 | Jan 26 09:07:03.396: default/not-frontend-7db9747986-gktg6:45002 (ID:84671) <> default/backend-6f884b6495-69bbh:8080 (ID:68421) policy-verdict:none INGRESS DENIED (TCP Flags: SYN)
306 | Jan 26 09:07:03.396: default/not-frontend-7db9747986-gktg6:45002 (ID:84671) <> default/backend-6f884b6495-69bbh:8080 (ID:68421) Policy denied DROPPED (TCP Flags: SYN)
307 | Jan 26 09:07:04.401: default/not-frontend-7db9747986-gktg6:45002 (ID:84671) <> default/backend-6f884b6495-69bbh:8080 (ID:68421) policy-verdict:none INGRESS DENIED (TCP Flags: SYN)
308 | Jan 26 09:07:04.401: default/not-frontend-7db9747986-gktg6:45002 (ID:84671) <> default/backend-6f884b6495-69bbh:8080 (ID:68421) Policy denied DROPPED (TCP Flags: SYN)
309 | Jan 26 09:07:06.418: default/not-frontend-7db9747986-gktg6:45002 (ID:84671) <> default/backend-6f884b6495-69bbh:8080 (ID:68421) policy-verdict:none INGRESS DENIED (TCP Flags: SYN)
310 | Jan 26 09:07:06.418: default/not-frontend-7db9747986-gktg6:45002 (ID:84671) <> default/backend-6f884b6495-69bbh:8080 (ID:68421) Policy denied DROPPED (TCP Flags: SYN)
311 | ```
312 |
313 |
314 | ## {{% task %}} Inspecting the Cilium endpoints again
315 |
316 | We can now check the Cilium endpoints again.
317 |
318 | ```bash
319 | kubectl -n kube-system exec -it ds/cilium -- cilium endpoint list
320 | ```
321 |
322 | We now see that the pods with the label `app=backend` have ingress policy enforcement enabled.
323 |
324 |
325 | ```
326 | ENDPOINT POLICY (ingress) POLICY (egress) IDENTITY LABELS (source:key[=value]) IPv6 IPv4 STATUS
327 | ENFORCEMENT ENFORCEMENT
328 | 248 Enabled Disabled 68421 k8s:app=backend 10.1.0.1 ready
329 | k8s:io.cilium.k8s.namespace.labels.kubernetes.io/metadata.name=default
330 | k8s:io.cilium.k8s.policy.cluster=cluster1
331 | k8s:io.cilium.k8s.policy.serviceaccount=default
332 | k8s:io.kubernetes.pod.namespace=default
333 | ```
334 |
--------------------------------------------------------------------------------
/content/en/docs/03/01/_index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Hubble"
3 | weight: 31
4 | ---
5 |
6 | Before we start with the CNI functionality of Cilium and its security components, we want to enable the optional Hubble component (which is disabled by default) so we can take full advantage of its eBPF observability capabilities.
7 |
8 |
9 | ## {{% task %}} Install the Hubble CLI
10 |
11 | Similar to the `cilium` CLI, the `hubble` CLI interfaces with Hubble and allows observing network traffic within Kubernetes.
12 |
13 | So let us install the `hubble` CLI.
14 |
15 |
16 | ### Linux/Webshell Setup
17 |
18 | Execute the following command to download the `hubble` CLI:
19 |
20 | ```bash
21 | curl -L --remote-name-all https://github.com/cilium/hubble/releases/download/v{{% param "hubbleVersion" %}}/hubble-linux-amd64.tar.gz{,.sha256sum}
22 | sha256sum --check hubble-linux-amd64.tar.gz.sha256sum
23 | sudo tar xzvfC hubble-linux-amd64.tar.gz /usr/local/bin
24 | rm hubble-linux-amd64.tar.gz{,.sha256sum}
25 |
26 | ```
27 |
28 |
29 | ### macOS Setup
30 |
31 | Execute the following command to download the `hubble` CLI:
32 |
33 | ```bash
34 | curl -L --remote-name-all https://github.com/cilium/hubble/releases/download/v{{% param "hubbleVersion" %}}/hubble-darwin-amd64.tar.gz{,.sha256sum}
35 | shasum -a 256 -c hubble-darwin-amd64.tar.gz.sha256sum
36 | sudo tar xzvfC hubble-darwin-amd64.tar.gz /usr/local/bin
37 | rm hubble-darwin-amd64.tar.gz{,.sha256sum}
38 |
39 | ```
40 |
41 |
42 | ## Hubble CLI
43 |
44 | Now that we have the `hubble` CLI let's have a look at some commands:
45 |
46 | ```bash
47 | hubble version
48 | ```
49 |
50 | should show
51 |
52 | ```
53 | hubble {{% param "hubbleVersion" %}} compiled with go1.19.5 on linux/amd64
54 | ```
55 |
56 | or
57 | ```bash
58 | hubble help
59 | ```
60 | should show
61 | ```
62 | Hubble is a utility to observe and inspect recent Cilium routed traffic in a cluster.
63 |
64 | Usage:
65 | hubble [command]
66 |
67 | Available Commands:
68 | completion Output shell completion code
69 | config Modify or view hubble config
70 | help Help about any command
71 | list List Hubble objects
72 | observe Observe flows of a Hubble server
73 | status Display status of Hubble server
74 | version Display detailed version information
75 |
76 | Global Flags:
77 | --config string Optional config file (default "/home/user/.config/hubble/config.yaml")
78 | -D, --debug Enable debug messages
79 |
80 | Get help:
81 | -h, --help Help for any command or subcommand
82 |
83 | Use "hubble [command] --help" for more information about a command.
84 |
85 | ```
86 |
87 |
88 | ## {{% task %}} Deploy a simple application
89 |
90 | Before we enable Hubble in Cilium we want to make sure we have at least one application to observe.
91 |
92 | Let's have a look at the following resource definitions:
93 |
94 | {{< readfile file="/content/en/docs/03/01/simple-app.yaml" code="true" lang="yaml" >}}
95 |
96 | The application consists of two client deployments (`frontend` and `not-frontend`) and one backend deployment (`backend`). We are going to send requests from the frontend and not-frontend pods to the backend pod.
97 |
98 | Create a file `simple-app.yaml` with the above content.
99 |
100 | Deploy the app:
101 |
102 | ```bash
103 | kubectl apply -f simple-app.yaml
104 | ```
105 |
106 | This gives you the following output:
107 |
108 | ```
109 | deployment.apps/frontend created
110 | deployment.apps/not-frontend created
111 | deployment.apps/backend created
112 | service/backend created
113 | ```
114 |
115 | Verify with the following command that everything is up and running:
116 |
117 | ```bash
118 | kubectl get all,cep,ciliumid
119 | ```
120 |
121 | ```
122 | NAME READY STATUS RESTARTS AGE
123 | pod/backend-65f7c794cc-b9j66 1/1 Running 0 3m17s
124 | pod/frontend-76fbb99468-mbzcm 1/1 Running 0 3m17s
125 | pod/not-frontend-8f467ccbd-cbks8 1/1 Running 0 3m17s
126 |
127 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
128 | service/backend ClusterIP 10.97.228.29 8080/TCP 3m17s
129 | service/kubernetes ClusterIP 10.96.0.1 443/TCP 45m
130 |
131 | NAME READY UP-TO-DATE AVAILABLE AGE
132 | deployment.apps/backend 1/1 1 1 3m17s
133 | deployment.apps/frontend 1/1 1 1 3m17s
134 | deployment.apps/not-frontend 1/1 1 1 3m17s
135 |
136 | NAME DESIRED CURRENT READY AGE
137 | replicaset.apps/backend-65f7c794cc 1 1 1 3m17s
138 | replicaset.apps/frontend-76fbb99468 1 1 1 3m17s
139 | replicaset.apps/not-frontend-8f467ccbd 1 1 1 3m17s
140 |
141 | NAME ENDPOINT ID IDENTITY ID INGRESS ENFORCEMENT EGRESS ENFORCEMENT VISIBILITY POLICY ENDPOINT STATE IPV4 IPV6
142 | ciliumendpoint.cilium.io/backend-65f7c794cc-b9j66 144 67823 ready 10.1.0.44
143 | ciliumendpoint.cilium.io/frontend-76fbb99468-mbzcm 1898 76556 ready 10.1.0.161
144 | ciliumendpoint.cilium.io/not-frontend-8f467ccbd-cbks8 208 127021 ready 10.1.0.128
145 |
146 | NAME NAMESPACE AGE
147 | ciliumidentity.cilium.io/127021 default 3m15s
148 | ciliumidentity.cilium.io/67688 kube-system 41m
149 | ciliumidentity.cilium.io/67823 default 3m15s
150 | ciliumidentity.cilium.io/76556 default 3m15s
151 |
152 | ```
153 |
154 | Let us make life a bit easier by storing the pod names in environment variables so we can reuse them later:
155 |
156 | ```bash
157 | export FRONTEND=$(kubectl get pods -l app=frontend -o jsonpath='{.items[0].metadata.name}')
158 | echo ${FRONTEND}
159 | export NOT_FRONTEND=$(kubectl get pods -l app=not-frontend -o jsonpath='{.items[0].metadata.name}')
160 | echo ${NOT_FRONTEND}
161 | ```
162 |
163 |
164 | ## {{% task %}} Enable Hubble in Cilium
165 |
166 | When you install Cilium using Helm, Hubble is already enabled: the corresponding value `hubble.enabled` is set to `true` in the `values.yaml` of the Cilium Helm chart. But we also want to enable Hubble Relay. With the following Helm command you can enable Hubble together with Hubble Relay:
167 |
168 | ```bash
169 | helm upgrade -i cilium cilium/cilium --version {{% param "ciliumVersion.postUpgrade" %}} \
170 | --namespace kube-system \
171 | --set ipam.operator.clusterPoolIPv4PodCIDRList={10.1.0.0/16} \
172 | --set cluster.name=cluster1 \
173 | --set cluster.id=1 \
174 | --set operator.replicas=1 \
175 | --set upgradeCompatibility=1.11 \
176 | --set kubeProxyReplacement=disabled \
177 | `# hubble and hubble relay variables:` \
178 | --set hubble.enabled=true \
179 | --set hubble.relay.enabled=true \
180 | --wait
181 | ```
182 |
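If you want to verify which values are currently applied to the release (for example that `hubble.relay.enabled` is now set), you can ask Helm for the user-supplied values:

```bash
# Show the user-supplied values of the cilium release in the kube-system namespace
helm -n kube-system get values cilium
```
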
183 | If you have installed Cilium with the `cilium` CLI, then the Hubble component is not enabled by default (nor is Hubble Relay). You can enable Hubble using the following `cilium` CLI command:
184 |
185 |
186 | ```
187 | # cilium hubble enable
188 | ```
189 |
190 | and then wait until Hubble is enabled:
191 |
192 | ```
193 | 🔑 Found existing CA in secret cilium-ca
194 | ✨ Patching ConfigMap cilium-config to enable Hubble...
195 | ♻️ Restarted Cilium pods
196 | ⌛ Waiting for Cilium to become ready before deploying other Hubble component(s)...
197 | 🔑 Generating certificates for Relay...
198 | ✨ Deploying Relay from quay.io/cilium/hubble-relay:v{{% param "ciliumVersion.postUpgrade" %}}...
199 | ⌛ Waiting for Hubble to be installed...
200 | ✅ Hubble was successfully enabled!
201 | ```
202 |
203 | When you have a look at your running pods with `kubectl get pod -A` you should see a Pod with a name starting with `hubble-relay`:
204 |
205 | ```bash
206 | kubectl get pod -A
207 | ```
208 |
209 | ```
210 | NAMESPACE NAME READY STATUS RESTARTS AGE
211 | default backend-6f884b6495-v7bvt 1/1 Running 0 52s
212 | default frontend-77d99ffc5d-lcsph 1/1 Running 0 52s
213 | default not-frontend-7db9747986-snjwp 1/1 Running 0 52s
214 | kube-system cilium-ksr7h 1/1 Running 0 9m16s
215 | kube-system cilium-operator-6f5c6f768d-r2qgn 1/1 Running 0 9m17s
216 | kube-system coredns-6d4b75cb6d-nf8wz 1/1 Running 0 22m
217 | kube-system etcd-cluster1 1/1 Running 0 22m
218 | kube-system hubble-relay-84b4ddb556-nr7c8 1/1 Running 0 10s
219 | kube-system kube-apiserver-cluster1 1/1 Running 0 22m
220 | kube-system kube-controller-manager-cluster1 1/1 Running 0 22m
221 | kube-system kube-proxy-7l6qk 1/1 Running 0 22m
222 | kube-system kube-scheduler-cluster1 1/1 Running 0 22m
223 | kube-system storage-provisioner 1/1 Running 1 (21m ago) 22m
224 | ```
225 |
226 | Cilium agents are restarting, and a new Hubble Relay pod is now present. We can wait for Cilium and Hubble to be ready by running:
227 |
228 | ```bash
229 | cilium status --wait
230 | ```
231 |
232 | which should give you an output similar to this:
233 |
234 | ```
235 | /¯¯\
236 | /¯¯\__/¯¯\ Cilium: OK
237 | \__/¯¯\__/ Operator: OK
238 | /¯¯\__/¯¯\ Hubble: OK
239 | \__/¯¯\__/ ClusterMesh: disabled
240 | \__/
241 |
242 | DaemonSet cilium Desired: 1, Ready: 1/1, Available: 1/1
243 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1
244 | Deployment hubble-relay Desired: 1, Ready: 1/1, Available: 1/1
245 | Containers: cilium Running: 1
246 | cilium-operator Running: 1
247 | hubble-relay Running: 1
248 | Cluster Pods: 9/9 managed by Cilium
249 | Image versions cilium quay.io/cilium/cilium:v1.11.2@sha256:ea677508010800214b0b5497055f38ed3bff57963fa2399bcb1c69cf9476453a: 1
250 | cilium-operator quay.io/cilium/operator-generic:v1.11.2@sha256:b522279577d0d5f1ad7cadaacb7321d1b172d8ae8c8bc816e503c897b420cfe3: 1
251 | hubble-relay quay.io/cilium/hubble-relay:v1.11.2@sha256:306ce38354a0a892b0c175ae7013cf178a46b79f51c52adb5465d87f14df0838: 1
252 | ```
253 |
254 | Hubble is now enabled. We can now locally port-forward to the Hubble pod:
255 |
256 | ```bash
257 | cilium hubble port-forward&
258 | ```
259 |
260 | {{% alert title="Note" color="primary" %}}
261 | The port-forwarding is needed because the Hubble Kubernetes service is only a `ClusterIP` service and is not exposed outside of the cluster network. With the port-forwarding you can access the Hubble service from your localhost.
262 | {{% /alert %}}
263 |
264 | {{% alert title="Note" color="primary" %}}
265 | Note the `&` after the command which puts the process in the background so we can continue working in the shell.
266 | {{% /alert %}}
267 |
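For reference, the helper command above is roughly equivalent to a plain `kubectl` port-forward against the `hubble-relay` service; the same command is used again later in this training when the tunnel needs to be reopened:

```bash
# Expose the in-cluster hubble-relay service on localhost:4245 for the hubble CLI
kubectl -n kube-system port-forward svc/hubble-relay 4245:80 &
```
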
268 | And then check Hubble status via the Hubble CLI (which uses the port-forwarding just opened):
269 |
270 | ```bash
271 | hubble status
272 | ```
273 |
274 | ```
275 | Healthcheck (via localhost:4245): Ok
276 | Current/Max Flows: 947/4095 (23.13%)
277 | Flows/s: 3.84
278 | Connected Nodes: 1/1
279 | ```
280 |
281 | {{% alert title="Note" color="primary" %}}
282 | If the nodes are not yet connected, give it some time and try again. There is a Certificate Authority that first needs to be fully loaded by the components.
283 | {{% /alert %}}
284 |
285 | The Hubble CLI is now primed for observing network traffic within the cluster.
286 |
287 |
288 | ## {{% task %}} Observing flows with Hubble
289 |
290 | We now want to use the `hubble` CLI to observe some network flows in our Kubernetes cluster. Let us have a look at the following command:
291 |
292 | ```bash
293 | hubble observe
294 | ```
295 |
296 | which gives you a list of network flows:
297 |
298 | ```
299 | Nov 23 14:49:03.030: 10.0.0.113:46274 <- kube-system/hubble-relay-f6d85866c-csthd:4245 to-stack FORWARDED (TCP Flags: ACK, PSH)
300 | Nov 23 14:49:03.030: 10.0.0.113:46274 -> kube-system/hubble-relay-f6d85866c-csthd:4245 to-endpoint FORWARDED (TCP Flags: RST)
301 | Nov 23 14:49:04.011: 10.0.0.113:44840 <- 10.0.0.114:4240 to-stack FORWARDED (TCP Flags: ACK)
302 | Nov 23 14:49:04.011: 10.0.0.113:44840 -> 10.0.0.114:4240 to-endpoint FORWARDED (TCP Flags: ACK)
303 | Nov 23 14:49:04.226: 10.0.0.113:32898 -> kube-system/coredns-558bd4d5db-xzvc9:8080 to-endpoint FORWARDED (TCP Flags: SYN)
304 | Nov 23 14:49:04.226: 10.0.0.113:32898 <- kube-system/coredns-558bd4d5db-xzvc9:8080 to-stack FORWARDED (TCP Flags: SYN, ACK)
305 | Nov 23 14:49:04.227: 10.0.0.113:32898 -> kube-system/coredns-558bd4d5db-xzvc9:8080 to-endpoint FORWARDED (TCP Flags: ACK)
306 | Nov 23 14:49:04.227: 10.0.0.113:32898 -> kube-system/coredns-558bd4d5db-xzvc9:8080 to-endpoint FORWARDED (TCP Flags: ACK, PSH)
307 | Nov 23 14:49:04.227: 10.0.0.113:32898 <- kube-system/coredns-558bd4d5db-xzvc9:8080 to-stack FORWARDED (TCP Flags: ACK, PSH)
308 | Nov 23 14:49:04.227: 10.0.0.113:32898 -> kube-system/coredns-558bd4d5db-xzvc9:8080 to-endpoint FORWARDED (TCP Flags: ACK, FIN)
309 | Nov 23 14:49:04.227: 10.0.0.113:32898 <- kube-system/coredns-558bd4d5db-xzvc9:8080 to-stack FORWARDED (TCP Flags: ACK, FIN)
310 | Nov 23 14:49:04.227: 10.0.0.113:32898 -> kube-system/coredns-558bd4d5db-xzvc9:8080 to-endpoint FORWARDED (TCP Flags: ACK)
311 | Nov 23 14:49:04.842: 10.0.0.113:34716 -> kube-system/coredns-558bd4d5db-xzvc9:8181 to-endpoint FORWARDED (TCP Flags: SYN)
312 | Nov 23 14:49:04.842: 10.0.0.113:34716 <- kube-system/coredns-558bd4d5db-xzvc9:8181 to-stack FORWARDED (TCP Flags: SYN, ACK)
313 | Nov 23 14:49:04.842: 10.0.0.113:34716 -> kube-system/coredns-558bd4d5db-xzvc9:8181 to-endpoint FORWARDED (TCP Flags: ACK)
314 | Nov 23 14:49:04.842: 10.0.0.113:34716 -> kube-system/coredns-558bd4d5db-xzvc9:8181 to-endpoint FORWARDED (TCP Flags: ACK, PSH)
315 | Nov 23 14:49:04.842: 10.0.0.113:34716 <- kube-system/coredns-558bd4d5db-xzvc9:8181 to-stack FORWARDED (TCP Flags: ACK, PSH)
316 | Nov 23 14:49:04.843: 10.0.0.113:34716 <- kube-system/coredns-558bd4d5db-xzvc9:8181 to-stack FORWARDED (TCP Flags: ACK, FIN)
317 | Nov 23 14:49:04.843: 10.0.0.113:34716 -> kube-system/coredns-558bd4d5db-xzvc9:8181 to-endpoint FORWARDED (TCP Flags: ACK, FIN)
318 | Nov 23 14:49:05.971: kube-system/hubble-relay-f6d85866c-csthd:40844 -> 192.168.49.2:4244 to-stack FORWARDED (TCP Flags: ACK, PSH)
319 |
320 | ```
321 |
322 | with
323 |
324 | ```bash
325 | hubble observe -f
326 | ```
327 |
328 | you can observe and follow the currently active flows in your Kubernetes cluster. Stop the command with `CTRL+C`.
329 |
330 | Let us produce some traffic:
331 |
332 | ```bash
333 | for i in {1..10}; do
334 | kubectl exec -ti ${FRONTEND} -- curl -I --connect-timeout 5 backend:8080
335 | kubectl exec -ti ${NOT_FRONTEND} -- curl -I --connect-timeout 5 backend:8080
336 | done
337 | ```
338 |
339 | We can now use the `hubble` CLI to filter traffic we are interested in. Here are some examples to specifically retrieve the network activity between our frontends and backend:
340 |
341 | ```bash
342 | hubble observe --to-pod backend
343 | hubble observe --namespace default --protocol tcp --port 8080
344 | ```
345 |
346 | Note that Hubble tells us the action, here `FORWARDED`, but it could also be `DROPPED`. If you only want to see `DROPPED` traffic, you can execute:
347 |
348 | ```bash
349 | hubble observe --verdict DROPPED
350 | ```
351 | For now this should only show some packets that have been sent to an already deleted pod. After we have configured NetworkPolicies, we will see other dropped packets.
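
The filters shown above can also be combined. For example, once those policies are in place, the following would show only the dropped traffic towards the backend (a sketch combining flags already used above):

```bash
# Only dropped flows destined for the backend pods
hubble observe --verdict DROPPED --to-pod backend
```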
352 |
--------------------------------------------------------------------------------