├── .github └── FUNDING.yml ├── tools ├── helm.txt ├── terraform.txt ├── cilium-cli.txt ├── kubectl.txt ├── eksctl.txt ├── aws-iam-authenticator.txt └── hubble.txt ├── files ├── env ├── np-deny-all.yaml ├── cnp-securitygroup.yaml ├── eks-cilium-argo.yaml ├── cilium-argocd.yaml ├── eks-cilium-arm.yaml ├── eks-cilium.yaml ├── eks-cilium-prefix.yaml ├── eks-cilium-ipv6.yaml └── main.tf ├── install-cilium-eks-hubble.md ├── install-cilium-eks-terraform.md ├── README.md ├── install-cilium-eks-dev.md ├── install-cilium-eks-prometheus.md ├── install-cilium-eks-kube-proxy-free.md ├── install-cilium-eks-chaining.md ├── install-cilium-eks-argocd.md ├── install-cilium-eks-arm.md ├── install-cilium-eks.md ├── install-cilium-eks-ipsec.md ├── install-cilium-eks-prefix.md ├── install-cilium-eks-wireguard.md ├── install-cilium-eks-sg.md ├── install-cilium-eks-api-gateway.md ├── install-cilium-eks-overlay.md ├── install-cilium-eks-ipv6.md ├── install-cilium-eks-helm.md ├── install-cilium-eks-clustermesh.md └── install-cilium-eks-kube-proxy-free-ebp-hostrouting.md /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | 2 | github: [littlejo] 3 | -------------------------------------------------------------------------------- /tools/helm.txt: -------------------------------------------------------------------------------- 1 | https://helm.sh/docs/intro/install/ 2 | 3 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash 4 | -------------------------------------------------------------------------------- /files/env: -------------------------------------------------------------------------------- 1 | export AWS_DEFAULT_REGION=us-east-1 2 | export AWS_ACCESS_KEY_ID="XXXXXXXXXXXXX" 3 | export AWS_SECRET_ACCESS_KEY="YYYYYYYYYYYYYYYYYYYYYY" 4 | -------------------------------------------------------------------------------- /tools/terraform.txt: -------------------------------------------------------------------------------- 1 | 2 | VERSION=1.5.0 3 | curl -o terraform.zip -sL0 https://releases.hashicorp.com/terraform/$VERSION/terraform_${VERSION}_linux_amd64.zip 4 | unzip terraform.zip 5 | mv terraform /usr/local/bin 6 | -------------------------------------------------------------------------------- /tools/cilium-cli.txt: -------------------------------------------------------------------------------- 1 | https://github.com/cilium/cilium-cli/releases 2 | 3 | curl -sLO "https://github.com/cilium/cilium-cli/releases/download/v0.14.6/cilium-linux-amd64.tar.gz" 4 | tar xvf cilium-linux-amd64.tar.gz 5 | mv cilium /usr/local/bin/ 6 | -------------------------------------------------------------------------------- /tools/kubectl.txt: -------------------------------------------------------------------------------- 1 | https://kubernetes.io/docs/tasks/tools/ 2 | 3 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 4 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 5 | -------------------------------------------------------------------------------- /files/np-deny-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-all 5 | namespace: default 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | - Egress 11 | ingress: [] 12 | egress: [] 13 | -------------------------------------------------------------------------------- 
/files/cnp-securitygroup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2 2 | kind: CiliumNetworkPolicy 3 | metadata: 4 | name: egress-default 5 | namespace: default 6 | spec: 7 | egress: 8 | - toGroups: 9 | - aws: 10 | securityGroupsIds: 11 | - TOCHANGE 12 | endpointSelector: {} 13 | -------------------------------------------------------------------------------- /files/eks-cilium-argo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: argo-cilium 6 | region: us-east-1 7 | version: "1.27" 8 | 9 | managedNodeGroups: 10 | - name: ng-1 11 | instanceType: t3.medium 12 | taints: 13 | - key: "node.cilium.io/agent-not-ready" 14 | value: "true" 15 | effect: "NoExecute" 16 | - name: ng-2 17 | instanceType: t3.medium 18 | -------------------------------------------------------------------------------- /files/cilium-argocd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: cilium-cd 5 | spec: 6 | destination: 7 | name: '' 8 | namespace: argocd 9 | server: 'https://kubernetes.default.svc' 10 | source: 11 | path: aws-eni 12 | repoURL: 'https://github.com/littlejo/argocd-cilium' 13 | targetRevision: main 14 | sources: [] 15 | project: default 16 | syncPolicy: 17 | automated: 18 | prune: true 19 | selfHeal: true 20 | -------------------------------------------------------------------------------- /tools/eksctl.txt: -------------------------------------------------------------------------------- 1 | # for ARM systems, set ARCH to: `arm64`, `armv6` or `armv7` 2 | ARCH=amd64 3 | PLATFORM=$(uname -s)_$ARCH 4 | 5 | curl -sLO "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$PLATFORM.tar.gz" 6 | 7 | # (Optional) Verify checksum 8 | curl -sL "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_checksums.txt" | grep $PLATFORM | sha256sum --check 9 | 10 | tar -xzf eksctl_$PLATFORM.tar.gz -C /tmp && rm eksctl_$PLATFORM.tar.gz 11 | 12 | sudo mv /tmp/eksctl /usr/local/bin 13 | -------------------------------------------------------------------------------- /files/eks-cilium-arm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: arm-cilium 6 | region: us-east-1 7 | version: "1.27" 8 | 9 | managedNodeGroups: 10 | - name: ng-1 11 | instanceType: t4g.medium 12 | # taint nodes so that application pods are 13 | # not scheduled/executed until Cilium is deployed. 14 | # Alternatively, see the note above regarding taint effects. 15 | taints: 16 | - key: "node.cilium.io/agent-not-ready" 17 | value: "true" 18 | effect: "NoExecute" 19 | -------------------------------------------------------------------------------- /files/eks-cilium.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: basic-cilium 6 | region: us-east-1 7 | version: "1.27" 8 | 9 | managedNodeGroups: 10 | - name: ng-1 11 | instanceType: t3.medium 12 | # taint nodes so that application pods are 13 | # not scheduled/executed until Cilium is deployed. 14 | # Alternatively, see the note above regarding taint effects. 
15 | taints: 16 | - key: "node.cilium.io/agent-not-ready" 17 | value: "true" 18 | effect: "NoExecute" 19 | -------------------------------------------------------------------------------- /tools/aws-iam-authenticator.txt: -------------------------------------------------------------------------------- 1 | https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html 2 | 3 | curl -Lo aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.5.9/aws-iam-authenticator_0.5.9_linux_amd64 4 | chmod a+x aws-iam-authenticator 5 | 6 | https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html 7 | 8 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" 9 | unzip awscliv2.zip 10 | sudo ./aws/install 11 | -------------------------------------------------------------------------------- /tools/hubble.txt: -------------------------------------------------------------------------------- 1 | export HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt) 2 | HUBBLE_ARCH=amd64 3 | if [ "$(uname -m)" = "aarch64" ]; then HUBBLE_ARCH=arm64; fi 4 | curl -L --fail --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-${HUBBLE_ARCH}.tar.gz{,.sha256sum} 5 | sha256sum --check hubble-linux-${HUBBLE_ARCH}.tar.gz.sha256sum 6 | sudo tar xzvfC hubble-linux-${HUBBLE_ARCH}.tar.gz /usr/local/bin 7 | rm hubble-linux-${HUBBLE_ARCH}.tar.gz{,.sha256sum} 8 | -------------------------------------------------------------------------------- /files/eks-cilium-prefix.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: basic-cilium 6 | region: us-east-1 7 | version: "1.27" 8 | 9 | managedNodeGroups: 10 | - name: ng-2 11 | instanceType: t3.medium 12 | # taint nodes so that application pods are 13 | # not scheduled/executed until Cilium is deployed. 14 | # Alternatively, see the note above regarding taint effects. 15 | taints: 16 | - key: "node.cilium.io/agent-not-ready" 17 | value: "true" 18 | effect: "NoExecute" 19 | maxPodsPerNode: 110 20 | -------------------------------------------------------------------------------- /files/eks-cilium-ipv6.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: ipv6-cilium 6 | region: us-east-1 7 | version: "1.27" 8 | 9 | availabilityZones: ['us-east-1a', 'us-east-1b'] 10 | 11 | kubernetesNetworkConfig: 12 | ipFamily: IPv6 13 | 14 | addons: 15 | - name: vpc-cni 16 | - name: coredns 17 | - name: kube-proxy 18 | 19 | iam: 20 | withOIDC: true 21 | 22 | managedNodeGroups: 23 | - name: ng-1 24 | instanceType: t3.medium 25 | # taint nodes so that application pods are 26 | # not scheduled/executed until Cilium is deployed. 27 | # Alternatively, see the note above regarding taint effects. 
28 | taints: 29 | - key: "node.cilium.io/agent-not-ready" 30 | value: "true" 31 | effect: "NoExecute" 32 | -------------------------------------------------------------------------------- /install-cilium-eks-hubble.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium and hubble to observe the network 4 | * ipam mode: eni 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | * [hubble](tools/hubble.txt) 13 | 14 | # Cluster installation 15 | 16 | exactly the same as install-cilium-eks.md 17 | 18 | # Cilium installation 19 | 20 | exactly the same as install-cilium-eks.md 21 | 22 | # Hubble installation 23 | 24 | https://docs.cilium.io/en/stable/gettingstarted/hubble_setup/ 25 | 26 | > cilium hubble enable 27 | 28 | ## Test 29 | 30 | ### Example 31 | 32 | ``` 33 | kubectl create ns cilium-test 34 | kubectl apply -n cilium-test -f https://raw.githubusercontent.com/cilium/cilium/v1.13/examples/kubernetes/connectivity-check/connectivity-check.yaml 35 | 36 | cilium hubble port-forward& 37 | ``` 38 | 39 | ``` 40 | hubble status 41 | Healthcheck (via localhost:4245): Ok 42 | Current/Max Flows: 429/8,190 (5.24%) 43 | Flows/s: 6.14 44 | Connected Nodes: 2/2 45 | ``` 46 | 47 | > hubble observe 48 | 49 | ## Hubble ui installation 50 | 51 | ``` 52 | cilium hubble disable 53 | cilium hubble enable --ui 54 | ``` 55 | -------------------------------------------------------------------------------- /files/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "this" { 2 | most_recent = true 3 | owners = ["amazon"] 4 | 5 | filter { 6 | name = "name" 7 | values = ["al2023-ami-*-x86_64"] 8 | } 9 | } 10 | 11 | data "aws_vpc" "this" { 12 | tags = { "Name" : "eksctl-basic-cilium-cluster/VPC" } 13 | } 14 | 15 | data "aws_subnets" "this" { 16 | filter { 17 | name = "tag:alpha.eksctl.io/cluster-name" 18 | values = ["basic-cilium"] 19 | } 20 | } 21 | 22 | resource "aws_instance" "this" { 23 | ami = data.aws_ami.this.id 24 | instance_type = "t3.micro" 25 | subnet_id = data.aws_subnets.this.ids[0] 26 | 27 | tags = { 28 | Name = "HelloWorld" 29 | } 30 | 31 | vpc_security_group_ids = [aws_security_group.this.id] 32 | 33 | user_data = <<-EOF 34 | #!/bin/bash 35 | yum install -y nginx 36 | systemctl start nginx 37 | EOF 38 | } 39 | 40 | resource "aws_security_group" "this" { 41 | name = "web_80" 42 | description = "Allow web inbound traffic" 43 | vpc_id = data.aws_vpc.this.id 44 | 45 | ingress { 46 | description = "80 from VPC" 47 | from_port = 80 48 | to_port = 80 49 | protocol = "tcp" 50 | cidr_blocks = ["0.0.0.0/0"] 51 | } 52 | 53 | egress { 54 | from_port = 0 55 | to_port = 0 56 | protocol = "-1" 57 | cidr_blocks = ["0.0.0.0/0"] 58 | ipv6_cidr_blocks = ["::/0"] 59 | } 60 | 61 | tags = { 62 | Name = "web_80" 63 | } 64 | } 65 | 66 | output "security_group_id" { 67 | description = "security group id" 68 | value = aws_security_group.this.id 69 | } 70 | 71 | output "private_ip" { 72 | description = "private ip" 73 | value = aws_instance.this.private_ip 74 | } 75 | -------------------------------------------------------------------------------- /install-cilium-eks-terraform.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium and eks clusters with terraform 
4 | * cilium with eni mode, but other scenarios in the future 5 | 6 | # Requirements 7 | 8 | * [terraform](tools/terraform.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | 13 | # All installation 14 | 15 | ``` 16 | export AWS_DEFAULT_REGION=ch-ange-1 17 | export AWS_ACCESS_KEY_ID="CHANGEME" 18 | export AWS_SECRET_ACCESS_KEY="CHANGEME" 19 | ``` 20 | 21 | ``` 22 | git clone https://github.com/littlejo/terraform-eks-cilium.git 23 | cd terraform-eks-cilium 24 | ``` 25 | 26 | * modify variables.tf 27 | 28 | In particular, if you want other availability zones: 29 | 30 | ``` 31 | variable "azs" { 32 | description = "List of availability zones to install eks" 33 | type = list(string) 34 | default = ["us-east-1a", "us-east-1b"] 35 | } 36 | ``` 37 | 38 | ``` 39 | terraform init 40 | terraform apply 41 | => yes 42 | ``` 43 | 44 | Run this command to update your kubeconfig: 45 | > aws eks update-kubeconfig --name terraform-cilium --kubeconfig ~/.kube/config 46 | 47 | 48 | ``` 49 | cilium status 50 | /¯¯\ 51 | /¯¯\__/¯¯\ Cilium: OK 52 | \__/¯¯\__/ Operator: OK 53 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 54 | \__/¯¯\__/ Hubble Relay: disabled 55 | \__/ ClusterMesh: disabled 56 | 57 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 58 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 59 | Containers: cilium-operator Running: 2 60 | cilium Running: 2 61 | Cluster Pods: 2/2 managed by Cilium 62 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 63 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 64 | ``` 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This is a cookbook for installing cilium on eks for different use cases. The installation is deliberately simplified, so it is not meant for production; it's ideal for a POC.
2 | 3 | # Easy ways 4 | 5 | * [Quickstart to install cilium on eks](install-cilium-eks.md) 6 | * [Quickstart to install cilium on arm eks](install-cilium-eks-arm.md) 7 | * [Quickstart to install a dev version of cilium on eks](install-cilium-eks-dev.md) 8 | 9 | # Different ways for the deployment 10 | 11 | * [Quickstart to install cilium on eks with Terraform](install-cilium-eks-terraform.md) 12 | * [Quickstart to install cilium on eks with Helm](install-cilium-eks-helm.md) 13 | * [Quickstart to install cilium on eks with ArgoCD](install-cilium-eks-argocd.md) 14 | 15 | 16 | # Different ways for the networking 17 | 18 | * [Quickstart to install cilium on eks using chaining mode](install-cilium-eks-chaining.md) 19 | * [Quickstart to install cilium on eks using overlay mode](install-cilium-eks-overlay.md) 20 | * [Quickstart to install cilium on eks using IPv6](install-cilium-eks-ipv6.md) 21 | * [Quickstart to install cilium on eks using Wireguard](install-cilium-eks-wireguard.md) 22 | * [Quickstart to install cilium on eks using ipsec](install-cilium-eks-ipsec.md) 23 | * [Quickstart to install cilium on eks using eni prefix delegation](install-cilium-eks-prefix.md) 24 | * [Quickstart to install cilium on eks without kube-proxy](install-cilium-eks-kube-proxy-free.md) 25 | * [Quickstart to install cilium on eks using clustermesh](install-cilium-eks-clustermesh.md) 26 | 27 | # Different networking add-ons 28 | 29 | * [Quickstart to install cilium and api gateway on eks](install-cilium-eks-api-gateway.md) 30 | * [Quickstart to install cilium, prometheus and grafana on eks](install-cilium-eks-prometheus.md) 31 | * [Quickstart to install cilium and hubble on eks](install-cilium-eks-hubble.md) 32 | * [Quickstart to install cilium and use security group with network policy on eks](install-cilium-eks-sg.md) 33 | 34 | # Related article 35 | 36 | * [(FRENCH) Mon été avec Cilium et EKS](https://medium.com/@littel.jo/mon-%C3%A9t%C3%A9-avec-cilium-et-eks-partie-1-99a66ed6671f) 37 | 38 | -------------------------------------------------------------------------------- /install-cilium-eks-dev.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install dev version of cilium on eks clusters 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | 12 | # Cluster installation 13 | 14 | exactly the same as install-cilium-eks.md 15 | 16 | # Cilium installation 17 | 18 | > cilium install --version=v1.14.0-snapshot.3 19 | ``` 20 | 🔮 Auto-detected Kubernetes kind: EKS 21 | ℹ️ Using Cilium version 1.14.0-snapshot.3 22 | 🔮 Auto-detected cluster name: basic-cilium-us-east-1-eksctl-io 23 | 🔮 Auto-detected datapath mode: aws-eni 24 | 🔮 Auto-detected kube-proxy has been installed 25 | 🔥 Patching the "aws-node" DaemonSet to evict its pods... 26 | ℹ️ helm template --namespace kube-system cilium cilium/cilium --version 1.14.0-snapshot.3 --set cluster.id=0,cluster.name=basic-cilium-us-east-1-eksctl-io,egressMasqueradeInterfaces=eth0,encryption.nodeEncryption=false,eni.enabled=true,ipam.mode=eni,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=disabled 27 | ℹ️ Storing helm values file in kube-system/cilium-cli-helm-values Secret 28 | 🔑 Created CA in secret cilium-ca 29 | 🔑 Generating certificates for Hubble... 
30 | 🚀 Creating Service accounts... 31 | 🚀 Creating Cluster roles... 32 | 🚀 Creating ConfigMap for Cilium version 1.14.0-snapshot.3... 33 | 🚀 Creating Agent DaemonSet... 34 | 🚀 Creating Operator Deployment... 35 | ⌛ Waiting for Cilium to be installed and ready... 36 | ✅ Cilium was successfully installed! Run 'cilium status' to view installation health 37 | ``` 38 | 39 | > cilium status --wait 40 | 41 | ``` 42 | /¯¯\ 43 | /¯¯\__/¯¯\ Cilium: OK 44 | \__/¯¯\__/ Operator: OK 45 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 46 | \__/¯¯\__/ Hubble Relay: disabled 47 | \__/ ClusterMesh: disabled 48 | 49 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 50 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 51 | Containers: cilium Running: 2 52 | cilium-operator Running: 1 53 | Cluster Pods: 2/2 managed by Cilium 54 | Image versions cilium quay.io/cilium/cilium:v1.14.0-snapshot.3: 2 55 | cilium-operator quay.io/cilium/operator-aws:v1.14.0-snapshot.3: 1 56 | ``` 57 | 58 | # Test 59 | 60 | > cilium connectivity test 61 | -------------------------------------------------------------------------------- /install-cilium-eks-prometheus.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install prometheus to outsource metrics from cilium and hubble 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | * [helm](tools/helm.txt) 12 | 13 | # Cluster installation 14 | 15 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md) 16 | 17 | # Cilium installation 18 | 19 | > kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 20 | 21 | installation of cilium with prometheus metrics activated: 22 | 23 | ``` 24 | helm repo add cilium https://helm.cilium.io/ 25 | helm install cilium cilium/cilium --namespace kube-system \ 26 | --set eni.enabled=true \ 27 | --set ipam.mode=eni \ 28 | --set egressMasqueradeInterfaces=eth0 \ 29 | --set tunnel=disabled \ 30 | --set prometheus.enabled=true \ 31 | --set operator.prometheus.enabled=true 32 | ``` 33 | 34 | Check 35 | 36 | ``` 37 | kubectl get pod -n kube-system --selector k8s-app=cilium -o json | jq '.items[0].metadata.annotations' 38 | { 39 | "container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites": "unconfined", 40 | "container.apparmor.security.beta.kubernetes.io/cilium-agent": "unconfined", 41 | "container.apparmor.security.beta.kubernetes.io/clean-cilium-state": "unconfined", 42 | "container.apparmor.security.beta.kubernetes.io/mount-cgroup": "unconfined", 43 | "prometheus.io/port": "9962", 44 | "prometheus.io/scrape": "true" 45 | } 46 | ``` 47 | 48 | # Hubble 49 | 50 | To use metrics from hubble: 51 | 52 | ``` 53 | helm upgrade cilium cilium/cilium --namespace kube-system --reuse-values --set hubble.enabled=true --set hubble.metrics.enabled="{dns,drop:sourceContext=pod;destinationContext=pod,tcp,flow,port-distribution,httpV2}" 54 | kubectl rollout restart daemonset/cilium -n kube-system 55 | ``` 56 | 57 | # Prometheus and Grafana 58 | 59 | To install prometheus and to see this metrics on grafana: 60 | 61 | ``` 62 | kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/v1.13/examples/kubernetes/addons/prometheus/monitoring-example.yaml 63 | kubectl -n 
cilium-monitoring port-forward service/grafana --address 0.0.0.0 --address :: 3000:3000 64 | ``` 65 | 66 | You can connect to this url to see grafana and cilium metrics: http://localhost:3000 67 | 68 | # Example of test 69 | 70 | If you have no idea what to test, example: 71 | 72 | ``` 73 | kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/v1.13/examples/minikube/http-sw-app.yaml 74 | kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/v1.13/examples/minikube/sw_l3_l4_l7_policy.yaml 75 | ``` 76 | -------------------------------------------------------------------------------- /install-cilium-eks-kube-proxy-free.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium on eks clusters with kube-proxy free 4 | * ipam mode: eni 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | 13 | # Cluster installation 14 | 15 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md#cluster-installation) 16 | 17 | # Cilium installation 18 | 19 | ## Patch 20 | 21 | ``` 22 | kubectl -n kube-system delete ds kube-proxy 23 | kubectl -n kube-system delete cm kube-proxy 24 | kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 25 | ``` 26 | 27 | ## Find eks endpoint and install 28 | 29 | ``` 30 | aws eks describe-cluster --name basic-cilium | jq -r .cluster.endpoint 31 | https://29F17965D68DB5502F627B2D22596152.gr7.us-east-1.eks.amazonaws.com 32 | API_SERVER_IP=29F17965D68DB5502F627B2D22596152.gr7.us-east-1.eks.amazonaws.com 33 | API_SERVER_PORT=443 34 | helm repo add cilium https://helm.cilium.io/ 35 | helm repo update 36 | helm install cilium cilium/cilium --version 1.13.4 \ 37 | --namespace kube-system \ 38 | --set eni.enabled=true \ 39 | --set ipam.mode=eni \ 40 | --set egressMasqueradeInterfaces=eth0 \ 41 | --set kubeProxyReplacement=strict \ 42 | --set tunnel=disabled \ 43 | --set k8sServiceHost=${API_SERVER_IP} \ 44 | --set k8sServicePort=${API_SERVER_PORT} 45 | ``` 46 | 47 | > cilium status --wait 48 | 49 | ``` 50 | /¯¯\ 51 | /¯¯\__/¯¯\ Cilium: OK 52 | \__/¯¯\__/ Operator: OK 53 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 54 | \__/¯¯\__/ Hubble Relay: disabled 55 | \__/ ClusterMesh: disabled 56 | 57 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 58 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 59 | Containers: cilium Running: 2 60 | cilium-operator Running: 2 61 | Cluster Pods: 2/2 managed by Cilium 62 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 63 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 64 | ``` 65 | 66 | ## Check 67 | 68 | ``` 69 | kubectl -n kube-system exec ds/cilium -- cilium status | grep KubeProxyReplacement 70 | KubeProxyReplacement: Strict [eth0 192.168.27.176 (Direct Routing), eth1 192.168.18.89] 71 | ``` 72 | 73 | # Test 74 | 75 | > cilium connectivity test 76 | -------------------------------------------------------------------------------- /install-cilium-eks-chaining.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * You don't 
want to remove the vpc cni, but you want to use cilium network policies on your eks cluster 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | 12 | # Cluster installation 13 | 14 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md#cluster-installation) 15 | 16 | # Cilium installation 17 | 18 | Minimum required version of the vpc cni: v1.11.2 19 | 20 | To check which version is running: 21 | 22 | ``` 23 | kubectl -n kube-system get ds/aws-node -o json | jq -r '.spec.template.spec.containers[0].image' 24 | XXXXXXXXXXX.dkr.ecr.us-east-1.amazonaws.com/amazon-k8s-cni:v1.12.6-eksbuild.2 25 | ``` 26 | 27 | ``` 28 | helm repo add cilium https://helm.cilium.io/ 29 | helm repo update 30 | helm install cilium cilium/cilium --version 1.13.4 \ 31 | --namespace kube-system \ 32 | --set cni.chainingMode=aws-cni \ 33 | --set cni.exclusive=false \ 34 | --set enableIPv4Masquerade=false \ 35 | --set tunnel=disabled \ 36 | --set endpointRoutes.enabled=true 37 | ``` 38 | 39 | If you already have pods running on your cluster, list the ones that are not yet managed by cilium: 40 | ``` 41 | for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}'); do 42 | ceps=$(kubectl -n "${ns}" get cep \ 43 | -o jsonpath='{.items[*].metadata.name}') 44 | pods=$(kubectl -n "${ns}" get pod \ 45 | -o custom-columns=NAME:.metadata.name,NETWORK:.spec.hostNetwork \ 46 | | grep -E '\s(<none>|false)' | awk '{print $1}' | tr '\n' ' ') 47 | ncep=$(echo "${pods} ${ceps}" | tr ' ' '\n' | sort | uniq -u | paste -s -d ' ' -) 48 | for pod in $(echo $ncep); do 49 | echo "${ns}/${pod}"; 50 | done 51 | done 52 | ``` 53 | 54 | > cilium status --wait 55 | 56 | ``` 57 | /¯¯\ 58 | /¯¯\__/¯¯\ Cilium: OK 59 | \__/¯¯\__/ Operator: OK 60 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 61 | \__/¯¯\__/ Hubble Relay: disabled 62 | \__/ ClusterMesh: disabled 63 | 64 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 65 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 66 | Containers: cilium Running: 2 67 | cilium-operator Running: 2 68 | Cluster Pods: 2/2 managed by Cilium 69 | Image versions cilium-operator quay.io/cilium/operator-generic:v1.13.4@sha256:09ab77d324ef4d31f7d341f97ec5a2a4860910076046d57a2d61494d426c6301: 2 70 | cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 71 | ``` 72 | 73 | # Test 74 | 75 | > cilium connectivity test 76 | 77 | # Create a security group for pods 78 | 79 | It's not possible with the t3 instance family: 80 | https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html 81 | 82 | so I can't demonstrate it here.
83 | All information are here: https://docs.cilium.io/en/stable/installation/cni-chaining-aws-cni/ 84 | 85 | -------------------------------------------------------------------------------- /install-cilium-eks-argocd.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Deploy cilium on eks clusters with argocd 4 | * ipam: eni 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | 13 | # Cluster installation 14 | 15 | ``` 16 | export AWS_DEFAULT_REGION=ch-ange-1 17 | export AWS_ACCESS_KEY_ID="CHANGEME" 18 | export AWS_SECRET_ACCESS_KEY="CHANGEME" 19 | ``` 20 | 21 | > source ./files/env 22 | 23 | ```yaml: 24 | apiVersion: eksctl.io/v1alpha5 25 | kind: ClusterConfig 26 | 27 | metadata: 28 | name: argo-cilium 29 | region: us-east-1 30 | version: "1.27" 31 | 32 | managedNodeGroups: 33 | - name: ng-1 34 | instanceType: t3.medium 35 | taints: 36 | - key: "node.cilium.io/agent-not-ready" 37 | value: "true" 38 | effect: "NoExecute" 39 | - name: ng-2 40 | instanceType: t3.medium 41 | ``` 42 | 43 | * ng-2 is for argocd deployment 44 | 45 | > eksctl create cluster -f ./files/eks-cilium-argo.yaml 46 | 47 | > kubectl get node 48 | ``` 49 | NAME STATUS ROLES AGE VERSION 50 | ip-192-168-13-125.ec2.internal Ready 3m34s v1.27.1-eks-2f008fe 51 | ip-192-168-13-211.ec2.internal Ready 3m55s v1.27.1-eks-2f008fe 52 | ip-192-168-48-127.ec2.internal Ready 3m53s v1.27.1-eks-2f008fe 53 | ip-192-168-58-192.ec2.internal Ready 3m47s v1.27.1-eks-2f008fe 54 | ``` 55 | 56 | # Argocd installation 57 | 58 | ``` 59 | kubectl create namespace argocd 60 | kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/v2.8.0-rc1/manifests/install.yaml 61 | ``` 62 | 63 | # Cilium installation 64 | 65 | > kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 66 | 67 | ``` 68 | apiVersion: argoproj.io/v1alpha1 69 | kind: Application 70 | metadata: 71 | name: cilium-cd 72 | spec: 73 | destination: 74 | name: '' 75 | namespace: argocd 76 | server: 'https://kubernetes.default.svc' 77 | source: 78 | path: aws-eni 79 | repoURL: 'https://github.com/littlejo/argocd-cilium' 80 | targetRevision: main 81 | sources: [] 82 | project: default 83 | syncPolicy: 84 | automated: 85 | prune: true 86 | selfHeal: true 87 | ``` 88 | 89 | > kubectl apply -f files/cilium-argocd.yaml -n argocd 90 | 91 | 92 | > eksctl delete nodegroup ng-2 --cluster argo-cilium 93 | 94 | > cilium status --wait 95 | ``` 96 | /¯¯\ 97 | /¯¯\__/¯¯\ Cilium: OK 98 | \__/¯¯\__/ Operator: OK 99 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 100 | \__/¯¯\__/ Hubble Relay: disabled 101 | \__/ ClusterMesh: disabled 102 | 103 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 104 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 105 | Containers: cilium Running: 2 106 | cilium-operator Running: 2 107 | Cluster Pods: 9/9 managed by Cilium 108 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 109 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 110 | ``` 111 | 112 | # Test 113 | 114 | > cilium connectivity test 115 | 
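116 | # Argocd UI (optional) 117 | 118 | If you want to check the sync status of the cilium-cd Application in the Argo CD web UI, here is a minimal way to reach it, assuming the default argocd-server service and the argocd-initial-admin-secret that the install manifest above creates: 119 | 120 | ``` 121 | # initial admin password (assumes the default argocd-initial-admin-secret exists) 122 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 -d; echo 123 | # expose the UI locally on https://localhost:8080 124 | kubectl -n argocd port-forward svc/argocd-server 8080:443 125 | ``` 126 | 127 | Log in as admin with the password printed by the first command.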
-------------------------------------------------------------------------------- /install-cilium-eks-arm.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium on arm eks clusters 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | 12 | # Cluster installation 13 | 14 | ``` 15 | export AWS_DEFAULT_REGION=ch-ange-1 16 | export AWS_ACCESS_KEY_ID="CHANGEME" 17 | export AWS_SECRET_ACCESS_KEY="CHANGEME" 18 | ``` 19 | 20 | > source ./files/env 21 | 22 | ```yaml: 23 | apiVersion: eksctl.io/v1alpha5 24 | kind: ClusterConfig 25 | 26 | metadata: 27 | name: basic-cilium 28 | region: us-east-1 29 | version: "1.27" 30 | 31 | managedNodeGroups: 32 | - name: ng-1 33 | instanceType: t4g.medium 34 | # taint nodes so that application pods are 35 | # not scheduled/executed until Cilium is deployed. 36 | # Alternatively, see the note above regarding taint effects. 37 | taints: 38 | - key: "node.cilium.io/agent-not-ready" 39 | value: "true" 40 | effect: "NoExecute" 41 | ``` 42 | 43 | > eksctl create cluster -f ./files/eks-cilium-arm.yaml 44 | 45 | ``` 46 | kubectl get node 47 | NAME STATUS ROLES AGE VERSION 48 | ip-192-168-15-103.ec2.internal Ready 64s v1.27.1-eks-2f008fe 49 | ip-192-168-40-36.ec2.internal Ready 63s v1.27.1-eks-2f008fe 50 | ``` 51 | 52 | 53 | # Cilium installation 54 | 55 | > cilium install 56 | ``` 57 | 🔮 Auto-detected Kubernetes kind: EKS 58 | ℹ️ Using Cilium version 1.13.3 59 | 🔮 Auto-detected cluster name: arm-cilium-us-east-1-eksctl-io 60 | 🔮 Auto-detected datapath mode: aws-eni 61 | 🔮 Auto-detected kube-proxy has been installed 62 | 🔥 Patching the "aws-node" DaemonSet to evict its pods... 63 | ℹ️ helm template --namespace kube-system cilium cilium/cilium --version 1.13.3 --set cluster.id=0,cluster.name=arm-cilium-us-east-1-eksctl-io,egressMasqueradeInterfaces=eth0,encryption.nodeEncryption=false,eni.enabled=true,ipam.mode=eni,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=disabled 64 | ℹ️ Storing helm values file in kube-system/cilium-cli-helm-values Secret 65 | 🔑 Created CA in secret cilium-ca 66 | 🔑 Generating certificates for Hubble... 67 | 🚀 Creating Service accounts... 68 | 🚀 Creating Cluster roles... 69 | 🚀 Creating ConfigMap for Cilium version 1.13.3... 70 | 🚀 Creating Agent DaemonSet... 71 | 🚀 Creating Operator Deployment... 72 | ⌛ Waiting for Cilium to be installed and ready... 73 | ✅ Cilium was successfully installed! 
Run 'cilium status' to view installation health 74 | ``` 75 | 76 | > cilium status --wait 77 | 78 | ``` 79 | /¯¯\ 80 | /¯¯\__/¯¯\ Cilium: OK 81 | \__/¯¯\__/ Operator: OK 82 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 83 | \__/¯¯\__/ Hubble Relay: disabled 84 | \__/ ClusterMesh: disabled 85 | 86 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 87 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 88 | Containers: cilium Running: 2 89 | cilium-operator Running: 1 90 | Cluster Pods: 2/2 managed by Cilium 91 | Image versions cilium-operator quay.io/cilium/operator-aws:v1.13.3@sha256:394c40d156235d3c2004f77bb73402457092351cc6debdbc5727ba36fbd863ae: 1 92 | cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 93 | ``` 94 | 95 | # Test 96 | 97 | > cilium connectivity test 98 | -------------------------------------------------------------------------------- /install-cilium-eks.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install eks clusters 4 | * Install cilium on eks clusters 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | 13 | # Cluster installation 14 | 15 | ``` 16 | export AWS_DEFAULT_REGION=ch-ange-1 17 | export AWS_ACCESS_KEY_ID="CHANGEME" 18 | export AWS_SECRET_ACCESS_KEY="CHANGEME" 19 | ``` 20 | 21 | > source ./files/env 22 | 23 | ```yaml: 24 | apiVersion: eksctl.io/v1alpha5 25 | kind: ClusterConfig 26 | 27 | metadata: 28 | name: basic-cilium 29 | region: us-east-1 30 | version: "1.27" 31 | 32 | managedNodeGroups: 33 | - name: ng-1 34 | instanceType: t3.medium 35 | # taint nodes so that application pods are 36 | # not scheduled/executed until Cilium is deployed. 37 | # Alternatively, see the note above regarding taint effects. 38 | taints: 39 | - key: "node.cilium.io/agent-not-ready" 40 | value: "true" 41 | effect: "NoExecute" 42 | ``` 43 | 44 | > eksctl create cluster -f ./files/eks-cilium.yaml 45 | 46 | > kubectl get node 47 | ``` 48 | NAME STATUS ROLES AGE VERSION 49 | ip-192-168-11-135.ec2.internal Ready 4m18s v1.27.1-eks-2f008fe 50 | ip-192-168-56-129.ec2.internal Ready 4m22s v1.27.1-eks-2f008fe 51 | ``` 52 | 53 | # Cilium installation 54 | 55 | > cilium install 56 | ``` 57 | 🔮 Auto-detected Kubernetes kind: EKS 58 | ℹ️ Using Cilium version 1.13.3 59 | 🔮 Auto-detected cluster name: basic-cilium-us-east-1-eksctl-io 60 | 🔮 Auto-detected datapath mode: aws-eni 61 | 🔮 Auto-detected kube-proxy has been installed 62 | 🔥 Patching the "aws-node" DaemonSet to evict its pods... 63 | ℹ️ helm template --namespace kube-system cilium cilium/cilium --version 1.13.3 --set cluster.id=0,cluster.name=basic-cilium-us-east-1-eksctl-io,egressMasqueradeInterfaces=eth0,encryption.nodeEncryption=false,eni.enabled=true,ipam.mode=eni,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=disabled 64 | ℹ️ Storing helm values file in kube-system/cilium-cli-helm-values Secret 65 | 🔑 Created CA in secret cilium-ca 66 | 🔑 Generating certificates for Hubble... 67 | 🚀 Creating Service accounts... 68 | 🚀 Creating Cluster roles... 69 | 🚀 Creating ConfigMap for Cilium version 1.13.3... 70 | 🚀 Creating Agent DaemonSet... 71 | 🚀 Creating Operator Deployment... 
72 | ⌛ Waiting for Cilium to be installed and ready... 73 | ✅ Cilium was successfully installed! Run 'cilium status' to view installation health 74 | ``` 75 | 76 | > cilium status --wait 77 | 78 | ``` 79 | /¯¯\ 80 | /¯¯\__/¯¯\ Cilium: OK 81 | \__/¯¯\__/ Operator: OK 82 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 83 | \__/¯¯\__/ Hubble Relay: disabled 84 | \__/ ClusterMesh: disabled 85 | 86 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 87 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 88 | Containers: cilium-operator Running: 1 89 | cilium Running: 2 90 | Cluster Pods: 2/2 managed by Cilium 91 | Image versions cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 92 | cilium-operator quay.io/cilium/operator-aws:v1.13.3@sha256:394c40d156235d3c2004f77bb73402457092351cc6debdbc5727ba36fbd863ae: 1 93 | ``` 94 | 95 | # Test 96 | 97 | > cilium connectivity test 98 | -------------------------------------------------------------------------------- /install-cilium-eks-ipsec.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium with ipsec encryption enabled on eks clusters 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | 12 | # Cluster installation 13 | 14 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md) 15 | 16 | # Cilium installation 17 | 18 | ``` 19 | PSK=($(dd if=/dev/urandom count=20 bs=1 2> /dev/null | xxd -p -c 64)) 20 | kubectl create -n kube-system secret generic cilium-ipsec-keys \ 21 | --from-literal=keys="3 rfc4106(gcm(aes)) $PSK 128" 22 | ``` 23 | 24 | > cilium install --encryption ipsec 25 | 26 | ``` 27 | 🔮 Auto-detected Kubernetes kind: EKS 28 | ℹ️ Using Cilium version 1.13.3 29 | 🔮 Auto-detected cluster name: basic-cilium-us-east-1-eksctl-io 30 | 🔮 Auto-detected datapath mode: aws-eni 31 | 🔮 Auto-detected kube-proxy has been installed 32 | 🔥 Patching the "aws-node" DaemonSet to evict its pods... 33 | ℹ️ helm template --namespace kube-system cilium cilium/cilium --version 1.13.3 --set cluster.id=0,cluster.name=basic-cilium-us-east-1-eksctl-io,egressMasqueradeInterfaces=eth0,encryption.enabled=true,encryption.nodeEncryption=false,encryption.type=ipsec,eni.enabled=true,ipam.mode=eni,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=disabled 34 | ℹ️ Storing helm values file in kube-system/cilium-cli-helm-values Secret 35 | 🔑 Created CA in secret cilium-ca 36 | 🔑 Generating certificates for Hubble... 37 | 🚀 Creating Service accounts... 38 | 🚀 Creating Cluster roles... 39 | 🔑 Found existing encryption secret cilium-ipsec-keys 40 | 🚀 Creating ConfigMap for Cilium version 1.13.3... 41 | 🚀 Creating Agent DaemonSet... 42 | 🚀 Creating Operator Deployment... 43 | ⌛ Waiting for Cilium to be installed and ready... 44 | ✅ Cilium was successfully installed! 
Run 'cilium status' to view installation health 45 | ``` 46 | 47 | ``` 48 | cilium status --wait 49 | /¯¯\ 50 | /¯¯\__/¯¯\ Cilium: OK 51 | \__/¯¯\__/ Operator: OK 52 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 53 | \__/¯¯\__/ Hubble Relay: disabled 54 | \__/ ClusterMesh: disabled 55 | 56 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 57 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 58 | Containers: cilium Running: 2 59 | cilium-operator Running: 1 60 | Cluster Pods: 2/2 managed by Cilium 61 | Image versions cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 62 | cilium-operator quay.io/cilium/operator-aws:v1.13.3@sha256:394c40d156235d3c2004f77bb73402457092351cc6debdbc5727ba36fbd863ae: 1 63 | 64 | cilium config view | grep enable-ipsec 65 | enable-ipsec true 66 | ``` 67 | 68 | # Rotate your key 69 | 70 | ``` 71 | read KEYID ALGO PSK KEYSIZE < <(kubectl get secret -n kube-system cilium-ipsec-keys -o go-template='{{.data.keys | base64decode}}') 72 | NEW_PSK=($(dd if=/dev/urandom count=20 bs=1 2> /dev/null | xxd -p -c 64)) 73 | data=$(echo "{\"stringData\":{\"keys\":\"$((($KEYID+1))) "rfc4106\(gcm\(aes\)\)" $NEW_PSK 128\"}}") 74 | kubectl patch secret -n kube-system cilium-ipsec-keys -p="${data}" -v=1 75 | ``` 76 | 77 | # Test 78 | 79 | > cilium connectivity test 80 | 81 | # More infos 82 | 83 | https://isovalent.com/blog/post/tutorial-transparent-encryption-with-ipsec-and-wireguard/ 84 | -------------------------------------------------------------------------------- /install-cilium-eks-prefix.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium with aws eni prefix delegation 4 | * Use less AWS ENI by pods 5 | * To be out of limits of ENI per node 6 | 7 | # Requirements 8 | 9 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 10 | * [kubectl](tools/kubectl.txt) 11 | * [cilium cli](tools/cilium-cli.txt) 12 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 13 | 14 | # Cluster installation 15 | 16 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md) 17 | 18 | # Cilium installation 19 | 20 | > cilium install --helm-set "eni.awsEnablePrefixDelegation=true" 21 | ``` 22 | 🔮 Auto-detected Kubernetes kind: EKS 23 | ℹ️ Using Cilium version 1.13.3 24 | 🔮 Auto-detected cluster name: basic-cilium-us-east-1-eksctl-io 25 | 🔮 Auto-detected datapath mode: aws-eni 26 | 🔮 Auto-detected kube-proxy has been installed 27 | 🔥 Patching the "aws-node" DaemonSet to evict its pods... 28 | ℹ️ helm template --namespace kube-system cilium cilium/cilium --version 1.13.3 --set cluster.id=0,cluster.name=basic-cilium-us-east-1-eksctl-io,egressMasqueradeInterfaces=eth0,encryption.nodeEncryption=false,eni.awsEnablePrefixDelegation=true,eni.enabled=true,ipam.mode=eni,kubeProxyReplacement=disabled,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=disabled 29 | ℹ️ Storing helm values file in kube-system/cilium-cli-helm-values Secret 30 | 🔑 Created CA in secret cilium-ca 31 | 🔑 Generating certificates for Hubble... 32 | 🚀 Creating Service accounts... 33 | 🚀 Creating Cluster roles... 34 | 🚀 Creating ConfigMap for Cilium version 1.13.3... 35 | 🚀 Creating Agent DaemonSet... 36 | 🚀 Creating Operator Deployment... 37 | ⌛ Waiting for Cilium to be installed and ready... 38 | ✅ Cilium was successfully installed! 
Run 'cilium status' to view installation health 39 | ``` 40 | 41 | > cilium status --wait 42 | 43 | ``` 44 | /¯¯\ 45 | /¯¯\__/¯¯\ Cilium: OK 46 | \__/¯¯\__/ Operator: OK 47 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 48 | \__/¯¯\__/ Hubble Relay: disabled 49 | \__/ ClusterMesh: disabled 50 | 51 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 52 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 53 | Containers: cilium Running: 2 54 | cilium-operator Running: 1 55 | Cluster Pods: 2/2 managed by Cilium 56 | Image versions cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 57 | cilium-operator quay.io/cilium/operator-aws:v1.13.3@sha256:394c40d156235d3c2004f77bb73402457092351cc6debdbc5727ba36fbd863ae: 1 58 | ``` 59 | 60 | Now you need to create new ec2 instances for prefix delegation to take effect, so create another managed node group and remove the old one: 61 | 62 | ``` 63 | apiVersion: eksctl.io/v1alpha5 64 | kind: ClusterConfig 65 | 66 | metadata: 67 | name: basic-cilium 68 | region: us-east-1 69 | version: "1.27" 70 | 71 | managedNodeGroups: 72 | - name: ng-2 73 | instanceType: t3.medium 74 | # taint nodes so that application pods are 75 | # not scheduled/executed until Cilium is deployed. 76 | # Alternatively, see the note above regarding taint effects. 77 | taints: 78 | - key: "node.cilium.io/agent-not-ready" 79 | value: "true" 80 | effect: "NoExecute" 81 | maxPodsPerNode: 110 82 | ``` 83 | 84 | * Note the maxPodsPerNode option, which raises the number of pods per node beyond the per-node eni address limit. 85 | 86 | ``` 87 | eksctl create nodegroup -f files/eks-cilium-prefix.yaml 88 | eksctl delete nodegroup --cluster basic-cilium --name ng-1 89 | ``` 90 | 91 | # Test 92 | 93 | What is the pods-per-node limit for t3.medium: [eni-max-pods](https://github.com/awslabs/amazon-eks-ami/blob/master/files/eni-max-pods.txt) 94 | => 17 95 | 96 | ``` 97 | kubectl create deployment nginx --image nginx --replicas 100 98 | ``` 99 | 100 | After a few minutes, you can see: 101 | 102 | ``` 103 | kubectl get deployment 104 | NAME READY UP-TO-DATE AVAILABLE AGE 105 | nginx 100/100 100 100 2m12s 106 | ``` 107 | 108 | So you can run more than 34 pods on 2 nodes. 109 | -------------------------------------------------------------------------------- /install-cilium-eks-wireguard.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium with wireguard encryption enabled on eks clusters 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | 12 | # Cluster installation 13 | 14 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md) 15 | 16 | # Cilium installation 17 | 18 | > cilium install --encryption wireguard 19 | 20 | ``` 21 | 🔮 Auto-detected Kubernetes kind: EKS 22 | ℹ️ Using Cilium version 1.13.3 23 | 🔮 Auto-detected cluster name: basic-cilium-us-east-1-eksctl-io 24 | ℹ️ L7 proxy disabled due to Wireguard encryption 25 | 🔮 Auto-detected datapath mode: aws-eni 26 | 🔮 Auto-detected kube-proxy has been installed 27 | ℹ️ L7 proxy disabled due to Wireguard encryption 28 | 🔥 Patching the "aws-node" DaemonSet to evict its pods...
29 | ℹ️ L7 proxy disabled due to Wireguard encryption 30 | ℹ️ helm template --namespace kube-system cilium cilium/cilium --version 1.13.3 --set cluster.id=0,cluster.name=basic-cilium-us-east-1-eksctl-io,egressMasqueradeInterfaces=eth0,encryption.enabled=true,encryption.nodeEncryption=false,encryption.type=wireguard,eni.enabled=true,ipam.mode=eni,kubeProxyReplacement=disabled,l7Proxy=false,operator.replicas=1,serviceAccounts.cilium.name=cilium,serviceAccounts.operator.name=cilium-operator,tunnel=disabled 31 | ℹ️ Storing helm values file in kube-system/cilium-cli-helm-values Secret 32 | 🔑 Created CA in secret cilium-ca 33 | 🔑 Generating certificates for Hubble... 34 | 🚀 Creating Service accounts... 35 | 🚀 Creating Cluster roles... 36 | 🚀 Creating ConfigMap for Cilium version 1.13.3... 37 | 🚀 Creating Agent DaemonSet... 38 | 🚀 Creating Operator Deployment... 39 | ⌛ Waiting for Cilium to be installed and ready... 40 | ✅ Cilium was successfully installed! Run 'cilium status' to view installation health 41 | ``` 42 | 43 | ``` 44 | cilium status --wait 45 | /¯¯\ 46 | /¯¯\__/¯¯\ Cilium: OK 47 | \__/¯¯\__/ Operator: OK 48 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 49 | \__/¯¯\__/ Hubble Relay: disabled 50 | \__/ ClusterMesh: disabled 51 | 52 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 53 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 54 | Containers: cilium-operator Running: 1 55 | cilium Running: 2 56 | Cluster Pods: 2/2 managed by Cilium 57 | Image versions cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 58 | cilium-operator quay.io/cilium/operator-aws:v1.13.3@sha256:394c40d156235d3c2004f77bb73402457092351cc6debdbc5727ba36fbd863ae: 1 59 | ``` 60 | 61 | ``` 62 | kubectl get ciliumnodes 63 | NAME CILIUMINTERNALIP INTERNALIP AGE 64 | ip-192-168-20-215.ec2.internal 192.168.19.240 192.168.20.215 2m50s 65 | ip-192-168-50-79.ec2.internal 192.168.62.252 192.168.50.79 2m50s 66 | ``` 67 | 68 | ``` 69 | kubectl get ciliumnodes ip-192-168-50-79.ec2.internal -o json | jq .metadata.annotations 70 | { 71 | "network.cilium.io/wg-pub-key": "HMfZu016CF/0EYMl0tACI3qeaT2TePs831EfJZmzdQw=" 72 | } 73 | kubectl exec -n kube-system -ti ds/cilium -- cilium status |grep Encryption 74 | Encryption: Wireguard [cilium_wg0 (Pubkey: HMfZu016CF/0EYMl0tACI3qeaT2TePs831EfJZmzdQw=, Port: 51871, Peers: 1)] 75 | ``` 76 | 77 | * you can see cilium_wg0: 78 | ``` 79 | kubectl exec -n kube-system -ti ds/cilium -- ip link |grep cilium 80 | Defaulted container "cilium-agent" out of: cilium-agent, config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init) 81 | 3: cilium_wg0: mtu 8921 qdisc noqueue state UNKNOWN mode DEFAULT group default 82 | 5: cilium_net@cilium_host: mtu 9001 qdisc noqueue state UP mode DEFAULT group default qlen 1000 83 | 6: cilium_host@cilium_net: mtu 9001 qdisc noqueue state UP mode DEFAULT group default qlen 1000 84 | link/ether 62:ff:e0:48:bb:60 brd ff:ff:ff:ff:ff:ff link-netns cilium-health 85 | ``` 86 | 87 | # Test 88 | 89 | > cilium connectivity test 90 | -------------------------------------------------------------------------------- /install-cilium-eks-sg.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Create a "link" between security group and network policy using cilium 4 | * ipam mode: eni 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 
0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | * [terraform](tools/terraform.txt) 13 | 14 | # Cluster installation 15 | 16 | exactly the same as install-cilium-eks.md 17 | 18 | # Cilium installation 19 | 20 | exactly the same as install-cilium-eks.md 21 | 22 | # Create EC2 with nginx installed and security group 23 | 24 | I used terraform for that: 25 | 26 | ``` 27 | cd files/ 28 | terraform init 29 | [...] 30 | terraform apply 31 | [...] 32 | Outputs: 33 | 34 | private_ip = "192.168.124.214" 35 | security_group_id = "sg-0ce5e337befa5e3eb" 36 | ``` 37 | 38 | # Create network policy 39 | 40 | ## Deny all 41 | 42 | ``` 43 | apiVersion: networking.k8s.io/v1 44 | kind: NetworkPolicy 45 | metadata: 46 | name: deny-all 47 | namespace: default 48 | spec: 49 | podSelector: {} 50 | policyTypes: 51 | - Ingress 52 | - Egress 53 | ingress: [] 54 | egress: [] 55 | ``` 56 | 57 | > kubectl apply -f np-deny-all.yaml 58 | 59 | > kubectl get networkpolicy 60 | ``` 61 | NAME POD-SELECTOR AGE 62 | deny-all 2m22s 63 | ``` 64 | 65 | ### Test 66 | 67 | ``` 68 | kubectl run -it --image=alpine -- check 69 | If you don't see a command prompt, try pressing enter. 70 | / # wget 192.168.124.214 71 | Connecting to 192.168.124.214 (192.168.124.214:80) 72 | wget: can't connect to remote host (192.168.124.214): Operation timed out 73 | ``` 74 | 75 | ## security group egress access 76 | 77 | ``` 78 | apiVersion: cilium.io/v2 79 | kind: CiliumNetworkPolicy 80 | metadata: 81 | name: egress-default 82 | namespace: default 83 | spec: 84 | egress: 85 | - toGroups: 86 | - aws: 87 | securityGroupsIds: 88 | - sg-0ce5e337befa5e3eb 89 | endpointSelector: {} 90 | ``` 91 | 92 | * change sg-0ce5e337befa5e3eb by your own security group id 93 | 94 | ``` 95 | kubectl apply -f files/cnp-securitygroup.yaml 96 | ``` 97 | 98 | you see the network policy and the derivative policy: 99 | 100 | ``` 101 | kubectl get cnp 102 | NAME AGE 103 | egress-default 3s 104 | egress-default-togroups-b698c313-8f61-4f49-8d8a-0268259707b4 2s 105 | ``` 106 | 107 | * you refind the good egress ip (192.168.124.214): 108 | 109 | ``` 110 | kubectl describe cnp egress-default-togroups-b698c313-8f61-4f49-8d8a-0268259707b4 111 | Name: egress-default-togroups-b698c313-8f61-4f49-8d8a-0268259707b4 112 | Namespace: default 113 | Labels: io.cilium.network.policy.kind=derivative 114 | io.cilium.network.policy.parent.uuid=b698c313-8f61-4f49-8d8a-0268259707b4 115 | Annotations: 116 | API Version: cilium.io/v2 117 | Kind: CiliumNetworkPolicy 118 | Metadata: 119 | Creation Timestamp: 2023-06-15T14:58:50Z 120 | Generation: 1 121 | Owner References: 122 | API Version: cilium.io/v2 123 | Kind: CiliumNetworkPolicy 124 | Name: egress-default 125 | UID: b698c313-8f61-4f49-8d8a-0268259707b4 126 | Resource Version: 25612 127 | UID: 07bd59e0-0406-4f0e-9193-e595b8b926d1 128 | Specs: 129 | Egress: 130 | To CIDR Set: 131 | Cidr: 192.168.124.214/32 132 | Endpoint Selector: 133 | Match Labels: 134 | k8s:io.kubernetes.pod.namespace: default 135 | Labels: 136 | Key: io.cilium.k8s.policy.derived-from 137 | Source: k8s 138 | Value: CiliumNetworkPolicy 139 | Key: io.cilium.k8s.policy.name 140 | Source: k8s 141 | Value: egress-default 142 | Key: io.cilium.k8s.policy.namespace 143 | Source: k8s 144 | Value: default 145 | Key: io.cilium.k8s.policy.uid 146 | Source: k8s 147 | Value: b698c313-8f61-4f49-8d8a-0268259707b4 148 | Events: 149 | ``` 150 | 151 | ### Check 152 | 153 | 
``` 154 | kubectl run -it --image=alpine -- work 155 | If you don't see a command prompt, try pressing enter. 156 | / # wget 192.168.124.214 157 | Connecting to 192.168.124.214 (192.168.124.214:80) 158 | saving to 'index.html' 159 | index.html 100% |****************************************************************************************************************************************************| 615 0:00:00 ETA 160 | 'index.html' saved 161 | ``` 162 | -------------------------------------------------------------------------------- /install-cilium-eks-api-gateway.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium and api gateway on eks clusters 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | * [helm](tools/helm.txt) 12 | 13 | # Cluster installation 14 | 15 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md#cluster-installation) 16 | 17 | # Cilium installation 18 | 19 | ## Patch 20 | 21 | ``` 22 | kubectl -n kube-system delete ds kube-proxy 23 | kubectl -n kube-system delete cm kube-proxy 24 | kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 25 | ``` 26 | 27 | ## Find eks endpoint and install 28 | 29 | * Install CRD: 30 | 31 | ``` 32 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/v0.7.0/config/crd/standard/gateway.networking.k8s.io_gatewayclasses.yaml 33 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/v0.7.0/config/crd/standard/gateway.networking.k8s.io_gateways.yaml 34 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/v0.7.0/config/crd/standard/gateway.networking.k8s.io_httproutes.yaml 35 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/v0.7.0/config/crd/standard/gateway.networking.k8s.io_referencegrants.yaml 36 | kubectl apply -f https://raw.githubusercontent.com/kubernetes-sigs/gateway-api/v0.7.0/config/crd/experimental/gateway.networking.k8s.io_tlsroutes.yaml 37 | ``` 38 | 39 | * This CRD depends on the version of cilium (version tested: 1.13) 40 | 41 | ``` 42 | aws eks describe-cluster --name basic-cilium | jq -r .cluster.endpoint 43 | https://29F17965D68DB5502F627B2D22596152.gr7.us-east-1.eks.amazonaws.com 44 | API_SERVER_IP=29F17965D68DB5502F627B2D22596152.gr7.us-east-1.eks.amazonaws.com 45 | API_SERVER_PORT=443 46 | 47 | cilium install --version 1.14.2 \ 48 | --set kubeProxyReplacement=true \ 49 | --set gatewayAPI.enabled=true \ 50 | --set eni.enabled=true \ 51 | --set ipam.mode=eni \ 52 | --set egressMasqueradeInterfaces=eth0 \ 53 | --set tunnel=disabled \ 54 | --set k8sServiceHost=${API_SERVER_IP} \ 55 | --set k8sServicePort=${API_SERVER_PORT} 56 | ``` 57 | 58 | > cilium status --wait 59 | 60 | ``` 61 | /¯¯\ 62 | /¯¯\__/¯¯\ Cilium: OK 63 | \__/¯¯\__/ Operator: OK 64 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 65 | \__/¯¯\__/ Hubble Relay: disabled 66 | \__/ ClusterMesh: disabled 67 | 68 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 69 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 70 | Containers: cilium Running: 2 71 | cilium-operator Running: 1 72 | Cluster Pods: 2/2 managed by Cilium 73 | Helm chart version: 1.14.2 74 | Image 
versions cilium quay.io/cilium/cilium:v1.14.2@sha256:6263f3a3d5d63b267b538298dbeb5ae87da3efacf09a2c620446c873ba807d35: 2 75 | cilium-operator quay.io/cilium/operator-aws:v1.14.2@sha256:8d514a9eaa06b7a704d1ccead8c7e663334975e6584a815efe2b8c15244493f1: 1 76 | ``` 77 | 78 | 79 | ## Check 80 | 81 | ``` 82 | cilium config view | grep "enable-gateway-api" 83 | enable-gateway-api true 84 | enable-gateway-api-secrets-sync true 85 | ``` 86 | 87 | ``` 88 | kubectl get gatewayclasses.gateway.networking.k8s.io 89 | NAME CONTROLLER ACCEPTED AGE 90 | cilium io.cilium/gateway-controller True 115s 91 | ``` 92 | 93 | * Install a test web application: 94 | > kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.12/samples/bookinfo/platform/kube/bookinfo.yaml 95 | 96 | * Install the Gateway and HTTPRoute: 97 | > kubectl apply -f https://raw.githubusercontent.com/cilium/cilium/1.13.0/examples/kubernetes/gateway/basic-http.yaml 98 | 99 | * Get the URL: 100 | 101 | ``` 102 | kubectl get svc cilium-gateway-my-gateway 103 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 104 | cilium-gateway-my-gateway LoadBalancer 10.100.228.21 abe9a5b2820814677afd5b280b49909f-827090963.us-east-1.elb.amazonaws.com 80:31921/TCP 63s 105 | kubectl get gateway 106 | NAME CLASS ADDRESS READY AGE 107 | my-gateway cilium abe9a5b2820814677afd5b280b49909f-827090963.us-east-1.elb.amazonaws.com True 2m49s 108 | ``` 109 | 110 | * It works: 111 | ``` 112 | GATEWAY=$(kubectl get gateway my-gateway -o jsonpath='{.status.addresses[0].value}') 113 | curl --fail -s http://"$GATEWAY"/details/1 | jq 114 | { 115 | "id": 1, 116 | "author": "William Shakespeare", 117 | "year": 1595, 118 | "type": "paperback", 119 | "pages": 200, 120 | "publisher": "PublisherA", 121 | "language": "English", 122 | "ISBN-10": "1234567890", 123 | "ISBN-13": "123-1234567890" 124 | } 125 | ``` 126 | 127 | As you can see in the AWS Console or with the AWS CLI: 128 | ``` 129 | aws elbv2 describe-load-balancers 130 | { 131 | "LoadBalancers": [] 132 | } 133 | aws elb describe-load-balancers | jq .LoadBalancerDescriptions[].DNSName 134 | "abe9a5b2820814677afd5b280b49909f-827090963.us-east-1.elb.amazonaws.com" 135 | ``` 136 | 137 | By default a Classic Load Balancer is created, which is deprecated. How do you create an ALB or an NLB and configure its options?
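One possible direction (untested in this guide) is to let the AWS Load Balancer Controller provision an NLB for the Service that Cilium generates for the Gateway. This is only a sketch: it assumes the AWS Load Balancer Controller is already installed in the cluster, and it uses that controller's standard Service annotations on the `cilium-gateway-my-gateway` Service shown above:

```
# Sketch only: assumes the AWS Load Balancer Controller is installed in the cluster.
kubectl annotate svc cilium-gateway-my-gateway \
  service.beta.kubernetes.io/aws-load-balancer-type=external \
  service.beta.kubernetes.io/aws-load-balancer-nlb-target-type=instance \
  service.beta.kubernetes.io/aws-load-balancer-scheme=internet-facing
```

Note that the controller generally only honours the `aws-load-balancer-type` annotation at Service creation time, so you may have to delete and recreate the Gateway (and therefore its Service) for the change to take effect.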
138 | More information: https://github.com/cilium/cilium/issues/25357 139 | 140 | # Test 141 | 142 | > cilium connectivity test 143 | -------------------------------------------------------------------------------- /install-cilium-eks-overlay.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium on eks clusters with overlay network instead of aws eni network 4 | 5 | # Requirements 6 | 7 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 8 | * [kubectl](tools/kubectl.txt) 9 | * [cilium cli](tools/cilium-cli.txt) 10 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 11 | * [helm](tools/helm.txt) 12 | 13 | # Cluster installation 14 | 15 | exactly the same as install-cilium-eks.md 16 | 17 | # Cilium installation 18 | 19 | launch: 20 | 21 | ``` 22 | kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 23 | helm repo add cilium https://helm.cilium.io/ 24 | helm repo update 25 | helm install cilium cilium/cilium --version 1.13.4 \ 26 | --namespace kube-system \ 27 | --set egressMasqueradeInterfaces=eth0 28 | ``` 29 | 30 | ``` 31 | kubectl get node 32 | NAME STATUS ROLES AGE VERSION 33 | ip-192-168-11-230.ec2.internal Ready 8m41s v1.27.1-eks-2f008fe 34 | ip-192-168-59-196.ec2.internal Ready 8m24s v1.27.1-eks-2f008fe 35 | ``` 36 | 37 | On ec2 instance, clean this iptables rules: 38 | 39 | ``` 40 | iptables -t nat -F AWS-SNAT-CHAIN-0 41 | iptables -t nat -F AWS-SNAT-CHAIN-1 42 | iptables -t nat -F AWS-CONNMARK-CHAIN-0 43 | iptables -t nat -F AWS-CONNMARK-CHAIN-1 44 | ``` 45 | 46 | # Test 47 | 48 | ## Long 49 | cilium connectivity test 50 | 51 | ## Short 52 | kubectl create ns cilium-test 53 | kubectl apply -n cilium-test -f https://raw.githubusercontent.com/cilium/cilium/v1.13/examples/kubernetes/connectivity-check/connectivity-check.yaml 54 | 55 | ``` 56 | kubectl get pod -A -o wide 57 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 58 | cilium-test echo-a-6575c98b7d-th9ql 1/1 Running 0 32s 10.0.0.10 ip-192-168-9-0.ec2.internal 59 | cilium-test echo-b-54b86d8976-vxsvl 1/1 Running 0 31s 10.0.1.169 ip-192-168-55-48.ec2.internal 60 | cilium-test echo-b-host-54d5cc5fcd-qjlbs 1/1 Running 0 31s 192.168.55.48 ip-192-168-55-48.ec2.internal 61 | cilium-test host-to-b-multi-node-clusterip-846b574bbc-qj7zw 1/1 Running 0 29s 192.168.9.0 ip-192-168-9-0.ec2.internal 62 | cilium-test host-to-b-multi-node-headless-5b4bf5459f-8lkzq 1/1 Running 0 29s 192.168.9.0 ip-192-168-9-0.ec2.internal 63 | cilium-test pod-to-a-6578dd7fbf-8qbjd 1/1 Running 0 31s 10.0.0.66 ip-192-168-9-0.ec2.internal 64 | cilium-test pod-to-a-allowed-cnp-57fd79848c-9v9lf 1/1 Running 0 30s 10.0.1.72 ip-192-168-55-48.ec2.internal 65 | cilium-test pod-to-a-denied-cnp-d984d7757-2p6mk 1/1 Running 0 31s 10.0.0.90 ip-192-168-9-0.ec2.internal 66 | cilium-test pod-to-b-intra-node-nodeport-6654886dc9-g97j8 1/1 Running 0 29s 10.0.1.189 ip-192-168-55-48.ec2.internal 67 | cilium-test pod-to-b-multi-node-clusterip-54847b87b9-6k22l 1/1 Running 0 30s 10.0.0.214 ip-192-168-9-0.ec2.internal 68 | cilium-test pod-to-b-multi-node-headless-64b4d78855-bnqlz 1/1 Running 0 30s 10.0.0.193 ip-192-168-9-0.ec2.internal 69 | cilium-test pod-to-b-multi-node-nodeport-64757f6d5f-8jj57 1/1 Running 0 29s 10.0.0.49 ip-192-168-9-0.ec2.internal 70 | cilium-test pod-to-external-1111-76c448d975-k545p 1/1 Running 0 31s 10.0.1.56 ip-192-168-55-48.ec2.internal 71 | 
cilium-test pod-to-external-fqdn-allow-google-cnp-56c545c6b9-lzhrv 1/1 Running 0 30s 10.0.0.246 ip-192-168-9-0.ec2.internal 72 | kube-system cilium-bg2rv 1/1 Running 0 14m 192.168.55.48 ip-192-168-55-48.ec2.internal 73 | kube-system cilium-operator-85c44f5b6b-nn6qb 1/1 Running 0 14m 192.168.9.0 ip-192-168-9-0.ec2.internal 74 | kube-system cilium-operator-85c44f5b6b-zvf2q 1/1 Running 0 14m 192.168.55.48 ip-192-168-55-48.ec2.internal 75 | kube-system cilium-pp4ml 1/1 Running 0 14m 192.168.9.0 ip-192-168-9-0.ec2.internal 76 | kube-system coredns-79df7fff65-5bmml 1/1 Running 0 45m 10.0.1.99 ip-192-168-55-48.ec2.internal 77 | kube-system coredns-79df7fff65-j4mbh 1/1 Running 0 45m 10.0.0.180 ip-192-168-9-0.ec2.internal 78 | kube-system kube-proxy-cqwqs 1/1 Running 0 16m 192.168.55.48 ip-192-168-55-48.ec2.internal 79 | kube-system kube-proxy-pxgrr 1/1 Running 0 16m 192.168.9.0 ip-192-168-9-0.ec2.internal 80 | ``` 81 | 82 | As you can see, pods (other than those created by host-network DaemonSets) get IPs from the 10.0.0.0/16 range, which is different from the VPC range (192.168.0.0/16). 83 | -------------------------------------------------------------------------------- /install-cilium-eks-ipv6.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium on eks clusters using IPv6 instead of IPv4 4 | * ENI mode is not supported (https://github.com/cilium/cilium/issues/18405) 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | 13 | # Cluster installation 14 | 15 | ``` 16 | export AWS_DEFAULT_REGION=ch-ange-1 17 | export AWS_ACCESS_KEY_ID="CHANGEME" 18 | export AWS_SECRET_ACCESS_KEY="CHANGEME" 19 | ``` 20 | 21 | > source ./files/env 22 | 23 | ```yaml 24 | apiVersion: eksctl.io/v1alpha5 25 | kind: ClusterConfig 26 | 27 | metadata: 28 | name: ipv6-cilium 29 | region: us-east-1 30 | version: "1.27" 31 | 32 | availabilityZones: ['us-east-1a', 'us-east-1b'] 33 | 34 | kubernetesNetworkConfig: 35 | ipFamily: IPv6 36 | 37 | addons: 38 | - name: vpc-cni 39 | - name: coredns 40 | - name: kube-proxy 41 | 42 | iam: 43 | withOIDC: true 44 | 45 | managedNodeGroups: 46 | - name: ng-1 47 | instanceType: t3.medium 48 | # taint nodes so that application pods are 49 | # not scheduled/executed until Cilium is deployed. 50 | # Alternatively, see the note above regarding taint effects.
51 | taints: 52 | - key: "node.cilium.io/agent-not-ready" 53 | value: "true" 54 | effect: "NoExecute" 55 | ``` 56 | 57 | > eksctl create cluster -f ./files/eks-cilium-ipv6.yaml 58 | 59 | 60 | # Cilium installation 61 | 62 | > kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 63 | helm repo add cilium https://helm.cilium.io/ 64 | helm repo update 65 | helm install cilium cilium/cilium --version 1.13.3 \ 66 | --namespace kube-system \ 67 | --set ipv6.enabled=true \ 68 | --set egressMasqueradeInterfaces=eth0 69 | 70 | 71 | Restart pods : 72 | 73 | > kubectl get pods --all-namespaces -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,HOSTNETWORK:.spec.hostNetwork --no-headers=true | grep '' | awk '{print "-n "$1" "$2}' | xargs -L 1 kubectl delete pod 74 | 75 | # Test 76 | 77 | ``` 78 | cilium status 79 | /¯¯\ 80 | /¯¯\__/¯¯\ Cilium: OK 81 | \__/¯¯\__/ Operator: OK 82 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 83 | \__/¯¯\__/ Hubble Relay: disabled 84 | \__/ ClusterMesh: disabled 85 | 86 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 87 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 88 | Containers: cilium Running: 2 89 | cilium-operator Running: 2 90 | Cluster Pods: 2/2 managed by Cilium 91 | Image versions cilium-operator quay.io/cilium/operator-generic:v1.13.3@sha256:fa7003cbfdf8358cb71786afebc711b26e5e44a2ed99bd4944930bba915b8910: 2 92 | cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 93 | ``` 94 | 95 | > cilium connectivity test 96 | 97 | ``` 98 | kubectl get svc -A 99 | NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 100 | cilium-test echo-other-node NodePort fd36:eaee:439::cd0c 8080:30166/TCP 52m 101 | cilium-test echo-same-node NodePort fd36:eaee:439::9de 8080:30530/TCP 52m 102 | default kubernetes ClusterIP fd36:eaee:439::1 443/TCP 65m 103 | kube-system hubble-peer ClusterIP fd36:eaee:439::2113 443/TCP 53m 104 | kube-system kube-dns ClusterIP fd36:eaee:0439::a 53/UDP,53/TCP 65m 105 | ``` 106 | 107 | ``` 108 | kubectl get pod -o wide -A 109 | NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 110 | cilium-test client-6965d549d5-9pg8l 1/1 Running 0 53m fd00::84 ip-192-168-23-163.ec2.internal 111 | cilium-test client2-76f4d7c5bc-vm2v8 1/1 Running 0 53m fd00::81 ip-192-168-23-163.ec2.internal 112 | cilium-test echo-external-node-545d98c9b4-566xg 0/1 Pending 0 53m 113 | cilium-test echo-other-node-545c9b778b-4bzdw 2/2 Running 0 53m fd00::18c ip-192-168-61-94.ec2.internal 114 | cilium-test echo-same-node-965bbc7d4-qn868 2/2 Running 0 53m fd00::9f ip-192-168-23-163.ec2.internal 115 | cilium-test host-netns-mdvln 1/1 Running 0 53m 2600:1f10:400b:9c01:b557:7ec4:235c:5e59 ip-192-168-61-94.ec2.internal 116 | cilium-test host-netns-n2km9 1/1 Running 0 53m 2600:1f10:400b:9c00:7373:c69e:2aad:da3d ip-192-168-23-163.ec2.internal 117 | default iptables-test 1/1 Running 0 39m 2600:1f10:400b:9c01:b557:7ec4:235c:5e59 ip-192-168-61-94.ec2.internal 118 | default nginx 1/1 Running 0 51m fd00::137 ip-192-168-61-94.ec2.internal 119 | kube-system cilium-jgg2q 1/1 Running 0 54m 2600:1f10:400b:9c00:7373:c69e:2aad:da3d ip-192-168-23-163.ec2.internal 120 | kube-system cilium-nrtgj 1/1 Running 0 54m 2600:1f10:400b:9c01:b557:7ec4:235c:5e59 ip-192-168-61-94.ec2.internal 121 | kube-system cilium-operator-85c44f5b6b-t9zwd 1/1 Running 0 54m 
2600:1f10:400b:9c01:b557:7ec4:235c:5e59 ip-192-168-61-94.ec2.internal 122 | kube-system cilium-operator-85c44f5b6b-zq7h6 1/1 Running 0 54m 2600:1f10:400b:9c00:7373:c69e:2aad:da3d ip-192-168-23-163.ec2.internal 123 | kube-system coredns-79df7fff65-pbn4h 1/1 Running 0 54m fd00::110 ip-192-168-61-94.ec2.internal 124 | kube-system coredns-79df7fff65-xxrwg 1/1 Running 0 54m fd00::54 ip-192-168-23-163.ec2.internal 125 | kube-system kube-proxy-rhhlq 1/1 Running 0 57m 2600:1f10:400b:9c00:7373:c69e:2aad:da3d ip-192-168-23-163.ec2.internal 126 | kube-system kube-proxy-w2qln 1/1 Running 0 57m 2600:1f10:400b:9c01:b557:7ec4:235c:5e59 ip-192-168-61-94.ec2.internal 127 | ``` 128 | 129 | ``` 130 | kubectl get node -o wide 131 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 132 | ip-192-168-23-163.ec2.internal Ready 58m v1.27.1-eks-2f008fe 2600:1f10:400b:9c00:7373:c69e:2aad:da3d Amazon Linux 2 5.10.179-168.710.amzn2.x86_64 containerd://1.6.19 133 | ip-192-168-61-94.ec2.internal Ready 58m v1.27.1-eks-2f008fe 2600:1f10:400b:9c01:b557:7ec4:235c:5e59 Amazon Linux 2 5.10.179-168.710.amzn2.x86_64 containerd://1.6.19 134 | ``` 135 | -------------------------------------------------------------------------------- /install-cilium-eks-helm.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium using helm on eks cluster. 4 | * eni mode 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | * [helm](tools/helm.txt) 13 | 14 | # Cluster installation 15 | 16 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md#cluster-installation) 17 | 18 | # Cilium installation 19 | 20 | > kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 21 | ``` 22 | helm repo add cilium https://helm.cilium.io/ 23 | helm repo update 24 | helm install cilium cilium/cilium --version 1.13.4 \ 25 | --namespace kube-system \ 26 | --set eni.enabled=true \ 27 | --set ipam.mode=eni \ 28 | --set egressMasqueradeInterfaces=eth0 \ 29 | --set tunnel=disabled 30 | ``` 31 | 32 | > cilium status --wait 33 | 34 | ``` 35 | /¯¯\ 36 | /¯¯\__/¯¯\ Cilium: OK 37 | \__/¯¯\__/ Operator: OK 38 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 39 | \__/¯¯\__/ Hubble Relay: disabled 40 | \__/ ClusterMesh: disabled 41 | 42 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 43 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 44 | Containers: cilium Running: 2 45 | cilium-operator Running: 2 46 | Cluster Pods: 2/2 managed by Cilium 47 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 48 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 49 | ``` 50 | 51 | ``` 52 | kubectl exec -n kube-system ds/cilium -- cilium status --verbose 53 | Defaulted container "cilium-agent" out of: cilium-agent, config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init) 54 | KVStore: Ok Disabled 55 | Kubernetes: Ok 1.27+ (v1.27.3-eks-a5565ad) [linux/amd64] 56 | Kubernetes APIs: 
["cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "core/v1::Namespace", "core/v1::Node", "core/v1::Pods", "core/v1::Service", "discovery/v1::EndpointSlice", "networking.k8s.io/v1::NetworkPolicy"] 57 | KubeProxyReplacement: Disabled 58 | Host firewall: Disabled 59 | CNI Chaining: none 60 | CNI Config file: CNI configuration file management disabled 61 | Cilium: Ok 1.13.4 (v1.13.4-4061cdfc) 62 | NodeMonitor: Listening for events on 2 CPUs with 64x4096 of shared memory 63 | Cilium health daemon: Ok 64 | IPAM: IPv4: 4/12 allocated, 65 | Allocated addresses: 66 | 192.168.18.93 (kube-system/coredns-79df7fff65-md6hc) 67 | 192.168.28.58 (router) 68 | 192.168.30.127 (kube-system/coredns-79df7fff65-2x4sj) 69 | 192.168.9.25 (health) 70 | IPv6 BIG TCP: Disabled 71 | BandwidthManager: Disabled 72 | Host Routing: Legacy 73 | Masquerading: IPTables [IPv4: Enabled, IPv6: Disabled] 74 | Clock Source for BPF: jiffies [250 Hz] 75 | Controller Status: 26/26 healthy 76 | Name Last success Last error Count Message 77 | cilium-health-ep 1m3s ago never 0 no error 78 | dns-garbage-collector-job 12s ago never 0 no error 79 | endpoint-200-regeneration-recovery never never 0 no error 80 | endpoint-2986-regeneration-recovery never never 0 no error 81 | endpoint-34-regeneration-recovery never never 0 no error 82 | endpoint-3867-regeneration-recovery never never 0 no error 83 | endpoint-gc 3m12s ago never 0 no error 84 | ipcache-inject-labels 3m1s ago 3m5s ago 0 no error 85 | k8s-heartbeat 12s ago never 0 no error 86 | link-cache 19s ago never 0 no error 87 | metricsmap-bpf-prom-sync 7s ago never 0 no error 88 | resolve-identity-200 3m4s ago never 0 no error 89 | resolve-identity-2986 3m3s ago never 0 no error 90 | resolve-identity-34 3m2s ago never 0 no error 91 | resolve-identity-3867 3m2s ago never 0 no error 92 | sync-endpoints-and-host-ips 4s ago never 0 no error 93 | sync-lb-maps-with-k8s-services 3m4s ago never 0 no error 94 | sync-policymap-200 1m0s ago never 0 no error 95 | sync-policymap-2986 1m0s ago never 0 no error 96 | sync-policymap-34 1m0s ago never 0 no error 97 | sync-policymap-3867 1m0s ago never 0 no error 98 | sync-to-k8s-ciliumendpoint (200) 4s ago never 0 no error 99 | sync-to-k8s-ciliumendpoint (2986) 13s ago never 0 no error 100 | sync-to-k8s-ciliumendpoint (34) 12s ago never 0 no error 101 | sync-to-k8s-ciliumendpoint (3867) 12s ago never 0 no error 102 | template-dir-watcher never never 0 no error 103 | Proxy Status: OK, ip 192.168.28.58, 0 redirects active on ports 10000-20000 104 | Global Identity Range: min 256, max 65535 105 | Hubble: Ok Current/Max Flows: 607/4095 (14.82%), Flows/s: 3.05 Metrics: Disabled 106 | KubeProxyReplacement Details: 107 | Status: Disabled 108 | Socket LB: Disabled 109 | Socket LB Tracing: Disabled 110 | Socket LB Coverage: Full 111 | Session Affinity: Disabled 112 | Graceful Termination: Enabled 113 | NAT46/64 Support: Disabled 114 | Services: 115 | - ClusterIP: Enabled 116 | - NodePort: Disabled 117 | - LoadBalancer: Disabled 118 | - externalIPs: Disabled 119 | - HostPort: Disabled 120 | BPF Maps: dynamic sizing: on (ratio: 0.002500) 121 | Name Size 122 | Non-TCP connection tracking 65536 123 | TCP connection tracking 131072 124 | Endpoint policy 65535 125 | Events 2 126 | IP cache 512000 127 | IP masquerading agent 16384 128 | IPv4 fragmentation 8192 129 | IPv4 service 65536 130 | IPv6 service 65536 131 | IPv4 service backend 65536 132 | IPv6 service backend 65536 133 | IPv4 
service reverse NAT 65536 134 | IPv6 service reverse NAT 65536 135 | Metrics 1024 136 | NAT 131072 137 | Neighbor table 131072 138 | Global policy 16384 139 | Per endpoint policy 65536 140 | Session affinity 65536 141 | Signal 2 142 | Sockmap 65535 143 | Sock reverse NAT 65536 144 | Tunnel 65536 145 | Encryption: Disabled 146 | Cluster health: 2/2 reachable (2023-07-03T11:01:07Z) 147 | Name IP Node Endpoints 148 | ip-192-168-18-119.ec2.internal (localhost) 192.168.18.119 reachable reachable 149 | ip-192-168-43-133.ec2.internal 192.168.43.133 reachable reachable 150 | ``` 151 | 152 | # Test 153 | 154 | > cilium connectivity test 155 | -------------------------------------------------------------------------------- /install-cilium-eks-clustermesh.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium on 2 eks clusters and communicate with clustermesh 4 | * Terraform deployment 5 | * ipam mode: eni 6 | 7 | # Requirements 8 | 9 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 10 | * [kubectl](tools/kubectl.txt) 11 | * [cilium cli](tools/cilium-cli.txt) 12 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 13 | * [terraform](tools/terraform.txt) 14 | 15 | # Cluster installation 1 16 | 17 | ``` 18 | mkdir cluster1 19 | cd cluster1 20 | git clone https://github.com/littlejo/terraform-eks-cilium.git 21 | cd terraform-eks-cilium 22 | terraform init 23 | terraform apply -var-file=clustermesh-1.tfvars 24 | ... 25 | Plan: 49 to add, 0 to change, 0 to destroy. 26 | 27 | Changes to Outputs: 28 | + update_kubeconfig = "aws eks update-kubeconfig --name cluster-mesh-cilium-1 --kubeconfig ~/.kube/config" 29 | 30 | ``` 31 | 32 | ``` 33 | aws eks update-kubeconfig --name cluster-mesh-cilium-1 --kubeconfig ~/.kube/config 34 | CONTEXT1=arn:aws:eks:us-east-1:xxxxxxxxxx:cluster/cluster-mesh-cilium-1 35 | ``` 36 | 37 | # Cluster installation 2 38 | 39 | ``` 40 | mkdir cluster2 41 | cd cluster2 42 | git clone https://github.com/littlejo/terraform-eks-cilium.git 43 | cd terraform-eks-cilium 44 | terraform init 45 | terraform apply -var-file=clustermesh-2.tfvars 46 | Plan: 49 to add, 0 to change, 0 to destroy. 47 | 48 | Changes to Outputs: 49 | + update_kubeconfig = "aws eks update-kubeconfig --name cluster-mesh-cilium-2 --kubeconfig ~/.kube/config" 50 | ``` 51 | 52 | ``` 53 | aws eks update-kubeconfig --name cluster-mesh-cilium-2 --kubeconfig ~/.kube/config 54 | CONTEXT2=arn:aws:eks:us-east-1:xxxxxxxxxxxx:cluster/cluster-mesh-cilium-2 55 | ``` 56 | 57 | # VPC peering 58 | 59 | You need to create a VPC peering connection so that the clustermesh LoadBalancer services in the two clusters can reach each other.
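If you prefer to do this by hand, a minimal sketch with the AWS CLI looks like the following; the VPC IDs, route table IDs and CIDRs are placeholders that you must replace with the values of your two clusters, and the node security groups must also allow traffic from the peer VPC:

```
# Sketch only: replace the placeholder IDs and CIDRs with your own values.
PEERING_ID=$(aws ec2 create-vpc-peering-connection \
  --vpc-id vpc-11111111 --peer-vpc-id vpc-22222222 \
  --query VpcPeeringConnection.VpcPeeringConnectionId --output text)
aws ec2 accept-vpc-peering-connection --vpc-peering-connection-id $PEERING_ID
# Route each VPC to the other one through the peering connection
aws ec2 create-route --route-table-id rtb-11111111 \
  --destination-cidr-block 10.2.0.0/16 --vpc-peering-connection-id $PEERING_ID
aws ec2 create-route --route-table-id rtb-22222222 \
  --destination-cidr-block 10.1.0.0/16 --vpc-peering-connection-id $PEERING_ID
```

This guide uses a ready-made Terraform example instead: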
60 | 61 | ``` 62 | git clone https://github.com/littlejo/terraform-vpc-peering-example 63 | cd terraform-vpc-peering-example 64 | terraform init 65 | terraform apply 66 | ``` 67 | 68 | # Cilium installation on cluster 1 69 | 70 | ## Patch 71 | 72 | ``` 73 | kubectl --context $CONTEXT1 -n kube-system delete ds kube-proxy 74 | kubectl --context $CONTEXT1 -n kube-system delete cm kube-proxy 75 | kubectl --context $CONTEXT1 -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 76 | ``` 77 | 78 | ## Find eks endpoint and install 79 | 80 | ``` 81 | API_SERVER_IP=$(aws eks describe-cluster --name cluster-mesh-cilium-1 | jq -r .cluster.endpoint | awk -F/ '{print $3}') 82 | API_SERVER_PORT=443 83 | helm repo add cilium https://helm.cilium.io/ 84 | helm repo update 85 | helm install cilium cilium/cilium --kube-context $CONTEXT1 \ 86 | --version 1.13.4 \ 87 | --namespace kube-system \ 88 | --set eni.enabled=true \ 89 | --set ipam.mode=eni \ 90 | --set egressMasqueradeInterfaces=eth0 \ 91 | --set kubeProxyReplacement=strict \ 92 | --set tunnel=disabled \ 93 | --set k8sServiceHost=${API_SERVER_IP} \ 94 | --set k8sServicePort=${API_SERVER_PORT} \ 95 | --set cluster.name=cluster-mesh-cilium-1 \ 96 | --set cluster.id=1 \ 97 | --set encryption.enabled=true \ 98 | --set encryption.type=wireguard \ 99 | --set l7Proxy=false 100 | ``` 101 | 102 | > cilium status --context $CONTEXT1 --wait 103 | 104 | ``` 105 | /¯¯\ 106 | /¯¯\__/¯¯\ Cilium: OK 107 | \__/¯¯\__/ Operator: OK 108 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 109 | \__/¯¯\__/ Hubble Relay: disabled 110 | \__/ ClusterMesh: disabled 111 | 112 | Deployment cilium-operator Desired: 1, Ready: 1/1, Available: 1/1 113 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 114 | Containers: cilium Running: 2 115 | cilium-operator Running: 1 116 | Cluster Pods: 2/2 managed by Cilium 117 | Image versions cilium quay.io/cilium/cilium:v1.13.3@sha256:77176464a1e11ea7e89e984ac7db365e7af39851507e94f137dcf56c87746314: 2 118 | cilium-operator quay.io/cilium/operator-aws:v1.13.3@sha256:394c40d156235d3c2004f77bb73402457092351cc6debdbc5727ba36fbd863ae: 1 119 | ``` 120 | 121 | ``` 122 | kubectl --context $CONTEXT1 get secret -n kube-system cilium-ca -o yaml > cilium-ca.yaml 123 | kubectl --context $CONTEXT1 get secret -n kube-system hubble-ca-secret -o yaml > hubble-ca-secret.yaml 124 | ``` 125 | 126 | # Cilium installation on cluster 2 127 | 128 | ## Patch 129 | 130 | ``` 131 | kubectl --context $CONTEXT2 -n kube-system delete ds kube-proxy 132 | kubectl --context $CONTEXT2 -n kube-system delete cm kube-proxy 133 | kubectl --context $CONTEXT2 -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 134 | ``` 135 | 136 | ## Apply secrets from cluster 1 137 | 138 | ``` 139 | kubectl --context $CONTEXT2 apply -f cilium-ca.yaml 140 | kubectl --context $CONTEXT2 apply -f hubble-ca-secret.yaml 141 | ``` 142 | 143 | ## Find eks endpoint and install 144 | 145 | ``` 146 | API_SERVER_IP=$(aws eks describe-cluster --name cluster-mesh-cilium-2 | jq -r .cluster.endpoint | awk -F/ '{print $3}') 147 | API_SERVER_PORT=443 148 | helm repo add cilium https://helm.cilium.io/ 149 | helm repo update 150 | helm install cilium cilium/cilium --kube-context $CONTEXT2 \ 151 | --version 1.13.4 \ 152 | --namespace kube-system \ 153 | --set eni.enabled=true \ 154 | --set ipam.mode=eni \ 155 | 
--set egressMasqueradeInterfaces=eth0 \ 156 | --set kubeProxyReplacement=strict \ 157 | --set tunnel=disabled \ 158 | --set k8sServiceHost=${API_SERVER_IP} \ 159 | --set k8sServicePort=${API_SERVER_PORT} \ 160 | --set cluster.name=cluster-mesh-cilium-2 \ 161 | --set cluster.id=2 \ 162 | --set encryption.enabled=true \ 163 | --set encryption.type=wireguard \ 164 | --set l7Proxy=false 165 | ``` 166 | 167 | > cilium status --context $CONTEXT2 --wait 168 | 169 | 170 | ``` 171 | /¯¯\ 172 | /¯¯\__/¯¯\ Cilium: OK 173 | \__/¯¯\__/ Operator: OK 174 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 175 | \__/¯¯\__/ Hubble Relay: disabled 176 | \__/ ClusterMesh: disabled 177 | 178 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 179 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 180 | Containers: cilium Running: 2 181 | cilium-operator Running: 2 182 | Cluster Pods: 2/2 managed by Cilium 183 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 184 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 185 | ``` 186 | 187 | # clustermesh enable on cluster 1 188 | 189 | ``` 190 | cilium clustermesh enable --context $CONTEXT1 191 | ``` 192 | 193 | ``` 194 | cilium status --context $CONTEXT1 195 | /¯¯\ 196 | /¯¯\__/¯¯\ Cilium: OK 197 | \__/¯¯\__/ Operator: OK 198 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 199 | \__/¯¯\__/ Hubble Relay: disabled 200 | \__/ ClusterMesh: OK 201 | 202 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 203 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 204 | Deployment clustermesh-apiserver Desired: 1, Ready: 1/1, Available: 1/1 205 | Containers: cilium-operator Running: 2 206 | clustermesh-apiserver Running: 1 207 | cilium Running: 2 208 | Cluster Pods: 6/6 managed by Cilium 209 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 210 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 211 | clustermesh-apiserver quay.io/coreos/etcd:v3.5.4: 1 212 | clustermesh-apiserver quay.io/cilium/clustermesh-apiserver:v1.13.4: 1 213 | 214 | cilium clustermesh status --context $CONTEXT1 215 | Hostname based ingress detected, trying to resolve it 216 | Hostname resolved, using the found ip(s) 217 | ✅ Cluster access information is available: 218 | - 10.1.11.252:2379 219 | - 10.1.224.194:2379 220 | ✅ Service "clustermesh-apiserver" of type "LoadBalancer" found 221 | 🔌 Cluster Connections: 222 | 🔀 Global services: [ min:0 / avg:0.0 / max:0 ] 223 | ``` 224 | 225 | # clustermesh enable on cluster 2 226 | 227 | ``` 228 | cilium clustermesh enable --context $CONTEXT2 229 | ``` 230 | 231 | ``` 232 | cilium status --context $CONTEXT2 233 | /¯¯\ 234 | /¯¯\__/¯¯\ Cilium: OK 235 | \__/¯¯\__/ Operator: OK 236 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 237 | \__/¯¯\__/ Hubble Relay: disabled 238 | \__/ ClusterMesh: OK 239 | 240 | Deployment clustermesh-apiserver Desired: 1, Ready: 1/1, Available: 1/1 241 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 242 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 243 | Containers: cilium Running: 2 244 | cilium-operator Running: 2 245 | clustermesh-apiserver Running: 1 246 | Cluster Pods: 4/4 managed by Cilium 247 | Image versions 
cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 248 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 249 | clustermesh-apiserver quay.io/coreos/etcd:v3.5.4: 1 250 | clustermesh-apiserver quay.io/cilium/clustermesh-apiserver:v1.13.4: 1 251 | cilium clustermesh status --context $CONTEXT2 252 | Hostname based ingress detected, trying to resolve it 253 | Hostname resolved, using the found ip(s) 254 | ✅ Cluster access information is available: 255 | - 10.2.241.85:2379 256 | - 10.2.183.144:2379 257 | ✅ Service "clustermesh-apiserver" of type "LoadBalancer" found 258 | 🔌 Cluster Connections: 259 | 🔀 Global services: [ min:0 / avg:0.0 / max:0 ] 260 | ``` 261 | 262 | # Connect Clusters 263 | 264 | > cilium clustermesh connect --context $CONTEXT1 --destination-context $CONTEXT2 265 | ``` 266 | ✨ Extracting access information of cluster cluster-mesh-cilium-2... 267 | 🔑 Extracting secrets from cluster cluster-mesh-cilium-2... 268 | ⚠️ Service type NodePort detected! Service may fail when nodes are removed from the cluster! 269 | ℹ️ Found ClusterMesh service IPs: [10.2.105.236] 270 | ✨ Extracting access information of cluster cluster-mesh-cilium-1... 271 | 🔑 Extracting secrets from cluster cluster-mesh-cilium-1... 272 | ⚠️ Service type NodePort detected! Service may fail when nodes are removed from the cluster! 273 | ℹ️ Found ClusterMesh service IPs: [10.1.121.93] 274 | ✨ Connecting cluster arn:aws:eks:us-east-1:621304841877:cluster/cluster-mesh-cilium-1 -> arn:aws:eks:us-east-1:621304841877:cluster/cluster-mesh-cilium-2... 275 | 🔑 Secret cilium-clustermesh does not exist yet, creating it... 276 | 🔑 Patching existing secret cilium-clustermesh... 277 | ✨ Patching DaemonSet with IP aliases cilium-clustermesh... 278 | ✨ Connecting cluster arn:aws:eks:us-east-1:621304841877:cluster/cluster-mesh-cilium-2 -> arn:aws:eks:us-east-1:621304841877:cluster/cluster-mesh-cilium-1... 279 | 🔑 Secret cilium-clustermesh does not exist yet, creating it... 280 | 🔑 Patching existing secret cilium-clustermesh... 281 | ✨ Patching DaemonSet with IP aliases cilium-clustermesh... 282 | ✅ Connected cluster arn:aws:eks:us-east-1:621304841877:cluster/cluster-mesh-cilium-1 and arn:aws:eks:us-east-1:621304841877:cluster/cluster-mesh-cilium-2! 
283 | ``` 284 | 285 | # Check 286 | 287 | ``` 288 | cilium clustermesh status --context $CONTEXT1 289 | Hostname based ingress detected, trying to resolve it 290 | Hostname resolved, using the found ip(s) 291 | ✅ Cluster access information is available: 292 | - 10.1.11.252:2379 293 | - 10.1.224.194:2379 294 | ✅ Service "clustermesh-apiserver" of type "LoadBalancer" found 295 | ✅ All 2 nodes are connected to all clusters [min:1 / avg:1.0 / max:1] 296 | 🔌 Cluster Connections: 297 | - cluster-mesh-cilium-2: 2/2 configured, 2/2 connected 298 | 🔀 Global services: [ min:4 / avg:4.0 / max:4 ] 299 | 300 | cilium clustermesh status --context $CONTEXT2 301 | Hostname based ingress detected, trying to resolve it 302 | Hostname resolved, using the found ip(s) 303 | ✅ Cluster access information is available: 304 | - 10.2.183.144:2379 305 | - 10.2.241.85:2379 306 | ✅ Service "clustermesh-apiserver" of type "LoadBalancer" found 307 | ✅ All 2 nodes are connected to all clusters [min:1 / avg:1.0 / max:1] 308 | 🔌 Cluster Connections: 309 | - cluster-mesh-cilium-1: 2/2 configured, 2/2 connected 310 | 🔀 Global services: [ min:4 / avg:4.0 / max:4 ] 311 | ``` 312 | 313 | # Automatised Test 314 | 315 | > cilium connectivity test --context $CONTEXT1 --multi-cluster $CONTEXT2 316 | 317 | # Manual Test 318 | 319 | ``` 320 | kubectl apply --context $CONTEXT1 -f https://raw.githubusercontent.com/cilium/cilium/1.13.1/examples/kubernetes/clustermesh/global-service-example/cluster1.yaml 321 | kubectl apply --context $CONTEXT2 -f https://raw.githubusercontent.com/cilium/cilium/1.13.1/examples/kubernetes/clustermesh/global-service-example/cluster2.yaml 322 | 323 | kubectl get service/rebel-base --context $CONTEXT2 -o json | jq .metadata.annotations 324 | { 325 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{\"service.cilium.io/global\":\"true\"},\"name\":\"rebel-base\",\"namespace\":\"default\"},\"spec\":{\"ports\":[{\"port\":80}],\"selector\":{\"name\":\"rebel-base\"},\"type\":\"ClusterIP\"}}\n", 326 | "service.cilium.io/global": "true" 327 | } 328 | ``` 329 | 330 | ``` 331 | for i in $(seq 1 10) 332 | do 333 | kubectl --context $CONTEXT1 exec -ti deployment/x-wing -- curl rebel-base 334 | done 335 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 336 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 337 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 338 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 339 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 340 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 341 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 342 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 343 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 344 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 345 | ``` 346 | 347 | ``` 348 | for i in $(seq 1 10) 349 | do 350 | kubectl --context $CONTEXT2 exec -ti deployment/x-wing -- curl rebel-base 351 | done 352 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 353 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 354 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 355 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 356 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 357 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 358 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 359 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 360 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 361 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 362 | ``` 363 | 364 | * set affinity to local: 365 | > 
kubectl --context=$CONTEXT1 annotate service rebel-base service.cilium.io/affinity=local --overwrite 366 | 367 | ``` 368 | for i in $(seq 1 10) 369 | do 370 | kubectl --context $CONTEXT1 exec -ti deployment/x-wing -- curl rebel-base 371 | done 372 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 373 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 374 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 375 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 376 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 377 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 378 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 379 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 380 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 381 | {"Galaxy": "Alderaan", "Cluster": "Cluster-1"} 382 | ``` 383 | 384 | > kubectl --context $CONTEXT1 scale --replicas=0 deploy/rebel-base 385 | 386 | ``` 387 | for i in $(seq 1 10) 388 | do 389 | kubectl --context $CONTEXT1 exec -ti deployment/x-wing -- curl rebel-base 390 | done 391 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 392 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 393 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 394 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 395 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 396 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 397 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 398 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 399 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 400 | {"Galaxy": "Alderaan", "Cluster": "Cluster-2"} 401 | ``` 402 | 403 | > kubectl --context $CONTEXT1 scale --replicas=2 deploy/rebel-base 404 | -------------------------------------------------------------------------------- /install-cilium-eks-kube-proxy-free-ebp-hostrouting.md: -------------------------------------------------------------------------------- 1 | # Use case 2 | 3 | * Install cilium on eks clusters with kube-proxy free 4 | * ipam mode: eni 5 | 6 | # Requirements 7 | 8 | * [eksctl (tested version: 0.143.0)](tools/eksctl.txt) 9 | * [kubectl](tools/kubectl.txt) 10 | * [cilium cli](tools/cilium-cli.txt) 11 | * [aws-iam-authenticator](tools/aws-iam-authenticator.txt) 12 | 13 | # Cluster installation 14 | 15 | exactly the same as [install-cilium-eks.md](install-cilium-eks.md#cluster-installation) 16 | 17 | * I added an allow-all-traffic rule from 0.0.0.0/0 to the security group of the node group EC2 instances 18 | 19 | # Cilium installation 20 | 21 | ## Patch 22 | 23 | ``` 24 | kubectl -n kube-system delete ds kube-proxy 25 | kubectl -n kube-system delete cm kube-proxy 26 | kubectl -n kube-system patch daemonset aws-node --type='strategic' -p='{"spec":{"template":{"spec":{"nodeSelector":{"io.cilium/aws-node-enabled":"true"}}}}}' 27 | ``` 28 | 29 | ## Find eks endpoint and install 30 | 31 | ``` 32 | aws eks describe-cluster --name basic-cilium | jq -r .cluster.endpoint 33 | https://92E99371B87ECA152191821C3596B241.gr7.us-east-1.eks.amazonaws.com 34 | API_SERVER_IP=92E99371B87ECA152191821C3596B241.gr7.us-east-1.eks.amazonaws.com 35 | API_SERVER_PORT=443 36 | 37 | helm repo add cilium https://helm.cilium.io/ 38 | helm repo update 39 | helm install cilium cilium/cilium --version 1.13.4 \ 40 | --namespace kube-system \ 41 | --set eni.enabled=true \ 42 | --set ipam.mode=eni \ 43 | --set egressMasqueradeInterfaces=eth0 \ 44 | --set kubeProxyReplacement=strict \ 45 | --set tunnel=disabled \ 46 | --set hubble.enabled=true \ 47 | --set hubble.relay.enabled=true \ 48 | --set hubble.ui.enabled=true \ 49 | --set k8sServiceHost=${API_SERVER_IP} \ 50 | --set 
k8sServicePort=${API_SERVER_PORT} 51 | ``` 52 | 53 | > cilium status --wait 54 | 55 | ``` 56 | /¯¯\ 57 | /¯¯\__/¯¯\ Cilium: OK 58 | \__/¯¯\__/ Operator: OK 59 | /¯¯\__/¯¯\ Envoy DaemonSet: disabled (using embedded mode) 60 | \__/¯¯\__/ Hubble Relay: OK 61 | \__/ ClusterMesh: disabled 62 | 63 | Deployment cilium-operator Desired: 2, Ready: 2/2, Available: 2/2 64 | Deployment hubble-ui Desired: 1, Ready: 1/1, Available: 1/1 65 | Deployment hubble-relay Desired: 1, Ready: 1/1, Available: 1/1 66 | DaemonSet cilium Desired: 2, Ready: 2/2, Available: 2/2 67 | Containers: hubble-ui Running: 1 68 | hubble-relay Running: 1 69 | cilium Running: 2 70 | cilium-operator Running: 2 71 | Cluster Pods: 4/4 managed by Cilium 72 | Image versions cilium quay.io/cilium/cilium:v1.13.4@sha256:bde8800d61aaad8b8451b10e247ac7bdeb7af187bb698f83d40ad75a38c1ee6b: 2 73 | cilium-operator quay.io/cilium/operator-aws:v1.13.4@sha256:c6bde19bbfe1483577f9ef375ff6de19402ac20277c451fe05729fcb9bc02a84: 2 74 | hubble-ui quay.io/cilium/hubble-ui:v0.11.0@sha256:bcb369c47cada2d4257d63d3749f7f87c91dde32e010b223597306de95d1ecc8: 1 75 | hubble-ui quay.io/cilium/hubble-ui-backend:v0.11.0@sha256:14c04d11f78da5c363f88592abae8d2ecee3cbe009f443ef11df6ac5f692d839: 1 76 | hubble-relay quay.io/cilium/hubble-relay:v1.13.4@sha256:bac057a5130cf75adf5bc363292b1f2642c0c460ac9ff018fcae3daf64873871: 1 77 | 78 | ``` 79 | 80 | ## Test 81 | 82 | 83 | > cilium hubble port-forward& 84 | 85 | ``` 86 | kubectl exec -it ds/cilium -n kube-system -c cilium-agent -- cilium status 87 | KVStore: Ok Disabled 88 | Kubernetes: Ok 1.27+ (v1.27.3-eks-a5565ad) [linux/amd64] 89 | Kubernetes APIs: ["cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "core/v1::Namespace", "core/v1::Node", "core/v1::Pods", "core/v1::Service", "discovery/v1::EndpointSlice", "networking.k8s.io/v1::NetworkPolicy"] 90 | KubeProxyReplacement: Strict [eth0 192.168.7.5 (Direct Routing)] 91 | Host firewall: Disabled 92 | CNI Chaining: none 93 | CNI Config file: CNI configuration file management disabled 94 | Cilium: Ok 1.13.4 (v1.13.4-4061cdfc) 95 | NodeMonitor: Listening for events on 2 CPUs with 64x4096 of shared memory 96 | Cilium health daemon: Ok 97 | IPAM: IPv4: 6/15 allocated, 98 | IPv6 BIG TCP: Disabled 99 | BandwidthManager: Disabled 100 | Host Routing: Legacy 101 | Masquerading: IPTables [IPv4: Enabled, IPv6: Disabled] 102 | Controller Status: 35/35 healthy 103 | Proxy Status: OK, ip 192.168.7.45, 0 redirects active on ports 10000-20000 104 | Global Identity Range: min 256, max 65535 105 | Hubble: Ok Current/Max Flows: 3058/4095 (74.68%), Flows/s: 10.57 Metrics: Disabled 106 | Encryption: Disabled 107 | Cluster health: 2/2 reachable (2023-07-22T04:58:04Z) 108 | ``` 109 | 110 | > cilium connectivity test 111 | 112 | ``` 113 | ✅ All 42 tests (304 actions) successful, 12 tests skipped, 0 scenarios skipped. 
114 | ``` 115 | 116 | ## Workaround 117 | 118 | 119 | * I back up the original configmap: 120 | 121 | ``` 122 | kubectl get cm -n kube-system cilium-config -o yaml > cilium-config-origin.yaml 123 | ``` 124 | 125 | I changed the following settings: 126 | ``` 127 | diff -u cilium-config-origin.yaml cilium-config.yaml 128 | --- cilium-config-origin.yaml 2023-07-22 05:22:08.303311094 +0000 129 | +++ cilium-config.yaml 2023-07-22 05:23:41.211665878 +0000 130 | @@ -1,5 +1,9 @@ 131 | apiVersion: v1 132 | data: 133 | + devices: "eth+" 134 | + enable-bpf-masquerade: "true" 135 | + ipv4-native-routing-cidr: "192.168.0.0/16" 136 | + enable-host-legacy-routing: "false" 137 | agent-not-ready-taint-key: node.cilium.io/agent-not-ready 138 | arping-refresh-period: 30s 139 | auto-create-cilium-node-resource: "true" 140 | @@ -21,12 +25,11 @@ 141 | disable-cnp-status-updates: "true" 142 | disable-endpoint-crd: "false" 143 | ec2-api-endpoint: "" 144 | - egress-masquerade-interfaces: eth0 145 | enable-auto-protect-node-port-range: "true" 146 | enable-bgp-control-plane: "false" 147 | enable-bpf-clock-probe: "true" 148 | enable-endpoint-health-checking: "true" 149 | - enable-endpoint-routes: "true" 150 | + enable-endpoint-routes: "false" 151 | enable-health-check-nodeport: "true" 152 | enable-health-checking: "true" 153 | enable-hubble: "true" 154 | ``` 155 | 156 | * I apply the new configmap: 157 | 158 | ``` 159 | kubectl delete -f cilium-config-origin.yaml 160 | kubectl apply -f cilium-config.yaml 161 | ``` 162 | 163 | ``` 164 | kubectl rollout restart daemonset -n kube-system cilium 165 | kubectl rollout restart deployment -n kube-system cilium-operator 166 | kubectl rollout restart deployment -n kube-system coredns 167 | kubectl rollout restart deployment -n kube-system hubble-relay 168 | kubectl delete namespace cilium-test 169 | ``` 170 | 171 | # Test 172 | 173 | ``` 174 | kubectl exec -it ds/cilium -n kube-system -c cilium-agent -- cilium status 175 | KVStore: Ok Disabled 176 | Kubernetes: Ok 1.27+ (v1.27.3-eks-a5565ad) [linux/amd64] 177 | Kubernetes APIs: ["cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "core/v1::Namespace", "core/v1::Node", "core/v1::Pods", "core/v1::Service", "discovery/v1::EndpointSlice", "networking.k8s.io/v1::NetworkPolicy"] 178 | KubeProxyReplacement: Strict [eth0 192.168.7.5 (Direct Routing), eth1 192.168.22.217, eth2 192.168.31.188] 179 | Host firewall: Disabled 180 | CNI Chaining: none 181 | CNI Config file: CNI configuration file management disabled 182 | Cilium: Ok 1.13.4 (v1.13.4-4061cdfc) 183 | NodeMonitor: Listening for events on 2 CPUs with 64x4096 of shared memory 184 | Cilium health daemon: Ok 185 | IPAM: IPv4: 4/15 allocated, 186 | IPv6 BIG TCP: Disabled 187 | BandwidthManager: Disabled 188 | Host Routing: BPF 189 | Masquerading: BPF [eth0, eth1, eth2] 192.168.0.0/16 [IPv4: Enabled, IPv6: Disabled] 190 | Controller Status: 29/29 healthy 191 | Proxy Status: OK, ip 192.168.7.45, 0 redirects active on ports 10000-20000 192 | Global Identity Range: min 256, max 65535 193 | Hubble: Ok Current/Max Flows: 2327/4095 (56.83%), Flows/s: 10.75 Metrics: Disabled 194 | Encryption: Disabled 195 | Cluster health: 2/2 reachable (2023-07-22T05:28:17Z) 196 | ``` 197 | 198 | > cilium connectivity test --sysdump-debug --test-namespace t1 199 | 200 | ``` 201 | 📋 Test Report 202 | ❌ 10/42 tests failed (58/304 actions), 12 tests skipped, 0 scenarios skipped: 203 | Test [no-policies]: 204 | ❌ no-policies/pod-to-host/ping-ipv4-3: 
t1/client-6965d549d5-94lpz (192.168.15.219) -> 54.81.30.178 (54.81.30.178:0) 205 | ❌ no-policies/pod-to-host/ping-ipv4-5: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> 54.81.30.178 (54.81.30.178:0) 206 | Test [allow-all-except-world]: 207 | ❌ allow-all-except-world/pod-to-service/curl-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> t1/echo-other-node (echo-other-node:8080) 208 | ❌ allow-all-except-world/pod-to-service/curl-1: t1/client-6965d549d5-94lpz (192.168.15.219) -> t1/echo-same-node (echo-same-node:8080) 209 | ❌ allow-all-except-world/pod-to-service/curl-2: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> t1/echo-other-node (echo-other-node:8080) 210 | ❌ allow-all-except-world/pod-to-service/curl-3: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> t1/echo-same-node (echo-same-node:8080) 211 | ❌ allow-all-except-world/pod-to-host/ping-ipv4-1: t1/client-6965d549d5-94lpz (192.168.15.219) -> 54.81.30.178 (54.81.30.178:0) 212 | ❌ allow-all-except-world/pod-to-host/ping-ipv4-5: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> 54.81.30.178 (54.81.30.178:0) 213 | Test [host-entity]: 214 | ❌ host-entity/pod-to-host/ping-ipv4-3: t1/client-6965d549d5-94lpz (192.168.15.219) -> 54.81.30.178 (54.81.30.178:0) 215 | ❌ host-entity/pod-to-host/ping-ipv4-5: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> 54.81.30.178 (54.81.30.178:0) 216 | Test [echo-ingress-l7]: 217 | ❌ echo-ingress-l7/pod-to-pod-with-endpoints/curl-ipv4-2-public: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-2-public (192.168.36.39:8080) 218 | ❌ echo-ingress-l7/pod-to-pod-with-endpoints/curl-ipv4-2-private: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-2-private (192.168.36.39:8080) 219 | ❌ echo-ingress-l7/pod-to-pod-with-endpoints/curl-ipv4-2-privatewith-header: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-2-privatewith-header (192.168.36.39:8080) 220 | ❌ echo-ingress-l7/pod-to-pod-with-endpoints/curl-ipv4-3-public: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-3-public (192.168.18.47:8080) 221 | ❌ echo-ingress-l7/pod-to-pod-with-endpoints/curl-ipv4-3-private: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-3-private (192.168.18.47:8080) 222 | ❌ echo-ingress-l7/pod-to-pod-with-endpoints/curl-ipv4-3-privatewith-header: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-3-privatewith-header (192.168.18.47:8080) 223 | Test [echo-ingress-l7-named-port]: 224 | ❌ echo-ingress-l7-named-port/pod-to-pod-with-endpoints/curl-ipv4-0-public: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-0-public (192.168.36.39:8080) 225 | ❌ echo-ingress-l7-named-port/pod-to-pod-with-endpoints/curl-ipv4-0-private: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-0-private (192.168.36.39:8080) 226 | ❌ echo-ingress-l7-named-port/pod-to-pod-with-endpoints/curl-ipv4-0-privatewith-header: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-0-privatewith-header (192.168.36.39:8080) 227 | ❌ echo-ingress-l7-named-port/pod-to-pod-with-endpoints/curl-ipv4-1-public: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-public (192.168.18.47:8080) 228 | ❌ echo-ingress-l7-named-port/pod-to-pod-with-endpoints/curl-ipv4-1-private: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-private (192.168.18.47:8080) 229 | ❌ echo-ingress-l7-named-port/pod-to-pod-with-endpoints/curl-ipv4-1-privatewith-header: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-privatewith-header (192.168.18.47:8080) 230 | Test [client-egress-l7-method]: 231 | ❌ 
client-egress-l7-method/pod-to-pod-with-endpoints/curl-ipv4-1-public: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-public (192.168.18.47:8080) 232 | ❌ client-egress-l7-method/pod-to-pod-with-endpoints/curl-ipv4-1-private: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-private (192.168.18.47:8080) 233 | ❌ client-egress-l7-method/pod-to-pod-with-endpoints/curl-ipv4-1-privatewith-header: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-privatewith-header (192.168.18.47:8080) 234 | ❌ client-egress-l7-method/pod-to-pod-with-endpoints/curl-ipv4-1-public: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-public (192.168.36.39:8080) 235 | ❌ client-egress-l7-method/pod-to-pod-with-endpoints/curl-ipv4-1-private: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-private (192.168.36.39:8080) 236 | ❌ client-egress-l7-method/pod-to-pod-with-endpoints/curl-ipv4-1-privatewith-header: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> curl-ipv4-1-privatewith-header (192.168.36.39:8080) 237 | Test [client-egress-l7]: 238 | ❌ client-egress-l7/pod-to-pod/curl-ipv4-2: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> t1/echo-other-node-66bdd89578-phzxz (192.168.36.39:8080) 239 | ❌ client-egress-l7/pod-to-pod/curl-ipv4-3: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> t1/echo-same-node-55db76dd44-ckhch (192.168.18.47:8080) 240 | ❌ client-egress-l7/pod-to-world/http-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-http (one.one.one.one:80) 241 | ❌ client-egress-l7/pod-to-world/https-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https (one.one.one.one:443) 242 | ❌ client-egress-l7/pod-to-world/https-to-one.one.one.one-index-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https-index (one.one.one.one:443) 243 | ❌ client-egress-l7/pod-to-world/http-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-http (one.one.one.one:80) 244 | ❌ client-egress-l7/pod-to-world/https-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https (one.one.one.one:443) 245 | ❌ client-egress-l7/pod-to-world/https-to-one.one.one.one-index-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https-index (one.one.one.one:443) 246 | Test [client-egress-l7-named-port]: 247 | ❌ client-egress-l7-named-port/pod-to-pod/curl-ipv4-2: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> t1/echo-same-node-55db76dd44-ckhch (192.168.18.47:8080) 248 | ❌ client-egress-l7-named-port/pod-to-pod/curl-ipv4-3: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> t1/echo-other-node-66bdd89578-phzxz (192.168.36.39:8080) 249 | ❌ client-egress-l7-named-port/pod-to-world/http-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-http (one.one.one.one:80) 250 | ❌ client-egress-l7-named-port/pod-to-world/https-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https (one.one.one.one:443) 251 | ❌ client-egress-l7-named-port/pod-to-world/https-to-one.one.one.one-index-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https-index (one.one.one.one:443) 252 | ❌ client-egress-l7-named-port/pod-to-world/http-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-http (one.one.one.one:80) 253 | ❌ client-egress-l7-named-port/pod-to-world/https-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https 
(one.one.one.one:443) 254 | ❌ client-egress-l7-named-port/pod-to-world/https-to-one.one.one.one-index-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https-index (one.one.one.one:443) 255 | Test [dns-only]: 256 | ❌ dns-only/pod-to-world/http-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-http (one.one.one.one:80) 257 | ❌ dns-only/pod-to-world/https-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https (one.one.one.one:443) 258 | ❌ dns-only/pod-to-world/https-to-one.one.one.one-index-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https-index (one.one.one.one:443) 259 | ❌ dns-only/pod-to-world/http-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-http (one.one.one.one:80) 260 | ❌ dns-only/pod-to-world/https-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https (one.one.one.one:443) 261 | ❌ dns-only/pod-to-world/https-to-one.one.one.one-index-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https-index (one.one.one.one:443) 262 | Test [to-fqdns]: 263 | ❌ to-fqdns/pod-to-world/http-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-http (one.one.one.one:80) 264 | ❌ to-fqdns/pod-to-world/https-to-one.one.one.one-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https (one.one.one.one:443) 265 | ❌ to-fqdns/pod-to-world/https-to-one.one.one.one-index-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> one.one.one.one-https-index (one.one.one.one:443) 266 | ❌ to-fqdns/pod-to-world/http-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-http (one.one.one.one:80) 267 | ❌ to-fqdns/pod-to-world/https-to-one.one.one.one-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https (one.one.one.one:443) 268 | ❌ to-fqdns/pod-to-world/https-to-one.one.one.one-index-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> one.one.one.one-https-index (one.one.one.one:443) 269 | ❌ to-fqdns/pod-to-world-2/https-cilium-io-0: t1/client-6965d549d5-94lpz (192.168.15.219) -> cilium-io-https (cilium.io:443) 270 | ❌ to-fqdns/pod-to-world-2/https-cilium-io-1: t1/client2-76f4d7c5bc-5b2jc (192.168.24.162) -> cilium-io-https (cilium.io:443) 271 | connectivity test failed: 10 tests failed 272 | ``` 273 | --------------------------------------------------------------------------------