├── .gitignore ├── Jenkinsfile ├── README.md ├── chapitres ├── 4 │ ├── mailpit-deployment.yaml │ ├── mailpit-service.yaml │ └── mailpit │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ └── service.yaml ├── 5 │ ├── custom-priority-class.yaml │ ├── deployment-flask.yaml │ ├── mailpit-deployment.yaml │ ├── mailpit-tcp-port.yaml │ └── mailpit.yaml ├── 6 │ ├── email.txt │ ├── mailpit-with-nfs.yaml │ ├── mailpit-with-pvc-and-security-context.yaml │ ├── mailpit-with-pvc.yaml │ ├── pv-mailpit.yaml │ ├── pvc-mailpit.yaml │ ├── pvc.yaml │ └── storage-class.yaml ├── 7 │ ├── configmap.yaml │ ├── mariadb-deployment-v1.yaml │ ├── mariadb-deployment-v2.yaml │ ├── mariadb-deployment-v3.yaml │ ├── mariadb-deployment-v4.yaml │ ├── mariadb-deployment-v5.yaml │ ├── mariadb-deployment.yaml │ ├── mariadb-statefulset-v1.yaml │ ├── mariadb-statefulset.yaml │ ├── pvc.yaml │ ├── secret-v1.yaml │ ├── secret.yaml │ └── service.yaml ├── 8 │ ├── configmap.yaml │ ├── mariadb-statefulset-v1.yaml │ ├── mariadb-statefulset-v2.yaml │ └── mariadb-statefulset.yaml ├── 9 │ ├── glances-v1.yaml │ └── glances.yaml ├── 11 │ ├── bash-rc.sh │ ├── change-color.sh │ ├── cronjob.yaml │ ├── job.yaml │ ├── k8s_required-labels.yaml │ ├── krew │ │ └── install-krew.sh │ ├── policy │ │ └── deployment.rego │ └── zsh-rc.sh ├── 12 │ ├── gatekeeper │ │ ├── constraint-template-v1.yaml │ │ └── k8srequiredlabel.yaml │ └── trivy │ │ ├── clusterrolebinding.yaml │ │ ├── cronjob.yaml │ │ ├── job.yaml │ │ ├── serviceaccount.yaml │ │ └── trivy-scan-result.txt ├── 13 │ ├── azure-file.yaml │ ├── ovh │ │ ├── .gitignore │ │ ├── network.tf │ │ ├── output.tf │ │ ├── ovh-cluster.tf │ │ ├── provider.tf │ │ └── variable.tf │ ├── pvc-nfs-client.yml │ └── pvc-nfs.yml ├── 14 │ ├── config-cluster.yaml │ ├── hosts.ini │ └── hosts.yml ├── 15 │ ├── dns-policy.json │ ├── external-dns.yaml │ ├── ingress-nginx-config.yaml │ ├── ingress-nginx.yaml │ ├── ingress-traefik-dashboard.yaml │ ├── ingress-traefik.yaml │ ├── kustomization.yaml │ ├── original │ │ ├── deployment.yaml │ │ ├── ingress-v1.yaml │ │ ├── ingress.yaml │ │ ├── pvc.yaml │ │ └── service.yaml │ ├── patches │ │ └── ingress-traefik.yaml │ └── traefik-config.yaml ├── 16 │ ├── certificate-prod.yaml │ ├── certificate-staging.yaml │ ├── chart │ │ ├── .gitignore │ │ ├── cert-manager.yaml │ │ ├── nginx-ingress.yaml │ │ └── oauth2-proxy.yaml.template │ ├── ingress-nginx-annotations.yaml │ ├── ingress-nginx-basic.yaml │ ├── ingress-nginx-oauth.yaml │ ├── ingress-nginx-tls-using-annotations.yaml │ ├── ingress-nginx-tls.yaml │ ├── ingress-traefik-basic.yaml │ ├── letsencrypt-issuer-prod.yaml │ ├── letsencrypt-issuer-staging.yaml │ ├── middleware-traefik.yaml │ └── nginx-ingress-default-certificate.yaml ├── 17 │ ├── allow-nginx-ingress.yaml │ ├── allow-wordpress-mariadb.yaml │ ├── chart │ │ └── wordpress-test.yaml │ ├── deny-all-except-ingress.yaml │ ├── deny-all.yaml │ ├── deny-egress.yaml │ ├── mailhog │ │ ├── deployment.yaml │ │ ├── pvc.yaml │ │ └── service.yaml │ └── mailpit │ │ ├── deployment.yaml │ │ ├── pvc.yaml │ │ └── service.yaml ├── 18 │ ├── deployment+tolerations+selector.yaml │ ├── hpa.yaml │ ├── mailhog │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ └── service.yaml │ ├── mailpit │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ └── service.yaml │ ├── nodes-autoscaler │ │ └── deploy-wp.sh │ └── ovh-nodepool.yaml ├── 19 │ ├── chart │ │ └── prometheus-operator.yaml │ ├── priority-class.yaml │ └── unique-ingress-host.yaml ├── 20 │ ├── cloudwatch-config.yaml │ ├── cloudwatch-policy.json │ ├── config.yaml │ ├── 
cwagent-fluent-bit-quickstart.yaml │ ├── datasource-grafana.yaml │ ├── elasticsearch │ │ ├── chart │ │ │ ├── elasticsearch.yaml │ │ │ ├── filebeat.yaml │ │ │ ├── kibana-oauth.yaml │ │ │ └── kibana.yaml │ │ └── datasource-elasticsearch.yaml │ └── opensearch │ │ ├── chart │ │ ├── fluent-bit.yaml │ │ ├── opensearch-dashboards-oauth.yaml │ │ ├── opensearch-dashboards.yaml │ │ └── opensearch.yaml │ │ └── datasource.yaml ├── 21 │ ├── api-gateway │ │ ├── gateway.yaml │ │ ├── httproute.yaml │ │ └── referencegrant.yaml │ ├── chart │ │ ├── external-dns.yaml │ │ ├── istio-gateway.yaml │ │ ├── istio-grafana.yaml │ │ ├── istio-jaeger.yaml │ │ ├── istio-kiali.yaml │ │ ├── istio.yaml │ │ ├── set-trace-sampling.yaml │ │ └── wordpress-test.yaml │ ├── destination-rule-mtls.yaml │ ├── istio │ │ └── .gitignore │ ├── mailhog │ │ ├── certificate.yaml │ │ ├── deployment.yaml │ │ ├── gateway.yaml │ │ ├── service.yaml │ │ └── virtualservice.yaml │ ├── mailpit │ │ ├── certificate.yaml │ │ ├── deployment.yaml │ │ ├── gateway.yaml │ │ ├── service.yaml │ │ └── virtualservice.yaml │ └── policy-strict.yaml ├── 22 │ ├── .gitignore │ ├── .trivyignore │ ├── Dockerfile │ ├── app.py │ ├── deployment.yaml │ └── requirements.txt ├── 23 │ ├── Jenkinsfile │ ├── chart │ │ └── jenkins.yaml │ ├── gitlab-ci-cache.yaml │ ├── gitlab-ci-simple.yaml │ ├── role-binding.yaml │ ├── role.yaml │ └── service-account.yaml ├── 24 │ ├── package │ │ ├── mailhog │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── charts │ │ │ │ └── .gitignore │ │ │ ├── requirements.lock │ │ │ ├── requirements.yaml │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── configmap.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── persistentvolumeclaim.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── tests │ │ │ │ │ └── test-connection.yaml │ │ │ └── values.yaml │ │ └── mailpit │ │ │ ├── .helmignore │ │ │ ├── Chart.lock │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── hpa.yaml │ │ │ ├── ingress.yaml │ │ │ ├── persistentvolumeclaim.yaml │ │ │ ├── service.yaml │ │ │ ├── serviceaccount.yaml │ │ │ └── tests │ │ │ │ └── test-connection.yaml │ │ │ └── values.yaml │ ├── role-binding.yaml │ └── tiller-account.yaml ├── 25 │ ├── admin-cluster-binding.yaml │ ├── admin-cluster.yaml │ ├── anonymous-viewer-binding.yaml │ ├── bad-deployment.yaml │ ├── good-deployment.yaml │ ├── limit-range.yaml │ ├── namespace-admin-binding.yaml │ ├── namespace-admin.yaml │ ├── resource-quota-priority-class.yaml │ ├── resource-quota.yaml │ ├── service-account │ │ ├── cicd-account.yaml │ │ ├── role-binding.yaml │ │ └── secret-token.yaml │ └── yannig-admin.yaml ├── 26 │ ├── simple-database.yaml │ ├── simple-mariadb.yaml │ └── simple-mysql.yaml ├── .gitignore └── make-archive.sh ├── flask-healthcheck ├── .gitignore ├── Dockerfile ├── Makefile ├── app.py ├── kube │ └── deployment.yaml └── requirements.txt ├── makefile └── misc ├── azure └── ingress.yaml ├── concourse └── chart │ └── concourse.yaml ├── namespace ├── cluster-role-binding.yaml ├── cluster-role.yaml ├── role-binding.yaml ├── role.yaml └── service-account.yaml └── operator └── postgres └── simple-cluster.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | config 2 | *.tar.gz 3 | .idea 4 | -------------------------------------------------------------------------------- /Jenkinsfile: 
-------------------------------------------------------------------------------- 1 | podTemplate( 2 | label: "python-builder", 3 | // Keep docker container started for 10 minutes before deleting it 4 | // idleMinutes: 10, 5 | // Assign the service account used to update the develop deployment 6 | serviceAccount: 'jenkins-updater', 7 | // Uncomment to use a secret to pull private images 8 | // imagePullSecrets: [ 'github-cred' ], 9 | // Containers to create 10 | containers: [ 11 | containerTemplate( 12 | name: 'python', 13 | image: 'python:3', 14 | ttyEnabled: true, 15 | command: 'cat', 16 | resourceRequestMemory: "200M", 17 | resourceLimitMemory: "250M" 18 | ), 19 | containerTemplate( 20 | name: 'kaniko', 21 | image: 'gcr.io/kaniko-project/executor:v1.8.0-debug', 22 | ttyEnabled: true, 23 | command: 'cat' 24 | ), 25 | containerTemplate( 26 | name: 'kubectl', 27 | image: 'gcr.io/cloud-builders/kubectl', 28 | ttyEnabled: true, 29 | command: 'cat' 30 | ), 31 | ], 32 | volumes: [ 33 | secretVolume( 34 | secretName: 'docker-hub-cred', 35 | mountPath: '/home/jenkins/docker' 36 | ) 37 | ] 38 | ) { 39 | node("python-builder") { 40 | stage("checkout") { 41 | checkout scm 42 | } 43 | container('python') { 44 | stage("prereq") { 45 | sh("pip install -r flask-healthcheck/requirements.txt") 46 | sh("pip install pylint") 47 | } 48 | stage('linter') { 49 | sh("pylint flask-healthcheck --exit-zero") 50 | } 51 | } 52 | container('kaniko') { 53 | def imageName = "yannig/flask-healthcheck:latest" 54 | stage('login') { 55 | sh(''' 56 | mkdir -p /kaniko/.docker 57 | cp /home/jenkins/docker/.dockerconfigjson /kaniko/.docker/config.json 58 | sed -i 's|docker.io|https://index.docker.io/v1/|g' /kaniko/.docker/config.json 59 | ''') 60 | } 61 | stage('build') { 62 | sh("/kaniko/executor --context flask-healthcheck --destination docker.io/${imageName}") 63 | } 64 | } 65 | container('kubectl') { 66 | stage('update-develop') { 67 | def time = System.currentTimeMillis() 68 | def patch = """{"spec":{"template":{"metadata":{"labels":{"date":"${time}"}}}}}""" 69 | patch = patch.replace('"', '\\"') 70 | sh("kubectl -n develop patch deployment test -p \"${patch}\"") 71 | } 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Examples for the ENI Kubernetes book 2 | 3 | This repository contains a set of examples for the Kubernetes book published by Éditions ENI. 4 | 5 | Below is a description of the elements it contains: 6 | 7 | - chapitres directory: files shown in the book, grouped by chapter. Some files are versioned using -v1/2/3 suffixes 8 | - flask-healthcheck directory: directory of the flask-healthcheck sample application presented in the book 9 | - Jenkinsfile: sample CI/CD pipeline studied in the book 10 | - misc directory: sample resources 11 | - README.md: the file you are currently reading 12 | 13 | 14 | # Chapter fixes, v1 15 | 16 | ## Securing access to applications 17 | 18 | Since version 0.11 of the stable/cert-manager Helm chart, the content of the apiVersion field has changed. This field must no longer contain "certmanager.k8s.io/v1alpha1" but "cert-manager.io/v1alpha2". 19 | 20 | The examples in the book have been updated accordingly. 21 | 22 | Another solution is to deploy version 0.10 of the chart (v0.10.1) using the --version v0.10.1 option.
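For reference, a minimal sketch of what this change looks like on an issuer manifest (the kind shown is just an example; the same API group change applies to the other cert-manager resources):

```yaml
# stable/cert-manager chart <= v0.10.1
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer

# chart >= 0.11
apiVersion: cert-manager.io/v1alpha2
kind: ClusterIssuer
```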
23 | -------------------------------------------------------------------------------- /chapitres/.gitignore: -------------------------------------------------------------------------------- 1 | *.tgz 2 | *.gz 3 | -------------------------------------------------------------------------------- /chapitres/11/bash-rc.sh: -------------------------------------------------------------------------------- 1 | PS1='[\u@\h \W $(change_color)$(kube_ps1)]\$ ' 2 | -------------------------------------------------------------------------------- /chapitres/11/change-color.sh: -------------------------------------------------------------------------------- 1 | # Change colors using current context 2 | function change_color() { 3 | RETVAL=$? 4 | context=$(command kubectl config current-context 2>/dev/null) 5 | if [ -z "$context" ] || [ "$context" = "minikube" ]; then 6 | command konsoleprofile colors=Breeze 7 | else 8 | command konsoleprofile colors=RedOnBlack 9 | fi 10 | return $RETVAL 11 | 12 | } 13 | -------------------------------------------------------------------------------- /chapitres/11/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: kube-hunter 5 | spec: 6 | schedule: "0 12 * * *" 7 | concurrencyPolicy: "Forbid" 8 | successfulJobsHistoryLimit: 1 9 | jobTemplate: 10 | spec: 11 | template: 12 | metadata: 13 | annotations: 14 | sidecar.istio.io/inject: "false" 15 | spec: 16 | containers: 17 | - name: kube-hunter 18 | image: aquasec/kube-hunter:aqua 19 | command: ["kube-hunter"] 20 | args: 21 | - "--pod" 22 | - "--token" 23 | - "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0aW1lIjoxNjQwOTUwNzE3LjgyMDExODQsImVtYWlsIjoieWFubmlnLnBlcnJlQGxhcG9zdGUubmV0IiwiciI6IjlmOWQzNzI3In0.XUlw9V_LpI_uIe_MEX2UP7isxkf-LI4edj9YxIKpPuE" 24 | restartPolicy: Never 25 | backoffLimit: 4 26 | -------------------------------------------------------------------------------- /chapitres/11/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: kube-hunter 5 | spec: 6 | template: 7 | metadata: 8 | annotations: 9 | sidecar.istio.io/inject: "false" 10 | spec: 11 | containers: 12 | - name: kube-hunter 13 | image: aquasec/kube-hunter 14 | command: ["kube-hunter"] 15 | args: ["--pod"] 16 | restartPolicy: Never 17 | backoffLimit: 4 18 | -------------------------------------------------------------------------------- /chapitres/11/k8s_required-labels.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8srequiredlabels 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sRequiredLabels 10 | listKind: K8sRequiredLabelsList 11 | plural: k8srequiredlabels 12 | singular: k8srequiredlabels 13 | validation: 14 | # Schema for the `parameters` field 15 | openAPIV3Schema: 16 | properties: 17 | labels: 18 | type: array 19 | items: string 20 | targets: 21 | - target: admission.k8s.gatekeeper.sh 22 | rego: | 23 | package k8srequiredlabels 24 | 25 | deny[{"msg": msg, "details": {"missing_labels": missing}}] { 26 | provided := {label | input.review.object.metadata.labels[label]} 27 | required := {label | label := input.parameters.labels[_]} 28 | missing := required - provided 29 | count(missing) > 0 30 | msg := sprintf("you must provide labels: %v", [missing]) 31 | } 
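The k8srequiredlabels ConstraintTemplate above only takes effect once a Constraint of the generated kind exists. A minimal sketch of such a Constraint, assuming Gatekeeper is installed (the constraint name and targeted kinds are illustrative):

```yaml
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: require-app-name-label
spec:
  match:
    kinds:
      - apiGroups: ["apps"]
        kinds: ["Deployment"]
  parameters:
    labels: ["app.kubernetes.io/name"]
```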
-------------------------------------------------------------------------------- /chapitres/11/krew/install-krew.sh: -------------------------------------------------------------------------------- 1 | ( 2 | set -x; cd "$(mktemp -d)" && 3 | curl -fsSLO "https://storage.googleapis.com/krew/v0.2.1/krew.{tar.gz,yaml}" && 4 | tar zxvf krew.tar.gz && 5 | ./krew-"$(uname | tr '[:upper:]' '[:lower:]')_amd64" install \ 6 | --manifest=krew.yaml --archive=krew.tar.gz 7 | ) 8 | -------------------------------------------------------------------------------- /chapitres/11/policy/deployment.rego: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | deny[msg] { 4 | input.kind == "Deployment" 5 | not input.metadata.labels["app.kubernetes.io/name"] 6 | msg := "Deployment must define app.kubernetes.io/name label" 7 | } 8 | -------------------------------------------------------------------------------- /chapitres/11/zsh-rc.sh: -------------------------------------------------------------------------------- 1 | PROMPT='$(change_color)$(kube_ps1)$(build_prompt) ' 2 | -------------------------------------------------------------------------------- /chapitres/12/gatekeeper/constraint-template-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8srequiredlabelappkubernetesioname 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sRequiredLabelAppKubernetesIoName 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | rego: | 13 | package k8srequiredlabelappkubernetesioname 14 | 15 | violation[{"msg": msg}] { 16 | not input.review.object.metadata.labels["app.kubernetes.io/name"] 17 | msg := "Objects must have a label 'app.kubernetes.io/name'" 18 | } 19 | -------------------------------------------------------------------------------- /chapitres/12/gatekeeper/k8srequiredlabel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sRequiredLabelAppKubernetesIoName 3 | metadata: 4 | name: k8s-required-label-app-kubernetes-io-name 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["apps"] 9 | kinds: ["Deployment"] 10 | namespaces: ["test"] 11 | enforcementAction: "deny" -------------------------------------------------------------------------------- /chapitres/12/trivy/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: trivy 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: cluster-admin 10 | subjects: 11 | - kind: ServiceAccount 12 | name: trivy 13 | namespace: default 14 | -------------------------------------------------------------------------------- /chapitres/12/trivy/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: scan-trivy 5 | spec: 6 | schedule: "0 12 * * *" 7 | timeZone: Europe/Paris 8 | concurrencyPolicy: "Forbid" 9 | successfulJobsHistoryLimit: 1 10 | jobTemplate: 11 | spec: 12 | template: 13 | metadata: 14 | annotations: 15 | sidecar.istio.io/inject: "false" 16 | spec: 17 | serviceAccountName: trivy 18 | containers: 19 | - command: 20 | - trivy 21 | - kubernetes 22 | - --report=summary 23 | - 
--timeout=20m 24 | image: docker.io/aquasec/trivy:0.53.0 25 | name: scan-trivy 26 | restartPolicy: Never 27 | backoffLimit: 4 28 | -------------------------------------------------------------------------------- /chapitres/12/trivy/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: scan-trivy 5 | spec: 6 | template: 7 | metadata: 8 | annotations: 9 | sidecar.istio.io/inject: "false" 10 | spec: 11 | serviceAccountName: trivy 12 | containers: 13 | - command: 14 | - trivy 15 | - kubernetes 16 | - --report=summary 17 | - --timeout=20m 18 | image: docker.io/aquasec/trivy:0.53.0 19 | name: scan-trivy 20 | restartPolicy: Never 21 | -------------------------------------------------------------------------------- /chapitres/12/trivy/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: trivy 6 | -------------------------------------------------------------------------------- /chapitres/12/trivy/trivy-scan-result.txt: -------------------------------------------------------------------------------- 1 | 2024-07-26T10:00:55+02:00 INFO Node scanning is enabled 2 | 2024-07-26T10:00:55+02:00 INFO If you want to disable Node scanning via an in-cluster Job, please try '--disable-node-collector' to disable the Node-Collector job. 3 | 441 / 441 [------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------] 100.00% 1 p/s 4 | 2024-07-26T10:10:55+02:00 ERROR Error during vulnerabilities or misconfiguration scan err="scan error: unable to initialize a scanner: unable to initialize an image scanner: unable to find the specified image \"auto\" in [\"docker\" \"containerd\" \"podman\" \"remote\"]: 4 errors occurred:\n\t* docker error: unable to inspect the image (auto): Error response from daemon: No such image: auto:latest\n\t* containerd error: failed to parse image reference: auto\n\t* podman error: unable to initialize Podman client: no podman socket found: stat /run/user/1000/podman/podman.sock: no such file or directory\n\t* remote error: GET https://index.docker.io/v2/library/auto/manifests/latest: UNAUTHORIZED: authentication required; [map[Action:pull Class: Name:library/auto Type:repository]]\n\n" 5 | 6 | Summary Report for minikube 7 | 8 | 9 | Workload Assessment 10 | ┌───────────────┬────────────────────────────────────────────────────────────────────────┬────────────────────────────┬────────────────────┬───────────────────┐ 11 | │ Namespace │ Resource │ Vulnerabilities │ Misconfigurations │ Secrets │ 12 | │ │ ├────┬─────┬──────┬─────┬────┼───┬───┬───┬────┬───┼───┬───┬───┬───┬───┤ 13 | │ │ │ C │ H │ M │ L │ U │ C │ H │ M │ L │ U │ C │ H │ M │ L │ U │ 14 | ├───────────────┼────────────────────────────────────────────────────────────────────────┼────┼─────┼──────┼─────┼────┼───┼───┼───┼────┼───┼───┼───┼───┼───┼───┤ 15 | │ test-istio │ Deployment/mailpit │ │ │ 2 │ │ │ │ 1 │ 4 │ 9 │ │ │ │ │ │ │ 16 | │ monitoring │ Deployment/prometheus-grafana │ │ 10 │ 79 │ 9 │ │ │ 4 │ 2 │ 27 │ │ │ │ │ │ │ 17 | │ monitoring │ StatefulSet/prometheus-prometheus-kube-prometheus-prometheus │ │ │ 6 │ 2 │ │ │ │ │ 18 │ │ │ │ │ │ │ 18 | │ monitoring │ DaemonSet/prometheus-prometheus-node-exporter │ 1 │ │ 2 │ │ │ │ 3 │ 3 │ 9 │ │ │ │ │ │ │ 19 | │ monitoring │ 
Deployment/prometheus-kube-prometheus-operator │ │ │ 1 │ │ │ │ │ │ 6 │ │ │ │ │ │ │ 20 | │ monitoring │ ConfigMap/prometheus-operator-opensearch-datasource │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ │ 21 | │ monitoring │ Deployment/prometheus-kube-state-metrics │ │ │ │ │ │ │ │ │ 6 │ │ │ │ │ │ │ 22 | │ monitoring │ ConfigMap/prometheus-prometheus-kube-prometheus-prometheus-rulefiles-0 │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 23 | │ jenkins │ StatefulSet/jenkins │ 8 │ 38 │ 60 │ 228 │ │ │ 1 │ 6 │ 38 │ │ │ │ │ │ │ 24 | │ jenkins │ ConfigMap/jenkins-jenkins-jcasc-config │ │ │ │ │ │ │ 1 │ 1 │ │ │ │ │ │ │ │ 25 | │ istio-system │ ConfigMap/istio-sidecar-injector │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ │ 26 | │ istio-system │ Deployment/grafana │ 3 │ 5 │ 44 │ 2 │ │ │ 1 │ 1 │ 6 │ │ │ │ │ │ │ 27 | │ istio-system │ Deployment/istiod │ │ │ 6 │ 38 │ │ │ │ 1 │ 5 │ │ │ │ │ │ │ 28 | │ istio-system │ Deployment/prometheus │ 3 │ 6 │ 24 │ │ │ │ 2 │ 6 │ 18 │ │ │ │ │ │ │ 29 | │ istio-system │ ConfigMap/kiali │ │ │ │ │ │ │ 1 │ 1 │ │ │ │ │ │ │ │ 30 | │ istio-system │ Deployment/jaeger │ 1 │ 5 │ 6 │ │ │ │ 1 │ 3 │ 8 │ │ │ │ │ │ │ 31 | │ istio-system │ Deployment/kiali │ 1 │ 8 │ 6 │ 48 │ │ │ │ 1 │ 4 │ │ │ │ │ │ │ 32 | │ istio-system │ Deployment/istio-ingress │ │ │ │ │ │ │ │ 3 │ 3 │ │ │ │ │ │ │ 33 | │ istio-ingress │ Deployment/gateway-istio │ │ │ 6 │ 38 │ │ │ │ 2 │ 3 │ │ │ │ │ │ │ 34 | │ ingress-nginx │ Job/ingress-nginx-admission-patch │ │ │ │ │ │ │ 1 │ 1 │ 9 │ │ │ │ │ │ │ 35 | │ ingress-nginx │ Job/ingress-nginx-admission-create │ │ │ │ │ │ │ 1 │ 1 │ 9 │ │ │ │ │ │ │ 36 | │ ingress-nginx │ Deployment/ingress-nginx-controller │ 3 │ 5 │ 42 │ │ │ │ 3 │ 4 │ 5 │ │ │ │ │ │ │ 37 | │ gatekeeper │ Deployment/gatekeeper-controller-manager │ │ │ │ │ │ │ │ │ 3 │ │ │ │ │ │ │ 38 | │ gatekeeper │ Deployment/gatekeeper-audit │ │ │ │ │ │ │ │ │ 3 │ │ │ │ │ │ │ 39 | │ develop │ Deployment/test │ 73 │ 859 │ 1994 │ 875 │ 14 │ │ 4 │ 8 │ 19 │ │ │ │ │ │ │ 40 | │ cert-manager │ Deployment/cert-manager-webhook │ │ │ │ │ │ │ │ │ 6 │ │ │ │ │ │ │ 41 | │ cert-manager │ Deployment/cert-manager-cainjector │ │ │ │ │ │ │ │ │ 6 │ │ │ │ │ │ │ 42 | │ cert-manager │ Deployment/cert-manager │ │ │ │ │ │ │ │ │ 6 │ │ │ │ │ │ │ 43 | └───────────────┴────────────────────────────────────────────────────────────────────────┴────┴─────┴──────┴─────┴────┴───┴───┴───┴────┴───┴───┴───┴───┴───┴───┘ 44 | Severities: C=CRITICAL H=HIGH M=MEDIUM L=LOW U=UNKNOWN 45 | 46 | 47 | Infra Assessment 48 | ┌─────────────┬───────────────────────────────────────────────┬──────────────────────┬────────────────────┬───────────────────┐ 49 | │ Namespace │ Resource │ Vulnerabilities │ Misconfigurations │ Secrets │ 50 | │ │ ├───┬────┬────┬────┬───┼───┬───┬───┬────┬───┼───┬───┬───┬───┬───┤ 51 | │ │ │ C │ H │ M │ L │ U │ C │ H │ M │ L │ U │ C │ H │ M │ L │ U │ 52 | ├─────────────┼───────────────────────────────────────────────┼───┼────┼────┼────┼───┼───┼───┼───┼────┼───┼───┼───┼───┼───┼───┤ 53 | │ kube-system │ Service/prometheus-kube-prometheus-kube-etcd │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 54 | │ kube-system │ Service/prometheus-kube-prometheus-kubelet │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 55 | │ kube-system │ Service/external-dns │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 56 | │ kube-system │ Service/prometheus-kube-prometheus-coredns │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 57 | │ kube-system │ DaemonSet/kube-proxy │ 2 │ 5 │ 14 │ 15 │ │ │ 3 │ 5 │ 9 │ │ │ │ │ │ │ 58 | │ kube-system │ Pod/etcd-minikube │ │ │ │ │ │ │ 2 │ 4 │ 6 │ │ │ │ │ │ │ 59 | │ kube-system │ Pod/kube-apiserver-minikube │ │ │ │ │ │ │ 2 │ 5 │ 16 │ │ │ │ │ │ │ 60 | │ 
kube-system │ Service/prometheus-kube-prometheus-kube-proxy │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 61 | │ kube-system │ Pod/kube-controller-manager-minikube │ │ │ │ │ │ │ 2 │ 4 │ 10 │ │ │ │ │ │ │ 62 | │ kube-system │ ConfigMap/extension-apiserver-authentication │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 63 | │ kube-system │ Pod/kube-scheduler-minikube │ │ │ │ │ │ │ 2 │ 4 │ 8 │ │ │ │ │ │ │ 64 | │ kube-system │ NetworkPolicy/external-dns │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 65 | │ kube-system │ Pod/storage-provisioner │ 4 │ 51 │ 32 │ 1 │ │ │ 2 │ 5 │ 9 │ │ │ │ │ │ │ 66 | │ kube-system │ Deployment/external-dns │ 1 │ 4 │ 25 │ 68 │ │ │ │ 1 │ 2 │ │ │ │ │ │ │ 67 | │ kube-system │ DaemonSet/kindnet │ 3 │ 13 │ 47 │ 20 │ │ │ 3 │ 6 │ 5 │ │ │ │ │ │ │ 68 | │ kube-system │ Service/kube-dns │ │ │ │ │ │ │ │ 1 │ │ │ │ │ │ │ │ 69 | │ kube-system │ Deployment/coredns │ │ │ │ │ │ │ 1 │ 4 │ 4 │ │ │ │ │ │ │ 70 | │ │ Node/minikube │ │ │ 1 │ │ │ │ 4 │ │ 2 │ │ │ │ │ │ │ 71 | └─────────────┴───────────────────────────────────────────────┴───┴────┴────┴────┴───┴───┴───┴───┴────┴───┴───┴───┴───┴───┴───┘ 72 | Severities: C=CRITICAL H=HIGH M=MEDIUM L=LOW U=UNKNOWN 73 | 74 | 75 | RBAC Assessment 76 | ┌───────────────┬────────────────────────────────────────────────────────────────────┬───────────────────┐ 77 | │ Namespace │ Resource │ RBAC Assessment │ 78 | │ │ ├───┬───┬───┬───┬───┤ 79 | │ │ │ C │ H │ M │ L │ U │ 80 | ├───────────────┼────────────────────────────────────────────────────────────────────┼───┼───┼───┼───┼───┤ 81 | │ kube-system │ Role/system::leader-locking-kube-scheduler │ │ │ 1 │ │ │ 82 | │ kube-system │ Role/system::leader-locking-kube-controller-manager │ │ │ 1 │ │ │ 83 | │ kube-system │ Role/system:controller:cloud-provider │ │ │ 1 │ │ │ 84 | │ kube-system │ Role/system:controller:bootstrap-signer │ │ │ 1 │ │ │ 85 | │ kube-system │ Role/system:controller:token-cleaner │ │ │ 1 │ │ │ 86 | │ kube-system │ Role/system:persistent-volume-provisioner │ │ 2 │ │ │ │ 87 | │ kube-public │ Role/system:controller:bootstrap-signer │ │ │ 1 │ │ │ 88 | │ kube-public │ RoleBinding/kubeadm:bootstrap-signer-clusterinfo │ 1 │ │ │ │ │ 89 | │ jenkins │ Role/jenkins-schedule-agents │ │ 1 │ 1 │ │ │ 90 | │ istio-system │ Role/kiali-controlplane │ │ │ 1 │ │ │ 91 | │ istio-system │ Role/istio-ingress │ │ │ 1 │ │ │ 92 | │ istio-system │ Role/istiod │ │ │ 2 │ │ │ 93 | │ ingress-nginx │ Role/ingress-nginx-admission │ │ │ 1 │ │ │ 94 | │ ingress-nginx │ Role/ingress-nginx │ │ │ 3 │ │ │ 95 | │ gatekeeper │ Role/gatekeeper-manager-role │ │ │ 1 │ │ │ 96 | │ develop │ Role/jenkins-updater │ │ │ 1 │ │ │ 97 | │ cert-manager │ Role/cert-manager-webhook:dynamic-serving │ │ │ 2 │ │ │ 98 | │ │ ClusterRole/system:controller:persistent-volume-binder │ 1 │ 2 │ 1 │ │ │ 99 | │ │ ClusterRole/cert-manager-controller-issuers │ 1 │ │ │ │ │ 100 | │ │ ClusterRole/istio-reader-clusterrole-istio-system │ 2 │ │ │ │ │ 101 | │ │ ClusterRole/cert-manager-controller-certificates │ 1 │ │ │ │ │ 102 | │ │ ClusterRole/system:controller:endpoint-controller │ │ 1 │ │ │ │ 103 | │ │ ClusterRole/system:node │ 1 │ │ 1 │ │ │ 104 | │ │ ClusterRole/system:aggregate-to-admin │ 1 │ │ │ │ │ 105 | │ │ ClusterRole/system:kube-controller-manager │ 5 │ │ │ │ │ 106 | │ │ ClusterRole/edit │ 2 │ 4 │ 6 │ │ │ 107 | │ │ ClusterRole/system:controller:node-controller │ │ │ 1 │ │ │ 108 | │ │ ClusterRole/system:controller:legacy-service-account-token-cleaner │ 1 │ │ │ │ │ 109 | │ │ ClusterRole/system:controller:horizontal-pod-autoscaler │ 2 │ │ │ │ │ 110 | │ │ 
ClusterRole/system:controller:replication-controller │ │ │ 2 │ │ │ 111 | │ │ ClusterRole/istiod-gateway-controller-istio-system │ │ 1 │ 1 │ │ │ 112 | │ │ ClusterRole/system:controller:endpointslice-controller │ │ 1 │ │ │ │ 113 | │ │ ClusterRole/system:controller:deployment-controller │ │ │ 3 │ │ │ 114 | │ │ ClusterRole/prometheus-grafana-clusterrole │ 1 │ │ │ │ │ 115 | │ │ ClusterRole/system:controller:generic-garbage-collector │ 1 │ │ │ │ │ 116 | │ │ ClusterRole/system:aggregate-to-edit │ 2 │ 4 │ 6 │ │ │ 117 | │ │ ClusterRole/system:controller:daemon-set-controller │ │ │ 1 │ │ │ 118 | │ │ ClusterRole/system:controller:root-ca-cert-publisher │ │ │ 1 │ │ │ 119 | │ │ ClusterRoleBinding/minikube-rbac │ │ │ 1 │ │ │ 120 | │ │ ClusterRole/system:controller:job-controller │ │ │ 2 │ │ │ 121 | │ │ ClusterRole/system:controller:expand-controller │ 1 │ │ │ │ │ 122 | │ │ ClusterRole/system:kube-scheduler │ │ │ 1 │ │ │ 123 | │ │ ClusterRole/cert-manager-controller-challenges │ 2 │ 2 │ 1 │ │ │ 124 | │ │ ClusterRole/prometheus-kube-prometheus-operator │ 3 │ 1 │ 3 │ │ │ 125 | │ │ ClusterRole/cert-manager-cainjector │ 2 │ │ │ │ │ 126 | │ │ ClusterRole/system:controller:namespace-controller │ 1 │ │ │ │ │ 127 | │ │ ClusterRole/cert-manager-controller-clusterissuers │ 1 │ │ │ │ │ 128 | │ │ ClusterRoleBinding/cluster-admin │ │ │ 1 │ │ │ 129 | │ │ ClusterRole/admin │ 3 │ 4 │ 6 │ │ │ 130 | │ │ ClusterRole/system:controller:statefulset-controller │ │ │ 1 │ │ │ 131 | │ │ ClusterRole/kiali │ 1 │ 1 │ 3 │ │ │ 132 | │ │ ClusterRole/istiod-clusterrole-istio-system │ 6 │ │ 1 │ │ │ 133 | │ │ ClusterRole/prometheus-kube-state-metrics │ 1 │ │ │ │ │ 134 | │ │ ClusterRole/system:controller:replicaset-controller │ │ │ 2 │ │ │ 135 | │ │ ClusterRole/system:controller:resourcequota-controller │ 1 │ │ │ │ │ 136 | │ │ ClusterRole/system:controller:endpointslicemirroring-controller │ │ 1 │ │ │ │ 137 | │ │ ClusterRole/ingress-nginx-admission │ 1 │ │ │ │ │ 138 | │ │ ClusterRole/system:controller:pod-garbage-collector │ │ │ 1 │ │ │ 139 | │ │ ClusterRoleBinding/kubeadm:cluster-admins │ │ │ 1 │ │ │ 140 | │ │ ClusterRole/prometheus │ │ 1 │ │ │ │ 141 | │ │ ClusterRole/cert-manager-controller-orders │ 1 │ │ │ │ │ 142 | │ │ ClusterRole/gatekeeper-manager-role │ 7 │ │ │ │ │ 143 | │ │ ClusterRole/ingress-nginx │ 1 │ │ │ │ │ 144 | │ │ ClusterRole/system:controller:ttl-after-finished-controller │ │ │ 1 │ │ │ 145 | │ │ ClusterRole/system:controller:cronjob-controller │ │ │ 3 │ │ │ 146 | │ │ ClusterRole/cluster-admin │ 2 │ │ │ │ │ 147 | └───────────────┴────────────────────────────────────────────────────────────────────┴───┴───┴───┴───┴───┘ 148 | Severities: C=CRITICAL H=HIGH M=MEDIUM L=LOW U=UNKNOWN 149 | 150 | NAME REPOSITORY TAG SCANNER AGE 151 | rep...69457bd7bb ...-cainjector v1.15.1 Trivy 23m 152 | rep...ger-controller ...-controller v1.15.1 Trivy 25m 153 | rep...cert-manager-webhook ...-webhook v1.15.1 Trivy 24m 154 | -------------------------------------------------------------------------------- /chapitres/13/azure-file.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: azure-file 5 | provisioner: kubernetes.io/azure-file 6 | volumeBindingMode: Immediate 7 | reclaimPolicy: Retain 8 | parameters: 9 | skuName: Standard_LRS 10 | -------------------------------------------------------------------------------- /chapitres/13/ovh/.gitignore: -------------------------------------------------------------------------------- 1 | 
.terraform* 2 | terraform.tfstate* 3 | kubeconfig 4 | -------------------------------------------------------------------------------- /chapitres/13/ovh/network.tf: -------------------------------------------------------------------------------- 1 | resource "ovh_cloud_project_network_private" "network" { 2 | service_name = var.service_name 3 | name = "k8s_private_network" 4 | regions = [var.region] 5 | provider = ovh 6 | vlan_id = 168 7 | } 8 | 9 | resource "ovh_cloud_project_network_private_subnet" "subnet" { 10 | service_name = var.service_name 11 | # ID of the ovh_cloud_project_network_private resource named network 12 | network_id = ovh_cloud_project_network_private.network.id 13 | start = "192.168.168.10" 14 | end = "192.168.168.250" 15 | network = "192.168.168.0/24" 16 | dhcp = true 17 | region = var.region 18 | provider = ovh 19 | no_gateway = true 20 | } 21 | -------------------------------------------------------------------------------- /chapitres/13/ovh/output.tf: -------------------------------------------------------------------------------- 1 | resource "local_file" "kubeconfig" { 2 | content = ovh_cloud_project_kube.k8s_cluster.kubeconfig 3 | filename = "${path.module}/kubeconfig" 4 | file_permission = "0600" 5 | } 6 | -------------------------------------------------------------------------------- /chapitres/13/ovh/ovh-cluster.tf: -------------------------------------------------------------------------------- 1 | resource "ovh_cloud_project_kube" "k8s_cluster" { 2 | service_name = var.service_name 3 | name = "tofu_test" 4 | region = var.region 5 | version = "1.29" 6 | private_network_id = tolist(ovh_cloud_project_network_private.network.regions_attributes[*].openstackid)[0] 7 | 8 | private_network_configuration { 9 | default_vrack_gateway = "" 10 | private_network_routing_as_default = false 11 | } 12 | depends_on = [ovh_cloud_project_network_private_subnet.subnet] 13 | } 14 | 15 | resource "ovh_cloud_project_kube_nodepool" "node_pool" { 16 | service_name = var.service_name 17 | kube_id = ovh_cloud_project_kube.k8s_cluster.id 18 | name = "discovery-pool" //Warning: "_" char is not allowed!
19 | flavor_name = "d2-8" 20 | desired_nodes = 1 21 | max_nodes = 2 22 | min_nodes = 0 23 | } 24 | -------------------------------------------------------------------------------- /chapitres/13/ovh/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | ovh = { 4 | source = "ovh/ovh" 5 | } 6 | } 7 | } 8 | 9 | provider "ovh" {} 10 | -------------------------------------------------------------------------------- /chapitres/13/ovh/variable.tf: -------------------------------------------------------------------------------- 1 | variable "service_name" { 2 | type = string 3 | default = "0ac0d8dd56ed4f5f8c93b98f759a9796" 4 | } 5 | 6 | variable "region" { 7 | type = string 8 | default = "BHS5" 9 | } -------------------------------------------------------------------------------- /chapitres/13/pvc-nfs-client.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: test-nfs-client 5 | spec: 6 | storageClassName: "nfs-client" 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 100Mi 12 | -------------------------------------------------------------------------------- /chapitres/13/pvc-nfs.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: test-nfs 5 | spec: 6 | storageClassName: "nfs" 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 100Mi 12 | -------------------------------------------------------------------------------- /chapitres/14/config-cluster.yaml: -------------------------------------------------------------------------------- 1 | # Disable dashboard 2 | dashboard_enabled: no 3 | 4 | # Retrieve cluster kubeconfig 5 | kubeconfig_localhost: yes 6 | 7 | # Activate Ingress controller 8 | ingress_nginx_enabled: yes 9 | 10 | # Activate Helm Tiller 11 | helm_enabled: true 12 | -------------------------------------------------------------------------------- /chapitres/14/hosts.ini: -------------------------------------------------------------------------------- 1 | [kube-node] 2 | worker1 3 | worker2 4 | 5 | [kube-master] 6 | master 7 | 8 | [etcd] 9 | master 10 | 11 | [bastion] 12 | rebond ansible_user=roger ansible_become=no 13 | 14 | [k8s-cluster:children] 15 | kube-node 16 | kube-master 17 | 18 | [all:vars] 19 | ansible_user=admin 20 | ansible_become=yes 21 | masters="master" 22 | workers="worker1 worker2" 23 | -------------------------------------------------------------------------------- /chapitres/14/hosts.yml: -------------------------------------------------------------------------------- 1 | k8s-cluster: 2 | vars: 3 | ansible_become: yes 4 | ansible_user: "root" 5 | 6 | children: 7 | kube-node: 8 | hosts: 9 | worker1: {} 10 | worker2: {} 11 | 12 | kube-master: 13 | hosts: 14 | master: {} 15 | 16 | etcd: 17 | hosts: 18 | master: {} 19 | 20 | bastion: 21 | hosts: 22 | rebond: 23 | ansible_become: no 24 | ansible_user: "roger" 25 | masters: "master" 26 | workers: "worker1 worker2" 27 | -------------------------------------------------------------------------------- /chapitres/15/dns-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "route53:ChangeResourceRecordSets" 8 | ], 9 | "Resource": [ 10 | 
"arn:aws:route53:::hostedzone/*" 11 | ] 12 | }, 13 | { 14 | "Effect": "Allow", 15 | "Action": [ 16 | "route53:ListHostedZones", 17 | "route53:ListResourceRecordSets", 18 | "route53:ListHostedZonesByName" 19 | ], 20 | "Resource": [ 21 | "*" 22 | ] 23 | }, 24 | { 25 | "Effect": "Allow", 26 | "Action": [ 27 | "route53:GetChange" 28 | ], 29 | "Resource": [ 30 | "arn:aws:route53:::change/*" 31 | ] 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /chapitres/15/external-dns.yaml: -------------------------------------------------------------------------------- 1 | # All available values: 2 | # https://github.com/bitnami/charts/blob/master/bitnami/external-dns/values.yaml 3 | 4 | provider: google # or azure or aws (default = aws) 5 | 6 | # Google cloudDNS credentials 7 | google: 8 | project: "eni-kubernetes" 9 | serviceAccountSecret: "cloud-dns-key" 10 | 11 | # Azure DNS credentials 12 | azure: 13 | secretName: "cloud-dns-key" 14 | 15 | # AWS route53 credentials 16 | aws: 17 | region: "eu-central-1" 18 | secretKey: "AKFKXXXXXXXXXXXX" 19 | accessKey: "YxYxYxYxYxYxYxYxYxYxYxYx" 20 | 21 | # TXT prefix 22 | txtPrefix: "k8s-" 23 | -------------------------------------------------------------------------------- /chapitres/15/ingress-nginx-config.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/kubernetes/ingress-nginx/tree/master/charts/ingress-nginx 2 | 3 | controller: 4 | config: 5 | # Preserve source ip 6 | use-forwarded-headers: true 7 | 8 | # Resources limitations/reservations 9 | resources: 10 | requests: 11 | cpu: 100m 12 | memory: 90Mi 13 | limits: 14 | cpu: 500m 15 | memory: 120Mi 16 | 17 | affinity: 18 | podAntiAffinity: 19 | preferredDuringSchedulingIgnoredDuringExecution: 20 | - weight: 100 21 | podAffinityTerm: 22 | labelSelector: 23 | matchExpressions: 24 | - key: app.kubernetes.io/name 25 | operator: In 26 | values: 27 | - ingress-nginx 28 | topologyKey: kubernetes.io/hostname 29 | 30 | # high availability 31 | replicaCount: 2 32 | 33 | # Default ingress 34 | watchIngressWithoutClass: true 35 | ingressClassResource: 36 | default: true 37 | 38 | service: 39 | # Needed annotations for AWS 40 | # annotations: 41 | # service.beta.kubernetes.io/aws-load-balancer-type: "nlb" 42 | externalTrafficPolicy: Local 43 | -------------------------------------------------------------------------------- /chapitres/15/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | spec: 6 | ingressClassName: nginx 7 | rules: 8 | - host: "mailpit.eni.yannig.ovh" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: "Prefix" 13 | backend: 14 | service: 15 | name: mailpit 16 | port: 17 | number: 8025 18 | -------------------------------------------------------------------------------- /chapitres/15/ingress-traefik-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: traefik-dashboard 5 | namespace: traefik 6 | spec: 7 | ingressClassName: nginx 8 | rules: 9 | - host: "traefik.admin.eni.yannig.ovh" 10 | http: 11 | paths: 12 | - path: / 13 | pathType: Prefix 14 | backend: 15 | service: 16 | name: traefik 17 | port: 18 | name: web 19 | -------------------------------------------------------------------------------- /chapitres/15/ingress-traefik.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit-traefik 5 | spec: 6 | ingressClassName: traefik 7 | rules: 8 | - host: "mailpit-traefik.eni.yannig.ovh" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: mailpit 16 | port: 17 | number: 8025 18 | -------------------------------------------------------------------------------- /chapitres/15/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - ingress-nginx.yaml 4 | 5 | patches: 6 | - target: 7 | name: mailpit 8 | kind: Ingress 9 | patch: |- 10 | - op: replace 11 | path: /spec/rules/0/host 12 | value: mailpit-traefik.eni.yannig.ovh 13 | - op: replace 14 | path: /spec/ingressClassName 15 | value: traefik 16 | -------------------------------------------------------------------------------- /chapitres/15/original/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: mailpit 6 | labels: 7 | app: mailpit 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: mailpit 13 | template: 14 | metadata: 15 | labels: 16 | app: mailpit 17 | spec: 18 | volumes: 19 | - name: maildir 20 | persistentVolumeClaim: { claimName: pvc-mailpit } 21 | containers: 22 | - image: axllent/mailpit 23 | name: mailpit 24 | imagePullPolicy: IfNotPresent 25 | volumeMounts: 26 | - mountPath: /maildir 27 | name: maildir 28 | command: 29 | - "./mailpit" 30 | - "--db-file=/maildir/mailpit.db" 31 | -------------------------------------------------------------------------------- /chapitres/15/original/ingress-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | spec: 6 | rules: 7 | - host: "mailpit.eni.yannig.ovh" 8 | http: 9 | paths: 10 | - path: / 11 | pathType: Exact 12 | backend: 13 | service: 14 | name: mailpit 15 | port: 16 | number: 8025 17 | -------------------------------------------------------------------------------- /chapitres/15/original/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | spec: 6 | rules: 7 | - host: "mailpit.eni.yannig.ovh" 8 | http: 9 | paths: 10 | - path: / 11 | pathType: Prefix 12 | backend: 13 | service: 14 | name: mailpit 15 | port: 16 | number: 8025 17 | -------------------------------------------------------------------------------- /chapitres/15/original/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-mailpit 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 10Mi 11 | -------------------------------------------------------------------------------- /chapitres/15/original/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mailpit 6 | name: mailpit 7 | spec: 8 | type: ClusterIP 9 | ports: 10 | - name: smtp 11 | port: 1025 12 | protocol: TCP 13 | targetPort: 1025 14 | - name: http 15 | port: 8025 16 | protocol: TCP 17 | targetPort: 8025 18 | 
selector: 19 | app: mailpit 20 | -------------------------------------------------------------------------------- /chapitres/15/patches/ingress-traefik.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | spec: 6 | ingressClassName: traefik 7 | rules: 8 | - host: "mailpit-traefik.eni.yannig.ovh" 9 | -------------------------------------------------------------------------------- /chapitres/15/traefik-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://github.com/traefik/traefik-helm-chart/blob/master/traefik/values.yaml 3 | 4 | ingressClass: 5 | enabled: true 6 | # Set this value to use Traefik as a default Ingress controller 7 | # isDefaultClass: true 8 | 9 | # Update Ingress status 10 | providers: 11 | kubernetesIngress: 12 | publishedService: 13 | enabled: true 14 | # Set this parameter to change the ingress class name 15 | # kubernetesCRD: 16 | # ingressClass: traefik-internal 17 | 18 | # enable access logs 19 | logs: 20 | # Traefik log level 21 | # general: 22 | # level: DEBUG 23 | access: 24 | enabled: true 25 | 26 | # Set pods count to 2 for high availability 27 | deployment: 28 | replicas: 2 29 | 30 | # Pod anti affinity 31 | affinity: 32 | podAntiAffinity: 33 | preferredDuringSchedulingIgnoredDuringExecution: 34 | - weight: 100 35 | podAffinityTerm: 36 | labelSelector: 37 | matchExpressions: 38 | - key: app.kubernetes.io/name 39 | operator: In 40 | values: 41 | - "traefik" 42 | topologyKey: kubernetes.io/hostname 43 | 44 | # Resources limitations/reservations 45 | resources: 46 | requests: 47 | cpu: "30m" 48 | memory: "50Mi" 49 | limits: 50 | cpu: "100m" 51 | memory: "60Mi" 52 | -------------------------------------------------------------------------------- /chapitres/16/certificate-prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: eni-yannig-ovh 5 | spec: 6 | secretName: eni-yannig-ovh-prod-tls 7 | issuerRef: 8 | name: letsencrypt-prod 9 | kind: ClusterIssuer 10 | commonName: "eni.yannig.ovh" 11 | dnsNames: ["eni.yannig.ovh", "*.eni.yannig.ovh"] 12 | -------------------------------------------------------------------------------- /chapitres/16/certificate-staging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: eni-yannig-ovh 5 | spec: 6 | secretName: eni-yannig-ovh-staging-tls 7 | issuerRef: 8 | name: letsencrypt-staging 9 | kind: ClusterIssuer 10 | commonName: "eni.yannig.ovh" 11 | dnsNames: ["eni.yannig.ovh", "*.eni.yannig.ovh"] 12 | -------------------------------------------------------------------------------- /chapitres/16/chart/.gitignore: -------------------------------------------------------------------------------- 1 | oauth2-proxy.yaml 2 | -------------------------------------------------------------------------------- /chapitres/16/chart/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | ingressShim: 2 | defaultIssuerName: "letsencrypt-prod" 3 | defaultIssuerKind: "ClusterIssuer" 4 | 5 | crds: 6 | enabled: true -------------------------------------------------------------------------------- /chapitres/16/chart/nginx-ingress.yaml: 
-------------------------------------------------------------------------------- 1 | rbac: 2 | create: true 3 | controller: 4 | publishService: 5 | enabled: true 6 | extraArgs: 7 | default-ssl-certificate: "default/eni-yannig-ovh-prod-tls" 8 | -------------------------------------------------------------------------------- /chapitres/16/chart/oauth2-proxy.yaml.template: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | config: 4 | clientID: "024xxxxxxxxxxxxxx7b" 5 | clientSecret: "8bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb8" 6 | cookieSecret: "7JYxxxxxxxxxxxxxxxxxxxxxxxxxxxxxFOx" 7 | 8 | extraArgs: 9 | provider: "github" 10 | # You can restrict access using email domain 11 | # ie: my-company.com 12 | email-domain: "*" 13 | github-org: EditionsENI 14 | -------------------------------------------------------------------------------- /chapitres/16/ingress-nginx-annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit-auth 5 | annotations: 6 | # type of authentication 7 | nginx.ingress.kubernetes.io/auth-type: basic 8 | # name of the secret that contains the user/password definitions 9 | nginx.ingress.kubernetes.io/auth-secret: mailpit-secret 10 | # message to display why the authentication is required 11 | nginx.ingress.kubernetes.io/auth-realm: "mailpit authentication" 12 | spec: 13 | ingressClassName: nginx 14 | rules: 15 | - host: "mailpit-auth.eni.yannig.ovh" 16 | http: 17 | paths: 18 | - path: / 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: mailpit 23 | port: 24 | number: 8025 25 | tls: 26 | - secretName: eni-yannig-ovh-prod-tls 27 | -------------------------------------------------------------------------------- /chapitres/16/ingress-nginx-basic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | annotations: 6 | nginx.ingress.kubernetes.io/auth-type: basic 7 | # name of the secret that contains the user/password definitions 8 | nginx.ingress.kubernetes.io/auth-secret: mailpit-secret 9 | # message to display with an appropiate context why the authentication is required 10 | nginx.ingress.kubernetes.io/auth-realm: "Authentication Required for mailpit" 11 | spec: 12 | ingressClassName: nginx 13 | rules: 14 | - host: "mailpit.eni.yannig.ovh" 15 | http: 16 | paths: 17 | - path: / 18 | pathType: Prefix 19 | backend: 20 | service: 21 | name: mailpit 22 | port: 23 | number: 8025 24 | tls: 25 | - secretName: eni-yannig-ovh-prod-tls 26 | hosts: 27 | - "mailpit.eni.yannig.ovh" 28 | -------------------------------------------------------------------------------- /chapitres/16/ingress-nginx-oauth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | annotations: 6 | nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" 7 | nginx.ingress.kubernetes.io/auth-signin: >- 8 | https://$host/oauth2/start?rd=$request_uri 9 | spec: 10 | ingressClassName: nginx 11 | rules: 12 | - host: "mailpit.eni.yannig.ovh" 13 | http: 14 | paths: 15 | - path: / 16 | pathType: Prefix 17 | backend: 18 | service: 19 | name: mailpit 20 | port: 21 | number: 8025 22 | tls: 23 | - secretName: eni-yannig-ovh-prod-tls 24 | hosts: 25 | - "mailpit.eni.yannig.ovh" 26 | 27 | --- 28 | apiVersion: 
networking.k8s.io/v1 29 | kind: Ingress 30 | metadata: 31 | name: mailpit-oauth2-proxy 32 | namespace: ingress-nginx 33 | annotations: 34 | kubernetes.io/ingress.class: nginx 35 | kubernetes.io/tls-acme: "true" 36 | spec: 37 | rules: 38 | - host: "mailpit.eni.yannig.ovh" 39 | http: 40 | paths: 41 | - path: /oauth2 42 | pathType: Prefix 43 | backend: 44 | service: 45 | name: oauth2-proxy 46 | port: 47 | number: 80 48 | tls: 49 | - hosts: 50 | - "mailpit.eni.yannig.ovh" 51 | -------------------------------------------------------------------------------- /chapitres/16/ingress-nginx-tls-using-annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | annotations: 6 | kubernetes.io/tls-acme: 'true' 7 | cert-manager.io/cluster-issuer: "letsencrypt-prod" 8 | spec: 9 | ingressClassName: nginx 10 | rules: 11 | - host: "mailpit.eni.yannig.ovh" 12 | http: 13 | paths: 14 | - path: / 15 | pathType: "Prefix" 16 | backend: 17 | service: 18 | name: mailpit 19 | port: 20 | name: http 21 | tls: 22 | - secretName: "mailpit.eni.yannig.ovh" 23 | hosts: ["mailpit.eni.yannig.ovh"] 24 | -------------------------------------------------------------------------------- /chapitres/16/ingress-nginx-tls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | spec: 6 | ingressClassName: nginx 7 | rules: 8 | - host: "mailpit.eni.yannig.ovh" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: "Prefix" 13 | backend: 14 | service: 15 | name: mailpit 16 | port: 17 | name: http 18 | tls: 19 | - secretName: eni-yannig-ovh-prod-tls 20 | hosts: ["mailpit.eni.yannig.ovh"] 21 | -------------------------------------------------------------------------------- /chapitres/16/ingress-traefik-basic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit-auth-traefik 5 | annotations: 6 | # reference previous objet defined (prefix: -) 7 | traefik.ingress.kubernetes.io/router.middlewares: >- 8 | default-mailpit-auth-traefik@kubernetescrd 9 | spec: 10 | ingressClassName: traefik 11 | rules: 12 | - host: "mailpit-auth-traefik.eni.yannig.ovh" 13 | http: 14 | paths: 15 | - path: / 16 | pathType: Prefix 17 | backend: 18 | service: 19 | name: mailpit 20 | port: 21 | number: 8025 22 | tls: 23 | - secretName: eni-yannig-ovh-prod-tls 24 | -------------------------------------------------------------------------------- /chapitres/16/letsencrypt-issuer-prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-prod 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | email: mon.adresse@email.com 9 | privateKeySecretRef: 10 | name: letsencrypt-prod 11 | solvers: 12 | - http01: 13 | ingress: {} 14 | # Set class to use a specific http entrypoint 15 | # class: nginx 16 | - dns01: 17 | cloudDNS: 18 | # A secretKeyRef to a google cloud json service account 19 | serviceAccountSecretRef: 20 | name: cloud-dns-key 21 | key: credentials.json 22 | # The project in which to update the DNS zone 23 | project: "eni-kubernetes" 24 | selector: 25 | dnsZones: 26 | - eni.yannig.ovh 27 | 
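The dns01 solver of the letsencrypt-prod ClusterIssuer above reads a Google Cloud service account key from the cloud-dns-key Secret. A minimal sketch of how that Secret could be created and how to check the issuer, assuming cert-manager runs in the cert-manager namespace (the local key file path is illustrative):

```sh
kubectl -n cert-manager create secret generic cloud-dns-key \
  --from-file=credentials.json=./gcp-dns-service-account.json

# The ClusterIssuer should then report Ready
kubectl describe clusterissuer letsencrypt-prod
```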
-------------------------------------------------------------------------------- /chapitres/16/letsencrypt-issuer-staging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-staging 5 | spec: 6 | acme: 7 | server: https://acme-staging-v02.api.letsencrypt.org/directory 8 | email: mon.adresse@email.com 9 | privateKeySecretRef: 10 | name: letsencrypt-staging 11 | solvers: 12 | - http01: 13 | ingress: {} 14 | # Set class to use a specific http entrypoint 15 | # class: nginx 16 | - dns01: 17 | cloudDNS: 18 | # A secretKeyRef to a google cloud json service account 19 | serviceAccountSecretRef: 20 | name: cloud-dns-key 21 | key: credentials.json 22 | # The project in which to update the DNS zone 23 | project: "eni-kubernetes" 24 | selector: 25 | dnsZones: 26 | - eni.yannig.ovh 27 | -------------------------------------------------------------------------------- /chapitres/16/middleware-traefik.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: mailpit-auth-traefik 5 | spec: 6 | basicAuth: 7 | removeHeader: true 8 | secret: mailpit-secret 9 | -------------------------------------------------------------------------------- /chapitres/16/nginx-ingress-default-certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Set default certificate for Nginx 3 | controller: 4 | extraArgs: 5 | default-ssl-certificate: "default/eni-yannig-ovh-prod-tls" 6 | -------------------------------------------------------------------------------- /chapitres/17/allow-nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-nginx-ingress 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app.kubernetes.io/name: wordpress 9 | policyTypes: ["Ingress"] 10 | ingress: 11 | - from: 12 | - namespaceSelector: 13 | matchLabels: 14 | role: ingress 15 | -------------------------------------------------------------------------------- /chapitres/17/allow-wordpress-mariadb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-wordpress-mariadb 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app.kubernetes.io/name: mariadb 9 | ingress: 10 | - from: 11 | - podSelector: 12 | matchLabels: 13 | app.kubernetes.io/name: wordpress 14 | ports: 15 | - protocol: TCP 16 | port: 3306 17 | -------------------------------------------------------------------------------- /chapitres/17/chart/wordpress-test.yaml: -------------------------------------------------------------------------------- 1 | # https://github.com/bitnami/charts/blob/master/bitnami/wordpress/values.yaml 2 | 3 | ingress: 4 | ingressClassName: nginx 5 | enabled: true 6 | tls: true 7 | annotations: 8 | kubernetes.io/tls-acme: "true" 9 | cert-manager.io/cluster-issuer: "letsencrypt-prod" 10 | hostname: "wordpress.eni.yannig.ovh" 11 | 12 | mariadb: 13 | networkPolicy: &disable 14 | enabled: false 15 | networkPolicy: *disable 16 | readinessProbe: *disable 17 | -------------------------------------------------------------------------------- /chapitres/17/deny-all-except-ingress.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-ingress 5 | spec: 6 | podSelector: {} 7 | policyTypes: ["Ingress"] 8 | ingress: 9 | - from: 10 | - namespaceSelector: 11 | matchLabels: 12 | role: ingress 13 | -------------------------------------------------------------------------------- /chapitres/17/deny-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-all 5 | spec: 6 | podSelector: {} 7 | -------------------------------------------------------------------------------- /chapitres/17/deny-egress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-egress 5 | spec: 6 | podSelector: {} 7 | policyTypes: ["Egress"] 8 | egress: [] 9 | -------------------------------------------------------------------------------- /chapitres/17/mailhog/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: mailhog 6 | labels: 7 | app: mailhog 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: mailhog 13 | template: 14 | metadata: 15 | labels: 16 | app: mailhog 17 | spec: 18 | volumes: 19 | - name: mailhog-maildir 20 | persistentVolumeClaim: { claimName: pvc-mailhog } 21 | containers: 22 | - image: mailhog/mailhog 23 | name: mailhog 24 | imagePullPolicy: IfNotPresent 25 | volumeMounts: 26 | - mountPath: /maildir 27 | name: mailhog-maildir 28 | command: 29 | - "MailHog" 30 | - "-storage=maildir" 31 | - "-maildir-path=/maildir" 32 | -------------------------------------------------------------------------------- /chapitres/17/mailhog/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-mailhog 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 10Mi 11 | -------------------------------------------------------------------------------- /chapitres/17/mailhog/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mailhog 6 | name: mailhog 7 | spec: 8 | type: NodePort 9 | ports: 10 | - name: port-1 11 | port: 1025 12 | protocol: TCP 13 | targetPort: 1025 14 | - name: port-2 15 | port: 8025 16 | protocol: TCP 17 | targetPort: 8025 18 | selector: 19 | app: mailhog 20 | -------------------------------------------------------------------------------- /chapitres/17/mailpit/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mailpit 5 | labels: 6 | app: mailpit 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mailpit 12 | template: 13 | metadata: 14 | labels: 15 | app: mailpit 16 | spec: 17 | volumes: 18 | - name: maildir 19 | persistentVolumeClaim: { claimName: pvc-mailpit } 20 | containers: 21 | - image: axllent/mailpit 22 | name: mailpit 23 | imagePullPolicy: IfNotPresent 24 | volumeMounts: 25 | - mountPath: /maildir 26 | name: maildir 27 | command: 28 | - "./mailpit" 29 | - "--db-file=/maildir/mailpit.db" 30 | 
-------------------------------------------------------------------------------- /chapitres/17/mailpit/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-mailpit 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 10Mi 11 | -------------------------------------------------------------------------------- /chapitres/17/mailpit/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mailpit 6 | name: mailpit 7 | spec: 8 | type: NodePort 9 | ports: 10 | - name: port-1 11 | port: 1025 12 | protocol: TCP 13 | targetPort: 1025 14 | - name: port-2 15 | port: 8025 16 | protocol: TCP 17 | targetPort: 8025 18 | selector: 19 | app: mailpit 20 | -------------------------------------------------------------------------------- /chapitres/18/deployment+tolerations+selector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: mailpit 6 | name: mailpit 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mailpit 12 | template: 13 | metadata: 14 | labels: 15 | app: mailpit 16 | spec: 17 | nodeSelector: 18 | workload: stateless 19 | tolerations: 20 | - key: "kind" 21 | operator: "Equal" 22 | value: "spot" 23 | effect: "NoSchedule" 24 | - key: "kubernetes.azure.com/scalesetpriority" 25 | operator: "Equal" 26 | value: "spot" 27 | effect: "NoSchedule" 28 | containers: 29 | - image: axllent/mailpit 30 | name: mailpit 31 | -------------------------------------------------------------------------------- /chapitres/18/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: mailpit 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: mailpit 10 | minReplicas: 1 11 | maxReplicas: 5 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | target: 17 | type: Utilization 18 | averageUtilization: 50 19 | -------------------------------------------------------------------------------- /chapitres/18/mailhog/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: mailhog 6 | name: mailhog 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mailhog 12 | template: 13 | metadata: 14 | labels: 15 | app: mailhog 16 | spec: 17 | containers: 18 | - image: mailhog/mailhog 19 | name: mailhog 20 | resources: 21 | requests: 22 | memory: "64Mi" 23 | cpu: "250m" 24 | limits: 25 | memory: "128Mi" 26 | cpu: "500m" 27 | -------------------------------------------------------------------------------- /chapitres/18/mailhog/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailhog 5 | spec: 6 | ingressClassName: nginx 7 | rules: 8 | - host: "mailhog.eni.yannig.ovh" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: mailhog 16 | port: 17 | number: 8025 18 | tls: 19 | - secretName: eni-yannig-ovh-prod-tls 20 | hosts: 21 | - "mailhog.eni.yannig.ovh" 22 | 
-------------------------------------------------------------------------------- /chapitres/18/mailhog/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mailhog 6 | name: mailhog 7 | spec: 8 | type: NodePort 9 | ports: 10 | - name: port-1 11 | port: 1025 12 | protocol: TCP 13 | targetPort: 1025 14 | - name: port-2 15 | port: 8025 16 | protocol: TCP 17 | targetPort: 8025 18 | selector: 19 | app: mailhog 20 | -------------------------------------------------------------------------------- /chapitres/18/mailpit/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: mailpit 6 | name: mailpit 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mailpit 12 | template: 13 | metadata: 14 | labels: 15 | app: mailpit 16 | spec: 17 | containers: 18 | - image: axllent/mailpit 19 | name: mailpit 20 | resources: 21 | requests: 22 | memory: "64Mi" 23 | cpu: "10m" 24 | limits: 25 | memory: "128Mi" 26 | cpu: "10m" 27 | -------------------------------------------------------------------------------- /chapitres/18/mailpit/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailpit 5 | spec: 6 | ingressClassName: nginx 7 | rules: 8 | - host: "mailpit.eni.yannig.ovh" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: mailpit 16 | port: 17 | number: 8025 18 | tls: 19 | - secretName: eni-yannig-ovh-prod-tls 20 | hosts: 21 | - "mailpit.eni.yannig.ovh" 22 | -------------------------------------------------------------------------------- /chapitres/18/mailpit/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mailpit 6 | name: mailpit 7 | spec: 8 | type: ClusterIP 9 | ports: 10 | - name: port-1 11 | port: 1025 12 | protocol: TCP 13 | targetPort: 1025 14 | - name: port-2 15 | port: 8025 16 | protocol: TCP 17 | targetPort: 8025 18 | selector: 19 | app: mailpit 20 | -------------------------------------------------------------------------------- /chapitres/18/nodes-autoscaler/deploy-wp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | for i in 1 2 3 4 | do 5 | helm upgrade --install wordpress-$i stable/wordpress \ 6 | --namespace wp-$i 7 | done 8 | -------------------------------------------------------------------------------- /chapitres/18/ovh-nodepool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kube.cloud.ovh.com/v1alpha1 2 | kind: NodePool 3 | metadata: 4 | name: statelessworkload 5 | spec: 6 | antiAffinity: true 7 | autoscale: true 8 | desiredNodes: 1 9 | flavor: b2-7 10 | maxNodes: 5 11 | minNodes: 0 12 | monthlyBilled: false 13 | template: 14 | metadata: 15 | labels: 16 | workload: stateless 17 | annotations: {} 18 | finalizers: [] 19 | spec: 20 | unschedulable: false 21 | taints: 22 | - effect: NoSchedule 23 | key: kind 24 | value: spot 25 | -------------------------------------------------------------------------------- /chapitres/19/chart/prometheus-operator.yaml: -------------------------------------------------------------------------------- 1 | alertmanager: 2 | 
ingress: 3 | enabled: true 4 | annotations: 5 | kubernetes.io/tls-acme: "true" 6 | cert-manager.io/cluster-issuer: letsencrypt-prod 7 | hosts: 8 | - "alertmanager.eni.yannig.ovh" 9 | tls: 10 | - secretName: alertmanager.eni.yannig.ovh 11 | hosts: 12 | - "alertmanager.eni.yannig.ovh" 13 | 14 | config: 15 | global: 16 | slack_api_url: 'URL_PRESSE_PAPIER' 17 | 18 | receivers: 19 | - name: 'null' 20 | - name: default-receiver 21 | slack_configs: 22 | - channel: '#monitoring' 23 | send_resolved: true 24 | route: 25 | receiver: default-receiver 26 | repeat_interval: 3h 27 | routes: 28 | - match: 29 | alertname: Watchdog 30 | receiver: 'null' 31 | - match: 32 | severity: warning 33 | receiver: 'null' 34 | 35 | grafana: 36 | persistence: 37 | enabled: true 38 | ingress: 39 | enabled: true 40 | annotations: 41 | kubernetes.io/tls-acme: "true" 42 | cert-manager.io/cluster-issuer: letsencrypt-prod 43 | hosts: 44 | - "grafana.eni.yannig.ovh" 45 | tls: 46 | - secretName: grafana.eni.yannig.ovh 47 | hosts: 48 | - "grafana.eni.yannig.ovh" 49 | 50 | kubeControllerManager: 51 | enabled: false 52 | 53 | kubeScheduler: 54 | enabled: false 55 | -------------------------------------------------------------------------------- /chapitres/19/priority-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.k8s.io/v1 2 | kind: PriorityClass 3 | metadata: 4 | name: monitoring-node 5 | description: Priority for monitoring nodes. 6 | value: 101000 7 | --- 8 | apiVersion: scheduling.k8s.io/v1 9 | kind: PriorityClass 10 | metadata: 11 | name: monitoring-cluster 12 | description: Priority for monitoring cluster. 13 | value: 100000 14 | -------------------------------------------------------------------------------- /chapitres/19/unique-ingress-host.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sUniqueIngressHost 3 | metadata: 4 | name: unique-ingress-host 5 | spec: 6 | enforcementAction: warn 7 | match: 8 | kinds: 9 | - apiGroups: ["extensions", "networking.k8s.io"] 10 | kinds: ["Ingress"] 11 | -------------------------------------------------------------------------------- /chapitres/20/cloudwatch-config.yaml: -------------------------------------------------------------------------------- 1 | # Basic information 2 | cluster_name: "eni-test" 3 | region_name: "eu-central-1" 4 | 5 | # HTTP server is disabled 6 | http_server_toggle: "'Off'" 7 | http_server_port: "'8080'" 8 | 9 | # Read from the tail of the file, not the beginning 10 | # Invert both values to read files that already exist 11 | read_from_head: "'Off'" 12 | read_from_tail: "'On'" 13 | -------------------------------------------------------------------------------- /chapitres/20/cloudwatch-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "logs", 6 | "Effect": "Allow", 7 | "Action": [ 8 | "logs:CreateLogStream", 9 | "logs:PutLogEvents", 10 | "logs:DescribeLogGroups", 11 | "logs:DescribeLogStreams" 12 | ], 13 | "Resource": [ 14 | "arn:aws:logs:*:*:*" 15 | ] 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /chapitres/20/config.yaml: -------------------------------------------------------------------------------- 1 | # create amazon-cloudwatch namespace 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: amazon-cloudwatch
6 | labels: 7 | name: amazon-cloudwatch 8 | --- 9 | 10 | # create cwagent service account and role binding 11 | apiVersion: v1 12 | kind: ServiceAccount 13 | metadata: 14 | name: cloudwatch-agent 15 | namespace: amazon-cloudwatch 16 | 17 | --- 18 | kind: ClusterRole 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | metadata: 21 | name: cloudwatch-agent-role 22 | rules: 23 | - apiGroups: [""] 24 | resources: ["pods", "nodes", "endpoints"] 25 | verbs: ["list", "watch"] 26 | - apiGroups: ["apps"] 27 | resources: ["replicasets"] 28 | verbs: ["list", "watch"] 29 | - apiGroups: ["batch"] 30 | resources: ["jobs"] 31 | verbs: ["list", "watch"] 32 | - apiGroups: [""] 33 | resources: ["nodes/proxy"] 34 | verbs: ["get"] 35 | - apiGroups: [""] 36 | resources: ["nodes/stats", "configmaps", "events"] 37 | verbs: ["create"] 38 | - apiGroups: [""] 39 | resources: ["configmaps"] 40 | resourceNames: ["cwagent-clusterleader"] 41 | verbs: ["get","update"] 42 | 43 | --- 44 | kind: ClusterRoleBinding 45 | apiVersion: rbac.authorization.k8s.io/v1 46 | metadata: 47 | name: cloudwatch-agent-role-binding 48 | subjects: 49 | - kind: ServiceAccount 50 | name: cloudwatch-agent 51 | namespace: amazon-cloudwatch 52 | roleRef: 53 | kind: ClusterRole 54 | name: cloudwatch-agent-role 55 | apiGroup: rbac.authorization.k8s.io 56 | --- 57 | 58 | # create configmap for cwagent config 59 | apiVersion: v1 60 | data: 61 | # Configuration is in Json format. No matter what configure change you make, 62 | # please keep the Json blob valid. 63 | cwagentconfig.json: | 64 | { 65 | "agent": { 66 | "region": "eu-central-1" 67 | }, 68 | "logs": { 69 | "metrics_collected": { 70 | "kubernetes": { 71 | "cluster_name": "eni-test", 72 | "metrics_collection_interval": 60 73 | } 74 | }, 75 | "force_flush_interval": 5 76 | } 77 | } 78 | kind: ConfigMap 79 | metadata: 80 | name: cwagentconfig 81 | namespace: amazon-cloudwatch 82 | --- 83 | 84 | # deploy cwagent as daemonset 85 | apiVersion: apps/v1 86 | kind: DaemonSet 87 | metadata: 88 | name: cloudwatch-agent 89 | namespace: amazon-cloudwatch 90 | spec: 91 | selector: 92 | matchLabels: 93 | name: cloudwatch-agent 94 | template: 95 | metadata: 96 | labels: 97 | name: cloudwatch-agent 98 | spec: 99 | containers: 100 | - name: cloudwatch-agent 101 | image: amazon/cloudwatch-agent:1.247350.0b251780 102 | #ports: 103 | # - containerPort: 8125 104 | # hostPort: 8125 105 | # protocol: UDP 106 | resources: 107 | limits: 108 | cpu: 200m 109 | memory: 200Mi 110 | requests: 111 | cpu: 200m 112 | memory: 200Mi 113 | # Please don't change below envs 114 | env: 115 | - name: HOST_IP 116 | valueFrom: 117 | fieldRef: 118 | fieldPath: status.hostIP 119 | - name: HOST_NAME 120 | valueFrom: 121 | fieldRef: 122 | fieldPath: spec.nodeName 123 | - name: K8S_NAMESPACE 124 | valueFrom: 125 | fieldRef: 126 | fieldPath: metadata.namespace 127 | - name: CI_VERSION 128 | value: "k8s/1.3.9" 129 | # Please don't change the mountPath 130 | volumeMounts: 131 | - name: cwagentconfig 132 | mountPath: /etc/cwagentconfig 133 | - name: rootfs 134 | mountPath: /rootfs 135 | readOnly: true 136 | - name: dockersock 137 | mountPath: /var/run/docker.sock 138 | readOnly: true 139 | - name: varlibdocker 140 | mountPath: /var/lib/docker 141 | readOnly: true 142 | - name: containerdsock 143 | mountPath: /run/containerd/containerd.sock 144 | readOnly: true 145 | - name: sys 146 | mountPath: /sys 147 | readOnly: true 148 | - name: devdisk 149 | mountPath: /dev/disk 150 | readOnly: true 151 | volumes: 152 | - name: cwagentconfig 153 | 
configMap: 154 | name: cwagentconfig 155 | - name: rootfs 156 | hostPath: 157 | path: / 158 | - name: dockersock 159 | hostPath: 160 | path: /var/run/docker.sock 161 | - name: varlibdocker 162 | hostPath: 163 | path: /var/lib/docker 164 | - name: containerdsock 165 | hostPath: 166 | path: /run/containerd/containerd.sock 167 | - name: sys 168 | hostPath: 169 | path: /sys 170 | - name: devdisk 171 | hostPath: 172 | path: /dev/disk/ 173 | terminationGracePeriodSeconds: 60 174 | serviceAccountName: cloudwatch-agent 175 | 176 | --- 177 | 178 | # create configmap for cluster name and aws region for CloudWatch Logs 179 | # need to replace the placeholders eni-test and eu-central-1 180 | # and need to replace 'Off' and '8080' 181 | # and need to replace 'Off' and 'On' 182 | apiVersion: v1 183 | data: 184 | cluster.name: eni-test 185 | logs.region: eu-central-1 186 | http.server: 'Off' 187 | http.port: '8080' 188 | read.head: 'Off' 189 | read.tail: 'On' 190 | kind: ConfigMap 191 | metadata: 192 | name: fluent-bit-cluster-info 193 | namespace: amazon-cloudwatch 194 | --- 195 | 196 | apiVersion: v1 197 | kind: ServiceAccount 198 | metadata: 199 | name: fluent-bit 200 | namespace: amazon-cloudwatch 201 | --- 202 | apiVersion: rbac.authorization.k8s.io/v1 203 | kind: ClusterRole 204 | metadata: 205 | name: fluent-bit-role 206 | rules: 207 | - nonResourceURLs: 208 | - /metrics 209 | verbs: 210 | - get 211 | - apiGroups: [""] 212 | resources: 213 | - namespaces 214 | - pods 215 | - pods/logs 216 | verbs: ["get", "list", "watch"] 217 | --- 218 | apiVersion: rbac.authorization.k8s.io/v1 219 | kind: ClusterRoleBinding 220 | metadata: 221 | name: fluent-bit-role-binding 222 | roleRef: 223 | apiGroup: rbac.authorization.k8s.io 224 | kind: ClusterRole 225 | name: fluent-bit-role 226 | subjects: 227 | - kind: ServiceAccount 228 | name: fluent-bit 229 | namespace: amazon-cloudwatch 230 | --- 231 | apiVersion: v1 232 | kind: ConfigMap 233 | metadata: 234 | name: fluent-bit-config 235 | namespace: amazon-cloudwatch 236 | labels: 237 | k8s-app: fluent-bit 238 | data: 239 | fluent-bit.conf: | 240 | [SERVICE] 241 | Flush 5 242 | Log_Level info 243 | Daemon off 244 | Parsers_File parsers.conf 245 | HTTP_Server ${HTTP_SERVER} 246 | HTTP_Listen 0.0.0.0 247 | HTTP_Port ${HTTP_PORT} 248 | storage.path /var/fluent-bit/state/flb-storage/ 249 | storage.sync normal 250 | storage.checksum off 251 | storage.backlog.mem_limit 5M 252 | 253 | @INCLUDE application-log.conf 254 | @INCLUDE dataplane-log.conf 255 | @INCLUDE host-log.conf 256 | 257 | application-log.conf: | 258 | [INPUT] 259 | Name tail 260 | Tag application.* 261 | Exclude_Path /var/log/containers/cloudwatch-agent*, /var/log/containers/fluent-bit*, /var/log/containers/aws-node*, /var/log/containers/kube-proxy* 262 | Path /var/log/containers/*.log 263 | Docker_Mode On 264 | Docker_Mode_Flush 5 265 | Docker_Mode_Parser container_firstline 266 | Parser docker 267 | DB /var/fluent-bit/state/flb_container.db 268 | Mem_Buf_Limit 50MB 269 | Skip_Long_Lines On 270 | Refresh_Interval 10 271 | Rotate_Wait 30 272 | storage.type filesystem 273 | Read_from_Head ${READ_FROM_HEAD} 274 | 275 | [INPUT] 276 | Name tail 277 | Tag application.* 278 | Path /var/log/containers/fluent-bit* 279 | Parser docker 280 | DB /var/fluent-bit/state/flb_log.db 281 | Mem_Buf_Limit 5MB 282 | Skip_Long_Lines On 283 | Refresh_Interval 10 284 | Read_from_Head ${READ_FROM_HEAD} 285 | 286 | [INPUT] 287 | Name tail 288 | Tag application.* 289 | Path /var/log/containers/cloudwatch-agent* 290 | Docker_Mode On 
291 | Docker_Mode_Flush 5 292 | Docker_Mode_Parser cwagent_firstline 293 | Parser docker 294 | DB /var/fluent-bit/state/flb_cwagent.db 295 | Mem_Buf_Limit 5MB 296 | Skip_Long_Lines On 297 | Refresh_Interval 10 298 | Read_from_Head ${READ_FROM_HEAD} 299 | 300 | [FILTER] 301 | Name kubernetes 302 | Match application.* 303 | Kube_URL https://kubernetes.default.svc:443 304 | Kube_Tag_Prefix application.var.log.containers. 305 | Merge_Log On 306 | Merge_Log_Key log_processed 307 | K8S-Logging.Parser On 308 | K8S-Logging.Exclude Off 309 | Labels Off 310 | Annotations Off 311 | 312 | [OUTPUT] 313 | Name cloudwatch_logs 314 | Match application.* 315 | region ${AWS_REGION} 316 | log_group_name /aws/containerinsights/${CLUSTER_NAME}/application 317 | log_stream_prefix ${HOST_NAME}- 318 | auto_create_group true 319 | extra_user_agent container-insights 320 | 321 | dataplane-log.conf: | 322 | [INPUT] 323 | Name systemd 324 | Tag dataplane.systemd.* 325 | Systemd_Filter _SYSTEMD_UNIT=docker.service 326 | Systemd_Filter _SYSTEMD_UNIT=kubelet.service 327 | DB /var/fluent-bit/state/systemd.db 328 | Path /var/log/journal 329 | Read_From_Tail ${READ_FROM_TAIL} 330 | 331 | [INPUT] 332 | Name tail 333 | Tag dataplane.tail.* 334 | Path /var/log/containers/aws-node*, /var/log/containers/kube-proxy* 335 | Docker_Mode On 336 | Docker_Mode_Flush 5 337 | Docker_Mode_Parser container_firstline 338 | Parser docker 339 | DB /var/fluent-bit/state/flb_dataplane_tail.db 340 | Mem_Buf_Limit 50MB 341 | Skip_Long_Lines On 342 | Refresh_Interval 10 343 | Rotate_Wait 30 344 | storage.type filesystem 345 | Read_from_Head ${READ_FROM_HEAD} 346 | 347 | [FILTER] 348 | Name modify 349 | Match dataplane.systemd.* 350 | Rename _HOSTNAME hostname 351 | Rename _SYSTEMD_UNIT systemd_unit 352 | Rename MESSAGE message 353 | Remove_regex ^((?!hostname|systemd_unit|message).)*$ 354 | 355 | [FILTER] 356 | Name aws 357 | Match dataplane.* 358 | imds_version v1 359 | 360 | [OUTPUT] 361 | Name cloudwatch_logs 362 | Match dataplane.* 363 | region ${AWS_REGION} 364 | log_group_name /aws/containerinsights/${CLUSTER_NAME}/dataplane 365 | log_stream_prefix ${HOST_NAME}- 366 | auto_create_group true 367 | extra_user_agent container-insights 368 | 369 | host-log.conf: | 370 | [INPUT] 371 | Name tail 372 | Tag host.dmesg 373 | Path /var/log/dmesg 374 | Parser syslog 375 | DB /var/fluent-bit/state/flb_dmesg.db 376 | Mem_Buf_Limit 5MB 377 | Skip_Long_Lines On 378 | Refresh_Interval 10 379 | Read_from_Head ${READ_FROM_HEAD} 380 | 381 | [INPUT] 382 | Name tail 383 | Tag host.messages 384 | Path /var/log/messages 385 | Parser syslog 386 | DB /var/fluent-bit/state/flb_messages.db 387 | Mem_Buf_Limit 5MB 388 | Skip_Long_Lines On 389 | Refresh_Interval 10 390 | Read_from_Head ${READ_FROM_HEAD} 391 | 392 | [INPUT] 393 | Name tail 394 | Tag host.secure 395 | Path /var/log/secure 396 | Parser syslog 397 | DB /var/fluent-bit/state/flb_secure.db 398 | Mem_Buf_Limit 5MB 399 | Skip_Long_Lines On 400 | Refresh_Interval 10 401 | Read_from_Head ${READ_FROM_HEAD} 402 | 403 | [FILTER] 404 | Name aws 405 | Match host.* 406 | imds_version v1 407 | 408 | [OUTPUT] 409 | Name cloudwatch_logs 410 | Match host.* 411 | region ${AWS_REGION} 412 | log_group_name /aws/containerinsights/${CLUSTER_NAME}/host 413 | log_stream_prefix ${HOST_NAME}. 
414 | auto_create_group true 415 | extra_user_agent container-insights 416 | 417 | parsers.conf: | 418 | [PARSER] 419 | Name docker 420 | Format json 421 | Time_Key time 422 | Time_Format %Y-%m-%dT%H:%M:%S.%LZ 423 | 424 | [PARSER] 425 | Name syslog 426 | Format regex 427 | Regex ^(?