├── namespaces
│   ├── ns.yaml
│   └── commands.txt
├── .gitignore
├── security
│   ├── application
│   │   ├── username.txt
│   │   ├── password.txt
│   │   ├── secret.yaml
│   │   ├── example-secret.yaml
│   │   ├── commands.txt
│   │   ├── secret-file.yaml
│   │   └── pod-with-secret.yaml
│   ├── psp
│   │   ├── test-pod.yaml
│   │   ├── commands.txt
│   │   └── example-psp.yaml
│   ├── infrastructure
│   │   ├── role.yaml
│   │   ├── role-binding.yaml
│   │   └── commands.txt
│   └── commands.txt
├── helm
│   ├── values.yaml
│   ├── helm-init-commands.txt
│   ├── chartmuseum
│   │   ├── ci
│   │   │   └── ingress-values.yaml
│   │   ├── Chart.yaml
│   │   ├── .helmignore
│   │   ├── templates
│   │   │   ├── serviceaccount.yaml
│   │   │   ├── secret.yaml
│   │   │   ├── pv.yaml
│   │   │   ├── pvc.yaml
│   │   │   ├── servicemonitor.yaml
│   │   │   ├── service.yaml
│   │   │   ├── ingress.yaml
│   │   │   ├── NOTES.txt
│   │   │   ├── _helpers.tpl
│   │   │   └── deployment.yaml
│   │   └── values.yaml
│   ├── deploy-helm-chart.txt
│   ├── deploy-repository.txt
│   ├── chartmuseum-values.yaml
│   ├── get_info.sh
│   └── commands.txt
├── exercises
│   ├── 1
│   │   ├── ns.yaml
│   │   └── bookstack.yaml
│   ├── 2
│   │   ├── commands.txt
│   │   ├── index.html
│   │   ├── pod-with-config-map.yaml
│   │   ├── cheese-ingress.yaml
│   │   ├── cheese-deployment.yaml
│   │   └── cheese-secret.yaml
│   ├── 3
│   │   ├── example-secret.yaml
│   │   └── pod-with-secret.yaml
│   └── 4
│       └── todo
│           ├── Chart.yaml
│           ├── templates
│           │   ├── serviceaccount.yaml
│           │   ├── tests
│           │   │   └── test-connection.yaml
│           │   ├── service.yaml
│           │   ├── ingress.yaml
│           │   ├── NOTES.txt
│           │   ├── _helpers.tpl
│           │   └── deployment.yaml
│           ├── .helmignore
│           └── values.yaml
├── on-premise
│   ├── kubespray
│   │   ├── test_env
│   │   │   ├── group_vars
│   │   │   │   ├── k8s-cluster
│   │   │   │   │   ├── k8s-net-cilium.yml
│   │   │   │   │   ├── k8s-net-macvlan.yml
│   │   │   │   │   ├── k8s-net-canal.yml
│   │   │   │   │   ├── k8s-net-flannel.yml
│   │   │   │   │   ├── k8s-net-contiv.yml
│   │   │   │   │   ├── k8s-net-calico.yml
│   │   │   │   │   ├── k8s-net-kube-router.yml
│   │   │   │   │   ├── k8s-net-weave.yml
│   │   │   │   │   ├── addons.yml
│   │   │   │   │   └── k8s-cluster.yml
│   │   │   │   ├── all
│   │   │   │   │   ├── coreos.yml
│   │   │   │   │   ├── azure.yml
│   │   │   │   │   ├── openstack.yml
│   │   │   │   │   ├── oci.yml
│   │   │   │   │   ├── docker.yml
│   │   │   │   │   └── all.yml
│   │   │   │   └── etcd.yml
│   │   │   ├── credentials
│   │   │   │   └── kubeadm_certificate_key.creds
│   │   │   └── inventory.ini
│   │   ├── Vagrantfile
│   │   └── commands.txt
│   └── rancher
│       ├── provision_docker.sh
│       ├── provision_rancher.sh
│       ├── commands.txt
│       └── Vagrantfile
├── volumes
│   ├── configs
│   │   ├── ui.properties
│   │   └── game.properties
│   ├── commands_volumes.txt
│   ├── pvc.yaml
│   ├── commands.txt
│   ├── pv.yaml
│   ├── pod-empty-dir.yaml
│   ├── config-map.yaml
│   ├── pod-with-pvc.yaml
│   └── pod-with-config-map.yaml
├── assigning-pods-to-nodes
│   ├── pod-without-resources.yaml
│   ├── memory-defaults.yaml
│   ├── pod-limits-only.yaml
│   ├── pod-node-selector.yaml
│   ├── pod-with-toleration.yaml
│   ├── pod-with-resources.yaml
│   ├── commands.txt
│   ├── pod-with-node-affinity.yaml
│   └── pod-with-pod-affinity.yaml
├── guestbook
│   ├── commands.txt
│   ├── redis-slave-service.yaml
│   ├── redis-master-service.yaml
│   ├── frontend-service.yaml
│   ├── redis-master-deployment.yaml
│   ├── frontend-deployment.yaml
│   └── redis-slave-deployment.yaml
├── internal-networking
│   ├── deny-traffic-from-other-namespaces
│   │   ├── deny-from-other-namepspaces.yaml
│   │   └── commands.txt
│   ├── two-containers-pod.yaml
│   ├── commands.txt
│   ├── two-containers-shared-volume-pod.yaml
│   ├── pod-with-service.yaml
│   └── example-network-policy.yaml
├── advanced-orchestration
│   ├── commands.txt
│   ├── replicaset.yaml
│   ├── cronjob.yaml
│   ├── daemonset.yaml
│   ├── statefulset.yaml
│   └── sts2.yaml
├── ingress-contoller
│   ├── commands.txt
│   ├── cafe-ingress.yaml
│   ├── cafe-deployment.yaml
│   └── cafe-secret.yaml
├── logging-and-monitoring
│   ├── weave-scope
│   │   ├── Chart.yaml
│   │   ├── charts
│   │   │   ├── weave-scope-cluster-agent
│   │   │   │   ├── templates
│   │   │   │   │   ├── serviceaccount.yaml
│   │   │   │   │   ├── clusterrole.yaml
│   │   │   │   │   ├── clusterrolebinding.yaml
│   │   │   │   │   ├── deployment.yaml
│   │   │   │   │   └── _helpers.tpl
│   │   │   │   └── Chart.yaml
│   │   │   ├── weave-scope-agent
│   │   │   │   ├── Chart.yaml
│   │   │   │   └── templates
│   │   │   │       ├── _helpers.tpl
│   │   │   │       └── daemonset.yaml
│   │   │   └── weave-scope-frontend
│   │   │       ├── Chart.yaml
│   │   │       └── templates
│   │   │           ├── service.yaml
│   │   │           ├── ingress.yaml
│   │   │           ├── deployment.yaml
│   │   │           └── _helpers.tpl
│   │   ├── templates
│   │   │   ├── test-config.yaml
│   │   │   ├── weave-scope-tests.yaml
│   │   │   ├── _helpers.tpl
│   │   │   └── NOTES.txt
│   │   ├── values.yaml
│   │   └── README.md
│   ├── commands.txt
│   ├── fluentd
│   │   ├── fluentd-rbac.yaml
│   │   ├── fluentd-daemonset.yaml
│   │   └── fluentd-config.yaml
│   └── logging
│       ├── kibana.yaml
│       └── elastic-stack.yaml
├── hpa
│   ├── heapster-clusterrole.yaml
│   ├── commands.txt
│   └── php-apache.yaml
├── kube-start
│   ├── commands.txt
│   └── pod-with-service.yaml
└── kubeadm-deployment
    ├── commands.txt
    └── Vagrantfile
/namespaces/ns.yaml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | **/.vagrant
2 | .idea
--------------------------------------------------------------------------------
/security/application/username.txt:
--------------------------------------------------------------------------------
1 | admin
--------------------------------------------------------------------------------
/security/application/password.txt:
--------------------------------------------------------------------------------
1 | 1f2d1e2e67df
--------------------------------------------------------------------------------
/helm/values.yaml:
--------------------------------------------------------------------------------
1 | service:
2 | type: NodePort
3 |
--------------------------------------------------------------------------------
/exercises/2/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl create configmap indexhtml --from-file=index.html
2 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-cilium.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/cilium/defaults/main.yml
2 |
--------------------------------------------------------------------------------
/volumes/configs/ui.properties:
--------------------------------------------------------------------------------
1 | color.good=purple
2 | color.bad=yellow
3 | allow.textmode=true
4 | how.nice.to.look=fairlyNice
5 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/credentials/kubeadm_certificate_key.creds:
--------------------------------------------------------------------------------
1 | D1ffC9Fd4A8a08Ab1333BfbDB16cD44eDAbfdb4Ef3dFECA45BDd1d3Eab4AfcaB
2 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/all/coreos.yml:
--------------------------------------------------------------------------------
1 | ## Does coreos need auto upgrade, default is true
2 | # coreos_auto_upgrade: true
3 |
--------------------------------------------------------------------------------
/exercises/4/todo/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: "1.0"
3 | description: A Helm chart for Kubernetes
4 | name: todo
5 | version: 0.1.0
6 |
--------------------------------------------------------------------------------
/volumes/commands_volumes.txt:
--------------------------------------------------------------------------------
1 | kubectl exec -it task-pv-pod bash
2 | cd /usr/share/nginx/html
3 | echo 1 > index.html
4 | exit
5 |
6 | minikube ssh
7 | cd /data/pv0001
8 |
--------------------------------------------------------------------------------
/security/psp/test-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pause
5 | spec:
6 | containers:
7 | - name: pause
8 | image: k8s.gcr.io/pause
9 |
--------------------------------------------------------------------------------
/security/application/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: mysecret
5 | type: Opaque
6 | data:
7 | username: YWRtaW4=
8 | password: MWYyZDFlMmU2N2Rm
9 |
--------------------------------------------------------------------------------
/security/application/example-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: secret-example
5 | type: Opaque
6 | data:
7 | username: YWRtaW4=
8 | password: MWYyZDFlMmU2N2Rm
9 |
--------------------------------------------------------------------------------
/helm/helm-init-commands.txt:
--------------------------------------------------------------------------------
1 | curl -L https://git.io/get_helm.sh | bash
2 | helm init # set up helm with our cluster (Helm 2 only; Helm 3 removed the init step)
3 | helm repo update # sync all helm charts info
4 | helm repo list
5 | helm list
6 |
7 |
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/pod-without-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: default-mem-demo
5 | spec:
6 | containers:
7 | - name: default-mem-demo-ctr
8 | image: nginx
--------------------------------------------------------------------------------
/volumes/configs/game.properties:
--------------------------------------------------------------------------------
1 | enemies=aliens
2 | lives=3
3 | enemies.cheat=true
4 | enemies.cheat.level=noGoodRotten
5 | secret.code.passphrase=UUDDLRLRBABAS
6 | secret.code.allowed=true
7 | secret.code.lives=30
--------------------------------------------------------------------------------
/on-premise/rancher/provision_docker.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
3 | yum install docker-ce -y
4 | systemctl start docker
5 | systemctl enable docker
--------------------------------------------------------------------------------
/volumes/pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: task-pv-claim
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | resources:
9 | requests:
10 | storage: 3Gi
11 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-macvlan.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # private interface, on a l2-network
3 | macvlan_interface: "eth1"
4 |
5 | # Enable nat in default gateway network interface
6 | enable_nat_default_gateway: true
7 |
--------------------------------------------------------------------------------
/exercises/3/example-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: secret-example
5 | type: Opaque
6 | data:
7 | username: cm9vdF91c2VyCg==
8 | password: RXhwZXJ0cyExMjMK
9 | db_url: bXlzcWwuc2VydmljZXMuY29tCg==
10 |
--------------------------------------------------------------------------------
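
Note that these values decode to root_user, Experts!123 and mysql.services.com, each with a trailing newline (the Cg== / K endings encode a \n, which suggests they were produced by a plain echo rather than echo -n). To inspect a value, or re-encode one without the stray newline:

    echo 'cm9vdF91c2VyCg==' | base64 --decode   # prints root_user plus a newline
    echo -n 'root_user' | base64                # cm9vdF91c2Vy, no trailing newline
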
/security/application/commands.txt:
--------------------------------------------------------------------------------
1 | # Create the files needed for the rest of the example.
2 | echo -n 'admin' > ./username.txt
3 | echo -n '1f2d1e2e67df' > ./password.txt
4 | kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt
5 |
--------------------------------------------------------------------------------
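
Once the db-user-pass secret exists, a pod can consume it as environment variables or as a mounted volume. A minimal sketch of the env-var route (the pod and variable names are illustrative, not files from this folder; keys created with --from-file default to the source file names):

    apiVersion: v1
    kind: Pod
    metadata:
      name: secret-env-demo
    spec:
      containers:
      - name: app
        image: nginx
        env:
        - name: DB_USERNAME
          valueFrom:
            secretKeyRef:
              name: db-user-pass
              key: username.txt
        - name: DB_PASSWORD
          valueFrom:
            secretKeyRef:
              name: db-user-pass
              key: password.txt
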
/volumes/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl apply -f pod-empty-dir.yaml
2 | kubectl create configmap game-config --from-file=configs/
3 | kubectl describe configmap game-config
4 | kubectl exec -it pod-env-var cat /app/game.properties
5 | kubectl exec -it pod-env-var env
6 |
7 |
--------------------------------------------------------------------------------
/helm/chartmuseum/ci/ingress-values.yaml:
--------------------------------------------------------------------------------
1 | ingress:
2 | enabled: true
3 | annotations:
4 | kubernetes.io/ingress.class: nginx
5 | kubernetes.io/tls-acme: "true"
6 | hosts:
7 | - name: chartmuseum.domain1.com
8 | path: /
9 | tls: false
10 |
--------------------------------------------------------------------------------
/security/application/secret-file.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: mysecret
5 | type: Opaque
6 | stringData:
7 | config.yaml: |-
8 | apiUrl: "https://my.api.com/api/v1"
9 | username: admin
10 | password: superpassword
11 |
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/memory-defaults.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: LimitRange
3 | metadata:
4 | name: mem-limit-range
5 | spec:
6 | limits:
7 | - default:
8 | memory: 512Mi
9 | defaultRequest:
10 | memory: 256Mi
11 | type: Container
--------------------------------------------------------------------------------
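
A LimitRange only affects pods created in its namespace after it is in place. A quick way to watch the defaulting happen, using pod-without-resources.yaml from this folder (the namespace name is arbitrary):

    kubectl create namespace mem-example
    kubectl -n mem-example apply -f memory-defaults.yaml
    kubectl -n mem-example apply -f pod-without-resources.yaml
    # the container should now show requests.memory=256Mi and limits.memory=512Mi
    kubectl -n mem-example get pod default-mem-demo -o jsonpath='{.spec.containers[0].resources}'
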
/exercises/4/todo/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ template "todo.serviceAccountName" . }}
6 | labels:
7 | {{ include "todo.labels" . | indent 4 }}
8 | {{- end -}}
9 |
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/pod-limits-only.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: default-mem-demo-2
5 | spec:
6 | containers:
7 | - name: default-mem-demo-2-ctr
8 | image: nginx
9 | resources:
10 | limits:
11 | memory: "1Gi"
--------------------------------------------------------------------------------
/helm/deploy-helm-chart.txt:
--------------------------------------------------------------------------------
1 | helm install myblog stable/wordpress -f values.yaml # install a specific helm chart from the stable repository
2 | kubectl get all
3 | helm list
4 | # wait a few minutes for the deployment to complete
5 | kubectl port-forward svc/myblog-wordpress 8080:80
6 |
--------------------------------------------------------------------------------
/volumes/pv.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: task-pv-volume
5 | labels:
6 | type: local
7 | spec:
8 | capacity:
9 | storage: 10Gi
10 | accessModes:
11 | - ReadWriteOnce
12 | hostPath:
13 | path: "/data/pv0001/"
14 |
--------------------------------------------------------------------------------
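
The claim asks for 3Gi with ReadWriteOnce, so it is satisfiable by this 10Gi hostPath volume. One way to confirm the pair binds before starting the pod (note that on minikube a default StorageClass may dynamically provision a volume instead, so check which volume the claim actually bound to):

    kubectl apply -f pv.yaml -f pvc.yaml
    kubectl get pv task-pv-volume    # STATUS should move from Available to Bound
    kubectl get pvc task-pv-claim    # the VOLUME column names the bound PV
    kubectl apply -f pod-with-pvc.yaml
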
/guestbook/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl apply -f redis-master-deployment.yaml
2 | kubectl apply -f redis-master-service.yaml
3 | kubectl apply -f redis-slave-deployment.yaml
4 | kubectl apply -f redis-slave-service.yaml
5 | kubectl apply -f frontend-deployment.yaml
6 | kubectl apply -f frontend-service.yaml
7 |
--------------------------------------------------------------------------------
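
With everything applied, the frontend is a NodePort service. On minikube (assumed here), one way to reach it once the pods are Running:

    kubectl get pods -l app=guestbook --watch
    minikube service frontend --url
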
/assigning-pods-to-nodes/pod-node-selector.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | labels:
6 | env: test
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx
11 | imagePullPolicy: IfNotPresent
12 | nodeSelector:
13 | my.key: my.value
14 |
--------------------------------------------------------------------------------
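
The pod stays Pending until some node actually carries the my.key=my.value label. On a single-node minikube, for example:

    kubectl label nodes minikube my.key=my.value
    kubectl apply -f pod-node-selector.yaml
    kubectl get pod nginx -o wide    # shows the node it was scheduled onto
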
/on-premise/rancher/provision_rancher.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 | yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
3 | yum install docker-ce -y
4 | systemctl start docker
5 | systemctl enable docker
6 | docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher:v2.4.5
--------------------------------------------------------------------------------
/guestbook/redis-slave-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis-slave
5 | labels:
6 | app: redis
7 | role: slave
8 | tier: backend
9 | spec:
10 | ports:
11 | - port: 6379
12 | selector:
13 | app: redis
14 | role: slave
15 | tier: backend
16 |
--------------------------------------------------------------------------------
/helm/deploy-repository.txt:
--------------------------------------------------------------------------------
1 | helm install chartmuseum ./chartmuseum -f chartmuseum-values.yaml
2 | helm plugin install https://github.com/chartmuseum/helm-push
3 | helm repo add chartmuseum http://$(minikube ip):32688
4 | helm repo update
5 | helm push mychart-0.1.0.tgz chartmuseum
6 | helm install test --debug --dry-run ./
--------------------------------------------------------------------------------
/security/infrastructure/role.yaml:
--------------------------------------------------------------------------------
1 | kind: Role
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | namespace: default
5 | name: pod-reader
6 | rules:
7 | - apiGroups: [""] # "” indicates the core API group
8 | resources: ["pods"]
9 | verbs:
10 | - get
11 | - watch
12 | - list
13 |
--------------------------------------------------------------------------------
/exercises/2/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 |   <head>
3 |     <title>This is the title of the webpage!</title>
4 |   </head>
5 |   <body>
6 |     <p>This is an example paragraph. Anything in the body tag will appear on the page, just like this p tag and its contents.</p>
7 |   </body>
8 | </html>
9 |
--------------------------------------------------------------------------------
/internal-networking/deny-traffic-from-other-namespaces/deny-from-other-namepspaces.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | namespace: secondary
5 | name: deny-from-other-namespaces
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | ingress:
10 | - from:
11 | - podSelector: {}
12 |
--------------------------------------------------------------------------------
/internal-networking/two-containers-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: two-containers
5 | spec:
6 | restartPolicy: Never
7 | containers:
8 | - name: nginx-container
9 | image: nginx
10 |
11 | - name: debian-container
12 | image: debian
13 | command: ["sleep"]
14 | args: ["5000"]
15 |
--------------------------------------------------------------------------------
/guestbook/redis-master-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis-master
5 | labels:
6 | app: redis
7 | role: master
8 | tier: backend
9 | spec:
10 | ports:
11 | - port: 6379
12 | targetPort: 6379
13 | selector:
14 | app: redis
15 | role: master
16 | tier: backend
17 |
--------------------------------------------------------------------------------
/volumes/pod-empty-dir.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: test-pd
6 | spec:
7 | containers:
8 | - image: k8s.gcr.io/test-webserver
9 | name: test-container
10 | volumeMounts:
11 | - mountPath: /cache
12 | name: cache-volume
13 | volumes:
14 | - name: cache-volume
15 | emptyDir: {}
16 |
--------------------------------------------------------------------------------
/helm/chartmuseum-values.yaml:
--------------------------------------------------------------------------------
1 | #helm install stable/chartmuseum -n chartmuseum --set service.type=NodePort --set service.nodePort=32688 --set env.open.DISABLE_API=false
2 | service:
3 | type: NodePort
4 | nodePort: 32688
5 |
6 | persistence:
7 | enabled: true
8 | accessMode: ReadWriteOnce
9 |
10 | env:
11 | open:
12 | DISABLE_API: false
13 |
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/pod-with-toleration.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | labels:
6 | env: test
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx
11 | imagePullPolicy: IfNotPresent
12 | tolerations:
13 | - key: "example-key"
14 | operator: "Exists"
15 | effect: "NoSchedule"
16 |
--------------------------------------------------------------------------------
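
A toleration only matters once a node carries a matching taint, and it permits scheduling there rather than requiring it. A possible demo on minikube (node name assumed; the taint value is arbitrary, since the toleration uses operator Exists):

    kubectl taint nodes minikube example-key=demo:NoSchedule
    kubectl apply -f pod-with-toleration.yaml                  # schedules despite the taint
    kubectl taint nodes minikube example-key=demo:NoSchedule-  # remove the taint afterwards
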
/security/psp/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl create namespace psp-example
2 | kubectl create serviceaccount -n psp-example fake-user
3 | kubectl create rolebinding -n psp-example fake-editor --clusterrole=edit --serviceaccount=psp-example:fake-user
4 | alias kubectl-admin='kubectl -n psp-example'
5 | alias kubectl-user='kubectl --as=system:serviceaccount:psp-example:fake-user -n psp-example'
6 |
7 |
--------------------------------------------------------------------------------
/helm/chartmuseum/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | description: DEPRECATED Host your own Helm Chart Repository
3 | name: chartmuseum
4 | version: 2.14.2
5 | appVersion: 0.12.0
6 | home: https://github.com/helm/chartmuseum
7 | icon: https://raw.githubusercontent.com/helm/chartmuseum/master/logo2.png
8 | keywords:
9 | - chartmuseum
10 | - helm
11 | - charts repo
12 | deprecated: true
13 |
--------------------------------------------------------------------------------
/volumes/config-map.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: example-configmap
5 | data:
6 | # Configuration values can be set as key-value properties
7 | database: mongodb
8 | database_uri: mongodb://localhost:27017
9 |
10 | # Or set as complete file contents (even JSON!)
11 | keys: |
12 | image.public.key=771
13 | rsa.public.key=42
14 |
--------------------------------------------------------------------------------
/advanced-orchestration/commands.txt:
--------------------------------------------------------------------------------
1 | cd k8s-experts/advanced-orchestration
2 | cat statefulset.yaml
3 | kubectl apply -f statefulset.yaml
4 | kubectl get all
5 | kubectl scale statefulset web --replicas 10
6 | kubectl get all
7 |
8 | cd k8s-experts/advanced-orchestration
9 | cat daemonset.yaml
10 | kubectl apply -f daemonset.yaml
11 | kubectl get all
12 | kubectl logs -l name=cleanup
13 |
14 |
--------------------------------------------------------------------------------
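
The replicaset.yaml and cronjob.yaml in this folder follow the same apply-and-observe pattern; a possible sequence for exercising them:

    kubectl apply -f replicaset.yaml
    kubectl get rs frontend
    kubectl delete pod -l tier=frontend   # the ReplicaSet immediately replaces the pods

    kubectl apply -f cronjob.yaml
    kubectl get jobs --watch              # a new Job should appear every minute
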
/helm/get_info.sh:
--------------------------------------------------------------------------------
1 | base_url="http://"$(minikube ip)":"$(kubectl get svc myblog-wordpress -o=jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
2 | echo "WordPress URL is: $base_url"
3 | echo "Management Console: ${base_url}/wp-login.php"
4 | echo -e "Username: user\nPassword: "$(kubectl get secret --namespace default myblog-wordpress -o jsonpath="{.data.wordpress-password}" | base64 --decode) # to extract credentials
5 |
6 |
--------------------------------------------------------------------------------
/security/psp/example-psp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: example
5 | spec:
6 | privileged: false # Don't allow privileged pods!
7 | # The rest fills in some required fields.
8 | seLinux:
9 | rule: RunAsAny
10 | supplementalGroups:
11 | rule: RunAsAny
12 | runAsUser:
13 | rule: RunAsAny
14 | fsGroup:
15 | rule: RunAsAny
16 | volumes:
17 | - '*'
18 |
--------------------------------------------------------------------------------
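
Creating the policy is only half the story: the requesting user must also be authorized to use it. A sketch of the admission flow with the fake-user aliases from psp/commands.txt (the role and binding names are illustrative):

    kubectl-admin create -f example-psp.yaml
    kubectl-user create -f test-pod.yaml    # rejected: fake-user cannot use any PSP yet
    kubectl-admin create role psp:unprivileged --verb=use --resource=podsecuritypolicy --resource-name=example
    kubectl-admin create rolebinding fake-user:psp:unprivileged --role=psp:unprivileged --serviceaccount=psp-example:fake-user
    kubectl-user create -f test-pod.yaml    # now admitted by the example policy
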
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-canal.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/canal/defaults/main.yml
2 |
3 | # The interface used by canal for host <-> host communication.
4 | # If left blank, then the interface is chosen using the node's
5 | # default route.
6 | # canal_iface: ""
7 |
8 | # Whether or not to masquerade traffic to destinations not within
9 | # the pod network.
10 | # canal_masquerade: "true"
11 |
--------------------------------------------------------------------------------
/security/infrastructure/role-binding.yaml:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: read-pods
5 | namespace: default
6 | subjects:
7 | - kind: User
8 | name: user1 # Name is case sensitive
9 | apiGroup: rbac.authorization.k8s.io
10 | roleRef:
11 | kind: Role #this must be Role or ClusterRole
12 | name: pod-reader # must match the name of the Role
13 | apiGroup: rbac.authorization.k8s.io
14 |
--------------------------------------------------------------------------------
/security/commands.txt:
--------------------------------------------------------------------------------
1 | # Open a terminal window on your Docker server, then download and run the Docker Bench for Security script:
2 | git clone https://github.com/docker/docker-bench-security.git
3 | cd docker-bench-security
4 | sudo sh docker-bench-security.sh
5 |
6 |
7 | git clone https://github.com/aquasecurity/kube-bench.git
8 | cd kube-bench
9 | kubectl apply -f job.yaml
10 |
11 |
--------------------------------------------------------------------------------
/ingress-contoller/commands.txt:
--------------------------------------------------------------------------------
1 | # docs: https://kubernetes.github.io/ingress-nginx/deploy/#docker-for-mac
2 | minikube addons enable ingress
3 | cd ingress-contoller
4 | kubectl apply -f ./
5 | # add to /etc/hosts: 127.0.0.1 cafe.example.com
6 | # for Windows users: https://gist.github.com/zenorocha/18b10a14b2deb214dc4ce43a2d2e2992
7 | minikube tunnel
8 | # in Chrome go to: https://cafe.example.com/tea
9 | # in Chrome go to: https://cafe.example.com/coffee
10 |
11 |
--------------------------------------------------------------------------------
/helm/commands.txt:
--------------------------------------------------------------------------------
1 | 1. download helm binary from: https://github.com/helm/helm/releases
2 | 2. copy helm[.exe] to /usr/local/bin or c:/Windows/System32
3 | 3. helm repo add stable https://kubernetes-charts.storage.googleapis.com
4 | 4. helm repo update
5 | 5. helm install redis-prod stable/redis --set password="secretpass"
6 | 6. kubectl get all
7 | 7. helm upgrade redis-prod stable/redis --set cluster.slaveCount=5 --set password="secretpass"
8 | 8. helm delete redis-prod
9 |
--------------------------------------------------------------------------------
/exercises/4/todo/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 | .vscode/
23 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | name: weave-scope
3 | version: 1.1.12
4 | appVersion: 1.12.0
5 | description: DEPRECATED - A Helm chart for the Weave Scope cluster visualizer.
6 | keywords:
7 | - containers
8 | - dashboard
9 | - monitoring
10 | home: https://www.weave.works/oss/scope/
11 | sources:
12 | - https://github.com/weaveworks/scope
13 | deprecated: true
14 | icon: https://avatars1.githubusercontent.com/u/9976052?s=64
15 |
--------------------------------------------------------------------------------
/exercises/4/todo/templates/tests/test-connection.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ include "todo.fullname" . }}-test-connection"
5 | labels:
6 | {{ include "todo.labels" . | indent 4 }}
7 | annotations:
8 | "helm.sh/hook": test-success
9 | spec:
10 | containers:
11 | - name: wget
12 | image: busybox
13 | command: ['wget']
14 | args: ['{{ include "todo.fullname" . }}:{{ .Values.service.port }}']
15 | restartPolicy: Never
16 |
--------------------------------------------------------------------------------
/volumes/pod-with-pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: Pod
2 | apiVersion: v1
3 | metadata:
4 | name: task-pv-pod
5 | spec:
6 | volumes:
7 | - name: task-pv-storage
8 | persistentVolumeClaim:
9 | claimName: task-pv-claim
10 | containers:
11 | - name: task-pv-container
12 | image: nginx
13 | ports:
14 | - containerPort: 80
15 | name: "http-server"
16 | volumeMounts:
17 | - mountPath: "/usr/share/nginx/html"
18 | name: task-pv-storage
19 |
--------------------------------------------------------------------------------
/helm/chartmuseum/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 | # OWNERS file for Kubernetes
23 | OWNERS
24 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/all/azure.yml:
--------------------------------------------------------------------------------
1 | ## When azure is used, you need to also set the following variables.
2 | ## see docs/azure.md for details on how to get these values
3 |
4 | # azure_tenant_id:
5 | # azure_subscription_id:
6 | # azure_aad_client_id:
7 | # azure_aad_client_secret:
8 | # azure_resource_group:
9 | # azure_location:
10 | # azure_subnet_name:
11 | # azure_security_group_name:
12 | # azure_vnet_name:
13 | # azure_vnet_resource_group:
14 | # azure_route_table_name:
15 |
--------------------------------------------------------------------------------
/exercises/4/todo/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "todo.fullname" . }}
5 | labels:
6 | {{ include "todo.labels" . | indent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | app.kubernetes.io/name: {{ include "todo.name" . }}
16 | app.kubernetes.io/instance: {{ .Release.Name }}
17 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-cluster-agent/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | {{- if .Values.serviceAccount.create }}
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | labels:
7 | {{- include "weave-scope.helm_std_labels" . | indent 4 }}
8 | component: agent
9 | name: {{ template "weave-scope-agent.serviceAccountName" . }}
10 | annotations:
11 | {{- include "weave-scope.annotations" . | indent 4 }}
12 | {{- end }}
13 | {{- end -}}
14 |
--------------------------------------------------------------------------------
/volumes/pod-with-config-map.yaml:
--------------------------------------------------------------------------------
1 | kind: Pod
2 | apiVersion: v1
3 | metadata:
4 | name: pod-env-var
5 | spec:
6 | containers:
7 | - name: env-var-configmap
8 | image: nginx
9 | envFrom:
10 | - configMapRef:
11 | name: example-configmap
12 | volumeMounts:
13 | - name: config-volume
14 | mountPath: /app/game.properties
15 | subPath: game.properties
16 | volumes:
17 | - name: config-volume
18 | configMap:
19 | name: game-config
20 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/inventory.ini:
--------------------------------------------------------------------------------
1 | [all]
2 | kub-1 ansible_host=127.0.0.1 ansible_port=2222 ip=10.0.20.101 etcd_member_name=etcd1
3 | kub-2 ansible_host=127.0.0.1 ansible_port=2200 ip=10.0.20.102 etcd_member_name=etcd2
4 | kub-3 ansible_host=127.0.0.1 ansible_port=2201 ip=10.0.20.103 etcd_member_name=etcd3
5 |
6 | [kube-master]
7 | kub-1
8 |
9 | [etcd]
10 | kub-1
11 |
12 | [kube-node]
13 | kub-2
14 | kub-3
15 |
16 | [calico-rr]
17 |
18 | [k8s-cluster:children]
19 | kube-master
20 | kube-node
21 | calico-rr
22 |
--------------------------------------------------------------------------------
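
This inventory matches the three Vagrant VMs: kub-1 as master and etcd member, kub-2 and kub-3 as workers. Assuming test_env sits inside a kubespray checkout, the cluster is then deployed with the standard playbook, roughly:

    ansible-playbook -i test_env/inventory.ini --become --become-user=root cluster.yml
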
/advanced-orchestration/replicaset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: ReplicaSet
3 | metadata:
4 | name: frontend
5 | labels:
6 | app: guestbook
7 | tier: frontend
8 | spec:
9 | # modify replicas according to your case
10 | replicas: 3
11 | selector:
12 | matchLabels:
13 | tier: frontend
14 | template:
15 | metadata:
16 | labels:
17 | tier: frontend
18 | spec:
19 | containers:
20 | - name: php-redis
21 | image: gcr.io/google_samples/gb-frontend:v3
22 |
23 |
24 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-agent/Chart.yaml:
--------------------------------------------------------------------------------
1 | description: A Helm chart for the Weave Scope cluster visualizer node agent.
2 | name: weave-scope-agent
3 | version: 1.1.10
4 | appVersion: 1.12.0
5 | keywords:
6 | - containers
7 | - dashboard
8 | - monitoring
9 | home: https://www.weave.works/oss/scope/
10 | sources:
11 | - https://github.com/weaveworks/scope
12 | maintainers:
13 | - name: omkensey
14 | email: github@orion-com.com
15 | icon: https://avatars1.githubusercontent.com/u/9976052?s=64
16 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-frontend/Chart.yaml:
--------------------------------------------------------------------------------
1 | description: A Helm chart for the Weave Scope cluster visualizer frontend.
2 | name: weave-scope-frontend
3 | version: 1.1.10
4 | appVersion: 1.12.0
5 | keywords:
6 | - containers
7 | - dashboard
8 | - monitoring
9 | home: https://www.weave.works/oss/scope/
10 | sources:
11 | - https://github.com/weaveworks/scope
12 | maintainers:
13 | - name: omkensey
14 | email: github@orion-com.com
15 | icon: https://avatars1.githubusercontent.com/u/9976052?s=64
16 |
--------------------------------------------------------------------------------
/namespaces/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl create namespace development
2 | kubectl create namespace production
3 | kubectl config current-context
4 | kubectl config set-context dev --namespace=development --cluster=minikube --user=minikube
5 | kubectl config set-context prod --namespace=production --cluster=minikube --user=minikube
6 | kubectl config view
7 | kubectl config use-context dev
8 | kubectl run nginx --image=nginx:1.15.12-alpine --generator=run-pod/v1
9 | kubectl get all
10 | kubectl config use-context prod
11 | kubectl get all
12 |
13 |
--------------------------------------------------------------------------------
/guestbook/frontend-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: frontend
5 | labels:
6 | app: guestbook
7 | tier: frontend
8 | spec:
9 | # comment or delete the following line if you want to use a LoadBalancer
10 | type: NodePort
11 | # if your cluster supports it, uncomment the following to automatically create
12 | # an external load-balanced IP for the frontend service.
13 | # type: LoadBalancer
14 | ports:
15 | - port: 80
16 | selector:
17 | app: guestbook
18 | tier: frontend
19 |
--------------------------------------------------------------------------------
/internal-networking/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl apply -f two-containers-pod.yaml
2 | kubectl exec -it two-containers -c debian-container bash
3 | apt update && apt install -y net-tools procps curl dnsutils # install networking tools
4 | ps -ef # list all processes
5 | curl localhost
6 |
7 |
8 | kubectl apply -f pod-with-service.yaml
9 | kubectl exec -it debian bash
10 | apt update && apt install -y net-tools procps curl dnsutils
11 | dig fe-gate
12 | curl fe-gate
13 |
14 | kubectl run tmp-shell --rm -i --tty --image nicolaka/netshoot -- /bin/bash
15 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-cluster-agent/Chart.yaml:
--------------------------------------------------------------------------------
1 | description: A Helm chart for the Weave Scope cluster visualizer node agent.
2 | name: weave-scope-cluster-agent
3 | version: 1.1.10
4 | appVersion: 1.12.0
5 | keywords:
6 | - containers
7 | - dashboard
8 | - monitoring
9 | home: https://www.weave.works/oss/scope/
10 | sources:
11 | - https://github.com/weaveworks/scope
12 | maintainers:
13 | - name: omkensey
14 | email: github@orion-com.com
15 | icon: https://avatars1.githubusercontent.com/u/9976052?s=64
16 |
--------------------------------------------------------------------------------
/advanced-orchestration/cronjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: hello
5 | spec:
6 | schedule: "*/1 * * * *"
7 | successfulJobsHistoryLimit: 10
8 | failedJobsHistoryLimit: 1
9 | jobTemplate:
10 | spec:
11 | template:
12 | spec:
13 | containers:
14 | - name: hello
15 | image: nginx
16 | args:
17 | - /bin/sh
18 | - -c
19 | - date; echo aviel Hello from the Kubernetes cluster
20 | restartPolicy: OnFailure
21 |
--------------------------------------------------------------------------------
/hpa/heapster-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: system:heapster
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - events
10 | - namespaces
11 | - nodes
12 | - pods
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | - apiGroups:
18 | - apps
19 | resources:
20 | - deployments
21 | - statefulsets
22 | verbs:
23 | - get
24 | - list
25 | - watch
26 | - apiGroups:
27 | - ""
28 | resources:
29 | - nodes/stats
30 | verbs:
31 | - get
32 |
--------------------------------------------------------------------------------
/kube-start/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl run nginx --image=nginx
2 | kubectl set image deployment/nginx nginx=nginx:1.9.1
3 |
4 | kubectl create deployment hello-node --image=nginx:1.15.12-alpine
5 | kubectl get deployments
6 | kubectl describe deployment hello-node
7 | kubectl get pods
8 | kubectl set image deployment/hello-node nginx=nginx:1.16
9 | kubectl get all
10 | kubectl expose deployment hello-node --type=ClusterIP --port=8080 --target-port=80
11 | kubectl scale deployment/hello-node --replicas=3
12 | kubectl get services
13 |
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | ---
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | {{- if .Values.serviceAccount.name }}
7 | name: {{ .Values.serviceAccount.name }}
8 | {{- else }}
9 | name: {{ include "chartmuseum.fullname" . }}
10 | {{- end }}
11 | labels:
12 | {{ include "chartmuseum.labels.standard" . | indent 4 }}
13 | {{- if .Values.serviceAccount.annotations }}
14 | annotations:
15 | {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
16 | {{- end }}
17 | {{- end -}}
18 |
--------------------------------------------------------------------------------
/hpa/commands.txt:
--------------------------------------------------------------------------------
1 | # start minikube and deploy application
2 | minikube start
3 | minikube addons enable metrics-server
4 | # fix minikube roles issue
5 | kubectl delete clusterrole system:heapster
6 | kubectl apply -f heapster-clusterrole.yaml
7 | kubectl apply -f php-apache.yaml
8 | # enable HPA
9 | kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
10 | kubectl get -o yaml hpa php-apache
11 | kubectl describe hpa php-apache
12 | # run load
13 | kubectl run -it --rm load-generator --image=busybox /bin/sh
14 | while true; do wget -q -O- http://php-apache; done
15 |
--------------------------------------------------------------------------------
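
The kubectl autoscale line above is equivalent to creating the HPA object declaratively; a sketch of the same autoscaler as a manifest (autoscaling/v1 API):

    apiVersion: autoscaling/v1
    kind: HorizontalPodAutoscaler
    metadata:
      name: php-apache
    spec:
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: php-apache
      minReplicas: 1
      maxReplicas: 10
      targetCPUUtilizationPercentage: 50
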
/exercises/2/pod-with-config-map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: fe-gate
5 | spec:
6 | selector:
7 | name: nginx
8 | type: NodePort
9 | ports:
10 | - name: foo # Actually, no port is needed.
11 | port: 80
12 | targetPort: 80
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: nginx
18 | labels:
19 | name: nginx
20 | spec:
21 | containers:
22 | - name: nginx
23 | image: nginx
24 | volumeMounts:
25 | - name: config-volume
26 | mountPath: /usr/share/nginx/html
27 | volumes:
28 | - name: config-volume
29 | configMap:
30 | name: indexhtml
31 |
--------------------------------------------------------------------------------
/internal-networking/deny-traffic-from-other-namespaces/commands.txt:
--------------------------------------------------------------------------------
1 | minikube delete
2 | minikube start --network-plugin=cni --memory=4096
3 | kubectl create -f https://raw.githubusercontent.com/cilium/cilium/1.6.5/install/kubernetes/quick-install.yaml
4 | kubectl -n kube-system get pods --watch
5 | kubectl create ns secondary
6 | kubectl run web --namespace secondary --image=nginx --labels=app=web --expose --port 80
7 | kubectl apply -f deny-from-other-namepspaces.yaml
8 | kubectl run test1 --namespace=default --rm -i -t --image=alpine -- sh
9 | # wget -qO- --timeout=2 http://web.secondary    (times out: cross-namespace traffic is denied)
10 | kubectl run test2 --namespace=secondary --rm -i -t --image=alpine -- sh
11 | # wget -qO- --timeout=2 http://web.secondary   (succeeds: same-namespace traffic is allowed)
12 |
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/pod-with-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: frontend
5 | spec:
6 | containers:
7 | - name: db
8 | image: mysql
9 | env:
10 | - name: MYSQL_ROOT_PASSWORD
11 | value: "password"
12 | resources:
13 | requests:
14 | memory: "64Mi"
15 | cpu: "250m"
16 | limits:
17 | memory: "128Mi"
18 | cpu: "500m"
19 | - name: wp
20 | image: wordpress
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "250m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "500m"
--------------------------------------------------------------------------------
/exercises/2/cheese-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: cheese-ingress
5 | spec:
6 | tls:
7 | - hosts:
8 | - ams.cheese.com
9 | secretName: cheese-secret
10 | rules:
11 | - host: ams.cheese.com
12 | http:
13 | paths:
14 | - path: /gauda
15 | pathType: Prefix
16 | backend:
17 | service:
18 | name: gauda-svc
19 | port:
20 | number: 80
21 | - path: /cheddar
22 | pathType: Prefix
23 | backend:
24 | service:
25 | name: cheddar-svc
26 | port:
27 | number: 80
28 |
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/secret.yaml:
--------------------------------------------------------------------------------
1 | {{- if not .Values.env.existingSecret -}}
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | name: {{ include "chartmuseum.fullname" . }}
6 | labels:
7 | {{- if .Values.secret.labels }}
8 | {{ toYaml .Values.secret.labels | indent 4 }}
9 | {{- end }}
10 | {{ include "chartmuseum.labels.standard" . | indent 4 }}
11 | type: Opaque
12 | data:
13 | {{- range $name, $value := .Values.env.secret }}
14 | {{- if not (empty $value) }}
15 | {{- if eq $name "GOOGLE_CREDENTIALS_JSON" }}
16 | {{ $name }}: {{ $value }}
17 | {{- else }}
18 | {{ $name }}: {{ $value | b64enc }}
19 | {{- end }}
20 | {{- end }}
21 | {{- end }}
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/advanced-orchestration/daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: cleanup
5 | labels:
6 | tier: system
7 | app: cleanup
8 | version: v3
9 | spec:
10 | selector:
11 | matchLabels:
12 | name: cleanup
13 | template:
14 | metadata:
15 | labels:
16 | name: cleanup
17 | spec:
18 | containers:
19 | - image: paralin/kube-cleanup:v3
20 | name: cleanup
21 | volumeMounts:
22 | - name: docker
23 | mountPath: /var/run/docker.sock
24 | volumes:
25 | - name: docker
26 | hostPath:
27 | path: /var/run/docker.sock
28 |
29 |
--------------------------------------------------------------------------------
/internal-networking/two-containers-shared-volume-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: two-containers-shared-volume
5 | spec:
6 |
7 | restartPolicy: Never
8 |
9 | volumes:
10 | - name: shared-data
11 | emptyDir: {}
12 |
13 | containers:
14 |
15 | - name: nginx-container
16 | image: nginx
17 | volumeMounts:
18 | - name: shared-data
19 | mountPath: /usr/share/nginx/html
20 |
21 | - name: debian-container
22 | image: debian
23 | volumeMounts:
24 | - name: shared-data
25 | mountPath: /pod-data
26 | command: ["/bin/sh"]
27 | args: ["-c", "echo Hello from the debian container > /pod-data/index.html"]
28 |
--------------------------------------------------------------------------------
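
The debian container writes index.html into the shared emptyDir and exits; nginx then serves that file from its document root. One way to verify (cat avoids needing curl inside the nginx image):

    kubectl apply -f two-containers-shared-volume-pod.yaml
    kubectl exec two-containers-shared-volume -c nginx-container -- cat /usr/share/nginx/html/index.html
    # expected output: Hello from the debian container
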
/ingress-contoller/cafe-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: cafe-ingress
5 | spec:
6 | ingressClassName: nginx
7 | tls:
8 | - hosts:
9 | - cafe.example.com
10 | secretName: cafe-secret
11 | rules:
12 | - host: cafe.example.com
13 | http:
14 | paths:
15 | - path: /tea
16 | pathType: Prefix
17 | backend:
18 | service:
19 | name: tea-svc
20 | port:
21 | number: 80
22 | - path: /coffee
23 | pathType: Prefix
24 | backend:
25 | service:
26 | name: coffee-svc
27 | port:
28 | number: 80
29 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-cluster-agent/templates/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | {{- if .Values.rbac.create }}
3 | apiVersion: rbac.authorization.k8s.io/v1beta1
4 | kind: ClusterRole
5 | metadata:
6 | labels:
7 | {{- include "weave-scope.helm_std_labels" . | indent 4 }}
8 | component: agent
9 | name: {{ template "weave-scope-agent.serviceAccountName" . }}
10 | annotations:
11 | {{- include "weave-scope.annotations" . | indent 4 }}
12 | rules:
13 | - apiGroups:
14 | - '*'
15 | resources:
16 | - '*'
17 | verbs:
18 | - '*'
19 | - nonResourceURLs:
20 | - '*'
21 | verbs:
22 | - '*'
23 | {{- end }}
24 | {{- end -}}
25 |
--------------------------------------------------------------------------------
/security/infrastructure/commands.txt:
--------------------------------------------------------------------------------
1 | # Create a new folder for our certificates.
2 | mkdir cert && cd cert
3 | # Generate a new certificate and sign it with the minikube CA.
4 | openssl genrsa -out user1.key 2048
5 | openssl req -new -key user1.key -out user1.csr -subj "/CN=user1/O=group1"
6 | openssl x509 -req -in user1.csr -CA ~/.minikube/ca.crt -CAkey ~/.minikube/ca.key -CAcreateserial -out user1.crt -days 500
7 | # Create a credential definition called "user1" and set its certificate and client key.
8 | kubectl config set-credentials user1 --client-certificate=user1.crt --client-key=user1.key
9 | kubectl config set-context user1-context --cluster=minikube --user=user1
10 | kubectl config use-context user1-context
11 |
12 |
--------------------------------------------------------------------------------
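
The role.yaml and role-binding.yaml in this folder are what actually grant user1 read access, and they must be applied from an admin context. A minimal verification sketch, assuming the minikube context still acts as cluster admin:

    kubectl config use-context minikube
    kubectl apply -f role.yaml -f role-binding.yaml
    kubectl auth can-i get pods --as=user1      # expect: yes
    kubectl auth can-i create pods --as=user1   # expect: no
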
/exercises/3/pod-with-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-secret
5 | labels:
6 | app: nginx-secret
7 | spec:
8 | containers:
9 | - name: nginx-secret
10 | image: nginx
11 | ports:
12 | - containerPort: 80
13 | volumeMounts:
14 | - name: secret
15 | mountPath: "/usr/share/nginx/html"
16 | readOnly: true
17 | volumes:
18 | - name: secret
19 | secret:
20 | secretName: secret-example
21 | ---
22 | apiVersion: v1
23 | kind: Secret
24 | metadata:
25 | name: secret-example
26 | type: Opaque
27 | data:
28 | username: cm9vdF91c2VyCg==
29 | password: RXhwZXJ0cyExMjMK
30 | db_url: bXlzcWwuc2VydmljZXMuY29tCg==
31 |
32 |
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/pv.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.persistence.pv.enabled -}}
2 | apiVersion: v1
3 | kind: PersistentVolume
4 | metadata:
5 | {{- if .Values.persistence.pv.pvname }}
6 | name: {{ .Values.persistence.pv.pvname }}
7 | {{- else }}
8 | name: {{ include "chartmuseum.fullname" . }}
9 | {{- end }}
10 | labels:
11 | app: {{ include "chartmuseum.fullname" . }}
12 | release: {{ .Release.Name | quote }}
13 | spec:
14 | capacity:
15 | storage: {{ .Values.persistence.pv.capacity.storage }}
16 | accessModes:
17 | - {{ .Values.persistence.pv.accessMode | quote }}
18 | nfs:
19 | server: {{ .Values.persistence.pv.nfs.server }}
20 | path: {{ .Values.persistence.pv.nfs.path | quote }}
21 | {{- end }}
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/templates/test-config.yaml:
--------------------------------------------------------------------------------
1 | {{- $frontend := index .Values "weave-scope-frontend" -}}
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: {{ template "weave-scope.fullname" . }}-tests
6 | labels:
7 | {{- include "weave-scope.helm_std_labels" . | indent 4 }}
8 | data:
9 | run.sh: |-
10 | {{ if $frontend.enabled }}
11 | @test "Testing Weave Scope UI is accessible" {
12 | curl --retry 12 --retry-delay 10 http://{{ .Values.global.service.name | default (include "toplevel.fullname" .) }}.{{ .Release.Namespace }}.svc:{{ .Values.global.service.port }}
13 | }
14 | {{- else }}
15 | @test "Null test if the frontend is not installed" {
16 | true
17 | }
18 | {{- end }}
19 |
--------------------------------------------------------------------------------
/guestbook/redis-master-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
2 | kind: Deployment
3 | metadata:
4 | name: redis-master
5 | labels:
6 | app: redis
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: redis
11 | role: master
12 | tier: backend
13 | replicas: 1
14 | template:
15 | metadata:
16 | labels:
17 | app: redis
18 | role: master
19 | tier: backend
20 | spec:
21 | containers:
22 | - name: master
23 | image: k8s.gcr.io/redis:e2e # or just image: redis
24 | resources:
25 | requests:
26 | cpu: 100m
27 | memory: 100Mi
28 | ports:
29 | - containerPort: 6379
30 |
--------------------------------------------------------------------------------
/logging-and-monitoring/commands.txt:
--------------------------------------------------------------------------------
1 | EFK Stack
2 | ---
3 | kubectl apply -f fluentd/
4 | kubectl create ns logging
5 | kubectl -n logging apply -f logging
6 |
7 | # After a few minutes
8 | kubectl port-forward svc/kibana -n logging 5601:5601
9 | # Sign in and configure an index pattern for logstash
10 | http://127.0.0.1:5601/app/kibana#/management/elasticsearch/index_management/indices?_g=()
11 |
12 | # Deploy test workload
13 | kubectl create deployment hello-moshe --image=redis:alpine
14 | kubectl create deployment hello-node --image=redis:alpine
15 |
16 | Weave-Scope
17 | ---
18 | https://www.weave.works/docs/scope/latest/installing/#k8s
19 |
20 | Jenkins Installation
21 | ---
22 | helm install jenkins stable/jenkins --set master.serviceType=NodePort
23 |
--------------------------------------------------------------------------------
/hpa/php-apache.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: php-apache
5 | spec:
6 | selector:
7 | matchLabels:
8 | run: php-apache
9 | template:
10 | metadata:
11 | labels:
12 | run: php-apache
13 | spec:
14 | containers:
15 | - name: php-apache
16 | image: registry.k8s.io/hpa-example
17 | ports:
18 | - containerPort: 80
19 | resources:
20 | limits:
21 | cpu: 500m
22 | requests:
23 | cpu: 200m
24 | ---
25 | apiVersion: v1
26 | kind: Service
27 | metadata:
28 | name: php-apache
29 | labels:
30 | run: php-apache
31 | spec:
32 | ports:
33 | - port: 80
34 | selector:
35 | run: php-apache
36 |
37 |
--------------------------------------------------------------------------------
/internal-networking/pod-with-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: fe-gate
5 | spec:
6 | selector:
7 | name: nginx
8 | type: NodePort
9 | ports:
10 | - name: foo # Actually, no port is needed.
11 | port: 80
12 | targetPort: 80
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: nginx
18 | labels:
19 | name: nginx
20 | spec:
21 | #hostname: frontend001
22 | containers:
23 | - image: nginx
24 | name: nginx-container
25 | ---
26 | apiVersion: v1
27 | kind: Pod
28 | metadata:
29 | name: debian
30 | labels:
31 | name: debian
32 | spec:
33 | containers:
34 | - image: debian
35 | name: debian-container
36 | command:
37 | - sleep
38 | - "3600"
39 |
--------------------------------------------------------------------------------
/logging-and-monitoring/fluentd/fluentd-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: fluentd
5 | namespace: kube-system
6 |
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRole
10 | metadata:
11 | name: fluentd
12 | namespace: kube-system
13 | rules:
14 | - apiGroups:
15 | - ""
16 | resources:
17 | - pods
18 | - namespaces
19 | verbs:
20 | - get
21 | - list
22 | - watch
23 |
24 | ---
25 |
26 | kind: ClusterRoleBinding
27 | apiVersion: rbac.authorization.k8s.io/v1
28 | metadata:
29 | name: fluentd
30 | roleRef:
31 | kind: ClusterRole
32 | name: fluentd
33 | apiGroup: rbac.authorization.k8s.io
34 | subjects:
35 | - kind: ServiceAccount
36 | name: fluentd
37 | namespace: kube-system
38 |
--------------------------------------------------------------------------------
/advanced-orchestration/statefulset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | ports:
9 | - port: 80
10 | name: web
11 | clusterIP: None
12 | selector:
13 | app: nginx
14 | ---
15 | apiVersion: apps/v1
16 | kind: StatefulSet
17 | metadata:
18 | name: web
19 | spec:
20 | selector:
21 | matchLabels:
22 | app: nginx # has to match .spec.template.metadata.labels
23 | serviceName: "nginx"
24 | replicas: 3 # by default is 1
25 | template:
26 | metadata:
27 | labels:
28 | app: nginx # has to match .spec.selector.matchLabels
29 | spec:
30 | terminationGracePeriodSeconds: 10
31 | containers:
32 | - name: nginx
33 | image: nginx
34 |
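After applying, the ordered startup and the stable network identity are easy to observe (busybox:1.28 is used because newer busybox images ship a broken nslookup):

    kubectl get pods -w -l app=nginx    # web-0 becomes Ready before web-1 starts, and so on
    kubectl run -i --tty dns-test --image=busybox:1.28 --restart=Never --rm \
      -- nslookup web-0.nginx           # each pod gets a stable DNS name via the headless Service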
--------------------------------------------------------------------------------
/advanced-orchestration/sts2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: webb
5 | spec:
6 | serviceName: "nginx"
7 | replicas: 2
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: registry.k8s.io/nginx-slim:0.8
19 | ports:
20 | - containerPort: 80
21 | name: web
22 | volumeMounts:
23 | - name: www
24 | mountPath: /usr/share/nginx/html
25 | volumeClaimTemplates:
26 | - metadata:
27 | name: www
28 | spec:
29 | accessModes: [ "ReadWriteOnce" ]
30 | resources:
31 | requests:
32 | storage: 1Gi
33 |
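The volumeClaimTemplates block gives every replica its own PersistentVolumeClaim, named <template>-<pod>; the claims outlive the pods and even the StatefulSet itself:

    kubectl get pvc -l app=nginx    # expect www-webb-0 and www-webb-1
    kubectl delete sts webb         # pods go away, the PVCs (and their data) remain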
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-frontend/templates/service.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | labels:
6 | {{- include "weave-scope-frontend.helm_std_labels" . | indent 4 }}
7 | component: frontend
8 | name: {{ .Values.global.service.name | default (include "toplevel.fullname" .) }}
9 | annotations:
10 | {{- include "weave-scope-frontend.annotations" . | indent 4 }}
11 | spec:
12 | ports:
13 | - name: http
14 | port: {{ .Values.global.service.port }}
15 | targetPort: http
16 | protocol: TCP
17 | selector:
18 | app: {{ template "toplevel.name" . }}
19 | release: {{ .Release.Name }}
20 | component: frontend
21 | type: {{ .Values.global.service.type }}
22 | {{- end -}}
23 |
--------------------------------------------------------------------------------
/internal-networking/example-network-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: test-network-policy
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | role: db
10 | policyTypes:
11 | - Ingress
12 | - Egress
13 | ingress:
14 | - from:
15 | - ipBlock:
16 | cidr: 172.17.0.0/16
17 | except:
18 | - 172.17.1.0/24
19 | - namespaceSelector:
20 | matchLabels:
21 | project: myproject
22 | - podSelector:
23 | matchLabels:
24 | role: frontend
25 | ports:
26 | - protocol: TCP
27 | port: 6379
28 | egress:
29 | - to:
30 | - ipBlock:
31 | cidr: 10.0.0.0/24
32 | ports:
33 | - protocol: TCP
34 | port: 5978
35 |
36 |
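Note that a NetworkPolicy only takes effect on clusters whose network plugin enforces it (Calico, Cilium, Weave Net, and similar); on a plugin without enforcement it is silently ignored. To inspect what was applied:

    kubectl -n default describe networkpolicy test-network-policy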
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-cluster-agent/templates/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | {{- if .Values.rbac.create }}
3 | apiVersion: rbac.authorization.k8s.io/v1beta1
4 | kind: ClusterRoleBinding
5 | metadata:
6 | labels:
7 | {{- include "weave-scope.helm_std_labels" . | indent 4 }}
8 | component: agent
9 | name: {{ include "toplevel.fullname" . }}
10 | annotations:
11 | {{- include "weave-scope.annotations" . | indent 4 }}
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: {{ template "weave-scope-agent.serviceAccountName" . }}
16 | subjects:
17 | - kind: ServiceAccount
18 | name: {{ template "weave-scope-agent.serviceAccountName" . }}
19 | namespace: {{ .Release.Namespace }}
20 | {{- end }}
21 | {{- end -}}
22 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-flannel.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/flannel/defaults/main.yml
2 |
3 | ## interface that should be used for flannel operations
4 | ## This is actually an inventory cluster-level item
5 | # flannel_interface:
6 |
7 | ## Select interface that should be used for flannel operations by regexp on Name or IP
8 | ## This is actually an inventory cluster-level item
9 | ## example: select interface with ip from net 10.0.0.0/23
10 | ## single quote and escape backslashes
11 | # flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}'
12 |
13 | # You can choose what type of flannel backend to use: 'vxlan' or 'host-gw'
14 | # for experimental backend
15 | # please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md
16 | # flannel_backend_type: "vxlan"
17 |
--------------------------------------------------------------------------------
/on-premise/kubespray/Vagrantfile:
--------------------------------------------------------------------------------
1 | $instance_name_prefix = "kub"
2 | $vm_cpus = 3
3 | $num_instances = 3
4 | $subnet = "10.0.20"
5 | $vm_memory = 2048
6 | $vm_gui = false
7 |
8 | Vagrant.configure("2") do |config|
9 | config.vm.box = "bento/centos-7.6"
10 | (1..$num_instances).each do |i|
11 | config.vm.define vm_name = "%s-%01d" % [$instance_name_prefix, i] do |node|
12 | node.vm.hostname = vm_name
13 | node.vm.provider :virtualbox do |vb|
14 | vb.memory = $vm_memory
15 | vb.cpus = $vm_cpus
16 | vb.gui = $vm_gui
17 | vb.linked_clone = true
18 | vb.customize ["modifyvm", :id, "--vram", "8"]
19 | end
20 | ip = "#{$subnet}.#{i+100}"
21 | node.vm.network :private_network, ip: ip
22 | end
23 | end
24 | end
25 |
--------------------------------------------------------------------------------
/security/application/pod-with-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx-secret
5 | labels:
6 | app: nginx-secret
7 | spec:
8 | containers:
9 | - name: nginx-secret
10 | image: nginx
11 | env:
12 | - name: AVIEL
13 | value: buskila
14 | - name: MYSQL_USER
15 | valueFrom:
16 | secretKeyRef:
17 | name: secret-example
18 | key: username
19 | - name: MYSQL_PASS
20 | valueFrom:
21 | secretKeyRef:
22 | name: secret-example
23 | key: password
24 | ports:
25 | - containerPort: 8080
26 | volumeMounts:
27 | - name: secret
28 | mountPath: "/etc/secret"
29 | readOnly: true
30 | volumes:
31 | - name: secret
32 | secret:
33 | secretName: secret-example
34 |
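This pod expects a Secret named secret-example with username and password keys. A hedged sketch of creating it from the username.txt and password.txt files that sit alongside this manifest (the directory's own commands.txt may do it differently):

    kubectl create secret generic secret-example \
      --from-file=username=username.txt --from-file=password=password.txt
    # verify both injection paths: environment variables and the mounted files
    kubectl exec nginx-secret -- env | grep MYSQL_
    kubectl exec nginx-secret -- ls /etc/secret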
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/commands.txt:
--------------------------------------------------------------------------------
1 | 1. kubectl label nodes minikube my.key=my.value
2 | kubectl label nodes minikube another-node-label-key=another-node-label-value
3 | 2. kubectl get nodes -l my.key=my.value
4 | 3. kubectl apply -f pod-node-selector.yaml
5 | 4. kubectl apply -f pod-with-node-affinity.yaml
6 | 5. minikube start --nodes 2
7 | 6. kubectl apply -f pod-with-pod-affinity.yaml
8 | 7. kubectl get pods -o wide
9 | 8. kubectl taint nodes node1 env=test:NoSchedule
10 | 9. kubectl apply -f pod-with-resources.yaml
11 | 10. kubectl get pod frontend -o yaml
12 | 11. kubectl create ns default-mem-example
13 | 12. kubectl apply -f memory-defaults.yaml -n default-mem-example
14 | 13. kubectl apply -f pod-without-resources.yaml -n default-mem-example
15 | 14. kubectl get pod default-mem-demo -o yaml
16 | 15. kubectl apply -f pod-limits-only.yaml -n default-mem-example
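To verify where the scheduler actually placed things after any of the steps above:

    kubectl get pods -o wide                             # node assignment per pod
    kubectl describe pod <pod-name> | grep -A5 Events    # scheduling decisions and failures
    kubectl describe node minikube | grep -i taint       # taints currently on the node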
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/pod-with-node-affinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: with-node-affinity
5 | spec:
6 | affinity:
7 | nodeAffinity:
8 | requiredDuringSchedulingIgnoredDuringExecution:
9 | nodeSelectorTerms:
10 | - matchExpressions:
11 | - key: my.key
12 | operator: In
13 | values:
14 | - my.value
15 | - my.value1
16 | - my.value2
17 | preferredDuringSchedulingIgnoredDuringExecution:
18 | - weight: 1
19 | preference:
20 | matchExpressions:
21 | - key: another-node-label-key
22 | operator: In
23 | values:
24 | - another-node-label-value
25 | containers:
26 | - name: with-node-affinity
27 | image: k8s.gcr.io/pause:2.0
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-contiv.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/contiv/defaults/main.yml
2 |
3 | # Forwarding mode: bridge or routing
4 | # contiv_fwd_mode: routing
5 |
6 | ## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
7 | ## In this case, you may need to peer with an uplink
8 | ## NB: The hostvars must contain a key "contiv" of which value is a dict containing "router_ip", "as"(defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
9 | # contiv_peer_with_uplink_leaf: false
10 | # contiv_global_as: "65002"
11 | # contiv_global_neighbor_as: "500"
12 |
13 | # Fabric mode: aci, aci-opflex or default
14 | # contiv_fabric_mode: default
15 |
16 | # Default netmode: vxlan or vlan
17 | # contiv_net_mode: vxlan
18 |
19 | # Dataplane interface
20 | # contiv_vlan_interface: ""
21 |
--------------------------------------------------------------------------------
/on-premise/kubespray/commands.txt:
--------------------------------------------------------------------------------
1 | # setting up machines
2 | vagrant up # will spin up 3 centos 7 machines
3 | ssh-keygen # create an ssh key pair if you don't already have one (accept the defaults: press enter three times)
4 | ssh-copy-id vagrant@localhost -p 2222 # password is: vagrant
5 | ssh-copy-id vagrant@localhost -p 2200 # password is: vagrant
6 | ssh-copy-id vagrant@localhost -p 2201 # password is: vagrant
7 |
8 | # setting up kubespray
9 | git clone https://github.com/kubernetes-sigs/kubespray.git
10 | cd kubespray
11 |
12 | # create a python virtual environment for kubespray's dependencies
13 | VENVDIR=venv
14 | virtualenv $VENVDIR
15 | source $VENVDIR/bin/activate
16 | pip install -r requirements.txt
17 | cp -r inventory/sample inventory/test_env
18 | cp ../test_env/inventory.ini inventory/test_env/inventory.ini
19 | venv/bin/ansible-playbook -b -u vagrant -i inventory/test_env/inventory.ini cluster.yml
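Once the playbook completes, a quick sanity check from the first node (a sketch; kubespray places the admin kubeconfig at /etc/kubernetes/admin.conf by default):

    ssh -p 2222 vagrant@localhost
    sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes   # expect kub-1..kub-3 Ready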
--------------------------------------------------------------------------------
/logging-and-monitoring/logging/kibana.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: kibana
5 | spec:
6 | selector:
7 | matchLabels:
8 | run: kibana
9 | template:
10 | metadata:
11 | labels:
12 | run: kibana
13 | spec:
14 | containers:
15 | - name: kibana
16 | image: kibana:7.3.2
17 | env:
18 | - name: ELASTICSEARCH_URL
19 | value: http://10.96.201.241:9200 # the elasticsearch Service's cluster IP; http://elasticsearch:9200 also resolves in-namespace
20 | - name: XPACK_SECURITY_ENABLED
21 | value: "true"
22 | ports:
23 | - containerPort: 5601
24 | name: http
25 | protocol: TCP
26 |
27 | ---
28 |
29 | apiVersion: v1
30 | kind: Service
31 | metadata:
32 | name: kibana
33 | labels:
34 | service: kibana
35 | spec:
36 | type: NodePort
37 | selector:
38 | run: kibana
39 | ports:
40 | - port: 5601
41 | targetPort: 5601
42 |
43 |
--------------------------------------------------------------------------------
/kube-start/pod-with-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: fe-gate
5 | spec:
6 | selector:
7 | name: nginx
8 | type: NodePort
9 | ports:
10 | - name: foo # the port name is optional when the Service exposes a single port
11 | port: 80
12 | targetPort: 80
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: nginx
18 | labels:
19 | name: nginx
20 | spec:
21 | #hostname: frontend001
22 | containers:
23 | - image: exoplatform/mysqltuner
24 | name: nginx-container
25 | command:
26 | - perl
27 | - /mysqltuner.pl
28 | - --host
29 | - edge-mariadb
30 | - --user
31 | - root
32 | - --pass
33 | - 22jIBq6szjwC7M6iMogx
34 | - --forcemem
35 | - "1000"
36 | ---
37 | apiVersion: v1
38 | kind: Pod
39 | metadata:
40 | name: debian
41 | labels:
42 | name: debian
43 | spec:
44 | containers:
45 | - image: debian
46 | name: debian-container
47 | command:
48 | - sleep
49 | - "3600"
50 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/etcd.yml:
--------------------------------------------------------------------------------
1 | ## Etcd auto compaction retention for mvcc key value store in hour
2 | # etcd_compaction_retention: 0
3 |
4 | ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
5 | # etcd_metrics: basic
6 |
7 | ## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
8 | ## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
9 | # etcd_memory_limit: "512M"
10 |
11 | ## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
12 | ## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
13 | ## etcd documentation for more information.
14 | # etcd_quota_backend_bytes: "2G"
15 |
16 | ### ETCD: disable peer client cert authentication.
17 | # This affects ETCD_PEER_CLIENT_CERT_AUTH variable
18 | # etcd_peer_client_auth: true
19 |
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/pvc.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) -}}
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 | name: {{ include "chartmuseum.fullname" . }}
6 | labels:
7 | app: {{ include "chartmuseum.fullname" . }}
8 | release: {{ .Release.Name | quote }}
9 | {{- if .Values.persistence.labels }}
10 | {{ toYaml .Values.persistence.labels | indent 4 }}
11 | {{- end }}
12 | spec:
13 | accessModes:
14 | - {{ .Values.persistence.accessMode | quote }}
15 | resources:
16 | requests:
17 | storage: {{ .Values.persistence.size | quote }}
18 | {{- if .Values.persistence.storageClass }}
19 | {{- if (eq "-" .Values.persistence.storageClass) }}
20 | storageClassName: ""
21 | {{- else }}
22 | storageClassName: "{{ .Values.persistence.storageClass }}"
23 | {{- end }}
24 | {{- else if and .Values.persistence.volumeName (.Values.persistence.pv.enabled) }}
25 | volumeName: "{{ .Values.persistence.volumeName }}"
26 | {{- end }}
27 | {{- end }}
28 |
--------------------------------------------------------------------------------
/exercises/1/ns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: development
6 | ---
7 | apiVersion: v1
8 | kind: Namespace
9 | metadata:
10 | name: production
11 | ---
12 | apiVersion: apps/v1
13 | kind: Deployment
14 | metadata:
15 | name: nginx
16 | namespace: development
17 | spec:
18 | selector:
19 | matchLabels:
20 | app: nginx
21 | replicas: 1
22 | template:
23 | metadata:
24 | labels:
25 | app: nginx
26 | spec:
27 | containers:
28 | - name: nginx
29 | image: nginx:1.7.9
30 | ports:
31 | - containerPort: 80
32 | ---
33 | apiVersion: apps/v1
34 | kind: Deployment
35 | metadata:
36 | name: nginx
37 | namespace: production
38 | spec:
39 | selector:
40 | matchLabels:
41 | app: nginx
42 | replicas: 3
43 | template:
44 | metadata:
45 | labels:
46 | app: nginx
47 | spec:
48 | containers:
49 | - name: nginx
50 | image: nginx:1.7.9
51 | ports:
52 | - containerPort: 80
53 |
54 |
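To confirm the two environments diverge as intended (1 replica in development, 3 in production):

    kubectl get deploy nginx -n development
    kubectl get deploy nginx -n production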
--------------------------------------------------------------------------------
/logging-and-monitoring/logging/elastic-stack.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: elasticsearch
5 | spec:
6 | selector:
7 | matchLabels:
8 | component: elasticsearch
9 | template:
10 | metadata:
11 | labels:
12 | component: elasticsearch
13 | spec:
14 | containers:
15 | - name: elasticsearch
16 | image: elasticsearch:7.3.2
17 | env:
18 | - name: discovery.type
19 | value: single-node
20 | ports:
21 | - containerPort: 9200
22 | name: http
23 | protocol: TCP
24 | resources:
25 | limits:
26 | cpu: 500m
27 | memory: 3Gi
28 | requests:
29 | cpu: 500m
30 | memory: 3Gi
31 |
32 | ---
33 |
34 | apiVersion: v1
35 | kind: Service
36 | metadata:
37 | name: elasticsearch
38 | labels:
39 | service: elasticsearch
40 | spec:
41 | type: NodePort
42 | selector:
43 | component: elasticsearch
44 | ports:
45 | - port: 9200
46 | targetPort: 9200
47 |
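kibana.yaml in this directory points ELASTICSEARCH_URL at a hard-coded cluster IP; that IP is whatever this Service happened to receive, so look yours up before applying (or simply use the in-namespace DNS name http://elasticsearch:9200):

    kubectl -n logging get svc elasticsearch -o jsonpath='{.spec.clusterIP}'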
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/templates/weave-scope-tests.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ .Release.Name }}-ui-test-{{ randAlphaNum 5 | lower }}"
5 | annotations:
6 | "helm.sh/hook": test-success
7 | labels:
8 | {{- include "weave-scope.helm_std_labels" . | indent 4 }}
9 | spec:
10 | initContainers:
11 | - name: "test-framework"
12 | image: "dduportal/bats:0.4.0"
13 | command:
14 | - "bash"
15 | - "-c"
16 | - |
17 | set -ex
18 | # copy bats to tools dir
19 | cp -R /usr/local/libexec/ /tools/bats/
20 | volumeMounts:
21 | - mountPath: /tools
22 | name: tools
23 | containers:
24 | - name: {{ .Release.Name }}-ui-test
25 | image: dduportal/bats:0.4.0
26 | command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
27 | volumeMounts:
28 | - mountPath: /tests
29 | name: tests
30 | readOnly: true
31 | - mountPath: /tools
32 | name: tools
33 | volumes:
34 | - name: tests
35 | configMap:
36 | name: {{ template "weave-scope.fullname" . }}-tests
37 | - name: tools
38 | emptyDir: {}
39 | restartPolicy: Never
40 |
--------------------------------------------------------------------------------
/exercises/4/todo/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := include "todo.fullname" . -}}
3 | {{- $svcPort := .Values.service.port -}}
4 | {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
5 | apiVersion: networking.k8s.io/v1beta1
6 | {{- else -}}
7 | apiVersion: extensions/v1beta1
8 | {{- end }}
9 | kind: Ingress
10 | metadata:
11 | name: {{ $fullName }}
12 | labels:
13 | {{ include "todo.labels" . | indent 4 }}
14 | {{- with .Values.ingress.annotations }}
15 | annotations:
16 | {{- toYaml . | nindent 4 }}
17 | {{- end }}
18 | spec:
19 | {{- if .Values.ingress.tls }}
20 | tls:
21 | {{- range .Values.ingress.tls }}
22 | - hosts:
23 | {{- range .hosts }}
24 | - {{ . | quote }}
25 | {{- end }}
26 | secretName: {{ .secretName }}
27 | {{- end }}
28 | {{- end }}
29 | rules:
30 | {{- range .Values.ingress.hosts }}
31 | - host: {{ .host | quote }}
32 | http:
33 | paths:
34 | {{- range .paths }}
35 | - path: {{ . }}
36 | backend:
37 | serviceName: {{ $fullName }}
38 | servicePort: {{ $svcPort }}
39 | {{- end }}
40 | {{- end }}
41 | {{- end }}
42 |
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | {{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | {{- if .Values.serviceMonitor.labels }}
6 | labels:
7 | {{ toYaml .Values.serviceMonitor.labels | indent 4 }}
8 | {{- end }}
9 | name: {{ template "chartmuseum.fullname" . }}
10 | namespace: {{ .Release.Namespace }}
11 | {{- if .Values.serviceMonitor.namespace }}
12 | namespace: {{ .Values.serviceMonitor.namespace }}
13 | {{- end }}
14 | spec:
15 | endpoints:
16 | - targetPort: 8080
17 | {{- if .Values.serviceMonitor.interval }}
18 | interval: {{ .Values.serviceMonitor.interval }}
19 | {{- end }}
20 | {{- if .Values.serviceMonitor.metricsPath }}
21 | path: {{ .Values.serviceMonitor.metricsPath }}
22 | {{- end }}
23 | {{- if .Values.serviceMonitor.timeout }}
24 | scrapeTimeout: {{ .Values.serviceMonitor.timeout }}
25 | {{- end }}
26 | jobLabel: {{ template "chartmuseum.fullname" . }}
27 | namespaceSelector:
28 | matchNames:
29 | - {{ .Release.Namespace }}
30 | selector:
31 | matchLabels:
32 | app: {{ template "chartmuseum.name" . }}
33 | release: {{ .Release.Name }}
34 | {{- end }}
35 |
--------------------------------------------------------------------------------
/guestbook/frontend-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
2 | kind: Deployment
3 | metadata:
4 | name: frontend
5 | labels:
6 | app: guestbook
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: guestbook
11 | tier: frontend
12 | replicas: 3
13 | template:
14 | metadata:
15 | labels:
16 | app: guestbook
17 | tier: frontend
18 | spec:
19 | containers:
20 | - name: php-redis
21 | image: gcr.io/google-samples/gb-frontend:v4
22 | resources:
23 | requests:
24 | cpu: 100m
25 | memory: 100Mi
26 | env:
27 | - name: GET_HOSTS_FROM
28 | value: dns
29 | # Using `GET_HOSTS_FROM=dns` requires your cluster to
30 | # provide a dns service. As of Kubernetes 1.3, DNS is a built-in
31 | # service launched automatically. However, if the cluster you are using
32 | # does not have a built-in DNS service, you can instead
33 | # access an environment variable to find the master
34 | # service's host. To do so, comment out the 'value: dns' line above, and
35 | # uncomment the line below:
36 | # value: env
37 | ports:
38 | - containerPort: 80
39 |
40 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/all/openstack.yml:
--------------------------------------------------------------------------------
1 | ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
2 | # openstack_blockstorage_version: "v1/v2/auto (default)"
3 | # openstack_blockstorage_ignore_volume_az: yes
4 | ## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
5 | # openstack_lbaas_enabled: True
6 | # openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
7 | ## To enable automatic floating ip provisioning, specify a subnet.
8 | # openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
9 | ## Override default LBaaS behavior
10 | # openstack_lbaas_use_octavia: False
11 | # openstack_lbaas_method: "ROUND_ROBIN"
12 | # openstack_lbaas_provider: "haproxy"
13 | # openstack_lbaas_create_monitor: "yes"
14 | # openstack_lbaas_monitor_delay: "1m"
15 | # openstack_lbaas_monitor_timeout: "30s"
16 | # openstack_lbaas_monitor_max_retries: "3"
17 |
18 | ## To use Cinder CSI plugin to provision volumes set this value to true
19 | ## Make sure to source in the openstack credentials
20 | # cinder_csi_enabled: true
21 | # cinder_csi_controller_replicas: 1
22 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-frontend/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := .Values.global.service.name | default (include "toplevel.fullname" .) -}}
3 | {{- $ingressPaths := .Values.ingress.paths -}}
4 | apiVersion: extensions/v1beta1
5 | kind: Ingress
6 | metadata:
7 | labels:
8 | {{- include "weave-scope-frontend.helm_std_labels" . | indent 4 }}
9 | component: frontend
10 | name: {{ template "weave-scope-frontend.fullname" . }}
11 | annotations:
12 | {{- include "weave-scope.annotations" . | indent 4 }}
13 | {{- with .Values.ingress.annotations }}
14 | {{- toYaml . | nindent 4 }}
15 | {{- end }}
16 | spec:
17 | {{- if .Values.ingress.tls }}
18 | tls:
19 | {{- range .Values.ingress.tls }}
20 | - hosts:
21 | {{- range .hosts }}
22 | - {{ . | quote }}
23 | {{- end }}
24 | secretName: {{ .secretName }}
25 | {{- end }}
26 | {{- end }}
27 | rules:
28 | {{- range .Values.ingress.hosts }}
29 | - host: {{ . | quote }}
30 | http:
31 | paths:
32 | {{- range $ingressPaths }}
33 | - path: {{ . }}
34 | backend:
35 | serviceName: {{ $fullName }}
36 | servicePort: http
37 | {{- end }}
38 | {{- end }}
39 | {{- end }}
40 |
--------------------------------------------------------------------------------
/ingress-contoller/cafe-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: coffee
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: coffee
10 | template:
11 | metadata:
12 | labels:
13 | app: coffee
14 | spec:
15 | containers:
16 | - name: coffee
17 | image: nginxdemos/hello:plain-text
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | name: coffee-svc
25 | spec:
26 | ports:
27 | - port: 80
28 | targetPort: 80
29 | protocol: TCP
30 | name: http
31 | selector:
32 | app: coffee
33 | ---
34 | apiVersion: apps/v1
35 | kind: Deployment
36 | metadata:
37 | name: tea
38 | spec:
39 | replicas: 1
40 | selector:
41 | matchLabels:
42 | app: tea
43 | template:
44 | metadata:
45 | labels:
46 | app: tea
47 | spec:
48 | containers:
49 | - name: tea
50 | image: nginxdemos/hello:plain-text
51 | ports:
52 | - containerPort: 80
53 | ---
54 | apiVersion: v1
55 | kind: Service
56 | metadata:
57 | name: tea-svc
58 | spec:
59 | ports:
60 | - port: 80
61 | targetPort: 80
62 | protocol: TCP
63 | name: http
64 | selector:
65 | app: tea
66 |
--------------------------------------------------------------------------------
/exercises/2/cheese-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: cheddar
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: cheddar
10 | template:
11 | metadata:
12 | labels:
13 | app: cheddar
14 | spec:
15 | containers:
16 | - name: cheddar
17 | image: nginxdemos/hello:plain-text
18 | ports:
19 | - containerPort: 80
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | name: cheddar-svc
25 | spec:
26 | ports:
27 | - port: 80
28 | targetPort: 80
29 | protocol: TCP
30 | name: http
31 | selector:
32 | app: cheddar
33 | ---
34 | apiVersion: apps/v1
35 | kind: Deployment
36 | metadata:
37 | name: gauda
38 | spec:
39 | replicas: 1
40 | selector:
41 | matchLabels:
42 | app: gauda
43 | template:
44 | metadata:
45 | labels:
46 | app: gauda
47 | spec:
48 | containers:
49 | - name: gauda
50 | image: nginxdemos/hello:plain-text
51 | ports:
52 | - containerPort: 80
53 | ---
54 | apiVersion: v1
55 | kind: Service
56 | metadata:
57 | name: gauda-svc
58 | spec:
59 | ports:
60 | - port: 80
61 | targetPort: 80
62 | protocol: TCP
63 | name: http
64 | selector:
65 | app: gauda
66 |
--------------------------------------------------------------------------------
/guestbook/redis-slave-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
2 | kind: Deployment
3 | metadata:
4 | name: redis-slave
5 | labels:
6 | app: redis
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: redis
11 | role: slave
12 | tier: backend
13 | replicas: 2
14 | template:
15 | metadata:
16 | labels:
17 | app: redis
18 | role: slave
19 | tier: backend
20 | spec:
21 | containers:
22 | - name: slave
23 | image: gcr.io/google_samples/gb-redisslave:v1
24 | resources:
25 | requests:
26 | cpu: 100m
27 | memory: 100Mi
28 | env:
29 | - name: GET_HOSTS_FROM
30 | value: dns
31 | # Using `GET_HOSTS_FROM=dns` requires your cluster to
32 | # provide a dns service. As of Kubernetes 1.3, DNS is a built-in
33 | # service launched automatically. However, if the cluster you are using
34 | # does not have a built-in DNS service, you can instead
35 | # access an environment variable to find the master
36 | # service's host. To do so, comment out the 'value: dns' line above, and
37 | # uncomment the line below:
38 | # value: env
39 | ports:
40 | - containerPort: 6379
41 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-frontend/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | apiVersion: {{ template "deployment.apiVersion" . }}
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | {{- include "weave-scope-frontend.helm_std_labels" . | indent 4 }}
7 | component: frontend
8 | name: {{ template "weave-scope-frontend.fullname" . }}
9 | annotations:
10 | {{- include "weave-scope.annotations" . | indent 4 }}
11 | spec:
12 | replicas: 1
13 | selector:
14 | matchLabels:
15 | app: {{ template "toplevel.name" . }}
16 | release: {{ .Release.Name }}
17 | component: frontend
18 | template:
19 | metadata:
20 | labels:
21 | {{- include "weave-scope-frontend.helm_std_labels" . | indent 8 }}
22 | component: frontend
23 | spec:
24 | containers:
25 | - name: {{ template "weave-scope-frontend.name" . }}
26 | image: "{{ .Values.global.image.repository }}:{{ .Values.global.image.tag }}"
27 | imagePullPolicy: "{{ .Values.global.image.pullPolicy }}"
28 | args:
29 | - "--no-probe"
30 | {{- range $arg := .Values.flags }}
31 | - {{ $arg | quote }}
32 | {{- end }}
33 | ports:
34 | - name: http
35 | containerPort: 4040
36 | protocol: TCP
37 | resources:
38 | {{ toYaml .Values.resources | indent 12 }}
39 | {{- end -}}
40 |
--------------------------------------------------------------------------------
/on-premise/rancher/commands.txt:
--------------------------------------------------------------------------------
1 | # start the rancher server and kubernetes cluster (requires 8GB of RAM)
2 | vagrant up
3 |
4 | 1. Wait for rancher to start (about 5 minutes), then browse to: https://10.0.20.101
5 | 2. Log in with admin/admin, set the server address to https://10.0.20.101, and click OK.
6 | 3. Go to Clusters -> Add Cluster, choose "From existing nodes (Custom)", and name it "sandbox".
7 | Under "Kubernetes Options" -> "Network Provider" choose "Calico" and hit Next.
8 | 4. Scroll down; under "Customize Node Run Command" select etcd and controlplane together.
9 | Click "Show advanced settings" and set the private and public address to: 10.0.20.102
10 | 5. Copy the generated command, then from this directory (k8s-experts/on-premise/rancher):
11 | 1. vagrant ssh kub-2
12 | 2. Paste the command: sudo docker run ...
13 | 6. Wait for everything to finish (takes about 5-10 minutes)
14 | 7. Go to the rancher management console, choose "sandbox" cluster. Navigate to Tools -> Monitoring
15 |
16 | # Adding another worker to the cluster
17 | 1. After the cluster is configured successfully, click "Cluster" -> Edit (3-dots icon),
18 | scroll down, and under "Customize Node Run Command" select worker only.
19 | Click "Show advanced settings" and set the private and public address to: 10.0.20.103
20 | 2. Copy the generated command, then from this directory (k8s-experts/on-premise/rancher):
21 | 1. vagrant ssh kub-3
22 | 2. Paste the command: sudo docker run ...
--------------------------------------------------------------------------------
/kubeadm-deployment/commands.txt:
--------------------------------------------------------------------------------
1 | Creating the machine:
2 | 1. vagrant init ubuntu/bionic64
3 | 2. vagrant ssh
4 |
5 | When connected to the server:
6 | 1. sudo apt-get update && sudo apt-get install -y apt-transport-https curl
7 | 2. curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
8 | 3. cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
9 |    deb https://apt.kubernetes.io/ kubernetes-xenial main
10 |    EOF
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-calico.yml (fragment):
--------------------------------------------------------------------------------
40 | # Generate TLS certs for secure typha<->calico-node communication
41 | # typha_secure: false
42 |
43 | # Scaling typha: 1 replica per 100 nodes is adequate
44 | # Number of typha replicas
45 | # typha_replicas: 1
46 |
47 | # Set max typha connections
48 | # typha_max_connections_lower_limit: 300
49 |
--------------------------------------------------------------------------------
/exercises/4/todo/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for todo.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | replicaCount: 3
6 |
7 | image:
8 | repository: prydonius/todo
9 | tag: 1.0.0
10 | pullPolicy: IfNotPresent
11 |
12 | imagePullSecrets: []
13 | nameOverride: ""
14 | fullnameOverride: ""
15 |
16 | serviceAccount:
17 | # Specifies whether a service account should be created
18 | create: true
19 | # The name of the service account to use.
20 | # If not set and create is true, a name is generated using the fullname template
21 | name:
22 |
23 | podSecurityContext: {}
24 | # fsGroup: 2000
25 |
26 | securityContext: {}
27 | # capabilities:
28 | # drop:
29 | # - ALL
30 | # readOnlyRootFilesystem: true
31 | # runAsNonRoot: true
32 | # runAsUser: 1000
33 |
34 | service:
35 | type: NodePort
36 | port: 80
37 |
38 | ingress:
39 | enabled: false
40 | annotations: {}
41 | # kubernetes.io/ingress.class: nginx
42 | # kubernetes.io/tls-acme: "true"
43 | hosts:
44 | - host: chart-example.local
45 | paths: []
46 |
47 | tls: []
48 | # - secretName: chart-example-tls
49 | # hosts:
50 | # - chart-example.local
51 |
52 | resources: {}
53 | # We usually recommend not to specify default resources and to leave this as a conscious
54 | # choice for the user. This also increases chances charts run on environments with little
55 | # resources, such as Minikube. If you do want to specify resources, uncomment the following
56 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
57 | # limits:
58 | # cpu: 100m
59 | # memory: 128Mi
60 | # requests:
61 | # cpu: 100m
62 | # memory: 128Mi
63 |
64 | nodeSelector: {}
65 |
66 | tolerations: []
67 |
68 | affinity: {}
69 |
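A hedged usage sketch for overriding these defaults at install time (the release name todo is arbitrary; run from the exercises/4 directory):

    helm install todo ./todo --set replicaCount=1 --set ingress.enabled=true
    helm get values todo   # shows only the overridden values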
--------------------------------------------------------------------------------
/logging-and-monitoring/fluentd/fluentd-daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: fluentd
5 | namespace: kube-system
6 | labels:
7 | k8s-app: fluentd-logging
8 | version: v1
9 | kubernetes.io/cluster-service: "true"
10 |
11 | spec:
12 | selector:
13 | matchLabels:
14 | k8s-app: fluentd-logging
15 | version: v1
16 | kubernetes.io/cluster-service: "true"
17 | template:
18 | metadata:
19 | labels:
20 | k8s-app: fluentd-logging
21 | version: v1
22 | kubernetes.io/cluster-service: "true"
23 | spec:
24 | serviceAccount: fluentd
25 | serviceAccountName: fluentd
26 | tolerations:
27 | - key: node-role.kubernetes.io/master
28 | effect: NoSchedule
29 | containers:
30 | - name: fluentd
31 | image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch
32 | env:
33 | - name: FLUENT_ELASTICSEARCH_HOST
34 | value: "elasticsearch.logging"
35 | - name: FLUENT_ELASTICSEARCH_PORT
36 | value: "9200"
37 | - name: FLUENT_ELASTICSEARCH_SCHEME
38 | value: "http"
39 | - name: FLUENT_UID
40 | value: "0"
41 | resources:
42 | limits:
43 | memory: 200Mi
44 | requests:
45 | cpu: 100m
46 | memory: 200Mi
47 | volumeMounts:
48 | - name: varlog
49 | mountPath: /var/log
50 | - name: varlibdockercontainers
51 | mountPath: /var/lib/docker/containers
52 | readOnly: true
53 | terminationGracePeriodSeconds: 30
54 | volumes:
55 | - name: varlog
56 | hostPath:
57 | path: /var/log
58 | - name: varlibdockercontainers
59 | hostPath:
60 | path: /var/lib/docker/containers
61 |
--------------------------------------------------------------------------------
/assigning-pods-to-nodes/pod-with-pod-affinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: redis-cache
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: store
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | app: store
14 | spec:
15 | affinity:
16 | podAntiAffinity:
17 | requiredDuringSchedulingIgnoredDuringExecution:
18 | - labelSelector:
19 | matchExpressions:
20 | - key: app
21 | operator: In
22 | values:
23 | - store
24 | topologyKey: "kubernetes.io/hostname"
25 | containers:
26 | - name: redis-server
27 | image: redis:3.2-alpine
28 | ---
29 | apiVersion: apps/v1
30 | kind: Deployment
31 | metadata:
32 | name: web-server
33 | spec:
34 | selector:
35 | matchLabels:
36 | app: web-store
37 | replicas: 2
38 | template:
39 | metadata:
40 | labels:
41 | app: web-store
42 | spec:
43 | affinity:
44 | podAntiAffinity:
45 | requiredDuringSchedulingIgnoredDuringExecution:
46 | - labelSelector:
47 | matchExpressions:
48 | - key: app
49 | operator: In
50 | values:
51 | - web-store
52 | topologyKey: "kubernetes.io/hostname"
53 | podAffinity:
54 | requiredDuringSchedulingIgnoredDuringExecution:
55 | - labelSelector:
56 | matchExpressions:
57 | - key: app
58 | operator: In
59 | values:
60 | - store
61 | topologyKey: "kubernetes.io/hostname"
62 | containers:
63 | - name: web-app
64 | image: nginx:1.16-alpine
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled }}
2 | {{- $servicePort := .Values.service.externalPort -}}
3 | {{- $serviceName := include "chartmuseum.fullname" . -}}
4 | {{- $ingressExtraPaths := .Values.ingress.extraPaths -}}
5 | ---
6 | {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
7 | apiVersion: networking.k8s.io/v1beta1
8 | {{- else }}
9 | apiVersion: extensions/v1beta1
10 | {{- end }}
11 | kind: Ingress
12 | metadata:
13 | name: {{ include "chartmuseum.fullname" . }}
14 | annotations:
15 | {{ toYaml .Values.ingress.annotations | indent 4 }}
16 | labels:
17 | {{- if .Values.ingress.labels }}
18 | {{ toYaml .Values.ingress.labels | indent 4 }}
19 | {{- end }}
20 | {{ include "chartmuseum.labels.standard" . | indent 4 }}
21 | spec:
22 | rules:
23 | {{- range .Values.ingress.hosts }}
24 | - host: {{ .name }}
25 | http:
26 | paths:
27 | {{- range $ingressExtraPaths }}
28 | - path: {{ default "/" .path | quote }}
29 | backend:
30 | {{- if $.Values.service.servicename }}
31 | serviceName: {{ $.Values.service.servicename }}
32 | {{- else }}
33 | serviceName: {{ default $serviceName .service }}
34 | {{- end }}
35 | servicePort: {{ default $servicePort .port }}
36 | {{- end }}
37 | - path: {{ default "/" .path | quote }}
38 | backend:
39 | {{- if $.Values.service.servicename }}
40 | serviceName: {{ $.Values.service.servicename }}
41 | {{- else }}
42 | serviceName: {{ default $serviceName .service }}
43 | {{- end }}
44 | servicePort: {{ default $servicePort .servicePort }}
45 | {{- end }}
46 | tls:
47 | {{- range .Values.ingress.hosts }}
48 | {{- if .tls }}
49 | - hosts:
50 | - {{ .name }}
51 | secretName: {{ .tlsSecret }}
52 | {{- end }}
53 | {{- end }}
54 | {{- end -}}
55 |
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | ** Please be patient while the chart is being deployed **
2 |
3 | Get the ChartMuseum URL by running:
4 |
5 | {{- if contains "NodePort" .Values.service.type }}
6 |
7 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "chartmuseum.fullname" . }})
8 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
9 | echo http://$NODE_IP:$NODE_PORT{{ .Values.env.open.CONTEXT_PATH }}/
10 |
11 | {{- else if contains "LoadBalancer" .Values.service.type }}
12 |
13 | ** Please ensure an external IP is associated to the {{ template "chartmuseum.fullname" . }} service before proceeding **
14 | ** Watch the status using: kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "chartmuseum.fullname" . }} **
15 |
16 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
17 | echo http://$SERVICE_IP:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
18 |
19 | OR
20 |
21 | export SERVICE_HOST=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "chartmuseum.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
22 | echo http://$SERVICE_HOST:{{ .Values.service.externalPort }}{{ .Values.env.open.CONTEXT_PATH }}/
23 |
24 | {{- else if contains "ClusterIP" .Values.service.type }}
25 |
26 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "chartmuseum.name" . }}" -l "release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
27 | echo http://127.0.0.1:8080{{ .Values.env.open.CONTEXT_PATH }}/
28 | kubectl port-forward $POD_NAME 8080:8080 --namespace {{ .Release.Namespace }}
29 |
30 | {{- end }}
31 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | {{- $agent := index .Values "weave-scope-agent" -}}
2 | You should now be able to access the Scope frontend in your web browser, by
3 | {{- if $agent.probeToken }}
4 | logging into https://cloud.weave.works/ with your credentials.
5 | {{- else if $agent.scopeFrontend }} going to http://{{ $agent.scopeFrontend }}.
6 | {{- else if eq .Values.global.service.type "LoadBalancer" }}
7 | going to the URL given by:
8 |
9 | kubectl -n {{ .Release.Namespace }} get svc {{ .Values.global.service.name | default (include "toplevel.fullname" .) }} \
10 | -o jsonpath='http://{.status.loadBalancer.ingress[0].hostname}:{{ .Values.global.service.port }}/{"\n"}'
11 | {{- else if eq .Values.global.service.type "NodePort" }}
12 | going to the address or hostname of any node in the cluster, using http
13 | and the port given by:
14 |
15 | SCOPE_PORT=$(kubectl -n {{ .Release.Namespace }} get svc {{ .Values.global.service.name | default (include "toplevel.fullname" .) }} \
16 | -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}'); echo $SCOPE_PORT
17 |
18 | Most likely one or more of the URLs given by this pipeline will work:
19 |
20 | SCOPE_PORT=$(kubectl -n {{ .Release.Namespace }} get svc {{ .Values.global.service.name }} \
21 | -o jsonpath='{.spec.ports[?(@.name=="http")].nodePort}'); \
22 | kubectl get nodes -o jsonpath='{.items[0].status.addresses[*].address}' | \
23 | xargs -I{} -d" " echo http://{}:$SCOPE_PORT
24 | {{- else }}
25 | using kubectl port-forward:
26 |
27 | kubectl -n {{ .Release.Namespace }} port-forward $(kubectl -n {{ .Release.Namespace }} get endpoints \
28 | {{ .Values.global.service.name | default (include "toplevel.fullname" .) }} -o jsonpath='{.subsets[0].addresses[0].targetRef.name}') 8080:4040
29 |
30 | then browsing to http://localhost:8080/.
31 | {{- end }}
32 | For more details on using Weave Scope, see the Weave Scope documentation:
33 |
34 | https://www.weave.works/docs/scope/latest/introducing/
35 |
--------------------------------------------------------------------------------
/exercises/4/todo/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "todo.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 |
9 | {{/*
10 | Create a default fully qualified app name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name.
13 | */}}
14 | {{- define "todo.fullname" -}}
15 | {{- if .Values.fullnameOverride -}}
16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
17 | {{- else -}}
18 | {{- $name := default .Chart.Name .Values.nameOverride -}}
19 | {{- if contains $name .Release.Name -}}
20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}}
21 | {{- else -}}
22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
23 | {{- end -}}
24 | {{- end -}}
25 | {{- end -}}
26 |
27 | {{/*
28 | Create chart name and version as used by the chart label.
29 | */}}
30 | {{- define "todo.chart" -}}
31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
32 | {{- end -}}
33 |
34 | {{/*
35 | Common labels
36 | */}}
37 | {{- define "todo.labels" -}}
38 | app.kubernetes.io/name: {{ include "todo.name" . }}
39 | helm.sh/chart: {{ include "todo.chart" . }}
40 | app.kubernetes.io/instance: {{ .Release.Name }}
41 | {{- if .Chart.AppVersion }}
42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
43 | {{- end }}
44 | app.kubernetes.io/managed-by: {{ .Release.Service }}
45 | {{- end -}}
46 |
47 | {{/*
48 | Create the name of the service account to use
49 | */}}
50 | {{- define "todo.serviceAccountName" -}}
51 | {{- if .Values.serviceAccount.create -}}
52 | {{ default (include "todo.fullname" .) .Values.serviceAccount.name }}
53 | {{- else -}}
54 | {{ default "default" .Values.serviceAccount.name }}
55 | {{- end -}}
56 | {{- end -}}
57 |
--------------------------------------------------------------------------------
/exercises/4/todo/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "todo.fullname" . }}
5 | labels:
6 | {{ include "todo.labels" . | indent 4 }}
7 | spec:
8 | replicas: {{ .Values.replicaCount }}
9 | selector:
10 | matchLabels:
11 | app.kubernetes.io/name: {{ include "todo.name" . }}
12 | app.kubernetes.io/instance: {{ .Release.Name }}
13 | template:
14 | metadata:
15 | labels:
16 | app.kubernetes.io/name: {{ include "todo.name" . }}
17 | app.kubernetes.io/instance: {{ .Release.Name }}
18 | spec:
19 | {{- with .Values.imagePullSecrets }}
20 | imagePullSecrets:
21 | {{- toYaml . | nindent 8 }}
22 | {{- end }}
23 | serviceAccountName: {{ template "todo.serviceAccountName" . }}
24 | securityContext:
25 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
26 | containers:
27 | - name: {{ .Chart.Name }}
28 | securityContext:
29 | {{- toYaml .Values.securityContext | nindent 12 }}
30 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
31 | imagePullPolicy: {{ .Values.image.pullPolicy }}
32 | ports:
33 | - name: http
34 | containerPort: 80
35 | protocol: TCP
36 | livenessProbe:
37 | httpGet:
38 | path: /
39 | port: http
40 | readinessProbe:
41 | httpGet:
42 | path: /
43 | port: http
44 | resources:
45 | {{- toYaml .Values.resources | nindent 12 }}
46 | {{- with .Values.nodeSelector }}
47 | nodeSelector:
48 | {{- toYaml . | nindent 8 }}
49 | {{- end }}
50 | {{- with .Values.affinity }}
51 | affinity:
52 | {{- toYaml . | nindent 8 }}
53 | {{- end }}
54 | {{- with .Values.tolerations }}
55 | tolerations:
56 | {{- toYaml . | nindent 8 }}
57 | {{- end }}
58 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-cluster-agent/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | apiVersion: {{ template "deployment.apiVersion" . }}
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | {{- include "weave-scope-cluster-agent.helm_std_labels" . | indent 4 }}
7 | component: cluster-agent
8 | name: {{ template "weave-scope-cluster-agent.fullname" . }}
9 | annotations:
10 | {{- include "weave-scope-cluster-agent.annotations" . | indent 4 }}
11 | spec:
12 | selector:
13 | matchLabels:
14 | app: {{ template "toplevel.name" . }}
15 | release: {{ .Release.Name }}
16 | component: cluster-agent
17 | strategy:
18 | type: RollingUpdate
19 | template:
20 | metadata:
21 | labels:
22 | {{- include "weave-scope-cluster-agent.helm_std_labels" . | indent 8 }}
23 | component: cluster-agent
24 | spec:
25 | containers:
26 | - name: {{ template "weave-scope-cluster-agent.name" . }}
27 | image: "{{ .Values.global.image.repository }}:{{ .Values.global.image.tag }}"
28 | imagePullPolicy: "{{ .Values.global.image.pullPolicy }}"
29 | args:
30 | - '--mode=probe'
31 | - '--probe-only'
32 | - '--probe.kubernetes.role=cluster'
33 | {{- range $arg := .Values.flags }}
34 | - {{ $arg | quote }}
35 | {{- end }}
36 | {{if .Values.readOnly}}
37 | - "--probe.no-controls"
38 | {{end}}
39 | {{- if .Values.global.scopeFrontendAddr }}
40 | - {{ .Values.global.scopeFrontendAddr }}
41 | {{- else }}
42 | - {{ .Values.global.service.name | default (include "toplevel.fullname" .) }}.{{ .Release.Namespace }}.svc:{{ .Values.global.service.port }}
43 | {{- end }}
44 | resources:
45 | {{ toYaml .Values.resources | indent 12 }}
46 | serviceAccountName: {{ template "weave-scope-cluster-agent.serviceAccountName" . }}
47 | {{- end -}}
48 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-frontend/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* Helm standard labels */}}
2 | {{- define "weave-scope-frontend.helm_std_labels" }}
3 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
4 | heritage: {{ .Release.Service }}
5 | release: {{ .Release.Name }}
6 | app: {{ template "toplevel.name" . }}
7 | {{- end }}
8 |
9 | {{/* Weave Scope default annotations */}}
10 | {{- define "weave-scope-frontend.annotations" }}
11 | cloud.weave.works/launcher-info: |-
12 | {
13 | "server-version": "master-4fe8efe",
14 | "original-request": {
15 | "url": "/k8s/v1.7/scope.yaml"
16 | },
17 | "email-address": "support@weave.works",
18 | "source-app": "weave-scope",
19 | "weave-cloud-component": "scope"
20 | }
21 | {{- end }}
22 |
23 | {{/*
24 | Expand the name of the chart.
25 | */}}
26 | {{- define "weave-scope-frontend.name" -}}
27 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
28 | {{- end -}}
29 |
30 | {{/*
31 | Expand the name of the top-level chart.
32 | */}}
33 | {{- define "toplevel.name" -}}
34 | {{- default (.Template.BasePath | split "/" )._0 .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
35 | {{- end -}}
36 |
37 | {{/*
38 | Create a default fully qualified app name. We truncate at 63 chars.
39 | */}}
40 | {{- define "weave-scope-frontend.fullname" -}}
41 | {{- printf "%s-%s" .Chart.Name .Release.Name | trunc 63 | trimSuffix "-" -}}
42 | {{- end -}}
43 |
44 | {{/*
45 | Create a fully qualified name that always uses the name of the top-level chart.
46 | */}}
47 | {{- define "toplevel.fullname" -}}
48 | {{- $name := default (.Template.BasePath | split "/" )._0 .Values.nameOverride -}}
49 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
50 | {{- end -}}
51 |
52 | {{/*
53 | Return the apiVersion of deployment.
54 | */}}
55 | {{- define "deployment.apiVersion" -}}
56 | {{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
57 | {{- print "extensions/v1beta1" -}}
58 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
59 | {{- print "apps/v1" -}}
60 | {{- end -}}
61 | {{- end -}}
62 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/all/docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Uncomment this if you want to force overlay/overlay2 as docker storage driver
3 | ## Please note that overlay2 is only supported on newer kernels
4 | # docker_storage_options: -s overlay2
5 |
6 | ## Enable docker_container_storage_setup; it will configure the devicemapper driver on CentOS 7 or RedHat 7.
7 | docker_container_storage_setup: false
8 |
9 | ## A disk path must be defined via docker_container_storage_setup_devs,
10 | ## otherwise docker-storage-setup will be executed incorrectly.
11 | # docker_container_storage_setup_devs: /dev/vdb
12 |
13 | ## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
14 | docker_dns_servers_strict: false
15 |
16 | # Path used to store Docker data
17 | docker_daemon_graph: "/var/lib/docker"
18 |
19 | ## Used to set docker daemon iptables options to true
20 | docker_iptables_enabled: "false"
21 |
22 | # Docker log options
23 | # Rotate container stderr/stdout logs at 50m and keep last 5
24 | docker_log_opts: "--log-opt max-size=50m --log-opt max-file=5"
25 |
26 | # define docker bin_dir
27 | docker_bin_dir: "/usr/bin"
28 |
29 | # keep docker packages after installation; speeds up repeated ansible provisioning runs when '1'
30 | # kubespray deletes the docker package on each run, so caching the package makes sense
31 | docker_rpm_keepcache: 0
32 |
33 | ## An obvious use case is allowing insecure-registry access to self hosted registries.
34 | ## Can be ipaddress and domain_name.
35 | ## example define 172.19.16.11 or mirror.registry.io
36 | # docker_insecure_registries:
37 | # - mirror.registry.io
38 | # - 172.19.16.11
39 |
40 | ## Add other registry,example China registry mirror.
41 | # docker_registry_mirrors:
42 | # - https://registry.docker-cn.com
43 | # - https://mirror.aliyuncs.com
44 |
45 | ## If non-empty will override default system MountFlags value.
46 | ## This option takes a mount propagation flag: shared, slave
47 | ## or private, which control whether mounts in the file system
48 | ## namespace set up for docker will receive or propagate mounts
49 | ## and unmounts. Leave empty for system default
50 | # docker_mount_flags:
51 |
52 | ## A string of extra options to pass to the docker daemon.
53 | ## This string should be exactly as you wish it to appear.
54 | docker_options: >-
55 |
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-agent/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* Helm standard labels */}}
2 | {{- define "weave-scope-agent.helm_std_labels" }}
3 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
4 | heritage: {{ .Release.Service }}
5 | release: {{ .Release.Name }}
6 | app: {{ template "toplevel.name" . }}
7 | {{- end }}
8 |
9 | {{/* Weave Scope default annotations */}}
10 | {{- define "weave-scope-agent.annotations" }}
11 | cloud.weave.works/launcher-info: |-
12 | {
13 | "server-version": "master-4fe8efe",
14 | "original-request": {
15 | "url": "/k8s/v1.7/scope.yaml"
16 | },
17 | "email-address": "support@weave.works",
18 | "source-app": "weave-scope",
19 | "weave-cloud-component": "scope"
20 | }
21 | {{- end }}
22 |
23 | {{/*
24 | Expand the name of the chart.
25 | */}}
26 | {{- define "weave-scope-agent.name" -}}
27 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
28 | {{- end -}}
29 |
30 | {{/*
31 | Expand the name of the top-level chart.
32 | */}}
33 | {{- define "toplevel.name" -}}
34 | {{- default (.Template.BasePath | split "/" )._0 .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
35 | {{- end -}}
36 |
37 | {{/*
38 | Create a default fully qualified app name. We truncate at 63 chars.
39 | */}}
40 | {{- define "weave-scope-agent.fullname" -}}
41 | {{- printf "%s-%s" .Chart.Name .Release.Name | trunc 63 | trimSuffix "-" -}}
42 | {{- end -}}
43 |
44 | {{/*
45 | Create a fully qualified name that always uses the name of the top-level chart.
46 | */}}
47 | {{- define "toplevel.fullname" -}}
48 | {{- $name := default (.Template.BasePath | split "/" )._0 .Values.nameOverride -}}
49 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
50 | {{- end -}}
51 |
52 | {{/*
53 | Create the name of the service account to use
54 | */}}
55 | {{- define "weave-scope-agent.serviceAccountName" -}}
56 | {{- if .Values.serviceAccount.create -}}
57 | {{ default (include "weave-scope-agent.fullname" .) .Values.serviceAccount.name }}
58 | {{- else -}}
59 | {{ default "default" .Values.serviceAccount.name }}
60 | {{- end -}}
61 | {{- end -}}
62 |
63 | {{/*
64 | Return the apiVersion of daemonset.
65 | */}}
66 | {{- define "daemonset.apiVersion" -}}
67 | {{- if semverCompare "<1.9-0" .Capabilities.KubeVersion.GitVersion -}}
68 | {{- print "extensions/v1beta1" -}}
69 | {{- else -}}
70 | {{- print "apps/v1" -}}
71 | {{- end -}}
72 | {{- end -}}
73 |
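74 | {{/*
75 | Worked example (illustrative): for this subchart .Template.BasePath is
76 | typically "weave-scope/charts/weave-scope-agent/templates", so
77 | (.Template.BasePath | split "/")._0 resolves to "weave-scope" -- the
78 | top-level chart name that the "toplevel.*" helpers build on.
79 | */}}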
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-kube-router.yml:
--------------------------------------------------------------------------------
1 | # See roles/network_plugin/kube-router/defaults/main.yml
2 |
3 | # Enables Pod Networking -- Advertises and learns the routes to Pods via iBGP
4 | # kube_router_run_router: true
5 |
6 | # Enables Network Policy -- sets up iptables to provide ingress firewall for pods
7 | # kube_router_run_firewall: true
8 |
9 | # Enables Service Proxy -- sets up IPVS for Kubernetes Services
10 | # see docs/kube-router.md "Caveats" section
11 | # kube_router_run_service_proxy: false
12 |
13 | # Add the Cluster IP of the service to the RIB so that it gets advertised to the BGP peers.
14 | # kube_router_advertise_cluster_ip: false
15 |
16 | # Add External IP of service to the RIB so that it gets advertised to the BGP peers.
17 | # kube_router_advertise_external_ip: false
18 |
19 | # Add the LoadBalancer IP of the service status as set by the LB provider to the RIB so that it gets advertised to the BGP peers.
20 | # kube_router_advertise_loadbalancer_ip: false
21 |
22 | # Adjust the kube-router daemonset manifest with the changes needed for DSR
23 | # kube_router_enable_dsr: false
24 |
25 | # Array of arbitrary extra arguments to kube-router, see
26 | # https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md
27 | # kube_router_extra_args: []
28 |
29 | # ASNs of the BGP peers to which cluster nodes will advertise the cluster ip and each node's pod cidr.
30 | # kube_router_peer_router_asns: ~
31 |
32 | # The ip address of the external router to which all nodes will peer and advertise the cluster ip and pod cidrs.
33 | # kube_router_peer_router_ips: ~
34 |
35 | # The remote port of the external BGP peer to which all nodes will connect. If not set, the default BGP port (179) will be used.
36 | # kube_router_peer_router_ports: ~
37 |
38 | # Sets up node CNI to allow hairpin mode; requires node reboots, see
39 | # https://github.com/cloudnativelabs/kube-router/blob/master/docs/user-guide.md#hairpin-mode
40 | # kube_router_support_hairpin_mode: false
41 |
42 | # Array of annotations for master
43 | # kube_router_annotations_master: []
44 |
45 | # Array of annotations for every node
46 | # kube_router_annotations_node: []
47 |
48 | # Array of common annotations for every node
49 | # kube_router_annotations_all: []
50 |
51 | # Enables scraping kube-router metrics with Prometheus
52 | # kube_router_enable_metrics: false
53 |
54 | # Path to serve Prometheus metrics on
55 | # kube_router_metrics_path: /metrics
56 |
57 | # Prometheus metrics port to use
58 | # kube_router_metrics_port: 9255
59 |
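60 | # Illustrative BGP peering example (ASN and address are placeholders):
61 | # kube_router_peer_router_asns: "65000"
62 | # kube_router_peer_router_ips: "192.0.2.1"
63 | # kube_router_peer_router_ports: "179"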
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-cluster-agent/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* Helm standard labels */}}
2 | {{- define "weave-scope-cluster-agent.helm_std_labels" }}
3 | chart: {{ .Chart.Name }}-{{ .Chart.Version }}
4 | heritage: {{ .Release.Service }}
5 | release: {{ .Release.Name }}
6 | app: {{ template "toplevel.name" . }}
7 | {{- end }}
8 |
9 | {{/* Weave Scope default annotations */}}
10 | {{- define "weave-scope-cluster-agent.annotations" }}
11 | cloud.weave.works/launcher-info: |-
12 | {
13 | "server-version": "master-4fe8efe",
14 | "original-request": {
15 | "url": "/k8s/v1.7/scope.yaml"
16 | },
17 | "email-address": "support@weave.works",
18 | "source-app": "weave-scope",
19 | "weave-cloud-component": "scope"
20 | }
21 | {{- end }}
22 |
23 | {{/*
24 | Expand the name of the chart.
25 | */}}
26 | {{- define "weave-scope-cluster-agent.name" -}}
27 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
28 | {{- end -}}
29 |
30 | {{/*
31 | Expand the name of the top-level chart.
32 | */}}
33 | {{- define "toplevel.name" -}}
34 | {{- default (.Template.BasePath | split "/" )._0 .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
35 | {{- end -}}
36 |
37 | {{/*
38 | Create a default fully qualified app name. We truncate at 63 chars.
39 | */}}
40 | {{- define "weave-scope-cluster-agent.fullname" -}}
41 | {{- printf "%s-%s" .Chart.Name .Release.Name | trunc 63 | trimSuffix "-" -}}
42 | {{- end -}}
43 |
44 | {{/*
45 | Create a fully qualified name that always uses the name of the top-level chart.
46 | */}}
47 | {{- define "toplevel.fullname" -}}
48 | {{- $name := default (.Template.BasePath | split "/" )._0 .Values.nameOverride -}}
49 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
50 | {{- end -}}
51 |
52 | {{/*
53 | Create the name of the service account to use
54 | */}}
55 | {{- define "weave-scope-cluster-agent.serviceAccountName" -}}
56 | {{- if .Values.serviceAccount.create -}}
57 | {{ default (include "weave-scope-cluster-agent.fullname" .) .Values.serviceAccount.name }}
58 | {{- else -}}
59 | {{ default "default" .Values.serviceAccount.name }}
60 | {{- end -}}
61 | {{- end -}}
62 |
63 | {{/*
64 | Return the apiVersion of deployment.
65 | */}}
66 | {{- define "deployment.apiVersion" -}}
67 | {{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
68 | {{- print "extensions/v1beta1" -}}
69 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
70 | {{- print "apps/v1" -}}
71 | {{- end -}}
72 | {{- end -}}
73 |
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-net-weave.yml:
--------------------------------------------------------------------------------
1 | # see roles/network_plugin/weave/defaults/main.yml
2 |
3 | # Weave's network password for encryption, if null then no network encryption.
4 | # weave_password: ~
5 |
6 | # If set to 1, disable checking for new Weave Net versions (default is blank,
7 | # i.e. check is enabled)
8 | # weave_checkpoint_disable: false
9 |
10 | # Soft limit on the number of connections between peers. Defaults to 100.
11 | # weave_conn_limit: 100
12 |
13 | # Weave Net defaults to enabling hairpin on the bridge side of the veth pair
14 | # for containers attached. If you need to disable hairpin, e.g. your kernel is
15 | # one of those that can panic if hairpin is enabled, then you can disable it by
16 | # setting `HAIRPIN_MODE=false`.
17 | # weave_hairpin_mode: true
18 |
19 | # The range of IP addresses used by Weave Net and the subnet they are placed in
20 | # (CIDR format; default 10.32.0.0/12)
21 | # weave_ipalloc_range: "{{ kube_pods_subnet }}"
22 |
23 | # Set to 0 to disable Network Policy Controller (default is on)
24 | # weave_expect_npc: "{{ enable_network_policy }}"
25 |
26 | # List of addresses of peers in the Kubernetes cluster (default is to fetch the
27 | # list from the api-server)
28 | # weave_kube_peers: ~
29 |
30 | # Set the initialization mode of the IP Address Manager (defaults to consensus
31 | # amongst the KUBE_PEERS)
32 | # weave_ipalloc_init: ~
33 |
34 | # Set the IP address used as a gateway from the Weave network to the host
35 | # network - this is useful if you are configuring the addon as a static pod.
36 | # weave_expose_ip: ~
37 |
38 | # Address and port that the Weave Net daemon will serve Prometheus-style
39 | # metrics on (defaults to 0.0.0.0:6782)
40 | # weave_metrics_addr: ~
41 |
42 | # Address and port that the Weave Net daemon will serve status requests on
43 | # (defaults to disabled)
44 | # weave_status_addr: ~
45 |
46 | # Weave Net defaults to 1376 bytes, but you can set a smaller size if your
47 | # underlying network has a tighter limit, or set a larger size for better
48 | # performance if your network supports jumbo frames (e.g. 8916)
49 | # weave_mtu: 1376
50 |
51 | # Set to 1 to preserve the client source IP address when accessing Services
52 | # that set `service.spec.externalTrafficPolicy=Local`. The feature works
53 | # only with Weave IPAM (default).
54 | # weave_no_masq_local: true
55 |
56 | # Extra variables to pass to launch.sh, useful for enabling seed mode; see
57 | # https://www.weave.works/docs/net/latest/tasks/ipam/ipam/
58 | # weave_extra_args: ~
59 |
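60 | # Illustrative example only: encrypted overlay plus jumbo frames
61 | # (values are placeholders, not recommendations):
62 | # weave_password: "change-me"
63 | # weave_mtu: 8916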
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/charts/weave-scope-agent/templates/daemonset.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.enabled -}}
2 | apiVersion: {{ template "daemonset.apiVersion" . }}
3 | kind: DaemonSet
4 | metadata:
5 | labels:
6 | {{- include "weave-scope-agent.helm_std_labels" . | indent 4 }}
7 | component: agent
8 | name: {{ template "weave-scope-agent.fullname" . }}
9 | annotations:
10 | {{- include "weave-scope-agent.annotations" . | indent 4 }}
11 | spec:
12 | selector:
13 | matchLabels:
14 | app: {{ template "toplevel.name" . }}
15 | release: {{ .Release.Name }}
16 | component: agent
17 | updateStrategy:
18 | type: RollingUpdate
19 | template:
20 | metadata:
21 | labels:
22 | {{- include "weave-scope-agent.helm_std_labels" . | indent 8 }}
23 | component: agent
24 | spec:
25 | tolerations:
26 | - effect: NoSchedule
27 | operator: Exists
28 | {{- if .Values.priorityClassName }}
29 | priorityClassName: {{ .Values.priorityClassName }}
30 | {{- end }}
31 | containers:
32 | - name: {{ template "weave-scope-agent.name" . }}
33 | image: "{{ .Values.global.image.repository }}:{{ .Values.global.image.tag }}"
34 | imagePullPolicy: "{{ .Values.global.image.pullPolicy }}"
35 | args:
36 | - '--mode=probe'
37 | - '--probe-only'
38 | - '--probe.kubernetes.role=host'
39 | - '--probe.docker.bridge={{ .Values.dockerBridge }}'
40 | - '--probe.docker=true'
41 | - '--probe.kubernetes=true'
42 | {{- range $arg := .Values.flags }}
43 | - {{ $arg | quote }}
44 | {{- end }}
45 |             {{- if .Values.readOnly }}
46 |             - "--probe.no-controls"
47 |             {{- end }}
48 | {{- if .Values.global.probeToken }}
49 | - '--probe-token={{ .Values.global.probeToken }}'
50 | {{- else if .Values.global.scopeFrontendAddr }}
51 | - {{ .Values.global.scopeFrontendAddr }}
52 | {{- else }}
53 | - {{ .Values.global.service.name | default (include "toplevel.fullname" .) }}.{{ .Release.Namespace }}.svc:{{ .Values.global.service.port }}
54 | {{- end }}
55 | securityContext:
56 | privileged: true
57 | resources:
58 | {{ toYaml .Values.resources | indent 12 }}
59 | volumeMounts:
60 | - name: docker-socket
61 | mountPath: /var/run/docker.sock
62 | - name: scope-plugins
63 | mountPath: /var/run/scope/plugins
64 | - name: sys-kernel-debug
65 | mountPath: /sys/kernel/debug
66 | volumes:
67 | - name: docker-socket
68 | hostPath:
69 | path: /var/run/docker.sock
70 | - name: scope-plugins
71 | hostPath:
72 | path: /var/run/scope/plugins
73 | - name: sys-kernel-debug
74 | hostPath:
75 | path: /sys/kernel/debug
76 | hostPID: true
77 | hostNetwork: true
78 | dnsPolicy: ClusterFirstWithHostNet
79 | {{- end -}}
80 |
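81 | {{- /* Illustrative values that drive this template (assumptions, not
82 |    necessarily the chart defaults):
83 |    weave-scope-agent:
84 |      enabled: true
85 |      dockerBridge: "docker0"
86 |      readOnly: true   # adds --probe.no-controls to the args above
87 | */ -}}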
--------------------------------------------------------------------------------
/kubeadm-deployment/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure("2") do |config|
9 | # The most common configuration options are documented and commented below.
10 | # For a complete reference, please see the online documentation at
11 | # https://docs.vagrantup.com.
12 |
13 | # Every Vagrant development environment requires a box. You can search for
14 | # boxes at https://vagrantcloud.com/search.
15 | config.vm.box = "ubuntu/bionic64"
16 |
17 | # Disable automatic box update checking. If you disable this, then
18 | # boxes will only be checked for updates when the user runs
19 | # `vagrant box outdated`. This is not recommended.
20 | # config.vm.box_check_update = false
21 |
22 | # Create a forwarded port mapping which allows access to a specific port
23 | # within the machine from a port on the host machine. In the example below,
24 | # accessing "localhost:8080" will access port 80 on the guest machine.
25 | # NOTE: This will enable public access to the opened port
26 | # config.vm.network "forwarded_port", guest: 80, host: 8080
27 |
28 | # Create a forwarded port mapping which allows access to a specific port
29 | # within the machine from a port on the host machine and only allow access
30 | # via 127.0.0.1 to disable public access
31 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
32 |
33 | # Create a private network, which allows host-only access to the machine
34 | # using a specific IP.
35 | # config.vm.network "private_network", ip: "192.168.33.10"
36 |
37 | # Create a public network, which is generally matched to a bridged network.
38 | # Bridged networks make the machine appear as another physical device on
39 | # your network.
40 | # config.vm.network "public_network"
41 |
42 | # Share an additional folder to the guest VM. The first argument is
43 | # the path on the host to the actual folder. The second argument is
44 | # the path on the guest to mount the folder. And the optional third
45 | # argument is a set of non-required options.
46 | # config.vm.synced_folder "../data", "/vagrant_data"
47 |
48 | # Provider-specific configuration so you can fine-tune various
49 | # backing providers for Vagrant. These expose provider-specific options.
50 | # Example for VirtualBox:
51 | #
52 | # config.vm.provider "virtualbox" do |vb|
53 | # # Display the VirtualBox GUI when booting the machine
54 | # vb.gui = true
55 | #
56 | # # Customize the amount of memory on the VM:
57 | # vb.memory = "1024"
58 | # end
59 | #
60 | # View the documentation for the provider you are using for more
61 | # information on available options.
62 |
63 | # Enable provisioning with a shell script. Additional provisioners such as
64 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
65 | # documentation for more information about their specific syntax and use.
66 | # config.vm.provision "shell", inline: <<-SHELL
67 | # apt-get update
68 | # apt-get install -y apache2
69 | # SHELL
70 | end
71 |
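72 | # A hedged sketch: kubeadm's preflight checks require at least 2 CPUs and
73 | # roughly 2 GB of RAM, so for a kubeadm node an override like this (placed
74 | # inside the configure block above) is usually needed:
75 | #
76 | # config.vm.provider "virtualbox" do |vb|
77 | #   vb.cpus = 2
78 | #   vb.memory = "2048"
79 | # end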
--------------------------------------------------------------------------------
/exercises/2/cheese-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: cheese-secret
5 | type: Opaque
6 | data:
7 | tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
8 | tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
9 |
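10 | # The values above are base64-encoded PEM blobs. An equivalent secret can be
11 | # generated straight from the files (note: `kubectl create secret tls` produces
12 | # type kubernetes.io/tls rather than Opaque, which ingress controllers accept):
13 | #   kubectl create secret tls cheese-secret --cert=tls.crt --key=tls.key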
--------------------------------------------------------------------------------
/ingress-contoller/cafe-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: cafe-secret
5 | type: Opaque
6 | data:
7 | tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRREFPRjl0THNhWFdqQU5CZ2txaGtpRzl3MEJBUXNGQURCYU1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1EwRXhJVEFmQmdOVkJBb01HRWx1ZEdWeWJtVjBJRmRwWkdkcGRITWdVSFI1SUV4MApaREViTUJrR0ExVUVBd3dTWTJGbVpTNWxlR0Z0Y0d4bExtTnZiU0FnTUI0WERURTRNRGt4TWpFMk1UVXpOVm9YCkRUSXpNRGt4TVRFMk1UVXpOVm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01Ba05CTVNFd0h3WUQKVlFRS0RCaEpiblJsY201bGRDQlhhV1JuYVhSeklGQjBlU0JNZEdReEdUQVhCZ05WQkFNTUVHTmhabVV1WlhoaApiWEJzWlM1amIyMHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDcDZLbjdzeTgxCnAwanVKL2N5ayt2Q0FtbHNmanRGTTJtdVpOSzBLdGVjcUcyZmpXUWI1NXhRMVlGQTJYT1N3SEFZdlNkd0kyaloKcnVXOHFYWENMMnJiNENaQ0Z4d3BWRUNyY3hkam0zdGVWaVJYVnNZSW1tSkhQUFN5UWdwaW9iczl4N0RsTGM2SQpCQTBaalVPeWwwUHFHOVNKZXhNVjczV0lJYTVyRFZTRjJyNGtTa2JBajREY2o3TFhlRmxWWEgySTVYd1hDcHRDCm42N0pDZzQyZitrOHdnemNSVnA4WFprWldaVmp3cTlSVUtEWG1GQjJZeU4xWEVXZFowZXdSdUtZVUpsc202OTIKc2tPcktRajB2a29QbjQxRUUvK1RhVkVwcUxUUm9VWTNyemc3RGtkemZkQml6Rk8yZHNQTkZ4MkNXMGpYa05MdgpLbzI1Q1pyT2hYQUhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFLSEZDY3lPalp2b0hzd1VCTWRMClJkSEliMzgzcFdGeW5acS9MdVVvdnNWQTU4QjBDZzdCRWZ5NXZXVlZycTVSSWt2NGxaODFOMjl4MjFkMUpINnIKalNuUXgrRFhDTy9USkVWNWxTQ1VwSUd6RVVZYVVQZ1J5anNNL05VZENKOHVIVmhaSitTNkZBK0NuT0Q5cm4yaQpaQmVQQ0k1ckh3RVh3bm5sOHl3aWozdnZRNXpISXV5QmdsV3IvUXl1aTlmalBwd1dVdlVtNG52NVNNRzl6Q1Y3ClBwdXd2dWF0cWpPMTIwOEJqZkUvY1pISWc4SHc5bXZXOXg5QytJUU1JTURFN2IvZzZPY0s3TEdUTHdsRnh2QTgKN1dqRWVxdW5heUlwaE1oS1JYVmYxTjM0OWVOOThFejM4Zk9USFRQYmRKakZBL1BjQytHeW1lK2lHdDVPUWRGaAp5UkU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
8 | tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcWVpcCs3TXZOYWRJN2lmM01wUHJ3Z0pwYkg0N1JUTnBybVRTdENyWG5LaHRuNDFrCkcrZWNVTldCUU5semtzQndHTDBuY0NObzJhN2x2S2wxd2k5cTIrQW1RaGNjS1ZSQXEzTVhZNXQ3WGxZa1YxYkcKQ0pwaVJ6ejBza0lLWXFHN1BjZXc1UzNPaUFRTkdZMURzcGRENmh2VWlYc1RGZTkxaUNHdWF3MVVoZHErSkVwRwp3SStBM0kreTEzaFpWVng5aU9WOEZ3cWJRcCt1eVFvT05uL3BQTUlNM0VWYWZGMlpHVm1WWThLdlVWQ2cxNWhRCmRtTWpkVnhGbldkSHNFYmltRkNaYkp1dmRySkRxeWtJOUw1S0Q1K05SQlAvazJsUkthaTAwYUZHTjY4NE93NUgKYzMzUVlzeFR0bmJEelJjZGdsdEkxNURTN3lxTnVRbWF6b1Z3QndJREFRQUJBb0lCQVFDUFNkU1luUXRTUHlxbApGZlZGcFRPc29PWVJoZjhzSStpYkZ4SU91UmF1V2VoaEp4ZG01Uk9ScEF6bUNMeUw1VmhqdEptZTIyM2dMcncyCk45OUVqVUtiL1ZPbVp1RHNCYzZvQ0Y2UU5SNThkejhjbk9SVGV3Y290c0pSMXBuMWhobG5SNUhxSkpCSmFzazEKWkVuVVFmY1hackw5NGxvOUpIM0UrVXFqbzFGRnM4eHhFOHdvUEJxalpzVjdwUlVaZ0MzTGh4bndMU0V4eUZvNApjeGI5U09HNU9tQUpvelN0Rm9RMkdKT2VzOHJKNXFmZHZ5dGdnOXhiTGFRTC94MGtwUTYyQm9GTUJEZHFPZVBXCktmUDV6WjYvMDcvdnBqNDh5QTFRMzJQem9idWJzQkxkM0tjbjMyamZtMUU3cHJ0V2wrSmVPRmlPem5CUUZKYk4KNHFQVlJ6NWhBb0dCQU50V3l4aE5DU0x1NFArWGdLeWNrbGpKNkY1NjY4Zk5qNUN6Z0ZScUowOXpuMFRsc05ybwpGVExaY3hEcW5SM0hQWU00MkpFUmgySi9xREZaeW5SUW8zY2czb2VpdlVkQlZHWTgrRkkxVzBxZHViL0w5K3l1CmVkT1pUUTVYbUdHcDZyNmpleHltY0ppbS9Pc0IzWm5ZT3BPcmxEN1NQbUJ2ek5MazRNRjZneGJYQW9HQkFNWk8KMHA2SGJCbWNQMHRqRlhmY0tFNzdJbUxtMHNBRzR1SG9VeDBlUGovMnFyblRuT0JCTkU0TXZnRHVUSnp5K2NhVQprOFJxbWRIQ2JIelRlNmZ6WXEvOWl0OHNaNzdLVk4xcWtiSWN1YytSVHhBOW5OaDFUanNSbmU3NFowajFGQ0xrCmhIY3FIMHJpN1BZU0tIVEU4RnZGQ3haWWRidUI4NENtWmlodnhicFJBb0dBSWJqcWFNWVBUWXVrbENkYTVTNzkKWVNGSjFKelplMUtqYS8vdER3MXpGY2dWQ0thMzFqQXdjaXowZi9sU1JxM0hTMUdHR21lemhQVlRpcUxmZVpxYwpSMGlLYmhnYk9jVlZrSkozSzB5QXlLd1BUdW14S0haNnpJbVpTMGMwYW0rUlk5WUdxNVQ3WXJ6cHpjZnZwaU9VCmZmZTNSeUZUN2NmQ21mb09oREN0enVrQ2dZQjMwb0xDMVJMRk9ycW40M3ZDUzUxemM1em9ZNDR1QnpzcHd3WU4KVHd2UC9FeFdNZjNWSnJEakJDSCtULzZzeXNlUGJKRUltbHpNK0l3eXRGcEFOZmlJWEV0LzQ4WGY2ME54OGdXTQp1SHl4Wlp4L05LdER3MFY4dlgxUE9ucTJBNWVpS2ErOGpSQVJZS0pMWU5kZkR1d29seHZHNmJaaGtQaS80RXRUCjNZMThzUUtCZ0h0S2JrKzdsTkpWZXN3WEU1Y1VHNkVEVXNEZS8yVWE3ZlhwN0ZjanFCRW9hcDFMU3crNlRYcDAKWmdybUtFOEFSek00NytFSkhVdmlpcS9udXBFMTVnMGtKVzNzeWhwVTl6WkxPN2x0QjBLSWtPOVpSY21Vam84UQpjcExsSE1BcWJMSjhXWUdKQ2toaVd4eWFsNmhZVHlXWTRjVmtDMHh0VGwvaFVFOUllTktvCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
9 |
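10 | # To inspect the certificate stored above (illustrative):
11 | #   kubectl get secret cafe-secret -o jsonpath='{.data.tls\.crt}' \
12 | #     | base64 --decode | openssl x509 -noout -subject -dates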
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/k8s-cluster/addons.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubernetes dashboard
3 | # RBAC required. see docs/getting-started.md for access details.
4 | dashboard_enabled: true
5 |
6 | # Helm deployment
7 | helm_enabled: false
8 |
9 | # Registry deployment
10 | registry_enabled: false
11 | # registry_namespace: kube-system
12 | # registry_storage_class: ""
13 | # registry_disk_size: "10Gi"
14 |
15 | # Metrics Server deployment
16 | metrics_server_enabled: false
17 | # metrics_server_kubelet_insecure_tls: true
18 | # metrics_server_metric_resolution: 60s
19 | # metrics_server_kubelet_preferred_address_types: "InternalIP"
20 |
21 | # Rancher Local Path Provisioner
22 | local_path_provisioner_enabled: false
23 | # local_path_provisioner_namespace: "local-path-storage"
24 | # local_path_provisioner_storage_class: "local-path"
25 | # local_path_provisioner_reclaim_policy: Delete
26 | # local_path_provisioner_claim_root: /opt/local-path-provisioner/
27 | # local_path_provisioner_debug: false
28 | # local_path_provisioner_image_repo: "rancher/local-path-provisioner"
29 | # local_path_provisioner_image_tag: "v0.0.2"
30 |
31 | # Local volume provisioner deployment
32 | local_volume_provisioner_enabled: false
33 | # local_volume_provisioner_namespace: kube-system
34 | # local_volume_provisioner_storage_classes:
35 | # local-storage:
36 | # host_dir: /mnt/disks
37 | # mount_dir: /mnt/disks
38 | # volume_mode: Filesystem
39 | # fs_type: ext4
40 | # fast-disks:
41 | # host_dir: /mnt/fast-disks
42 | # mount_dir: /mnt/fast-disks
43 | # block_cleaner_command:
44 | # - "/scripts/shred.sh"
45 | # - "2"
46 | # volume_mode: Filesystem
47 | # fs_type: ext4
48 |
49 | # CephFS provisioner deployment
50 | cephfs_provisioner_enabled: false
51 | # cephfs_provisioner_namespace: "cephfs-provisioner"
52 | # cephfs_provisioner_cluster: ceph
53 | # cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
54 | # cephfs_provisioner_admin_id: admin
55 | # cephfs_provisioner_secret: secret
56 | # cephfs_provisioner_storage_class: cephfs
57 | # cephfs_provisioner_reclaim_policy: Delete
58 | # cephfs_provisioner_claim_root: /volumes
59 | # cephfs_provisioner_deterministic_names: true
60 |
61 | # RBD provisioner deployment
62 | rbd_provisioner_enabled: false
63 | # rbd_provisioner_namespace: rbd-provisioner
64 | # rbd_provisioner_replicas: 2
65 | # rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
66 | # rbd_provisioner_pool: kube
67 | # rbd_provisioner_admin_id: admin
68 | # rbd_provisioner_secret_name: ceph-secret-admin
69 | # rbd_provisioner_secret: ceph-key-admin
70 | # rbd_provisioner_user_id: kube
71 | # rbd_provisioner_user_secret_name: ceph-secret-user
72 | # rbd_provisioner_user_secret: ceph-key-user
73 | # rbd_provisioner_user_secret_namespace: rbd-provisioner
74 | # rbd_provisioner_fs_type: ext4
75 | # rbd_provisioner_image_format: "2"
76 | # rbd_provisioner_image_features: layering
77 | # rbd_provisioner_storage_class: rbd
78 | # rbd_provisioner_reclaim_policy: Delete
79 |
80 | # Nginx ingress controller deployment
81 | ingress_nginx_enabled: false
82 | # ingress_nginx_host_network: false
83 | ingress_publish_status_address: ""
84 | # ingress_nginx_nodeselector:
85 | # beta.kubernetes.io/os: "linux"
86 | # ingress_nginx_tolerations:
87 | # - key: "node-role.kubernetes.io/master"
88 | # operator: "Equal"
89 | # value: ""
90 | # effect: "NoSchedule"
91 | # ingress_nginx_namespace: "ingress-nginx"
92 | # ingress_nginx_insecure_port: 80
93 | # ingress_nginx_secure_port: 443
94 | # ingress_nginx_configmap:
95 | # map-hash-bucket-size: "128"
96 | # ssl-protocols: "SSLv2"
97 | # ingress_nginx_configmap_tcp_services:
98 | # 9000: "default/example-go:8080"
99 | # ingress_nginx_configmap_udp_services:
100 | # 53: "kube-system/coredns:53"
101 | # ingress_nginx_extra_args:
102 | # - --default-ssl-certificate=default/foo-tls
103 |
104 | # Cert manager deployment
105 | cert_manager_enabled: false
106 | # cert_manager_namespace: "cert-manager"
107 |
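108 | # Illustrative example: a small lab cluster would typically also enable
109 | # helm and the metrics server alongside the dashboard enabled above:
110 | # helm_enabled: true
111 | # metrics_server_enabled: true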
--------------------------------------------------------------------------------
/on-premise/kubespray/test_env/group_vars/all/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Directory where etcd data is stored
3 | etcd_data_dir: /var/lib/etcd
4 |
5 | ## Experimental kubeadm etcd deployment mode. Available only for new deployment
6 | etcd_kubeadm_enabled: false
7 |
8 | ## Directory where the binaries will be installed
9 | bin_dir: /usr/local/bin
10 |
11 | ## The access_ip variable is used to define how other nodes should access
12 | ## the node. This is used in flannel to allow other flannel nodes to see
13 | ## this node for example. The access_ip is really useful in AWS and Google
14 | ## environments where the nodes are accessed remotely by the "public" ip,
15 | ## but don't know about that address themselves.
16 | # access_ip: 1.1.1.1
17 |
18 |
19 | ## External LB example config
20 | ## apiserver_loadbalancer_domain_name: "elb.some.domain"
21 | # loadbalancer_apiserver:
22 | # address: 1.2.3.4
23 | # port: 1234
24 |
25 | ## Internal loadbalancers for apiservers
26 | # loadbalancer_apiserver_localhost: true
27 | # valid options are "nginx" or "haproxy"
28 | # loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"
29 |
30 | ## The local loadbalancer should use this port,
31 | ## and it must be set to port 6443
32 | loadbalancer_apiserver_port: 6443
33 |
34 | ## If the loadbalancer_apiserver_healthcheck_port variable is defined, it enables the proxy liveness check for nginx.
35 | loadbalancer_apiserver_healthcheck_port: 8081
36 |
37 | ### OTHER OPTIONAL VARIABLES
38 | ## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
39 | ## for mounting persistent volumes into containers, e.g. ceph- and rbd-backed volumes. These may not
40 | ## be loaded by the kubernetes preinstall processes. Set to true to allow kubelet to load kernel
41 | ## modules.
42 | # kubelet_load_modules: false
43 |
44 | ## Upstream dns servers
45 | # upstream_dns_servers:
46 | # - 8.8.8.8
47 | # - 8.8.4.4
48 |
49 | ## There are some changes specific to the cloud providers
50 | ## for instance we need to encapsulate packets with some network plugins
51 | ## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
52 | ## When openstack is used, make sure to source the openstack credentials
53 | ## like you would do when using openstack-client before starting the playbook.
54 | ## Note: The 'external' cloud provider is not supported.
55 | ## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
56 | # cloud_provider:
57 |
58 | ## Set these proxy values in order to update package manager and docker daemon to use proxies
59 | # http_proxy: ""
60 | # https_proxy: ""
61 |
62 | ## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
63 | # no_proxy: ""
64 |
65 | ## Some problems may occur when downloading files over https proxy due to ansible bug
66 | ## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
67 | ## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
68 | # download_validate_certs: False
69 |
70 | ## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
71 | # additional_no_proxy: ""
72 |
73 | ## Certificate Management
74 | ## This setting determines whether certs are generated via scripts.
75 | ## Choose 'none' if you provide your own certificates.
76 | ## Options are "script" and "none"
77 | ## note: vault is removed
78 | # cert_management: script
79 |
80 | ## Set to true to allow pre-checks to fail and continue deployment
81 | # ignore_assert_errors: false
82 |
83 | ## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
84 | # kube_read_only_port: 10255
85 |
86 | ## Set true to download and cache container images
87 | # download_container: true
88 |
89 | ## Deploy container engine
90 | # Set false if you want to deploy container engine manually.
91 | # deploy_container_engine: true
92 |
93 | ## Set Pypi repo and cert accordingly
94 | # pyrepo_index: https://pypi.example.com/simple
95 | # pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
96 |
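97 | ## Illustrative external LB example (addresses are placeholders), mirroring
98 | ## the commented template above:
99 | # apiserver_loadbalancer_domain_name: "lb.k8s.example.com"
100 | # loadbalancer_apiserver:
101 | #   address: 192.0.2.10
102 | #   port: 6443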
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{- /*
2 | name defines a template for the name of the chartmuseum chart.
3 |
4 | The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
5 | not exceed 63 characters.
6 |
7 | Parameters:
8 |
9 | - .Values.nameOverride: Replaces the computed name with this given name
10 | - .Values.namePrefix: Prefix
11 | - .Values.global.namePrefix: Global prefix
12 | - .Values.nameSuffix: Suffix
13 | - .Values.global.nameSuffix: Global suffix
14 |
15 | The applied order is: "global prefix + prefix + name + suffix + global suffix"
16 |
17 | Usage: 'name: "{{- template "chartmuseum.name" . -}}"'
18 | */ -}}
19 | {{- define "chartmuseum.name"}}
20 | {{- $global := default (dict) .Values.global -}}
21 | {{- $base := default .Chart.Name .Values.nameOverride -}}
22 | {{- $gpre := default "" $global.namePrefix -}}
23 | {{- $pre := default "" .Values.namePrefix -}}
24 | {{- $suf := default "" .Values.nameSuffix -}}
25 | {{- $gsuf := default "" $global.nameSuffix -}}
26 | {{- $name := print $gpre $pre $base $suf $gsuf -}}
27 | {{- $name | lower | trunc 54 | trimSuffix "-" -}}
28 | {{- end -}}
29 |
30 | {{- /*
31 | fullname defines a suitably unique name for a resource by combining
32 | the release name and the chartmuseum chart name.
33 |
34 | The prevailing wisdom is that names should only contain a-z, 0-9 plus dot (.) and dash (-), and should
35 | not exceed 63 characters.
36 |
37 | Parameters:
38 |
39 | - .Values.fullnameOverride: Replaces the computed name with this given name
40 | - .Values.fullnamePrefix: Prefix
41 | - .Values.global.fullnamePrefix: Global prefix
42 | - .Values.fullnameSuffix: Suffix
43 | - .Values.global.fullnameSuffix: Global suffix
44 |
45 | The applied order is: "global prefix + prefix + name + suffix + global suffix"
46 |
47 | Usage: 'name: "{{- template "chartmuseum.fullname" . -}}"'
48 | */ -}}
49 | {{- define "chartmuseum.fullname"}}
50 | {{- $global := default (dict) .Values.global -}}
51 | {{- $base := default (printf "%s-%s" .Release.Name .Chart.Name) .Values.fullnameOverride -}}
52 | {{- $gpre := default "" $global.fullnamePrefix -}}
53 | {{- $pre := default "" .Values.fullnamePrefix -}}
54 | {{- $suf := default "" .Values.fullnameSuffix -}}
55 | {{- $gsuf := default "" $global.fullnameSuffix -}}
56 | {{- $name := print $gpre $pre $base $suf $gsuf -}}
57 | {{- $name | lower | trunc 54 | trimSuffix "-" -}}
58 | {{- end -}}
59 |
60 |
61 | {{- /*
62 | chartmuseum.labels.standard prints the standard chartmuseum Helm labels.
63 |
64 | The standard labels are frequently used in metadata.
65 | */ -}}
66 | {{- define "chartmuseum.labels.standard" -}}
67 | app: {{ template "chartmuseum.name" . }}
68 | chart: {{ template "chartmuseum.chartref" . }}
69 | heritage: {{ .Release.Service | quote }}
70 | release: {{ .Release.Name | quote }}
71 | {{- end -}}
72 |
73 | {{- /*
74 | chartmuseum.chartref prints a chart name and version.
75 |
76 | It does minimal escaping for use in Kubernetes labels.
77 |
78 | Example output:
79 |
80 | chartmuseum-0.4.5
81 | */ -}}
82 | {{- define "chartmuseum.chartref" -}}
83 | {{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
84 | {{- end -}}
85 |
86 | {{/*
87 | Return the proper image name to change the volume permissions
88 | */}}
89 | {{- define "chartmuseum.volumePermissions.image" -}}
90 | {{- $registryName := .Values.volumePermissions.image.registry -}}
91 | {{- $repositoryName := .Values.volumePermissions.image.repository -}}
92 | {{- $tag := .Values.volumePermissions.image.tag | toString -}}
93 | {{/*
94 | Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
95 | but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
96 | Also, we can't use a single if because lazy evaluation is not an option
97 | */}}
98 | {{- if .Values.global }}
99 | {{- if .Values.global.imageRegistry }}
100 | {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
101 | {{- else -}}
102 | {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
103 | {{- end -}}
104 | {{- else -}}
105 | {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
106 | {{- end -}}
107 | {{- end -}}
108 |
109 | {{/*
110 | Return the proper Docker Image Registry Secret Names
111 | */}}
112 | {{- define "chartmuseum.imagePullSecrets" -}}
113 | {{/*
114 | Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
115 | but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic.
116 | Also, we can not use a single if because lazy evaluation is not an option
117 | */}}
118 | {{- if .Values.global }}
119 | {{- if .Values.global.imagePullSecrets }}
120 | imagePullSecrets:
121 | {{- range .Values.global.imagePullSecrets }}
122 | - name: {{ . }}
123 | {{- end }}
124 | {{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
125 | imagePullSecrets:
126 | {{- range .Values.image.pullSecrets }}
127 | - name: {{ . }}
128 | {{- end }}
129 | {{- range .Values.volumePermissions.image.pullSecrets }}
130 | - name: {{ . }}
131 | {{- end }}
132 | {{- end -}}
133 | {{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
134 | imagePullSecrets:
135 | {{- range .Values.image.pullSecrets }}
136 | - name: {{ . }}
137 | {{- end }}
138 | {{- range .Values.volumePermissions.image.pullSecrets }}
139 | - name: {{ . }}
140 | {{- end }}
141 | {{- end -}}
142 | {{- end -}}
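143 | {{- /*
144 | Worked example (illustrative): with .Values.namePrefix "dev-" and
145 | .Values.nameSuffix "-eu", "chartmuseum.name" renders "dev-chartmuseum-eu",
146 | lowercased and truncated to 54 chars so that suffixes added by other
147 | templates still fit within the 63-character label limit.
148 | */ -}}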
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/values.yaml:
--------------------------------------------------------------------------------
1 | # Where defaults exist, the values are set to them here.
2 | # Values with no preferred or common defaults are set to empty strings.
3 | global:
4 | # global.image: the image that will be used for this release
5 | image:
6 | repository: weaveworks/scope
7 | tag: 1.12.0
8 | # global.image.pullPolicy: must be Always, IfNotPresent, or Never
9 | pullPolicy: "IfNotPresent"
10 | # global.service.*: the configuration of the service used to access the frontend
11 | service:
12 | # global.service.name: the short name desired for the frontend service
13 | # global.service.name may be specified if you need to use a specific service name, but will be generated if not specified
14 | # global.service.name is a global so we can access its value easily from the agent subchart
15 | # name: "weave-scope-app"
16 | # global.service.port: (required if frontend.enabled == true) the port exposed by the Scope frontend service
17 | # global.service.port is a global so we can access its value easily from the agent subchart
18 | port: 80
19 | # global.service.type: (required if frontend.enabled == true) the type of the frontend service -- must be ClusterIP, NodePort or LoadBalancer
20 | # global.service.type is a global to keep it with the other values for configuring the frontend service
21 | type: "ClusterIP"
22 |
23 | # weave-scope-frontend.* controls how the Scope frontend is installed
24 | weave-scope-frontend:
25 | enabled: true
26 | # weave-scope-frontend.resources.*: controls requests/limits for the frontend
27 | # weave-scope-frontend.resources.* values are all optional but should not be set to empty values
28 | # resources:
29 | # requests:
30 | #     weave-scope-frontend.resources.requests.cpu: CPU req. in millicores (m)
31 | # cpu: ""
32 | # weave-scope-frontend.resources.requests.memory: memory req. in MiB (Mi)
33 | # memory: ""
34 | # limits:
35 | #     weave-scope-frontend.resources.limits.cpu: CPU limit in millicores (m)
36 | # cpu: ""
37 | # weave-scope-frontend.resources.limits.memory: memory limit in MiB (Mi)
38 | # memory: ""
39 | flags: []
40 | # weave-scope-frontend Ingress
41 | ingress:
42 | # If true, weave-scope-frontend ingress will be created
43 | enabled: false
44 | annotations: {}
45 | # kubernetes.io/ingress.class: nginx
46 | # kubernetes.io/tls-acme: "true"
47 | # weave-scope-frontend path(s) must be provided if Ingress is enabled
48 | paths: []
49 | # weave-scope-frontend hostname(s) must be provided if Ingress is enabled
50 | hosts:
51 | - weave-scope.example.test
52 | # Ingress TLS secret
53 | # Must be created manually in the namespace
54 | tls: []
55 | # - secretName: weave-scope-example-tls
56 | # hosts:
57 | # - weave-scope.example.test
58 |
59 | # weave-scope-agent.* controls how the Weave Scope node agent pods are installed
60 | weave-scope-agent:
61 | enabled: true
62 | flags: []
63 | # priorityClassName:
64 | # weave-scope-agent.dockerBridge: (required if agent.enabled == true) the name of the Docker bridge interface
65 | dockerBridge: "docker0"
66 | # weave-scope-agent.scopeFrontendAddr: the host:port of a Scope frontend to send data to
67 | # weave-scope-agent.scopeFrontendAddr is only needed for some cases where the frontend is deployed separately from the agent
68 | scopeFrontendAddr: ""
69 | # weave-scope-agent.probeToken: the token used to connect to Weave Cloud
70 | # weave-scope-agent.probeToken is not needed for connecting to non-cloud Scope frontends
71 | probeToken: ""
72 | # weave-scope-agent.readOnly: disables Scope controls when true.
73 | # Enabling readOnly adds --probe.no-controls to the args list.
74 | readOnly: false
75 | # weave-scope-agent.resources.*: controls requests/limits for the agent
76 | # weave-scope-agent.resources.* values are all optional but should not be set to empty values
77 | # resources:
78 | # requests:
79 | #     weave-scope-agent.resources.requests.cpu: CPU req. in millicores (m)
80 | # cpu: ""
81 | # weave-scope-agent.resources.requests.memory: memory req. in MiB (Mi)
82 | # memory: ""
83 | # limits:
84 | #     weave-scope-agent.resources.limits.cpu: CPU limit in millicores (m)
85 | # cpu: ""
86 | # weave-scope-agent.resources.limits.memory: memory limit in MiB (Mi)
87 | # memory: ""
88 |
89 | # weave-scope-cluster-agent.* controls how the Weave Scope cluster agent is installed
90 | weave-scope-cluster-agent:
91 | enabled: true
92 | flags: []
93 | # weave-scope-cluster-agent.scopeFrontendAddr: the host:port of a Scope frontend to send data to
94 | # weave-scope-cluster-agent.scopeFrontendAddr is only needed for some cases where the frontend is deployed separately from the agent
95 | scopeFrontendAddr: ""
96 | # weave-scope-cluster-agent.probeToken: the token used to connect to Weave Cloud
97 | # weave-scope-cluster-agent.probeToken is not needed for connecting to non-cloud Scope frontends
98 | probeToken: ""
99 | # weave-scope-cluster-agent.rbac.*: controls RBAC resource creation/use
100 | rbac:
101 | # weave-scope-cluster-agent.rbac.create: whether RBAC resources should be created
102 | # weave-scope-cluster-agent.rbac.create *must* be set to false if RBAC is not enabled in the cluster
103 | # weave-scope-cluster-agent.rbac.create *may* be set to false in an RBAC-enabled cluster to allow for external management of RBAC
104 | create: true
105 | # Enabling readOnly adds --probe.no-controls to args list.
106 | readOnly: false
107 | serviceAccount:
108 | # Specifies whether a ServiceAccount should be created
109 | create: true
110 | # The name of the ServiceAccount to use.
111 | # If not set and create is true, a name is generated using the fullname template
112 | # name: "weave-scope"
113 | # weave-scope-cluster-agent.resources.*: controls requests/limits for the agent
114 | # weave-scope-cluster-agent.resources.* values are all optional but should not be set to empty values
115 | # resources:
116 | # requests:
117 | #     weave-scope-cluster-agent.resources.requests.cpu: CPU req. in millicores (m)
118 | # cpu: ""
119 | # weave-scope-cluster-agent.resources.requests.memory: memory req. in MiB (Mi)
120 | # memory: ""
121 | # limits:
122 | #     weave-scope-cluster-agent.resources.limits.cpu: CPU limit in millicores (m)
123 | # cpu: ""
124 | # weave-scope-cluster-agent.resources.limits.memory: memory limit in MiB (Mi)
125 | # memory: ""
126 |
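127 | # Illustrative override file for `helm install -f my-values.yaml`; the keys
128 | # are the ones documented above, the values are assumptions:
129 | # global:
130 | #   service:
131 | #     type: "NodePort"
132 | # weave-scope-agent:
133 | #   readOnly: true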
--------------------------------------------------------------------------------
/logging-and-monitoring/weave-scope/README.md:
--------------------------------------------------------------------------------
1 | # ⚠️ Repo Archive Notice
2 |
3 | As of Nov 13, 2020, charts in this repo will no longer be updated.
4 | For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/).
5 |
6 | # Weave Scope
7 |
8 | ## DEPRECATION NOTICE
9 |
10 | This chart is deprecated and no longer supported.
11 |
12 | ## About this chart
13 |
14 | This chart contains two subcharts (*weave-scope-frontend* and *weave-scope-agent*) which deploy the corresponding components of Weave Scope, an interactive container monitoring and visualization application.
15 |
16 | Either subchart can be deployed on its own (set the "enabled" value to "false" for the chart you want to suppress) or the two can be deployed together (the default).
17 |
18 | ## Compatibility notes
19 |
20 | * This chart is designed and tested with Weave Scope 1.6.2 and 1.6.5 and Kubernetes 1.7.
21 | * Weave Scope 1.6.2 was originally released by WeaveWorks for Kubernetes 1.6 but seems to work fine on 1.7.
22 | * On Kubernetes 1.6 Weave Scope versions as old as 1.3.0 will probably work.
23 |
24 | ## Prerequisites
25 |
26 | * The service account, cluster role, cluster role binding and service specified in the rendered version of this chart must not already exist.
27 |
28 | ## Installing the Chart
29 |
30 | To install the chart with the release name `my-release`:
31 |
32 | ```bash
33 | $ helm install --name my-release stable/weave-scope
34 | ```
35 |
36 | The command deploys Weave Scope on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
37 |
38 | > **Tip**: List all releases using `helm list`
39 |
40 | ## Uninstalling the Chart
41 |
42 | To uninstall/delete the `my-release` deployment:
43 |
44 | ```bash
45 | $ helm delete my-release
46 | ```
47 |
48 | The command removes all the Kubernetes components associated with the chart and deletes the release.
49 |
50 |
51 | ## Configuration
52 |
53 | Note that most of this documentation is repeated in `values.yaml`; if you're in a hurry you can skip this part here and read it there. Values with no default noted have no default.
54 |
55 | ### Global values
56 |
57 | | Parameter | Description | Default |
58 | |----------:|:------------|:--------|
59 | | **image.*** | the parameters of the image pulls for this release | |
60 | | **image.repository** | the image that will be used for this release (required) | `weaveworks/scope` |
61 | | **image.tag** | the version of Weave Scope desired for this release (required) | `1.12.0` |
62 | | **image.pullPolicy** | the imagePullPolicy for the container (required): IfNotPresent, Always, or Never | `IfNotPresent` |
63 | | **service.*** | the configuration of the service used to access the frontend | |
64 | | **service.name** | the short name desired for the frontend service (optional, but if not specified by the user a value will be calculated) -- this is a global so we can access its value easily from the agent subchart | `weave-scope-app` |
65 | | **service.port** | the port exposed by the Scope frontend service (required if weave-scope-frontend is enabled) -- this is a global so we can access its value easily from the agent subchart | `80` |
66 | | **service.type** | the type of the frontend service (required if weave-scope-frontend is enabled): ClusterIP, NodePort or LoadBalancer -- this is a global to keep it with the other values for configuring the frontend service | `ClusterIP` |
67 |
68 |
69 | ### Weave Scope frontend values
70 |
71 | The **weave-scope-frontend** section controls how the Scope frontend is installed.
72 |
73 | | Parameter | Description | Default |
74 | |----------:|:------------|:--------|
75 | | **enabled** | controls whether the frontend is deployed | `true` |
76 | | **flags** | adds extra flag options for container | [] |
77 | | **resources.*** | controls requests/limits for the frontend (these values are all optional) | |
78 | | **resources.requests.cpu** | CPU request in millicores (m) | |
79 | | **resources.requests.memory** | memory request in MiB (Mi) | |
80 | | **resources.limits.cpu** | CPU limit in millicores (m) | |
81 | | **resources.limits.memory** | memory limit in MiB (Mi) | |
82 | | **ingress.enabled** | Enables Ingress for weave-scope-frontend | false |
83 | | **ingress.annotations** | Ingress annotations | {} |
84 | | **ingress.paths** | Ingress paths | [] |
85 | | **ingress.hosts** | Ingress accepted hostnames | nil |
86 | | **ingress.tls** | Ingress TLS configuration | [] |
87 |
88 | ### Weave Scope agent
89 |
90 | The **weave-scope-agent** section controls how the Weave Scope node agent pods are installed.
91 |
92 | | Parameter | Description | Default |
93 | |----------:|:------------|:--------|
94 | | **enabled** | controls whether the agent is deployed | `true` |
95 | | **flags** | adds extra flag options for container | [] |
96 | | **dockerBridge** | the name of the Docker bridge interface | `docker0` |
97 | | **scopeFrontendAddr** | the host:port of a Scope frontend to send data to -- this is only needed in cases where the frontend is deployed separately from the agent (e.g. an install outside the cluster or a pre-existing install inside it) | |
98 | | **probeToken** | the token used to connect to Weave Cloud -- this is not needed for connecting to non-cloud Scope frontends | |
99 | | **priorityClassName** | The priorityClassName used for the Daemonset | |
100 | | **readOnly** | disables all controls (e.g. start/stop, terminal, logs, etc.) | `false` |
101 | | **resources.*** | controls requests/limits for the agent (these values are all optional) | |
102 | | **resources.requests.cpu** | CPU request in millicores (m) | |
103 | | **resources.requests.memory** | memory request in MiB (Mi) | |
104 | | **resources.limits.cpu** | CPU limit in millicores (m) | |
105 | | **resources.limits.memory** | memory limit in MiB (Mi) | |
106 |
107 | ### Weave Scope cluster agent
108 |
109 | The **weave-scope-cluster-agent** section controls how the Weave Scope cluster agent is installed.
110 |
111 | | Parameter | Description | Default |
112 | |----------:|:------------|:--------|
113 | | **enabled** | controls whether the agent is deployed | `true` |
114 | | **flags** | adds extra flag options for container | [] |
115 | | **scopeFrontendAddr** | the host:port of a Scope frontend to send data to -- this is only needed in cases where the frontend is deployed separately from the agent (e.g. an install outside the cluster or a pre-existing install inside it) | |
116 | | **probeToken** | the token used to connect to Weave Cloud -- this is not needed for connecting to non-cloud Scope frontends | |
117 | | **rbac.*** | controls RBAC resource creation/use | |
118 | | **rbac.create** | whether RBAC resources should be created (required) -- this **must** be set to false if RBAC is not enabled in the cluster; it *may* be set to false in an RBAC-enabled cluster to allow for external management of RBAC | `true` |
119 | | **readOnly** | disables all controls (e.g. start/stop, terminal, logs, etc.) | `false` |
120 | | **serviceAccount.create** | whether a new service account name that the agent will use should be created. | `true` |
121 | | **serviceAccount.name** | service account to be used. If not set and serviceAccount.create is `true` a name is generated using the fullname template. | |
122 | | **resources.*** | controls requests/limits for the agent (these values are all optional) | |
123 | | **resources.requests.cpu** | CPU request in millicores (m) | |
124 | | **resources.requests.memory** | memory request in MiB (Mi) | |
125 | | **resources.limits.cpu** | CPU limit in millicores (m) | |
126 | | **resources.limits.memory** | memory limit in MiB (Mi) | |
127 |
128 | ## Other notes
129 |
130 | * The Deployment for the frontend specifies a single replica; multiple replicas of the frontend, although they may run, probably will not work as expected since different agents may end up talking to different replicas.
131 |
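132 | ## Customizing the install
133 | 
134 | As a sketch (using only parameters documented above), the frontend service
135 | type can be changed at install time:
136 | 
137 | ```bash
138 | $ helm install --name my-release --set global.service.type=NodePort stable/weave-scope
139 | ```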
--------------------------------------------------------------------------------
/helm/chartmuseum/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "chartmuseum.fullname" . }}
5 | annotations:
6 | {{ toYaml .Values.deployment.annotations | indent 4 }}
7 | labels:
8 | {{ include "chartmuseum.labels.standard" . | indent 4 }}
9 | {{- if .Values.deployment.labels }}
10 | {{ toYaml .Values.deployment.labels | indent 4 }}
11 | {{- end }}
12 | spec:
13 |   selector:
14 |     matchLabels:
15 |       app: {{ template "chartmuseum.name" . }}
16 |       release: {{ .Release.Name | quote }}
17 | {{- if .Values.deployment.labels }}
18 | {{ toYaml .Values.deployment.labels | indent 6 }}
19 | {{- end }}
20 | {{- /* extra match labels are merged into the selector above; a Deployment
21 |    may declare spec.selector only once */}}
22 | {{- if .Values.deployment.matchlabels }}
23 | {{ toYaml .Values.deployment.matchlabels | indent 6 }}
24 | {{- end }}
25 |   replicas: {{ .Values.replicaCount }}
26 |   strategy:
27 | {{ toYaml .Values.strategy | indent 4 }}
28 |   revisionHistoryLimit: 10
29 | template:
30 | metadata:
31 | name: {{ include "chartmuseum.fullname" . }}
32 | annotations:
33 | {{ toYaml .Values.replica.annotations | indent 8 }}
34 | labels:
35 | app: {{ template "chartmuseum.name" . }}
36 | release: {{ .Release.Name | quote }}
37 | {{- if .Values.deployment.labels }}
38 | {{ toYaml .Values.deployment.labels | indent 8 }}
39 | {{- end }}
40 | spec:
41 | {{- if .Values.priorityClassName }}
42 | priorityClassName: "{{ .Values.priorityClassName }}"
43 | {{- end }}
44 | {{- if .Values.securityContext.enabled }}
45 | securityContext:
46 | fsGroup: {{ .Values.securityContext.fsGroup }}
47 | {{- if .Values.securityContext.runAsNonRoot }}
48 | runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }}
49 | {{- end }}
50 | {{- if .Values.securityContext.supplementalGroups }}
51 | supplementalGroups: {{ .Values.securityContext.supplementalGroups }}
52 | {{- end }}
53 | {{- else if .Values.persistence.enabled }}
54 | initContainers:
55 | - name: volume-permissions
56 | image: {{ template "chartmuseum.volumePermissions.image" . }}
57 | imagePullPolicy: "{{ .Values.volumePermissions.image.pullPolicy }}"
58 | securityContext:
59 | {{- toYaml .Values.containerSecurityContext | nindent 10 }}
60 | command: ['sh', '-c', 'chown -R {{ .Values.securityContext.fsGroup }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.path }}']
61 | volumeMounts:
62 | - mountPath: {{ .Values.persistence.path }}
63 | name: storage-volume
64 | {{- end }}
65 | {{- include "chartmuseum.imagePullSecrets" . | indent 6 }}
66 | containers:
67 | - name: {{ .Chart.Name }}
68 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
69 | imagePullPolicy: {{ .Values.image.pullPolicy }}
70 | securityContext:
71 | {{- toYaml .Values.containerSecurityContext | nindent 10 }}
72 | env:
73 | {{- range $name, $value := .Values.env.open }}
74 | {{- if not (empty $value) }}
75 | - name: {{ $name | quote }}
76 | value: {{ $value | quote }}
77 | {{- end }}
78 | {{- end }}
79 | {{- range $name, $value := .Values.env.field }}
80 | {{- if not ( empty $value) }}
81 | - name: {{ $name | quote }}
82 | valueFrom:
83 | fieldRef:
84 | fieldPath: {{ $value | quote }}
85 | {{- end }}
86 | {{- end }}
87 | {{- if .Values.gcp.secret.enabled }}
88 | - name: GOOGLE_APPLICATION_CREDENTIALS
89 | value: "/etc/secrets/google/credentials.json"
90 | {{- end }}
91 | {{- if .Values.env.existingSecret }}
92 | {{- $secret_name := .Values.env.existingSecret }}
93 | {{- range $name, $key := .Values.env.existingSecretMappings }}
94 | {{- if not ( empty $key) }}
95 | - name: {{ $name | quote }}
96 | valueFrom:
97 | secretKeyRef:
98 | name: {{ $secret_name | quote }}
99 | key: {{ $key | quote }}
100 | {{- end }}
101 | {{- end }}
102 | {{- else }}
103 | {{- $secret_name := include "chartmuseum.fullname" . }}
104 | {{- range $name, $value := .Values.env.secret }}
105 | {{- if not ( empty $value) }}
106 | - name: {{ $name | quote }}
107 | valueFrom:
108 | secretKeyRef:
109 | name: {{ $secret_name }}
110 | key: {{ $name | quote }}
111 | {{- end }}
112 | {{- end }}
113 | {{- end }}
114 | {{- if .Values.bearerAuth.secret.enabled }}
115 | - name: AUTH_CERT_PATH
116 | value: /var/keys/public-key.pem
117 | {{- end }}
118 | args:
119 | - --port=8080
120 | {{- if eq .Values.env.open.STORAGE "local" }}
121 | - --storage-local-rootdir={{ .Values.persistence.path }}
122 | {{- end }}
123 | {{- if .Values.extraArgs }}
124 | {{ toYaml .Values.extraArgs | indent 8 }}
125 | {{- end }}
126 | ports:
127 | - name: http
128 | containerPort: 8080
129 | livenessProbe:
130 | httpGet:
131 | path: {{ .Values.env.open.CONTEXT_PATH }}/health
132 | port: http
133 | {{ toYaml .Values.probes.liveness | indent 10 }}
134 | readinessProbe:
135 | httpGet:
136 | path: {{ .Values.env.open.CONTEXT_PATH }}/health
137 | port: http
138 | {{ toYaml .Values.probes.readiness | indent 10 }}
139 | volumeMounts:
140 | {{- if eq .Values.env.open.STORAGE "local" }}
141 | - mountPath: {{ .Values.persistence.path }}
142 | name: storage-volume
143 | {{- end }}
144 | {{- if .Values.gcp.secret.enabled }}
145 | - mountPath: /etc/secrets/google
146 | name: {{ include "chartmuseum.fullname" . }}-gcp
147 | {{- end }}
148 | {{- if .Values.oracle.secret.enabled }}
149 | - mountPath: /home/chartmuseum/.oci
150 | name: {{ include "chartmuseum.fullname" . }}-oracle
151 | {{- end }}
152 | {{- if .Values.bearerAuth.secret.enabled }}
153 | - name: public-key
154 | mountPath: /var/keys
155 | readOnly: true
156 | {{- end }}
157 | {{- with .Values.resources }}
158 | resources:
159 | {{ toYaml . | indent 10 }}
160 | {{- end }}
161 | {{- with .Values.nodeSelector }}
162 | nodeSelector:
163 | {{ toYaml . | indent 8 }}
164 | {{- end }}
165 | {{- with .Values.affinity }}
166 | affinity:
167 | {{ toYaml . | indent 8 }}
168 | {{- end }}
169 | {{- with .Values.tolerations }}
170 | tolerations:
171 | {{ toYaml . | indent 8 }}
172 | {{- end }}
173 | {{- if .Values.deployment.schedulerName }}
174 | schedulerName: {{ .Values.deployment.schedulerName }}
175 | {{- end -}}
176 | {{- if and .Values.serviceAccount.create .Values.serviceAccount.name }}
177 | serviceAccountName: {{ .Values.serviceAccount.name }}
178 | {{- else if .Values.serviceAccount.create }}
179 | serviceAccountName: {{ include "chartmuseum.fullname" . }}
180 | {{- else if .Values.serviceAccount.name }}
181 | serviceAccountName: {{ .Values.serviceAccount.name }}
182 | {{- end }}
183 | volumes:
184 | - name: storage-volume
185 | {{- if .Values.persistence.enabled }}
186 | persistentVolumeClaim:
187 | claimName: {{ .Values.persistence.existingClaim | default (include "chartmuseum.fullname" .) }}
188 | {{- else }}
189 | emptyDir: {}
190 | {{- end -}}
191 | {{ if .Values.gcp.secret.enabled }}
192 | - name: {{ include "chartmuseum.fullname" . }}-gcp
193 | secret:
194 | {{ if .Values.env.secret.GOOGLE_CREDENTIALS_JSON }}
195 | secretName: {{ include "chartmuseum.fullname" . }}
196 | items:
197 | - key: GOOGLE_CREDENTIALS_JSON
198 | path: credentials.json
199 | {{ else }}
200 | secretName: {{ .Values.gcp.secret.name }}
201 | items:
202 | - key: {{ .Values.gcp.secret.key }}
203 | path: credentials.json
204 | {{ end }}
205 | {{ end }}
206 | {{ if .Values.oracle.secret.enabled }}
207 | - name: {{ include "chartmuseum.fullname" . }}-oracle
208 | secret:
209 | secretName: {{ .Values.oracle.secret.name }}
210 | items:
211 | - key: {{ .Values.oracle.secret.config }}
212 | path: config
213 | - key: {{ .Values.oracle.secret.key_file }}
214 | path: oci.key
215 | {{ end }}
216 | {{- if .Values.bearerAuth.secret.enabled }}
217 | - name: public-key
218 | secret:
219 | secretName: {{ .Values.bearerAuth.secret.publicKeySecret }}
220 | {{- end }}
221 |
--------------------------------------------------------------------------------
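A note on the env loops in the deployment template above: range walks .Values.env.open in key-sorted order, and the guard "if not (empty $value)" drops every entry whose value is empty, false, or 0 (Sprig's empty treats all three as empty), so options that default to false are expressed by omitting the variable rather than rendering it. A minimal sketch of the container env block this yields with the default values.yaml that follows; only the six non-empty, non-false entries survive:

    env:
    - name: "CHART_POST_FORM_FIELD_NAME"
      value: "chart"
    - name: "DISABLE_API"
      value: "true"
    - name: "DISABLE_METRICS"
      value: "true"
    - name: "LOG_JSON"
      value: "true"
    - name: "PROV_POST_FORM_FIELD_NAME"
      value: "prov"
    - name: "STORAGE"
      value: "local"
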
/helm/chartmuseum/values.yaml:
--------------------------------------------------------------------------------
1 | extraArgs:
2 | # - --storage-timestamp-tolerance 1s
3 | replicaCount: 1
4 | strategy:
5 | type: RollingUpdate
6 | rollingUpdate:
7 | maxUnavailable: 0
8 | image:
9 | repository: chartmuseum/chartmuseum
10 | tag: v0.12.0
11 | pullPolicy: IfNotPresent
12 | secret:
13 | labels: {}
14 | env:
15 | open:
16 | # storage backend, can be one of: local, alibaba, amazon, google, microsoft, oracle
17 | STORAGE: local
18 | # oss bucket to store charts for alibaba storage backend
19 | STORAGE_ALIBABA_BUCKET:
20 | # prefix to store charts for alibaba storage backend
21 | STORAGE_ALIBABA_PREFIX:
22 | # oss endpoint to store charts for alibaba storage backend
23 | STORAGE_ALIBABA_ENDPOINT:
24 | # server side encryption algorithm for alibaba storage backend, can be one
25 | # of: AES256 or KMS
26 | STORAGE_ALIBABA_SSE:
27 | # s3 bucket to store charts for amazon storage backend
28 | STORAGE_AMAZON_BUCKET:
29 | # prefix to store charts for amazon storage backend
30 | STORAGE_AMAZON_PREFIX:
31 | # region of s3 bucket to store charts
32 | STORAGE_AMAZON_REGION:
33 | # alternative s3 endpoint
34 | STORAGE_AMAZON_ENDPOINT:
35 | # server side encryption algorithm
36 | STORAGE_AMAZON_SSE:
37 | # gcs bucket to store charts for google storage backend
38 | STORAGE_GOOGLE_BUCKET:
39 | # prefix to store charts for google storage backend
40 | STORAGE_GOOGLE_PREFIX:
41 | # container to store charts for microsoft storage backend
42 | STORAGE_MICROSOFT_CONTAINER:
43 | # prefix to store charts for microsoft storage backend
44 | STORAGE_MICROSOFT_PREFIX:
45 | # container to store charts for openstack storage backend
46 | STORAGE_OPENSTACK_CONTAINER:
47 | # prefix to store charts for openstack storage backend
48 | STORAGE_OPENSTACK_PREFIX:
49 | # region of openstack container
50 | STORAGE_OPENSTACK_REGION:
51 | # path to a CA cert bundle for your openstack endpoint
52 | STORAGE_OPENSTACK_CACERT:
54 |     # compartment id for oracle storage backend
54 | STORAGE_ORACLE_COMPARTMENTID:
55 | # oci bucket to store charts for oracle storage backend
56 | STORAGE_ORACLE_BUCKET:
57 | # prefix to store charts for oracle storage backend
58 | STORAGE_ORACLE_PREFIX:
59 | # form field which will be queried for the chart file content
60 | CHART_POST_FORM_FIELD_NAME: chart
61 | # form field which will be queried for the provenance file content
62 | PROV_POST_FORM_FIELD_NAME: prov
63 | # levels of nested repos for multitenancy. The default depth is 0 (singletenant server)
64 | DEPTH: 0
65 | # show debug messages
66 | DEBUG: false
67 | # output structured logs as json
68 | LOG_JSON: true
69 | # disable use of index-cache.yaml
70 | DISABLE_STATEFILES: false
71 | # disable Prometheus metrics
72 | DISABLE_METRICS: true
73 | # disable all routes prefixed with /api
74 | DISABLE_API: true
75 | # allow chart versions to be re-uploaded
76 | ALLOW_OVERWRITE: false
77 | # absolute url for .tgzs in index.yaml
78 | CHART_URL:
79 | # allow anonymous GET operations when auth is used
80 | AUTH_ANONYMOUS_GET: false
81 | # sets the base context path
82 | CONTEXT_PATH:
83 | # parallel scan limit for the repo indexer
84 | INDEX_LIMIT: 0
85 | # cache store, can be one of: redis (leave blank for inmemory cache)
86 | CACHE:
87 | # address of Redis service (host:port)
88 | CACHE_REDIS_ADDR:
89 | # Redis database to be selected after connect
90 | CACHE_REDIS_DB: 0
91 | # enable bearer auth
92 | BEARER_AUTH: false
93 | # auth realm used for bearer auth
94 | AUTH_REALM:
95 | # auth service used for bearer auth
96 | AUTH_SERVICE:
97 | field:
98 | # POD_IP: status.podIP
99 | secret:
100 | # username for basic http authentication
101 | BASIC_AUTH_USER:
102 | # password for basic http authentication
103 | BASIC_AUTH_PASS:
104 | # GCP service account json file
105 | GOOGLE_CREDENTIALS_JSON:
106 | # Redis requirepass server configuration
107 | CACHE_REDIS_PASSWORD:
108 |   # Name of an existing secret to get the secret values from
109 | existingSecret:
110 |   # Maps environment variable names to key names in the existing secret
111 | existingSecretMappings:
112 | # username for basic http authentication
113 | BASIC_AUTH_USER:
114 | # password for basic http authentication
115 | BASIC_AUTH_PASS:
116 | # GCP service account json file
117 | GOOGLE_CREDENTIALS_JSON:
118 | # Redis requirepass server configuration
119 | CACHE_REDIS_PASSWORD:
120 |
121 | deployment:
122 |   # Define scheduler name. Uses 'default' if empty
123 | schedulerName: ""
124 | ## Chartmuseum Deployment annotations
125 | annotations: {}
126 | # name: value
127 | labels: {}
128 | # name: value
129 | matchlabels: {}
130 | # name: value
131 | replica:
132 | ## Chartmuseum Replicas annotations
133 | annotations: {}
134 |   ## Read more about kube2iam, which can provide access to S3: https://github.com/jtblin/kube2iam
135 | # iam.amazonaws.com/role: role-arn
136 | service:
137 | servicename:
138 | type: ClusterIP
139 | externalTrafficPolicy: Local
140 | ## Limits which cidr blocks can connect to service's load balancer
141 | ## Only valid if service.type: LoadBalancer
142 | loadBalancerSourceRanges: []
143 | # clusterIP: None
144 | externalPort: 8080
145 | nodePort:
146 | annotations: {}
147 | labels: {}
148 |
149 | serviceMonitor:
150 | enabled: false
151 | # namespace: prometheus
152 | labels: {}
153 | metricsPath: "/metrics"
154 | # timeout: 60
155 | # interval: 60
156 |
157 | resources: {}
158 | # limits:
159 | # cpu: 100m
160 | # memory: 128Mi
161 | # requests:
162 | # cpu: 80m
163 | # memory: 64Mi
164 |
165 | probes:
166 | liveness:
167 | initialDelaySeconds: 5
168 | periodSeconds: 10
169 | timeoutSeconds: 1
170 | successThreshold: 1
171 | failureThreshold: 3
172 | readiness:
173 | initialDelaySeconds: 5
174 | periodSeconds: 10
175 | timeoutSeconds: 1
176 | successThreshold: 1
177 | failureThreshold: 3
178 |
179 | serviceAccount:
180 | create: false
181 | # name:
182 | ## Annotations for the Service Account
183 | annotations: {}
184 |
185 | # UID/GID 1000 is the default user "chartmuseum" used in
186 | # the container image starting in v0.8.0 and above. This
187 | # is required for local persistent storage. If your cluster
188 | # does not allow this, try setting securityContext: {}
189 | securityContext:
190 | enabled: true
191 | fsGroup: 1000
192 | ## Optionally, specify supplementalGroups and/or
193 | ## runAsNonRoot for security purposes
194 | # runAsNonRoot: true
195 | # supplementalGroups: [1000]
196 |
197 | containerSecurityContext: {}
198 |
199 | priorityClassName: ""
200 |
201 | nodeSelector: {}
202 |
203 | tolerations: []
204 |
205 | affinity: {}
206 |
207 | persistence:
208 | enabled: false
209 | accessMode: ReadWriteOnce
210 | size: 8Gi
211 | labels: {}
212 | path: /storage
213 | # name: value
214 | ## A manually managed Persistent Volume and Claim
215 | ## Requires persistence.enabled: true
216 | ## If defined, PVC must be created manually before volume will be bound
217 | # existingClaim:
218 |
219 | ## Chartmuseum data Persistent Volume Storage Class
220 |   ## If defined, storageClassName: <storageClass>
221 | ## If set to "-", storageClassName: "", which disables dynamic provisioning
222 | ## If undefined (the default) or set to null, no storageClassName spec is
223 | ## set, choosing the default provisioner. (gp2 on AWS, standard on
224 | ## GKE, AWS & OpenStack)
225 | ##
226 | # storageClass: "-"
227 | # volumeName:
228 | pv:
229 | enabled: false
230 | pvname:
231 | capacity:
232 | storage: 8Gi
233 | accessMode: ReadWriteOnce
234 | nfs:
235 | server:
236 | path:
237 |
238 | ## Init containers parameters:
239 | ## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup
240 | ##
241 | volumePermissions:
242 | image:
243 | registry: docker.io
244 | repository: bitnami/minideb
245 | tag: buster
246 | pullPolicy: Always
247 | ## Optionally specify an array of imagePullSecrets.
248 | ## Secrets must be manually created in the namespace.
249 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
250 | ##
251 | # pullSecrets:
252 | # - myRegistryKeySecretName
253 |
254 | ## Ingress for load balancer
255 | ingress:
256 | enabled: false
257 | ## Chartmuseum Ingress labels
258 | ##
259 | # labels:
260 | # dns: "route53"
261 |
262 | ## Chartmuseum Ingress annotations
263 | ##
264 | # annotations:
265 | # kubernetes.io/ingress.class: nginx
266 | # kubernetes.io/tls-acme: "true"
267 |
268 | ## Chartmuseum Ingress hostnames
269 | ## Must be provided if Ingress is enabled
270 | ##
271 | # hosts:
272 | # - name: chartmuseum.domain1.com
273 | # path: /
274 | # tls: false
275 | # - name: chartmuseum.domain2.com
276 | # path: /
277 | #
278 | # ## Set this to true in order to enable TLS on the ingress record
279 | # tls: true
280 | #
281 | # ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS
282 | # ## Secrets must be added manually to the namespace
283 | # tlsSecret: chartmuseum.domain2-tls
284 |
285 | # Adding secrets to tiller is not a great option, so if you want to use an existing
286 | # secret that contains the json file, you can use the following entries
287 | gcp:
288 | secret:
289 | enabled: false
290 | # Name of the secret that contains the encoded json
291 | name:
292 | # Secret key that holds the json value.
293 | key: credentials.json
294 | oracle:
295 | secret:
296 | enabled: false
297 | # Name of the secret that contains the encoded config and key
298 | name:
299 | # Secret key that holds the oci config
300 | config: config
301 | # Secret key that holds the oci private key
302 | key_file: key_file
303 | bearerAuth:
304 | secret:
305 | enabled: false
306 | publicKeySecret: chartmuseum-public-key
307 |
--------------------------------------------------------------------------------
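To exercise the chart with the values above, a small override file plus an install is the usual path. A hedged sketch (the file name and release name are illustrative; the --name flag assumes the Helm 2 CLI used elsewhere in this repo). Because of the empty-value guard noted earlier, DISABLE_API: false is expressed by the template simply omitting the variable, which happens to match ChartMuseum's own default:

    # my-values.yaml (illustrative override)
    env:
      open:
        DISABLE_API: false   # dropped by the env loop, so the /api routes come back
    persistence:
      enabled: true          # back /storage with a PVC instead of an emptyDir

    helm install ./chartmuseum --name chartmuseum -f my-values.yaml
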
/on-premise/kubespray/test_env/group_vars/k8s-cluster/k8s-cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kubernetes configuration dirs and system namespace.
3 | # These are where all the additional config stuff goes
4 | # that Kubernetes normally puts in /srv/kubernetes.
5 | # This puts them in a sane location and namespace.
6 | # Editing these values will almost surely break something.
7 | kube_config_dir: /etc/kubernetes
8 | kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
9 | kube_manifest_dir: "{{ kube_config_dir }}/manifests"
10 |
11 | # This is where all the cert scripts and certs will be located
12 | kube_cert_dir: "{{ kube_config_dir }}/ssl"
13 |
14 | # This is where all of the bearer tokens will be stored
15 | kube_token_dir: "{{ kube_config_dir }}/tokens"
16 |
17 | # This is where to save basic auth file
18 | kube_users_dir: "{{ kube_config_dir }}/users"
19 |
20 | kube_api_anonymous_auth: true
21 |
22 | ## Change this to use another Kubernetes version, e.g. a current beta release
23 | kube_version: v1.16.6
24 |
25 | # kubernetes image repo define
26 | kube_image_repo: "{{ gcr_image_repo }}/google-containers"
27 |
28 | # Where the binaries will be downloaded.
29 | # Note: ensure that you have enough disk space (about 1G)
30 | local_release_dir: "/tmp/releases"
31 | # Random shifts for retrying failed ops like pushing/downloading
32 | retry_stagger: 5
33 |
34 | # This is the group that the cert creation scripts chgrp the
35 | # cert files to. Not really changeable...
36 | kube_cert_group: kube-cert
37 |
38 | # Cluster Loglevel configuration
39 | kube_log_level: 2
40 |
41 | # Directory where credentials will be stored
42 | credentials_dir: "{{ inventory_dir }}/credentials"
43 |
44 | # Users to create for basic auth in Kubernetes API via HTTP
45 | # Optionally add groups for user
46 | kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
47 | kube_users:
48 | kube:
49 | pass: "{{kube_api_pwd}}"
50 | role: admin
51 | groups:
52 | - system:masters
53 |
54 | ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
55 | # kube_oidc_auth: false
56 | # kube_basic_auth: false
57 | # kube_token_auth: false
58 |
59 |
60 | ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
61 | ## To use OpenID you additionally have to deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
62 |
63 | # kube_oidc_url: https:// ...
64 | # kube_oidc_client_id: kubernetes
65 | ## Optional settings for OIDC
66 | # kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
67 | # kube_oidc_username_claim: sub
68 | # kube_oidc_username_prefix: oidc:
69 | # kube_oidc_groups_claim: groups
70 | # kube_oidc_groups_prefix: oidc:
71 |
72 |
73 | # Choose network plugin (cilium, calico, contiv, weave or flannel. Use cni for generic cni plugin)
74 | # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
75 | kube_network_plugin: calico
76 |
77 | # Setting multi_networking to true will install Multus: https://github.com/intel/multus-cni
78 | kube_network_plugin_multus: false
79 |
80 | # Kubernetes internal network for services, unused block of space.
81 | kube_service_addresses: 10.233.0.0/18
82 |
83 | # internal network. When used, it will assign IP
84 | # addresses from this range to individual pods.
85 | # This network must be unused in your network infrastructure!
86 | kube_pods_subnet: 10.233.64.0/18
87 |
88 | # internal network node size allocation (optional). This is the size allocated
89 | # to each node on your network. With these defaults (a /18 pod subnet split
90 | # into /24s) you have room for 64 nodes with 254 pods per node.
91 | kube_network_node_prefix: 24
92 |
93 | # The port the API Server will be listening on.
94 | kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
95 | kube_apiserver_port: 6443 # (https)
96 | # kube_apiserver_insecure_port: 8080 # (http)
97 | # Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
98 | kube_apiserver_insecure_port: 0 # (disabled)
99 |
100 | # Kube-proxy proxyMode configuration.
101 | # Can be ipvs, iptables
102 | kube_proxy_mode: ipvs
103 |
104 | # configure arp_ignore and arp_announce to avoid answering ARP queries from kube-ipvs0 interface
105 | # must be set to true for MetalLB to work
106 | kube_proxy_strict_arp: false
107 |
108 | # A string slice of values which specify the addresses to use for NodePorts.
109 | # Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
110 | # The default empty string slice ([]) means to use all local addresses.
111 | # kube_proxy_nodeport_addresses_cidr is retained for legacy config
112 | kube_proxy_nodeport_addresses: >-
113 | {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
114 | [{{ kube_proxy_nodeport_addresses_cidr }}]
115 | {%- else -%}
116 | []
117 | {%- endif -%}
118 |
119 | # If non-empty, will use this string as identification instead of the actual hostname
120 | # kube_override_hostname: >-
121 | # {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
122 | # {%- else -%}
123 | # {{ inventory_hostname }}
124 | # {%- endif -%}
125 |
126 | ## Encrypting Secret Data at Rest (experimental)
127 | kube_encrypt_secret_data: false
128 |
129 | # DNS configuration.
130 | # Kubernetes cluster name, also will be used as DNS domain
131 | cluster_name: cluster.local
132 | # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
133 | ndots: 2
134 | # Can be coredns, coredns_dual, manual or none
135 | dns_mode: coredns
136 | # Set manual server if using a custom cluster DNS server
137 | # manual_dns_server: 10.x.x.x
138 | # Enable nodelocal dns cache
139 | enable_nodelocaldns: true
140 | nodelocaldns_ip: 169.254.25.10
141 | nodelocaldns_health_port: 9254
142 | # Enable k8s_external plugin for CoreDNS
143 | enable_coredns_k8s_external: false
144 | coredns_k8s_external_zone: k8s_external.local
145 | # Enable endpoint_pod_names option for kubernetes plugin
146 | enable_coredns_k8s_endpoint_pod_names: false
147 |
148 | # Can be docker_dns, host_resolvconf or none
149 | resolvconf_mode: docker_dns
151 | # Deploy netchecker app to verify DNS resolution as an HTTP service
151 | deploy_netchecker: false
152 | # Ip address of the kubernetes skydns service
153 | skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
154 | skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
155 | dns_domain: "{{ cluster_name }}"
156 |
157 | ## Container runtime
158 | ## docker for docker, crio for cri-o and containerd for containerd.
159 | container_manager: docker
160 |
161 | ## Settings for containerized control plane (etcd/kubelet/secrets)
162 | etcd_deployment_type: docker
163 | kubelet_deployment_type: host
164 | helm_deployment_type: host
165 |
166 | # Enable kubeadm experimental control plane
167 | kubeadm_control_plane: false
168 | kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}"
169 |
170 | # K8s image pull policy (imagePullPolicy)
171 | k8s_image_pull_policy: IfNotPresent
172 |
173 | # audit log for kubernetes
174 | kubernetes_audit: false
175 |
176 | # dynamic kubelet configuration
177 | dynamic_kubelet_configuration: false
178 |
179 | # define kubelet config dir for dynamic kubelet
180 | # kubelet_config_dir:
181 | default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
182 | dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
183 |
184 | # pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
185 | podsecuritypolicy_enabled: false
186 |
187 | # Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
188 | # kubeconfig_localhost: false
189 | # Download kubectl onto the host that runs Ansible in {{ bin_dir }}
190 | # kubectl_localhost: false
191 |
192 |
193 | # Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
194 | # kubelet_cgroups_per_qos: true
195 |
196 | # A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
197 | # Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
198 | # kubelet_enforce_node_allocatable: pods
199 |
200 | ## Optionally reserve resources for OS system daemons.
201 | # system_reserved: true
202 | ## Uncomment to override default values
203 | # system_memory_reserved: 512M
204 | # system_cpu_reserved: 500m
205 | ## Reservation for master hosts
206 | # system_master_memory_reserved: 256M
207 | # system_master_cpu_reserved: 250m
208 |
209 | # An alternative flexvolume plugin directory
210 | # kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
211 |
212 | ## Supplementary addresses that can be added in kubernetes ssl keys.
213 | ## That can be useful for example to setup a keepalived virtual IP
214 | # supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
215 |
216 | ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
217 | ## See https://github.com/kubernetes-sigs/kubespray/issues/2141
218 | ## Set this variable to true to get rid of this issue
219 | volume_cross_zone_attachment: false
220 | # Add a Persistent Volume Storage Class for the corresponding cloud provider (only OpenStack is supported for now)
221 | persistent_volumes_enabled: false
222 |
223 | ## Container Engine Acceleration
224 | ## Enable container acceleration feature, for example use gpu acceleration in containers
225 | # nvidia_accelerator_enabled: true
226 | ## Nvidia GPU driver install. Install will be done by an (init) pod running as a daemonset.
227 | ## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
228 | ## Array with nvidia_gpu_nodes, leave empty or comment out if you don't want to install drivers.
229 | ## Labels and taints won't be set to nodes if they are not in the array.
230 | # nvidia_gpu_nodes:
231 | # - kube-gpu-001
232 | # nvidia_driver_version: "384.111"
233 | ## flavor can be tesla or gtx
234 | # nvidia_gpu_flavor: gtx
235 | ## NVIDIA driver installer images. Change them if you have trouble accessing gcr.io.
236 | # nvidia_driver_install_centos_container: atzedevries/nvidia-centos-driver-installer:2
237 | # nvidia_driver_install_ubuntu_container: gcr.io/google-containers/ubuntu-nvidia-driver-installer@sha256:7df76a0f0a17294e86f691c81de6bbb7c04a1b4b3d4ea4e7e2cccdc42e1f6d63
238 | ## NVIDIA GPU device plugin image.
239 | # nvidia_gpu_device_plugin_container: "k8s.gcr.io/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
240 |
--------------------------------------------------------------------------------
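These group_vars are consumed when the kubespray playbook is run against the test_env inventory. A hedged sketch of a typical invocation from inside the kubespray checkout (cluster.yml is kubespray's standard entry playbook; the -e extra-vars are optional overrides of values already set in the file above):

    ansible-playbook -i test_env/inventory.ini cluster.yml \
      --become --become-user=root \
      -e kube_version=v1.16.6
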
/logging-and-monitoring/fluentd/fluentd-config.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: fluentd-es-config-v0.1.4
5 | namespace: kube-system
6 | labels:
7 | addonmanager.kubernetes.io/mode: Reconcile
8 | data:
9 | system.conf: |-
10 |     <system>
11 |       root_dir /tmp/fluentd-buffers/
12 |     </system>
13 |
14 | containers.input.conf: |-
15 | # This configuration file for Fluentd / td-agent is used
16 | # to watch changes to Docker log files. The kubelet creates symlinks that
17 | # capture the pod name, namespace, container name & Docker container ID
18 | # to the docker logs for pods in the /var/log/containers directory on the host.
19 | # If running this fluentd configuration in a Docker container, the /var/log
20 | # directory should be mounted in the container.
21 | #
22 | # These logs are then submitted to Elasticsearch which assumes the
23 | # installation of the fluent-plugin-elasticsearch & the
24 | # fluent-plugin-kubernetes_metadata_filter plugins.
25 | # See https://github.com/uken/fluent-plugin-elasticsearch &
26 | # https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter for
27 | # more information about the plugins.
28 | #
29 | # Example
30 | # =======
31 | # A line in the Docker log file might look like this JSON:
32 | #
33 | # {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
34 | # "stream":"stderr",
35 | # "time":"2014-09-25T21:15:03.499185026Z"}
36 | #
37 | # The time_format specification below makes sure we properly
38 | # parse the time format produced by Docker. This will be
39 | # submitted to Elasticsearch and should appear like:
40 | # $ curl 'http://elasticsearch-logging:9200/_search?pretty'
41 | # ...
42 | # {
43 | # "_index" : "logstash-2014.09.25",
44 | # "_type" : "fluentd",
45 | # "_id" : "VBrbor2QTuGpsQyTCdfzqA",
46 | # "_score" : 1.0,
47 | # "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
48 | # "stream":"stderr","tag":"docker.container.all",
49 | # "@timestamp":"2014-09-25T22:45:50+00:00"}
50 | # },
51 | # ...
52 | #
53 | # The Kubernetes fluentd plugin is used to write the Kubernetes metadata to the log
54 | # record & add labels to the log record if properly configured. This enables users
55 | # to filter & search logs on any metadata.
56 | # For example a Docker container's logs might be in the directory:
57 | #
58 | # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
59 | #
60 | # and in the file:
61 | #
62 | # 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
63 | #
64 | # where 997599971ee6... is the Docker ID of the running container.
65 | # The Kubernetes kubelet makes a symbolic link to this file on the host machine
66 | # in the /var/log/containers directory which includes the pod name and the Kubernetes
67 | # container name:
68 | #
69 | # synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
70 | # ->
71 | # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
72 | #
73 | # The /var/log directory on the host is mapped to the /var/log directory in the container
74 | # running this instance of Fluentd and we end up collecting the file:
75 | #
76 | # /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
77 | #
78 | # This results in the tag:
79 | #
80 | # var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
81 | #
82 | # The Kubernetes fluentd plugin is used to extract the namespace, pod name & container name
83 | # which are added to the log message as a kubernetes field object & the Docker container ID
84 | # is also added under the docker field object.
85 | # The final tag is:
86 | #
87 | # kubernetes.var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
88 | #
89 |     # And the final log record looks like:
90 | #
91 | # {
92 | # "log":"2014/09/25 21:15:03 Got request with path wombat\n",
93 | # "stream":"stderr",
94 | # "time":"2014-09-25T21:15:03.499185026Z",
95 | # "kubernetes": {
96 | # "namespace": "default",
97 | # "pod_name": "synthetic-logger-0.25lps-pod",
98 | # "container_name": "synth-lgr"
99 | # },
100 | # "docker": {
101 | # "container_id": "997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b"
102 | # }
103 | # }
104 | #
105 | # This makes it easier for users to search for logs by pod name or by
106 | # the name of the Kubernetes container regardless of how many times the
107 |     # Kubernetes pod has been restarted (resulting in several Docker container IDs).
108 |
109 | # Json Log Example:
110 | # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
111 | # CRI Log Example:
112 | # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
113 |     <source>
114 |       @id fluentd-containers.log
115 |       @type tail
116 |       path /var/log/containers/*.log
117 |       pos_file /var/log/es-containers.log.pos
118 |       time_format %Y-%m-%dT%H:%M:%S.%NZ
119 |       tag raw.kubernetes.*
120 |       read_from_head true
121 |       <parse>
122 |         @type multi_format
123 |         <pattern>
124 |           format json
125 |           time_key time
126 |           time_format %Y-%m-%dT%H:%M:%S.%NZ
127 |         </pattern>
128 |         <pattern>
129 |           format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
130 |           time_format %Y-%m-%dT%H:%M:%S.%N%:z
131 |         </pattern>
132 |       </parse>
133 |     </source>
134 |
135 | # Detect exceptions in the log output and forward them as one log entry.
136 |     <match raw.kubernetes.**>
137 |       @id raw.kubernetes
138 |       @type detect_exceptions
139 |       remove_tag_prefix raw
140 |       message log
141 |       stream stream
142 |       multiline_flush_interval 5
143 |       max_bytes 500000
144 |       max_lines 1000
145 |     </match>
146 |
147 | system.input.conf: |-
148 | # Example:
149 | # 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
150 |     <source>
151 |       @id minion
152 |       @type tail
153 |       format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
154 |       time_format %Y-%m-%d %H:%M:%S
155 |       path /var/log/salt/minion
156 |       pos_file /var/log/salt.pos
157 |       tag salt
158 |     </source>
159 |
160 | # Example:
161 | # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
162 |     <source>
163 |       @id startupscript.log
164 |       @type tail
165 |       format syslog
166 |       path /var/log/startupscript.log
167 |       pos_file /var/log/es-startupscript.log.pos
168 |       tag startupscript
169 |     </source>
170 |
171 | # Examples:
172 | # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
173 | # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
174 | # TODO(random-liu): Remove this after cri container runtime rolls out.
175 |     <source>
176 |       @id docker.log
177 |       @type tail
178 |       format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=(?<status_code>\d+))?/
179 |       path /var/log/docker.log
180 |       pos_file /var/log/es-docker.log.pos
181 |       tag docker
182 |     </source>
183 |
184 | # Example:
185 | # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
186 |     <source>
187 |       @id etcd.log
188 |       @type tail
189 |       # Not parsing this, because it doesn't have anything particularly useful to
190 |       # parse out of it (like severities).
191 |       format none
192 |       path /var/log/etcd.log
193 |       pos_file /var/log/es-etcd.log.pos
194 |       tag etcd
195 |     </source>
196 |
197 | # Multi-line parsing is required for all the kube logs because very large log
198 | # statements, such as those that include entire object bodies, get split into
199 | # multiple lines by glog.
200 |
201 | # Example:
202 | # I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
203 |     <source>
204 |       @id kubelet.log
205 |       @type tail
206 |       format multiline
207 |       multiline_flush_interval 5s
208 |       format_firstline /^\w\d{4}/
209 |       format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/