├── README.md
├── david
├── Helm-Jenkins-Artifactory
│ ├── helm-writeup.md
│ ├── pv-arti-arti.yml
│ ├── pv-arti-nginx.yml
│ └── pv-arti-psql.yml
├── Traefik-Loadbalancer
│ ├── README.md
│ ├── example-nginx.sh
│ ├── example-traefik.sh
│ ├── nginx-ing.yml
│ ├── nginx-svc.yml
│ ├── nginx.yml
│ ├── traefik-deploy.yml
│ ├── traefik-init.sh
│ └── traefik-svc.yml
└── certs-on-example
├── henrik
├── apiEventTrigger
│ ├── README.md
│ └── apiListener.sh
├── apiReader
│ ├── README.md
│ └── apiReader.f
├── confluence-dockerized
│ ├── Dockerfile
│ └── docker-entrypoint.sh
├── docker-workshop-examples
│ ├── nginx-docker-example-1
│ │ ├── Dockerfile
│ │ ├── index.html
│ │ └── run.sh
│ ├── nginx-docker-example-2
│ │ ├── Dockerfile
│ │ └── run.sh
│ └── nginx-docker-example-3
│ │ ├── Dockerfile
│ │ ├── entrypoint.sh
│ │ └── run.sh
├── internal-loadbalancer-nginx
│ ├── README.md
│ └── images
│ │ └── kubernetes_loadbalancer_topoloty.png
├── kubernetes-workshopexamples
│ ├── confluence-mysql
│ │ ├── configmaps
│ │ │ ├── mysql-configmap-confluence.yaml
│ │ │ └── mysql-configmap.yaml
│ │ ├── delete.sh
│ │ ├── deploy.sh
│ │ ├── deployments
│ │ │ ├── confluence-deployment.yaml
│ │ │ └── mysql-deployment.yaml
│ │ ├── images
│ │ │ ├── build.sh
│ │ │ ├── confluence
│ │ │ │ ├── Dockerfile
│ │ │ │ └── docker-entrypoint.sh
│ │ │ └── mysql
│ │ │ │ └── Dockerfile
│ │ ├── ingress
│ │ │ └── confluence-ingress.yaml
│ │ ├── notes.txt
│ │ ├── secrets
│ │ │ └── mysql-secret.yaml
│ │ └── services
│ │ │ ├── confluence-service.yaml
│ │ │ └── mysql-service.yaml
│ ├── simple-deployment
│ │ ├── deploy.sh
│ │ └── nginx.yaml
│ ├── simple-run
│ │ ├── run.sh
│ │ └── scale.sh
│ └── talk-to-apiserver
│ │ └── run.sh
├── nfs-pv
│ ├── README.md
│ └── yaml
│ │ ├── nfs-pv.yaml
│ │ ├── nfs-pvc.yaml
│ │ └── nginx-rc.yaml
├── nodeport-loadbalancer-poc
│ ├── README.md
│ ├── images
│ │ └── apache-loadbalancer.png
│ ├── setHttpdConf.sh
│ └── start-pods.sh
├── nodeport-loadbalancer
│ ├── README.md
│ ├── run.sh
│ ├── testhap.sh
│ └── tools.f
└── traefik_kubernetes
│ ├── install_traefik.sh
│ ├── tool.ingress.yml
│ ├── traefik.toml
│ └── traefik_linux-amd64
├── kamran
├── External-LoadBalancer.md
├── HA-with-Pacemaker-Corosync.md
├── HowToPushDockerImageToDockerHub-Public-Private.md
├── Kubernetes-Atomic-on-Amazon-VPC.md
├── Kubernetes-Atomic-on-Amazon.md
├── Kubernetes-Fedora-Multi-node.md
├── Kubernetes-The-Hard-Way-on-AWS.md
├── Kubernetes-The-Hard-Way-on-BareMetal.md
├── Kubernetes-The-Hard-Way-on-GCE.md
├── Kubernetes-coreOS-rackspace.md
├── Kubernetes-kubectl-cheat-sheet.md
├── Kubernetes-on-Atomic-on-KVM.md
├── Kubernetes-on-GCE-Take2.md
├── Kubernetes-on-Vagrant-Libvirt-part-2.md
├── Kubernetes-on-Vagrant-Libvirt.md
├── Kubernetes-workshop-with-kelsey-aug-2016.txt
├── Learning-Kubernetes-BareMetal-CENTOS.md
├── Learning-Kubernetes-GCE.md
├── SettingUp-SkyDNS.md
├── cluster-setup-scripts.NotUsedAnyMore
│ ├── .gitignore
│ ├── README.md
│ ├── certs
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── SSL-TLS-Certificates.md
│ │ ├── ca-config.json
│ │ ├── ca-csr.json
│ │ ├── ca-csr.json.template
│ │ ├── ca-key.pem
│ │ ├── ca.csr
│ │ ├── ca.pem
│ │ ├── certs.conf
│ │ ├── distribute-certs.sh
│ │ ├── generate-certs.sh
│ │ ├── kubernetes-csr.body
│ │ ├── kubernetes-csr.json
│ │ ├── kubernetes-csr.json.footer
│ │ ├── kubernetes-csr.json.footer.template
│ │ ├── kubernetes-csr.json.header
│ │ ├── kubernetes-csr.json.header.template
│ │ ├── kubernetes-csr.json.template
│ │ ├── kubernetes-key.pem
│ │ ├── kubernetes.csr
│ │ └── kubernetes.pem
│ ├── cluster.conf
│ ├── controllers
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── configure-controllers-HA.sh
│ │ ├── configure-controllers-k8s.sh
│ │ └── controllers.sh
│ ├── etcd
│ │ ├── README.sh
│ │ ├── configure-etcd.sh
│ │ ├── etcd.service
│ │ └── etcd.sh
│ ├── hosts
│ ├── kickstart
│ │ ├── .gitignore
│ │ ├── README.md
│ │ └── kickstart.template
│ ├── libvirt-provisioner
│ │ ├── README.md
│ │ ├── functions.sh
│ │ ├── libvirt-provisioner.sh
│ │ └── prepare-web.sh
│ ├── loadbalancers
│ │ ├── configure-loadbalancer-HA.sh
│ │ ├── configure-loadbalancers.sh
│ │ ├── loadbalancers.md
│ │ └── loadbalancers.sh
│ ├── misc-tools
│ │ ├── README.md
│ │ ├── get-dates.sh
│ │ └── get-fingerprints.sh
│ └── workers
│ │ ├── configure-workers.sh
│ │ ├── show-routes.sh
│ │ ├── workers.md
│ │ └── workers.sh
├── fedora-atomic-cloud-init
│ ├── README.md
│ ├── kamran-id_rsa.pub
│ ├── meta-data
│ └── user-data
├── images
│ ├── AWS-Instance-Creation-with-VPC.png
│ ├── Fedora-Admin-webUI-1.png
│ ├── Fedora-Admin-webUI-2.png
│ ├── GCE-Cluster.png
│ ├── GCE-Credentials-1.png
│ ├── GCE-Credentials-2.png
│ ├── GCE-Kubernetes-Master-Login.png
│ ├── Info-about-Fedora-Admin-webUI-on-terminal.png
│ ├── Kubernetes-Cluster-With-Internal-LoadBalancer.dia
│ ├── Kubernetes-Cluster-With-Internal-LoadBalancer.dia~
│ ├── Kubernetes-Cluster-With-Internal-LoadBalancer.png
│ ├── Networking-Information-From-Client.png
│ ├── Reaching-LoadBalancer-From-Other-Networks.png
│ ├── Reaching-service-on-load-balancer-from-other-networks.png
│ ├── Vagrant-Libvirt-VMs.png
│ ├── Vagrant-Libvirt-Virtual-Network-1.png
│ ├── Vagrant-Libvirt-Virtual-Network-2.png
│ ├── libvirt-new-virtual-network-1.png
│ ├── libvirt-new-virtual-network-2.png
│ ├── libvirt-new-virtual-network-3.png
│ ├── libvirt-new-virtual-network-4.png
│ ├── libvirt-new-virtual-network-5.png
│ └── libvirt-new-virtual-network-6.png
└── temporary-scratch-work-to-go-in-other-docs.txt
└── nicolaj
└── README.md
/README.md:
--------------------------------------------------------------------------------
1 | ---
2 | maintainer: hoeghh
3 | ---
4 |
5 | # LearnKubernetes
6 |
7 | Notes and resources collected to help learn Kubernetes. This will eventually become a tutorial and, later, a blog post for the Praqma website (hopefully!)
8 |
9 | For now, contributors to this repo should each create a separate directory named after themselves and put their material there. One of the ideas is to keep your notes in this repo instead of (or in addition to) on your own computer, so we are protected against accidents like hardware failures, theft, etc.
10 |
--------------------------------------------------------------------------------
/david/Helm-Jenkins-Artifactory/helm-writeup.md:
--------------------------------------------------------------------------------
1 | # A simple introduction to Helm on Minikube
2 |
3 | ## [Step 1: Install minikube](https://github.com/kubernetes/minikube)
4 |
5 | ```
6 | curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
7 | ```
8 |
9 | Start it.
10 | ```
11 | minikube start
12 | ```
13 |
14 | ## [Step 2: Setup Helm client](https://github.com/kubernetes/helm)
15 |
16 | ```
17 | wget https://kubernetes-helm.storage.googleapis.com/helm-v2.7.2-linux-amd64.tar.gz && tar -zxvf helm-v2.7.2-linux-amd64.tar.gz && mv linux-amd64/helm /usr/local/bin/helm
18 | ```
19 | Initialize Helm:
20 | ```
21 | helm init
22 | ```
23 |
24 | This sets up a Tiller server inside the cluster, which is the Helm server side component.
25 |
26 | ## Step 3: Run Jenkins
27 | ```
28 | helm install --name jenkins stable/jenkins
29 | ```
30 | This downloads and installs the jenkinsci/jenkins image by default, along with a whole bunch of default settings that can be changed to your heart's content.
31 |
32 | Get your service endpoint:
33 | ```
34 | kubectl get svc jenkins-jenkins   # optionally add --namespace default
35 | ```
36 |
37 | or run:
38 |
39 | ```
40 | minikube service jenkins-jenkins
41 | ```
42 |
43 | ### Play with Jenkins: Obtaining the admin username and password
44 | Get your password by running:
45 | ```
46 | printf $(kubectl get secret --namespace default jenkins-jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo
47 | ```
48 |
49 | Username is 'admin'.
50 |
51 | When trying out a pipeline, most people will immediately notice that there is no permanent slave. By default, the Helm chart sets Jenkins up to use the Kubernetes plugin. This means any given job is run by creating a container, sending the job to that container, and then killing the container - all within a defined namespace in Kubernetes.
52 |
53 | To set this up, go to Manage Jenkins -> Configure System and find the Kubernetes section.
54 |
55 | Here the Kubernetes URL should be: https://kubernetes.default.svc.cluster.local as this targets our cluster.
56 |
57 | After making sure all plugins are up to date and so on, running a job will create a container in the requested namespace (default if nothing is specified) and send jobs there.
58 |
59 | You can test this by making a freestyle job that runs echo "hello" and watching the pods:
60 | ```
61 | kubectl get pods --all-namespaces -w
62 | ```
63 |
64 | Which results in something like:
65 | ```
66 | default default-gstvj 0/1 Pending 0s
67 | default default-gstvj 0/1 Pending 0s
68 | default default-gstvj 0/1 ContainerCreating 0s
69 | default default-gstvj 1/1 Running 1s
70 | default default-gstvj 1/1 Terminating 17s
71 | ```
72 |
73 | That container then executes the job. The first run may take a while, as the Docker image has to be downloaded. Concurrent builds take ~10 seconds to spin up their containers.
74 |
75 | To clean up run:
76 | ```
77 | helm del --purge jenkins
78 | ```
79 |
80 |
81 | ## Step 4: Artifactory
82 | ```
83 | helm install --name artifactory stable/artifactory
84 | ```
85 | For minikube, the chart's nginx config requires a properly running nginx backend. We are going to sidestep that by running:
86 | ```
87 | kubectl expose svc artifactory --name=external-artifactory --type=NodePort
88 | ```
89 |
90 | Which we can open the same way as we did Jenkins:
91 | ```
92 | minikube service external-artifactory
93 | ```
94 |
95 | Similarly to Jenkins, there are a lot of configuration options.
96 |
97 | Default credentials for Artifactory:
98 | user: admin
99 | password: password
100 |
101 | ### Artifactory misc: Setting up your own Volumes
102 | While the Artifactory deployment already creates volumes on its own, you might notice that these are lost when the Artifactory deployment is deleted.
103 | [In the template folder of the Helm chart](https://github.com/kubernetes/charts/tree/master/stable/artifactory/templates) there is no PersistentVolume, so the Helm deployment itself creates and maintains the volumes. This also means they are deleted when the deployment is removed, resulting in data loss.
104 |
105 | To circumvent this problem, simply provide three volumes that match the claims created by the deployment, like [this one for nginx](pv-arti-nginx.yml), [this one for artifactory itself](pv-arti-arti.yml) and [this one for psql](pv-arti-psql.yml).
106 |
107 | All the PVCs created by Helm's Artifactory chart look for a StorageClass called standard, so we set that:
108 | ```
109 | storageClassName: standard
110 | ```
111 |
112 | Kubernetes then binds these volumes to the claims, and the data survives the deletion of the deployment.
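113 |
114 | As a minimal sketch (assuming the three pv-arti-*.yml files above are in the current directory and you are running on minikube), you can create the volumes, (re)install the chart, and then check that the claims bind to them:
115 | ```
116 | kubectl apply -f pv-arti-nginx.yml -f pv-arti-arti.yml -f pv-arti-psql.yml
117 | helm install --name artifactory stable/artifactory
118 | kubectl get pv,pvc
119 | ```
120 | The PersistentVolumes should end up Bound, and since Helm does not manage them, helm del --purge artifactory should leave them (and the data under the hostPath) in place.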
--------------------------------------------------------------------------------
/david/Helm-Jenkins-Artifactory/pv-arti-arti.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: custom-pv-arti-arti
5 | labels:
6 | type: local
7 | spec:
8 | storageClassName: standard
9 | capacity:
10 | storage: 20Gi
11 | accessModes:
12 | - ReadWriteOnce
13 | hostPath:
14 | path: "/mnt/data"
15 |
--------------------------------------------------------------------------------
/david/Helm-Jenkins-Artifactory/pv-arti-nginx.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: custom-pv-arti-nginx
5 | labels:
6 | type: local
7 | spec:
8 | storageClassName: standard
9 | capacity:
10 | storage: 5Gi
11 | accessModes:
12 | - ReadWriteOnce
13 | hostPath:
14 | path: "/mnt/data"
15 |
--------------------------------------------------------------------------------
/david/Helm-Jenkins-Artifactory/pv-arti-psql.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: custom-pv-arti-psql
5 | labels:
6 | type: local
7 | spec:
8 | storageClassName: standard
9 | capacity:
10 | storage: 10Gi
11 | accessModes:
12 | - ReadWriteOnce
13 | hostPath:
14 | path: "/mnt/data"
15 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/README.md:
--------------------------------------------------------------------------------
1 | # Traefik as a loadbalancer (in kubernetes)
2 | First of all, all credit goes to traefik.io for a solid solution.
3 |
4 | The next few steps can also be found on https://docs.traefik.io/user-guide/kubernetes/
5 |
6 | ## Deploy Træfik as an ingress controller
7 | All of the steps assume that you are running a small cluster on minikube, which can be found in the official Kubernetes repository here: https://github.com/kubernetes/minikube
8 |
9 | Run:
10 | ```
11 | kubectl create -f traefik-deploy.yml
12 | ```
13 |
14 | This will create an ingress controller running Træfik.
15 |
16 | ## Deploy services to support the Træfik UI
17 | The next step is to do the same for the Træfik UI service and its ingress.
18 | ```
19 | kubectl create -f traefik-svc.yml
20 | echo "$(minikube ip) traefik-ui.local" | sudo tee -a /etc/hosts
21 | ```
22 |
23 | These two steps add the service and ingress, as well as the host entry that Træfik depends on to route traffic.
24 |
25 | Test it by opening traefik-ui.local in your browser!
26 |
27 | ## Deploy another service - hello world with Nginx!
28 | The last part is running through the same concept for a "real" service, here Nginx.
29 |
30 | It is assumed that you are running a deployment called nginx here. Let's create it:
31 | ```
32 | kubectl run nginx --image=nginx --replicas=2 --port=80
33 | ```
34 | Then we create the parts that route to this service:
35 |
36 | ```
37 | kubectl create -f nginx-svc.yml
38 | kubectl create -f nginx-ing.yml
39 | echo "$(minikube ip) yournginx.com" | sudo tee -a /etc/hosts
40 | ```
41 |
42 |
43 | Use your own hostname if you want (remember to change it in nginx-ing.yml as well), but if you copy/pasted the commands above, access it at http://nginx-hej.com
44 |
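45 | As a quick sanity check, you can also hit the ingress controller directly with curl and let the Host header do the routing (this should work even without the /etc/hosts entries):
46 | ```
47 | curl -H "Host: nginx-hej.com" http://$(minikube ip)/
48 | curl -H "Host: traefik-ui.local" http://$(minikube ip)/
49 | ```
50 | Both requests hit the same Træfik instance; only the Host header decides which backend service answers.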
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/example-nginx.sh:
--------------------------------------------------------------------------------
1 | kubectl create -f nginx.yml
2 | kubectl create -f nginx-svc.yml
3 | kubectl create -f nginx-ing.yml
4 | echo "$(minikube ip) nginx-hej.com" | sudo tee -a /etc/hosts
5 |
6 | firefox http://nginx-hej.com
7 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/example-traefik.sh:
--------------------------------------------------------------------------------
1 | kubectl create -f traefik-deploy.yml
2 | kubectl create -f traefik-svc.yml
3 | echo "$(minikube ip) traefik-ui.local" | sudo tee -a /etc/hosts
4 |
5 | firefox http://traefik-ui.local
6 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/nginx-ing.yml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: nginx-ingress
5 | spec:
6 | rules:
7 | - host: nginx-hej.com
8 | http:
9 | paths:
10 | - path: /
11 | backend:
12 | serviceName: nginx
13 | servicePort: 80
14 |
15 |
16 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/nginx-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginx
5 | spec:
6 | ports:
7 | - port: 80
8 | targetPort: 80
9 | selector:
10 | run: nginx
11 | type: ClusterIP
12 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/nginx.yml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: nginx
6 | name: nginx
7 | namespace: default
8 | spec:
9 | replicas: 2
10 | selector:
11 | matchLabels:
12 | run: nginx
13 | template:
14 | metadata:
15 | labels:
16 | run: nginx
17 | spec:
18 | containers:
19 | - image: nginx
20 | imagePullPolicy: Always
21 | name: nginx
22 | ports:
23 | - containerPort: 80
24 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/traefik-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 |
4 | metadata:
5 | name: traefik-ingress-controller
6 | namespace: kube-system
7 | labels:
8 | k8s-app: traefik-ingress-lb
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | k8s-app: traefik-ingress-lb
14 | template:
15 | metadata:
16 | labels:
17 | k8s-app: traefik-ingress-lb
18 | name: traefik-ingress-lb
19 | version: v1.0.0
20 | spec:
21 | terminationGracePeriodSeconds: 60
22 | containers:
23 | - image: traefik:v1.0.0
24 | name: traefik-ingress-lb
25 | resources:
26 | limits:
27 | cpu: 200m
28 | memory: 30Mi
29 | requests:
30 | cpu: 100m
31 | memory: 20Mi
32 | ports:
33 | - containerPort: 80
34 | hostPort: 80
35 | - containerPort: 8080
36 | args:
37 | - --web
38 | - --kubernetes
39 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/traefik-init.sh:
--------------------------------------------------------------------------------
1 | kubectl create -f traefik-deploy.yml
2 | kubectl create -f traefik-svc.yml
3 | echo "$(minikube ip) traefik-ui.local" | sudo tee -a /etc/hosts
4 |
5 |
--------------------------------------------------------------------------------
/david/Traefik-Loadbalancer/traefik-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: traefik-web-ui
5 | namespace: kube-system
6 | spec:
7 | selector:
8 | k8s-app: traefik-ingress-lb
9 | ports:
10 | - port: 80
11 | targetPort: 8080
12 | ---
13 | apiVersion: extensions/v1beta1
14 | kind: Ingress
15 | metadata:
16 | name: traefik-web-ui
17 | namespace: kube-system
18 | spec:
19 | rules:
20 | - host: traefik-ui.local
21 | http:
22 | paths:
23 | - backend:
24 | serviceName: traefik-web-ui
25 | servicePort: 80
26 |
--------------------------------------------------------------------------------
/david/certs-on-example:
--------------------------------------------------------------------------------
1 | var/lib/kubernetes/ca.pem
2 | https://controller.example.com:6443
3 |
--------------------------------------------------------------------------------
/henrik/apiEventTrigger/README.md:
--------------------------------------------------------------------------------
1 | # A script that gets triggered when Pods are killed or started
2 | ## If we have an external service that needs to be in sync with what our cluster is serving, we need a way to get notified when changes happen and react to them.
3 |
4 | ### A small example
5 | This is a poor man's version of an event listener. There are examples online, written in Go and Java, that tap directly into the API server.
6 |
7 | kubectl get has an events resource that gives us all the events in the default namespace. We can specify another namespace with --namespace=[namespace-name]. By giving it the --watch flag, we keep receiving events as they happen in the cluster. Since we only want events that occur after our script has started, we use --watch-only=true.
8 |
9 | By piping the output through awk, we can react to what kubectl reports and call commands based on it.
10 |
11 | Here, we echo out when new containers are started, and when they are killed.
12 |
13 | ```
14 | kubectl get events --watch-only=true | awk '/Started container/ { system("echo Started container") }
15 | /Killing container/ { system("echo Killed container") }'
16 | ```
17 |
18 | Run the above in one terminal session. Then, in another session, start a deployment with two nginx replicas:
19 | ```
20 | kubectl run my-nginx --image=nginx --replicas=2 --port=80
21 | ```
22 | Our first session will report that two containers were started.
23 |
24 | Now find a pod to delete
25 | ```
26 | kubectl get pods
27 | ```
28 | ```
29 | NAME READY STATUS RESTARTS AGE
30 | my-nginx-2494149703-69t9h 1/1 Running 0 8m
31 | my-nginx-2494149703-eoj2e 1/1 Running 0 8m
32 | ```
33 | Now delete a pod and watch our script trigger twice: once when the pod is deleted, and once when a new pod is created.
34 | ```
35 | kubectl delete pod my-nginx-2494149703-69t9h
36 | ```
37 |
38 | Here's the output from our event listener:
39 | ```
40 | Started container
41 | Started container
42 | Killed container
43 | Started container
44 | ```
45 | First our deployment created two containers, then we killed one, and finally the deployment re-created a pod.
46 |
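47 | If you want to do more than echo, the same pattern can call out to your own script. A minimal sketch (sync-external.sh is a hypothetical hook script you would provide yourself):
48 | ```
49 | # sync-external.sh is your own (hypothetical) hook script
50 | kubectl get events --watch-only=true | awk '/Started container/ { system("./sync-external.sh added") }
51 | /Killing container/ { system("./sync-external.sh removed") }'
52 | ```
53 | Anything the external service needs to know (pod names, IPs, ports) can then be looked up inside that script, for example with kubectl get pods -o wide.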
--------------------------------------------------------------------------------
/henrik/apiEventTrigger/apiListener.sh:
--------------------------------------------------------------------------------
1 | kubectl get events --watch-only=true | awk '/Started container/ { system("echo Started container") }
2 | /Killing container/ { system("echo Killed container") }'
3 |
--------------------------------------------------------------------------------
/henrik/apiReader/README.md:
--------------------------------------------------------------------------------
1 | # ApiReader for Kubernetes
2 | ## We wanted a simple library of functions, implemented in Bash, to retrieve data from the Kubernetes API server
3 |
4 | First, start the proxy (more advanced connections later) and source the function file.
5 | ```
6 | kubectl proxy &
7 | source apiReader.f
8 | ```
9 |
10 | Now get the node IPs by
11 | ```
12 | [hoeghh@localhost apiReader]$ getNodeIPs
13 | 10.245.1.3 10.245.1.4
14 | ```
15 |
16 | Get services, in all namespaces by
17 | ```
18 | [hoeghh@localhost apiReader]$ getServices
19 | kubernetes my-nginx weavescope-app heapster kube-dns kubernetes-dashboard monitoring-grafana monitoring-influxdb
20 | ```
21 |
22 | Get services in a specific namespace by
23 | ```
24 | [hoeghh@localhost apiReader]$ getServices default
25 | kubernetes my-nginx weavescope-app
26 | ```
27 |
28 | Get the node ports a service is bound to when using NodePort, specifying the namespace
29 | ```
30 | [hoeghh@localhost apiReader]$ getServiceNodePorts my-nginx default
31 | 32226
32 | ```
33 |
34 | Get the node port a service is bound to when using NodePort, without specifying the namespace
35 | ```
36 | [hoeghh@localhost apiReader]$ getServiceNodePorts my-nginx
37 | 32226
38 | ```
39 |
40 | Get the endpoints for a service without knowing the namespace by
41 | ```
42 | [hoeghh@localhost apiReader]$ getServiceEndpoints my-nginx
43 | 10.246.28.3 10.246.90.5
44 | ```
45 |
46 | Get the endpoints for a service, knowing the namespace, by
47 | ```
48 | [hoeghh@localhost apiReader]$ getServiceEndpoints my-nginx default
49 | 10.246.28.3 10.246.90.5
50 |
51 | ```
52 |
53 | Get the namespace in which a pod lives by
54 | ```
55 | [hoeghh@localhost apiReader]$ getPodNamespace my-nginx-3053829504-04408
56 | default
57 | ```
58 |
59 | Get all pods across namespaces by
60 | ```
61 | [hoeghh@localhost apiReader]$ getPods
62 | heapster-v1.1.0-2101778418-sxln1 kube-dns-v17.1-ikbih kube-proxy-kubernetes-node-1 kube-proxy-kubernetes-node-2 kubernetes-dashboard-v1.1.1-qgpyc monitoring-influxdb-grafana-v3-css0t my-nginx-3053829504-04408 my-nginx-3053829504-1zyjb weavescope-app-e03n3 weavescope-probe-c5y9l weavescope-probe-ga47i
63 | ```
64 |
65 | Only get pods in a specific namespace by
66 | ```
67 | [hoeghh@localhost apiReader]$ getPods default
68 | my-nginx-3053829504-04408 my-nginx-3053829504-1zyjb weavescope-app-e03n3 weavescope-probe-c5y9l weavescope-probe-ga47i
69 | ```
70 |
71 | Get the IP of a Pod by
72 | ```
73 | [hoeghh@localhost apiReader]$ getPodIp my-nginx-3053829504-04408 default
74 | 10.246.24.3
75 | ```
76 |
77 | Get Deployments by
78 | ```
79 | [hoeghh@localhost apiReader]$ getDeployments
80 | heapster-v1.1.0 my-nginx
81 | ```
82 |
83 | getDeployments from namespace by
84 | ```
85 | [hoeghh@localhost apiReader]$ getDeployments default
86 | my-nginx
87 | ```
88 |
89 | getEventsAll by
90 | ```
91 | [hoeghh@localhost apiReader]$ getEventsAll
92 | {
93 | "kind": "EventList",
94 | "apiVersion": "v1",
95 | "metadata": {
96 | "selfLink": "/api/v1/events",
97 | "resourceVersion": "266"
98 | },
99 | "items": [
100 | {
101 | "metadata": {
102 | "name": "kubernetes-node-2.147824c40b65f06a",
103 | "namespace": "default",
104 | ...
105 | ...
106 | ```
107 |
108 | getEventsAll from a namespace by
109 | ```
110 | [hoeghh@localhost apiReader]$ getEventsAll default
111 | {
112 | "kind": "EventList",
113 | "apiVersion": "v1",
114 | "metadata": {
115 | "selfLink": "/api/v1/namespaces/default/events",
116 | "resourceVersion": "276"
117 | },
118 | "items": [
119 | {
120 | "metadata": {
121 | "name": "my-nginx-2494149703-dukkv.1478252045a3d0d3",
122 | "namespace": "default",
123 | ...
124 | ...
125 | ```
126 |
127 | getPodEventStream by
128 | ```
129 | [hoeghh@localhost apiReader]$ getPodEventStream
130 | {"type":"ADDED","object":{"kind":"Pod","apiVersion":"v1","metadata":{"name":"my-nginx-2494149703-ulx3g","generateName":"my-nginx-2494149703-","namespace":"default","selfLink":"/api/v1/namespaces/default/pods/my-nginx-2494149703-ulx3g","uid":"86c83105-8497-11e6-929d-0800277ad4a8","resourceVersion":"268","creationTimestamp":"2016-09-27T09:48:21Z","labels":{"pod-template-hash":"2494149703","run":"my-nginx"},"annotations":{"kubernetes.io/created-by":"{\"kind\":\"SerializedReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"ReplicaSet\",\"namespace\":\"default\",\"name\.....
131 | ...
132 | ...
133 | ```
134 |
135 | getPodEventStream from one pod by
136 | ```
137 | Not working yet
138 | ```
139 |
140 | getServiceEventStream by
141 | ```
142 | [hoeghh@localhost apiReader]$ getServiceEventStream
143 | {"type":"ADDED","object":{"kind":"Service","apiVersion":"v1","metadata":{"name":"kubernetes","namespace":"default","selfLink":"/api/v1/namespaces/default/services/kubernetes","uid":"04846b7b-8495-11e6-929d-0800277ad4a8","resourceVersion":"7","creationTimestamp":"2016-09-27T09:30:24Z","labels":{"component":"apiserver","provider":"kubernetes"}},"spec":{"ports":[{"name":"https","protocol":"TCP","port":443,"targetPort":443}],"clusterIP":"10.247.0.1","type":"ClusterIP","sessionAffinity":"ClientIP"},"status":{"loadBalancer":{}}}}
144 | ...
145 | ...
146 | ```
147 |
148 | getDeploymentEventStream by
149 | ```
150 | [hoeghh@localhost apiReader]$ getDeploymentEventStream
151 | {"type":"ADDED","object":{"kind":"Deployment","apiVersion":"extensions/v1beta1","metadata":{"name":"heapster-v1.1.0","namespace":"kube-system","selfLink":"/apis/extensions/v1beta1/namespaces/kube-system/deployments/heapster-v1.1.0","uid":"1f98330a-8495-11e6-929d-0800277ad4a8","resourceVersion":"109","generation":4,"creationTimestamp":"2016-09-27T09:31:09Z","labels":{"k8s-app":"heapster","kubernetes.io/cluster-service":"true","version":"v1.1.0"},....
152 | ...
153 | ...
154 | ```
155 |
156 |
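157 | The functions can also be combined. A minimal sketch (assuming kubectl proxy is still running and jq is installed) that prints the IP of every pod in the default namespace:
158 | ```
159 | source apiReader.f
160 | for pod in $(getPods default); do
161 |   echo "$pod -> $(getPodIp $pod default)"
162 | done
163 | ```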
--------------------------------------------------------------------------------
/henrik/apiReader/apiReader.f:
--------------------------------------------------------------------------------
1 | # API description found here:
2 | # http://kubernetes.io/docs/api-reference/v1/operations/
3 | # http://kubernetes.io/kubernetes/third_party/swagger-ui/
4 | url="localhost:8001"
5 |
6 | function getNodeIPs(){
7 | local nodename=$1
8 |
9 | if [ ! -z "$nodename" ]; then
10 | echo $(curl -s $url/api/v1/nodes/$nodename | jq -r '.status.addresses[] | select(.type == "InternalIP") | .address')
11 | else
12 | echo $(curl -s $url/api/v1/nodes | jq -r '.items[].status.addresses[] | select(.type == "InternalIP") | .address')
13 | fi
14 | }
15 |
16 | function getNodeNames(){
17 | echo $(curl -s $url/api/v1/nodes | jq -r '.items[].spec.externalID')
18 | }
19 |
20 | function getServices(){
21 | local namespace=$1
22 |
23 | if [ ! -z "$namespace" ]; then
24 | echo $(curl -s $url/api/v1/namespaces/$namespace/services/ | jq -r '.items[].metadata.name')
25 | else
26 | echo $(curl -s $url/api/v1/services/ | jq -r '.items[].metadata.name')
27 | fi
28 | }
29 |
30 | function getServiceNodePorts(){
31 | local service=$1
32 | local namespace=$2
33 |
34 | if [ ! -z "$namespace" ]; then
35 | echo $(curl -s $url/api/v1/namespaces/$namespace/services/$service | jq -r '.spec.ports[].nodePort')
36 | else
37 | echo $(curl -s $url/api/v1/services/ | jq -r '.items[] | select(.metadata.name == "'$service'") | .spec.ports[].nodePort')
38 | fi
39 |
40 | }
41 |
42 | function getServiceEndpoints(){
43 | local service=$1
44 | local namespace=$2
45 |
46 | if [ "$namespace" == "" ];then
47 | namespace=$(getServiceNamespace $service)
48 | fi
49 |
50 | local subset=$(curl -s $url/api/v1/namespaces/$namespace/endpoints/$service | jq -r '.subsets[]')
51 |
52 | if [ ! -z "$subset" ]; then
53 | echo $(curl -s $url/api/v1/namespaces/$namespace/endpoints/$service | jq -r '.subsets[].addresses[].ip')
54 |
55 | fi
56 | }
57 |
58 | function getServiceNamespace(){
59 | local service=$1
60 | echo $(curl -s $url/api/v1/services/ | jq -r '.items[] | select(.metadata.name == "'$service'") | .metadata.namespace')
61 | }
62 |
63 | function getPods(){
64 | local namespace=$1
65 |
66 | if [ ! -z "$namespace" ]; then
67 | echo $(curl -s $url/api/v1/namespaces/$namespace/pods | jq -r '.items[].metadata.name')
68 | else
69 | echo $(curl -s $url/api/v1/pods | jq -r '.items[].metadata.name')
70 | fi
71 | }
72 |
73 | function getPodNamespace(){
74 | local podName=$1
75 | echo $(curl -s $url/api/v1/pods | jq -r '.items[] | select(.metadata.name == "'$podName'") | .metadata.namespace')
76 | }
77 |
78 | function getPodIp(){
79 | local podName=$1
80 | local namespace=$2
81 |
82 | if [ ! -z "$namespace" ]; then
83 | echo $(curl -s $url/api/v1/namespaces/$namespace/pods/$podName | jq -r '.status.podIP')
84 | fi
85 | }
86 |
87 | function getDeployments(){
88 | local namespace=$1
89 |
90 | if [ ! -z "$namespace" ]; then
91 | echo $(curl -s $url/apis/extensions/v1beta1/namespaces/$namespace/deployments | jq -r '.items[].metadata.name')
92 | else
93 | echo $(curl -s $url/apis/extensions/v1beta1/deployments | jq -r '.items[].metadata.name')
94 | fi
95 |
96 | }
97 |
98 | function getEventsAll(){
99 | local namespace=$1
100 |
101 | if [ ! -z "$namespace" ]; then
102 | curl -s $url/api/v1/watch/namespaces/$namespace/events
103 | else
104 | curl -s $url/api/v1/watch/events
105 | fi
106 |
107 | }
108 |
109 |
110 | function formatEventStream(){
111 | # http://stackoverflow.com/questions/30272651/redirect-curl-to-while-loop
112 | while read -r l; do
113 | resourceVersion=$(echo "$l" | jq -r '.object.metadata.resourceVersion')
114 | reason=$(echo "$l" | jq -r '.object.reason')
115 | message=$(echo "$l" | jq -r '.object.message')
116 |
117 | echo "Event ($resourceVersion) ($reason) : $message"
118 | done < <(getEventsOnlyNew)
119 |
120 | }
121 |
122 | function getEventsOnlyNew(){
123 | local namespace=$1
124 |
125 | if [ ! -z "$namespace" ]; then
126 | local resourceVersion=$(curl -s $url/api/v1/namespaces/$namespace/events | jq -r '.metadata.resourceVersion')
127 | else
128 | local resourceVersion=$(curl -s $url/api/v1/events | jq -r '.metadata.resourceVersion')
129 | fi
130 |
131 | local onlyNew="?resourceVersion=$resourceVersion"
132 |
133 | if [ ! -z "$namespace" ]; then
134 | curl -s -N $url/api/v1/watch/namespaces/$namespace/events$onlyNew --stderr -
135 | else
136 | curl -s -N $url/api/v1/watch/events$onlyNew --stderr -
137 | fi
138 | }
139 |
140 | function getPodEventStream(){
141 | local podname=$1
142 |
143 | if [ ! -z "$podname" ]; then
144 | curl -s $url/api/v1/watch/pods/$podname
145 | else
146 | curl -s $url/api/v1/watch/pods
147 | fi
148 |
149 | }
150 |
151 |
152 | function getPodEventStreamAll(){
153 | local podname=$1
154 |
155 | if [ ! -z "$podname" ]; then
156 | curl -s $url/api/v1/watch/pods/$podname
157 | else
158 | curl -s $url/api/v1/watch/pods
159 | fi
160 |
161 | }
162 |
163 | function getServiceEventStream(){
164 | curl -s $url/api/v1/watch/services
165 | }
166 |
167 | function getDeploymentEventStream(){
168 | curl $url/apis/extensions/v1beta1/watch/deployments
169 | }
170 |
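171 | # Example usage (a sketch; assumes `kubectl proxy` is listening on localhost:8001
172 | # and that jq is installed):
173 | #   source apiReader.f
174 | #   getServices kube-system
175 | #   getServiceEndpoints my-nginx default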
--------------------------------------------------------------------------------
/henrik/confluence-dockerized/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:jessie
2 | MAINTAINER info@praqma.net
3 |
4 | # Update and install basic tools inc. Oracle JDK 1.8
5 | RUN echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee /etc/apt/sources.list.d/webupd8team-java.list && \
6 | echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list && \
7 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886 && \
8 | apt-get update && \
9 | echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
10 | echo debconf shared/accepted-oracle-license-v1-1 seen true | debconf-set-selections && \
11 | apt-get install libapr1 libaprutil1 libtcnative-1 oracle-java8-installer oracle-java8-set-default curl vim wget unzip nmap libtcnative-1 xmlstarlet --force-yes -y && \
12 | apt-get clean
13 |
14 | # Define JAVA_HOME variable
15 | ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
16 |
17 | # Add /srv/java on PATH variable
18 | ENV PATH ${PATH}:${JAVA_HOME}/bin:/srv/java
19 |
20 | # Setup useful environment variables
21 | ENV CONFLUENCE_HOME /var/atlassian/application-data/confluence
22 | ENV CONFLUENCE_INSTALL /opt/atlassian/confluence
23 | ENV CONF_VERSION 6.1.0-beta2
24 |
25 | ENV CONFLUENCE_DOWNLOAD_URL http://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-${CONF_VERSION}.tar.gz
26 |
27 | ENV MYSQL_VERSION 5.1.38
28 | ENV MYSQL_DRIVER_DOWNLOAD_URL http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-${MYSQL_VERSION}.tar.gz
29 |
30 | # Use the default unprivileged account. This could be considered bad practice
31 | # on systems where multiple processes end up being executed by 'daemon' but
32 | # here we only ever run one process anyway.
33 | ENV RUN_USER daemon
34 | ENV RUN_GROUP daemon
35 |
36 |
37 | # Install Atlassian Confluence and helper tools and setup initial home
38 | # directory structure.
39 | RUN set -x \
40 | && apt-get update --quiet \
41 | && apt-get install --quiet --yes --no-install-recommends libtcnative-1 xmlstarlet \
42 | && apt-get clean \
43 | && mkdir -p "${CONFLUENCE_HOME}" \
44 | && mkdir -p "${CONFLUENCE_INSTALL}/conf" \
45 | && curl -Ls "${CONFLUENCE_DOWNLOAD_URL}" | tar -xz --directory "${CONFLUENCE_INSTALL}" --strip-components=1 --no-same-owner \
46 | && curl -Ls "${MYSQL_DRIVER_DOWNLOAD_URL}" | tar -xz --directory "${CONFLUENCE_INSTALL}/confluence/WEB-INF/lib" --strip-components=1 --no-same-owner "mysql-connector-java-${MYSQL_VERSION}/mysql-connector-java-${MYSQL_VERSION}-bin.jar" \
47 | && echo "\nconfluence.home=${CONFLUENCE_HOME}" >> "${CONFLUENCE_INSTALL}/confluence/WEB-INF/classes/confluence-init.properties" \
48 | && xmlstarlet ed --inplace \
49 | --delete "Server/@debug" \
50 | --delete "Server/Service/Connector/@debug" \
51 | --delete "Server/Service/Connector/@useURIValidationHack" \
52 | --delete "Server/Service/Connector/@minProcessors" \
53 | --delete "Server/Service/Connector/@maxProcessors" \
54 | --delete "Server/Service/Engine/@debug" \
55 | --delete "Server/Service/Engine/Host/@debug" \
56 | --delete "Server/Service/Engine/Host/Context/@debug" \
57 | "${CONFLUENCE_INSTALL}/conf/server.xml" \
58 | && touch -d "@0" "${CONFLUENCE_INSTALL}/conf/server.xml"
59 |
60 | RUN chmod -R 700 "${CONFLUENCE_INSTALL}" \
61 | && chown -R ${RUN_USER}:${RUN_GROUP} "${CONFLUENCE_INSTALL}" \
62 | && chmod -R 700 "${CONFLUENCE_HOME}" \
63 | && chown -R ${RUN_USER}:${RUN_GROUP} "${CONFLUENCE_HOME}"
64 |
65 |
66 | # Use the default unprivileged account. This could be considered bad practice
67 | # on systems where multiple processes end up being executed by 'daemon' but
68 | # here we only ever run one process anyway.
69 | USER ${RUN_USER}:${RUN_GROUP}
70 |
71 | # Expose default HTTP connector port.
72 | EXPOSE 8090
73 | EXPOSE 8091
74 |
75 | # Set volume mount points for installation and home directory. Changes to the
76 | # home directory needs to be persisted as well as parts of the installation
77 | # directory due to eg. logs.
78 | VOLUME ["${CONFLUENCE_INSTALL}/logs", "${CONFLUENCE_HOME}"]
79 |
80 | # Set the default working directory as the Confluence installation directory.
81 | WORKDIR ${CONFLUENCE_INSTALL}
82 |
83 | COPY docker-entrypoint.sh /
84 | ENTRYPOINT ["/docker-entrypoint.sh"]
85 |
86 | # Run Atlassian Confluence as a foreground process by default.
87 | CMD ["./bin/catalina.sh", "run"]
88 |
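89 | # Example usage (a sketch; the image tag and proxy values below are only placeholders):
90 | #   docker build -t confluence:6.1.0-beta2 .
91 | #   docker run -d -p 8090:8090 \
92 | #     -e X_PROXY_NAME=confluence.example.com -e X_PROXY_PORT=443 \
93 | #     -e X_PROXY_SCHEME=https -e X_PROXY_SECURE=true \
94 | #     confluence:6.1.0-beta2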
--------------------------------------------------------------------------------
/henrik/confluence-dockerized/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # check if the `server.xml` file has been changed since the creation of this
4 | # Docker image. If the file has been changed the entrypoint script will not
5 | # perform modifications to the configuration file.
6 | if [ "$(stat --format "%Y" "${CONFLUENCE_INSTALL}/conf/server.xml")" -eq "0" ]; then
7 | if [ -n "${X_PROXY_NAME}" ]; then
8 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "proxyName" --value "${X_PROXY_NAME}" "${CONFLUENCE_INSTALL}/conf/server.xml"
9 | fi
10 | if [ -n "${X_PROXY_PORT}" ]; then
11 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "proxyPort" --value "${X_PROXY_PORT}" "${CONFLUENCE_INSTALL}/conf/server.xml"
12 | fi
13 | if [ -n "${X_PROXY_SCHEME}" ]; then
14 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "scheme" --value "${X_PROXY_SCHEME}" "${CONFLUENCE_INSTALL}/conf/server.xml"
15 | fi
16 | if [ -n "${X_PROXY_SECURE}" ]; then
17 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "secure" --value "${X_PROXY_SECURE}" "${CONFLUENCE_INSTALL}/conf/server.xml"
18 | fi
19 | if [ -n "${X_PATH}" ]; then
20 | xmlstarlet ed --inplace --pf --ps --update '//Context/@path' --value "${X_PATH}" "${CONFLUENCE_INSTALL}/conf/server.xml"
21 | fi
22 | fi
23 |
24 | if [ -f "${CERTIFICATE}" ]; then
25 | keytool -noprompt -storepass changeit -keystore ${JAVA_CACERTS} -import -file ${CERTIFICATE} -alias CompanyCA
26 | fi
27 |
28 |
29 | exec "$@"
30 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:stable-alpine
2 |
3 | COPY index.html /usr/share/nginx/html/
4 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-1/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | Hello World
4 |
5 |
6 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-1/run.sh:
--------------------------------------------------------------------------------
1 | docker run -d -p 80:80 hoeghh/nginx-example:v1
2 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:stable-alpine
2 | ENV MYVAR "Praqma"
3 | RUN echo "Hello $MYVAR" > /usr/share/nginx/html/index.html
4 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-2/run.sh:
--------------------------------------------------------------------------------
1 | docker run -d -p 80:80 hoeghh/nginx-example:v2
2 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-3/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:stable-alpine
2 |
3 | COPY entrypoint.sh /
4 |
5 | ENTRYPOINT ["/entrypoint.sh"]
6 |
7 | CMD ["nginx", "-g", "daemon off;"]
8 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-3/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | echo "
4 |
5 | Welcome to Docker
6 | Please... behave.
7 |
8 | " > /usr/share/nginx/html/index.html
9 |
10 | exec "$@"
11 |
--------------------------------------------------------------------------------
/henrik/docker-workshop-examples/nginx-docker-example-3/run.sh:
--------------------------------------------------------------------------------
1 | docker build -t hoeghh/nginx-example:v3 .
2 | docker run -d -p 80:80 hoeghh/nginx-example:v3
3 |
--------------------------------------------------------------------------------
/henrik/internal-loadbalancer-nginx/images/kubernetes_loadbalancer_topoloty.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Praqma/LearnKubernetes/9c90b617e92a965fdfc4f6baec7a4aa961c74ae8/henrik/internal-loadbalancer-nginx/images/kubernetes_loadbalancer_topoloty.png
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/configmaps/mysql-configmap-confluence.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: mysql-confluence-db
5 | data:
6 | create-confluence-db.sh: |-
7 | echo "CREATE DATABASE IF NOT EXISTS confluence CHARACTER SET utf8 COLLATE utf8_bin" | mysql --host=localhost --user=root --password=$MYSQL_ROOT_PASSWORD
8 | echo "GRANT ALL PRIVILEGES ON confluence.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD'" | mysql --host=localhost --user=root --password=$MYSQL_ROOT_PASSWORD
9 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/configmaps/mysql-configmap.yaml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: mysql-config
5 | data:
6 | confluence.cnf: |-
7 | [mysqld]
8 | bind-address = 0.0.0.0
9 | character-set-server=utf8
10 | collation-server=utf8_bin
11 | default-storage-engine=INNODB
12 | max_allowed_packet=256M
13 | innodb_log_file_size=2GB
14 | transaction-isolation=READ-COMMITTED
15 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/delete.sh:
--------------------------------------------------------------------------------
1 | # Deploy to Kubernetes
2 | (cd configmaps;
3 | kubectl delete -f mysql-configmap.yaml
4 | kubectl delete -f mysql-configmap-confluence.yaml)
5 | (cd secrets;
6 | kubectl delete -f mysql-secret.yaml)
7 | (cd services;
8 | kubectl delete -f mysql-service.yaml;
9 | kubectl delete -f confluence-service.yaml)
10 | (cd ingress;
11 | kubectl delete -f confluence-ingress.yaml)
12 | (cd deployments;
13 | kubectl delete -f mysql-deployment.yaml;
14 | kubectl delete -f confluence-deployment.yaml)
15 |
16 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/deploy.sh:
--------------------------------------------------------------------------------
1 | # Build the Docker images
2 | #(cd images/jira; docker build -t hoeghh/jira:7.4.4 .) &
3 | #(cd images/mysql; docker build -t hoeghh/mysql:5.6 .) &
4 |
5 | # Wait for them to finish
6 | #wait
7 |
8 | # Push our Jira and MySql Docker image to docker hub
9 | #docker push hoeghh/jira:7.4.4
10 | #docker push hoeghh/mysql:5.6
11 |
12 | # Deploy to Kubernetes
13 | (cd configmaps;
14 | kubectl apply -f mysql-configmap.yaml
15 | kubectl apply -f mysql-configmap-confluence.yaml)
16 | (cd secrets;
17 | kubectl apply -f mysql-secret.yaml)
18 | (cd services;
19 | kubectl apply -f mysql-service.yaml;
20 | kubectl apply -f confluence-service.yaml)
21 | (cd ingress;
22 | kubectl apply -f confluence-ingress.yaml)
23 | (cd deployments;
24 | kubectl apply -f mysql-deployment.yaml;
25 | kubectl apply -f confluence-deployment.yaml)
26 |
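27 | # Optionally watch the rollout afterwards (plain kubectl; adjust the namespace if you
28 | # deploy somewhere other than default):
29 | #   kubectl get pods -w
30 | #   kubectl get svc,ingress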
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/deployments/confluence-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: confluence
6 | name: confluence
7 | spec:
8 | replicas: 1
9 | template:
10 | metadata:
11 | labels:
12 | app: confluence
13 | name: confluence
14 | spec:
15 | terminationGracePeriodSeconds: 50
16 | containers:
17 | - name: confluence
18 | image: "hoeghh/confluence:6.1.beta2"
19 | ports:
20 | - containerPort: 8090
21 | resources:
22 | requests:
23 | cpu: "800m"
24 | memory: "1.5G"
25 | env:
26 | - name: X_PROXY_NAME
27 | value: "confluence.example.com"
28 | - name: X_PROXY_PORT
29 | value: "443"
30 | - name: X_PROXY_SCHEME
31 | value: "https"
32 | - name: X_PROXY_SECURE
33 | value: "true"
34 | - name: X_PATH
35 | value: ""
36 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/deployments/mysql-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: mysql
5 | spec:
6 | replicas: 1
7 | template:
8 | metadata:
9 | labels:
10 | app: mysql
11 | spec:
12 | terminationGracePeriodSeconds: 10
13 | containers:
14 | - name: mysql
15 | image: mysql:5.6
16 | resources:
17 | requests:
18 | cpu: "500m"
19 | memory: "1.0G"
20 | ports:
21 | - containerPort: 3306
22 | name: mysql
23 | env:
24 | - name: MYSQL_ROOT_PASSWORD
25 | valueFrom:
26 | secretKeyRef:
27 | name: mysql-secrets
28 | key: mysql-root-password
29 | - name: MYSQL_USER
30 | valueFrom:
31 | secretKeyRef:
32 | name: mysql-secrets
33 | key: mysql-user
34 | - name: MYSQL_PASSWORD
35 | valueFrom:
36 | secretKeyRef:
37 | name: mysql-secrets
38 | key: mysql-password
39 | volumeMounts:
40 | - name: mysql-confluence-db
41 | mountPath: /docker-entrypoint-initdb.d/
42 | - name: config-volume
43 | mountPath: /etc/mysql/conf.d
44 | volumes:
45 | - name: config-volume
46 | configMap:
47 | name: mysql-config
48 | - name: mysql-confluence-db
49 | configMap:
50 | name: mysql-confluence-db
51 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/images/build.sh:
--------------------------------------------------------------------------------
1 | (cd confluence
2 | docker build -t hoeghh/confluence:6.1.beta2 .
3 | docker push hoeghh/confluence:6.1.beta2)
4 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/images/confluence/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:jessie
2 | MAINTAINER info@praqma.net
3 |
4 | # Update and install basic tools inc. Oracle JDK 1.8
5 | RUN echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee /etc/apt/sources.list.d/webupd8team-java.list && \
6 | echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | tee -a /etc/apt/sources.list.d/webupd8team-java.list && \
7 | apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886 && \
8 | apt-get update && \
9 | echo debconf shared/accepted-oracle-license-v1-1 select true | debconf-set-selections && \
10 | echo debconf shared/accepted-oracle-license-v1-1 seen true | debconf-set-selections && \
11 | apt-get install libapr1 libaprutil1 libtcnative-1 oracle-java8-installer oracle-java8-set-default curl vim wget unzip nmap libtcnative-1 xmlstarlet --force-yes -y && \
12 | apt-get clean
13 |
14 | # Define JAVA_HOME variable
15 | ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
16 |
17 | # Add /srv/java on PATH variable
18 | ENV PATH ${PATH}:${JAVA_HOME}/bin:/srv/java
19 |
20 | # Setup useful environment variables
21 | ENV CONFLUENCE_HOME /var/atlassian/application-data/confluence
22 | ENV CONFLUENCE_INSTALL /opt/atlassian/confluence
23 | ENV CONF_VERSION 6.1.0-beta2
24 |
25 | ENV CONFLUENCE_DOWNLOAD_URL http://www.atlassian.com/software/confluence/downloads/binary/atlassian-confluence-${CONF_VERSION}.tar.gz
26 |
27 | ENV MYSQL_VERSION 5.1.38
28 | ENV MYSQL_DRIVER_DOWNLOAD_URL http://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-${MYSQL_VERSION}.tar.gz
29 |
30 | # Use the default unprivileged account. This could be considered bad practice
31 | # on systems where multiple processes end up being executed by 'daemon' but
32 | # here we only ever run one process anyway.
33 | ENV RUN_USER daemon
34 | ENV RUN_GROUP daemon
35 |
36 |
37 | # Install Atlassian Confluence and helper tools and setup initial home
38 | # directory structure.
39 | RUN set -x \
40 | && apt-get update --quiet \
41 | && apt-get install --quiet --yes --no-install-recommends libtcnative-1 xmlstarlet \
42 | && apt-get clean \
43 | && mkdir -p "${CONFLUENCE_HOME}" \
44 | && mkdir -p "${CONFLUENCE_INSTALL}/conf" \
45 | && curl -Ls "${CONFLUENCE_DOWNLOAD_URL}" | tar -xz --directory "${CONFLUENCE_INSTALL}" --strip-components=1 --no-same-owner \
46 | && curl -Ls "${MYSQL_DRIVER_DOWNLOAD_URL}" | tar -xz --directory "${CONFLUENCE_INSTALL}/confluence/WEB-INF/lib" --strip-components=1 --no-same-owner "mysql-connector-java-${MYSQL_VERSION}/mysql-connector-java-${MYSQL_VERSION}-bin.jar" \
47 | && echo "\nconfluence.home=${CONFLUENCE_HOME}" >> "${CONFLUENCE_INSTALL}/confluence/WEB-INF/classes/confluence-init.properties" \
48 | && xmlstarlet ed --inplace \
49 | --delete "Server/@debug" \
50 | --delete "Server/Service/Connector/@debug" \
51 | --delete "Server/Service/Connector/@useURIValidationHack" \
52 | --delete "Server/Service/Connector/@minProcessors" \
53 | --delete "Server/Service/Connector/@maxProcessors" \
54 | --delete "Server/Service/Engine/@debug" \
55 | --delete "Server/Service/Engine/Host/@debug" \
56 | --delete "Server/Service/Engine/Host/Context/@debug" \
57 | "${CONFLUENCE_INSTALL}/conf/server.xml" \
58 | && touch -d "@0" "${CONFLUENCE_INSTALL}/conf/server.xml"
59 |
60 | RUN chmod -R 700 "${CONFLUENCE_INSTALL}" \
61 | && chown -R ${RUN_USER}:${RUN_GROUP} "${CONFLUENCE_INSTALL}" \
62 | && chmod -R 700 "${CONFLUENCE_HOME}" \
63 | && chown -R ${RUN_USER}:${RUN_GROUP} "${CONFLUENCE_HOME}"
64 |
65 |
66 | # Use the default unprivileged account. This could be considered bad practice
67 | # on systems where multiple processes end up being executed by 'daemon' but
68 | # here we only ever run one process anyway.
69 | USER ${RUN_USER}:${RUN_GROUP}
70 |
71 | # Expose default HTTP connector port.
72 | EXPOSE 8090
73 | EXPOSE 8091
74 |
75 | # Set volume mount points for installation and home directory. Changes to the
76 | # home directory needs to be persisted as well as parts of the installation
77 | # directory due to eg. logs.
78 | VOLUME ["${CONFLUENCE_INSTALL}/logs", "${CONFLUENCE_HOME}"]
79 |
80 | # Set the default working directory as the Confluence installation directory.
81 | WORKDIR ${CONFLUENCE_INSTALL}
82 |
83 | COPY docker-entrypoint.sh /
84 | ENTRYPOINT ["/docker-entrypoint.sh"]
85 |
86 | # Run Atlassian Confluence as a foreground process by default.
87 | CMD ["./bin/catalina.sh", "run"]
88 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/images/confluence/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # check if the `server.xml` file has been changed since the creation of this
4 | # Docker image. If the file has been changed the entrypoint script will not
5 | # perform modifications to the configuration file.
6 | if [ "$(stat --format "%Y" "${CONFLUENCE_INSTALL}/conf/server.xml")" -eq "0" ]; then
7 | if [ -n "${X_PROXY_NAME}" ]; then
8 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "proxyName" --value "${X_PROXY_NAME}" "${CONFLUENCE_INSTALL}/conf/server.xml"
9 | fi
10 | if [ -n "${X_PROXY_PORT}" ]; then
11 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "proxyPort" --value "${X_PROXY_PORT}" "${CONFLUENCE_INSTALL}/conf/server.xml"
12 | fi
13 | if [ -n "${X_PROXY_SCHEME}" ]; then
14 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "scheme" --value "${X_PROXY_SCHEME}" "${CONFLUENCE_INSTALL}/conf/server.xml"
15 | fi
16 | if [ -n "${X_PROXY_SECURE}" ]; then
17 | xmlstarlet ed --inplace --pf --ps --insert '//Connector[@port="8090"]' --type "attr" --name "secure" --value "${X_PROXY_SECURE}" "${CONFLUENCE_INSTALL}/conf/server.xml"
18 | fi
19 | if [ -n "${X_PATH}" ]; then
20 | xmlstarlet ed --inplace --pf --ps --update '//Context/@path' --value "${X_PATH}" "${CONFLUENCE_INSTALL}/conf/server.xml"
21 | fi
22 | fi
23 |
24 | if [ -f "${CERTIFICATE}" ]; then
25 | keytool -noprompt -storepass changeit -keystore ${JAVA_CACERTS} -import -file ${CERTIFICATE} -alias CompanyCA
26 | fi
27 |
28 |
29 | exec "$@"
30 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/images/mysql/Dockerfile:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Praqma/LearnKubernetes/9c90b617e92a965fdfc4f6baec7a4aa961c74ae8/henrik/kubernetes-workshopexamples/confluence-mysql/images/mysql/Dockerfile
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/ingress/confluence-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: confluence
5 | spec:
6 | rules:
7 | - host: confluence.example.com
8 | http:
9 | paths:
10 | - path: /
11 | backend:
12 | serviceName: confluence
13 | servicePort: 8090
14 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/notes.txt:
--------------------------------------------------------------------------------
1 | jdbc:mysql://mysql.default.svc.cluster.local/confluence?sessionVariables=storage_engine%3DInnoDB&useUnicode=true&characterEncoding=utf8
2 | confluence
3 | confluencepassword
4 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/secrets/mysql-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: mysql-secrets
5 | type: Opaque
6 | data:
7 | mysql-root-password: cm9vdHBhc3N3b3Jk
8 | mysql-user: Y29uZmx1ZW5jZQ==
9 | mysql-password: Y29uZmx1ZW5jZXBhc3N3b3Jk
10 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/services/confluence-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: confluence
5 | spec:
6 | ports:
7 | - port: 8090
8 | protocol: TCP
9 | targetPort: 8090
10 | selector:
11 | app: confluence
12 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/confluence-mysql/services/mysql-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: mysql
5 | spec:
6 | ports:
7 | - port: 3306
8 | protocol: TCP
9 | targetPort: mysql
10 | selector:
11 | app: mysql
12 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/simple-deployment/deploy.sh:
--------------------------------------------------------------------------------
1 | kubectl create -f nginx.yaml
2 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/simple-deployment/nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 1
7 | template:
8 | metadata:
9 | labels:
10 | app: nginx
11 | spec:
12 | containers:
13 | - name: nginx
14 | image: nginx:stable-alpine
15 |
16 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/simple-run/run.sh:
--------------------------------------------------------------------------------
1 | kubectl run my-nginx --image=nginx --replicas=1 --port=80
2 | kubectl expose deployment my-nginx --port=80 --type=NodePort
3 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/simple-run/scale.sh:
--------------------------------------------------------------------------------
1 | kubectl scale deployment my-nginx --replicas=2
2 |
--------------------------------------------------------------------------------
/henrik/kubernetes-workshopexamples/talk-to-apiserver/run.sh:
--------------------------------------------------------------------------------
1 | echo "run kubectl proxy &"
2 | echo "then curl -s localhost:8001/api/v1/nodes"
3 | echo "get all rest endpoints here : http://kubernetes.io/docs/api-reference/v1/operations"
4 |
--------------------------------------------------------------------------------
/henrik/nfs-pv/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes NFS share as Persistent Volume
2 | ## Here we will create a persistent volume in our Kubernetes cluster that mounts an NFS share from another server. This persistent volume will serve our pods with persistent data storage via persistent volume claims.
3 |
4 | ### Get an NFS share up and running
5 | See getting-nfs-running.md for help on getting a test NFS share up and running for this example to work.
6 |
7 | ### First, let's contain our project to a namespace called production
8 | ```
9 | kubectl create namespace production
10 | ```
11 |
12 | View namespaces in the cluster to see if it was created
13 | ```
14 | kubectl get namespaces --show-labels
15 | ```
16 |
17 | ### Create the Persistent Volume (pv)
18 | Change the IP to the IP of the host serving the NFS share.
19 | ```
20 | apiVersion: v1
21 | kind: PersistentVolume
22 | metadata:
23 | name: nfs
24 | spec:
25 | capacity:
26 | storage: 50Mi
27 | accessModes:
28 | - ReadWriteMany
29 | nfs:
30 | server: 10.245.1.1
31 | path: /opt/nfsshare
32 | ```
33 | ```
34 | kubectl create -f yaml/nfs-pv.yaml --namespace=production
35 | ```
36 |
37 | To see information about this persistent volume, run
38 | ```
39 | kubectl describe pv nfs --namespace=production
40 | ```
41 |
42 | ### Create a Persistent Volume Claim (pvc)
43 | Now we need to claim some of the storage, so we make a persistent volume claim.
44 | ```
45 | kind: PersistentVolumeClaim
46 | apiVersion: v1
47 | metadata:
48 | name: nfs
49 | spec:
50 | accessModes:
51 | - ReadWriteMany
52 | resources:
53 | requests:
54 | storage: 1Mi
55 | ```
56 | ```
57 | kubectl create -f yaml/nfs-pvc.yaml --namespace=production
58 | ```
59 |
60 | To see the pvc (persistent volume claim), run
61 | ```
62 | kubectl get pvc --namespace=production
63 | ```
64 |
65 | ### Create a Replication Controller (rc) that uses our PV
66 | Now we will start an RC that spins up some nginx pods, serving an HTML file from our NFS share.
67 |
68 | ```
69 | apiVersion: v1
70 | kind: ReplicationController
71 | metadata:
72 | name: nfs-web
73 | spec:
74 | replicas: 2
75 | selector:
76 | role: web-frontend
77 | template:
78 | metadata:
79 | labels:
80 | role: web-frontend
81 | spec:
82 | containers:
83 | - name: web
84 | image: nginx
85 | ports:
86 | - name: web
87 | containerPort: 80
88 | volumeMounts:
89 | - name: nfs
90 | mountPath: "/usr/share/nginx/html"
91 | volumes:
92 | - name: nfs
93 | persistentVolumeClaim:
94 | claimName: nfs
95 | ```
96 | ```
97 | kubectl create -f yaml/nginx-rc.yaml --namespace=production
98 | ```
99 |
100 | ### Testing
101 | To see that this works, you can create a port-forward to the pods we created.
102 |
103 | First, we need to find the names of our pods:
104 | ```
105 | kubectl get pods --namespace=production
106 | NAME READY STATUS RESTARTS AGE
107 | nfs-web-1f8ds 1/1 Running 0 41m
108 | nfs-web-330sq 1/1 Running 0 39m
109 |
110 | ```
111 |
112 | Then run the following for each pod
113 | ```
114 | kubectl port-forward nfs-web-1f8ds 8081:80 --namespace=production &
115 | kubectl port-forward nfs-web-330sq 8082:80 --namespace=production &
116 | ```
117 |
118 | Now let's see if we can fetch our NFS index.html file from the web servers.
119 | ```
120 | curl localhost:8081
121 | curl localhost:8082
122 | ```
123 |
124 | ### Test editing our HTML file
125 | Now edit the index.html file on your host, and run the two curl commands again. They should serve the new content instantly.
126 |
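As a quick sanity check (a small sketch; the test text is just an example), you can verify that the claim is bound and that nginx really serves the share:
```
# Both should show STATUS=Bound once the claim has matched the volume
kubectl get pv nfs --namespace=production
kubectl get pvc nfs --namespace=production

# On the NFS host: drop a test page into the exported directory used by the PV
echo "Hello from NFS" | sudo tee /opt/nfsshare/index.html

# Fetch it through the port-forwards created above
curl localhost:8081
curl localhost:8082
```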
--------------------------------------------------------------------------------
/henrik/nfs-pv/yaml/nfs-pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: nfs
5 | spec:
6 | capacity:
7 | storage: 50Mi
8 | accessModes:
9 | - ReadWriteMany
10 | nfs:
11 | server: 10.245.1.1
12 | path: /opt/nfsshare
13 |
--------------------------------------------------------------------------------
/henrik/nfs-pv/yaml/nfs-pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: nfs
5 | spec:
6 | accessModes:
7 | - ReadWriteMany
8 | resources:
9 | requests:
10 | storage: 1Mi
11 |
--------------------------------------------------------------------------------
/henrik/nfs-pv/yaml/nginx-rc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ReplicationController
3 | metadata:
4 | name: nfs-web
5 | spec:
6 | replicas: 2
7 | selector:
8 | role: web-frontend
9 | template:
10 | metadata:
11 | labels:
12 | role: web-frontend
13 | spec:
14 | containers:
15 | - name: web
16 | image: nginx
17 | ports:
18 | - name: web
19 | containerPort: 80
20 | volumeMounts:
21 | - name: nfs
22 | mountPath: "/usr/share/nginx/html"
23 | volumes:
24 | - name: nfs
25 | persistentVolumeClaim:
26 | claimName: nfs
27 |
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer-poc/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes loadbalancer using NodePort
2 | ## In this example we create a Deployment with two pods, and a Service that exposes these on every node in the cluster.
3 | This example uses our apiReader to get data from Kubernetes.
4 | It will only configure services that have a nodePort.
5 |
6 | > Remember to turn off SELinux if you are running apache locally
7 |
8 | If you run the following command, these will be deployed
9 | ```
10 | source start-pods.sh
11 | ```
12 |
13 | Now we need to have Apache installed. Here we have done it on the main host machine.
14 |
15 | We now run the following to add the service to the loadbalancer.
16 |
17 | ```
18 | source setHttpdConf.sh > httpd-loadbalancer.conf
19 | sudo cp httpd-loadbalancer.conf /etc/httpd/conf.d/
20 | sudo service httpd reload
21 | ```
22 |
23 | Now go to http://localhost/balancer-manager to see the nodes and loadbalancer status.
24 | If you have more services/Deployments, you can run the script again, and it will add them to Apache.
25 |
26 | 
27 |
28 | Now we can see that the path to each service is its own service name in the cluster.
29 | E.g. localhost/my-apache
30 |
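Before involving Apache at all, the NodePort itself can be checked directly (a sketch; my-nginx is the deployment/service created by start-pods.sh, and the node IP placeholder must be replaced with one of your own node IPs):
```
# Find the nodePort that was assigned to the service
NODE_PORT=$(kubectl get svc my-nginx -o jsonpath='{.spec.ports[0].nodePort}')

# Curl any node in the cluster on that port
curl http://<node-ip>:${NODE_PORT}/
```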
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer-poc/images/apache-loadbalancer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Praqma/LearnKubernetes/9c90b617e92a965fdfc4f6baec7a4aa961c74ae8/henrik/nodeport-loadbalancer-poc/images/apache-loadbalancer.png
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer-poc/setHttpdConf.sh:
--------------------------------------------------------------------------------
1 | source ../apiReader/apiReader.f
2 |
3 | Services=$(getServices default | tr " " "\n")
4 | Nodes=$(getNodeNames | tr " " "\n")
5 |
6 | echo "
7 | ProxyRequests off
8 |
9 | ServerName example.org
10 | ProxyPreserveHost On"
11 |
12 | printf '%s\n' "$Services" | while IFS= read -r line
13 | do
14 | ServicePort=$(getServiceNodePorts $line "default")
15 | Endpoints=$(getServiceEndpoints $line "default")
16 |
17 | if [ ! "$ServicePort" == "null" ] && [ ! "$Endpoints" == "" ]; then
18 | echo "
19 | "
20 |
21 |
22 | printf '%s\n' "$Nodes" | while IFS= read -r line
23 | do
24 | nodeIP=$(getNodeIPs $line)
25 | echo " BalancerMember http://$nodeIP:$ServicePort"
26 | done
27 | echo "
28 | # Security: technically we aren't blocking
29 | # anyone but this is the place to make
30 | # those changes.
31 | #Order Allow
32 | #Require all granted
33 | # In this example all requests are allowed.
34 |
35 | # Load Balancer Settings
36 | # We will be configuring a simple Round
37 | # Robin style load balancer. This means
38 | # that all webheads take an equal share
39 | # of the load.
40 | ProxySet lbmethod=byrequests
41 | "
42 | fi
43 | done
44 | echo " # balancer-manager
45 | # This tool is built into the mod_proxy_balancer
46 | # module and will allow you to do some simple
47 | # modifications to the balanced group via a gui
48 | # web interface.
49 |   <Location /balancer-manager>
50 | SetHandler balancer-manager
51 |
52 | # I recommend locking this one down to your
53 | # your office
54 | # Require host example.org
55 |   </Location>
56 | "
57 |
58 |
59 | echo "
60 | # Point of Balance
61 | # This setting will allow to explicitly name the
62 | # the location in the site that we want to be
63 | # balanced, in this example we will balance "/"
64 | # or everything in the site.
65 | ProxyPass /balancer-manager !
66 | "
67 |
68 |
69 | printf '%s\n' "$Services" | while IFS= read -r line
70 | do
71 | ServicePort=$(getServiceNodePorts $line "default")
72 | Endpoints=$(getServiceEndpoints $line "default")
73 |
74 | if [ ! "$ServicePort" == "null" ] && [ ! "$Endpoints" == "" ]; then
75 | echo " ProxyPass /$line balancer://$line
76 | ProxyPassReverse /$line balancer://$line"
77 | fi
78 | done
79 |
80 | echo "
81 | "
82 |
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer-poc/start-pods.sh:
--------------------------------------------------------------------------------
1 | echo "
2 | Starting pod with NginX"
3 | kubectl run my-nginx --image=kamranazeem/centos-multitool --replicas=2 --port=80
4 |
5 | echo "
6 | Starting service to expose NginX service as NodePort"
7 | kubectl expose deployment my-nginx --port=80 --type=NodePort
8 |
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer/README.md:
--------------------------------------------------------------------------------
1 | **Update:** This project is now part of [praqma/k8s-cloud-loadbalancer](https://github.com/Praqma/k8s-cloud-loadbalancer). All future work will be done over there.
2 |
3 |
4 |
5 | # NodePort Loadbalancer
6 | ## Introduction
7 | This folder contains an example of how to update an Apache webserver to reflect services running in a Kubernetes cluster by loadbalancing them. It will create a loadbalancer for each exposed service that has endpoints.
8 |
9 | The file tools.f contains two main functions: createLoadBalancer and createServiceLB (in Apache and HAProxy variants).
10 |
11 | createServiceLB creates the lines needed for each service. It finds the IPs of the nodes and the nodePort of the service, and adds them as BalancerMember entries for that service. It writes this to a file ending in .bl. At the end of each .bl file, we add a ProxyPass and ProxyPassReverse for the service as well.
12 |
13 | createLoadBalancer creates the outer VirtualHost part, which includes all the .bl files when loaded by Apache. It saves this in a file called kubernetes.services.conf.
14 |
15 | If kubernetes.services.conf and all the .bl files are copied to e.g. /etc/httpd/conf.d and Apache is reloaded, you will have a functional loadbalancer.
16 |
17 | ## How to use
18 | We have created a run.sh script that shows how you can use the functions. In this example, we have an Apache running on the host machine. We call createLoadBalancer and it creates the files we need. It then copies them to /etc/httpd/conf.d/ and reloads the Apache webserver.
19 |
20 | Then go to localhost/balancer-manager to see your loadbalancer reflect the services in your Kubernetes cluster. As mentioned earlier, only services with endpoints and a nodePort assigned will be processed.
21 |
22 |
23 | So start by creating a deployment
24 | ```
25 | kubectl run my-nginx --image=nginx --replicas=2 --port=80
26 | ```
27 |
28 | Then expose this as a service, type=NodePort
29 | ```
30 | kubectl expose deployment my-nginx --port=80 --type=NodePort
31 | ```
32 |
33 | Now run ./run.sh to update your Apache LoadBalancer (for HAProxy, see bottom of this page)
34 | ```
35 | [hoeghh@localhost nodeport-loadbalancer]$ ./run.sh
36 | - Running createLoadBalancer
37 | - Cleaning up old files
38 | - Copying files
39 | - Restarting httpd
40 | Redirecting to /bin/systemctl reload httpd.service
41 | ```
42 |
43 | Go to http://localhost/balancer-manager to see your deployment being loadbalanced.
44 | Then curl your deployment
45 | ```
46 | [hoeghh@localhost nodeport-loadbalancer]$ curl localhost/my-nginx
47 | <!DOCTYPE html>
48 | <html>
49 | <head>
50 | <title>Welcome to nginx!</title>
51 | <style>
52 |     body {
53 |         width: 35em;
54 |         margin: 0 auto;
55 |         font-family: Tahoma, Verdana, Arial, sans-serif;
56 |     }
57 | </style>
58 | </head>
59 | <body>
60 | <h1>Welcome to nginx!</h1>
61 | <p>If you see this page, the nginx web server is successfully installed and
62 | working. Further configuration is required.</p>
63 |
64 | <p>For online documentation and support please refer to
65 | <a href="http://nginx.org/">nginx.org</a>.<br/>
66 | Commercial support is available at
67 | <a href="http://nginx.com/">nginx.com</a>.</p>
68 |
69 | <p><em>Thank you for using nginx.</em></p>
70 | </body>
71 | </html>
72 | ```
73 |
74 | ## HAProxy
75 | If you want to output a haproxy.conf instead, call createLoadBalancer with the argument 'haproxy'. I will later split run.sh into separate Apache and HAProxy scripts.
76 |
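For the HAProxy variant, the flow might look roughly like this (a sketch; the haproxy.cfg path and the reload command depend on your distribution and are assumptions here):
```
source tools.f
createLoadBalancer haproxy              # writes haproxy.conf in the current directory

# Assumed deployment steps; adjust paths for your system
sudo cp haproxy.conf /etc/haproxy/haproxy.cfg
sudo systemctl reload haproxy

# Stats page as configured in tools.f (listen stats *:1936, uri /stats, auth admin:Praqma)
curl -u admin:Praqma http://localhost:1936/stats
```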
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source tools.f
3 |
4 | echo " - Running createLoadBalancer"
5 | createLoadBalancer
6 |
7 | echo " - Cleaning up old files"
8 | rm -f /etc/httpd/conf.d/*.bl
9 |
10 | echo " - Copying files"
11 | mv -f kubernetes.services.conf /etc/httpd/conf.d/
12 | mv -f *.service.bl /etc/httpd/conf.d/
13 |
14 | echo " - Restarting httpd"
15 | sudo service httpd reload
16 |
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer/testhap.sh:
--------------------------------------------------------------------------------
1 | source tools.f
2 | rm -f *.bl
3 | createLoadBalancer haproxy
4 | cat haproxy.conf
5 |
--------------------------------------------------------------------------------
/henrik/nodeport-loadbalancer/tools.f:
--------------------------------------------------------------------------------
1 | source ../apiReader/apiReader.f
2 |
3 | function createLoadBalancer(){
4 | local LBType=$1
5 |
6 | # Setting LBType to apache, if not specified
7 | if [ -z "$LBType" ];then
8 | echo "Setting Loadbalancer type to Apache (default"
9 | LBType="apache"
10 | fi
11 |
12 | rm -f *.conf
13 | rm -f *.bl
14 |
15 | if [ "$LBType" = "apache" ]; then
16 | createLBApache
17 | elif [ "$LBType" = "haproxy" ]; then
18 | createLBHaproxy
19 | fi
20 | }
21 |
22 | function createLBHaproxy(){
23 | local Services=$(getServices default | tr " " "\n")
24 | local Nodes=$(getNodeNames)
25 | local nodeIP=""
26 | local line=""
27 |
28 | echo "global
29 | stats timeout 30s" > haproxy.conf
30 |
31 | echo "defaults
32 | log global
33 | mode http
34 | option httplog
35 | option dontlognull
36 | timeout connect 5000
37 | timeout client 50000
38 | timeout server 50000
39 | " >> haproxy.conf
40 |
41 | printf '%s\n' "$Services" | (while IFS= read -r line
42 | do
43 | createServiceLBHaproxy "$line" "$Nodes" &
44 | done
45 | wait
46 | )
47 |
48 | # echo "" >> haproxy.conf
49 |
50 | echo "listen stats *:1936
51 | stats enable
52 | stats uri /stats
53 | stats hide-version
54 | stats auth admin:Praqma" >> haproxy.conf
55 |
56 | cat *.bl >> haproxy.conf
57 | rm -f *.bl
58 | }
59 |
60 | function createServiceLBHaproxy(){
61 | local Service=$1
62 | local Nodes=$(echo $2 | tr " " "\n")
63 | local line=""
64 | local i=0
65 |
66 | local ServicePort=$(getServiceNodePorts $Service "default")
67 | local Endpoints=$(getServiceEndpoints $Service "default")
68 |
69 | if [ ! "$ServicePort" == "null" ] && [ ! "$Endpoints" == "" ]; then
70 | echo "
71 | frontend $Service
72 | bind *:$ServicePort
73 | mode http
74 | default_backend "$Service"_BackEnds
75 | " >> $Service.service.bl
76 |
77 |
78 | echo "backend "$Service"_BackEnds
79 | mode http
80 | balance roundrobin
81 | option forwardfor
82 | http-request set-header X-Forwarded-Port %[dst_port]
83 | http-request add-header X-Forwarded-Proto https if { ssl_fc }
84 | option httpchk HEAD / HTTP/1.1\r\nHost:localhost " >> $Service.service.bl
85 |
86 | i=0
87 | printf '%s\n' "$Nodes" | while IFS= read -r line
88 | do
89 | local nodeIP=$(getNodeIPs $line)
90 | echo " server "$Service"_node"$i" $nodeIP:$ServicePort check" >> $Service.service.bl
91 | i=$((i+1))
92 | done
93 | fi
94 |
95 | }
96 |
97 | function createLBApache(){
98 | local Services=$(getServices default | tr " " "\n")
99 | local Nodes=$(getNodeNames)
100 | local nodeIP=""
101 | local line=""
102 |
103 | echo "
104 | ProxyRequests off
105 |
106 | ServerName example.org
107 | ProxyPreserveHost On
108 |
109 | IncludeOptional conf.d/*.bl" > kubernetes.services.conf
110 |
111 |
112 | printf '%s\n' "$Services" | (while IFS= read -r line
113 | do
114 | createServiceLBApache "$line" "$Nodes" &
115 | done
116 | wait
117 | )
118 |
119 | echo " # balancer-manager
120 | # This tool is built into the mod_proxy_balancer
121 | # module and will allow you to do some simple
122 | # modifications to the balanced group via a gui
123 | # web interface.
124 |   <Location /balancer-manager>
125 | SetHandler balancer-manager
126 |
127 | # I recommend locking this one down to your
128 | # your office
129 | # Require host example.org
130 |   </Location>
131 |
132 |
133 | # Point of Balance
134 | # This setting will allow to explicitly name the
135 | # the location in the site that we want to be
136 | # balanced, in this example we will balance "/"
137 | # or everything in the site.
138 | ProxyPass /balancer-manager !
139 | </VirtualHost>
140 | " >> kubernetes.services.conf
141 |
142 | }
143 |
144 | function createServiceLBApache(){
145 | local Service=$1
146 | local Nodes=$(echo $2 | tr " " "\n")
147 | local line=""
148 |
149 | local ServicePort=$(getServiceNodePorts $Service "default")
150 | local Endpoints=$(getServiceEndpoints $Service "default")
151 |
152 | if [ ! "$ServicePort" == "null" ] && [ ! "$Endpoints" == "" ]; then
153 | echo " " > $Service.service.bl
154 |
155 | printf '%s\n' "$Nodes" | while IFS= read -r line
156 | do
157 | local nodeIP=$(getNodeIPs $line)
158 | echo " BalancerMember http://$nodeIP:$ServicePort" >> $Service.service.bl
159 | done
160 |
161 | echo "
162 | # Security: technically we aren't blocking
163 | # anyone but this is the place to make
164 | # those changes.
165 | #Order Allow
166 | #Require all granted
167 | # In this example all requests are allowed.
168 |
169 | # Load Balancer Settings
170 | # We will be configuring a simple Round
171 | # Robin style load balancer. This means
172 | # that all webheads take an equal share
173 | # of the load.
174 | ProxySet lbmethod=byrequests
175 |   </Proxy>
176 |
177 | ProxyPass /$Service balancer://$Service
178 | ProxyPassReverse /$Service balancer://$Service" >> $Service.service.bl
179 | fi
180 | }
181 |
--------------------------------------------------------------------------------
/henrik/traefik_kubernetes/install_traefik.sh:
--------------------------------------------------------------------------------
1 | # Prepare token and certificate
2 | mkdir -p /var/run/secrets/kubernetes.io/serviceaccount
3 | ln -s /var/lib/kubernetes/kubernetes.pem /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
4 | touch /var/run/secrets/kubernetes.io/serviceaccount/token
5 |
6 | # Run traefik
7 | cd traefik_kubernetes/
8 | ./traefik_linux-amd64 -c traefik.toml
9 |
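To verify that Traefik picked up the Ingress objects, something like the following can be used (a sketch; it assumes Traefik's default HTTP entrypoint on port 80 and the web UI port 888 configured in traefik.toml):
```
# The web UI/API configured under [web] in traefik.toml
curl -s http://localhost:888/

# Route a request through Traefik for the host defined in tool.ingress.yml
curl -H "Host: tool.example.com" http://localhost/
```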
--------------------------------------------------------------------------------
/henrik/traefik_kubernetes/tool.ingress.yml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: tool
5 | spec:
6 | rules:
7 | - host: tool.example.com
8 | http:
9 | paths:
10 | - path: /
11 | backend:
12 | serviceName: tool
13 | servicePort: 80
14 |
15 |
--------------------------------------------------------------------------------
/henrik/traefik_kubernetes/traefik.toml:
--------------------------------------------------------------------------------
1 | [web]
2 |
3 | # Web administration port
4 | address = ":888"
5 |
6 | # SSL certificate and key used
7 | # CertFile = "traefik.crt"
8 | # KeyFile = "traefik.key"
9 |
10 |
11 | # Enable more detailed statistics
12 | [web.statistics]
13 |
14 | ################################################################
15 | # Kubernetes Ingress configuration backend
16 | ################################################################
17 | [kubernetes]
18 |
19 | # Kubernetes server endpoint
20 | #
21 | endpoint = "http://controller.example.com:8080"
22 | namespaces = ["default","production"]
23 |
24 |
--------------------------------------------------------------------------------
/henrik/traefik_kubernetes/traefik_linux-amd64:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Praqma/LearnKubernetes/9c90b617e92a965fdfc4f6baec7a4aa961c74ae8/henrik/traefik_kubernetes/traefik_linux-amd64
--------------------------------------------------------------------------------
/kamran/HowToPushDockerImageToDockerHub-Public-Private.md:
--------------------------------------------------------------------------------
1 |
2 | # Docker custom image push to docker hub
3 |
4 | First, from my laptop computer. `docker -v` says 1.11.
5 |
6 |
7 | ```
8 | [kamran@registry tmp]$ docker pull nginx
9 | Using default tag: latest
10 | latest: Pulling from library/nginx
11 | 51f5c6a04d83: Pull complete
12 | a3ed95caeb02: Pull complete
13 | 51d229e136d0: Pull complete
14 | bcd41daec8cc: Pull complete
15 | Digest: sha256:0fe6413f3e30fcc5920bc8fa769280975b10b1c26721de956e1428b9e2f29d04
16 | Status: Downloaded newer image for nginx:latest
17 | [kamran@registry tmp]$
18 | ```
19 |
20 |
21 | ```
22 | [kamran@registry tmp]$ cat Dockerfile
23 | FROM nginx:latest
24 | COPY index.html /usr/share/nginx/html/
25 |
26 |
27 |
28 | [kamran@registry tmp]$ cat index.html
29 | Custom NGINX image to test push to dockerhub.
30 | [kamran@registry tmp]$
31 | ```
32 |
33 | ```
34 | [kamran@registry test]$ docker build --rm -t kamranazeem/mynginx .
35 | Sending build context to Docker daemon 3.072 kB
36 | Step 1 : FROM nginx:latest
37 | ---> 0d409d33b27e
38 | Step 2 : COPY index.html /usr/share/nginx/html/
39 | ---> fefe0a98edc7
40 | Removing intermediate container f71413920622
41 | Successfully built fefe0a98edc7
42 | [kamran@registry test]$
43 |
44 | ```
45 |
46 |
47 | ```
48 | [kamran@registry test]$ docker images | grep nginx
49 | [kamran@registry test]$ docker images | grep nginx
50 | kamranazeem/mynginx latest fefe0a98edc7 21 seconds ago 182.7 MB
51 | nginx latest 0d409d33b27e 12 days ago 182.7 MB
52 | [kamran@registry test]$
53 | ```
54 |
55 | ```
56 | [kamran@registry test]$ docker login
57 | Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
58 | Username (kamranazeem):
59 | Password: (mysupersecretpassword)
60 | Login Succeeded
61 | [kamran@registry test]$
62 | ```
63 |
64 | Push to a public repo on Dockerhub:
65 |
66 | ```
67 | [kamran@registry test]$ docker push kamranazeem/mynginx
68 | The push refers to a repository [docker.io/kamranazeem/mynginx]
69 | 96376ad6d505: Pushed
70 | 5f70bf18a086: Mounted from library/nginx
71 | bbf4634aee1a: Mounted from library/nginx
72 | 64d0c8aee4b0: Mounted from library/nginx
73 | 4dcab49015d4: Mounted from library/nginx
74 | latest: digest: sha256:0e6937b2e0f209677e142645862d1a48671ecb52d16011538f91b23216f979e8 size: 2185
75 | [kamran@registry test]$
76 | ```
77 |
78 | Now we try to push it to a private repo on Docker Hub.
79 |
80 | Log in to Docker Hub (already done in the previous step).
81 |
82 | Create a private repo on Docker Hub (kamranazeem/private) (format: namespace/reponame).
83 |
84 | Apparently Docker treats an image as a repository. That means we can have only one image in a private repo.
85 |
86 |
87 | ```
88 | [kamran@registry test]$ docker build --rm -t kamranazeem/private:custom-nginx .
89 | Sending build context to Docker daemon 3.072 kB
90 | Step 1 : FROM nginx:latest
91 | ---> 0d409d33b27e
92 | Step 2 : COPY index.html /usr/share/nginx/html/
93 | ---> Using cache
94 | ---> fefe0a98edc7
95 | Successfully built fefe0a98edc7
96 | [kamran@registry test]$
97 | ```
98 |
99 | Push to private registry:
100 | ```
101 | [kamran@registry test]$ docker push kamranazeem/private:custom-nginx
102 | The push refers to a repository [docker.io/kamranazeem/private]
103 | 96376ad6d505: Mounted from kamranazeem/mynginx
104 | 5f70bf18a086: Mounted from kamranazeem/mynginx
105 | bbf4634aee1a: Mounted from kamranazeem/mynginx
106 | 64d0c8aee4b0: Mounted from kamranazeem/mynginx
107 | 4dcab49015d4: Mounted from kamranazeem/mynginx
108 | custom-nginx: digest: sha256:0e6937b2e0f209677e142645862d1a48671ecb52d16011538f91b23216f979e8 size: 2185
109 | [kamran@registry test]$
110 | ```
111 |
112 |
113 | ----
114 |
115 | # Trying from kubernetes master
116 |
117 | `docker -v` says 1.9.1
118 |
119 | ```
120 | -bash-4.3# docker login
121 | Username: kamranazeem
122 | Password:
123 | Email: kamranazeem@gmail.com
124 | WARNING: login credentials saved in /root/.docker/config.json
125 | Login Succeeded
126 | ```
127 |
128 | ```
129 | -bash-4.3# cat Dockerfile
130 | FROM busybox
131 | COPY datafile.txt /tmp/
132 |
133 | -bash-4.3# cat datafile.txt
134 | This is to test busybx.
135 | -bash-4.3#
136 | ```
137 |
138 | ```
139 | -bash-4.3# docker build --rm -t kamranazeem/private:custom-busybox .
140 | Sending build context to Docker daemon 3.072 kB
141 | Step 1 : FROM busybox
142 | ---> 0d380282e68b
143 | Step 2 : COPY datafile.txt /tmp/
144 | ---> 06dd6567e62c
145 | Removing intermediate container 3be7e1dd17e2
146 | Successfully built 06dd6567e62c
147 | -bash-4.3#
148 | ```
149 |
150 | Looks like a problem below - for no obvious reason!
151 | ```
152 | -bash-4.3# docker push kamranazeem/private:custom-busybox
153 | The push refers to a repository [docker.io/kamranazeem/private] (len: 1)
154 | 06dd6567e62c: Preparing
155 | unauthorized: authentication required
156 | -bash-4.3#
157 | ```
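One generic thing worth checking here (a troubleshooting sketch, not a confirmed fix): make sure this older client really saved credentials for the Docker Hub registry, then log in again naming the registry explicitly and retry the push.
```
# Should contain an auths entry for https://index.docker.io/v1/
cat /root/.docker/config.json

# Re-login against the registry explicitly, then retry
docker login https://index.docker.io/v1/
docker push kamranazeem/private:custom-busybox
```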
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
--------------------------------------------------------------------------------
/kamran/Kubernetes-coreOS-rackspace.md:
--------------------------------------------------------------------------------
1 | Reference: Container Orchestration using CoreOS and Kubernetes (https://youtu.be/tA8XNVPZM2w)
2 |
3 | Kubernetes Infrastructure:
4 | CoreOS
5 | Docker
6 | etcd
7 | flannel
8 | Kubernetes Controller
9 | Kubernetes Node
10 | Terraform
11 | Google Compute Engine
12 |
13 |
14 |
15 | Kubernetes needs a dedicated etcd instance for itself; even if there is an etcd cluster in your environment, Kubernetes doesn't want to use that.
16 |
17 | Kubernetes controller = 3 components (apiserver, scheduler, replication controller)
18 | Node = Kubelet (which talks to docker) , proxy , docker
19 |
20 | Resource limits should be set so the scheduler can pack as much as it can onto a node.
21 |
22 | A pod is always deployed as an atomic unit on one node, and of course it gets its own IP.
23 |
24 | If there are multiple containers in a pod, e.g. nginx, redis, mysql, etc., then you can connect to those containers' shells using the names defined in the replication controller's definition file.
25 |
26 | $ kubectl exec helloworld-v1-xyzabc -c nginx -- uname -a
27 |
28 |
29 | In the command above, "uname -a" is the command to be run on a container named nginx in the helloworld-v1-xyzabc pod.
30 |
31 |
32 | Another example, where the pod has only one container:
33 |
34 | $ kubectl exec somepod -- cat /etc/hosts
35 |
36 |
37 |
38 |
39 | Check logs of a container:
40 | $ kubectl logs -f helloworld-v1-xyzabc -c nginx
41 |
42 |
43 |
44 | You should (MUST) schedule/allocate resources using cgroups, etc. You can also introduce health endpoints. (Reference: Kelsey's presentation at Strange Loop, Sept 25-26, 2015.)
45 |
46 |
47 | Any pod, when created, has a very small "Infrastructure" container in it, and that is the first container created (implicitly) in a pod. This container just sits there and does nothing. It is the container which gets an IP from the underlying Docker service, asking Docker to create a network for it. Docker does that dutifully, and when the next (actual) container is created, Docker is asked to make it part of the same network as the infrastructure container. The infrastructure container is hidden from view. It consumes no resources and its task is just to stay alive and hold the IP which it received from Docker. This is the IP used by the entire pod.
48 |
49 |
50 | It is also possible to restart a single container in a pod and not disturb other containers of that pod.
51 |
52 |
53 | The proxy runs on each node and manages the iptables rules on each node.
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/kamran/Kubernetes-kubectl-cheat-sheet.md:
--------------------------------------------------------------------------------
1 | An assortment of compact kubectl examples, obtained from: [http://kubernetes.io/docs/user-guide/kubectl-cheatsheet/](http://kubernetes.io/docs/user-guide/kubectl-cheatsheet/)
2 |
3 | See also: Kubectl overview and JsonPath guide.
4 |
5 | # Creating Objects
6 | ```
7 | $ kubectl create -f ./file.yml # create resource(s) in a json or yaml file
8 |
9 | $ kubectl create -f ./file1.yml -f ./file2.yaml # create resource(s) in a json or yaml file
10 |
11 | $ kubectl create -f ./dir # create resources in all .json, .yml, and .yaml files in dir
12 |
13 | # Create from a URL
14 |
15 | $ kubectl create -f http://www.fpaste.org/279276/48569091/raw/
16 |
17 |
18 | # Create multiple YAML objects from stdin
19 |
20 | $ cat <<EOF | kubectl create -f -
70 | $ kubectl get rc <rc-name>                      # List a particular replication controller
71 | $ kubectl get replicationcontroller <rc-name>   # List a particular RC
72 |
73 | # Verbose output
74 | $ kubectl describe nodes <node-name>
75 | $ kubectl describe pods <pod-name>
76 | $ kubectl describe pods/<pod-name>    # Equivalent to previous
77 | $ kubectl describe pods <rc-name>     # Lists pods created by <rc-name> using common prefix
78 |
79 | # List Services Sorted by Name
80 | $ kubectl get services --sort-by=.metadata.name
81 |
82 | # List pods Sorted by Restart Count
83 | $ kubectl get pods --sort-by=.status.containerStatuses[0].restartCount
84 |
85 | # Get the version label of all pods with label app=cassandra
86 | $ kubectl get pods --selector=app=cassandra rc -o 'jsonpath={.items[*].metadata.labels.version}'
87 |
88 | # Get ExternalIPs of all nodes
89 | $ kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}'
90 |
91 | # List Names of Pods that belong to Particular RC
92 | # "jq" command useful for transformations that are too complex for jsonpath
93 | $ sel=$(./kubectl get rc --output=json | jq -j '.spec.selector | to_entries | .[] | "\(.key)=\(.value),"')
94 | $ sel=${sel%?} # Remove trailing comma
95 | $ pods=$(kubectl get pods --selector=$sel --output=jsonpath={.items..metadata.name})
96 |
97 | # Check which nodes are ready
98 | $ kubectl get nodes -o jsonpath='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'| tr ';' "\n" | grep "Ready=True"
99 | ```
100 |
101 | # Modifying and Deleting Resources
102 |
103 | ```
104 | $ kubectl label pods <pod-name> new-label=awesome                  # Add a Label
105 | $ kubectl annotate pods <pod-name> icon-url=http://goo.gl/XXBTWq   # Add an annotation
106 |
107 | # TODO: examples of kubectl edit, patch, delete, replace, scale, and rolling-update commands.
108 | ```
109 |
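The TODO above could be filled in along these lines (a sketch in the spirit of the same upstream cheat sheet; names like docker-registry, k8s-node-1, frontend-v1 and foo are example placeholders):
```
$ kubectl edit svc/docker-registry                        # Edit the service named docker-registry
$ kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}'   # Partially update a node
$ kubectl replace --force -f ./pod.json                   # Force replace: delete and then re-create the resource
$ kubectl scale --replicas=3 rc/foo                       # Scale a replication controller named foo to 3 replicas
$ kubectl rolling-update frontend-v1 -f frontend-v2.json  # Rolling update the pods of frontend-v1
$ kubectl delete -f ./pod.json                            # Delete a pod using the type and name in pod.json
$ kubectl delete pod,service baz foo                      # Delete pods and services named "baz" and "foo"
```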
110 | # Interacting with running Pods
111 |
112 | ```
113 | $ kubectl logs <pod-name>                      # dump pod logs (stdout)
114 | $ kubectl logs -f <pod-name>                   # stream pod logs (stdout) until canceled (ctrl-c) or timeout
115 |
116 | $ kubectl run -i --tty busybox --image=busybox -- sh      # Run pod as interactive shell
117 | $ kubectl attach <pod-name> -i                 # Attach to Running Container
118 | $ kubectl port-forward <pod-name> <local-and-remote-port>   # Forward port of Pod to your local machine
119 | $ kubectl port-forward <service-name> <port>   # Forward port to service
120 | $ kubectl exec <pod-name> -- ls /              # Run command in existing pod (1 container case)
121 | $ kubectl exec <pod-name> -c <container-name> -- ls /       # Run command in existing pod (multi-container case)
122 | ```
123 |
124 |
--------------------------------------------------------------------------------
/kamran/Kubernetes-workshop-with-kelsey-aug-2016.txt:
--------------------------------------------------------------------------------
1 | # Kubernetes workshop with Kelsey
2 |
3 | https://github.com/kelseyhightower/craft-kubernetes-workshop
4 |
5 | ```
6 | kamranazeem@kubernetes-with-kelsey:~$ gcloud container clusters create k0
7 | Creating cluster k0...done.
8 | Created [https://container.googleapis.com/v1/projects/kubernetes-with-kelsey/zones/europe-west1-d/clusters/k0].
9 | kubeconfig entry generated for k0.
10 | NAME ZONE MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS
11 | k0 europe-west1-d 1.3.5 104.155.6.65 n1-standard-1 1.3.5 3 RUNNING
12 | kamranazeem@kubernetes-with-kelsey:~$
13 | ```
14 |
15 |
16 | kubectl get pods --v=9
17 |
18 | shows all api calls in curl format.
19 |
20 |
21 |
22 | Readiness and liveness probes should be part of all pod definitions.
23 |
24 |
25 |
26 | kamranazeem@kubernetes-with-kelsey:~/craft-kubernetes-workshop/kubernetes$ kubectl create -f services/monolith.yaml
27 | You have exposed your service on an external port on all nodes in your
28 | cluster. If you want to expose this service to the external internet, you may
29 | need to set up firewall rules for the service port(s) (tcp:31000) to serve traffic.
30 | See http://releases.k8s.io/release-1.3/docs/user-guide/services-firewalls.md for more details.
31 | service "monolith" created
32 | kamranazeem@kubernetes-with-kelsey:~/craft-kubernetes-workshop/kubernetes$
33 |
34 |
35 |
36 |
37 | kubectl get endpoints gives the endpoints of a service. Super helpful in my load balancer.
38 |
39 | We can disable kube-proxy !!!! And get rid of all the iptables stuff.
40 |
41 |
42 | kubectl describe limitranges limits shows the LimitRange object, which is a good place to set defaults for resource usage.
43 |
44 |
45 |
46 | The kelseyhightower/kube-cert-manager project on GitHub is a way to automatically generate certs and push them to apps, using Let's Encrypt.
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
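As a reminder of what the probe note above means in practice, a minimal, hypothetical pod definition with both probes could look like this (pod name and image are placeholders):
```
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: probe-demo
spec:
  containers:
  - name: nginx
    image: nginx
    readinessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 15
EOF
```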
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/.gitignore:
--------------------------------------------------------------------------------
1 | *.tar*
2 | *.tgz
3 | *.zip
4 |
5 | # Ignore kubernetes binaries
6 | controllers/kube*
7 | workers/kube*
8 |
9 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/README.md:
--------------------------------------------------------------------------------
1 | # Important
2 | * The nodes need to have the SSH key pre-installed. This is done by the provisioning system.
3 | * The SSL certificates need to be generated beforehand, so they can be copied onto each node before the node configuration starts.
4 | * The hosts file is more than a plain hosts file here: it contains extra columns with the amount of RAM and disk needed for each node.
5 |
6 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/.gitignore:
--------------------------------------------------------------------------------
1 | cfssl*
2 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/README.md:
--------------------------------------------------------------------------------
1 | **Note:**
2 | The certs in this directory are generated for example.com and for the nodes using private IP scheme. Thus they are harmless and are OK to be part of this repo.
3 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/SSL-TLS-Certificates.md:
--------------------------------------------------------------------------------
1 | # Generating certificates for all kubernetes nodes:
2 |
3 | External IP (EC2) = 52.220.203.49
4 |
5 | ```
6 | export KUBERNETES_PUBLIC_IP_ADDRESS=52.220.203.49
7 | ```
8 |
9 |
10 | ```
11 | wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
12 | chmod +x cfssl_linux-amd64
13 | sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl
14 |
15 | wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
16 | chmod +x cfssljson_linux-amd64
17 | sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
18 | ```
19 |
20 |
21 |
22 | ```
23 | echo '{
24 | "signing": {
25 | "default": {
26 | "expiry": "8760h"
27 | },
28 | "profiles": {
29 | "kubernetes": {
30 | "usages": ["signing", "key encipherment", "server auth", "client auth"],
31 | "expiry": "8760h"
32 | }
33 | }
34 | }
35 | }' > ca-config.json
36 | ```
37 |
38 |
39 | ```
40 | echo '{
41 | "CN": "Kubernetes",
42 | "key": {
43 | "algo": "rsa",
44 | "size": 2048
45 | },
46 | "names": [
47 | {
48 | "C": "NO",
49 | "L": "Oslo",
50 | "O": "Kubernetes",
51 | "OU": "CA",
52 | "ST": "Oslo"
53 | }
54 | ]
55 | }' > ca-csr.json
56 | ```
57 |
58 |
59 | ```
60 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca
61 | ```
62 |
63 |
64 | ```
65 | openssl x509 -in ca.pem -text -noout
66 | ```
67 |
68 |
69 |
70 | ```
71 | cat > kubernetes-csr.json <<EOF
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/generate-certs.sh:
--------------------------------------------------------------------------------
48 |
49 |
50 |
51 | sed -e s/DOMAINNAME/${DOMAINNAME}/g \
52 | kubernetes-csr.json.header.template > kubernetes-csr.json.header
53 |
54 |
55 | sed -e s/CITY/${CITY}/g \
56 | -e s/STATE/${STATE}/g \
57 | -e s/COUNTRY/${COUNTRY}/g \
58 | kubernetes-csr.json.footer.template > kubernetes-csr.json.footer
59 |
60 |
61 |
62 |
63 | # Filter IPs and add to template.body
64 | # Notice a single output redirector '>' in the command below:
65 | egrep -v "\#|^127" ${HOSTSFILE} | grep ${DOMAINNAME} | awk '{print " \"" $1 "\"," }' > kubernetes-csr.body
66 |
67 |
68 | # Filter out hostnames in FQDN form and add to template.body
69 | # Notice two output redirectors '>>' in the line below.
70 | egrep -v "\#|^127" ${HOSTSFILE} | grep ${DOMAINNAME} | awk '{print " \"" $2 "\"," }' >> kubernetes-csr.body
71 |
72 | if [ -z "${EXTERNAL_IP}" ] ; then
73 | echo "External IP was not defined. Skipping ..."
74 | else
75 | echo " \"${EXTERNAL_IP}\"," >> kubernetes-csr.body
76 | fi
77 |
78 |
79 | # Create the kubernetes-csr.json by combining three files
80 | cat kubernetes-csr.json.header kubernetes-csr.body kubernetes-csr.json.footer > kubernetes-csr.json
81 |
82 |
83 | ###############
84 | echo
85 | echo "Downloading necessary software for generating certificates ..."
86 | echo
87 |
88 | curl -# -O https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
89 | curl -# -O https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
90 |
91 | cp cfssl_linux-amd64 cfssl
92 | cp cfssljson_linux-amd64 cfssljson
93 |
94 | chmod +x cfssl*
95 |
96 | # cp cfssl* /usr/local/bin/cfssljson
97 |
98 |
99 |
100 | ################
101 | echo
102 | echo "Generate certificates now ..."
103 | echo
104 |
105 | ./cfssl gencert -initca ca-csr.json | ./cfssljson -bare ca
106 |
107 | ./cfssl gencert \
108 | -ca=ca.pem \
109 | -ca-key=ca-key.pem \
110 | -config=ca-config.json \
111 | -profile=kubernetes \
112 | kubernetes-csr.json | ./cfssljson -bare kubernetes
113 |
114 |
115 | ################
116 |
117 | echo
118 | echo "Verify certificate ..."
119 | echo
120 |
121 | openssl x509 -in kubernetes.pem -text -noout
122 |
123 |
124 | echo
125 | echo "Done!"
126 | echo
127 |
128 | echo
129 | echo "New certs generated. You can copy the following files to target computers."
130 | echo
131 |
132 | ls -1 *.pem
133 |
134 | echo
135 |
136 |
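A possible next step after this script finishes (a sketch only; the hostnames are taken from the example CSR, and /root/ is where configure-controllers-k8s.sh expects to find the certs):
```
# Hypothetical distribution step; adjust the host list to your cluster
for node in controller1.example.com controller2.example.com worker1.example.com worker2.example.com; do
  scp ca.pem kubernetes.pem kubernetes-key.pem root@${node}:/root/
done
```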
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.body:
--------------------------------------------------------------------------------
1 | "10.240.0.11",
2 | "10.240.0.12",
3 | "10.240.0.13",
4 | "10.240.0.21",
5 | "10.240.0.22",
6 | "10.240.0.20",
7 | "10.240.0.31",
8 | "10.240.0.32",
9 | "10.240.0.41",
10 | "10.240.0.42",
11 | "10.240.0.40",
12 | "etcd1.example.com",
13 | "etcd2.example.com",
14 | "etcd3.example.com",
15 | "controller1.example.com",
16 | "controller2.example.com",
17 | "controller.example.com",
18 | "worker1.example.com",
19 | "worker2.example.com",
20 | "lb1.example.com",
21 | "lb2.example.com",
22 | "lb.example.com",
23 | "123.45.67.89",
24 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "*.example.com",
3 | "hosts": [
4 | "10.32.0.1",
5 | "kubernetes.example.com",
6 | "10.240.0.11",
7 | "10.240.0.12",
8 | "10.240.0.13",
9 | "10.240.0.21",
10 | "10.240.0.22",
11 | "10.240.0.20",
12 | "10.240.0.31",
13 | "10.240.0.32",
14 | "10.240.0.41",
15 | "10.240.0.42",
16 | "10.240.0.40",
17 | "etcd1.example.com",
18 | "etcd2.example.com",
19 | "etcd3.example.com",
20 | "controller1.example.com",
21 | "controller2.example.com",
22 | "controller.example.com",
23 | "worker1.example.com",
24 | "worker2.example.com",
25 | "lb1.example.com",
26 | "lb2.example.com",
27 | "lb.example.com",
28 | "123.45.67.89",
29 | "localhost",
30 | "127.0.0.1"
31 | ],
32 | "key": {
33 | "algo": "rsa",
34 | "size": 2048
35 | },
36 | "names": [
37 | {
38 | "C": "NO",
39 | "L": "Oslo",
40 | "O": "Kubernetes",
41 | "OU": "Cluster",
42 | "ST": "Oslo"
43 | }
44 | ]
45 | }
46 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.json.footer:
--------------------------------------------------------------------------------
1 | "localhost",
2 | "127.0.0.1"
3 | ],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "NO",
11 | "L": "Oslo",
12 | "O": "Kubernetes",
13 | "OU": "Cluster",
14 | "ST": "Oslo"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.json.footer.template:
--------------------------------------------------------------------------------
1 | "localhost",
2 | "127.0.0.1"
3 | ],
4 | "key": {
5 | "algo": "rsa",
6 | "size": 2048
7 | },
8 | "names": [
9 | {
10 | "C": "COUNTRY",
11 | "L": "CITY",
12 | "O": "Kubernetes",
13 | "OU": "Cluster",
14 | "ST": "STATE"
15 | }
16 | ]
17 | }
18 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.json.header:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "*.example.com",
3 | "hosts": [
4 | "10.32.0.1",
5 | "kubernetes.example.com",
6 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.json.header.template:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "*.DOMAINNAME",
3 | "hosts": [
4 | "10.32.0.1",
5 | "kubernetes",
6 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-csr.json.template:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "*.DOMAINNAME",
3 | "hosts": [
4 | "10.32.0.1",
5 | "kubernetes.DOMAINNAME",
6 |
7 | "localhost",
8 | "127.0.0.1"
9 | ],
10 | "key": {
11 | "algo": "rsa",
12 | "size": 2048
13 | },
14 | "names": [
15 | {
16 | "C": "COUNTRY",
17 | "L": "CITY",
18 | "O": "Kubernetes",
19 | "OU": "Cluster",
20 | "ST": "STATE"
21 | }
22 | ]
23 | }
24 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes-key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEowIBAAKCAQEAxqJsR3l/11c5RICC/CfT9ZjN2Vu6q4nY0G5KBDIgbIxdXcu3
3 | f0Gw1MlyJ5gQHqeRXoIGrz6P1UsHVElWfecwEDcs0pHs1T5l4EzlpKg1QoxQb8/f
4 | tKiz3vMsDg/o80GhsBe/olclq+uUqZNjkgzYAbzImMZPrYABhtN1FTlpgQLefssR
5 | etvIHIG5ERZDKh7Se0CZ/QrTLwUzoMLPY8VU+uOosrawBvWwEuNmElSF1EjaGFle
6 | 45xxcPaUWdKb3/5/u+fs+Vn9FMDrOxN4+4iUSFdqofRiwJ5hITpM8ZuHMbKfJYgB
7 | vpnQHuWOch8ygP4Vhbc0cGSXOYqTEPpbOZFl6wIDAQABAoIBACdl+gInSEMnKtW6
8 | g8COCc2x9EqDlkAGBHo7FfVFGAdPxAkhqhDUr8VU+458692YsQ3Ezpyd1RuiWR7P
9 | uRcZ7v+YYgyWIxqc2MonlnkVYBMGD8JNwfpsZ9j19KwJXdQ6FV7/JuSFUpSKEp+b
10 | +2jxoTqe/k7HLvCj7imW8ac3DwUmehPCwUZmMr9VkQd4ye/yVH0G9mex3XA3TGNL
11 | pT04wquXApgHdwc5eCQH5ASAP7EfCnJQdX89WhOiDDEmv1s+LaqCLK++606e8C8V
12 | a44Vj+shw2xbWh86XCEv11ROHHDUy9vMoHAcCP4i8SfH0GWXzV4EQfHYB68WP6PH
13 | KoDlO7kCgYEA342rsBlcm6xDoEOP28+a+6gluEgiF9FqlODCq7dNzqAdSIzeHovW
14 | hHyEXr+Gd5nMpl1J0GH3/30l2QkFOX6akH1NFs3OXlGyf+NmPZeAIWLPEQXUfDU/
15 | duBUeWw6MlRugtcbchvcU1LCHYXjtZMZ9n3qtCE5es2QQ36R8E644k0CgYEA43bd
16 | 27xLsOiGuHW7yR5xjgJwqNFml63IYG1TQaZ63/lqOXzzWcr9d4lfZt7DfvLYJC9u
17 | 9Q7ep2HNi3SqehKBjkxLkKwTNgoMN6pb8WkruGv9FYakoKO+Am0/st2jpUZ8SJD6
18 | yQJOE9h83XDzHBYzT5RNTLCTFYHHuQgkMVTX1RcCgYBheAVpTEWCYkNB9vMcJZIv
19 | MwrvJBQpSlcr/isib1NvSYQ7A6A8tT+cmCWmod7g+p+t0XLDdlOIg2ojDLnIbZi6
20 | csdPr//QQ9KNy/nh1NI6vDgH9gDE/vpivEBYiIV/OCzOwzbmM5uGgMi90oiLvE1P
21 | mWrhirLjBglmRCMhIVGe6QKBgQCwXTO78g1GB6SdVTvavq4RC7dKTcIcDPR2P430
22 | Mx7Gll5UYJtWndtVWcAMxOATiDsqI1UG0Ra19VEntbeQmtS0/lcAv0rN9ZBvgLl1
23 | PrRVc/VxQUpX+12SCB8BknQV68blhPPN8umS3aJDGBaHSndSVoxwz9/bJxsyTiux
24 | zDTk1QKBgE4wwzOiE4MZVx9Ub+8zSqxGgnctrOBq26qE6ksEu4RRLs4tMHLIPmJ/
25 | Xi6ZrftCpiv+rp+d1GR3LD9XD1chBIapT0QtwSTCIp2UWQ0+EBIGqT9v7w7enPmp
26 | vIdGyt4R0sdHQJEgfMmF7u69KZeTrwlJ3mpt860HrPs6IWI5eMu8
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes.csr:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIIELzCCAxcCAQAwajELMAkGA1UEBhMCTk8xDTALBgNVBAgTBE9zbG8xDTALBgNV
3 | BAcTBE9zbG8xEzARBgNVBAoTCkt1YmVybmV0ZXMxEDAOBgNVBAsTB0NsdXN0ZXIx
4 | FjAUBgNVBAMTDSouZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
5 | ggEKAoIBAQDGomxHeX/XVzlEgIL8J9P1mM3ZW7qridjQbkoEMiBsjF1dy7d/QbDU
6 | yXInmBAep5FeggavPo/VSwdUSVZ95zAQNyzSkezVPmXgTOWkqDVCjFBvz9+0qLPe
7 | 8ywOD+jzQaGwF7+iVyWr65Spk2OSDNgBvMiYxk+tgAGG03UVOWmBAt5+yxF628gc
8 | gbkRFkMqHtJ7QJn9CtMvBTOgws9jxVT646iytrAG9bAS42YSVIXUSNoYWV7jnHFw
9 | 9pRZ0pvf/n+75+z5Wf0UwOs7E3j7iJRIV2qh9GLAnmEhOkzxm4cxsp8liAG+mdAe
10 | 5Y5yHzKA/hWFtzRwZJc5ipMQ+ls5kWXrAgMBAAGgggF+MIIBegYJKoZIhvcNAQkO
11 | MYIBazCCAWcwggFjBgNVHREEggFaMIIBVoIWa3ViZXJuZXRlcy5leGFtcGxlLmNv
12 | bYIRZXRjZDEuZXhhbXBsZS5jb22CEWV0Y2QyLmV4YW1wbGUuY29tghFldGNkMy5l
13 | eGFtcGxlLmNvbYIXY29udHJvbGxlcjEuZXhhbXBsZS5jb22CF2NvbnRyb2xsZXIy
14 | LmV4YW1wbGUuY29tghZjb250cm9sbGVyLmV4YW1wbGUuY29tghN3b3JrZXIxLmV4
15 | YW1wbGUuY29tghN3b3JrZXIyLmV4YW1wbGUuY29tgg9sYjEuZXhhbXBsZS5jb22C
16 | D2xiMi5leGFtcGxlLmNvbYIObGIuZXhhbXBsZS5jb22CCWxvY2FsaG9zdIcECiAA
17 | AYcECvAAC4cECvAADIcECvAADYcECvAAFYcECvAAFocECvAAFIcECvAAH4cECvAA
18 | IIcECvAAKYcECvAAKocECvAAKIcEey1DWYcEfwAAATANBgkqhkiG9w0BAQsFAAOC
19 | AQEAChR01lRL2FyTchkNhKVRhKNHKRcKngJn2V3pd/kphyOiHIg+4C2GGQjpmLND
20 | DZu6C6jnmmeeBL+/TwVpkMObavg3AK1RpBkTB3kEEz55BuYi9qe1y6lgjfSSca+Z
21 | jTzZu/muD0w4UhdGYTZdXSamvAufBGhh0dIg0QR7woFofvAYT9+MyLBESkU9me6l
22 | 0iHhxqbkSwLSRjZkin2vOywklt8JxxhZnp3gxQMJVm1zaU+t5pPFnGpBU2aen740
23 | fl7ERTahMYi2/Rj2aztSfeuaMq0f3aedktUlo3jvclbCyzl53VfkU+BbmnfU4H3E
24 | EFDoyL1QNWKowTuHFvPVTo4n4A==
25 | -----END CERTIFICATE REQUEST-----
26 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/certs/kubernetes.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFRDCCBCygAwIBAgIUJ2t2Ck+JGkfXNt7iCmy2EnOfgUgwDQYJKoZIhvcNAQEL
3 | BQAwYjELMAkGA1UEBhMCTk8xDTALBgNVBAgTBE9zbG8xDTALBgNVBAcTBE9zbG8x
4 | EzARBgNVBAoTCkt1YmVybmV0ZXMxCzAJBgNVBAsTAkNBMRMwEQYDVQQDEwpLdWJl
5 | cm5ldGVzMB4XDTE2MTEyNTEwNDUwMFoXDTE3MTEyNTEwNDUwMFowajELMAkGA1UE
6 | BhMCTk8xDTALBgNVBAgTBE9zbG8xDTALBgNVBAcTBE9zbG8xEzARBgNVBAoTCkt1
7 | YmVybmV0ZXMxEDAOBgNVBAsTB0NsdXN0ZXIxFjAUBgNVBAMTDSouZXhhbXBsZS5j
8 | b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGomxHeX/XVzlEgIL8
9 | J9P1mM3ZW7qridjQbkoEMiBsjF1dy7d/QbDUyXInmBAep5FeggavPo/VSwdUSVZ9
10 | 5zAQNyzSkezVPmXgTOWkqDVCjFBvz9+0qLPe8ywOD+jzQaGwF7+iVyWr65Spk2OS
11 | DNgBvMiYxk+tgAGG03UVOWmBAt5+yxF628gcgbkRFkMqHtJ7QJn9CtMvBTOgws9j
12 | xVT646iytrAG9bAS42YSVIXUSNoYWV7jnHFw9pRZ0pvf/n+75+z5Wf0UwOs7E3j7
13 | iJRIV2qh9GLAnmEhOkzxm4cxsp8liAG+mdAe5Y5yHzKA/hWFtzRwZJc5ipMQ+ls5
14 | kWXrAgMBAAGjggHoMIIB5DAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB
15 | BQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFJTyMO6l/JIX
16 | /U6LvIpl9z3T/2fyMB8GA1UdIwQYMBaAFJD+96CQaoHJfj9ydt+lE2RP/PxmMIIB
17 | YwYDVR0RBIIBWjCCAVaCFmt1YmVybmV0ZXMuZXhhbXBsZS5jb22CEWV0Y2QxLmV4
18 | YW1wbGUuY29tghFldGNkMi5leGFtcGxlLmNvbYIRZXRjZDMuZXhhbXBsZS5jb22C
19 | F2NvbnRyb2xsZXIxLmV4YW1wbGUuY29tghdjb250cm9sbGVyMi5leGFtcGxlLmNv
20 | bYIWY29udHJvbGxlci5leGFtcGxlLmNvbYITd29ya2VyMS5leGFtcGxlLmNvbYIT
21 | d29ya2VyMi5leGFtcGxlLmNvbYIPbGIxLmV4YW1wbGUuY29tgg9sYjIuZXhhbXBs
22 | ZS5jb22CDmxiLmV4YW1wbGUuY29tgglsb2NhbGhvc3SHBAogAAGHBArwAAuHBArw
23 | AAyHBArwAA2HBArwABWHBArwABaHBArwABSHBArwAB+HBArwACCHBArwACmHBArw
24 | ACqHBArwACiHBHstQ1mHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAJIlGpuI9/MM
25 | M39lYYPnNuKpMqGVrim+J3rd8TkIWAyLYlNJxN+N28DiU42AfsnPGCmTCCQDzE1V
26 | 0TQA6prMijR8ZeYeT6apQUSAzmHr3m255sQy4LQKWpWhjQL77m4bh7ExwUyoiqaD
27 | +fbNN3EaMFe32E6ODYq/q2za3cU4Az91H8W4jSUCK5+5CxZUdsJJPH0nyqyuoj06
28 | TX2Nhwj32ArOw2Q68x+WE75NoYcS83R6Uf5cY4V7WpBJCWa4V7ShKlo1S2xoJGQj
29 | RrRygTzoLb9LAbYmJTSbm1Y4VYfOd/tS9765X0cbIAkQ4DMAcCXTJpfgq1hnjxDi
30 | zRwd1hMXD6I=
31 | -----END CERTIFICATE-----
32 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/cluster.conf:
--------------------------------------------------------------------------------
1 | # This file contains directives, which are used by the various cluster utilities in this directory.
2 |
3 | # Provisioning specific variables:
4 | ##################################
5 |
6 | # Obtain the network name (and other info) by using `virsh net-list`, and `virsh net-dumpxml <network-name>`
7 | # For CLI , look here: https://kashyapc.fedorapeople.org/virt/create-a-new-libvirt-bridge.txt
8 | # You can use virt-manager to create a new NAT network with ease (GUI).
9 | # Using a NAT network is better for testing, because you can use your host computer to access the nodes, and setup routes, etc.
10 |
11 |
12 | # You can leave LIBVIRT_HOST blank. In that case, the local daemon will be used.
13 | # Connection hint:
14 | # The value is the FQDN or the IP address of the remote host, which runs SSH and libvirt.
15 | # The target remote system must have your SSH public key stored in its root user's authorized_keys.
16 |
17 | # LIBVIRT_HOST=wbitthome.sytes.net
18 |
19 | # This remote user is normally root. If you want to use another user, you need to configure that on the target server.
20 | # LIBVIRT_REMOTE_USER=root
21 |
22 | # The name of the virtual network which you created, to be used with this cluster.
23 | LIBVIRT_NETWORK_NAME=Kubernetes
24 |
25 |
26 | # The data store where VM disks will be stored. If you set the value for VM_DISK_DIRECTORY, then the path needs to exist.
27 | # If you leave it blank / undefined, then the system will use the default location to store VM disks.
28 | VM_DISK_DIRECTORY=/home/virtualmachines/
29 |
30 | # The HTTP_BASE_URL can be any host and any port. You just need to specify it here.
31 | # It MUST NOT BE localhost. If you are running a web server on your technician PC,
32 | # then provide the IP address of the bridge specified as LIBVIRT_NETWORK_NAME, such as 10.240.0.1
33 | HTTP_BASE_URL="http://10.240.0.1"
34 |
35 | # Exact path (full path) to the ISO file you are using to provision the nodes with.
36 | # This should be Fedora 23 or higher (or RHEL/CENTOS 7 or higher), to be in accordance with the kickstart file in kickstart directory.
37 | ISO_PATH=/home/cdimages/Fedora-Server-dvd-x86_64-24-1.2.iso
38 |
39 |
40 | # INSTALL_TIME_RAM needs to be an integer, with a minimum value of 1280.
41 | # If you want parallel VM creation and have 16 GB RAM in your system, then use 1664 as the value.
42 | # Total 9 VMs will use a total of 14976 MB of RAM on your host computer, leaving some RAM for the host.
43 | # If you do not have 16 GB, but do have 8GB, then you will need to run the provisioner in a sequence.
44 | # In that case, increasing the RAM to 2 GB (2048 MB) for each VM, is a good idea.
45 | # In this mode, a VM will acquire 2 GB RAM during install time and will terminate freeing the memory for the next VM.
46 |
47 | # INSTALL_TIME_RAM=1664
48 | INSTALL_TIME_RAM=1280
49 |
50 | # PARALLEL is a directive which enables provisioning of VMs in parallel instead of in a sequence.
51 | # Note: This will use a lot of CPU and RAM on the host, and will make the host perform relatively sluggishly.
52 | PARALLEL=1
53 |
54 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/controllers/.gitignore:
--------------------------------------------------------------------------------
1 | # The following are large binaries and need not be part of repo
2 | kube-apiserver
3 | kube-controller-manager
4 | kube-scheduler
5 | kubectl
6 |
7 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/controllers/README.md:
--------------------------------------------------------------------------------
1 | # Setup Controllers with k8s and HA
2 |
3 | The following scripts are required to be present in this directory. The main file, which needs to be run is `controllers.sh`:
4 | ```
5 | configure-controllers-HA.sh
6 | configure-controllers-k8s.sh
7 | controllers.sh
8 | ```
9 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/controllers/configure-controllers-HA.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Configure-HA-controllers.sh
3 |
4 | # Set the password you want for the hacluster user. This will only be used by the pcsd service to sync the cluster.
5 | PASSWORD="redhat"
6 | VIP="10.240.0.20"
7 |
8 | # Packages need to be installed and the pcsd service running on all nodes before you move forward, i.e. before you use this script.
9 |
10 | echo
11 | echo "------------------- Setting up HA on controller node ..."
12 | echo
13 | # Setting password for user hacluster ...
14 | echo "hacluster:${PASSWORD}" | chpasswd
15 |
16 |
17 | echo "Authenticate user hacluster to the cluster nodes ..."
18 | pcs cluster auth -u hacluster -p ${PASSWORD} controller1.example.com controller2.example.com
19 |
20 |
21 | echo "Checking PCS cluster status on node ..."
22 | pcs status pcsd
23 |
24 | # Execute the following code on node1 only
25 |
26 | if [ "$(hostname -s)" == "controller1" ]; then
27 |
28 | echo "Executing pcs cluster setup commands on node1 only ..."
29 | sleep 5
30 |
31 | echo "Creating CoroSync communication cluster/service ..."
32 | pcs cluster setup --name ControllerHA controller1.example.com controller2.example.com --force
33 |
34 | echo "Starting cluster on all cluster nodes ... This may take few seconds ..."
35 | pcs cluster start --all
36 | sleep 10
37 |
38 | # this enables the corosync and pacemaker services to start at boot time.
39 | pcs cluster enable --all
40 | sleep 1
41 |
42 | pcs property set stonith-enabled=false
43 | sleep 5
44 |
45 | pcs status nodes
46 | sleep 1
47 |
48 | pcs status resources
49 | sleep 1
50 |
51 | pcs status corosync
52 |
53 | echo "Setting up cluster resource VIP as ${VIP} ..."
54 | pcs resource create ControllerVIP ocf:heartbeat:IPaddr2 ip=${VIP} cidr_netmask=32 op monitor interval=30s
55 |
56 | # Allow the cluster some time to decide where it will run the VIP resource
57 | sleep 5
58 |
59 | fi
60 |
61 | echo "Following code can run on all nodes ..."
62 | echo "Check corosync ring status ..."
63 | corosync-cfgtool -s
64 |
65 |
66 | echo "Show status of corosync and pacemaker on all nodes ..."
67 | systemctl status corosync pacemaker
68 |
69 |
70 | echo "Showing final pcs status ..."
71 | pcs status
72 |
73 | echo "Showing ip address ..."
74 | ip addr
75 |
76 |
77 | ##############################################
78 | # Re-use this script for Load balancer
79 | # Configure HAProxy (with a synced haproxy config), group the VIP with HAProxy, and order them: VIP first, HAProxy second (see the sketch below).
80 |
81 |
82 |
83 |
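84 | # Sketch of the extra resources for that load balancer variant. These exact commands live in
85 | # loadbalancers/configure-loadbalancer-HA.sh and are repeated here only to illustrate the
86 | # grouping/ordering idea:
87 | #   pcs resource create HAProxy ocf:heartbeat:haproxy conffile=/etc/haproxy/haproxy.cfg op monitor interval=1min
88 | #   pcs constraint colocation add HAProxy LoadbalancerVIP INFINITY
89 | #   pcs constraint order LoadbalancerVIP then HAProxy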
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/controllers/configure-controllers-k8s.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Creating /var/lib/kubernetes and moving in certificates ..."
4 |
5 | if [ -f kubernetes.pem ] && [ -f kubernetes-key.pem ] && [ -f ca.pem ] ; then
6 | # Assuming certs are present in /root/
7 | mkdir -p /var/lib/kubernetes
8 | mv -v ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
9 | else
10 | echo "Certificates missing in /root/. Exiting ..."
11 | exit 9
12 | fi
13 |
14 |
15 | ############
16 | #
17 | # This section is already taken care of in the master controllers.sh script. Files are downloaded on the technician computer,
18 | # and then copied to the nodes directly into /usr/bin/ .
19 | #
20 | # echo "Downloading Kubernetes software components ..."
21 |
22 | # curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-apiserver
23 | # curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-controller-manager
24 | # curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-scheduler
25 | # curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubectl
26 |
27 | # chmod +x /root/kube*
28 |
29 | # echo "Installing Kubernetes software components into /usr/bin/"
30 | # mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/bin/
31 | #
32 | ############
33 |
34 | echo "Downloading token.csv and authorization-policy.json ... into /var/lib/kubernetes/"
35 | curl -# -O https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/token.csv
36 |
37 | cat token.csv
38 |
39 | mv token.csv /var/lib/kubernetes/
40 |
41 | curl -# -O https://raw.githubusercontent.com/kelseyhightower/kubernetes-the-hard-way/master/authorization-policy.jsonl
42 | mv authorization-policy.jsonl /var/lib/kubernetes/
43 |
44 |
45 |
46 | # Find and set the INTERNAL_IP of the node.
47 | # The < sign is important in the line below.
48 | NET_IFACE=$(ip addr | grep -w '<BROADCAST,MULTICAST,UP,LOWER_UP>' | grep -v 'lo:' | head -1 | cut -d ':' -f 2 | tr -d ' ')
49 | INTERNAL_IP=$(ip addr show ${NET_IFACE} | grep -w inet | awk '{print $2}' | cut -f1 -d '/')
50 |
51 |
52 |
53 |
54 |
55 |
56 | echo "Creating kube-apiserver service file ..."
57 |
58 | cat > kube-apiserver.service <<"APIEOF"
59 | [Unit]
60 | Description=Kubernetes API Server
61 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
62 |
63 | [Service]
64 | ExecStart=/usr/bin/kube-apiserver \
65 | --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \
66 | --advertise-address=INTERNAL_IP \
67 | --allow-privileged=true \
68 | --apiserver-count=2 \
69 | --authorization-mode=ABAC \
70 | --authorization-policy-file=/var/lib/kubernetes/authorization-policy.jsonl \
71 | --bind-address=0.0.0.0 \
72 | --enable-swagger-ui=true \
73 | --etcd-cafile=/var/lib/kubernetes/ca.pem \
74 | --insecure-bind-address=0.0.0.0 \
75 | --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \
76 | --etcd-servers=https://ETCD1:2379,https://ETCD2:2379,https://ETCD3:2379 \
77 | --service-account-key-file=/var/lib/kubernetes/kubernetes-key.pem \
78 | --service-cluster-ip-range=10.32.0.0/24 \
79 | --service-node-port-range=30000-32767 \
80 | --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
81 | --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
82 | --token-auth-file=/var/lib/kubernetes/token.csv \
83 | --v=2
84 | Restart=on-failure
85 | RestartSec=5
86 |
87 | [Install]
88 | WantedBy=multi-user.target
89 | APIEOF
90 |
91 | ETCD1=$(grep -v \# /etc/hosts | grep etcd1 | awk '{print $2}')
92 | ETCD2=$(grep -v \# /etc/hosts | grep etcd2 | awk '{print $2}')
93 | ETCD3=$(grep -v \# /etc/hosts | grep etcd3 | awk '{print $2}')
94 |
95 | sed -i -e s/INTERNAL_IP/$INTERNAL_IP/g \
96 | -e s/ETCD1/$ETCD1/g \
97 | -e s/ETCD2/$ETCD2/g \
98 | -e s/ETCD3/$ETCD3/g \
99 | kube-apiserver.service
100 |
101 |
102 | cp kube-apiserver.service /etc/systemd/system/
103 |
104 |
105 | echo "Creating controller manager service file ..."
106 |
107 | cat > kube-controller-manager.service <<"MANAGEREOF"
108 | [Unit]
109 | Description=Kubernetes Controller Manager
110 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
111 |
112 | [Service]
113 | ExecStart=/usr/bin/kube-controller-manager \
114 | --allocate-node-cidrs=true \
115 | --cluster-cidr=10.200.0.0/16 \
116 | --cluster-name=kubernetes \
117 | --leader-elect=true \
118 | --master=http://INTERNAL_IP:8080 \
119 | --root-ca-file=/var/lib/kubernetes/ca.pem \
120 | --service-account-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
121 | --service-cluster-ip-range=10.32.0.0/24 \
122 | --v=2
123 | Restart=on-failure
124 | RestartSec=5
125 |
126 | [Install]
127 | WantedBy=multi-user.target
128 | MANAGEREOF
129 |
130 |
131 | sed -i s/INTERNAL_IP/$INTERNAL_IP/g kube-controller-manager.service
132 | cp kube-controller-manager.service /etc/systemd/system/
133 |
134 |
135 | echo "Creating scheduler file ..."
136 |
137 | cat > kube-scheduler.service <<"SCHEDULEREOF"
138 | [Unit]
139 | Description=Kubernetes Scheduler
140 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
141 |
142 | [Service]
143 | ExecStart=/usr/bin/kube-scheduler \
144 | --leader-elect=true \
145 | --master=http://INTERNAL_IP:8080 \
146 | --v=2
147 | Restart=on-failure
148 | RestartSec=5
149 |
150 | [Install]
151 | WantedBy=multi-user.target
152 | SCHEDULEREOF
153 |
154 |
155 |
156 | sed -i s/INTERNAL_IP/$INTERNAL_IP/g kube-scheduler.service
157 | cp kube-scheduler.service /etc/systemd/system/
158 |
159 | echo "Enabling and restarting k8s services using systemctl ... (will take about 10 seconds on each node)"
160 |
161 | systemctl daemon-reload
162 |
163 | systemctl enable kube-apiserver kube-controller-manager kube-scheduler
164 |
165 | systemctl stop kube-apiserver kube-controller-manager kube-scheduler
166 | echo "services stopped ... sleeping 1 seconds" ; sleep 1
167 |
168 | systemctl start kube-apiserver kube-controller-manager kube-scheduler
169 | echo "services started ... sleeping 5 seconds" ; sleep 5
170 |
171 |
172 | systemctl status kube-apiserver kube-controller-manager kube-scheduler --no-pager -l
173 |
174 | echo "The cluster status: (should be healthy): "
175 | kubectl get componentstatuses
176 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/controllers/controllers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Summary: Install and configure Kubernetes software on controller nodes.
3 | # Software required from fedora repo: pacemaker corosync pcs psmisc nginx
4 |
5 | SCRIPT_PATH=$(dirname $0)
6 | pushd $(pwd)
7 | cd $SCRIPT_PATH
8 |
9 |
10 | echo "======================= Configuring Kubernetes software and PCS on controller nodes ... ======================"
11 |
12 |
13 | # check if certs are there
14 | if [ ! -f ../certs/kubernetes.pem ] || [ ! -f ../certs/kubernetes-key.pem ] || [ ! -f ../certs/ca.pem ] ; then
15 | echo "Certs not found in ../certs . Cannot continue ..."
16 | popd
17 | exit 9
18 | fi
19 |
20 | chmod +x configure*.sh
21 |
22 |
23 | # The Kubernetes software is large in size, so it is better to download it on the technician computer
24 | # and then copy it to both nodes. This saves time.
25 |
26 | echo "Downloading Kubernetes software components to the technician computer..."
27 | curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-apiserver
28 | curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-controller-manager
29 | curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-scheduler
30 | curl -# -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubectl
31 |
32 | chmod +x kube*
33 |
34 |
35 | # List and process actual nodes and not the VIP
36 | for node in $(grep -v \# /etc/hosts| grep "controller[0-9]" | awk '{print $2}'); do
37 | echo "-------------------- Setting up Kubernetes on node: ${node}"
38 |
39 | echo "Copying certs ..."
40 | scp ../certs/*.pem root@${node}:/root/
41 |
42 | echo "Copying configure scripts ..."
43 | scp configure-controllers-k8s.sh configure-controllers-HA.sh root@${node}:/root/
44 |
45 | echo "Transferring Kubernetes software components to controller nodes directly in /usr/bin/ ..."
46 | scp kube-apiserver kube-controller-manager kube-scheduler kubectl root@${node}:/usr/bin/
47 | # Note: It is OK to get a Text file busy error. It means that the binary on target already exists and is already in use.
48 |
49 | echo "Running the configure-controller-k8s.sh script on node"
50 | ssh root@${node} "/root/configure-controllers-k8s.sh"
51 |
52 | echo "-------------------- Setting up HA software (PCS) on node: ${node}"
53 |
54 | echo "(pre)Installing HA software: pacemaker corosync pcs psmisc nginx ..."
55 | ssh root@${node} "yum -q -y install pacemaker corosync pcs psmisc nginx"
56 |
57 | # Firewalld is such a pain in the neck that I decided to forcibly remove it and stop/flush iptables,
58 | # to make sure that it does not interfere with the cluster. This is VERY important.
59 | ssh root@${node} "systemctl stop iptables firewalld ; yum -q -y remove firewalld; iptables -t nat -F ; iptables -F"
60 |
61 |
62 | echo
63 | echo "(pre)Enabling HA service: PCSD ..."
64 | echo
65 | ssh root@${node} "systemctl enable pcsd.service; systemctl stop pcsd.service; systemctl start pcsd.service"
66 |
67 | echo
68 | echo "===================================================================================================="
69 | echo
70 |
71 | done
72 |
73 |
74 |
75 |
76 | echo "======================= Configuring HA on controller nodes ... ======================"
77 |
78 | # We have a hostname for the Virtual / Floating IP we will be using on this HA cluster, i.e. controller.example.com, with the IP (VIP) `10.240.0.20`. This is the IP which the worker nodes will use to contact the controller/API server.
79 |
80 | # List and process actual nodes and not the VIP
81 | for node in $(grep -v \# /etc/hosts| grep "controller[0-9]" | awk '{print $2}'); do
82 | echo "--------------------- Configuring HA on controller node: ${node}"
83 | # The script is already copied in the previous step (i.e. in the loop above)
84 | ssh root@${node} "/root/configure-controllers-HA.sh"
85 | done
86 |
87 |
88 |
89 | # All done. Now change directory back to the place we came from.
90 | popd
91 |
92 |
93 |
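94 | # For reference, the "controller[0-9]" grep above expects /etc/hosts entries along these lines
95 | # (IPs taken from the example layout used elsewhere in this repo):
96 | #   10.240.0.21   controller1.example.com
97 | #   10.240.0.22   controller2.example.com
98 | #   10.240.0.20   controller.example.com    # the VIP; deliberately not matched by "controller[0-9]"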
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/etcd/README.sh:
--------------------------------------------------------------------------------
1 | The main script in this directory is `etcd.sh` . Other files are generated from this script.
2 |
3 | Only `etcd.sh` script is used to configure etcd nodes automatically.
4 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/etcd/configure-etcd.sh:
--------------------------------------------------------------------------------
1 | INTERNAL_IP=$(ip addr show ens3 | grep -w inet | awk '{print $2}' | cut -f1 -d '/')
2 | ETCD_NAME=$(hostname -s)
3 | ETCD1=$(grep -v \# /etc/hosts | grep etcd1 | awk '{print $2}')
4 | ETCD2=$(grep -v \# /etc/hosts | grep etcd2 | awk '{print $2}')
5 | ETCD3=$(grep -v \# /etc/hosts | grep etcd3 | awk '{print $2}')
6 |
7 | sed -i -e s/INTERNAL_IP/$INTERNAL_IP/g \
8 | -e s/ETCD_NAME/$ETCD_NAME/g \
9 | -e s/ETCD1/$ETCD1/g \
10 | -e s/ETCD2/$ETCD2/g \
11 | -e s/ETCD3/$ETCD3/g \
12 | /root/etcd.service
13 |
14 | mv /root/etcd.service /etc/systemd/system/
15 |
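16 | # Note: this script only substitutes the placeholders and installs the unit file; it does not
17 | # enable or start the service itself. Activating the unit afterwards would typically be:
18 | #   systemctl daemon-reload
19 | #   systemctl enable etcd
20 | #   systemctl start etcd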
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/etcd/etcd.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=etcd
3 | Documentation=https://github.com/coreos
4 |
5 | [Service]
6 | ExecStart=/usr/bin/etcd --name ETCD_NAME \
7 | --cert-file=/etc/etcd/kubernetes.pem \
8 | --key-file=/etc/etcd/kubernetes-key.pem \
9 | --peer-cert-file=/etc/etcd/kubernetes.pem \
10 | --peer-key-file=/etc/etcd/kubernetes-key.pem \
11 | --peer-trusted-ca-file=/etc/etcd/ca.pem \
12 | --trusted-ca-file=/etc/etcd/ca.pem \
13 | --initial-advertise-peer-urls https://INTERNAL_IP:2380 \
14 | --listen-peer-urls https://INTERNAL_IP:2380 \
15 | --listen-client-urls https://INTERNAL_IP:2379,http://127.0.0.1:2379 \
16 | --advertise-client-urls https://INTERNAL_IP:2379 \
17 | --initial-cluster-token etcd-cluster-0 \
18 | --initial-cluster etcd1=https://ETCD1:2380,etcd2=https://ETCD2:2380,etcd3=https://ETCD3:2380 \
19 | --initial-cluster-state new \
20 | --data-dir=/var/lib/etcd
21 | Restart=on-failure
22 | RestartSec=5
23 |
24 | [Install]
25 | WantedBy=multi-user.target
26 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/etcd/etcd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Setup etcd nodes
4 | # Before setting up etcd nodes, it is important for the nodes to have the operator's public key in their /root/.ssh/authorized_keys file.
5 | # You also need to do an ssh-keyscan of all nodes (to get their fingerprints into known_hosts) before moving on.
6 |
7 | SCRIPT_PATH=$(dirname $0)
8 | pushd $(pwd)
9 | cd $SCRIPT_PATH
10 |
11 |
12 | # Create service file
13 |
14 | echo "Creating the etcd unit file...."
15 |
16 | cat > etcd.service <<"ETCDEOF"
17 | [Unit]
18 | Description=etcd
19 | Documentation=https://github.com/coreos
20 |
21 | [Service]
22 | ExecStart=/usr/bin/etcd --name ETCD_NAME \
23 | --cert-file=/etc/etcd/kubernetes.pem \
24 | --key-file=/etc/etcd/kubernetes-key.pem \
25 | --peer-cert-file=/etc/etcd/kubernetes.pem \
26 | --peer-key-file=/etc/etcd/kubernetes-key.pem \
27 | --peer-trusted-ca-file=/etc/etcd/ca.pem \
28 | --trusted-ca-file=/etc/etcd/ca.pem \
29 | --initial-advertise-peer-urls https://INTERNAL_IP:2380 \
30 | --listen-peer-urls https://INTERNAL_IP:2380 \
31 | --listen-client-urls https://INTERNAL_IP:2379,http://127.0.0.1:2379 \
32 | --advertise-client-urls https://INTERNAL_IP:2379 \
33 | --initial-cluster-token etcd-cluster-0 \
34 | --initial-cluster etcd1=https://ETCD1:2380,etcd2=https://ETCD2:2380,etcd3=https://ETCD3:2380 \
35 | --initial-cluster-state new \
36 | --data-dir=/var/lib/etcd
37 | Restart=on-failure
38 | RestartSec=5
39 |
40 | [Install]
41 | WantedBy=multi-user.target
42 | ETCDEOF
43 |
44 |
45 | echo "Creating configure-etcd.sh script ..."
46 |
47 | cat > configure-etcd.sh <<"CONFIGUREEETCDEOF"
48 | # The < sign is important in the line below.
49 | NET_IFACE=$(ip addr | grep -w '<BROADCAST,MULTICAST,UP,LOWER_UP>' | grep -v 'lo:' | head -1 | cut -d ':' -f 2 | tr -d ' ')
4 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.13/' -e 's/NODEFQDN/etcd3.example.com/' kickstart-template.ks > etcd3.ks
5 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.21/' -e 's/NODEFQDN/controller1.example.com/' kickstart-template.ks > controller1.ks
6 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.22/' -e 's/NODEFQDN/controller2.example.com/' kickstart-template.ks > controller2.ks
7 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.31/' -e 's/NODEFQDN/worker1.example.com/' kickstart-template.ks > worker1.ks
8 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.32/' -e 's/NODEFQDN/worker2.example.com/' kickstart-template.ks > worker2.ks
9 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.41/' -e 's/NODEFQDN/lb1.example.com/' kickstart-template.ks > lb1.ks
10 | [kamran@kworkhorse ansible]$ sed -e 's/NODEIP/10.240.0.42/' -e 's/NODEFQDN/lb2.example.com/' kickstart-template.ks > lb2.ks
11 |
12 | [kamran@kworkhorse kickstart-files]$ sed -e 's/NODE_IP/10.240.0.51/' -e 's/NODE_FQDN/test.example.com/' -e 's/NODE_NETMASK/255.255.255.0/' -e 's/NODE_GATEWAY/10.240.0.1/' -e 's/NODE_DNS/10.240.0.1/' kickstart-template.ks.working > test.ks
13 |
14 | ```
15 |
16 |
17 | # Serve the Fedora DVD and the kickstart files over HTTPD:
18 |
19 | First mount the Fedora ISO on a mount point.
20 |
21 | ```
22 | [root@kworkhorse cdimages]# mount -o loop /home/cdimages/Fedora-Server-dvd-x86_64-24-1.2.iso /mnt/cdrom/
23 | mount: /dev/loop2 is write-protected, mounting read-only
24 | [root@kworkhorse cdimages]#
25 | ```
26 |
27 | Start a docker container exposing port 80 on the work computer. Serve the cdrom and kickstart directories.
28 |
29 | ```
30 | [kamran@kworkhorse cluster-setup-scripts]$ docker run -v /mnt/cdrom:/usr/local/apache2/htdocs/cdrom -v $(pwd)/kickstart:/usr/local/apache2/htdocs/kickstart -p 80:80 -d httpd
31 | bc90d48d31aca8393877af160838dd12fc44b9931d6edc22c744ebe07be3c45f
32 |
33 |
34 | [kamran@kworkhorse cluster-setup-scripts]$ docker ps
35 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
36 | bc90d48d31ac httpd "httpd-foreground" 7 seconds ago Up 6 seconds 0.0.0.0:80->80/tcp condescending_brattain
37 | [kamran@kworkhorse cluster-setup-scripts]$
38 | ```
39 |
40 |
41 |
42 |
43 |
44 | # Virt-install commands:
45 |
46 |
47 | ```
48 | 848 virt-install -n etcd1 --description "etcd1" --hvm --os-type=Linux --os-variant=fedora22 --ram=512 --vcpus=1 --disk path=/home/virtualmachines/etcd1.qcow2,bus=virtio,size=4 --location http://10.240.0.1/cdrom/ --network network=Kubernetes --extra-args "ks=http://10.240.0.1/etcd1.ks"
49 | 869 virt-install -n etcd1 --description "etcd1" --hvm --os-type=Linux --os-variant=fedora22 --ram=512 --vcpus=1 --disk path=/home/virtualmachines/etcd1.qcow2,bus=virtio,size=4 --location http://10.240.0.1/cdrom/ --network network=Kubernetes --extra-args "ks=http://10.240.0.1/ks/etcd1.ks"
50 | 906 history | grep virt-install
51 | 907 virt-install -n ftest --description "test fedora24" --hvm --os-type=Linux --os-variant=fedora22 --ram=512 --vcpus=1 --disk path=/home/virtualmachines/ftest.qcow2,bus=virtio,size=4 --location http://10.240.0.1/fedora24/ --network network=Kubernetes
52 | 908 virt-install -n ftest --description "test fedora24" --hvm --os-type=Linux --os-variant=fedora22 --ram=512 --vcpus=1 --disk path=/home/virtualmachines/ftest.qcow2,bus=virtio,size=4 --location http://192.168.124.1/fedora24/ --network network=Kubernetes
53 | 909 virt-install -n ftest --description "test fedora24" --hvm --os-type=Linux --os-variant=fedora22 --ram=1024 --vcpus=1 --disk path=/home/virtualmachines/ftest.qcow2,bus=virtio,size=4 --location http://192.168.124.1/fedora24/ --network network=Kubernetes
54 | 910 virt-install -n ftest --description "test fedora24" --hvm --os-type=Linux --os-variant=fedora22 --ram=1024 --vcpus=1 --disk path=/home/virtualmachines/ftest.qcow2,bus=virtio,size=4 --location http://192.168.124.1/fedora24/
55 | 911 virt-install -n ftest --description "test fedora24" --hvm --os-type=Linux --os-variant=fedora22 --ram=2048 --vcpus=1 --disk path=/home/virtualmachines/ftest.qcow2,bus=virtio,size=4 --location http://192.168.124.1/fedora24/
56 |
57 |
58 | virt-install -n test --description "test fedora24" --hvm --os-type=Linux --os-variant=fedora22 --ram=1024 --cpu host --vcpus=1 --features acpi=on,apic=on --clock offset=localtime --disk path=/home/virtualmachines/test-vm.qcow2,bus=virtio,size=6 --network network=Kubernetes --location http://10.240.0.1/ --extra-args "ks=http://10.240.0.1/kickstart/test.ks" --noautoconsole --sound=clearxml --noreboot
59 |
60 | time virt-install -n test --description "test fedora24" --hvm --cpu host --os-type Linux --os-variant fedora22 --ram 1280 --vcpus 1 --features acpi=on,apic=on --clock offset=localtime --disk path=/home/virtualmachines/test-vm.qcow2,bus=virtio,size=6 --network network=Kubernetes --location http://10.240.0.1/ --extra-args "ks=http://10.240.0.1/kickstart/test.ks" --noreboot
61 |
62 |
63 |
64 | ```
65 |
66 |
67 |
68 | Modifying a VM after installation:
69 | ```
70 | [root@kworkhorse ~]# virt-xml lb1 --edit --memory 384,maxmemory=384
71 | Domain 'lb1' defined successfully.
72 | Changes will take effect after the next domain shutdown.
73 | ```
74 |
75 |
76 |
77 | Example run:
78 | ```
79 | [root@kworkhorse cdimages]# time virt-install -n test --description "test fedora24" --hvm --cpu host --os-type Linux --os-variant fedora22 --ram 1280 --vcpus 1 --features acpi=on,apic=on --clock offset=localtime --disk path=/home/virtualmachines/test-vm.qcow2,bus=virtio,size=6 --network network=Kubernetes --location http://10.240.0.1/ --extra-args "ks=http://10.240.0.1/kickstart/test.ks" --noreboot
80 |
81 | Starting install...
82 | Retrieving file vmlinuz... | 6.0 MB 00:00:00
83 | Retrieving file initrd.img... | 46 MB 00:00:00
84 | Creating domain... | 0 B 00:00:00
85 |
86 | (virt-viewer:9502): GSpice-WARNING **: Warning no automount-inhibiting implementation available
87 | Domain installation still in progress. You can reconnect to
88 | the console to complete the installation process.
89 |
90 | real 4m23.375s
91 | user 0m2.962s
92 | sys 0m1.121s
93 | [root@kworkhorse cdimages]#
94 | ```
95 |
96 |
97 | The node will shut off after installation. At this point, change/reduce its RAM size:
98 |
99 | ```
100 | [root@kworkhorse cdimages]# virt-xml test --edit --memory 256,maxmemory=256
101 | Domain 'test' defined successfully.
102 | [root@kworkhorse cdimages]# virsh start test
103 | Domain test started
104 |
105 | [root@kworkhorse cdimages]#
106 | ```
107 |
108 | Node stats after first boot:
109 |
110 | ```
111 | [root@test ~]# df -hT
112 | Filesystem Type Size Used Avail Use% Mounted on
113 | devtmpfs devtmpfs 111M 0 111M 0% /dev
114 | tmpfs tmpfs 119M 0 119M 0% /dev/shm
115 | tmpfs tmpfs 119M 532K 118M 1% /run
116 | tmpfs tmpfs 119M 0 119M 0% /sys/fs/cgroup
117 | /dev/vda2 xfs 5.0G 957M 4.1G 19% /
118 | tmpfs tmpfs 119M 0 119M 0% /tmp
119 | tmpfs tmpfs 24M 0 24M 0% /run/user/0
120 |
121 |
122 | [root@test ~]# rpm -qa | wc -l
123 | 299
124 |
125 |
126 | [root@test ~]# getenforce
127 | Disabled
128 | [root@test ~]#
129 |
130 |
131 | [root@test ~]# iptables -L
132 | Chain INPUT (policy ACCEPT)
133 | target prot opt source destination
134 |
135 | Chain FORWARD (policy ACCEPT)
136 | target prot opt source destination
137 |
138 | Chain OUTPUT (policy ACCEPT)
139 | target prot opt source destination
140 |
141 |
142 | [root@test ~]# service firewalld status
143 | Redirecting to /bin/systemctl status firewalld.service
144 | ● firewalld.service - firewalld - dynamic firewall daemon
145 | Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
146 | Active: inactive (dead)
147 | [root@test ~]#
148 | ```
149 |
150 |
151 |
152 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/kickstart/kickstart.template:
--------------------------------------------------------------------------------
1 | # https://docs.fedoraproject.org/en-US/Fedora/24/html/Installation_Guide/appe-kickstart-syntax-reference.html
2 |
3 | #version=DEVEL
4 |
5 | # System authorization information
6 | auth --enableshadow --passalgo=sha512
7 |
8 | # Use CDROM installation media
9 | # cdrom
10 | # Use graphical install
11 |
12 | # graphical or text
13 | text
14 | skipx
15 |
16 | # Run the Setup Agent on first boot
17 | firstboot --disable
18 |
19 | ignoredisk --only-use=vda
20 |
21 | # Keyboard layouts
22 | keyboard --vckeymap=no --xlayouts='no'
23 |
24 | # System language
25 | lang en_US.UTF-8
26 |
27 | # Network information
28 | network --bootproto=static --device=ens3 --gateway=NODE_GATEWAY --ip=NODE_IP --nameserver=NODE_DNS --netmask=NODE_NETMASK --noipv6 --activate
29 | network --hostname=NODE_FQDN
30 |
31 | # Security
32 | firewall --disabled
33 | selinux --disabled
34 |
35 |
36 | # Root password
37 | # Root password is redhat
38 | rootpw --iscrypted $6$a.26ywQsgJJ.ben6$NhB.p.q3wN6e2YzixvmnzFUa6hbPllMkQeH64QopC4uvJ/1QVgUp0kEUQCmG4vHCQsutj5b7iZ.dhMcH9WtId/
39 |
40 | # Root SSH key
41 | # Kamran's RSA public key
42 | # https://docs.fedoraproject.org/en-US/Fedora//html/Installation_Guide/sect-kickstart-commands-sshkey.html
43 |
44 | # sshkey --username=root "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAx+5TIvYxBryI9d3DjvAeDv4q8xycNbXAAmfOIwhXL0D7So67MpmnQavwHaE/dVsGzP/9XMcidOYl7xBK0aN0fozApThWHaeKpWuJC2w4qE0ijD6tCAbnA7/Wach1rEmGVtRKo5B5lpPXuTedoixM/St/T46wnLFIwsDdFOTMyk9QHRtQ+uJAKv/lkuimMZjDRWeJE5ggwR4SNsc306R9ArnDBdj9HJ3xeUb5rqiBCe1qV3a5k8MpjsaIgG8KPx5dvXRhOTFE4ueh+2wLMy6ydy68NU5kltBtxqBA8CYbEyYmUL/cqRdx6ZVkL8AT5Pv44e2JRnN3kE70HJADfoDX5w== kamran@wbitt.com"
45 |
46 | sshkey --username=root "USER_PUBLIC_KEY"
47 |
48 | # System services
49 | # ntp ?
50 | services --enabled=chronyd,ntpd,sshd --disabled=firewalld
51 | # services --disabled=firewalld
52 |
53 | # System timezone
54 | timezone Europe/Oslo --ntpservers=0.fedora.pool.ntp.org
55 |
56 |
57 | # System bootloader configuration
58 | bootloader --location=mbr --boot-drive=vda
59 |
60 | # Partition clearing information
61 | clearpart --all --initlabel
62 |
63 | # Disk partitioning information
64 | part / --fstype="xfs" --ondisk=vda --size=2048 --grow
65 | part swap --fstype="swap" --ondisk=vda --size=1024
66 |
67 | # Shutdown or reboot the node after installation. (can also use reboot)
68 | shutdown
69 |
70 |
71 | # Packages to install:
72 | # A total of 299 packages will be installed
73 |
74 | %packages
75 | # @^server-product-environment
76 | @core
77 | bind-utils
78 | chrony
79 | openssh-server
80 | curl
81 | wget
82 | bridge-utils
83 | bash
84 | # cockpit # does not exist !!!???
85 | dnf
86 | iproute
87 | iptables
88 | less
89 | logrotate
90 | lsof
91 | net-tools
92 | ntp
93 | openssl
94 | openssh-clients
95 | python
96 | python2-dnf
97 | rpm
98 | sed
99 | gawk
100 | grep
101 | grub2
102 | sudo
103 | tar
104 | telnet
105 | tzdata
106 | unzip
107 | which
108 | xz
109 | zip
110 | vim-enhanced
111 |
112 | %end
113 |
114 |
115 |
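116 | # For reference: the NODE_* and USER_PUBLIC_KEY placeholders above are filled in with sed, e.g. by
117 | # generateKickstartNode() in libvirt-provisioner/functions.sh. Using the example "test" node values:
118 | #   sed -e "s/NODE_IP/10.240.0.51/" -e "s/NODE_FQDN/test.example.com/" -e "s/NODE_NETMASK/255.255.255.0/" \
119 | #       -e "s/NODE_GATEWAY/10.240.0.1/" -e "s/NODE_DNS/10.240.0.1/" kickstart.template > test.example.com.ks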
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/libvirt-provisioner/README.md:
--------------------------------------------------------------------------------
1 | This README is VERY DRAFTY. Please ignore for now.
2 |
3 |
4 |
5 |
6 |
7 |
8 | # The libvirt machine provisioner
9 | Designed to provision virtual machines using kvm/libvirt. Instead of provisioning each node painstakingly, or provisioning one and the ngoing through cloning and manually changing various OS configurations, here is a solution which uses tools provided by libvirt in concert with standard linux provisioning tools, such as kickstart.
10 |
11 | # Prerequisites / setup:
12 | * You need to have the Server DVD ISO for Fedora 24 or higher - extracted (or loop mounted) in a directory on the file system. If you don't have it already, download it from the Fedora download page, [here](https://getfedora.org/en/server/).
13 | * The directory containing the Fedora content should be inside the document root of the web server you will be running. See the next point.
14 | * You need to be able to run a web server (port 80), so we can use it to publish the Fedora DVD contents, and also the kickstart files.
15 | * The document root directory of the web server (e.g. apache) (/var/www/html) will have two subdirectories inside it:
16 |   * `cdrom` and `kickstart`.
17 | * If the Fedora DVD is extracted it needs to be in this `/var/www/html/cdrom` directory.
18 | * If the Fedora DVD ISO is to be loop mounted, it needs to be loop mounted on `/var/www/html/cdrom`.
19 | * If you use nginx, adjust the paths accordingly.
20 | * The kickstarts need to be copied into `/var/www/html/kickstart`
21 | * You can also use an apache (httpd) or nginx docker container to serve the content. (You get more geek points!)
22 |
23 | ## Libvirt setup adjustment:
24 | Since I will be running all the project commands as a normal user, I would like my regular user `kamran` to be able to execute all libvirt-provided tools and manage VMs. (By default, only root is able to do that.) To make that possible, I need to make a few small adjustments to the libvirt setup.
25 |
26 | Add my user `kamran` to the group named `libvirt` . Then, save the following code as a PolKit file:
27 |
28 | ```
29 | cat > /etc/polkit-1/rules.d/49-org.libvirt.unix.manager.rules << POLKITEOF
30 | /* Allow users in kvm/libvirt group to manage the libvirt
31 | daemon without authentication */
32 | polkit.addRule(function(action, subject) {
33 | if (action.id == "org.libvirt.unix.manage" &&
34 | subject.isInGroup("libvirt")) {
35 | return polkit.Result.YES;
36 | }
37 | });
38 | POLKITEOF
39 | ```
40 |
41 | Then, execute the following on your current bash shell - as user `kamran`.
42 | ```
43 | export LIBVIRT_DEFAULT_URI=qemu:///system
44 | ```
45 |
46 | Make sure that you also add the same line to your ~/.bash_profile
47 | ```
48 | echo "export LIBVIRT_DEFAULT_URI=qemu:///system" >> ~/.bash_profile
49 | ```
50 |
51 |
52 | Ideally, you will not need to restart libvirtd service. You can do that if you want to!
53 |
54 | At this point, you should be able to list machines, or do anything else with the libvirt daemon, as a regular user.
55 |
56 | ```
57 | [kamran@kworkhorse ~]$ virsh list --all
58 | Id Name State
59 | ----------------------------------------------------
60 | - Alpine-3.4.5-64bit shut off
61 | - endian-firewall shut off
62 | - test shut off
63 | - win2k12r2 shut off
64 |
65 | [kamran@kworkhorse ~]$
66 | ```
67 |
68 |
69 | ##
70 |
71 |
72 |
73 |
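74 | For completeness, adding the user to the `libvirt` group (mentioned above) can be done like this; log out and back in afterwards for the new group membership to take effect:
75 |
76 | ```
77 | sudo usermod -aG libvirt kamran
78 | ```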
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/libvirt-provisioner/functions.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Functions used in provisioning scripts
3 |
4 | function echolog() {
5 | echo $1
6 | logger -t libvirt-provisioner $1
7 | }
8 |
9 |
10 | function getLibvirtNetworkIP() {
11 | # Receive name of libvirt network in $1
12 | local NETWORK=$1
13 | if [ ! -z "$NETWORK" ] ; then
14 | local IP=$(virsh net-dumpxml ${NETWORK} | grep "ip address" | tr -d "<>\'" | awk '{print $2}'| cut -d '=' -f2)
15 | echo $IP
16 | else
17 | echo "Network-Name-Missing"
18 | fi
19 | }
20 |
21 |
22 | function getLibvirtNetworkMask() {
23 | # Receive name of libvirt network in $1
24 | local NETWORK=$1
25 | if [ ! -z "$NETWORK" ] ; then
26 | local MASK=$(virsh net-dumpxml ${NETWORK} | grep "ip address" | tr -d "<>\'" | awk '{print $3}'| cut -d '=' -f2)
27 | echo $MASK
28 | else
29 | echo "Network-Name-Missing"
30 | fi
31 | }
32 |
33 | function getNodeRAM() {
34 | # Receive node name in $1
35 | local NODE_NAME=$1
36 | if [ ! -z "$NODE_NAME" ] ; then
37 | local NODE_RAM=$(egrep "$NODE_NAME" $HOSTS_FILE | grep -v \^#| awk '{print $4}')
38 | # Check if the value is actually a number
39 | local REGEX='^[0-9]+$'
40 | # Notice special syntax for this "if"
41 | if [[ $NODE_RAM =~ $REGEX ]] ; then
42 | #RAM size is in MB
43 | echo $NODE_RAM
44 | else
45 | echo "RAM-Size-Not-Integer"
46 | fi
47 | else
48 | echo "Node-Name-Missing"
49 | fi
50 | }
51 |
52 |
53 |
54 | function getNodeDisk() {
55 | # Receive node name in $1
56 | local NODE_NAME=$1
57 | if [ ! -z "$NODE_NAME" ] ; then
58 | local NODE_DISK=$(egrep "$NODE_NAME" $HOSTS_FILE | grep -v \^#| awk '{print $5}')
59 | # Check if the value is actually a number
60 | local REGEX='^[0-9]+$'
61 | # Notice special syntax for this "if"
62 | if [[ $NODE_DISK =~ $REGEX ]] ; then
63 | # disk size is in GB
64 | echo $NODE_DISK
65 | else
66 | echo "Disk-Size-Not-Integer"
67 | fi
68 | else
69 | echo "Node-Name-Missing"
70 | fi
71 | }
72 |
73 |
74 | function getNodeIP() {
75 | # Receive node name in $1
76 | local NODE_NAME=$1
77 | if [ ! -z "$NODE_NAME" ] ; then
78 | local NODE_IP=$(egrep "$NODE_NAME" $HOSTS_FILE | grep -v \^#| awk '{print $1}')
79 | # IP
80 | echo $NODE_IP
81 | else
82 | echo "Node-Name-Missing"
83 | fi
84 | }
85 |
86 |
87 | function getNodeFQDN() {
88 | # Receive node IP in $1 , and return Node's FQDN
89 | local NODE_IP=$1
90 | if [ ! -z "$NODE_IP" ] ; then
91 | local NODE_FQDN=$(egrep "$NODE_IP" $HOSTS_FILE | grep -v \^#| awk '{print $2}')
92 | # Node's FQDN
93 | echo $NODE_FQDN
94 | else
95 | echo "Node-IP-Missing"
96 | fi
97 | }
98 |
99 |
100 | function getLibvirtNetworkState() {
101 | # Receive network name in $1
102 | local NETWORK=$1
103 | local NETWORK_STATE=$(virsh net-list | grep $NETWORK | awk '{print $2}')
104 | echo $NETWORK_STATE
105 | }
106 |
107 |
108 |
109 |
110 | function checkKickstart() {
111 | # Need to make sure that kickstart directory exists inside the parent directory.
112 | # Also it needs to have a kickstart.template file in it as a minimum.
113 | if [ -f ../kickstart/kickstart.template ] ; then
114 | return 0
115 | else
116 | return 1
117 | fi
118 | }
119 |
120 | function checkHostsFile() {
121 | # checks host file exist or not.
122 | if [ -f ../hosts ] ; then
123 | return 0
124 | else
125 | return 1
126 | fi
127 | }
128 |
129 |
130 | function generateKickstartNode() {
131 | # Receive node name in $1
132 | # Receive kubernetes v-network name in $2
133 |
134 | local NODE_FQDN=$1
135 | local NODE_GATEWAY_IP=$2
136 | local NODE_NETMASK=$3
137 | local USER_PUBLIC_KEY=$4
138 |
139 | local NODE_IP=$(getNodeIP $NODE_FQDN)
140 | local NODE_DNS=$NODE_GATEWAY_IP
141 |
142 | if checkKickstart ; then
143 | local KS_DIRECTORY=../kickstart
144 | local KS_TEMPLATE=${KS_DIRECTORY}/kickstart.template
145 | sed -e "s/NODE_IP/${NODE_IP}/" \
146 | -e "s/NODE_NETMASK/${NODE_NETMASK}/" \
147 | -e "s/NODE_FQDN/${NODE_FQDN}/" \
148 | -e "s/NODE_GATEWAY/${NODE_GATEWAY_IP}/" \
149 | -e "s/NODE_DNS/${NODE_DNS}/" \
150 | -e "s/USER_PUBLIC_KEY/${USER_PUBLIC_KEY}/" \
151 | ${KS_TEMPLATE} > ${KS_DIRECTORY}/${NODE_FQDN}.ks
152 | else
153 | echo "Kickstart-Directory-or-File-Problem."
154 | fi
155 | }
156 |
157 | function getFirstThreeOctectsOfIP() {
158 | local IP=$1
159 | echo $IP | cut -d '.' -f -3
160 | }
161 |
162 | function generateKickstartAll() {
163 | # receive THREE_OCTETS as $1
164 | # receive NETWORK_GATEWAY_IP as $2
165 | # receive NETWORK_MASK as $3
166 | local THREE_OCTETS=$1
167 | local NETWORK_GATEWAY_IP=$2
168 | local NETWORK_MASK=$3
169 |
170 | # This generates kickstart for all nodes
171 |
172 | if checkHostsFile ; then
173 | # Here we generate kickstart files,
174 | # ignore lines with '-' in them
175 | for node in $(grep ^"$THREE_OCTETS" ../hosts | egrep -v "\^#|\-" | awk '{print $2}'); do
176 | # list of parameters passed to generateKickstartNode are:
177 | # Node FQDN , Network Gateway IP, Network Mask
178 | echolog "Running: generateKickstartNode $node $NETWORK_GATEWAY_IP $NETWORK_MASK"
179 | generateKickstartNode $node $NETWORK_GATEWAY_IP $NETWORK_MASK
180 | done
181 | else
182 | echolog "Hosts file could not be read. Something is wrong."
183 | fi
184 | }
185 |
186 | function createVM() {
187 | # This creates the actual VM
188 | local NODE_NAME=$1
189 | local VM_DISK_DIRECTORY=$2
190 | local VM_NETWORK_NAME=$3
191 | local HTTP_BASE_URL=$4
192 | local LIBVIRT_CONNECTION=$5
193 | local INSTALL_TIME_RAM=$6
194 |
195 | local VM_RAM=$(getNodeRAM ${NODE_NAME})
196 | local VM_DISK=$(getNodeDisk ${NODE_NAME})
197 |
198 | # --cpu host # removed from the command below, because some older CPUs are not compatible.
199 |
200 | virt-install --connect ${LIBVIRT_CONNECTION} -n ${NODE_NAME} --description "$NODE_NAME" --hvm \
201 | --os-type Linux --os-variant fedora22 \
202 | --ram $INSTALL_TIME_RAM --vcpus 1 --features acpi=on,apic=on --clock offset=localtime \
203 | --disk path=${VM_DISK_DIRECTORY}/${NODE_NAME}.qcow2,bus=virtio,size=${VM_DISK} \
204 | --network network=${VM_NETWORK_NAME} \
205 | --location ${HTTP_BASE_URL}/cdrom --extra-args "ks=${HTTP_BASE_URL}/kickstart/${NODE_NAME}.ks" \
206 | --noreboot
207 |
208 | echo "Reducing the VM ${NODE_NAME} RAM to ${VM_RAM} ..."
209 | virt-xml --connect ${LIBVIRT_CONNECTION} ${NODE_NAME} --edit --memory ${VM_RAM},maxmemory=${VM_RAM}
210 |
211 | }
212 |
213 | function createVMAll() {
214 | # This creates the VMs by calling the 'createVM' function.
215 |
216 | # receive THREE_OCTETS as $1 to create list of nodes from hosts file.
217 | # receive VM_DISK_DIRECTORY as $2
218 | # receive VM Network Name as $3
219 | # HTTP_BASE_URL as $4
220 |
221 | local THREE_OCTETS=$1
222 | local VM_DISK_DIRECTORY=$2
223 | local VM_NETWORK_NAME=$3
224 | local HTTP_BASE_URL=$4
225 | local LIBVIRT_CONNECTION=$5
226 | local INSTALL_TIME_RAM=$6
227 | local PARALLEL=$7
228 |
229 | # This creates VMs for all nodes
230 |
231 |
232 | # echo "THREE_OCTETS ====== ${THREE_OCTETS}"
233 | # echo "VM_DISK_DIRECTORY ========= ${VM_DISK_DIRECTORY} "
234 | # echo "VM_NETWORK_NAME ========== ${VM_NETWORK_NAME} "
235 | # echo "HTTP_BASE_URL ======== ${HTTP_BASE_URL}"
236 | # echo "LIBVIRT_CONNECTION ======== ${LIBVIRT_CONNECTION}"
237 |
238 | if checkHostsFile ; then
239 | # Here we use the generated kickstart files, to create VMs.
240 | # ignore lines with '-' in them
241 | for node in $(grep ^"$THREE_OCTETS" ../hosts | egrep -v "\^#|\-" | awk '{print $2}'); do
242 | # list of parameters passed to createVM are:
243 | # Node name, VM disk directory, VM network name, HTTP base URL, libvirt connection, install-time RAM
244 | echolog "Calling: createVM $node $VM_DISK_DIRECTORY $VM_NETWORK_NAME ${HTTP_BASE_URL} ${LIBVIRT_CONNECTION} ${INSTALL_TIME_RAM}"
245 | if [ $PARALLEL -eq 1 ] ; then
246 | # Set a variable with parallel option "&" , and just append it to command, if not set it to blank
247 | PARALLEL_OPTION='&'
248 | # Notice the & for parallel
249 | echo "Running createVM in Parallel mode"
250 | createVM $node $VM_DISK_DIRECTORY $VM_NETWORK_NAME $HTTP_BASE_URL ${LIBVIRT_CONNECTION} ${INSTALL_TIME_RAM} &
251 | sleep 1
252 | else
253 | echo "Running createVM in Serial mode"
254 | PARALLEL_OPTION=''
255 | createVM $node $VM_DISK_DIRECTORY $VM_NETWORK_NAME $HTTP_BASE_URL ${LIBVIRT_CONNECTION} ${INSTALL_TIME_RAM}
256 | fi
257 |
258 | # echo "DEBUG: createVM $node $VM_DISK_DIRECTORY $VM_NETWORK_NAME $HTTP_BASE_URL ${LIBVIRT_CONNECTION} ${INSTALL_TIME_RAM} ${PARALLEL_OPTION}"
259 | # sleep 1
260 | done
261 |
262 | # wait here for parallel/child/background processes to finish
263 | wait
264 | else
265 | echolog "Hosts file could not be read. Something is wrong."
266 | fi
267 |
268 | }
269 |
270 |
271 | function getUserPublicKey() {
272 | if [ -f ~/.ssh/id_rsa.pub ]; then
273 | local USER_PUBLIC_KEY=$(grep -v \# ~/.ssh/id_rsa.pub | grep -v ^$)
274 | echo "${USER_PUBLIC_KEY}"
275 | return 0
276 | else
277 | echo "Publuc-Key-Not-Found"
278 | return 1
279 | fi
280 | }
281 |
282 |
283 | function checkInstallTimeRAM() {
284 | local INSTALL_TIME_RAM=$1
285 | local REGEX='^[0-9]+$'
286 | # Notice special syntax for this "if"
287 | if [[ ${INSTALL_TIME_RAM} =~ $REGEX ]] ; then
288 | # It is a number! good!
289 | if [ ${INSTALL_TIME_RAM} -lt 1280 ] ; then
290 | echo "Install-Time-RAM-Not-Enough"
291 | return 1
292 | else
293 | echo $INSTALL_TIME_RAM
294 | return 0
295 | fi
296 | else
297 | echo "Install-Time-RAM-Size-Not-Integer"
298 | return 1
299 | fi
300 |
301 | }
302 |
303 |
304 |
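305 | # Minimal usage sketch (illustration only; the provisioner script that actually sources this file
306 | # is not shown here). Several helpers also expect HOSTS_FILE to point at the cluster hosts file:
307 | #   source ./functions.sh
308 | #   source ../cluster.conf
309 | #   HOSTS_FILE=../hosts
310 | #   GATEWAY_IP=$(getLibvirtNetworkIP ${LIBVIRT_NETWORK_NAME})
311 | #   THREE_OCTETS=$(getFirstThreeOctectsOfIP ${GATEWAY_IP})
312 | #   createVMAll ${THREE_OCTETS} ${VM_DISK_DIRECTORY} ${LIBVIRT_NETWORK_NAME} ${HTTP_BASE_URL} qemu:///system ${INSTALL_TIME_RAM} ${PARALLEL}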
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/libvirt-provisioner/prepare-web.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source ../cluster.conf
3 |
4 | if [ -z "${ISO_PATH}" ] || [ ! -f "${ISO_PATH}" ]; then
5 | echo "The ISO_PATH variable is empty of the file name or file path provided is not readable."
6 | exit 1
7 | fi
8 |
9 | echo "Mounting the provided ISO image to /mnt/cdrom"
10 | sudo mount -o loop ${ISO_PATH} /mnt/cdrom
11 |
12 |
13 | echo "Running an apache docker container to serve /cdrom and /kicstart"
14 | docker run -v /mnt/cdrom:/usr/local/apache2/htdocs/cdrom \
15 | -v $(pwd)/../kickstart:/usr/local/apache2/htdocs/kickstart \
16 | -p 80:80 \
17 | -d httpd:2.4
18 | echo
19 | echo "-----------------------------------------------------------------------------------------"
20 | docker ps
21 | echo
22 |
23 |
24 |
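25 | # Optional sanity check: the DVD content and kickstart files should now be reachable over HTTP,
26 | # for example (HTTP_BASE_URL comes from cluster.conf):
27 | #   curl -I ${HTTP_BASE_URL}/cdrom/
28 | #   curl -I ${HTTP_BASE_URL}/kickstart/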
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/loadbalancers/configure-loadbalancer-HA.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Configure HA on LB nodes
3 |
4 | # Set the password you want for the hacluster user. This will only be used by pcsd service to sync cluster.
5 | PASSWORD="redhat"
6 |
7 | VIP=$(cat /etc/hosts | grep "lb\." | awk '{print $1}')
8 |
9 | HOSTNAME=$(hostname -s)
10 |
11 |
12 | # The packages need to be installed and the pcsd service running on all nodes before you use this script.
13 |
14 | # echo "-------------------- Setting up HA software (PCS) on node: $HOSTNAME"
15 |
16 | # This is already done in the parent script
17 | # echo "(pre)Installing HA software: pacemaker corosync pcs psmisc nginx ..."
18 | # yum -q -y install pacemaker corosync pcs psmisc nginx jq
19 |
20 |
21 | # Install / download the OCF compliant heartbeat resource agent for haproxy.
22 | curl -s -O https://raw.githubusercontent.com/thisismitch/cluster-agents/master/haproxy
23 | chmod +x haproxy
24 | cp haproxy /usr/lib/ocf/resource.d/heartbeat/
25 | # (Yes, it should be saved in /usr/lib/ocf/resource.d/heartbeat , and NOT in /usr/lib/ocf/resource.d/pacemaker)
26 |
27 | # This is already done in parent script.
28 | # echo "(pre) Enabling HA service: PCSD ..."
29 | # systemctl enable pcsd.service
30 | # systemctl stop pcsd.service
31 | # systemctl start pcsd.service
32 |
33 | echo
34 | echo "===================================================================================================="
35 | echo
36 |
37 | echo
38 | echo "------------------- Setting up HA on Load Balancer node $HOSTNAME..."
39 | echo
40 | # Setting password for user hacluster ...
41 | echo "hacluster:${PASSWORD}" | chpasswd
42 |
43 |
44 | echo "Authenticate user 'hacluster' to the cluster nodes ..."
45 | pcs cluster auth -u hacluster -p ${PASSWORD} lb1.example.com lb2.example.com
46 |
47 |
48 | echo "Checking PCS cluster status on node ..."
49 | pcs status pcsd
50 |
51 | # Execute the following code on node1 only
52 |
53 | if [ "$(hostname -s)" == "lb1" ]; then
54 |
55 | echo "Executing pcs cluster setup commands on node1 only ..."
56 |
57 |
58 | echo "Creating CoroSync communication cluster/service ..."
59 | pcs cluster setup --name LoadbalancerHA lb1.example.com lb2.example.com --force
60 | sleep 5
61 |
62 | echo "Starting cluster on all cluster nodes ... This may take few seconds ..."
63 | pcs cluster start --all
64 | sleep 5
65 |
66 | # this enables the corosync and pacemaker services to start at boot time.
67 | pcs cluster enable --all
68 | sleep 1
69 |
70 | # We do not have stonith device, (nor we are likely to get one), so disable stonith
71 | pcs property set stonith-enabled=false
72 | sleep 5
73 |
74 | pcs status nodes
75 | sleep 1
76 |
77 | pcs status resources
78 | sleep 1
79 |
80 | pcs status corosync
81 |
82 | echo "Setting up cluster resource LoadbalancerVIP as ${VIP} ..."
83 | pcs resource create LoadbalancerVIP ocf:heartbeat:IPaddr2 ip=${VIP} cidr_netmask=32 op monitor interval=30s
84 |
85 | # Allow the cluster some time to decide where it will run the VIP resource
86 | sleep 5
87 |
88 | echo "Setting up cluster resource HAProxy ..."
89 | pcs resource create HAProxy ocf:heartbeat:haproxy conffile=/etc/haproxy/haproxy.cfg op monitor interval=1min
90 | sleep 5
91 |
92 | # Make sure that LoadbalancerVIP and HAProxy are on same node, and haproxy starts after LoadbalancerVIP.
93 | pcs constraint colocation add HAProxy LoadbalancerVIP INFINITY
94 | pcs constraint order LoadbalancerVIP then HAProxy
95 | sleep 5
96 | fi
97 |
98 | echo "Following code will run on all nodes ..."
99 | echo "Check corosync ring status on node $HOSTNAME..."
100 | corosync-cfgtool -s
101 |
102 |
103 | echo "Show status of corosync and pacemaker on node $HOSTNAME ..."
104 | systemctl status corosync pacemaker
105 |
106 |
107 | echo "Showing final pcs status on node $HOSTNAME..."
108 | pcs status
109 |
110 | echo "Showing ip address on $HOSTNAME..."
111 | ip addr
112 |
113 |
114 | ##############################################
115 |
116 | echo
117 | echo "================================================================="
118 | echo
119 |
120 | echo "Setting up Praqma Load Balancer ..."
121 | git clone -q https://github.com/Praqma/k8s-cloud-loadbalancer.git
122 |
123 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/loadbalancers/configure-loadbalancers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Configures the load balancer nodes...
4 |
5 | CONTROLLER_VIP=172.32.10.70
6 |
7 |
8 | echo "Creating /var/lib/kubernetes and moving in certificates ..."
9 | mkdir -p /var/lib/kubernetes
10 |
11 | mv -f ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
12 |
13 |
14 | echo "Installing necessary software components ..."
15 |
16 | yum -y install haproxy git jq
17 |
18 |
19 | echo "Downloading software components ...."
20 |
21 |
22 | echo
23 | echo "Kubernetes kube-proxy and kubectl components ..."
24 | echo "Todo: Actually, we do not need these components to run our LB, as api reader connects to controllers using curl..."
25 | curl -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-proxy
26 | curl -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubectl
27 |
28 |
29 | chmod +x /root/kube*
30 |
31 | mv -f kube* /usr/bin/
32 |
33 |
34 |
35 | echo
36 | echo "Downloading latest version of Praqma's k8s Cloud LoadBalancer"
37 | git clone https://github.com/Praqma/k8s-cloud-loadbalancer.git
38 |
39 |
40 | echo "Configuring haproxy with default config file from our repository ..."
41 | cp k8s-cloud-loadbalancer/lb-nodeport/haproxy.cfg.global-defaults /etc/haproxy/haproxy.cfg
42 |
43 | echo "Enabling haproxy service ..."
44 | # This should ideally be controlled by pacemaker. Since we do not have HA in AWS, we will just start the service on lb node 1.
45 | # Though starting haproxy on both nodes does not harm either.
46 |
47 | systemctl enable haproxy
48 | systemctl stop haproxy
49 | systemctl start haproxy
50 | sleep 5
51 | systemctl status haproxy --no-pager -l
52 |
53 |
54 |
55 |
56 |
57 | #############################################
58 |
59 |
60 |
61 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/loadbalancers/loadbalancers.md:
--------------------------------------------------------------------------------
1 | # Load Balancer
2 |
3 |
4 | Our tfhosts.txt looks like this:
5 |
6 | ```
7 | 52.220.201.1 controller1
8 | 52.220.200.175 controller2
9 | 52.220.102.101 etcd1
10 | 52.74.30.173 etcd2
11 | 52.220.201.44 etcd3
12 | 52.74.35.66 lb1
13 | 52.77.160.219 lb2
14 | 52.220.188.86 worker1
15 | 52.76.72.19 worker2
16 |
17 | ```
18 |
19 | Our /etc/hosts file on all nodes looks like this:
20 |
21 | ```
22 | [root@controller1 ~]# cat /etc/hosts
23 | 127.0.0.1 localhost.localdomain localhost
24 | 172.32.10.43 controller1.example.com
25 | 172.32.10.61 controller2.example.com
26 | 172.32.10.70 controller.example.com
27 | 172.32.10.84 etcd1.example.com
28 | 172.32.10.73 etcd2.example.com
29 | 172.32.10.239 etcd3.example.com
30 | 172.32.10.162 lb1.example.com
31 | 172.32.10.40 lb2.example.com
32 | 172.32.10.50 lb.example.com
33 | 172.32.10.105 worker1.example.com
34 | 172.32.10.68 worker2.example.com
35 | [root@controller1 ~]#
36 |
37 | ```
38 |
39 |
40 |
41 | ## Install necessary software on the load balancer nodes:
42 |
43 | ```
44 | for node in $(cat tfhosts.txt | grep lb | cut -f1 -d$'\t' ); do
45 | echo "Processing node: ${node}"
46 | ssh root@${node} "yum -y install jq haproxy pacemaker pcs corosync psmisc nginx"
47 |
48 | echo "Enabling and staring PCSD service ..."
49 | ssh root@${node} "systemctl enable pcsd.service; systemctl stop pcsd.service; systemctl start pcsd.service"
50 | echo "---------------------------------------------------"
51 | done
52 |
53 | sleep 5
54 | ```
55 |
56 |
57 |
58 |
59 |
60 | ## Install and configure Kubernetes software on the load balancer nodes.
61 |
62 | ```
63 | for node in $(cat tfhosts.txt | grep lb | cut -f1 -d$'\t' ); do
64 | echo "Processing node: ${node}"
65 | scp configure-lb.sh root@${node}:/root/
66 | ssh root@${node} "/root/configure-lb.sh"
67 | echo "---------------------------------------------------"
68 | done
69 |
70 | sleep 5
71 | ```
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/loadbalancers/loadbalancers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # prepares lb nodes
3 |
4 | SCRIPT_PATH=$(dirname $0)
5 | pushd $(pwd)
6 | cd $SCRIPT_PATH
7 |
8 |
9 |
10 | echo "Installing HA software (Pacemaker, Corosync, haproxy, jq) on the load balancer nodes:"
11 | for node in $(grep -v \# /etc/hosts| grep "lb[0-9]\." | awk '{print $2}'); do
12 |
13 | echo "Processing node: ${node}"
14 | scp /etc/hosts root@${node}:/etc/hosts
15 |
16 | ssh root@${node} "yum -q -y install jq haproxy pacemaker pcs corosync psmisc nginx git"
17 |
18 | # Firewalld is such a pain in the neck that I decided to forcibly remove it and stop/flush iptables,
19 | # to make sure that it does not interfere with the cluster. This is VERY important.
20 | ssh root@${node} "systemctl stop iptables firewalld ; yum -q -y remove firewalld; iptables -t nat -F ; iptables -F"
21 |
22 | echo
23 | echo "Enabling and staring PCSD service on node $node ..."
24 | echo
25 | ssh root@${node} "systemctl enable pcsd.service; systemctl stop pcsd.service; systemctl start pcsd.service"
26 |
27 | done
28 |
29 | # Let the cluster settle down and decide who will be the leader, etc.
30 | sleep 2
31 |
32 |
33 |
34 | echo "========================================================================================="
35 |
36 | echo "Configure HA software on LB nodes ..."
37 |
38 | for node in $(grep -v \# /etc/hosts| grep "lb[0-9]\." | awk '{print $2}'); do
39 | echo "Setting up HA on node: ${node}"
40 | scp configure-loadbalancer-HA.sh root@${node}:/root/
41 | ssh root@${node} "/root/configure-loadbalancer-HA.sh"
42 | echo "---------------------------------------------------"
43 | done
44 |
45 | # Done
46 | popd
47 |
48 |
49 |
50 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/misc-tools/README.md:
--------------------------------------------------------------------------------
1 | This directory contains some basic tools needed for easier management of the cluster.
2 |
3 | # get-fingerprints.sh
4 |
5 | Normally, when you connect to some computer over SSH, you are presented with its fingerprint. You normally type 'yes' to accept the fingerprint and have it added to your known_hosts file; and then you get on with your usual business. The first time you connect, you encounter something like this:
6 |
7 | ```
8 | [kamran@kworkhorse ~]$ ssh root@192.168.124.200
9 | The authenticity of host '192.168.124.200 (192.168.124.200)' can't be established.
10 | ECDSA key fingerprint is SHA256:bRrm8pYvfO6r4xFnwQ7bwpZl9WgmqlDIDqkP214DqKM.
11 | ECDSA key fingerprint is MD5:a5:a6:dd:13:d2:c4:39:15:32:4c:ae:17:6e:40:94:ea.
12 | Are you sure you want to continue connecting (yes/no)? yes
13 | Warning: Permanently added '192.168.124.200' (ECDSA) to the list of known hosts.
14 | Last login: Mon Dec 12 11:29:37 2016 from 192.168.124.1
15 | [root@dockerhost ~]#
16 | ```
17 | If you re-provision the target computer, its fingerprint changes. The fingerprint also changes if some other machine tries to pose as `192.168.124.200` instead of your machine. This is exactly where the fingerprint helps, so you should not blindly type 'yes'.
18 |
19 | Anyhow, assuming you simply re-provisioned 192.168.124.200, when you try to ssh again you encounter this message:
20 |
21 | ```
22 | [kamran@kworkhorse ~]$ ssh root@192.168.124.200
23 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
24 | @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @
25 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
26 | IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!
27 | Someone could be eavesdropping on you right now (man-in-the-middle attack)!
28 | It is also possible that a host key has just been changed.
29 | The fingerprint for the ECDSA key sent by the remote host is
30 | SHA256:3mdbjLQvVgkJIGM/ezDAMLdu4TqCGMAmXhVroiXux5o.
31 | Please contact your system administrator.
32 | Add correct host key in /home/kamran/.ssh/known_hosts to get rid of this message.
33 | Offending ECDSA key in /home/kamran/.ssh/known_hosts:190
34 | ECDSA host key for 192.168.124.200 has changed and you have requested strict checking.
35 | Host key verification failed.
36 | [kamran@kworkhorse ~]$
37 | ```
38 |
39 | Normally you edit the known_hosts file in your ~/.ssh directory, remove the offending entry, and try to connect again. That presents you with the fingerprint of the remote machine, you accept it, and life moves on.
40 |
41 | Assume you have 10 or 20 or 100 machines in a cluster, such as a Kubernetes cluster, and you just re-provisioned a new test cluster. The names and IP scheme are still the same, but the fingerprints of all nodes have changed. Would you edit the known_hosts file and remove all entries manually? I suggest you do not, and instead use the `get-fingerprints.sh` script to do this task for you.
42 |
43 | The `get-fingerprints.sh` script does two things. It picks up your hosts file, scans your known_hosts file, and removes all the entries matching what it finds in the hosts file. (**Note:** This is specifically in the context of a Kubernetes cluster.) Then it does an ssh-keyscan of all the nodes and adds their keys to your known_hosts. Simple!
44 |
45 |
46 | # get-dates.sh
47 | To verify that you will be able to connect to the cluster nodes without being presented with fingerprints, you run the innocent `get-dates.sh` script, which simply connects to each node (obtained from the hosts file) and shows the date and time on the target computer.
48 |
49 |
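50 | For a single host, the manual cleanup described above can also be done with `ssh-keygen` and `ssh-keyscan`, for example:
51 |
52 | ```
53 | ssh-keygen -R 192.168.124.200
54 | ssh-keyscan 192.168.124.200 >> ~/.ssh/known_hosts
55 | ```
56 |
57 | The `get-fingerprints.sh` script automates the same idea for every node in the hosts file (it uses `sed` for the removal and `ssh-keyscan` for the re-add).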
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/misc-tools/get-dates.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Henrik's script.
3 | #
4 |
5 | ips=$(egrep -v "\#|^127" /etc/hosts | grep -e "[a-z].*[0-9]\.example.com" | awk '{print $1 }')
6 |
7 | echo "Assuming you have your SSH RSA public key added to /root/.ssh/authorized_keys on the target nodes,"
8 | echo "this script obtains system date and time from all cluster nodes."
9 |
10 | for node in $ips; do
11 | echo "Date and time from node : $node : $(ssh root@$node 'date')"
12 | done
13 |
14 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/misc-tools/get-fingerprints.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Summary: Gets the fingerprints of the nodes, as listed in the hosts file in the parent directory.
3 |
4 |
5 | for node in $(grep ^10.240.0 ../hosts | grep -v \# | awk '{print $1,$2}') ; do
6 | # This produces X lines of output where X corresponds to the number of enabled hosts in the hosts file.
7 | # But since there is a comma (,) in awk command the two variables (IP and FQDN) are separated by space,
8 | # which are treated as two separate values by the for command. thus this loop runs for (2 times X) times,
9 | # which we use to remove all possible lines from the known_hosts file - which is safe.
10 |
11 | echo "Removing previous entries of the node $node from ~/.ssh/known_hosts"
12 | echo "--------------------------------------------------------------------"
13 |   sed -i "/${node}/d" ~/.ssh/known_hosts  # Double quotes so ${node} expands; single quotes would make sed look for the literal text '${node}'.
14 | done
15 |
16 | # At this point, we are done removing the existing entries, so now we can add proper entries in the known_hosts file.
17 | # Run loop one more time, but this time use comma in awk in the output, to concatenate IP and FQDN, like (IP,FQDN).
18 | # This is then used by ssh-keyscan.
19 |
20 | for node in $(grep ^10.240.0 ../hosts | grep -v \# | awk '{print $1 "," $2}') ; do
21 | # This produces X lines of output where X corresponds to the number of enabled hosts in the hosts file.
22 | # The IP and FQDN are concatenated by a comma which is treated as one value by the for loop.
23 |
24 | echo "Adding fingerprint in ~/.ssh/known_hosts for node \"$node\" "
25 | echo "-------------------------------------------------------------------------------"
26 | ssh-keyscan $node >> ~/.ssh/known_hosts
27 | done
28 |
29 |
30 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/workers/configure-workers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Configures worker nodes...
4 |
5 | CONTROLLER_VIP=$(grep -v \# /etc/hosts | grep "controller\." | awk '{print $1}')
6 |
7 |
8 | if [ ! -f ca.pem ] || [ ! -f kubernetes-key.pem ] || [ ! -f kubernetes.pem ] ; then
9 | echo "Certs not found in /root/"
10 | exit 9
11 | fi
12 |
13 | echo "Creating /var/lib/kubernetes and moving in certificates ..."
14 | mkdir -p /var/lib/kubernetes
15 | mv ca.pem kubernetes-key.pem kubernetes.pem /var/lib/kubernetes/
16 |
17 |
18 | # Downloading software is already done in the parent script.
19 | # echo "Downloading software components ...."
20 | # echo
21 | # echo "Docker..."
22 | # curl -# -z docker-1.12.3.tgz -O https://get.docker.com/builds/Linux/x86_64/docker-1.12.3.tgz
23 |
24 | # echo
25 | # echo "Kubernetes worker components ..."
26 | # curl -# -z kubectl -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubectl
27 | # curl -# -z kube-proxy -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-proxy
28 | # curl -# -z kubelet -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubelet
29 |
30 | tar xzf docker-1.12.3.tgz -C /usr/bin/ --strip-components=1
31 |
32 | # delete tarfile later.
33 |
34 | chmod +x /root/kube*
35 |
36 | mv kube* /usr/bin/
37 |
38 | echo "Configuring docker service ..."
39 |
40 | cat > /etc/systemd/system/docker.service << DOCKEREOF
41 | [Unit]
42 | Description=Docker Application Container Engine
43 | Documentation=http://docs.docker.io
44 |
45 | [Service]
46 | ExecStart=/usr/bin/docker daemon \
47 | --iptables=false \
48 | --ip-masq=false \
49 | --host=unix:///var/run/docker.sock \
50 | --log-level=error \
51 | --storage-driver=overlay
52 | Restart=on-failure
53 | RestartSec=5
54 |
55 | [Install]
56 | WantedBy=multi-user.target
57 | DOCKEREOF
58 |
59 |
60 |
61 | echo "Enable and start docker service ..."
62 |
63 | systemctl daemon-reload
64 | systemctl enable docker
65 | systemctl stop docker
66 | sleep 3
67 | systemctl start docker
68 | sleep 3
69 |
70 | docker version
71 |
72 |
73 |
74 |
75 | echo "Download and install CNI plugins for kubernetes usage"
76 |
77 | mkdir -p /opt/cni
78 |
79 | # Downloaded already in the parent script.
80 | # curl -O https://storage.googleapis.com/kubernetes-release/network-plugins/cni-c864f0e1ea73719b8f4582402b0847064f9883b0.tar.gz
81 | # curl -z -O https://storage.googleapis.com/kubernetes-release/network-plugins/cni-amd64-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
82 |
83 | tar xzf cni-amd64-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz -C /opt/cni
84 |
85 |
86 | echo "Configure K8s worker components ..."
87 |
88 | echo "Configuring kubelet ..."
89 |
90 |
91 | mkdir -p /var/lib/kubelet/
92 |
93 | cat > /var/lib/kubelet/kubeconfig << KUBECONFIGEOF
94 | apiVersion: v1
95 | kind: Config
96 | clusters:
97 | - cluster:
98 | certificate-authority: /var/lib/kubernetes/ca.pem
99 | server: https://${CONTROLLER_VIP}:6443
100 | name: kubernetes
101 | contexts:
102 | - context:
103 | cluster: kubernetes
104 | user: kubelet
105 | name: kubelet
106 | current-context: kubelet
107 | users:
108 | - name: kubelet
109 | user:
110 | token: chAng3m3
111 | KUBECONFIGEOF
112 |
113 |
114 |
115 | echo "Creating the kubelet systemd unit file ..."
116 |
117 | cat > /etc/systemd/system/kubelet.service << KUBELETEOF
118 | [Unit]
119 | Description=Kubernetes Kubelet
120 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
121 | After=docker.service
122 | Requires=docker.service
123 |
124 | [Service]
125 | ExecStart=/usr/bin/kubelet \
126 | --allow-privileged=true \
127 | --api-servers=https://${CONTROLLER_VIP}:6443 \
128 | --cloud-provider= \
129 | --cluster-dns=10.32.0.10 \
130 | --cluster-domain=cluster.local \
131 | --configure-cbr0=true \
132 | --container-runtime=docker \
133 | --docker=unix:///var/run/docker.sock \
134 | --network-plugin=kubenet \
135 | --kubeconfig=/var/lib/kubelet/kubeconfig \
136 | --reconcile-cidr=true \
137 | --serialize-image-pulls=false \
138 | --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \
139 | --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \
140 | --v=2
141 |
142 | Restart=on-failure
143 | RestartSec=5
144 |
145 | [Install]
146 | WantedBy=multi-user.target
147 | KUBELETEOF
148 |
149 |
150 |
151 | echo "Starting the kubelet service and check that it is running ..."
152 |
153 | systemctl daemon-reload
154 | systemctl enable kubelet
155 | systemctl stop kubelet
156 | sleep 3
157 | systemctl start kubelet
158 | sleep 3
159 |
160 | systemctl status kubelet --no-pager -l
161 |
162 |
163 |
164 |
165 | echo "Creating systemd unit file for kube-proxy ..."
166 |
167 | cat > /etc/systemd/system/kube-proxy.service << KUBEPROXYEOF
168 | [Unit]
169 | Description=Kubernetes Kube Proxy
170 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
171 |
172 | [Service]
173 | ExecStart=/usr/bin/kube-proxy \
174 | --master=https://${CONTROLLER_VIP}:6443 \
175 | --kubeconfig=/var/lib/kubelet/kubeconfig \
176 | --proxy-mode=iptables \
177 | --v=2
178 |
179 | Restart=on-failure
180 | RestartSec=5
181 |
182 | [Install]
183 | WantedBy=multi-user.target
184 | KUBEPROXYEOF
185 |
186 |
187 | systemctl daemon-reload
188 | systemctl enable kube-proxy
189 | systemctl stop kube-proxy
190 | sleep 3
191 |
192 | systemctl start kube-proxy
193 | sleep 3
194 |
195 | systemctl status kube-proxy --no-pager -l
196 |
197 |
198 |
199 | #################################################################
200 |
201 |
202 |
203 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/workers/show-routes.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | CONTROLLER_VIP=$(grep -v \# /etc/hosts | grep "controller\." | awk '{print $1}')
3 | echo "Controller VIP: $CONTROLLER_VIP"
4 | ssh root@${CONTROLLER_VIP} "kubectl get nodes"
5 |
6 | for node in $(grep -v \# /etc/hosts| grep "worker[0-9]" | awk '{print $2}'); do
7 |
8 |
9 | ssh root@${CONTROLLER_VIP} \
10 | "kubectl describe node ${node}" \
11 | | egrep -w "Name:|PodCIDR" | tr '\n' '\t' | awk '{print "Pod (CNI/CIDR) Network ",$4," is reachable via host ",$2 }'
12 | done
13 |
14 | echo
15 | echo "---------------------------------------------------------------------------"
16 |
17 | echo "Execute the following commands on the Linux gateway/router. OR , on ALL cluster nodes, except worker node."
18 | echo "On worker nodes, you do not delete the exiting route, which is connected through cbr0. You just add the other one."
19 | echo
20 | for node in $(grep -v \# /etc/hosts| grep "worker[0-9]" | awk '{print $2}'); do
21 |
22 | NODE_IP=$(grep -w $node /etc/hosts | grep -v \# | awk '{print $1}')
23 | # echo $NODE_IP
24 |
25 | # awk -v is to pass an external variable to awk
26 |
27 | ssh root@${CONTROLLER_VIP} \
28 | "kubectl describe node ${node}" \
29 | | egrep -w "Name:|PodCIDR" | tr '\n' '\t' | awk -v IP=$NODE_IP '{ print "route del -net " , $4, "\n", "route add -net " , $4 , " gw " , IP }'
30 | done
31 | echo
32 |
33 |
34 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/workers/workers.md:
--------------------------------------------------------------------------------
1 | # Install and configure Kubernetes Worker nodes:
2 |
3 | Our tfhosts.txt looks like this:
4 |
5 | ```
6 | 52.220.201.1 controller1
7 | 52.220.200.175 controller2
8 | 52.220.102.101 etcd1
9 | 52.74.30.173 etcd2
10 | 52.220.201.44 etcd3
11 | 52.74.35.66 lb1
12 | 52.77.160.219 lb2
13 | 52.220.188.86 worker1
14 | 52.76.72.19 worker2
15 |
16 | ```
17 |
18 | Our /etc/hosts file on all nodes looks like this:
19 |
20 | ```
21 | [root@controller1 ~]# cat /etc/hosts
22 | 127.0.0.1 localhost.localdomain localhost
23 | 172.32.10.43 controller1.example.com
24 | 172.32.10.61 controller2.example.com
25 | 172.32.10.70 controller.example.com
26 | 172.32.10.84 etcd1.example.com
27 | 172.32.10.73 etcd2.example.com
28 | 172.32.10.239 etcd3.example.com
29 | 172.32.10.162 lb1.example.com
30 | 172.32.10.40 lb2.example.com
31 | 172.32.10.50 lb.example.com
32 | 172.32.10.105 worker1.example.com
33 | 172.32.10.68 worker2.example.com
34 | [root@controller1 ~]#
35 | ```
36 |
37 |
38 |
39 | ## Install and configure Kubernetes software on Worker nodes.
40 |
41 | ```
42 | for node in $(cat tfhosts.txt | grep worker | cut -f1 -d$'\t' ); do
43 | echo "Processing node: ${node}"
44 | scp configure-workers.sh root@${node}:/root/
45 | ssh root@${node} "/root/configure-workers.sh"
46 | done
47 |
48 | sleep 5
49 | ```
50 |
51 | # Check status of nodes from master node:
52 | ```
53 | ssh root@controller.example.com "kubectl get nodes"
54 | ```
55 |
56 |
57 | # Now we need the nodes' pod IP subnets (PodCIDRs) so we can add them to the router.
58 | ```
59 | ssh root@controller.example.com "kubectl describe node worker1 worker2 | egrep 'Name|PodCIDR' "
60 | ```
61 |
62 |
63 | ```
64 | kubectl get nodes \
65 | --output=jsonpath='{range .items[*]}{.status.addresses[?(@.type=="InternalIP")].address} {.spec.podCIDR} {"\n"}{end}'
66 | ```
67 |
68 |
69 | ```
70 | [root@controller1 ~]# kubectl get nodes \
71 | > --output=jsonpath='{range .items[*]}{.status.addresses[?(@.type=="InternalIP")].address} {.spec.podCIDR} {"\n"}{end}'
72 | 172.32.10.105 10.200.0.0/24
73 | 172.32.10.68 10.200.1.0/24
74 | [root@controller1 ~]#
75 | ```
76 |
77 |
78 |
79 | Let's get the ID of the route table attached to our VPC (it should show "Yes" / True in the "Main" column).
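One way to check which table is the main one, assuming the AWS CLI and jq are available (the tag filter below is the one used elsewhere in this setup; adjust it to your own tags):

```
aws ec2 describe-route-tables --filters "Name=tag:By,Values=Praqma" \
  | jq -r '.RouteTables[] | "\(.RouteTableId) main=\(.Associations[].Main)"'
```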
80 |
81 |
82 | Meanwhile, let's create two pods (ideally on two different nodes):
83 |
84 | ```
85 | [root@controller1 ~]# kubectl get nodes
86 | NAME STATUS AGE
87 | worker1.example.com Ready 18h
88 | worker2.example.com Ready 18h
89 |
90 |
91 | [root@controller1 ~]# kubectl get pods
92 |
93 |
94 | [root@controller1 ~]# kubectl run network-multitool --image praqma/network-multitool --replicas 2
95 | deployment "network-multitool" created
96 |
97 |
98 | [root@controller1 ~]# kubectl get pods
99 | NAME READY STATUS RESTARTS AGE
100 | network-multitool-2164695616-nxvvk 0/1 ContainerCreating 0 8s
101 | network-multitool-2164695616-zsoh3 0/1 ContainerCreating 0 8s
102 | [root@controller1 ~]#
103 | ```
104 |
105 |
106 | ```
107 | [root@controller1 ~]# kubectl get pods -o wide
108 | NAME READY STATUS RESTARTS AGE IP NODE
109 | network-multitool-2164695616-nxvvk 1/1 Running 0 1m 10.200.0.2 worker1.example.com
110 | network-multitool-2164695616-zsoh3 1/1 Running 0 1m 10.200.1.2 worker2.example.com
111 | [root@controller1 ~]#
112 | ```
113 |
114 |
115 | ```
116 | [root@controller1 ~]# kubectl exec -it network-multitool-2164695616-nxvvk bash
117 | [root@network-multitool-2164695616-nxvvk /]# ping 10.200.1.2
118 | PING 10.200.1.2 (10.200.1.2) 56(84) bytes of data.
119 | ^C
120 | --- 10.200.1.2 ping statistics ---
121 | 4 packets transmitted, 0 received, 100% packet loss, time 2999ms
122 |
123 | [root@network-multitool-2164695616-nxvvk /]#
124 | ```
125 |
126 |
127 | An error to figure out later; it appears when exiting the pod:
128 |
129 | ```
130 | [root@network-multitool-2164695616-nxvvk /]# exit
131 | exit
132 | error: error executing remote command: error executing command in container: Error executing in Docker Container: 1
133 | [root@controller1 ~]#
134 | ```
135 |
136 |
137 |
138 | Let's set up the routing in AWS:
139 |
140 | ```
141 | [root@controller1 ~]# aws ec2 describe-route-tables --filters "Name=tag:By,Values=Praqma" | jq -r '.RouteTables[].RouteTableId'
142 | rtb-88ab18ec
143 | [root@controller1 ~]#
144 |
145 | ```
146 |
147 | ```
148 | [root@controller1 ~]# kubectl get nodes --output=jsonpath='{range .items[*]}{.status.addresses[?(@.type=="InternalIP")].address} {.spec.podCIDR} {"\n"}{end}'
149 | 172.32.10.105 10.200.0.0/24
150 | 172.32.10.68 10.200.1.0/24
151 | [root@controller1 ~]#
152 | ```
153 |
154 | ```
155 | [root@controller1 ~]# aws ec2 describe-instances --filters "Name=tag:Name,Values=worker-1" | jq -j '.Reservations[].Instances[].InstanceId'
156 | i-ea6d806b
157 | [root@controller1 ~]#
158 | ```
159 |
160 |
161 | ```
162 | [root@controller1 ~]# aws ec2 describe-instances --filters "Name=tag:Name,Values=worker-2" | jq -j '.Reservations[].Instances[].InstanceId'
163 | i-456d80c4
164 | [root@controller1 ~]#
165 | ```
166 |
167 |
168 | ```
169 | [root@controller1 ~]# ROUTE_TABLE_ID=$(aws ec2 describe-route-tables --filters "Name=tag:By,Values=Praqma" | jq -r '.RouteTables[].RouteTableId')
170 |
171 |
172 | [root@controller1 ~]# echo $ROUTE_TABLE_ID
173 | rtb-88ab18ec
174 | [root@controller1 ~]#
175 | ```
176 |
177 |
178 | ```
179 | [root@controller1 ~]# WORKER_1_ID=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=worker-1" | jq -j '.Reservations[].Instances[].InstanceId')
180 | [root@controller1 ~]# WORKER_2_ID=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=worker-2" | jq -j '.Reservations[].Instances[].InstanceId')
181 | [root@controller1 ~]#
182 | ```
183 |
184 |
185 | ```
186 | aws ec2 create-route \
187 | --route-table-id ${ROUTE_TABLE_ID} \
188 | --destination-cidr-block 10.200.0.0/24 \
189 | --instance-id ${WORKER_1_ID}
190 |
191 |
192 |
193 | aws ec2 create-route \
194 | --route-table-id ${ROUTE_TABLE_ID} \
195 | --destination-cidr-block 10.200.1.0/24 \
196 | --instance-id ${WORKER_2_ID}
197 | ```
198 |
199 |
200 | Now the pods see each other:
201 | ```
202 | [root@controller1 ~]# kubectl exec -it network-multitool-2164695616-nxvvk bash
203 | [root@network-multitool-2164695616-nxvvk /]# ping 10.200.1.2
204 | PING 10.200.1.2 (10.200.1.2) 56(84) bytes of data.
205 | 64 bytes from 10.200.1.2: icmp_seq=1 ttl=62 time=0.542 ms
206 | 64 bytes from 10.200.1.2: icmp_seq=2 ttl=62 time=0.495 ms
207 | 64 bytes from 10.200.1.2: icmp_seq=3 ttl=62 time=0.471 ms
208 | ^C
209 | --- 10.200.1.2 ping statistics ---
210 | 3 packets transmitted, 3 received, 0% packet loss, time 2000ms
211 | rtt min/avg/max/mdev = 0.471/0.502/0.542/0.039 ms
212 | [root@network-multitool-2164695616-nxvvk /]#
213 | ```
214 |
215 |
216 | Hurray!!!!
217 |
218 |
219 |
220 |
--------------------------------------------------------------------------------
/kamran/cluster-setup-scripts.NotUsedAnyMore/workers/workers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Summary: Install and configure Kubernetes software on worker nodes.
3 | # Software required from fedora repo: pacemaker corosync pcs psmisc nginx
4 |
5 | SCRIPT_PATH=$(dirname $0)
6 | pushd $(pwd)
7 | cd $SCRIPT_PATH
8 |
9 |
10 | echo "======================= Configuring Docker and Kubernetes software on worker nodes ... ======================"
11 |
12 |
13 | # check if certs are there
14 | if [ ! -f ../certs/kubernetes.pem ] || [ ! -f ../certs/kubernetes-key.pem ] || [ ! -f ../certs/ca.pem ] ; then
15 | echo "Certs not found in ../certs . Cannot continue ..."
16 | popd
17 | exit 9
18 | fi
19 |
20 |
21 |
22 |
23 | chmod +x configure*.sh
24 |
25 |
26 | # The Kubernetes software is large, so it is better to download it once on the technician computer
27 | # and then copy it to all worker nodes. This saves time.
28 |
29 | echo "Downloading Docker and Kubernetes software components to the technician computer..."
30 |
31 | curl -# -z docker-1.12.3.tgz -O https://get.docker.com/builds/Linux/x86_64/docker-1.12.3.tgz
32 |
33 | curl -# -z kubectl -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubectl
34 | curl -# -z kube-proxy -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kube-proxy
35 | curl -# -z kubelet -O https://storage.googleapis.com/kubernetes-release/release/v1.3.10/bin/linux/amd64/kubelet
36 | curl -# -O https://storage.googleapis.com/kubernetes-release/network-plugins/cni-amd64-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
37 |
38 | chmod +x kube*
39 |
40 |
41 | # List and process actual nodes and not the VIP
42 | for node in $(grep -v \# /etc/hosts| grep "worker[0-9]" | awk '{print $2}'); do
43 | echo "-------------------- Setting up Kubernetes on node: ${node}"
44 |
45 | echo "Copying /etc/hosts file ..."
46 | scp /etc/hosts root@${node}:/etc/hosts
47 |
48 | echo "Copying certs ..."
49 | scp ../certs/*.pem root@${node}:/root/
50 |
51 | echo "Copying configure scripts ..."
52 | scp configure-workers.sh root@${node}:/root/
53 |
54 | echo "Transferring Kubernetes software components to controller nodes directly in /usr/bin/ ..."
55 | scp kube* root@${node}:/root/
56 | scp *.tar.gz *.tgz root@${node}:/root/
57 |
58 | echo "Note: It is OK to get a Text file busy error. It means that the binary on target already exists and is already in use."
59 |
60 | echo "Running the configure-controller-k8s.sh script on node"
61 | ssh root@${node} "/root/configure-workers.sh"
62 |
63 | echo
64 | echo "===================================================================================================="
65 | echo
66 |
67 | done
68 |
69 |
70 | CONTROLLER_VIP=$(grep -v \# /etc/hosts | grep "controller\." | awk '{print $1}')
71 |
72 | echo "Node status from Kubernetes... "
73 | sleep 5
74 | ssh root@${CONTROLLER_VIP} "kubectl get nodes"
75 |
76 | echo "Routing information so you can setup correct routing..."
77 | for node in $(grep -v \# /etc/hosts| grep "worker[0-9]" | awk '{print $2}'); do
78 | ssh root@${CONTROLLER_VIP} \
79 | "kubectl describe node ${node}" \
80 | | egrep -w "Name:|PodCIDR" | tr '\n' '\t' | awk '{print "Pod (CNI/CIDR) Network ",$4," is reachable via host ",$2 }'
81 | done
82 |
83 |
84 |
85 | # All done. Now change directory back to where we came from.
86 | popd
87 |
88 |
89 |
--------------------------------------------------------------------------------
/kamran/fedora-atomic-cloud-init/README.md:
--------------------------------------------------------------------------------
1 | Reference: http://www.projectatomic.io/docs/quickstart/
2 | Use these files to create a cloud-init ISO for the Atomic qcow2 images. Without it, even if you import the Fedora Atomic qcow2 images, you will not have a username / password to log in with. So!
3 |
4 | Generate the image using:
5 | ```
6 | # genisoimage -output init.iso -volid cidata -joliet -rock user-data meta-data
7 | ```
8 |
9 | Note: The files above must be named user-data and meta-data. If you give them different names, you will have issues when the Atomic host starts, because cloud-init expects user-data and meta-data as input filenames. You can learn more about this by checking the cloud-init code on GitHub.
10 |
11 | The files look like this:
12 |
13 | ```
14 | $ cat meta-data
15 | instance-id:
16 | local-hostname:
17 | ```
18 | You can put an instance ID and hostname in this file, or leave them empty. It is up to you.
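For example, a filled-in meta-data file could look like this (the instance ID and hostname below are just placeholders; pick your own):

```
$ cat meta-data
instance-id: atomic-host-01
local-hostname: atomic01.example.com
```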
19 |
20 |
21 | ```
22 | $ cat user-data
23 | #cloud-config
24 | password: atomic
25 | chpasswd: {expire: False}
26 | ssh_pwauth: True
27 | ssh_authorized_keys:
28 | -