├── QSEonK8s Logs.qvf
├── vagrantprovision
│   ├── .vagrant
│   │   ├── machines
│   │   │   └── default
│   │   │       └── virtualbox
│   │   │           └── vagrant_cwd
│   │   └── rgloader
│   │       └── loader.rb
│   ├── files
│   │   ├── storageClass.yaml
│   │   ├── mongo.yaml
│   │   ├── pvc.yaml
│   │   └── qliksense.yaml
│   ├── sh
│   │   ├── 1_bootstrap.sh
│   │   ├── 2_docker.sh
│   │   ├── 3_minikube.sh
│   │   └── 4_qlik.sh
│   ├── Vagrantfile
│   └── readme.md
├── jwtcreate
│   └── readme.md
├── dockerdemo
│   ├── package.json
│   ├── Dockerfile
│   ├── app.js
│   └── readme.md
├── sample_yaml
│   ├── service4pod1.yaml
│   ├── pod1.yaml
│   ├── service4pod2.yaml
│   ├── deploy4pod1.yaml
│   ├── rc4pod1.yaml
│   └── pod2.yaml
├── LICENSE
└── README.md
/QSEonK8s Logs.qvf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChristofSchwarz/qs_on_Kubernetes/HEAD/QSEonK8s Logs.qvf
--------------------------------------------------------------------------------
/vagrantprovision/.vagrant/machines/default/virtualbox/vagrant_cwd:
--------------------------------------------------------------------------------
C:/Users/csw/Documents/github/qs_on_Kubernetes/vagrantprovision
--------------------------------------------------------------------------------
/jwtcreate/readme.md:
--------------------------------------------------------------------------------
## Moved
This folder has been moved to its own git repo:
https://github.com/ChristofSchwarz/qseok_jwt_tokens
--------------------------------------------------------------------------------
/vagrantprovision/files/storageClass.yaml:
--------------------------------------------------------------------------------
nfs:
  path: /export/k8s
  server: 'qliksense-minikube'
storageClass:
  name: localnfs
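# Above: values for the stable/nfs-client-provisioner Helm chart (installed in sh/4_qlik.sh).
# 'qliksense-minikube' is the VM itself, which exports /export/k8s via the NFS server
# set up in sh/1_bootstrap.sh.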
--------------------------------------------------------------------------------
/dockerdemo/package.json:
--------------------------------------------------------------------------------
{
  "name": "hello-christof",
  "version": "1.0.0",
  "dependencies": {
    "express": "^4.15.0"
  }
}
--------------------------------------------------------------------------------
/dockerdemo/Dockerfile:
--------------------------------------------------------------------------------
# https://nodejs.org/en/docs/guides/nodejs-docker-webapp/
FROM node:4.6
WORKDIR /app
# copy the manifest first so the npm install layer can be cached
COPY package*.json .
RUN npm install
COPY app.js .
# the port app.js listens on
EXPOSE 8074
CMD ["node", "app.js"]
--------------------------------------------------------------------------------
/vagrantprovision/files/mongo.yaml:
--------------------------------------------------------------------------------
persistence:
  enabled: true
  existingClaim: pvc-mongo
usePassword: true
mongodbRootPassword: secretpassword
mongodbUsername: qlik
mongodbPassword: Qlik1234
mongodbDatabase: qsefe
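# Above: values for the stable/mongodb Helm chart (installed in sh/4_qlik.sh).
# Username, password and database must match the mongodb.uri in files/qliksense.yaml.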
--------------------------------------------------------------------------------
/dockerdemo/app.js:
--------------------------------------------------------------------------------
var express = require('express');
var app = express();

app.get('/', function (req, res) {
  res.send('Hello from Christof!');
});

var server = app.listen(8074, function () {
  var host = server.address().address;
  var port = server.address().port;
  console.log('Example app listening at http://' + host + ':' + port);
});
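// To try the app outside Docker (assuming a local Node.js install):
//   npm install && node app.js
// then: curl localhost:8074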
--------------------------------------------------------------------------------
/sample_yaml/service4pod1.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: hellochris-svc1
spec:
  type: NodePort
  selector:
    app: hellochris-app1
    # same key-value pair as in metadata.labels section of Pod
  ports:
  - port: 8074
    # matching the port number of the app as in pod1.yaml
    nodePort: 31002
    protocol: TCP
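# Once pod and service exist, the app answers on the node port,
# e.g. from inside the VM (a hypothetical check, not part of the original file):
# curl $(minikube ip):31002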
--------------------------------------------------------------------------------
/vagrantprovision/.vagrant/rgloader/loader.rb:
--------------------------------------------------------------------------------
# This file loads the proper rgloader/loader.rb file that comes packaged
# with Vagrant so that encoded files can properly run with Vagrant.

if ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"]
  require File.expand_path(
    "rgloader/loader", ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"])
else
  raise "Encoded files can't be read outside of the Vagrant installer."
end
--------------------------------------------------------------------------------
/sample_yaml/pod1.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: hellochris-pod1
  labels:
    app: hellochris-app1
spec:
  containers:
  - name: hellochris-container
    image: qristof/hello-christof
    ports:
    - containerPort: 8074

# To create this pod, type
# kubectl create -f pod1.yaml
# To expose the port, you need to create a service:
# kubectl create -f service4pod1.yaml
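# To verify the pod is running (a standard check, not part of the original notes):
# kubectl get pods -l app=hellochris-app1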
--------------------------------------------------------------------------------
/sample_yaml/service4pod2.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: hellochris-svc2
spec:
  type: NodePort
  selector:
    app: hellochris-app2
    # same key-value pair as in metadata.labels section of Pod
  ports:
  - port: 12345
    # non-exposed port number, can be anything
    nodePort: 31003
    targetPort: hellochris-port
    # matching the port name given in the pod2.yaml file
    protocol: TCP
--------------------------------------------------------------------------------
/sample_yaml/deploy4pod1.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hellochris-deploy
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hellochris-app1
  template:
    metadata:
      name: hellochris-rcpod
      labels:
        app: hellochris-app1
    spec:
      containers:
      - name: hellochris-container
        image: qristof/hello-christof
        ports:
        - containerPort: 8074
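# A Deployment can be rescaled after creation (standard kubectl, not part of the original file):
# kubectl scale deployment hellochris-deploy --replicas=3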
--------------------------------------------------------------------------------
/sample_yaml/rc4pod1.yaml:
--------------------------------------------------------------------------------
# Note: ReplicationControllers are outdated; Deployments (using ReplicaSets) are the new way ...

apiVersion: v1
kind: ReplicationController
metadata:
  name: hellochris-rc
spec:
  replicas: 2
  selector:
    app: hellochris-rcapp
  template:
    metadata:
      name: hellochris-rcpod
      labels:
        app: hellochris-rcapp
    spec:
      containers:
      - name: hellochris-container
        image: qristof/hello-christof
        ports:
        - containerPort: 8074
--------------------------------------------------------------------------------
/sample_yaml/pod2.yaml:
--------------------------------------------------------------------------------
# Like pod1.yaml, but this time using a name for the port, which can be used in the service

apiVersion: v1
kind: Pod
metadata:
  name: hellochris-pod2
  labels:
    app: hellochris-app2
spec:
  containers:
  - name: hellochris-container
    image: qristof/hello-christof
    ports:
    - containerPort: 8074
      name: hellochris-port
      # the port name (15 chars max) will be used by the service yaml

# To create this pod, type
# kubectl create -f pod2.yaml
# To expose the port, you need to create a service:
# kubectl create -f service4pod2.yaml
--------------------------------------------------------------------------------
/vagrantprovision/files/pvc.yaml:
--------------------------------------------------------------------------------
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-mongo
  annotations:
    volume.beta.kubernetes.io/storage-class: localnfs
spec:
  storageClassName: localnfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-qse
  annotations:
    volume.beta.kubernetes.io/storage-class: localnfs
spec:
  storageClassName: localnfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
--------------------------------------------------------------------------------
/vagrantprovision/sh/4_qlik.sh:
--------------------------------------------------------------------------------
# Initialize the Helm Tiller pod, upgrade it and update the repos
helm init
helm init --wait --upgrade
helm repo update

# Install the storageClass via the NFS client provisioner
helm install -n nfs stable/nfs-client-provisioner -f /vagrant/files/storageClass.yaml

# Create the Persistent Volume Claims
kubectl apply -f /vagrant/files/pvc.yaml

# Install MongoDB
helm install -n mongo stable/mongodb -f /vagrant/files/mongo.yaml

# Add the Qlik Helm repos (stable and edge)
helm repo add qlik-stable https://qlik.bintray.com/stable
helm repo add qlik-edge https://qlik.bintray.com/edge

# Install the initial settings
helm install -n qlikinit qlik-stable/qliksense-init

# Install Qlik Sense Enterprise on Kubernetes from the stable repo
helm install -n qlik qlik-stable/qliksense -f /vagrant/files/qliksense.yaml
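# The charts take a while to come up; to follow progress
# (standard kubectl, not part of the original script):
# kubectl get pods --watch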
--------------------------------------------------------------------------------
/vagrantprovision/sh/1_bootstrap.sh:
--------------------------------------------------------------------------------
echo 'executing "1_bootstrap.sh"'
echo 'Updating Ubuntu'
sudo apt-get -qq -y update

echo 'Installing git nfs-kernel-server'
sudo apt-get install -qq git nfs-kernel-server

echo 'Disabling swap'
sudo swapoff -a

# Comment out the swap line in fstab - permanently disables swap
sudo sed -i.bak '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# NFS exports
sudo mkdir -p /export/k8s
sudo mkdir -p /export/src
sudo chown nobody:nogroup /export/k8s
sudo bash -c 'cat << EOF >>/etc/exports
/export/k8s *(rw,sync,no_subtree_check,no_root_squash)
/export/src *(rw,sync,no_subtree_check,no_root_squash)
/export *(rw,fsid=0,no_subtree_check,sync)
EOF'

sudo service nfs-kernel-server restart

echo 'Adding name server'
sudo sed -i '1s/^/nameserver 8.8.8.8\n/' /etc/resolv.conf
--------------------------------------------------------------------------------
/vagrantprovision/sh/2_docker.sh:
--------------------------------------------------------------------------------
echo 'executing "2_docker.sh"'

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -qq -y update
sudo apt-get -qq -y install docker-ce=18.06.0~ce~3-0~ubuntu

# Install Docker Compose
curl -s -L https://github.com/docker/compose/releases/download/1.23.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose

echo 'Installing Docker Machine'
curl -s -L https://github.com/docker/machine/releases/download/v0.16.0/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&
chmod +x /tmp/docker-machine &&
sudo cp /tmp/docker-machine /usr/local/bin/docker-machine

echo 'Installing socat'
sudo apt-get -q install socat -y

# allow the vagrant user to talk to the Docker daemon
sudo usermod -aG docker vagrant
sudo gpasswd -a vagrant docker
--------------------------------------------------------------------------------
/vagrantprovision/Vagrantfile:
--------------------------------------------------------------------------------
Vagrant.configure("2") do |config|
  config.vm.box = "bento/ubuntu-16.04"
  config.vm.hostname = "qliksense-minikube"
  config.vm.network "private_network", ip: "192.168.56.234"
  config.vm.provider "virtualbox" do |v|
    v.name = "qliksense-minikube"
    v.linked_clone = true
    v.customize ["modifyvm", :id, "--memory", 6144]
    v.customize ["modifyvm", :id, "--cpus", 2]
    v.customize ["modifyvm", :id, "--vram", 64]
    v.customize ["modifyvm", :id, "--clipboard", "bidirectional"]
    v.customize ["modifyvm", :id, "--chipset", "ich9"]
  end
  config.vm.synced_folder "./files", "/vagrant/files"
  config.vm.provision "bootstrap", type: "shell", path: "./sh/1_bootstrap.sh"
  config.vm.provision "docker", type: "shell", path: "./sh/2_docker.sh"
  config.vm.provision "minikube", type: "shell", path: "./sh/3_minikube.sh"
  # config.vm.provision "qlik", type: "shell", path: "./sh/4_qlik.sh", privileged: false
end
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 Christof Schwarz

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/dockerdemo/readme.md:
--------------------------------------------------------------------------------
# Minimalistic Dockerized Node.js app
It outputs a hello-world message on HTTP port 8074.

## Test this container

To test it, use docker build, since we have a Dockerfile that describes what to build:
```
cd dockerdemo/
docker build .
```
This outputs the id of an image, which you will use in the next command:
```
docker run -d -p 8074:8074 {{imageId}}
```
-d runs the container detached ("in the background"); -p also publishes the port to the host.
```
curl localhost:8074
```
You should now see "Hello from Christof!".

## Stop the container

First, list the containers to get the container id using "docker ps". Search for the one with COMMAND "node app.js" or PORTS "0.0.0.0:8074->8074/tcp" and copy its CONTAINER ID.
```
docker ps
docker kill {{containerId}}
```
This stops the container again.
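docker kill terminates the container immediately; for a graceful shutdown, the standard alternative (not part of the original walkthrough) is
```
docker stop {{containerId}}
```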

## Push to Docker Hub

The following commands allow you to publish the image (which you built with "docker build"):
```
docker login
docker tag {{imageId}} {{yourDockerLogin}}/{{nameForTheImage}}
docker push {{yourDockerLogin}}/{{nameForTheImage}}
```
Note: This example is pushed to Docker Hub under the image name qristof/hello-christof
https://cloud.docker.com/u/qristof/repository/list
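Once pushed, anyone can pull and run the published image straight from Docker Hub (a quick check using the image name above):
```
docker run -d -p 8074:8074 qristof/hello-christof
curl localhost:8074
```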
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Qlik Sense Enterprise on Kubernetes

The attached .qvf app connects to the Kubernetes API to read the names of the pods and
- computes the actual deeplink to the log of the container(s) in each pod
- imports some of the logs directly into the app

Current version: __0.12__

Currently I import these logs: engine, edge-auth

Enable access to the Kubernetes API in your cluster with this command:
```
kubectl proxy --address='0.0.0.0' --port=8001 --accept-hosts='^*$'
```
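To confirm the proxy is reachable before wiring up the app, the pod list should come back as JSON (a hypothetical quick check; substitute one of the IPs found with the hint below):
```
curl http://<ip-address>:8001/api/v1/pods
```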
Hint: if you don't know the IP address under which the port can be accessed, type the command below. It is one of the shown IPs.
```
ifconfig | grep Bcast
```

* The hostname and port must also be set in the Load Script of the app (page Main).
* If the REST connector configuration is missing after the import, just create a new "GET" REST connection to any URL, for example https://jsonplaceholder.typicode.com/todos/1 , with no authentication. Then correct the statement on page "Main" of the script where it reads LIB CONNECT TO 'your_new_REST_conn';


![image](

More and more log tables will be added, like the one below

![image](
--------------------------------------------------------------------------------
/vagrantprovision/files/qliksense.yaml:
--------------------------------------------------------------------------------
# minikube
elastic-infra:
  nginx-ingress:
    controller:
      service:
        type: NodePort
        nodePorts:
          https: 32443
      extraArgs:
        report-node-internal-ip-address: ""
        default-ssl-certificate: default/elastic-infra-elastic-infra-tls-secret

hub:
  ingress:
    annotations:
      nginx.ingress.kubernetes.io/auth-signin: https://$host:32443/login?returnto=$request_uri

management-console:
  ingress:
    annotations:
      nginx.ingress.kubernetes.io/auth-signin: https://$host:32443/login?returnto=$request_uri

edge-auth:
  oidc:
    redirectUri: https://elastic.example:32443/login/callback
    enabled: true

# storage
mongodb:
  uri: mongodb://qlik:Qlik1234@mongo-mongodb.default.svc.cluster.local:27017/qsefe?ssl=false

qix-datafiles:
  persistence:
    accessMode: ReadWriteMany
    enabled: true
    existingClaim: pvc-qse
redis:
  persistence:
    accessMode: ReadWriteMany
    enabled: true
    existingClaim: pvc-qse
reporting:
  persistence:
    accessMode: ReadWriteMany
    enabled: true
    existingClaim: pvc-qse
resource-library:
  persistence:
    accessMode: ReadWriteMany
    enabled: true
    existingClaim: pvc-qse
temporary-contents:
  persistence:
    accessMode: ReadWriteMany
    enabled: true
    existingClaim: pvc-qse
engine:
  acceptEULA: "yes"
  persistence:
    accessMode: ReadWriteMany
    enabled: true
    existingClaim: pvc-qse
dcaas:
  dcaas-redis:
    persistence:
      accessMode: ReadWriteMany
      enabled: true
      existingClaim: pvc-qse
--------------------------------------------------------------------------------
/vagrantprovision/sh/3_minikube.sh:
--------------------------------------------------------------------------------
echo 'executing "3_minikube.sh"'
# this will install minikube, Kubernetes and Helm

sudo swapoff -a

echo 'Installing Kubernetes'

curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update -y -qq
apt-get install -y kubectl kubeadm kubelet

sudo curl -s -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x minikube && sudo mv minikube /usr/local/bin/

# https://github.com/kubernetes/minikube
echo 'Setting Developer Mode'
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=/home/vagrant
export CHANGE_MINIKUBE_NONE_USER=true
export KUBECONFIG=/home/vagrant/.kube/config

# we want these settings to stick each time we log in or start the machine
echo "export MINIKUBE_WANTUPDATENOTIFICATION=false" >> /home/vagrant/.bash_profile
echo "export MINIKUBE_WANTREPORTERRORPROMPT=false" >> /home/vagrant/.bash_profile
echo "export MINIKUBE_HOME=/home/vagrant" >> /home/vagrant/.bash_profile
echo "export CHANGE_MINIKUBE_NONE_USER=true" >> /home/vagrant/.bash_profile
echo "export KUBECONFIG=/home/vagrant/.kube/config" >> /home/vagrant/.bash_profile
echo "sudo swapoff -a" >> /home/vagrant/.bash_profile
echo "source <(kubectl completion bash)" >> /home/vagrant/.bash_profile
echo "sudo chown -R vagrant /home/vagrant/.kube" >> /home/vagrant/.bash_profile
echo "sudo chgrp -R vagrant /home/vagrant/.kube" >> /home/vagrant/.bash_profile

#sudo cp /root/.minikube $HOME/.minikube
echo "sudo chown -R vagrant /home/vagrant/.minikube" >> /home/vagrant/.bash_profile
echo "sudo chgrp -R vagrant /home/vagrant/.minikube" >> /home/vagrant/.bash_profile

mkdir /home/vagrant/.kube || true
touch /home/vagrant/.kube/config

echo 'Starting minikube local cluster'

minikube start --kubernetes-version v1.14.0 --memory 4096 --cpus=2 --vm-driver=none
# minikube start --kubernetes-version v1.14.0 --memory 4096 --cpus=2 --vm-driver=none --extra-config=apiserver.service-node-port-range=80-32767

#sudo cp /root/.kube $HOME/.kube
sudo chown -R vagrant /home/vagrant/.kube
sudo chgrp -R vagrant /home/vagrant/.kube

#sudo cp /root/.minikube $HOME/.minikube
sudo chown -R vagrant /home/vagrant/.minikube
sudo chgrp -R vagrant /home/vagrant/.minikube

echo 'Getting Helm'
curl -s https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
# changed on 21-05-2019: Helm version 2.14 doesn't work, getting an older version
./get_helm.sh --version v2.13.1
--------------------------------------------------------------------------------
/vagrantprovision/readme.md:
--------------------------------------------------------------------------------
| --> There is a new version available here: https://github.com/ChristofSchwarz/qseok_on_minikube |
|---|

# Installing Qlik Sense Enterprise on Minikube

This uses the free software Vagrant and VirtualBox to provision a virtual machine and installs Minikube (a non-production, single-node Kubernetes) in a way that allows Qlik Sense Enterprise on Kubernetes to run on top of it.

[Video: demonstrates the setup in 8 min, then shows the new console and hub]

## How to provision

You need

- A license from Qlik that is enabled for Multi-Cloud (the early name of Qlik Sense Enterprise for Kubernetes)
- Oracle VirtualBox 5.2 or later from https://www.virtualbox.org/ (turn Hyper-V off)
- Vagrant 2.2 or later from https://www.vagrantup.com/ (Note: if prompted where to install, leave the default C:\HashiCorp\Vagrant to avoid issues later!)

For simplicity, this installation of QSEoK will use the built-in user authentication (no 3rd-party Identity Provider).
You must access the portal at https://elastic.example:32443, so add (or update) this entry in your hosts file, found in C:\Windows\System32\drivers\etc:
```
192.168.56.234 elastic.example
```
After you have downloaded (and unzipped) this git repo, open a Command Prompt and navigate to the vagrantprovision folder.
Run these vagrant commands. We start from an Ubuntu Xenial base box and then provision what is stated in the "Vagrantfile" and the /sh subfolder:
```
cd .\vagrantprovision
vagrant up
```
Wait an hour or so for all the packages to deploy. To get a terminal window, type
```
vagrant ssh
```
Type "exit" to get back from the Ubuntu bash into your host system prompt.

If you want to stop and remove the VM properly (also if you want to restart the provisioning process), type
```
vagrant destroy
```
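Between a fresh "vagrant up" and a "vagrant destroy", the standard Vagrant lifecycle commands are also handy (generic Vagrant usage, not specific to this repo):
```
vagrant halt       # shut the VM down, keeping its state
vagrant up         # boot it again
vagrant provision  # re-run the provisioning scripts on a running VM
```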

## Configuration

The Vagrantfile contains the settings for this virtual machine. It uses
* Ubuntu 16.04
* 6 GB RAM
* 2 processors
* syncs the relative folder "/files" into the VM as the "/vagrant/files" folder (you will find the .yaml config files there)
* sets the root user to __vagrant__, password __vagrant__

The scripts which are processed right after the first boot of the machine are found in the subfolder 'sh'. They install NFS, Docker, Docker Machine, Minikube, kubeadm, kubectl and helm.

You will then have a blank but working Minikube. The essential parts to install a storageClass, a PVC, the NFS container, MongoDB, Qlik Sense init and Qlik Sense are found in the file 4_qlik.sh, which is not started automatically.

If you are doing this for the first time, I recommend copying/pasting every line of 4_qlik.sh one by one and trying to understand what it does. If you want a shortcut, execute the .sh file with
```
bash /vagrant/sh/4_qlik.sh
```
## First-Time Login, Enter License

Once all pods are running (check with: kubectl get pods) you can point your browser to https://elastic.example:32443/console . If everything is correct, you will be redirected to port :32123 for login. Choose one of the users below. Remember: the first one to log in to a fresh installation becomes the site administrator (TenantAdmin).

* harley@qlik.example, password "Password1!"
* barb@qlik.example, password "Password1!"
* sim@qlik.example, password "Password1!"
* ... 7 more users, see https://support.qlik.com/articles/000076585

Next you will get back to https://elastic.example:32443/console, where you'll have a box to enter the site license (a JWT token you got from your Qlik representative). Once it is applied, you may see "unauthorized" and may have to re-login. That happens only once, after the site license has been set.

https://elastic.example:32443/explore will show the new hub. You can create or upload apps there.

Enjoy QSEonK8s

## Known Issues

- The first time you try to enter the console at https://elastic.example:32443/console the browser gets redirected to https://elastic.example/console/ and fails. -> Enter the port :32443 again and it will work.
- This configuration has issues accessing the scheduled tasks (https://elastic.example:32443/console/scheduler/).
- Logout returns "OK" but doesn't remove the cookie, so the session is still alive. Use incognito mode or delete the cookie in your browser's developer tools.
- The updated Helm as of May 15, 2019 doesn't work with this setup; I pinned the version to be pulled to v2.13.1.
--------------------------------------------------------------------------------