├── .circleci
│   └── config.yml
├── .editorconfig
├── .gitattributes
├── .gitignore
├── LICENSE
├── README.md
├── charts
│   ├── nfs
│   │   ├── .helmignore
│   │   ├── Chart.yaml
│   │   ├── templates
│   │   │   ├── _helpers.tpl
│   │   │   ├── deployment.yaml
│   │   │   ├── pv.yaml
│   │   │   ├── pvc.yaml
│   │   │   ├── sc.yaml
│   │   │   └── service.yaml
│   │   └── values.yaml
│   ├── qlik-core
│   │   ├── .helmignore
│   │   ├── Chart.yaml
│   │   ├── templates
│   │   │   ├── NOTES.txt
│   │   │   ├── _helpers.tpl
│   │   │   ├── engine-access-control-cfg.yaml
│   │   │   ├── engine-deployment.yaml
│   │   │   ├── engine-hpa-custom.yaml
│   │   │   ├── engine-prestop-hook.yaml
│   │   │   ├── engine-service.yaml
│   │   │   ├── ingress.yaml
│   │   │   ├── license-service-deployment.yaml
│   │   │   ├── license-service-service.yaml
│   │   │   ├── qix-session-deployment.yaml
│   │   │   └── qix-session-service.yaml
│   │   └── values.yaml
│   └── readme.md
├── config
│   ├── grafana-dashboards-cfg.yaml
│   ├── grafana-datasources-cfg.yaml
│   └── rbac-config.yaml
├── deployment-test
│   ├── package-lock.json
│   ├── package.json
│   └── verify-deployment.spec.js
├── doc
│   ├── Shared-Africa-Urbanization.qvf
│   └── default
│       └── 739db838-dd28-4078-8715-ee9cfcc06c29
│           ├── appobjects
│           │   ├── appobjects.qvf
│           │   └── appobjects.qvf.lock
│           └── master
│               ├── 739db838-dd28-4078-8715-ee9cfcc06c29.qvf
│               └── 739db838-dd28-4078-8715-ee9cfcc06c29.qvf.lock
├── renovate.json
├── run.sh
├── settings.config
└── values
    ├── grafana.yaml
    ├── nginx-ingress.yaml
    └── prom-adapter.yaml
--------------------------------------------------------------------------------
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | jobs:
3 |   build:
4 |     docker:
5 |       - image: google/cloud-sdk
6 |     environment:
7 |       TEST_CLUSTER_ZONE: "europe-west3-b"
8 |       ACCEPT_EULA: "yes"
9 |     working_directory: ~/scaling
10 |     steps:
11 |       - checkout
12 |       - run:
13 |           name: Store Service Accounts
14 |           command: |
15 |             echo $GCLOUD_SERVICE_KEY > ${HOME}/gcloud-service-key.json
16 |             echo $GCLOUD_SERVICE_KEY_PROD > ${HOME}/gcloud-service-key-prod.json
17 |       - run:
18 |           name: Configure gcloud
19 |           command: |
20 |             gcloud auth activate-service-account --key-file=${HOME}/gcloud-service-key.json
21 |             gcloud --quiet config set project dev-qlik-core
22 |             gcloud --quiet config set compute/zone $TEST_CLUSTER_ZONE
23 |       - run:
24 |           name: Deploy test cluster
25 |           environment:
26 |             GCLOUD_NUM_NODES: 2
27 |           command: |
28 |             curl https://storage.googleapis.com/kubernetes-helm/helm-v2.13.1-linux-amd64.tar.gz | tar xz
29 |             chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/.
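            # helm v2 client is now on PATH; run.sh uses it (with Tiller, see config/rbac-config.yaml) to deploy the charts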
30 | 31 | # bootstrap cluster & nodes 32 | GCLOUD_ZONE=$TEST_CLUSTER_ZONE K8S_CLUSTER=cci-cluster-$CIRCLE_BUILD_NUM DISK_NAME=$DISK_NAME_TEST-$CIRCLE_BUILD_NUM K8S_VERSION="1.14" ./run.sh bootstrap 33 | 34 | # deploy configurations & services 35 | GCLOUD_ZONE=$TEST_CLUSTER_ZONE K8S_CLUSTER=cci-cluster-$CIRCLE_BUILD_NUM DISK_NAME=$DISK_NAME_TEST-$CIRCLE_BUILD_NUM ./run.sh upgrade 36 | 37 | # wait for all deployments to rollout 38 | for deployment in `kubectl get deployment -o name`; do echo `kubectl rollout status $deployment`; done 39 | - run: 40 | name: Install prerequisites and run sanity tests towards the test cluster 41 | working_directory: ~/scaling/deployment-test 42 | command: | 43 | curl -sL https://deb.nodesource.com/setup_10.x | bash - 44 | apt-get -y install nodejs npm 45 | npm install 46 | apt-get -y install jq 47 | export TEST_CLUSTER_IP="$(kubectl get service nginx-ingress-controller -o json | jq -r '.status.loadBalancer.ingress[0].ip')" 48 | npm run test 49 | - run: 50 | name: Remove test cluster 51 | command: | 52 | GCLOUD_ZONE=$TEST_CLUSTER_ZONE K8S_CLUSTER=cci-cluster-$CIRCLE_BUILD_NUM DISK_NAME=$DISK_NAME_TEST-$CIRCLE_BUILD_NUM ./run.sh remove-cluster 53 | when: always 54 | - run: 55 | name: Remove test disk 56 | command: GCLOUD_ZONE=$TEST_CLUSTER_ZONE K8S_CLUSTER=cci-cluster-$CIRCLE_BUILD_NUM DISK_NAME=$DISK_NAME_TEST-$CIRCLE_BUILD_NUM ./run.sh remove-disks 57 | when: always 58 | - run: 59 | name: Deploy qlik-core scaling example if on master 60 | command: | 61 | if [ "${CIRCLE_BRANCH}" == "master" ]; then 62 | gcloud auth activate-service-account --key-file=${HOME}/gcloud-service-key-prod.json 63 | gcloud --quiet config set project dev-prod-qlik-core 64 | gcloud --quiet container clusters get-credentials core-scaling --zone=europe-west3-a 65 | echo $NGINX_CONF > ./values/nginx-ingress.yaml 66 | DISK_NAME=$DISK_NAME_PROD ./run.sh upgrade 67 | POD=$(kubectl get pods --selector="role=nfs-server" -o=jsonpath='{.items[0].metadata.name}') 68 | kubectl exec $POD -- chown -R 1910:1910 /exports/default 69 | fi 70 | - run: 71 | name: Run the sanity tests toward the prod cluster (https://urbanization.qlikcore.com) 72 | working_directory: ~/scaling/deployment-test 73 | command: | 74 | if [ "${CIRCLE_BRANCH}" == "master" ]; then 75 | for deployment in `kubectl get deployment -o name`; do echo `kubectl rollout status $deployment`; done 76 | export TEST_CLUSTER_IP="$(kubectl get service nginx-ingress-controller -o json | jq -r '.status.loadBalancer.ingress[0].ip')" 77 | #note TEST_CLUSTER_IP will here get the IP of the live deployed cluster. 78 | npm run test 79 | fi 80 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: http://EditorConfig.org 2 | 3 | # Top-most EditorConfig file 4 | root = true 5 | 6 | # Source files 7 | [*] 8 | charset = utf-8 9 | indent_style = space 10 | indent_size = 2 11 | end_of_line = lf 12 | insert_final_newline = true 13 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Set the default behavior, in case people don't have core.autocrlf set. 2 | * text=auto 3 | 4 | # Declare files that will always have LF line endings on checkout. 
5 | * text eol=lf
6 | 
7 | *.qvf binary
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 | 
3 | Copyright (c) 2017-present QlikTech International AB
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 | 
23 | Files in /custom-metrics-api are based on https://github.com/stefanprodan/k8s-prom-hpa, licensed MIT.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Autoscaling Qlik Core on Google Kubernetes Engine
2 | 
3 | *As of 1 July 2020, Qlik Core is no longer available to new customers. No further maintenance will be done in this repository.*
4 | 
5 | This use case shows how you can set up a Qlik Core application in a Google Kubernetes Engine (GKE) cluster so that you can easily scale the application up or down to meet user demand.
6 | 
7 | ## Prerequisites
8 | 
9 | * Set up the Google Cloud CLI (`gcloud`) by following the [Quickstart guide](https://cloud.google.com/sdk/docs/quickstarts).
10 | 
11 | * Modify `settings.config` for your project.
12 | 
13 |   !!! Note
14 |       You must include your license serial number, license control number, and Google Cloud project name in the `settings.config` file.
15 |       Add each of these values after the ":-" on its respective row.
16 | 
17 |       Example: `GCLOUD_PROJECT="${GCLOUD_PROJECT:-YOUR-GCLOUD-PROJECT-NAME-HERE}"`
18 | 
19 | * Accept the EULA by modifying the `./qlik-core/engine-deployment.yaml` file.
20 | 
21 | * Change the maximum number of sessions on an engine from 500 to 20 by changing `SESSIONS_PER_ENGINE_THRESHOLD` in the `./qlik-core/qix-session-deployment.yaml` file.
22 | 
23 | * Change when the HPA starts scaling engines by changing `qix_active_sessions` from 250 to 10 in the `./qlik-core/engine-hpa-custom.yaml` file.
24 | 
25 | * Install the [jq](https://stedolan.github.io/jq/) JSON processor to make the printouts more readable.
26 | 
27 | ## Issues
28 | 
29 | Before reporting a new issue, check whether it is already covered in [Known issues](#known-issues).
30 | 
31 | ## Getting started
32 | 
33 | There are two ways to get started:
34 | 
35 | * By following the step-by-step guide below.
36 | 
37 |   If you are unfamiliar with GKE, we recommend that you follow the step-by-step guide.
38 | 
39 | * By deploying everything with the following command:
40 | 
41 |   ```bash
42 |   ./run.sh deploy
43 |   ```
44 | 
45 |   If you choose this option, you can skip to [Add load to the cluster](#add-load-to-the-cluster).
46 | 
47 | ## Create a GKE cluster
48 | 
49 | !!! Note
50 |     There is often a deployment delay before the services are running. If a command fails,
51 |     wait 30 seconds and try again.
52 | 
53 | Create the GKE cluster with the following command:
54 | 
55 | ```bash
56 | ./run.sh create
57 | ```
58 | 
59 | This command runs the script that creates the GKE cluster and its volumes, and it takes some time to complete.
60 | 
61 | After the script has finished, you should be able to query the Kubernetes cluster. Run the following commands to see the node and pod metrics:
62 | 
63 | * Node metrics:
64 | 
65 |   ```bash
66 |   kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes" | jq .
67 |   ```
68 | 
69 | * Pod metrics:
70 | 
71 |   ```bash
72 |   kubectl get --raw "/apis/metrics.k8s.io/v1beta1/pods" | jq .
73 |   ```
74 | 
75 | ### Set up a custom metrics server
76 | 
77 | You can scale up or down based on the built-in metrics (CPU and memory). However, to scale based on custom metrics, you need to add two components.
78 | 
79 | The first component collects metrics from your applications and stores them in [Prometheus](https://prometheus.io), a time series database.
80 | 
81 | The second component, the [k8s-prometheus-adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter), extends the Kubernetes custom metrics API: the adapter queries Prometheus and exposes the custom metrics through the Kubernetes custom metrics API.
82 | 
83 | Do the following:
84 | 
85 | 1. Create the namespaces for metrics and Ingress:
86 | 
87 |    ```bash
88 |    kubectl create -f ./namespaces.yaml
89 |    ```
90 | 
91 | 1. Increase the privileges to deploy Prometheus:
92 | 
93 |    ```bash
94 |    kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud config get-value core/account)
95 |    ```
96 | 
97 | 1. Deploy Prometheus:
98 | 
99 |    ```bash
100 |    kubectl create -f ./prometheus
101 |    ```
102 | 
103 | 1. Deploy the Prometheus custom metrics API adapter:
104 | 
105 |    ```bash
106 |    kubectl create -f ./custom-metrics-api
107 |    ```
108 | 
109 | 1. Once the pod is in a ready state, you can list the custom metrics provided by Prometheus:
110 | 
111 |    ```bash
112 |    kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | jq .
113 |    ```
114 | 
115 | ### Ingress routing
116 | 
117 | Deploy the Ingress controller, which lets you reach the qix-session service and create sessions against the engines in the cluster.
118 | 
119 | ```bash
120 | kubectl create -f ./ingress
121 | ```
122 | 
123 | ### NFS volumes
124 | 
125 | Deploy the NFS server, which gives the engine pods read/write access to the shared volumes.
126 | 
127 | ```bash
128 | kubectl create -f ./nfs-volumes
129 | ```
130 | 
131 | ## Add apps to the engine
132 | 
133 | Run the seeding script to load the documents in the `./doc` folder into the cluster.
134 | 
135 | ```bash
136 | ./run.sh populate-docs
137 | ```
138 | 
139 | ## Autoscaling based on custom metrics
140 | 
141 | Now that the GKE cluster is set up and the documents are loaded into it, you can deploy Qlik Core and start scaling based on Qlik Associative Engine active sessions.
142 | 
143 | 1. Add a ClusterRole for the Mira service:
144 | 
145 |    ```bash
146 |    kubectl create -f ./rbac-config.yaml
147 |    ```
148 | 
149 | 1. Add a configmap with your license data:
150 | 
151 |    ```bash
152 |    kubectl create configmap license-data --from-literal LICENSE_KEY=YOUR-LICENSE-KEY
153 |    ```
154 | 
155 | 1. Deploy Qlik Core:
156 | 
157 |    ```bash
158 |    kubectl create -f ./qlik-core
159 |    ```
160 | 
161 |    The `engine` service exposes a custom metric named `qix_active_sessions`.
162 |    The Prometheus adapter removes the `_total` suffix and marks the metric as a counter metric.
163 | 
164 | 1. Get the total number of active Qlik Associative Engine sessions from the custom metrics API:
165 | 
166 |    ```bash
167 |    kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/qix_active_sessions" | jq .
168 |    ```
169 | 
170 | 1. Check that the Horizontal Pod Autoscaler (HPA), which is responsible for the scaling, is active and that you have 0 sessions on your engines:
171 | 
172 |    ```bash
173 |    kubectl get hpa
174 |    ```
175 | 
176 | ## Monitor the cluster
177 | 
178 | Before you add any load to the cluster, deploy Grafana for monitoring.
179 | 
180 | 1. Deploy Grafana:
181 | 
182 |    ```bash
183 |    kubectl create -f ./grafana
184 |    ```
185 | 
186 | 1. Expose the Grafana web server on a local port:
187 | 
188 |    ```bash
189 |    ./run.sh grafana
190 |    ```
191 | 
192 | You can view Grafana at http://localhost:3000.
193 | 
194 | ## Add load to the cluster
195 | 
196 | Now that Grafana is set up, apply some load to the `engine` service with [core-qix-session-workout](https://github.com/qlik-oss/core-qix-session-workout).
197 | 
198 | First, clone that repository and go to its directory.
199 | 
200 | Next, you need the external IP address of the nginx ingress controller, which acts as the entry point to the cluster.
201 | 
202 | 1. Get the external IP address of the nginx ingress controller:
203 | 
204 |    ```bash
205 |    kubectl get service ingress-nginx --namespace ingress-nginx
206 |    ```
207 | 
208 | 1. Copy the external IP address into the `host` field of the `configs/scaling.json` file.
209 | 
210 | Now you can start putting some load on the engines:
211 | 
212 | ```bash
213 | node main.js -c configs/scaling.json -s scenarios/random-selection-scenario.js
214 | ```
215 | 
216 | ### Results
217 | 
218 | This creates 50 sessions, one new session every 10 seconds, with random selections made every 2 seconds. You can change the settings in the `configs/scaling.json` file if you want to scale to more sessions or change the speed at which new sessions are added.
219 | 
220 | The HPA is configured to start scaling out new engine pods when the average number of sessions per engine exceeds 10. The session service is configured to place a maximum of 20 sessions on one engine, and the engine deployment itself is configured to run one engine per node.
221 | 
222 | Depending on how many nodes you already have, the HPA might put a new pod on an existing node that is not yet running an engine instance, or it might need to spin up one or several new nodes before it can deploy the engine pod.
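
While the load is running, it can be useful to watch the scaling happen. A minimal sketch (in this setup, the HPA and the deployment are both named `engine`, and the engine pods carry the `app.kubernetes.io/name=engine` label):

```bash
# watch the HPA react to the qix_active_sessions metric
kubectl get hpa engine --watch

# in separate terminals, watch engine pods and nodes being added
kubectl get pods -l app.kubernetes.io/name=engine -w
kubectl get nodes -w
```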
223 | 
224 | When all 50 sessions have been loaded onto the engines, you can stop `core-qix-session-workout` by pressing `ctrl + c` in the terminal where it is running. The HPA will then scale the deployment back down to its initial number of replicas and nodes.
225 | 
226 | You may have noticed that the autoscaler doesn't react immediately to usage spikes. This is because the metrics sync happens by default only once every **30 seconds**. Scaling up or down can
227 | also only happen if there was no rescaling within the last **3-5 minutes**, with different timers for scaling the pods and the nodes. As a result, the HPA avoids rapidly executing conflicting decisions.
228 | 
229 | ## Conclusions
230 | 
231 | Not all systems can meet their SLAs by relying on CPU and memory usage metrics alone. Most web and mobile back ends require autoscaling based on requests per second to handle traffic bursts.
232 | 
233 | For ETL apps, autoscaling can be triggered by, for example, the job queue length exceeding some threshold. By instrumenting your applications with Prometheus and exposing the right metrics for autoscaling, you can fine-tune them to better handle traffic bursts and ensure high availability.
234 | 
235 | ## Removing the cluster
236 | 
237 | Remove the cluster with:
238 | 
239 | ```bash
240 | ./run.sh remove
241 | ```
242 | 
243 | ### Known issues
244 | 
245 | * If you are having problems with the nginx deployment, you might have used up all your public IPs. You can release IPs to make them available again here: https://console.cloud.google.com/net-services/loadbalancing/loadBalancers/list
246 | 
247 | * If you are having problems deploying Prometheus, it could be a problem with your username. The error message should contain your actual username (case sensitive). Use that username and run the following command before redeploying Prometheus:
248 | 
249 |   ```bash
250 |   kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=
251 |   ```
252 | 
253 | * If you are running bash on Windows, you might have an issue with incorrect paths when querying kubectl for metrics. Use CMD instead of bash on Windows.
254 | 
255 | * If you are running on Windows, `gcloud` might complain about Python even if you have Python 2.7 installed. A fix is to rename the binary from `python.exe` to `python2.exe`.
256 | 
257 | * If the cluster (API server) is unresponsive when you add load to your cluster, this is because the Kubernetes master node is being resized to match the autoscaled cluster. To avoid this, you have to deploy a regional cluster. Read more here: https://cloudplatform.googleblog.com/2018/06/Regional-clusters-in-Google-Kubernetes-Engine-are-now-generally-available.html
258 | 
259 | ### General Notes
260 | 
261 | * We have specified Kubernetes requests and limits for our services. These, especially the values for the engine, should be tweaked if you use another node size.
--------------------------------------------------------------------------------
/charts/nfs/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /charts/nfs/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: Chart that helps provision shared file storage 4 | name: nfs 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /charts/nfs/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "nfs.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "nfs.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "nfs.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /charts/nfs/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "nfs.fullname" . }} 5 | labels: 6 | app: {{ template "nfs.fullname" . }} 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | role: nfs-server 12 | template: 13 | metadata: 14 | labels: 15 | role: nfs-server 16 | spec: 17 | containers: 18 | - name: nfs-server 19 | image: gcr.io/google_containers/volume-nfs:0.8 20 | resources: 21 | requests: 22 | memory: "400Mi" 23 | limits: 24 | memory: "400Mi" 25 | ports: 26 | - name: nfs 27 | containerPort: 2049 28 | - name: mountd 29 | containerPort: 20048 30 | - name: rpcbind 31 | containerPort: 111 32 | securityContext: 33 | privileged: true 34 | volumeMounts: 35 | - mountPath: /exports 36 | name: mypvc 37 | volumes: 38 | - name: mypvc 39 | gcePersistentDisk: 40 | pdName: {{ .Values.persistence.diskName }} 41 | fsType: ext4 42 | -------------------------------------------------------------------------------- /charts/nfs/templates/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: {{ template "nfs.fullname" . 
}} 5 | spec: 6 | capacity: 7 | storage: {{ .Values.persistence.size | quote }} 8 | accessModes: 9 | - {{ .Values.persistence.accessMode | quote }} 10 | nfs: 11 | server: nfs-server.default.svc.cluster.local 12 | path: "/" 13 | -------------------------------------------------------------------------------- /charts/nfs/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ template "nfs.fullname" . }} 6 | spec: 7 | accessModes: 8 | - {{ .Values.persistence.accessMode | quote }} 9 | resources: 10 | requests: 11 | storage: {{ .Values.persistence.size | quote }} 12 | {{- if .Values.persistence.storageClass }} 13 | {{- if (eq "-" .Values.persistence.storageClass) }} 14 | storageClassName: "" 15 | {{- else }} 16 | storageClassName: {{ .Values.persistence.storageClass }} 17 | {{- end }} 18 | {{- end }} 19 | {{- end }} -------------------------------------------------------------------------------- /charts/nfs/templates/sc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.persistence.internalStorageClass.enabled -}} 2 | kind: StorageClass 3 | apiVersion: storage.k8s.io/v1 4 | metadata: 5 | name: {{ .Values.persistence.storageClass }} 6 | {{ toYaml .Values.persistence.internalStorageClass.definition }} 7 | {{- end }} -------------------------------------------------------------------------------- /charts/nfs/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nfs-server 5 | spec: 6 | ports: 7 | - name: nfs 8 | port: 2049 9 | - name: mountd 10 | port: 20048 11 | - name: rpcbind 12 | port: 111 13 | selector: 14 | role: nfs-server 15 | -------------------------------------------------------------------------------- /charts/nfs/values.yaml: -------------------------------------------------------------------------------- 1 | 2 | ## Persistence configuration 3 | ## 4 | persistence: 5 | enabled: true 6 | autoSave: 7 | enabled: true 8 | interval: 5 9 | 10 | diskName: my-disk 11 | ## engine Persistent Volume Storage Class 12 | ## If defined, storageClassName: 13 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 14 | ## If undefined (the default) or set to null, no storageClassName spec is 15 | ## set, choosing the default provisioner. (gp2 on AWS, standard on 16 | ## GKE, AWS & OpenStack) 17 | ## 18 | storageClass: "-" 19 | 20 | ## Persistence access mode 21 | ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes-1 22 | accessMode: ReadWriteMany 23 | 24 | ## Persistence volume default size 25 | size: 5Gi 26 | 27 | ## To enable an externally defined persistent volume claim set the name of the claim. 28 | ## If configured this chart will not create a persistent volume claim. 29 | # existingClaim: 30 | 31 | internalStorageClass: 32 | ## Normally the storage class should be created outside this helm chart 33 | ## If we want to deploy a storage class as part of the helm chart 34 | ## - Provide a storageClassName above. 35 | ## - set enabled true 36 | ## - provide a storage class definition. 37 | 38 | ## If enabled storage class will be configured as part of the chart. 39 | ## If not enabled an external storageclass can be used by providing storageClassName above. 
40 |     enabled: false
41 | 
42 |     ## Storage class definition
43 |     definition: {}
44 |     ## Storage classes have a provisioner that determines what volume plugin is used for provisioning PVs.
45 |     ## This field must be specified.
46 |     ## See https://kubernetes.io/docs/concepts/storage/storage-classes/
47 |     # provisioner: kubernetes.io/no-provisioner
48 | 
49 |     ## Reclaim policy should normally be set to Retain to avoid losing data when deleting this helm chart.
50 |     # reclaimPolicy: Retain
51 | 
52 |     ## Persistent Volumes that are dynamically created by a storage class will have the mount options specified
53 |     ## in the mountOptions field of the class.
54 |     # mountOptions: {}
55 | 
56 |     ## Storage classes have parameters that describe volumes belonging to the storage class.
57 |     ## Different parameters may be accepted depending on the provisioner.
58 |     # parameters: {}
59 | 
--------------------------------------------------------------------------------
/charts/qlik-core/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *~
18 | # Various IDEs
19 | .project
20 | .idea/
21 | *.tmproj
22 | .vscode/
--------------------------------------------------------------------------------
/charts/qlik-core/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | appVersion: "1.0"
3 | description: A Helm chart for Kubernetes
4 | name: qlik-core
5 | version: 0.1.0
--------------------------------------------------------------------------------
/charts/qlik-core/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | {{- if eq (printf "%s" .Values.engine.acceptEULA) "no" }}
2 | ################################################################################################
3 | ####  ERROR: You did not agree to the EULA in your 'helm install' call.                     ####
4 | ####  Try 'helm upgrade --install --set engine.acceptEULA=yes qlik-core ./helm/qlik-core'.  ####
5 | ################################################################################################
6 | {{- end }}
--------------------------------------------------------------------------------
/charts/qlik-core/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/* vim: set filetype=mustache: */}}
2 | {{/*
3 | Expand the name of the chart.
4 | */}}
5 | {{- define "qlik-core.name" -}}
6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
7 | {{- end -}}
8 | 
9 | {{/*
10 | Create a default fully qualified app name.
11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
12 | If release name contains chart name it will be used as a full name.
13 | */}} 14 | {{- define "qlik-core.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "qlik-core.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/engine-access-control-cfg.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Values.configMapAC.name }} 5 | data: 6 | read_only.txt: | 7 | resource._actions = {"read"} 8 | allow_all.txt: | 9 | resource._actions = {"*"} 10 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/engine-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Values.engine.name }} 5 | labels: 6 | app.kubernetes.io/name: {{ .Values.engine.name }} 7 | helm.sh/chart: {{ include "qlik-core.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.engine.replicas }} 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/name: {{ .Values.engine.name }} 15 | app.kubernetes.io/instance: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/name: {{ .Values.engine.name }} 20 | app.kubernetes.io/instance: {{ .Release.Name }} 21 | qix-engine: "" 22 | {{- with .Values.engine.annotations }} 23 | annotations: 24 | {{- toYaml . | nindent 8 }} 25 | {{- end }} 26 | spec: 27 | affinity: 28 | podAntiAffinity: 29 | requiredDuringSchedulingIgnoredDuringExecution: 30 | - labelSelector: 31 | matchExpressions: 32 | - key: app.kubernetes.io/name 33 | operator: In 34 | values: 35 | - engine 36 | topologyKey: "kubernetes.io/hostname" 37 | terminationGracePeriodSeconds: 600 38 | containers: 39 | - name: {{ .Values.engine.name }} 40 | image: "{{ .Values.engine.image.repository }}:{{ .Values.engine.image.tag }}" 41 | imagePullPolicy: {{ .Values.engine.image.pullPolicy }} 42 | args: 43 | - -S 44 | - AcceptEULA={{ .Values.engine.acceptEULA }} 45 | - -S 46 | - LicenseServiceUrl=http://license-service:9200 47 | - -S 48 | - DocumentDirectory=/doc 49 | - -S 50 | - EnableABAC=1 51 | - -S 52 | - SystemAllowRulePath=/rules/read_only.txt 53 | resources: 54 | {{- toYaml .Values.engine.resources | nindent 12 }} 55 | ports: 56 | - containerPort: 9076 57 | - containerPort: 9090 58 | {{- with .Values.engine.volumeMounts }} 59 | volumeMounts: 60 | {{- toYaml . 
| nindent 12 }} 61 | {{- end }} 62 | lifecycle: 63 | preStop: 64 | exec: 65 | command: ["sh", "-c", "/engine-prestop-hook/engine-prestop-hook.sh"] 66 | volumes: 67 | - name: engine-prestop-hook 68 | configMap: 69 | name: engine-prestop-hook 70 | defaultMode: 0755 71 | - name: app-nfs 72 | persistentVolumeClaim: 73 | claimName: nfs-server 74 | - name: access-control 75 | configMap: 76 | name: access-control 77 | initContainers: 78 | - name: volume-mount-owner 79 | image: "alpine:3.5" 80 | command: ["sh", "-c", "chown -R 1910:1910 /doc"] 81 | volumeMounts: 82 | - name: app-nfs 83 | mountPath: /doc 84 | imagePullSecrets: 85 | - name: dockerhub 86 | {{- with .Values.nodeSelector }} 87 | nodeSelector: 88 | {{- toYaml . | nindent 8 }} 89 | {{- end }} 90 | {{- with .Values.affinity }} 91 | affinity: 92 | {{- toYaml . | nindent 8 }} 93 | {{- end }} 94 | {{- with .Values.tolerations }} 95 | tolerations: 96 | {{- toYaml . | nindent 8 }} 97 | {{- end }} 98 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/engine-hpa-custom.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: engine 5 | namespace: default 6 | labels: 7 | app.kubernetes.io/name: engine 8 | helm.sh/chart: {{ include "qlik-core.chart" . }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | spec: 12 | scaleTargetRef: 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | name: engine 16 | minReplicas: {{ .Values.engine.hpa.minReplicas }} 17 | maxReplicas: {{ .Values.engine.hpa.maxReplicas }} 18 | metrics: 19 | - type: Pods 20 | pods: 21 | metricName: qix_active_sessions 22 | targetAverageValue: {{ .Values.engine.hpa.targetAverageValue }} 23 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/engine-prestop-hook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Values.configMapPH.name }} 5 | data: 6 | engine-prestop-hook.sh: |- 7 | while true 8 | do 9 | sessionCount=$(curl --silent localhost:9090/metrics | grep -oE '(qix_active_sessions\ [0-9]+)' | grep -oE '([0-9]+)') 10 | if [ "$sessionCount" -eq "0" ] 11 | then 12 | exit 0 13 | fi 14 | sleep 1 15 | done 16 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/engine-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.engine.name }} 5 | labels: 6 | app.kubernetes.io/name: {{ .Values.engine.name }} 7 | helm.sh/chart: {{ include "qlik-core.chart" . 
}} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: 9076 14 | protocol: TCP 15 | name: qix 16 | - port: 9090 17 | protocol: TCP 18 | name: prometheus 19 | selector: 20 | app.kubernetes.io/name: {{ .Values.engine.name }} 21 | app.kubernetes.io/instance: {{ .Release.Name }} 22 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "qlik-core.fullname" . -}} 3 | {{- $ingressPaths := .Values.ingress.paths -}} 4 | apiVersion: extensions/v1beta1 5 | kind: Ingress 6 | metadata: 7 | name: {{ $fullName }} 8 | labels: 9 | app.kubernetes.io/name: {{ include "qlik-core.name" . }} 10 | helm.sh/chart: {{ include "qlik-core.chart" . }} 11 | app.kubernetes.io/instance: {{ .Release.Name }} 12 | app.kubernetes.io/managed-by: {{ .Release.Service }} 13 | {{- with .Values.ingress.annotations }} 14 | annotations: 15 | {{- toYaml . | nindent 4 }} 16 | {{- end }} 17 | spec: 18 | {{- if .Values.ingress.tls }} 19 | tls: 20 | {{- range .Values.ingress.tls }} 21 | - hosts: 22 | {{- range .hosts }} 23 | - {{ . | quote }} 24 | {{- end }} 25 | secretName: {{ .secretName }} 26 | {{- end }} 27 | {{- end }} 28 | rules: 29 | {{- range .Values.ingress.hosts }} 30 | - host: {{ . | quote }} 31 | http: 32 | paths: 33 | {{- range $ingressPaths }} 34 | - path: {{ . }} 35 | backend: 36 | serviceName: {{ $fullName }} 37 | servicePort: http 38 | {{- end }} 39 | {{- end }} 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/license-service-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Values.licenseService.name }} 5 | labels: 6 | app.kubernetes.io/name: {{ .Values.licenseService.name }} 7 | helm.sh/chart: {{ include "qlik-core.chart" . 
}} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/name: {{ .Values.licenseService.name }} 15 | app.kubernetes.io/instance: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/name: {{ .Values.licenseService.name }} 20 | app.kubernetes.io/instance: {{ .Release.Name }} 21 | annotations: 22 | prometheus.io/scrape: 'true' 23 | spec: 24 | containers: 25 | - name: {{ .Values.licenseService.name }} 26 | image: "{{ .Values.licenseService.image.repository }}:{{ .Values.licenseService.image.tag }}" 27 | resources: 28 | {{- toYaml .Values.licenseService.resources | nindent 10 }} 29 | imagePullPolicy: {{ .Values.licenseService.image.imagePullPolicy }} 30 | ports: 31 | - containerPort: {{ .Values.licenseService.port }} 32 | env: 33 | - name: LICENSE_KEY 34 | valueFrom: 35 | secretKeyRef: 36 | name: license-data 37 | key: LICENSE_KEY 38 | imagePullSecrets: 39 | - name: dockerhub 40 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/license-service-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.licenseService.name }} 5 | labels: 6 | app.kubernetes.io/name: {{ .Values.licenseService.name }} 7 | helm.sh/chart: {{ include "qlik-core.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: {{ .Values.licenseService.port }} 14 | targetPort: {{ .Values.licenseService.port }} 15 | protocol: TCP 16 | name: http 17 | selector: 18 | app.kubernetes.io/name: {{ .Values.licenseService.name }} 19 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/qix-session-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Values.qix.name }} 5 | labels: 6 | app.kubernetes.io/name: {{ .Values.qix.name }} 7 | helm.sh/chart: {{ include "qlik-core.chart" . 
}} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | replicas: {{ .Values.qix.replicas }} 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/name: {{ .Values.qix.name }} 15 | app.kubernetes.io/instance: {{ .Release.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | app.kubernetes.io/name: {{ .Values.qix.name }} 20 | app.kubernetes.io/instance: {{ .Release.Name }} 21 | annotations: 22 | prometheus.io/scrape: 'true' 23 | prometheus.io/path: '/v1/metrics' 24 | spec: 25 | containers: 26 | - name: {{ .Values.qix.name }} 27 | image: "{{ .Values.qix.image.repository }}:{{ .Values.qix.image.tag }}" 28 | resources: 29 | {{- toYaml .Values.qix.resources | nindent 10 }} 30 | env: 31 | - name: SESSIONS_PER_ENGINE_THRESHOLD 32 | value: "{{ .Values.qix.sessionsThreshold }}" 33 | - name: SESSION_STRATEGY 34 | value: "weighted" 35 | imagePullPolicy: IfNotPresent 36 | ports: 37 | - containerPort: {{ .Values.qix.port }} 38 | imagePullSecrets: 39 | - name: dockerhub 40 | -------------------------------------------------------------------------------- /charts/qlik-core/templates/qix-session-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Values.qix.name }} 5 | labels: 6 | app.kubernetes.io/name: {{ .Values.qix.name }} 7 | helm.sh/chart: {{ include "qlik-core.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: {{ .Values.qix.port }} 14 | targetPort: {{ .Values.qix.port }} 15 | protocol: TCP 16 | name: http 17 | selector: 18 | app.kubernetes.io/name: {{ .Values.qix.name }} 19 | -------------------------------------------------------------------------------- /charts/qlik-core/values.yaml: -------------------------------------------------------------------------------- 1 | engine: 2 | name: engine 3 | ## Accept EULA 4 | ## This needs to be changed to start engine 5 | acceptEULA: "no" 6 | replicas: 1 7 | image: 8 | repository: qlikcore/engine 9 | tag: 12.792.0 10 | pullPolicy: IfNotPresent 11 | annotations: 12 | prometheus.io/scrape: 'true' 13 | prometheus.io/port: '9090' 14 | resources: 15 | volumeMounts: 16 | - mountPath: /doc 17 | name: app-nfs 18 | - name: engine-prestop-hook 19 | mountPath: /engine-prestop-hook 20 | - mountPath: /rules 21 | name: access-control 22 | volumes: 23 | - name: engine-prestop-hook 24 | configMap: 25 | name: engine-prestop-hook 26 | defaultMode: "0755" 27 | - name: app-nfs 28 | persistentVolumeClaim: 29 | claimName: nfs-server 30 | - name: access-control 31 | configMap: 32 | name: access-control 33 | hpa: 34 | minReplicas: 1 35 | maxReplicas: 3 36 | targetAverageValue: 3000 37 | 38 | qix: 39 | name: qix-session 40 | replicas: 1 41 | image: 42 | repository: qlikcore/qix-session-placement-service 43 | tag: 0.0.1-671 44 | pullPolicy: IfNotPresent 45 | resources: 46 | requests: 47 | memory: "1Gi" 48 | limits: 49 | memory: "1Gi" 50 | port: 9455 51 | sessionsThreshold: 4500 52 | 53 | configMapPH: 54 | name: engine-prestop-hook 55 | 56 | configMapAC: 57 | name: access-control 58 | 59 | licenseService: 60 | name: license-service 61 | image: 62 | repository: qlikcore/licenses 63 | tag: 3.5.0 64 | pullPolicy: IfNotPresent 65 | resources: 66 | requests: 67 | memory: "256Mi" 68 | limits: 69 | memory: "256Mi" 70 | port: 9200 71 | 72 | ingress: 73 | annotations: {} 74 | # 
kubernetes.io/ingress.class: nginx 75 | # kubernetes.io/tls-acme: "true" 76 | paths: [] 77 | hosts: 78 | - chart-example.local 79 | tls: [] 80 | # - secretName: chart-example-tls 81 | # hosts: 82 | # - chart-example.local 83 | 84 | resources: {} 85 | # We usually recommend not to specify default resources and to leave this as a conscious 86 | # choice for the user. This also increases chances charts run on environments with little 87 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 88 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 89 | # limits: 90 | # cpu: 100m 91 | # memory: 128Mi 92 | # requests: 93 | # cpu: 100m 94 | # memory: 128Mi 95 | 96 | nodeSelector: 97 | cloud.google.com/gke-nodepool : default-pool 98 | 99 | tolerations: [] 100 | 101 | affinity: {} 102 | -------------------------------------------------------------------------------- /charts/readme.md: -------------------------------------------------------------------------------- 1 | # Helm tutorial 2 | 3 | In general, check the `../run.sh` file for implementation details. 4 | 5 | 1) create the cluster (including disks/monitoring stack, secrets, etc.): `./run.sh bootstrap` 6 | 2) Deploy/upgrade: `./run.sh upgrade` 7 | 3) Wipe: `./run.sh wipe` 8 | 9 | # Todo 10 | 11 | * Move qlik core helm stack to core-orchestration, and use as dependency here instead 12 | * Consume the official engine chart? 13 | * Introduce chart-of-charts with dependencies: https://github.com/codefresh-io/helm-chart-examples/tree/master/chart-of-charts and remove `run.sh` in favor of vanilla helm 14 | * Fix Circle CI build master 15 | * Remove namespaces 16 | * remove node selector - use labels. 17 | * rollback 18 | 19 | HOW DO WE HANDLE ACCEPT EULA... 
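
Regarding the EULA question above: one option, consistent with what the chart's `templates/NOTES.txt` already suggests (path adjusted here to this repository's layout, which is an assumption), is to keep `acceptEULA: "no"` in `values.yaml` and pass the flag explicitly on every install or upgrade:

```bash
# accept the EULA per release instead of baking it into values.yaml
helm upgrade --install --set engine.acceptEULA=yes qlik-core ./charts/qlik-core
```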
-------------------------------------------------------------------------------- /config/grafana-dashboards-cfg.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: grafana-dashboards 5 | namespace: monitoring 6 | labels: 7 | grafana_dashboard: "1" 8 | data: 9 | default.yml: | 10 | apiVersion: 1 11 | 12 | providers: 13 | - name: 'defaults' 14 | orgId: 1 15 | folder: '' 16 | type: file 17 | disableDeletion: false 18 | editable: true 19 | options: 20 | path: /etc/grafana/provisioning/dashboards 21 | 22 | core-scaling.json: | 23 | { 24 | "__inputs": [ 25 | { 26 | "name": "DS_PROMETHEUS", 27 | "label": "Prometheus", 28 | "description": "", 29 | "type": "datasource", 30 | "pluginId": "prometheus", 31 | "pluginName": "Prometheus" 32 | } 33 | ], 34 | "__requires": [ 35 | { 36 | "type": "grafana", 37 | "id": "grafana", 38 | "name": "Grafana", 39 | "version": "5.0.4" 40 | }, 41 | { 42 | "type": "panel", 43 | "id": "graph", 44 | "name": "Graph", 45 | "version": "5.0.0" 46 | }, 47 | { 48 | "type": "datasource", 49 | "id": "prometheus", 50 | "name": "Prometheus", 51 | "version": "5.0.0" 52 | } 53 | ], 54 | "annotations": { 55 | "list": [ 56 | { 57 | "builtIn": 1, 58 | "datasource": "-- Grafana --", 59 | "enable": true, 60 | "hide": true, 61 | "iconColor": "rgba(0, 211, 255, 1)", 62 | "name": "Annotations & Alerts", 63 | "type": "dashboard" 64 | } 65 | ] 66 | }, 67 | "editable": true, 68 | "gnetId": null, 69 | "graphTooltip": 0, 70 | "id": null, 71 | "links": [], 72 | "panels": [ 73 | { 74 | "collapsed": false, 75 | "gridPos": { 76 | "h": 1, 77 | "w": 24, 78 | "x": 0, 79 | "y": 0 80 | }, 81 | "id": 9, 82 | "panels": [], 83 | "title": "Session overview", 84 | "type": "row" 85 | }, 86 | { 87 | "aliasColors": {}, 88 | "bars": false, 89 | "dashLength": 10, 90 | "dashes": false, 91 | "datasource": null, 92 | "fill": 5, 93 | "gridPos": { 94 | "h": 5, 95 | "w": 24, 96 | "x": 0, 97 | "y": 1 98 | }, 99 | "id": 4, 100 | "legend": { 101 | "alignAsTable": false, 102 | "avg": false, 103 | "current": false, 104 | "max": false, 105 | "min": false, 106 | "rightSide": false, 107 | "show": false, 108 | "total": false, 109 | "values": false 110 | }, 111 | "lines": true, 112 | "linewidth": 3, 113 | "links": [], 114 | "nullPointMode": "null", 115 | "percentage": false, 116 | "pointradius": 5, 117 | "points": false, 118 | "renderer": "flot", 119 | "seriesOverrides": [], 120 | "spaceLength": 10, 121 | "stack": false, 122 | "steppedLine": false, 123 | "targets": [ 124 | { 125 | "expr": "sum(qix_active_sessions)", 126 | "format": "time_series", 127 | "intervalFactor": 1, 128 | "refId": "A" 129 | } 130 | ], 131 | "thresholds": [], 132 | "timeFrom": null, 133 | "timeShift": null, 134 | "title": "Total active sessions", 135 | "tooltip": { 136 | "shared": true, 137 | "sort": 0, 138 | "value_type": "individual" 139 | }, 140 | "type": "graph", 141 | "xaxis": { 142 | "buckets": null, 143 | "mode": "time", 144 | "name": null, 145 | "show": true, 146 | "values": [] 147 | }, 148 | "yaxes": [ 149 | { 150 | "format": "short", 151 | "label": null, 152 | "logBase": 1, 153 | "max": null, 154 | "min": null, 155 | "show": true 156 | }, 157 | { 158 | "format": "short", 159 | "label": null, 160 | "logBase": 1, 161 | "max": null, 162 | "min": null, 163 | "show": true 164 | } 165 | ] 166 | }, 167 | { 168 | "aliasColors": {}, 169 | "bars": false, 170 | "dashLength": 10, 171 | "dashes": false, 172 | "datasource": null, 173 | "fill": 5, 174 | "gridPos": { 
175 | "h": 5, 176 | "w": 24, 177 | "x": 0, 178 | "y": 6 179 | }, 180 | "id": 7, 181 | "legend": { 182 | "alignAsTable": false, 183 | "avg": false, 184 | "current": false, 185 | "max": false, 186 | "min": false, 187 | "rightSide": false, 188 | "show": true, 189 | "total": false, 190 | "values": false 191 | }, 192 | "lines": true, 193 | "linewidth": 3, 194 | "links": [], 195 | "nullPointMode": "null", 196 | "percentage": false, 197 | "pointradius": 5, 198 | "points": false, 199 | "renderer": "flot", 200 | "seriesOverrides": [], 201 | "spaceLength": 10, 202 | "stack": false, 203 | "steppedLine": false, 204 | "targets": [ 205 | { 206 | "expr": "qix_active_sessions", 207 | "format": "time_series", 208 | "intervalFactor": 1, 209 | "legendFormat": "{{pod}}", 210 | "refId": "A" 211 | } 212 | ], 213 | "thresholds": [], 214 | "timeFrom": null, 215 | "timeShift": null, 216 | "title": "Active sessions per engine", 217 | "tooltip": { 218 | "shared": true, 219 | "sort": 0, 220 | "value_type": "individual" 221 | }, 222 | "type": "graph", 223 | "xaxis": { 224 | "buckets": null, 225 | "mode": "time", 226 | "name": null, 227 | "show": true, 228 | "values": [] 229 | }, 230 | "yaxes": [ 231 | { 232 | "format": "short", 233 | "label": null, 234 | "logBase": 1, 235 | "max": null, 236 | "min": null, 237 | "show": true 238 | }, 239 | { 240 | "format": "short", 241 | "label": null, 242 | "logBase": 1, 243 | "max": null, 244 | "min": null, 245 | "show": true 246 | } 247 | ] 248 | }, 249 | { 250 | "aliasColors": {}, 251 | "bars": false, 252 | "dashLength": 10, 253 | "dashes": false, 254 | "datasource": null, 255 | "fill": 5, 256 | "gridPos": { 257 | "h": 5, 258 | "w": 24, 259 | "x": 0, 260 | "y": 11 261 | }, 262 | "id": 6, 263 | "legend": { 264 | "avg": false, 265 | "current": false, 266 | "max": false, 267 | "min": false, 268 | "show": false, 269 | "total": false, 270 | "values": false 271 | }, 272 | "lines": true, 273 | "linewidth": 3, 274 | "links": [], 275 | "nullPointMode": "null", 276 | "percentage": false, 277 | "pointradius": 5, 278 | "points": false, 279 | "renderer": "flot", 280 | "seriesOverrides": [], 281 | "spaceLength": 10, 282 | "stack": false, 283 | "steppedLine": true, 284 | "targets": [ 285 | { 286 | "expr": "count(count(container_memory_usage_bytes{container_name=\"engine\", namespace=\"default\"}) by (kubernetes_io_hostname))", 287 | "format": "time_series", 288 | "intervalFactor": 1, 289 | "refId": "A" 290 | } 291 | ], 292 | "thresholds": [], 293 | "timeFrom": null, 294 | "timeShift": null, 295 | "title": "Engine machine count", 296 | "tooltip": { 297 | "shared": true, 298 | "sort": 0, 299 | "value_type": "individual" 300 | }, 301 | "type": "graph", 302 | "xaxis": { 303 | "buckets": null, 304 | "mode": "time", 305 | "name": null, 306 | "show": true, 307 | "values": [] 308 | }, 309 | "yaxes": [ 310 | { 311 | "decimals": 0, 312 | "format": "short", 313 | "label": null, 314 | "logBase": 1, 315 | "max": null, 316 | "min": "0", 317 | "show": true 318 | }, 319 | { 320 | "decimals": 0, 321 | "format": "short", 322 | "label": null, 323 | "logBase": 1, 324 | "max": null, 325 | "min": "0", 326 | "show": true 327 | } 328 | ] 329 | }, 330 | { 331 | "collapsed": false, 332 | "gridPos": { 333 | "h": 1, 334 | "w": 24, 335 | "x": 0, 336 | "y": 16 337 | }, 338 | "id": 13, 339 | "panels": [], 340 | "title": "Memory/CPU", 341 | "type": "row" 342 | }, 343 | { 344 | "aliasColors": {}, 345 | "bars": false, 346 | "dashLength": 10, 347 | "dashes": false, 348 | "datasource": null, 349 | "fill": 1, 350 | "gridPos": { 351 
| "h": 5, 352 | "w": 12, 353 | "x": 0, 354 | "y": 17 355 | }, 356 | "id": 15, 357 | "legend": { 358 | "alignAsTable": true, 359 | "avg": false, 360 | "current": false, 361 | "max": false, 362 | "min": false, 363 | "rightSide": true, 364 | "show": true, 365 | "sideWidth": null, 366 | "total": false, 367 | "values": false 368 | }, 369 | "lines": true, 370 | "linewidth": 1, 371 | "links": [], 372 | "nullPointMode": "null", 373 | "percentage": false, 374 | "pointradius": 5, 375 | "points": false, 376 | "renderer": "flot", 377 | "seriesOverrides": [], 378 | "spaceLength": 10, 379 | "stack": false, 380 | "steppedLine": false, 381 | "targets": [ 382 | { 383 | "expr": "sum(container_memory_usage_bytes{container_name=\"engine\"}) by (pod_name)", 384 | "format": "time_series", 385 | "intervalFactor": 1, 386 | "legendFormat": "{{pod_name}}", 387 | "refId": "A" 388 | } 389 | ], 390 | "thresholds": [], 391 | "timeFrom": null, 392 | "timeShift": null, 393 | "title": "Engine container memory usage", 394 | "tooltip": { 395 | "shared": true, 396 | "sort": 0, 397 | "value_type": "individual" 398 | }, 399 | "type": "graph", 400 | "xaxis": { 401 | "buckets": null, 402 | "mode": "time", 403 | "name": null, 404 | "show": true, 405 | "values": [] 406 | }, 407 | "yaxes": [ 408 | { 409 | "format": "short", 410 | "label": null, 411 | "logBase": 1, 412 | "max": null, 413 | "min": null, 414 | "show": true 415 | }, 416 | { 417 | "format": "short", 418 | "label": null, 419 | "logBase": 1, 420 | "max": null, 421 | "min": null, 422 | "show": true 423 | } 424 | ] 425 | }, 426 | { 427 | "aliasColors": {}, 428 | "bars": false, 429 | "dashLength": 10, 430 | "dashes": false, 431 | "datasource": null, 432 | "fill": 1, 433 | "gridPos": { 434 | "h": 5, 435 | "w": 12, 436 | "x": 12, 437 | "y": 17 438 | }, 439 | "id": 19, 440 | "legend": { 441 | "alignAsTable": true, 442 | "avg": false, 443 | "current": false, 444 | "max": false, 445 | "min": false, 446 | "rightSide": true, 447 | "show": true, 448 | "total": false, 449 | "values": false 450 | }, 451 | "lines": true, 452 | "linewidth": 1, 453 | "links": [], 454 | "nullPointMode": "null", 455 | "percentage": false, 456 | "pointradius": 5, 457 | "points": false, 458 | "renderer": "flot", 459 | "seriesOverrides": [], 460 | "spaceLength": 10, 461 | "stack": false, 462 | "steppedLine": false, 463 | "targets": [ 464 | { 465 | "expr": "sum(irate(container_cpu_usage_seconds_total{container_name=\"engine\", namespace=\"default\"}[30s])) by (id,pod_name)", 466 | "format": "time_series", 467 | "intervalFactor": 1, 468 | "legendFormat": "{{pod_name}}", 469 | "refId": "A" 470 | } 471 | ], 472 | "thresholds": [], 473 | "timeFrom": null, 474 | "timeShift": null, 475 | "title": "Engine container CPU usage", 476 | "tooltip": { 477 | "shared": true, 478 | "sort": 0, 479 | "value_type": "individual" 480 | }, 481 | "type": "graph", 482 | "xaxis": { 483 | "buckets": null, 484 | "mode": "time", 485 | "name": null, 486 | "show": true, 487 | "values": [] 488 | }, 489 | "yaxes": [ 490 | { 491 | "format": "short", 492 | "label": null, 493 | "logBase": 1, 494 | "max": null, 495 | "min": null, 496 | "show": true 497 | }, 498 | { 499 | "format": "short", 500 | "label": null, 501 | "logBase": 1, 502 | "max": null, 503 | "min": null, 504 | "show": true 505 | } 506 | ] 507 | } 508 | ], 509 | "refresh": "5s", 510 | "schemaVersion": 16, 511 | "style": "dark", 512 | "tags": [], 513 | "templating": { 514 | "list": [] 515 | }, 516 | "time": { 517 | "from": "now-30m", 518 | "to": "now" 519 | }, 520 | "timepicker": { 
521 | "refresh_intervals": [ 522 | "5s", 523 | "10s", 524 | "30s", 525 | "1m", 526 | "5m", 527 | "15m", 528 | "30m", 529 | "1h", 530 | "2h", 531 | "1d" 532 | ], 533 | "time_options": [ 534 | "5m", 535 | "15m", 536 | "1h", 537 | "6h", 538 | "12h", 539 | "24h", 540 | "2d", 541 | "7d", 542 | "30d" 543 | ] 544 | }, 545 | "timezone": "browser", 546 | "title": "core-scaling", 547 | "uid": "7dKzf_imb", 548 | "version": 4 549 | } 550 | -------------------------------------------------------------------------------- /config/grafana-datasources-cfg.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: grafana-datasources 5 | namespace: monitoring 6 | labels: 7 | grafana_datasource: "1" 8 | data: 9 | prometheus.yaml: | 10 | apiVersion: 1 11 | 12 | deleteDatasources: 13 | - name: Prometheus 14 | orgId: 1 15 | 16 | datasources: 17 | - name: Prometheus 18 | type: prometheus 19 | access: proxy 20 | orgId: 1 21 | url: http://prometheus-server.monitoring.svc:80 22 | isDefault: true 23 | version: 1 24 | editable: true 25 | -------------------------------------------------------------------------------- /config/rbac-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tiller 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: tiller 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: tiller 18 | namespace: kube-system 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRoleBinding 22 | metadata: 23 | name: serviceaccounts-view 24 | roleRef: 25 | apiGroup: rbac.authorization.k8s.io 26 | kind: ClusterRole 27 | name: view 28 | subjects: 29 | - apiGroup: rbac.authorization.k8s.io 30 | kind: Group 31 | name: system:serviceaccounts 32 | -------------------------------------------------------------------------------- /deployment-test/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "deployment-test", 3 | "version": "1.0.0", 4 | "scripts": { 5 | "test": "aw --test ./verify-deployment.spec.js --babel.enable false" 6 | }, 7 | "license": "MIT", 8 | "devDependencies": { 9 | "@after-work.js/aw": "6.0.14", 10 | "enigma.js": "2.7.2", 11 | "ws": "7.3.1" 12 | } 13 | } -------------------------------------------------------------------------------- /deployment-test/verify-deployment.spec.js: -------------------------------------------------------------------------------- 1 | const enigma = require('enigma.js'); 2 | const WebSocket = require('ws'); 3 | const schema = require('enigma.js/schemas/12.20.0.json'); 4 | 5 | describe('Verify the Deployment', () => { 6 | let qix; 7 | let session; 8 | const testClusterIP = process.env.TEST_CLUSTER_IP; 9 | 10 | beforeEach(async () => { 11 | session = enigma.create({ 12 | schema, 13 | url: `ws://${testClusterIP}/app/doc/739db838-dd28-4078-8715-ee9cfcc06c29`, 14 | createSocket: url => new WebSocket(url), 15 | }); 16 | qix = await session.open(); 17 | }); 18 | 19 | afterEach(async () => { 20 | await session.close(); 21 | }); 22 | 23 | it('Verify that the correct app is opened', async () => { 24 | const app = await qix.getActiveDoc(); 25 | const layout = await app.getAppLayout(); 26 | 
expect(layout.qTitle).to.equal('739db838-dd28-4078-8715-ee9cfcc06c29'); 27 | }); 28 | 29 | it('Verify that the app includes at least one of the correct fields', async () => { 30 | const app = await qix.getActiveDoc(); 31 | const landField = await app.getFieldDescription('Land Area'); 32 | expect(landField.qName).to.equal('Land Area'); 33 | }); 34 | 35 | it('Verify that reload is not possible', async () => { 36 | const app = await qix.getActiveDoc(); 37 | try { 38 | await app.doReload(); 39 | throw new Error('Reload unexpectedly succeeded'); 40 | } catch (err) { 41 | expect(err.message).to.equal('Access denied'); 42 | } 43 | }); 44 | }); 45 | -------------------------------------------------------------------------------- /doc/Shared-Africa-Urbanization.qvf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qlik-oss/core-scaling/41775397ca4ce105f48f4e68668b74d757ac5e2f/doc/Shared-Africa-Urbanization.qvf -------------------------------------------------------------------------------- /doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/appobjects/appobjects.qvf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qlik-oss/core-scaling/41775397ca4ce105f48f4e68668b74d757ac5e2f/doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/appobjects/appobjects.qvf -------------------------------------------------------------------------------- /doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/appobjects/appobjects.qvf.lock: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qlik-oss/core-scaling/41775397ca4ce105f48f4e68668b74d757ac5e2f/doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/appobjects/appobjects.qvf.lock -------------------------------------------------------------------------------- /doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/master/739db838-dd28-4078-8715-ee9cfcc06c29.qvf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qlik-oss/core-scaling/41775397ca4ce105f48f4e68668b74d757ac5e2f/doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/master/739db838-dd28-4078-8715-ee9cfcc06c29.qvf -------------------------------------------------------------------------------- /doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/master/739db838-dd28-4078-8715-ee9cfcc06c29.qvf.lock: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qlik-oss/core-scaling/41775397ca4ce105f48f4e68668b74d757ac5e2f/doc/default/739db838-dd28-4078-8715-ee9cfcc06c29/master/739db838-dd28-4078-8715-ee9cfcc06c29.qvf.lock -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "qlik-oss", 4 | "qlik-oss:groupMinorPatch" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # move into project root: 6 | cd "$(dirname "$0")" 7 | 8 | # set settings values 9 | source settings.config 10 | 11 | command=$1 12 |
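# (annotation, not part of the original script: typical invocations, with the
#  settings.config defaults overridden through the environment, e.g.
#    GCLOUD_PROJECT=my-project DISK_NAME=my-disk ./run.sh bootstrap
#    GCLOUD_PROJECT=my-project DISK_NAME=my-disk ./run.sh upgrade
#    ./run.sh grafana   # port-forwards Grafana to localhost:3000)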
13 | function bootstrap() { 14 | # create cluster 15 | gcloud container --project $GCLOUD_PROJECT clusters create $K8S_CLUSTER \ 16 | --zone $GCLOUD_ZONE --no-enable-basic-auth --cluster-version $K8S_VERSION \ 17 | --machine-type $GCLOUD_MACHINE_TYPE --image-type $GCLOUD_IMAGE_TYPE \ 18 | --disk-size $GCLOUD_DISK_SIZE --scopes=$GCLOUD_SCOPES --num-nodes $GCLOUD_NUM_NODES \ 19 | --network "default" --enable-cloud-logging --enable-cloud-monitoring \ 20 | --subnetwork "default" --enable-autoscaling --min-nodes $GCLOUD_MIN_NODES \ 21 | --max-nodes $GCLOUD_MAX_NODES --metadata disable-legacy-endpoints=true \ 22 | --enable-ip-alias --enable-autoupgrade --enable-autorepair --addons HorizontalPodAutoscaling \ 23 | --no-issue-client-certificate 24 | 25 | # create volume 26 | gcloud compute disks create --project=$GCLOUD_PROJECT --size=10GB --zone=$GCLOUD_ZONE $DISK_NAME 27 | 28 | # create monitoring node pool 29 | gcloud container node-pools create monitoring --project=$GCLOUD_PROJECT --cluster=$K8S_CLUSTER --scopes=$GCLOUD_SCOPES \ 30 | --machine-type=$GCLOUD_MACHINE_TYPE --num-nodes=1 --zone $GCLOUD_ZONE --metadata disable-legacy-endpoints=true 31 | 32 | # infra configuration 33 | kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud config get-value core/account) --dry-run -o=yaml | kubectl apply -f - 34 | kubectl apply -f ./config/rbac-config.yaml 35 | kubectl create namespace monitoring 36 | helm init --service-account tiller --upgrade 37 | kubectl rollout status -w deployment/tiller-deploy --namespace=kube-system 38 | } 39 | 40 | function copy_apps() { 41 | kubectl rollout status -w deployment/nfs-server 42 | POD=$(kubectl get pods --selector="role=nfs-server" -o=jsonpath='{.items[0].metadata.name}') 43 | # NOTE: Using the // format on file paths is to make it work in Git Bash on Windows which otherwise converts such 44 | # paths to Windows paths. 45 | kubectl cp ./doc/default $POD://exports 46 | } 47 | 48 | function upgrade() { 49 | # set up licensing info/secret 50 | kubectl create secret generic license-data --from-literal LICENSE_KEY=$LICENSE_KEY --dry-run -o=yaml | kubectl apply -f - 51 | 52 | # configuration 53 | kubectl apply -f ./config/grafana-datasources-cfg.yaml 54 | kubectl apply -f ./config/grafana-dashboards-cfg.yaml 55 | 56 | # infrastructure 57 | helm upgrade --install prometheus --namespace monitoring stable/prometheus 58 | helm upgrade --install custom-metrics-apiserver --namespace monitoring stable/prometheus-adapter -f ./values/prom-adapter.yaml 59 | helm upgrade --install grafana --namespace monitoring stable/grafana -f ./values/grafana.yaml 60 | helm upgrade --install nginx-ingress stable/nginx-ingress -f ./values/nginx-ingress.yaml 61 | helm upgrade --install nfs-server ./charts/nfs --set persistence.diskName=$DISK_NAME 62 | 63 | # copy over apps 64 | copy_apps 65 | 66 | # qlik core stack - set acceptEULA to yes to accept the EULA 67 | helm upgrade --install --set engine.acceptEULA=$ACCEPT_EULA qlik-core ./charts/qlik-core 68 | helm repo add qlikoss https://qlik.bintray.com/osscharts 69 | helm upgrade --install --set image.tag="3.0.0" mira qlikoss/mira 70 | } 71 | 72 | function grafana() { 73 | kubectl port-forward --namespace=monitoring $(kubectl get pods --namespace=monitoring --selector="app=grafana" -o=jsonpath='{.items[0].metadata.name}') 3000:3000 74 | } 75 | 76 | function remove_cluster() { 77 | gcloud container -q clusters delete $K8S_CLUSTER --project $GCLOUD_PROJECT --zone $GCLOUD_ZONE 78 | } 79 | 80 | function remove_disks() { 81 | gcloud compute -q disks delete $DISK_NAME --project $GCLOUD_PROJECT --zone $GCLOUD_ZONE 82 | } 83 | 84 | function wipe() { 85 | remove_cluster 86 | remove_disks 87 | } 88 |
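# (annotation: the 'ip' command below just prints the nginx-ingress-controller
#  service; its EXTERNAL-IP column is the address the deployment tests consume
#  as TEST_CLUSTER_IP.)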
89 | function external_ip() { 90 | kubectl get service nginx-ingress-controller 91 | } 92 | 93 | if [ "$command" == "bootstrap" ]; then bootstrap 94 | elif [ "$command" == "upgrade" ]; then upgrade 95 | elif [ "$command" == "copy-apps" ]; then copy_apps 96 | elif [ "$command" == "remove-cluster" ]; then remove_cluster 97 | elif [ "$command" == "remove-disks" ]; then remove_disks 98 | elif [ "$command" == "wipe" ]; then wipe 99 | elif [ "$command" == "ip" ]; then external_ip 100 | elif [ "$command" == "grafana" ]; then grafana 101 | else echo "Invalid option: $command - please use one of: bootstrap, upgrade, copy-apps, remove-cluster, remove-disks, wipe, ip, grafana"; fi 102 | -------------------------------------------------------------------------------- /settings.config: -------------------------------------------------------------------------------- 1 | GCLOUD_PROJECT="${GCLOUD_PROJECT:-YOUR-PROJECT-HERE}" 2 | K8S_CLUSTER="${K8S_CLUSTER:-my-cluster}" 3 | GCLOUD_ZONE="${GCLOUD_ZONE:-europe-west2-a}" 4 | K8S_VERSION="${K8S_VERSION:-1.14}" 5 | GCLOUD_MACHINE_TYPE="${GCLOUD_MACHINE_TYPE:-n1-standard-1}" 6 | GCLOUD_IMAGE_TYPE="${GCLOUD_IMAGE_TYPE:-COS}" 7 | GCLOUD_DISK_SIZE="${GCLOUD_DISK_SIZE:-50}" 8 | GCLOUD_SCOPES="gke-default" 9 | GCLOUD_NUM_NODES="${GCLOUD_NUM_NODES:-3}" 10 | GCLOUD_MIN_NODES="${GCLOUD_MIN_NODES:-2}" 11 | GCLOUD_MAX_NODES="${GCLOUD_MAX_NODES:-6}" 12 | LICENSE_KEY="${LICENSE_KEY:-my-license-key}" 13 | # DISK_NAME is passed to the nfs chart by run.sh via --set persistence.diskName, so no other file needs updating. 14 | DISK_NAME="${DISK_NAME:-my-disk}" 15 | -------------------------------------------------------------------------------- /values/grafana.yaml: -------------------------------------------------------------------------------- 1 | sidecar: 2 | datasources: 3 | enabled: true 4 | dashboards: 5 | enabled: true 6 | 7 | env: 8 | GF_AUTH_ANONYMOUS_ENABLED: true 9 | GF_AUTH_ANONYMOUS_ORG_ROLE: Admin 10 | -------------------------------------------------------------------------------- /values/nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | # ref: https://github.com/kubernetes/charts/blob/master/stable/nginx-ingress/values.yaml 2 | 3 | rbac: 4 | create: true 5 | 6 | livenessProbe: 7 | initialDelaySeconds: 30 8 | timeoutSeconds: 5 9 | 10 | defaultBackend: 11 | replicaCount: 1 12 | 13 | resources: 14 | limits: 15 | cpu: 0.1 16 | memory: "20Mi" 17 | requests: 18 | cpu: 0.1 19 | memory: "5Mi" 20 | 21 | controller: 22 | replicaCount: 1 23 | 24 | publishService: 25 | enabled: true 26 | 27 | resources: 28 | limits: 29 | memory: "512Mi" 30 | requests: 31 | memory: "128Mi" 32 | 33 | service: 34 | annotations: 35 | prometheus.io/scrape: "true" 36 | prometheus.io/port: "10254" 37 | 38 | config: 39 | use-proxy-protocol: "false" 40 | enable-vts-status: "true" 41 | ssl-redirect: "true" 42 | http-snippet: |- 43 | upstream engine_dynamic_backend { 44 | server 0.0.0.1; 45 | balancer_by_lua_block { 46 | local balancer = require "ngx.balancer"; 47 | local ok, err = balancer.set_current_peer(ngx.var.qix_session_host, ngx.var.qix_session_port); 48 | if not ok then 49 | return ngx.exit(500) 50 | end; 51 | } 52 | } 53 | server-snippet: | 54 | # global timeouts 55 | keepalive_timeout 3h; 56 | proxy_read_timeout 1d; 57 | proxy_send_timeout 1d; 58 | 59 | location /create_qix_session { 60 | internal; 61 | proxy_pass http://qix-session.default.svc.cluster.local:9455/v1/session; 62 | } 63 | 64 | location /app { 65 | set $qix_session_host ''; 66 | set $qix_session_port ''; 67 |
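# (annotation, not part of the original snippet: judging by the fields read
#  out of the decoded body below, the /create_qix_session subrequest is
#  expected to answer with JSON shaped roughly like
#  {"ip": "<pod ip>", "port": <port>, "sessionId": "<id>"}.)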
68 | access_by_lua_block { 69 | local uri = string.sub(ngx.var.request_uri, 5) 70 | local routeResponse = ngx.location.capture("/create_qix_session" .. uri) 71 | if routeResponse.status == 200 then 72 | local cjson = require "cjson"; 73 | local qix_session_route_data = cjson.decode(routeResponse.body); 74 | ngx.var.qix_session_host = qix_session_route_data.ip; 75 | ngx.var.qix_session_port = qix_session_route_data.port; 76 | ngx.req.set_header("X-Qlik-Session", qix_session_route_data.sessionId) 77 | ngx.req.set_uri("/app/") 78 | ngx.log(ngx.NOTICE, "Session placement: Session placed on " .. qix_session_route_data.ip .. " with id " .. qix_session_route_data.sessionId) 79 | else 80 | ngx.log(ngx.WARN, "Session placement: Unexpected response: " .. routeResponse.status) 81 | ngx.exit(routeResponse.status) 82 | end 83 | } 84 | 85 | proxy_set_header X-Real-IP $proxy_protocol_addr; 86 | proxy_set_header X-Forwarded-For $proxy_protocol_addr; 87 | proxy_set_header X-Forwarded-Port 80; 88 | proxy_set_header X-Forwarded-Proto $scheme; 89 | proxy_set_header Host $http_host; 90 | proxy_set_header X-NginX-Proxy true; 91 | proxy_set_header Connection $connection_upgrade; 92 | proxy_set_header Upgrade $http_upgrade; 93 | 94 | # websocket timeouts 95 | proxy_connect_timeout 7d; 96 | proxy_read_timeout 7d; 97 | proxy_send_timeout 7d; 98 | proxy_http_version 1.1; 99 | 100 | proxy_pass http://engine_dynamic_backend/; 101 | } 102 | -------------------------------------------------------------------------------- /values/prom-adapter.yaml: -------------------------------------------------------------------------------- 1 | prometheus: 2 | url: http://prometheus-server.monitoring.svc 3 | port: 80 4 | 5 | rules: 6 | default: false 7 | custom: 8 | - seriesQuery: 'qix_active_sessions' 9 | metricsQuery: 'sum(<<.Series>>) by (<<.GroupBy>>)' 10 | seriesFilters: [] 11 | resources: 12 | overrides: 13 | kubernetes_namespace: {resource: "namespace"} 14 | kubernetes_pod_name: {resource: "pod"} 15 | --------------------------------------------------------------------------------
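Annotation (not a file in this repository): the custom rule in values/prom-adapter.yaml publishes the engine's qix_active_sessions series through the Kubernetes custom-metrics API, which is what a HorizontalPodAutoscaler can consume to scale the engine on session count. A minimal sketch of such an HPA follows; the deployment name "engine", the replica bounds, and the 20-sessions-per-pod target are hypothetical illustration values, not taken from the charts in this repository.

apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: engine-hpa-sketch
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: engine                        # hypothetical deployment name
  minReplicas: 2                        # hypothetical lower bound
  maxReplicas: 10                       # hypothetical upper bound
  metrics:
    - type: Pods
      pods:
        metricName: qix_active_sessions # exposed by the prometheus-adapter rule above
        targetAverageValue: 20          # hypothetical target sessions per pod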