├── .gitignore ├── LICENSE ├── README.md ├── base ├── jitsi-shard │ ├── jicofo-deployment.yaml │ ├── jvb │ │ ├── jvb-hpa.yaml │ │ ├── jvb-statefulset.yaml │ │ ├── kustomization.yaml │ │ └── service-per-pod-decoratorcontroller.yaml │ ├── kustomization.yaml │ ├── prosody-deployment.yaml │ ├── prosody-service.yaml │ └── web-deployment.yaml ├── jitsi │ ├── jitsi-namespace.yaml │ ├── jitsi-secret.yaml │ ├── jvb-entrypoint-configmap.yaml │ ├── jvb-shutdown-configmap.yaml │ ├── kustomization.yaml │ ├── prosody-configmap.yaml │ ├── web-configmap.yaml │ └── web-service.yaml └── ops │ ├── cert-manager │ ├── cert-manager.yaml │ ├── cluster-issuer.yaml │ └── kustomization.yaml │ ├── dashboard │ ├── dashboard-cluster-role-binding.yaml │ ├── dashboard-service-account.yaml │ ├── kubernetes-dashboard.yaml │ └── kustomization.yaml │ ├── ingress-nginx │ ├── ingress-nginx.yaml │ ├── kustomization.yaml │ └── nginx-patch.yaml │ ├── kustomization.yaml │ ├── loadbalancer │ ├── haproxy-configmap.yaml │ ├── haproxy-ingress.yaml │ ├── haproxy-service.yaml │ ├── haproxy-statefulset.yaml │ ├── haproxy0-service.yaml │ ├── haproxy1-service.yaml │ └── kustomization.yaml │ ├── logging │ ├── eck-crd.yaml │ ├── es-realm-secret.yaml │ ├── fluentd-daemonset-elasticsearch-rbac.yaml │ ├── fluentd-daemonset-patch.yaml │ ├── kibana.yaml │ ├── kustomization.yaml │ ├── logging-namespace.yaml │ └── secret-fluentd-user.yaml │ ├── metacontroller │ ├── kustomization.yaml │ ├── metacontroller-namespace.yaml │ ├── service-per-pod-configmap.yaml │ └── service-per-pod-deployment.yaml │ ├── monitoring │ ├── bbb-dashboards-configmap.yaml │ ├── bbb-exporter-service-monitor.yaml │ ├── bbb-service-monitor.yaml │ ├── bbb-service.yaml │ ├── custom-metrics-apiservice.yaml │ ├── grafana-deployment-patch.yaml │ ├── grafana-ingress.yaml │ ├── grafana-pvc.yaml │ ├── haproxy-pod-monitor.yaml │ ├── jitsi-dashboard-configmap.yaml │ ├── jvb-pod-monitor.yaml │ ├── kustomization.yaml │ ├── metrics-server-patch.yaml │ ├── metrics-server.yaml │ ├── prometheus-adapter-config-map-patch.yaml │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ ├── prometheus-roleSpecificNamespaces.yaml │ ├── prosody-pod-monitor.yaml │ ├── turn-service-monitor.yaml │ └── turn-service.yaml │ └── reflector │ ├── kustomization.yaml │ └── reflector.yaml ├── cluster ├── dev │ ├── cluster.tf │ └── variables.tf └── prod │ ├── cluster.tf │ └── variables.tf ├── docs ├── architecture │ ├── architecture.md │ └── build │ │ ├── architecture.py │ │ ├── architecture_one_shard.py │ │ ├── architecture_shards.py │ │ ├── jitsi_meet.png │ │ ├── jitsi_meet_one_shard.png │ │ ├── jitsi_sharding.png │ │ ├── requirements.txt │ │ ├── resources │ │ ├── globe.png │ │ └── jitsi-logo-square.png │ │ ├── shard.png │ │ └── shard.py └── loadtests │ └── loadtestresults.md ├── loadtest ├── docker-compose.yml ├── init.sh ├── loadtest.tf ├── run_loadtest.sh └── variables.tf ├── overlays ├── development-monitoring │ ├── kustomization.yaml │ └── ops │ │ ├── bbb-basic-auth-secret.yaml │ │ ├── bbb-endpoints.yaml │ │ ├── certificate.yaml │ │ ├── elasticsearch.yaml │ │ ├── grafana-deployment-patch.yaml │ │ ├── grafana-ingress-patch.yaml │ │ ├── grafana-tls-secret.yaml │ │ ├── haproxy-ingress-patch.yaml │ │ ├── kustomization.yaml │ │ ├── prometheus-prometheus-patch.yaml │ │ └── turn-endpoints.yaml ├── development │ ├── jitsi-base │ │ ├── jvb-hpa-patch.yaml │ │ ├── jvb-statefulset-patch.yaml │ │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── shard-0 │ │ ├── jicofo-deployment-patch.yaml │ │ ├── jvb-hpa-patch.yaml │ │ ├── jvb-statefulset-patch.yaml │ │ ├── kustomization.yaml │ │ ├── prosody-deployment-patch.yaml │ │ └── web-deployment-patch.yaml │ └── shard-1 │ │ ├── jicofo-deployment-patch.yaml │ │ ├── jvb-hpa-patch.yaml │ │ ├── jvb-statefulset-patch.yaml │ │ ├── kustomization.yaml │ │ ├── prosody-deployment-patch.yaml │ │ └── web-deployment-patch.yaml ├── production-monitoring │ ├── kustomization.yaml │ └── ops │ │ ├── bbb-basic-auth-secret.yaml │ │ ├── bbb-endpoints.yaml │ │ ├── certificate.yaml │ │ ├── elasticsearch.yaml │ │ ├── grafana-deployment-patch.yaml │ │ ├── grafana-ingress-patch.yaml │ │ ├── grafana-tls-secret.yaml │ │ ├── haproxy-ingress-patch.yaml │ │ ├── kustomization.yaml │ │ ├── prometheus-prometheus-patch.yaml │ │ └── turn-endpoints.yaml └── production │ ├── jitsi-base │ ├── jvb-hpa-patch.yaml │ ├── jvb-statefulset-patch.yaml │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── shard-0 │ ├── jicofo-deployment-patch.yaml │ ├── jvb-hpa-patch.yaml │ ├── jvb-statefulset-patch.yaml │ ├── kustomization.yaml │ ├── prosody-deployment-patch.yaml │ └── web-deployment-patch.yaml │ └── shard-1 │ ├── jicofo-deployment-patch.yaml │ ├── jvb-hpa-patch.yaml │ ├── jvb-statefulset-patch.yaml │ ├── kustomization.yaml │ ├── prosody-deployment-patch.yaml │ └── web-deployment-patch.yaml └── secrets.sh /.gitignore: -------------------------------------------------------------------------------- 1 | base/jitsi-shard/jitsi-secret.yaml.bak 2 | 3 | loadtest/terraform.tfstate.backup 4 | loadtest/terraform.tfstate 5 | loadtest/.terraform 6 | loadtest/.terraform.tfstate.lock.info 7 | 8 | cluster/dev/terraform.tfstate.backup 9 | cluster/dev/terraform.tfstate 10 | cluster/dev/.terraform 11 | cluster/dev/.terraform.tfstate.lock.info 12 | 13 | cluster/prod/terraform.tfstate.backup 14 | cluster/prod/terraform.tfstate 15 | cluster/prod/.terraform 16 | cluster/prod/.terraform.tfstate.lock.info 17 | 18 | .idea/ 19 | .vscode/ 20 | venv/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 HPI Schul-Cloud 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Jitsi Meet 2 | 3 | Scalable video conferencing on Kubernetes. 4 | 5 | ## Structure 6 | 7 | The whole setup is based on Kubernetes YAML files and patches for these files. 8 | It makes use of [kustomize](https://github.com/kubernetes-sigs/kustomize) to customize the raw YAMLs for each environment. 9 | 10 | (Almost) every directory in the directory tree (depicted below) contains a `kustomize.yaml` file which defines resources (and possibly patches). 11 | 12 | ``` 13 | |-- base 14 | | |-- jitsi 15 | | |-- jitsi-shard 16 | | | `-- jvb 17 | | `-- ops 18 | | |-- cert-manager 19 | | |-- dashboard 20 | | |-- ingress-nginx 21 | | |-- loadbalancer 22 | | |-- logging 23 | | |-- metacontroller 24 | | |-- monitoring 25 | | `-- reflector 26 | `-- overlays 27 | |-- development 28 | | |-- jitsi-base 29 | | |-- ops 30 | | |-- shard-0 31 | | `-- shard-1 32 | `-- production 33 | |-- jitsi-base 34 | |-- ops 35 | |-- shard-0 36 | `-- shard-1 37 | ``` 38 | 39 | ## Requirements 40 | 41 | - [kubectl/v1.17.2+](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 42 | - [kustomize/v3.5.4](https://github.com/kubernetes-sigs/kustomize/releases/tag/kustomize%2Fv3.5.4) 43 | _WARNING_: newer versions of kustomize currently don't work due to changes regarding remote sources 44 | 45 | ## Install 46 | 47 | To install the full setup go to either [`overlays/development`](overlays/development) or 48 | [`overlays/production`](overlays/production) and run 49 | 50 | ```bash 51 | $ kustomize build . | kubectl apply -f - 52 | ``` 53 | This deploys a Jitsi setup consisting of two shards. A shard is a complete replica of a Jitsi setup that is used in 54 | parallel to other shards to load-balance and for high availability. More shards can be added following the documentation 55 | in [`docs/architecture/architecture.md`](docs/architecture/architecture.md). The setup was tested against a managed 56 | Kubernetes cluster (v1.17.2) running on [IONOS Cloud](https://dcd.ionos.com/). 57 | 58 | ## Architecture 59 | 60 | The Jitsi Kubernetes namespace has the following architecture: 61 | 62 | ![Architecture Jitsi Meet](docs/architecture/build/jitsi_meet_one_shard.png) 63 | 64 | The setup shown above contains only a single shard (for visual clarity). Subsequent shards would be attached to the web 65 | service. A more detailed explanation of the system architecture with multiple shards can be found in [docs/architecture/architecture.md](docs/architecture/architecture.md). 66 | 67 | ## Load Testing 68 | 69 | Load testing is based on [jitsi-meet-torture](https://github.com/jitsi/jitsi-meet-torture) which is a Java application 70 | that connects to a Jitsi instance as a user and shows a predefined video along with an audio stream by using a Selenium 71 | Chrome instance. To run multiple test users in multiple conferences a Selenium hub set up with docker-compose is used. 72 | 73 | Terraform scripts that set up the test servers with an existing image can be found under [`loadtest`](loadtest). 74 | An [init script](loadtest/init.sh) is used to provision the necessary tools to that image. This image also needs SSH 75 | access set up with public key authentication. 76 | 77 | After starting a number of load test servers, the load test can be started by using the [`loadtest/run_loadtest.sh`](loadtest/run_loadtest.sh) 78 | script (locally). Results can be found in [`docs/loadtests/loadtestresults.md`](docs/loadtests/loadtestresults.md). 79 | 80 | ## Kubernetes Dashboard Access 81 | 82 | To access the installed [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) execute 83 | ```bash 84 | $ kubectl proxy 85 | ``` 86 | and then go to `http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/`. 87 | 88 | The login token can be received by executing 89 | ```bash 90 | kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}') 91 | ``` 92 | 93 | ## Kibana Access 94 | 95 | Kibana is not accessible from the Internet and must be forwarded to your local machine via `kubectl` by executing 96 | ```bash 97 | $ kubectl port-forward -n logging svc/kibana-kb-http 5601:5601 98 | ``` 99 | After that you will be able to access Kibana via [https://localhost:5601/](https://localhost:5601/). 100 | The default login password (user `elastic`) can be received with 101 | ```bash 102 | $ kubectl get secret -n logging elasticsearch-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode; echo 103 | ``` 104 | 105 | The same procedure can be used to access Prometheus or Alertmanager. 106 | 107 | ## Relationship With Other Projects 108 | 109 | The monitoring stack that is set up by this project is currently also used by an [affiliated project](https://github.com/schul-cloud/bbb-deployment) 110 | for [Big Blue Button](https://bigbluebutton.org/). Therefore, some of the files here contain configurations to monitor 111 | that setup. To exclude them delete all files starting with `bbb-` and remove the file names from the respective 112 | `kustomization.yaml` files. -------------------------------------------------------------------------------- /base/jitsi-shard/jicofo-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: jicofo 7 | name: jicofo 8 | spec: 9 | replicas: 1 # one jicofo instance per shard 10 | strategy: 11 | type: RollingUpdate 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | selector: 16 | matchLabels: 17 | k8s-app: jicofo 18 | template: 19 | metadata: 20 | labels: 21 | k8s-app: jicofo 22 | spec: 23 | containers: 24 | - name: jicofo 25 | resources: 26 | limits: 27 | memory: 400Mi 28 | cpu: 400m 29 | requests: 30 | memory: 400Mi 31 | cpu: 400m 32 | image: jitsi/jicofo:stable-4548-1 33 | imagePullPolicy: Always 34 | readinessProbe: 35 | # caution: this readinessProbe tries to create a new conference and hence also fails if 36 | # dependencies (jvb, prosody) are not available 37 | httpGet: 38 | path: /about/health 39 | port: 8888 40 | env: 41 | - name: XMPP_SERVER 42 | value: prosody 43 | - name: XMPP_DOMAIN 44 | value: meet.jitsi 45 | - name: XMPP_AUTH_DOMAIN 46 | value: auth.meet.jitsi 47 | - name: XMPP_INTERNAL_MUC_DOMAIN 48 | value: internal-muc.meet.jitsi 49 | - name: JICOFO_COMPONENT_SECRET 50 | valueFrom: 51 | secretKeyRef: 52 | name: jitsi-config 53 | key: JICOFO_COMPONENT_SECRET 54 | - name: JICOFO_AUTH_USER 55 | value: focus 56 | - name: JICOFO_AUTH_PASSWORD 57 | valueFrom: 58 | secretKeyRef: 59 | name: jitsi-config 60 | key: JICOFO_AUTH_PASSWORD 61 | - name: TZ 62 | value: Europe/Berlin 63 | - name: JVB_BREWERY_MUC 64 | value: jvbbrewery 65 | - name: JICOFO_ENABLE_HEALTH_CHECKS 66 | value: "true" -------------------------------------------------------------------------------- /base/jitsi-shard/jvb/jvb-hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | scaleTargetRef: 8 | apiVersion: apps/v1 9 | kind: StatefulSet 10 | -------------------------------------------------------------------------------- /base/jitsi-shard/jvb/jvb-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: jvb 7 | name: jvb 8 | # needed for metacontroller to create/delete service per pod 9 | annotations: 10 | service-per-pod-label: "statefulset.kubernetes.io/pod-name" 11 | spec: 12 | replicas: 1 13 | updateStrategy: 14 | type: RollingUpdate 15 | selector: 16 | matchLabels: 17 | k8s-app: jvb 18 | serviceName: jvb 19 | # relax ordering guarantees because not needed (any JVB pod can be stopped or started at any time) 20 | podManagementPolicy: Parallel 21 | template: 22 | metadata: 23 | labels: 24 | k8s-app: jvb 25 | spec: 26 | volumes: 27 | - name: jvb-entrypoint 28 | configMap: 29 | name: jvb-entrypoint 30 | defaultMode: 0744 # make executable 31 | - name: jvb-shutdown 32 | configMap: 33 | name: jvb-shutdown 34 | defaultMode: 0744 # make executable 35 | # in case there is a long running conference on a JVB that should be shut down, give it time to finish 36 | terminationGracePeriodSeconds: 2147483647 37 | containers: 38 | - name: prometheus-exporter 39 | # sidecar container that exports stats for prometheus 40 | # values are scraped by ../ops/monitoring/jvb-pod-monitor.yaml 41 | image: systemli/prometheus-jitsi-meet-exporter:1.1.1 42 | imagePullPolicy: Always 43 | ports: 44 | - name: metrics 45 | containerPort: 9888 46 | args: 47 | - "-videobridge-url" 48 | - "http://localhost:8080/colibri/stats" 49 | - name: jvb 50 | image: jitsi/jvb:stable-4548-1 51 | imagePullPolicy: Always 52 | lifecycle: 53 | preStop: 54 | exec: 55 | command: ["bash", "/shutdown/graceful_shutdown.sh", "-t 3"] 56 | command: 57 | - /entrypoint/entrypoint.sh 58 | args: 59 | - "/init" 60 | readinessProbe: 61 | httpGet: 62 | path: /about/health 63 | port: 8080 64 | initialDelaySeconds: 10 65 | volumeMounts: 66 | - name: jvb-entrypoint 67 | mountPath: /entrypoint 68 | - name: jvb-shutdown 69 | mountPath: /shutdown 70 | env: 71 | - name: NODE_NAME 72 | valueFrom: 73 | fieldRef: 74 | fieldPath: metadata.name 75 | - name: XMPP_SERVER 76 | value: prosody 77 | - name: DOCKER_HOST_ADDRESS 78 | valueFrom: 79 | fieldRef: 80 | fieldPath: status.hostIP 81 | - name: XMPP_DOMAIN 82 | value: meet.jitsi 83 | - name: XMPP_AUTH_DOMAIN 84 | value: auth.meet.jitsi 85 | - name: XMPP_INTERNAL_MUC_DOMAIN 86 | value: internal-muc.meet.jitsi 87 | - name: JVB_STUN_SERVERS 88 | valueFrom: 89 | secretKeyRef: 90 | name: jitsi-config 91 | key: JVB_STUN_SERVERS 92 | - name: JICOFO_AUTH_USER 93 | value: focus 94 | - name: JVB_TCP_HARVESTER_DISABLED 95 | value: "true" 96 | - name: JVB_ENABLE_APIS 97 | value: colibri,rest 98 | - name: JVB_AUTH_USER 99 | value: jvb 100 | - name: JVB_AUTH_PASSWORD 101 | valueFrom: 102 | secretKeyRef: 103 | name: jitsi-config 104 | key: JVB_AUTH_PASSWORD 105 | - name: JICOFO_AUTH_PASSWORD 106 | valueFrom: 107 | secretKeyRef: 108 | name: jitsi-config 109 | key: JICOFO_AUTH_PASSWORD 110 | - name: JVB_BREWERY_MUC 111 | value: jvbbrewery 112 | - name: TZ 113 | value: Europe/Berlin 114 | -------------------------------------------------------------------------------- /base/jitsi-shard/jvb/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - service-per-pod-decoratorcontroller.yaml 6 | - jvb-statefulset.yaml 7 | - jvb-hpa.yaml 8 | -------------------------------------------------------------------------------- /base/jitsi-shard/jvb/service-per-pod-decoratorcontroller.yaml: -------------------------------------------------------------------------------- 1 | # see https://github.com/GoogleCloudPlatform/metacontroller/tree/master/examples/service-per-pod 2 | 3 | apiVersion: metacontroller.k8s.io/v1alpha1 4 | kind: DecoratorController 5 | metadata: 6 | name: service-per-pod 7 | namespace: jitsi 8 | spec: 9 | resources: 10 | - apiVersion: apps/v1 11 | resource: statefulsets 12 | annotationSelector: 13 | matchExpressions: 14 | - {key: service-per-pod-label, operator: Exists} 15 | attachments: 16 | - apiVersion: v1 17 | resource: services 18 | hooks: 19 | sync: 20 | webhook: 21 | url: http://service-per-pod.metacontroller/sync-service-per-pod 22 | finalize: 23 | webhook: 24 | url: http://service-per-pod.metacontroller/finalize-service-per-pod 25 | --- 26 | apiVersion: metacontroller.k8s.io/v1alpha1 27 | kind: DecoratorController 28 | metadata: 29 | name: pod-name-label 30 | namespace: jitsi 31 | spec: 32 | resources: 33 | - apiVersion: v1 34 | resource: pods 35 | labelSelector: 36 | matchExpressions: 37 | - {key: pod-name, operator: DoesNotExist} 38 | annotationSelector: 39 | matchExpressions: 40 | - {key: pod-name-label, operator: Exists} 41 | hooks: 42 | sync: 43 | webhook: 44 | url: http://service-per-pod.metacontroller/sync-pod-name-label 45 | -------------------------------------------------------------------------------- /base/jitsi-shard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - web-deployment.yaml 6 | - jicofo-deployment.yaml 7 | - prosody-deployment.yaml 8 | - prosody-service.yaml 9 | - jvb/ 10 | 11 | commonLabels: 12 | scope: jitsi 13 | -------------------------------------------------------------------------------- /base/jitsi-shard/prosody-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: prosody 7 | name: prosody 8 | spec: 9 | replicas: 1 # one prosody instance per shard 10 | strategy: 11 | type: RollingUpdate 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | selector: 16 | matchLabels: 17 | k8s-app: prosody 18 | shard: "0" 19 | template: 20 | metadata: 21 | labels: 22 | k8s-app: prosody 23 | shard: "0" 24 | spec: 25 | volumes: 26 | - name: prosody 27 | configMap: 28 | name: prosody 29 | items: 30 | - key: mod_prometheus.lua 31 | path: mod_prometheus.lua 32 | - key: mod_measure_stanza_counts.lua 33 | path: mod_measure_stanza_counts.lua 34 | - key: mod_measure_client_presence.lua 35 | path: mod_measure_client_presence.lua 36 | - key: jitsi-meet.cfg.lua 37 | path: jitsi-meet.cfg.lua 38 | containers: 39 | - name: prosody 40 | resources: 41 | limits: 42 | memory: 300Mi 43 | cpu: 300m 44 | requests: 45 | memory: 300Mi 46 | cpu: 300m 47 | image: jitsi/prosody:stable-4548-1 48 | imagePullPolicy: Always 49 | ports: 50 | - name: metrics 51 | containerPort: 5280 52 | readinessProbe: 53 | ## the command that is called obeys standard exit codes 54 | exec: 55 | command: 56 | - prosodyctl 57 | - --config 58 | - /config/prosody.cfg.lua 59 | - status 60 | volumeMounts: 61 | # add-ons that allow exporting of metrics to prometheus (mod_prometheus.lua) 62 | # or enrich the available stats (mod_measure_stanza_counts.lua, 63 | # mod_measure_client_presence.lua) 64 | - name: prosody 65 | mountPath: /prosody-plugins-custom/mod_prometheus.lua 66 | subPath: mod_prometheus.lua 67 | - name: prosody 68 | mountPath: /usr/lib/prosody/modules/mod_measure_stanza_counts.lua 69 | subPath: mod_measure_stanza_counts.lua 70 | - name: prosody 71 | mountPath: /usr/lib/prosody/modules/mod_measure_client_presence.lua 72 | subPath: mod_measure_client_presence.lua 73 | - name: prosody 74 | mountPath: /defaults/conf.d/jitsi-meet.cfg.lua 75 | subPath: jitsi-meet.cfg.lua 76 | env: 77 | - name: XMPP_DOMAIN 78 | value: meet.jitsi 79 | - name: XMPP_AUTH_DOMAIN 80 | value: auth.meet.jitsi 81 | - name: XMPP_MUC_DOMAIN 82 | value: muc.meet.jitsi 83 | - name: XMPP_INTERNAL_MUC_DOMAIN 84 | value: internal-muc.meet.jitsi 85 | - name: XMPP_MUC_MODULES 86 | value: muc_meeting_id,muc_domain_mapper 87 | - name: JICOFO_COMPONENT_SECRET 88 | valueFrom: 89 | secretKeyRef: 90 | name: jitsi-config 91 | key: JICOFO_COMPONENT_SECRET 92 | - name: JVB_AUTH_USER 93 | value: jvb 94 | - name: JVB_AUTH_PASSWORD 95 | valueFrom: 96 | secretKeyRef: 97 | name: jitsi-config 98 | key: JVB_AUTH_PASSWORD 99 | - name: JICOFO_AUTH_USER 100 | value: focus 101 | - name: JICOFO_AUTH_PASSWORD 102 | valueFrom: 103 | secretKeyRef: 104 | name: jitsi-config 105 | key: JICOFO_AUTH_PASSWORD 106 | - name: TZ 107 | value: Europe/Berlin 108 | - name: JVB_TCP_HARVESTER_DISABLED 109 | value: "true" 110 | # activate add-ons that enrich the available stats of prosody 111 | - name: GLOBAL_MODULES 112 | value: prometheus,measure_stanza_counts,measure_client_presence 113 | # config to allow exporting metrics in prometheus format by prometheus add-on 114 | - name: GLOBAL_CONFIG 115 | value: statistics = "internal";\nstatistics_interval = 15; 116 | - name: TURNCREDENTIALS_SECRET 117 | valueFrom: 118 | secretKeyRef: 119 | name: jitsi-config 120 | key: TURNCREDENTIALS_SECRET 121 | - name: TURN_HOST 122 | valueFrom: 123 | secretKeyRef: 124 | name: jitsi-config 125 | key: TURN_HOST 126 | - name: STUN_PORT 127 | valueFrom: 128 | secretKeyRef: 129 | name: jitsi-config 130 | key: STUN_PORT 131 | - name: TURN_PORT 132 | valueFrom: 133 | secretKeyRef: 134 | name: jitsi-config 135 | key: TURN_PORT 136 | - name: TURNS_PORT 137 | valueFrom: 138 | secretKeyRef: 139 | name: jitsi-config 140 | key: TURNS_PORT 141 | 142 | -------------------------------------------------------------------------------- /base/jitsi-shard/prosody-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | service: prosody 7 | name: prosody 8 | spec: 9 | ports: 10 | - name: "5222" 11 | port: 5222 12 | targetPort: 5222 13 | - name: "5280" 14 | port: 5280 15 | targetPort: 5280 16 | - name: "5347" 17 | port: 5347 18 | targetPort: 5347 19 | selector: 20 | k8s-app: prosody 21 | shard: "0" 22 | -------------------------------------------------------------------------------- /base/jitsi-shard/web-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: web 7 | name: web 8 | spec: 9 | replicas: 1 # one web instance per shard 10 | strategy: 11 | type: RollingUpdate 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 0 15 | selector: 16 | matchLabels: 17 | k8s-app: web 18 | template: 19 | metadata: 20 | labels: 21 | k8s-app: web 22 | spec: 23 | volumes: 24 | - name: web 25 | configMap: 26 | name: web 27 | items: 28 | - key: welcomePageAdditionalContent.html 29 | path: welcomePageAdditionalContent.html 30 | - key: plugin.head.html 31 | path: plugin.head.html 32 | - key: config.js 33 | path: config.js 34 | - key: interface_config.js 35 | path: interface_config.js 36 | containers: 37 | - name: web 38 | resources: 39 | limits: 40 | memory: 300Mi 41 | cpu: 400m 42 | requests: 43 | memory: 300Mi 44 | cpu: 400m 45 | image: jitsi/web:stable-4548-1 46 | imagePullPolicy: Always 47 | readinessProbe: 48 | httpGet: 49 | port: 80 50 | volumeMounts: 51 | - name: web 52 | mountPath: /usr/share/jitsi-meet/static/welcomePageAdditionalContent.html 53 | subPath: welcomePageAdditionalContent.html 54 | - name: web 55 | mountPath: /usr/share/jitsi-meet/plugin.head.html 56 | subPath: plugin.head.html 57 | - name: web 58 | mountPath: /defaults/config.js 59 | subPath: config.js 60 | - name: web 61 | mountPath: /defaults/interface_config.js 62 | subPath: interface_config.js 63 | env: 64 | - name: DISABLE_HTTPS 65 | value: "1" 66 | - name: HTTP_PORT 67 | value: "80" 68 | - name: XMPP_SERVER 69 | value: prosody 70 | - name: JICOFO_AUTH_USER 71 | value: focus 72 | - name: XMPP_DOMAIN 73 | value: meet.jitsi 74 | - name: XMPP_AUTH_DOMAIN 75 | value: auth.meet.jitsi 76 | - name: XMPP_INTERNAL_MUC_DOMAIN 77 | value: internal-muc.meet.jitsi 78 | - name: XMPP_BOSH_URL_BASE 79 | value: http://prosody:5280 80 | - name: XMPP_MUC_DOMAIN 81 | value: muc.meet.jitsi 82 | - name: TZ 83 | value: Europe/Berlin 84 | - name: JVB_TCP_HARVESTER_DISABLED 85 | value: "true" 86 | -------------------------------------------------------------------------------- /base/jitsi/jitsi-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: jitsi 5 | -------------------------------------------------------------------------------- /base/jitsi/jitsi-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | namespace: jitsi 5 | name: jitsi-config 6 | type: Opaque 7 | data: 8 | JICOFO_COMPONENT_SECRET: # replace with valid base64 secret 9 | JICOFO_AUTH_PASSWORD: # replace with valid base64 secret 10 | JVB_AUTH_PASSWORD: # replace with valid base64 secret 11 | JVB_STUN_SERVERS: # replace with valid base64 secret 12 | TURNCREDENTIALS_SECRET: # replace with valid base64 secret 13 | TURN_HOST: # replace with valid base64 secret 14 | STUN_PORT: # replace with valid base64 secret 15 | TURN_PORT: # replace with valid base64 secret 16 | TURNS_PORT: # replace with valid base64 secret 17 | -------------------------------------------------------------------------------- /base/jitsi/jvb-entrypoint-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-entrypoint 6 | data: 7 | entrypoint.sh: | 8 | #!/bin/bash 9 | set -euo pipefail 10 | 11 | # both jq and curl are needed for shutdown hook 12 | apt-dpkg-wrap apt-get update && apt-dpkg-wrap apt-get -y install curl jq 13 | 14 | # JVB baseport can be passed to this script 15 | if [[ "$1" =~ ^[0-9]+$ ]]; then 16 | BASE_PORT=$1 17 | shift 18 | else 19 | BASE_PORT=30300 20 | fi 21 | 22 | # add jvb ID to the base port (e.g. 30300 + 1 = 30301) 23 | export JVB_PORT=$(($BASE_PORT+${HOSTNAME##*-})) 24 | echo "JVB_PORT=$JVB_PORT" 25 | 26 | echo "Allowing shutdown of JVB via Rest from localhost..." 27 | echo "org.jitsi.videobridge.ENABLE_REST_SHUTDOWN=true" >> /defaults/sip-communicator.properties 28 | echo "org.jitsi.videobridge.shutdown.ALLOWED_SOURCE_REGEXP=127.0.0.1" >> /defaults/sip-communicator.properties 29 | 30 | echo "org.ice4j.ice.harvest.DISABLE_AWS_HARVESTER=true" >> /defaults/sip-communicator.properties 31 | 32 | exec "$@" 33 | 34 | -------------------------------------------------------------------------------- /base/jitsi/jvb-shutdown-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-shutdown 6 | data: 7 | graceful_shutdown.sh: | 8 | #!/bin/bash 9 | # 10 | # 1. The script issues shutdown command to the bridge over REST API. 11 | # If HTTP status code other than 200 is returned then it exits with 1. 12 | # 2. If the code is ok then it checks if the bridge has exited. 13 | # 3. If not then it polls bridge statistics until conference count drops to 0. 14 | # 4. Gives some time for the bridge to shutdown. If it does not quit after that 15 | # time then it kills the process. If the process was successfully killed 0 is 16 | # returned and 1 otherwise. 17 | # 18 | # Arguments: 19 | # "-p"(mandatory) the PID of jitsi Videobridge process 20 | # "-h"("http://localhost:8080" by default) REST requests host URI part 21 | # "-t"("25" by default) number of second we we for the bridge to shutdown 22 | # gracefully after conference count drops to 0 23 | # "-s"(disabled by default) enable silent mode - no info output 24 | # 25 | # NOTE: script depends on the tools jq, used to parse json, and curl 26 | # 27 | 28 | # Initialize arguments 29 | hostUrl="http://localhost:8080" 30 | timeout=25 31 | verbose=1 32 | 33 | # Parse arguments 34 | OPTIND=1 35 | while getopts "p:h:t:s" opt; do 36 | case "$opt" in 37 | p) 38 | pid=$OPTARG 39 | ;; 40 | h) 41 | hostUrl=$OPTARG 42 | ;; 43 | t) 44 | timeout=$OPTARG 45 | ;; 46 | s) 47 | verbose=0 48 | ;; 49 | *) 50 | echo "usage: $0 [-p] [-h] [-t] [-s]" >&2 51 | exit 1 52 | ;; 53 | esac 54 | done 55 | shift "$((OPTIND-1))" 56 | 57 | # Get PID from supervisor if no PID was provided 58 | if [ "$pid" = "" ] ;then 59 | pid=`s6-svstat -o pid /var/run/s6/services/jvb` 60 | fi 61 | 62 | # Check if PID is a number 63 | re='^[0-9]+$' 64 | if ! [[ $pid =~ $re ]] ; then 65 | echo "error: PID is not a number" >&2; exit 1 66 | fi 67 | 68 | # Returns conference count by calling JVB REST statistics API and extracting 69 | # conference count from JSON stats text returned. 70 | function getConferenceCount { 71 | # Total number of conferences minus the empty conferences 72 | curl -s "$hostUrl/colibri/stats"| jq '.conferences - .conference_sizes[0]' 73 | } 74 | 75 | # Prints info messages 76 | function printInfo { 77 | if [ "$verbose" == "1" ] 78 | then 79 | echo "$@" 80 | fi 81 | } 82 | 83 | # Prints errors 84 | function printError { 85 | echo "$@" 1>&2 86 | } 87 | 88 | shutdownStatus=`curl -s -o /dev/null -H "Content-Type: application/json" -d '{ "graceful-shutdown": "true" }' -w "%{http_code}" "$hostUrl/colibri/shutdown"` 89 | if [ "$shutdownStatus" == "200" ] 90 | then 91 | printInfo "Graceful shutdown started" 92 | 93 | # turn off automatic restart of JVB service 94 | s6-svc -O /var/run/s6/services/jvb 95 | 96 | confCount=`getConferenceCount` 97 | while [[ $confCount -gt 0 ]] ; do 98 | printInfo "There are still $confCount conferences" 99 | sleep 10 100 | confCount=`getConferenceCount` 101 | done 102 | 103 | sleep 5 104 | 105 | jvbAvailable=`curl -s -o /dev/null -w "%{http_code}" "$hostUrl/colibri/stats"` 106 | if [ "$jvbAvailable" == "200" ] 107 | then 108 | printInfo "It is still running, lets give it $timeout seconds" 109 | sleep $timeout 110 | jvbAvailable=`curl -s -o /dev/null -w "%{http_code}" "$hostUrl/colibri/stats"` 111 | if [ "$jvbAvailable" == "200" ] 112 | then 113 | printError "Bridge did not exit after $timeout sec - killing $pid" 114 | fi 115 | fi 116 | kill $pid 117 | 118 | # check for 3 seconds if we managed to kill 119 | for I in 1 2 3 120 | do 121 | if ps -p $pid > /dev/null 2>&1 122 | then 123 | sleep 1 124 | fi 125 | done 126 | if ps -p $pid > /dev/null 2>&1 127 | then 128 | printError "Failed to kill $pid" 129 | printError "Sending force kill to $pid" 130 | kill -9 $pid 131 | if ps -p $pid > /dev/null 2>&1 132 | then 133 | printError "Failed to force kill $pid, giving up." 134 | exit 1 135 | fi 136 | fi 137 | printInfo "Bridge shutdown OK" 138 | exit 0 139 | else 140 | printError "Invalid HTTP status for shutdown request: $shutdownStatus" 141 | exit 1 142 | fi 143 | 144 | -------------------------------------------------------------------------------- /base/jitsi/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - jitsi-namespace.yaml 6 | - web-service.yaml 7 | - jitsi-secret.yaml 8 | - prosody-configmap.yaml 9 | - web-configmap.yaml 10 | - jvb-entrypoint-configmap.yaml 11 | - jvb-shutdown-configmap.yaml 12 | 13 | commonLabels: 14 | scope: jitsi 15 | -------------------------------------------------------------------------------- /base/jitsi/prosody-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | namespace: jitsi 5 | name: prosody 6 | data: 7 | mod_prometheus.lua: | 8 | -- Log statistics to Prometheus 9 | -- 10 | -- Copyright (C) 2014 Daurnimator 11 | -- Copyright (C) 2018 Emmanuel Gil Peyrot 12 | -- 13 | -- This module is MIT/X11 licensed. 14 | module:set_global(); 15 | local tostring = tostring; 16 | local t_insert = table.insert; 17 | local t_concat = table.concat; 18 | local socket = require "socket"; 19 | local get_stats = require "core.statsmanager".get_stats; 20 | local function escape(text) 21 | return text:gsub("\\", "\\\\"):gsub("\"", "\\\""):gsub("\n", "\\n"); 22 | end 23 | local function escape_name(name) 24 | return name:gsub("[^A-Za-z0-9_]", "_"):gsub("^[^A-Za-z_]", "_%1"); 25 | end 26 | local function get_timestamp() 27 | -- Using LuaSocket for that because os.time() only has second precision. 28 | return math.floor(socket.gettime() * 1000); 29 | end 30 | local function repr_help(metric, docstring) 31 | docstring = docstring:gsub("\\", "\\\\"):gsub("\n", "\\n"); 32 | return "# HELP "..escape_name(metric).." "..docstring.."\n"; 33 | end 34 | -- local allowed_types = { counter = true, gauge = true, histogram = true, summary = true, untyped = true }; 35 | -- local allowed_types = { "counter", "gauge", "histogram", "summary", "untyped" }; 36 | local function repr_type(metric, type_) 37 | -- if not allowed_types:contains(type_) then 38 | -- return; 39 | -- end 40 | return "# TYPE "..escape_name(metric).." "..type_.."\n"; 41 | end 42 | local function repr_label(key, value) 43 | return key.."=\""..escape(value).."\""; 44 | end 45 | local function repr_labels(labels) 46 | local values = {} 47 | for key, value in pairs(labels) do 48 | t_insert(values, repr_label(escape_name(key), escape(value))); 49 | end 50 | if #values == 0 then 51 | return ""; 52 | end 53 | return "{"..t_concat(values, ", ").."}"; 54 | end 55 | local function repr_sample(metric, labels, value, timestamp) 56 | return escape_name(metric)..repr_labels(labels).." "..value.." "..timestamp.."\n"; 57 | end 58 | local allowed_extras = { min = true, max = true, average = true }; 59 | local function insert_extras(data, key, name, timestamp, extra) 60 | if not extra then 61 | return false; 62 | end 63 | local has_extra = false; 64 | for extra_name in pairs(allowed_extras) do 65 | if extra[extra_name] then 66 | local field = { 67 | 68 | value = extra[extra_name], 69 | labels = { 70 | 71 | ["type"] = name, 72 | field = extra_name, 73 | }, 74 | typ = "gauge"; 75 | timestamp = timestamp, 76 | }; 77 | t_insert(data[key], field); 78 | has_extra = true; 79 | end 80 | end 81 | return has_extra; 82 | end 83 | local function parse_stats() 84 | local timestamp = tostring(get_timestamp()); 85 | local data = {}; 86 | local stats, changed_only, extras = get_stats(); 87 | for stat, value in pairs(stats) do 88 | -- module:log("debug", "changed_stats[%q] = %s", stat, tostring(value)); 89 | local extra = extras[stat]; 90 | local host, sect, name, typ = stat:match("^/([^/]+)/([^/]+)/(.+):(%a+)$"); 91 | if host == nil then 92 | sect, name, typ = stat:match("^([^.]+)%.(.+):(%a+)$"); 93 | elseif host == "*" then 94 | host = nil; 95 | end 96 | if sect:find("^mod_measure_.") then 97 | sect = sect:sub(13); 98 | elseif sect:find("^mod_statistics_.") then 99 | sect = sect:sub(16); 100 | end 101 | local key = escape_name("prosody_"..sect); 102 | local field = { 103 | 104 | value = value, 105 | labels = { ["type"] = name}, 106 | -- TODO: Use the other types where it makes sense. 107 | typ = (typ == "rate" and "counter" or "gauge"), 108 | timestamp = timestamp, 109 | }; 110 | if host then 111 | field.labels.host = host; 112 | end 113 | if data[key] == nil then 114 | data[key] = {}; 115 | end 116 | if not insert_extras(data, key, name, timestamp, extra) then 117 | t_insert(data[key], field); 118 | end 119 | end 120 | return data; 121 | end 122 | local function get_metrics(event) 123 | local response = event.response; 124 | response.headers.content_type = "text/plain; version=0.0.4"; 125 | local answer = {}; 126 | for key, fields in pairs(parse_stats()) do 127 | t_insert(answer, repr_help(key, "TODO: add a description here.")); 128 | t_insert(answer, repr_type(key, fields[1].typ)); 129 | for _, field in pairs(fields) do 130 | t_insert(answer, repr_sample(key, field.labels, field.value, field.timestamp)); 131 | end 132 | end 133 | return t_concat(answer, ""); 134 | end 135 | function module.add_host(module) 136 | module:depends "http"; 137 | module:provides("http", { 138 | 139 | default_path = "metrics"; 140 | route = { 141 | 142 | GET = get_metrics; 143 | }; 144 | }); 145 | end 146 | mod_measure_stanza_counts.lua: | 147 | module:set_global() 148 | 149 | local filters = require"util.filters"; 150 | 151 | local stanza_kinds = { message = true, presence = true, iq = true }; 152 | 153 | local function rate(measures, dir) 154 | return function (stanza, session) 155 | measures[dir](); 156 | measures[dir .. "_" .. session.type](); 157 | if stanza.attr and not stanza.attr.xmlns and stanza_kinds[stanza.name] then 158 | measures[dir .. "_" .. session.type .. "_" .. stanza.name](); 159 | end 160 | return stanza; 161 | end 162 | end 163 | 164 | local measures = setmetatable({}, { 165 | __index = function (t, name) 166 | local m = module:measure(name, "rate"); 167 | t[name] = m; 168 | return m; 169 | end 170 | }); 171 | 172 | local function measure_stanza_counts(session) 173 | filters.add_filter(session, "stanzas/in", rate(measures, "incoming")); 174 | filters.add_filter(session, "stanzas/out", rate(measures, "outgoing")); 175 | end 176 | 177 | filters.add_filter_hook(measure_stanza_counts); 178 | 179 | mod_measure_client_presence.lua: | 180 | module:set_global(); 181 | 182 | local measure = require"core.statsmanager".measure; 183 | 184 | local valid_shows = { 185 | available = true, 186 | chat = true, 187 | away = true, 188 | dnd = true, 189 | xa = true, 190 | unavailable = true, 191 | } 192 | 193 | local counters = { 194 | available = measure("amount", "client_presence.available"), 195 | chat = measure("amount", "client_presence.chat"), 196 | away = measure("amount", "client_presence.away"), 197 | dnd = measure("amount", "client_presence.dnd"), 198 | xa = measure("amount", "client_presence.xa"), 199 | unavailable = measure("amount", "client_presence.unavailable"), 200 | invalid = measure("amount", "client_presence.invalid"); 201 | }; 202 | 203 | module:hook("stats-update", function () 204 | local buckets = { 205 | available = 0, 206 | chat = 0, 207 | away = 0, 208 | dnd = 0, 209 | xa = 0, 210 | unavailable = 0, 211 | invalid = 0, 212 | }; 213 | for _, session in pairs(full_sessions) do 214 | local status = "unavailable"; 215 | if session.presence then 216 | status = session.presence:get_child_text("show") or "available"; 217 | end 218 | if valid_shows[status] ~= nil then 219 | buckets[status] = buckets[status] + 1; 220 | else 221 | buckets.invalid = buckets.invalid + 1; 222 | end 223 | end 224 | for bucket, count in pairs(buckets) do 225 | counters[bucket](count) 226 | end 227 | end) 228 | 229 | jitsi-meet.cfg.lua: |- 230 | admins = { "{{ .Env.JICOFO_AUTH_USER }}@{{ .Env.XMPP_AUTH_DOMAIN }}" } 231 | plugin_paths = { "/prosody-plugins/", "/prosody-plugins-custom" } 232 | http_default_host = "{{ .Env.XMPP_DOMAIN }}" 233 | 234 | muc_mapper_domain_base = "{{ .Env.XMPP_DOMAIN }}" 235 | 236 | turncredentials_secret = "{{ .Env.TURNCREDENTIALS_SECRET }}"; 237 | 238 | turncredentials = { 239 | { type = "stun", host = "{{ .Env.TURN_HOST }}", port = "{{ .Env.STUN_PORT }}" }, 240 | { type = "turn", host = "{{ .Env.TURN_HOST }}", port = "{{ .Env.TURN_PORT }}", transport = "udp" }, 241 | { type = "turns", host = "{{ .Env.TURN_HOST }}", port = "{{ .Env.TURNS_PORT }}", transport = "tcp" } 242 | }; 243 | 244 | cross_domain_bosh = false; 245 | consider_bosh_secure = true; 246 | 247 | {{ $ENABLE_AUTH := .Env.ENABLE_AUTH | default "0" | toBool }} 248 | {{ $AUTH_TYPE := .Env.AUTH_TYPE | default "internal" }} 249 | {{ $JWT_ASAP_KEYSERVER := .Env.JWT_ASAP_KEYSERVER | default "" }} 250 | {{ $JWT_ALLOW_EMPTY := .Env.JWT_ALLOW_EMPTY | default "0" | toBool }} 251 | {{ $JWT_AUTH_TYPE := .Env.JWT_AUTH_TYPE | default "token" }} 252 | {{ $JWT_TOKEN_AUTH_MODULE := .Env.JWT_TOKEN_AUTH_MODULE | default "token_verification" }} 253 | 254 | {{ if and $ENABLE_AUTH (eq $AUTH_TYPE "jwt") .Env.JWT_ACCEPTED_ISSUERS }} 255 | asap_accepted_issuers = { "{{ join "\",\"" (splitList "," .Env.JWT_ACCEPTED_ISSUERS) }}" } 256 | {{ end }} 257 | 258 | {{ if and $ENABLE_AUTH (eq $AUTH_TYPE "jwt") .Env.JWT_ACCEPTED_AUDIENCES }} 259 | asap_accepted_audiences = { "{{ join "\",\"" (splitList "," .Env.JWT_ACCEPTED_AUDIENCES) }}" } 260 | {{ end }} 261 | 262 | VirtualHost "{{ .Env.XMPP_DOMAIN }}" 263 | {{ if $ENABLE_AUTH }} 264 | {{ if eq $AUTH_TYPE "jwt" }} 265 | authentication = "{{ $JWT_AUTH_TYPE }}" 266 | app_id = "{{ .Env.JWT_APP_ID }}" 267 | app_secret = "{{ .Env.JWT_APP_SECRET }}" 268 | allow_empty_token = {{ if $JWT_ALLOW_EMPTY }}true{{ else }}false{{ end }} 269 | {{ if $JWT_ASAP_KEYSERVER }} 270 | asap_key_server = "{{ .Env.JWT_ASAP_KEYSERVER }}" 271 | {{ end }} 272 | 273 | {{ else if eq $AUTH_TYPE "ldap" }} 274 | authentication = "cyrus" 275 | cyrus_application_name = "xmpp" 276 | allow_unencrypted_plain_auth = true 277 | {{ else if eq $AUTH_TYPE "internal" }} 278 | authentication = "internal_hashed" 279 | {{ end }} 280 | {{ else }} 281 | authentication = "anonymous" 282 | {{ end }} 283 | ssl = { 284 | key = "/config/certs/{{ .Env.XMPP_DOMAIN }}.key"; 285 | certificate = "/config/certs/{{ .Env.XMPP_DOMAIN }}.crt"; 286 | } 287 | modules_enabled = { 288 | "bosh"; 289 | "pubsub"; 290 | "ping"; 291 | "speakerstats"; 292 | "turncredentials"; 293 | "conference_duration"; 294 | {{ if .Env.XMPP_MODULES }} 295 | "{{ join "\";\n\"" (splitList "," .Env.XMPP_MODULES) }}"; 296 | {{ end }} 297 | {{ if and $ENABLE_AUTH (eq $AUTH_TYPE "ldap") }} 298 | "auth_cyrus"; 299 | {{end}} 300 | } 301 | 302 | speakerstats_component = "speakerstats.{{ .Env.XMPP_DOMAIN }}" 303 | conference_duration_component = "conferenceduration.{{ .Env.XMPP_DOMAIN }}" 304 | 305 | c2s_require_encryption = false 306 | 307 | {{ if and $ENABLE_AUTH (.Env.ENABLE_GUESTS | default "0" | toBool) }} 308 | VirtualHost "{{ .Env.XMPP_GUEST_DOMAIN }}" 309 | authentication = "anonymous" 310 | c2s_require_encryption = false 311 | {{ end }} 312 | 313 | VirtualHost "{{ .Env.XMPP_AUTH_DOMAIN }}" 314 | ssl = { 315 | key = "/config/certs/{{ .Env.XMPP_AUTH_DOMAIN }}.key"; 316 | certificate = "/config/certs/{{ .Env.XMPP_AUTH_DOMAIN }}.crt"; 317 | } 318 | authentication = "internal_hashed" 319 | 320 | {{ if .Env.XMPP_RECORDER_DOMAIN }} 321 | VirtualHost "{{ .Env.XMPP_RECORDER_DOMAIN }}" 322 | modules_enabled = { 323 | "ping"; 324 | } 325 | authentication = "internal_hashed" 326 | {{ end }} 327 | 328 | Component "{{ .Env.XMPP_INTERNAL_MUC_DOMAIN }}" "muc" 329 | modules_enabled = { 330 | "ping"; 331 | {{ if .Env.XMPP_INTERNAL_MUC_MODULES }} 332 | "{{ join "\";\n\"" (splitList "," .Env.XMPP_INTERNAL_MUC_MODULES) }}"; 333 | {{ end }} 334 | } 335 | storage = "memory" 336 | muc_room_cache_size = 1000 337 | muc_room_locking = false 338 | muc_room_default_public_jids = true 339 | 340 | Component "{{ .Env.XMPP_MUC_DOMAIN }}" "muc" 341 | storage = "memory" 342 | modules_enabled = { 343 | {{ if .Env.XMPP_MUC_MODULES }} 344 | "{{ join "\";\n\"" (splitList "," .Env.XMPP_MUC_MODULES) }}"; 345 | {{ end }} 346 | {{ if and $ENABLE_AUTH (eq $AUTH_TYPE "jwt") }} 347 | "{{ $JWT_TOKEN_AUTH_MODULE }}"; 348 | {{ end }} 349 | } 350 | muc_room_locking = false 351 | muc_room_default_public_jids = true 352 | 353 | Component "focus.{{ .Env.XMPP_DOMAIN }}" 354 | component_secret = "{{ .Env.JICOFO_COMPONENT_SECRET }}" 355 | 356 | Component "speakerstats.{{ .Env.XMPP_DOMAIN }}" "speakerstats_component" 357 | muc_component = "{{ .Env.XMPP_MUC_DOMAIN }}" 358 | 359 | Component "conferenceduration.{{ .Env.XMPP_DOMAIN }}" "conference_duration_component" 360 | muc_component = "{{ .Env.XMPP_MUC_DOMAIN }}" 361 | 362 | 363 | 364 | -------------------------------------------------------------------------------- /base/jitsi/web-service.yaml: -------------------------------------------------------------------------------- 1 | # this is the entrypoint service for the HAProxies 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | namespace: jitsi 6 | labels: 7 | service: web 8 | name: web 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: "http" 13 | port: 80 14 | targetPort: 80 15 | selector: 16 | k8s-app: web 17 | -------------------------------------------------------------------------------- /base/ops/cert-manager/cluster-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt 5 | namespace: cert-manager 6 | spec: 7 | acme: 8 | # The ACME server URL 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: letsencrypt 15 | # Enable the HTTP-01 challenge provider 16 | solvers: 17 | # An empty 'selector' means that this solver matches all domains 18 | - http01: 19 | ingress: 20 | class: nginx 21 | -------------------------------------------------------------------------------- /base/ops/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - cert-manager.yaml 6 | - cluster-issuer.yaml 7 | -------------------------------------------------------------------------------- /base/ops/dashboard/dashboard-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: admin-user 12 | namespace: kubernetes-dashboard 13 | -------------------------------------------------------------------------------- /base/ops/dashboard/dashboard-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kubernetes-dashboard 6 | -------------------------------------------------------------------------------- /base/ops/dashboard/kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Source: https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml 16 | 17 | apiVersion: v1 18 | kind: Namespace 19 | metadata: 20 | name: kubernetes-dashboard 21 | 22 | --- 23 | 24 | apiVersion: v1 25 | kind: ServiceAccount 26 | metadata: 27 | labels: 28 | k8s-app: kubernetes-dashboard 29 | name: kubernetes-dashboard 30 | namespace: kubernetes-dashboard 31 | 32 | --- 33 | 34 | kind: Service 35 | apiVersion: v1 36 | metadata: 37 | labels: 38 | k8s-app: kubernetes-dashboard 39 | name: kubernetes-dashboard 40 | namespace: kubernetes-dashboard 41 | spec: 42 | ports: 43 | - port: 443 44 | targetPort: 8443 45 | selector: 46 | k8s-app: kubernetes-dashboard 47 | 48 | --- 49 | 50 | apiVersion: v1 51 | kind: Secret 52 | metadata: 53 | labels: 54 | k8s-app: kubernetes-dashboard 55 | name: kubernetes-dashboard-certs 56 | namespace: kubernetes-dashboard 57 | type: Opaque 58 | 59 | --- 60 | 61 | apiVersion: v1 62 | kind: Secret 63 | metadata: 64 | labels: 65 | k8s-app: kubernetes-dashboard 66 | name: kubernetes-dashboard-csrf 67 | namespace: kubernetes-dashboard 68 | type: Opaque 69 | data: 70 | csrf: "" 71 | 72 | --- 73 | 74 | apiVersion: v1 75 | kind: Secret 76 | metadata: 77 | labels: 78 | k8s-app: kubernetes-dashboard 79 | name: kubernetes-dashboard-key-holder 80 | namespace: kubernetes-dashboard 81 | type: Opaque 82 | 83 | --- 84 | 85 | kind: ConfigMap 86 | apiVersion: v1 87 | metadata: 88 | labels: 89 | k8s-app: kubernetes-dashboard 90 | name: kubernetes-dashboard-settings 91 | namespace: kubernetes-dashboard 92 | 93 | --- 94 | 95 | kind: Role 96 | apiVersion: rbac.authorization.k8s.io/v1 97 | metadata: 98 | labels: 99 | k8s-app: kubernetes-dashboard 100 | name: kubernetes-dashboard 101 | namespace: kubernetes-dashboard 102 | rules: 103 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 104 | - apiGroups: [""] 105 | resources: ["secrets"] 106 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] 107 | verbs: ["get", "update", "delete"] 108 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 109 | - apiGroups: [""] 110 | resources: ["configmaps"] 111 | resourceNames: ["kubernetes-dashboard-settings"] 112 | verbs: ["get", "update"] 113 | # Allow Dashboard to get metrics. 114 | - apiGroups: [""] 115 | resources: ["services"] 116 | resourceNames: ["heapster", "dashboard-metrics-scraper"] 117 | verbs: ["proxy"] 118 | - apiGroups: [""] 119 | resources: ["services/proxy"] 120 | resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] 121 | verbs: ["get"] 122 | 123 | --- 124 | 125 | kind: ClusterRole 126 | apiVersion: rbac.authorization.k8s.io/v1 127 | metadata: 128 | labels: 129 | k8s-app: kubernetes-dashboard 130 | name: kubernetes-dashboard 131 | rules: 132 | # Allow Metrics Scraper to get metrics from the Metrics server 133 | - apiGroups: ["metrics.k8s.io"] 134 | resources: ["pods", "nodes"] 135 | verbs: ["get", "list", "watch"] 136 | 137 | --- 138 | 139 | apiVersion: rbac.authorization.k8s.io/v1 140 | kind: RoleBinding 141 | metadata: 142 | labels: 143 | k8s-app: kubernetes-dashboard 144 | name: kubernetes-dashboard 145 | namespace: kubernetes-dashboard 146 | roleRef: 147 | apiGroup: rbac.authorization.k8s.io 148 | kind: Role 149 | name: kubernetes-dashboard 150 | subjects: 151 | - kind: ServiceAccount 152 | name: kubernetes-dashboard 153 | namespace: kubernetes-dashboard 154 | 155 | --- 156 | 157 | apiVersion: rbac.authorization.k8s.io/v1 158 | kind: ClusterRoleBinding 159 | metadata: 160 | name: kubernetes-dashboard 161 | roleRef: 162 | apiGroup: rbac.authorization.k8s.io 163 | kind: ClusterRole 164 | name: kubernetes-dashboard 165 | subjects: 166 | - kind: ServiceAccount 167 | name: kubernetes-dashboard 168 | namespace: kubernetes-dashboard 169 | 170 | --- 171 | 172 | kind: Deployment 173 | apiVersion: apps/v1 174 | metadata: 175 | labels: 176 | k8s-app: kubernetes-dashboard 177 | name: kubernetes-dashboard 178 | namespace: kubernetes-dashboard 179 | spec: 180 | replicas: 1 181 | revisionHistoryLimit: 10 182 | selector: 183 | matchLabels: 184 | k8s-app: kubernetes-dashboard 185 | template: 186 | metadata: 187 | labels: 188 | k8s-app: kubernetes-dashboard 189 | spec: 190 | containers: 191 | - name: kubernetes-dashboard 192 | image: kubernetesui/dashboard:v2.0.0 193 | imagePullPolicy: Always 194 | ports: 195 | - containerPort: 8443 196 | protocol: TCP 197 | args: 198 | - --auto-generate-certificates 199 | - --namespace=kubernetes-dashboard 200 | # Uncomment the following line to manually specify Kubernetes API server Host 201 | # If not specified, Dashboard will attempt to auto discover the API server and connect 202 | # to it. Uncomment only if the default does not work. 203 | # - --apiserver-host=http://my-address:port 204 | volumeMounts: 205 | - name: kubernetes-dashboard-certs 206 | mountPath: /certs 207 | # Create on-disk volume to store exec logs 208 | - mountPath: /tmp 209 | name: tmp-volume 210 | livenessProbe: 211 | httpGet: 212 | scheme: HTTPS 213 | path: / 214 | port: 8443 215 | initialDelaySeconds: 30 216 | timeoutSeconds: 30 217 | securityContext: 218 | allowPrivilegeEscalation: false 219 | readOnlyRootFilesystem: true 220 | runAsUser: 1001 221 | runAsGroup: 2001 222 | volumes: 223 | - name: kubernetes-dashboard-certs 224 | secret: 225 | secretName: kubernetes-dashboard-certs 226 | - name: tmp-volume 227 | emptyDir: {} 228 | serviceAccountName: kubernetes-dashboard 229 | nodeSelector: 230 | "kubernetes.io/os": linux 231 | # Comment the following tolerations if Dashboard must not be deployed on master 232 | tolerations: 233 | - key: node-role.kubernetes.io/master 234 | effect: NoSchedule 235 | 236 | --- 237 | 238 | kind: Service 239 | apiVersion: v1 240 | metadata: 241 | labels: 242 | k8s-app: dashboard-metrics-scraper 243 | name: dashboard-metrics-scraper 244 | namespace: kubernetes-dashboard 245 | spec: 246 | ports: 247 | - port: 8000 248 | targetPort: 8000 249 | selector: 250 | k8s-app: dashboard-metrics-scraper 251 | 252 | --- 253 | 254 | kind: Deployment 255 | apiVersion: apps/v1 256 | metadata: 257 | labels: 258 | k8s-app: dashboard-metrics-scraper 259 | name: dashboard-metrics-scraper 260 | namespace: kubernetes-dashboard 261 | spec: 262 | replicas: 1 263 | revisionHistoryLimit: 10 264 | selector: 265 | matchLabels: 266 | k8s-app: dashboard-metrics-scraper 267 | template: 268 | metadata: 269 | labels: 270 | k8s-app: dashboard-metrics-scraper 271 | annotations: 272 | seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' 273 | spec: 274 | containers: 275 | - name: dashboard-metrics-scraper 276 | image: kubernetesui/metrics-scraper:v1.0.4 277 | ports: 278 | - containerPort: 8000 279 | protocol: TCP 280 | livenessProbe: 281 | httpGet: 282 | scheme: HTTP 283 | path: / 284 | port: 8000 285 | initialDelaySeconds: 30 286 | timeoutSeconds: 30 287 | volumeMounts: 288 | - mountPath: /tmp 289 | name: tmp-volume 290 | securityContext: 291 | allowPrivilegeEscalation: false 292 | readOnlyRootFilesystem: true 293 | runAsUser: 1001 294 | runAsGroup: 2001 295 | serviceAccountName: kubernetes-dashboard 296 | nodeSelector: 297 | "kubernetes.io/os": linux 298 | # Comment the following tolerations if Dashboard must not be deployed on master 299 | tolerations: 300 | - key: node-role.kubernetes.io/master 301 | effect: NoSchedule 302 | volumes: 303 | - name: tmp-volume 304 | emptyDir: {} 305 | -------------------------------------------------------------------------------- /base/ops/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - dashboard-cluster-role-binding.yaml 6 | - dashboard-service-account.yaml 7 | - kubernetes-dashboard.yaml 8 | -------------------------------------------------------------------------------- /base/ops/ingress-nginx/ingress-nginx.yaml: -------------------------------------------------------------------------------- 1 | # Source: https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/cloud/deploy.yaml 2 | 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: ingress-nginx 7 | labels: 8 | app.kubernetes.io/name: ingress-nginx 9 | app.kubernetes.io/instance: ingress-nginx 10 | 11 | --- 12 | # Source: ingress-nginx/templates/controller-serviceaccount.yaml 13 | apiVersion: v1 14 | kind: ServiceAccount 15 | metadata: 16 | labels: 17 | helm.sh/chart: ingress-nginx-2.1.0 18 | app.kubernetes.io/name: ingress-nginx 19 | app.kubernetes.io/instance: ingress-nginx 20 | app.kubernetes.io/version: 0.32.0 21 | app.kubernetes.io/managed-by: Helm 22 | app.kubernetes.io/component: controller 23 | name: ingress-nginx 24 | namespace: ingress-nginx 25 | --- 26 | # Source: ingress-nginx/templates/controller-configmap.yaml 27 | apiVersion: v1 28 | kind: ConfigMap 29 | metadata: 30 | labels: 31 | helm.sh/chart: ingress-nginx-2.1.0 32 | app.kubernetes.io/name: ingress-nginx 33 | app.kubernetes.io/instance: ingress-nginx 34 | app.kubernetes.io/version: 0.32.0 35 | app.kubernetes.io/managed-by: Helm 36 | app.kubernetes.io/component: controller 37 | name: ingress-nginx-controller 38 | namespace: ingress-nginx 39 | data: 40 | --- 41 | # Source: ingress-nginx/templates/clusterrole.yaml 42 | apiVersion: rbac.authorization.k8s.io/v1 43 | kind: ClusterRole 44 | metadata: 45 | labels: 46 | helm.sh/chart: ingress-nginx-2.1.0 47 | app.kubernetes.io/name: ingress-nginx 48 | app.kubernetes.io/instance: ingress-nginx 49 | app.kubernetes.io/version: 0.32.0 50 | app.kubernetes.io/managed-by: Helm 51 | name: ingress-nginx 52 | namespace: ingress-nginx 53 | rules: 54 | - apiGroups: 55 | - '' 56 | resources: 57 | - configmaps 58 | - endpoints 59 | - nodes 60 | - pods 61 | - secrets 62 | verbs: 63 | - list 64 | - watch 65 | - apiGroups: 66 | - '' 67 | resources: 68 | - nodes 69 | verbs: 70 | - get 71 | - apiGroups: 72 | - '' 73 | resources: 74 | - services 75 | verbs: 76 | - get 77 | - list 78 | - update 79 | - watch 80 | - apiGroups: 81 | - extensions 82 | - networking.k8s.io # k8s 1.14+ 83 | resources: 84 | - ingresses 85 | verbs: 86 | - get 87 | - list 88 | - watch 89 | - apiGroups: 90 | - '' 91 | resources: 92 | - events 93 | verbs: 94 | - create 95 | - patch 96 | - apiGroups: 97 | - extensions 98 | - networking.k8s.io # k8s 1.14+ 99 | resources: 100 | - ingresses/status 101 | verbs: 102 | - update 103 | - apiGroups: 104 | - networking.k8s.io # k8s 1.14+ 105 | resources: 106 | - ingressclasses 107 | verbs: 108 | - get 109 | - list 110 | - watch 111 | --- 112 | # Source: ingress-nginx/templates/clusterrolebinding.yaml 113 | apiVersion: rbac.authorization.k8s.io/v1 114 | kind: ClusterRoleBinding 115 | metadata: 116 | labels: 117 | helm.sh/chart: ingress-nginx-2.1.0 118 | app.kubernetes.io/name: ingress-nginx 119 | app.kubernetes.io/instance: ingress-nginx 120 | app.kubernetes.io/version: 0.32.0 121 | app.kubernetes.io/managed-by: Helm 122 | name: ingress-nginx 123 | namespace: ingress-nginx 124 | roleRef: 125 | apiGroup: rbac.authorization.k8s.io 126 | kind: ClusterRole 127 | name: ingress-nginx 128 | subjects: 129 | - kind: ServiceAccount 130 | name: ingress-nginx 131 | namespace: ingress-nginx 132 | --- 133 | # Source: ingress-nginx/templates/controller-role.yaml 134 | apiVersion: rbac.authorization.k8s.io/v1 135 | kind: Role 136 | metadata: 137 | labels: 138 | helm.sh/chart: ingress-nginx-2.1.0 139 | app.kubernetes.io/name: ingress-nginx 140 | app.kubernetes.io/instance: ingress-nginx 141 | app.kubernetes.io/version: 0.32.0 142 | app.kubernetes.io/managed-by: Helm 143 | app.kubernetes.io/component: controller 144 | name: ingress-nginx 145 | namespace: ingress-nginx 146 | rules: 147 | - apiGroups: 148 | - '' 149 | resources: 150 | - namespaces 151 | verbs: 152 | - get 153 | - apiGroups: 154 | - '' 155 | resources: 156 | - configmaps 157 | - pods 158 | - secrets 159 | - endpoints 160 | verbs: 161 | - get 162 | - list 163 | - watch 164 | - apiGroups: 165 | - '' 166 | resources: 167 | - services 168 | verbs: 169 | - get 170 | - list 171 | - update 172 | - watch 173 | - apiGroups: 174 | - extensions 175 | - networking.k8s.io # k8s 1.14+ 176 | resources: 177 | - ingresses 178 | verbs: 179 | - get 180 | - list 181 | - watch 182 | - apiGroups: 183 | - extensions 184 | - networking.k8s.io # k8s 1.14+ 185 | resources: 186 | - ingresses/status 187 | verbs: 188 | - update 189 | - apiGroups: 190 | - networking.k8s.io # k8s 1.14+ 191 | resources: 192 | - ingressclasses 193 | verbs: 194 | - get 195 | - list 196 | - watch 197 | - apiGroups: 198 | - '' 199 | resources: 200 | - configmaps 201 | resourceNames: 202 | - ingress-controller-leader-nginx 203 | verbs: 204 | - get 205 | - update 206 | - apiGroups: 207 | - '' 208 | resources: 209 | - configmaps 210 | verbs: 211 | - create 212 | - apiGroups: 213 | - '' 214 | resources: 215 | - endpoints 216 | verbs: 217 | - create 218 | - get 219 | - update 220 | - apiGroups: 221 | - '' 222 | resources: 223 | - events 224 | verbs: 225 | - create 226 | - patch 227 | --- 228 | # Source: ingress-nginx/templates/controller-rolebinding.yaml 229 | apiVersion: rbac.authorization.k8s.io/v1 230 | kind: RoleBinding 231 | metadata: 232 | labels: 233 | helm.sh/chart: ingress-nginx-2.1.0 234 | app.kubernetes.io/name: ingress-nginx 235 | app.kubernetes.io/instance: ingress-nginx 236 | app.kubernetes.io/version: 0.32.0 237 | app.kubernetes.io/managed-by: Helm 238 | app.kubernetes.io/component: controller 239 | name: ingress-nginx 240 | namespace: ingress-nginx 241 | roleRef: 242 | apiGroup: rbac.authorization.k8s.io 243 | kind: Role 244 | name: ingress-nginx 245 | subjects: 246 | - kind: ServiceAccount 247 | name: ingress-nginx 248 | namespace: ingress-nginx 249 | --- 250 | # Source: ingress-nginx/templates/controller-service-webhook.yaml 251 | apiVersion: v1 252 | kind: Service 253 | metadata: 254 | labels: 255 | helm.sh/chart: ingress-nginx-2.1.0 256 | app.kubernetes.io/name: ingress-nginx 257 | app.kubernetes.io/instance: ingress-nginx 258 | app.kubernetes.io/version: 0.32.0 259 | app.kubernetes.io/managed-by: Helm 260 | app.kubernetes.io/component: controller 261 | name: ingress-nginx-controller-admission 262 | namespace: ingress-nginx 263 | spec: 264 | type: ClusterIP 265 | ports: 266 | - name: https-webhook 267 | port: 443 268 | targetPort: webhook 269 | selector: 270 | app.kubernetes.io/name: ingress-nginx 271 | app.kubernetes.io/instance: ingress-nginx 272 | app.kubernetes.io/component: controller 273 | --- 274 | # Source: ingress-nginx/templates/controller-service.yaml 275 | apiVersion: v1 276 | kind: Service 277 | metadata: 278 | labels: 279 | helm.sh/chart: ingress-nginx-2.1.0 280 | app.kubernetes.io/name: ingress-nginx 281 | app.kubernetes.io/instance: ingress-nginx 282 | app.kubernetes.io/version: 0.32.0 283 | app.kubernetes.io/managed-by: Helm 284 | app.kubernetes.io/component: controller 285 | name: ingress-nginx-controller 286 | namespace: ingress-nginx 287 | spec: 288 | type: LoadBalancer 289 | externalTrafficPolicy: Local 290 | ports: 291 | - name: http 292 | port: 80 293 | protocol: TCP 294 | targetPort: http 295 | - name: https 296 | port: 443 297 | protocol: TCP 298 | targetPort: https 299 | selector: 300 | app.kubernetes.io/name: ingress-nginx 301 | app.kubernetes.io/instance: ingress-nginx 302 | app.kubernetes.io/component: controller 303 | --- 304 | # Source: ingress-nginx/templates/controller-deployment.yaml 305 | apiVersion: apps/v1 306 | kind: Deployment 307 | metadata: 308 | labels: 309 | helm.sh/chart: ingress-nginx-2.1.0 310 | app.kubernetes.io/name: ingress-nginx 311 | app.kubernetes.io/instance: ingress-nginx 312 | app.kubernetes.io/version: 0.32.0 313 | app.kubernetes.io/managed-by: Helm 314 | app.kubernetes.io/component: controller 315 | name: ingress-nginx-controller 316 | namespace: ingress-nginx 317 | spec: 318 | selector: 319 | matchLabels: 320 | app.kubernetes.io/name: ingress-nginx 321 | app.kubernetes.io/instance: ingress-nginx 322 | app.kubernetes.io/component: controller 323 | revisionHistoryLimit: 10 324 | minReadySeconds: 0 325 | template: 326 | metadata: 327 | labels: 328 | app.kubernetes.io/name: ingress-nginx 329 | app.kubernetes.io/instance: ingress-nginx 330 | app.kubernetes.io/component: controller 331 | spec: 332 | dnsPolicy: ClusterFirst 333 | containers: 334 | - name: controller 335 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 336 | imagePullPolicy: IfNotPresent 337 | lifecycle: 338 | preStop: 339 | exec: 340 | command: 341 | - /wait-shutdown 342 | args: 343 | - /nginx-ingress-controller 344 | - --publish-service=ingress-nginx/ingress-nginx-controller 345 | - --election-id=ingress-controller-leader 346 | - --ingress-class=nginx 347 | - --configmap=ingress-nginx/ingress-nginx-controller 348 | - --validating-webhook=:8443 349 | - --validating-webhook-certificate=/usr/local/certificates/cert 350 | - --validating-webhook-key=/usr/local/certificates/key 351 | securityContext: 352 | capabilities: 353 | drop: 354 | - ALL 355 | add: 356 | - NET_BIND_SERVICE 357 | runAsUser: 101 358 | allowPrivilegeEscalation: true 359 | env: 360 | - name: POD_NAME 361 | valueFrom: 362 | fieldRef: 363 | fieldPath: metadata.name 364 | - name: POD_NAMESPACE 365 | valueFrom: 366 | fieldRef: 367 | fieldPath: metadata.namespace 368 | livenessProbe: 369 | httpGet: 370 | path: /healthz 371 | port: 10254 372 | scheme: HTTP 373 | initialDelaySeconds: 10 374 | periodSeconds: 10 375 | timeoutSeconds: 1 376 | successThreshold: 1 377 | failureThreshold: 3 378 | readinessProbe: 379 | httpGet: 380 | path: /healthz 381 | port: 10254 382 | scheme: HTTP 383 | initialDelaySeconds: 10 384 | periodSeconds: 10 385 | timeoutSeconds: 1 386 | successThreshold: 1 387 | failureThreshold: 3 388 | ports: 389 | - name: http 390 | containerPort: 80 391 | protocol: TCP 392 | - name: https 393 | containerPort: 443 394 | protocol: TCP 395 | - name: webhook 396 | containerPort: 8443 397 | protocol: TCP 398 | volumeMounts: 399 | - name: webhook-cert 400 | mountPath: /usr/local/certificates/ 401 | readOnly: true 402 | resources: 403 | requests: 404 | cpu: 100m 405 | memory: 90Mi 406 | serviceAccountName: ingress-nginx 407 | terminationGracePeriodSeconds: 300 408 | volumes: 409 | - name: webhook-cert 410 | secret: 411 | secretName: ingress-nginx-admission 412 | --- 413 | # Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml 414 | apiVersion: admissionregistration.k8s.io/v1beta1 415 | kind: ValidatingWebhookConfiguration 416 | metadata: 417 | labels: 418 | helm.sh/chart: ingress-nginx-2.1.0 419 | app.kubernetes.io/name: ingress-nginx 420 | app.kubernetes.io/instance: ingress-nginx 421 | app.kubernetes.io/version: 0.32.0 422 | app.kubernetes.io/managed-by: Helm 423 | app.kubernetes.io/component: admission-webhook 424 | name: ingress-nginx-admission 425 | namespace: ingress-nginx 426 | webhooks: 427 | - name: validate.nginx.ingress.kubernetes.io 428 | rules: 429 | - apiGroups: 430 | - extensions 431 | - networking.k8s.io 432 | apiVersions: 433 | - v1beta1 434 | operations: 435 | - CREATE 436 | - UPDATE 437 | resources: 438 | - ingresses 439 | failurePolicy: Fail 440 | clientConfig: 441 | service: 442 | namespace: ingress-nginx 443 | name: ingress-nginx-controller-admission 444 | path: /extensions/v1beta1/ingresses 445 | --- 446 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml 447 | apiVersion: rbac.authorization.k8s.io/v1 448 | kind: ClusterRole 449 | metadata: 450 | name: ingress-nginx-admission 451 | annotations: 452 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade 453 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 454 | labels: 455 | helm.sh/chart: ingress-nginx-2.1.0 456 | app.kubernetes.io/name: ingress-nginx 457 | app.kubernetes.io/instance: ingress-nginx 458 | app.kubernetes.io/version: 0.32.0 459 | app.kubernetes.io/managed-by: Helm 460 | app.kubernetes.io/component: admission-webhook 461 | namespace: ingress-nginx 462 | rules: 463 | - apiGroups: 464 | - admissionregistration.k8s.io 465 | resources: 466 | - validatingwebhookconfigurations 467 | verbs: 468 | - get 469 | - update 470 | --- 471 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml 472 | apiVersion: rbac.authorization.k8s.io/v1 473 | kind: ClusterRoleBinding 474 | metadata: 475 | name: ingress-nginx-admission 476 | annotations: 477 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade 478 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 479 | labels: 480 | helm.sh/chart: ingress-nginx-2.1.0 481 | app.kubernetes.io/name: ingress-nginx 482 | app.kubernetes.io/instance: ingress-nginx 483 | app.kubernetes.io/version: 0.32.0 484 | app.kubernetes.io/managed-by: Helm 485 | app.kubernetes.io/component: admission-webhook 486 | namespace: ingress-nginx 487 | roleRef: 488 | apiGroup: rbac.authorization.k8s.io 489 | kind: ClusterRole 490 | name: ingress-nginx-admission 491 | subjects: 492 | - kind: ServiceAccount 493 | name: ingress-nginx-admission 494 | namespace: ingress-nginx 495 | --- 496 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml 497 | apiVersion: batch/v1 498 | kind: Job 499 | metadata: 500 | name: ingress-nginx-admission-create 501 | annotations: 502 | helm.sh/hook: pre-install,pre-upgrade 503 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 504 | labels: 505 | helm.sh/chart: ingress-nginx-2.1.0 506 | app.kubernetes.io/name: ingress-nginx 507 | app.kubernetes.io/instance: ingress-nginx 508 | app.kubernetes.io/version: 0.32.0 509 | app.kubernetes.io/managed-by: Helm 510 | app.kubernetes.io/component: admission-webhook 511 | namespace: ingress-nginx 512 | spec: 513 | template: 514 | metadata: 515 | name: ingress-nginx-admission-create 516 | labels: 517 | helm.sh/chart: ingress-nginx-2.1.0 518 | app.kubernetes.io/name: ingress-nginx 519 | app.kubernetes.io/instance: ingress-nginx 520 | app.kubernetes.io/version: 0.32.0 521 | app.kubernetes.io/managed-by: Helm 522 | app.kubernetes.io/component: admission-webhook 523 | spec: 524 | containers: 525 | - name: create 526 | image: jettech/kube-webhook-certgen:v1.2.0 527 | imagePullPolicy: IfNotPresent 528 | args: 529 | - create 530 | - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.ingress-nginx.svc 531 | - --namespace=ingress-nginx 532 | - --secret-name=ingress-nginx-admission 533 | restartPolicy: OnFailure 534 | serviceAccountName: ingress-nginx-admission 535 | securityContext: 536 | runAsNonRoot: true 537 | runAsUser: 2000 538 | --- 539 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml 540 | apiVersion: batch/v1 541 | kind: Job 542 | metadata: 543 | name: ingress-nginx-admission-patch 544 | annotations: 545 | helm.sh/hook: post-install,post-upgrade 546 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 547 | labels: 548 | helm.sh/chart: ingress-nginx-2.1.0 549 | app.kubernetes.io/name: ingress-nginx 550 | app.kubernetes.io/instance: ingress-nginx 551 | app.kubernetes.io/version: 0.32.0 552 | app.kubernetes.io/managed-by: Helm 553 | app.kubernetes.io/component: admission-webhook 554 | namespace: ingress-nginx 555 | spec: 556 | template: 557 | metadata: 558 | name: ingress-nginx-admission-patch 559 | labels: 560 | helm.sh/chart: ingress-nginx-2.1.0 561 | app.kubernetes.io/name: ingress-nginx 562 | app.kubernetes.io/instance: ingress-nginx 563 | app.kubernetes.io/version: 0.32.0 564 | app.kubernetes.io/managed-by: Helm 565 | app.kubernetes.io/component: admission-webhook 566 | spec: 567 | containers: 568 | - name: patch 569 | image: jettech/kube-webhook-certgen:v1.2.0 570 | imagePullPolicy: IfNotPresent 571 | args: 572 | - patch 573 | - --webhook-name=ingress-nginx-admission 574 | - --namespace=ingress-nginx 575 | - --patch-mutating=false 576 | - --secret-name=ingress-nginx-admission 577 | - --patch-failure-policy=Fail 578 | restartPolicy: OnFailure 579 | serviceAccountName: ingress-nginx-admission 580 | securityContext: 581 | runAsNonRoot: true 582 | runAsUser: 2000 583 | --- 584 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml 585 | apiVersion: rbac.authorization.k8s.io/v1 586 | kind: Role 587 | metadata: 588 | name: ingress-nginx-admission 589 | annotations: 590 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade 591 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 592 | labels: 593 | helm.sh/chart: ingress-nginx-2.1.0 594 | app.kubernetes.io/name: ingress-nginx 595 | app.kubernetes.io/instance: ingress-nginx 596 | app.kubernetes.io/version: 0.32.0 597 | app.kubernetes.io/managed-by: Helm 598 | app.kubernetes.io/component: admission-webhook 599 | namespace: ingress-nginx 600 | rules: 601 | - apiGroups: 602 | - '' 603 | resources: 604 | - secrets 605 | verbs: 606 | - get 607 | - create 608 | --- 609 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml 610 | apiVersion: rbac.authorization.k8s.io/v1 611 | kind: RoleBinding 612 | metadata: 613 | name: ingress-nginx-admission 614 | annotations: 615 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade 616 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 617 | labels: 618 | helm.sh/chart: ingress-nginx-2.1.0 619 | app.kubernetes.io/name: ingress-nginx 620 | app.kubernetes.io/instance: ingress-nginx 621 | app.kubernetes.io/version: 0.32.0 622 | app.kubernetes.io/managed-by: Helm 623 | app.kubernetes.io/component: admission-webhook 624 | namespace: ingress-nginx 625 | roleRef: 626 | apiGroup: rbac.authorization.k8s.io 627 | kind: Role 628 | name: ingress-nginx-admission 629 | subjects: 630 | - kind: ServiceAccount 631 | name: ingress-nginx-admission 632 | namespace: ingress-nginx 633 | --- 634 | # Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml 635 | apiVersion: v1 636 | kind: ServiceAccount 637 | metadata: 638 | name: ingress-nginx-admission 639 | annotations: 640 | helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade 641 | helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded 642 | labels: 643 | helm.sh/chart: ingress-nginx-2.1.0 644 | app.kubernetes.io/name: ingress-nginx 645 | app.kubernetes.io/instance: ingress-nginx 646 | app.kubernetes.io/version: 0.32.0 647 | app.kubernetes.io/managed-by: Helm 648 | app.kubernetes.io/component: admission-webhook 649 | namespace: ingress-nginx 650 | -------------------------------------------------------------------------------- /base/ops/ingress-nginx/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ingress-nginx.yaml 6 | 7 | patchesStrategicMerge: 8 | - nginx-patch.yaml 9 | -------------------------------------------------------------------------------- /base/ops/ingress-nginx/nginx-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-nginx-controller 5 | namespace: ingress-nginx 6 | spec: 7 | externalTrafficPolicy: Cluster -------------------------------------------------------------------------------- /base/ops/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - monitoring/ 6 | - logging/ 7 | - metacontroller/ 8 | - cert-manager/ 9 | - ingress-nginx/ 10 | - dashboard/ 11 | - loadbalancer/ 12 | - reflector/ 13 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/haproxy-configmap.yaml: -------------------------------------------------------------------------------- 1 | # This ConfigMaps defines the configuration of HAProxy. It enables 2 | # 1. sticky tables using URL parameter room for load-balancing (backend jitsi-meet) 3 | # 2. DNS resolving of web containers for all shards (resolvers kube-dns) 4 | # 3. peering between the HAProxy instances using the environment variables exported at start of container 5 | # (see command of haproxy pod) (peers mypeers) 6 | # 4. exporting of metrics usable by Prometheus and reachable stats frontend on port 9090 (frontend stats) 7 | kind: ConfigMap 8 | apiVersion: v1 9 | metadata: 10 | namespace: jitsi 11 | name: haproxy-config 12 | data: 13 | haproxy.cfg: | 14 | global 15 | # log to stdout 16 | log stdout format raw local0 info 17 | # enable stats socket for dynamic configuration and status retrieval 18 | stats socket ipv4@127.0.0.1:9999 level admin 19 | stats socket /var/run/hapee-lb.sock mode 666 level admin 20 | stats timeout 2m 21 | 22 | defaults 23 | log global 24 | option httplog 25 | retries 3 26 | maxconn 2000 27 | timeout connect 5s 28 | timeout client 50s 29 | timeout server 50s 30 | 31 | resolvers kube-dns 32 | # kubernetes DNS is defined in resolv.conf 33 | parse-resolv-conf 34 | hold valid 10s 35 | 36 | frontend http_in 37 | bind *:80 38 | mode http 39 | option forwardfor 40 | option http-keep-alive 41 | default_backend jitsi-meet 42 | 43 | # expose statistics in Prometheus format 44 | frontend stats 45 | mode http 46 | bind *:9090 47 | option http-use-htx 48 | http-request use-service prometheus-exporter if { path /metrics } 49 | stats enable 50 | stats uri /stats 51 | stats refresh 10s 52 | 53 | peers mypeers 54 | log stdout format raw local0 info 55 | peer "${HOSTNAME}" "${MY_POD_IP}:1024" 56 | peer "${OTHER_HOSTNAME}" "${OTHER_IP}:1024" 57 | 58 | backend jitsi-meet 59 | balance roundrobin 60 | mode http 61 | option forwardfor 62 | http-reuse safe 63 | http-request set-header Room %[urlp(room)] 64 | acl room_found urlp(room) -m found 65 | stick-table type string len 128 size 2k expire 1d peers mypeers 66 | stick on hdr(Room) if room_found 67 | # _http._tcp.web.jitsi.svc.cluster.local:80 is a SRV DNS record 68 | # A records don't work here because their order might change between calls and would result in different 69 | # shard IDs for each peered HAproxy 70 | server-template shard 0-5 _http._tcp.web.jitsi.svc.cluster.local:80 check resolvers kube-dns init-addr none 71 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/haproxy-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: haproxy-ingress 5 | namespace: jitsi 6 | spec: 7 | tls: 8 | - hosts: 9 | # fill in host here 10 | - jitsi.domainname 11 | secretName: jitsi-messenger-schule-tls 12 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/haproxy-service.yaml: -------------------------------------------------------------------------------- 1 | # this service is the entrypoint for the ingress 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: haproxy 6 | namespace: jitsi 7 | spec: 8 | selector: 9 | k8s-app: haproxy 10 | ports: 11 | - name: "http" 12 | port: 80 13 | type: ClusterIP 14 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/haproxy-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: haproxy 5 | namespace: jitsi 6 | labels: 7 | k8s-app: haproxy 8 | spec: 9 | replicas: 2 # use two replicas for high availability 10 | serviceName: haproxy 11 | selector: 12 | matchLabels: 13 | k8s-app: haproxy 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: haproxy 18 | spec: 19 | affinity: 20 | # HAProxy pods should run in different availability zones 21 | podAntiAffinity: 22 | requiredDuringSchedulingIgnoredDuringExecution: 23 | - labelSelector: 24 | matchExpressions: 25 | - key: k8s-app 26 | operator: In 27 | values: 28 | - haproxy 29 | topologyKey: "topology.kubernetes.io/zone" 30 | volumes: 31 | - name: haproxy-config 32 | configMap: 33 | name: haproxy-config 34 | items: 35 | - key: haproxy.cfg 36 | path: haproxy.cfg 37 | containers: 38 | - name: haproxy 39 | image: haproxy:2.1 40 | # enable peering between HAProxy-pods 41 | # look up ip address of other HAProxy instance and export it 42 | # those variables are then used in haproxy.cfg defined in ./haproxy-configmap.yaml 43 | command: ["bash", "-c"] 44 | args: 45 | - >- 46 | apt-get update && apt-get install -y dnsutils; 47 | [[ $HOSTNAME = 'haproxy-1' ]] && 48 | export OTHER_HOSTNAME=haproxy-0 OTHER_IP=$(nslookup haproxy-0 | awk 'NR==5 {print $2}') || 49 | export OTHER_HOSTNAME=haproxy-1 OTHER_IP=$(nslookup haproxy-1 | awk 'NR==5 {print $2}'); 50 | exec /docker-entrypoint.sh haproxy -f /usr/local/etc/haproxy/haproxy.cfg 51 | env: 52 | - name: MY_POD_IP 53 | valueFrom: 54 | fieldRef: 55 | fieldPath: status.podIP 56 | ports: 57 | - name: http 58 | containerPort: 80 59 | - name: metrics 60 | containerPort: 9090 61 | - name: peering 62 | containerPort: 1024 63 | volumeMounts: 64 | - mountPath: /usr/local/etc/haproxy/haproxy.cfg 65 | name: haproxy-config 66 | subPath: haproxy.cfg 67 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/haproxy0-service.yaml: -------------------------------------------------------------------------------- 1 | # service for peering of HAProxies (necessary to address haproxy-0 alone) 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: haproxy-0 6 | namespace: jitsi 7 | spec: 8 | selector: 9 | statefulset.kubernetes.io/pod-name: haproxy-0 10 | ports: 11 | - name: "peering" 12 | port: 1024 13 | type: ClusterIP 14 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/haproxy1-service.yaml: -------------------------------------------------------------------------------- 1 | # service for peering of HAProxies (necessary to address haproxy-1 alone) 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: haproxy-1 6 | namespace: jitsi 7 | spec: 8 | selector: 9 | statefulset.kubernetes.io/pod-name: haproxy-1 10 | ports: 11 | - name: "peering" 12 | port: 1024 13 | type: ClusterIP 14 | -------------------------------------------------------------------------------- /base/ops/loadbalancer/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - haproxy-configmap.yaml 6 | - haproxy-ingress.yaml 7 | - haproxy-service.yaml 8 | - haproxy-statefulset.yaml 9 | - haproxy0-service.yaml 10 | - haproxy1-service.yaml 11 | -------------------------------------------------------------------------------- /base/ops/logging/es-realm-secret.yaml: -------------------------------------------------------------------------------- 1 | # File realm in ES format (from the CLI or manually assembled) 2 | kind: Secret 3 | apiVersion: v1 4 | metadata: 5 | name: es-filerealm-secret 6 | namespace: logging 7 | annotations: 8 | reflector.v1.k8s.emberstack.com/reflection-allowed: "true" 9 | reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "kube-system" 10 | stringData: 11 | users: : 12 | users_roles: superuser: 13 | -------------------------------------------------------------------------------- /base/ops/logging/fluentd-daemonset-elasticsearch-rbac.yaml: -------------------------------------------------------------------------------- 1 | # Source: v1 https://raw.githubusercontent.com/fluent/fluentd-kubernetes-daemonset/79f63a80a67c388d9a5e601e2525ab052546866c/fluentd-daemonset-elasticsearch-rbac.yaml 2 | 3 | --- 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: fluentd 8 | namespace: kube-system 9 | 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1beta1 12 | kind: ClusterRole 13 | metadata: 14 | name: fluentd 15 | namespace: kube-system 16 | rules: 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - pods 21 | - namespaces 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | 27 | --- 28 | kind: ClusterRoleBinding 29 | apiVersion: rbac.authorization.k8s.io/v1beta1 30 | metadata: 31 | name: fluentd 32 | roleRef: 33 | kind: ClusterRole 34 | name: fluentd 35 | apiGroup: rbac.authorization.k8s.io 36 | subjects: 37 | - kind: ServiceAccount 38 | name: fluentd 39 | namespace: kube-system 40 | --- 41 | apiVersion: apps/v1 42 | kind: DaemonSet 43 | metadata: 44 | name: fluentd 45 | namespace: kube-system 46 | labels: 47 | k8s-app: fluentd-logging 48 | version: v1 49 | spec: 50 | selector: 51 | matchLabels: 52 | k8s-app: fluentd-logging 53 | version: v1 54 | template: 55 | metadata: 56 | labels: 57 | k8s-app: fluentd-logging 58 | version: v1 59 | spec: 60 | serviceAccount: fluentd 61 | serviceAccountName: fluentd 62 | tolerations: 63 | - key: node-role.kubernetes.io/master 64 | effect: NoSchedule 65 | containers: 66 | - name: fluentd 67 | image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch 68 | env: 69 | - name: FLUENT_ELASTICSEARCH_HOST 70 | value: "elasticsearch-logging" 71 | - name: FLUENT_ELASTICSEARCH_PORT 72 | value: "9200" 73 | - name: FLUENT_ELASTICSEARCH_SCHEME 74 | value: "http" 75 | # Option to configure elasticsearch plugin with self signed certs 76 | # ================================================================ 77 | - name: FLUENT_ELASTICSEARCH_SSL_VERIFY 78 | value: "true" 79 | # Option to configure elasticsearch plugin with tls 80 | # ================================================================ 81 | - name: FLUENT_ELASTICSEARCH_SSL_VERSION 82 | value: "TLSv1_2" 83 | # X-Pack Authentication 84 | # ===================== 85 | - name: FLUENT_ELASTICSEARCH_USER 86 | value: "elastic" 87 | - name: FLUENT_ELASTICSEARCH_PASSWORD 88 | value: "changeme" 89 | resources: 90 | limits: 91 | memory: 200Mi 92 | requests: 93 | cpu: 100m 94 | memory: 200Mi 95 | volumeMounts: 96 | - name: varlog 97 | mountPath: /var/log 98 | - name: varlibdockercontainers 99 | mountPath: /var/lib/docker/containers 100 | readOnly: true 101 | terminationGracePeriodSeconds: 30 102 | volumes: 103 | - name: varlog 104 | hostPath: 105 | path: /var/log 106 | - name: varlibdockercontainers 107 | hostPath: 108 | path: /var/lib/docker/containers 109 | -------------------------------------------------------------------------------- /base/ops/logging/fluentd-daemonset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd 5 | namespace: kube-system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: fluentd 11 | env: 12 | - name: FLUENT_ELASTICSEARCH_HOST 13 | value: "elasticsearch-es-http.logging.svc.cluster.local" 14 | - name: FLUENT_ELASTICSEARCH_PORT 15 | value: "9200" 16 | - name: FLUENT_ELASTICSEARCH_SCHEME 17 | value: "https" 18 | - name: FLUENT_ELASTICSEARCH_SSL_VERIFY 19 | value: "false" # not necessary within cluster 20 | # delete fluentd user 21 | - $patch: delete 22 | name: FLUENT_ELASTICSEARCH_USER 23 | # delete fluentd password 24 | - $patch: delete 25 | name: FLUENT_ELASTICSEARCH_PASSWORD 26 | # this secret is of form username:password and will be split by the startup script 27 | - name: FLUENT_ELASTICSEARCH_USER_PASSWORD 28 | valueFrom: 29 | secretKeyRef: 30 | name: elasticsearch-user-fluentd 31 | key: users 32 | command: ["bash", "-c"] 33 | args: 34 | - >- 35 | export FLUENT_ELASTICSEARCH_USER=${FLUENT_ELASTICSEARCH_USER_PASSWORD%%:*}; 36 | export FLUENT_ELASTICSEARCH_PASSWORD=${FLUENT_ELASTICSEARCH_USER_PASSWORD##*:}; 37 | exec tini -- /fluentd/entrypoint.sh 38 | -------------------------------------------------------------------------------- /base/ops/logging/kibana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kibana.k8s.elastic.co/v1 2 | kind: Kibana 3 | metadata: 4 | name: kibana 5 | namespace: logging 6 | spec: 7 | version: 7.7.0 8 | count: 1 9 | elasticsearchRef: 10 | name: elasticsearch 11 | podTemplate: 12 | spec: 13 | containers: 14 | - name: kibana 15 | resources: 16 | requests: 17 | memory: 1500Mi 18 | cpu: 1 19 | limits: 20 | memory: 1500Mi 21 | cpu: 1 22 | -------------------------------------------------------------------------------- /base/ops/logging/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - logging-namespace.yaml 6 | - eck-crd.yaml 7 | - kibana.yaml 8 | - fluentd-daemonset-elasticsearch-rbac.yaml 9 | - es-realm-secret.yaml 10 | - secret-fluentd-user.yaml 11 | 12 | patchesStrategicMerge: 13 | - fluentd-daemonset-patch.yaml -------------------------------------------------------------------------------- /base/ops/logging/logging-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: logging 5 | -------------------------------------------------------------------------------- /base/ops/logging/secret-fluentd-user.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: elasticsearch-user-fluentd 5 | namespace: kube-system 6 | annotations: 7 | reflector.v1.k8s.emberstack.com/reflects: "logging/es-filerealm-secret" -------------------------------------------------------------------------------- /base/ops/metacontroller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - https://github.com/GoogleCloudPlatform/metacontroller?ref=v0.4.0 6 | - metacontroller-namespace.yaml 7 | - service-per-pod-configmap.yaml 8 | - service-per-pod-deployment.yaml 9 | -------------------------------------------------------------------------------- /base/ops/metacontroller/metacontroller-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: metacontroller 5 | -------------------------------------------------------------------------------- /base/ops/metacontroller/service-per-pod-configmap.yaml: -------------------------------------------------------------------------------- 1 | # This ConfigMap defines the automation of creating NodePorts for every emerging JVB pod to expose it to the internet. 2 | kind: ConfigMap 3 | apiVersion: v1 4 | metadata: 5 | name: service-per-pod-hooks 6 | namespace: metacontroller 7 | data: 8 | finalize-service-per-pod.jsonnet: | 9 | function(request) { 10 | // If the StatefulSet is updated to no longer match our decorator selector, 11 | // or if the StatefulSet is deleted, clean up any attachments we made. 12 | attachments: [], 13 | // Mark as finalized once we observe all Services are gone. 14 | finalized: std.length(request.attachments['Service.v1']) == 0 15 | } 16 | sync-pod-name-label.jsonnet: | 17 | function(request) { 18 | local pod = request.object, 19 | local labelKey = pod.metadata.annotations["pod-name-label"], 20 | 21 | // Inject the Pod name as a label with the key requested in the annotation. 22 | labels: { 23 | [labelKey]: pod.metadata.name 24 | } 25 | } 26 | sync-service-per-pod.jsonnet: | 27 | function(request) { 28 | local statefulset = request.object, 29 | local labelKey = statefulset.metadata.annotations["service-per-pod-label"], 30 | 31 | // the base port for is collected from the container setup 32 | local basePort = std.parseInt([ 33 | a for a in [ 34 | c for c in statefulset.spec.template.spec.containers 35 | if c.name == 'jvb' 36 | ][0].args 37 | if std.startsWith(a, '3') && std.length(a) == 5][0]), 38 | 39 | // create a service for each pod, with a selector on the given label key 40 | attachments: [ 41 | { 42 | apiVersion: "v1", 43 | kind: "Service", 44 | metadata: { 45 | name: statefulset.metadata.name + "-" + index, 46 | labels: {app: "service-per-pod"} 47 | }, 48 | spec: { 49 | selector: { 50 | [labelKey]: statefulset.metadata.name + "-" + index 51 | }, 52 | type: "NodePort", 53 | externalTrafficPolicy: "Local", 54 | ports: [ 55 | { 56 | "port": basePort + index, 57 | "protocol": "UDP", 58 | "targetPort": basePort + index, 59 | "nodePort": basePort + index 60 | } 61 | ] 62 | } 63 | } 64 | for index in std.range(0, statefulset.spec.replicas - 1) 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /base/ops/metacontroller/service-per-pod-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: service-per-pod 5 | namespace: metacontroller 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: service-per-pod 11 | template: 12 | metadata: 13 | labels: 14 | app: service-per-pod 15 | spec: 16 | containers: 17 | - name: hooks 18 | image: metacontroller/jsonnetd:0.1 19 | imagePullPolicy: Always 20 | workingDir: /hooks 21 | volumeMounts: 22 | - name: hooks 23 | mountPath: /hooks 24 | volumes: 25 | - name: hooks 26 | configMap: 27 | name: service-per-pod-hooks 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: service-per-pod 33 | namespace: metacontroller 34 | spec: 35 | selector: 36 | app: service-per-pod 37 | ports: 38 | - port: 80 39 | targetPort: 8080 -------------------------------------------------------------------------------- /base/ops/monitoring/bbb-exporter-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | # scrapes BBB-exporter statistics 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: bbb-exporter-service-monitor 6 | namespace: monitoring 7 | spec: 8 | selector: 9 | matchLabels: 10 | k8s-app: bbb-metrics 11 | namespaceSelector: 12 | any: false 13 | matchNames: 14 | - monitoring 15 | endpoints: 16 | - basicAuth: 17 | password: 18 | name: bbb-basic-auth 19 | key: password 20 | username: 21 | name: bbb-basic-auth 22 | key: username 23 | path: /metrics 24 | interval: 30s 25 | honorLabels: true 26 | port: bbb-metrics 27 | scheme: https 28 | tlsConfig: 29 | insecureSkipVerify: true 30 | -------------------------------------------------------------------------------- /base/ops/monitoring/bbb-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | # scrapes statistics of BBB servers 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: bbb-service-monitor 6 | namespace: monitoring 7 | spec: 8 | selector: 9 | matchLabels: 10 | k8s-app: bbb-metrics 11 | namespaceSelector: 12 | any: false 13 | matchNames: 14 | - monitoring 15 | endpoints: 16 | - basicAuth: 17 | password: 18 | name: bbb-basic-auth 19 | key: password 20 | username: 21 | name: bbb-basic-auth 22 | key: username 23 | path: /metrics 24 | interval: 30s 25 | honorLabels: true 26 | port: ne-metrics 27 | scheme: https 28 | tlsConfig: 29 | insecureSkipVerify: true 30 | -------------------------------------------------------------------------------- /base/ops/monitoring/bbb-service.yaml: -------------------------------------------------------------------------------- 1 | # used to scrape statistics of affiliated BigBlueButton project 2 | # BBB endpoints are defined in overlays 3 | kind: Service 4 | apiVersion: v1 5 | metadata: 6 | name: bbb 7 | namespace: monitoring 8 | labels: 9 | k8s-app: bbb-metrics 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: ne-metrics 14 | port: 9100 15 | - name: bbb-metrics 16 | port: 9688 17 | -------------------------------------------------------------------------------- /base/ops/monitoring/custom-metrics-apiservice.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta2.custom.metrics.k8s.io 5 | spec: 6 | service: 7 | name: prometheus-adapter 8 | namespace: monitoring 9 | group: custom.metrics.k8s.io 10 | version: v1beta2 11 | insecureSkipTLSVerify: true 12 | groupPriorityMinimum: 100 13 | versionPriority: 200 -------------------------------------------------------------------------------- /base/ops/monitoring/grafana-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | template: 10 | spec: 11 | volumes: 12 | # use persistent storage (e.g. for storing users) instead of in-memory storage 13 | - $patch: delete 14 | name: grafana-storage 15 | - name: grafana-storage 16 | persistentVolumeClaim: 17 | claimName: grafana-storage 18 | - configMap: 19 | name: grafana-dashboard-jitsi 20 | name: grafana-dashboard-jitsi 21 | - configMap: 22 | name: bbb-dashboards 23 | name: bbb-dashboards 24 | containers: 25 | - name: grafana 26 | env: 27 | # configure Grafana to serve from JITSI_DOMAIN/grafana 28 | - name: GF_SERVER_ROOT_URL 29 | value: "%(protocol)s://%(domain)s:%(http_port)s/grafana/" 30 | - name: GF_SERVER_SERVE_FROM_SUB_PATH 31 | value: "true" 32 | # mount Jitsi Grafana dashboard, such that it is available after first start of pod 33 | volumeMounts: 34 | - name: grafana-dashboard-jitsi 35 | mountPath: /grafana-dashboard-definitions/0/jitsi 36 | readOnly: false 37 | - name: bbb-dashboards 38 | mountPath: /grafana-dashboard-definitions/0/bbb 39 | readOnly: false 40 | resources: 41 | limits: 42 | cpu: 600m 43 | memory: 600Mi 44 | requests: 45 | cpu: 600m 46 | memory: 600Mi 47 | securityContext: 48 | fsGroup: 427 49 | -------------------------------------------------------------------------------- /base/ops/monitoring/grafana-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: grafana-ingress 5 | namespace: monitoring 6 | annotations: 7 | # necessary to create more complex ingress rules 8 | nginx.ingress.kubernetes.io/use-regex: "true" -------------------------------------------------------------------------------- /base/ops/monitoring/grafana-pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | namespace: monitoring 5 | name: grafana-storage 6 | spec: 7 | storageClassName: ionos-enterprise-hdd 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | -------------------------------------------------------------------------------- /base/ops/monitoring/haproxy-pod-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: haproxy-pod-monitor 5 | namespace: jitsi 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: haproxy 10 | namespaceSelector: 11 | any: false 12 | matchNames: 13 | - jitsi 14 | podMetricsEndpoints: 15 | - port: metrics 16 | -------------------------------------------------------------------------------- /base/ops/monitoring/jvb-pod-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: jvb-pod-monitor 5 | namespace: jitsi 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: jvb 10 | namespaceSelector: 11 | any: false 12 | matchNames: 13 | - jitsi 14 | podMetricsEndpoints: 15 | - port: metrics 16 | -------------------------------------------------------------------------------- /base/ops/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - https://github.com/coreos/kube-prometheus?ref=release-0.7 6 | - custom-metrics-apiservice.yaml 7 | - metrics-server.yaml 8 | - grafana-ingress.yaml 9 | - grafana-pvc.yaml 10 | - jitsi-dashboard-configmap.yaml 11 | - bbb-dashboards-configmap.yaml 12 | - prometheus-roleBindingSpecificNamespaces.yaml 13 | - prometheus-roleSpecificNamespaces.yaml 14 | - jvb-pod-monitor.yaml 15 | - prosody-pod-monitor.yaml 16 | - bbb-service.yaml 17 | - bbb-service-monitor.yaml 18 | - bbb-exporter-service-monitor.yaml 19 | - turn-service.yaml 20 | - turn-service-monitor.yaml 21 | - haproxy-pod-monitor.yaml 22 | 23 | patchesStrategicMerge: 24 | - metrics-server-patch.yaml 25 | - grafana-deployment-patch.yaml 26 | - prometheus-adapter-config-map-patch.yaml 27 | -------------------------------------------------------------------------------- /base/ops/monitoring/metrics-server-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: metrics-server 5 | namespace: kube-system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: metrics-server 11 | args: 12 | - --cert-dir=/tmp 13 | - --secure-port=4443 14 | # see https://github.com/kubernetes-sigs/metrics-server/blob/v0.3.6/README.md#flags 15 | - --kubelet-insecure-tls 16 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 17 | -------------------------------------------------------------------------------- /base/ops/monitoring/metrics-server.yaml: -------------------------------------------------------------------------------- 1 | # Source: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.6/components.yaml 2 | # Changes: 3 | # - removed ClusterRole system:aggregated-metrics-reader because it is defined by kube-prometheus 4 | --- 5 | apiVersion: rbac.authorization.k8s.io/v1 6 | kind: ClusterRoleBinding 7 | metadata: 8 | name: metrics-server:system:auth-delegator 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: system:auth-delegator 13 | subjects: 14 | - kind: ServiceAccount 15 | name: metrics-server 16 | namespace: kube-system 17 | --- 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: RoleBinding 20 | metadata: 21 | name: metrics-server-auth-reader 22 | namespace: kube-system 23 | roleRef: 24 | apiGroup: rbac.authorization.k8s.io 25 | kind: Role 26 | name: extension-apiserver-authentication-reader 27 | subjects: 28 | - kind: ServiceAccount 29 | name: metrics-server 30 | namespace: kube-system 31 | --- 32 | apiVersion: apiregistration.k8s.io/v1beta1 33 | kind: APIService 34 | metadata: 35 | name: v1beta1.metrics.k8s.io 36 | spec: 37 | service: 38 | name: metrics-server 39 | namespace: kube-system 40 | group: metrics.k8s.io 41 | version: v1beta1 42 | insecureSkipTLSVerify: true 43 | groupPriorityMinimum: 100 44 | versionPriority: 100 45 | --- 46 | apiVersion: v1 47 | kind: ServiceAccount 48 | metadata: 49 | name: metrics-server 50 | namespace: kube-system 51 | --- 52 | apiVersion: apps/v1 53 | kind: Deployment 54 | metadata: 55 | name: metrics-server 56 | namespace: kube-system 57 | labels: 58 | k8s-app: metrics-server 59 | spec: 60 | selector: 61 | matchLabels: 62 | k8s-app: metrics-server 63 | template: 64 | metadata: 65 | name: metrics-server 66 | labels: 67 | k8s-app: metrics-server 68 | spec: 69 | serviceAccountName: metrics-server 70 | volumes: 71 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 72 | - name: tmp-dir 73 | emptyDir: {} 74 | containers: 75 | - name: metrics-server 76 | image: k8s.gcr.io/metrics-server-amd64:v0.3.6 77 | imagePullPolicy: IfNotPresent 78 | args: 79 | - --cert-dir=/tmp 80 | - --secure-port=4443 81 | ports: 82 | - name: main-port 83 | containerPort: 4443 84 | protocol: TCP 85 | securityContext: 86 | readOnlyRootFilesystem: true 87 | runAsNonRoot: true 88 | runAsUser: 1000 89 | volumeMounts: 90 | - name: tmp-dir 91 | mountPath: /tmp 92 | nodeSelector: 93 | kubernetes.io/os: linux 94 | kubernetes.io/arch: "amd64" 95 | --- 96 | apiVersion: v1 97 | kind: Service 98 | metadata: 99 | name: metrics-server 100 | namespace: kube-system 101 | labels: 102 | kubernetes.io/name: "Metrics-server" 103 | kubernetes.io/cluster-service: "true" 104 | spec: 105 | selector: 106 | k8s-app: metrics-server 107 | ports: 108 | - port: 443 109 | protocol: TCP 110 | targetPort: main-port 111 | --- 112 | apiVersion: rbac.authorization.k8s.io/v1 113 | kind: ClusterRole 114 | metadata: 115 | name: system:metrics-server 116 | rules: 117 | - apiGroups: 118 | - "" 119 | resources: 120 | - pods 121 | - nodes 122 | - nodes/stats 123 | - namespaces 124 | - configmaps 125 | verbs: 126 | - get 127 | - list 128 | - watch 129 | --- 130 | apiVersion: rbac.authorization.k8s.io/v1 131 | kind: ClusterRoleBinding 132 | metadata: 133 | name: system:metrics-server 134 | roleRef: 135 | apiGroup: rbac.authorization.k8s.io 136 | kind: ClusterRole 137 | name: system:metrics-server 138 | subjects: 139 | - kind: ServiceAccount 140 | name: metrics-server 141 | namespace: kube-system 142 | -------------------------------------------------------------------------------- /base/ops/monitoring/prometheus-adapter-config-map-patch.yaml: -------------------------------------------------------------------------------- 1 | # expose metrics collected by Prometheus to the metric server 2 | # metrics can then be used by HorizontalPodAutoscalers (e.g. for autoscaling JVBs) 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: adapter-config 7 | namespace: monitoring 8 | data: 9 | config.yaml: |- 10 | rules: 11 | - seriesQuery: 'container_network_transmit_bytes_total{interface="eth0"}' 12 | resources: 13 | overrides: 14 | namespace: {resource: "namespace"} 15 | pod: {resource: "pod"} 16 | name: 17 | matches: "^(.*)_total" 18 | as: "${1}_per_second" 19 | metricsQuery: 'sum(rate(<<.Series>>{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>)' 20 | resourceRules: 21 | cpu: 22 | containerLabel: container 23 | containerQuery: 'sum(irate(container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}[5m])) by (<<.GroupBy>>)' 24 | nodeQuery: 'sum(1 - irate(node_cpu_seconds_total{mode="idle"}[5m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)' 25 | resources: 26 | overrides: 27 | namespace: 28 | resource: namespace 29 | node: 30 | resource: node 31 | pod: 32 | resource: pod 33 | memory: 34 | containerLabel: container 35 | containerQuery: 'sum(container_memory_working_set_bytes{<<.LabelMatchers>>,container!="POD",container!="",pod!=""}) by (<<.GroupBy>>)' 36 | nodeQuery: 'sum(node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} - node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>}) by (<<.GroupBy>>)' 37 | resources: 38 | overrides: 39 | instance: 40 | resource: node 41 | namespace: 42 | resource: namespace 43 | pod": 44 | resource: pod 45 | window: 5m 46 | -------------------------------------------------------------------------------- /base/ops/monitoring/prometheus-roleBindingSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | # necessary to allow Prometheus to monitor objects in namespace jitsi 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: prometheus-k8s 6 | namespace: jitsi 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: Role 10 | name: prometheus-k8s 11 | subjects: 12 | - kind: ServiceAccount 13 | name: prometheus-k8s 14 | namespace: monitoring 15 | -------------------------------------------------------------------------------- /base/ops/monitoring/prometheus-roleSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | # necessary to allow Prometheus to monitor objects in namespace jitsi 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: prometheus-k8s 6 | namespace: jitsi 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - services 12 | - endpoints 13 | - pods 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | -------------------------------------------------------------------------------- /base/ops/monitoring/prosody-pod-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: prosody-pod-monitor 5 | namespace: jitsi 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: prosody 10 | namespaceSelector: 11 | any: false 12 | matchNames: 13 | - jitsi 14 | podMetricsEndpoints: 15 | - port: metrics -------------------------------------------------------------------------------- /base/ops/monitoring/turn-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | # scrapes statistics of BBB servers 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: turn-service-monitor 6 | namespace: monitoring 7 | spec: 8 | selector: 9 | matchLabels: 10 | k8s-app: turn-metrics 11 | namespaceSelector: 12 | any: false 13 | matchNames: 14 | - monitoring 15 | endpoints: 16 | - basicAuth: 17 | password: 18 | name: bbb-basic-auth 19 | key: password 20 | username: 21 | name: bbb-basic-auth 22 | key: username 23 | path: /metrics 24 | interval: 30s 25 | honorLabels: true 26 | port: metrics 27 | scheme: https 28 | tlsConfig: 29 | insecureSkipVerify: true 30 | -------------------------------------------------------------------------------- /base/ops/monitoring/turn-service.yaml: -------------------------------------------------------------------------------- 1 | # used to scrape statistics of affiliated BigBlueButton project 2 | # BBB endpoints are defined in overlays 3 | kind: Service 4 | apiVersion: v1 5 | metadata: 6 | name: turn 7 | namespace: monitoring 8 | labels: 9 | k8s-app: turn-metrics 10 | spec: 11 | clusterIP: None 12 | ports: 13 | - name: metrics 14 | port: 9100 15 | -------------------------------------------------------------------------------- /base/ops/reflector/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - reflector.yaml -------------------------------------------------------------------------------- /base/ops/reflector/reflector.yaml: -------------------------------------------------------------------------------- 1 | # Source: https://github.com/emberstack/kubernetes-reflector/releases/download/v5.0.10/reflector.yaml 2 | # The certificate defined in namespace `jitsi` results in a secret in namespace `jitsi`. 3 | # The reflector allows the labeling of the resulting secret. This label is used by the reflector to 4 | # copy this secret to namespace `monitoring`, where it is used by the Grafana ingress. 5 | --- 6 | # Source: reflector/templates/serviceaccount.yaml 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | name: reflector 11 | namespace: kube-system 12 | labels: 13 | helm.sh/chart: reflector-5.0.10 14 | app.kubernetes.io/name: reflector 15 | app.kubernetes.io/instance: reflector 16 | app.kubernetes.io/version: "5.0.10" 17 | app.kubernetes.io/managed-by: Helm 18 | --- 19 | # Source: reflector/templates/clusterRole.yaml 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRole 22 | metadata: 23 | name: reflector 24 | labels: 25 | helm.sh/chart: reflector-5.0.10 26 | app.kubernetes.io/name: reflector 27 | app.kubernetes.io/instance: reflector 28 | app.kubernetes.io/version: "5.0.10" 29 | app.kubernetes.io/managed-by: Helm 30 | rules: 31 | - apiGroups: [""] 32 | resources: ["configmaps", "secrets"] 33 | verbs: ["*"] 34 | - apiGroups: [""] 35 | resources: ["namespaces"] 36 | verbs: ["watch", "list"] 37 | - apiGroups: ["apiextensions.k8s.io"] 38 | resources: ["customresourcedefinitions"] 39 | verbs: ["watch", "list"] 40 | - apiGroups: ["certmanager.k8s.io"] 41 | resources: ["certificates", "certificates/finalizers"] 42 | verbs: ["watch", "list"] 43 | - apiGroups: ["cert-manager.io"] 44 | resources: ["certificates", "certificates/finalizers"] 45 | verbs: ["watch", "list"] 46 | --- 47 | # Source: reflector/templates/clusterRoleBinding.yaml 48 | apiVersion: rbac.authorization.k8s.io/v1 49 | kind: ClusterRoleBinding 50 | metadata: 51 | name: reflector 52 | labels: 53 | helm.sh/chart: reflector-5.0.10 54 | app.kubernetes.io/name: reflector 55 | app.kubernetes.io/instance: reflector 56 | app.kubernetes.io/version: "5.0.10" 57 | app.kubernetes.io/managed-by: Helm 58 | roleRef: 59 | kind: ClusterRole 60 | name: reflector 61 | apiGroup: rbac.authorization.k8s.io 62 | subjects: 63 | - kind: ServiceAccount 64 | name: reflector 65 | namespace: kube-system 66 | --- 67 | # Source: reflector/templates/deployment.yaml 68 | apiVersion: apps/v1 69 | kind: Deployment 70 | metadata: 71 | name: reflector 72 | namespace: kube-system 73 | labels: 74 | helm.sh/chart: reflector-5.0.10 75 | app.kubernetes.io/name: reflector 76 | app.kubernetes.io/instance: reflector 77 | app.kubernetes.io/version: "5.0.10" 78 | app.kubernetes.io/managed-by: Helm 79 | spec: 80 | replicas: 1 81 | selector: 82 | matchLabels: 83 | app.kubernetes.io/name: reflector 84 | app.kubernetes.io/instance: reflector 85 | template: 86 | metadata: 87 | labels: 88 | app.kubernetes.io/name: reflector 89 | app.kubernetes.io/instance: reflector 90 | spec: 91 | serviceAccountName: reflector 92 | securityContext: 93 | fsGroup: 2000 94 | containers: 95 | - name: reflector 96 | securityContext: 97 | capabilities: 98 | drop: 99 | - ALL 100 | readOnlyRootFilesystem: false 101 | runAsNonRoot: true 102 | runAsUser: 1000 103 | image: "emberstack/kubernetes-reflector:5.0.10" 104 | imagePullPolicy: IfNotPresent 105 | env: 106 | - name: ES_Serilog__MinimumLevel__Default 107 | value: "Information" 108 | ports: 109 | - name: http 110 | containerPort: 25080 111 | protocol: TCP 112 | livenessProbe: 113 | httpGet: 114 | path: /healthz 115 | port: http 116 | initialDelaySeconds: 5 117 | periodSeconds: 10 118 | readinessProbe: 119 | httpGet: 120 | path: /healthz 121 | port: http 122 | initialDelaySeconds: 5 123 | periodSeconds: 10 124 | resources: 125 | {} 126 | -------------------------------------------------------------------------------- /cluster/dev/cluster.tf: -------------------------------------------------------------------------------- 1 | resource "profitbricks_k8s_cluster" "dev" { 2 | name = "jitsi-dev" 3 | k8s_version = "1.17.2" 4 | maintenance_window { 5 | day_of_the_week = "Saturday" 6 | time = "03:49:18Z" 7 | } 8 | } 9 | 10 | resource "profitbricks_k8s_node_pool" "dev_zone_1" { 11 | name = "worker-3cpu-8gb" 12 | k8s_version = "1.17.5" 13 | maintenance_window { 14 | day_of_the_week = "Saturday" 15 | time = "16:09:33Z" 16 | } 17 | datacenter_id = var.datacenter 18 | k8s_cluster_id = profitbricks_k8s_cluster.dev.id 19 | cpu_family = "INTEL_XEON" 20 | availability_zone = "ZONE_1" 21 | storage_type = "HDD" 22 | node_count = 2 23 | cores_count = 3 24 | ram_size = 8192 25 | storage_size = 50 26 | } 27 | 28 | resource "profitbricks_k8s_node_pool" "dev_zone_2" { 29 | name = "worker-3cpu-8gb" 30 | k8s_version = "1.17.5" 31 | maintenance_window { 32 | day_of_the_week = "Wednesday" 33 | time = "23:30:49Z" 34 | } 35 | datacenter_id = var.datacenter 36 | k8s_cluster_id = profitbricks_k8s_cluster.dev.id 37 | cpu_family = "INTEL_XEON" 38 | availability_zone = "ZONE_2" 39 | storage_type = "HDD" 40 | node_count = 2 41 | cores_count = 3 42 | ram_size = 8192 43 | storage_size = 50 44 | } 45 | -------------------------------------------------------------------------------- /cluster/dev/variables.tf: -------------------------------------------------------------------------------- 1 | variable "datacenter" { 2 | description = "Video Data Center" 3 | type = string 4 | default = "df8bd51c-298f-4981-8df8-6fd07340b397" 5 | } 6 | -------------------------------------------------------------------------------- /cluster/prod/cluster.tf: -------------------------------------------------------------------------------- 1 | resource "profitbricks_k8s_cluster" "prod" { 2 | name = "jitsi-prod" 3 | k8s_version = "1.17.2" 4 | maintenance_window { 5 | day_of_the_week = "Sunday" 6 | time = "22:30:20Z" 7 | } 8 | } 9 | 10 | resource "profitbricks_k8s_node_pool" "prod_zone_1" { 11 | name = "worker-10cpu-8gb" 12 | k8s_version = "1.17.5" 13 | maintenance_window { 14 | day_of_the_week = "Saturday" 15 | time = "10:26:26Z" 16 | } 17 | datacenter_id = var.datacenter 18 | k8s_cluster_id = profitbricks_k8s_cluster.prod.id 19 | cpu_family = "INTEL_XEON" 20 | availability_zone = "ZONE_1" 21 | storage_type = "HDD" 22 | node_count = 3 23 | cores_count = 10 24 | ram_size = 8192 25 | storage_size = 50 26 | } 27 | 28 | resource "profitbricks_k8s_node_pool" "prod_zone_2" { 29 | name = "worker-10cpu-8gb" 30 | k8s_version = "1.17.5" 31 | maintenance_window { 32 | day_of_the_week = "Wednesday" 33 | time = "22:52:42Z" 34 | } 35 | datacenter_id = var.datacenter 36 | k8s_cluster_id = profitbricks_k8s_cluster.prod.id 37 | cpu_family = "INTEL_XEON" 38 | availability_zone = "ZONE_2" 39 | storage_type = "HDD" 40 | node_count = 3 41 | cores_count = 10 42 | ram_size = 8192 43 | storage_size = 50 44 | } 45 | -------------------------------------------------------------------------------- /cluster/prod/variables.tf: -------------------------------------------------------------------------------- 1 | variable "datacenter" { 2 | description = "Video Data Center" 3 | type = string 4 | default = "df8bd51c-298f-4981-8df8-6fd07340b397" 5 | } 6 | -------------------------------------------------------------------------------- /docs/architecture/architecture.md: -------------------------------------------------------------------------------- 1 | # Architecture 2 | 3 | ## Jitsi Meet 4 | 5 | ### Components 6 | 7 | A Jitsi Meet installation (holding one "shard", term explained below) consists of the following different components: 8 | 9 | 1. `web` This container represents the web frontend and is the entrypoint for each user. 10 | 2. `jicofo` This component is responsible for managing media sessions between each of the participants and the videobridge. 11 | 3. `prosody` This is the XMPP server used for creating the MUCs (multi-user conferences). 12 | 4. `jvb` The Jitsi Videobridge is an XMPP server component that allows for multi-user video communication. 13 | 14 | Jitsi uses the term "shard" to describe the composition that contains single containers for 15 | `web`, `jicofo`, `prosody` and multiple containers of `jvb` running in parallel. The following diagram 16 | depicts this setup: 17 | 18 | ![Architecture Jitsi Meet](build/shard.png) 19 | 20 | In this setup the videobridges can be scaled up and down depending on the current load 21 | (number of video conferences and participants). The videobridge typically is the component with the highest load and 22 | therefore the main part that needs to be scaled. 23 | Nevertheless, the single containers (`web`, `jicofo`, `prosody`) are also prone to running out of resources. 24 | This can be solved by scaling to multiple shards and we will explain this [below](##Shards). More information about this 25 | topic can be found in the [Scaling Jitsi Meet in the Cloud Tutorial](https://www.youtube.com/watch?v=Jj8a6ZRgehI). 26 | 27 | ### Shards 28 | 29 | A multi-shard setup has to solve the following arising difficulties: 30 | 31 | * traffic between shards needs to be load-balanced 32 | * participants joining an existing conference later on must be routed to the correct shard (where the conference takes place) 33 | 34 | To achieve this, we use the following setup: 35 | 36 | ![Architecture Sharding](build/jitsi_sharding.png) 37 | 38 | Each of the shards has the structure described in the chapter [*Components*](##Components). 39 | 40 | HAProxy is the central component here, as it allows the usage of [stick tables](https://www.haproxy.com/de/blog/introduction-to-haproxy-stick-tables/). 41 | They are used in the [configuration](../../base/ops/loadbalancer/haproxy-configmap.yaml) to store the mapping between 42 | shards and conferences. HAProxy reads the value of the URL parameter `room` in order to decide if the conference this 43 | participant wants to join already exists (and hence leading the user to the correct shard) or if it is a conference which is 44 | not known yet. In the latter case simple round-robin load-balancing between the shards is applied and HAProxy 45 | remembers this new conference and routes all arriving participants of this conference to the correct shard. 46 | HAProxy uses DNS service discovery for finding the existing shards. The configuration can be found at [`haproxy-configmap.yaml`](../../base/ops/loadbalancer/haproxy-configmap.yaml). 47 | To decrease the risk of failure a StatefulSet consisting of two HAProxy pods is used. 48 | They are sharing the stick tables holding the shard-conference mapping by using HAProxy's peering functionality. 49 | 50 | By default, we are using two shards. See [*Adding additional shards*](##Adding-additional-shards) for a detailed explanation 51 | how to add more shards. 52 | 53 | ### Kubernetes Setup 54 | 55 | The full Kubernetes architecture for the Jitsi Meet setup in this repository is depicted below: 56 | 57 | ![Architecture Jitsi Meet](build/jitsi_meet.png) 58 | 59 | The entrypoint for every user is the ingress that is defined in [`haproxy-ingress.yaml`](../../base/ops/loadbalancer/haproxy-ingress.yaml) 60 | and patched for each environment by [`haproxy-ingress-patch.yaml`](../../overlays/production/ops/haproxy-ingress-patch.yaml). 61 | At this point SSL is terminated and traffic is forwarded via HAProxy to the [`web` service](../../base/jitsi-shard/web-service.yaml) in plaintext (port 80) 62 | which in turn exposes a web frontend inside the cluster. 63 | 64 | The other containers, [jicofo](../../base/jitsi-shard/jicofo-deployment.yaml), [web](../../base/jitsi-shard/web-deployment.yaml) 65 | and [prosody](../../base/jitsi-shard/prosody-deployment.yaml), which are necessary for setting up conferences, are each managed by a rolling deployment. 66 | 67 | When a user starts a conference it is assigned to a videobridge. The video streaming happens directly between the user 68 | and this videobridge. Therefore, the videobridges need to be open to the internet. This happens with a service of type `NodePort` 69 | for each videobridge (on a different port). 70 | 71 | The videobridges are managed by a [stateful set](../../base/jitsi-shard/jvb/jvb-statefulset.yaml) (to get predictable pod names). 72 | This stateful set is patched for each environment with different resource requests/limits. 73 | A [horizontal pod autoscaler](../../base/jitsi-shard/jvb/jvb-hpa.yaml) governs the number of running videobridges based on 74 | the average value of the network traffic transmitted to/from the pods. It is also patched in the overlays to meet the requirements in the corresponding environments. 75 | 76 | To achieve the setup of an additional `NodePort` service on a dedicated port for every videobridge a 77 | [custom controller](https://metacontroller.app/api/decoratorcontroller/) is used. 78 | This [`service-per-pod` controller](../../base/metacontroller/service-per-pod-configmap.yaml) is triggered by the 79 | creation of a new videobridge pod and sets up the new service binding to a port defined by a base port for each shard (30300) plus the 80 | number of the videobridge pod (e.g. 30301 for pod `jvb-1`). A [startup script](../../base/jitsi-shard/jvb/jvb-entrypoint-configmap.yaml) 81 | handles the configuration of the port in use by the videobridge. When multiple shards exist, we use the ports 304xx (for the second shard), 305xx (for the third shard) and so on for the videobridges of the additional shards. That means, you can use 100 JVBs per shard at most, which should be sufficient. 82 | 83 | In addition, all videobridges communicate with the `prosody` server via a [service](../../base/jitsi-shard/prosody-service.yaml) 84 | of type `ClusterIP`. 85 | 86 | ## Monitoring 87 | 88 | The monitoring stack is comprised of a [kube-prometheus](https://github.com/coreos/kube-prometheus) setup that integrates 89 | 90 | * [Prometheus Operator](https://github.com/coreos/prometheus-operator) 91 | * Highly available [Prometheus](https://prometheus.io/) 92 | * Highly available [Alertmanager](https://github.com/prometheus/alertmanager) 93 | * [Prometheus node-exporter](https://github.com/prometheus/node_exporter) 94 | * [Prometheus Adapter for Kubernetes Metrics APIs](https://github.com/DirectXMan12/k8s-prometheus-adapter) 95 | * [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics) 96 | * [Grafana](https://grafana.com/) 97 | 98 | This stack is adapted and patched to fit the needs of the Jitsi Meet setup. 99 | 100 | The [deployment patch for Grafana](../../base/ops/monitoring/grafana-deployment-patch.yaml) adds a permanent storage to retain 101 | users and changes made in the dashboards. In addition, Grafana is configured to serve from the subpath `/grafana`. 102 | An [ingress](../../base/ops/monitoring/grafana-ingress.yaml) is defined to route traffic to the Grafana instance. 103 | Again, SSL is terminated at the ingress. In order to copy the Kubernetes Secret containing the certificate for your domain from namespace `jitsi` to the `monitoring` namespace, the [kubernetes-reflector](https://github.com/emberstack/kubernetes-reflector) is used. 104 | 105 | A role and a role binding to let Prometheus monitor the `jitsi` namespace is defined in 106 | [prometheus-roleBindingSpecificNamespaces.yaml](../../base/ops/monitoring/prometheus-roleBindingSpecificNamespaces.yaml) and 107 | [prometheus-roleSpecificNamespaces.yaml](../../base/ops/monitoring/prometheus-roleSpecificNamespaces.yaml) respectively. 108 | 109 | Prometheus also gets adapted by an environment specific [patch](../../overlays/production/ops/prometheus-prometheus-patch.yaml) 110 | that adjusts CPU/memory requests and adds a persistent volume. 111 | 112 | Furthermore, [metrics-server](https://github.com/kubernetes-sigs/metrics-server) is used to aggregate resource usage data. 113 | 114 | ### Videobridge monitoring 115 | 116 | The videobridge pods mentioned above have a sidecar container deployed that gathers metrics about the videobridge and 117 | exposes them via a Rest endpoint. This endpoint is scraped by Prometheus based on the definition of a 118 | [PodMonitor](../../base/ops/monitoring/jvb-pod-monitor.yaml) available by the 119 | [Prometheus Operator](https://github.com/coreos/prometheus-operator#customresourcedefinitions). In folder `Default` of Grafana, you will find a dashboard for the current state of your Jitsi-installation. 120 | 121 | ### Monitoring of other components 122 | 123 | Stats of Prosody (using an additional [add-on](https://modules.prosody.im/mod_prometheus.html) in a [configmap](../../base/jitsi-shard/prosody-configmap.yaml)) and [HAProxy](https://www.haproxy.com/de/blog/haproxy-exposes-a-prometheus-metrics-endpoint/) are also gathered by Prometheus and can hence be used for monitoring. Similar to the videobridges, PodMonitors are defined for them. 124 | 125 | ## Adding additional shards 126 | 127 | In order to add an additional shard, follow these steps: 128 | 129 | 1. In the environment of your choice copy the folder [shard-0](../../overlays/production/shard-0) in the same 130 | [folder](../../overlays/production/) and change its name to e.g. `shard-2`. 131 | 2. In all the `.yaml` files contained in the shard folder, change every occurrence of `shard-0` to `shard-2`, 132 | even if `shard-0` can only be found as a substring. 133 | 3. In `jvb-statefulset-patch.yaml` in folder `shard-2`, change the argument from `30300` to `30500` (and if you want to 134 | add even more shards, change this value to `30600`, `30700`, ... for every additional shard). 135 | 4. In [`kustomization.yaml`](../../overlays/production/kustomization.yaml) add the folder you have added in step 1. 136 | 5. Apply your setup as described in chapter *Install* of [`README.md`](../../README.md). 137 | -------------------------------------------------------------------------------- /docs/architecture/build/architecture.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | from diagrams import Diagram, Cluster 5 | from diagrams.custom import Custom 6 | from diagrams.k8s.clusterconfig import HPA 7 | from diagrams.k8s.compute import Deployment, Pod, StatefulSet 8 | from diagrams.k8s.network import Ingress, Service 9 | 10 | globe_img = "resources/globe.png" 11 | 12 | graph_attr = { 13 | "pad": "0.5" 14 | } 15 | 16 | with Diagram(filename="jitsi_meet", direction='TB', show=False, outformat='png', graph_attr=graph_attr): 17 | with Cluster("Conference 1"): 18 | users_1 = [Custom("user", globe_img) for _ in range(3)] 19 | with Cluster("Conference 2"): 20 | users_2 = [Custom("user", globe_img) for _ in range(2)] 21 | 22 | all_users = Custom("all users", globe_img) 23 | 24 | with Cluster("Namespace 'jitsi'"): 25 | n_shards = 2 26 | n_haproxy = 2 27 | haproxy_sts = StatefulSet("haproxy") 28 | haproxy_pods = [Pod(f"haproxy-{j}") for j in range(n_haproxy)] 29 | haproxy_sts >> haproxy_pods 30 | web_service = Service("web") 31 | ingress = Ingress("jitsi.messenger.schule") 32 | ingress >> Service("haproxy") >> haproxy_pods >> web_service 33 | 34 | for k in range(n_shards): 35 | with Cluster(f"Shard-{k}"): 36 | web_pod = Pod(f"shard-{k}-web") 37 | prosody_pod = Pod(f"shard-{k}-prosody") 38 | jicofo_pod = Pod(f"shard-{k}-jicofo") 39 | Deployment(f"shard-{k}-prosody") >> prosody_pod 40 | Deployment(f"shard-{k}-jicofo") >> jicofo_pod 41 | web_service >> web_pod 42 | prosody_service = Service(f"shard-{k}-prosody") 43 | prosody_service >> prosody_pod 44 | prosody_service << web_pod 45 | prosody_service << jicofo_pod 46 | 47 | n_jvbs = 3 48 | with Cluster(f"Jitsi Videobridge Shard-{k}"): 49 | jvb_pods = [Pod(f"shard-{k}-jvb-{i}") for i in range(n_jvbs)] 50 | jvb_services = [Service(f"shard-{k}-jvb-{i}") for i in range(n_jvbs)] 51 | [jvb_services[i] >> jvb_pods[i] >> prosody_service for i in range(n_jvbs)] 52 | jvb_pods << StatefulSet(f"shard-{k}-jvb") << HPA(f"shard-{k}-hpa") 53 | if k == 0: 54 | users_1 >> jvb_services[0] 55 | if k == 1: 56 | users_2 >> jvb_services[1] 57 | all_users >> ingress 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /docs/architecture/build/architecture_one_shard.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | from diagrams import Diagram, Cluster 4 | from diagrams.custom import Custom 5 | from diagrams.k8s.clusterconfig import HPA 6 | from diagrams.k8s.compute import Deployment, Pod, StatefulSet 7 | from diagrams.k8s.network import Ingress, Service 8 | 9 | globe_img = "resources/globe.png" 10 | 11 | graph_attr = { 12 | "pad": "0.5" 13 | } 14 | 15 | with Diagram(filename="jitsi_meet_one_shard", direction='TB', show=False, outformat='png', graph_attr=graph_attr): 16 | with Cluster("Conference 1"): 17 | users_1 = [Custom("user", globe_img) for _ in range(3)] 18 | with Cluster("Conference 2"): 19 | users_2 = [Custom("user", globe_img) for _ in range(2)] 20 | 21 | all_users = Custom("all users", globe_img) 22 | 23 | with Cluster("Namespace 'jitsi'"): 24 | n_shards = 1 25 | n_haproxy = 2 26 | haproxy_sts = StatefulSet("haproxy") 27 | haproxy_pods = [Pod(f"haproxy-{j}") for j in range(n_haproxy)] 28 | haproxy_sts >> haproxy_pods 29 | web_service = Service("web") 30 | ingress = Ingress("jitsi.messenger.schule") 31 | ingress >> Service("haproxy") >> haproxy_pods >> web_service 32 | 33 | for k in range(n_shards): 34 | with Cluster(f"Shard-{k}"): 35 | web_pod = Pod(f"shard-{k}-web") 36 | prosody_pod = Pod(f"shard-{k}-prosody") 37 | jicofo_pod = Pod(f"shard-{k}-jicofo") 38 | Deployment(f"shard-{k}-prosody") >> prosody_pod 39 | Deployment(f"shard-{k}-jicofo") >> jicofo_pod 40 | web_service >> web_pod 41 | prosody_service = Service(f"shard-{k}-prosody") 42 | prosody_service >> prosody_pod 43 | prosody_service << web_pod 44 | prosody_service << jicofo_pod 45 | 46 | n_jvbs = 3 47 | with Cluster(f"Jitsi Videobridge Shard-{k}"): 48 | jvb_pods = [Pod(f"shard-{k}-jvb-{i}") for i in range(n_jvbs)] 49 | jvb_services = [Service(f"shard-{k}-jvb-{i}") for i in range(n_jvbs)] 50 | [jvb_services[i] >> jvb_pods[i] >> prosody_service for i in range(n_jvbs)] 51 | jvb_pods << StatefulSet(f"shard-{k}-jvb") << HPA(f"shard-{k}-hpa") 52 | if k == 0: 53 | users_1 >> jvb_services[0] 54 | users_2 >> jvb_services[1] 55 | all_users >> ingress 56 | 57 | 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /docs/architecture/build/architecture_shards.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | from diagrams import Diagram, Cluster, Edge 5 | from diagrams.custom import Custom 6 | from diagrams.k8s.compute import Pod 7 | from diagrams.k8s.network import Ingress 8 | 9 | globe_img = "resources/globe.png" 10 | jitsi_img = "resources/jitsi-logo-square.png" 11 | 12 | graph_attr = { 13 | "pad": "0.5" 14 | } 15 | 16 | with Diagram(filename="jitsi_sharding", direction='TB', show=False, outformat='png', graph_attr=graph_attr): 17 | 18 | with Cluster("Conference 1"): 19 | users_1 = [Custom("user", globe_img) for _ in range(3)] 20 | 21 | with Cluster("Conference 2"): 22 | users_2 = [Custom("user", globe_img) for _ in range(2)] 23 | 24 | with Cluster("Kubernetes Cluster"): 25 | ingress = Ingress("jitsi.messenger.schule") 26 | with Cluster("HAProxy"): 27 | n_haproxy = 2 28 | haproxy_pods = [Pod(f"haproxy-{i}") for i in range(n_haproxy)] 29 | 30 | edge_conference_1 = Edge(color="red") 31 | edge_conference_2 = Edge(color="green") 32 | shard_0 = Custom("shard-0", jitsi_img) 33 | shard_1 = Custom("shard-1", jitsi_img) 34 | users_1 >> edge_conference_1 >> ingress 35 | users_2 >> edge_conference_2 >> ingress 36 | 37 | for haproxy in haproxy_pods: 38 | ingress >> haproxy 39 | 40 | for i in range(len(users_1)): 41 | haproxy_pods[i % len(haproxy_pods)] >> edge_conference_1 >> shard_0 42 | 43 | for i in range(len(users_2)): 44 | haproxy_pods[i % len(haproxy_pods)] >> edge_conference_2 >> shard_1 45 | -------------------------------------------------------------------------------- /docs/architecture/build/jitsi_meet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpi-schul-cloud/jitsi-deployment/463ba815d3a18087e1e3b5bab9801699c5b31892/docs/architecture/build/jitsi_meet.png -------------------------------------------------------------------------------- /docs/architecture/build/jitsi_meet_one_shard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpi-schul-cloud/jitsi-deployment/463ba815d3a18087e1e3b5bab9801699c5b31892/docs/architecture/build/jitsi_meet_one_shard.png -------------------------------------------------------------------------------- /docs/architecture/build/jitsi_sharding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpi-schul-cloud/jitsi-deployment/463ba815d3a18087e1e3b5bab9801699c5b31892/docs/architecture/build/jitsi_sharding.png -------------------------------------------------------------------------------- /docs/architecture/build/requirements.txt: -------------------------------------------------------------------------------- 1 | contextvars==2.4 2 | diagrams==0.10.0 3 | graphviz==0.13.2 4 | immutables==0.13 5 | Jinja2==2.11.2 6 | MarkupSafe==1.1.1 7 | -------------------------------------------------------------------------------- /docs/architecture/build/resources/globe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpi-schul-cloud/jitsi-deployment/463ba815d3a18087e1e3b5bab9801699c5b31892/docs/architecture/build/resources/globe.png -------------------------------------------------------------------------------- /docs/architecture/build/resources/jitsi-logo-square.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpi-schul-cloud/jitsi-deployment/463ba815d3a18087e1e3b5bab9801699c5b31892/docs/architecture/build/resources/jitsi-logo-square.png -------------------------------------------------------------------------------- /docs/architecture/build/shard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpi-schul-cloud/jitsi-deployment/463ba815d3a18087e1e3b5bab9801699c5b31892/docs/architecture/build/shard.png -------------------------------------------------------------------------------- /docs/architecture/build/shard.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | from diagrams import Diagram, Cluster, Edge 5 | from diagrams.custom import Custom 6 | from diagrams.k8s.compute import Pod 7 | 8 | globe_img = "resources/globe.png" 9 | 10 | graph_attr = { 11 | "pad": "0.5" 12 | } 13 | 14 | with Diagram(filename="shard", direction='TB', show=False, outformat='png', graph_attr=graph_attr): 15 | user_1 = [Custom("user", globe_img) for _ in range(1)] 16 | 17 | with Cluster("Shard"): 18 | web, jicofo, prosody = [Pod("web"), Pod("jicofo"), Pod("prosody")] 19 | 20 | user_1 >> web 21 | 22 | web >> prosody 23 | jicofo >> Edge() << prosody 24 | 25 | n_jvbs = 3 26 | with Cluster("Jitsi Videobridge"): 27 | jvb_pods = [Pod(f"jvb-{i}") for i in range(n_jvbs)] 28 | [jvb_pods[i] >> Edge() << prosody for i in range(n_jvbs)] 29 | 30 | user_1 >> jvb_pods[0] 31 | -------------------------------------------------------------------------------- /docs/loadtests/loadtestresults.md: -------------------------------------------------------------------------------- 1 | # Loadtest results 2 | 3 | Below, we share the results of our load tests run in [IONOS Cloud](https://dcd.ionos.com/), which took place with a Jitsi single-shard-setup. 4 | The loadtest test server configuration for both cases was set to 16 Cores Intel, 16 GB RAM, 8 server. All simulated participants did send a high-quality video-stream (1280x720 pixels, 60 frames per second) and sound. There are two different test cases we investigated: 5 | 6 | ## 1 One big conference 7 | 8 | All test servers are connecting to one conference. This was achieved by a slight variation of [run_loadtest.sh](loadtest/run_loadtest.sh). 9 | We have tested two different setups for the limits and request of the JVB pods: 10 | 11 | ### 1.1 CPU limit is 4 cores, Memory limit is 1 GB 12 | 13 | We have further varied the number channelLastN in web-configmap.yaml. It decides how many video streams are forwarded to every conference participant, where only the video of the most active N users are used. 14 | 15 | #### 1.1.1 ChannelLastN not set 16 | 17 | Here are the results for channelLastN not set, that means all videos are streamed to all participants: 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 |
Participants totalCPU usage test servervideo qualityframe ratesoundnum participants stableMax Memory Usage per JVBMax CPU usage per JVBNetwork Transmitted maxCPU-ThrottlingRun orderError Messages JVB and Remarks
8~ 7 %very highvery highvery goodstable830 MB1,9 cores4,7 MB0%2
14~ 7 %very highvery highvery goodstable830 MB3,5 cores10,7 MB5%1
24~20 %okaypartly laggingvery goodstable940 MB4 Cores (pod limit)14,6 MB80%3Conference was moved to another JVB. Observed warnings: TCC packet contained received sequence numbers: 58852-59404. Couldn't find packet detail for the seq nums: 58852-59404. Latest seqNum was 63013, size is 1000. Latest RTT is 4310.241456 ms.
81 | 82 | #### 1.1.2 ChannelLastN set to 5 83 | 84 | Here are the corresponding results for channelLastN set to 5: 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 |
Participants totalCPU-usage test servervideo qualityframe ratesoundnum participants stableMax Memory Usage per JVB in MBMax CPU-Usage per JVBMax Network Transmitted MBCPU-ThrottlingError Messages JVB and RemarksRun order
8~ 7%very highvery highvery goodstable760 MB1,9 Cores4,1 MB0%5
16~17%very highvery highvery goodstable760 MB3,5 Cores9,1 MB5%4
24~20%highvery highvery goodstable800 MB4 Cores (pod limit)8,4 MB80%6video quality still high, network traffic seems to be manageble for JVB
147 | 148 | ### 1.2 CPU limit is 8 cores, Memory limit is 1 GB 149 | 150 | #### 1.2.1 ChannelLastN not set 151 | Not tested. 152 | #### 1.2.2 ChannelLastN set to 5 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 |
Participants totalCPU-usage test servervideo qualityframe ratesoundnum participants stableMax Memory Usage per JVB in MBMax CPU-Usage per JVBMax Network Transmitted MBCPU-ThrottlingError Messages JVB and RemarksRun order
3230%very highpartly laggingvery good3 participants leaving9104,3170WARNING: 1891053881 large jump in sequence numbers detected (highest received was 52436, current is 52603, jump of 166), not requesting retransmissions , WARNING: TCC packet contained received sequence numbers: 21287-21371. Couldn't find packet detail for the seq nums: 21287-21371. Latest seqNum was 24965, size is 1000. Latest RTT is 4568.5094 ms. , INFO: timeout for pair: 217.160.210.116:30301/udp/srflx -> 157.97.109.136:52047/udp/prflx (stream-59888d67.RTP), failing. Error Messages indicate network issues. The traffic might be already to high. Conference stays mostly stable, though1
1615%very highvery highvery goodstable9601,88,50CPU usage inconsistent with other observations2
201 | 202 | ## 2 Eight smaller conferences in parallel 203 | 204 | Each of the eight test servers start exactly one conference with a certain number of participants. The script [run_loadtest.sh](loadtest/run_loadtest.sh) was used for this purpose. 205 | 206 | ### 2.1 CPU limit is 4 cores, Memory limit is 1GB 207 | 208 | There were two JVBs running for this test. 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | 228 | 229 | 230 | 231 | 232 | 233 | 234 | 235 | 236 | 237 | 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | 254 | 255 | 256 | 257 |
Participants per conferenceCPU-usage test servervideo qualityframe ratesoundnum participants stableMax Memory Usage per JVB in MBMax CPU-Usage per JVBMax Network Transmitted MB (both JVBs)CPU-ThrottlingError Messages JVB and RemarksRun order
5~ 50%very highvery highvery goodstable~ 900 MB~ 2.5 Cores9,1 MB; 6,0 MB0%4
10~75%highhighvery goodseveral participants lost~ 860 MB~ 3,9 Cores (nearly pod limit=12,5 MB ; 10,5 MB50%3
258 | 259 | Tests with bigger conference sizes have not been carried out due to the high resource demands of the test servers. 260 | -------------------------------------------------------------------------------- /loadtest/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | selenium-hub: 4 | image: selenium/hub:3.141.59-20200409 5 | container_name: selenium-hub 6 | ports: 7 | - "4444:4444" 8 | chrome: 9 | image: selenium/node-chrome:3.141.59-20200409 10 | volumes: 11 | - /dev/shm:/dev/shm 12 | - ./resources:/usr/share/jitsi-meet-torture/resources 13 | depends_on: 14 | - selenium-hub 15 | environment: 16 | - HUB_HOST=selenium-hub 17 | - HUB_PORT=4444 18 | -------------------------------------------------------------------------------- /loadtest/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script sets up the virtual machine image that will then be frozen into a snapshot 4 | 5 | sudo apt-get update 6 | 7 | # Install docker, see https://docs.docker.com/engine/install/ubuntu/ 8 | sudo apt-get install \ 9 | apt-transport-https \ 10 | ca-certificates \ 11 | curl \ 12 | gnupg-agent \ 13 | software-properties-common 14 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 15 | sudo add-apt-repository \ 16 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ 17 | $(lsb_release -cs) \ 18 | stable" 19 | sudo apt-get install docker-ce docker-ce-cli containerd.io -y 20 | 21 | # Install docker-compose 22 | sudo curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 23 | sudo chmod +x /usr/local/bin/docker-compose 24 | 25 | # Install maven 26 | sudo apt install maven -y 27 | 28 | # Clone the jitsi-torture-repository 29 | git clone https://github.com/jitsi/jitsi-meet-torture 30 | 31 | # Download the test video 32 | wget -O jitsi-meet-torture/resources/FourPeople_1280x720_30.y4m https://media.xiph.org/video/derf/y4m/FourPeople_1280x720_60.y4m 33 | 34 | # Get docker-compose file 35 | wget -O jitsi-meet-torture/docker-compose.yml https://raw.githubusercontent.com/schul-cloud/jitsi-deployment/develop/loadtest/docker-compose.yml 36 | -------------------------------------------------------------------------------- /loadtest/loadtest.tf: -------------------------------------------------------------------------------- 1 | resource "profitbricks_lan" "loadtest_lan" { 2 | name = "loadtest" 3 | datacenter_id = var.datacenter 4 | public = true 5 | } 6 | 7 | resource "profitbricks_server" "loadtest_server" { 8 | count = var.server_count 9 | name = "loadtest-server-${count.index}" 10 | datacenter_id = var.datacenter 11 | cores = 16 12 | ram = 16384 13 | availability_zone = "ZONE_1" 14 | cpu_family = "INTEL_XEON" 15 | image_name = var.image_name 16 | 17 | volume { 18 | name = "hdd-loadtest-${count.index}" 19 | size = var.hdd_size 20 | disk_type = "HDD" 21 | } 22 | 23 | nic { 24 | lan = profitbricks_lan.loadtest_lan.id 25 | dhcp = true 26 | firewall_active = true 27 | 28 | firewall { 29 | protocol = "TCP" 30 | name = "SSH" 31 | port_range_start = 22 32 | port_range_end = 22 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /loadtest/run_loadtest.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | #/ Usage: run_loadtest.sh [option..] 5 | #/ Description: Run array load test on a Jitsi Meet installation 6 | #/ Examples: ./run_loadtest.sh -u wolfgang.moser@woodmark.de -pw Pa$$w0rd -p 10 -j https://jitsi.dev.messenger.schule -d 180 -k ~/.ssh/id_rsa 7 | #/ Options: 8 | #/ -u, --user: IONOS cloud user name 9 | #/ -pw, --password: IONOS cloud user password 10 | #/ -p, --participants: Number of video conference participants per worker 11 | #/ -j, --jitsi-url: URL pointing to the Jitsi installation (optional, defaults to https://jitsi.dev.messenger.schule) 12 | #/ -d, --duration: Duration of each conference in seconds 13 | #/ -k, --ssh-key-path: Path to the SSH private key file used for connecting to the worker nodes (optional, defaults to ~/.ssh/id_rsa) 14 | #/ --help: Display this help message 15 | usage() { grep '^#/' "$0" | cut -c4- ; exit 0 ; } 16 | expr "$*" : ".*--help" > /dev/null && usage 17 | 18 | readonly LOG_FILE="/tmp/$(basename "$0").log" 19 | info() { echo "[INFO] $@" | tee -a "$LOG_FILE" >&2 ; } 20 | warning() { echo "[WARNING] $@" | tee -a "$LOG_FILE" >&2 ; } 21 | error() { echo "[ERROR] $@" | tee -a "$LOG_FILE" >&2 ; } 22 | fatal() { echo "[FATAL] $@" | tee -a "$LOG_FILE" >&2 ; exit 1 ; } 23 | 24 | cleanup() { 25 | for ip in "${ips[@]}" 26 | do 27 | ssh -o "StrictHostKeyChecking no" -i "$1" root@"$ip" docker-compose -f '~/jitsi-meet-torture/docker-compose.yml' down 28 | done 29 | } 30 | 31 | # Parse Parameters # 32 | while [ "$#" -gt 1 ]; 33 | do 34 | key="$1" 35 | 36 | case $key in 37 | -u|--user) 38 | user="$2" 39 | shift 40 | ;; 41 | -pw|--password) 42 | password="$2" 43 | shift 44 | ;; 45 | -p|--participants) 46 | participants="$2" 47 | shift 48 | ;; 49 | -j|--jitsi-url) 50 | jitsiUrl="$2" 51 | shift 52 | ;; 53 | -d|--duration) 54 | duration="$2" 55 | shift 56 | ;; 57 | -k|--ssh-key-path) 58 | sshKeyPath="$2" 59 | shift 60 | ;; 61 | *) 62 | ;; 63 | esac 64 | shift 65 | done 66 | 67 | if [ -z "$jitsiUrl" ]; then 68 | jitsiUrl="https://jitsi.dev.messenger.schule" 69 | fi 70 | 71 | if [ -z "$sshKeyPath" ]; then 72 | sshKeyPath='~/.ssh/id_rsa' 73 | fi 74 | 75 | if [[ "${BASH_SOURCE[0]}" = "$0" ]]; then 76 | info "Get IP addresses" 77 | mapfile -t ips < <( \ 78 | curl -s -u "$user":"$password" 'https://api.ionos.com/cloudapi/v5/datacenters/df8bd51c-298f-4981-8df8-6fd07340b397/servers?depth=3' \ 79 | | jq '.items[] | select(.properties.name | startswith("loadtest-server-")) | .entities.nics.items[].properties.ips[]' \ 80 | | tr -d \" | tr -d \\r \ 81 | ) 82 | info "Found ${#ips[@]} IPs: ${ips[*]}" 83 | 84 | trap "cleanup $sshKeyPath $ips" EXIT 85 | 86 | info "Start docker-compose" 87 | for ip in "${ips[@]}" 88 | do 89 | ssh -o "StrictHostKeyChecking no" -i "$sshKeyPath" root@"$ip" docker-compose -f '~/jitsi-meet-torture/docker-compose.yml' up --build --scale chrome="$participants" -d 90 | done 91 | 92 | info "Start load test" 93 | for (( i = 0 ; i < ${#ips[@]} ; i=$i+1 )); 94 | do 95 | ssh -o "StrictHostKeyChecking no" -i "$sshKeyPath" root@"${ips[${i}]}" mvn -f '~/jitsi-meet-torture/pom.xml' \ 96 | -Dthreadcount=1 \ 97 | -Dorg.jitsi.malleus.conferences=1 \ 98 | -Dorg.jitsi.malleus.participants="$participants" \ 99 | -Dorg.jitsi.malleus.senders="$participants" \ 100 | -Dorg.jitsi.malleus.audio_senders="$participants" \ 101 | -Dorg.jitsi.malleus.duration="$duration" \ 102 | -Dorg.jitsi.malleus.room_name_prefix=loadtest-"$i" \ 103 | -Dorg.jitsi.malleus.regions="" \ 104 | -Dremote.address="http://localhost:4444/wd/hub" \ 105 | -Djitsi-meet.tests.toRun=MalleusJitsificus \ 106 | -Dwdm.gitHubTokenName=jitsi-jenkins \ 107 | -Dremote.resource.path='/usr/share/jitsi-meet-torture' \ 108 | -Djitsi-meet.instance.url="$jitsiUrl" \ 109 | -Djitsi-meet.isRemote=true \ 110 | -Dchrome.disable.nosanbox=true \ 111 | test &>/dev/null & 112 | info "Created conference: $jitsiUrl/loadtest-"$i"0" 113 | done 114 | 115 | # wait until load tests finishes (plus an additional startup time) 116 | sleep $((60+$duration)) 117 | 118 | info "Clean up" 119 | cleanup $sshKeyPath $ips 120 | fi 121 | -------------------------------------------------------------------------------- /loadtest/variables.tf: -------------------------------------------------------------------------------- 1 | variable "datacenter" { 2 | description = "data center ID where loadtest servers should be created" 3 | type = string 4 | default = "df8bd51c-298f-4981-8df8-6fd07340b397" # Video Data Center 5 | } 6 | 7 | variable "server_count" { 8 | description = "number of loadtest servers" 9 | type = number 10 | default = 8 11 | } 12 | 13 | variable "hdd_size" { 14 | description = "size in GB of VM hard disk" 15 | type = number 16 | default = "20" 17 | } 18 | 19 | variable "image_name" { 20 | description = "image name" 21 | type = string 22 | default = "517aa2f9-8cfb-4a11-9893-db0421be1c5e" # Jitsi Torturer 23 | } 24 | -------------------------------------------------------------------------------- /overlays/development-monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ops/ 3 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/bbb-basic-auth-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | namespace: monitoring 5 | name: bbb-basic-auth 6 | data: 7 | username: 8 | password: 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/bbb-endpoints.yaml: -------------------------------------------------------------------------------- 1 | # defines the IPs of BBB that should be scraped by Prometheus 2 | kind: Endpoints 3 | apiVersion: v1 4 | metadata: 5 | name: bbb 6 | namespace: monitoring 7 | labels: 8 | k8s-app: bbb-metrics 9 | subsets: 10 | - addresses: 11 | - ip: 81.173.113.21 12 | - ip: 85.215.237.214 13 | - ip: 85.215.237.213 14 | - ip: 185.132.45.5 15 | - ip: 217.160.200.92 16 | - ip: 217.160.200.91 17 | - ip: 185.48.117.209 18 | - ip: 213.244.192.234 19 | - ip: 185.48.117.43 20 | - ip: 217.160.200.12 21 | - ip: 185.48.116.231 22 | - ip: 185.48.119.186 23 | - ip: 185.48.116.252 24 | ports: 25 | - name: ne-metrics 26 | port: 9100 27 | - name: bbb-metrics 28 | port: 9688 29 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/certificate.yaml: -------------------------------------------------------------------------------- 1 | # we cannot patch custom resources with patchesStrategicMerge -> hence, we define the certificate here 2 | apiVersion: cert-manager.io/v1alpha2 3 | kind: Certificate 4 | metadata: 5 | name: jitsi-messenger-schule 6 | namespace: jitsi 7 | # allows the copying of the resulting secret to namespace `monitoring` 8 | annotations: 9 | reflector.v1.k8s.emberstack.com/secret-reflection-allowed: "true" 10 | reflector.v1.k8s.emberstack.com/secret-reflection-allowed-namespaces: "monitoring" 11 | spec: 12 | # secret names are always required 13 | secretName: jitsi-messenger-schule-tls 14 | duration: 2160h # 90d 15 | renewBefore: 360h # 15d 16 | organization: 17 | - hpi 18 | isCA: false 19 | keySize: 2048 20 | keyAlgorithm: rsa 21 | keyEncoding: pkcs1 22 | usages: 23 | - server auth 24 | - client auth 25 | # at least one of DNS Name, URI, or IP address is required 26 | dnsNames: 27 | # fill in dnsName here 28 | - jitsi.staging.messenger.schule 29 | # issuer references are always required 30 | issuerRef: 31 | name: letsencrypt 32 | # ClusterIssuers can be referenced by changing the kind here 33 | # default value is Issuer (i.e. a locally namespaced Issuer) 34 | kind: ClusterIssuer 35 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/elasticsearch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: elasticsearch.k8s.elastic.co/v1 2 | kind: Elasticsearch 3 | metadata: 4 | name: elasticsearch 5 | namespace: logging 6 | spec: 7 | version: 7.7.0 8 | auth: 9 | fileRealm: 10 | - secretName: es-filerealm-secret 11 | podDisruptionBudget: {} 12 | nodeSets: 13 | - name: all-in-one 14 | count: 2 15 | config: 16 | node.master: true 17 | node.data: true 18 | node.ingest: true 19 | podTemplate: 20 | spec: 21 | initContainers: 22 | # see https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html#k8s-virtual-memory 23 | - name: sysctl 24 | securityContext: 25 | privileged: true 26 | command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144'] 27 | volumeClaimTemplates: 28 | - metadata: 29 | name: elasticsearch-data 30 | spec: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 5Gi 36 | storageClassName: ionos-enterprise-hdd 37 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/grafana-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: grafana 13 | env: 14 | - name: GF_SERVER_DOMAIN 15 | value: "jitsi.staging.messenger.schule" 16 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/grafana-ingress-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: grafana-ingress 5 | namespace: monitoring 6 | annotations: 7 | nginx.ingress.kubernetes.io/use-regex: "true" 8 | spec: 9 | tls: 10 | - hosts: 11 | - jitsi.staging.messenger.schule 12 | secretName: jitsi-messenger-schule-tls 13 | rules: 14 | - host: jitsi.staging.messenger.schule 15 | http: 16 | paths: 17 | # only match /grafana and paths under /grafana/ 18 | - path: /grafana(/|$)(.*) 19 | backend: 20 | serviceName: grafana 21 | servicePort: http 22 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/grafana-tls-secret.yaml: -------------------------------------------------------------------------------- 1 | # filled by the kubernetes-reflector using the secret in namespace jitsi (which holds the certificate of the domain) 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: jitsi-messenger-schule-tls 6 | namespace: monitoring 7 | annotations: 8 | reflector.v1.k8s.emberstack.com/reflects: "jitsi/jitsi-messenger-schule-tls" 9 | data: 10 | {} -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/haproxy-ingress-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: haproxy-ingress 5 | namespace: jitsi 6 | spec: 7 | tls: 8 | - hosts: 9 | - jitsi.staging.messenger.schule 10 | secretName: jitsi-messenger-schule-tls 11 | rules: 12 | - host: jitsi.staging.messenger.schule 13 | http: 14 | paths: 15 | - path: / 16 | backend: 17 | serviceName: haproxy 18 | servicePort: 80 19 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../../base/ops 3 | 4 | resources: 5 | - bbb-endpoints.yaml 6 | - turn-endpoints.yaml 7 | - certificate.yaml 8 | - elasticsearch.yaml 9 | - grafana-tls-secret.yaml 10 | - bbb-basic-auth-secret.yaml 11 | 12 | patchesStrategicMerge: 13 | - grafana-deployment-patch.yaml 14 | - grafana-ingress-patch.yaml 15 | - prometheus-prometheus-patch.yaml 16 | - haproxy-ingress-patch.yaml 17 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/prometheus-prometheus-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | name: k8s 5 | namespace: monitoring 6 | spec: 7 | retention: 2d 8 | resources: 9 | requests: 10 | memory: 1000Mi 11 | cpu: 1000m 12 | limits: 13 | memory: 1000Mi 14 | cpu: 1000m 15 | storage: 16 | volumeClaimTemplate: 17 | metadata: 18 | name: prometheus 19 | spec: 20 | accessModes: [ "ReadWriteOnce" ] 21 | storageClassName: ionos-enterprise-hdd 22 | resources: 23 | requests: 24 | storage: 10Gi 25 | -------------------------------------------------------------------------------- /overlays/development-monitoring/ops/turn-endpoints.yaml: -------------------------------------------------------------------------------- 1 | # defines the IPs of TURN servers that should be scraped by Prometheus 2 | kind: Endpoints 3 | apiVersion: v1 4 | metadata: 5 | name: turn 6 | namespace: monitoring 7 | labels: 8 | k8s-app: turn-metrics 9 | subsets: 10 | - addresses: 11 | # sc-staging-bbb-turn-1 12 | - ip: 213.244.192.173 13 | - ip: 213.244.192.110 14 | - ip: 217.160.200.52 15 | - ip: 217.160.200.21 16 | # sc-staging-jitsi-turn-1 17 | - ip: 217.160.200.167 18 | - ip: 217.160.200.166 19 | - ip: 217.160.200.169 20 | - ip: 217.160.200.168 21 | ports: 22 | - name: metrics 23 | port: 9100 24 | -------------------------------------------------------------------------------- /overlays/development/jitsi-base/jvb-hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | minReplicas: 1 8 | maxReplicas: 2 9 | metrics: 10 | - type: Pods 11 | pods: 12 | metric: 13 | name: container_network_transmit_bytes_per_second 14 | target: 15 | type: AverageValue 16 | averageValue: 400000 17 | -------------------------------------------------------------------------------- /overlays/development/jitsi-base/jvb-statefulset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | name: jvb 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: prometheus-exporter 11 | resources: 12 | requests: 13 | cpu: "100m" 14 | memory: "100Mi" 15 | limits: 16 | cpu: "100m" 17 | memory: "100Mi" 18 | - name: jvb 19 | resources: 20 | requests: 21 | cpu: "1500m" 22 | memory: "1000Mi" 23 | limits: 24 | cpu: "1500m" 25 | memory: "1000Mi" 26 | -------------------------------------------------------------------------------- /overlays/development/jitsi-base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../../base/jitsi-shard 3 | 4 | patchesStrategicMerge: 5 | - jvb-statefulset-patch.yaml 6 | - jvb-hpa-patch.yaml -------------------------------------------------------------------------------- /overlays/development/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../base/jitsi 3 | 4 | resources: 5 | - shard-0/ 6 | - shard-1/ 7 | -------------------------------------------------------------------------------- /overlays/development/shard-0/jicofo-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: jicofo 7 | name: jicofo 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_1 14 | containers: 15 | - name: jicofo 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-0-prosody 19 | -------------------------------------------------------------------------------- /overlays/development/shard-0/jvb-hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | scaleTargetRef: 8 | name: shard-0-jvb # needs to be manually set 9 | 10 | -------------------------------------------------------------------------------- /overlays/development/shard-0/jvb-statefulset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | name: jvb 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: jvb 10 | shard: "0" 11 | template: 12 | metadata: 13 | labels: 14 | k8s-app: jvb 15 | shard: "0" 16 | spec: 17 | # avoid that pods of different shards share zone 18 | nodeSelector: 19 | topology.kubernetes.io/zone: ZONE_1 20 | containers: 21 | - name: jvb 22 | env: 23 | - name: XMPP_SERVER 24 | value: shard-0-prosody 25 | args: 26 | - "30300" 27 | - "/init" 28 | -------------------------------------------------------------------------------- /overlays/development/shard-0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../jitsi-base 3 | 4 | patchesStrategicMerge: 5 | - jvb-hpa-patch.yaml 6 | - jvb-statefulset-patch.yaml 7 | - jicofo-deployment-patch.yaml 8 | - web-deployment-patch.yaml 9 | - prosody-deployment-patch.yaml 10 | 11 | namespace: jitsi 12 | 13 | namePrefix: shard-0- 14 | 15 | commonLabels: 16 | shard: "0" 17 | -------------------------------------------------------------------------------- /overlays/development/shard-0/prosody-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: prosody 7 | name: prosody 8 | spec: 9 | selector: 10 | matchLabels: 11 | shard: "0" 12 | template: 13 | spec: 14 | # avoid that pods of different shards share zone 15 | nodeSelector: 16 | topology.kubernetes.io/zone: ZONE_1 17 | metadata: 18 | labels: 19 | shard: "0" 20 | -------------------------------------------------------------------------------- /overlays/development/shard-0/web-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: web 7 | name: web 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_1 14 | containers: 15 | - name: web 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-0-prosody 19 | - name: XMPP_BOSH_URL_BASE 20 | value: http://shard-0-prosody:5280 21 | -------------------------------------------------------------------------------- /overlays/development/shard-1/jicofo-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: jicofo 7 | name: jicofo 8 | spec: 9 | 10 | template: 11 | spec: 12 | # avoid that pods of different shards share zone 13 | nodeSelector: 14 | topology.kubernetes.io/zone: ZONE_2 15 | containers: 16 | - name: jicofo 17 | env: 18 | - name: XMPP_SERVER 19 | value: shard-1-prosody 20 | -------------------------------------------------------------------------------- /overlays/development/shard-1/jvb-hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | scaleTargetRef: 8 | name: shard-1-jvb # needs to be manually set 9 | 10 | -------------------------------------------------------------------------------- /overlays/development/shard-1/jvb-statefulset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | name: jvb 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: jvb 10 | shard: "1" 11 | template: 12 | metadata: 13 | labels: 14 | k8s-app: jvb 15 | shard: "1" 16 | spec: 17 | # avoid that pods of different shards share zone 18 | nodeSelector: 19 | topology.kubernetes.io/zone: ZONE_2 20 | containers: 21 | - name: jvb 22 | env: 23 | - name: XMPP_SERVER 24 | value: shard-1-prosody 25 | args: 26 | - "30400" 27 | - "/init" 28 | -------------------------------------------------------------------------------- /overlays/development/shard-1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../jitsi-base 3 | 4 | patchesStrategicMerge: 5 | - jvb-hpa-patch.yaml 6 | - jvb-statefulset-patch.yaml 7 | - jicofo-deployment-patch.yaml 8 | - web-deployment-patch.yaml 9 | - prosody-deployment-patch.yaml 10 | 11 | namespace: jitsi 12 | 13 | namePrefix: shard-1- 14 | 15 | commonLabels: 16 | shard: "1" 17 | -------------------------------------------------------------------------------- /overlays/development/shard-1/prosody-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: prosody 7 | name: prosody 8 | spec: 9 | selector: 10 | matchLabels: 11 | shard: "1" 12 | template: 13 | spec: 14 | # avoid that pods of different shards share zone 15 | nodeSelector: 16 | topology.kubernetes.io/zone: ZONE_2 17 | metadata: 18 | labels: 19 | shard: "1" 20 | -------------------------------------------------------------------------------- /overlays/development/shard-1/web-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: web 7 | name: web 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_2 14 | containers: 15 | - name: web 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-1-prosody 19 | - name: XMPP_BOSH_URL_BASE 20 | value: http://shard-1-prosody:5280 21 | -------------------------------------------------------------------------------- /overlays/production-monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ops/ 3 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/bbb-basic-auth-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | namespace: monitoring 5 | name: bbb-basic-auth 6 | data: 7 | username: 8 | password: 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/bbb-endpoints.yaml: -------------------------------------------------------------------------------- 1 | # defines the IPs of BBB that should be scraped by Prometheus 2 | kind: Endpoints 3 | apiVersion: v1 4 | metadata: 5 | name: bbb 6 | namespace: monitoring 7 | labels: 8 | k8s-app: bbb-metrics 9 | subsets: 10 | - addresses: 11 | # scalelite 12 | - ip: 85.215.238.43 13 | # sc-prod-bbb-1 14 | - ip: 185.56.149.60 15 | - ip: 185.56.149.59 16 | - ip: 185.56.149.58 17 | - ip: 185.56.149.51 18 | - ip: 185.56.149.50 19 | - ip: 185.56.149.53 20 | - ip: 185.56.149.52 21 | - ip: 185.56.149.55 22 | - ip: 185.56.149.54 23 | - ip: 185.56.149.57 24 | - ip: 185.56.149.56 25 | - ip: 185.56.149.71 26 | - ip: 185.56.149.70 27 | - ip: 185.56.149.108 28 | - ip: 185.56.149.109 29 | - ip: 185.56.149.104 30 | - ip: 185.56.149.2 31 | - ip: 185.56.149.105 32 | - ip: 185.56.149.3 33 | - ip: 185.56.149.106 34 | - ip: 185.56.149.4 35 | - ip: 185.56.149.107 36 | - ip: 185.56.149.5 37 | - ip: 185.56.149.100 38 | - ip: 185.56.149.6 39 | - ip: 185.56.149.69 40 | - ip: 185.56.149.101 41 | - ip: 185.56.149.7 42 | - ip: 185.56.149.102 43 | - ip: 185.56.149.8 44 | - ip: 185.56.149.103 45 | - ip: 185.56.149.9 46 | - ip: 185.56.149.62 47 | - ip: 185.56.149.61 48 | - ip: 185.56.149.64 49 | - ip: 185.56.149.63 50 | - ip: 185.56.149.66 51 | - ip: 185.56.149.65 52 | - ip: 185.56.149.68 53 | - ip: 185.56.149.67 54 | - ip: 185.56.149.80 55 | - ip: 185.56.149.82 56 | - ip: 185.56.149.81 57 | - ip: 185.56.149.119 58 | - ip: 185.56.149.115 59 | - ip: 185.56.149.116 60 | - ip: 185.56.149.117 61 | - ip: 185.56.149.118 62 | - ip: 185.56.149.111 63 | - ip: 185.56.149.112 64 | - ip: 185.56.149.113 65 | - ip: 185.56.149.114 66 | - ip: 185.56.149.110 67 | - ip: 185.56.149.73 68 | - ip: 185.56.149.72 69 | - ip: 185.56.149.75 70 | - ip: 185.56.149.74 71 | - ip: 185.56.149.77 72 | - ip: 185.56.149.76 73 | - ip: 185.56.149.79 74 | - ip: 185.56.149.78 75 | - ip: 185.56.149.91 76 | - ip: 185.56.149.90 77 | - ip: 185.56.149.93 78 | - ip: 185.56.149.92 79 | - ip: 185.56.149.126 80 | - ip: 185.56.149.127 81 | - ip: 185.56.149.122 82 | - ip: 185.56.149.123 83 | - ip: 185.56.149.124 84 | - ip: 185.56.149.125 85 | - ip: 185.56.149.120 86 | - ip: 185.56.149.121 87 | - ip: 185.56.149.84 88 | - ip: 185.56.149.83 89 | - ip: 185.56.149.86 90 | - ip: 185.56.149.85 91 | - ip: 185.56.149.88 92 | - ip: 185.56.149.87 93 | - ip: 185.56.149.89 94 | - ip: 185.56.149.15 95 | - ip: 185.56.149.14 96 | - ip: 185.56.149.17 97 | - ip: 185.56.149.16 98 | - ip: 185.56.149.19 99 | - ip: 185.56.149.18 100 | - ip: 185.56.149.95 101 | - ip: 185.56.149.94 102 | - ip: 185.56.149.97 103 | - ip: 185.56.149.96 104 | - ip: 185.56.149.11 105 | - ip: 185.56.149.99 106 | - ip: 185.56.149.10 107 | - ip: 185.56.149.98 108 | - ip: 185.56.149.13 109 | - ip: 185.56.149.12 110 | - ip: 185.56.149.26 111 | - ip: 185.56.149.25 112 | - ip: 185.56.149.28 113 | - ip: 185.56.149.27 114 | - ip: 185.56.149.29 115 | - ip: 185.56.149.20 116 | - ip: 185.56.149.22 117 | - ip: 185.56.149.21 118 | - ip: 185.56.149.24 119 | - ip: 185.56.149.23 120 | - ip: 185.56.149.37 121 | - ip: 185.56.149.36 122 | - ip: 185.56.149.39 123 | - ip: 185.56.149.38 124 | - ip: 185.56.149.31 125 | - ip: 185.56.149.30 126 | - ip: 185.56.149.33 127 | - ip: 185.56.149.32 128 | - ip: 185.56.149.35 129 | - ip: 185.56.149.34 130 | - ip: 185.56.149.48 131 | - ip: 185.56.149.47 132 | - ip: 185.56.149.49 133 | - ip: 185.56.149.40 134 | - ip: 185.56.149.42 135 | - ip: 185.56.149.41 136 | - ip: 185.56.149.44 137 | - ip: 185.56.149.43 138 | - ip: 185.56.149.46 139 | - ip: 185.56.149.45 140 | # sc-prod-bbb-2 141 | - ip: 185.56.149.218 142 | - ip: 185.56.149.219 143 | - ip: 185.56.149.214 144 | - ip: 185.56.149.215 145 | - ip: 185.56.149.216 146 | - ip: 185.56.149.217 147 | - ip: 185.56.149.177 148 | - ip: 185.56.149.210 149 | - ip: 185.56.149.178 150 | - ip: 185.56.149.211 151 | - ip: 185.56.149.179 152 | - ip: 185.56.149.212 153 | - ip: 185.56.149.213 154 | - ip: 185.56.149.173 155 | - ip: 185.56.149.174 156 | - ip: 185.56.149.175 157 | - ip: 185.56.149.176 158 | - ip: 185.56.149.170 159 | - ip: 185.56.149.171 160 | - ip: 185.56.149.172 161 | - ip: 185.56.149.229 162 | - ip: 185.56.149.225 163 | - ip: 185.56.149.226 164 | - ip: 185.56.149.227 165 | - ip: 185.56.149.228 166 | - ip: 185.56.149.188 167 | - ip: 185.56.149.221 168 | - ip: 185.56.149.189 169 | - ip: 185.56.149.222 170 | - ip: 185.56.149.223 171 | - ip: 185.56.149.224 172 | - ip: 185.56.149.184 173 | - ip: 185.56.149.185 174 | - ip: 185.56.149.186 175 | - ip: 185.56.149.187 176 | - ip: 185.56.149.220 177 | - ip: 185.56.149.180 178 | - ip: 185.56.149.181 179 | - ip: 185.56.149.182 180 | - ip: 185.56.149.183 181 | - ip: 185.56.149.236 182 | - ip: 185.56.149.237 183 | - ip: 185.56.149.238 184 | - ip: 185.56.149.239 185 | - ip: 185.56.149.199 186 | - ip: 185.56.149.232 187 | - ip: 185.56.149.233 188 | - ip: 185.56.149.234 189 | - ip: 185.56.149.235 190 | - ip: 185.56.149.195 191 | - ip: 185.56.149.196 192 | - ip: 185.56.149.197 193 | - ip: 185.56.149.230 194 | - ip: 185.56.149.198 195 | - ip: 185.56.149.231 196 | - ip: 185.56.149.191 197 | - ip: 185.56.149.192 198 | - ip: 185.56.149.193 199 | - ip: 185.56.149.194 200 | - ip: 185.56.149.190 201 | - ip: 185.56.149.128 202 | - ip: 185.56.149.129 203 | - ip: 185.56.149.243 204 | - ip: 185.56.149.244 205 | - ip: 185.56.149.245 206 | - ip: 185.56.149.246 207 | - ip: 185.56.149.240 208 | - ip: 185.56.149.241 209 | - ip: 185.56.149.242 210 | - ip: 185.56.149.137 211 | - ip: 185.56.149.138 212 | - ip: 185.56.149.139 213 | - ip: 185.56.149.133 214 | - ip: 185.56.149.134 215 | - ip: 185.56.149.135 216 | - ip: 185.56.149.136 217 | - ip: 185.56.149.130 218 | - ip: 185.56.149.131 219 | - ip: 185.56.149.132 220 | - ip: 185.56.149.148 221 | - ip: 185.56.149.149 222 | - ip: 185.56.149.144 223 | - ip: 185.56.149.145 224 | - ip: 185.56.149.146 225 | - ip: 185.56.149.147 226 | - ip: 185.56.149.140 227 | - ip: 185.56.149.141 228 | - ip: 185.56.149.142 229 | - ip: 185.56.149.143 230 | - ip: 185.56.149.159 231 | - ip: 185.56.149.155 232 | - ip: 185.56.149.156 233 | - ip: 185.56.149.157 234 | - ip: 185.56.149.158 235 | - ip: 185.56.149.151 236 | - ip: 185.56.149.152 237 | - ip: 185.56.149.153 238 | - ip: 185.56.149.154 239 | - ip: 185.56.149.150 240 | - ip: 185.56.149.207 241 | - ip: 185.56.149.208 242 | - ip: 185.56.149.209 243 | - ip: 185.56.149.203 244 | - ip: 185.56.149.204 245 | - ip: 185.56.149.205 246 | - ip: 185.56.149.206 247 | - ip: 185.56.149.166 248 | - ip: 185.56.149.167 249 | - ip: 185.56.149.200 250 | - ip: 185.56.149.168 251 | - ip: 185.56.149.201 252 | - ip: 185.56.149.169 253 | - ip: 185.56.149.202 254 | - ip: 185.56.149.162 255 | - ip: 185.56.149.163 256 | - ip: 185.56.149.164 257 | - ip: 185.56.149.165 258 | - ip: 185.56.149.160 259 | - ip: 185.56.149.161 260 | ports: 261 | - name: ne-metrics 262 | port: 9100 263 | - name: bbb-metrics 264 | port: 9688 265 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/certificate.yaml: -------------------------------------------------------------------------------- 1 | # we cannot patch custom resources with patchesStrategicMerge -> hence, we define the certificate here 2 | apiVersion: cert-manager.io/v1alpha2 3 | kind: Certificate 4 | metadata: 5 | name: jitsi-messenger-schule 6 | namespace: jitsi 7 | # allows the copying of the resulting secret to namespace `monitoring` 8 | annotations: 9 | reflector.v1.k8s.emberstack.com/secret-reflection-allowed: "true" 10 | reflector.v1.k8s.emberstack.com/secret-reflection-allowed-namespaces: "monitoring" 11 | spec: 12 | # secret names are always required 13 | secretName: jitsi-messenger-schule-tls 14 | duration: 2160h # 90d 15 | renewBefore: 360h # 15d 16 | organization: 17 | - hpi 18 | isCA: false 19 | keySize: 2048 20 | keyAlgorithm: rsa 21 | keyEncoding: pkcs1 22 | usages: 23 | - server auth 24 | - client auth 25 | # at least one of DNS Name, URI, or IP address is required 26 | dnsNames: 27 | # fill in dnsName here 28 | - jitsi.messenger.schule 29 | # issuer references are always required 30 | issuerRef: 31 | name: letsencrypt 32 | # ClusterIssuers can be referenced by changing the kind here 33 | # default value is Issuer (i.e. a locally namespaced Issuer) 34 | kind: ClusterIssuer 35 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/elasticsearch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: elasticsearch.k8s.elastic.co/v1 2 | kind: Elasticsearch 3 | metadata: 4 | name: elasticsearch 5 | namespace: logging 6 | spec: 7 | version: 7.7.0 8 | auth: 9 | fileRealm: 10 | - secretName: es-filerealm-secret 11 | podDisruptionBudget: {} 12 | nodeSets: 13 | - name: master 14 | count: 3 15 | config: 16 | node.master: true 17 | node.data: false 18 | node.ingest: false 19 | podTemplate: 20 | spec: 21 | initContainers: 22 | - name: sysctl 23 | securityContext: 24 | privileged: true 25 | command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144'] 26 | - name: data-ingest 27 | count: 2 28 | config: 29 | node.master: false 30 | node.data: true 31 | node.ingest: true 32 | podTemplate: 33 | spec: 34 | initContainers: 35 | # see https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-virtual-memory.html#k8s-virtual-memory 36 | - name: sysctl 37 | securityContext: 38 | privileged: true 39 | command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144'] 40 | volumeClaimTemplates: 41 | - metadata: 42 | name: elasticsearch-data 43 | spec: 44 | accessModes: 45 | - ReadWriteOnce 46 | resources: 47 | requests: 48 | storage: 5Gi 49 | storageClassName: ionos-enterprise-hdd 50 | - name: client 51 | count: 2 52 | config: 53 | node.master: false 54 | node.data: false 55 | node.ingest: false 56 | podTemplate: 57 | spec: 58 | initContainers: 59 | - name: sysctl 60 | securityContext: 61 | privileged: true 62 | command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144'] 63 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/grafana-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grafana 6 | name: grafana 7 | namespace: monitoring 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: grafana 13 | env: 14 | - name: GF_SERVER_DOMAIN 15 | value: "jitsi.messenger.schule" 16 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/grafana-ingress-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: grafana-ingress 5 | namespace: monitoring 6 | annotations: 7 | nginx.ingress.kubernetes.io/use-regex: "true" 8 | spec: 9 | tls: 10 | - hosts: 11 | - jitsi.messenger.schule 12 | secretName: jitsi-messenger-schule-tls 13 | rules: 14 | - host: jitsi.messenger.schule 15 | http: 16 | paths: 17 | # only match /grafana and paths under /grafana/ 18 | - path: /grafana(/|$)(.*) 19 | backend: 20 | serviceName: grafana 21 | servicePort: http -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/grafana-tls-secret.yaml: -------------------------------------------------------------------------------- 1 | # filled by the kubernetes-reflector using the secret in namespace jitsi (which holds the certificate of the domain) 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: jitsi-messenger-schule-tls 6 | namespace: monitoring 7 | annotations: 8 | reflector.v1.k8s.emberstack.com/reflects: "jitsi/jitsi-messenger-schule-tls" 9 | data: 10 | {} -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/haproxy-ingress-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: haproxy-ingress 5 | namespace: jitsi 6 | spec: 7 | tls: 8 | - hosts: 9 | - jitsi.messenger.schule 10 | secretName: jitsi-messenger-schule-tls 11 | rules: 12 | - host: jitsi.messenger.schule 13 | http: 14 | paths: 15 | - path: / 16 | backend: 17 | serviceName: haproxy 18 | servicePort: 80 19 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../../base/ops 3 | 4 | resources: 5 | - bbb-endpoints.yaml 6 | - turn-endpoints.yaml 7 | - certificate.yaml 8 | - elasticsearch.yaml 9 | - grafana-tls-secret.yaml 10 | - bbb-basic-auth-secret.yaml 11 | 12 | patchesStrategicMerge: 13 | - grafana-deployment-patch.yaml 14 | - grafana-ingress-patch.yaml 15 | - prometheus-prometheus-patch.yaml 16 | - haproxy-ingress-patch.yaml 17 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/prometheus-prometheus-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | name: k8s 5 | namespace: monitoring 6 | spec: 7 | retention: 7d 8 | resources: 9 | requests: 10 | memory: 5500Mi 11 | cpu: 2000m 12 | limits: 13 | memory: 5500Mi 14 | cpu: 2000m 15 | storage: 16 | volumeClaimTemplate: 17 | metadata: 18 | name: prometheus 19 | spec: 20 | accessModes: [ "ReadWriteOnce" ] 21 | storageClassName: ionos-enterprise-hdd 22 | resources: 23 | requests: 24 | storage: 100Gi 25 | -------------------------------------------------------------------------------- /overlays/production-monitoring/ops/turn-endpoints.yaml: -------------------------------------------------------------------------------- 1 | # defines the IPs of TURN servers that should be scraped by Prometheus 2 | kind: Endpoints 3 | apiVersion: v1 4 | metadata: 5 | name: turn 6 | namespace: monitoring 7 | labels: 8 | k8s-app: turn-metrics 9 | subsets: 10 | - addresses: 11 | # sc-prod-bbb-turn-1 12 | - ip: 85.215.238.104 13 | - ip: 85.215.238.127 14 | - ip: 85.215.238.117 15 | - ip: 85.215.238.128 16 | - ip: 85.215.238.122 17 | - ip: 85.215.238.199 18 | - ip: 85.215.238.101 19 | - ip: 85.215.238.200 20 | - ip: 85.215.238.102 21 | - ip: 85.215.238.179 22 | - ip: 85.215.238.228 23 | - ip: 85.215.239.32 24 | - ip: 85.215.238.98 25 | - ip: 85.215.238.119 26 | - ip: 85.215.238.207 27 | - ip: 85.215.238.99 28 | - ip: 85.215.239.35 29 | - ip: 85.215.238.173 30 | - ip: 85.215.238.120 31 | - ip: 85.215.238.131 32 | # sc-prod-jitsi-turn-1 33 | - ip: 81.173.114.130 34 | - ip: 81.173.112.62 35 | - ip: 81.173.112.50 36 | - ip: 81.173.112.244 37 | ports: 38 | - name: metrics 39 | port: 9100 40 | -------------------------------------------------------------------------------- /overlays/production/jitsi-base/jvb-hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | minReplicas: 2 8 | maxReplicas: 6 9 | metrics: 10 | - type: Pods 11 | pods: 12 | metric: 13 | name: container_network_transmit_bytes_per_second 14 | target: 15 | type: AverageValue 16 | averageValue: 100000 17 | -------------------------------------------------------------------------------- /overlays/production/jitsi-base/jvb-statefulset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | name: jvb 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: prometheus-exporter 11 | resources: 12 | requests: 13 | cpu: "100m" 14 | memory: "100Mi" 15 | limits: 16 | cpu: "100m" 17 | memory: "100Mi" 18 | - name: jvb 19 | resources: 20 | requests: 21 | cpu: "8000m" 22 | memory: "1000Mi" 23 | limits: 24 | cpu: "8000m" 25 | memory: "1000Mi" 26 | -------------------------------------------------------------------------------- /overlays/production/jitsi-base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../../base/jitsi-shard 3 | 4 | patchesStrategicMerge: 5 | - jvb-statefulset-patch.yaml 6 | - jvb-hpa-patch.yaml -------------------------------------------------------------------------------- /overlays/production/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../../base/jitsi 3 | 4 | resources: 5 | - shard-0/ 6 | - shard-1/ 7 | -------------------------------------------------------------------------------- /overlays/production/shard-0/jicofo-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: jicofo 7 | name: jicofo 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_1 14 | containers: 15 | - name: jicofo 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-0-prosody 19 | -------------------------------------------------------------------------------- /overlays/production/shard-0/jvb-hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | scaleTargetRef: 8 | name: shard-0-jvb # needs to be manually set 9 | 10 | -------------------------------------------------------------------------------- /overlays/production/shard-0/jvb-statefulset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | name: jvb 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: jvb 10 | shard: "0" 11 | template: 12 | metadata: 13 | labels: 14 | k8s-app: jvb 15 | shard: "0" 16 | spec: 17 | # avoid that pods of different shards share zone 18 | nodeSelector: 19 | topology.kubernetes.io/zone: ZONE_1 20 | containers: 21 | - name: jvb 22 | env: 23 | - name: XMPP_SERVER 24 | value: shard-0-prosody 25 | args: 26 | - "30300" 27 | - "/init" 28 | -------------------------------------------------------------------------------- /overlays/production/shard-0/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../jitsi-base 3 | 4 | patchesStrategicMerge: 5 | - jvb-hpa-patch.yaml 6 | - jvb-statefulset-patch.yaml 7 | - jicofo-deployment-patch.yaml 8 | - web-deployment-patch.yaml 9 | - prosody-deployment-patch.yaml 10 | 11 | namespace: jitsi 12 | 13 | namePrefix: shard-0- 14 | 15 | commonLabels: 16 | shard: "0" 17 | -------------------------------------------------------------------------------- /overlays/production/shard-0/prosody-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: prosody 7 | name: prosody 8 | spec: 9 | selector: 10 | matchLabels: 11 | shard: "0" 12 | template: 13 | spec: 14 | # avoid that pods of different shards share zone 15 | nodeSelector: 16 | topology.kubernetes.io/zone: ZONE_1 17 | metadata: 18 | labels: 19 | shard: "0" 20 | -------------------------------------------------------------------------------- /overlays/production/shard-0/web-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: web 7 | name: web 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_1 14 | containers: 15 | - name: web 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-0-prosody 19 | - name: XMPP_BOSH_URL_BASE 20 | value: http://shard-0-prosody:5280 21 | -------------------------------------------------------------------------------- /overlays/production/shard-1/jicofo-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: jicofo 7 | name: jicofo 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_2 14 | containers: 15 | - name: jicofo 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-1-prosody 19 | -------------------------------------------------------------------------------- /overlays/production/shard-1/jvb-hpa-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | namespace: jitsi 5 | name: jvb-hpa 6 | spec: 7 | scaleTargetRef: 8 | name: shard-1-jvb # needs to be manually set 9 | 10 | -------------------------------------------------------------------------------- /overlays/production/shard-1/jvb-statefulset-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: jitsi 5 | name: jvb 6 | spec: 7 | selector: 8 | matchLabels: 9 | k8s-app: jvb 10 | shard: "1" 11 | template: 12 | metadata: 13 | labels: 14 | k8s-app: jvb 15 | shard: "1" 16 | spec: 17 | # avoid that pods of different shards share zone 18 | nodeSelector: 19 | topology.kubernetes.io/zone: ZONE_2 20 | containers: 21 | - name: jvb 22 | env: 23 | - name: XMPP_SERVER 24 | value: shard-1-prosody 25 | args: 26 | - "30400" 27 | - "/init" 28 | -------------------------------------------------------------------------------- /overlays/production/shard-1/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../jitsi-base 3 | 4 | patchesStrategicMerge: 5 | - jvb-hpa-patch.yaml 6 | - jvb-statefulset-patch.yaml 7 | - jicofo-deployment-patch.yaml 8 | - web-deployment-patch.yaml 9 | - prosody-deployment-patch.yaml 10 | 11 | namespace: jitsi 12 | 13 | namePrefix: shard-1- 14 | 15 | commonLabels: 16 | shard: "1" 17 | -------------------------------------------------------------------------------- /overlays/production/shard-1/prosody-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: prosody 7 | name: prosody 8 | spec: 9 | selector: 10 | matchLabels: 11 | shard: "1" 12 | template: 13 | spec: 14 | # avoid that pods of different shards share zone 15 | nodeSelector: 16 | topology.kubernetes.io/zone: ZONE_2 17 | metadata: 18 | labels: 19 | shard: "1" 20 | -------------------------------------------------------------------------------- /overlays/production/shard-1/web-deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: jitsi 5 | labels: 6 | k8s-app: web 7 | name: web 8 | spec: 9 | template: 10 | spec: 11 | # avoid that pods of different shards share zone 12 | nodeSelector: 13 | topology.kubernetes.io/zone: ZONE_2 14 | containers: 15 | - name: web 16 | env: 17 | - name: XMPP_SERVER 18 | value: shard-1-prosody 19 | - name: XMPP_BOSH_URL_BASE 20 | value: http://shard-1-prosody:5280 21 | -------------------------------------------------------------------------------- /secrets.sh: -------------------------------------------------------------------------------- 1 | secretsfile="$1" 2 | instance="$2" 3 | 4 | get-secret () { 5 | printf '%q' $(grep -e "$1" "$secretsfile" | cut -d "=" -f2) 6 | } 7 | 8 | encrypt-secret () { 9 | get-secret "$1" | base64 10 | } 11 | 12 | sed -i 's/JICOFO_COMPONENT_SECRET: .*/JICOFO_COMPONENT_SECRET: '$(encrypt-secret "JICOFO_COMPONENT_SECRET")'/g' base/jitsi/jitsi-secret.yaml 13 | sed -i 's/JICOFO_AUTH_PASSWORD: .*/JICOFO_AUTH_PASSWORD: '$(encrypt-secret "JICOFO_AUTH_PASSWORD")'/g' base/jitsi/jitsi-secret.yaml 14 | sed -i 's/JVB_AUTH_PASSWORD: .*/JVB_AUTH_PASSWORD: '$(encrypt-secret "JVB_AUTH_PASSWORD")'/g' base/jitsi/jitsi-secret.yaml 15 | sed -i 's/JVB_STUN_SERVERS: .*/JVB_STUN_SERVERS: '$(encrypt-secret "JVB_STUN_SERVERS")'/g' base/jitsi/jitsi-secret.yaml 16 | sed -i 's/TURNCREDENTIALS_SECRET: .*/TURNCREDENTIALS_SECRET: '$(encrypt-secret "TURNCREDENTIALS_SECRET")'/g' base/jitsi/jitsi-secret.yaml 17 | sed -i 's/TURN_HOST: .*/TURN_HOST: '$(encrypt-secret "TURN_HOST")'/g' base/jitsi/jitsi-secret.yaml 18 | sed -i 's/STUN_PORT: .*/STUN_PORT: '$(encrypt-secret "STUN_PORT")'/g' base/jitsi/jitsi-secret.yaml 19 | sed -i 's/TURN_PORT: .*/TURN_PORT: '$(encrypt-secret "TURN_PORT")'/g' base/jitsi/jitsi-secret.yaml 20 | sed -i 's/TURNS_PORT: .*/TURNS_PORT: '$(encrypt-secret "TURNS_PORT")'/g' base/jitsi/jitsi-secret.yaml 21 | 22 | sed -i 's/stunServers: \[.*/stunServers: \['$(get-secret "config.js")'/g' base/jitsi/web-configmap.yaml 23 | 24 | sed -i 's/email: .*/email: '$(get-secret "spec.acme.email")'/g' base/ops/cert-manager/cluster-issuer.yaml 25 | 26 | sed -i 's/users: .*/users: '$(encrypt-secret "users")'/g' base/ops/logging/es-realm-secret.yaml 27 | sed -i 's/users_roles: .*/users_roles: '$(encrypt-secret "users_roles")'/g' base/ops/logging/es-realm-secret.yaml 28 | 29 | sed -i 's/username: .*/username: '$(encrypt-secret "username")'/g' overlays/${instance}-monitoring/ops/bbb-basic-auth-secret.yaml 30 | sed -i 's/password: .*/password: '$(encrypt-secret "password")'/g' overlays/${instance}-monitoring/ops/bbb-basic-auth-secret.yaml 31 | --------------------------------------------------------------------------------