├── envs ├── k8s-dashboard-values.yaml ├── ss_values.yaml ├── weblate.yaml ├── cert-man_values.yaml ├── longhorn-values.yaml ├── coturn.yaml ├── onedev-mysql.yaml ├── tigase-server-mysql.yaml ├── killbill-mysql.yaml ├── onedev.yaml ├── killbill.yaml ├── loki-values.yaml ├── tigase-server.yaml ├── prometheus-values.yaml ├── cluster.env ├── nginx_values.yaml ├── mailu-values.yaml ├── mailu.env └── versions.env ├── .github └── FUNDING.yml ├── scripts ├── cluster-loki-stack.sh ├── flux-create-source.sh ├── cluster-tigase-helm-charts.sh ├── cluster-common-sources.sh ├── aws-update-zone.sh ├── cluster-ingress-nginx.sh ├── cluster-sealed-secrets.sh ├── k8s-basic-secret.sh ├── cluster-bootstrap.sh ├── cluster-kube-prometheus-stack.sh ├── cluster-rancher.sh ├── create-longhorn-pvc.sh ├── cluster-kubernetes-dashboard.sh ├── cluster-coturn.sh ├── cluster-script-preprocess.sh ├── scripts-env-init.sh ├── flux-create-helmrel.sh ├── cluster-cert-manager.sh ├── flux-bootstrap.sh ├── cluster-onedev.sh ├── cluster-longhorn.sh ├── cluster-tools.sh ├── cluster-weblate.sh ├── cluster-killbill.sh ├── cluster-tigase-server.sh └── cluster-mailu.sh ├── README.md └── LICENSE /envs/k8s-dashboard-values.yaml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /envs/ss_values.yaml: -------------------------------------------------------------------------------- 1 | ingress: 2 | enabled: false 3 | 4 | -------------------------------------------------------------------------------- /envs/weblate.yaml: -------------------------------------------------------------------------------- 1 | adminUser: "admin" 2 | adminPassword: "" 3 | 4 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: 
[tigase] 4 | -------------------------------------------------------------------------------- /envs/cert-man_values.yaml: -------------------------------------------------------------------------------- 1 | installCRDs: true 2 | prometheus: 3 | # set to true if you want to enable prometheus monitoring for cert-manager 4 | enabled: false 5 | 6 | -------------------------------------------------------------------------------- /envs/longhorn-values.yaml: -------------------------------------------------------------------------------- 1 | defaultSettings: 2 | defaultReplicaCount: 3 3 | defaultDataLocality: "best-effort" 4 | backupTarget: "" 5 | backupTargetCredentialSecret: "aws-s3-backup" 6 | 7 | -------------------------------------------------------------------------------- /envs/coturn.yaml: -------------------------------------------------------------------------------- 1 | vhost: "${COTURN_DOMAIN}" 2 | 3 | secret: 4 | username: "${COTURN_USERNAME}" 5 | password: "${COTURN_PASSWORD}" 6 | 7 | hostNetwork: true 8 | 9 | service: 10 | enabled: false 11 | 12 | ports: 13 | from: "${COTURN_LPORT}" 14 | to: "${COTURN_UPORT}" -------------------------------------------------------------------------------- /envs/onedev-mysql.yaml: -------------------------------------------------------------------------------- 1 | persistentVolume: 2 | enabled: true 3 | 4 | backup: 5 | s3: 6 | enabled: ${ONEDEV_MYSQL_S3_BACKUP} 7 | schedule: "${ONEDEV_MYSQL_S3_BACKUP_SCHEDULE}" 8 | endpoint: "${ONEDEV_MYSQL_S3_BACKUP_ENDPOINT}" 9 | bucket: "${ONEDEV_MYSQL_S3_BACKUP_BUCKET}" 10 | prefix: "${ONEDEV_MYSQL_S3_BACKUP_PREFIX}" 11 | existingSecret: "mysql-backup-s3" 12 | expireIn: "${ONEDEV_MYSQL_S3_BACKUP_EXPIRE_IN}" 13 | 14 | resources: 15 | limits: 16 | memory: 512Mi 17 | -------------------------------------------------------------------------------- /envs/tigase-server-mysql.yaml: -------------------------------------------------------------------------------- 1 | persistentVolume: 2 | 
enabled: true 3 | 4 | backup: 5 | s3: 6 | enabled: ${TIGASE_MYSQL_S3_BACKUP} 7 | schedule: "${TIGASE_MYSQL_S3_BACKUP_SCHEDULE}" 8 | endpoint: "${TIGASE_MYSQL_S3_BACKUP_ENDPOINT}" 9 | bucket: "${TIGASE_MYSQL_S3_BACKUP_BUCKET}" 10 | prefix: "${TIGASE_MYSQL_S3_BACKUP_PREFIX}" 11 | existingSecret: "mysql-backup-s3" 12 | expireIn: "${TIGASE_MYSQL_S3_BACKUP_EXPIRE_IN}" 13 | 14 | resources: 15 | limits: 16 | memory: 512Mi 17 | -------------------------------------------------------------------------------- /envs/killbill-mysql.yaml: -------------------------------------------------------------------------------- 1 | persistentVolume: 2 | enabled: true 3 | 4 | backup: 5 | s3: 6 | enabled: ${KILLBILL_MYSQL_S3_BACKUP} 7 | schedule: "${KILLBILL_MYSQL_S3_BACKUP_SCHEDULE}" 8 | endpoint: "${KILLBILL_MYSQL_S3_BACKUP_ENDPOINT}" 9 | bucket: "${KILLBILL_MYSQL_S3_BACKUP_BUCKET}" 10 | prefix: "${KILLBILL_MYSQL_S3_BACKUP_PREFIX}" 11 | existingSecret: "mysql-backup-s3" 12 | expireIn: "${KILLBILL_MYSQL_S3_BACKUP_EXPIRE_IN}" 13 | 14 | resources: 15 | limits: 16 | memory: 512Mi 17 | -------------------------------------------------------------------------------- /envs/onedev.yaml: -------------------------------------------------------------------------------- 1 | certmanager: 2 | enabled: false 3 | 4 | onedev: 5 | service: 6 | type: ClusterIP 7 | persistentVolume: 8 | enabled: true 9 | 10 | ingress: 11 | annotations: 12 | kubernetes.io/ingress.class: nginx 13 | enabled: true 14 | certificateIssuer: ${SSL_ISSUER} 15 | tls: 16 | hosts: 17 | - ${ONEDEV_DOMAIN} 18 | 19 | hosts: 20 | - host: ${ONEDEV_DOMAIN} 21 | paths: 22 | - path: / 23 | pathType: ImplementationSpecific -------------------------------------------------------------------------------- /envs/killbill.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | database: 4 | host: '${KILLBILL_DATABASE_HOST}' 5 | existingSecret: 'mysql-credentials' 6 | 7 | ingress: 8 | 
enabled: true 9 | hosts: 10 | - host: '${KILLBILL_DOMAIN}' 11 | paths: 12 | - path: / 13 | pathType: Prefix 14 | serviceType: 'killbill' 15 | - host: '${KAUI_DOMAIN}' 16 | paths: 17 | - path: / 18 | pathType: Prefix 19 | serviceType: 'kaui' 20 | certificateIssuer: ${SSL_ISSUER} 21 | tls: 22 | hosts: 23 | - '${KILLBILL_DOMAIN}' 24 | - '${KAUI_DOMAIN}' 25 | 26 | resources: 27 | limits: 28 | memory: 1Gi -------------------------------------------------------------------------------- /envs/loki-values.yaml: -------------------------------------------------------------------------------- 1 | promtail: 2 | enabled: true 3 | serviceMonitor: 4 | enabled: true 5 | additionalLabels: 6 | release: prometheus 7 | pipelineStages: 8 | - docker: {} 9 | - drop: 10 | source: namespace 11 | expression: "kube-.*" 12 | prometheus: 13 | enabled: false 14 | fluent-bit: 15 | enabled: false 16 | grafana: 17 | enabled: false 18 | loki: 19 | enabled: true 20 | # Configure for 28 day retention on persistent volume 21 | persistence: 22 | enabled: true 23 | size: 10Gi 24 | config: 25 | chunk_store_config: 26 | max_look_back_period: 672h 27 | table_manager: 28 | retention_deletes_enabled: true 29 | retention_period: 672h 30 | 31 | -------------------------------------------------------------------------------- /scripts/cluster-loki-stack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script deploys loki for collecting logs from k8s services 4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | NAME="${LO_NAME}" 9 | TNS="${LO_TARGET_NAMESPACE}" 10 | 11 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | CL_DIR=`mkdir_ns ${BASE_DIR} ${TNS} ${FLUX_NS}` 14 | 15 | echo " ${BOLD}Deploying ${NAME}${NORMAL}" 16 | ${SCRIPTS}/flux-create-helmrel.sh \ 17 | "${LO_NAME}" \ 18 | "${LO_VER}" \ 19 | "${LO_RNAME}" \ 20 | "${LO_TARGET_NAMESPACE}" \ 21 | "${LO_NAMESPACE}" \ 22 | 
"${LO_SOURCE}" \ 23 | "${LO_VALUES}" --create-target-namespace || exit 1 24 | 25 | update_chart_ns "${CL_DIR}/${NAME}/${NAME}.yaml" 26 | 27 | update_repo "${NAME}" 28 | 29 | -------------------------------------------------------------------------------- /scripts/flux-create-source.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script adds a new helm source to the FluxCD repository 4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | INTERVAL="${DEF_INTERVAL}" 9 | NAME="" 10 | URL="" 11 | DIR="${BASE_DIR}/sources" 12 | 13 | [[ -z "$1" ]] && { echo "${ERROR}Missing name argument${NORMAL}"; } || { NAME="$1"; } 14 | 15 | [[ -z "$1" || "$1" == "-h" ]] && { 16 | echo "\$1 - name" 17 | echo "\$2 - url" 18 | exit 0 19 | } 20 | 21 | [[ -z "$2" ]] && { echo "${ERROR}Missing url argument${NORMAL}"; exit 1; } || { 22 | URL="$2" 23 | } 24 | 25 | FILE="${DIR}/"${NAME}".yaml" 26 | 27 | [[ -f "${FILE}" ]] && exit 1 28 | 29 | flux create source helm ${NAME} \ 30 | --url=${URL} \ 31 | --interval=${INTERVAL} \ 32 | --export > "${FILE}" 33 | 34 | cd ${DIR} 35 | rm -f kustomization.yaml 36 | kustomize create --namespace="flux-system" --autodetect --recursive 37 | cd - 38 | 39 | -------------------------------------------------------------------------------- /scripts/cluster-tigase-helm-charts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script adds Tigase Helm charts if not present. 4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | INTERVAL="${DEF_INTERVAL}" 9 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 10 | 11 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | 14 | if [ ! 
-f "${BASE_DIR}/sources/tigase-git.yaml" ]; then 15 | 16 | echo " ${BOLD}Adding tigase helm chart${NORMAL}" 17 | 18 | cat > "${BASE_DIR}/sources/tigase-git.yaml" << EOF 19 | apiVersion: source.toolkit.fluxcd.io/v1beta1 20 | kind: GitRepository 21 | metadata: 22 | name: tigase 23 | namespace: flux-system 24 | spec: 25 | interval: ${INTERVAL} 26 | url: https://github.com/tigase/helm-charts 27 | ref: 28 | branch: master 29 | EOF 30 | 31 | update_kustomization ${BASE_DIR}/sources 32 | 33 | git add -A 34 | git commit -am "Added tigase source" 35 | 36 | fi 37 | -------------------------------------------------------------------------------- /scripts/cluster-common-sources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Scripts adds helm sources to the git repo for fluxcd for all common 4 | # services being installed on the cluster 5 | # 6 | 7 | source `dirname "$0"`/scripts-env-init.sh 8 | 9 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 10 | 11 | SOURCES=( 12 | "${SS_S_NAME}:${SS_URL}" 13 | "${IN_S_NAME}:${IN_URL}" 14 | "${LH_S_NAME}:${LH_URL}" 15 | "${CM_S_NAME}:${CM_URL}" 16 | "${PM_S_NAME}:${PM_URL}" 17 | "${LO_S_NAME}:${LO_URL}" 18 | "${VE_S_NAME}:${VE_URL}" 19 | "${DA_S_NAME}:${DA_URL}" 20 | ) 21 | 22 | add_source() { 23 | name=${1%%:*} 24 | url=${1#*:} 25 | echo " ${BOLD}Adding ${name} source at ${url}${NORMAL}" 26 | ${SCRIPTS}/flux-create-source.sh ${name} ${url} 27 | git add -A 28 | git commit -am "Added ${name} source" 29 | } 30 | 31 | 32 | echo -e "\n\n ${BOLD}Adding common sources${NORMAL}" 33 | 34 | for src in "${SOURCES[@]}" ; do 35 | add_source ${src} 36 | done 37 | 38 | git push 39 | flux reconcile source git flux-system 40 | 41 | -------------------------------------------------------------------------------- /scripts/aws-update-zone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 
3 | # Script updates records of a DNS ZONE on AWS Route53 4 | # 5 | # $1 - mandatory domain zone ID on AWS Route53 6 | # $2 - mandatory hostname, like mail.example.com 7 | # $3 - mandatory server IP address. 8 | # $4 - optional AWS profile 9 | 10 | 11 | ZONE_ID="${1}" 12 | HOSTNAME="${2}" 13 | IP="${3}" 14 | PROFILE="${4}" 15 | 16 | cat < ~/tmp-zone.json 17 | { 18 | "HostedZoneId": "${ZONE_ID}", 19 | "ChangeBatch": { 20 | "Comment": "", 21 | "Changes": [ 22 | { 23 | "Action": "UPSERT", 24 | "ResourceRecordSet": { 25 | "Name": "${HOSTNAME}", 26 | "Type": "A", 27 | "TTL": 300, 28 | "ResourceRecords": [ 29 | { 30 | "Value": "${IP}" 31 | } 32 | ] 33 | } 34 | } 35 | ] 36 | } 37 | } 38 | EOF 39 | 40 | [[ -n "${PROFILE}" ]] && export AWS_PROFILE=${PROFILE} 41 | 42 | aws route53 change-resource-record-sets --cli-input-json file://~/tmp-zone.json 43 | 44 | -------------------------------------------------------------------------------- /scripts/cluster-ingress-nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script installs nginx as ingress service 4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "No cluster repo dir!"; exit 1; } 9 | 10 | name="${IN_S_NAME}" 11 | url="${IN_URL}" 12 | 13 | echo " ${BOLD}Adding ${name} source at ${url}${NORMAL}" 14 | ${SCRIPTS}/flux-create-source.sh ${name} ${url} 15 | update_repo "${IN_NAME}" 16 | wait_for_ready 5 17 | 18 | NAME="${IN_NAME}" 19 | TNS="${IN_TARGET_NAMESPACE}" 20 | 21 | CL_DIR=`mkdir_ns ${BASE_DIR} ${TNS} ${FLUX_NS}` 22 | 23 | echo " ${BOLD}Deploying ${NAME}${NORMAL}" 24 | ${SCRIPTS}/flux-create-helmrel.sh \ 25 | "${IN_NAME}" \ 26 | "${IN_VER}" \ 27 | "${IN_RNAME}" \ 28 | "${IN_TARGET_NAMESPACE}" \ 29 | "${IN_NAMESPACE}" \ 30 | "${IN_SOURCE}" \ 31 | "${IN_VALUES}" --create-target-namespace || exit 1 32 | 33 | update_chart_ns "${CL_DIR}/${NAME}/${NAME}.yaml" 34 | yq e -i 
'.spec.install.remediation.retries = 3' "${CL_DIR}/${NAME}/${NAME}.yaml" 35 | yq e -i '.spec.upgrade.remediation.retries = 3' "${CL_DIR}/${NAME}/${NAME}.yaml" 36 | 37 | update_repo "${NAME}" 38 | 39 | wait_for_ready 40 | 41 | -------------------------------------------------------------------------------- /envs/tigase-server.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | vhost: "${TIGASE_DOMAIN}" 4 | 5 | admins: 6 | - 'admin@${TIGASE_DOMAIN}' 7 | - 'wojtek@tigase.org' 8 | - 'bmalkow@malkowscy.net' 9 | - 'andrzej.wojcik@tigase.org' 10 | - 'wojciech.kapcia@tigase.org' 11 | - 'cron_runner@${TIGASE_DOMAIN}' 12 | 13 | users: 14 | create: false 15 | 16 | fileUpload: 17 | enabled: true 18 | domain: "upload.${TIGASE_DOMAIN}" 19 | storage: 's3' 20 | s3storage: 21 | endpointUrl: "${TIGASE_S3_UPLOAD_ENDPOINT}" 22 | bucket: 'tigase-org-xmpp-upload' 23 | accessKeyId: "${TIGASE_S3_UPLOAD_ACCESS_KEY}" 24 | secret: "tigase-s3-upload" 25 | pathStyleAccess: "${TIGASE_S3_UPLOAD_PATH_STYLE}" 26 | 27 | service: 28 | type: ClusterIP 29 | 30 | ingress: 31 | enabled: true 32 | sslCertificateIssuer: "letsencrypt" 33 | tls: 34 | hosts: 35 | - "upload.${TIGASE_DOMAIN}" 36 | hosts: 37 | - host: "upload.${TIGASE_DOMAIN}" 38 | paths: 39 | - path: / 40 | pathType: Prefix 41 | 42 | resources: 43 | limits: 44 | memory: 1Gi -------------------------------------------------------------------------------- /scripts/cluster-sealed-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script deploys sealed secrets on the k8s cluster and adds public key 4 | # to the git repository so later, secrets can be encrypted. 
5 | # 6 | 7 | source `dirname "$0"`/scripts-env-init.sh 8 | 9 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "No cluster repo dir!"; exit 1; } 10 | 11 | name="${SS_S_NAME}" 12 | url="${SS_URL}" 13 | 14 | echo " ${BOLD}Adding ${name} source at ${url}${NORMAL}" 15 | ${SCRIPTS}/flux-create-source.sh ${name} ${url} 16 | update_repo "${SS_NAME}" 17 | wait_for_ready 5 18 | 19 | echo " ${BOLD}Deploying sealed secrets${NORMAL}" 20 | ${SCRIPTS}/flux-create-helmrel.sh \ 21 | "${SS_NAME}" \ 22 | "${SS_VER}" \ 23 | "${SS_RNAME}" \ 24 | "${SS_TARGET_NAMESPACE}" \ 25 | "${SS_NAMESPACE}" \ 26 | "${SS_SOURCE}" \ 27 | "${SS_VALUES}" --crds=CreateReplace || exit 1 28 | 29 | update_repo "${SS_NAME}" 30 | 31 | wait_for_ready 32 | 33 | kubectl port-forward service/sealed-secrets-controller 8080:8080 -n flux-system & 34 | sleep 10 35 | curl --retry 5 --retry-connrefused localhost:8080/v1/cert.pem > pub-sealed-secrets-${CLUSTER_NAME}.pem 36 | killall kubectl 37 | 38 | update_repo "public-key" 39 | 40 | -------------------------------------------------------------------------------- /scripts/k8s-basic-secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ###### 4 | # If you forgot or lost login credentials for basic-auth for an HTTP(S) service 5 | # this utility script can reinstall the secret with new credentials 6 | # 7 | 8 | source `dirname "$0"`/scripts-env-init.sh 9 | 10 | [[ -z "$1" ]] && { 11 | echo "Provide a namespace as the first parameter" 12 | exit 1 13 | } 14 | ns=$1 15 | 16 | u_name="" 17 | u_pass="" 18 | 19 | [[ "$1" == "-q" ]] || { 20 | echo -n "Provide ${ns} user name or ENTER to generate: "; read u_name 21 | echo -n "Provide ${ns} user password or ENTER to generate: "; read u_pass 22 | } 23 | [[ -z "${u_name}" ]] && u_name=`gen_token 8` 24 | USER_NAME=${u_name} 25 | [[ -z "${u_pass}" ]] && u_pass=`gen_token 24` 26 | USER_PASS=${u_pass} 27 | 28 | update_k8s_secrets "${ns}-user" ${USER_NAME} 29 | 
update_k8s_secrets "${ns}-pass" ${USER_PASS} 30 | 31 | AUTH_FILE="$TMP_DIR/auth" 32 | rm -f $AUTH_FILE 33 | 34 | echo "${USER_NAME}:$(openssl passwd -stdin -apr1 <<< ${USER_PASS})" >> $AUTH_FILE 35 | 36 | cat $AUTH_FILE 37 | 38 | if [ "$2" == "--reinstall" ]; then 39 | kubectl delete secret basic-auth -n ${ns} 40 | fi 41 | 42 | kubectl -n ${ns} create secret generic basic-auth --from-file=$AUTH_FILE 43 | 44 | -------------------------------------------------------------------------------- /envs/prometheus-values.yaml: -------------------------------------------------------------------------------- 1 | alertmanager: 2 | enabled: true 3 | defaultRules: 4 | create: true 5 | rules: 6 | etcd: false 7 | kubeScheduler: false 8 | grafana: 9 | enabled: true 10 | kubeEtcd: 11 | enabled: false 12 | kubeScheduler: 13 | enabled: false 14 | prometheus: 15 | enabled: true 16 | additionalServiceMonitors: 17 | - name: "loki-monitor" 18 | selector: 19 | matchLabels: 20 | app: loki 21 | release: loki 22 | namespaceSelector: 23 | matchNames: 24 | - monitoring 25 | endpoints: 26 | - port: "http-metrics" 27 | - name: "promtail-monitor" 28 | selector: 29 | matchLabels: 30 | app: promtail 31 | release: loki 32 | namespaceSelector: 33 | matchNames: 34 | - monitoring 35 | endpoints: 36 | - port: "http-metrics" 37 | prometheusSpec: 38 | storageSpec: 39 | volumeClaimTemplate: 40 | spec: 41 | accessModes: 42 | - ReadWriteOnce 43 | resources: 44 | requests: 45 | storage: 5Gi 46 | storageClassName: longhorn 47 | prometheusOperator: 48 | enabled: true 49 | 50 | -------------------------------------------------------------------------------- /scripts/cluster-bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script is the main entry point to setup entire k8s cluster. 4 | # It first initializes k8s cluster with fluxcd, setups repository 5 | # and then adds all listed services one by one. 
6 | # Execution of the entire script may take long time because the script 7 | # waits until flux system fully reconcilled after each service is added 8 | # 9 | 10 | source `dirname "$0"`/scripts-env-init.sh 11 | 12 | ### Versions 13 | # helm search repo .... 14 | 15 | echo "${BOLD}Bootstraping flux...${NORMAL}" 16 | ${SCRIPTS}/flux-bootstrap.sh || { 17 | echo "${ERROR}Unsuccessfull flux bootstrap, fix the problem and rerun${NORMAL}" 18 | echo " \$ ${SCRIPTS}/flux-bootstrap.sh" 19 | exit 1 20 | } 21 | 22 | wait_for_ready 23 | 24 | #BASE_TOOLS="common-sources ${SS_NAME} ${DA_NAME} ${IN_NAME} ${CM_NAME} ${LH_NAME} ${PM_NAME} ${LO_NAME} ${VE_NAME}" 25 | BASE_TOOLS="common-sources ${SS_NAME} ${DA_NAME} ${IN_NAME} ${CM_NAME} ${LH_NAME} ${PM_NAME} ${LO_NAME} ${ONEDEV_NAME} ${WEBLATE_NAME}" 26 | 27 | echo -e "\n\n ${BOLD}Deploying base tools ${BASE_TOOLS}${NORMAL}" 28 | 29 | for NAME in ${BASE_TOOLS} ; do 30 | ${SCRIPTS}/cluster-${NAME}.sh -q || { 31 | echo "${ERROR}Unsuccessful deployment for ${NAME}, correct the problem and rerun the script:${NORMAL}" 32 | echo " \$ ${SCRIPTS}/cluster-${NAME}.sh" 33 | exit 1 34 | } 35 | wait_for_ready 36 | done 37 | 38 | echo -e "\n\n ${BOLD}${GREEN}Deployment finished successfully!${NORMAL}" 39 | 40 | -------------------------------------------------------------------------------- /envs/cluster.env: -------------------------------------------------------------------------------- 1 | ### Generic 2 | 3 | PROJECTS_DIR="${CONFIG}/projects" 4 | 5 | COLORED_OUTPUT=true 6 | 7 | # Run: kubectl config get-contexts 8 | # and copy your context name here 9 | # This is to protect k8s cluster in case you manage multiple clusters 10 | K8S_CLUSTER_CONTEXT="" 11 | 12 | ### Cluster 13 | 14 | CLUSTER_NAME="YOUR_CLUSTER_NAME" 15 | 16 | ### Repository 17 | 18 | export GITHUB_USER="YOUR_GITHUB_USER" 19 | export GITHUB_TOKEN="YOUR_GITHUB_TOKEN" 20 | 21 | CLUSTER_REPO=${CLUSTER_NAME} 22 | REPO_BRANCH="master" 23 | 24 | ### Repository and folders structure for 
the cluster data 25 | CLUSTER_REPO_DIR="${PROJECTS_DIR}/${CLUSTER_REPO}" 26 | INFRA="infra" 27 | BASE="common" 28 | APPS="apps" 29 | BASE_DIR="${INFRA}/${BASE}" 30 | APPS_DIR="${INFRA}/${APPS}" 31 | 32 | ### Other settings 33 | DEF_INTERVAL=1h 34 | FLUX_NS="flux-system" 35 | SSL_EMAIL="EMAIL_FOR_LETSENCRYPT" 36 | SSL_STAG_ISSUER="letsencrypt-staging" 37 | SSL_PROD_ISSUER="letsencrypt" 38 | ROUTE53_ACCESS_KEY="" 39 | ROUTE53_SECRET_KEY="" 40 | 41 | ### Longhorn backup settings 42 | export LH_S3_BACKUP_ACCESS_KEY="" 43 | export LH_S3_BACKUP_SECRET_KEY="" 44 | 45 | ### OneDev backup settings 46 | export ONEDEV_MYSQL_S3_BACKUP="false" 47 | export ONEDEV_MYSQL_S3_BACKUP_ACCESS_KEY="" 48 | export ONEDEV_MYSQL_S3_BACKUP_SECRET_KEY="" 49 | export ONEDEV_MYSQL_S3_BACKUP_SCHEDULE="1 5 * * *" 50 | export ONEDEV_MYSQL_S3_BACKUP_ENDPOINT="" 51 | export ONEDEV_MYSQL_S3_BACKUP_BUCKET="" 52 | export ONEDEV_MYSQL_S3_BACKUP_PREFIX="" 53 | export ONEDEV_MYSQL_S3_BACKUP_EXPIRE_IN="7" 54 | export ONEDEV_DOMAIN="" 55 | -------------------------------------------------------------------------------- /envs/nginx_values.yaml: -------------------------------------------------------------------------------- 1 | controller: 2 | service: 3 | annotations: 4 | service.beta.kubernetes.io/oci-load-balancer-shape: flexible 5 | service.beta.kubernetes.io/oci-load-balancer-shape-flex-min: 10 6 | service.beta.kubernetes.io/oci-load-balancer-shape-flex-max: 10 7 | config: 8 | use-proxy-protocol: "false" 9 | server-tokens: "false" 10 | enable-brotli: "true" 11 | use-forwarded-headers: "true" 12 | admissionWebhooks: 13 | timeoutSeconds: 30 14 | publishService: 15 | enabled: true 16 | extraArgs: 17 | update-status-on-shutdown: "false" 18 | updateStrategy: 19 | rollingUpdate: 20 | maxUnavailable: 1 21 | type: RollingUpdate 22 | ingressClassResource: 23 | enabled: true 24 | default: true 25 | replicaCount: 2 26 | metrics: 27 | enabled: false 28 | serviceMonitor: 29 | enabled: true 30 | additionalLabels: 31 
| release: prometheus 32 | prometheusRule: 33 | enabled: true 34 | additionalLabels: 35 | release: prometheus 36 | rules: 37 | - alert: Ingress-NGINXConfigFailed 38 | expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 39 | for: 1s 40 | labels: 41 | severity: critical 42 | annotations: 43 | description: bad ingress config - nginx config test failed 44 | summary: uninstall the latest ingress changes to allow config reloads to resume 45 | resources: 46 | limits: 47 | cpu: 1 48 | memory: 1024Mi 49 | requests: 50 | cpu: 100m 51 | memory: 128Mi 52 | 53 | -------------------------------------------------------------------------------- /envs/mailu-values.yaml: -------------------------------------------------------------------------------- 1 | logLevel: WARNING 2 | nodeSelector: 3 | beta.kubernetes.io/arch: amd64 4 | kubernetes.io/arch: amd64 5 | persistence: 6 | single_pvc: false 7 | certmanager: 8 | enabled: true 9 | issuerName: letsencrypt-dns 10 | apiVersion: "cert-manager.io/v1" 11 | front: 12 | controller: 13 | kind: DaemonSet 14 | hostPort: 15 | enabled: false 16 | externalService: 17 | enabled: true 18 | externalTrafficPolicy: Local 19 | annotations: 20 | service.beta.kubernetes.io/oci-load-balancer-shape: "flexible" 21 | service.beta.kubernetes.io/oci-load-balancer-shape-flex-min: "10" 22 | service.beta.kubernetes.io/oci-load-balancer-shape-flex-max: "20" 23 | type: LoadBalancer 24 | imap: 25 | imap: true 26 | imaps: true 27 | smtp: 28 | smtp: true 29 | smtps: true 30 | submission: true 31 | admin: 32 | persistence: 33 | size: 1Gi 34 | resources: 35 | requests: 36 | cpu: 80m 37 | postfix: 38 | persistence: 39 | size: 5Gi 40 | resources: 41 | requests: 42 | cpu: 80m 43 | dovecot: 44 | persistence: 45 | size: 50Gi 46 | resources: 47 | requests: 48 | cpu: 80m 49 | roundcube: 50 | persistence: 51 | size: 1Gi 52 | resources: 53 | requests: 54 | cpu: 50m 55 | redis: 56 | persistence: 57 | size: 5Gi 58 | resources: 59 | requests: 60 | cpu: 50m 
61 | rspamd_clamav_persistence: 62 | single_pvc: false 63 | clamav: 64 | persistence: 65 | size: 5Gi 66 | resources: 67 | requests: 68 | cpu: 80m 69 | rspamd: 70 | persistence: 71 | size: 1Gi 72 | resources: 73 | limits: 74 | memory: 500Mi 75 | -------------------------------------------------------------------------------- /scripts/cluster-kube-prometheus-stack.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script deploys Prometheus monitoring to the k8s cluster with grafana 4 | # admin passworf encrypted using sealed secrets. 5 | # If run with '-q' parameter, password is autogenerated using pwgen 6 | # otherwise the script asks to enter admin password from commandline 7 | # 8 | 9 | source `dirname "$0"`/scripts-env-init.sh 10 | 11 | NAME="${PM_NAME}" 12 | TNS=${PM_TARGET_NAMESPACE} 13 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 14 | GRAFANA_ADMIN_PASSWORD=`gen_token 32` 15 | [[ "$1" == "-q" ]] || { 16 | echo -n "Provide grafana password: "; read gr_adm_pass 17 | [[ -z ${gr_adm_pass} ]] || GRAFANA_ADMIN_PASSWORD==${gr_adm_pass} 18 | } 19 | 20 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 21 | 22 | CL_DIR=`mkdir_ns ${BASE_DIR} ${TNS} ${FLUX_NS}` 23 | 24 | mkdir -p "${CL_DIR}/${NAME}" 25 | kubectl create secret generic "prometheus-stack-credentials" \ 26 | --namespace "${PM_TARGET_NAMESPACE}" \ 27 | --from-literal=grafana_admin_password="${GRAFANA_ADMIN_PASSWORD}" \ 28 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 29 | --format=yaml > "${CL_DIR}/${NAME}/prometheus-stack-credentials-sealed.yaml" 30 | 31 | echo " ${BOLD}Deploying ${NAME}${NORMAL}" 32 | ${SCRIPTS}/flux-create-helmrel.sh \ 33 | "${PM_NAME}" \ 34 | "${PM_VER}" \ 35 | "${PM_RNAME}" \ 36 | "${PM_TARGET_NAMESPACE}" \ 37 | "${PM_NAMESPACE}" \ 38 | "${PM_SOURCE}" \ 39 | "${PM_VALUES}" --create-target-namespace 
--depends-on="${FLUX_NS}/${SS_NAME}" || exit 1 40 | 41 | update_chart_ns "${CL_DIR}/${NAME}/${NAME}.yaml" 42 | 43 | cat >> "${CL_DIR}/${NAME}/${NAME}.yaml" < /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | ${SCRIPTS}/flux-create-source.sh ${RANCHER_S_NAME} ${RANCHER_URL} 14 | 15 | echo " ${BOLD}Deploying Rancher${NORMAL}" 16 | 17 | TNS=${RANCHER_TARGET_NAMESPACE} 18 | 19 | [[ "$1" == "-q" ]] || { 20 | echo -n "Provide Rancher admin password: "; read u_pass 21 | echo -n "Provide Rancher hostname: "; read u_hostname 22 | [[ -z ${u_pass} ]] || RANCHER_PASSWORD=${u_pass} 23 | [[ -z ${u_hostname} ]] || RANCHER_HOSTNAME=${u_hostname} 24 | } 25 | 26 | CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 27 | 28 | NAME="${RANCHER_NAME}" 29 | 30 | cat > "${CL_DIR}/${TNS}.yaml" << EOF 31 | apiVersion: v1 32 | kind: Namespace 33 | metadata: 34 | name: ${TNS} 35 | EOF 36 | 37 | mkdir -p "${CL_DIR}/${NAME}" 38 | 39 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 40 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 41 | kind: HelmRelease 42 | metadata: 43 | name: ${NAME} 44 | namespace: ${TNS} 45 | spec: 46 | releaseName: ${NAME} 47 | chart: 48 | spec: 49 | chart: rancher 50 | reconcileStrategy: ChartVersion 51 | sourceRef: 52 | kind: HelmRepository 53 | name: rancher 54 | namespace: flux-system 55 | version: ${RANCHER_VER} 56 | interval: 1h0m0s 57 | values: 58 | hostname: "${RANCHER_HOSTNAME}" 59 | ingress: 60 | tls: 61 | source: letsEncrypt 62 | letsEncrypt: 63 | email: "${SSL_EMAIL}" 64 | ingress: 65 | class: nginx 66 | bootstrapPassword: "${RANCHER_PASSWORD}" 67 | EOF 68 | 69 | update_kustomization ${CL_DIR}/${NAME} 70 | 71 | update_kustomization ${CL_DIR} 72 | 73 | update_kustomization ${APPS_DIR} 74 | 75 | echo " ${BOLD}Deploying changes${NORMAL}" 76 | 77 | update_repo ${NAME} 78 | 79 | wait_for_ready -------------------------------------------------------------------------------- /scripts/create-longhorn-pvc.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | example() { 4 | echo "Example:" 5 | echo "$0 name namespace 5Gi /path/to/folder" 6 | echo "$0 name --remove /path/to/folder" 7 | exit 1 8 | } 9 | 10 | 11 | [ -z "$1" ] && { 12 | echo "Missing volume name" 13 | example 14 | } 15 | 16 | [ -z "$2" ] && { 17 | echo "Missing volume namespace" 18 | example 19 | } 20 | 21 | source `dirname "$0"`/scripts-env-init.sh 22 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "No cluster repo dir!"; exit 1; } 23 | 24 | if [ "$2" == "--remove" ] ; then 25 | [ -z "$3" ] && { 26 | echo "Missing volume path" 27 | example 28 | } 29 | 30 | CL_DIR=${3} 31 | PVC_FILE=${CL_DIR}/${1}.yaml 32 | 33 | echo " Removing ${PVC_FILE}" 34 | rm -f ${PVC_FILE} 35 | update_kustomization ${CL_DIR} 36 | update_repo ${1} 37 | exit 0 38 | fi 39 | 40 | [ -z "$3" ] && { 41 | echo "Missing volume size" 42 | example 43 | } 44 | 45 | [ -z "$4" ] && { 46 | echo "Missing volume path" 47 | example 48 | } 49 | 50 | CL_DIR=${4} 51 | PVC_FILE=${CL_DIR}/${1}.yaml 52 | 53 | [ -f "${PVC_FILE}" ] && { 54 | echo " Volume already created: ${PVC_FILE}" 55 | exit 1 56 | } 57 | 58 | cat < ${PVC_FILE} 59 | --- 60 | apiVersion: v1 61 | kind: PersistentVolume 62 | metadata: 63 | name: $1-pv 64 | spec: 65 | accessModes: 66 | - ReadWriteOnce 67 | capacity: 68 | storage: $3 69 | persistentVolumeReclaimPolicy: Delete 70 | volumeMode: Filesystem 71 | storageClassName: longhorn 72 | csi: 73 | driver: driver.longhorn.io 74 | fsType: ext4 75 | volumeAttributes: 76 | numberOfReplicas: '3' 77 | staleReplicaTimeout: '2880' 78 | volumeHandle: $1-pv 79 | EOF 80 | 81 | cat <> ${PVC_FILE} 82 | 83 | --- 84 | apiVersion: v1 85 | kind: PersistentVolumeClaim 86 | metadata: 87 | name: $1 88 | namespace: $2 89 | spec: 90 | accessModes: 91 | - ReadWriteOnce 92 | resources: 93 | requests: 94 | storage: $3 95 | volumeName: $1-pv 96 | storageClassName: longhorn 97 | 98 | EOF 99 | 100 | 
-------------------------------------------------------------------------------- /scripts/cluster-kubernetes-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Scripts installs k8s dashboard service and stores access token 4 | # in a local FS file 5 | # 6 | 7 | source `dirname "$0"`/scripts-env-init.sh 8 | 9 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 10 | 11 | CL_SERV_NAME=${DA_NAME} 12 | CL_SERV_TNS=${DA_TARGET_NAMESPACE} 13 | CL_SERV_TYPE=${BASE} 14 | 15 | source ${SCRIPTS}/cluster-script-preprocess.sh $1 16 | 17 | name="${DA_S_NAME}" 18 | url="${DA_URL}" 19 | 20 | echo " ${BOLD}Adding ${name} source at ${url}${NORMAL}" 21 | ${SCRIPTS}/flux-create-source.sh ${name} ${url} 22 | update_repo "${DA_NAME}" 23 | wait_for_ready 5 24 | 25 | NAME=${DA_NAME} 26 | TNS=${DA_TARGET_NAMESPACE} 27 | DA_USER="dashboard-admin" 28 | 29 | CL_DIR=`mkdir_ns ${BASE_DIR} ${TNS} ${FLUX_NS}` 30 | 31 | mkdir -p "${CL_DIR}/${NAME}" 32 | 33 | cat > "${CL_DIR}/${NAME}/dashboard-service-account.yaml" < /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | echo " ${BOLD}Deploying CoTURN${NORMAL}" 14 | 15 | [[ "$1" == "-q" ]] || { 16 | echo -n "Provide CoTURN domain: "; read u_domain; 17 | echo -n "Provide CoTURN username: "; read u_user 18 | echo -n "Provide CoTURN password: "; read u_pass 19 | echo -n "Provide lower bound of UDP port to use for relay [40000]: "; read u_lport; 20 | echo -n "Provide upper bound of UDP port to use for relay [41000]: "; read u_uport; 21 | [[ -z ${u_domain} ]] || COTURN_DOMAIN=${u_domain} 22 | [[ -z ${u_user} ]] || COTURN_USERNAME=${u_user} 23 | [[ -z ${u_pass} ]] || COTURN_PASSWORD=${u_pass} 24 | [[ -z ${u_lport} ]] || COTURN_LPORT=${u_lport} 25 | [[ -z ${u_uport} ]] || COTURN_UPORT=${u_uport} 26 | } 27 | 28 | TNS=${COTURN_TARGET_NAMESPACE} 29 | 30 | ${SCRIPTS}/cluster-tigase-helm-charts.sh 31 | 32 | 
CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 33 | 34 | cat > "${CL_DIR}/${TNS}.yaml" << EOF 35 | apiVersion: v1 36 | kind: Namespace 37 | metadata: 38 | name: ${TNS} 39 | EOF 40 | 41 | NAME="${COTURN_NAME}" 42 | 43 | mkdir -p "${CL_DIR}/${NAME}" 44 | 45 | VALUES=`export COTURN_DOMAIN="${COTURN_DOMAIN}" COTURN_USERNAME="${COTURN_USERNAME}" COTURN_PASSWORD="${COTURN_PASSWORD}" COTURN_LPORT="${COTURN_LPORT}" COTURN_UPORT="${COTURN_UPORT}" && envsubst < ${COTURN_VALUES_FILE}` 46 | 47 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 48 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 49 | kind: HelmRelease 50 | metadata: 51 | name: ${NAME} 52 | namespace: ${TNS} 53 | spec: 54 | releaseName: ${NAME} 55 | chart: 56 | spec: 57 | chart: coturn 58 | sourceRef: 59 | kind: GitRepository 60 | name: tigase 61 | namespace: flux-system 62 | interval: 1m 63 | interval: 5m 64 | values: 65 | ${VALUES} 66 | EOF 67 | 68 | update_kustomization ${CL_DIR}/${NAME} 69 | 70 | update_kustomization ${CL_DIR} 71 | 72 | update_kustomization ${APPS_DIR} 73 | 74 | # update_repo ${NAME} 75 | 76 | wait_for_ready 77 | 78 | -------------------------------------------------------------------------------- /scripts/cluster-script-preprocess.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script should be called from within a cluster-service.sh script 4 | # at the begining to execute certain actions depending on command 5 | # line parameters 6 | # 7 | # Currently implemented actions: 8 | # --update - updates current service installation with changed 9 | # configuration and/or manifest files. 
Backups 10 | # should be made prior to avoid potential data loss 11 | # --remove - completely uninstalls and removes the service 12 | # which, most likely means all service data erased 13 | # --reset - this property runs the '--remove' and then allows 14 | # the script to reinstall service from scratch 15 | # 16 | 17 | [ -z "$1" ] || { 18 | 19 | if [ -z "${CL_SERV_TYPE}" ] || [ -z "${CL_SERV_TNS}" ] || [ -z "${CL_SERV_NAME}" ] ; then 20 | echo "Script not prepared yet for preprocessing, $1 failed." 21 | exit 1 22 | fi 23 | 24 | CL_SERV_ACTION=${1:2} 25 | CL_DIR=`prepdir_ns ${INFRA}/${CL_SERV_TYPE} ${CL_SERV_TNS} ${FLUX_NS}` 26 | 27 | cl_serv_update() { 28 | echo " ${INFO}Preparing to ${CL_SERV_ACTION}: ${CL_SERV_NAME}" 29 | echo " Removing: ${CL_DIR}${NORMAL}" 30 | rm -rf ${CL_DIR} 31 | } 32 | 33 | cl_serv_remove() { 34 | echo "${ERROR} WARNING!!${NORMAL}" 35 | echo "${ERROR} You are about to remove ${CL_SERV_NAME} from your k8s cluster.${NORMAL}" 36 | echo "${ERROR} This application will no longer be available.${NORMAL}" 37 | echo "${ERROR} All the application data will be lost and cannot be restored unless there is backup.${NORMAL}" 38 | echo "To proceed press ${INFO}ENTER${NORMAL} to stop press ${ERROR}Ctrl-c${NORMAL}" 39 | read 40 | cl_serv_update 41 | update_kustomization ${CL_DIR%${CL_SERV_TNS}} 42 | echo " ${INFO}Removing: ${BASE_DIR}/sources/${CL_SERV_NAME}.yaml${NORMAL}" 43 | rm -f ${BASE_DIR}/sources/${CL_SERV_NAME}.yaml 44 | update_kustomization ${BASE_DIR}/sources 45 | update_repo "Removing ${CL_SERV_NAME}" 46 | [ "$1" == "--no-quit" ] || exit 0 47 | } 48 | 49 | cl_serv_reset() { 50 | cl_serv_remove --no-quit 51 | wait_for_ready 10 ${CL_SERV_NAME} 52 | } 53 | 54 | case ${CL_SERV_ACTION} in 55 | 56 | update) 57 | cl_serv_update 58 | ;; 59 | 60 | remove) 61 | cl_serv_remove 62 | ;; 63 | 64 | reset) 65 | cl_serv_reset 66 | ;; 67 | 68 | esac 69 | 70 | } 71 | 72 | -------------------------------------------------------------------------------- /README.md: 
--------------------------------------------------------------------------------
# k8s-scripts

Various scripts for automated k8s cluster bootstrap and management with fluxcd.

Scripts are created and tested in a Linux (Ubuntu) environment but should work on any system with Bash and basic tools installed to the default executable PATH.

Requirements

1. Running 'empty' k8s cluster
2. Configured and working local environment with kubectl connecting to the k8s cluster by default
3. Command-line tools installed and available in executable PATH
   - pwgen
   - kubectl
   - kubeseal - https://github.com/bitnami-labs/sealed-secrets/releases
   - fluxcd - https://fluxcd.io/docs/installation/
   - kustomize - https://kubectl.docs.kubernetes.io/installation/kustomize/
   - git
   - yq - https://github.com/mikefarah/yq/
4. Bash

Initial setup:

1. Create a config directory for configuration settings and backup, i.e. ```mkdir ~/.tigase-flux```. If you wish to put the config in a different location, set the `TIG_CLUSTER_HOME` variable to point to this location. This will be used as the `CONFIG` variable.
2. Copy the entire `envs` folder to ${CONFIG}/envs: ```cp -rv envs ${CONFIG}/```
3. Edit the `cluster.env` file. The following properties are mandatory to be set:
   * `CLUSTER_NAME` - your k8s cluster name, this will be your git repo name as well.
   * `GITHUB_USER` and `GITHUB_TOKEN` - your github credentials.
   * `SSL_EMAIL` - email provided to letsencrypt during SSL certificate generation and renewal.
4. Not mandatory, but necessary for longhorn backups on S3 object storage, change properties: `LH_S3_BACKUP...`

Usage:

The main script to bootstrap fluxcd on a k8s cluster with all basic services is `cluster-bootstrap.sh`. Normally, if the environment is correctly configured and tested, this is all that needs to be run.
34 | It may take a few minutes but everything is setup automatically with no input from the user. 35 | 36 | However, on a fresh system, it is recommended to run bootstrap scripts manually one by one. `-q` option can be added to the script for fully automated execution. 37 | 1. `flux-bootstrap.sh` - flux bootstrap, git repository setup and creating basic repo structure 38 | 2. `cluster-common-sources.sh` - deploying helm sources to flux system on k8s cluster 39 | 3. `cluster-sealed-secrets.sh` 40 | 4. `cluster-kubernetes-dashboard.sh` 41 | 5. `cluster-ingress-nginx.sh` 42 | 6. `cluster-cert-manager.sh` 43 | 7. `cluster-longhorn.sh` 44 | 8. `cluster-kube-prometheus-stack.sh` 45 | 9. `cluster-loki-stack.sh` 46 | 47 | -------------------------------------------------------------------------------- /scripts/scripts-env-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script is the main entry point to setup entire k8s cluster. 4 | # It first initializes k8s cluster with fluxcd, setups repository 5 | # and then adds all listed services one by one. 6 | # Execution of the entire script may take long time because the script 7 | # waits until flux system fully reconcilled after each service is added 8 | # 9 | 10 | [ -z ${K8S_CONTEXT} ] && { 11 | echo "K8S_CONTEXT not set." 12 | echo "You have to set K8S_CONTEXT environment to your k8s context:" 13 | echo "kubectl config get-contexts" 14 | echo "Then set your context name as the variable value:" 15 | echo "export K8S_CONTEXT=context-name" 16 | exit 1 17 | } 18 | 19 | if [ -z "$TIG_CLUSTER_HOME" ]; then 20 | export CONFIG="$HOME/.tigase-flux" 21 | else 22 | export CONFIG="$TIG_CLUSTER_HOME" 23 | fi 24 | 25 | export CONFIG="${CONFIG}/${K8S_CONTEXT}" 26 | 27 | if [ ! -d "$CONFIG" ]; then 28 | echo "Config directory $CONFIG does not exist!"; 29 | exit 1; 30 | fi 31 | 32 | export TMP_DIR="$CONFIG/tmp" 33 | 34 | [ ! 
-d "$TMP_DIR" ] && mkdir $TMP_DIR 35 | 36 | SCRIPT_DIR=`realpath "$0"` 37 | export SCRIPTS=`dirname "$SCRIPT_DIR"` 38 | 39 | if [ ! -d "$CONFIG/envs" ]; then 40 | echo "Environment directory $CONFIG/envs does not exist!"; 41 | exit 1; 42 | fi 43 | 44 | source "${CONFIG}/envs/cluster.env" || { echo "No cluster.env file"; exit 1; } 45 | 46 | if [ "${K8S_CONTEXT}" != "${K8S_CLUSTER_CONTEXT}" ]; then 47 | echo "Cluster k8s context does not match your environment k8s context." 48 | echo "Make sure you have K8S_CLUSTER_CONTEXT property set to a correct value in cluster.env file" 49 | exit 1 50 | fi 51 | 52 | 53 | kubectl config use-context ${K8S_CONTEXT} 54 | 55 | if [ "$COLORED_OUTPUT" = true ]; then 56 | export BOLD="$(tput bold)" 57 | export RED="$(tput setaf 1)" 58 | export GREEN="$(tput setaf 2)" 59 | export NORMAL="$(tput sgr0)" 60 | export CYAN="$(tput setaf 6)" 61 | export YELLOW="$(tput setaf 3)" 62 | else 63 | export BOLD="" 64 | export RED="" 65 | export GREEN="" 66 | export NORMAL="" 67 | export CYAN="" 68 | export YELLOW="" 69 | fi 70 | export ERROR="$BOLD$RED" 71 | export WARNING="$BOLD$YELLOW" 72 | export INFO="$BOLD$GREEN" 73 | 74 | # Make sure all required executables are installed 75 | REQUIRED_CMDS="pwgen kubectl kubeseal flux kustomize git yq" 76 | 77 | for CMD in $REQUIRED_CMDS; do 78 | if ! 
command -v "$CMD" &> /dev/null; then 79 | echo "${ERROR}$CMD could not be found!${NORMAL}" 80 | exit 81 | fi 82 | done 83 | 84 | source "${CONFIG}/envs/versions.env" || { echo "${ERROR}No versions.env file${NORMAL}"; exit 1; } 85 | source "${SCRIPTS}/cluster-tools.sh" || exit 1 86 | 87 | if [ -z "$SSL_ISSUER" ]; then 88 | export SSL_ISSUER="$SSL_STAG_ISSUER"; 89 | fi 90 | 91 | [ "$1" != "--check" ] || echo "All seems to be OK and ready to go" 92 | -------------------------------------------------------------------------------- /envs/mailu.env: -------------------------------------------------------------------------------- 1 | # If the mailu domain zone is hosted on AWS Route53 the, installation script 2 | # can automatically update the domain and hostname IP address 3 | # For this to work you need installed, configured and working `aws` cli utility 4 | # and you need to provide AWS ZONE ID here: 5 | MAILU_DOMAIN_AWS_ZONE_ID="" 6 | MAILU_HOSTNAME_AWS_ZONE_ID="" 7 | 8 | # In case you have multiple AWS accounts and need to use a non-default one 9 | # for updaring mailu DNS, put the AWS profile here. 10 | AWS_PROFILE="" 11 | 12 | # Set to "true" if you want Mailu to use existing PVC instead of creating new ones 13 | # Not fully implemented yet 14 | MAILU_EXISTING_PVC="false" 15 | 16 | # Mailu requires quite a long time for initialization of all components. 17 | # This time depends on many factors so it may be shorter or even longer. 18 | # If you get fluxcd installation errors: "install retries exhausted" 19 | # Mailu Ready=false, wait more time, 10, 20 minutes and try to connect 20 | # to the app anyway. If it works and if everything seems to be working 21 | # but fluxcd says installation failed, increase this timeout and 22 | # rerun installation. 23 | MAILU_FLUXCD_TIMEOUT="20m" 24 | 25 | # Email domain, that is domain for all email addresses. It's the first 26 | # and primary email domain. More can be added later from admin GUI. 
27 | # For example: example.com 28 | MAILU_DOMAIN="" 29 | 30 | # These are DNS domains used to connect to the email server for which 31 | # SSL certificates are needed, like: 32 | # mail.example.com imap.example.com smtp.example.com example.com 33 | MAILU_HOSTNAMES=(mail.${MAILU_DOMAIN} ${MAILU_DOMAIN}) 34 | 35 | # Initial admin user name. This is the first account that will be 36 | # created on the server. More accounts and admins can be created 37 | # from admin web ui. 38 | # Leave empty to generate random, 12 chars long user name 39 | MAILU_ADMIN_USERNAME="" 40 | 41 | # Initial admin domain name. Any domain works here, this is just 42 | # for admin identification. However, usually it should be the 43 | # email server working domain to be able to receive emails to the 44 | # account. By default it is set to the primary email domain. 45 | MAILU_ADMIN_DOMAIN="${MAILU_DOMAIN}" 46 | 47 | # Primary admin passsword. 48 | # Leave empty and a strong password will be generated for you. 49 | MAILU_ADMIN_PASSWORD="" 50 | 51 | # Subnet for the k8s cluster pods. Default is: 10.42.0.0/16 but this 52 | # is k8s cluster specific. Check with your provider and set accordingly. 53 | MAILU_SUBNET="" 54 | 55 | # Email relay host, if external relay is used for email delivery. 
56 | MAILU_RELAY_HOST="" 57 | 58 | # Email relay username for external relay 59 | MAILU_RELAY_USERNAME="" 60 | 61 | # Email relay password for external relay 62 | MAILU_RELAY_PASSWORD="" 63 | 64 | # A secret key which is required to protect authentication cookies 65 | # and must be set individually for each deployment 66 | MAILU_SECRET_KEY="" 67 | 68 | -------------------------------------------------------------------------------- /scripts/flux-create-helmrel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script adds a new helm release to the FluxCD repository and 4 | # kustomization metadata 5 | # 6 | 7 | source `dirname "$0"`/scripts-env-init.sh 8 | 9 | INTERVAL="${DEF_INTERVAL}" 10 | NAME="" 11 | VERSION="" 12 | NAMESPACE="flux-system" 13 | TARGET_NAMESPACE="${NAMESPACE}" 14 | #URL="" 15 | 16 | #### $1 17 | 18 | ## Check if this is app setup 19 | [[ "$1" == "app" ]] && { BASE_DIR="${APPS_DIR}"; shift; } 20 | [[ -z "$1" ]] && { echo "${ERROR}Missing name argument${NORMAL}"; } || { NAME="$1"; } 21 | 22 | [[ -z "$1" || "$1" == "-h" ]] && { 23 | echo "CLUSTER_NAME environment variable must be set" 24 | echo "\$1 - name - package name" 25 | echo "\$2 - chart version" 26 | # echo "\$3 - url - HelmRepository source URL" 27 | echo "\$3 - release name [name]" 28 | echo "\$4 - target namespace" 29 | echo "\$5 - namespace" 30 | echo "\$6 - source [HelmRepository/name]" 31 | echo "Any flux compatible parameter will be appended to thend of the command" 32 | exit 0 33 | } 34 | 35 | [[ -z "${CLUSTER_NAME}" ]] && { echo "{ERROR}CLUSTER_NAME env not set!${NORMAL}"; exit 1; } 36 | 37 | REL_NAME="${NAME}" 38 | SOURCE="HelmRepository/${NAME}" 39 | CHART="${NAME}" 40 | 41 | shift 42 | 43 | #### $2 44 | [[ -z "$1" ]] && { echo "${ERROR}Missing version argument${NORMAL}"; exit 1; } || { VERSION="$1"; shift; } 45 | #### $3 46 | #[[ -z "$1" ]] || { URL="$1"; shift; } 47 | #### $3 48 | [[ -z "$1" ]] || { REL_NAME="$1"; 
shift; } 49 | #### $4 50 | [[ -z "$1" ]] || { TARGET_NAMESPACE="$1"; shift; } 51 | #### $5 52 | [[ -z "$1" ]] || { NAMESPACE="$1"; shift; } 53 | #### $6 54 | [[ -z "$1" ]] || { SOURCE="$1"; shift; } 55 | 56 | CL_DIR=`create_ns ${BASE_DIR} ${TARGET_NAMESPACE}` 57 | 58 | DIR="${CL_DIR}/${NAME}" 59 | FILE="${DIR}/${NAME}.yaml" 60 | 61 | ### check if the target file already exists 62 | 63 | [[ -f "${FILE}" ]] && { 64 | echo "${ERROR}Files are alredy generated, please either edit to update or remove to regenrate:${NORMAL}" 65 | echo " - ${FILE}" 66 | echo " - ${DIR}/kustomization.yaml" 67 | exit 2 68 | } 69 | 70 | ### Safe to proceed with files generation 71 | 72 | mkdir -p "${DIR}" 73 | 74 | CMD="flux create helmrelease ${NAME} \ 75 | --interval=${INTERVAL} \ 76 | --release-name=${REL_NAME} \ 77 | --source=${SOURCE} \ 78 | --chart-version=${VERSION} \ 79 | --chart=${CHART} \ 80 | --namespace=${NAMESPACE} 81 | --target-namespace=${TARGET_NAMESPACE} $*" 82 | echo -e "${CMD}\n" >> $TMP_DIR/flux-cmds.txt 83 | #set -x 84 | ${CMD} --export > ${FILE} 85 | #set +x 86 | 87 | echo "Update service kustomization" 88 | cd ${DIR} 89 | rm -f kustomization.yaml 90 | kustomize create --autodetect --recursive 91 | cd - 92 | 93 | [[ "${TARGET_NAMESPACE}" == "${FLUX_NS}" ]] || { 94 | echo "Update namespace kustomization" 95 | cd ${CL_DIR} 96 | rm -f kustomization.yaml 97 | kustomize create --autodetect --recursive --namespace="${TARGET_NAMESPACE}" 98 | cd - 99 | } 100 | 101 | echo "Update common kustomization" 102 | cd ${BASE_DIR} 103 | rm -f kustomization.yaml 104 | kustomize create --autodetect --recursive 105 | cd - 106 | 107 | -------------------------------------------------------------------------------- /scripts/cluster-cert-manager.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script deploys cert manager and sets it up to use letsencrypt to 4 | # generate certificates for requested services 5 | # 6 | 7 | source 
`dirname "$0"`/scripts-env-init.sh 8 | 9 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "No cluster repo dir!"; exit 1; } 10 | 11 | CL_SERV_NAME=${CM_NAME} 12 | CL_SERV_TNS=${CM_TARGET_NAMESPACE} 13 | CL_SERV_TYPE=${BASE} 14 | 15 | source ${SCRIPTS}/cluster-script-preprocess.sh $1 16 | 17 | name="${CM_S_NAME}" 18 | url="${CM_URL}" 19 | 20 | echo " ${BOLD}Adding ${name} source at ${url}${NORMAL}" 21 | ${SCRIPTS}/flux-create-source.sh ${name} ${url} && { 22 | update_repo "${CM_NAME}" 23 | wait_for_ready 5 24 | } 25 | 26 | NAME="${CM_NAME}" 27 | TNS="${CM_TARGET_NAMESPACE}" 28 | 29 | CL_DIR=`mkdir_ns ${BASE_DIR} ${TNS} ${FLUX_NS}` 30 | 31 | echo " ${BOLD}Deploying ${NAME}${NORMAL}" 32 | ${SCRIPTS}/flux-create-helmrel.sh \ 33 | "${CM_NAME}" \ 34 | "${CM_VER}" \ 35 | "${CM_RNAME}" \ 36 | "${CM_TARGET_NAMESPACE}" \ 37 | "${CM_NAMESPACE}" \ 38 | "${CM_SOURCE}" \ 39 | "${CM_VALUES}" --create-target-namespace --crds=CreateReplace || exit 1 40 | 41 | update_chart_ns "${CL_DIR}/${NAME}/${NAME}.yaml" 42 | 43 | update_repo "${NAME}" 44 | 45 | wait_for_ready 46 | 47 | cat > "${CL_DIR}/${NAME}/issuer-staging.yaml" < "${CL_DIR}/${NAME}/issuer-production.yaml" < "${CL_DIR}/${NAME}/issuer-production-dns.yaml" < "${CL_DIR}/${NAME}/route53-secret.yaml" 110 | kubectl apply -f "${CL_DIR}/${NAME}/route53-secret.yaml" 111 | 112 | } 113 | 114 | update_kustomization ${CL_DIR}/${NAME} 115 | 116 | update_repo "${NAME}" 117 | 118 | -------------------------------------------------------------------------------- /scripts/flux-bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script initializes cluster with FluxCD and setups basic 4 | # git repo structure and data 5 | # 6 | 7 | source `dirname "$0"`/scripts-env-init.sh 8 | 9 | MODE="FluxCD installation" 10 | 11 | [[ "$1" == "--update" ]] && MODE="FluxCD update" 12 | 13 | [[ "$1" != "--update" ]] && [[ -d ${CLUSTER_REPO_DIR} ]] && { 14 | echo "${ERROR}Local folder the 
cluster repository already exist: ${CLUSTER_REPO_DIR}${NORMAL}" 15 | echo "${ERROR}Cleanup first and then rerun the script${NORMAL}" 16 | exit 1 17 | } 18 | 19 | flux check 20 | echo -e " ${INFO}${MODE}\nPress ENTER if everything looks correct, Ctrl-C to stop${NORMAL}" 21 | read abc 22 | 23 | flux bootstrap github \ 24 | --owner=$GITHUB_USER \ 25 | --repository=$CLUSTER_REPO \ 26 | --branch=$REPO_BRANCH \ 27 | --path=./clusters/$CLUSTER_NAME \ 28 | --token-auth \ 29 | --personal \ 30 | --read-write-key \ 31 | --components-extra=image-reflector-controller,image-automation-controller 32 | 33 | [[ "$1" == "--update" ]] && { 34 | flux reconcile source git flux-system 35 | echo "FluxCD on the cluster updated" 36 | exit 0 37 | } 38 | 39 | if [ ! -d "$PROJECTS_DIR" ]; then 40 | mkdir "$PROJECTS_DIR" 41 | fi 42 | 43 | cd ${PROJECTS_DIR} &> /dev/null || { echo "No projects dir!"; exit 1; } 44 | git clone "https://github.com/$GITHUB_USER/$CLUSTER_REPO" 45 | if [ ! -d "$CLUSTER_REPO" ]; then 46 | echo "${ERROR}Failed to clone cluster repository $CLUSTER_REPO to $CLUSTER_REPO_DIR!${NORMAL}" 47 | exit 1; 48 | fi 49 | cd $CLUSTER_REPO 50 | 51 | FILE="./clusters/${CLUSTER_NAME}/${BASE}.yaml" 52 | 53 | [[ -f "${FILE}" ]] || cat > "${FILE}" < "${FILE}" < "${FILE}" < "${FILE}" < "${FILE}" < "${FILE}" < /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | echo " ${BOLD}Deploying onedev${NORMAL}" 14 | 15 | if [ -z "$ONEDEV_DOMAIN" ]; then 16 | echo " ${ERROR}onedev domain name is not set!${NORMAL}"; 17 | exit 1; 18 | fi 19 | 20 | 21 | TNS=${ONEDEV_TARGET_NAMESPACE} 22 | 23 | ${SCRIPTS}/cluster-tigase-helm-charts.sh 24 | 25 | ONEDEV_MYSQL_PASSWORD=`gen_token 8` 26 | ONEDEV_MYSQL_ROOT_PASSWORD=`gen_token 24` 27 | 28 | [[ "$1" == "-q" ]] || { 29 | echo -n "Provide MySQL user password: "; read u_pass 30 | echo -n "Provide MySQL root user password: "; read u_root_pass 31 | [[ -z ${u_pass} ]] || ONEDEV_MYSQL_PASSWORD=${u_pass} 32 | [[ -z ${u_root_pass} ]] || 
ONEDEV_MYSQL_ROOT_PASSWORD=${u_root_pass} 33 | } 34 | 35 | if [ "${ONEDEV_MYSQL_S3_BACKUP}" == "true" ]; then 36 | if [ -z "${ONEDEV_MYSQL_S3_BACKUP_ACCESS_KEY}" ]; then 37 | echo -n "Provide MySQL S3 backup access-key: "; read a_key; 38 | echo -n "Provide MySQL S3 backup secret-key: "; read s_key; 39 | [[ -z ${a_key} ]] || ONEDEV_MYSQL_S3_BACKUP_ACCESS_KEY=${a_key} 40 | [[ -z ${s_key} ]] || ONEDEV_MYSQL_S3_BACKUP_SECRET_KEY=${s_key} 41 | fi 42 | fi 43 | 44 | echo " ${BOLD}Preparing MySQL deployment${NORMAL}" 45 | 46 | CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 47 | 48 | cat > "${CL_DIR}/${TNS}.yaml" << EOF 49 | apiVersion: v1 50 | kind: Namespace 51 | metadata: 52 | name: ${TNS} 53 | EOF 54 | 55 | MYSQL_NAME="${ONEDEV_NAME}-mysql" 56 | NAME="${MYSQL_NAME}" 57 | 58 | mkdir -p "${CL_DIR}/${NAME}" 59 | kubectl create secret generic "mysql-credentials" \ 60 | --namespace "${TNS}" \ 61 | --from-literal=mysql-password="${ONEDEV_MYSQL_PASSWORD}" \ 62 | --from-literal=mysql-root-password="${ONEDEV_MYSQL_ROOT_PASSWORD}" \ 63 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 64 | --format=yaml > "${CL_DIR}/${NAME}/onedev-mysql-credentials-sealed.yaml" 65 | 66 | if [ "${ONEDEV_MYSQL_S3_BACKUP}" == "true" ]; then 67 | kubectl create secret generic "mysql-backup-s3" \ 68 | --namespace "${TNS}" \ 69 | --from-literal=access-key="${ONEDEV_MYSQL_S3_BACKUP_ACCESS_KEY}" \ 70 | --from-literal=secret-key="${ONEDEV_MYSQL_S3_BACKUP_SECRET_KEY}" \ 71 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 72 | --format=yaml > "${CL_DIR}/${NAME}/onedev-mysql-backup-s3-sealed.yaml" 73 | fi 74 | 75 | VALUES=`envsubst < ${ONEDEV_MYSQL_VALUES_FILE}` 76 | 77 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 78 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 79 | kind: HelmRelease 80 | metadata: 81 | name: ${NAME} 82 | namespace: ${TNS} 83 | spec: 84 | releaseName: ${NAME} 85 | chart: 86 | spec: 87 | chart: mysql 88 | sourceRef: 89 | kind: 
GitRepository 90 | name: tigase 91 | namespace: flux-system 92 | interval: 1m 93 | interval: 5m 94 | values: 95 | auth: 96 | database: "onedev" 97 | username: "onedev" 98 | existingSecret: "mysql-credentials" 99 | 100 | updateStrategy: Recreate 101 | 102 | ${VALUES} 103 | EOF 104 | 105 | update_kustomization ${CL_DIR}/${NAME} 106 | 107 | echo " ${BOLD}Preparing onedev deployment${NORMAL}" 108 | 109 | NAME="${ONEDEV_NAME}" 110 | 111 | mkdir -p "${CL_DIR}/${NAME}" 112 | 113 | VALUES=`envsubst < ${ONEDEV_VALUES_FILE}` 114 | 115 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 116 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 117 | kind: HelmRelease 118 | metadata: 119 | name: ${NAME} 120 | namespace: ${TNS} 121 | spec: 122 | releaseName: ${NAME} 123 | chart: 124 | spec: 125 | chart: onedev 126 | sourceRef: 127 | kind: GitRepository 128 | name: tigase 129 | namespace: flux-system 130 | interval: 1m 131 | interval: 5m 132 | values: 133 | mysql: 134 | enabled: false 135 | externalDatabase: 136 | host: "${MYSQL_NAME}" 137 | database: "onedev" 138 | user: "onedev" 139 | port: 3306 140 | existingSecret: "mysql-credentials" 141 | 142 | updateStrategy: Recreate 143 | 144 | ${VALUES} 145 | EOF 146 | 147 | update_kustomization ${CL_DIR}/${NAME} 148 | 149 | update_kustomization ${CL_DIR} 150 | 151 | update_kustomization ${APPS_DIR} 152 | 153 | ingress_nginx_forward_port 22 "$ONEDEV_TARGET_NAMESPACE" "$ONEDEV_NAME" 154 | 155 | echo " ${BOLD}Deploying changes${NORMAL}" 156 | 157 | update_repo ${NAME} 158 | 159 | wait_for_ready 160 | -------------------------------------------------------------------------------- /envs/versions.env: -------------------------------------------------------------------------------- 1 | ### Versions 2 | # helm search repo .... 
3 | 4 | FLUX_NS="flux-system" 5 | 6 | # Sealed secrets 7 | SS_VER="1.16.1" 8 | SS_URL="https://bitnami-labs.github.io/sealed-secrets" 9 | SS_NAME="sealed-secrets" 10 | SS_S_NAME="${SS_NAME}" 11 | SS_RNAME="${SS_NAME}-controller" 12 | SS_NAMESPACE="${FLUX_NS}" 13 | SS_TARGET_NAMESPACE="${FLUX_NS}" 14 | SS_SOURCE="HelmRepository/${SS_S_NAME}" 15 | SS_VALUES="--values=${CONFIG}/envs/ss_values.yaml" 16 | 17 | # Longhorn 18 | LH_VER="1.2.3" 19 | LH_URL="https://charts.longhorn.io" 20 | LH_NAME="longhorn" 21 | LH_S_NAME="${LH_NAME}" 22 | LH_RNAME="${LH_NAME}" 23 | LH_NAMESPACE="${FLUX_NS}" 24 | LH_TARGET_NAMESPACE="${LH_NAME}-system" 25 | LH_SOURCE="HelmRepository/${LH_S_NAME}" 26 | LH_VALUES="--values=${CONFIG}/envs/longhorn-values.yaml" 27 | 28 | # Ingress Nginx 29 | IN_VER="4.0.6" 30 | IN_URL="https://kubernetes.github.io/ingress-nginx" 31 | IN_NAME="ingress-nginx" 32 | IN_S_NAME="${IN_NAME}" 33 | IN_RNAME="${IN_NAME}" 34 | IN_NAMESPACE="${FLUX_NS}" 35 | IN_TARGET_NAMESPACE="${IN_NAME}" 36 | IN_SOURCE="HelmRepository/${IN_S_NAME}" 37 | IN_VALUES="--values=${CONFIG}/envs/nginx_values.yaml" 38 | 39 | # Cert-Manager 40 | CM_VER="1.6.1" 41 | CM_URL="https://charts.jetstack.io" 42 | CM_NAME="cert-manager" 43 | CM_S_NAME="${CM_NAME}" 44 | CM_RNAME="${CM_NAME}" 45 | CM_NAMESPACE="${FLUX_NS}" 46 | CM_TARGET_NAMESPACE="${CM_NAME}" 47 | CM_SOURCE="HelmRepository/${CM_S_NAME}" 48 | CM_VALUES="--values=${CONFIG}/envs/cert-man_values.yaml" 49 | 50 | # Prometheus 51 | PM_VER="19.2.3" 52 | PM_URL="https://prometheus-community.github.io/helm-charts" 53 | PM_NAME="kube-prometheus-stack" 54 | PM_S_NAME="prometheus-community" 55 | PM_RNAME="kube-prom-stack" 56 | PM_NAMESPACE="${FLUX_NS}" 57 | PM_TARGET_NAMESPACE="monitoring" 58 | PM_SOURCE="HelmRepository/${PM_S_NAME}" 59 | PM_VALUES="--values=${CONFIG}/envs/prometheus-values.yaml" 60 | 61 | # LOKI 62 | LO_VER="2.5.0" 63 | LO_URL="https://grafana.github.io/helm-charts" 64 | LO_NAME="loki-stack" 65 | LO_S_NAME="grafana" 66 | 
LO_RNAME="loki" 67 | LO_NAMESPACE="${FLUX_NS}" 68 | LO_TARGET_NAMESPACE="monitoring" 69 | LO_SOURCE="HelmRepository/${LO_S_NAME}" 70 | LO_VALUES="--values=${CONFIG}/envs/loki-values.yaml" 71 | 72 | # VELERO 73 | VE_VER="2.26.2" 74 | VE_URL="https://vmware-tanzu.github.io/helm-charts" 75 | VE_NAME="velero" 76 | VE_S_NAME="vmware-tanzu" 77 | VE_RNAME="velero" 78 | VE_NAMESPACE="${FLUX_NS}" 79 | VE_TARGET_NAMESPACE="velero" 80 | VE_SOURCE="HelmRepository/${VE_S_NAME}" 81 | VE_VALUES="--values=${CONFIG}/envs/velero-values.yaml" 82 | 83 | # K8s Dashboard 84 | DA_VER="5.0.5" 85 | DA_URL="https://kubernetes.github.io/dashboard/" 86 | DA_NAME="k8s-dash" 87 | DA_S_NAME="${DA_NAME}" 88 | DA_RNAME="kubernetes-dashboard" 89 | DA_NAMESPACE="${FLUX_NS}" 90 | DA_TARGET_NAMESPACE="${DA_NAME}" 91 | DA_SOURCE="HelmRepository/${DA_S_NAME}" 92 | DA_VALUES="--values=${CONFIG}/envs/k8s-dashboard-values.yaml" 93 | 94 | # ONEDEV 95 | ONEDEV_NAME="onedev" 96 | ONEDEV_TARGET_NAMESPACE="${ONEDEV_NAME}-prod" 97 | ONEDEV_VALUES_FILE="${CONFIG}/envs/${ONEDEV_NAME}.yaml" 98 | ONEDEV_MYSQL_VALUES_FILE="${CONFIG}/envs/${ONEDEV_NAME}-mysql.yaml" 99 | 100 | # WEBLATE 101 | WEBLATE_VER="0.4.6" 102 | WEBLATE_URL="https://helm.weblate.org" 103 | WEBLATE_NAME="weblate" 104 | WEBLATE_S_NAME="weblate" 105 | WEBLATE_NAMESPACE="${FLUX_NS}" 106 | WEBLATE_SOURCE="HelmRepository/${WEBLATE_S_NAME}" 107 | WEBLATE_RNAME="${WEBLATE_NAME}" 108 | WEBLATE_TARGET_NAMESPACE="${WEBLATE_NAME}-prod" 109 | WEBLATE_VALUES="--values=${CONFIG}/envs/${WEBLATE_NAME}.yaml" 110 | 111 | # Tigase 112 | TIGASE_NAME="tigase-server" 113 | TIGASE_TARGET_NAMESPACE="${TIGASE_NAME}-prod" 114 | TIGASE_VALUES_FILE="${CONFIG}/envs/${TIGASE_NAME}.yaml" 115 | TIGASE_MYSQL_VALUES_FILE="${CONFIG}/envs/${TIGASE_NAME}-mysql.yaml" 116 | 117 | # CoTURN 118 | COTURN_NAME="coturn" 119 | COTURN_TARGET_NAMESPACE="${COTURN_NAME}-prod" 120 | COTURN_VALUES_FILE="${CONFIG}/envs/${COTURN_NAME}.yaml" 121 | 122 | # MAILU 123 | MAILU_VER="0.3.1" 124 | 
MAILU_URL="https://mailu.github.io/helm-charts/" 125 | MAILU_NAME="mailu" 126 | MAILU_S_NAME="mailu" 127 | MAILU_NAMESPACE="${MAILU_NAME}-prod" 128 | MAILU_SOURCE="HelmRepository/${MAILU_S_NAME}" 129 | MAILU_RNAME="${MAILU_NAME}" 130 | MAILU_TARGET_NAMESPACE="${MAILU_NAMESPACE}" 131 | MAILU_VALUES="--values=${CONFIG}/envs/${MAILU_NAME}-values.yaml" 132 | 133 | # RANCHER 134 | RANCHER_VER="2.6.6" 135 | RANCHER_URL="https://releases.rancher.com/server-charts/latest" 136 | RANCHER_NAME="rancher" 137 | RANCHER_S_NAME="rancher" 138 | RANCHER_NAMESPACE="${FLUX_NS}" 139 | RANCHER_SOURCE="HelmRepository/${RANCHER_S_NAME}" 140 | RANCHER_RNAME="${RANCHER_NAME}" 141 | RANCHER_TARGET_NAMESPACE="${RANCHER_NAMESPACE}" 142 | 143 | # KILLBILL 144 | KILLBILL_NAME="killbill" 145 | KILLBILL_TARGET_NAMESPACE="${KILLBILL_NAME}-prod" 146 | KILLBILL_VALUES_FILE="${CONFIG}/envs/${KILLBILL_NAME}.yaml" 147 | KILLBILL_MYSQL_VALUES_FILE="${CONFIG}/envs/${KILLBILL_NAME}-mysql.yaml" -------------------------------------------------------------------------------- /scripts/cluster-longhorn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Scripts deploys longhorn to the cluster with some basic configuration 4 | # Web access is protected with user name and password. 
Both username 5 | # and password are generated during the script runtime using pwgen 6 | # and stored in the local filesystem for admin to use 7 | # Username and password are also stored in the Longhorn metadata 8 | # configuration encrypted using sealed secrets 9 | # 10 | 11 | source `dirname "$0"`/scripts-env-init.sh 12 | 13 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "No cluster repo dir!"; exit 1; } 14 | 15 | CL_SERV_NAME=${LH_NAME} 16 | CL_SERV_TNS=${LH_TARGET_NAMESPACE} 17 | CL_SERV_TYPE=${BASE} 18 | 19 | source ${SCRIPTS}/cluster-script-preprocess.sh $1 20 | 21 | name="${LH_S_NAME}" 22 | url="${LH_URL}" 23 | 24 | echo " ${BOLD}Adding ${name} source at ${url}${NORMAL}" 25 | ${SCRIPTS}/flux-create-source.sh ${name} ${url} && { 26 | update_repo "${LH_NAME}" 27 | wait_for_ready 5 28 | } 29 | 30 | NAME="${LH_NAME}" 31 | TNS=${LH_TARGET_NAMESPACE} 32 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 33 | u_name="" 34 | u_pass="" 35 | 36 | [[ "$1" == "-q" ]] || { 37 | echo -n "Provide Longhorn user name or ENTER to generate: "; read u_name 38 | echo -n "Provide Longhorn user password or ENTER to generate: "; read u_pass 39 | } 40 | [[ -z "${u_name}" ]] && u_name=`gen_token 8` 41 | USER_NAME=${u_name} 42 | [[ -z "${u_pass}" ]] && u_pass=`gen_token 24` 43 | USER_PASS=${u_pass} 44 | 45 | update_k8s_secrets "longhorn-user" ${USER_NAME} 46 | update_k8s_secrets "longhorn-pass" ${USER_PASS} 47 | 48 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 49 | #source ${HOME}/.oci/k8stests-secrets-keys || exit 1 50 | #S3_SECRET_KEY="${secret_key}" 51 | #S3_ACCESS_KEY="${access_key}" 52 | 53 | CL_DIR=`mkdir_ns ${BASE_DIR} ${TNS} ${FLUX_NS}` 54 | 55 | mkdir -p "${CL_DIR}/${NAME}" 56 | 57 | #kubectl create secret generic "s3-secrets" \ 58 | # --namespace "${LH_TARGET_NAMESPACE}" \ 59 | # --from-literal=AWS_ACCESS_KEY_ID="${S3_ACCESS_KEY}" \ 60 | # --from-literal=AWS_SECRET_ACCESS_KEY="${S3_SECRET_KEY}" \ 61 | # --dry-run=client -o yaml | kubeseal 
--cert="${SEALED_SECRETS_PUB_KEY}" \ 62 | # --format=yaml > "${CL_DIR}/${NAME}/s3-secrets-sealed.yaml" 63 | 64 | 65 | echo " ${BOLD}Deploying ${NAME}${NORMAL}" 66 | ${SCRIPTS}/flux-create-helmrel.sh \ 67 | "${LH_NAME}" \ 68 | "${LH_VER}" \ 69 | "${LH_RNAME}" \ 70 | "${LH_TARGET_NAMESPACE}" \ 71 | "${LH_NAMESPACE}" \ 72 | "${LH_SOURCE}" \ 73 | "${LH_VALUES}" --create-target-namespace --depends-on="${FLUX_NS}/${SS_NAME}" || exit 1 74 | 75 | update_chart_ns "${CL_DIR}/${NAME}/${NAME}.yaml" 76 | 77 | update_repo ${NAME} 78 | 79 | wait_for_ready 80 | 81 | echo " ${WARNING}Making oci storage class non-default${NORMAL}" 82 | kubectl patch storageclass oci -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}' 83 | kubectl patch storageclass oci -p '{"metadata": {"annotations":{"storageclass.beta.kubernetes.io/is-default-class":"false"}}}' 84 | 85 | if [ ! -z "${LH_S3_BACKUP_ACCESS_KEY}" ]; then 86 | kubectl create secret generic "aws-s3-backup" \ 87 | --namespace "${LH_TARGET_NAMESPACE}" \ 88 | --from-literal=AWS_ACCESS_KEY_ID="${LH_S3_BACKUP_ACCESS_KEY}" \ 89 | --from-literal=AWS_SECRET_ACCESS_KEY="${LH_S3_BACKUP_SECRET_KEY}" \ 90 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 91 | --format=yaml > "${CL_DIR}/${NAME}/aws-s3-backup-credentials-sealed.yaml" 92 | kubectl apply -f "${CL_DIR}/${NAME}/aws-s3-backup-credentials-sealed.yaml" 93 | fi 94 | 95 | AUTH_FILE="$TMP_DIR/auth" 96 | rm -f $AUTH_FILE 97 | echo "${USER_NAME}:$(openssl passwd -stdin -apr1 <<< ${USER_PASS})" >> $AUTH_FILE 98 | cat $AUTH_FILE 99 | kubectl -n longhorn-system create secret generic basic-auth --from-file=$AUTH_FILE 100 | kubectl -n longhorn-system get secret basic-auth -o yaml > ${CONFIG}/longhorn-basic-auth.yaml 101 | 102 | cat >> "${CL_DIR}/${NAME}/${NAME}-ingress.yaml" <> "${CL_DIR}/${NAME}/${NAME}-daily-backup.yaml" < "${cl_dir}/namespace.yaml" <> ${CONFIG}/k8s-secrets 126 | echo "${2}" >> ${CONFIG}/k8s-secrets 127 | } 128 | 129 | 
# Function generates random and secrure token or password, optionally 130 | # storring the token for a user in local filesystem using 131 | # `update_k8s_secrets` function. 132 | # $1 - optional token length, if not provided the len is 16 133 | # $2 - optional token name, if provided the token is stored using 134 | # `update_k8s_secrets` function. 135 | gen_token() { 136 | len=16 137 | [[ -z "$1" ]] || { len=$1; } 138 | t=`pwgen -s ${len} 1` 139 | [[ -z "$2" ]] || { update_k8s_secrets ${2} ${t}; } 140 | echo ${t} 141 | } 142 | 143 | # Function forwards port from ingress-nginx to service 144 | 145 | ingress_nginx_forward_port() { 146 | INGRESS_DIR=`mkdir_ns ${BASE_DIR} ${IN_TARGET_NAMESPACE} ${FLUX_NS}` 147 | port="$1" 148 | tns="$2" 149 | name="$3" 150 | yq e ".spec.values.tcp.str_${port} = \"${tns}/${name}:${port}\"" -i "${INGRESS_DIR}/${IN_NAME}/${IN_NAME}.yaml" 151 | sed -i'' -e "s/str_${port}/\!\!str ${port}/" "${INGRESS_DIR}/${IN_NAME}/${IN_NAME}.yaml" 152 | } 153 | 154 | -------------------------------------------------------------------------------- /scripts/cluster-weblate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script deploys Weblate on the k8s cluster. 
4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | INTERVAL="${DEF_INTERVAL}" 9 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 10 | 11 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | TNS=${WEBLATE_TARGET_NAMESPACE} 14 | 15 | if [ -z "${WEBLATE_SITE_TITLE}" ]; then 16 | echo -n "Provide Weblate site title: "; read o_key; 17 | [[ -z ${o_key} ]] || WEBLATE_SITE_TITLE=${o_key} 18 | fi 19 | if [ -z "${WEBLATE_SITE_DOMAIN}" ]; then 20 | echo -n "Provide Weblate site domain: "; read o_key; 21 | [[ -z ${o_key} ]] || WEBLATE_SITE_DOMAIN=${o_key} 22 | fi 23 | if [ -z "${WEBLATE_ADMIN_EMAIL}" ]; then 24 | echo -n "Provide Weblate admin email: "; read o_key; 25 | [[ -z ${o_key} ]] || WEBLATE_ADMIN_EMAIL=${o_key} 26 | fi 27 | if [ -z "${WEBLATE_EMAIL_HOST}" ]; then 28 | echo -n "Provide Weblate email host: "; read o_key; 29 | [[ -z ${o_key} ]] || WEBLATE_EMAIL_HOST=${o_key} 30 | fi 31 | if [ -z "${WEBLATE_EMAIL_SSL}" ]; then 32 | echo -n "Provide Weblate email SSL (true/false): "; read o_key; 33 | if [ "true" == "${o_key}" ]; then 34 | WEBLATE_EMAIL_SSL=true 35 | else 36 | WEBLATE_EMAIL_SSL=false 37 | fi 38 | fi 39 | if [ -z "${WEBLATE_EMAIL_USER}" ]; then 40 | echo -n "Provide Weblate email user: "; read o_key; 41 | [[ -z ${o_key} ]] || WEBLATE_EMAIL_USER=${o_key} 42 | if [ -z "${WEBLATE_EMAIL_PASSWORD}" ]; then 43 | echo -n "Provide Weblate email password: "; read p_key; 44 | [[ -z ${p_key} ]] || WEBLATE_EMAIL_PASSWORD=${p_key} 45 | fi 46 | fi 47 | if [ -z "${WEBLATE_EMAIL_ADDRESS}" ]; then 48 | echo -n "Provide Weblate email address: "; read o_key; 49 | [[ -z ${o_key} ]] || WEBLATE_EMAIL_ADDRESS=${o_key} 50 | fi 51 | 52 | if [ -z "${WEBLATE_SOCIAL_AUTH_GITHUB_KEY}" ]; then 53 | echo -n "Provide GitHub OAuth key: "; read o_key; 54 | [[ -z ${o_key} ]] || WEBLATE_SOCIAL_AUTH_GITHUB_KEY=${o_key} 55 | 56 | if [ ! 
-z "${WEBLATE_SOCIAL_AUTH_GITHUB_KEY}" ]; then 57 | if [ -z "${WEBLATE_SOCIAL_AUTH_GITHUB_SECRET}" ]; then 58 | echo -n "Provide GitHub OAuth secret: "; read s_key; 59 | [[ -z ${s_key} ]] || WEBLATE_SOCIAL_AUTH_GITHUB_SECRET=${s_key} 60 | fi 61 | fi 62 | fi 63 | 64 | if [ -z "${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_KEY}" ]; then 65 | echo -n "Provide GitHubOrg OAuth key: "; read o_key; 66 | [[ -z ${o_key} ]] || WEBLATE_SOCIAL_AUTH_GITHUB_ORG_KEY=${o_key} 67 | 68 | if [ ! -z "${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_KEY}" ]; then 69 | if [ -z "${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_SECRET}" ]; then 70 | echo -n "Provide GitHubOrg OAuth secret: "; read s_key; 71 | [[ -z ${s_key} ]] || WEBLATE_SOCIAL_AUTH_GITHUB_ORG_SECRET=${s_key} 72 | fi 73 | if [ -z "${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_NAME}" ]; then 74 | echo -n "Provide GitHubOrg OAuth organization name: "; read n_key; 75 | [[ -z ${n_key} ]] || WEBLATE_SOCIAL_AUTH_GITHUB_ORG_NAME=${n_key} 76 | fi 77 | fi 78 | fi 79 | 80 | echo " ${BOLD}Adding Weblate helm chart${NORMAL}" 81 | 82 | ${SCRIPTS}/flux-create-source.sh ${WEBLATE_S_NAME} ${WEBLATE_URL} 83 | 84 | update_kustomization ${BASE_DIR}/sources 85 | 86 | echo " ${BOLD}Preparing Weblate deployment${NORMAL}" 87 | 88 | NAME="${WEBLATE_NAME}" 89 | 90 | ${SCRIPTS}/flux-create-helmrel.sh app \ 91 | "${WEBLATE_NAME}" \ 92 | "${WEBLATE_VER}" \ 93 | "${WEBLATE_RNAME}" \ 94 | "${WEBLATE_TARGET_NAMESPACE}" \ 95 | "${WEBLATE_NAMESPACE}" \ 96 | "${WEBLATE_SOURCE}" \ 97 | "${WEBLATE_VALUES}" --create-target-namespace || exit 1 98 | 99 | VALUES="" 100 | VALUES="$VALUES\n adminEmail: \"${WEBLATE_ADMIN_EMAIL}\"" 101 | VALUES="$VALUES\n siteTitle: \"${WEBLATE_SITE_TITLE}\"" 102 | VALUES="$VALUES\n siteDomain: \"${WEBLATE_SITE_DOMAIN}\"" 103 | VALUES="$VALUES\n emailHost: \"${WEBLATE_EMAIL_HOST}\"" 104 | VALUES="$VALUES\n emailSSL: ${WEBLATE_EMAIL_SSL}" 105 | VALUES="$VALUES\n emailUser: \"${WEBLATE_EMAIL_USER}\"" 106 | VALUES="$VALUES\n emailPassword: \"${WEBLATE_EMAIL_PASSWORD}\"" 107 | 
VALUES="$VALUES\n serverEmail: \"${WEBLATE_EMAIL_ADDRESS}\"" 108 | VALUES="$VALUES\n defaultFromEmail: \"${WEBLATE_EMAIL_ADDRESS}\"" 109 | 110 | VALUES="$VALUES\n extraConfig:" 111 | [ -z "$WEBLATE_SOCIAL_AUTH_GITHUB_KEY" ] \ 112 | || VALUES="$VALUES\n WEBLATE_SOCIAL_AUTH_GITHUB_KEY=\"${WEBLATE_SOCIAL_AUTH_GITHUB_KEY}\"" 113 | [ -z "$WEBLATE_SOCIAL_AUTH_GITHUB_SECRET" ] \ 114 | || VALUES="$VALUES\n WEBLATE_SOCIAL_AUTH_GITHUB_SECRET=\"${WEBLATE_SOCIAL_AUTH_GITHUB_SECRET}\"" 115 | [ -z "$WEBLATE_SOCIAL_AUTH_GITHUB_ORG_KEY" ] \ 116 | || VALUES="$VALUES\n WEBLATE_SOCIAL_AUTH_GITHUB_ORG_KEY=\"${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_KEY}\"" 117 | [ -z "$WEBLATE_SOCIAL_AUTH_GITHUB_ORG_SECRET" ] \ 118 | || VALUES="$VALUES\n WEBLATE_SOCIAL_AUTH_GITHUB_ORG_SECRET=\"${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_SECRET}\"" 119 | [ -z "$WEBLATE_SOCIAL_AUTH_GITHUB_ORG_NAME" ] \ 120 | || VALUES="$VALUES\n WEBLATE_SOCIAL_AUTH_GITHUB_ORG_NAME=\"${WEBLATE_SOCIAL_AUTH_GITHUB_ORG_NAME}\"" 121 | 122 | VALUES="$VALUES\n ingress:" 123 | VALUES="$VALUES\n enabled: true" 124 | VALUES="$VALUES\n annotations:" 125 | VALUES="$VALUES\n cert-manager.io/cluster-issuer: \"letsencrypt\"" 126 | VALUES="$VALUES\n hosts:" 127 | VALUES="$VALUES\n - host: \"${WEBLATE_SITE_DOMAIN}\"" 128 | VALUES="$VALUES\n paths:" 129 | VALUES="$VALUES\n - /" 130 | VALUES="$VALUES\n tls:" 131 | VALUES="$VALUES\n - secretName: \"${WEBLATE_NAME}-translate\"" 132 | VALUES="$VALUES\n hosts:" 133 | VALUES="$VALUES\n - \"${WEBLATE_SITE_DOMAIN}\"" 134 | 135 | CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 136 | NAME=${WEBLATE_NAME} 137 | 138 | printf "\n$VALUES" >> "${CL_DIR}/${NAME}/${NAME}.yaml" 139 | 140 | echo " ${BOLD}Deploying changes${NORMAL}" 141 | 142 | #update_repo ${NAME} 143 | 144 | #wait_for_ready 145 | -------------------------------------------------------------------------------- /scripts/cluster-killbill.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script is 
the main entry point to setup KillBill & MySQL instances in a specified namespace. 4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | INTERVAL="${DEF_INTERVAL}" 9 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 10 | 11 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | echo " ${BOLD}Deploying KillBill${NORMAL}" 14 | 15 | TNS=${KILLBILL_TARGET_NAMESPACE} 16 | 17 | ${SCRIPTS}/cluster-tigase-helm-charts.sh 18 | 19 | KILLBILL_MYSQL_PASSWORD=`gen_token 8` 20 | KILLBILL_MYSQL_ROOT_PASSWORD=`gen_token 24` 21 | 22 | [[ "$1" == "-q" ]] || { 23 | echo -n "Provide MySQL user password: "; read u_pass 24 | echo -n "Provide MySQL root user password: "; read u_root_pass 25 | [[ -z ${u_pass} ]] || KILLBILL_MYSQL_PASSWORD=${u_pass} 26 | [[ -z ${u_root_pass} ]] || KILLBILL_MYSQL_ROOT_PASSWORD=${u_root_pass} 27 | } 28 | 29 | if [ -z "${KILLBILL_MYSQL_S3_BACKUP}" ]; then 30 | echo -n "Enable MySQL backup to S3: "; read e_key; 31 | [[ -z ${e_key} ]] || KILLBILL_MYSQL_S3_BACKUP=${e_key} 32 | if [ "true" == "${e_key}" ]; then 33 | echo -n "Provide MySQL S3 backup endpoint: "; read e_key; 34 | echo -n "Provide MySQL S3 backup bucket: "; read b_key; 35 | echo -n "Provide MySQL S3 backup prefix: "; read p_key; 36 | echo -n "Provide MySQL S3 backup access-key: "; read a_key; 37 | echo -n "Provide MySQL S3 backup secret-key: "; read s_key; 38 | echo -n "Provide MySQL S3 backup schedule: "; read sc_key; 39 | echo -n "Provide MySQL S3 backup expire in: "; read ei_key; 40 | [[ -z ${e_key} ]] || KILLBILL_MYSQL_S3_BACKUP_ENDPOINT=${e_key}; 41 | [[ -z ${b_key} ]] || KILLBILL_MYSQL_S3_BACKUP_BUCKET=${b_key}; 42 | [[ -z ${p_key} ]] || KILLBILL_MYSQL_S3_BACKUP_PREFIX=${p_key}; 43 | [[ -z ${a_key} ]] || KILLBILL_MYSQL_S3_BACKUP_ACCESS_KEY=${a_key} 44 | [[ -z ${s_key} ]] || KILLBILL_MYSQL_S3_BACKUP_SECRET_KEY=${s_key} 45 | [[ -z ${sc_key} ]] || KILLBILL_MYSQL_S3_BACKUP_SCHEDULE=${sc_key} 46 | [[ -z ${ei_key} ]] || 
KILLBILL_MYSQL_S3_BACKUP_EXPIRE_IN=${ei_key} 47 | fi 48 | fi 49 | 50 | if [ -z "${KILLBILL_DOMAIN}" ]; then 51 | echo -n "Provide KillBill default domain: "; read a_key; 52 | [[ -z ${a_key} ]] || KILLBILL_DOMAIN=${a_key} 53 | fi 54 | 55 | if [ -z "${KAUI_DOMAIN}" ]; then 56 | echo -n "Provide KAUI default domain: "; read a_key; 57 | [[ -z ${a_key} ]] || KAUI_DOMAIN=${a_key} 58 | fi 59 | 60 | echo " ${BOLD}Preparing MySQL deployment${NORMAL}" 61 | 62 | CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 63 | 64 | cat > "${CL_DIR}/${TNS}.yaml" << EOF 65 | apiVersion: v1 66 | kind: Namespace 67 | metadata: 68 | name: ${TNS} 69 | EOF 70 | 71 | MYSQL_NAME="${KILLBILL_NAME}-mysql" 72 | NAME="${MYSQL_NAME}" 73 | 74 | mkdir -p "${CL_DIR}/${NAME}" 75 | kubectl create secret generic "mysql-credentials" \ 76 | --namespace "${TNS}" \ 77 | --from-literal=mysql-password="${KILLBILL_MYSQL_PASSWORD}" \ 78 | --from-literal=mysql-root-password="${KILLBILL_MYSQL_ROOT_PASSWORD}" \ 79 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 80 | --format=yaml > "${CL_DIR}/${NAME}/killbill-mysql-credentials-sealed.yaml" 81 | 82 | if [ "${KILLBILL_MYSQL_S3_BACKUP}" == "true" ]; then 83 | kubectl create secret generic "mysql-backup-s3" \ 84 | --namespace "${TNS}" \ 85 | --from-literal=access-key="${KILLBILL_MYSQL_S3_BACKUP_ACCESS_KEY}" \ 86 | --from-literal=secret-key="${KILLBILL_MYSQL_S3_BACKUP_SECRET_KEY}" \ 87 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 88 | --format=yaml > "${CL_DIR}/${NAME}/killbill-mysql-backup-s3-sealed.yaml" 89 | fi 90 | 91 | VALUES=`export KILLBILL_MYSQL_S3_BACKUP="${KILLBILL_MYSQL_S3_BACKUP}" KILLBILL_MYSQL_S3_BACKUP_ENDPOINT="${KILLBILL_MYSQL_S3_BACKUP_ENDPOINT}" KILLBILL_MYSQL_S3_BACKUP_BUCKET="${KILLBILL_MYSQL_S3_BACKUP_BUCKET}" KILLBILL_MYSQL_S3_BACKUP_PREFIX="${KILLBILL_MYSQL_S3_BACKUP_PREFIX}" KILLBILL_MYSQL_S3_BACKUP_ACCESS_KEY="${KILLBILL_MYSQL_S3_BACKUP_ACCESS_KEY}" 
KILLBILL_MYSQL_S3_BACKUP_SCHEDULE="${KILLBILL_MYSQL_S3_BACKUP_SCHEDULE}" KILLBILL_MYSQL_S3_BACKUP_EXPIRE_IN="${KILLBILL_MYSQL_S3_BACKUP_EXPIRE_IN}" && envsubst < ${KILLBILL_MYSQL_VALUES_FILE}` 92 | 93 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 94 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 95 | kind: HelmRelease 96 | metadata: 97 | name: ${NAME} 98 | namespace: ${TNS} 99 | spec: 100 | releaseName: ${NAME} 101 | chart: 102 | spec: 103 | chart: mysql 104 | sourceRef: 105 | kind: GitRepository 106 | name: tigase 107 | namespace: flux-system 108 | interval: 1m 109 | interval: 5m 110 | values: 111 | auth: 112 | database: "killbill" 113 | username: "killbill" 114 | existingSecret: "mysql-credentials" 115 | 116 | updateStrategy: Recreate 117 | 118 | ${VALUES} 119 | EOF 120 | 121 | update_kustomization ${CL_DIR}/${NAME} 122 | 123 | echo " ${BOLD}Preparing KillBill deployment${NORMAL}" 124 | 125 | NAME="${KILLBILL_NAME}" 126 | 127 | mkdir -p "${CL_DIR}/${NAME}" 128 | 129 | VALUES=`export KILLBILL_DOMAIN="${KILLBILL_DOMAIN}" KAUI_DOMAIN="${KAUI_DOMAIN}" KILLBILL_DATABASE_HOST="${MYSQL_NAME}" && envsubst < ${KILLBILL_VALUES_FILE}` 130 | 131 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 132 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 133 | kind: HelmRelease 134 | metadata: 135 | name: ${NAME} 136 | namespace: ${TNS} 137 | spec: 138 | releaseName: ${NAME} 139 | chart: 140 | spec: 141 | chart: killbill 142 | sourceRef: 143 | kind: GitRepository 144 | name: tigase 145 | namespace: flux-system 146 | interval: 1m 147 | interval: 5m 148 | values: 149 | updateStrategy: Recreate 150 | 151 | ${VALUES} 152 | EOF 153 | 154 | update_kustomization ${CL_DIR}/${NAME} 155 | 156 | update_kustomization ${CL_DIR} 157 | 158 | update_kustomization ${APPS_DIR} 159 | 160 | echo " ${BOLD}Deploying changes${NORMAL}" 161 | 162 | update_repo ${NAME} 163 | 164 | wait_for_ready 165 | 166 | -------------------------------------------------------------------------------- 
/scripts/cluster-tigase-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script is the main entry point to setup Tigase & MySQL instances in a specified namespace. 4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | INTERVAL="${DEF_INTERVAL}" 9 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 10 | 11 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 12 | 13 | echo " ${BOLD}Deploying Tigase${NORMAL}" 14 | 15 | TNS=${TIGASE_TARGET_NAMESPACE} 16 | 17 | ${SCRIPTS}/cluster-tigase-helm-charts.sh 18 | 19 | TIGASE_MYSQL_PASSWORD=`gen_token 8` 20 | TIGASE_MYSQL_ROOT_PASSWORD=`gen_token 24` 21 | 22 | [[ "$1" == "-q" ]] || { 23 | echo -n "Provide MySQL user password: "; read u_pass 24 | echo -n "Provide MySQL root user password: "; read u_root_pass 25 | [[ -z ${u_pass} ]] || TIGASE_MYSQL_PASSWORD=${u_pass} 26 | [[ -z ${u_root_pass} ]] || TIGASE_MYSQL_ROOT_PASSWORD=${u_root_pass} 27 | } 28 | 29 | if [ -z "${TIGASE_MYSQL_S3_BACKUP}" ]; then 30 | echo -n "Enable MySQL backup to S3: "; read e_key; 31 | [[ -z ${e_key} ]] || TIGASE_MYSQL_S3_BACKUP=${e_key} 32 | if [ "true" == "${e_key}" ]; then 33 | echo -n "Provide MySQL S3 backup endpoint: "; read e_key; 34 | echo -n "Provide MySQL S3 backup bucket: "; read b_key; 35 | echo -n "Provide MySQL S3 backup prefix: "; read p_key; 36 | echo -n "Provide MySQL S3 backup access-key: "; read a_key; 37 | echo -n "Provide MySQL S3 backup secret-key: "; read s_key; 38 | echo -n "Provide MySQL S3 backup schedule: "; read sc_key; 39 | echo -n "Provide MySQL S3 backup expire in: "; read ei_key; 40 | [[ -z ${e_key} ]] || TIGASE_MYSQL_S3_BACKUP_ENDPOINT=${e_key}; 41 | [[ -z ${b_key} ]] || TIGASE_MYSQL_S3_BACKUP_BUCKET=${b_key}; 42 | [[ -z ${p_key} ]] || TIGASE_MYSQL_S3_BACKUP_PREFIX=${p_key}; 43 | [[ -z ${a_key} ]] || TIGASE_MYSQL_S3_BACKUP_ACCESS_KEY=${a_key} 44 | [[ -z ${s_key} ]] || 
TIGASE_MYSQL_S3_BACKUP_SECRET_KEY=${s_key} 45 | [[ -z ${sc_key} ]] || TIGASE_MYSQL_S3_BACKUP_SCHEDULE=${sc_key} 46 | [[ -z ${ei_key} ]] || TIGASE_MYSQL_S3_BACKUP_EXPIRE_IN=${ei_key} 47 | fi 48 | fi 49 | 50 | if [ -z "${TIGASE_DOMAIN}" ]; then 51 | echo -n "Provide Tigase default domain: "; read a_key; 52 | [[ -z ${a_key} ]] || TIGASE_DOMAIN=${a_key} 53 | fi 54 | 55 | if [ -z "${TIGASE_S3_UPLOAD_ENDPOINT}" ]; then 56 | echo -n "Provide Tigase S3 upload endpoint: "; read a_key; 57 | [[ -z ${a_key} ]] || TIGASE_S3_UPLOAD_ENDPOINT=${a_key}; 58 | fi 59 | 60 | if [ -z "${TIGASE_S3_UPLOAD_ACCESS_KEY}" ]; then 61 | echo -n "Provide Tigase S3 upload access-key: "; read a_key; 62 | echo -n "Provide Tigase S3 upload secret-key: "; read s_key; 63 | [[ -z ${a_key} ]] || TIGASE_S3_UPLOAD_ACCESS_KEY=${a_key} 64 | [[ -z ${s_key} ]] || TIGASE_S3_UPLOAD_SECRET_KEY=${s_key} 65 | fi 66 | 67 | if [ -z "${TIGASE_S3_UPLOAD_BUCKET}" ]; then 68 | echo -n "Provide Tigase S3 upload bucket: "; read a_key; 69 | [[ -z ${a_key} ]] || TIGASE_S3_UPLOAD_BUCKET=${a_key}; 70 | fi 71 | 72 | if [ -z "${TIGASE_S3_UPLOAD_PATH_STYLE}" ]; then 73 | echo -n "Provide Tigase S3 upload path style: "; read a_key; 74 | [[ -z ${a_key} ]] || TIGASE_S3_UPLOAD_PATH_STYLE=${a_key}; 75 | fi 76 | 77 | echo " ${BOLD}Preparing MySQL deployment${NORMAL}" 78 | 79 | CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 80 | 81 | cat > "${CL_DIR}/${TNS}.yaml" << EOF 82 | apiVersion: v1 83 | kind: Namespace 84 | metadata: 85 | name: ${TNS} 86 | EOF 87 | 88 | MYSQL_NAME="${TIGASE_NAME}-mysql" 89 | NAME="${MYSQL_NAME}" 90 | 91 | mkdir -p "${CL_DIR}/${NAME}" 92 | kubectl create secret generic "mysql-credentials" \ 93 | --namespace "${TNS}" \ 94 | --from-literal=mysql-password="${TIGASE_MYSQL_PASSWORD}" \ 95 | --from-literal=mysql-root-password="${TIGASE_MYSQL_ROOT_PASSWORD}" \ 96 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 97 | --format=yaml > "${CL_DIR}/${NAME}/tigase-mysql-credentials-sealed.yaml" 98 | 
99 | if [ "${TIGASE_MYSQL_S3_BACKUP}" == "true" ]; then 100 | kubectl create secret generic "mysql-backup-s3" \ 101 | --namespace "${TNS}" \ 102 | --from-literal=access-key="${TIGASE_MYSQL_S3_BACKUP_ACCESS_KEY}" \ 103 | --from-literal=secret-key="${TIGASE_MYSQL_S3_BACKUP_SECRET_KEY}" \ 104 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 105 | --format=yaml > "${CL_DIR}/${NAME}/tigase-mysql-backup-s3-sealed.yaml" 106 | fi 107 | 108 | VALUES=`export TIGASE_MYSQL_S3_BACKUP="${TIGASE_MYSQL_S3_BACKUP}" TIGASE_MYSQL_S3_BACKUP_ENDPOINT="${TIGASE_MYSQL_S3_BACKUP_ENDPOINT}" TIGASE_MYSQL_S3_BACKUP_BUCKET="${TIGASE_MYSQL_S3_BACKUP_BUCKET}" TIGASE_MYSQL_S3_BACKUP_PREFIX="${TIGASE_MYSQL_S3_BACKUP_PREFIX}" TIGASE_MYSQL_S3_BACKUP_ACCESS_KEY="${TIGASE_MYSQL_S3_BACKUP_ACCESS_KEY}" TIGASE_MYSQL_S3_BACKUP_SCHEDULE="${TIGASE_MYSQL_S3_BACKUP_SCHEDULE}" TIGASE_MYSQL_S3_BACKUP_EXPIRE_IN="${TIGASE_MYSQL_S3_BACKUP_EXPIRE_IN}" && envsubst < ${TIGASE_MYSQL_VALUES_FILE}` 109 | 110 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 111 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 112 | kind: HelmRelease 113 | metadata: 114 | name: ${NAME} 115 | namespace: ${TNS} 116 | spec: 117 | releaseName: ${NAME} 118 | chart: 119 | spec: 120 | chart: mysql 121 | sourceRef: 122 | kind: GitRepository 123 | name: tigase 124 | namespace: flux-system 125 | interval: 1m 126 | interval: 5m 127 | values: 128 | auth: 129 | database: "tigase" 130 | username: "tigase" 131 | existingSecret: "mysql-credentials" 132 | 133 | updateStrategy: Recreate 134 | 135 | ${VALUES} 136 | EOF 137 | 138 | update_kustomization ${CL_DIR}/${NAME} 139 | 140 | echo " ${BOLD}Preparing Tigase deployment${NORMAL}" 141 | 142 | NAME="${TIGASE_NAME}" 143 | 144 | mkdir -p "${CL_DIR}/${NAME}" 145 | 146 | if [ "${TIGASE_S3_UPLOAD}" == "true" ]; then 147 | kubectl create secret generic "tigase-s3-upload" \ 148 | --namespace "${TNS}" \ 149 | --from-literal="${TIGASE_S3_UPLOAD_ACCESS_KEY}"="${TIGASE_S3_UPLOAD_SECRET_KEY}" \ 
150 | --dry-run=client -o yaml | kubeseal --cert="${SEALED_SECRETS_PUB_KEY}" \ 151 | --format=yaml > "${CL_DIR}/${NAME}/tigase-s3-upload-sealed.yaml" 152 | fi 153 | 154 | VALUES=`export TIGASE_S3_UPLOAD_ACCESS_KEY="${TIGASE_S3_UPLOAD_ACCESS_KEY}" TIGASE_DOMAIN="${TIGASE_DOMAIN}" TIGASE_S3_UPLOAD_ENDPOINT="${TIGASE_S3_UPLOAD_ENDPOINT}" TIGASE_S3_UPLOAD_PATH_STYLE="${TIGASE_S3_UPLOAD_PATH_STYLE}" && envsubst < ${TIGASE_VALUES_FILE}` 155 | 156 | cat > "${CL_DIR}/${NAME}/${NAME}.yaml" << EOF 157 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 158 | kind: HelmRelease 159 | metadata: 160 | name: ${NAME} 161 | namespace: ${TNS} 162 | spec: 163 | releaseName: ${NAME} 164 | chart: 165 | spec: 166 | chart: tigase-xmpp-server 167 | sourceRef: 168 | kind: GitRepository 169 | name: tigase 170 | namespace: flux-system 171 | interval: 1m 172 | interval: 5m 173 | values: 174 | database: 175 | type: "mysql" 176 | host: "tigase-server-mysql" 177 | user: "tigase" 178 | secret: "mysql-credentials" 179 | secretPasswordKey: "mysql-password" 180 | 181 | updateStrategy: Recreate 182 | 183 | ${VALUES} 184 | EOF 185 | 186 | update_kustomization ${CL_DIR}/${NAME} 187 | 188 | update_kustomization ${CL_DIR} 189 | 190 | update_kustomization ${APPS_DIR} 191 | 192 | for port in 5222 5223 5269 5280 5290 5291 8080 193 | do 194 | ingress_nginx_forward_port "$port" "$TNS" "$NAME-tigase-xmpp-server" 195 | done 196 | 197 | echo " ${BOLD}Deploying changes${NORMAL}" 198 | 199 | update_repo ${NAME} 200 | 201 | wait_for_ready 202 | 203 | -------------------------------------------------------------------------------- /scripts/cluster-mailu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script deploys Mailu on the k8s cluster. 
4 | # 5 | 6 | source `dirname "$0"`/scripts-env-init.sh 7 | 8 | cd ${CLUSTER_REPO_DIR} &> /dev/null || { echo "${ERROR}No cluster repo dir!${NORMAL}"; exit 1; } 9 | 10 | CL_SERV_NAME=${MAILU_NAME} 11 | CL_SERV_TNS=${MAILU_TARGET_NAMESPACE} 12 | CL_SERV_TYPE=${APPS} 13 | 14 | source ${SCRIPTS}/cluster-script-preprocess.sh $1 15 | 16 | source "${CONFIG}/envs/mailu.env" 17 | 18 | INTERVAL="${DEF_INTERVAL}" 19 | SEALED_SECRETS_PUB_KEY="pub-sealed-secrets-${CLUSTER_NAME}.pem" 20 | 21 | TNS=${MAILU_TARGET_NAMESPACE} 22 | 23 | if [ -z "${MAILU_DOMAIN}" ]; then 24 | echo -n "Provide domain: "; read h_key; 25 | [[ -z ${h_key} ]] || MAILU_DOMAIN=${h_key} 26 | fi 27 | 28 | if [ -z "${MAILU_HOSTNAMES}" ]; then 29 | MAILU_HOSTNAMES=() 30 | while : ; do 31 | echo -n "Provide hostname to host emails for, or empty to continue: "; read d_key; 32 | if [[ -z ${d_key} ]]; then 33 | break; 34 | else 35 | idx=$(( ${#MAILU_HOSTNAMES[@]} + 1 )); 36 | MAILU_HOSTNAMES[${idx}]="${d_key}"; 37 | echo "Hostnames: $idx" 38 | fi 39 | done 40 | fi 41 | 42 | if [ -z "${MAILU_ADMIN_USERNAME}" ]; then 43 | echo -n "Provide admin username or ENTER to generate: "; read u_key; 44 | [[ -z ${u_key} ]] && u_key=`gen_token 12` 45 | MAILU_ADMIN_USERNAME=${u_key} 46 | fi 47 | 48 | if [ -z "${MAILU_ADMIN_DOMAIN}" ]; then 49 | echo -n "Provide admin domain: "; read d_key; 50 | [[ -z ${d_key} ]] || MAILU_ADMIN_DOMAIN=${d_key} 51 | fi 52 | 53 | if [ -z "${MAILU_ADMIN_PASSWORD}" ]; then 54 | echo -n "Provide admin password or ENTER to generate: "; read p_key; 55 | [[ -z ${p_key} ]] && p_key=`gen_token 24` 56 | MAILU_ADMIN_PASSWORD=${p_key} 57 | fi 58 | 59 | update_k8s_secrets "mailu_admin_user" "${MAILU_ADMIN_USERNAME}@${MAILU_ADMIN_DOMAIN}" 60 | update_k8s_secrets "mailu_admin_pass" "${MAILU_ADMIN_PASSWORD}" 61 | 62 | if [ -z "${MAILU_SUBNET}" ]; then 63 | echo -n "Provide subnet from which to allow to accept connections: "; read s_key; 64 | [[ -z ${s_key} ]] || MAILU_SUBNET=${s_key} 65 | fi 66 | 67 | if [ -z 
"${MAILU_RELAY_HOST}" ]; then 68 | echo -n "Provide relay host for sending outgoing emails: "; read h_key; 69 | [[ -z ${h_key} ]] || MAILU_RELAY_HOST=${h_key} 70 | fi 71 | 72 | if [ ! -z "${MAILU_RELAY_HOST}" ]; then 73 | if [ -z "${MAILU_RELAY_USERNAME}" ]; then 74 | echo -n "Provide relay username: "; read u_key; 75 | [[ -z ${u_key} ]] || MAILU_RELAY_USERNAME=${u_key} 76 | fi 77 | 78 | if [ ! -z "${MAILU_RELAY_USERNAME}" ]; then 79 | if [ -z "${MAILU_RELAY_PASSWORD}" ]; then 80 | echo -n "Provide relay password: "; read p_key; 81 | [[ -z ${p_key} ]] || MAILU_RELAY_PASSWORD=${p_key} 82 | fi 83 | fi 84 | fi 85 | 86 | if [ -z "${MAILU_SECRET_KEY}" ]; then 87 | MAILU_SECRET_KEY=`gen_token 16` 88 | fi 89 | 90 | HOSTNAMES_COUNT=${#MAILU_HOSTNAMES[@]} 91 | if [ $HOSTNAMES_COUNT == 0 ]; then 92 | echo "No hostnames!" 93 | exit 1 94 | fi 95 | 96 | VALUES=$" subnet: ${MAILU_SUBNET}" 97 | VALUES="$VALUES\n secretKey: \"${MAILU_SECRET_KEY}\"\n domain: \"$MAILU_DOMAIN\"\n hostnames:" 98 | for vhost in "${MAILU_HOSTNAMES[@]}" 99 | do 100 | echo "$i" 101 | VALUES=$"$VALUES\n - \"$vhost\"" 102 | done 103 | 104 | VALUES="$VALUES\n initialAccount:" 105 | VALUES="$VALUES\n username: \"${MAILU_ADMIN_USERNAME}\"" 106 | VALUES="$VALUES\n domain: \"${MAILU_ADMIN_DOMAIN}\"" 107 | VALUES="$VALUES\n password: \"${MAILU_ADMIN_PASSWORD}\"" 108 | if [ ! -z "${MAILU_RELAY_HOST}" ]; then 109 | VALUES="$VALUES\n external_relay:" 110 | VALUES="$VALUES\n host: \"${MAILU_RELAY_HOST}\"" 111 | if [ ! -z "${MAILU_RELAY_USERNAME}" ]; then 112 | VALUES="$VALUES\n username: \"${MAILU_RELAY_USERNAME}\"" 113 | fi 114 | if [ ! 
-z "${MAILU_RELAY_PASSWORD}" ]; then 115 | VALUES="$VALUES\n password: \"${MAILU_RELAY_PASSWORD}\"" 116 | fi 117 | fi 118 | 119 | echo " ${BOLD}Adding Mailu helm chart${NORMAL}" 120 | 121 | ${SCRIPTS}/flux-create-source.sh ${MAILU_S_NAME} ${MAILU_URL} && { 122 | [ "$1" == "--no-commit" ] || [ "$2" == "--no-commit" ] || { 123 | update_repo "${MAILU_NAME}" 124 | wait_for_ready 5 125 | } 126 | } 127 | 128 | update_kustomization ${BASE_DIR}/sources 129 | 130 | NAME="${MAILU_NAME}" 131 | 132 | echo " ${BOLD}Preparing Mailu deployment${NORMAL}" 133 | 134 | [ "${MAILU_EXISTING_PVC}" == "true" ] && { 135 | 136 | echo " ${BOLD}Creating namespace${NORMAL}" 137 | CL_DIR=`create_ns ${APPS_DIR} ${MAILU_TARGET_NAMESPACE}` 138 | update_kustomization `dirname ${CL_DIR}` 139 | update_kustomization ${CL_DIR} 140 | 141 | [ "${1}" == "--update" ] || { 142 | 143 | update_repo ${NAME} 144 | sleep 20 145 | 146 | echo " ${BOLD}${MAILU_TARGET_NAMESPACE} namespace is prepared, you can now create PVC: mailu-pvc${NORMAL}" 147 | echo " ${BOLD}for mailu deployment. 
Press enter when ready.${NORMAL}" 148 | read abc 149 | 150 | #echo " ${BOLD}Prepare PVCs${NORMAL}" 151 | 152 | #${SCRIPTS}/create-longhorn-pvc.sh mailu-pvc ${MAILU_TARGET_NAMESPACE} 20Gi ${CL_DIR} 153 | #update_kustomization ${CL_DIR} 154 | #update_repo ${NAME} 155 | #sleep 30 156 | 157 | #exit 1 158 | 159 | } 160 | 161 | } 162 | 163 | MAILU_VALUES="" 164 | ${SCRIPTS}/flux-create-helmrel.sh app \ 165 | "${MAILU_NAME}" \ 166 | "${MAILU_VER}" \ 167 | "${MAILU_RNAME}" \ 168 | "${MAILU_TARGET_NAMESPACE}" \ 169 | "${MAILU_NAMESPACE}" \ 170 | "${MAILU_SOURCE}" \ 171 | "${MAILU_VALUES}" --create-target-namespace || exit 1 172 | 173 | CL_DIR=`mkdir_ns ${APPS_DIR} ${TNS} ${FLUX_NS}` 174 | 175 | update_chart_ns "${CL_DIR}/${NAME}/${NAME}.yaml" 176 | 177 | yq e -i ".spec.timeout = \"${MAILU_FLUXCD_TIMEOUT}\"" "${CL_DIR}/${NAME}/${NAME}.yaml" 178 | yq e -i ".spec.install.timeout = \"${MAILU_FLUXCD_TIMEOUT}\"" "${CL_DIR}/${NAME}/${NAME}.yaml" 179 | #yq e -i '.spec.install.disableWait = true' "${CL_DIR}/${NAME}/${NAME}.yaml" 180 | #yq e -i '.spec.secretRef.name = "regcred"' "${CL_DIR}/${NAME}/${NAME}.yaml" 181 | #yq e -i '.spec.accessFrom."namespaceSelectors"[0].matchLabels."kubernetes.io/metadata.name" = "flux-system"' "${CL_DIR}/${NAME}/${NAME}.yaml" 182 | 183 | echo -e " values:" >> "${CL_DIR}/${NAME}/${NAME}.yaml" 184 | 185 | cat "${CONFIG}/envs/${MAILU_NAME}-values.yaml" >> "${CL_DIR}/${NAME}/${NAME}.yaml" 186 | 187 | printf "$VALUES" >> "${CL_DIR}/${NAME}/${NAME}.yaml" 188 | 189 | [ "$1" == "--no-commit" ] || [ "$2" == "--no-commit" ] || { 190 | 191 | echo " ${BOLD}Deploying changes${NORMAL}" 192 | 193 | update_repo ${NAME} 194 | 195 | wait_for_ready 196 | 197 | m_domain="${MAILU_DOMAIN}" 198 | m_server="${MAILU_HOSTNAMES[0]}" 199 | CLUSTER_IP=`kubectl get ingress -n mailu-prod mailu-ingress -o jsonpath='{.status.loadBalancer.ingress[].ip}'` 200 | MAILU_IP=`kubectl get svc -n mailu-prod mailu-front-ext -o jsonpath='{.status.loadBalancer.ingress[].ip}'` 201 | 202 | if [ -n 
"${MAILU_DOMAIN_AWS_ZONE_ID}" ]; then 203 | echo " ${BOLD}Updating DNS for ${INFO}${m_domain}${NORMAL}" 204 | echo " ${BOLD}Pointing ${INFO}${m_domain}${NORMAL} -> ${INFO}${CLUSTER_IP}${NORMAL}" 205 | ${SCRIPTS}/aws-update-zone.sh "${MAILU_DOMAIN_AWS_ZONE_ID}" "${m_domain}" "${CLUSTER_IP}" "${AWS_PROFILE}" 206 | else 207 | echo " ${WARNING}Cannot automatically update DNS for domain and host. Make necessary adjustments:${NORMAL}" 208 | echo " ${BOLD}Point ${INFO}${m_domain}${NORMAL} -> ${INFO}${CLUSTER_IP}${NORMAL}" 209 | fi 210 | 211 | if [ -n "${MAILU_HOSTNAME_AWS_ZONE_ID}" ]; then 212 | echo " ${BOLD}Updating DNS for ${INFO}${m_server}${NORMAL}" 213 | echo " ${BOLD}Pointing ${INFO}${m_server}${NORMAL} -> ${INFO}${MAILU_IP}${NORMAL}" 214 | ${SCRIPTS}/aws-update-zone.sh "${MAILU_HOSTNAME_AWS_ZONE_ID}" "${m_server}" "${MAILU_IP}" "${AWS_PROFILE}" 215 | else 216 | echo " ${WARNING}Cannot automatically update DNS for domain and host. Make necessary adjustments:${NORMAL}" 217 | echo " ${BOLD}Point ${INFO}${m_server}${NORMAL} -> ${INFO}${MAILU_IP}${NORMAL}" 218 | fi 219 | } 220 | 221 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU AFFERO GENERAL PUBLIC LICENSE 2 | Version 3, 19 November 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU Affero General Public License is a free, copyleft license for 11 | software and other kinds of works, specifically designed to ensure 12 | cooperation with the community in the case of network server software. 13 | 14 | The licenses for most software and other practical works are designed 15 | to take away your freedom to share and change the works. 
By contrast, 16 | our General Public Licenses are intended to guarantee your freedom to 17 | share and change all versions of a program--to make sure it remains free 18 | software for all its users. 19 | 20 | When we speak of free software, we are referring to freedom, not 21 | price. Our General Public Licenses are designed to make sure that you 22 | have the freedom to distribute copies of free software (and charge for 23 | them if you wish), that you receive source code or can get it if you 24 | want it, that you can change the software or use pieces of it in new 25 | free programs, and that you know you can do these things. 26 | 27 | Developers that use our General Public Licenses protect your rights 28 | with two steps: (1) assert copyright on the software, and (2) offer 29 | you this License which gives you legal permission to copy, distribute 30 | and/or modify the software. 31 | 32 | A secondary benefit of defending all users' freedom is that 33 | improvements made in alternate versions of the program, if they 34 | receive widespread use, become available for other developers to 35 | incorporate. Many developers of free software are heartened and 36 | encouraged by the resulting cooperation. However, in the case of 37 | software used on network servers, this result may fail to come about. 38 | The GNU General Public License permits making a modified version and 39 | letting the public access it on a server without ever releasing its 40 | source code to the public. 41 | 42 | The GNU Affero General Public License is designed specifically to 43 | ensure that, in such cases, the modified source code becomes available 44 | to the community. It requires the operator of a network server to 45 | provide the source code of the modified version running there to the 46 | users of that server. Therefore, public use of a modified version, on 47 | a publicly accessible server, gives the public access to the source 48 | code of the modified version. 
49 | 50 | An older license, called the Affero General Public License and 51 | published by Affero, was designed to accomplish similar goals. This is 52 | a different license, not a version of the Affero GPL, but Affero has 53 | released a new version of the Affero GPL which permits relicensing under 54 | this license. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | TERMS AND CONDITIONS 60 | 61 | 0. Definitions. 62 | 63 | "This License" refers to version 3 of the GNU Affero General Public License. 64 | 65 | "Copyright" also means copyright-like laws that apply to other kinds of 66 | works, such as semiconductor masks. 67 | 68 | "The Program" refers to any copyrightable work licensed under this 69 | License. Each licensee is addressed as "you". "Licensees" and 70 | "recipients" may be individuals or organizations. 71 | 72 | To "modify" a work means to copy from or adapt all or part of the work 73 | in a fashion requiring copyright permission, other than the making of an 74 | exact copy. The resulting work is called a "modified version" of the 75 | earlier work or a work "based on" the earlier work. 76 | 77 | A "covered work" means either the unmodified Program or a work based 78 | on the Program. 79 | 80 | To "propagate" a work means to do anything with it that, without 81 | permission, would make you directly or secondarily liable for 82 | infringement under applicable copyright law, except executing it on a 83 | computer or modifying a private copy. Propagation includes copying, 84 | distribution (with or without modification), making available to the 85 | public, and in some countries other activities as well. 86 | 87 | To "convey" a work means any kind of propagation that enables other 88 | parties to make or receive copies. Mere interaction with a user through 89 | a computer network, with no transfer of a copy, is not conveying. 
90 | 91 | An interactive user interface displays "Appropriate Legal Notices" 92 | to the extent that it includes a convenient and prominently visible 93 | feature that (1) displays an appropriate copyright notice, and (2) 94 | tells the user that there is no warranty for the work (except to the 95 | extent that warranties are provided), that licensees may convey the 96 | work under this License, and how to view a copy of this License. If 97 | the interface presents a list of user commands or options, such as a 98 | menu, a prominent item in the list meets this criterion. 99 | 100 | 1. Source Code. 101 | 102 | The "source code" for a work means the preferred form of the work 103 | for making modifications to it. "Object code" means any non-source 104 | form of a work. 105 | 106 | A "Standard Interface" means an interface that either is an official 107 | standard defined by a recognized standards body, or, in the case of 108 | interfaces specified for a particular programming language, one that 109 | is widely used among developers working in that language. 110 | 111 | The "System Libraries" of an executable work include anything, other 112 | than the work as a whole, that (a) is included in the normal form of 113 | packaging a Major Component, but which is not part of that Major 114 | Component, and (b) serves only to enable use of the work with that 115 | Major Component, or to implement a Standard Interface for which an 116 | implementation is available to the public in source code form. A 117 | "Major Component", in this context, means a major essential component 118 | (kernel, window system, and so on) of the specific operating system 119 | (if any) on which the executable work runs, or a compiler used to 120 | produce the work, or an object code interpreter used to run it. 
121 | 122 | The "Corresponding Source" for a work in object code form means all 123 | the source code needed to generate, install, and (for an executable 124 | work) run the object code and to modify the work, including scripts to 125 | control those activities. However, it does not include the work's 126 | System Libraries, or general-purpose tools or generally available free 127 | programs which are used unmodified in performing those activities but 128 | which are not part of the work. For example, Corresponding Source 129 | includes interface definition files associated with source files for 130 | the work, and the source code for shared libraries and dynamically 131 | linked subprograms that the work is specifically designed to require, 132 | such as by intimate data communication or control flow between those 133 | subprograms and other parts of the work. 134 | 135 | The Corresponding Source need not include anything that users 136 | can regenerate automatically from other parts of the Corresponding 137 | Source. 138 | 139 | The Corresponding Source for a work in source code form is that 140 | same work. 141 | 142 | 2. Basic Permissions. 143 | 144 | All rights granted under this License are granted for the term of 145 | copyright on the Program, and are irrevocable provided the stated 146 | conditions are met. This License explicitly affirms your unlimited 147 | permission to run the unmodified Program. The output from running a 148 | covered work is covered by this License only if the output, given its 149 | content, constitutes a covered work. This License acknowledges your 150 | rights of fair use or other equivalent, as provided by copyright law. 151 | 152 | You may make, run and propagate covered works that you do not 153 | convey, without conditions so long as your license otherwise remains 154 | in force. 
You may convey covered works to others for the sole purpose 155 | of having them make modifications exclusively for you, or provide you 156 | with facilities for running those works, provided that you comply with 157 | the terms of this License in conveying all material for which you do 158 | not control copyright. Those thus making or running the covered works 159 | for you must do so exclusively on your behalf, under your direction 160 | and control, on terms that prohibit them from making any copies of 161 | your copyrighted material outside their relationship with you. 162 | 163 | Conveying under any other circumstances is permitted solely under 164 | the conditions stated below. Sublicensing is not allowed; section 10 165 | makes it unnecessary. 166 | 167 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 168 | 169 | No covered work shall be deemed part of an effective technological 170 | measure under any applicable law fulfilling obligations under article 171 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 172 | similar laws prohibiting or restricting circumvention of such 173 | measures. 174 | 175 | When you convey a covered work, you waive any legal power to forbid 176 | circumvention of technological measures to the extent such circumvention 177 | is effected by exercising rights under this License with respect to 178 | the covered work, and you disclaim any intention to limit operation or 179 | modification of the work as a means of enforcing, against the work's 180 | users, your or third parties' legal rights to forbid circumvention of 181 | technological measures. 182 | 183 | 4. Conveying Verbatim Copies. 
184 | 185 | You may convey verbatim copies of the Program's source code as you 186 | receive it, in any medium, provided that you conspicuously and 187 | appropriately publish on each copy an appropriate copyright notice; 188 | keep intact all notices stating that this License and any 189 | non-permissive terms added in accord with section 7 apply to the code; 190 | keep intact all notices of the absence of any warranty; and give all 191 | recipients a copy of this License along with the Program. 192 | 193 | You may charge any price or no price for each copy that you convey, 194 | and you may offer support or warranty protection for a fee. 195 | 196 | 5. Conveying Modified Source Versions. 197 | 198 | You may convey a work based on the Program, or the modifications to 199 | produce it from the Program, in the form of source code under the 200 | terms of section 4, provided that you also meet all of these conditions: 201 | 202 | a) The work must carry prominent notices stating that you modified 203 | it, and giving a relevant date. 204 | 205 | b) The work must carry prominent notices stating that it is 206 | released under this License and any conditions added under section 207 | 7. This requirement modifies the requirement in section 4 to 208 | "keep intact all notices". 209 | 210 | c) You must license the entire work, as a whole, under this 211 | License to anyone who comes into possession of a copy. This 212 | License will therefore apply, along with any applicable section 7 213 | additional terms, to the whole of the work, and all its parts, 214 | regardless of how they are packaged. This License gives no 215 | permission to license the work in any other way, but it does not 216 | invalidate such permission if you have separately received it. 
217 | 218 | d) If the work has interactive user interfaces, each must display 219 | Appropriate Legal Notices; however, if the Program has interactive 220 | interfaces that do not display Appropriate Legal Notices, your 221 | work need not make them do so. 222 | 223 | A compilation of a covered work with other separate and independent 224 | works, which are not by their nature extensions of the covered work, 225 | and which are not combined with it such as to form a larger program, 226 | in or on a volume of a storage or distribution medium, is called an 227 | "aggregate" if the compilation and its resulting copyright are not 228 | used to limit the access or legal rights of the compilation's users 229 | beyond what the individual works permit. Inclusion of a covered work 230 | in an aggregate does not cause this License to apply to the other 231 | parts of the aggregate. 232 | 233 | 6. Conveying Non-Source Forms. 234 | 235 | You may convey a covered work in object code form under the terms 236 | of sections 4 and 5, provided that you also convey the 237 | machine-readable Corresponding Source under the terms of this License, 238 | in one of these ways: 239 | 240 | a) Convey the object code in, or embodied in, a physical product 241 | (including a physical distribution medium), accompanied by the 242 | Corresponding Source fixed on a durable physical medium 243 | customarily used for software interchange. 
244 | 245 | b) Convey the object code in, or embodied in, a physical product 246 | (including a physical distribution medium), accompanied by a 247 | written offer, valid for at least three years and valid for as 248 | long as you offer spare parts or customer support for that product 249 | model, to give anyone who possesses the object code either (1) a 250 | copy of the Corresponding Source for all the software in the 251 | product that is covered by this License, on a durable physical 252 | medium customarily used for software interchange, for a price no 253 | more than your reasonable cost of physically performing this 254 | conveying of source, or (2) access to copy the 255 | Corresponding Source from a network server at no charge. 256 | 257 | c) Convey individual copies of the object code with a copy of the 258 | written offer to provide the Corresponding Source. This 259 | alternative is allowed only occasionally and noncommercially, and 260 | only if you received the object code with such an offer, in accord 261 | with subsection 6b. 262 | 263 | d) Convey the object code by offering access from a designated 264 | place (gratis or for a charge), and offer equivalent access to the 265 | Corresponding Source in the same way through the same place at no 266 | further charge. You need not require recipients to copy the 267 | Corresponding Source along with the object code. If the place to 268 | copy the object code is a network server, the Corresponding Source 269 | may be on a different server (operated by you or a third party) 270 | that supports equivalent copying facilities, provided you maintain 271 | clear directions next to the object code saying where to find the 272 | Corresponding Source. Regardless of what server hosts the 273 | Corresponding Source, you remain obligated to ensure that it is 274 | available for as long as needed to satisfy these requirements. 
275 | 276 | e) Convey the object code using peer-to-peer transmission, provided 277 | you inform other peers where the object code and Corresponding 278 | Source of the work are being offered to the general public at no 279 | charge under subsection 6d. 280 | 281 | A separable portion of the object code, whose source code is excluded 282 | from the Corresponding Source as a System Library, need not be 283 | included in conveying the object code work. 284 | 285 | A "User Product" is either (1) a "consumer product", which means any 286 | tangible personal property which is normally used for personal, family, 287 | or household purposes, or (2) anything designed or sold for incorporation 288 | into a dwelling. In determining whether a product is a consumer product, 289 | doubtful cases shall be resolved in favor of coverage. For a particular 290 | product received by a particular user, "normally used" refers to a 291 | typical or common use of that class of product, regardless of the status 292 | of the particular user or of the way in which the particular user 293 | actually uses, or expects or is expected to use, the product. A product 294 | is a consumer product regardless of whether the product has substantial 295 | commercial, industrial or non-consumer uses, unless such uses represent 296 | the only significant mode of use of the product. 297 | 298 | "Installation Information" for a User Product means any methods, 299 | procedures, authorization keys, or other information required to install 300 | and execute modified versions of a covered work in that User Product from 301 | a modified version of its Corresponding Source. The information must 302 | suffice to ensure that the continued functioning of the modified object 303 | code is in no case prevented or interfered with solely because 304 | modification has been made. 
305 | 306 | If you convey an object code work under this section in, or with, or 307 | specifically for use in, a User Product, and the conveying occurs as 308 | part of a transaction in which the right of possession and use of the 309 | User Product is transferred to the recipient in perpetuity or for a 310 | fixed term (regardless of how the transaction is characterized), the 311 | Corresponding Source conveyed under this section must be accompanied 312 | by the Installation Information. But this requirement does not apply 313 | if neither you nor any third party retains the ability to install 314 | modified object code on the User Product (for example, the work has 315 | been installed in ROM). 316 | 317 | The requirement to provide Installation Information does not include a 318 | requirement to continue to provide support service, warranty, or updates 319 | for a work that has been modified or installed by the recipient, or for 320 | the User Product in which it has been modified or installed. Access to a 321 | network may be denied when the modification itself materially and 322 | adversely affects the operation of the network or violates the rules and 323 | protocols for communication across the network. 324 | 325 | Corresponding Source conveyed, and Installation Information provided, 326 | in accord with this section must be in a format that is publicly 327 | documented (and with an implementation available to the public in 328 | source code form), and must require no special password or key for 329 | unpacking, reading or copying. 330 | 331 | 7. Additional Terms. 332 | 333 | "Additional permissions" are terms that supplement the terms of this 334 | License by making exceptions from one or more of its conditions. 335 | Additional permissions that are applicable to the entire Program shall 336 | be treated as though they were included in this License, to the extent 337 | that they are valid under applicable law. 
If additional permissions 338 | apply only to part of the Program, that part may be used separately 339 | under those permissions, but the entire Program remains governed by 340 | this License without regard to the additional permissions. 341 | 342 | When you convey a copy of a covered work, you may at your option 343 | remove any additional permissions from that copy, or from any part of 344 | it. (Additional permissions may be written to require their own 345 | removal in certain cases when you modify the work.) You may place 346 | additional permissions on material, added by you to a covered work, 347 | for which you have or can give appropriate copyright permission. 348 | 349 | Notwithstanding any other provision of this License, for material you 350 | add to a covered work, you may (if authorized by the copyright holders of 351 | that material) supplement the terms of this License with terms: 352 | 353 | a) Disclaiming warranty or limiting liability differently from the 354 | terms of sections 15 and 16 of this License; or 355 | 356 | b) Requiring preservation of specified reasonable legal notices or 357 | author attributions in that material or in the Appropriate Legal 358 | Notices displayed by works containing it; or 359 | 360 | c) Prohibiting misrepresentation of the origin of that material, or 361 | requiring that modified versions of such material be marked in 362 | reasonable ways as different from the original version; or 363 | 364 | d) Limiting the use for publicity purposes of names of licensors or 365 | authors of the material; or 366 | 367 | e) Declining to grant rights under trademark law for use of some 368 | trade names, trademarks, or service marks; or 369 | 370 | f) Requiring indemnification of licensors and authors of that 371 | material by anyone who conveys the material (or modified versions of 372 | it) with contractual assumptions of liability to the recipient, for 373 | any liability that these contractual assumptions directly impose on 
374 | those licensors and authors. 375 | 376 | All other non-permissive additional terms are considered "further 377 | restrictions" within the meaning of section 10. If the Program as you 378 | received it, or any part of it, contains a notice stating that it is 379 | governed by this License along with a term that is a further 380 | restriction, you may remove that term. If a license document contains 381 | a further restriction but permits relicensing or conveying under this 382 | License, you may add to a covered work material governed by the terms 383 | of that license document, provided that the further restriction does 384 | not survive such relicensing or conveying. 385 | 386 | If you add terms to a covered work in accord with this section, you 387 | must place, in the relevant source files, a statement of the 388 | additional terms that apply to those files, or a notice indicating 389 | where to find the applicable terms. 390 | 391 | Additional terms, permissive or non-permissive, may be stated in the 392 | form of a separately written license, or stated as exceptions; 393 | the above requirements apply either way. 394 | 395 | 8. Termination. 396 | 397 | You may not propagate or modify a covered work except as expressly 398 | provided under this License. Any attempt otherwise to propagate or 399 | modify it is void, and will automatically terminate your rights under 400 | this License (including any patent licenses granted under the third 401 | paragraph of section 11). 402 | 403 | However, if you cease all violation of this License, then your 404 | license from a particular copyright holder is reinstated (a) 405 | provisionally, unless and until the copyright holder explicitly and 406 | finally terminates your license, and (b) permanently, if the copyright 407 | holder fails to notify you of the violation by some reasonable means 408 | prior to 60 days after the cessation. 
409 | 410 | Moreover, your license from a particular copyright holder is 411 | reinstated permanently if the copyright holder notifies you of the 412 | violation by some reasonable means, this is the first time you have 413 | received notice of violation of this License (for any work) from that 414 | copyright holder, and you cure the violation prior to 30 days after 415 | your receipt of the notice. 416 | 417 | Termination of your rights under this section does not terminate the 418 | licenses of parties who have received copies or rights from you under 419 | this License. If your rights have been terminated and not permanently 420 | reinstated, you do not qualify to receive new licenses for the same 421 | material under section 10. 422 | 423 | 9. Acceptance Not Required for Having Copies. 424 | 425 | You are not required to accept this License in order to receive or 426 | run a copy of the Program. Ancillary propagation of a covered work 427 | occurring solely as a consequence of using peer-to-peer transmission 428 | to receive a copy likewise does not require acceptance. However, 429 | nothing other than this License grants you permission to propagate or 430 | modify any covered work. These actions infringe copyright if you do 431 | not accept this License. Therefore, by modifying or propagating a 432 | covered work, you indicate your acceptance of this License to do so. 433 | 434 | 10. Automatic Licensing of Downstream Recipients. 435 | 436 | Each time you convey a covered work, the recipient automatically 437 | receives a license from the original licensors, to run, modify and 438 | propagate that work, subject to this License. You are not responsible 439 | for enforcing compliance by third parties with this License. 440 | 441 | An "entity transaction" is a transaction transferring control of an 442 | organization, or substantially all assets of one, or subdividing an 443 | organization, or merging organizations. 
If propagation of a covered 444 | work results from an entity transaction, each party to that 445 | transaction who receives a copy of the work also receives whatever 446 | licenses to the work the party's predecessor in interest had or could 447 | give under the previous paragraph, plus a right to possession of the 448 | Corresponding Source of the work from the predecessor in interest, if 449 | the predecessor has it or can get it with reasonable efforts. 450 | 451 | You may not impose any further restrictions on the exercise of the 452 | rights granted or affirmed under this License. For example, you may 453 | not impose a license fee, royalty, or other charge for exercise of 454 | rights granted under this License, and you may not initiate litigation 455 | (including a cross-claim or counterclaim in a lawsuit) alleging that 456 | any patent claim is infringed by making, using, selling, offering for 457 | sale, or importing the Program or any portion of it. 458 | 459 | 11. Patents. 460 | 461 | A "contributor" is a copyright holder who authorizes use under this 462 | License of the Program or a work on which the Program is based. The 463 | work thus licensed is called the contributor's "contributor version". 464 | 465 | A contributor's "essential patent claims" are all patent claims 466 | owned or controlled by the contributor, whether already acquired or 467 | hereafter acquired, that would be infringed by some manner, permitted 468 | by this License, of making, using, or selling its contributor version, 469 | but do not include claims that would be infringed only as a 470 | consequence of further modification of the contributor version. For 471 | purposes of this definition, "control" includes the right to grant 472 | patent sublicenses in a manner consistent with the requirements of 473 | this License. 
474 | 475 | Each contributor grants you a non-exclusive, worldwide, royalty-free 476 | patent license under the contributor's essential patent claims, to 477 | make, use, sell, offer for sale, import and otherwise run, modify and 478 | propagate the contents of its contributor version. 479 | 480 | In the following three paragraphs, a "patent license" is any express 481 | agreement or commitment, however denominated, not to enforce a patent 482 | (such as an express permission to practice a patent or covenant not to 483 | sue for patent infringement). To "grant" such a patent license to a 484 | party means to make such an agreement or commitment not to enforce a 485 | patent against the party. 486 | 487 | If you convey a covered work, knowingly relying on a patent license, 488 | and the Corresponding Source of the work is not available for anyone 489 | to copy, free of charge and under the terms of this License, through a 490 | publicly available network server or other readily accessible means, 491 | then you must either (1) cause the Corresponding Source to be so 492 | available, or (2) arrange to deprive yourself of the benefit of the 493 | patent license for this particular work, or (3) arrange, in a manner 494 | consistent with the requirements of this License, to extend the patent 495 | license to downstream recipients. "Knowingly relying" means you have 496 | actual knowledge that, but for the patent license, your conveying the 497 | covered work in a country, or your recipient's use of the covered work 498 | in a country, would infringe one or more identifiable patents in that 499 | country that you have reason to believe are valid. 
500 | 501 | If, pursuant to or in connection with a single transaction or 502 | arrangement, you convey, or propagate by procuring conveyance of, a 503 | covered work, and grant a patent license to some of the parties 504 | receiving the covered work authorizing them to use, propagate, modify 505 | or convey a specific copy of the covered work, then the patent license 506 | you grant is automatically extended to all recipients of the covered 507 | work and works based on it. 508 | 509 | A patent license is "discriminatory" if it does not include within 510 | the scope of its coverage, prohibits the exercise of, or is 511 | conditioned on the non-exercise of one or more of the rights that are 512 | specifically granted under this License. You may not convey a covered 513 | work if you are a party to an arrangement with a third party that is 514 | in the business of distributing software, under which you make payment 515 | to the third party based on the extent of your activity of conveying 516 | the work, and under which the third party grants, to any of the 517 | parties who would receive the covered work from you, a discriminatory 518 | patent license (a) in connection with copies of the covered work 519 | conveyed by you (or copies made from those copies), or (b) primarily 520 | for and in connection with specific products or compilations that 521 | contain the covered work, unless you entered into that arrangement, 522 | or that patent license was granted, prior to 28 March 2007. 523 | 524 | Nothing in this License shall be construed as excluding or limiting 525 | any implied license or other defenses to infringement that may 526 | otherwise be available to you under applicable patent law. 527 | 528 | 12. No Surrender of Others' Freedom. 529 | 530 | If conditions are imposed on you (whether by court order, agreement or 531 | otherwise) that contradict the conditions of this License, they do not 532 | excuse you from the conditions of this License. 
If you cannot convey a 533 | covered work so as to satisfy simultaneously your obligations under this 534 | License and any other pertinent obligations, then as a consequence you may 535 | not convey it at all. For example, if you agree to terms that obligate you 536 | to collect a royalty for further conveying from those to whom you convey 537 | the Program, the only way you could satisfy both those terms and this 538 | License would be to refrain entirely from conveying the Program. 539 | 540 | 13. Remote Network Interaction; Use with the GNU General Public License. 541 | 542 | Notwithstanding any other provision of this License, if you modify the 543 | Program, your modified version must prominently offer all users 544 | interacting with it remotely through a computer network (if your version 545 | supports such interaction) an opportunity to receive the Corresponding 546 | Source of your version by providing access to the Corresponding Source 547 | from a network server at no charge, through some standard or customary 548 | means of facilitating copying of software. This Corresponding Source 549 | shall include the Corresponding Source for any work covered by version 3 550 | of the GNU General Public License that is incorporated pursuant to the 551 | following paragraph. 552 | 553 | Notwithstanding any other provision of this License, you have 554 | permission to link or combine any covered work with a work licensed 555 | under version 3 of the GNU General Public License into a single 556 | combined work, and to convey the resulting work. The terms of this 557 | License will continue to apply to the part which is the covered work, 558 | but the work with which it is combined will remain governed by version 559 | 3 of the GNU General Public License. 560 | 561 | 14. Revised Versions of this License. 562 | 563 | The Free Software Foundation may publish revised and/or new versions of 564 | the GNU Affero General Public License from time to time. 
Such new versions 565 | will be similar in spirit to the present version, but may differ in detail to 566 | address new problems or concerns. 567 | 568 | Each version is given a distinguishing version number. If the 569 | Program specifies that a certain numbered version of the GNU Affero General 570 | Public License "or any later version" applies to it, you have the 571 | option of following the terms and conditions either of that numbered 572 | version or of any later version published by the Free Software 573 | Foundation. If the Program does not specify a version number of the 574 | GNU Affero General Public License, you may choose any version ever published 575 | by the Free Software Foundation. 576 | 577 | If the Program specifies that a proxy can decide which future 578 | versions of the GNU Affero General Public License can be used, that proxy's 579 | public statement of acceptance of a version permanently authorizes you 580 | to choose that version for the Program. 581 | 582 | Later license versions may give you additional or different 583 | permissions. However, no additional obligations are imposed on any 584 | author or copyright holder as a result of your choosing to follow a 585 | later version. 586 | 587 | 15. Disclaimer of Warranty. 588 | 589 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 590 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 591 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 592 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 593 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 594 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 595 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 596 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 597 | 598 | 16. Limitation of Liability. 
599 | 600 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 601 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 602 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 603 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 604 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 605 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 606 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 607 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 608 | SUCH DAMAGES. 609 | 610 | 17. Interpretation of Sections 15 and 16. 611 | 612 | If the disclaimer of warranty and limitation of liability provided 613 | above cannot be given local legal effect according to their terms, 614 | reviewing courts shall apply local law that most closely approximates 615 | an absolute waiver of all civil liability in connection with the 616 | Program, unless a warranty or assumption of liability accompanies a 617 | copy of the Program in return for a fee. 618 | 619 | END OF TERMS AND CONDITIONS 620 | 621 | How to Apply These Terms to Your New Programs 622 | 623 | If you develop a new program, and you want it to be of the greatest 624 | possible use to the public, the best way to achieve this is to make it 625 | free software which everyone can redistribute and change under these terms. 626 | 627 | To do so, attach the following notices to the program. It is safest 628 | to attach them to the start of each source file to most effectively 629 | state the exclusion of warranty; and each file should have at least 630 | the "copyright" line and a pointer to where the full notice is found. 
631 | 632 | <one line to give the program's name and a brief idea of what it does.> 633 | Copyright (C) <year>  <name of author> 634 | 635 | This program is free software: you can redistribute it and/or modify 636 | it under the terms of the GNU Affero General Public License as published 637 | by the Free Software Foundation, either version 3 of the License, or 638 | (at your option) any later version. 639 | 640 | This program is distributed in the hope that it will be useful, 641 | but WITHOUT ANY WARRANTY; without even the implied warranty of 642 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 643 | GNU Affero General Public License for more details. 644 | 645 | You should have received a copy of the GNU Affero General Public License 646 | along with this program. If not, see <https://www.gnu.org/licenses/>. 647 | 648 | Also add information on how to contact you by electronic and paper mail. 649 | 650 | If your software can interact with users remotely through a computer 651 | network, you should also make sure that it provides a way for users to 652 | get its source. For example, if your program is a web application, its 653 | interface could display a "Source" link that leads users to an archive 654 | of the code. There are many ways you could offer source, and different 655 | solutions will be better for different programs; see section 13 for the 656 | specific requirements. 657 | 658 | You should also get your employer (if you work as a programmer) or school, 659 | if any, to sign a "copyright disclaimer" for the program, if necessary. 660 | For more information on this, and how to apply and follow the GNU AGPL, see 661 | <https://www.gnu.org/licenses/>. 662 | --------------------------------------------------------------------------------