├── .envrc ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug-repport.md │ ├── config.yml │ ├── feature-request.md │ └── question.md ├── PULL_REQUEST_TEMPLATE │ └── config.yml ├── pull_request_template.md └── workflows │ ├── ci.yml │ ├── custom-artifact.yml │ ├── deploy-wiab.yml │ ├── lint.yml │ └── offline.yml ├── .gitignore ├── .gitmodules ├── .travis.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── ansible ├── .gitignore ├── Makefile ├── README.md ├── admin_users.yml ├── ansible.cfg ├── backup_rabbitmq.yml ├── bootstrap.yml ├── cassandra-verify-ntp.yml ├── cassandra.yml ├── db-operations │ ├── README.md │ ├── cassandra_alter_keyspace_replication.yml │ ├── cassandra_cleanup.yml │ ├── cassandra_healthy.yml │ ├── cassandra_post_upgrade.yml │ ├── cassandra_pre_upgrade.yml │ ├── cassandra_restart.yml │ ├── cassandra_rolling_repair.yml │ ├── elasticsearch_joined.yml │ ├── elasticsearch_restart.yml │ ├── elasticsearch_stop.yml │ └── tasks │ │ ├── cassandra_cluster_healthy.yml │ │ ├── cassandra_down.yml │ │ ├── cassandra_manual_repair.yml │ │ ├── cassandra_remove_backup.yml │ │ ├── cassandra_remove_cron.yml │ │ ├── cassandra_remove_repair_and_daily_backup_cron.yml │ │ ├── cassandra_up.yml │ │ ├── cassandra_wait_ongoing_repair.yml │ │ ├── elasticsearch_cluster_healthy.yml │ │ ├── elasticsearch_down.yml │ │ ├── elasticsearch_shard_allocation.yml │ │ └── elasticsearch_up.yml ├── elasticsearch.yml ├── files │ ├── hetzner_server_libvirt_default_net.xml │ ├── hetzner_server_nftables.conf.j2 │ ├── hetzner_server_sshd_config │ ├── registry │ │ ├── .gitignore │ │ ├── images.sh │ │ ├── list_of_docker_images.txt │ │ ├── mk-certs │ │ ├── mk-sub-certificate │ │ ├── openssl.cnf │ │ ├── registry-run.sh │ │ └── upload_image.sh │ └── serve-assets.service ├── get-logs.yml ├── helm_external.yml ├── hetzner-single-deploy.yml ├── host_vars │ └── localhost │ │ └── python.yml ├── inventory │ ├── demo │ │ └── host.yml │ ├── offline │ │ ├── 99-static │ │ └── group_vars │ │ │ ├── all │ │ │ └── offline.yml │ │ │ └── demo │ │ │ └── offline.yml │ └── prod │ │ └── hosts.example.ini ├── iptables.yml ├── kube-minio-static-files.yml ├── kubernetes-fetch-kubeconfig.yml ├── kubernetes-renew-certs.yml ├── kubernetes.yml ├── kubernetes_logging.yml ├── logging.yml ├── minio.yml ├── ntp.yml ├── provision-sft.yml ├── rabbitmq.yml ├── registry.yml ├── roles │ ├── etcd-helpers │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── etcd-health.sh.j2 │ │ │ └── etcdctl3.sh.j2 │ ├── minio-static-files │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ ├── android.json │ │ │ └── ios.json │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── deeplink.html.j2 │ │ │ └── deeplink.json.j2 │ ├── rabbitmq-cluster │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── cluster.yml │ │ │ ├── config.yml │ │ │ ├── configure_dns.yml │ │ │ ├── create_users.yml │ │ │ ├── enable_ha_queues.yml │ │ │ ├── erlang_cookie.yml │ │ │ ├── hosts.yml │ │ │ ├── install.yml │ │ │ ├── join_cluster.yml │ │ │ ├── main.yml │ │ │ ├── service.yaml │ │ │ └── tls.yml │ │ └── templates │ │ │ ├── erlang.cookie.j2 │ │ │ └── etc │ │ │ ├── default │ │ │ └── rabbitmq-server.j2 │ │ │ └── rabbitmq │ │ │ └── rabbitmq.config.j2 │ └── systemd-coredump │ │ ├── defaults │ │ └── main.yml │ │ └── tasks │ │ └── main.yml ├── seed-offline-containerd.yml ├── seed-offline-docker.yml ├── setup-offline-sources.yml ├── sync_time.yml ├── tasks │ └── 
helm_external.yml ├── templates │ ├── elasticsearch.conf.j2 │ ├── helm_external.yaml.j2 │ ├── ntp.conf.j2 │ └── qradar.conf.j2 ├── tinc.yml └── wiab-demo │ ├── clean_cluster.yml │ ├── deploy_wiab.yml │ ├── download_artifact.yml │ ├── helm_install.yml │ ├── install_pkgs.yml │ ├── iptables_rules.yml │ ├── minikube_cluster.yml │ ├── setup_ssh.yml │ ├── verify_dns.yml │ ├── verify_wire_ip.yml │ └── wire_secrets.yml ├── bin ├── accept-invitation.sh ├── autodeploy.sh ├── bootstrap │ ├── README.md │ ├── init.sh │ └── inside.sh ├── debug_logs.sh ├── demo-setup.sh ├── deployment-info.sh ├── fix_default_router.sh ├── generate-image-list.sh ├── information-gathering.sh ├── logging.sh ├── offline-cluster.sh ├── offline-deploy.sh ├── offline-env.sh ├── offline-helm.sh ├── offline-secrets.sh ├── offline-vm-setup.sh ├── prod-init.sh ├── prod-setup.sh ├── secrets.sh ├── shellcheck.sh ├── test-aws-s3-auth-v4.sh └── wiab-demo │ ├── offline-env.sh │ └── offline_deploy_k8s.sh ├── default.nix ├── examples ├── control-planes-only-k8s │ ├── README.md │ └── terraform.tfvars ├── multi-instance-sft │ ├── README.md │ └── terraform.tfvars ├── multi-node-k8s-with-lb-and-dns │ ├── README.md │ ├── backend.tfvars │ ├── helm_vars │ │ ├── demo-smtp │ │ │ └── values.yaml │ │ ├── nginx-ingress-services │ │ │ └── values.yaml │ │ └── wire-server │ │ │ └── values.yaml │ ├── helmfile.yaml │ ├── inventory │ │ └── inventory.yml │ └── terraform.tfvars └── team-provisioning-qr-codes │ ├── README.md │ └── generate-user-pdf.sh ├── helm ├── Makefile └── README.md ├── nix ├── docker-alpine.nix ├── overlay.nix ├── pkgs │ ├── helm-mapkubeapis.nix │ ├── kubernetes-tools.nix │ └── wire-binaries.nix ├── scripts │ ├── create-build-entry.sh │ ├── create-container-dump.sh │ ├── create-offline-artifact.sh │ ├── generate-gpg1-key.sh │ ├── list-helm-containers.sh │ └── mirror-apt-jammy.sh ├── sources.json └── sources.nix ├── offline ├── cd.sh ├── coturn.md ├── default-build │ └── build.sh ├── demo-build │ ├── build.sh │ └── post_chart_process_1.sh ├── demo-wiab.md ├── docs_ubuntu_22.04.md ├── federation_preparation.md ├── k8ssandra_setup.md ├── ldap-scim-bridge.md ├── local_persistent_storage_k8s.md ├── min-build │ └── build.sh ├── pull_helm_charts_flow.md ├── rabbitmq_setup.md ├── single_hetzner_machine_installation.md ├── stackIT-wiab.md ├── tasks │ ├── build_adminhost_containers.sh │ ├── build_linux_pkgs.sh │ ├── post_chart_process_0.sh │ ├── pre_chart_process_0.sh │ ├── pre_clean_values_0.sh │ ├── proc_pull_charts.sh │ ├── proc_system_containers.sh │ ├── proc_wire_binaries.sh │ └── process_charts.sh ├── ubuntu_18_to_ubuntu_22_migration.md ├── upgrading-SFT_ONLY.md └── upgrading.md ├── terraform ├── .gitignore ├── README.md ├── environment │ ├── Makefile │ ├── aws.tf │ ├── hcloud.tf │ ├── hcloud.vars.tf │ ├── inventory.tf │ ├── kubernetes.cluster.tf │ ├── kubernetes.cluster.vars.tf │ ├── kubernetes.dns.tf │ ├── kubernetes.dns.vars.tf │ ├── kubernetes.inventory.tf │ ├── main.vars.tf │ ├── sft.inventory.tf │ ├── sft.tf │ ├── sft.vars.tf │ └── terraform.tf ├── examples │ ├── .gitignore │ ├── README.md │ ├── create-infrastructure.tf │ ├── inventory.tpl │ └── wire-server-deploy-offline-hetzner │ │ ├── .envrc │ │ ├── README.md │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── versions.tf └── modules │ ├── README.md │ ├── aws-ami-ubuntu-search │ ├── README.md │ ├── main.tf │ └── outputs.tf │ ├── aws-brig-prekey-lock-event-queue-email-sending │ ├── README.md │ ├── data.tf │ ├── locals.mailing.tf │ ├── main.tf │ ├── outputs.mailing.tf │ ├── outputs.tf │ ├── 
resources.dns.mailing.tf │ ├── resources.dynamodb.tf │ ├── resources.iam.mailing.tf │ ├── resources.iam.tf │ ├── resources.ses.mailing.tf │ ├── resources.sns.mailling.tf │ ├── resources.sqs.mailing.tf │ ├── resources.sqs.tf │ ├── variables.mailing.tf │ └── variables.tf │ ├── aws-cargohold-asset-storage │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── resources.iam.tf │ ├── resources.s3.tf │ ├── resources.security_groups.tf │ └── variables.tf │ ├── aws-dns-records │ ├── README.md │ ├── data.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── resources.route53.tf │ └── variables.tf │ ├── aws-network-load-balancer │ ├── README.md │ ├── data.tf │ ├── main.tf │ ├── outputs.tf │ ├── resources.lb.tf │ └── variables.tf │ ├── aws-terraform-state-share │ ├── README.md │ ├── main.tf │ ├── resources.tf │ └── variables.tf │ ├── aws-vpc-security-groups │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf │ ├── aws-vpc │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf │ ├── hetzner-kubernetes │ ├── load-balancer.locals.tf │ ├── load-balancer.resources.tf │ ├── load-balancer.variables.tf │ ├── locals.tf │ ├── machines.outputs.tf │ ├── machines.resources.tf │ ├── machines.variables.tf │ ├── network.resources.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ └── sft │ ├── dns.tf │ ├── outputs.tf │ ├── server.tf │ ├── srv-announcer-iam.tf │ ├── variables.tf │ └── versions.tf ├── utils ├── generate_graph.pl ├── rtpdelay_graph.py ├── rtpstreams_graph.py └── rtpstreams_summary.py └── values ├── account-pages ├── demo-values.example.yaml └── prod-values.example.yaml ├── aws-ingress └── demo-values.example.yaml ├── cassandra-external └── prod-values.example.yaml ├── coturn ├── demo-values.example.yaml ├── prod-secrets.example.yaml └── prod-values.example.yaml ├── databases-ephemeral ├── demo-values.example.yaml └── prod-values.example.yaml ├── demo-smtp ├── demo-values.example.yaml └── prod-values.example.yaml ├── elasticsearch-external └── prod-values.example.yaml ├── external-dns └── demo-values.example.yaml ├── fake-aws ├── demo-values.example.yaml └── prod-values.example.yaml ├── fluent-bit └── prod-values.example.yaml ├── ingress-nginx-controller ├── demo-values.example.yaml ├── hetzner-ci.example.yaml └── prod-values.example.yaml ├── k8ssandra-operator └── prod-values.example.yaml ├── k8ssandra-test-cluster └── prod-values.example.yaml ├── keycloakx └── prod-values.example.yaml ├── ldap-scim-bridge └── values-prod.example.yaml ├── metallb └── demo-values.example.yaml ├── minio-external └── prod-values.example.yaml ├── nginx-ingress-services ├── demo-secrets.example.yaml ├── demo-values.example.yaml ├── prod-secrets.example.yaml └── prod-values.example.yaml ├── outlook-addin └── prod-values.example.yaml ├── rabbitmq ├── demo-secrets.example.yaml ├── demo-values.example.yaml ├── prod-secrets.example.yaml └── prod-values.example.yaml ├── redis-ephemeral └── prod-values.example.yaml ├── restund └── prod-values.example.yaml ├── sftd ├── demo-values.example.yaml └── prod-values.example.yaml ├── smallstep-accomp ├── demo-values.example.yaml └── prod-values.example.yaml ├── step-certificates ├── demo-values.example.yaml └── prod-values.example.yaml ├── team-settings ├── demo-secrets.example.yaml ├── demo-values.example.yaml ├── prod-secrets.example.yaml └── prod-values.example.yaml ├── webapp ├── demo-values.example.yaml └── prod-values.example.yaml ├── wire-server-metrics └── demo-values.example.yaml └── wire-server ├── demo-secrets.example.yaml ├── 
demo-values.example.yaml ├── prod-secrets.example.yaml └── prod-values.example.yaml /.envrc: -------------------------------------------------------------------------------- 1 | nix-build $PWD/default.nix -A env --out-link .nix-env 2 | 3 | PATH_add ".nix-env/bin" 4 | 5 | export LOCALHOST_PYTHON="$PWD/.nix-env/bin/python" 6 | 7 | # source .profile from `$env`. 8 | # This is only used to set things interpolated by nix. 9 | # All *static* things should live inside .envrc. 10 | [[ -f ".nix-env/.profile" ]] && source_env ".nix-env/.profile" 11 | 12 | # allow local .envrc overrides 13 | [[ -f .envrc.local ]] && source_env .envrc.local 14 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # This file designates code owners for different parts of the repository 2 | 3 | # Define code owners for all files in the repository 4 | * @wireapp/customerops @julialongtin 5 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-repport.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: template to report a bug 4 | title: "Bug: [BUG TITLE]" 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | ### Basic information 10 | 11 | 12 | 13 | * On-premises: 14 | * Cloud-Provider: 15 | * Installation type: 16 | * Kubernetes version: 17 | * Helm version: 18 | * Installed with Kubespray: 19 | * (Helm) Charts version: 20 | * List of installed top-level charts: 21 | * Other related technologies + version: 22 | 23 | 24 | ### What is the expected result? 25 | 26 | 29 | 30 | 31 | ### What is the actual result? 32 | 33 | 37 | 38 | 39 | ### How to reproduce the issue? 40 | 41 | 45 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Template for a question around the topic of deploying wire-server 4 | title: "Feature: [FEATURE TITLE]" 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | 10 | ### What kind of feature are you looking for? 11 | 12 | 15 | 16 | 17 | ### In which scenario would this feature be helpful? 18 | 19 | 23 | 24 | 25 | ### How do you imagine that this feature is being used? 26 | 27 | 31 | 32 | 33 | ### How did you install Wire? 34 | 35 | 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: template for a question around the topic of deploying wire-server 4 | title: "Question: [QUESTION TITLE]" 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | ### Basic information 10 | 11 | 12 | 13 | * On-premises: 14 | * Cloud-Provider: 15 | * Installation type: 16 | * Kubernetes version: 17 | * Helm version: 18 | * Installed with Kubespray: 19 | * (Helm) Charts version: 20 | * List of installed top-level charts: 21 | * Other related technologies + version: 22 | 23 | 24 | ### How did you install Wire? 
25 | 26 | 32 | 33 | 34 | ### Question 35 | 36 | 40 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 4 | 5 | ### Change type 6 | 7 | 8 | 9 | * [ ] Fix 10 | * [ ] Feature 11 | * [ ] Documentation 12 | * [ ] Security / Upgrade 13 | 14 | ### Basic information 15 | 16 | * [ ] THIS CHANGE REQUIRES A DEPLOYMENT PACKAGE RELEASE 17 | * [ ] THIS CHANGE REQUIRES A WIRE-DOCS RELEASE 18 | 19 | ### Testing 20 | 21 | * [ ] I ran/applied the changes myself, in a test environment. 22 | * [ ] The CI job attached to this repo will test it for me. 23 | 24 | ### Tracking 25 | 26 | * [ ] I mentioned this PR in Jira, OR I mentioned the Jira ticket in this PR. 27 | * [ ] I mentioned this PR in one of the issues attached to one of our repositories. 28 | 29 | ### Knowledge Transfer 30 | * [ ] An Asciinema session is attached to the Jira ticket. 31 | 32 | ### Motivation 33 | 34 | 39 | 40 | 41 | ### Objective 42 | 43 | 47 | 48 | 49 | ### Reason 50 | 51 | 55 | 56 | 57 | ### Use case 58 | 59 | 63 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [master, develop] 4 | pull_request: 5 | branches: [master, develop] 6 | jobs: 7 | build: 8 | name: build 9 | runs-on: ${{ matrix.os }} 10 | strategy: 11 | matrix: 12 | os: 13 | - ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | submodules: true 18 | - uses: cachix/install-nix-action@v27 19 | - uses: cachix/cachix-action@v15 20 | with: 21 | name: wire-server 22 | signingKey: "${{ secrets.CACHIX_SIGNING_KEY }}" 23 | 24 | - name: Build the environment 25 | run: nix-build -A env 26 | - name: Install the environment 27 | run: nix-env -f . 
-A env -i 28 | - name: Install terraform 29 | uses: hashicorp/setup-terraform@v3 30 | with: 31 | terraform_version: "^1.3.7" 32 | terraform_wrapper: false 33 | - name: Check terraform init 34 | run: | 35 | cd terraform/environment 36 | terraform init --backend=false 37 | -------------------------------------------------------------------------------- /.github/workflows/deploy-wiab.yml: -------------------------------------------------------------------------------- 1 | name: Deploy on Hetzner WIAB setup 2 | on: 3 | workflow_run: 4 | workflows: ["Prepare custom offline package"] 5 | types: 6 | - completed 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | concurrency: 12 | group: autodeploy-script 13 | cancel-in-progress: false 14 | 15 | steps: 16 | # Step 1: Checkout the repository code 17 | - name: Checkout code 18 | uses: actions/checkout@v3 19 | 20 | # Step 2: Set up SSH key for remote access 21 | - name: Set up SSH key 22 | uses: webfactory/ssh-agent@v0.5.3 23 | with: 24 | ssh-private-key: ${{ secrets.WIAB_PRIVATE_SSH_KEY }} 25 | 26 | # Step 3: Get the latest commit SHA, for the artifact 27 | - name: Get latest commit SHA 28 | id: get_commit_sha 29 | run: | 30 | COMMIT_SHA=$(git rev-parse HEAD) 31 | echo "commit_sha=$COMMIT_SHA" >> $GITHUB_ENV 32 | 33 | # Step 4: Run the autodeploy script 34 | - name: Run Auto Deploy Script 35 | run: | 36 | cd bin 37 | ./autodeploy.sh --artifact-hash ${{ env.COMMIT_SHA }} --target-domain wiab-test-box.wire.link --force-redeploy 38 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [master, develop] 4 | pull_request: 5 | branches: [master, develop] 6 | jobs: 7 | build: 8 | name: build 9 | runs-on: ${{ matrix.os }} 10 | strategy: 11 | matrix: 12 | os: 13 | - ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | with: 17 | submodules: true 18 | - uses: cachix/install-nix-action@v27 19 | - uses: cachix/cachix-action@v15 20 | with: 21 | name: wire-server 22 | signingKey: "${{ secrets.CACHIX_SIGNING_KEY }}" 23 | 24 | - name: Lint 25 | run: make shellcheck ENV="no-env" 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.tgz 2 | *secrets.yaml 3 | *.key 4 | *.bak 5 | *.cert 6 | # Sometimes left behind as an artifact during CI runs 7 | /index.yaml 8 | secrets.yaml 9 | requirements.lock 10 | values/*/demo-values.override.yaml 11 | values/*/demo-secrets.override.yaml 12 | .DS_Store 13 | values-init-done 14 | # Emacs backup files 15 | *~ 16 | # Emacs autosave files 17 | \#*\# 18 | 19 | # Envrc local overrides 20 | .envrc.local 21 | 22 | # Nix-created result symlinks 23 | result 24 | result-* 25 | 26 | .nix-env 27 | 28 | # for bin/secrets.sh 29 | secrets_cache/ 30 | 31 | terraform.tfstate 32 | terraform.tfstate.backup 33 | kubeconfig.new 34 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | Thank you for considering contributing to the deployment options of wire-server. 4 | 5 | This is an open source project and we are happy to receive contributions! Improvements to our helm charts or documentation, submitting bugs or issues about e.g. 
possible incompatibilities or problems when using different versions or hosting on different platforms, and descriptions of alternative ways you installed wire-server are all valuable. 6 | 7 | ## Guidelines 8 | 9 | *Before we can accept your pull request, you have to sign a [CLA](https://cla-assistant.io/wireapp/wire-server)* 10 | 11 | If submitting pull requests, please follow these guidelines: 12 | 13 | * if you want to make larger changes, it might be best to first open an issue to discuss the change. 14 | * if helm charts are involved, 15 | * use the `./bin/update.sh ./charts/` script to ensure changes in a subchart (e.g. brig) are correctly propagated to the parent chart (e.g. wire-server) before linting or installing. 16 | * ensure they pass linting; you can check with `helm lint -f path/to/extra/values-file.yaml charts/mychart`. 17 | * If you can, try to also install the chart to see if it works the way you intended. 18 | 19 | If you find yourself wishing for a feature that doesn't exist, open an issue on our issues list on GitHub that describes the feature you would like to see, why you need it, and how it should work. 20 | 21 | Since our team is fairly small, we try to respond to issues and pull requests within a few days, but in some cases it may take up to a few weeks before you get a response. 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nixos/nix 2 | 3 | COPY . /wire-server-deploy 4 | 5 | RUN nix-env -iA nixpkgs.bash nixpkgs.git 6 | 7 | RUN nix-build /wire-server-deploy/default.nix -A env --out-link /.nix-env 8 | 9 | RUN rm -rf /wire-server-deploy 10 | 11 | ENV PATH="/.nix-env/bin:$PATH" 12 | ENV LOCALHOST_PYTHON="/.nix-env/bin/python" 13 | 14 | -------------------------------------------------------------------------------- /ansible/.gitignore: -------------------------------------------------------------------------------- 1 | hosts.ini 2 | artifacts 3 | secrets 4 | *.retry 5 | output 6 | roles-override 7 | -------------------------------------------------------------------------------- /ansible/admin_users.yml: -------------------------------------------------------------------------------- 1 | # Optional - support playbook/role 2 | # 3 | # creates admin users (with passwordless sudo) on machines 4 | # you probably need some other way (e.g. using passwords) to initially run this playbook. 5 | # See the `-k` and `-K` flags when running ansible-playbook. 
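# For example, an initial run with password-based authentication might look like
#   ansible-playbook -i hosts.ini admin_users.yml -k -K
# (the inventory path here is just an illustration)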
6 | # 7 | - hosts: all 8 | environment: "{{ proxy_env | default({}) }}" 9 | vars: 10 | admin_users: 11 | - username: admin 12 | fullname: Mister admin 13 | pubkey: "{{ lookup('file', 'files/ssh/admin.pub') }}" 14 | roles: 15 | - admin_user 16 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | pipelining = True 3 | control_path = /tmp/ansible-%%r@%%h:%%p 4 | 5 | [defaults] 6 | retry_files_enabled = False 7 | roles_path = ./roles-external:./roles:./roles-external/sft/roles:./roles-external/kubespray/roles 8 | 9 | gathering = smart 10 | 11 | host_key_checking = no 12 | 13 | interpreter_python = /usr/bin/python3 14 | 15 | 16 | [privilege_escalation] 17 | become = yes 18 | -------------------------------------------------------------------------------- /ansible/backup_rabbitmq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: rmq-cluster 3 | vars: 4 | backup_dir: "/path/to/backup" # need to pass this as an extra var 5 | tasks: 6 | - name: Enable RabbitMQ management plugin 7 | command: rabbitmq-plugins enable rabbitmq_management 8 | become: true 9 | 10 | - name: Ensure backup directory exists 11 | file: 12 | path: "{{ backup_dir }}" 13 | state: directory 14 | 15 | - name: Export RabbitMQ configurations 16 | command: rabbitmqadmin export "{{ backup_dir }}/definitions.json" 17 | 18 | - name: Get RabbitMQ node directory 19 | command: rabbitmqctl eval 'rabbit_mnesia:dir().' 20 | register: node_directory 21 | 22 | - name: Set RabbitMQ node directory path 23 | set_fact: 24 | rabbitmq_node_dir: "{{ node_directory.stdout | regex_replace('\"', '') }}" 25 | 26 | - name: Stop RabbitMQ service 27 | service: 28 | name: rabbitmq-server 29 | state: stopped 30 | 31 | - name: List contents of RabbitMQ node directory 32 | command: ls {{ rabbitmq_node_dir }} 33 | register: dir_contents 34 | 35 | - name: Back up RabbitMQ data directory 36 | command: tar cvf "{{ backup_dir }}/rabbitmq-backup.tgz" {{ rabbitmq_node_dir }} 37 | 38 | - name: Start RabbitMQ service 39 | service: 40 | name: rabbitmq-server 41 | state: started 42 | -------------------------------------------------------------------------------- /ansible/bootstrap.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: ./provision-sft.yml 2 | - import_playbook: ./kubernetes.yml 3 | -------------------------------------------------------------------------------- /ansible/cassandra-verify-ntp.yml: -------------------------------------------------------------------------------- 1 | # 2 | # This playbook helps you check if ntp has been properly setup on the host that Cassandra 3 | # is running on. 
Verifies that the daemon is running and that there is no more than 4 | `allowed_time_diff_between_servers_in_ms` diff between all servers 5 | # 6 | # NOTE: This playbook has been tested on Ubuntu and using ansible2.7 7 | # TODO: Adjust this to be run on _ALL_ hosts in the inventory 8 | # 9 | - hosts: cassandra 10 | any_errors_fatal: true 11 | environment: "{{ proxy_env | default({}) }}" 12 | roles: 13 | - role: ansible-ntp-verify -------------------------------------------------------------------------------- /ansible/cassandra.yml: -------------------------------------------------------------------------------- 1 | - hosts: cassandra 2 | any_errors_fatal: true 3 | environment: "{{ proxy_env | default({}) }}" 4 | vars: 5 | cassandra_keyspaces: 6 | - brig 7 | - galley 8 | - gundeck 9 | - spar 10 | # cassandra 3.11 doesn't support java 11 yet, use openjdk-8. 11 | java_packages: 12 | - openjdk-8-jre-headless 13 | roles: 14 | 15 | - role: ansible-role-ntp 16 | tags: 17 | - ntp 18 | when: not (offline|default(false)) 19 | 20 | - role: ansible-role-java 21 | tags: 22 | - java 23 | 24 | - role: ansible-cassandra 25 | tags: 26 | - cassandra 27 | 28 | - role: ansible-ntp-verify 29 | tags: 30 | - ntp 31 | when: not (offline|default(false)) 32 | tasks: 33 | # these are optional debug tasks to see that the cluster has come up successfully 34 | - shell: nodetool -Dcom.sun.jndi.rmiURLParsing=legacy status 35 | register: nodetool_status 36 | tags: 37 | - cassandra 38 | - debug 39 | - debug: var=nodetool_status.stdout_lines 40 | tags: 41 | - cassandra 42 | - debug 43 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_alter_keyspace_replication.yml: -------------------------------------------------------------------------------- 1 | # See e.g. https://docs.k8ssandra.io/tasks/migrate/ 2 | - name: 'alter keyspace replication' 3 | vars_prompt: 4 | - name: old 5 | prompt: "name of old datacenter" 6 | private: no 7 | - name: k8ssandra_dc_name 8 | prompt: "name of new datacenter in k8ssandra" 9 | private: no 10 | - name: phase 11 | prompt: "enter phase: BEFORE (before connecting to k8ssandra), TWO (for replicating to two datacentres once they are already connected)" 12 | private: no 13 | hosts: cassandra 14 | any_errors_fatal: yes 15 | tasks: 16 | - name: phase check 17 | fail: 18 | msg: "phase must be one of [BEFORE, TWO]" 19 | when: phase not in ["BEFORE", "TWO"] 20 | 21 | - action: ec2_metadata_facts 22 | 23 | - when: phase == "BEFORE" 24 | name: alter keyspace BEFORE 25 | shell: > 26 | /opt/cassandra/bin/cqlsh $(hostname) -e "ALTER KEYSPACE {{ item }} WITH replication = {'class': 'NetworkTopologyStrategy', '{{ old }}': 3}" 27 | loop: 28 | - "system_auth" 29 | - "system_traces" 30 | - "system_distributed" 31 | - "spar" 32 | - "brig" 33 | - "gundeck" 34 | - "galley" 35 | 36 | - when: phase == "BEFORE" 37 | debug: 38 | msg: Run a repair now using cassandra_rolling_repair.yml! 
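# (That rolling repair lives in this directory; a sketch of the invocation, with an assumed inventory path:
#  ansible-playbook -i hosts.ini cassandra_rolling_repair.yml)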
39 | 40 | - when: phase == "TWO" 41 | name: alter keyspace to replicate to TWO datacentres 42 | shell: > 43 | /opt/cassandra/bin/cqlsh $(hostname) -e "ALTER KEYSPACE {{ item }} WITH replication = {'class': 'NetworkTopologyStrategy', '{{ old }}': 3, '{{ k8ssandra_dc_name }}': 3}" 44 | loop: 45 | - "system_auth" 46 | - "system_traces" 47 | - "system_distributed" 48 | - "spar" 49 | - "brig" 50 | - "gundeck" 51 | - "galley" 52 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_cleanup.yml: -------------------------------------------------------------------------------- 1 | - name: 'Run "nodetool cleanup" serially (only necessary once after adding nodes to a cluster)' 2 | hosts: "cassandra" 3 | any_errors_fatal: yes 4 | serial: 1 5 | tasks: 6 | - include: tasks/cassandra_cluster_healthy.yml 7 | - name: Run nodetool cleanup - wait for up to 1h, poll every 10 sec 8 | shell: nodetool cleanup 9 | async: 3600 10 | poll: 10 11 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_healthy.yml: -------------------------------------------------------------------------------- 1 | - name: Check cluster is healthy 2 | hosts: cassandra 3 | any_errors_fatal: yes 4 | gather_facts: no 5 | tasks: 6 | - include: tasks/cassandra_cluster_healthy.yml 7 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_post_upgrade.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Follow the guidelines from DataStax for upgrades. 3 | # 4 | hosts: "cassandra" 5 | any_errors_fatal: yes 6 | gather_facts: no 7 | serial: 1 8 | vars: 9 | cluster_name: default 10 | vars_files: 11 | - roles-external/ansible-cassandra/defaults/main.yml 12 | tasks: 13 | - action: ec2_metadata_facts 14 | - include: tasks/cassandra_cluster_healthy.yml 15 | vars: 16 | cassandra_role: "cassandra_{{ cluster_name }}" 17 | # TODO: Adjust this value accordingly! 
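# (expected_num_schemas is the maximum number of distinct schema versions the included health check tolerates; 1 means all reachable nodes must agree on the schema.)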
18 | expected_num_schemas: 1 19 | 20 | - name: 'Cassandra: upgrade sstables' 21 | shell: nodetool upgradesstables 22 | 23 | - include: roles-external/ansible-cassandra/tasks/repairs_backups.yml 24 | vars: 25 | cassandra_cluster_name: "{{ cluster_name }}" 26 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_pre_upgrade.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure no ongoing repairs on any node and stop cronjobs 2 | hosts: cassandra 3 | gather_facts: yes 4 | vars_files: 5 | - roles/cassandra/defaults/main.yml 6 | 7 | tasks: 8 | # First let's ensure that are no repairs on _any_ nodes 9 | - include: tasks/cassandra_remove_cron.yml 10 | vars: 11 | cluster_name: default 12 | - include: tasks/cassandra_wait_ongoing_repair.yml 13 | 14 | - name: Prepare the nodes 15 | hosts: cassandra 16 | any_errors_fatal: yes 17 | gather_facts: no 18 | serial: 1 19 | tasks: 20 | - name: 'Cassandra: first upgrade sstables' 21 | shell: nodetool upgradesstables 22 | 23 | - name: 'Cassandra: run repairs' 24 | shell: nodetool repair -full -pr 2>&1 | systemd-cat -t cassandra_repair 25 | 26 | - include: tasks/cassandra_cluster_healthy.yml 27 | 28 | - name: 'Cassandra: backup the data' 29 | shell: /usr/local/bin/cassandra_backup_{{ cluster_name }} 2>&1 | systemd-cat -t cassandra_daily_backup 30 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_restart.yml: -------------------------------------------------------------------------------- 1 | - name: restart cassandra nodes 2 | hosts: "cassandra" 3 | any_errors_fatal: yes 4 | gather_facts: no 5 | serial: 1 6 | tasks: 7 | - include: tasks/cassandra_cluster_healthy.yml 8 | - include: tasks/cassandra_down.yml 9 | - include: tasks/cassandra_up.yml 10 | -------------------------------------------------------------------------------- /ansible/db-operations/cassandra_rolling_repair.yml: -------------------------------------------------------------------------------- 1 | # Remove repair crons on all nodes at once 2 | - name: 'Rolling repair' 3 | hosts: cassandra 4 | any_errors_fatal: yes 5 | tasks: 6 | # First let's ensure that are no repairs on _any_ nodes 7 | - include: tasks/cassandra_remove_repair_and_daily_backup_cron.yml 8 | vars: 9 | cluster_name: default 10 | - include: tasks/cassandra_wait_ongoing_repair.yml 11 | 12 | # do a rolling repair 13 | - name: 'Rolling repair' 14 | hosts: cassandra 15 | any_errors_fatal: yes 16 | serial: 1 17 | tasks: 18 | - include: tasks/cassandra_manual_repair.yml 19 | 20 | # run actual playbook again to re-enable cron jobs. 21 | - import_playbook: "cassandra.yml" 22 | -------------------------------------------------------------------------------- /ansible/db-operations/elasticsearch_joined.yml: -------------------------------------------------------------------------------- 1 | # 2 | # ansible-playbook elasticsearch_joined.yml -e "ip_to_check=1.2.3.4" 3 | # 4 | - name: Wait for a given new node to join the cluster and shard relocations to settle 5 | hosts: elasticsearch 6 | gather_facts: no 7 | any_errors_fatal: yes 8 | tasks: 9 | - fail: 10 | msg: You need to specify ip_to_check. See comment at the top of the playbook for usage. 
11 | when: not ip_to_check 12 | 13 | - name: 'Elasticsearch: Wait for HTTP port' 14 | wait_for: port={{ routing_table[elasticsearch_role].http.exposed }} 15 | 16 | - name: 'Elasticsearch: Wait for node discovery' 17 | shell: > 18 | set -o pipefail; 19 | curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_nodes|jq ".nodes|keys|length" 20 | args: 21 | executable: /bin/bash 22 | register: num_nodes 23 | until: 'num_nodes.stdout|int == groups[elasticsearch_role]|length' 24 | retries: 60 25 | delay: 5 26 | 27 | - name: 'check ip_to_check={{ ip_to_check }} is part of the cluster' 28 | shell: > 29 | set -o pipefail; 30 | curl -sSf 'http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cat/nodes?v&h=ip' | grep "{{ ip_to_check }}" 31 | args: 32 | executable: /bin/bash 33 | 34 | - include: tasks/elasticsearch_cluster_healthy.yml 35 | 36 | - name: 'Elasticsearch: Wait for shard relocation to finish' 37 | shell: > 38 | set -o pipefail; 39 | curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cluster/health | jq .relocating_shards 40 | args: 41 | executable: /bin/bash 42 | register: num_shards 43 | until: 'num_shards.stdout|int == 0' 44 | retries: 60 45 | delay: 15 46 | -------------------------------------------------------------------------------- /ansible/db-operations/elasticsearch_restart.yml: -------------------------------------------------------------------------------- 1 | hosts: elasticsearch 2 | serial: 1 3 | any_errors_fatal: yes 4 | tasks: 5 | - include: tasks/elasticsearch_cluster_healthy.yml 6 | - include: tasks/elasticsearch_down.yml 7 | - include: tasks/elasticsearch_up.yml 8 | -------------------------------------------------------------------------------- /ansible/db-operations/elasticsearch_stop.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Example: 3 | # ansible-playbook elasticsearch_stop.yml -e "ip_to_stop=1.2.3.4" 4 | # 5 | - name: checks 6 | hosts: localhost 7 | tasks: 8 | - fail: 9 | msg: "You need to specify ip_to_stop, it needs to be a valid ipv4. invalid:[{{ ip_to_stop }}] See comment at the top of the playbook for usage." 10 | when: not ip_to_stop | ipaddr 11 | 12 | - name: Stop elasticsearch node 13 | hosts: '{{ ip_to_stop }}' 14 | any_errors_fatal: yes 15 | serial: 1 16 | tasks: 17 | - include: tasks/elasticsearch_shard_allocation.yml 18 | vars: 19 | exclude: "{{ ansible_default_ipv4.address }}" 20 | 21 | - pause: 22 | seconds: 5 23 | 24 | - name: 'Elasticsearch: Wait for shard relocation to finish' 25 | shell: > 26 | set -o pipefail; 27 | curl -sSf http://localhost:9200/_cluster/health | jq .relocating_shards 28 | args: 29 | executable: /bin/bash 30 | register: num_shards 31 | until: 'num_shards.stdout|int == 0' 32 | retries: 200 33 | delay: 15 34 | 35 | - command: systemctl stop elasticsearch 36 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_cluster_healthy.yml: -------------------------------------------------------------------------------- 1 | - name: 'Cassandra: gather number of schemas' 2 | shell: nodetool describecluster | grep '[0-9a-f\]\{8\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{4\}-[0-9a-f]\{12\}' | wc -l 3 | register: num_schemas 4 | 5 | - name: 'Cassandra: check for schema disagreements' 6 | fail: msg="Schema disagreements. Please fix first." 
7 | when: 'expected_num_schemas is defined and (num_schemas.stdout|int > expected_num_schemas) or 8 | num_schemas.stdout|int > 1' 9 | 10 | - name: 'Cassandra: check more for schema disagreements' 11 | fail: msg="Schema disagreements. Please fix first." 12 | when: 'num_schemas.stdout.find("UNREACHABLE") != -1' 13 | 14 | # can't use 'var:' in a task 15 | - set_fact: 16 | desired_nodes: "{{ groups[cassandra_role|replace('_seed','')]|default([])|length + groups[cassandra_role|replace('_seed','') + '_seed']|length }}" 17 | 18 | - debug: var=desired_nodes 19 | 20 | - name: 'Cassandra: check that the desired number of nodes is up' 21 | shell: nodetool status | grep ^UN | wc -l 22 | register: num_nodes 23 | until: num_nodes.stdout|int == desired_nodes|int 24 | retries: 10 25 | delay: 15 26 | when: dry_run is not defined 27 | 28 | - debug: var=num_nodes.stdout 29 | when: dry_run is not defined 30 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_down.yml: -------------------------------------------------------------------------------- 1 | - name: 'Cassandra: draining node...' 2 | shell: nodetool drain 3 | 4 | - name: 'Cassandra: stopping the daemon' 5 | shell: systemctl stop cassandra 6 | ignore_errors: true 7 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_manual_repair.yml: -------------------------------------------------------------------------------- 1 | - name: Run repair - wait for up to 2h, poll every 10 sec 2 | # This is copied from the crontab in cassandra/tasks/cron 3 | shell: "flock -n /tmp/backup_repair_mutex /usr/local/bin/cassandra_repair_default | systemd-cat -t cassandra_repair/" 4 | async: 7200 5 | poll: 10 6 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_remove_backup.yml: -------------------------------------------------------------------------------- 1 | # 2 | # You will need to define: 3 | # - path_to_backup_folders (as defined by http://man7.org/linux/man-pages/man1/find.1.html) 4 | # 5 | - name: Show disk size before at /mnt 6 | shell: df -h /mnt 7 | register: df_size 8 | 9 | - name: Show current disk state 10 | debug: 11 | msg: "{{ df_size.stdout_lines }}" 12 | 13 | - name: Gather size before 14 | # -c simply gives you a grand total, just to have an idea of the difference 15 | # in size. 16 | # The last line will look like: " total" 17 | shell: du -c {{ path_to_backup_folders }} | tail -n 1 | awk '{ print $1 }' 18 | register: backup_folders_size 19 | 20 | - debug: 21 | msg: "Size of backup folder = {{ backup_folders_size.stdout }}" 22 | 23 | - name: Find all files in the backup folder 24 | shell: > 25 | find {{ path_to_backup_folders }} -type f 26 | register: files_to_delete 27 | ignore_errors: yes 28 | # If there are no files/directory, this fails but that's OK. 
Returns `[]` 29 | # python is hard but: https://docs.ansible.com/ansible/2.7/user_guide/playbooks_error_handling.html 30 | 31 | - name: Show files to be deleted 32 | debug: 33 | msg: "{{ files_to_delete.stdout_lines }}" 34 | 35 | - name: Delete all files 36 | file: 37 | path: "{{ item }}" 38 | state: absent 39 | with_items: "{{ files_to_delete.stdout_lines }}" 40 | 41 | - name: Gather size after 42 | # -c simply gives you a grand total, just to have an idea of the difference 43 | shell: du -c {{ path_to_backup_folders }} | tail -n 1 | awk '{ print $1 }' 44 | register: backup_folders_size 45 | 46 | - debug: 47 | msg: "Size of backup folders = {{ backup_folders_size.stdout }}" 48 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_remove_cron.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - debug: var=cassandra_cluster_name 3 | # Note that these should match the job names at roles/cassandra/tasks/cron.yml 4 | - name: 'Remove cassandra cronjobs' 5 | cron: 6 | name: "{{ item }}" 7 | state: absent 8 | with_items: 9 | - "cassandra_incremental_backup_{{ cassandra_cluster_name }}" 10 | - "cassandra_backup_{{ cassandra_cluster_name }}" 11 | - "cassandra_repair_{{ cassandra_cluster_name }}" 12 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_remove_repair_and_daily_backup_cron.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Remove cassandra cronjobs' 3 | cron: 4 | name: "{{ item }}" 5 | state: absent 6 | with_items: 7 | - "cassandra_repair_{{ cassandra_cluster_name }}" 8 | - "cassandra_backup_{{ cassandra_cluster_name }}" 9 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_up.yml: -------------------------------------------------------------------------------- 1 | - include: runit_up.yml service=cassandra 2 | 3 | - name: 'Cassandra: Waiting for thrift port' 4 | wait_for: > 5 | port={{ routing_table[cassandra_role].rpc.exposed }} 6 | host="{{ ansible_ec2_local_ipv4 }}" 7 | 8 | - name: 'Cassandra: Waiting for CQL port' 9 | wait_for: > 10 | port={{ routing_table[cassandra_role].native_transport.exposed }} 11 | host="{{ ansible_ec2_local_ipv4 }}" 12 | 13 | - include: cassandra_cluster_healthy.yml 14 | 15 | - pause: seconds={{ cassandra_wait_after_restart|default(120) }} 16 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/cassandra_wait_ongoing_repair.yml: -------------------------------------------------------------------------------- 1 | # Note that HintedHandoff is cassandra 3.x only while HintsDispatcher is cassandra 2.x 2 | # which is why the output of these will always be just 3 3 | # c.f.: 4 | # http://docs.datastax.com/en/cassandra/latest/cassandra/operations/opsRepairNodesTOC.html 5 | # http://docs.datastax.com/en/cassandra/latest/cassandra/tools/toolsTPstats.html 6 | - name: 'Cassandra: ensure that there are no repair operations by thread pool (cassandra 2.x and 3.x)' 7 | shell: nodetool tpstats | grep -E 'HintedHandoff|HintsDispatcher|ReadRepairStage|AntiEntropyStage' | awk '{print $2,$3}' 8 | register: repair_operations 9 | until: repair_operations.stdout == "0 0\n0 0\n0 0" 10 | retries: 30 11 | delay: 30 12 | 13 | # Fail if there are still ongoing repairs 14 | - name: 'Cassandra: ensure that there are no 
ongoing repairs (cassandra 2.x and 3.x)' 15 | shell: '! (nodetool tpstats | grep Repair#)' 16 | register: ongoing_repair 17 | until: ongoing_repair.rc == 0 and ongoing_repair.stdout == "" 18 | retries: 30 19 | delay: 30 20 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/elasticsearch_cluster_healthy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Elasticsearch: wait for the cluster to become green' 3 | shell: > 4 | curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cluster/health \ 5 | | jq ".status" 6 | register: health 7 | until: '"green" in health.stdout' 8 | retries: 120 9 | delay: 10 10 | when: dry_run is not defined 11 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/elasticsearch_down.yml: -------------------------------------------------------------------------------- 1 | - include: elasticsearch_shard_allocation.yml exclude={{ ansible_default_ipv4.address }} 2 | - command: systemctl stop elasticsearch 3 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/elasticsearch_shard_allocation.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Elasticsearch: toggle shard allocation' 3 | shell: > 4 | curl -sSf -XPUT http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_cluster/settings -d '{ 5 | "transient" : { 6 | "cluster.routing.allocation.exclude._ip": {% if exclude is defined %}"{{ exclude }}"{% else %}null{% endif %} 7 | } 8 | }' 9 | -------------------------------------------------------------------------------- /ansible/db-operations/tasks/elasticsearch_up.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: runit_up.yml service=elasticsearch 3 | 4 | - name: 'Elasticsearch: Wait for HTTP port' 5 | wait_for: port={{ routing_table[elasticsearch_role].http.exposed }} 6 | 7 | - name: 'Elasticsearch: Wait for node discovery' 8 | shell: > 9 | set -o pipefail; 10 | curl -sSf http://localhost:{{ routing_table[elasticsearch_role].http.exposed }}/_nodes|jq ".nodes|keys|length" 11 | args: 12 | executable: /bin/bash 13 | register: num_nodes 14 | until: 'num_nodes.stdout|int == groups.{{ elasticsearch_role }}|length' 15 | retries: 60 16 | delay: 5 17 | when: dry_run is not defined 18 | 19 | - include: elasticsearch_shard_allocation.yml 20 | - include: elasticsearch_cluster_healthy.yml 21 | -------------------------------------------------------------------------------- /ansible/files/hetzner_server_libvirt_default_net.xml: -------------------------------------------------------------------------------- 1 | 2 | wirebox 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /ansible/files/hetzner_server_sshd_config: -------------------------------------------------------------------------------- 1 | Port 22 2 | 3 | AcceptEnv LANG LC_* 4 | LogLevel verbose 5 | PrintMotd no 6 | 7 | # Hardened algorithm configuration based on the output of 'ssh-audit' (https://github.com/jtesta/ssh-audit). 
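# After changing this file, the host can be re-checked with ssh-audit, e.g. `ssh-audit <hostname>` (assuming ssh-audit is installed on the auditing machine).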
8 | 9 | KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512 10 | Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com 11 | MACs hmac-sha2-512-etm@openssh.com 12 | 13 | HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512 14 | CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512 15 | GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- 16 | HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512 17 | PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,ecdsa-sha2-nistp521 18 | 19 | PasswordAuthentication no 20 | PubkeyAuthentication yes 21 | ChallengeResponseAuthentication no 22 | 23 | Subsystem sftp /usr/lib/openssh/sftp-server 24 | UsePAM yes 25 | X11Forwarding no 26 | -------------------------------------------------------------------------------- /ansible/files/registry/.gitignore: -------------------------------------------------------------------------------- 1 | certs 2 | -------------------------------------------------------------------------------- /ansible/files/registry/images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 6 | 7 | registry_name="localhost" 8 | 9 | images=$(cat $SCRIPT_DIR/list_of_docker_images.txt) 10 | quay=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^quay.io" | awk -F quay.io/ '{print $2}' | grep -v '^$' ) 11 | gcr=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^gcr.io" | awk -F gcr.io/ '{print $2}' | grep -v '^$') 12 | registryk8s=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep "^registry.k8s.io" | awk -F registry.k8s.io/ '{print $2}' | grep -v '^$') 13 | hub=$(cat $SCRIPT_DIR/list_of_docker_images.txt | grep -v gcr.io | grep -v quay.io) 14 | 15 | 16 | function mirror() { 17 | docker pull $prefix$image 18 | docker tag $prefix$image $registry_name/$image 19 | docker push $registry_name/$image 20 | #docker image remove $registry_name/$image 21 | #docker image remove $prefix/$image 22 | } 23 | 24 | prefix=quay.io/ 25 | for image in ${quay[@]}; do 26 | mirror 27 | done; 28 | 29 | prefix=registry.k8s.io/ 30 | for image in ${registryk8s[@]}; do 31 | mirror 32 | done; 33 | 34 | prefix=gcr.io/ 35 | for image in ${gcr[@]}; do 36 | mirror 37 | done; 38 | 39 | prefix="" 40 | for image in ${hub[@]}; do 41 | mirror 42 | done; 43 | 44 | -------------------------------------------------------------------------------- /ansible/files/registry/mk-certs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -xe 4 | 5 | # Generate a private key and a self-signed certificate. 6 | 7 | CERTS_DIR='certs' 8 | CONFIG='openssl.cnf' 9 | 10 | mkdir -p "$CERTS_DIR" 11 | 12 | # Get domain name from config file. 13 | # Domain is used to name self-signed cert file. 14 | DOMAIN=$(grep commonName "$CONFIG" | cut -d'=' -f 2 | tr -d ' ') 15 | 16 | # Generate private key. 
17 | openssl genrsa -out "$CERTS_DIR/private.pem" 1024 18 | 19 | # Generate cert signing request. 20 | openssl req -new \ 21 | -key "$CERTS_DIR/private.pem" \ 22 | -out "$CERTS_DIR/proxy.csr" \ 23 | -config "$CONFIG" 24 | 25 | # Generate self-signed cert. 26 | openssl x509 -req \ 27 | -days 730 \ 28 | -signkey "$CERTS_DIR/private.pem" \ 29 | -in "$CERTS_DIR/proxy.csr" \ 30 | -out "$CERTS_DIR/$DOMAIN.crt" \ 31 | -extensions v3_req \ 32 | -extfile "$CONFIG" 33 | 34 | # Delete signing request. 35 | rm $CERTS_DIR/proxy.csr 36 | -------------------------------------------------------------------------------- /ansible/files/registry/mk-sub-certificate: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | USAGE="create certificate using self-signed CA. Usage: $0 where CN is 'mydomain.com'" 4 | cn=${1:?$USAGE} 5 | 6 | 7 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 8 | CERT_DIR="${SCRIPT_DIR}/certs" 9 | mkdir -p "$CERT_DIR" 10 | 11 | 12 | CA_cert=${2:-"wire.com.crt"} 13 | CA_key=${3:-"private.pem"} 14 | 15 | cd "$CERT_DIR" || exit 16 | 17 | set -ex 18 | 19 | echo $cn > dns_name 20 | 21 | ALLDOMAINS="" 22 | for ONEREGISTRYIN in quay.io registry.k8s.io gcr.io docker.caching.proxy.internal registry-1.docker.io auth.docker.io ${REGISTRIES}; do 23 | ONEREGISTRY=$(echo ${ONEREGISTRYIN} | xargs) # Remove whitespace 24 | echo "Adding certificate for registry: $ONEREGISTRY" 25 | ALLDOMAINS="${ALLDOMAINS},DNS:${ONEREGISTRY}" 26 | done 27 | export ALLDOMAINS=${ALLDOMAINS:1} # remove the first comma and export 28 | 29 | openssl genrsa -out client.key 2048 30 | 31 | openssl req -new -sha256 -key client.key -subj "/CN=${cn}" \ 32 | -reqexts SAN -config <(cat <(printf "[req]\ndistinguished_name = dn\n[dn]\n[SAN]\nsubjectAltName=${ALLDOMAINS}")) \ 33 | -out client-csr.csr 34 | 35 | openssl x509 -req -in client-csr.csr -CA "$CA_cert" -CAkey "$CA_key" -CAcreateserial -out client.crt \ 36 | -days 500 -sha256 -extensions SAN \ 37 | -extfile <(cat <(printf "[req]\ndistinguished_name = dn\n[dn]\n[SAN]\nsubjectAltName=${ALLDOMAINS}")) \ 38 | 39 | rm client-csr.csr 40 | 41 | -------------------------------------------------------------------------------- /ansible/files/registry/openssl.cnf: -------------------------------------------------------------------------------- 1 | prompt = no 2 | HOME = . 
3 | RANDFILE = $ENV::HOME/.rnd 4 | oid_section = new_oids 5 | extensions = v3_req 6 | 7 | [ new_oids ] 8 | tsa_policy1 = 1.2.3.4.1 9 | tsa_policy2 = 1.2.3.4.5.6 10 | tsa_policy3 = 1.2.3.4.5.7 11 | 12 | [ req ] 13 | default_bits = 1024 14 | default_keyfile = privkey.pem 15 | distinguished_name = req_distinguished_name 16 | attributes = req_attributes 17 | string_mask = utf8only 18 | 19 | [ req_distinguished_name ] 20 | 0.organizationName = wire.com 21 | commonName = wire.com 22 | 23 | [ req_attributes ] 24 | 25 | [ v3_req ] 26 | basicConstraints = critical,CA:TRUE 27 | keyUsage = keyCertSign, cRLSign 28 | -------------------------------------------------------------------------------- /ansible/files/registry/registry-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | IP=${1:-"0.0.0.0"} 4 | PORT=${2:-"5001"} 5 | 6 | docker run \ 7 | -d \ 8 | --network=host \ 9 | --restart=always \ 10 | --name registry \ 11 | -v $(pwd)/../../mnt/registry:/var/lib/registry \ 12 | -v "$(pwd)/certs:/certs" \ 13 | -e REGISTRY_HTTP_ADDR=${IP}:${PORT} \ 14 | -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/client.crt \ 15 | -e REGISTRY_HTTP_TLS_KEY=/certs/client.key \ 16 | registry:2 17 | -------------------------------------------------------------------------------- /ansible/files/registry/upload_image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | registry_name="localhost:5001" 4 | 5 | function mirror() { 6 | prefix=$1 7 | image=$2 8 | rmafter=$3 9 | docker pull $prefix/$image 10 | docker tag $prefix/$image $registry_name/$image 11 | docker push $registry_name/$image 12 | [ -n "$rmafter" ] && docker image remove $registry_name/$image 13 | [ -n "$rmafter" ] && docker image remove $prefix/$image 14 | } 15 | 16 | mirror $1 $2 yes 17 | -------------------------------------------------------------------------------- /ansible/files/serve-assets.service: -------------------------------------------------------------------------------- 1 | [Service] 2 | ExecStart=/usr/bin/python3 -m http.server 8080 3 | WorkingDirectory=/opt/assets/ 4 | -------------------------------------------------------------------------------- /ansible/get-logs.yml: -------------------------------------------------------------------------------- 1 | - hosts: "{{ log_host }}" 2 | tasks: 3 | - assert: 4 | msg: "'log_host' must be set and not empty" 5 | that: 6 | - log_host is defined 7 | - log_host | length > 0 8 | - assert: 9 | msg: "'log_service' must be set and not empty" 10 | that: 11 | - log_service is defined 12 | - log_service | length > 0 13 | - assert: 14 | msg: "'log_since' must be set and not empty" 15 | that: 16 | - log_since is defined 17 | - log_since | length > 0 18 | 19 | - name: get logs 20 | shell: journalctl -u {{ log_service }} --since '{{ log_since }}' --until '{{ log_until | default('now', true) }}' 21 | register: the_logs 22 | - name: create logs directory 23 | delegate_to: localhost 24 | become: no 25 | file: 26 | state: directory 27 | path: "{{ log_dir | default('./', true) }}" 28 | - name: save logs 29 | delegate_to: localhost 30 | become: no 31 | copy: 32 | dest: "{{ log_dir | default('/tmp', true) }}/{{log_host}}-{{ log_service }}-{{ log_since }}-{{ log_until | default('now', true) }}.log" 33 | content: "{{ the_logs.stdout }}" 34 | -------------------------------------------------------------------------------- /ansible/helm_external.yml: 
-------------------------------------------------------------------------------- 1 | # Generates -external/values.yaml files containing the ip addresses 2 | # that these databases listen on. These files are used as overrides with the 3 | # -external helm charts (e.g. cassandra-external). 4 | # 5 | # After any change to IPs/servers: 6 | # 1. run this playbook: 7 | # ansible-playbook -i hosts.ini helm_external.yml -vv --diff 8 | # 2. re-run the helm upgrade specifying the override files. 9 | - hosts: "elasticsearch" 10 | become: false 11 | tasks: 12 | - name: Generate elasticsearch IPs for helm 13 | include_tasks: tasks/helm_external.yml 14 | vars: 15 | external_dir_name: elasticsearch-external 16 | server_type: elasticsearch 17 | network_interface: "{{ elasticsearch_network_interface }}" 18 | 19 | - hosts: "minio" 20 | become: false 21 | tasks: 22 | - name: Generate minio IPs for helm 23 | include_tasks: tasks/helm_external.yml 24 | vars: 25 | external_dir_name: minio-external 26 | server_type: minio 27 | network_interface: "{{ minio_network_interface }}" 28 | 29 | - hosts: "cassandra" 30 | become: false 31 | tasks: 32 | - name: Generate cassandra IPs for helm 33 | include_tasks: tasks/helm_external.yml 34 | vars: 35 | external_dir_name: cassandra-external 36 | server_type: cassandra 37 | network_interface: "{{ cassandra_network_interface }}" 38 | 39 | - hosts: "rmq-cluster" 40 | become: false 41 | tasks: 42 | - name: Generate rabbitmq IPs for helm 43 | include_tasks: tasks/helm_external.yml 44 | vars: 45 | external_dir_name: rabbitmq-external 46 | server_type: rmq-cluster 47 | network_interface: "{{ rabbitmq_network_interface }}" 48 | tags: rabbitmq-external 49 | -------------------------------------------------------------------------------- /ansible/host_vars/localhost/python.yml: -------------------------------------------------------------------------------- 1 | ansible_python_interpreter: "{{ lookup('env','LOCALHOST_PYTHON') }}" 2 | -------------------------------------------------------------------------------- /ansible/inventory/offline/group_vars/demo/offline.yml: -------------------------------------------------------------------------------- 1 | # The assethost will host assets other machines will download 2 | # this will be passed post adding the assethost node in the playbook 3 | # assethost_host: "{{ hostvars['assethost'].ansible_host }}:8080" 4 | # When set to true; will set up all the repos below before continuing 5 | # to bootstrap; such that no network access is needed 6 | offline: true 7 | 8 | # This is used nowhere inside kubespray, only inside this file 9 | # and our own playbooks 10 | ubuntu_repos: "http://{{ assethost_host }}/debs-{{ ansible_distribution_release }}/public" 11 | ubuntu_repo_base_url: "{{ ubuntu_repos }}" 12 | ubuntu_repo_gpgkey: "{{ ubuntu_repos }}/gpg" 13 | 14 | docker_ubuntu_repo_base_url: "{{ ubuntu_repos }}" 15 | docker_ubuntu_repo_gpgkey: "{{ ubuntu_repos }}/gpg" 16 | # docker_ubuntu_repo_repokey: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88" 17 | 18 | binaries_url: "http://{{ assethost_host }}/binaries" 19 | 20 | kubeconfig_localhost: true 21 | #resolvconf_mode: none 22 | 23 | # This defaults to true if http://169.254.169.254/latest/meta-data exists; which 24 | # is also available in non-AWS. e.g. in Hetzner. 
Lets not let this autodetect in offline 25 | is_aws_environment: False 26 | 27 | # IP address for the logging (for example QRadar) server 28 | syslog_target_ip: 12.34.56.78 29 | -------------------------------------------------------------------------------- /ansible/iptables.yml: -------------------------------------------------------------------------------- 1 | - hosts: kube-node 2 | tasks: 3 | - name: "Redirect TLS traffic to the kubernetes ingress" 4 | become: true 5 | iptables: 6 | table: "nat" 7 | chain: "PREROUTING" 8 | in_interface: "eth0" 9 | protocol: "tcp" 10 | match: "tcp" 11 | destination_port: "443" 12 | jump: "REDIRECT" 13 | to_ports: "31773" 14 | state: "present" 15 | - name: "Redirect http traffic to the kubernetes ingress" 16 | become: true 17 | iptables: 18 | table: "nat" 19 | chain: "PREROUTING" 20 | in_interface: "eth0" 21 | protocol: "tcp" 22 | match: "tcp" 23 | destination_port: "80" 24 | jump: "REDIRECT" 25 | to_ports: "31772" 26 | state: "present" 27 | 28 | -------------------------------------------------------------------------------- /ansible/kubernetes-fetch-kubeconfig.yml: -------------------------------------------------------------------------------- 1 | # Fetch the `kubeconfig` file. This is useful when the original `kubeconfig` has 2 | # been lost. 3 | # Run it with e.g. `ENV=bella make create-inventory fetch-kubeconfig`. 4 | 5 | - name: 'Fetch kubeconfig' 6 | hosts: kube-master 7 | tasks: 8 | - name: download kubeconfig 9 | ansible.builtin.fetch: 10 | src: /etc/kubernetes/admin.conf 11 | dest: ./kubeconfig.new 12 | flat: true 13 | 14 | - name: notify user about kubeconfig 15 | ansible.builtin.debug: 16 | msg: 17 | - "./kubeconfig.new has been downloaded to your machine" 18 | -------------------------------------------------------------------------------- /ansible/kubernetes_logging.yml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-cluster 2 | environment: "{{ proxy_env | default({}) }}" 3 | roles: 4 | - role: logrotate 5 | logrotate_scripts: 6 | # The following will rotate pod logs once per day to keep no more than 7 | # 3 days (maxage 1, rotate 2) of logs for data minimization/protection 8 | # reasons. 
9 | # 10 | # NOTE for wire-server-deploy maintainers: if you change the following 11 | # options, ensure to also keep the documentation up-to-date, see the 12 | # documentation introduced in 13 | # https://github.com/wireapp/wire-docs/pull/79 14 | - name: podlogs 15 | path: "/var/lib/docker/containers/*/*.log" 16 | options: 17 | - daily 18 | - missingok 19 | - rotate 2 20 | - maxage 1 21 | - copytruncate 22 | - nocreate 23 | - nocompress 24 | -------------------------------------------------------------------------------- /ansible/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Configure using Rsyslog to monitor and forward log files to Qradar/remote logging 3 | # ------------------------------------------------------------------ 4 | # https://qradarinsights.com/2018/12/20/using-rsyslog-to-monitor-and-forward-log-files-to-qradar/ 5 | # See https://wearezeta.atlassian.net/browse/JCT-62 6 | 7 | - name: Change log file permission only on elasticsearch hosts 8 | hosts: 'elasticsearch' 9 | tasks: 10 | # sudo chmod 775 /var/log/elasticsearch 11 | - name: Change permissions of elasticsearch log file 12 | become: true 13 | file: 14 | path: /var/log/elasticsearch 15 | mode: '0775' 16 | 17 | - name: Using Rsyslog to monitor and forward log files 18 | hosts: all 19 | tasks: 20 | - name: Copy the elasticsearch config 21 | become: true 22 | template: 23 | src: templates/elasticsearch.conf.j2 24 | dest: /etc/rsyslog.d/55-elasticsearch.conf 25 | 26 | - name: Copy the logging config 27 | become: true 28 | template: 29 | src: templates/logging.conf.j2 30 | dest: /etc/rsyslog.d/rfc5424-remote.conf 31 | 32 | # service syslog restart 33 | - name: Restart the syslog service 34 | become: true 35 | service: 36 | name: syslog 37 | state: restarted 38 | -------------------------------------------------------------------------------- /ansible/ntp.yml: -------------------------------------------------------------------------------- 1 | - hosts: cassandra 2 | any_errors_fatal: true 3 | become: true 4 | vars: 5 | ntp_server: ntp.ubuntu.com # specify NTP server you wish to use here 6 | tasks: 7 | - name: Install NTP 8 | apt: 9 | name: ntp 10 | state: present 11 | 12 | - name: Deploy ntp.conf 13 | template: 14 | src=ntp.conf.j2 15 | dest=/etc/ntp.conf 16 | owner=root 17 | mode=0644 18 | 19 | - name: Restart ntp service 20 | service: 21 | name=ntp 22 | state=restarted 23 | 24 | - name: Make sure NTP is started 25 | service: 26 | name=ntp 27 | state=started 28 | enabled=yes 29 | -------------------------------------------------------------------------------- /ansible/provision-sft.yml: -------------------------------------------------------------------------------- 1 | # This role requires access to s3 buckets and has a few variables that need to 2 | # be set. When run with any variables missing, it will complain about those 3 | # variables. 4 | - hosts: localhost 5 | become: false 6 | roles: 7 | - role: sft-monitoring-certs 8 | when: "{{ (groups['sft_servers'] | length) > 0 }}" 9 | 10 | - hosts: sft_servers 11 | roles: 12 | - role: sft-server 13 | - role: srv-announcer 14 | tasks: 15 | # The Ubuntu images provided by hetzner have systemd-resolved enabled, 16 | # but don't use the nss module, and direct all traffic through the 17 | # 127.0.0.53 stub resolver 18 | # This one seems to be flaky. 
19 | # Instead, configure it to use /run/systemd/resolve/resolv.conf, which points to 20 | # the DNS servers retrieved via DHCP directly 21 | - name: Workaround systemd-resolved being flaky 22 | file: 23 | src: /run/systemd/resolve/resolv.conf 24 | dest: /etc/resolv.conf 25 | owner: root 26 | group: root 27 | state: link 28 | 29 | - hosts: localhost 30 | tasks: 31 | - when: "{{ (groups['sft_servers'] | length) > 0 }}" 32 | block: 33 | - name: Get all SRV recoreds 34 | route53: 35 | zone: "{{ root_domain }}" 36 | type: "SRV" 37 | record: "_sft._tcp.{{ environment_name }}.{{ root_domain }}" 38 | state: get 39 | register: srv_records 40 | - name: Delete all SRV records 41 | route53: 42 | zone: "{{ root_domain }}" 43 | type: "SRV" 44 | record: "_sft._tcp.{{ environment_name }}.{{ root_domain }}" 45 | state: "delete" 46 | value: "{{ srv_records.set.value }}" 47 | ttl: "{{ srv_records.set.ttl }}" 48 | -------------------------------------------------------------------------------- /ansible/rabbitmq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: rmq-cluster 3 | become: yes 4 | roles: 5 | - rabbitmq-cluster 6 | -------------------------------------------------------------------------------- /ansible/roles/etcd-helpers/defaults/main.yml: -------------------------------------------------------------------------------- 1 | etcd_helpers_path: /usr/local/bin 2 | -------------------------------------------------------------------------------- /ansible/roles/etcd-helpers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # TODO - add this task/role to kubespray upstream, potentially. 2 | # 3 | - name: Add etcd helper scripts 4 | template: 5 | src: "{{ item }}.j2" 6 | dest: "{{ etcd_helpers_path }}/{{ item }}" 7 | owner: root 8 | group: root 9 | mode: '0755' 10 | with_items: 11 | - etcd-health.sh 12 | - etcdctl3.sh 13 | -------------------------------------------------------------------------------- /ansible/roles/etcd-helpers/templates/etcd-health.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | HOST={{ ansible_hostname }} 4 | 5 | etcdctl --endpoints https://127.0.0.1:2379 --cacert=/etc/ssl/etcd/ssl/ca.pem --cert=/etc/ssl/etcd/ssl/member-$HOST.pem --key=/etc/ssl/etcd/ssl/member-$HOST-key.pem endpoint --cluster health 6 | -------------------------------------------------------------------------------- /ansible/roles/etcd-helpers/templates/etcdctl3.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | HOST={{ ansible_hostname }} 4 | 5 | export ETCDCTL_API=3 6 | export ETCDCTL_CA_FILE=/etc/ssl/etcd/ssl/ca.pem 7 | export ETCDCTL_CERT=/etc/ssl/etcd/ssl/member-$HOST.pem 8 | export ETCDCTL_KEY=/etc/ssl/etcd/ssl/member-$HOST-key.pem 9 | 10 | #to support etcdctl 3.14 11 | export ETCDCTL_CACERT=/etc/ssl/etcd/ssl/ca.pem 12 | 13 | /usr/local/bin/etcdctl "$@" 14 | -------------------------------------------------------------------------------- /ansible/roles/minio-static-files/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Deeplink configuration defaults 2 | # 3 | # This role requires the following variables to be set: 4 | # 5 | #prefix: example- 6 | #domain: example.com 7 | #deeplink_title: Example Environment 8 | 9 | assetsURL: "https://{{ prefix }}assets.{{ domain }}" 10 | deeplink_config_json: "{{ assetsURL 
}}/public/deeplink.json" 11 | backendURL: "https://{{ prefix }}nginz-https.{{ domain }}" 12 | backendWSURL: "https://{{ prefix }}nginz-ssl.{{ domain }}" 13 | teamsURL: "https://{{ prefix }}teams.{{ domain }}" 14 | accountsURL: "https://{{ prefix }}account.{{ domain }}" 15 | 16 | # FUTUREWORK: 17 | # Note the website, for Wire, points to https://wire.com 18 | # There is currently no equivalent for custom backends 19 | # So all URLs in the mobile app that point to a url on the wire website will 20 | # not work, as the default of 'www' points nowhere. 21 | websiteURL: "https://{{ prefix }}www.{{ domain }}" 22 | 23 | # This currently assumes android clients only 24 | blackListURL: "{{ assetsURL }}/public/blacklist/android.json" 25 | -------------------------------------------------------------------------------- /ansible/roles/minio-static-files/files/android.json: -------------------------------------------------------------------------------- 1 | { 2 | "oldestAccepted": 322, 3 | "blacklisted": [ 0, 108 ] 4 | } 5 | -------------------------------------------------------------------------------- /ansible/roles/minio-static-files/files/ios.json: -------------------------------------------------------------------------------- 1 | { 2 | "min_version": "1", 3 | "exclude": ["1896", "1921", "1963", "1992", "2006", "2032", "2093", "2135", "2161", "2173", "2191", "2222", "2252", "2301", "2319", "2365", "2377", "2395", "2406", "2424", "2472", "2484", "2516", "2521", "2531", "2538", "2549", "2565", "2584", "2585", "2592", "2620", "2629", "2635", "2650", "2669", "2670", "2675", "2681", "2702", "2713", "2720", "2731", "2735", "2742", "2752", "2766", "2772", "2780", "2787", "2795", "2811"] 4 | } 5 | -------------------------------------------------------------------------------- /ansible/roles/minio-static-files/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # FUTUREWORK: https://github.com/zinfra/backend-issues/issues/1763 2 | # 3 | - name: "create deeplink template files" 4 | template: 5 | src: "{{ item }}.j2" 6 | dest: "/tmp/{{ item }}" 7 | run_once: true 8 | with_items: 9 | - deeplink.html 10 | - deeplink.json 11 | tags: deeplink 12 | 13 | - name: "Add deeplink files to minio" 14 | shell: "mc cp /tmp/{{ item }} local/public/{{ item }}" 15 | run_once: true 16 | with_items: 17 | - deeplink.html 18 | - deeplink.json 19 | tags: deeplink 20 | 21 | - name: "copy blacklist files" 22 | copy: 23 | src: "{{ item }}" 24 | dest: "/tmp/{{ item }}" 25 | run_once: true 26 | with_items: 27 | - android.json 28 | - ios.json 29 | tags: blacklist 30 | 31 | - name: "Add blacklist files to minio" 32 | shell: "mc cp /tmp/{{ item }} local/public/blacklist/{{ item }}" 33 | run_once: true 34 | with_items: 35 | - android.json 36 | - ios.json 37 | tags: blacklist 38 | -------------------------------------------------------------------------------- /ansible/roles/minio-static-files/templates/deeplink.html.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |

6 | CLICK ME TO CONFIGURE 7 | Clicking the above link will use the following json configuration file: 8 | wire://access/?config={{ deeplink_config_json }}

9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /ansible/roles/minio-static-files/templates/deeplink.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "endpoints": { 3 | "backendURL": "{{ backendURL }}", 4 | "backendWSURL": "{{ backendWSURL }}", 5 | "blackListURL": "{{ blackListURL }}", 6 | "teamsURL": "{{ teamsURL }}", 7 | "accountsURL": "{{ accountsURL }}", 8 | "websiteURL": "{{ websiteURL }}" 9 | }, 10 | "title": "{{ deeplink_title }}" 11 | } 12 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Handlers for RabbitMQ 3 | - name: restart rabbitmq-server 4 | service: name=rabbitmq-server state=restarted 5 | 6 | - name: start rabbitmq-server 7 | service: name=rabbitmq-server state=started 8 | 9 | - name: stop rabbitmq-server 10 | service: name=rabbitmq-server state=stopped 11 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: make sure rabbitmq server is up 3 | service: 4 | name: rabbitmq-server 5 | state: started 6 | 7 | - name: make sure rabbitmq app is up 8 | command: rabbitmqctl start_app 9 | 10 | - name: check if already in cluster 11 | command: rabbitmqctl cluster_status 12 | register: cluster_status 13 | changed_when: false 14 | 15 | - include_tasks: join_cluster.yml 16 | when: cluster_status.stdout.find("rabbit@{{ rabbitmq_cluster_master }}") == -1 and (ansible_fqdn != rabbitmq_cluster_master) 17 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: rabbitmq default file 3 | template: 4 | src: "{{ item.src }}" 5 | dest: "{{ item.dest }}" 6 | owner: root 7 | group: root 8 | mode: 0644 9 | with_items: 10 | - { src: etc/default/rabbitmq-server.j2 , dest: /etc/default/rabbitmq-server } 11 | - { src: etc/rabbitmq/rabbitmq.config.j2, dest: /etc/rabbitmq/rabbitmq.config } 12 | # - { src: etc/rabbitmq/rabbitmq-env.conf.j2, dest: /etc/rabbitmq/rabbitmq-env.conf } 13 | notify: 14 | restart rabbitmq-server 15 | 16 | - name: restart rabbitmq-server 17 | service: 18 | name: rabbitmq-server 19 | state: restarted 20 | 21 | # - name: Enable the plugins is installed 22 | # rabbitmq_plugin: 23 | # names: "{{ item }}" 24 | # prefix: /usr/lib/rabbitmq 25 | # state: enabled 26 | # new_only: yes 27 | # with_items: "{{ rabbitmq_plugins }}" 28 | # notify: 29 | # restart rabbitmq-server 30 | 31 | - name: restart rabbitmq-server 32 | service: 33 | name: rabbitmq-server 34 | state: restarted 35 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/configure_dns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update /etc/hosts on rmq-cluster nodes 3 | hosts: rmq-cluster 4 | become: yes # This allows the playbook to run with elevated privileges 5 | 6 | tasks: 7 | - name: Add entries to /etc/hosts 8 | lineinfile: 9 | path: /etc/hosts 10 | regexp: "^{{ hostvars[item].ansible_default_ipv4.address }}\\s+{{ item }}\\s+rabbit@{{ item }}$" 11 | line: "{{ 
hostvars[item].ansible_default_ipv4.address }} {{ item }} rabbit@{{ item }}" 12 | insertbefore: EOF 13 | with_items: "{{ groups['rmq-cluster'] }}" 14 | when: item != inventory_hostname 15 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/create_users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # rc 70: user already exists 3 | - name: create test user 4 | shell: rabbitmqctl add_user test test 5 | register: res 6 | failed_when: res.rc != 70 and res.rc != 0 7 | changed_when: res.rc != 70 8 | 9 | - name: list permissions for test user 10 | shell: rabbitmqctl list_permissions 11 | register: list_permissions 12 | changed_when: false 13 | 14 | - name: set permissions on / vhost 15 | shell: rabbitmqctl set_permissions test ".*" ".*" ".*" 16 | when: list_permissions.stdout.find("test") == -1 17 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/enable_ha_queues.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if ha-mode is already enabled 3 | shell: rabbitmqctl list_policies 4 | register: list_policies 5 | changed_when: false 6 | 7 | - name: set ha-mode to exactly two nodes for all queues for backup 8 | shell: rabbitmqctl set_policy ha-exactly-two ".*" '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}' 9 | register: res 10 | failed_when: res.rc != 0 11 | when: list_policies.stdout.find("ha-exactly-two") == -1 12 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/erlang_cookie.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Configure RabbitMQ for cluster 3 | - name: backup old erlang cookie 4 | shell: cp -a /var/lib/rabbitmq/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie.old 5 | changed_when: false 6 | 7 | - name: updating rabbitmq erlang cookie 8 | template: 9 | src: erlang.cookie.j2 10 | dest: /var/lib/rabbitmq/.erlang.cookie 11 | owner: rabbitmq 12 | group: rabbitmq 13 | mode: 0400 14 | notify: 15 | stop rabbitmq-server 16 | 17 | - meta: flush_handlers 18 | 19 | - name: remove old erlang cookie 20 | file: 21 | path: /var/lib/rabbitmq/.erlang.cookie.old 22 | state: absent 23 | changed_when: false 24 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/hosts.yml: -------------------------------------------------------------------------------- 1 | - blockinfile: 2 | path: /etc/hosts 3 | block: "{{ rabbitmq_hosts }}" 4 | owner: root 5 | group: root 6 | mode: 0644 7 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/install.yml: -------------------------------------------------------------------------------- 1 | - name: install rabbitmq-server 2 | apt: 3 | update_cache: yes 4 | force: yes 5 | pkg: "{{ item }}" 6 | state: present 7 | with_items: 8 | - rabbitmq-server 9 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/join_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: stop rabbitmq app 3 | command: rabbitmqctl stop_app 4 | 5 | - name: add this node to cluster 6 | command: rabbitmqctl join_cluster rabbit@{{ rabbitmq_cluster_master }} 7 | 8 | - 
name: start rabbitmq app 9 | command: rabbitmqctl start_app 10 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: hosts.yml 3 | when: update_hosts 4 | 5 | - include_tasks: install.yml 6 | 7 | - include_tasks: erlang_cookie.yml 8 | when: rabbitmq_create_cluster 9 | 10 | - include_tasks: tls.yml 11 | when: enable_tls 12 | 13 | - include_tasks: config.yml 14 | 15 | #- include_tasks: service.yml 16 | 17 | - include_tasks: cluster.yml 18 | when: rabbitmq_create_cluster == true 19 | 20 | - include_tasks: create_users.yml 21 | 22 | - include_tasks: enable_ha_queues.yml 23 | when: backup_queues_in_two_nodes 24 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: rabbitmq service started and enabled 3 | service: 4 | name: rabbitmq-server 5 | enabled: yes 6 | state: started 7 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/tasks/tls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Copy cacert, cert and key file for TLS/SSL 3 | - name: make sure TLS/SSL certificates exists 4 | template: 5 | src: "{{ item.src }}" 6 | dest: "{{ item.dest }}" 7 | owner: root 8 | group: rabbitmq 9 | mode: 0644 10 | with_items: 11 | - { src: "{{ cacertfile }}", dest: "{{ cacertfile_dest }}" } 12 | - { src: "{{ certfile }}", dest: "{{ certfile_dest }}" } 13 | - { src: "{{ keyfile }}", dest: "{{ keyfile_dest }}" } 14 | -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/templates/erlang.cookie.j2: -------------------------------------------------------------------------------- 1 | {{ rabbitmq_erlang_cookie }} -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/templates/etc/default/rabbitmq-server.j2: -------------------------------------------------------------------------------- 1 | # This file is sourced by /etc/init.d/rabbitmq-server. Its primary 2 | # reason for existing is to allow adjustment of system limits for the 3 | # rabbitmq-server process. 4 | # 5 | # Maximum number of open file handles. This will need to be increased 6 | # to handle many simultaneous connections. Refer to the system 7 | # documentation for ulimit (in man bash) for more information. 8 | # 9 | 10 | ulimit -n {{ rabbitmq_ulimit_open_files }} 11 | 12 | ERL_EPMD_PORT={{ rabbitmq_epmd_port }} 13 | RABBITMQ_NODE_PORT= {{ rabbitmq_node_port }} -------------------------------------------------------------------------------- /ansible/roles/rabbitmq-cluster/templates/etc/rabbitmq/rabbitmq.config.j2: -------------------------------------------------------------------------------- 1 | [ 2 | {rabbit, [ 3 | {% if tls_only %} 4 | {tcp_listeners, []}, 5 | {% endif %} 6 | {% if enable_tls %} 7 | {ssl_listeners, [{{ rabbitmq_tls_port }}]}, 8 | {ssl_options, [{cacertfile,"{{ cacertfile_dest }}"}, 9 | {certfile,"{{ certfile_dest }}"}, 10 | {keyfile,"{{ keyfile_dest }}"}, 11 | {verify,{{ tls_verify }}}, 12 | {fail_if_no_peer_cert,{{tls_fail_if_no_peer_cert|lower}}}]}, 13 | {% endif %} 14 | {loopback_users, []} 15 | ]} 16 | ]. 
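The rabbitmq-cluster role above installs rabbitmq-server, joins the nodes of the rmq-cluster group, creates a test user and optionally an HA queue policy. A minimal verification sketch, assuming the role has already been applied (for example via `ansible-playbook -i hosts.ini rabbitmq.yml`) and that you are logged into one of the rmq-cluster nodes:

```bash
# Post-deploy sanity checks for the rabbitmq-cluster role (run on any rmq-cluster node).
sudo rabbitmqctl cluster_status     # all rmq-cluster members should be listed as running nodes
sudo rabbitmqctl list_users         # create_users.yml adds a "test" user
sudo rabbitmqctl list_permissions   # "test" should hold ".*" permissions on the default vhost
sudo rabbitmqctl list_policies      # "ha-exactly-two" shows up when backup_queues_in_two_nodes is enabled
```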
-------------------------------------------------------------------------------- /ansible/roles/systemd-coredump/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Default install_systemd_coredump to False, as Debian makes it unnecessarily 2 | # hard to ship this in an aptly offline bundle. 3 | # 4 | # Environments can explicitly set this to true in their inventory. 5 | install_systemd_coredump: no 6 | -------------------------------------------------------------------------------- /ansible/roles/systemd-coredump/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "Install systemd-coredump" 2 | when: install_systemd_coredump | bool 3 | apt: 4 | name: systemd-coredump 5 | state: latest 6 | -------------------------------------------------------------------------------- /ansible/seed-offline-docker.yml: -------------------------------------------------------------------------------- 1 | - name: Seed system containers 2 | hosts: k8s-cluster:etcd 3 | tags: system-containers 4 | tasks: 5 | - name: load containers 6 | shell: | 7 | for container in $(curl -q {{ assethost_host }}/containers-system/index.txt);do 8 | curl -q "{{ assethost_host }}/containers-system/$container" | docker load 9 | done 10 | 11 | - name: Download helm containers 12 | hosts: k8s-cluster 13 | tags: containers-helm 14 | tasks: 15 | - name: load containers 16 | shell: | 17 | for container in $(curl -q {{ assethost_host }}/containers-helm/index.txt);do 18 | curl -q "{{ assethost_host }}/containers-helm/$container" | docker load 19 | done 20 | -------------------------------------------------------------------------------- /ansible/sync_time.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure NTP on Cassandra nodes 3 | hosts: cassandra 4 | become: true 5 | vars: 6 | authoritative_node: "{{ groups['cassandra'][0] }}" 7 | 8 | tasks: 9 | - name: Install NTP package 10 | apt: 11 | name: ntp 12 | state: present 13 | 14 | - name: Configure NTP servers 15 | lineinfile: 16 | path: /etc/ntp.conf 17 | line: "server {{ hostvars[authoritative_node].ansible_host }} prefer" 18 | state: present 19 | when: inventory_hostname != authoritative_node 20 | 21 | - name: Restart NTP service 22 | service: 23 | name: ntp 24 | state: restarted 25 | 26 | - name: Print current date 27 | command: date 28 | -------------------------------------------------------------------------------- /ansible/tasks/helm_external.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create external IP directory 3 | file: 4 | state: directory 5 | path: "{{ playbook_dir }}/../values/{{ external_dir_name }}" 6 | delegate_to: localhost 7 | 8 | - name: write IPs for helm 9 | template: 10 | src: templates/helm_external.yaml.j2 11 | dest: "{{ playbook_dir }}/../values/{{ external_dir_name }}/values.yaml" 12 | delegate_to: localhost 13 | -------------------------------------------------------------------------------- /ansible/templates/elasticsearch.conf.j2: -------------------------------------------------------------------------------- 1 | module(load="imfile" PollingInterval="10") 2 | input(type="imfile" File="/var/log/elasticsearch/elasticsearch-directory.log" Tag="elasticsearch") -------------------------------------------------------------------------------- /ansible/templates/helm_external.yaml.j2: 
-------------------------------------------------------------------------------- 1 | IPs: 2 | {% for host in groups[server_type] %} 3 | - {{ hostvars[host]["ansible_" + (network_interface | default(hostvars[host].ansible_default_ipv4.interface, true)) ]["ipv4"]["address"] }} 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /ansible/templates/ntp.conf.j2: -------------------------------------------------------------------------------- 1 | 2 | template# NTP Config 3 | 4 | driftfile /var/lib/ntp/drift 5 | restrict default nomodify notrap nopeer noquery 6 | restrict 127.0.0.1 7 | restrict ::1 8 | 9 | server {{ ntp_server }} -------------------------------------------------------------------------------- /ansible/templates/qradar.conf.j2: -------------------------------------------------------------------------------- 1 | action(type="omfwd" Target="{{ syslog_target_ip }}" Port="514" Protocol="udp") -------------------------------------------------------------------------------- /ansible/tinc.yml: -------------------------------------------------------------------------------- 1 | # EXPERIMENTAL 2 | # 3 | # Deploys tinc VPN - creates a new network interface on all servers in the vpn group. 4 | # Useful for testing different network interface setups. 5 | 6 | - name: Create build directory 7 | hosts: 127.0.0.1 8 | connection: local 9 | gather_facts: false 10 | become: false 11 | tasks: 12 | - name: Create build directory 13 | tempfile: 14 | state: directory 15 | suffix: _tinc 16 | register: build_dir 17 | - name: Make it available to other plays 18 | set_fact: 19 | build_dir: "{{ build_dir.path }}" 20 | - debug: 21 | msg: "Build root: {{ build_dir }}" 22 | 23 | - name: Provision tinc server 24 | hosts: vpn 25 | environment: "{{ proxy_env | default({}) }}" 26 | vars: 27 | tinc_build_root: "{{ hostvars['localhost']['build_dir'] }}" 28 | # CHANGE THIS if your default network interface is not eth0 29 | physical_ip: "{{ ansible_eth0.ipv4.address }}" 30 | roles: 31 | - ansible-tinc 32 | 33 | - name: Cleanup 34 | hosts: 127.0.0.1 35 | connection: local 36 | become: false 37 | gather_facts: false 38 | tasks: 39 | - name: Delete the build folder 40 | file: 41 | state: absent 42 | path: "{{ hostvars['localhost']['build_dir'] }}/" 43 | -------------------------------------------------------------------------------- /ansible/wiab-demo/setup_ssh.yml: -------------------------------------------------------------------------------- 1 | - name: Manage SSH keys 2 | hosts: deploy_node 3 | become: yes 4 | become_user: "{{ ansible_user }}" 5 | tasks: 6 | - name: Setup ssh keys and vars 7 | block: 8 | - name: Ensure the .ssh directory exists 9 | file: 10 | path: "/home/{{ ansible_user }}/.ssh" 11 | state: directory 12 | mode: '0700' 13 | owner: "{{ ansible_user }}" 14 | group: "{{ ansible_user }}" 15 | 16 | - name: Generate SSH key if it does not exist 17 | shell: | 18 | if [ ! 
-f "/home/{{ ansible_user }}/.ssh/id_rsa_wire" ]; then 19 | ssh-keygen -t rsa -b 4096 -f "/home/{{ ansible_user }}/.ssh/id_rsa_wire" -N "" -C "ansible_generated_key_wire"; 20 | fi 21 | args: 22 | creates: "/home/{{ ansible_user }}/.ssh/id_rsa_wire" 23 | 24 | - name: Read the private key 25 | slurp: 26 | src: "/home/{{ ansible_user }}/.ssh/id_rsa_wire" 27 | register: ssh_key_private 28 | 29 | - name: Read the public key content 30 | slurp: 31 | src: "/home/{{ ansible_user }}/.ssh/id_rsa_wire.pub" 32 | register: ssh_key_content 33 | 34 | - name: Set the public key as a fact 35 | set_fact: 36 | ssh_public_key: "{{ ssh_key_content['content'] | b64decode }}" 37 | 38 | - name: Set the private key as a fact 39 | set_fact: 40 | ssh_private_key: "{{ ssh_key_private['content'] | b64decode }}" 41 | 42 | - name: Add SSH key to the node to use it as a assethost 43 | authorized_key: 44 | user: "{{ ansible_user }}" 45 | state: present 46 | key: "{{ ssh_public_key }}" 47 | -------------------------------------------------------------------------------- /ansible/wiab-demo/verify_dns.yml: -------------------------------------------------------------------------------- 1 | - name: Check DNS A records 2 | hosts: deploy_node 3 | vars: 4 | dns_records: 5 | - sftd.{{ target_domain }} 6 | - nginz-https.{{ target_domain }} 7 | - nginz-ssl.{{ target_domain }} 8 | - webapp.{{ target_domain }} 9 | - assets.{{ target_domain }} 10 | - teams.{{ target_domain }} 11 | - account.{{ target_domain }} 12 | test_port: 3478 13 | tasks: 14 | - name: Check DNS A records 15 | command: "dig +short {{ item }}" 16 | register: dns_result 17 | failed_when: false 18 | changed_when: false 19 | with_items: "{{ dns_records }}" 20 | 21 | - name: Check if DNS records exist 22 | fail: 23 | msg: "DNS record {{ item.item }} does not exist." 24 | when: item.stdout | trim == '' 25 | with_items: "{{ dns_result.results }}" 26 | loop_control: 27 | label: "{% if item.stdout is defined %}{{ item.item }}{% else %}checking{% endif %}" 28 | 29 | # create dns entries on the deploy_node in /etc/hosts and suggest dns records creation in case of a private ip 30 | -------------------------------------------------------------------------------- /bin/accept-invitation.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This is a temporary script to accept team invitations if team settings 4 | # are not available; if team settings is made available, then you do not 5 | # need this script at all! 6 | 7 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.." 
8 | 9 | display_usage() { 10 | echo "Usage: accept-invitation " 11 | echo " name - name of the user to be created" 12 | echo " email - email of the user that was just invited" 13 | echo " invitation-url - paste the url exactly as you received on the email" 14 | exit 1 15 | } 16 | 17 | name="$1" 18 | email="$2" 19 | url="$3" 20 | 21 | if [[ $name == "" || $email == "" || $url == "" ]]; then 22 | display_usage 23 | exit 1 24 | fi 25 | 26 | # This assumes that team_code is the last URL parameter 27 | team_code=$(echo $url | awk -F= '{print $NF}') 28 | 29 | # Prompt for a user password 30 | echo -n "Password for the user (8 to 1024 characters long):" 31 | read -s password 32 | 33 | CURL_OUT=$(curl -i -v -s --show-error \ 34 | -XPOST "$url" \ 35 | -H'Content-type: application/json' \ 36 | -d'{"team_code":"'$team_code'", "email":"'$email'","password":"'$password'","name":"'"$name"'"}') 37 | 38 | echo "$CURL_OUT" 39 | -------------------------------------------------------------------------------- /bin/bootstrap/README.md: -------------------------------------------------------------------------------- 1 | # Instroduction 2 | 3 | This is an experimental easy-to-type bootstrap procedure (from ubuntu to a kubernetes cluster - could be extended, possibly). This downloads a bash script which downloads a docker alternative (since kubespray removes any running docker image during installation), and uses that to run the quay.io/wire/networkless-admin image. Within that image, it creates a host file and uses that to install kubernetes to the host system. 4 | 5 | # Status: experimental, may not work for you 6 | 7 | # Procedure for installing kubernetes on ubuntu 8 | 9 | 1. log onto a server running ubuntu 10 | 2. become root: `sudo su -` 11 | 3. Run this init script: 12 | 13 | ``` 14 | curl -sSfL https://raw.githubusercontent.com/wireapp/wire-server-deploy/develop/bin/bootstrap/init.sh > init.sh && chmod +x init.sh && ./init.sh 15 | ``` 16 | -------------------------------------------------------------------------------- /bin/bootstrap/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # See the README.md file next to this script for more information about this script and how it's used. 4 | 5 | mkdir /opt/admin 6 | cd /opt/admin 7 | mkdir -p ../admin_work_dir && cd ../admin_work_dir 8 | mkdir -p ../dot_ssh 9 | mkdir -p ../dot_kube 10 | 11 | if [[ ! 
-f /root/.ssh/id_rsa ]]; then 12 | # create ssh key and allow self to ssh in (from a docker image) 13 | ssh-keygen -N '' -f /root/.ssh/id_rsa 14 | cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys 15 | fi 16 | 17 | # copy ssh key 18 | cp ~/.ssh/id_rsa ../dot_ssh/ 19 | 20 | # podman 21 | sudo apt-get update -qq 22 | sudo apt-get install -qq -y software-properties-common uidmap 23 | sudo add-apt-repository -y ppa:projectatomic/ppa 24 | sudo apt-get update -qq 25 | sudo apt-get -qq -y install podman 26 | 27 | curl -sSfL https://raw.githubusercontent.com/wireapp/wire-server-deploy/develop/bin/bootstrap/inside.sh > inside.sh 28 | chmod +x inside.sh 29 | 30 | podman run -it --network=host -v $(pwd):/mnt -v $(pwd)/../dot_ssh:/root/.ssh -v $(pwd)/../dot_kube:/root/.kube --entrypoint /mnt/inside.sh quay.io/wire/networkless-admin 31 | -------------------------------------------------------------------------------- /bin/bootstrap/inside.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # See the README.md file next to this script for more information about this script and how it's used. 4 | 5 | set -ex 6 | 7 | # When this script is run the first time, copy files over 8 | if [[ ! -d /mnt/wire-server-deploy ]]; then 9 | cp -a /src/* /mnt 10 | fi 11 | 12 | # run ansible from here. If you make any changes, they will be written to your host file system 13 | # (those files will be owned by root as docker runs as root) 14 | cd /mnt/wire-server-deploy/ansible 15 | 16 | # This code may be brittle... 17 | TARGET_IFACE=$(route | grep default | awk '{print $8}') 18 | TARGET_HOST=$(/sbin/ifconfig "$TARGET_IFACE" | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}') 19 | 20 | if [[ ! -f hosts.ini ]]; then 21 | curl -sSfL https://raw.githubusercontent.com/wireapp/wire-server-deploy/develop/ansible/hosts.example-demo.ini > hosts.example-demo.ini 22 | cp hosts.example-demo.ini hosts.ini 23 | sed -i "s/X.X.X.X/$TARGET_HOST/g" hosts.ini 24 | fi 25 | 26 | ansible-playbook -i hosts.ini kubernetes.yml 27 | 28 | echo "Great, kubernetes is up! 
Now follow the directions from:" 29 | echo " https://docs.wire.com/how-to/install/helm.html" 30 | -------------------------------------------------------------------------------- /bin/debug_logs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | echo "Printing all pods status" 5 | kubectl get pods --all-namespaces 6 | echo "------------------------------------" 7 | namespaces=$(kubectl get ns -o=jsonpath='{.items[*].metadata.name}') 8 | echo "Namespaces = $namespaces" 9 | for ns in $namespaces; do 10 | pods=$(kubectl get pods --all-namespaces -o=jsonpath='{.items[*].metadata.name}') 11 | echo "Pods in namespace: $ns = $pods" 12 | for pod in $pods; do 13 | echo "Logs for pod: $pod" 14 | kubectl logs --all-containers -n "$ns" "$pod" || true 15 | echo "Description for pod: $pod" 16 | kubectl describe pod -n "$ns" "$pod" || true 17 | echo "------------------------------------" 18 | done 19 | done 20 | -------------------------------------------------------------------------------- /bin/deployment-info.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # disallow unset variables and exit if any command fails 3 | set -eu 4 | 5 | usage="USAGE: $0 " 6 | namespace=${1?$usage} 7 | deployment=${2?$usage} 8 | 9 | wire_server_repo="https://github.com/wireapp/wire-server" 10 | wire_server_deploy_repo="https://github.com/wireapp/wire-server-deploy" 11 | 12 | resource_type=deployment 13 | if [[ "$2" == cannon ]]; then 14 | resource_type=statefulsets 15 | fi 16 | 17 | image=$( 18 | kubectl -n "$namespace" get "$resource_type" "$deployment" -o json | 19 | # Filter out only pod image ids 20 | jq -r '.spec.template.spec.containers[].image' | 21 | # ignore sidecar containers, etc. 22 | grep "/wire/$deployment:" 23 | ) 24 | 25 | # select only docker image tag; not repo 26 | image_tag="image/$(echo "$image" | cut -f2 -d:)" 27 | 28 | wire_server_commit=$( 29 | # get all tags from repo 30 | git ls-remote --tags "$wire_server_repo" | 31 | grep "$image_tag" | 32 | cut -f1 | 33 | tr -d ' \t\n' 34 | ) 35 | 36 | chart_version=$( 37 | helm ls -a | 38 | grep "wire-server" | 39 | cut -f5 | 40 | tr -d ' \t\n' 41 | ) 42 | 43 | wire_server_deploy_commit=$( 44 | git ls-remote --tags "$wire_server_deploy_repo" | 45 | grep "$chart_version" | 46 | cut -f1 | 47 | tr -d ' \t\n' 48 | ) 49 | 50 | # align output nicely 51 | column -t <( 52 | echo -e "docker_image:\t$image" 53 | echo -e "chart_version:\t$chart_version" 54 | echo -e "wire_server_commit:\t$wire_server_commit" 55 | echo -e "wire_server_link:\t$wire_server_repo/releases/tag/$image_tag" 56 | echo -e "wire_server_deploy_commit:\t$wire_server_deploy_commit" 57 | echo -e "wire_server_deploy_link:\t$wire_server_deploy_repo/releases/tag/chart/$chart_version" 58 | ) 59 | -------------------------------------------------------------------------------- /bin/fix_default_router.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | echo " ********************** Editing the configMap for nodelocaldns **********************" 6 | CURRENT_COREFILE=$(kubectl get configmap nodelocaldns -n kube-system -o=jsonpath='{.data.Corefile}') 7 | echo "Current Corefile:" 8 | echo "$CURRENT_COREFILE" 9 | MODIFIED_TEXT=$(echo "$CURRENT_COREFILE" | sed '/forward \. 
\/etc\/resolv\.conf/d') 10 | echo "Modified Corefile:" 11 | echo "$MODIFIED_TEXT" 12 | kubectl create configmap nodelocaldns -n kube-system --from-literal="Corefile=$MODIFIED_TEXT" --dry-run=client -o yaml | kubectl apply -f - 13 | echo "Printing kubectl describe configMap nodelocaldns -n kube-system after updating" 14 | kubectl describe configMap nodelocaldns -n kube-system 15 | 16 | 17 | echo " ********************** Editing the configMap for coredns **********************" 18 | echo "Printing kubectl describe configMap coredns -n kube-system" 19 | kubectl describe configMap coredns -n kube-system 20 | echo "Updating the configMap coredns -n kube-system" 21 | kubectl get configmap coredns -n kube-system --output yaml > coredns_config.yaml 22 | sed -i coredns_config.yaml -e '/^[ ]*forward.*/{N;N;N;d;}' -e "s/^\([ ]*\)cache/\1forward . 127.0.0.53:9999 {\n\1 max_fails 0\n\1}\n\1cache/" 23 | kubectl apply -f coredns_config.yaml 24 | echo "Printing kubectl get configmap coredns -n kube-system --output yaml after updating" 25 | kubectl get configmap coredns -n kube-system --output yaml 26 | sleep 10 27 | -------------------------------------------------------------------------------- /bin/generate-image-list.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | kubectl get pods --all-namespaces -o jsonpath="{..image}" |\ 4 | tr -s '[[:space:]]' '\n' |\ 5 | sort |\ 6 | uniq 7 | -------------------------------------------------------------------------------- /bin/logging.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | NAMESPACE=${NAMESPACE:-monitoring} 4 | 5 | helm repo update 6 | 7 | helm upgrade --install --namespace "$NAMESPACE" "$NAMESPACE-elasticsearch-ephemeral" wire/elasticsearch-ephemeral 8 | helm upgrade --install --namespace "$NAMESPACE" "$NAMESPACE-fluent-bit" wire/fluent-bit 9 | helm upgrade --install --namespace "$NAMESPACE" "$NAMESPACE-kibana" wire/kibana 10 | helm upgrade --install --namespace "$NAMESPACE" "$NAMESPACE-elasticsearch-curator" wire/elasticsearch-curator 11 | -------------------------------------------------------------------------------- /bin/offline-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | 6 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | 8 | # HACK: hack to stop ssh from idling the connection. Which it will do if there is no output. And ansible is not verbose enough 9 | (while true; do echo "Still deploying..."; sleep 10; done) & 10 | loop_pid=$! 
11 | 12 | trap 'kill "$loop_pid"' EXIT 13 | 14 | ZAUTH_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') 15 | export ZAUTH_CONTAINER 16 | 17 | WSD_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') 18 | 19 | ./bin/offline-secrets.sh 20 | 21 | sudo docker run --network=host -v $SSH_AUTH_SOCK:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v $PWD:/wire-server-deploy $WSD_CONTAINER ./bin/offline-cluster.sh 22 | sudo docker run --network=host -v $PWD:/wire-server-deploy $WSD_CONTAINER ./bin/offline-helm.sh 23 | -------------------------------------------------------------------------------- /bin/offline-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 5 | 6 | ZAUTH_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') 7 | export ZAUTH_CONTAINER 8 | 9 | WSD_CONTAINER=$(sudo docker load -i $SCRIPT_DIR/../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') 10 | 11 | alias d="sudo docker run -it --network=host -v ${SSH_AUTH_SOCK:-nonexistent}:/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v $HOME/.ssh:/root/.ssh -v $PWD:/wire-server-deploy $WSD_CONTAINER" 12 | -------------------------------------------------------------------------------- /bin/prod-init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | VALUES_DIR="$( cd "$SCRIPT_DIR/../values" && pwd )" 5 | 6 | init="${VALUES_DIR}/values-init-done" 7 | 8 | if [[ -f $init ]]; then 9 | echo "initialization already done. Not overriding. Exiting." 10 | exit 1 11 | fi 12 | 13 | cp -v $VALUES_DIR/wire-server/{prod-values.example,values}.yaml 14 | cp -v $VALUES_DIR/wire-server/{prod-secrets.example,secrets}.yaml 15 | cp -v $VALUES_DIR/databases-ephemeral/{prod-values.example,values}.yaml 16 | cp -v $VALUES_DIR/fake-aws/{prod-values.example,values}.yaml 17 | cp -v $VALUES_DIR/ingress-nginx-controller/{prod-values.example,values}.yaml 18 | cp -v $VALUES_DIR/nginx-ingress-services/{prod-values.example,values}.yaml 19 | cp -v $VALUES_DIR/nginx-ingress-services/{prod-secrets.example,secrets}.yaml 20 | cp -v $VALUES_DIR/demo-smtp/{prod-values.example,values}.yaml 21 | 22 | #cp "$VALUES_DIR/cassandra-external/{prod-values.example,values}.yaml" 23 | #cp "$VALUES_DIR/minio-external/{prod-values.example,values}.yaml" 24 | #cp "$VALUES_DIR/elasticsearch-external/{prod-values.example,values}.yaml" 25 | 26 | echo "done" > "$init" 27 | -------------------------------------------------------------------------------- /bin/shellcheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | # lint all shell scripts with ShellCheck 6 | # FUTUREWORK: Fix issues of the explicitly (no globbing) excluded files. 
7 | 8 | mapfile -t SHELL_FILES_TO_LINT < <( 9 | git ls-files | 10 | grep "\.sh$" | 11 | grep -v "ansible/files/registry/images.sh" | 12 | grep -v "ansible/files/registry/registry-run.sh" | 13 | grep -v "ansible/files/registry/upload_image.sh" | 14 | grep -v "ansible/files/registry/upload_image.sh" | 15 | grep -v "bin/accept-invitation.sh" | 16 | grep -v "bin/bootstrap/init.sh" | 17 | grep -v "bin/demo-setup.sh" | 18 | grep -v "bin/generate-image-list.sh" | 19 | grep -v "bin/offline-cluster.sh" | 20 | grep -v "bin/offline-deploy.sh" | 21 | grep -v "bin/offline-env.sh" | 22 | grep -v "bin/offline-secrets.sh" | 23 | grep -v "bin/prod-init.sh" | 24 | grep -v "bin/prod-setup.sh" | 25 | grep -v "bin/secrets.sh" | 26 | grep -v "bin/test-aws-s3-auth-v4.sh" | 27 | grep -v "examples/team-provisioning-qr-codes/generate-user-pdf.sh" | 28 | grep -v "nix/scripts/create-container-dump.sh" | 29 | grep -v "nix/scripts/list-helm-containers.sh" | 30 | grep -v "offline/cd.sh" 31 | ) 32 | 33 | shellcheck -x "${SHELL_FILES_TO_LINT[@]}" 34 | -------------------------------------------------------------------------------- /bin/wiab-demo/offline-env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | ZAUTH_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/../../containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') 6 | export ZAUTH_CONTAINER 7 | 8 | WSD_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/../../containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') 9 | 10 | alias d="sudo docker run --network=host -ti \ 11 | -v \${SSH_AUTH_SOCK:-nonexistent}:/ssh-agent \ 12 | -e SSH_AUTH_SOCK=/ssh-agent \ 13 | -v \$HOME/.ssh:/root/.ssh \ 14 | -v \$PWD:/wire-server-deploy \ 15 | -v /home/ubuntu/.kube:/root/.kube \ 16 | -v /home/ubuntu/.minikube:/home/ubuntu/.minikube \ 17 | -e KUBECONFIG=/root/.kube/config \ 18 | \$WSD_CONTAINER" 19 | -------------------------------------------------------------------------------- /examples/control-planes-only-k8s/README.md: -------------------------------------------------------------------------------- 1 | EXAMPLE: Control plane only K8s nodes 2 | ===================================== 3 | 4 | This example deploys a group of machines ready to deploy a Kubernetes cluster on top, that 5 | only consists of control plane machines. 
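This example ships only a terraform.tfvars (shown further below); a hedged sketch of one way to feed it to the Terraform root module under terraform/environment — the directory layout and invocation here are assumptions, and the environment's Makefile may wrap these steps differently:

```bash
# Sketch only: apply the control-planes-only-k8s example variables.
# Paths are assumptions, not a documented entry point of this repository.
cp examples/control-planes-only-k8s/terraform.tfvars terraform/environment/
cd terraform/environment
terraform init
terraform plan -var-file=terraform.tfvars -out=plan.tfplan
terraform apply plan.tfplan
```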
6 | 7 | 8 | ### Characteristics: 9 | 10 | * deployed on Hetzner 11 | * no load-balancing 12 | -------------------------------------------------------------------------------- /examples/control-planes-only-k8s/terraform.tfvars: -------------------------------------------------------------------------------- 1 | environment = "CHANGE_ME:generic-name" 2 | 3 | root_domain = "CHANGE_ME:FQDN" 4 | 5 | operator_ssh_public_keys = { 6 | terraform_managed = { 7 | "CHANGE_ME:unique-name" = "CHANGE_ME:key-file-content" 8 | } 9 | preuploaded_key_names = [] 10 | } 11 | 12 | k8s_cluster = { 13 | cloud = "hetzner" 14 | 15 | machine_groups = [ 16 | { 17 | group_name = "cpns" 18 | # NOTE: set to 1 in order to get a single-machine Kubernetes cluster 19 | machine_count = 3 20 | machine_type = "cx21" 21 | component_classes = [ "controlplane", "node" ] 22 | } 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /examples/multi-instance-sft/README.md: -------------------------------------------------------------------------------- 1 | EXAMPLE: SFT server in a blue-green deployment 2 | ============================================== 3 | 4 | This example deploys two groups of SFT servers. 5 | 6 | 7 | ### Characteristics: 8 | 9 | * deployed on Hetzner 10 | -------------------------------------------------------------------------------- /examples/multi-instance-sft/terraform.tfvars: -------------------------------------------------------------------------------- 1 | environment = "CHANGE_ME:generic-name" 2 | 3 | root_domain = "CHANGE_ME:FQDN" 4 | 5 | operator_ssh_public_keys = { 6 | terraform_managed = { 7 | "CHANGE_ME:unique-name" = "CHANGE_ME:key-file-content" 8 | } 9 | preuploaded_key_names = [] 10 | } 11 | 12 | sft_server_names_blue = ["1", "2", "3"] 13 | sft_server_type_blue = "cx31" 14 | sft_server_names_green = ["4", "5", "6"] 15 | sft_server_type_green = "cx31" 16 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/README.md: -------------------------------------------------------------------------------- 1 | EXAMPLE: Multiple K8s nodes 2 | =========================== 3 | 4 | This example deploys Wire on a multi-node Kubernetes cluster with one control plane machine. 
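Besides its terraform.tfvars, this example keeps Terraform state in S3 (backend.tfvars) and pins its chart releases in a helmfile.yaml; a hedged end-to-end sketch — the paths, ordering, and the assumption that KUBECONFIG already points at the resulting cluster are mine, not documented here:

```bash
# Sketch only: provision the multi-node example, then deploy its pinned charts.
cd terraform/environment
terraform init -backend-config=../../examples/multi-node-k8s-with-lb-and-dns/backend.tfvars
terraform apply -var-file=../../examples/multi-node-k8s-with-lb-and-dns/terraform.tfvars
# once Kubernetes is up and KUBECONFIG points at the new cluster:
cd ../../examples/multi-node-k8s-with-lb-and-dns
helmfile --file helmfile.yaml sync --concurrency 1
```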
5 | 6 | 7 | ### Characteristics: 8 | 9 | * Helm values serve a demonstration purpose only 10 | * 11 | * DNS for all Wire services 12 | * Kubernetes deployed on Hetzner 13 | * Cert-Manager 14 | * Network Load Balancer 15 | * Ephemeral Databases running on Kubernetes 16 | * on-board Mail server 17 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/backend.tfvars: -------------------------------------------------------------------------------- 1 | bucket = "CHANGE_ME:bucket-name" 2 | key = "CHANGE_ME:path/in/bucket/to/terraform.tfstate" 3 | region = "CHANGE_ME:AWS-region" 4 | dynamodb_table = "CHANGE_ME:shared-state-lock" 5 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/helm_vars/demo-smtp/values.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-PROD: This is often a good default when using calico's default CIDR 2 | # https://github.com/kubernetes-sigs/kubespray/blob/master/docs/calico.md#optional--define-the-default-pool-cidr 3 | # or flannel's https://github.com/kubernetes-sigs/kubespray/blob/master/docs/flannel.md#flannel 4 | # If you override those values, etc., then verify that this CIDR still makes sense 5 | # For all variables the "ixdotai/smtp" image supports see: https://github.com/ix-ai/smtp#readme 6 | envVars: 7 | RELAY_NETWORKS: ":10.233.0.0/16" 8 | # 9 | # PORT: "25" 10 | # NET_DEV: eth0 11 | # OTHER_HOSTNAMES: other.example.com 12 | # DISABLE_IPV6: 1 13 | # BIND_IP: 0.0.0.0 14 | # BIND_IP6: ::0 15 | # MAILNAME: mail.example.com 16 | # DKIM_KEY_PATH: /etc/exim4/dkim.key 17 | # KEY_PATH: /path/to/key.crt 18 | # CERTIFICATE_PATH: /path/to/certificate.crt 19 | # SMARTHOST_ADDRESS: mail.example.com 20 | # SMARTHOST_PORT: "587" 21 | # SMARTHOST_USER: exampleuser 22 | # SMARTHOST_PASSWORD: secret 23 | # SMARTHOST_ALIASES: "*.example.com" 24 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/helm_vars/nginx-ingress-services/values.yaml: -------------------------------------------------------------------------------- 1 | teamSettings: 2 | enabled: false 3 | accountPages: 4 | enabled: true 5 | tls: 6 | enabled: true 7 | useCertManager: true 8 | 9 | certManager: 10 | # CHANGE_ME:to-get-valid-cert 11 | # inTestMode: true 12 | certmasterEmail: "CHANGE_ME:valid-email-address" 13 | 14 | # NOTE: corresponds to ./../../terraform.tfvars 15 | config: 16 | dns: 17 | https: nginz-https.CHANGE_ME:generic-name.CHANGE_ME:FQDN 18 | ssl: nginz-ssl.CHANGE_ME:generic-name.CHANGE_ME:FQDN 19 | webapp: webapp.CHANGE_ME:generic-name.CHANGE_ME:FQDN 20 | fakeS3: assets.CHANGE_ME:generic-name.CHANGE_ME:FQDN 21 | teamSettings: teams.CHANGE_ME:generic-name.CHANGE_ME:FQDN 22 | accountPages: account.CHANGE_ME:generic-name.CHANGE_ME:FQDN 23 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/helmfile.yaml: -------------------------------------------------------------------------------- 1 | helmDefaults: 2 | wait: true 3 | timeout: 600 4 | devel: true 5 | 6 | repositories: 7 | - name: wire 8 | url: 'https://s3-eu-west-1.amazonaws.com/public.wire.com/charts' 9 | - name: wire-develop 10 | url: 'https://s3-eu-west-1.amazonaws.com/public.wire.com/charts-develop' 11 | - name: jetstack 12 | url: 'https://charts.jetstack.io' 13 | 14 | releases: 15 | - name: 'fake-aws' 16 | namespace: 
'wire' 17 | chart: 'wire/fake-aws' 18 | version: 'CHANGE_ME' 19 | 20 | - name: 'databases-ephemeral' 21 | namespace: 'wire' 22 | chart: 'wire/databases-ephemeral' 23 | version: 'CHANGE_ME' 24 | 25 | - name: 'demo-smtp' 26 | namespace: 'wire' 27 | chart: 'wire/demo-smtp' 28 | version: 'CHANGE_ME' 29 | values: 30 | - './helm_vars/demo-smtp/values.yaml' 31 | 32 | - name: 'cert-manager' 33 | namespace: 'cert-manager' 34 | chart: 'jetstack/cert-manager' 35 | version: '1.5.2' 36 | set: 37 | - name: installCRDs 38 | value: true 39 | 40 | - name: 'wire-server' 41 | namespace: 'wire' 42 | chart: 'wire/wire-server' 43 | version: 'CHANGE_ME' 44 | values: 45 | - './helm_vars/wire-server/values.yaml' 46 | secrets: 47 | - './helm_vars/wire-server/secrets.yaml' 48 | 49 | - name: 'ingress-nginx-controller' 50 | namespace: 'wire' 51 | chart: 'wire/ingress-nginx-controller' 52 | version: 'CHANGE_ME' 53 | 54 | - name: 'nginx-ingress-services' 55 | namespace: 'wire' 56 | chart: 'wire/nginx-ingress-services' 57 | version: 'CHANGE_ME' 58 | values: 59 | - './helm_vars/nginx-ingress-services/values.yaml' 60 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/inventory/inventory.yml: -------------------------------------------------------------------------------- 1 | all: 2 | vars: 3 | root_domain: 'CHANGE_ME' 4 | environment_name: 'CHANGE_ME' 5 | 6 | ansible_ssh_user: 'CHANGE_ME' 7 | 8 | 9 | k8s-cluster: 10 | vars: 11 | kube_version: 'CHANGE_ME' 12 | 13 | container_manager: 'CHANGE_ME' 14 | # NOTE: relax handling a list with more than 3 items 15 | # CHANGE_ME:if-using-docker-and-hetzner 16 | # docker_dns_servers_strict: false 17 | 18 | # NOTE: Make sure that internal kube-apiserver requests are always traveling between cluster machines 19 | # directly, regardless whether an external load balancer exists 20 | # DOCS: https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ha-mode.md 21 | loadbalancer_apiserver_localhost: true 22 | 23 | # NOTE: Necessary for the Hetzner Cloud until Calico v3.17 arrives in Kubespray 24 | calico_mtu: 1450 25 | calico_veth_mtu: 1430 26 | 27 | dashboard_enabled: false 28 | -------------------------------------------------------------------------------- /examples/multi-node-k8s-with-lb-and-dns/terraform.tfvars: -------------------------------------------------------------------------------- 1 | environment = "CHANGE_ME:generic-name" 2 | 3 | root_domain = "CHANGE_ME:FQDN" 4 | # NOTE: corresponds to helm_vars/[wire-server,nginx-ingress-services]/values.yaml 5 | sub_domains = [ 6 | "nginz-https", 7 | "nginz-ssl", 8 | "webapp", 9 | "assets", 10 | "account", 11 | "teams" 12 | ] 13 | create_spf_record = true 14 | 15 | operator_ssh_public_keys = { 16 | terraform_managed = { 17 | "CHANGE_ME:unique-name" = "CHANGE_ME:key-file-content" 18 | } 19 | preuploaded_key_names = [] 20 | } 21 | 22 | k8s_cluster = { 23 | cloud = "hetzner" 24 | 25 | # NOTE: corresponds to wire-server/charts/ingress-nginx-controller/values.yaml#nodePorts 26 | load_balancer_ports = [ 27 | { 28 | name = "http" 29 | protocol = "tcp" 30 | listen = 80 31 | destination = 31772 32 | }, 33 | { 34 | name = "https" 35 | protocol = "tcp" 36 | listen = 443 37 | destination = 31773 38 | } 39 | ] 40 | 41 | machine_groups = [ 42 | { 43 | group_name = "cps" 44 | machine_count = 1 45 | machine_type = "cx21" 46 | component_classes = [ "controlplane" ] 47 | }, 48 | 49 | { 50 | group_name = "nodes" 51 | machine_count = 2 52 | machine_type = "cpx41" 53 | 
component_classes = [ "node" ] 54 | }, 55 | ] 56 | } 57 | -------------------------------------------------------------------------------- /helm/Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /usr/bin/env bash -eo pipefail 2 | 3 | 4 | # Overwriteable variables 5 | #ENV_DIR 6 | 7 | # Internal variables 8 | ENVIRONMENTS_DIR := $(abspath $(CURDIR)/../../cailleach/environments) 9 | 10 | 11 | 12 | ifndef ENV_DIR 13 | ifndef ENV 14 | $(error please define either ENV or ENV_DIR) 15 | else 16 | ENV_DIR = $(ENVIRONMENTS_DIR)/$(ENV) 17 | endif 18 | endif 19 | 20 | 21 | 22 | ################################### HELM ################################### 23 | 24 | .PHONY: deploy 25 | deploy: check-helm-inputs 26 | KUBECONFIG=$(ENV_DIR)/kubeconfig.dec \ 27 | helmfile \ 28 | --file $(ENV_DIR)/helmfile.yaml \ 29 | sync \ 30 | --concurrency 1 31 | 32 | 33 | 34 | ############################### CREDENTIALS ################################ 35 | 36 | .PHONY: decrypt 37 | decrypt: kubeconfig.dec 38 | 39 | .DELETE_ON_ERROR: $(ENV_DIR)/kubeconfig.dec 40 | .PHONY: kubeconfig.dec 41 | kubeconfig.dec: check-env-dir 42 | @if [ ! -e $(ENV_DIR)/$(basename $(@)) ]; then exit 0; fi 43 | sops -d $(ENV_DIR)/$(basename $(@)) > $(ENV_DIR)/$(@) 44 | chmod 0600 $(ENV_DIR)/$(@) 45 | @test -s $(ENV_DIR)/$(@) || (echo "[ERR] Failed decrypting kubeconfig" && exit 1) 46 | 47 | 48 | 49 | ################################ FAIL-SAFES ################################ 50 | 51 | .PHONY: check-env-dir 52 | check-env-dir: $(ENV_DIR) 53 | $(ENV_DIR): 54 | $(error directory: $(ENV_DIR) must exist) 55 | 56 | 57 | .PHONY: check-helm-inputs 58 | check-helm-inputs: $(ENV_DIR)/kubeconfig.dec 59 | 60 | $(ENV_DIR)/kubeconfig.dec: 61 | $(error please make sure Kubernetes is installed and $(ENV_DIR)/kubeconfig.dec exists) 62 | -------------------------------------------------------------------------------- /nix/docker-alpine.nix: -------------------------------------------------------------------------------- 1 | { 2 | imageName = "alpine"; 3 | imageDigest = "sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300"; 4 | sha256 = "1wmrq8x0l5sjrwlklvfkabmxpn0qphik1gb37i04x8jm8bjiisip"; 5 | finalImageName = "alpine"; 6 | finalImageTag = "3"; 7 | } 8 | -------------------------------------------------------------------------------- /nix/pkgs/helm-mapkubeapis.nix: -------------------------------------------------------------------------------- 1 | { buildGoModule, fetchFromGitHub, lib }: 2 | 3 | buildGoModule rec { 4 | pname = "helm-mapkubeapis"; 5 | # in case you change this version, ensure to set sha256 to empty string, as it will 6 | # otherwise recompile but not actually update the version. Nix is not intuitive 7 | # at all, this sucks! But you've been warned. :) 8 | version = "0.1.0"; 9 | 10 | src = fetchFromGitHub { 11 | owner = "helm"; 12 | repo = pname; 13 | rev = "v${version}"; 14 | sha256 = "sha256-OIom+fMjLkbYXbxCsISuihdr3CWjUnkucTnDfoix9B0="; 15 | }; 16 | 17 | vendorHash = "sha256-jqVzBRlGFhDHaiSF9AArJdt4KRCiUqUuo0CnJUTbSfE="; 18 | 19 | # NOTE: Remove the install and upgrade hooks. 
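# Illustration (hypothetical contents; the real plugin.yaml may differ): the sed
# below deletes the `hooks:` key and the two lines following it, i.e. a stanza
# roughly of the shape
#   hooks:
#     install: <some install script>
#     update: <some update script>
# presumably so the plugin does not try to run those scripts when installed from the nix store.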
20 | postPatch = '' 21 | sed -i '/^hooks:/,+2 d' plugin.yaml 22 | ''; 23 | 24 | checkPhase = '' 25 | ''; 26 | 27 | postInstall = '' 28 | install -dm755 $out/${pname} 29 | mv $out/bin $out/${pname}/ 30 | install -m644 -Dt $out/${pname}/config/ config/Map.yaml 31 | install -m644 -Dt $out/${pname} plugin.yaml 32 | ''; 33 | 34 | meta = with lib; { 35 | description = "A Helm plugin to map helm release deprecated Kubernetes APIs in-place"; 36 | homepage = "https://github.com/helm/helm-mapkubeapis"; 37 | license = licenses.asl20; 38 | maintainers = with maintainers; [ ]; 39 | }; 40 | } 41 | -------------------------------------------------------------------------------- /nix/pkgs/kubernetes-tools.nix: -------------------------------------------------------------------------------- 1 | { buildGoModule, runtimeShell, fetchFromGitHub, makeWrapper, which, rsync, stdenv, fetchurl }: 2 | 3 | 4 | buildGoModule rec { 5 | pname = "kubernetes"; 6 | version = "1.29.10"; 7 | 8 | src = fetchFromGitHub { 9 | owner = "kubernetes"; 10 | repo = "kubernetes"; 11 | rev = "v${version}"; 12 | hash = "sha256-28cgqn/PRWJyb5uRKW/moX0kCDzEPAxc+YAkZBq/j2U="; 13 | }; 14 | 15 | vendorHash = null; 16 | 17 | doCheck = false; 18 | 19 | nativeBuildInputs = [ makeWrapper which rsync ]; 20 | 21 | outputs = [ "out" ]; 22 | 23 | buildPhase = '' 24 | runHook preBuild 25 | substituteInPlace "hack/update-generated-docs.sh" --replace "make" "make SHELL=${runtimeShell}" 26 | patchShebangs ./hack ./cluster/addons/addon-manager 27 | make "SHELL=${runtimeShell}" "WHAT=cmd/kubeadm cmd/kubectl" 28 | ./hack/update-generated-docs.sh 29 | runHook postBuild 30 | ''; 31 | 32 | installPhase = '' 33 | runHook preInstall 34 | for p in cmd/kubeadm cmd/kubectl; do 35 | install -D _output/local/go/bin/''${p##*/} -t $out/bin 36 | done 37 | 38 | runHook postInstall 39 | ''; 40 | } 41 | -------------------------------------------------------------------------------- /nix/scripts/create-build-entry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eou pipefail 3 | 4 | if [ "$#" -ne 2 ]; then 5 | echo "Usage: $0 " 6 | exit 1 7 | fi 8 | 9 | IMAGE_WITH_TAG=$1 10 | DIRECTORY=$2 11 | 12 | IMAGE=$(echo "$IMAGE_WITH_TAG" | cut -d':' -f1) 13 | TAG=$(echo "$IMAGE_WITH_TAG" | cut -d':' -f2) 14 | 15 | JSON_FILE="$DIRECTORY/images.json" 16 | 17 | if [ ! -d "$DIRECTORY" ]; then 18 | mkdir -p "$DIRECTORY" 19 | fi 20 | 21 | append_image_entry() { 22 | local image=$1 23 | local tag=$2 24 | local json_file=$3 25 | 26 | if [ -f "$json_file" ]; then 27 | existing_content=$(jq '.' "$json_file") 28 | 29 | new_entry=$(jq -n --arg image "$image" --arg tag "$tag" '{$image: $tag}') 30 | updated_content=$(echo "$existing_content" | jq --argjson new_entry "$new_entry" '. += [$new_entry]') 31 | else 32 | updated_content=$(jq -n --arg image "$image" --arg tag "$tag" '[{$image: $tag}]') 33 | fi 34 | 35 | echo "$updated_content" | jq '.' 
> "$json_file" 36 | } 37 | 38 | append_image_entry "$IMAGE" "$TAG" "$JSON_FILE" 39 | -------------------------------------------------------------------------------- /nix/scripts/create-offline-artifact.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mkdir -p assets assets/containers-{helm,other,system} assets/debs assets/binaries 4 | 5 | mirror-apt-jammy assets/debs 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /nix/scripts/generate-gpg1-key.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eou pipefail 3 | 4 | # This will create a gpg1 private key with uid gpg@wire.com, and output it as 5 | # ascii-armoured to stdout. 6 | 7 | GNUPGHOME=$(mktemp -d) 8 | export GNUPGHOME 9 | trap 'rm -Rf -- "$GNUPGHOME"' EXIT 10 | 11 | # configure gpg to use a custom keyring, because aptly reads from it 12 | gpg="gpg --keyring=$GNUPGHOME/trustedkeys.gpg --no-default-keyring" 13 | 14 | # create a gpg signing key. This is temporary for now, in the future, there 15 | # will be a stable signing key and official releases for this. 16 | cat > "$GNUPGHOME"/keycfg < B[Cailleach pipeline triggered] 11 | B --> C[helm-chart-main pipeline runs] 12 | C --> D[Builds chart manifest in the build.json file and bumps the version in the wire-builds repo] 13 | D --> E[build.json lists all charts] 14 | E --> F[WSD's offline build pipeline runs proc_pull_charts.sh via build.sh] 15 | F --> G[Pipeline downloads chart dependencies & container images] 16 | G --> H[Bundles everything into offline artifacts] 17 | ``` -------------------------------------------------------------------------------- /offline/stackIT-wiab.md: -------------------------------------------------------------------------------- 1 | # StackIT Deployment and Configuration Guide 2 | 3 | This guide has been moved to [Wire-in-a-Box Deployment Guide](./demo-wiab.md#wire-in-a-box-deployment-guide), please find instructions to deploy Wire-in-a-Box (WIAB) using Ansible on a stackIT VM with Ubuntu 24.04 system. 4 | -------------------------------------------------------------------------------- /offline/tasks/build_adminhost_containers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | usage() { 5 | echo "usage: $0 OUTPUT-DIR [--adminhost] [--zauth]" >&2 6 | exit 1 7 | } 8 | 9 | if [[ $# -lt 1 ]]; then 10 | usage 11 | fi 12 | 13 | OUTPUT_DIR="$1" 14 | shift 15 | 16 | ADMINHOST=false 17 | ZAUTH=false 18 | 19 | while [[ $# -gt 0 ]]; do 20 | case $1 in 21 | --adminhost) 22 | ADMINHOST=true 23 | ;; 24 | --zauth) 25 | ZAUTH=true 26 | ;; 27 | *) 28 | usage 29 | ;; 30 | esac 31 | shift 32 | done 33 | 34 | if [ "$ADMINHOST" = false ] && [ "$ZAUTH" = false ]; then 35 | echo "Error: Neither --adminhost nor --zauth option was passed. At least one is required." >&2 36 | usage 37 | fi 38 | 39 | INDEX_FILE="${OUTPUT_DIR}/containers-adminhost/index.txt" 40 | 41 | if [ "$ZAUTH" = true ]; then 42 | echo "Building zauth container image in ${OUTPUT_DIR} ..." 
43 | wire_version=$(helm show chart "${OUTPUT_DIR}"/charts/wire-server | yq -r .version) 44 | echo "quay.io/wire/zauth:$wire_version" | create-container-dump "${OUTPUT_DIR}"/containers-adminhost 45 | mv "${OUTPUT_DIR}/containers-adminhost/images.json" "${OUTPUT_DIR}"/versions/containers_adminhost_images.json 46 | fi 47 | 48 | if [ "$ADMINHOST" = true ]; then 49 | echo "Building adminhost container images in ${OUTPUT_DIR} ..." 50 | container_image=$(nix-build --no-out-link -A container) 51 | install -m755 "$container_image" "${OUTPUT_DIR}"/containers-adminhost/container-wire-server-deploy.tgz 52 | echo "container-wire-server-deploy.tgz" >> "${INDEX_FILE}" 53 | fi 54 | -------------------------------------------------------------------------------- /offline/tasks/build_linux_pkgs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | if [[ ! $# -eq 2 ]]; then 5 | echo "usage: $0 OUTPUT-DIR ROOT-DIR" >&2 6 | exit 1 7 | fi 8 | 9 | OUTPUT_DIR="$1" 10 | ROOT_DIR="$2" 11 | 12 | echo "Building Linux packages ${OUTPUT_DIR} ..." 13 | 14 | function write-debian-builds-json() { 15 | 16 | JSON_FILE="${OUTPUT_DIR}/versions/debian-builds.json" 17 | echo "Creating $JSON_FILE" 18 | echo "[]" > "$JSON_FILE" 19 | 20 | find "${OUTPUT_DIR}/debs-jammy/pool/" -type f -name "*.deb" | while read -r pkg; do 21 | pkg_info=$(dpkg-deb --info "$pkg") 22 | name=$(echo "$pkg_info" | awk '/Package:/ {print $2}') 23 | version=$(echo "$pkg_info" | awk '/Version:/ {print $2}') 24 | source=$(echo "$pkg_info" | awk '/Source:/ {print $2}') 25 | jq --arg name "$name" --arg version "$version" --arg source "$source" \ 26 | '. += [{ name: $name, version: $version, source: $source }]' "$JSON_FILE" > "${JSON_FILE}.tmp" && mv "${JSON_FILE}.tmp" "$JSON_FILE" 27 | done 28 | } 29 | 30 | mirror-apt-jammy "${OUTPUT_DIR}"/debs-jammy 31 | write-debian-builds-json 32 | 33 | tar cf "${OUTPUT_DIR}"/debs-jammy.tar -C "${OUTPUT_DIR}" debs-jammy 34 | rm -r "${OUTPUT_DIR}"/debs-jammy 35 | 36 | fingerprint=$(echo "$GPG_PRIVATE_KEY" | gpg --with-colons --import-options show-only --import --fingerprint | awk -F: '$1 == "fpr" {print $10; exit}') 37 | 38 | echo "$fingerprint" 39 | 40 | echo "docker_ubuntu_repo_repokey: '${fingerprint}'" > "${ROOT_DIR}"/ansible/inventory/offline/group_vars/all/key.yml 41 | -------------------------------------------------------------------------------- /offline/tasks/post_chart_process_0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | if [[ ! $# -eq 1 ]]; then 5 | echo "usage: $0 OUTPUT-DIR" >&2 6 | exit 1 7 | fi 8 | 9 | OUTPUT_DIR="$1" 10 | 11 | echo "Running post-chart process script 0 in dir ${OUTPUT_DIR} ..." 12 | 13 | # Undo changes on wire-server values.yaml 14 | sed -i -Ee 's/useSharedFederatorSecret: true/useSharedFederatorSecret: false/' "${OUTPUT_DIR}"/charts/wire-server/charts/federator/values.yaml 15 | sed -i -Ee 's/federation: true/federation: false/' "${OUTPUT_DIR}"/values/wire-server/prod-values.example.yaml 16 | -------------------------------------------------------------------------------- /offline/tasks/pre_chart_process_0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | if [[ ! 
$# -eq 1 ]]; then 5 | echo "usage: $0 OUTPUT-DIR" >&2 6 | exit 1 7 | fi 8 | 9 | OUTPUT_DIR="$1" 10 | 11 | echo "Running pre-chart process script 0 in dir $OUTPUT_DIR ..." 12 | 13 | # Patch wire-server values.yaml to include federator 14 | # This is needed to bundle its image. 15 | sed -i -Ee 's/federation: false/federation: true/' "${OUTPUT_DIR}"/values/wire-server/prod-values.example.yaml 16 | sed -i -Ee 's/useSharedFederatorSecret: false/useSharedFederatorSecret: true/' "${OUTPUT_DIR}"/charts/wire-server/charts/federator/values.yaml 17 | 18 | # drop step-certificates/.../test-connection.yaml because it lacks an image tag 19 | # cf. https://github.com/smallstep/helm-charts/pull/196/files 20 | rm -v "${OUTPUT_DIR}"/charts/step-certificates/charts/step-certificates/templates/tests/* 21 | -------------------------------------------------------------------------------- /offline/tasks/pre_clean_values_0.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x -euo pipefail 3 | 4 | # Default exclude list 5 | VALUES_DIR="" 6 | HELM_CHART_EXCLUDE_LIST="inbucket,wire-server-enterprise" 7 | 8 | # Parse the arguments 9 | for arg in "$@" 10 | do 11 | case $arg in 12 | VALUES_DIR=*) 13 | VALUES_DIR="${arg#*=}" 14 | ;; 15 | HELM_CHART_EXCLUDE_LIST=*) 16 | HELM_CHART_EXCLUDE_LIST="${arg#*=}" 17 | ;; 18 | *) 19 | echo "Unknown argument: $arg" >&2 20 | exit 1 21 | ;; 22 | esac 23 | done 24 | 25 | # Check if VALUES_DIR is set 26 | if [[ -z "$VALUES_DIR" ]]; then 27 | echo "usage: $0 VALUES_DIR=\"values-dir\" [HELM_CHART_EXCLUDE_LIST=\"chart1,chart2,...\"]" >&2 28 | exit 1 29 | fi 30 | 31 | echo "Running pre-clean values process script 0 in dir $VALUES_DIR ..." 32 | 33 | # Split the HELM_CHART_EXCLUDE_LIST into an array 34 | IFS=',' read -r -a EXCLUDE_ARRAY <<< "$HELM_CHART_EXCLUDE_LIST" 35 | 36 | # Iterate over each chart in the exclude list 37 | for CHART in "${EXCLUDE_ARRAY[@]}"; do 38 | CHART_DIR="$VALUES_DIR/$CHART" 39 | if [[ -d "$CHART_DIR" ]]; then 40 | echo "Removing values directory: $CHART_DIR" 41 | rm -rf "$CHART_DIR" 42 | else 43 | echo "Directory does not exist: $CHART_DIR" 44 | fi 45 | done 46 | -------------------------------------------------------------------------------- /offline/tasks/proc_system_containers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | if [[ ! $# -eq 1 ]]; then 5 | echo "usage: $0 OUTPUT-DIR" >&2 6 | exit 1 7 | fi 8 | 9 | OUTPUT_DIR="$1" 10 | 11 | echo "Creating system containers tarball ${OUTPUT_DIR} ..." 12 | 13 | function list-system-containers() { 14 | # These are manually updated with values from 15 | # https://github.com/kubernetes-sigs/kubespray/blob/release-2.24/roles/kubespray-defaults/defaults/main/download.yml 16 | # TODO: Automate this. This is very unwieldy :) 17 | cat <&2 6 | exit 1 7 | fi 8 | 9 | OUTPUT_DIR="$1" 10 | ROOT_DIR="$2" 11 | 12 | echo "Processing wire binaries ${OUTPUT_DIR} ..."
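# The binaries come from the nix derivation pkgs.wire-binaries (see
# nix/pkgs/wire-binaries.nix); they are copied into binaries/, bundled into
# binaries.tar, and their versions are recorded under versions/ by
# write_wire_binaries_json below.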
13 | 14 | install -m755 "$(nix-build --no-out-link -A pkgs.wire-binaries)/"* "${OUTPUT_DIR}"/binaries/ 15 | 16 | tar cf "${OUTPUT_DIR}"/binaries.tar -C "${OUTPUT_DIR}" binaries 17 | rm -r "${OUTPUT_DIR}"/binaries 18 | 19 | function write_wire_binaries_json() { 20 | temp_dir=$(mktemp -d -p "${OUTPUT_DIR}") 21 | 22 | # "Get" all the binaries from the .nix file 23 | sed -n '/_version/p' "${ROOT_DIR}/nix/pkgs/wire-binaries.nix" | grep -v '\.version' | grep -v 'url' > "${temp_dir}/wire-binaries.json.tmp" 24 | 25 | echo "[" > "${temp_dir}/wire-binaries.json.formatted" 26 | # Format it into JSON 27 | sed -E '/\.url|\.version/!s/([a-z_]+)_version = "(.*)";/{\n "\1": { "version": "\2" }\n},/' "${temp_dir}/wire-binaries.json.tmp" >> "${temp_dir}/wire-binaries.json.formatted" 28 | # remove trailing comma -.- 29 | sed -i '$ s/,$//' "${temp_dir}/wire-binaries.json.formatted" 30 | 31 | echo "]" >> "${temp_dir}/wire-binaries.json.formatted" 32 | 33 | echo "Writing wire binaries into ${OUTPUT_DIR}/versions/wire-binaries.json" 34 | mv "${temp_dir}/wire-binaries.json.formatted" "${OUTPUT_DIR}/versions/wire-binaries.json" 35 | rm -rf "${temp_dir}" 36 | } 37 | 38 | write_wire_binaries_json 39 | -------------------------------------------------------------------------------- /offline/tasks/process_charts.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | OUTPUT_DIR="" 5 | # Default exclude list 6 | IMAGE_EXCLUDE_LIST="" 7 | 8 | # Parse the arguments 9 | for arg in "$@" 10 | do 11 | case $arg in 12 | OUTPUT_DIR=*) 13 | OUTPUT_DIR="${arg#*=}" 14 | ;; 15 | IMAGE_EXCLUDE_LIST=*) 16 | IMAGE_EXCLUDE_LIST="${arg#*=}" 17 | ;; 18 | *) 19 | echo "Unknown argument: $arg" >&2 20 | exit 1 21 | ;; 22 | esac 23 | done 24 | 25 | # Check if OUTPUT_DIR is set 26 | if [[ -z "$OUTPUT_DIR" ]]; then 27 | echo "usage: $0 OUTPUT_DIR=\"output-dir\" [IMAGE_EXCLUDE_LIST=\"image1\|image2...\"]" >&2 28 | exit 1 29 | fi 30 | 31 | echo "Processing Helm charts in ${OUTPUT_DIR}" 32 | 33 | HELM_IMAGE_TREE_FILE="${OUTPUT_DIR}/versions/helm_image_tree.json" 34 | 35 | # Check if IMAGE_EXCLUDE_LIST is set, otherwise use a default pattern that matches nothing 36 | EXCLUDE_PATTERN=${IMAGE_EXCLUDE_LIST:-".^"} 37 | 38 | echo "Excluding images matching the pattern: $EXCLUDE_PATTERN" 39 | 40 | # Get and dump required containers from Helm charts. Omit integration test 41 | # containers (e.g. `quay.io_wire_galley-integration_4.22.0`.) 42 | for chartPath in "${OUTPUT_DIR}"/charts/*; do 43 | echo "$chartPath" 44 | done | list-helm-containers VALUES_DIR="${OUTPUT_DIR}"/values HELM_IMAGE_TREE_FILE="$HELM_IMAGE_TREE_FILE" | grep -v "\-integration:" > "${OUTPUT_DIR}"/images 45 | 46 | # Omit integration test 47 | # containers (e.g. `quay.io_wire_galley-integration_4.22.0`.) 
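# Also strip the *-integration entries from the helm image tree written by
# list-helm-containers, so it stays consistent with the filtered image list
# created above.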
48 | sed -i '/-integration/d' "${HELM_IMAGE_TREE_FILE}" 49 | 50 | grep -vE "$EXCLUDE_PATTERN" "${OUTPUT_DIR}"/images | create-container-dump "${OUTPUT_DIR}"/containers-helm 51 | 52 | tar cf "${OUTPUT_DIR}"/containers-helm.tar -C "${OUTPUT_DIR}" containers-helm 53 | mv "${OUTPUT_DIR}/containers-helm/images.json" "${OUTPUT_DIR}"/versions/containers_helm_images.json -------------------------------------------------------------------------------- /terraform/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform 2 | -------------------------------------------------------------------------------- /terraform/environment/Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /usr/bin/env bash 2 | ROOT_DIR := ${shell dirname ${realpath ${firstword ${MAKEFILE_LIST}}}} 3 | TOK = hcloud-token 4 | SSH = operator-ssh 5 | 6 | # Please ignore the following line if you're not a wire employee 7 | CAILLEACH_DIR:=${abspath ${ROOT_DIR}/../../../cailleach} 8 | 9 | export TF_DATA_DIR=${ENV_DIR}/.terraform 10 | 11 | .PHONY: init 12 | init: check-env 13 | terraform init -backend-config=${ENV_DIR}/backend.tfvars 14 | 15 | .PHONY: output 16 | output: check-env 17 | terraform output -json 18 | 19 | .PHONY: force-unlock 20 | force-unlock: check-env 21 | ifndef LOCK_ID 22 | ${error please define LOCK_ID} 23 | endif 24 | terraform force-unlock ${LOCK_ID} ${ROOT_DIR} 25 | 26 | .PHONY: create-inventory 27 | create-inventory: check-env 28 | mkdir -p ${ENV_DIR}/gen && \ 29 | terraform output -json inventory > ${ENV_DIR}/gen/terraform-inventory.yml 30 | 31 | .PHONY: apply plan console destroy 32 | apply plan console destroy: check-env 33 | source ${ENV_DIR}/hcloud-token.dec && \ 34 | terraform $@ -var-file=${ENV_DIR}/terraform.tfvars 35 | 36 | .PHONY: check-env 37 | check-env: 38 | ifndef ENV_DIR 39 | ifndef ENV 40 | ${error please define either ENV or ENV_DIR} 41 | else 42 | ENV_DIR=${CAILLEACH_DIR}/environments/${ENV} 43 | endif 44 | endif 45 | 46 | .PHONY: decrypt 47 | decrypt: ${ENV_DIR}/${TOK}.dec ${ENV_DIR}/${SSH}.dec 48 | 49 | ${ENV_DIR}/${TOK}.dec: check-env 50 | echo ${ENV_DIR}/${TOK}.dec 51 | sops -d ${ENV_DIR}/${TOK} > ${ENV_DIR}/${TOK}.dec 52 | 53 | ${ENV_DIR}/${SSH}.dec: check-env 54 | sops -d ${ENV_DIR}/${SSH} > ${ENV_DIR}/${SSH}.dec 55 | chmod 0600 ${ENV_DIR}/${SSH}.dec 56 | 57 | .PHONY: clean 58 | clean: check-env 59 | rm ${ENV_DIR}/*.dec 60 | -------------------------------------------------------------------------------- /terraform/environment/aws.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | default = "eu-central-1" 3 | } 4 | 5 | provider "aws" { 6 | region = var.aws_region 7 | } 8 | -------------------------------------------------------------------------------- /terraform/environment/hcloud.tf: -------------------------------------------------------------------------------- 1 | provider "hcloud" { 2 | # NOTE: You must have a HCLOUD_TOKEN environment variable set! 
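  # When using the Makefile in terraform/environment, the token is decrypted
  # from ${ENV_DIR}/hcloud-token.dec (see the `decrypt` target) and sourced
  # right before terraform runs, e.g.:
  #   source $ENV_DIR/hcloud-token.dec && terraform plan -var-file=$ENV_DIR/terraform.tfvars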
3 | } 4 | 5 | resource "hcloud_ssh_key" "operator_ssh" { 6 | for_each = var.operator_ssh_public_keys.terraform_managed 7 | name = each.key 8 | public_key = each.value 9 | } 10 | 11 | locals { 12 | hcloud_ssh_keys = concat( 13 | [for key in hcloud_ssh_key.operator_ssh: key.name], 14 | tolist(var.operator_ssh_public_keys.preuploaded_key_names) 15 | ) 16 | } 17 | -------------------------------------------------------------------------------- /terraform/environment/hcloud.vars.tf: -------------------------------------------------------------------------------- 1 | variable "hcloud_image" { 2 | default = "ubuntu-22.04" 3 | } 4 | 5 | variable "hcloud_location" { 6 | default = "nbg1" 7 | } 8 | 9 | variable "operator_ssh_public_keys" { 10 | type = object({ 11 | terraform_managed = map(string) # Map of key name to the public key content 12 | preuploaded_key_names = set(string) 13 | }) 14 | validation { 15 | condition = ( 16 | length(var.operator_ssh_public_keys.terraform_managed) > 0 || 17 | length(var.operator_ssh_public_keys.preuploaded_key_names) > 0 18 | ) 19 | error_message = "At least one key must be provided." 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /terraform/environment/inventory.tf: -------------------------------------------------------------------------------- 1 | # Generates an inventory file to be used by ansible. Ideally, we would generate 2 | # this outside terraform using outputs, but it is not possible to use 'terraform 3 | # output' when the init directory is different from the root code directory. 4 | # Terraform Issue: https://github.com/hashicorp/terraform/issues/17300 5 | output "inventory" { 6 | value = merge( 7 | local.sft_inventory, 8 | local.k8s_cluster_inventory 9 | ) 10 | } 11 | -------------------------------------------------------------------------------- /terraform/environment/kubernetes.cluster.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | machines = flatten([ 3 | for g in try(var.k8s_cluster.machine_groups, []) : [ 4 | for mid in range(1, 1 + lookup(g, "machine_count", 1)) : merge( 5 | # NOTE: destruct group configuration and removing 'machine_count' 6 | { for k,v in g : k => v if k != "machine_count" }, 7 | { machine_id = format("%02d", mid) } 8 | ) 9 | ] 10 | ]) 11 | # NOTE: set 'with_load_balancer' to true if not defined but LB ports are defined, thus 'load_balancer' may become optional 12 | load_balancer_is_used = lookup(var.k8s_cluster, "load_balancer", length(lookup(var.k8s_cluster, "load_balancer_ports", [])) > 0) 13 | } 14 | 15 | module "hetzner_k8s_cluster" { 16 | for_each = toset(try(var.k8s_cluster.cloud == "hetzner", false) ? 
[var.environment] : []) 17 | 18 | source = "./../modules/hetzner-kubernetes" 19 | 20 | cluster_name = each.key 21 | machines = local.machines 22 | ssh_keys = local.hcloud_ssh_keys 23 | with_load_balancer = local.load_balancer_is_used 24 | lb_port_mappings = lookup(var.k8s_cluster, "load_balancer_ports", []) 25 | } 26 | -------------------------------------------------------------------------------- /terraform/environment/kubernetes.cluster.vars.tf: -------------------------------------------------------------------------------- 1 | # FUTUREWORK: replace 'any' by implementing https://www.terraform.io/docs/language/functions/defaults.html 2 | # 3 | variable "k8s_cluster" { 4 | description = "represents Kubernetes cluster" 5 | # type = object({ 6 | # cloud = string 7 | # load_balancer = optional(bool) 8 | # load_balancer_ports = optional(list( 9 | # object({ 10 | # name = string 11 | # protocol = string 12 | # listen = number 13 | # destination = number 14 | # }) 15 | # )) 16 | # machine_groups = list(object({ 17 | # group_name = string 18 | # machine_count = optional(number) 19 | # machine_type = string 20 | # component_classes = list(string) 21 | # volume = optional(object({ 22 | # size = number 23 | # format = optional(string) 24 | # })) 25 | # })) 26 | # }) 27 | type = any 28 | default = {} 29 | } 30 | -------------------------------------------------------------------------------- /terraform/environment/kubernetes.dns.tf: -------------------------------------------------------------------------------- 1 | module "kubernetes-dns-records" { 2 | for_each = toset(var.root_domain != null && length(var.sub_domains) > 0 ? [var.environment] : []) 3 | 4 | source = "../modules/aws-dns-records" 5 | 6 | zone_fqdn = var.root_domain 7 | domain = var.environment 8 | subdomains = var.sub_domains 9 | ips = module.hetzner_k8s_cluster[var.environment].ips 10 | # NOTE: this list could have been generated similar to ./kubernetes.inventory.tf, but 11 | # Terraform thinks differently. While building up the dependency tree, it appears 12 | # that it is not able to see indirect dependencies, e.g. local.cluster_machines. # 13 | # It fails at modules/aws-dns-records/resources.route53.tf resource aws_route53_record.spf.count 14 | # with: 15 | # 16 | # The "count" value depends on resource attributes that cannot be determined until apply 17 | # 18 | # So, in order to work around this, a second output for public node IPs is being introduced. 19 | spf_record_ips = module.hetzner_k8s_cluster[var.environment].node_ips 20 | 21 | srvs = var.srvs 22 | } 23 | -------------------------------------------------------------------------------- /terraform/environment/kubernetes.dns.vars.tf: -------------------------------------------------------------------------------- 1 | variable "root_domain" { 2 | type = string 3 | default = null 4 | } 5 | 6 | variable "sub_domains" { 7 | type = list(string) 8 | default = [] 9 | } 10 | 11 | variable "create_spf_record" { 12 | type = bool 13 | default = false 14 | } 15 | 16 | variable "srvs" { 17 | type = map(list(string)) 18 | default = {} 19 | } 20 | -------------------------------------------------------------------------------- /terraform/environment/kubernetes.inventory.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | cluster_machines = try(module.hetzner_k8s_cluster[var.environment].machines, []) 3 | } 4 | 5 | locals { 6 | k8s_cluster_inventory = length(local.cluster_machines) > 0 ? 
{ 7 | kube-master = { hosts = { for m in local.cluster_machines : m.hostname => {} if contains(m.component_classes, "controlplane" ) } } 8 | kube-node = { hosts = { for m in local.cluster_machines : m.hostname => {} if contains(m.component_classes, "node" ) } } 9 | etcd = { hosts = { for m in local.cluster_machines : m.hostname => {} if contains(keys(m), "etcd_member_name" ) } } 10 | k8s-cluster = { 11 | children = { 12 | kube-master = {} 13 | kube-node = {} 14 | } 15 | hosts = {for m in local.cluster_machines : 16 | m.hostname => merge( 17 | { 18 | ansible_host = m.public_ipv4 19 | ip = m.private_ipv4 20 | }, 21 | contains(keys(m), "etcd_member_name" ) ? { etcd_member_name = m.etcd_member_name } : {} 22 | ) 23 | } 24 | vars = merge( 25 | { 26 | # NOTE: instead of setting static inventory variables here, please consider placing them 27 | # instead in the inventory of the respective environment 28 | }, 29 | local.load_balancer_is_used ? { 30 | apiserver_loadbalancer_domain_name = module.hetzner_k8s_cluster[var.environment].ips[0] 31 | loadbalancer_apiserver = { address = module.hetzner_k8s_cluster[var.environment].ips[0] } 32 | } : tomap({}) 33 | ) 34 | } 35 | } : tomap({}) 36 | } 37 | -------------------------------------------------------------------------------- /terraform/environment/main.vars.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | type = string 3 | } 4 | -------------------------------------------------------------------------------- /terraform/environment/sft.inventory.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | sft_instances_blue = flatten(module.sft[*].sft.instances_blue) 3 | sft_instances_green = flatten(module.sft[*].sft.instances_green) 4 | } 5 | 6 | locals { 7 | sft_inventory = { 8 | sft_servers = { 9 | hosts = { for instance in concat(local.sft_instances_blue, local.sft_instances_green): instance.hostname => { 10 | ansible_host = instance.ipaddress 11 | sft_fqdn = instance.fqdn 12 | srv_announcer_record_target = instance.fqdn 13 | srv_announcer_zone_domain = var.root_domain 14 | srv_announcer_aws_key_id = module.sft[0].sft.aws_key_id 15 | srv_announcer_aws_access_key = module.sft[0].sft.aws_access_key 16 | srv_announcer_aws_region = module.sft[0].sft.aws_region 17 | srv_announcer_record_name = "_sft._tcp.${var.environment}" 18 | ansible_python_interpreter = "/usr/bin/python3" 19 | ansible_ssh_user = "root" 20 | }} 21 | } 22 | sft_servers_blue = { 23 | hosts = { for instance in local.sft_instances_blue : instance.hostname => {} } 24 | } 25 | sft_servers_green = { 26 | hosts = { for instance in local.sft_instances_green : instance.hostname => {} } 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /terraform/environment/sft.tf: -------------------------------------------------------------------------------- 1 | module "sft" { 2 | count = min(1, length(setunion(var.sft_server_names_blue, var.sft_server_names_green))) 3 | 4 | source = "../modules/sft" 5 | root_domain = var.root_domain 6 | environment = var.environment 7 | a_record_ttl = var.sft_a_record_ttl 8 | image = var.hcloud_image 9 | location = var.hcloud_location 10 | ssh_keys = local.hcloud_ssh_keys 11 | server_groups = { 12 | blue = { 13 | server_names = var.sft_server_names_blue 14 | server_type = var.sft_server_type_blue 15 | } 16 | green = { 17 | server_names = var.sft_server_names_green 18 | server_type = 
var.sft_server_type_green 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /terraform/environment/sft.vars.tf: -------------------------------------------------------------------------------- 1 | variable "sft_server_names_blue" { 2 | type = set(string) 3 | default = [] 4 | } 5 | 6 | variable "sft_server_type_blue" { 7 | type = string 8 | default = "cx11" 9 | } 10 | 11 | variable "sft_server_names_green" { 12 | type = set(string) 13 | default = [] 14 | } 15 | 16 | variable "sft_server_type_green" { 17 | type = string 18 | default = "cx11" 19 | } 20 | 21 | variable "sft_a_record_ttl" { 22 | default = 60 23 | } 24 | -------------------------------------------------------------------------------- /terraform/environment/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | 4 | required_providers { 5 | aws = { 6 | source = "hashicorp/aws" 7 | version = "~> 2.58" 8 | } 9 | hcloud = { 10 | source = "hetznercloud/hcloud" 11 | } 12 | } 13 | 14 | backend s3 { 15 | encrypt = true 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- /terraform/examples/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform 2 | -------------------------------------------------------------------------------- /terraform/examples/README.md: -------------------------------------------------------------------------------- 1 | # Example terraform scripts 2 | 3 | Adapt to your needs as necessary. 4 | 5 | ## create-infrastructure.tf 6 | This terraform script can be used to create a few virtual machines on the hetzner cloud provider, and generate an inventory file to use with ansible. (see: wire-server-deploy/ansible/ ) 7 | 8 | -------------------------------------------------------------------------------- /terraform/examples/wire-server-deploy-offline-hetzner/.envrc: -------------------------------------------------------------------------------- 1 | [[ -f .envrc.local ]] && source_env .envrc.local 2 | # You can set this in .envrc.local to keep it out of VCS 3 | export HCLOUD_TOKEN 4 | source_up 5 | 6 | -------------------------------------------------------------------------------- /terraform/examples/wire-server-deploy-offline-hetzner/README.md: -------------------------------------------------------------------------------- 1 | # Wire-server-deploy-offline-hetzner 2 | 3 | This environment is set up and destroyed on demand to test our offline story, 4 | and to function as a reference network diagram for an offline deploy 5 | 6 | This is almost identical to the `wire` environment. We should probably reuse some code. 7 | I just needed some boxes to test the offline deploy path on-demand. 8 | -------------------------------------------------------------------------------- /terraform/examples/wire-server-deploy-offline-hetzner/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hcloud = { 4 | source = "hetznercloud/hcloud" 5 | } 6 | } 7 | required_version = "~> 1.1" 8 | } 9 | -------------------------------------------------------------------------------- /terraform/modules/README.md: -------------------------------------------------------------------------------- 1 | # terraform modules 2 | 3 | HERE BE DRAGONS 4 | 5 | These terraform modules are work-in-progress in active development. 
DO NOT rely on these for a production environment. 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-ami-ubuntu-search/README.md: -------------------------------------------------------------------------------- 1 | # terraform modules 2 | 3 | HERE BE DRAGONS 4 | 5 | These terraform modules are work-in-progress in active development. DO NOT rely on these for a production environment. 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-ami-ubuntu-search/main.tf: -------------------------------------------------------------------------------- 1 | # Finding AMIs: 2 | # https://cloud-images.ubuntu.com/locator/ec2/ 3 | 4 | data "aws_ami" "U18_04_arm64" { 5 | most_recent = true 6 | 7 | filter { 8 | name = "name" 9 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-arm64-server-*"] 10 | } 11 | 12 | filter { 13 | name = "virtualization-type" 14 | values = ["hvm"] 15 | } 16 | 17 | owners = ["099720109477"] # Canonical 18 | } 19 | 20 | data "aws_ami" "U18_04_amd64" { 21 | most_recent = true 22 | 23 | filter { 24 | name = "name" 25 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 26 | } 27 | 28 | filter { 29 | name = "virtualization-type" 30 | values = ["hvm"] 31 | } 32 | 33 | owners = ["099720109477"] # Canonical 34 | } 35 | 36 | -------------------------------------------------------------------------------- /terraform/modules/aws-ami-ubuntu-search/outputs.tf: -------------------------------------------------------------------------------- 1 | output "u18_arm64_ami_id" { 2 | 3 | value = data.aws_ami.U18_04_arm64.id 4 | 5 | } 6 | 7 | output "u18_amd64_ami_id" { 8 | 9 | value = data.aws_ami.U18_04_amd64.id 10 | 11 | } 12 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/data.tf: -------------------------------------------------------------------------------- 1 | # NOTE: obtains region that is set in providers.tf by given variable 2 | # 3 | data "aws_region" "current" {} 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/locals.mailing.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | emailing_enabled = var.enable_email_sending ? 
1 : 0 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.mailing.tf: -------------------------------------------------------------------------------- 1 | # Output required to configure wire-server 2 | 3 | output "ses_endpoint" { 4 | value = "https://email.${data.aws_region.current.name}.amazonaws.com" 5 | } 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/outputs.tf: -------------------------------------------------------------------------------- 1 | # Output required to configure wire-server 2 | 3 | output "sqs_endpoint" { 4 | value = "https://sqs.${data.aws_region.current.name}.amazonaws.com" 5 | } 6 | 7 | output "dynamodb_endpoint" { 8 | value = "https://dynamodb.${data.aws_region.current.name}.amazonaws.com" 9 | } 10 | 11 | output "brig_access_key" { 12 | value = aws_iam_access_key.brig.id 13 | } 14 | 15 | output "brig_access_secret" { 16 | value = aws_iam_access_key.brig.secret 17 | sensitive = true 18 | } 19 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.dynamodb.tf: -------------------------------------------------------------------------------- 1 | # FUTUREWORK: Potentially look at autoscaling for dynamoDB 2 | # see: https://www.terraform.io/docs/providers/aws/r/appautoscaling_policy.html 3 | # 4 | resource "aws_dynamodb_table" "prekey_locks" { 5 | name = "${var.environment}-brig-prekey-locks" 6 | billing_mode = "PROVISIONED" 7 | read_capacity = var.prekey_table_read_capacity 8 | write_capacity = var.prekey_table_write_capacity 9 | hash_key = "client" 10 | 11 | attribute { 12 | name = "client" 13 | type = "S" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.mailing.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_user_policy" "allow_brig_to_queue_email_events" { 2 | count = local.emailing_enabled 3 | 4 | name = "${var.environment}-brig-email-events-queue-policy" 5 | user = aws_iam_user.brig.name 6 | 7 | policy = <<-EOP 8 | { 9 | "Version": "2012-10-17", 10 | "Statement": [ 11 | { 12 | "Effect": "Allow", 13 | "Action": [ 14 | "sqs:DeleteMessage", 15 | "sqs:GetQueueUrl", 16 | "sqs:ReceiveMessage" 17 | ], 18 | "Resource": [ 19 | "${aws_sqs_queue.email_events[0].arn}" 20 | ] 21 | } 22 | ] 23 | } 24 | EOP 25 | } 26 | 27 | resource "aws_iam_user_policy" "allow_brig_to_send_emails" { 28 | count = local.emailing_enabled 29 | 30 | name = "${var.environment}-brig-send-emails-policy" 31 | user = aws_iam_user.brig.name 32 | 33 | policy = <<-EOP 34 | { 35 | "Version": "2012-10-17", 36 | "Statement": [ 37 | { 38 | "Effect": "Allow", 39 | "Action": [ 40 | "ses:SendEmail", 41 | "ses:SendRawEmail" 42 | ], 43 | "Resource": [ 44 | "*" 45 | ] 46 | } 47 | ] 48 | } 49 | EOP 50 | } 51 | -------------------------------------------------------------------------------- 
/terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_user" "brig" { 2 | name = "${var.environment}-brig-full-access" 3 | force_destroy = true 4 | } 5 | 6 | resource "aws_iam_access_key" "brig" { 7 | user = aws_iam_user.brig.name 8 | } 9 | 10 | resource "aws_iam_user_policy" "allow_brig_to_lock_prekeys" { 11 | name = "${var.environment}-brig-prekeys-policy" 12 | user = aws_iam_user.brig.name 13 | 14 | policy = <<-EOP 15 | { 16 | "Version": "2012-10-17", 17 | "Statement": [ 18 | { 19 | "Effect": "Allow", 20 | "Action": [ 21 | "dynamodb:GetItem", 22 | "dynamodb:PutItem", 23 | "dynamodb:DeleteItem" 24 | ], 25 | "Resource": [ 26 | "${aws_dynamodb_table.prekey_locks.arn}" 27 | ] 28 | } 29 | ] 30 | } 31 | EOP 32 | } 33 | 34 | resource "aws_iam_user_policy" "allow_brig_to_queue_internal_events" { 35 | name = "${var.environment}-brig-internal-events-queue-policy" 36 | user = aws_iam_user.brig.name 37 | 38 | policy = <<-EOP 39 | { 40 | "Version": "2012-10-17", 41 | "Statement": [ 42 | { 43 | "Effect": "Allow", 44 | "Action": [ 45 | "sqs:DeleteMessage", 46 | "sqs:GetQueueUrl", 47 | "sqs:ReceiveMessage", 48 | "sqs:SendMessage" 49 | ], 50 | "Resource": [ 51 | "${aws_sqs_queue.internal_events.arn}" 52 | ] 53 | } 54 | ] 55 | } 56 | EOP 57 | } 58 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.ses.mailing.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ses_domain_identity" "brig" { 2 | count = local.emailing_enabled 3 | 4 | domain = var.domain 5 | } 6 | 7 | resource "aws_ses_email_identity" "brig" { 8 | count = local.emailing_enabled 9 | 10 | email = "${var.sender_email_username}@${var.domain}" 11 | } 12 | 13 | resource "aws_ses_domain_dkim" "brig" { 14 | count = local.emailing_enabled 15 | 16 | domain = aws_ses_domain_identity.brig[0].domain 17 | } 18 | 19 | resource "aws_ses_domain_mail_from" "brig" { 20 | count = local.emailing_enabled 21 | 22 | domain = aws_ses_domain_identity.brig[0].domain 23 | mail_from_domain = "${var.from_subdomain}.${var.domain}" 24 | } 25 | 26 | 27 | resource "aws_ses_identity_notification_topic" "bounce" { 28 | count = local.emailing_enabled 29 | 30 | topic_arn = aws_sns_topic.email_notifications[0].arn 31 | notification_type = "Bounce" 32 | identity = aws_ses_email_identity.brig[0].arn 33 | include_original_headers = false 34 | } 35 | 36 | resource "aws_ses_identity_notification_topic" "complaint" { 37 | count = local.emailing_enabled 38 | 39 | topic_arn = aws_sns_topic.email_notifications[0].arn 40 | notification_type = "Complaint" 41 | identity = aws_ses_email_identity.brig[0].arn 42 | include_original_headers = false 43 | } 44 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sns.mailling.tf: -------------------------------------------------------------------------------- 1 | resource "aws_sns_topic" "email_notifications" { 2 | count = local.emailing_enabled 3 | 4 | name = aws_sqs_queue.email_events[0].name 5 | } 6 | 7 | resource "aws_sns_topic_subscription" "notify_via_email" { 8 | count = local.emailing_enabled 9 | 10 | topic_arn = aws_sns_topic.email_notifications[0].arn 11 | protocol = "sqs" 12 | endpoint = aws_sqs_queue.email_events[0].arn 13 | raw_message_delivery = 
true 14 | } 15 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.mailing.tf: -------------------------------------------------------------------------------- 1 | resource "aws_sqs_queue" "email_events" { 2 | count = local.emailing_enabled 3 | 4 | name = "${var.environment}-brig-email-events" 5 | } 6 | 7 | # Ensure that the SNS topic is allowed to publish messages to the SQS queue 8 | 9 | resource "aws_sqs_queue_policy" "allow_email_notification_events" { 10 | count = local.emailing_enabled 11 | 12 | queue_url = aws_sqs_queue.email_events[0].id 13 | 14 | policy = <<-EOP 15 | { 16 | "Version": "2012-10-17", 17 | "Id": "${aws_sqs_queue.email_events[0].arn}/SQSDefaultPolicy", 18 | "Statement": [ 19 | { 20 | "Effect": "Allow", 21 | "Principal": { 22 | "AWS": "*" 23 | }, 24 | "Action": "SQS:SendMessage", 25 | "Resource": "${aws_sqs_queue.email_events[0].arn}", 26 | "Condition": { 27 | "ArnEquals": { 28 | "aws:SourceArn": "${aws_sns_topic.email_notifications[0].arn}" 29 | } 30 | } 31 | } 32 | ] 33 | } 34 | EOP 35 | } 36 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/resources.sqs.tf: -------------------------------------------------------------------------------- 1 | # Create queues for internal events 2 | 3 | resource "aws_sqs_queue" "internal_events" { 4 | name = "${var.environment}-brig-events-internal" 5 | } 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.mailing.tf: -------------------------------------------------------------------------------- 1 | variable "enable_email_sending" { 2 | type = bool 3 | description = "flag to either hand off email sending to AWS or not" 4 | default = true 5 | } 6 | 7 | # NOTE: setting the default to `null` allows to omit this var when instantiating the module 8 | # while still forcing it to be set, when email sending is enabled 9 | # 10 | variable "zone_id" { 11 | type = string 12 | description = "zone ID defined by a 'aws_route53_zone.zone_id' resource (example: Z12345678SQWERTYU)" 13 | default = null 14 | } 15 | variable "domain" { 16 | type = string 17 | description = "FQDN of the email address that is used in 'From' when sending emails (example: example.com)" 18 | default = null 19 | } 20 | 21 | # As to why configuring a MAIL FROM 22 | # docs: https://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html#mail-from-overview 23 | # 24 | variable "from_subdomain" { 25 | type = string 26 | description = "subdomain that is prepended to domain and used to configue MAIL FROM for mails being sent" 27 | default = "email" 28 | } 29 | 30 | variable "sender_email_username" { 31 | type = string 32 | description = "username of the email address that is used in 'From' when sending emails (default: 'no-reply'; result: 'no-reply@$domain')" 33 | default = "no-reply" 34 | } 35 | -------------------------------------------------------------------------------- /terraform/modules/aws-brig-prekey-lock-event-queue-email-sending/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | description = "defines in which region state and lock are being stored (default: 'eu-central-1')" 4 | default = "eu-central-1" 5 | } 6 | 7 | variable "environment" { 8 | type = string 9 | 
description = "name of the environment as a scope for the created resources (default: 'dev'; example: 'prod', 'staging')" 10 | default = "dev" 11 | } 12 | 13 | # NOTE: tweak to adjust performance/pricng ratio 14 | # see: https://aws.amazon.com/dynamodb/pricing/provisioned/ 15 | # 16 | variable "prekey_table_read_capacity" { 17 | type = number 18 | description = "defines how many reads/sec allowed on the table (default: '10'; example: '100')" 19 | default = 10 20 | } 21 | variable "prekey_table_write_capacity" { 22 | type = number 23 | description = "defines how many writes/sec allowed on the table (default: '10'; example: '100')" 24 | default = 10 25 | } 26 | -------------------------------------------------------------------------------- /terraform/modules/aws-cargohold-asset-storage/README.md: -------------------------------------------------------------------------------- 1 | Terraform module: Cargohold Asset Storage 2 | ========================================= 3 | 4 | State: __experimental__ 5 | 6 | This module creates an Object Storage on AWS for cargohold to store encrypted assets. 7 | 8 | AWS resources: S3 9 | 10 | 11 | #### Important note 12 | 13 | This module causes Terraform to store sensitive data in the `.tfstate` file. Hence, encrypting the state should be 14 | mandatory. 15 | 16 | 17 | #### TODO 18 | 19 | * [ ] add cloudfront support 20 | 21 | 22 | #### How to use the module 23 | 24 | ```hcl 25 | module "cargohold_asset_storage" { 26 | source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-cargohold-asset-storage?ref=CHANGE-ME" 27 | 28 | environment = "staging" 29 | } 30 | ``` 31 | 32 | Outputs are used in [wire-server chart values](https://github.com/wireapp/wire-server-deploy/blob/a55d17afa5ac2f40bd50c5d0b907f60ac028377a/values/wire-server/prod-values.example.yaml#L95) 33 | -------------------------------------------------------------------------------- /terraform/modules/aws-cargohold-asset-storage/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-cargohold-asset-storage/outputs.tf: -------------------------------------------------------------------------------- 1 | # Output required to configure wire-server 2 | 3 | output "bucket_name" { 4 | value = aws_s3_bucket.asset_storage.bucket 5 | } 6 | 7 | output "bucket_id" { 8 | value = aws_s3_bucket.asset_storage.id 9 | } 10 | 11 | output "s3_endpoint" { 12 | value = "https://s3.${aws_s3_bucket.asset_storage.region}.amazonaws.com" 13 | } 14 | 15 | output "s3_endpoint_id" { 16 | value = aws_vpc_endpoint.s3.id 17 | } 18 | 19 | output "cargohold_access_key" { 20 | value = aws_iam_access_key.cargohold.id 21 | } 22 | 23 | output "cargohold_access_secret" { 24 | value = aws_iam_access_key.cargohold.secret 25 | } 26 | 27 | output "talk_to_S3" { 28 | value = aws_security_group.talk_to_S3.id 29 | } 30 | -------------------------------------------------------------------------------- /terraform/modules/aws-cargohold-asset-storage/resources.s3.tf: -------------------------------------------------------------------------------- 1 | resource "aws_s3_bucket" "asset_storage" { 2 | bucket = "${random_string.bucket.keepers.env}-${random_string.bucket.keepers.name}-cargohold-${random_string.bucket.result}" 3 | acl = "private" 4 | region = var.region 5 | 6 | cors_rule { 7 | allowed_headers = ["*"] 8 | allowed_methods = ["GET", 
"HEAD"] 9 | allowed_origins = ["*"] 10 | max_age_seconds = 3000 11 | } 12 | 13 | } 14 | 15 | resource "random_string" "bucket" { 16 | length = 8 17 | lower = true 18 | upper = false 19 | number = true 20 | special = false 21 | 22 | keepers = { 23 | env = var.environment 24 | name = var.bucket_name 25 | } 26 | } 27 | 28 | resource "aws_vpc_endpoint" "s3" { 29 | vpc_id = var.vpc_id 30 | service_name = "com.amazonaws.${var.region}.s3" 31 | 32 | tags = { 33 | Environment = var.environment 34 | } 35 | } 36 | 37 | -------------------------------------------------------------------------------- /terraform/modules/aws-cargohold-asset-storage/resources.security_groups.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "talk_to_S3" { 2 | name = "talk_to_S3" 3 | description = "hosts that are allowed to talk to S3." 4 | vpc_id = var.vpc_id 5 | 6 | egress { 7 | description = "" 8 | from_port = 443 9 | to_port = 443 10 | protocol = "tcp" 11 | cidr_blocks = aws_vpc_endpoint.s3.cidr_blocks 12 | } 13 | 14 | tags = { 15 | Name = "talk_to_S3" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /terraform/modules/aws-cargohold-asset-storage/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | description = "defines in which region state and lock are being stored (default: 'eu-central-1')" 4 | default = "eu-central-1" 5 | } 6 | 7 | variable "environment" { 8 | type = string 9 | description = "name of the environment as a scope for the created resources (default: 'dev'; example: 'prod', 'staging')" 10 | default = "dev" 11 | } 12 | 13 | variable "bucket_name" { 14 | type = string 15 | description = "Name of the bucket that cargohold uses to store files (default: 'assets'; prefix: $environment) " 16 | default = "assets" 17 | } 18 | 19 | variable "vpc_id" { 20 | type = string 21 | description = "the ID of the VPC to add an S3 endpoint to" 22 | } 23 | 24 | 25 | -------------------------------------------------------------------------------- /terraform/modules/aws-dns-records/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "rz" { 2 | name = "${var.zone_fqdn}." 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-dns-records/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_suffix = concat( 3 | var.domain != null ? 
[var.domain] : [], 4 | [var.zone_fqdn] 5 | ) 6 | } 7 | -------------------------------------------------------------------------------- /terraform/modules/aws-dns-records/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-dns-records/outputs.tf: -------------------------------------------------------------------------------- 1 | output "fqdns" { 2 | value = concat( 3 | [for record in aws_route53_record.a : record.fqdn], 4 | [for record in aws_route53_record.cname : record.fqdn] 5 | ) 6 | } 7 | -------------------------------------------------------------------------------- /terraform/modules/aws-dns-records/resources.route53.tf: -------------------------------------------------------------------------------- 1 | resource "aws_route53_record" "a" { 2 | for_each = toset(length(var.ips) > 0 ? var.subdomains : []) 3 | 4 | zone_id = data.aws_route53_zone.rz.zone_id 5 | name = join(".", concat([each.value], local.name_suffix)) 6 | type = "A" 7 | ttl = var.ttl 8 | records = var.ips 9 | } 10 | 11 | 12 | resource "aws_route53_record" "cname" { 13 | for_each = toset(length(var.cnames) > 0 ? var.subdomains : []) 14 | 15 | zone_id = data.aws_route53_zone.rz.zone_id 16 | name = join(".", concat([each.value], local.name_suffix)) 17 | type = "CNAME" 18 | ttl = var.ttl 19 | records = var.cnames 20 | } 21 | 22 | resource "aws_route53_record" "spf" { 23 | count = length(var.spf_record_ips) > 0 ? 1 : 0 24 | 25 | zone_id = data.aws_route53_zone.rz.zone_id 26 | name = join(".", local.name_suffix) 27 | type = "TXT" 28 | ttl = "60" 29 | records = [ 30 | join(" ", concat( 31 | ["v=spf1"], 32 | [for ip in var.spf_record_ips : "ip4:${ip}"], 33 | ["-all"] 34 | )) 35 | ] 36 | } 37 | 38 | resource "aws_route53_record" "srv-server" { 39 | for_each = var.srvs 40 | 41 | zone_id = data.aws_route53_zone.rz.zone_id 42 | name = join(".", concat([each.key], local.name_suffix)) 43 | type = "SRV" 44 | ttl = "60" 45 | 46 | records = [for t in each.value : join(".", concat([t], local.name_suffix))] 47 | } 48 | -------------------------------------------------------------------------------- /terraform/modules/aws-dns-records/variables.tf: -------------------------------------------------------------------------------- 1 | variable "zone_fqdn" { 2 | type = string 3 | description = "FQDN of the DNS zone root (required; example: example.com; will append: '.')" 4 | } 5 | 6 | variable "domain" { 7 | type = string 8 | description = "name of the sub-tree all given subdomains are append to (default: not set; example: $subdomains[0].$domain.$zone_fqdn)" 9 | default = null 10 | } 11 | 12 | variable "subdomains" { 13 | type = list(string) 14 | description = "list of sub-domains that will be registered directly under the given zone or otherwise under domain if defined" 15 | } 16 | 17 | variable "ips" { 18 | type = list(string) 19 | description = "a list of IPs used to create A records for the given list of subdomains" 20 | default = [] 21 | } 22 | 23 | variable "cnames" { 24 | type = list(string) 25 | description = "a list of FQDNs used to create CNAME records for the given list of subdomains" 26 | default = [] 27 | } 28 | 29 | variable "ttl" { 30 | type = number 31 | description = "time to live for the DNS entries (defaults to 1 minute)" 32 | default = 60 33 | } 34 | 35 | variable "spf_record_ips" { 36 | type = list(string) 37 | description = 
"list of IPs converted into a list of 'ip4' mechanisms" 38 | default = [] 39 | } 40 | 41 | variable "srvs" { 42 | type = map(list(string)) 43 | description = "map of SRV records and their list of targets. All strings (record and targets) get an automatic suffix of '.domain.zone_fqdn'. See module README for an example." 44 | default = {} 45 | } 46 | -------------------------------------------------------------------------------- /terraform/modules/aws-network-load-balancer/data.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wireapp/wire-server-deploy/6f8cd01c8e13b83ef452e7e156714976b5af2f2b/terraform/modules/aws-network-load-balancer/data.tf -------------------------------------------------------------------------------- /terraform/modules/aws-network-load-balancer/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-network-load-balancer/outputs.tf: -------------------------------------------------------------------------------- 1 | output "fqdn" { 2 | value = aws_lb.nlb.dns_name 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-network-load-balancer/variables.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | type = string 3 | description = "name of the environment as a scope for the created resources (default: 'dev'; example: 'prod', 'staging')" 4 | default = "dev" 5 | } 6 | 7 | variable "node_port_http" { 8 | type = number 9 | description = "HTTP port on the target machines that the LB forwards ingress from port 80 to" 10 | default = 8080 11 | } 12 | 13 | variable "node_port_https" { 14 | type = number 15 | description = "HTTPS port on the target machines that the LB forwards ingress from port 443 to" 16 | default = 8443 17 | } 18 | 19 | variable "node_ips" { 20 | type = list(string) 21 | description = "a list of private IPs from all nodes the load balancer forwards traffic to" 22 | } 23 | 24 | variable "subnet_ids" { 25 | type = list(string) 26 | description = "a list of IDs from subnets where the nodes are part of, and the load balancer egress is attached to" 27 | } 28 | 29 | variable "aws_vpc_id" { 30 | type = string 31 | description = "the ID of the VPC we are adding our targets to." 32 | } 33 | -------------------------------------------------------------------------------- /terraform/modules/aws-terraform-state-share/README.md: -------------------------------------------------------------------------------- 1 | Terraform module: Terraform state facility 2 | ========================================== 3 | 4 | Ensures the existence of locations in which a Terraform state is stored and locked. It allows working in a collaborative 5 | and distributed fashion on any kind of Terraform code. 6 | 7 | It should be a one-time setup that doesn't need to be touched. 
8 | 9 | It makes use of the following AWS services: 10 | 11 | * S3 bucket (Object Storage) 12 | * DynamoDB (document-based Database) 13 | 14 | The module can be used in the following way: 15 | ``` 16 | module "initiate-tf-state-sharing" { 17 | source = "github.com/wireapp/wire-server-deploy.git//terraform/modules/aws-terraform-state-share" 18 | bucket_name = "myBucketName" 19 | table_name = "myTableName" 20 | } 21 | ``` 22 | 23 | In order to destroy the previously created resources, one can 24 | 25 | * use the AWS web console, or 26 | * import the existing state into Terraform first and then destroy it, like 27 | ``` 28 | terraform import \ 29 | -var 'bucket_name=${myStateBucketName}' \ 30 | -var 'table_name=${myStateLockTableName}' \ 31 | module.${myModuleInstanceName}.aws_s3_bucket.terraform-state-storage \ 32 | ${myStateBucketName} 33 | ``` 34 | 35 | More documentation here: 36 | 37 | * https://medium.com/@jessgreb01/how-to-terraform-locking-state-in-s3-2dc9a5665cb6 38 | * https://www.terraform.io/docs/backends/types/s3.html 39 | -------------------------------------------------------------------------------- /terraform/modules/aws-terraform-state-share/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/aws-terraform-state-share/resources.tf: -------------------------------------------------------------------------------- 1 | # create an S3 bucket to store the state file in 2 | resource "aws_s3_bucket" "terraform-state-storage" { 3 | bucket = var.bucket_name 4 | 5 | versioning { 6 | enabled = true 7 | } 8 | 9 | lifecycle { 10 | prevent_destroy = true 11 | } 12 | 13 | tags = { 14 | Name = "S3 Remote Terraform State Store" 15 | } 16 | } 17 | 18 | # create a dynamodb table for locking the state file 19 | resource "aws_dynamodb_table" "terraform-state-lock" { 20 | name = var.table_name 21 | hash_key = "LockID" 22 | read_capacity = 1 23 | write_capacity = 1 24 | 25 | attribute { 26 | name = "LockID" 27 | type = "S" 28 | } 29 | 30 | tags = { 31 | Name = "DynamoDB Terraform State Lock Table" 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /terraform/modules/aws-terraform-state-share/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | default = "eu-central-1" 4 | description = "defines in which region state and lock are being stored" 5 | } 6 | 7 | variable "bucket_name" { 8 | type = string 9 | description = "the name of the bucket, which needs to be globally unique" 10 | } 11 | 12 | variable "table_name" { 13 | type = string 14 | description = "name of the DynamoDB table which holds the lock for accessing the terraform state" 15 | } 16 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc-security-groups/README.md: -------------------------------------------------------------------------------- 1 | # terraform modules 2 | 3 | HERE BE DRAGONS 4 | 5 | These terraform modules are work-in-progress in active development. DO NOT rely on these for a production environment. 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc-security-groups/outputs.tf: -------------------------------------------------------------------------------- 1 | # the world can ssh to this instance. 
2 | output "world_ssh_in" { 3 | value = aws_security_group.world_ssh_in.id 4 | } 5 | 6 | # this instance can SSH into other boxes in the VPC. 7 | output "ssh_from" { 8 | value = aws_security_group.ssh_from.id 9 | } 10 | 11 | # apply to boxes you want "ssh_from" hosts to be able to talk to. 12 | output "has_ssh" { 13 | value = aws_security_group.has_ssh.id 14 | } 15 | 16 | output "world_web_out" { 17 | value = aws_security_group.world_web_out.id 18 | } 19 | 20 | output "talk_to_assets" { 21 | value = aws_security_group.talk_to_assets.id 22 | } 23 | 24 | output "has_assets" { 25 | value = aws_security_group.has_assets.id 26 | } 27 | 28 | output "talk_to_stateful" { 29 | value = aws_security_group.talk_to_stateful.id 30 | } 31 | 32 | output "stateful_node" { 33 | value = aws_security_group.stateful_node.id 34 | } 35 | 36 | output "stateful_private" { 37 | value = aws_security_group.stateful_private.id 38 | } 39 | 40 | output "talk_to_k8s" { 41 | value = aws_security_group.talk_to_k8s.id 42 | } 43 | 44 | output "k8s_private" { 45 | value = aws_security_group.k8s_private.id 46 | } 47 | 48 | output "k8s_node" { 49 | value = aws_security_group.k8s_node.id 50 | } 51 | 52 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc-security-groups/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_id" { 2 | type = string 3 | description = "ID of VPC these security groups are for." 4 | } 5 | 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc/README.md: -------------------------------------------------------------------------------- 1 | # terraform modules 2 | 3 | HERE BE DRAGONS 4 | 5 | These terraform modules are work-in-progress in active development. DO NOT rely on these for a production environment. 6 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.1" 3 | } 4 | 5 | # In AWS, (eu-central-1) 6 | provider "aws" { 7 | region = "eu-central-1" 8 | } 9 | 10 | module "vpc" { 11 | source = "github.com/terraform-aws-modules/terraform-aws-vpc?ref=v2.33.0" 12 | 13 | name = var.name 14 | 15 | cidr = "172.17.0.0/20" 16 | 17 | azs = ["eu-central-1a", "eu-central-1b", "eu-central-1c"] 18 | private_subnets = ["172.17.0.0/22", "172.17.4.0/22", "172.17.8.0/22"] 19 | public_subnets = ["172.17.12.0/24", "172.17.13.0/24", "172.17.14.0/24"] 20 | 21 | enable_dns_hostnames = false 22 | enable_dns_support = true 23 | 24 | # In case we run terraform from within the environment. 25 | # VPC endpoint for DynamoDB 26 | enable_dynamodb_endpoint = true 27 | 28 | enable_nat_gateway = true 29 | one_nat_gateway_per_az = false 30 | # Use this only in productionish environments. 
31 | # one_nat_gateway_per_az = true 32 | 33 | tags = { 34 | Owner = "Backend Team" 35 | Environment = var.environment 36 | } 37 | vpc_tags = { 38 | Owner = "Backend Team" 39 | Name = var.name 40 | } 41 | private_subnet_tags = { 42 | Routability = "private" 43 | } 44 | public_subnet_tags = { 45 | Routability = "public" 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = module.vpc.vpc_id 3 | } 4 | 5 | output "public_subnets" { 6 | value = module.vpc.public_subnets 7 | } 8 | 9 | output "private_subnets" { 10 | value = module.vpc.private_subnets 11 | } 12 | 13 | output "private_route_table_ids" { 14 | value = module.vpc.private_route_table_ids 15 | } 16 | -------------------------------------------------------------------------------- /terraform/modules/aws-vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | description = "VPC name as appearing in AWS" 4 | } 5 | 6 | variable "environment" { 7 | type = string 8 | description = "Environment name, as appears in the environment definition" 9 | default = "dev" 10 | } 11 | 12 | variable "dhcp_options_domain_name" { 13 | type = string 14 | description = "the default domain given to hosts in this VPC by the AWS DHCP servers" 15 | default = "internal.vpc" 16 | } 17 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/load-balancer.locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | LB_PORT_MAPPINGS = [ 3 | { 4 | name = "http" 5 | protocol = "tcp" 6 | listen = 80 7 | destination = 8080 8 | }, 9 | { 10 | name = "https" 11 | protocol = "tcp" 12 | listen = 443 13 | destination = 8443 14 | }, 15 | { 16 | name = "kube-api" 17 | protocol = "tcp" 18 | listen = 6443 19 | destination = 6443 20 | } 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/load-balancer.variables.tf: -------------------------------------------------------------------------------- 1 | variable "lb_port_mappings" { 2 | description = "list of ports the load balancer is being configured with" 3 | type = list(object({ 4 | name = string 5 | protocol = string 6 | listen = number 7 | destination = number 8 | })) 9 | default = [] 10 | } 11 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | LABEL_PREFIX = "wire.infra" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/machines.outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | servers_private_ip = { for _, snw in hcloud_server_network.snw : snw.server_id => snw.ip } 3 | servers_volume_device_path = { for _, vol in hcloud_volume.volumes : vol.server_id => vol.linux_device } 4 | } 5 | 6 | 7 | output "machines" { 8 | value = [ for _, machine in hcloud_server.machines : 9 | merge( 10 | { 11 | hostname = machine.name 12 | private_ipv4 = local.servers_private_ip[machine.id] 13 | public_ipv4 = machine.ipv4_address 14 | component_classes = [ 15 | 
for label_name, _ in machine.labels : 16 | split("/", label_name)[1] 17 | if replace(label_name, "component-class.${local.LABEL_PREFIX}", "") != label_name 18 | ] 19 | }, 20 | contains( keys(machine.labels), "etcd_member_name" ) 21 | ? { etcd_member_name = machine.labels.etcd_member_name } 22 | : {}, 23 | contains( keys(local.servers_volume_device_path), machine.id ) 24 | ? { volume = { device_path = local.servers_volume_device_path[machine.id] } } 25 | : {}, 26 | ) 27 | ] 28 | } 29 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/machines.variables.tf: -------------------------------------------------------------------------------- 1 | variable "default_location" { 2 | default = "nbg1" 3 | } 4 | 5 | variable "default_server_type" { 6 | default = "cx51" 7 | } 8 | 9 | variable "default_image" { 10 | default = "ubuntu-22.04" 11 | } 12 | 13 | 14 | # FUTUREWORK: replace 'any' by implementing https://www.terraform.io/docs/language/functions/defaults.html 15 | # 16 | variable "machines" { 17 | description = "list of machines" 18 | # type = list(object({ 19 | # group_name = string 20 | # machine_id = string 21 | # machine_type = string 22 | # component_classes = list(string) 23 | # volume = optional(object({ 24 | # size = number 25 | # format = optional(string) 26 | # })) 27 | # })) 28 | type = any 29 | default = [] 30 | 31 | validation { 32 | condition = length(var.machines) > 0 33 | error_message = "At least one machine must be defined." 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/network.resources.tf: -------------------------------------------------------------------------------- 1 | resource "hcloud_network" "nw" { 2 | name = "k8s-${ var.cluster_name }" 3 | 4 | ip_range = "192.168.0.0/16" 5 | } 6 | 7 | 8 | resource "hcloud_network_subnet" "sn" { 9 | network_id = hcloud_network.nw.id 10 | 11 | ip_range = "192.168.1.0/24" 12 | 13 | # NOTE: No other sensible values available at this time 14 | # DOCS: https://docs.hetzner.cloud/#subnets 15 | type = "cloud" 16 | network_zone = "eu-central" 17 | } 18 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ips" { 2 | value = var.with_load_balancer ? [ hcloud_load_balancer.lb[0].ipv4 ] : [ 3 | for _, machine in hcloud_server.machines : machine.ipv4_address 4 | if contains( keys(machine.labels), "component-class.${local.LABEL_PREFIX}/node" ) 5 | ] 6 | } 7 | 8 | # NOTE: the existence of this output feels indeed odd. What is generated here could and actually should 9 | # be done on the outside since 'machines' is already exposed. 
See ./../../environment/kubernetes.dns.tf 10 | output "node_ips" { 11 | value = [ 12 | for _, machine in hcloud_server.machines : machine.ipv4_address 13 | if contains( keys(machine.labels), "component-class.${local.LABEL_PREFIX}/node" ) 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | type = string 3 | } 4 | 5 | variable "ssh_keys" { 6 | type = set(string) 7 | } 8 | 9 | variable "with_load_balancer" { 10 | description = "indicates whether a load balancer is being created and placed in front of all K8s machines" 11 | type = bool 12 | default = false 13 | } 14 | -------------------------------------------------------------------------------- /terraform/modules/hetzner-kubernetes/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | hcloud = { 7 | source = "hetznercloud/hcloud" 8 | } 9 | } 10 | required_version = "~> 1.1" 11 | } 12 | -------------------------------------------------------------------------------- /terraform/modules/sft/dns.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "sft_zone" { 2 | name = var.root_domain 3 | } 4 | 5 | resource "aws_route53_record" "sft_a" { 6 | for_each = setunion(var.server_groups.blue.server_names, var.server_groups.green.server_names) 7 | 8 | zone_id = data.aws_route53_zone.sft_zone.zone_id 9 | name = "sft${each.value}.sft.${var.environment}" 10 | type = "A" 11 | ttl = var.a_record_ttl 12 | records = [hcloud_server.sft[each.key].ipv4_address] 13 | } 14 | 15 | resource "aws_route53_record" "metrics_srv" { 16 | zone_id = data.aws_route53_zone.sft_zone.zone_id 17 | name = "_sft-metrics._tcp.${var.environment}" 18 | type = "SRV" 19 | ttl = var.metrics_srv_record_ttl 20 | records = [for a_record in aws_route53_record.sft_a : "0 10 8443 ${a_record.fqdn}"] 21 | } 22 | -------------------------------------------------------------------------------- /terraform/modules/sft/outputs.tf: -------------------------------------------------------------------------------- 1 | # TODO: It is absurd that srv-announcer requires this. All route53 resources are 2 | # scoped globally, figure out if we really need to do this. 
3 | data "aws_region" "current" {} 4 | 5 | output "sft" { 6 | value = { 7 | sft_srv = "_sft._tcp.${var.environment}" 8 | aws_key_id = aws_iam_access_key.srv-announcer.id 9 | aws_access_key = aws_iam_access_key.srv-announcer.secret 10 | aws_region = data.aws_region.current.name 11 | instances_blue = [ for server_name, _ in var.server_groups.blue.server_names : 12 | { 13 | hostname = hcloud_server.sft[server_name].name 14 | ipaddress = hcloud_server.sft[server_name].ipv4_address 15 | fqdn = aws_route53_record.sft_a[server_name].fqdn 16 | } 17 | ] 18 | instances_green = [ for server_name, _ in var.server_groups.green.server_names : 19 | { 20 | hostname = hcloud_server.sft[server_name].name 21 | ipaddress = hcloud_server.sft[server_name].ipv4_address 22 | fqdn = aws_route53_record.sft_a[server_name].fqdn 23 | } 24 | ] 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /terraform/modules/sft/server.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | // This duplication is bad, but terraform doesn't allow defining functions. 3 | map_server_name_to_type_green = {for _, server_name in var.server_groups.green.server_names: server_name => var.server_groups.green.server_type} 4 | map_server_name_to_type_blue = {for _, server_name in var.server_groups.blue.server_names : server_name => var.server_groups.blue.server_type} 5 | map_server_name_to_type = merge(local.map_server_name_to_type_blue, local.map_server_name_to_type_green) 6 | } 7 | 8 | 9 | resource "hcloud_server" "sft" { 10 | for_each = local.map_server_name_to_type 11 | 12 | name = "${var.environment}-sft-${each.key}" 13 | server_type = each.value 14 | image = var.image 15 | location = var.location 16 | ssh_keys = var.ssh_keys 17 | } 18 | -------------------------------------------------------------------------------- /terraform/modules/sft/variables.tf: -------------------------------------------------------------------------------- 1 | variable "root_domain" { 2 | type = string 3 | } 4 | 5 | variable "environment" { 6 | type = string 7 | } 8 | 9 | variable "server_groups" { 10 | description = <.." 16 | EOD 17 | type = object({ 18 | //Arbitrary name for the first group 19 | blue = object({ 20 | server_names = set(string) 21 | server_type = string 22 | }) 23 | 24 | //Arbitrary name for the second group 25 | green = object({ 26 | server_names = set(string) 27 | server_type = string 28 | }) 29 | }) 30 | 31 | validation { 32 | condition = length(setintersection(var.server_groups.blue.server_names, var.server_groups.green.server_names)) == 0 33 | error_message = "The server_names in the blue and green server_groups must not intersect." 
34 | } 35 | } 36 | 37 | variable "a_record_ttl" { 38 | type = number 39 | } 40 | 41 | variable "metrics_srv_record_ttl" { 42 | default = 60 43 | } 44 | 45 | variable "server_type" { 46 | default = "cx11" 47 | } 48 | 49 | variable "server_type_stale" { 50 | default = "cx11" 51 | } 52 | 53 | variable "image" { 54 | default = "ubuntu-18.04" 55 | } 56 | 57 | variable "location" { 58 | default = "nbg1" 59 | } 60 | 61 | variable "ssh_keys" { 62 | type = list 63 | } 64 | -------------------------------------------------------------------------------- /terraform/modules/sft/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | hcloud = { 7 | source = "hetznercloud/hcloud" 8 | } 9 | } 10 | required_version = "~> 1.1" 11 | } 12 | -------------------------------------------------------------------------------- /utils/generate_graph.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env -S gnuplot -c 2 | 3 | #################################################################### 4 | # GNUPlot script to display reports on packet captured RTP streams # 5 | #################################################################### 6 | 7 | ############################## 8 | # General Usage 9 | # 10 | # once you have a report from rtpstreams_graph.py saved to a file, 11 | # provide it to this utility, and get a graphical output. 12 | 13 | ############################## 14 | # Requirements 15 | # 16 | # If you're not using wire-server-deploy's direnv and nix setup, 17 | # you will need to install a version of gnuplot greater than version 5. 18 | 19 | if (ARGC != 2) { print "usage: ", ARG0, " <report-file> <output-image>"; 20 | exit -1 21 | } 22 | 23 | set boxwidth 0.3 24 | set style fill solid 25 | 26 | set style line 1 lc rgb "blue" 27 | set style line 2 lc rgb "red" 28 | 29 | set term pngcairo size 1024,768 enhance font 'Verdana,10' 30 | 31 | set title "Packet size against mean pairwise transmission delay" 32 | 33 | set xlabel "Packet size ranges per bucket (bytes)" 34 | set xrange [0:] 35 | set ylabel "Packet-pairwise transmission delay (microseconds)" 36 | set yrange [0:] 37 | 38 | set output ARG2 39 | 40 | plot sprintf("" -------------------------------------------------------------------------------- 5 | hostname: "" 6 | nginz: 7 | https: 8 | sslCert: "" 9 | hostname: "" 10 | wss: 11 | sslCert: "" 12 | hostname: "" 13 | -------------------------------------------------------------------------------- /values/cassandra-external/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-PROD: List here the real IP addresses of the Cassandra nodes 2 | # IPs: 3 | # - x.y.z.w 4 | -------------------------------------------------------------------------------- /values/coturn/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | secrets: 2 | zrestSecrets: 3 | - "" 4 | -------------------------------------------------------------------------------- /values/coturn/prod-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | secrets: 2 | zrestSecrets: 3 | - "" 4 | -------------------------------------------------------------------------------- /values/coturn/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # using upstream values for coturn helm
-------------------------------------------------------------------------------- /values/databases-ephemeral/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | # Note that these are the correct values assuming that both Cassandra 2 | # and Elasticsearch are deployed outside of Kubernetes 3 | tags: 4 | cassandra-ephemeral: true 5 | elasticsearch-ephemeral: true 6 | 7 | redis-ephemeral: 8 | redis-ephemeral: 9 | image: 10 | registry: docker.io 11 | repository: bitnami/redis 12 | tag: 6.2.16 13 | usePassword: false 14 | cluster: 15 | enabled: true 16 | # https://artifacthub.io/packages/helm/bitnami-aks/redis/11.3.4#production-configuration 17 | # default slaveCount is 2 18 | slaveCount: 3 19 | master: 20 | persistence: 21 | enabled: false 22 | resources: 23 | limits: 24 | cpu: "1000m" 25 | memory: "1024Mi" 26 | requests: 27 | cpu: "500m" 28 | memory: "512Mi" 29 | slave: 30 | persistence: 31 | enabled: false 32 | resources: 33 | limits: 34 | cpu: "1000m" 35 | memory: "1024Mi" 36 | requests: 37 | cpu: "500m" 38 | memory: "512Mi" 39 | -------------------------------------------------------------------------------- /values/databases-ephemeral/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # Note that these are the correct values assuming that both Cassandra 2 | # and Elasticsearch are deployed outside of Kubernetes 3 | tags: 4 | cassandra-ephemeral: false 5 | elasticsearch-ephemeral: false 6 | 7 | redis-ephemeral: 8 | redis-ephemeral: 9 | image: 10 | registry: docker.io 11 | repository: bitnami/redis 12 | tag: 6.2.16 13 | usePassword: false 14 | cluster: 15 | enabled: true 16 | # https://artifacthub.io/packages/helm/bitnami-aks/redis/11.3.4#production-configuration 17 | # default slaveCount is 2 18 | slaveCount: 3 19 | master: 20 | persistence: 21 | enabled: false 22 | resources: 23 | limits: 24 | cpu: "1000m" 25 | memory: "1024Mi" 26 | requests: 27 | cpu: "500m" 28 | memory: "512Mi" 29 | slave: 30 | persistence: 31 | enabled: false 32 | resources: 33 | limits: 34 | cpu: "1000m" 35 | memory: "1024Mi" 36 | requests: 37 | cpu: "500m" 38 | memory: "512Mi" 39 | -------------------------------------------------------------------------------- /values/demo-smtp/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-DEMO: This is often a good default when using calico's default CIDR 2 | # https://github.com/kubernetes-sigs/kubespray/blob/master/docs/calico.md#optional--define-the-default-pool-cidr 3 | # or flannel's https://github.com/kubernetes-sigs/kubespray/blob/master/docs/flannel.md#flannel 4 | # If you override those values, etc., then verify that this CIDR still makes sense 5 | # For all variables the "ixdotai/smtp" image supports see: https://github.com/ix-ai/smtp#readme 6 | envVars: 7 | RELAY_NETWORKS: ":10.233.0.0/16" 8 | # 9 | # PORT: "25" 10 | # NET_DEV: eth0 11 | # OTHER_HOSTNAMES: other.example.com 12 | # DISABLE_IPV6: 1 13 | # BIND_IP: 0.0.0.0 14 | # BIND_IP6: ::0 15 | # MAILNAME: mail.example.com 16 | # DKIM_KEY_PATH: /etc/exim4/dkim.key 17 | # KEY_PATH: /path/to/key.crt 18 | # CERTIFICATE_PATH: /path/to/certificate.crt 19 | # SMARTHOST_ADDRESS: mail.example.com 20 | # SMARTHOST_PORT: "587" 21 | # SMARTHOST_USER: exampleuser 22 | # SMARTHOST_PASSWORD: secret 23 | # SMARTHOST_ALIASES: "*.example.com" 24 | -------------------------------------------------------------------------------- 
/values/demo-smtp/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-PROD: This is often a good default when using calico's default CIDR 2 | # https://github.com/kubernetes-sigs/kubespray/blob/master/docs/calico.md#optional--define-the-default-pool-cidr 3 | # or flannel's https://github.com/kubernetes-sigs/kubespray/blob/master/docs/flannel.md#flannel 4 | # If you override those values, etc., then verify that this CIDR still makes sense 5 | # For all variables the "ixdotai/smtp" image supports see: https://github.com/ix-ai/smtp#readme 6 | envVars: 7 | RELAY_NETWORKS: ":10.233.0.0/16" 8 | # 9 | # PORT: "25" 10 | # NET_DEV: eth0 11 | # OTHER_HOSTNAMES: other.example.com 12 | # DISABLE_IPV6: 1 13 | # BIND_IP: 0.0.0.0 14 | # BIND_IP6: ::0 15 | # MAILNAME: mail.example.com 16 | # DKIM_KEY_PATH: /etc/exim4/dkim.key 17 | # KEY_PATH: /path/to/key.crt 18 | # CERTIFICATE_PATH: /path/to/certificate.crt 19 | # SMARTHOST_ADDRESS: mail.example.com 20 | # SMARTHOST_PORT: "587" 21 | # SMARTHOST_USER: exampleuser 22 | # SMARTHOST_PASSWORD: secret 23 | # SMARTHOST_ALIASES: "*.example.com" 24 | -------------------------------------------------------------------------------- /values/elasticsearch-external/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-PROD: List here the real IP addresses of the Elasticsearch nodes 2 | # IPs: 3 | # - x.y.z.w 4 | -------------------------------------------------------------------------------- /values/external-dns/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | external-dns: 2 | provider: aws 3 | rbac: 4 | create: true 5 | aws: 6 | region: "" # e.g. eu-central-1 7 | zoneType: public 8 | domainFilters: [""] 9 | zoneIdFilters: [""] 10 | resources: 11 | requests: 12 | memory: 128Mi 13 | cpu: 200m 14 | limits: 15 | memory: 256Mi 16 | cpu: 400m 17 | -------------------------------------------------------------------------------- /values/fake-aws/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | # Disable since prod should use an externally provided minio 2 | fake-aws-s3: 3 | enabled: true 4 | -------------------------------------------------------------------------------- /values/fake-aws/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # Disable since prod should use an externally provided minio 2 | fake-aws-s3: 3 | enabled: false 4 | -------------------------------------------------------------------------------- /values/ingress-nginx-controller/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | # Image digests are not supported by our `wire-server-deploy` machinery. 2 | # They cannot be correctly dumped to tar files and then be loaded by 3 | # `containerd`. 
4 | ingress-nginx: 5 | controller: 6 | # There's no loadbalancer support in the Hetzner CI environment 7 | kind: DaemonSet 8 | service: 9 | type: NodePort 10 | image: 11 | tag: "v1.10.6" 12 | digest: "" 13 | digestChroot: "" 14 | admissionWebhooks: 15 | patch: 16 | image: 17 | tag: "v20220916-gd32f8c343" 18 | digest: "" 19 | -------------------------------------------------------------------------------- /values/ingress-nginx-controller/hetzner-ci.example.yaml: -------------------------------------------------------------------------------- 1 | # Image digests are not supported by our `wire-server-deploy` machinery. 2 | # They cannot be correctly dumped to tar files and then be loaded by 3 | # `containerd`. 4 | ingress-nginx: 5 | controller: 6 | # There's no loadbalancer support in the Hetzner CI environment 7 | kind: DaemonSet 8 | service: 9 | type: NodePort 10 | image: 11 | tag: "v1.10.6" 12 | digest: "" 13 | digestChroot: "" 14 | admissionWebhooks: 15 | patch: 16 | image: 17 | tag: "v20220916-gd32f8c343" 18 | digest: "" 19 | -------------------------------------------------------------------------------- /values/ingress-nginx-controller/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # Image digests are not supported by our `wire-server-deploy` machinery. 2 | # They cannot be correctly dumped to tar files and then be loaded by 3 | # `containerd`. 4 | ingress-nginx: 5 | controller: 6 | image: 7 | tag: "v1.10.6" 8 | digest: "" 9 | digestChroot: "" 10 | admissionWebhooks: 11 | patch: 12 | image: 13 | tag: "v20220916-gd32f8c343" 14 | digest: "" 15 | -------------------------------------------------------------------------------- /values/k8ssandra-operator/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | k8ssandra-operator: 2 | cass-operator: 3 | disableCertManagerCheck: true 4 | 5 | cleaner: 6 | image: 7 | registry: docker.io 8 | repository: k8ssandra/k8ssandra-tools 9 | tag: 1.6.0-20240328143453-c6f39dad 10 | 11 | client: 12 | image: 13 | registry: docker.io 14 | repository: k8ssandra/k8ssandra-tools 15 | tag: 1.6.0-20240328143453-c6f39dad 16 | -------------------------------------------------------------------------------- /values/k8ssandra-test-cluster/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # The values in k8ssandra-cluster.yaml are well choosen. Please only export and 2 | # override them if you are confident the change is needed. 3 | 4 | # storageClassName: the name storageClass to use. This defines where the data is 5 | # stored. Storage is automatically requested if the storage class is correctly 6 | # setup. 7 | storageClassName: "openebs-hostpath" 8 | 9 | # storageSize: Size of the storage (persistent volume claim) to request. At 10 | # Hetzner's cloud the smallest volume is 10GB. So, even if you need much less 11 | # storage, it's fine to request 10GB. The memory units are described here: 12 | # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory 13 | storageSize: 10G 14 | 15 | # These options relate to the client_encryption_options described in: 16 | # https://cassandra.apache.org/doc/stable/cassandra/configuration/cass_yaml_file.html#client_encryption_options 17 | client_encryption_options: 18 | enabled: false 19 | optional: true 20 | # The password could be secured better. However, this chart is meant to be 21 | # used as test setup. 
And, protecting a self-signed certificate isn't very 22 | # useful. 23 | keystorePassword: password 24 | 25 | # Guard the private key by syncing only the CA certificate to 26 | # `k8ssandra-test-cluster-tls-ca-certificate` secrets. Requires `trust-manager` 27 | # Helm chart to be installed (including CRDs.) 28 | syncCACertToSecret: false 29 | 30 | # Limit syncing to this namespace. Otherwise, the secret is synced to all 31 | # namespaces. 32 | # syncCACertNamespace: 33 | 34 | # For telemetry data 35 | prometheus: 36 | enabled: false 37 | 38 | # Size of the datacenter 39 | datacenter: 40 | size: 1 41 | -------------------------------------------------------------------------------- /values/keycloakx/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # command: 2 | # - "/opt/keycloak/bin/kc.sh" 3 | # - "start" 4 | # - "--http-enabled=true" 5 | # - "--http-port=8080" 6 | # - "--hostname-strict=false" 7 | # - "--hostname-strict-https=true" 8 | 9 | # extraEnv: 10 | # - name: "JAVA_OPTS_APPEND" 11 | # value: "-Djgroups.dns.query=keycloak-headless" 12 | # - name: "KEYCLOAK_ADMIN" 13 | # value: "admin" 14 | # - name: "KEYCLOAK_ADMIN_PASSWORD" 15 | # value: "admin" 16 | 17 | # database: 18 | # vendor: "postgres" 19 | # hostname: "keycloak-postgres-postgresql" 20 | # port: "5432" 21 | # username: "keycloak" 22 | # password: "keycloak" 23 | 24 | # ingress: 25 | # enabled: true 26 | # ingressClassName: "nginx" 27 | # rules: 28 | # - host: "keycloak.example.com" 29 | # paths: 30 | # - path: "{{ tpl .Values.http.relativePath $ | trimSuffix \"/\" }}/" 31 | # pathType: "Prefix" 32 | # tls: 33 | # - hosts: 34 | # - "keycloak.example.com" 35 | # secretName: "ingress_cert_secret_name" 36 | # console: 37 | # enabled: true 38 | # ingressClassName: "nginx" 39 | # rules: 40 | # - host: "keycloak.example.com" 41 | # paths: 42 | # - path: "{{ tpl .Values.http.relativePath $ | trimSuffix \"/\" }}/admin" 43 | # pathType: "Prefix" 44 | -------------------------------------------------------------------------------- /values/ldap-scim-bridge/values-prod.example.yaml: -------------------------------------------------------------------------------- 1 | # NOTE: you will need an LDAP SCIM bridge deployment for each team! 2 | # by default, run every 5 minutes. 3 | schedule: "*/5 * * * *" 4 | config: 5 | logLevel: "Debug" # one of Trace,Debug,Info,Warn,Error,Fatal; 'Fatal' is least noisy, 'Trace' is most. 6 | ldapSource: 7 | tls: true 8 | host: "ldap-server.example.com" 9 | port: 636 10 | dn: "CN=Admin,DC=example,DC=com" 11 | password: "secret password here" 12 | search: 13 | base: "DC=example,DC=com" 14 | objectClass: "person" 15 | memberOf: "CN=people,OU=engineering,DC=example,DC=com" 16 | codec: "utf8" 17 | # deleteOnAttribute: # optional, related to delete-from-directory. 18 | # key: "deleted" 19 | # value: "true" 20 | # deleteFromDirectory: # optional; ok to use together with delete-on-attribute if you use both. 
21 | # base: "ou=DeletedPeople,DC=example,DC=com" 22 | # objectClass: "account" 23 | scimTarget: 24 | tls: false 25 | host: "spar" 26 | port: 8080 27 | path: "/scim/v2" 28 | token: "Bearer BEARERTOKENHERE" 29 | mapping: 30 | displayName: "displayName" 31 | userName: "mailNickname" 32 | externalId: "mail" 33 | email: "mail" 34 | -------------------------------------------------------------------------------- /values/metallb/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | # NOTE: Adjust these values to your list of available cidr addresses. 2 | # Intentionally left blank, to avoid booting up metallb without 3 | # IP addresses 4 | # Example with a 1-node kubernetes cluster with a single ip 1.2.3.4: 5 | # cidrAddresses: 6 | # - "1.2.3.4/32" 7 | cidrAddresses: 8 | -------------------------------------------------------------------------------- /values/minio-external/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-PROD: List here the real IP addresses of the Minio nodes 2 | # IPs: 3 | # - x.y.z.w 4 | -------------------------------------------------------------------------------- /values/nginx-ingress-services/demo-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-DEMO: Assuming you run helm directly (and not helm-wrapper with sops), you can 2 | # simply drop your certificate/private key here. Be careful with spaces/indentation, 3 | # as the ingress seems to simply "swallow" errors if any (and serve the Fake default certificate 4 | # which is highly confusing) 5 | secrets: 6 | tlsWildcardCert: | 7 | -----BEGIN CERTIFICATE----- 8 | .... OWN CERTIFICATE ...... 9 | -----END CERTIFICATE------- 10 | -----BEGIN CERTIFICATE----- 11 | .... INTERMEDIATE CERT .... 12 | -----END CERTIFICATE------- 13 | tlsWildcardKey: | 14 | -----BEGIN PRIVATE KEY----- 15 | .... REAL PRIV KEY ....... 16 | -----END PRIVATE KEY------- 17 | -------------------------------------------------------------------------------- /values/nginx-ingress-services/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | teamSettings: 2 | enabled: true 3 | accountPages: 4 | enabled: true 5 | tls: 6 | enabled: true 7 | # NOTE: enable to automate certificate issuing with jetstack/cert-manager instead of 8 | # providing your own certs in secrets.yaml. 
Cert-manager is not installed automatically, 9 | # it needs to be installed beforehand (see ./../../charts/certificate-manager/README.md) 10 | useCertManager: false 11 | 12 | certManager: 13 | inTestMode: false 14 | # CHANGEME-PROD: required, if certificate manager is used; set to receive cert expiration 15 | # notice and other Letsencrypt related notification 16 | certmasterEmail: 17 | 18 | # CHANGEME-PROD: These values are suggested for deployments on bare metal and 19 | # should be adjusted on a per installation basis 20 | config: 21 | dns: 22 | https: nginz-https.example.com 23 | ssl: nginz-ssl.example.com 24 | webapp: webapp.example.com 25 | fakeS3: assets.example.com 26 | teamSettings: teams.example.com 27 | accountPages: account.example.com 28 | # uncomment below to activate cert acquisition for federator ingress 29 | # federator: federator.example.com 30 | 31 | # Redirection configuration for fake-aws-s3 32 | service: 33 | useFakeS3: true 34 | s3: 35 | externalPort: 9000 36 | serviceName: fake-aws-s3 37 | -------------------------------------------------------------------------------- /values/nginx-ingress-services/prod-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | # CHANGEME-PROD: Assuming you run helm directly (and not helm-wrapper with sops), you can 2 | # simply drop your certificate/private key here. Be careful with spaces/indentation, 3 | # as the ingress seems to simply "swallow" errors if any (and serve the Fake default certificate 4 | # which is highly confusing) 5 | secrets: 6 | tlsWildcardCert: | 7 | -----BEGIN CERTIFICATE----- 8 | .... OWN CERTIFICATE ...... 9 | -----END CERTIFICATE------- 10 | -----BEGIN CERTIFICATE----- 11 | .... INTERMEDIATE CERT .... 12 | -----END CERTIFICATE------- 13 | tlsWildcardKey: | 14 | -----BEGIN PRIVATE KEY----- 15 | .... REAL PRIV KEY ....... 16 | -----END PRIVATE KEY------- 17 | -------------------------------------------------------------------------------- /values/nginx-ingress-services/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | teamSettings: 2 | enabled: true 3 | accountPages: 4 | enabled: true 5 | tls: 6 | enabled: true 7 | # NOTE: enable to automate certificate issuing with jetstack/cert-manager instead of 8 | # providing your own certs in secrets.yaml. 
Cert-manager is not installed automatically, 9 | # it needs to be installed beforehand (see ./../../charts/certificate-manager/README.md) 10 | useCertManager: false 11 | 12 | certManager: 13 | inTestMode: false 14 | # CHANGEME-PROD: required, if certificate manager is used; set to receive cert expiration 15 | # notice and other Letsencrypt related notification 16 | certmasterEmail: 17 | 18 | # CHANGEME-PROD: These values are suggested for deployments on bare metal and 19 | # should be adjusted on a per installation basis 20 | config: 21 | dns: 22 | https: nginz-https.example.com 23 | ssl: nginz-ssl.example.com 24 | webapp: webapp.example.com 25 | fakeS3: assets.example.com 26 | teamSettings: teams.example.com 27 | accountPages: account.example.com 28 | # uncomment below to activate cert acquisition for federator ingress 29 | # federator: federator.example.com 30 | 31 | # Redirection configuration for fake-aws-s3 32 | service: 33 | useFakeS3: true 34 | s3: 35 | externalPort: 9000 36 | serviceName: minio-external 37 | -------------------------------------------------------------------------------- /values/outlook-addin/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | host: "outlook.example.com" 2 | wireApiBaseUrl: "https://nginz-https.example.com" 3 | wireAuthorizationEndpoint: "https://webapp.example.com/auth" 4 | allowOrigin: "https://webapp.example.com, https://nginz-https.example.com" 5 | # uncomment the tls section if you are using cert-manager for certificates, otherwise, 6 | # follow the documentation in outlook-addin charts README how to deploy using your own certs 7 | #tls: 8 | # issuerRef: 9 | # name: letsencrypt-http01 10 | # clientId is obtained after registering outlook service with wire OAuth, more in README of outlook-addin chart 11 | clientId: "" 12 | -------------------------------------------------------------------------------- /values/rabbitmq/demo-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | rabbitmq: 2 | auth: 3 | username: wire-server 4 | password: verysecurepassword 5 | -------------------------------------------------------------------------------- /values/rabbitmq/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | # More settings can be found here: https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq 2 | rabbitmq: 3 | # some Kernel versions does not support modifying ulimit via containers, setting this to empty won't override default ulimit 4 | ulimitNofiles: "" 5 | persistence: 6 | size: 10Gi 7 | enabled: false 8 | ### To use a persistent volume, set the enabled to true 9 | ### set and uncomment the name of your storageClass below, 10 | ### also, you can refer to offline/local_persistent_storage_k8s.md 11 | ### for deploying openebs for dynamic volume provisioning 12 | # storageClass: openebs-hostpath 13 | -------------------------------------------------------------------------------- /values/rabbitmq/prod-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | rabbitmq: 2 | auth: 3 | username: wire-server 4 | password: verysecurepassword 5 | -------------------------------------------------------------------------------- /values/rabbitmq/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | # More settings can be found here: 
https://github.com/bitnami/charts/tree/main/bitnami/rabbitmq 2 | rabbitmq: 3 | # some Kernel versions does not support modifying ulimit via containers, setting this to empty won't override default ulimit 4 | ulimitNofiles: "" 5 | persistence: 6 | size: 10Gi 7 | enabled: false 8 | ### To use a persistent volume, set the enabled to true 9 | ### set and uncomment the name of your storageClass below, 10 | ### also, you can refer to offline/local_persistent_storage_k8s.md 11 | ### for deploying openebs for dynamic volume provisioning 12 | # storageClass: openebs-hostpath 13 | -------------------------------------------------------------------------------- /values/redis-ephemeral/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | redis-ephemeral: 2 | image: 3 | registry: docker.io 4 | repository: bitnami/redis 5 | tag: 6.2.16 6 | usePassword: false 7 | cluster: 8 | enabled: true 9 | # https://artifacthub.io/packages/helm/bitnami-aks/redis/11.3.4#production-configuration 10 | # default slaveCount is 2 11 | slaveCount: 3 12 | master: 13 | persistence: 14 | enabled: false 15 | resources: 16 | limits: 17 | cpu: "1000m" 18 | memory: "1024Mi" 19 | requests: 20 | cpu: "500m" 21 | memory: "512Mi" 22 | slave: 23 | persistence: 24 | enabled: false 25 | resources: 26 | limits: 27 | cpu: "1000m" 28 | memory: "1024Mi" 29 | requests: 30 | cpu: "500m" 31 | memory: "512Mi" -------------------------------------------------------------------------------- /values/restund/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | secrets: 2 | zrestSecret: "" 3 | -------------------------------------------------------------------------------- /values/sftd/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | allowOrigin: https://webapp.example.com 2 | host: sftd.example.com 3 | replicaCount: 1 4 | joinCall: 5 | replicaCount: 1 6 | tls: 7 | issuerRef: 8 | name: letsencrypt-http01 9 | -------------------------------------------------------------------------------- /values/sftd/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | allowOrigin: https://webapp.example.com 2 | host: sftd.example.com 3 | replicaCount: 3 4 | tls: 5 | issuerRef: 6 | name: letsencrypt-http01 7 | -------------------------------------------------------------------------------- /values/team-settings/demo-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | secrets: 2 | # NOTE: This setting doesn't have to be changed for offline deploys as the team-settings 3 | # container is pre-seeded 4 | # It is just the empty "{}" json hashmap 5 | configJson: "e30K" 6 | -------------------------------------------------------------------------------- /values/team-settings/demo-values.example.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | # image: 3 | # tag: some-tag (only override if you want a newer/different version than what is in the chart) 4 | config: 5 | externalUrls: 6 | backendRest: nginz-https.example.com 7 | backendWebsocket: nginz-ssl.example.com 8 | backendDomain: example.com 9 | appHost: teams.example.com 10 | # See full list of available environment variables: https://github.com/wireapp/wire-web-config-wire/blob/master/wire-team-settings/.env.defaults 11 | envVars: 12 | APP_NAME: "Team Settings" 13 | 
ENFORCE_HTTPS: "true" 14 | FEATURE_CHECK_CONSENT: "false" 15 | FEATURE_ENABLE_DEBUG: "false" 16 | FEATURE_ENABLE_NEW_TEAM: "true" 17 | # NOTE: Uncomment this for legalhold support in the Team-settings UI 18 | # FEATURE_ENABLE_LEGAL_HOLD: "true" 19 | URL_ACCOUNT_BASE: "https://account.example.com" 20 | URL_WEBAPP_BASE: "https://webapp.example.com" 21 | URL_WEBSITE_BASE: "https://www.example.com" 22 | WEBSITE_LABEL: "www.example.com" 23 | CSP_EXTRA_CONNECT_SRC: "https://*.example.com, wss://*.example.com" 24 | CSP_EXTRA_IMG_SRC: "https://*.example.com" 25 | CSP_EXTRA_SCRIPT_SRC: "https://*.example.com" 26 | CSP_EXTRA_DEFAULT_SRC: "https://*.example.com" 27 | CSP_EXTRA_FONT_SRC: "https://*.example.com" 28 | CSP_EXTRA_FRAME_SRC: "https://*.example.com" 29 | CSP_EXTRA_MANIFEST_SRC: "https://*.example.com" 30 | CSP_EXTRA_OBJECT_SRC: "https://*.example.com" 31 | CSP_EXTRA_MEDIA_SRC: "https://*.example.com" 32 | CSP_EXTRA_PREFETCH_SRC: "https://*.example.com" 33 | CSP_EXTRA_STYLE_SRC: "https://*.example.com" 34 | CSP_EXTRA_WORKER_SRC: "https://*.example.com" 35 | IS_SELF_HOSTED: "true" 36 | -------------------------------------------------------------------------------- /values/team-settings/prod-secrets.example.yaml: -------------------------------------------------------------------------------- 1 | secrets: 2 | # NOTE: This setting doesn't have to be changed for offline deploys as the team-settings 3 | # container is pre-seeded 4 | # It is just the empty "{}" json hashmap 5 | configJson: "e30K" 6 | -------------------------------------------------------------------------------- /values/team-settings/prod-values.example.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | # image: 3 | # tag: some-tag (only override if you want a newer/different version than what is in the chart) 4 | config: 5 | externalUrls: 6 | backendRest: nginz-https.example.com 7 | backendWebsocket: nginz-ssl.example.com 8 | backendDomain: example.com 9 | appHost: teams.example.com 10 | # See full list of available environment variables: https://github.com/wireapp/wire-web-config-wire/blob/master/wire-team-settings/.env.defaults 11 | envVars: 12 | APP_NAME: "Team Settings" 13 | ENFORCE_HTTPS: "true" 14 | FEATURE_CHECK_CONSENT: "false" 15 | FEATURE_ENABLE_DEBUG: "false" 16 | FEATURE_ENABLE_NEW_TEAM: "true" 17 | # NOTE: Uncomment this for legalhold support in the Team-settings UI 18 | # FEATURE_ENABLE_LEGAL_HOLD: "true" 19 | URL_ACCOUNT_BASE: "https://account.example.com" 20 | URL_WEBAPP_BASE: "https://webapp.example.com" 21 | URL_WEBSITE_BASE: "https://www.example.com" 22 | WEBSITE_LABEL: "www.example.com" 23 | CSP_EXTRA_CONNECT_SRC: "https://*.example.com, wss://*.example.com" 24 | CSP_EXTRA_IMG_SRC: "https://*.example.com" 25 | CSP_EXTRA_SCRIPT_SRC: "https://*.example.com" 26 | CSP_EXTRA_DEFAULT_SRC: "https://*.example.com" 27 | CSP_EXTRA_FONT_SRC: "https://*.example.com" 28 | CSP_EXTRA_FRAME_SRC: "https://*.example.com" 29 | CSP_EXTRA_MANIFEST_SRC: "https://*.example.com" 30 | CSP_EXTRA_OBJECT_SRC: "https://*.example.com" 31 | CSP_EXTRA_MEDIA_SRC: "https://*.example.com" 32 | CSP_EXTRA_PREFETCH_SRC: "https://*.example.com" 33 | CSP_EXTRA_STYLE_SRC: "https://*.example.com" 34 | CSP_EXTRA_WORKER_SRC: "https://*.example.com" 35 | IS_SELF_HOSTED: "true" 36 | -------------------------------------------------------------------------------- /values/wire-server-metrics/demo-values.example.yaml: -------------------------------------------------------------------------------- 
1 | # This configuration switches to use memory instead of disk for metrics services 2 | # NOTE: If the pods are killed you WILL lose all your metrics history 3 | # prometheus-operator: 4 | # grafana: 5 | # persistence: 6 | # enabled: false 7 | # prometheus: 8 | # prometheusSpec: 9 | # storageSpec: null 10 | # alertmanager: 11 | # alertmanagerSpec: 12 | # storage: null 13 | 14 | 15 | # This configuration Allows you to use a custom storage class to provision 16 | # disks for your metrics services 17 | # prometheus-operator: 18 | # grafana: 19 | # persistence: 20 | # storageClassName: "" 21 | # prometheus: 22 | # prometheusSpec: 23 | # storageSpec: 24 | # volumeClaimTemplate: 25 | # spec: 26 | # storageClassName: "" 27 | # alertmanager: 28 | # alertmanagerSpec: 29 | # storage: 30 | # volumeClaimTemplate: 31 | # spec: 32 | # storageClassName: "" 33 | --------------------------------------------------------------------------------
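As a concrete illustration of the second commented block in the wire-server-metrics example above, the persistent-storage variant could be filled in roughly as follows. This is only a sketch derived from that commented template; "openebs-hostpath" is merely an example storage class name (it appears elsewhere in these example values as an optional dynamic provisioner), and any storage class available in the cluster could be used instead.

```
# Sketch: persist metrics on an existing storage class (name is an example).
prometheus-operator:
  grafana:
    persistence:
      storageClassName: "openebs-hostpath"
  prometheus:
    prometheusSpec:
      storageSpec:
        volumeClaimTemplate:
          spec:
            storageClassName: "openebs-hostpath"
  alertmanager:
    alertmanagerSpec:
      storage:
        volumeClaimTemplate:
          spec:
            storageClassName: "openebs-hostpath"
```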