├── .git-crypt ├── .gitattributes └── keys │ └── default │ └── 0 │ ├── 1C9F6546FF9B7C1B097CB9D68A4BF233DB1D650A.gpg │ ├── 1DA91E6CE87E3C1FCE32BC0CB6ED85CC5872D5E4.gpg │ ├── 509CDFFC2D0783A33CF87D2B703EE21DE4D4D9C9.gpg │ ├── 8C05D0E98B7914EDEBDCC8CC8E8E09282F2E17AF.gpg │ ├── 8E56193CE06E24722C7F2DEB1B5B5D1B8BB0BC18.gpg │ └── F8413E8FA339472249D12555DF6738B80C155B71.gpg ├── .gitattributes ├── .github ├── CODEOWNERS ├── ff-bot.yml ├── labeler.yml └── workflows │ ├── block-fixup-commits.yaml │ ├── dns-deploy.yaml │ ├── dns-dry-run.yaml │ ├── docs.yaml │ ├── labeler.yml │ ├── lint-ansible.yaml │ ├── lint-docs.yaml │ ├── lint-kubernetes.yaml │ ├── lint.yaml │ ├── main.yaml │ ├── pr_thanks.yaml │ └── status_embed.yaml ├── .gitignore ├── .gitmodules ├── .pre-commit-config.yaml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── ansible ├── .ansible-lint ├── .gitattributes ├── README.md ├── ansible.cfg ├── group_vars │ └── all │ │ ├── linode.yml │ │ └── nftables.yml ├── host_vars │ └── lovelace │ │ ├── alloy.yml │ │ ├── dmarc_metrics.yml │ │ ├── munin.yml │ │ ├── nginx.yml │ │ └── prometheus.yml ├── inventory │ └── hosts.yaml ├── local_testing │ ├── .gitignore │ ├── README.md │ ├── Vagrantfile │ ├── hosts.yaml │ └── scripts │ │ └── push-keys ├── playbook.yml └── roles │ ├── alloy │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── alloy-override.conf │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── config.alloy.j2 │ └── vars │ │ └── main │ │ ├── main.yml │ │ └── vault.yml │ ├── certbot │ ├── README.md │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main │ │ ├── main.yml │ │ └── vault.yml │ ├── ci-user │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main │ │ └── main.yml │ ├── common │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── etc-hosts.j2 │ │ ├── motd.j2 │ │ ├── sudo_lecture.j2 │ │ └── sudoers.j2 │ ├── dovecot-monitoring │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── maildir-mails.sh.j2 │ │ └── maildir-sizes.sh.j2 │ ├── dovecot │ ├── files │ │ ├── spamc-learn-ham.sh │ │ ├── spamc-learn-spam.sh │ │ └── welcome.sh │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── configs │ │ │ ├── 10-auth.conf.j2 │ │ │ ├── 10-mail.conf.j2 │ │ │ ├── 10-master.conf.j2 │ │ │ ├── 10-ssl.conf.j2 │ │ │ ├── 15-mailboxes.conf.j2 │ │ │ ├── 20-imap.conf.j2 │ │ │ ├── 20-lmtp.conf.j2 │ │ │ └── auth-ldap.conf.ext.j2 │ │ ├── dovecot-ldap.conf.ext.j2 │ │ ├── dovecot.conf.j2 │ │ ├── learn-ham.sieve.j2 │ │ ├── learn-spam.sieve.j2 │ │ ├── prevent-duplicates.sieve.j2 │ │ └── spam-to-folder.sieve.j2 │ └── vars │ │ └── main │ │ ├── main.yml │ │ └── vault.yml │ ├── fail2ban │ ├── README.md │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── jail.local.j2 │ ├── firewalld │ └── handlers │ │ └── main.yml │ ├── git-mirrors │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── cgitrc.j2 │ │ ├── nginx-site.conf.j2 │ │ └── update-mirrors.sh.j2 │ └── vars │ │ └── main.yml │ ├── jitsi │ ├── README.md │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── install.yml │ │ ├── ldap_auth.yml │ │ └── main.yml │ └── vars │ │ └── main.yml │ ├── ldap │ ├── README.md │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── munin-node │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml 
│ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── munin-node.conf.j2 │ │ ├── plugin.conf.j2 │ │ └── plugins │ │ ├── ldap01 │ │ └── .gitkeep │ │ └── lovelace │ │ ├── dovecot_maildirs.sh.j2 │ │ └── lovering_inheritance.py.j2 │ ├── munin │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── munin.conf.j2 │ ├── neomutt │ └── tasks │ │ └── main.yml │ ├── nginx-cloudflare-mtls │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── cloudflare.crt │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── nginx-geoip │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── nginx │ ├── README.md │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── default_server.conf │ │ └── nginx-conf.d │ │ └── charset.conf.j2 │ ├── opendkim │ ├── README.md │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── opendkim.conf.j2 │ └── vars │ │ └── main.yml │ ├── opendmarc-inbox │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── dmarc.sieve.j2 │ └── vars │ │ └── main.yml │ ├── opendmarc │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── opendmarc.conf.j2 │ └── vars │ │ └── main.yml │ ├── podman │ └── tasks │ │ └── main.yml │ ├── postfix │ ├── files │ │ ├── policyd-spf.conf │ │ └── sender_access │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── header-checks-submission.j2 │ │ ├── ldap │ │ │ ├── ldap-group-aliases.cf.j2 │ │ │ ├── ldap-registeredaddress.cf.j2 │ │ │ ├── ldap-relay-recipients.cf.j2 │ │ │ ├── ldap-service-mail.cf.j2 │ │ │ └── ldap-uid.cf.j2 │ │ ├── main.cf.j2 │ │ ├── sender-canonical-maps.j2 │ │ ├── services │ │ │ ├── fortune-reply.sh.j2 │ │ │ └── fredrick-reply.sh.j2 │ │ ├── transport.j2 │ │ └── virtual.j2 │ └── vars │ │ └── main │ │ ├── main.yml │ │ └── vault.yml │ ├── postgres │ ├── README.md │ ├── files │ │ └── ident.conf │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── pg_repack.yml │ ├── templates │ │ └── postgresql.conf.j2 │ └── vars │ │ └── main │ │ ├── db_passwords.yml │ │ └── main.yml │ ├── prometheus-blackbox-exporter │ ├── README.md │ └── tasks │ │ └── main.yaml │ ├── prometheus-node-exporter │ ├── README.md │ └── tasks │ │ └── main.yml │ ├── prometheus-postfix-exporter │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── prometheus │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── pydis-mtls │ ├── README.md │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── requirements.yml │ ├── rrdstats │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── generate-rrdtool-stats.service.j2 │ │ ├── generate-rrdtool-stats.sh.j2 │ │ └── generate-rrdtool-stats.timer.j2 │ ├── sasl │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── user.conf │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── saslauthd.conf.j2 │ └── vars │ │ └── main │ │ ├── main.yml │ │ └── vault.yml │ ├── spamassassin │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── local.cf.j2 │ └── vars │ │ └── main.yml │ ├── ssh │ └── handlers │ │ └── main.yml │ ├── systemd │ └── handlers │ │ └── main.yml │ ├── unattended-upgrades │ └── tasks │ │ └── main.yml │ └── wireguard │ ├── defaults │ └── main │ │ └── vars.yml │ ├── 
handlers │ └── main.yml │ ├── tasks │ └── main.yml │ └── templates │ └── wg0.conf.j2 ├── dns ├── README.md ├── production.yaml └── zones │ ├── pydis.com.yaml │ ├── pydis.org.yaml │ ├── pydis.wtf.zone │ ├── box.yaml │ ├── mail.yaml │ └── root.yaml │ ├── pythondiscord.com.yaml │ └── pythondiscord.org.yaml ├── docs ├── README.md ├── docs │ ├── assets │ │ └── logo.svg │ ├── common-queries │ │ ├── index.md │ │ ├── kubernetes.md │ │ ├── loki.md │ │ └── postgresql.md │ ├── index.md │ ├── meeting-notes │ │ ├── .authors.yml │ │ ├── index.md │ │ └── posts │ │ │ ├── 2022-04-07.md │ │ │ ├── 2022-09-18.md │ │ │ ├── 2022-10-05.md │ │ │ ├── 2022-10-19.md │ │ │ ├── 2022-10-26.md │ │ │ ├── 2022-11-02.md │ │ │ ├── 2022-11-23.md │ │ │ ├── 2023-02-08.md │ │ │ ├── 2023-02-21.md │ │ │ ├── 2023-02-28.md │ │ │ ├── 2023-05-16.md │ │ │ ├── 2023-07-11.md │ │ │ ├── 2023-07-18.md │ │ │ ├── 2023-07-25.md │ │ │ ├── 2023-08-01.md │ │ │ ├── 2023-08-08.md │ │ │ ├── 2023-08-22.md │ │ │ ├── 2023-08-29.md │ │ │ ├── 2023-09-05.md │ │ │ ├── 2023-09-12.md │ │ │ ├── 2024-07-02.md │ │ │ ├── 2024-07-25.md │ │ │ └── 2024-08-15.md │ ├── onboarding │ │ ├── access-table.md │ │ ├── index.md │ │ ├── public_folders.md │ │ ├── resources.md │ │ ├── rules.md │ │ └── tools.md │ ├── post-mortems │ │ ├── .authors.yml │ │ ├── index.md │ │ └── posts │ │ │ ├── all-services-outage.md │ │ │ ├── cascading-node-failures.md │ │ │ ├── images │ │ │ ├── 2021-01-12 │ │ │ │ ├── site_cpu_throttle.png │ │ │ │ └── site_resource_abnormal.png │ │ │ └── 2021-01-30 │ │ │ │ ├── linode_loadbalancers.png │ │ │ │ ├── memory_charts.png │ │ │ │ ├── prometheus_status.png │ │ │ │ └── scaleios.png │ │ │ ├── nodebalancer-fails-memory.md │ │ │ ├── postgres-connection-surge.md │ │ │ ├── primary-kubernetes-node-outage.md │ │ │ └── site-cpu-ram-exhaustion.md │ ├── runbooks │ │ ├── index.md │ │ └── upgrading-postgresql.md │ ├── services │ │ ├── LDAP │ │ │ ├── assets │ │ │ │ └── keycloak_user.png │ │ │ ├── components │ │ │ │ ├── freeipa.md │ │ │ │ ├── keycloak.md │ │ │ │ └── ldap.md │ │ │ ├── discord-ldap.md │ │ │ └── index.md │ │ ├── email │ │ │ ├── components │ │ │ │ ├── assets │ │ │ │ │ └── dmarc-report.png │ │ │ │ ├── dovecot │ │ │ │ │ ├── imap.md │ │ │ │ │ ├── index.md │ │ │ │ │ └── local-delivery.md │ │ │ │ ├── postfix.md │ │ │ │ ├── signing.md │ │ │ │ └── validation.md │ │ │ ├── index.md │ │ │ ├── mail-clients.md │ │ │ └── mail-services.md │ │ └── index.md │ └── tooling │ │ └── github-bots.md ├── mkdocs.yml └── overrides │ └── partials │ └── copyright.html ├── kubernetes ├── README.md ├── cluster-wide-secrets │ ├── README.md │ └── ghcr-pull-secrets.yaml ├── ipa-ca-configmap.yaml ├── namespaces │ ├── apis │ │ ├── code-jam-management │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ │ ├── patsy │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ │ ├── quackstack │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ └── service.yaml │ │ └── rtex │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ └── service.yaml │ ├── bots │ │ ├── black-knight │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ └── secrets.yaml │ │ ├── bot │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ └── secrets.yaml │ │ ├── king-arthur │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── secrets.yaml │ │ │ ├── service-account.yaml │ │ │ └── ssh-secrets.yaml │ │ ├── metricity │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ └── secrets.yaml │ │ ├── sir-lancebot │ │ 
│ ├── README.md │ │ │ ├── deployment.yaml │ │ │ └── secrets.yaml │ │ └── sir-robin │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ └── secrets.yaml │ ├── cert-manager │ │ └── cert-manager │ │ │ ├── README.md │ │ │ ├── certificates │ │ │ ├── owlcorp.uk.yaml │ │ │ ├── pydis.wtf.yaml │ │ │ └── pythondiscord.com.yaml │ │ │ ├── issuers │ │ │ ├── letsencrypt-prod.yaml │ │ │ └── letsencrypt-staging.yaml │ │ │ ├── secrets.yaml │ │ │ └── values.yaml │ ├── databases │ │ ├── blackbox │ │ │ ├── README.md │ │ │ ├── blackbox-configmap.yaml │ │ │ ├── cronjob.yaml │ │ │ └── secrets.yaml │ │ ├── mongodb │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── secrets.yaml │ │ │ ├── service.yaml │ │ │ └── volume.yaml │ │ └── redis │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── redis.conf.template │ │ │ ├── secrets.yaml │ │ │ ├── service.yaml │ │ │ └── volume.yaml │ ├── default │ │ ├── graphite │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── service.yaml │ │ │ └── volume.yaml │ │ └── redirects │ │ │ ├── README.md │ │ │ ├── github.yaml │ │ │ ├── paypal.yaml │ │ │ └── sentry.yaml │ ├── forms │ │ └── forms-backend │ │ │ ├── README.md │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ ├── kube-system │ │ ├── README.md │ │ ├── coredns-custom.yaml │ │ ├── metrics-server │ │ │ ├── README.md │ │ │ └── values.yaml │ │ ├── nginx │ │ │ ├── README.md │ │ │ ├── internal-svc.yaml │ │ │ ├── mtls │ │ │ │ ├── Makefile │ │ │ │ ├── ca.crt │ │ │ │ ├── cloudflare-cert.pem │ │ │ │ └── pydis-cert.pem │ │ │ └── values.yaml │ │ └── reflector │ │ │ └── README.md │ ├── loki │ │ ├── README.md │ │ ├── alloy_values.yml │ │ ├── ingress.yml │ │ ├── loki_values.yml │ │ └── secret.yml │ ├── merch │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ ├── secrets.yaml │ │ └── service.yaml │ ├── modmail │ │ ├── README.md │ │ ├── bot │ │ │ ├── README.md │ │ │ └── deployment.yaml │ │ ├── configmap.yaml │ │ ├── secrets.yaml │ │ └── web │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ └── service.yaml │ ├── monitoring │ │ ├── alerts │ │ │ ├── Makefile │ │ │ ├── README.md │ │ │ ├── alertmanager.yaml │ │ │ ├── alertmanager │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── initscript.yaml │ │ │ │ ├── sd-service.yaml │ │ │ │ ├── secrets.yaml │ │ │ │ ├── service-account.yaml │ │ │ │ └── service.yaml │ │ │ └── alerts.d │ │ │ │ ├── alertmanager.yaml │ │ │ │ ├── certificates.yaml │ │ │ │ ├── coredns.yaml │ │ │ │ ├── cpu.yaml │ │ │ │ ├── django.yaml │ │ │ │ ├── etcd.yaml │ │ │ │ ├── jobs.yaml │ │ │ │ ├── mail.yaml │ │ │ │ ├── memory.yaml │ │ │ │ ├── nginx.yaml │ │ │ │ ├── nodes.yaml │ │ │ │ ├── pods.yaml │ │ │ │ ├── prometheus.yaml │ │ │ │ ├── redis.yaml │ │ │ │ └── volumes.yaml │ │ ├── calico-metrics-svc.yaml │ │ ├── exporters │ │ │ ├── README.md │ │ │ └── redis │ │ │ │ ├── redis_exporter.yaml │ │ │ │ └── secrets.yaml │ │ ├── grafana │ │ │ ├── README.md │ │ │ ├── configmap-ldap-toml.yaml │ │ │ ├── configmap-ldap.yaml │ │ │ ├── configmap.yaml │ │ │ ├── deployment-grafana.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ ├── services.yaml │ │ │ └── volume.yaml │ │ ├── kube-state-metrics │ │ │ ├── deployment.yaml │ │ │ ├── service-account.yaml │ │ │ └── service.yaml │ │ ├── kubewatch │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── secrets.yaml │ │ │ └── service-account.yaml │ │ ├── node_exporter │ │ │ ├── daemonset.yaml │ │ │ └── service.yaml │ │ ├── olli │ │ │ ├── 
README.md │ │ │ ├── configmap.yaml │ │ │ └── secrets.yaml │ │ └── prometheus │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── prometheus-config.yaml │ │ │ ├── reloader-script.yaml │ │ │ ├── secrets.yaml │ │ │ ├── service-account.yaml │ │ │ ├── service.yaml │ │ │ └── volume.yaml │ ├── pixels │ │ ├── pixels-modsite │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ │ └── pixels │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ ├── snekbox │ │ ├── snekbox-forms │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ └── service.yaml │ │ └── snekbox │ │ │ ├── README.md │ │ │ └── service.yaml │ ├── tooling │ │ ├── bitwarden │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ │ ├── ff-bot │ │ │ ├── README.md │ │ │ ├── deployment.yml │ │ │ ├── ingress.yml │ │ │ ├── secrets.yml │ │ │ └── service.yml │ │ ├── keycloak │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ │ ├── metabase │ │ │ ├── README.md │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ │ └── policy-bot │ │ │ ├── README.md │ │ │ ├── configmap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── secrets.yaml │ │ │ └── service.yaml │ ├── vault │ │ ├── README.md │ │ └── ingress.yaml │ └── web │ │ ├── pinnwand │ │ ├── README.md │ │ ├── defaults-configmap.yaml │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ ├── secrets.yaml │ │ └── service.yaml │ │ ├── public-stats │ │ ├── README.md │ │ ├── ingress.yaml │ │ └── service.yaml │ │ └── site │ │ ├── README.md │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ ├── redirect.yaml │ │ ├── secrets.yaml │ │ └── service.yaml ├── scripts │ ├── __init__.py │ ├── create_x509_user_config.py │ └── lint_manifests.py └── service-accounts │ ├── internal-tls-issuer.yaml │ └── postgres-issuer.yaml ├── poetry.lock ├── pyproject.toml ├── renovate.json └── server_bootstrap.sh /.git-crypt/.gitattributes: -------------------------------------------------------------------------------- 1 | # Do not edit this file. To specify the files to encrypt, create your own 2 | # .gitattributes file in the directory where your files are. 
3 | * !filter !diff 4 | *.gpg binary 5 | -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/1C9F6546FF9B7C1B097CB9D68A4BF233DB1D650A.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/.git-crypt/keys/default/0/1C9F6546FF9B7C1B097CB9D68A4BF233DB1D650A.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/1DA91E6CE87E3C1FCE32BC0CB6ED85CC5872D5E4.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/.git-crypt/keys/default/0/1DA91E6CE87E3C1FCE32BC0CB6ED85CC5872D5E4.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/509CDFFC2D0783A33CF87D2B703EE21DE4D4D9C9.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/.git-crypt/keys/default/0/509CDFFC2D0783A33CF87D2B703EE21DE4D4D9C9.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/8C05D0E98B7914EDEBDCC8CC8E8E09282F2E17AF.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/.git-crypt/keys/default/0/8C05D0E98B7914EDEBDCC8CC8E8E09282F2E17AF.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/8E56193CE06E24722C7F2DEB1B5B5D1B8BB0BC18.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/.git-crypt/keys/default/0/8E56193CE06E24722C7F2DEB1B5B5D1B8BB0BC18.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/F8413E8FA339472249D12555DF6738B80C155B71.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/.git-crypt/keys/default/0/F8413E8FA339472249D12555DF6738B80C155B71.gpg -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | secrets.yaml filter=git-crypt diff=git-crypt 3 | secrets.yml filter=git-crypt diff=git-crypt 4 | secret.yaml filter=git-crypt diff=git-crypt 5 | secret.yml filter=git-crypt diff=git-crypt 6 | ghcr-pull-secrets.yaml filter=git-crypt diff=git-crypt 7 | ssh-secrets.yaml filter=git-crypt diff=git-crypt 8 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | dns/ @jb3 2 | kubernetes/ @jb3 3 | .github/ @jb3 4 | -------------------------------------------------------------------------------- /.github/ff-bot.yml: -------------------------------------------------------------------------------- 1 | # We have a ff-bot.yml policy file to determine who can use the "/merge" 2 | # command in Pull Requests to run an automated fast-forward merge onto 3 | # main. 
4 | 5 | # Users or teams can be specified and when "/merge" is run a permission 6 | # check will be performed against the latest version of the policy on the 7 | # default branch. 8 | 9 | # Find all information at https://github.com/jb3/ff_bot 10 | 11 | users: [] 12 | teams: 13 | - python-discord/sudo-devops 14 | -------------------------------------------------------------------------------- /.github/labeler.yml: -------------------------------------------------------------------------------- 1 | "group: ansible": 2 | - changed-files: 3 | - any-glob-to-any-file: "ansible/**" 4 | 5 | "group: dns": 6 | - changed-files: 7 | - any-glob-to-any-file: "dns/**" 8 | 9 | "group: docs": 10 | - changed-files: 11 | - any-glob-to-any-file: "docs/**" 12 | 13 | "group: github actions": 14 | - changed-files: 15 | - any-glob-to-any-file: ".github/workflows/**" 16 | 17 | "group: kubernetes": 18 | - changed-files: 19 | - any-glob-to-any-file: "kubernetes/**" 20 | 21 | "component: database": 22 | - changed-files: 23 | - any-glob-to-any-file: 24 | - "ansible/roles/postgres/**" 25 | - "ansible/roles/prometheus-postgres-exporter/**" 26 | 27 | "component: email": 28 | - changed-files: 29 | - any-glob-to-any-file: 30 | - "ansible/roles/dovecot/**" 31 | - "ansible/roles/neomutt/**" 32 | - "ansible/roles/opendkim/**" 33 | - "ansible/roles/opendmarc/**" 34 | - "ansible/roles/postfix/**" 35 | - "ansible/roles/spamassassin/**" 36 | -------------------------------------------------------------------------------- /.github/workflows/block-fixup-commits.yaml: -------------------------------------------------------------------------------- 1 | name: Block fixup commits 2 | 3 | on: 4 | pull_request_target: 5 | 6 | concurrency: 7 | group: ${{ github.workflow }}-${{ github.ref }} 8 | cancel-in-progress: true 9 | 10 | jobs: 11 | block-fixup-commits: 12 | if: github.ref != 'refs/heads/main' 13 | uses: python-discord/.github/.github/workflows/block-fixup-commits.yaml@main 14 | -------------------------------------------------------------------------------- /.github/workflows/dns-deploy.yaml: -------------------------------------------------------------------------------- 1 | name: Deploy DNS to providers 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - 'dns/**' 9 | jobs: 10 | octodns-sync: 11 | environment: production 12 | name: Sync latest changes to DNS providers 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: Install Python Dependencies 17 | uses: HassanAbouelela/actions/setup-python@setup-python_v1.6.0 18 | with: 19 | python_version: '3.12' 20 | install_args: --only dns 21 | - uses: solvaholic/octodns-sync@main 22 | with: 23 | config_path: dns/production.yaml 24 | doit: '--doit' 25 | env: 26 | CLOUDFLARE_TOKEN: ${{ secrets.CLOUDFLARE_TOKEN }} 27 | CLOUDFLARE_ACCOUNT_ID: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} 28 | -------------------------------------------------------------------------------- /.github/workflows/labeler.yml: -------------------------------------------------------------------------------- 1 | name: "Pull Request Labeler" 2 | on: 3 | - pull_request_target 4 | 5 | jobs: 6 | labeler: 7 | permissions: 8 | contents: read 9 | pull-requests: write 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/labeler@v5 13 | -------------------------------------------------------------------------------- /.github/workflows/lint-docs.yaml: -------------------------------------------------------------------------------- 1 | name: Test documentation 2 | on: 3 | 
pull_request: 4 | 5 | jobs: 6 | lint-docs: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v4 10 | with: 11 | submodules: recursive 12 | fetch-depth: 0 13 | - name: Install Python Dependencies 14 | uses: HassanAbouelela/actions/setup-python@setup-python_v1.6.0 15 | with: 16 | python_version: "3.12" 17 | install_args: --with docs 18 | - name: Build docs 19 | run: poetry run task build-docs 20 | -------------------------------------------------------------------------------- /.github/workflows/lint-kubernetes.yaml: -------------------------------------------------------------------------------- 1 | name: Lint kubernetes manifests 2 | 3 | on: 4 | workflow_call: 5 | secrets: 6 | kube-config: 7 | required: true 8 | 9 | jobs: 10 | lint-manifests: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v4 15 | 16 | - name: Get list of files 17 | run: | 18 | echo "manifests<> $GITHUB_OUTPUT 19 | python kubernetes/scripts/lint_manifests.py find >> $GITHUB_OUTPUT 20 | echo "EOF" >> $GITHUB_OUTPUT 21 | id: manifest-files 22 | 23 | - uses: azure/setup-kubectl@v4.0.0 24 | 25 | - name: Authenticate with Kubernetes 26 | uses: azure/k8s-set-context@v4 27 | with: 28 | method: kubeconfig 29 | kubeconfig: ${{ secrets.kube-config }} 30 | 31 | - uses: jb3/k8s-lint@master 32 | with: 33 | lintType: dryrun 34 | manifests: | 35 | ${{ steps.manifest-files.outputs.manifests }} 36 | -------------------------------------------------------------------------------- /.github/workflows/pr_thanks.yaml: -------------------------------------------------------------------------------- 1 | name: PR Thanker 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | types: [closed] 8 | 9 | jobs: 10 | thanks: 11 | runs-on: ubuntu-latest 12 | if: github.event.pull_request.merged == true 13 | steps: 14 | - name: Comment on PR 15 | uses: unsplash/comment-on-pr@master 16 | env: 17 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 18 | with: 19 | msg: "Thanks!" 
20 | check_for_duplicate_msg: true 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /ansible/.ansible/ 2 | vault_passwords 3 | docs/site/ 4 | venv 5 | .venv 6 | .cache/ 7 | .vscode/ 8 | *.config 9 | .env 10 | .idea 11 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "ansible/roles/nftables"] 2 | path = ansible/roles/nftables 3 | url = https://git.jchri.st/jc/ansible-role-nftables.git 4 | [submodule "ansible/roles/prometheus-postgres-exporter"] 5 | path = ansible/roles/prometheus-postgres-exporter 6 | url = git@github.com:jchristgit/ansible-role-prometheus-postgres-exporter.git 7 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: (secrets?\.ya?ml)|(ghcr-pull-secrets\.yaml)$ 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v4.5.0 5 | hooks: 6 | - id: check-merge-conflict 7 | - id: check-toml 8 | - id: check-yaml 9 | args: [--allow-multiple-documents, --unsafe] 10 | exclude: ^docs/mkdocs.yml 11 | - id: end-of-file-fixer 12 | - id: trailing-whitespace 13 | args: [--markdown-linebreak-ext=md] 14 | 15 | - repo: local 16 | hooks: 17 | - id: ruff-lint 18 | name: ruff linting 19 | description: Run ruff linting 20 | entry: poetry run ruff check --force-exclude 21 | language: system 22 | "types_or": [python, pyi] 23 | require_serial: true 24 | args: [--fix, --exit-non-zero-on-fix] 25 | 26 | - id: ruff-format 27 | name: ruff formatting 28 | description: Run ruff formatting 29 | entry: poetry run ruff format --force-exclude 30 | language: system 31 | "types_or": [python, pyi] 32 | require_serial: true 33 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | The Contributing Guidelines for Python Discord projects can be found [on our website](https://pydis.com/contributing.md). 4 | 5 | Additionally, **contributions authored by any form of artificial intelligence**, 6 | including, but not limited to, issue or pull request comments, code and 7 | documentation, both full and in partial assistance, **are strictly prohibited**. 8 | Please use your natural intelligence instead. 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Python Discord 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ansible/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | exclude_paths: 3 | - .github # Not ansible roles 4 | - roles/certbot/vars/main/vault.yml 5 | # Submodules 6 | - roles/nftables 7 | - roles/prometheus-postgres-exporter 8 | skip_list: 9 | - fqcn-builtins 10 | - meta-no-info 11 | - role-name 12 | - jinja[spacing] 13 | -------------------------------------------------------------------------------- /ansible/.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory/hosts.yaml 3 | host_key_checking = False 4 | vault_password_file = vault_passwords 5 | collections_path = .ansible/galaxy_collections/ 6 | roles_path = .ansible/galaxy_roles/:roles/ 7 | 8 | ansible_managed = Managed by Ansible (do not edit). Role: {{{{ role_name }}}}, Template: {{{{ template_path | basename }}}} 9 | 10 | [privilege_escalation] 11 | become = yes 12 | 13 | [connection] 14 | pipelining = True 15 | -------------------------------------------------------------------------------- /ansible/group_vars/all/linode.yml: -------------------------------------------------------------------------------- 1 | --- 2 | lke_all_addresses: "{{ lookup('ansible.builtin.url', 'https://geoip.linode.com/', wantlist=True) }}" 3 | lke_frankfurt_addresses: "{{ lke_all_addresses | select('search', '^.*Frankfurt.*$') | map('split', ',') | map(attribute=0) | list }}" 4 | lke_frankfurt_ipv4_addresses: "{{ lke_frankfurt_addresses | select('search', '^.*\\..*$') }}" 5 | lke_frankfurt_ipv6_addresses: "{{ lke_frankfurt_addresses | select('search', '^.*:.*$') }}" 6 | -------------------------------------------------------------------------------- /ansible/host_vars/lovelace/alloy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | alloy_extra_files: 3 | - name: postgresql 4 | path: "/var/log/postgresql/postgresql-*-main.log" 5 | - name: nginx_access 6 | path: "/var/log/nginx/access.log" 7 | - name: nginx_error 8 | path: "/var/log/nginx/error.log" 9 | -------------------------------------------------------------------------------- /ansible/host_vars/lovelace/dmarc_metrics.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dmarc_metrics_imap_username: "dmarc" 3 | dmarc_metrics_imap_host: "mail.pydis.wtf" 4 | dmarc_metrics_imap_password: !vault | 5 | $ANSIBLE_VAULT;1.1;AES256 6 | 37373961393432616163383330616634643639353537353964346361343938653661346662386131 7 | 6338343631373264393237666661333739353430313666340a393236313536303935353230363165 8 | 65323537626336353731313165643935386233373439386237623662613632386135316564653365 9 | 
3662396363386362380a386338323834383732343232616666353938323435643338643862623533 10 | 35623234393534623638346635386661313831643864396561663335373034653335 11 | dmarc_metrics_folder_inbox: "Reports.New" 12 | dmarc_metrics_folder_done: "Reports.Done" 13 | dmarc_metrics_folder_error: "Reports.Invalid" 14 | -------------------------------------------------------------------------------- /ansible/host_vars/lovelace/munin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | munin_node__plugins: 3 | # SpamAssassin 4 | - src: spamstats 5 | # Nginx 6 | - src: nginx_request 7 | - src: nginx_status 8 | # Postfix 9 | - src: postfix_mailstats 10 | - src: postfix_mailqueue 11 | - src: postfix_mailvolume 12 | # PostgreSQL 13 | - src: postgres_size_ 14 | dest: postgres_size_metricity 15 | - src: postgres_size_ 16 | dest: postgres_size_site 17 | - src: postgres_size_ 18 | dest: postgres_size_ALL 19 | - src: postgres_xlog 20 | - src: postgres_autovacuum 21 | - src: postgres_bgwriter 22 | - src: postgres_checkpoints 23 | - src: postgres_connections_db 24 | - src: postgres_users 25 | - src: postgres_xlog 26 | -------------------------------------------------------------------------------- /ansible/inventory/hosts.yaml: -------------------------------------------------------------------------------- 1 | all: 2 | hosts: 3 | lovelace: 4 | ansible_host: lovelace.box.pydis.wtf 5 | wireguard_subnet: 10.2.0.0/16 6 | certbot_reload_services: 7 | - dovecot 8 | - postfix@- 9 | ldap01: 10 | ansible_host: ldap01.box.pydis.wtf 11 | wireguard_subnet: 10.3.0.0/16 12 | children: 13 | netcup: 14 | hosts: 15 | lovelace: 16 | nginx: 17 | hosts: 18 | lovelace: 19 | databases: 20 | hosts: 21 | lovelace: 22 | monitoring: 23 | hosts: 24 | lovelace: 25 | ldap: 26 | hosts: 27 | ldap01: 28 | mail: 29 | hosts: 30 | lovelace: 31 | jitsi: 32 | hosts: 33 | lovelace: 34 | vars: 35 | wireguard_port: 46850 36 | -------------------------------------------------------------------------------- /ansible/local_testing/.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/ 2 | -------------------------------------------------------------------------------- /ansible/local_testing/hosts.yaml: -------------------------------------------------------------------------------- 1 | all: 2 | hosts: 3 | hopper: 4 | ansible_host: 192.168.56.2 5 | ip: 192.168.56.2 6 | access_ip: 192.168.56.2 7 | lovelace: 8 | ansible_host: 192.168.56.3 9 | ip: 192.168.56.3 10 | access_ip: 192.168.56.3 11 | neumann: 12 | ansible_host: 192.168.56.4 13 | ip: 192.168.56.4 14 | access_ip: 192.168.56.4 15 | ritchie: 16 | ansible_host: 192.168.56.5 17 | ip: 192.168.56.5 18 | access_ip: 192.168.56.5 19 | children: 20 | nginx: 21 | hosts: 22 | lovelace: 23 | databases: 24 | hosts: 25 | lovelace: 26 | -------------------------------------------------------------------------------- /ansible/local_testing/scripts/push-keys: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Intended to be used in the "control" VM to push keys to the other hosts 3 | 4 | for i in {1..6} ; do 5 | ssh-keyscan 192.168.56.$i >> ~/.ssh/known_hosts 6 | sshpass -p vagrant ssh-copy-id 192.168.56.$i 7 | done 8 | -------------------------------------------------------------------------------- /ansible/roles/alloy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | alloy_grafana_signing_key: 
"https://apt.grafana.com/gpg.key" 3 | alloy_grafana_signing_key_fingerprint: "B53AE77BADB630A683046005963FA27710458545" 4 | alloy_grafana_repository: "https://apt.grafana.com stable main" 5 | 6 | alloy_extra_files: [] 7 | -------------------------------------------------------------------------------- /ansible/roles/alloy/files/alloy-override.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | ProtectHome=true 3 | ReadOnlyPaths=/ 4 | ReadWritePaths=/var/lib/alloy 5 | NoNewPrivileges=true 6 | -------------------------------------------------------------------------------- /ansible/roles/alloy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload the alloy service 3 | service: 4 | name: alloy 5 | state: reloaded 6 | tags: 7 | - role::alloy 8 | 9 | - name: Restart the alloy service 10 | service: 11 | name: alloy 12 | state: restarted 13 | tags: 14 | - role::alloy 15 | -------------------------------------------------------------------------------- /ansible/roles/alloy/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - systemd 4 | -------------------------------------------------------------------------------- /ansible/roles/alloy/vars/main/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | alloy_loki_endpoint: "https://loki-gateway.pydis.wtf" 3 | alloy_loki_user: "loki-user" 4 | alloy_loki_password: "{{ vault_alloy_loki_password }}" 5 | -------------------------------------------------------------------------------- /ansible/roles/alloy/vars/main/vault.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 39656334393862643038363764343434633531633732393133383463386230383331666439613033 3 | 3536653837316166336566393664393138356535373966370a623333393536383566383132633566 4 | 63373835373032623834643238383332383135333966663866623364353637663839366334306639 5 | 3361376237653963360a376536643738316632633739383134313335376234363161366631353565 6 | 36386635346563643136363763666235636232343736333234666164366363326337363135363936 7 | 35626438356636613933623930653538343331343362633364336432363933323633633033653032 8 | 61393765333065376561373264626534343332653366386133636336636364626135633736353966 9 | 31333161623234326136663136386231363365643266643533663134386139363331376337363830 10 | 37363334643739613832626562343535656632666236623937393266343633363166 11 | -------------------------------------------------------------------------------- /ansible/roles/certbot/README.md: -------------------------------------------------------------------------------- 1 | # Role "certbot" 2 | 3 | Installs certbot and the Cloudflare DNS plugin for certbot to provision and deploy TLS certificates for web properties. 
4 | -------------------------------------------------------------------------------- /ansible/roles/certbot/vars/main/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | certbot_cloudflare_token: "{{ encrypted_cloudflare_token }}" 3 | certbot_email: "joe@jb3.dev" 4 | certbot_domains: 5 | lovelace: 6 | - prometheus.lovelace.box.pydis.wtf 7 | - "pydis.wtf,*.pydis.wtf,cloud.native.is.fun.and.easy.pydis.wtf" 8 | 9 | certbot_cert_users: 10 | lovelace: 11 | - prometheus 12 | - postgres 13 | - postfix 14 | -------------------------------------------------------------------------------- /ansible/roles/certbot/vars/main/vault.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 66336535306366333038666137306135663438346366643735383962623339636236343438633766 3 | 6565343931306531623330373936313730353539303264390a333031363634663236636232386461 4 | 34353239643364653464373531653236383963303137326438343239313136376537336636326162 5 | 3537383737323732310a623836363138646434636165643130366362656661393937346534313632 6 | 37663966613031363036623838326666636231313462363831396366363837343632646131303863 7 | 35363032386463346164623733656463633735376161653361343231326166313466643236623762 8 | 31343562323362353238663666303435353138643463656531373466336639316464376632623731 9 | 32646464393438656134 10 | -------------------------------------------------------------------------------- /ansible/roles/ci-user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create new user for CI tasks 3 | user: 4 | name: "{{ ci_user_username }}" 5 | groups: "{{ ci_user_extra_groups }}" 6 | append: true 7 | system: false 8 | home: "{{ ci_user_homepath }}" 9 | tags: 10 | - role::ci-user 11 | 12 | - name: Add authorized SSH keys to CI user 13 | ansible.posix.authorized_key: 14 | user: "{{ ci_user_username }}" 15 | key: "{{ item.key }}" 16 | key_options: "{{ item.options }}" 17 | comment: "{{ item.comment }}" 18 | loop: "{{ ci_user_keys }}" 19 | loop_control: 20 | label: "{{ item.comment }}" 21 | tags: 22 | - role::ci-user 23 | -------------------------------------------------------------------------------- /ansible/roles/ci-user/vars/main/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ci_user_username: ci 3 | ci_user_homepath: /home/ci 4 | ci_user_extra_groups: 5 | - www-data 6 | 7 | ci_user_keys: 8 | - key: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJTE2hcswFHJ3SQo9PvqhB7SYqk+jE7qrZXCNS7Hp0iZ 9 | comment: github-actions-ci 10 | options: 'command="rrsync /var/www"' 11 | -------------------------------------------------------------------------------- /ansible/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart systemd-timesyncd 3 | service: 4 | name: systemd-timesyncd 5 | state: restarted 6 | tags: 7 | - role::common 8 | -------------------------------------------------------------------------------- /ansible/roles/common/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - ssh 4 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/etc-hosts.j2: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost 2 | 3 | ::1 localhost ip6-localhost 
ip6-loopback 4 | ff02::1 ip6-allnodes 5 | ff02::2 ip6-allrouters 6 | {{ lookup('dig', ansible_host) }} {{ inventory_hostname }}.box.pydis.wtf {{ inventory_hostname }} 7 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/motd.j2: -------------------------------------------------------------------------------- 1 | [[[ To any NSA and FBI agents accessing our servers: please consider ]]] 2 | [[[ whether defending the US Constitution against all enemies, ]]] 3 | [[[ foreign or domestic, requires you to follow Snowden's example. ]]] 4 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/sudo_lecture.j2: -------------------------------------------------------------------------------- 1 | 2 |  "Bee" careful __ 3 | with sudo! // \ 4 | \\_/ // 5 | ''-.._.-''-.._.. -(||)(') 6 | ''' 7 | -------------------------------------------------------------------------------- /ansible/roles/common/templates/sudoers.j2: -------------------------------------------------------------------------------- 1 | Defaults lecture_file="/etc/sudo_lecture" 2 | Defaults insults 3 | 4 | %sudo ALL=(ALL) NOPASSWD:ALL 5 | 6 | # vim: ft=sudoers.j2: 7 | -------------------------------------------------------------------------------- /ansible/roles/dovecot-monitoring/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dovecot_monitoring_cron_filename: ansible_dovecot_prometheus_textfile_exporter 3 | dovecot_monitoring_scripts_directory: /opt/pydis/dovecot-monitoring 4 | -------------------------------------------------------------------------------- /ansible/roles/dovecot-monitoring/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - dovecot 4 | - prometheus-node-exporter 5 | -------------------------------------------------------------------------------- /ansible/roles/dovecot-monitoring/templates/maildir-mails.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # {{ ansible_managed }} 3 | 4 | cd /var/vmail && \ 5 | find . 
\ 6 | | awk -F / ' 7 | # Maildir e-mails have the hostname contained in them 8 | $0 ~ "{{ ansible_fqdn }}" { 9 | total[$2] += 1 10 | } 11 | END { 12 | print "# HELP dovecot_maildir_mail_count Count of e-mails by user" 13 | print "# TYPE dovecot_maildir_mail_count gauge" 14 | for (user in total) { 15 | print "dovecot_maildir_mail_count{user=\"" user "\"} " total[user] 16 | } 17 | } 18 | ' \ 19 | | sponge > /var/lib/prometheus/node-exporter/dovecot-maildir-mails.prom 20 | -------------------------------------------------------------------------------- /ansible/roles/dovecot-monitoring/templates/maildir-sizes.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # {{ ansible_managed }} 3 | 4 | cd /var/vmail && \ 5 | du --bytes --summarize -- * \ 6 | | awk ' 7 | BEGIN { 8 | print "# HELP dovecot_maildir_size_bytes Maildir size of e-mail users" 9 | print "# TYPE dovecot_maildir_size_bytes gauge" 10 | } 11 | { 12 | print "dovecot_maildir_size_bytes{user=\"" $2 "\"} " $1 13 | } 14 | ' \ 15 | | sponge > /var/lib/prometheus/node-exporter/dovecot-maildir-sizes.prom 16 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/files/spamc-learn-ham.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Ansible managed 3 | 4 | exec /usr/bin/spamc --learntype=ham -u debian-spamd 5 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/files/spamc-learn-spam.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Ansible managed 3 | 4 | exec /usr/bin/spamc --learntype=spam -u debian-spamd 5 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/files/welcome.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | # 3 | # Ansible Managed 4 | 5 | USER="$1" 6 | 7 | ( 8 | echo "Subject: Welcome to PyDis Mail" 9 | echo "From: PyDis DevOps Team " 10 | echo "To: $USER@pydis.wtf" 11 | echo 12 | echo "Hi $USER!" 13 | echo 14 | echo "Welcome to the Python Discord mailserver." 15 | echo 16 | echo "If you are seeing this, you have connected successfully over IMAP and" 17 | echo "are ready to receive new mail destined for your address." 18 | echo 19 | echo "If you require any help with your mailbox then don't hesitate to ask" 20 | echo "in the #dev-oops channel on Discord, or take a look at some of our" 21 | echo "team documentation at https://docs.pydis.wtf/." 22 | echo 23 | echo "If you have any immediate questions, feel free to reply to this email" 24 | echo "and a member of the DevOps team will get back to you!" 25 | echo 26 | echo "Thanks for flying with us!" 
27 | echo 28 | echo "- PyDis DevOps Team" 29 | ) | /usr/sbin/sendmail -t 30 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload Dovecot 3 | service: 4 | name: dovecot 5 | state: reloaded 6 | 7 | - name: Restart Dovecot 8 | service: 9 | name: dovecot 10 | state: restarted 11 | 12 | - name: Recompile spam-to-folder sieve script 13 | command: /usr/bin/sievec /etc/dovecot/sieve-after/spam-to-folder.sieve 14 | changed_when: true 15 | 16 | - name: Recompile prevent-duplicates sieve script 17 | command: /usr/bin/sievec /etc/dovecot/sieve-after/prevent-duplicates.sieve 18 | changed_when: true 19 | 20 | - name: Recompile learn-spam sieve script 21 | command: /usr/bin/sievec /etc/dovecot/sieve/learn-spam.sieve 22 | changed_when: true 23 | 24 | - name: Recompile learn-ham sieve script 25 | command: /usr/bin/sievec /etc/dovecot/sieve/learn-ham.sieve 26 | changed_when: true 27 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/templates/configs/auth-ldap.conf.ext.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | # Authentication for LDAP users. Included from 10-auth.conf. 4 | # 5 | # 6 | 7 | passdb { 8 | driver = ldap 9 | 10 | # Path for LDAP configuration file, see example-config/dovecot-ldap.conf.ext 11 | args = /etc/dovecot/dovecot-ldap.conf.ext 12 | } 13 | 14 | userdb { 15 | driver = prefetch 16 | } 17 | 18 | userdb { 19 | driver = ldap 20 | args = /etc/dovecot/dovecot-ldap.conf.ext 21 | } 22 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/templates/learn-ham.sieve.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | require ["vnd.dovecot.pipe", "copy", "imapsieve", "variables"]; 4 | 5 | # Ignore e-mails being moved into Trash for Ham learning 6 | if string "${mailbox}" "Trash" { 7 | stop; 8 | } 9 | 10 | pipe :copy "spamc-learn-ham.sh"; 11 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/templates/learn-spam.sieve.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | require ["vnd.dovecot.pipe", "copy", "imapsieve"]; 4 | 5 | pipe :copy "spamc-learn-spam.sh"; 6 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/templates/prevent-duplicates.sieve.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | require ["duplicate"]; 4 | 5 | if not exists "List-ID" { 6 | if duplicate { 7 | discard; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/templates/spam-to-folder.sieve.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | require ["fileinto"]; 4 | 5 | if header :contains "X-Spam" "Yes" { 6 | fileinto "Junk"; 7 | stop; 8 | } 9 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/vars/main/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dovecot_ldap_host: "ldaps://ldap01.box.pydis.wtf:636" 3 | 
dovecot_ldap_user: "uid=dovecot,cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf" 4 | dovecot_ldap_password: "{{ vault_dovecot_ldap_password }}" 5 | dovecot_ldap_tls_ca: "/etc/ipa/ca.crt" 6 | dovecot_vmail_uid: "5000" 7 | dovecot_sieve_pipe_bin_dir: /usr/lib/dovecot/sieve 8 | -------------------------------------------------------------------------------- /ansible/roles/dovecot/vars/main/vault.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 36323936626631376233313136333732393863353536376339306266656234346265316638356364 3 | 6465613561326365646662303433313633353265356336300a633465376134306263333531376466 4 | 37393037363336303332626664326164366664366264303665333431393236383635316436366434 5 | 6539643930396163300a383031636666663966663936333235613431333564636465626433333164 6 | 35323166353532363633356130383033366161643861393837623461653436356134313131636330 7 | 33353461623838633037363331663737353633393263383638633264346437373364636139646561 8 | 38326639356430646361393039623438343839313635376338343061373961373362656531363165 9 | 39323032333839333032 10 | -------------------------------------------------------------------------------- /ansible/roles/fail2ban/README.md: -------------------------------------------------------------------------------- 1 | # Role "fail2ban" 2 | 3 | This role installs and configures fail2ban to all Python Discord hosts. 4 | -------------------------------------------------------------------------------- /ansible/roles/fail2ban/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Reload fail2ban 2 | service: 3 | name: fail2ban 4 | state: reloaded 5 | tags: 6 | - role::fail2ban 7 | -------------------------------------------------------------------------------- /ansible/roles/fail2ban/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install fail2ban package 3 | package: 4 | name: fail2ban 5 | state: present 6 | tags: 7 | - role::fail2ban 8 | 9 | - name: Copy fail2ban config 10 | template: 11 | src: jail.local.j2 12 | dest: /etc/fail2ban/jail.local 13 | owner: root 14 | group: root 15 | mode: "0644" 16 | tags: 17 | - role::fail2ban 18 | notify: 19 | - Reload fail2ban 20 | 21 | - name: Update Postfix unit name 22 | lineinfile: 23 | path: /etc/fail2ban/filter.d/postfix.conf 24 | regexp: "^journalmatch =" 25 | line: "journalmatch = _SYSTEMD_UNIT=postfix@-.service" 26 | tags: 27 | - role::fail2ban 28 | notify: 29 | - Reload fail2ban 30 | 31 | - name: Enable fail2ban service 32 | service: 33 | name: fail2ban 34 | state: started 35 | enabled: true 36 | tags: 37 | - role::fail2ban 38 | -------------------------------------------------------------------------------- /ansible/roles/firewalld/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload the firewall 3 | service: 4 | name: firewalld 5 | state: reloaded 6 | -------------------------------------------------------------------------------- /ansible/roles/git-mirrors/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - nginx 4 | -------------------------------------------------------------------------------- /ansible/roles/git-mirrors/templates/cgitrc.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | # See cgitrc(5) for 
details 4 | 5 | # Operational config 6 | cache-root=/var/cache/cgit 7 | 8 | # Web config 9 | css=/cgit.css 10 | logo={{ git_mirrors_cgit_logo }} 11 | favicon={{ git_mirrors_cgit_logo }} 12 | virtual-root=/ 13 | root-title={{ git_mirrors_cgit_title }} 14 | root-desc={{ git_mirrors_cgit_description }} 15 | 16 | # Filters config 17 | about-filter=/usr/lib/cgit/filters/about-formatting.sh 18 | source-filter=/usr/lib/cgit/filters/syntax-highlighting.py 19 | email-filter=lua:/usr/lib/cgit/filters/email-gravatar.lua 20 | 21 | # Design options 22 | enable-commit-graph=1 23 | enable-log-linecount=1 24 | enable-blame=1 25 | enable-follow-links=1 26 | enable-index-owner=0 27 | enable-subject-links=1 28 | max-stats=year 29 | 30 | # Content options 31 | readme=:README.md 32 | readme=:README.rst 33 | readme=:README.man 34 | readme=:README.txt 35 | 36 | # Repositories 37 | section-from-path=1 38 | scan-path={{ git_mirrors_base_dir }}/mirrored 39 | repository-sort=age 40 | -------------------------------------------------------------------------------- /ansible/roles/git-mirrors/templates/nginx-site.conf.j2: -------------------------------------------------------------------------------- 1 | server { 2 | server_name {{ git_mirrors_nginx_domain }}; 3 | 4 | listen 443 ssl http2; 5 | listen [::]:443 ssl http2; 6 | 7 | ssl_certificate {{ git_mirrors_nginx_cert_file }}; 8 | ssl_certificate_key {{ git_mirrors_nginx_cert_key }}; 9 | 10 | access_log /var/log/nginx/cgit-access.log; 11 | error_log /var/log/nginx/cgit-error.log; 12 | 13 | root /usr/share/cgit; 14 | try_files $uri @cgit; 15 | 16 | location @cgit { 17 | include fastcgi_params; 18 | fastcgi_param SCRIPT_FILENAME /usr/lib/cgit/cgit.cgi; 19 | fastcgi_pass unix:/run/fcgiwrap.socket; 20 | 21 | fastcgi_param PATH_INFO $uri; 22 | fastcgi_param QUERY_STRING $args; 23 | fastcgi_param HTTP_HOST $server_name; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /ansible/roles/git-mirrors/templates/update-mirrors.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -u 4 | 5 | # Base location of all mirrors 6 | MIRRORS_BASE_DIR="{{ git_mirrors_base_dir }}/mirrored" 7 | 8 | # Locate repositories knowing that there will be a HEAD file inside them 9 | FOUND_REPOS=$(find "$MIRRORS_BASE_DIR" -name "HEAD" -print0 | xargs -0 dirname) 10 | 11 | for repo in $FOUND_REPOS; do 12 | cd "$repo"; 13 | echo "Updating $repo mirror..." 14 | if ! nice git fetch -q --prune; then 15 | echo "Error: Failed to update repository $repo" 16 | exit 1 17 | fi 18 | echo "Updated repository." 19 | 20 | cd - 21 | done 22 | -------------------------------------------------------------------------------- /ansible/roles/jitsi/README.md: -------------------------------------------------------------------------------- 1 | # Role "jitsi" 2 | 3 | Install Jitsi Meet on target hosts. 
4 | -------------------------------------------------------------------------------- /ansible/roles/jitsi/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart the Jitsi video bridge service 3 | service: 4 | name: jitsi-videobridge2 5 | state: restarted 6 | 7 | - name: Restart the Jitsi jicofo service 8 | service: 9 | name: jicofo 10 | state: restarted 11 | 12 | - name: Restart the Jitsi prosody service 13 | service: 14 | name: prosody 15 | state: restarted 16 | -------------------------------------------------------------------------------- /ansible/roles/jitsi/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - nftables 4 | - nginx 5 | - sasl 6 | -------------------------------------------------------------------------------- /ansible/roles/jitsi/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install jitsi 3 | include_tasks: install.yml 4 | 5 | - name: Enable LDAP auth for Jitsi 6 | include_tasks: ldap_auth.yml 7 | -------------------------------------------------------------------------------- /ansible/roles/jitsi/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | jitsi_debconf_questions: 5 | - name: 'jitsi-meet-web-config' 6 | question: 'jitsi-meet/cert-choice' 7 | value: 'I want to use my own certificate' 8 | vtype: 'select' 9 | 10 | - name: 'jitsi-meet-web-config' 11 | question: 'jitsi-meet/cert-path-crt' 12 | value: '/etc/letsencrypt/live/pydis.wtf/fullchain.pem' 13 | vtype: 'string' 14 | 15 | - name: 'jitsi-meet-web-config' 16 | question: 'jitsi-meet/cert-path-key' 17 | value: '/etc/letsencrypt/live/pydis.wtf/privkey.pem' 18 | vtype: 'string' 19 | 20 | - name: 'jitsi-meet-web-config' 21 | question: 'jitsi-meet/jaas-choice' 22 | value: 'false' 23 | vtype: 'boolean' 24 | 25 | 26 | - name: 'jitsi-videobridge2' 27 | question: 'jitsi-videobridge/jvb-hostname' 28 | value: 'jitsi.pydis.wtf' 29 | vtype: 'string' 30 | -------------------------------------------------------------------------------- /ansible/roles/ldap/README.md: -------------------------------------------------------------------------------- 1 | # LDAP 2 | 3 | This role prepares the environment for FreeIPA to be installed on our Rocky 4 | Linux-based LDAP host. 5 | 6 | Note that the actual installation process and subsequent setup steps from 7 | `ipa-server-install` must unfortunately be performed manually, as automating 8 | this process is not something we have deemed critical at 9 | this stage.
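For reference, the manual installation step referenced above looks roughly like the following; the domain, realm, DNS options and passwords are placeholders rather than the values used in production:

```bash
# Illustrative sketch only -- consult the FreeIPA documentation before running.
ipa-server-install \
  --domain box.pydis.wtf \
  --realm BOX.PYDIS.WTF \
  --ds-password "$DS_PASSWORD" \
  --admin-password "$ADMIN_PASSWORD" \
  --setup-dns --no-forwarders
```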
10 | -------------------------------------------------------------------------------- /ansible/roles/ldap/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - firewalld 4 | -------------------------------------------------------------------------------- /ansible/roles/ldap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install IPA server packages 3 | package: 4 | name: 5 | - ipa-server 6 | state: present 7 | tags: 8 | - role::ldap 9 | 10 | - name: Create firewall rules for FreeIPA 11 | ansible.posix.firewalld: 12 | service: "{{ item }}" 13 | permanent: true 14 | state: enabled 15 | loop: 16 | - http 17 | - https 18 | - dns 19 | - ntp 20 | - freeipa-ldap 21 | - freeipa-ldaps 22 | notify: 23 | - Reload the firewall 24 | tags: 25 | - role::ldap 26 | -------------------------------------------------------------------------------- /ansible/roles/munin-node/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | munin_node__plugin_packages: "{{ ('databases' in group_names) | ternary(['libdbd-pg-perl'], []) }}" 3 | munin_node__plugins: [] 4 | -------------------------------------------------------------------------------- /ansible/roles/munin-node/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart munin-node service 3 | service: 4 | name: munin-node 5 | state: restarted 6 | tags: 7 | - role::munin-node 8 | -------------------------------------------------------------------------------- /ansible/roles/munin-node/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - firewalld 4 | -------------------------------------------------------------------------------- /ansible/roles/munin-node/templates/plugin.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [dovecot_maildirs] 4 | user root 5 | group vmail 6 | 7 | [load] 8 | env.load_warning {{ ansible_processor_nproc * 0.7 }} 9 | env.load_critical {{ ansible_processor_nproc * 0.85 }} 10 | 11 | [memory] 12 | env.apps_warning 70% 13 | env.apps_critical 90% 14 | env.swap_warning 60% 15 | env.swap_critical 80% 16 | -------------------------------------------------------------------------------- /ansible/roles/munin-node/templates/plugins/ldap01/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/ansible/roles/munin-node/templates/plugins/ldap01/.gitkeep -------------------------------------------------------------------------------- /ansible/roles/munin/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install munin packages 3 | package: 4 | name: 5 | - munin 6 | - munin-node 7 | - spawn-fcgi 8 | # Plugin packages 9 | - libdbd-pg-perl 10 | state: present 11 | tags: 12 | - role::munin 13 | 14 | - name: Template munin configuration file 15 | template: 16 | src: munin.conf.j2 17 | dest: /etc/munin/munin.conf 18 | owner: root 19 | group: root 20 | mode: "0444" 21 | tags: 22 | - role::munin 23 | -------------------------------------------------------------------------------- /ansible/roles/neomutt/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Install neomutt 3 | package: 4 | name: neomutt 5 | state: present 6 | tags: 7 | - role::neomutt 8 | 9 | - name: Configure neomutt folder to standard destination 10 | copy: 11 | content: | 12 | # Ansible managed 13 | 14 | # neomutt will use ~/Mail by default, which with our mailserver 15 | # being backed by Dovecot we do not support. Hence, we configure 16 | # IMAP here to allow mail to be read by users logged in via SSH. 17 | 18 | set spoolfile="imaps://mail.pydis.wtf/" 19 | set imap_user="$USER" 20 | 21 | set folder = $spoolfile 22 | set postponed = "+Drafts" 23 | set record = "+Sent" 24 | set trash = "+Trash" 25 | 26 | mailboxes $spoolfile +Archive $postponed $record +Junk $trash 27 | 28 | owner: root 29 | group: root 30 | mode: "0o444" 31 | dest: /etc/neomuttrc.d/pydis.rc 32 | tags: 33 | - role::neomutt 34 | -------------------------------------------------------------------------------- /ansible/roles/nginx-cloudflare-mtls/README.md: -------------------------------------------------------------------------------- 1 | # Role "nginx-cloudflare-mtls" 2 | 3 | Installs the certificate required for performing mutual TLS authentication 4 | between NGINX and Cloudflare. 5 | 6 | To use mutual TLS in your NGINX virtual hosts, add this configuration snippet: 7 | 8 | ```nginx 9 | ssl_client_certificate {{ nginx_cloudflare_mtls_certificate_path }}; 10 | ssl_verify_client on; 11 | ``` 12 | 13 | 14 | ## Variables 15 | 16 | See [role defaults](./defaults/main.yml) for an annotated overview. 17 | -------------------------------------------------------------------------------- /ansible/roles/nginx-cloudflare-mtls/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # The path at which to install the certificate. 
3 | nginx_cloudflare_mtls_certificate_path: /etc/nginx/certs/cloudflare.crt 4 | -------------------------------------------------------------------------------- /ansible/roles/nginx-cloudflare-mtls/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: nginx 4 | -------------------------------------------------------------------------------- /ansible/roles/nginx-cloudflare-mtls/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create nginx certificates directory 3 | file: 4 | path: /etc/nginx/certs 5 | state: directory 6 | owner: root 7 | group: root 8 | mode: "0444" 9 | tags: 10 | - role::nginx-cloudflare-mtls 11 | 12 | - name: Copy the cloudflare mutual TLS certificate 13 | copy: 14 | src: cloudflare.crt 15 | dest: /etc/nginx/certs/cloudflare.crt 16 | owner: root 17 | group: root 18 | mode: "0444" 19 | tags: 20 | - role::nginx-cloudflare-mtls 21 | -------------------------------------------------------------------------------- /ansible/roles/nginx-geoip/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: nginx 4 | -------------------------------------------------------------------------------- /ansible/roles/nginx-geoip/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure the geoip module 3 | copy: 4 | # ref https://nginx.org/en/docs/http/ngx_http_geoip_module.html 5 | content: geoip_country /usr/share/GeoIP/GeoIP.dat; 6 | dest: /etc/nginx/conf.d/geoip.conf 7 | owner: root 8 | group: root 9 | mode: "0444" 10 | tags: 11 | - role::nginx-geoip 12 | notify: 13 | - Reload the nginx service 14 | -------------------------------------------------------------------------------- /ansible/roles/nginx/README.md: -------------------------------------------------------------------------------- 1 | # Role "nginx" 2 | 3 | Installs nginx on target hosts and provides a handler for reloading nginx, for instance on configuration change. 
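Run by hand, the reload performed by the handler in `handlers/main.yml` below amounts to roughly the following; validating the configuration first is an extra precaution, not something the handler itself does:

```bash
# Check the rendered configuration, then reload nginx without dropping connections.
nginx -t && systemctl reload nginx
```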
4 | -------------------------------------------------------------------------------- /ansible/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload the nginx service 3 | service: 4 | name: nginx 5 | state: reloaded 6 | tags: 7 | - role::nginx 8 | -------------------------------------------------------------------------------- /ansible/roles/nginx/templates/default_server.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | server { 3 | listen 80 default_server; 4 | 5 | server_name _; 6 | 7 | return 301 https://$host$request_uri; 8 | } 9 | 10 | server { 11 | listen 443 ssl http2 default_server; 12 | 13 | ssl_certificate /etc/letsencrypt/live/{{ nginx_default_cert_name }}/fullchain.pem; 14 | ssl_certificate_key /etc/letsencrypt/live/{{ nginx_default_cert_name }}/privkey.pem; 15 | 16 | location / { 17 | set_by_lua_block $url { 18 | local urls = { 19 | "https://fasterthanli.me/articles/i-want-off-mr-golangs-wild-ride", 20 | "https://en.wikipedia.org/wiki/Tax_evasion", 21 | "https://jchri.st/blog/apfs-sadness-on-macos-big-sur.html", 22 | "https://cdn.discordapp.com/attachments/675756741417369640/852688961516077086/Screenshot_2021-06-11_at_00.21.22.png", 23 | "https://news.ycombinator.com/", 24 | "https://www.hertfordshire.gov.uk/latest/letchworth-webcam.jpg" 25 | } 26 | return urls [ math.random(#urls) ] 27 | } 28 | 29 | return 302 $url; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /ansible/roles/nginx/templates/nginx-conf.d/charset.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | charset utf-8; 3 | -------------------------------------------------------------------------------- /ansible/roles/opendkim/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload OpenDKIM 3 | service: 4 | name: opendkim 5 | state: reloaded 6 | -------------------------------------------------------------------------------- /ansible/roles/opendkim/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | opendkim_domains: 3 | - pydis.wtf 4 | - pydis.com 5 | - pythondiscord.com 6 | - owlcorp.uk 7 | opendkim_extra_signings: 8 | - domain: int.pydis.wtf 9 | use_key: pydis.wtf 10 | opendkim_selector: lovelace 11 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc-inbox/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Compile DMARC Sieve script 3 | become: true 4 | become_user: "{{ opendmarc_inbox_username }}" 5 | command: "sievec /var/vmail/{{ opendmarc_inbox_username }}/.dovecot.sieve" 6 | changed_when: true 7 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc-inbox/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create DMARC user sieve directory 3 | file: 4 | state: directory 5 | path: "/var/vmail/{{ opendmarc_inbox_username }}/sieve" 6 | owner: "{{ opendmarc_inbox_username }}" 7 | group: "{{ opendmarc_inbox_username }}" 8 | mode: "0700" 9 | tags: 10 | - role::opendmarc-inbox 11 | 12 | - name: Template DMARC filing Sieve script 13 | template: 14 | src: dmarc.sieve.j2 15 | dest: 
"/var/vmail/{{ opendmarc_inbox_username }}/sieve/dmarc.sieve" 16 | owner: "{{ opendmarc_inbox_username }}" 17 | group: "{{ opendmarc_inbox_username }}" 18 | mode: "0400" 19 | tags: 20 | - role::opendmarc-inbox 21 | notify: 22 | - Compile DMARC Sieve script 23 | 24 | - name: Activate DMARC processing Sieve filter 25 | file: 26 | state: link 27 | src: "/var/vmail/{{ opendmarc_inbox_username }}/sieve/dmarc.sieve" 28 | path: "/var/vmail/{{ opendmarc_inbox_username }}/.dovecot.sieve" 29 | tags: 30 | - role::opendmarc-inbox 31 | notify: 32 | - Compile DMARC Sieve script 33 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc-inbox/templates/dmarc.sieve.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | require ["fileinto", "envelope", "subaddress", "mailbox"]; 4 | 5 | if envelope :detail "to" "reports" { 6 | fileinto :create "Reports.New"; 7 | } 8 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc-inbox/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | opendmarc_inbox_username: "dmarc" 3 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart OpenDMARC 3 | service: 4 | name: opendmarc 5 | state: restarted 6 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install OpenDMARC 3 | package: 4 | name: 5 | - opendmarc 6 | state: present 7 | tags: 8 | - role::opendmarc 9 | 10 | - name: Create spool location for OpenDMARC aggregation 11 | file: 12 | state: directory 13 | path: /var/spool/opendmarc 14 | owner: opendmarc 15 | group: opendmarc 16 | mode: "0700" 17 | tags: 18 | - role::opendmarc 19 | 20 | - name: Template OpenDMARC configuration 21 | template: 22 | src: opendmarc.conf.j2 23 | dest: /etc/opendmarc.conf 24 | mode: "0644" 25 | owner: root 26 | group: root 27 | tags: 28 | - role::opendmarc 29 | notify: 30 | - Restart OpenDMARC 31 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc/templates/opendmarc.conf.j2: -------------------------------------------------------------------------------- 1 | # Server name to identify to others with in generated reports 2 | AuthservID {{ opendmarc_authserv_id }} 3 | 4 | # Copy failed messages to this address for investigation 5 | CopyFailuresTo {{ opendmarc_failures }} 6 | 7 | # Generate failure reports 8 | FailureReports true 9 | FailureReportsBcc {{ opendmarc_failure_reports }} 10 | FailureReportsOnNone true 11 | FailureReportsSentBy {{ opendmarc_failure_reports_from }} 12 | 13 | HistoryFile /var/spool/opendmarc/opendmarc.dat 14 | IgnoreAuthenticatedClients true 15 | 16 | # If needed in future 17 | # IgnoreHosts /etc/opendmarc/ignore.hosts 18 | 19 | RejectFailures true 20 | HoldQuarantinedMessages true 21 | 22 | ReportCommand /usr/sbin/sendmail -t 23 | RequiredHeaders true 24 | Socket inet:8893@localhost 25 | SoftwareHeader true 26 | SPFIgnoreResults false 27 | TrustedAuthservIDs {{ opendmarc_authserv_id }} 28 | PidFile /run/opendmarc/opendmarc.pid 29 | 30 | PublicSuffixList /usr/share/publicsuffix/public_suffix_list.dat 
31 | 32 | UMask 0002 33 | UserID opendmarc 34 | -------------------------------------------------------------------------------- /ansible/roles/opendmarc/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | opendmarc_failures: "dmarc+failures@int.pydis.wtf" 3 | opendmarc_failure_reports: "dmarc+failurereports@int.pydis.wtf" 4 | opendmarc_failure_reports_from: "dmarc+noreply@int.pydis.wtf" 5 | opendmarc_authserv_id: "mail.pydis.wtf" 6 | -------------------------------------------------------------------------------- /ansible/roles/podman/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install podman 3 | package: 4 | name: podman 5 | state: present 6 | tags: 7 | - role::podman 8 | -------------------------------------------------------------------------------- /ansible/roles/postfix/files/policyd-spf.conf: -------------------------------------------------------------------------------- 1 | # Ansible Managed 2 | 3 | debugLevel = 1 4 | TestOnly = 1 5 | 6 | HELO_reject = Fail 7 | Mail_From_reject = Fail 8 | 9 | PermError_reject = False 10 | TempError_Defer = False 11 | 12 | skip_addresses = 127.0.0.0/8,::ffff:127.0.0.0/104,::1 13 | -------------------------------------------------------------------------------- /ansible/roles/postfix/files/sender_access: -------------------------------------------------------------------------------- 1 | # Managed by Ansible 2 | 3 | emails.buzzfeed.com REJECT Sorry, we're not feeling it today. 4 | flippa.com REJECT Please try sending this mail to a brick wall. 5 | email.crosswalk.com REJECT I'm starting to think your email is a personal attack on my sanity. Please stop. 6 | -------------------------------------------------------------------------------- /ansible/roles/postfix/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload postfix 3 | service: 4 | name: postfix 5 | state: reloaded 6 | 7 | - name: Restart postfix 8 | service: 9 | name: postfix 10 | state: restarted 11 | 12 | - name: Regenerate virtual table 13 | command: postmap /etc/postfix/virtual 14 | changed_when: true 15 | 16 | - name: Regenerate transport table 17 | command: postmap /etc/postfix/transport 18 | changed_when: true 19 | 20 | - name: Regenerate sender access table 21 | command: postmap /etc/postfix/sender_access 22 | changed_when: true 23 | 24 | - name: Restart postsrsd 25 | service: 26 | name: postsrsd 27 | state: restarted 28 | -------------------------------------------------------------------------------- /ansible/roles/postfix/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | # Mail client 4 | - neomutt 5 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/ldap/ldap-group-aliases.cf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | server_host = {{ postfix_bind_server }} 4 | bind = yes 5 | bind_dn = {{ postfix_bind_user }} 6 | bind_pw = {{ postfix_bind_password }} 7 | search_base = cn=groups,cn=accounts,dc=box,dc=pydis,dc=wtf 8 | query_filter = (cn=%u) 9 | domain = {{ postfix_destination_domains | join(', ') }} 10 | special_result_attribute = member 11 | result_attribute = mail 12 | version = 3 13 | -------------------------------------------------------------------------------- 
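Once LDAP map templates like the one above are rendered onto the mail host, their lookups can be exercised from a shell with `postmap -q`; the destination path and lookup key here are illustrative, not taken from the role:

```bash
# Hypothetical smoke test of the group-aliases LDAP map; adjust the path and
# key to match the actual deployment before relying on the result.
postmap -q "staff" ldap:/etc/postfix/ldap/ldap-group-aliases.cf
```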
/ansible/roles/postfix/templates/ldap/ldap-registeredaddress.cf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | server_host = {{ postfix_bind_server }} 4 | bind = yes 5 | bind_dn = {{ postfix_bind_user }} 6 | bind_pw = {{ postfix_bind_password }} 7 | search_base = {{ postfix_bind_search_base }} 8 | query_filter = (mail=%u@{{ postfix_mailname }}) 9 | domain = {{ postfix_destination_domains | join(', ') }} 10 | result_attribute = registeredAddress 11 | result_format = %s 12 | version = 3 13 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/ldap/ldap-relay-recipients.cf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | server_host = {{ postfix_bind_server }} 4 | bind = yes 5 | bind_dn = {{ postfix_bind_user }} 6 | bind_pw = {{ postfix_bind_password }} 7 | search_base = {{ postfix_bind_search_base }} 8 | query_filter = (mail=%u@{{ postfix_mailname }}) 9 | domain = {{ postfix_destination_domains | join(', ') }} 10 | result_attribute = mail 11 | result_format = %s 12 | version = 3 13 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/ldap/ldap-service-mail.cf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | server_host = {{ postfix_bind_server }} 4 | bind = yes 5 | bind_dn = {{ postfix_bind_user }} 6 | bind_pw = {{ postfix_bind_password }} 7 | search_base = {{ postfix_bind_search_base }} 8 | query_filter = (&(mail=%u@int.pydis.wtf)(mail=%s)) 9 | domain = {{ postfix_destination_domains | join(', ') }} 10 | result_attribute = uid 11 | result_format = %s@localhost 12 | version = 3 13 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/ldap/ldap-uid.cf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | server_host = {{ postfix_bind_server }} 4 | bind = yes 5 | bind_dn = {{ postfix_bind_user }} 6 | bind_pw = {{ postfix_bind_password }} 7 | search_base = {{ postfix_bind_search_base }} 8 | query_filter = (mail=%u@{{ postfix_mailname }}) 9 | domain = {{ postfix_destination_domains | join(', ') }} 10 | result_attribute = uid 11 | result_format = %s@localhost 12 | version = 3 13 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/sender-canonical-maps.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | {% for domain in postfix_destination_domains %} 4 | /^(.+@{{ domain | replace(".", "\\.") }})$/ ${1} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/transport.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # 3 | # Postfix Mail Transport Map 4 | 5 | fortune@int.pydis.wtf fortune-pipe: 6 | 7 | fredrick@pydis.wtf fredrick-pipe: 8 | -------------------------------------------------------------------------------- /ansible/roles/postfix/templates/virtual.j2: -------------------------------------------------------------------------------- 1 | # DevOps Aliases 2 | postmaster {{ postfix_postmaster_email }} 3 | root {{ postfix_postmaster_email }} 4 | webmaster 
{{ postfix_postmaster_email }} 5 | abuse {{ postfix_postmaster_email }} 6 | ops {{ postfix_postmaster_email }} 7 | 8 | # Events Emails 9 | merch-support events 10 | streamyard events,joe 11 | 12 | # Director Emails 13 | owners directors 14 | patreon directors 15 | donations directors 16 | twitter directors 17 | 18 | # Admins Emails 19 | admins administrators 20 | staff administrators 21 | 22 | # DevOps Admins Emails 23 | cloudflare joe,cj 24 | netcup joe,cj 25 | linode joe,cj 26 | devops-alerts jc,joe 27 | 28 | # Admin Domain Email 29 | conferences stelercus 30 | outreach keiththeee 31 | -------------------------------------------------------------------------------- /ansible/roles/postfix/vars/main/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | postfix_mailname: "pydis.wtf" 3 | 4 | postfix_bind_user: "uid=postfix,cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf" 5 | postfix_bind_password: "{{ vault_postfix_bind_password }}" 6 | postfix_bind_server: "ldaps://ldap01.box.pydis.wtf:636" 7 | postfix_bind_search_base: "cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf" 8 | 9 | postfix_postmaster_email: "devops@pydis.wtf" 10 | 11 | postfix_tls_cert: /etc/letsencrypt/live/pydis.wtf/fullchain.pem 12 | postfix_tls_key: /etc/letsencrypt/live/pydis.wtf/privkey.pem 13 | 14 | postfix_mailserver_name: "mail.pydis.wtf" 15 | postfix_destination_domains: 16 | - pydis.wtf 17 | - int.pydis.wtf 18 | - pydis.com 19 | - pythondiscord.com 20 | - owlcorp.uk 21 | 22 | postfix_fredrick_generator_token: "{{ vault_postfix_fredrick_generator_token }}" 23 | -------------------------------------------------------------------------------- /ansible/roles/postfix/vars/main/vault.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 36633262313030356566383062626664306635646566336232333063363433353938636337333431 3 | 3530313130643237333131616233393132303761393065620a666361316130303439303133313335 4 | 61373034303638666239656134336631306131386334393664613163396639363934313333343366 5 | 3261626332313236630a383464656436386232386639323961653265376333396532643630346332 6 | 64363733306232633731303565393161386639393437613366653964363662653162306137666462 7 | 35383331666431383664363634646565323266633463366362663934613639386264343831353533 8 | 66616130353463653031663438383537383562636139623336336361363966386637656137626561 9 | 64646462633063396266346431363233613436656437373130373933363564316639386535343030 10 | 38643936643037313038626238343466323534356563306232343762313139373730663734343539 11 | 6566323663333130343431643365653232336134663530376434 12 | -------------------------------------------------------------------------------- /ansible/roles/postgres/files/ident.conf: -------------------------------------------------------------------------------- 1 | # Managed by Ansible 2 | 3 | # MAP NAME MATCH VALUE DATABASE USERNAME 4 | mtls_cn_map /^(\w+)\.postgres\.tls\.pydis\.wtf$ \1 5 | -------------------------------------------------------------------------------- /ansible/roles/postgres/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart the postgres service 2 | service: 3 | name: '{{ postgres_daemon }}' 4 | state: "restarted" 5 | tags: 6 | - role::postgres 7 | 8 | - name: Reload the postgres service 9 | service: 10 | name: '{{ postgres_daemon }}' 11 | state: reloaded 12 | tags: 13 | - role::postgres 14 | 
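The `mtls_cn_map` in `ident.conf` above maps a client certificate CN of the form `<user>.postgres.tls.pydis.wtf` onto the matching database user. Assuming `pg_hba.conf` uses certificate authentication with that map (not shown here), a hypothetical client connection could look like this, with the host name and file paths as placeholders:

```bash
# Illustrative only: the server derives the database user from the certificate
# CN via the ident map, so the CN must be e.g. bot.postgres.tls.pydis.wtf.
psql "host=postgres.example.pydis.wtf dbname=bot user=bot sslmode=verify-full sslcert=bot.crt sslkey=bot.key sslrootcert=ca.pem"
```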
-------------------------------------------------------------------------------- /ansible/roles/postgres/tasks/pg_repack.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install pg_repack 3 | apt: 4 | name: 5 | - postgresql-{{ postgres_version }}-repack 6 | state: present 7 | tags: 8 | - role::postgres 9 | - role::postgres-pg_repack 10 | 11 | - name: Add pg_repack to all databases 12 | become: true 13 | become_user: "{{ postgres_user }}" 14 | community.postgresql.postgresql_ext: 15 | db: "{{ item.name }}" 16 | name: pg_repack 17 | state: present 18 | loop: "{{ postgres_databases }}" 19 | tags: 20 | - role::postgres 21 | - role::postgres-pg_repack 22 | 23 | - name: Add cronjob for running pg_repack on all databases 24 | cron: 25 | name: "Run pg_repack on all databases" 26 | special_time: "weekly" 27 | job: "pg_repack -a" 28 | user: "{{ postgres_user }}" 29 | cron_file: "{{ postgres_pg_repack_cron_filename }}" 30 | tags: 31 | - role::postgres 32 | - role::postgres-pg_repack 33 | -------------------------------------------------------------------------------- /ansible/roles/prometheus-blackbox-exporter/README.md: -------------------------------------------------------------------------------- 1 | # Role "prometheus-blackbox-exporter" 2 | 3 | Installs `prometheus-blackbox-exporter` on target hosts. 4 | -------------------------------------------------------------------------------- /ansible/roles/prometheus-blackbox-exporter/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install prometheus-blackbox-exporter 3 | package: 4 | name: prometheus-blackbox-exporter 5 | state: present 6 | tags: 7 | - role::prometheus-blackbox-exporter 8 | -------------------------------------------------------------------------------- /ansible/roles/prometheus-node-exporter/README.md: -------------------------------------------------------------------------------- 1 | # Role "prometheus-node-exporter" 2 | 3 | Installs prometheus-node-exporter on target hosts. 4 | -------------------------------------------------------------------------------- /ansible/roles/prometheus-node-exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install prometheus-node-exporter 3 | package: 4 | name: prometheus-node-exporter 5 | state: present 6 | tags: 7 | - role::prometheus-node-exporter 8 | -------------------------------------------------------------------------------- /ansible/roles/prometheus-postfix-exporter/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - postfix 4 | -------------------------------------------------------------------------------- /ansible/roles/prometheus-postfix-exporter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install prometheus-postfix-exporter 3 | package: 4 | name: prometheus-postfix-exporter 5 | state: present 6 | tags: 7 | - role::prometheus-postfix-exporter 8 | -------------------------------------------------------------------------------- /ansible/roles/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Role "prometheus" 2 | 3 | Installs and configures Prometheus on target servers.
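Judging by `defaults/main.yml` below, the role appears to install Prometheus from the upstream release tarball; done by hand, fetching and verifying the pinned release would look roughly like this:

```bash
# Rough manual equivalent of the download/verify step, using the version pinned
# in the role defaults (2.52.0 at the time of writing).
curl -LO https://github.com/prometheus/prometheus/releases/download/v2.52.0/prometheus-2.52.0.linux-amd64.tar.gz
curl -LO https://github.com/prometheus/prometheus/releases/download/v2.52.0/sha256sums.txt
sha256sum --check --ignore-missing sha256sums.txt
```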
4 | 5 | 6 | ## Variables 7 | 8 | - `prometheus_cmdline_options` configures arguments to be added 9 | to the prometheus command line, and changing it will result in 10 | a restart. 11 | 12 | - `prometheus_configuration` is the prometheus configuration, serialized to 13 | YAML by Ansible. If unset, the default Prometheus configuration is used. 14 | -------------------------------------------------------------------------------- /ansible/roles/prometheus/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | prometheus_version: "2.52.0" 3 | prometheus_release_name: prometheus-{{ prometheus_version }}.linux-amd64 4 | prometheus_release_url: https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/{{ prometheus_release_name }}.tar.gz 5 | prometheus_release_sha256sum_url: https://github.com/prometheus/prometheus/releases/download/v{{ prometheus_version }}/sha256sums.txt 6 | prometheus_base_directory: /opt/prometheus 7 | prometheus_installation_directory: "{{ prometheus_base_directory }}/versions" 8 | prometheus_tarball_name: "{{ prometheus_release_url | basename }}" 9 | prometheus_base_release_dest: "{{ prometheus_base_directory }}/sources" 10 | prometheus_release_tarball_dest: "{{ prometheus_base_release_dest }}/{{ prometheus_tarball_name }}" 11 | 12 | # web assets? 13 | # prometheus_cmdline_options: "--config.file=/etc/prometheus/prometheus.yml --web.page-title='Python Discord Helper Monitoring And Supervision Service'" 14 | -------------------------------------------------------------------------------- /ansible/roles/prometheus/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload the prometheus service 3 | service: 4 | name: prometheus 5 | state: reloaded 6 | tags: 7 | - role::prometheus 8 | 9 | - name: Restart the prometheus service 10 | service: 11 | name: prometheus 12 | state: restarted 13 | tags: 14 | - role::prometheus 15 | -------------------------------------------------------------------------------- /ansible/roles/prometheus/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - systemd 4 | -------------------------------------------------------------------------------- /ansible/roles/pydis-mtls/README.md: -------------------------------------------------------------------------------- 1 | # Role "pydis-mtls" 2 | 3 | This role adds a copy of the Python Discord Root CA used for mutual TLS 4 | authentication to a specified location on all hosts. 5 | 6 | Services that need mutual TLS support should validate any incoming client 7 | certificate against this CA. Certificates issued from it will always have a CN 8 | under `tls.pydis.wtf`, which can be used for further 9 | authorization validation. 10 | 11 | ## Variables 12 | 13 | `pydis_mtls_certificate`: The CA Certificate contents to be copied to the host. 14 | The default should be fine here and is the current production CA. 15 | 16 | `pydis_mtls_location`: The location to copy the CA file to, defaults to 17 | `/opt/pydis/ca.pem`.
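As a quick illustration of the validation described above (the `client.pem` file name is a placeholder), a service holding the CA can check a presented client certificate and read its CN with openssl:

```bash
# Verify the client certificate chains to the Python Discord root CA installed
# by this role, then print its subject so the CN can be used for authorization.
openssl verify -CAfile /opt/pydis/ca.pem client.pem
openssl x509 -in client.pem -noout -subject
```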
18 | -------------------------------------------------------------------------------- /ansible/roles/pydis-mtls/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure directory to store mTLS CA in is created 2 | file: 3 | path: '{{ pydis_mtls_location | dirname }}' 4 | state: directory 5 | mode: '0755' 6 | tags: 7 | - role::pydis-mtls 8 | 9 | - name: Copy mTLS Certificate Authority to host 10 | copy: 11 | content: '{{ pydis_mtls_certificate }}' 12 | dest: '{{ pydis_mtls_location }}' 13 | owner: root 14 | group: root 15 | mode: '0744' 16 | tags: 17 | - role::pydis-mtls 18 | -------------------------------------------------------------------------------- /ansible/roles/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.posix 4 | version: 2.0.0 5 | - name: community.crypto 6 | version: 2.26.1 7 | - name: community.general 8 | version: 10.6.0 9 | - name: community.postgresql 10 | version: 3.14.0 11 | 12 | roles: 13 | - name: dmarc_metrics_exporter 14 | src: https://github.com/jgosmann/dmarc-metrics-exporter.git 15 | -------------------------------------------------------------------------------- /ansible/roles/rrdstats/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rrdstats_script_path: /opt/pydis/generate-rrdtool-stats.sh 3 | rrdstats_env_path: /etc/opt/pydis-rrdstats.env 4 | 5 | rrdstats_pg_username: rrdtool 6 | rrdstats_pg_password: "{{ vault_postgres_user_passwords[rrdstats_pg_username] }}" 7 | -------------------------------------------------------------------------------- /ansible/roles/rrdstats/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart the rrdstats timer 3 | ansible.builtin.service: 4 | name: generate-rrdtool-stats.timer 5 | state: restarted 6 | tags: 7 | - role::rrdstats 8 | -------------------------------------------------------------------------------- /ansible/roles/rrdstats/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - postgres 4 | - systemd 5 | -------------------------------------------------------------------------------- /ansible/roles/rrdstats/templates/generate-rrdtool-stats.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description = Generate Python Discord statistics via rrdtool 4 | After = postgresql.service 5 | Requires = postgresql.service 6 | 7 | [Service] 8 | ExecStart = {{ rrdstats_script_path }} 9 | DynamicUser = true 10 | StateDirectory = pydis-rrdstats 11 | User = pydis-rrdstats 12 | EnvironmentFile = {{ rrdstats_env_path }} 13 | 14 | # Sandboxing 15 | NoNewPrivileges = true 16 | 17 | # Resource control 18 | # Most of the resource expenditure of this unit will be dished out in the psql connection. 19 | # The rest here is just to ensure it doesn't impede the server's stability. 
20 | Nice = 10 21 | CPUQuota = 10% 22 | MemoryMax = 100M 23 | TasksMax = 20 24 | 25 | # vim: ft=dosini.jinja2: 26 | -------------------------------------------------------------------------------- /ansible/roles/rrdstats/templates/generate-rrdtool-stats.timer.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | [Unit] 4 | Description = Generate rrdtool stats minutely 5 | 6 | [Timer] 7 | OnCalendar = minutely 8 | 9 | [Install] 10 | WantedBy = timers.target 11 | 12 | # vim: ft=dosini.j2: 13 | -------------------------------------------------------------------------------- /ansible/roles/sasl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sasl_mux_path: /var/run/saslauthd 3 | -------------------------------------------------------------------------------- /ansible/roles/sasl/files/user.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | User=sasl 3 | Group=sasl 4 | -------------------------------------------------------------------------------- /ansible/roles/sasl/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload SASL 3 | service: 4 | name: saslauthd 5 | state: reloaded 6 | 7 | - name: Restart SASL 8 | service: 9 | name: saslauthd 10 | state: restarted 11 | -------------------------------------------------------------------------------- /ansible/roles/sasl/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | # Mail server 4 | - postfix 5 | -------------------------------------------------------------------------------- /ansible/roles/sasl/templates/saslauthd.conf.j2: -------------------------------------------------------------------------------- 1 | ldap_servers: {{ sasl_ldap_server }} 2 | ldap_search_base: {{ sasl_ldap_search_base }} 3 | ldap_bind_dn: {{ sasl_ldap_bind_dn }} 4 | ldap_bind_pw: {{ sasl_ldap_bind_pw }} 5 | ldap_filter: (&(uid=%u)(!(nsAccountLock=true))) 6 | -------------------------------------------------------------------------------- /ansible/roles/sasl/vars/main/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sasl_ldap_server: "ldaps://ldap01.box.pydis.wtf" 3 | sasl_ldap_bind_dn: "uid=postfix,cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf" 4 | sasl_ldap_bind_pw: "{{ vault_sasl_ldap_bind_pw }}" 5 | sasl_ldap_search_base: "cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf" 6 | -------------------------------------------------------------------------------- /ansible/roles/sasl/vars/main/vault.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 37316631656330303862393230316432376565656430623863343439376436363966323562663163 3 | 3164666663376361316363633366393732326530366165320a333862393032383364303034333666 4 | 38363538643937303863346166313939646130303263313238386465363763623362383366316331 5 | 3163316333366665360a636333633732643836376539373264393738393265633037323266643164 6 | 31306535333436333739343364653839306566663733383436653164306237323438363039393635 7 | 64613163363931323230623738336366633632663738663665363436343237343038313139656437 8 | 626638663962643465613234653639623263 9 | -------------------------------------------------------------------------------- /ansible/roles/spamassassin/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart spamd 3 | service: 4 | name: spamd 5 | state: restarted 6 | 7 | - name: Reload spamd 8 | service: 9 | name: spamd 10 | state: reloaded 11 | -------------------------------------------------------------------------------- /ansible/roles/spamassassin/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | spamassassin_contact_email: "devops@pydis.wtf" 3 | -------------------------------------------------------------------------------- /ansible/roles/ssh/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload ssh 3 | service: 4 | name: "{{ (ansible_distribution == 'Rocky') | ternary('sshd', 'ssh') }}" 5 | state: reloaded 6 | tags: 7 | - role::ssh 8 | -------------------------------------------------------------------------------- /ansible/roles/systemd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload the systemd daemon 3 | systemd: 4 | daemon_reload: true 5 | tags: 6 | - role::systemd 7 | -------------------------------------------------------------------------------- /ansible/roles/unattended-upgrades/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install unattended-upgrades 3 | package: 4 | name: unattended-upgrades 5 | state: present 6 | tags: 7 | - role::unattended-upgrades 8 | -------------------------------------------------------------------------------- /ansible/roles/wireguard/defaults/main/vars.yml: -------------------------------------------------------------------------------- 1 | wireguard_extra_keys: 2 | - name: Joe 3 | pubkey: /dJ+tKXzxv7nrUleNlF+CGyq7OIVlqL8/9Sn8j+cEAc= 4 | subnet: 10.0.1.0/24 5 | 6 | wireguard_os_packages: 7 | Debian: 8 | - wireguard 9 | - wireguard-tools 10 | - linux-headers-{{ ansible_kernel }} 11 | Rocky: 12 | - wireguard-tools 13 | -------------------------------------------------------------------------------- /ansible/roles/wireguard/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Reload wg-quick 2 | service: 3 | name: wg-quick@wg0 4 | state: reloaded 5 | -------------------------------------------------------------------------------- /ansible/roles/wireguard/templates/wg0.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Interface] 3 | Address = {{ wireguard_subnet }} 4 | ListenPort = {{ wireguard_port }} 5 | PrivateKey = {{ wg_priv_key['content'] | b64decode | trim }} 6 | 7 | PostUp = ip route add local {{ wireguard_subnet }} dev eth0 8 | PreDown = ip route del local {{ wireguard_subnet }} dev eth0 9 | 10 | {% for host in hostvars if not host == inventory_hostname %} 11 | # Peer config for: {{ host }} 12 | [Peer] 13 | AllowedIPs = {{ hostvars[host]['wireguard_subnet'] }} 14 | PublicKey = {{ hostvars[host]['wg_pub_key']['content'] | b64decode | trim }} 15 | Endpoint = {{ host }}.box.pydis.wtf:{{ wireguard_port }} 16 | PersistentKeepalive = 30 17 | 18 | {% endfor %} 19 | 20 | {% for key in wireguard_extra_keys %} 21 | # DevOps config for: {{ key.name }} 22 | [Peer] 23 | AllowedIPs = {{ key.subnet }} 24 | PublicKey = {{ key.pubkey }} 25 | 26 | {% endfor %} 27 | -------------------------------------------------------------------------------- 
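Once the template above is rendered (presumably to `/etc/wireguard/wg0.conf`, the path that `wg-quick@wg0` reads), the tunnel can be inspected and reloaded by hand much as the role's handler does; a brief sketch:

```bash
# Show current peers and handshakes, then reload the interface via the same
# systemd unit that the "Reload wg-quick" handler targets.
wg show wg0
systemctl reload wg-quick@wg0
```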
/dns/production.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | manager: 3 | plan_outputs: 4 | html: 5 | class: octodns.provider.plan.PlanMarkdown 6 | 7 | providers: 8 | zone_config: 9 | class: octodns.provider.yaml.YamlProvider 10 | directory: dns/zones 11 | default_ttl: 300 12 | enforce_order: true 13 | split_extension: ".zone" 14 | cloudflare: 15 | class: octodns_cloudflare.CloudflareProvider 16 | token: env/CLOUDFLARE_TOKEN 17 | account_id: env/CLOUDFLARE_ACCOUNT_ID 18 | pagerules: false 19 | 20 | zones: 21 | "*": 22 | sources: 23 | - zone_config 24 | targets: 25 | - cloudflare 26 | -------------------------------------------------------------------------------- /dns/zones/pydis.org.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | '': 3 | - octodns: 4 | cloudflare: 5 | auto-ttl: true 6 | ttl: 300 7 | type: ALIAS 8 | value: pythondiscord.com. 9 | - octodns: 10 | cloudflare: 11 | auto-ttl: true 12 | ttl: 300 13 | type: TXT 14 | value: keybase-site-verification=cZ4S0PFxbk_QNaQ8uCQUhgAfODykPWOWpuMrUyBF9AQ 15 | 16 | '*': 17 | octodns: 18 | cloudflare: 19 | auto-ttl: true 20 | ttl: 300 21 | type: CNAME 22 | value: pythondiscord.com. 23 | 24 | _acme-challenge.forms: 25 | octodns: 26 | cloudflare: 27 | auto-ttl: true 28 | ttl: 300 29 | type: TXT 30 | value: vksE5-PftvlUnw71gBe7X1W14IZX-aTxDSwYMlJLkN0 31 | 32 | forms: 33 | octodns: 34 | cloudflare: 35 | auto-ttl: true 36 | proxied: true 37 | ttl: 300 38 | type: CNAME 39 | value: forms-frontend.pages.dev. 40 | -------------------------------------------------------------------------------- /dns/zones/pythondiscord.org.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | '': 3 | - octodns: 4 | cloudflare: 5 | auto-ttl: true 6 | ttl: 300 7 | type: ALIAS 8 | value: pythondiscord.com. 9 | - octodns: 10 | cloudflare: 11 | auto-ttl: true 12 | ttl: 300 13 | type: TXT 14 | values: 15 | - keybase-site-verification=rtJ28Y8bGMHa1qJ_Qk6TAM6ktqebuWDwUtbMFCt_5qg 16 | '*': 17 | octodns: 18 | cloudflare: 19 | auto-ttl: true 20 | ttl: 300 21 | type: CNAME 22 | value: pythondiscord.com. 23 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Project Documentation 2 | 3 | This directory contains the mkdocs configuration and markdown files for the 4 | PyDis DevOps documentation. 5 | 6 | To work on these files, install the docs dependencies with `poetry install 7 | --with docs` in the root folder. 8 | 9 | You can work locally on the site by navigating to the root directory and running 10 | `poetry run task serve-docs`, which will start a local server with live 11 | reloading. You can also run `poetry run task build-docs` in the root to build a 12 | local copy of the documentation, which will be placed in the `docs/site` folder 13 | (`site` folder of this directory). 14 | -------------------------------------------------------------------------------- /docs/docs/common-queries/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Find common queries for our monitoring services to help diagnose issues 3 | --- 4 | # Common Queries 5 | 6 | Here you will find common queries for several services within our monitoring and alerting suite to help diagnose issues and preemptively stop problems from arising. 7 | 8 |
9 | 10 | - :simple-kubernetes:{ .lg .middle } __Kubernetes__ 11 | 12 | --- 13 | 14 | Find common troubleshooting tips to run against our Kubernetes cluster. 15 | 16 | [:octicons-arrow-right-24: Kubernetes tips](./kubernetes.md) 17 | 18 | - :simple-grafana:{ .lg .middle } __Loki__ 19 | 20 | --- 21 | 22 | A short guide to LogQL to query logs with Loki. 23 | 24 | [:octicons-arrow-right-24: LogQL queries](./loki.md) 25 | 26 | - :simple-postgresql:{ .lg .middle } __PostgreSQL__ 27 | 28 | --- 29 | 30 | Pre-written queries to debug the state of our PostgreSQL instance. 31 | 32 | [:octicons-arrow-right-24: PostgreSQL queries](./postgresql.md) 33 | 34 |
35 | -------------------------------------------------------------------------------- /docs/docs/common-queries/kubernetes.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Debugging tips for our Kubernetes cluster 3 | --- 4 | # Kubernetes tips 5 | 6 | ## Find top pods by CPU/memory 7 | 8 | ``` bash 9 | $ kubectl top pods --all-namespaces --sort-by='memory' 10 | $ kubectl top pods --all-namespaces --sort-by='cpu' 11 | ``` 12 | 13 | ## Find top nodes by CPU/memory 14 | 15 | ``` bash 16 | $ kubectl top nodes --sort-by='cpu' 17 | $ kubectl top nodes --sort-by='memory' 18 | ``` 19 | 20 | ## Kubernetes cheat sheet 21 | 22 | [Open Kubernetes cheat 23 | sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) 24 | 25 | ## Lens IDE 26 | 27 | [OpenLens](https://github.com/MuhammedKalkan/OpenLens) 28 | -------------------------------------------------------------------------------- /docs/docs/common-queries/loki.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: A brief overview of LogQL syntax for querying logs stored in Loki. 3 | --- 4 | 5 | # Loki queries 6 | 7 | ## Find any logs containing "ERROR" 8 | 9 | ``` shell 10 | {job=~"default/.+"} |= "ERROR" 11 | ``` 12 | 13 | ## Find all logs from bot service 14 | 15 | ``` shell 16 | {job="default/bot"} 17 | ``` 18 | 19 | The format is `namespace/object` 20 | 21 | ## Rate of logs from a service 22 | 23 | ``` shell 24 | rate(({job="default/bot"} |= "error" != "timeout")[10s]) 25 | ``` 26 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/.authors.yml: -------------------------------------------------------------------------------- 1 | authors: 2 | joe: 3 | name: Joe Banks 4 | description: DevOps Team Member 5 | avatar: https://github.com/jb3.png 6 | jc: 7 | name: Johannes 8 | description: Caretaker 9 | avatar: https://github.com/pydis-bot.png 10 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/index.md: -------------------------------------------------------------------------------- 1 | # Meeting Notes 2 | 3 | Here you will find notes from our previous DevOps team meetings. 4 | 5 | ## Meetings 6 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2022-04-07.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2022-04-07 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2022-04-07 7 | --- 8 | # 2022-04-07 9 | 10 | ## Agenda 11 | 12 | - No updates, as last week's meeting did not take place 13 | 14 | 15 | 16 | ## Roadmap review & planning 17 | 18 | What are we working on for the next meeting?
19 | 20 | - Help wanted for \#57 (h-asgi) 21 | - \#58 (postgres exporter) needs a new review 22 | - \#54 (firewall in VPN) will be done by Johannes 23 | - We need a testing environment \#67 24 | - Johannes will add a Graphite role \#31 25 | - Sofi will take a look at \#29 26 | - \#41 (policy bot) will be taken care of by Johannes 27 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2022-10-05.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2022-10-05 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2022-10-05 7 | --- 8 | # 2022-10-05 9 | 10 | *Migrated from Notion*. 11 | 12 | ## Agenda 13 | 14 | - Joe Banks configured proper RBAC for Chris, Johannes and Joe himself 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2022-10-26.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2022-10-26 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2022-10-26 7 | --- 8 | # 2022-10-26 9 | 10 | *Migrated from Notion*. 11 | 12 | 13 | 14 | ## Agenda 15 | 16 | - Chris upgraded PostgreSQL to 15 in production 17 | - Johannes added the Kubernetes user creation script into the Kubernetes 18 | repository in the docs 19 | 20 | *(The rest of the meeting was discussion about the NetKube setup, which 21 | has been scrapped since)*. 22 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2022-11-02.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2022-11-02 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2022-11-02 7 | --- 8 | # 2022-11-02 9 | 10 | *Migrated from Notion*. 11 | 12 | ## Agenda 13 | 14 | ### Hanging behaviour of ModMail 15 | 16 | - [Source](https://discord.com/channels/267624335836053506/675756741417369640/1036720683067134052) 17 | - Maybe use [Signals + a 18 | debugger](https://stackoverflow.com/a/25329467)? 19 | - ... using [something like pdb for the 20 | debugger](https://wiki.python.org/moin/PythonDebuggingTools)? 21 | - Or [GDB, as it seems handy to poke at stuck multi-threaded python 22 | software](https://wiki.python.org/moin/DebuggingWithGdb)? 23 | - ModMail has been upgraded to version 4 24 | 25 | 26 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2023-02-08.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-02-08 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2023-02-08 7 | --- 8 | # 2023-02-08 9 | 10 | *Migrated from Notion*. 11 | 12 | 13 | 14 | ## Agenda 15 | 16 | - Investigation into deploying a VPN tool such as WireGuard to have 17 | inter-node communication between the Netcup hosts. 18 | 19 | *(The rest of this meeting was mostly about NetKube, which has since 20 | been scrapped)*. 21 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2023-02-21.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-02-21 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2023-02-21 7 | --- 8 | # 2023-02-21 9 | 10 | *Migrated from Notion*. 
11 | 12 | 13 | 14 | ## Agenda 15 | 16 | ### Reusable status embed workflows 17 | 18 | - Further discussion with Bella followed 19 | - Upstream pull request can be found at 20 | [python-discord/bot#2400](https://github.com/python-discord/bot/pull/2400) 21 | 22 | ### Local vagrant testing setup 23 | 24 | - Our new [testing setup using Vagrant 25 | VMs](https://github.com/python-discord/infra/pull/78) has been merged. 26 | 27 | ### A visit from Mina 28 | 29 | Mina checked in to make sure we're operating at peak Volkswagen-like 30 | efficiency. 31 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2023-02-28.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-02-28 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2023-02-28 7 | --- 8 | # 2023-02-28 9 | 10 | *Migrated from Notion*. 11 | 12 | ## Agenda 13 | 14 | - Black knight's CI & dependabot configuration has been mirrored across 15 | all important repositories 16 | - The test server has been updated for the new configuration 17 | 18 | 19 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2023-05-16.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-05-16 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2023-05-16 7 | --- 8 | # 2023-05-16 9 | 10 | *Migrated from Notion*. 11 | 12 | ## Agenda 13 | 14 | - Bella set up [CI bot docker image 15 | build](https://github.com/python-discord/bot/pull/2603) to make sure 16 | that wheels are available. 17 | 18 | 19 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2023-07-25.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-07-25 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2023-07-25 7 | --- 8 | # 2023-07-25 9 | 10 | Postponed to next week due to Joe having a severe bellyache. 11 | 12 | 13 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2023-08-22.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2023-08-22 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2023-08-22 7 | --- 8 | # 2023-08-22 9 | 10 | 11 | 12 | ## Agenda 13 | 14 | - Bella said he is on the streets. **We should start a gofundme**. 15 | - After some more conversation this just means he is on vacation and 16 | currently taking a walk. 17 | - Chris has been busy with turning his living room into a picasso art 18 | collection, Johannes has been busy with renovating his bedroom, and 19 | Bella is not home. 20 | - Our next priority is winning. 21 | - We checked out some issues with documentation generation in `bot-core` 22 | that Bella has mentioned. We managed to fix one issue with pydantic by 23 | adding it to an exclude list but ran into another problem next. 
24 | -------------------------------------------------------------------------------- /docs/docs/meeting-notes/posts/2024-07-25.md: -------------------------------------------------------------------------------- 1 | --- 2 | draft: false 3 | date: 2024-07-25 4 | authors: 5 | - joe 6 | description: Meeting minutes from 2024-07-25 7 | --- 8 | # 2024-07-25 9 | 10 | ## Attendees 11 | 12 | Bella, Joe, Fredrick, Chris, Johannes 13 | 14 | 15 | 16 | ## Agenda 17 | 18 | - **Open issues and pull requests in Joe's repositories** 19 | 20 | Joe has plenty of pending changes in his open source repositories on 21 | GitHub. Together with Chris, he went through these and reviewed them. 22 | Most were accepted. Fredrick proposed some further changes to the 23 | ff-bot merge routine which Joe will check out after the meeting. 24 | 25 | - **LDAP** 26 | 27 | Bella is instructed to enter his street address into LDAP for t-shirt 28 | shipping. 29 | 30 | - **New documentation** 31 | 32 | Johannes merged our new documentation. Unfortunately, he forgot to 33 | test it first. Joe visits it and discovers some problems. Johannes 34 | fixes it live. 35 | 36 | - **Turing** 37 | 38 | - **SMTP server** 39 | -------------------------------------------------------------------------------- /docs/docs/onboarding/public_folders.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Documentation on using the ~/public folders on lovelace 3 | --- 4 | # Web-accessible `public` folders 5 | 6 | DevOps team members are automatically provided access to `lovelace`, one of our 7 | Debian machines. Access is granted via their LDAP account which should be 8 | configured with a valid SSH public key. 9 | 10 | Once you have access to the host, any files you host in your `public` folder 11 | (i.e. `/home/joe/public`) are made accessible via `https://pydis.wtf/~username/`. 12 | 13 | By default, an autoindex page will be returned for each folder in this directory 14 | allowing anyone to view file contents. 15 | 16 | If you wish to disable this or wish to display some other content at that 17 | location (i.e. a landing page) you can create an `index.html` file anywhere 18 | which will be returned to browsing users. 19 | 20 | If you require any help with setting this up please let other members of the 21 | DevOps team know. 22 | -------------------------------------------------------------------------------- /docs/docs/onboarding/rules.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: The rules any DevOps team member must follow. 3 | --- 4 | # Rules 5 | 6 | The rules any DevOps team member must follow. 7 | 8 | 1. LMAO - **L**ogging, **M**onitoring, **A**lerting, **O**bservability 9 | 2. Modmail is the greatest piece of software ever written 10 | 3. Modmail needs at least 5 minutes to gather all its greatness at startup 11 | 4. We never blame Chris, it's always <@233481908342882304>'s fault 12 | 5. LKE isn't bad, it's your fault for not paying for the high availability control plane 13 | 6. Our software is never legacy, it's merely well-aged 14 | 7. 
Ignore these rules (however maybe not 1, 1 seems important to remember) 15 | -------------------------------------------------------------------------------- /docs/docs/post-mortems/.authors.yml: -------------------------------------------------------------------------------- 1 | authors: 2 | joe: 3 | name: Joe Banks 4 | description: DevOps Team Member 5 | avatar: https://github.com/jb3.png 6 | -------------------------------------------------------------------------------- /docs/docs/post-mortems/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Here we document post-mortems for incidents that have occurred in the past. We aim to learn from our mistakes and prevent them in the future. 3 | --- 4 | # Post Mortems 5 | 6 | Here we document post-mortems for incidents that have occurred in the past. We aim to learn from our mistakes and prevent them in the future. 7 | 8 | Posts will follow a standardised format and conclude with tasks that we aim to implement to avoid recurrence of similar incidents. 9 | 10 | ## Posts 11 | -------------------------------------------------------------------------------- /docs/docs/post-mortems/posts/images/2021-01-12/site_cpu_throttle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/post-mortems/posts/images/2021-01-12/site_cpu_throttle.png -------------------------------------------------------------------------------- /docs/docs/post-mortems/posts/images/2021-01-12/site_resource_abnormal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/post-mortems/posts/images/2021-01-12/site_resource_abnormal.png -------------------------------------------------------------------------------- /docs/docs/post-mortems/posts/images/2021-01-30/linode_loadbalancers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/post-mortems/posts/images/2021-01-30/linode_loadbalancers.png -------------------------------------------------------------------------------- /docs/docs/post-mortems/posts/images/2021-01-30/memory_charts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/post-mortems/posts/images/2021-01-30/memory_charts.png -------------------------------------------------------------------------------- /docs/docs/post-mortems/posts/images/2021-01-30/prometheus_status.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/post-mortems/posts/images/2021-01-30/prometheus_status.png -------------------------------------------------------------------------------- /docs/docs/post-mortems/posts/images/2021-01-30/scaleios.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/post-mortems/posts/images/2021-01-30/scaleios.png 
-------------------------------------------------------------------------------- /docs/docs/runbooks/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: A collection of runbooks for common maintenance tasks for Python Discord services. 3 | --- 4 | # Runbooks 5 | 6 | Here you will find a collection of runbooks for common maintenance tasks for 7 | Python Discord services. 8 | 9 |
10 | 11 | - :simple-postgresql:{ .lg .middle } __Upgrading PostgreSQL__ 12 | 13 | --- 14 | 15 | An end-to-end guide on how to upgrade a major PostgreSQL version for Python 16 | Discord. 17 | 18 | [:octicons-arrow-right-24: View runbook](./upgrading-postgresql.md) 19 | 20 |
21 | -------------------------------------------------------------------------------- /docs/docs/services/LDAP/assets/keycloak_user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/services/LDAP/assets/keycloak_user.png -------------------------------------------------------------------------------- /docs/docs/services/LDAP/components/ldap.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Maintenance guidance of the Python Discord LDAP service 3 | --- 4 | # LDAP 5 | 6 | You can interact directly with LDAP using command line tools such as 7 | `ldapsearch` and `ldapmodify`. If you prefer a graphical interface, you can use 8 | tools like Apache Directory Studio. 9 | 10 | Our LDAP Base DN is `dc=box,dc=pydis,dc=wtf`, so users reside under a DN like 11 | `uid=yourusername,cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf`. 12 | 13 | You can authenticate with these tools using your own LDAP credentials which have 14 | administrator privileges. 15 | 16 | In order for connections to be trusted, you may need a copy of the CA 17 | certificate. 18 | 19 | You can fetch a copy using the following command on either the `ldap01` host or 20 | `lovelace`. 21 | 22 | ```bash 23 | $ rsync ldap01.box.pydis.wtf:/etc/ipa/ca.crt . 24 | ``` 25 | 26 | Once you have this certificate, you can prepend the `ldapsearch` command with 27 | the following: 28 | 29 | ```bash 30 | $ LDAPTLS_CACERT=ca.crt ldapsearch -x -H ldaps://ldap01.box.pydis.wtf -D "uid=yourusername,cn=users,cn=accounts,dc=box,dc=pydis,dc=wtf" -W 31 | ``` 32 | -------------------------------------------------------------------------------- /docs/docs/services/email/components/assets/dmarc-report.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/docs/docs/services/email/components/assets/dmarc-report.png -------------------------------------------------------------------------------- /docs/docs/services/email/components/dovecot/local-delivery.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Configuration for getting mail delivered to Postfix into a local folder 3 | --- 4 | # Local Delivery 5 | 6 | We use the Dovecot LMTP daemon to deliver mail destined to local mailboxes (i.e. 7 | that has not been forwarded or processed by a service) into the relevant folders 8 | for IMAP consumption. 9 | 10 | As shown in the [Postfix Overview](../../components/postfix.md), mail destined to 11 | local addresses is sent to the Dovecot LMTP agent to be placed into the relevant 12 | `/var/vmail` folder. 13 | 14 | Postfix still performs all pre-delivery checks and handles rejections for 15 | messages that have not met the delivery criteria (i.e. spoofed SPF or DKIM). 16 | 17 | *[LMTP]: Local Mail Transfer Protocol 18 | -------------------------------------------------------------------------------- /docs/docs/services/email/components/signing.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: DKIM signing for Python Discord services 3 | --- 4 | # Mail Signing 5 | 6 | Outbound mail is signed using DKIM to validate the authenticity of the sending 7 | domain. DKIM is a DNS record that contains a public key used to sign outbound 8 | mail. 
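Receiving mail servers verify this signature against the public key we publish in DNS. As a rough illustration of what that published record looks like when queried, the example below uses a made-up `mail` selector; the selector we actually publish lives in the OctoDNS zone data and may differ:

```bash
# Hypothetical selector name, shown for illustration only.
$ dig +short TXT mail._domainkey.pythondiscord.com
# Expected shape of the answer (not our real key):
# "v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A..."
```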
9 | 10 | We use our [OpenDKIM 11 | role](https://github.com/python-discord/infra/tree/main/ansible/roles/opendkim) 12 | which allows us to use the same OpenDKIM instance to sign mail from multiple 13 | domains. 14 | 15 | On sending an outbound mail, Postfix passes the mail to OpenDKIM for signing. 16 | OpenDKIM signs the mail with the private key and adds the DKIM signature to the 17 | mail headers. 18 | 19 | DKIM keys are stored in DNS as a TXT record; this is also configured in the 20 | infra repository using our OctoDNS setup. 21 | -------------------------------------------------------------------------------- /docs/docs/services/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | description: Services hosted by Python Discord 3 | --- 4 | # Service Documentation 5 | 6 | Here you will find an overview of services hosted by Python Discord. 7 | 8 | We have overviews here for how the services work as a user as well as how to 9 | administrate them as a DevOps team member. 10 | -------------------------------------------------------------------------------- /kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes 2 | Configuration and documentation for Python Discord's Kubernetes setup! 3 | 4 | ## Secrets 5 | We use [git-crypt](https://www.agwa.name/projects/git-crypt/) ([GitHub](https://github.com/AGWA/git-crypt)) to secure secrets. Using this means we can commit secrets to change control without the secrets being leaked. 6 | 7 | The [.gitattributes](.gitattributes) file is used to determine which files to encrypt. See the [git-crypt](https://www.agwa.name/projects/git-crypt/) documentation for more information. 8 | 9 | To work with our secrets, you must have your GPG key's [validity attested](https://en.wikipedia.org/wiki/Keysigning) **and** have the key added to the repo by a member of the DevOps team. Once that is done, you can use git-crypt as documented. 10 | 11 | ### git-crypt tl;dr 12 | - Get/build a git-crypt binary from [GitHub](https://github.com/AGWA/git-crypt) or your favourite package manager 13 | - Rename the binary to `git-crypt` 14 | - Add the binary to your PATH 15 | - Run `git-crypt unlock` from this project's root directory. 16 | See the [git-crypt](https://www.agwa.name/projects/git-crypt/) documentation for more information. 17 | -------------------------------------------------------------------------------- /kubernetes/cluster-wide-secrets/README.md: -------------------------------------------------------------------------------- 1 | # Cluster wide secrets 2 | 3 | These are secrets that are re-used across multiple services in the cluster. 4 | 5 | `ghcr-pull-secret` - Used by deployments to pull images from GHCR where the image isn't public.
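The committed `ghcr-pull-secrets.yaml` below is git-crypt encrypted, so only as a rough sketch, an image pull secret of this shape is normally created along these lines (the namespace and credentials here are placeholders rather than the values we actually use):

```bash
# Placeholder namespace and credentials; repeat per namespace that pulls private images.
$ kubectl create secret docker-registry ghcr-pull-secret \
    --namespace bots \
    --docker-server=ghcr.io \
    --docker-username=<github-username> \
    --docker-password=<personal-access-token>
```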
6 | -------------------------------------------------------------------------------- /kubernetes/cluster-wide-secrets/ghcr-pull-secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/cluster-wide-secrets/ghcr-pull-secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/code-jam-management/README.md: -------------------------------------------------------------------------------- 1 | # Code Jam Management 2 | 3 | This contains the deployment for the internal [code jam management](https://github.com/python-discord/code-jam-management) service. 4 | 5 | ### Required Secret 6 | In a secret named `code-jam-management-env`: 7 | 8 | | Environment | Description | 9 | |--------------|------------------------------------------------------------------------| 10 | | API_TOKEN | A random string to use as the auth token for making requests to CJMS | 11 | | DATABASE_URL | `postgres://:@:/` | 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/code-jam-management/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/apis/code-jam-management/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/code-jam-management/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: code-jam-management 5 | namespace: apis 6 | spec: 7 | selector: 8 | app: code-jam-management 9 | ports: 10 | - protocol: TCP 11 | port: 8000 12 | targetPort: 8000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/patsy/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/apis/patsy/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/patsy/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: patsy 5 | namespace: apis 6 | spec: 7 | selector: 8 | app: patsy 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/quackstack/README.md: -------------------------------------------------------------------------------- 1 | # QuackStack 2 | 3 | The deployment for the [QuackStack](https://github.com/python-discord/quackstack) project, hosted at https://quackstack.pythondiscord.com. 4 | 5 | This project doesn't need any configuration right now. 6 | 7 | To deploy this application run `kubectl apply -f .` from this directory. This will create a deployment, service and ingress. 
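After applying, one way to confirm the rollout finished and that all three objects exist is with plain kubectl:

```bash
$ kubectl -n apis rollout status deployment/quackstack
$ kubectl -n apis get deployment,service,ingress quackstack
```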
8 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/quackstack/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: quackstack 5 | namespace: apis 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: quackstack 11 | template: 12 | metadata: 13 | labels: 14 | app: quackstack 15 | spec: 16 | containers: 17 | - name: quackstack 18 | image: ghcr.io/python-discord/quackstack:main 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/quackstack/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: quackstack 9 | namespace: apis 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: quackstack.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: quackstack 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/quackstack/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: quackstack 5 | namespace: apis 6 | spec: 7 | selector: 8 | app: quackstack 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/rtex/README.md: -------------------------------------------------------------------------------- 1 | # rTeX 2 | 3 | This contains the deployment for the [rTeX rendering server](https://github.com/DXsmiley/rtex) service. 
4 | 5 | This is used by Sir-Lancebot to render images for the LaTeX command 6 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/rtex/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rtex 5 | namespace: apis 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: rtex 11 | template: 12 | metadata: 13 | labels: 14 | app: rtex 15 | spec: 16 | containers: 17 | - name: rtex 18 | image: ghcr.io/owl-corp/rtex:latest 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 5000 22 | -------------------------------------------------------------------------------- /kubernetes/namespaces/apis/rtex/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rtex 5 | namespace: apis 6 | spec: 7 | selector: 8 | app: rtex 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 5000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/black-knight/README.md: -------------------------------------------------------------------------------- 1 | ## Black Knight 2 | Deployment file for @Black-Knight, our courageous and ever present anti-raid bot. 3 | 4 | ## Secrets 5 | This deployment expects a number of secrets/environment variables to exist in a secret called `black-knight-env`. 6 | 7 | | Environment | Description | 8 | |-----------------------|-------------------------------------------------------------------| 9 | | BOT_TOKEN | The Discord bot token for Black Knight to connect to Discord with | 10 | | DATABASE_URL | A full PostgreSQL connection string to the postgres db | 11 | | BOT_SENTRY_DSN | The DSN to connect send sentry reports to | 12 | 13 | Black knight also requires a redis password, which is pulled from the `redis-credentials` secret. 
14 | ``` 15 | REDIS_PASSWORD - The password to redis 16 | ``` 17 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/black-knight/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: black-knight 5 | namespace: bots 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: black-knight 11 | template: 12 | metadata: 13 | labels: 14 | app: black-knight 15 | spec: 16 | securityContext: 17 | fsGroup: 2000 18 | runAsUser: 1000 19 | runAsNonRoot: true 20 | containers: 21 | - name: black-knight 22 | image: ghcr.io/python-discord/black-knight:latest 23 | imagePullPolicy: Always 24 | resources: 25 | requests: 26 | cpu: 500m 27 | memory: 300Mi 28 | limits: 29 | cpu: 750m 30 | memory: 600Mi 31 | envFrom: 32 | - secretRef: 33 | name: black-knight-env 34 | - secretRef: 35 | name: redis-credentials 36 | securityContext: 37 | readOnlyRootFilesystem: true 38 | imagePullSecrets: 39 | - name: ghcr-pull-secret 40 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/black-knight/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/black-knight/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/bot/README.md: -------------------------------------------------------------------------------- 1 | ## Bot 2 | 3 | Deployment file for @Python, our valiant community bot and workhorse. 4 | 5 | ## Secrets 6 | This deployment expects a number of secrets and environment variables to exist in a secret called `bot-env`. 7 | 8 | | Environment | Description | 9 | |-------------------|-------------------------------------------------------------| 10 | | API_KEYS_GITHUB | An API key for Github's API. | 11 | | API_KEYS_SITE_API | The token to access our site's API. | 12 | | BOT_SENTRY_DSN | The sentry DSN to send sentry events to. | 13 | | BOT_TOKEN | The Discord bot token to run the bot on. 
| 14 | | METABASE_PASSWORD | Password for Metabase | 15 | | METABASE_USERNAME | Username for Metabase | 16 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/bot/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: bot-config-env 5 | namespace: bots 6 | data: 7 | BOT_TRACE_LOGGERS: bot.utils.scheduling.ModPingsOnOff,bot.exts.moderation.modpings,bot.exts.backend.sync._syncers 8 | DEBUG: 'False' 9 | URLS_PASTE_URL: https://paste.pythondiscord.com 10 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/bot/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/bot/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/king-arthur/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/king-arthur/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/king-arthur/service-account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: king-arthur 6 | rules: 7 | - apiGroups: ["", "extensions", "apps", "batch", "rbac.authorization.k8s.io", "cert-manager.io"] 8 | resources: ["*"] 9 | verbs: ["*"] 10 | --- 11 | apiVersion: v1 12 | kind: ServiceAccount 13 | metadata: 14 | name: king-arthur 15 | namespace: bots 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: king-arthur 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: king-arthur 25 | subjects: 26 | - kind: ServiceAccount 27 | name: king-arthur 28 | namespace: bots 29 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/king-arthur/ssh-secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/king-arthur/ssh-secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/metricity/README.md: -------------------------------------------------------------------------------- 1 | # Metricity 2 | 3 | This folder contains the secrets for the metricity service. 4 | 5 | The actual metricity deployment manifest can be found inside the metricity repository at [python-discord/metricity](https://github.com/python-discord/metricity). 
6 | 7 | ## Secrets 8 | A single secret of name `metricity-env` is used with the following values: 9 | 10 | | Environment | Description | 11 | |--------------|------------------------------------| 12 | | BOT_TOKEN | The Discord bot token to run under | 13 | | DATABASE_URI | Database URI to save the states to | 14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/metricity/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: metricity 5 | namespace: bots 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | selector: 11 | matchLabels: 12 | app: metricity 13 | template: 14 | metadata: 15 | labels: 16 | app: metricity 17 | spec: 18 | securityContext: 19 | fsGroup: 2000 20 | runAsUser: 1000 21 | runAsNonRoot: true 22 | containers: 23 | - name: metricity 24 | image: ghcr.io/python-discord/metricity:latest 25 | imagePullPolicy: "Always" 26 | envFrom: 27 | - secretRef: 28 | name: metricity-env 29 | securityContext: 30 | readOnlyRootFilesystem: true 31 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/metricity/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/metricity/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/sir-lancebot/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/sir-lancebot/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/sir-robin/README.md: -------------------------------------------------------------------------------- 1 | ## Sir-Robin 2 | Deployment file for @Sir-Robin, the not-quite-so-bot as Sir Lancebot, is our humble events bot. 3 | He is tasked with dealing with all the things that the event team can throw at it! 4 | 5 | ## Secrets 6 | This deployment expects a number of secrets/environment variables to exist in a secret called `sir-robin-env`. The bot also relies on redis credentials being available in a secret named `redis-credentials` 7 | 8 | | Environment | Description | 9 | |---------------------------|------------------------------------------------| 10 | | AOC_RAW_LEADERBOARDS | A list of all AOC leaderboards to use | 11 | | AOC_STAFF_LEADERBOARD_ID | The staff AOC leaderboard. | 12 | | BOT_SENTRY_DSN | The sentry DSN to send warning & error logs to | 13 | | BOT_TOKEN | The bot token to run the bot on. 
| 14 | | CODE_JAM_API_KEY | The API key to the code jam management system | 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/sir-robin/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: sir-robin-config-env 5 | namespace: bots 6 | data: 7 | AOC_YEAR: '2024' 8 | BOT_DEBUG: 'False' 9 | CATEGORY_SUMMER_CODE_JAM: '1258091972246376468' 10 | CHANNEL_ADVENT_OF_CODE_ANNOUNCE: '1312400095123341465' 11 | CHANNEL_SUMMER_CODE_JAM_ANNOUNCEMENTS: '1258830961718857778' 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/sir-robin/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: sir-robin 5 | namespace: bots 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: sir-robin 11 | template: 12 | metadata: 13 | labels: 14 | app: sir-robin 15 | spec: 16 | securityContext: 17 | fsGroup: 2000 18 | runAsUser: 1000 19 | runAsNonRoot: true 20 | containers: 21 | - name: sir-robin 22 | image: ghcr.io/python-discord/sir-robin:latest 23 | imagePullPolicy: Always 24 | resources: 25 | requests: 26 | cpu: 500m 27 | memory: 300Mi 28 | limits: 29 | cpu: 750m 30 | memory: 600Mi 31 | envFrom: 32 | - secretRef: 33 | name: sir-robin-env 34 | - secretRef: 35 | name: redis-credentials 36 | - configMapRef: 37 | name: sir-robin-config-env 38 | securityContext: 39 | readOnlyRootFilesystem: true 40 | -------------------------------------------------------------------------------- /kubernetes/namespaces/bots/sir-robin/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/bots/sir-robin/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/README.md: -------------------------------------------------------------------------------- 1 | # cert-manager 2 | 3 | X.509 certificate management for Kubernetes. 4 | 5 | > cert-manager builds on top of Kubernetes, introducing certificate authorities and certificates as first-class resource types in the Kubernetes API. This makes it possible to provide to developers 'certificates as a service' in your Kubernetes cluster. 6 | 7 | We install cert-manager through [Helm using this guide](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm). 8 | 9 | ## Directories 10 | 11 | `issuers`: Contains configured issuers, right now only letsencrypt production & staging. 12 | 13 | `certificates`: Contains TLS certificates that should be provisioned and where they should be stored. 
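For reference, an install of the upstream chart along the lines of the linked guide looks roughly like this; the exact chart version and flags used for our cluster are not recorded in this directory, so treat it as a sketch (`values.yaml` refers to the file in this directory):

```bash
$ helm repo add jetstack https://charts.jetstack.io
$ helm repo update
$ helm upgrade --install cert-manager jetstack/cert-manager \
    --namespace cert-manager \
    --create-namespace \
    --values values.yaml
```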
14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/certificates/owlcorp.uk.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: owlcorp-uk 5 | spec: 6 | secretName: owlcorp.uk-tls 7 | dnsNames: 8 | - owlcorp.uk 9 | - "*.owlcorp.uk" 10 | issuerRef: 11 | name: letsencrypt 12 | kind: ClusterIssuer 13 | secretTemplate: 14 | annotations: 15 | reflector.v1.k8s.emberstack.com/reflection-allowed: "true" 16 | reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "merch" 17 | reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true" 18 | reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "merch" 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/certificates/pydis.wtf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: pydis-wtf 5 | spec: 6 | secretName: pydis.wtf-tls 7 | dnsNames: 8 | - pydis.wtf 9 | - "*.pydis.wtf" 10 | issuerRef: 11 | name: letsencrypt 12 | kind: ClusterIssuer 13 | secretTemplate: 14 | annotations: 15 | reflector.v1.k8s.emberstack.com/reflection-allowed: "true" 16 | reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "monitoring,modmail,tooling,pixels,vault,loki,merch" 17 | reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true" 18 | reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "monitoring,modmail,tooling,pixels,vault,loki,merch" 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/certificates/pythondiscord.com.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: pythondiscord-com 5 | spec: 6 | secretName: pythondiscord.com-tls 7 | dnsNames: 8 | - pythondiscord.com 9 | - '*.pythondiscord.com' 10 | issuerRef: 11 | name: letsencrypt 12 | kind: ClusterIssuer 13 | secretTemplate: 14 | annotations: 15 | reflector.v1.k8s.emberstack.com/reflection-allowed: "true" 16 | reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "apis,forms,merch,monitoring,pixels,tooling,web" 17 | reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true" 18 | reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "apis,forms,merch,monitoring,pixels,tooling,web" 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/issuers/letsencrypt-prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt 5 | namespace: cert-manager 6 | spec: 7 | acme: 8 | email: letsencrypt@pythondiscord.com 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | privateKeySecretRef: 11 | name: letsencrypt-account-key 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: cloudflare@pydis.com 16 | apiTokenSecretRef: 17 | name: cloudflare-api-token 18 | key: api-token 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/issuers/letsencrypt-staging.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-staging 5 | namespace: cert-manager 6 | spec: 7 | acme: 8 | email: letsencrypt@pythondiscord.com 9 | server: https://acme-staging-v02.api.letsencrypt.org/directory 10 | privateKeySecretRef: 11 | name: letsencrypt-staging-account-key 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: cloudflare@pydis.com 16 | apiTokenSecretRef: 17 | name: cloudflare-api-token 18 | key: api-token 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/cert-manager/cert-manager/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/cert-manager/cert-manager/values.yaml: -------------------------------------------------------------------------------- 1 | crds: 2 | enabled: true 3 | startupapicheck: 4 | timeout: 5m 5 | 6 | webhook: 7 | hostNetwork: true 8 | securePort: 10260 9 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/blackbox/README.md: -------------------------------------------------------------------------------- 1 | # Blackbox 2 | These manifests provision a CronJob for blackbox, our database backup tool. 3 | 4 | You can find the repository for blackbox at [lemonsaurus/blackbox](https://github.com/lemonsaurus/blackbox). 5 | 6 | ## Secrets 7 | blackbox requires the following secrets in a secret titled `blackbox-env`: 8 | 9 | | Variable | Description | 10 | |--------------------------------|------------------------| 11 | | **POSTGRES_USER** | Postgres username | 12 | | **POSTGRES_PASSWORD** | Postgres password | 13 | | **REDIS_PASSWORD** | Redis password | 14 | | **MONGO_INITDB_ROOT_USERNAME** | MongoDB username | 15 | | **MONGO_INITDB_ROOT_PASSWORD** | MongoDB password | 16 | | **AWS_ACCESS_KEY_ID** | Access key for S3 | 17 | | **AWS_SECRET_ACCESS_KEY** | Secret key for S3 | 18 | | **DEVOPS_WEBHOOK** | Webhook for #dev-ops | 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/blackbox/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/databases/blackbox/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/mongodb/README.md: -------------------------------------------------------------------------------- 1 | # Python Discord MongoDB 2 | This folder contains the configuration for Python Discord's MongoDB instance. 3 | 4 | ## Volume 5 | A 10Gi volume is provisioned on the Linode Block Storage (Retain) storage class. 
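Once `volume.yaml` below has been applied, the claim can be checked with plain kubectl; it should report `Bound` after Linode provisions the disk:

```bash
$ kubectl -n databases get pvc mongodb-storage
```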
6 | 7 | ## Secrets 8 | | Key | Value | Description | 9 | | ---------------------------- | -------------------------- | ------------------------------- | 10 | | `MONGO_INITDB_ROOT_USERNAME` | `pythondiscord` | Username of root user | 11 | | `MONGO_INITDB_ROOT_PASSWORD` | Root password for database | Password for the root user | 12 | 13 | 14 | ## Deployment 15 | The deployment will pull the `mongo:latest` image from DockerHub. 16 | 17 | It will mount the created volume at `/data/db`. 18 | 19 | It will expose port `27017` to connect to MongoDB. 20 | 21 | ## Service 22 | A service called `mongodb` will be created to give the deployment a cluster local DNS record of `mongodb.databases.svc.cluster.local`. 23 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/mongodb/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/databases/mongodb/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/mongodb/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongodb 5 | namespace: databases 6 | spec: 7 | ports: 8 | - port: 27017 9 | selector: 10 | app: mongodb 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/mongodb/volume.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: mongodb-storage 5 | labels: 6 | app: mongodb 7 | namespace: databases 8 | spec: 9 | storageClassName: linode-block-storage-retain 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/redis/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: redis-conf 5 | namespace: databases 6 | data: 7 | redis.conf: | 8 | # Store all commands used and replay on server startup 9 | appendonly yes 10 | 11 | # Set working directory 12 | dir /data 13 | 14 | # Set a memory maximum 15 | maxmemory 1gb 16 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/redis/redis.conf.template: -------------------------------------------------------------------------------- 1 | # Store all commands used and replay on server startup 2 | appendonly yes 3 | 4 | # Set password 5 | requirepass 6 | 7 | # Set working directory 8 | dir /data 9 | 10 | # Set a memory maximum 11 | maxmemory 1gb 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/redis/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/databases/redis/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/redis/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | 
kind: Service 3 | metadata: 4 | name: redis 5 | namespace: databases 6 | spec: 7 | ports: 8 | - port: 6379 # Redis default port 9 | selector: 10 | app: redis 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/databases/redis/volume.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: redis-storage 5 | namespace: databases 6 | labels: 7 | app: redis 8 | spec: 9 | storageClassName: linode-block-storage-retain 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/graphite/README.md: -------------------------------------------------------------------------------- 1 | # Graphite 2 | 3 | These files provision an instance of the [graphite-statsd](https://hub.docker.com/r/graphiteapp/graphite-statsd/) image. 4 | 5 | The following ports are exposed by the service: 6 | 7 | **80**: NGINX 8 | **8125**: StatsD Ingest 9 | **8126**: StatsD Admin 10 | 11 | There is a 10Gi persistent volume mounted at `/opt/graphite/storage` which holds our statistic data. 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/graphite/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: graphite 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app: graphite 12 | template: 13 | metadata: 14 | labels: 15 | app: graphite 16 | spec: 17 | containers: 18 | - name: graphite 19 | image: graphiteapp/graphite-statsd:latest 20 | imagePullPolicy: Always 21 | resources: 22 | requests: 23 | cpu: 200m 24 | memory: 500Mi 25 | limits: 26 | cpu: 1000m 27 | memory: 750Mi 28 | ports: 29 | - containerPort: 80 30 | - containerPort: 8125 31 | - containerPort: 8126 32 | volumeMounts: 33 | - mountPath: /opt/graphite/storage 34 | name: graphite-volume 35 | volumes: 36 | - name: graphite-volume 37 | persistentVolumeClaim: 38 | claimName: graphite-storage 39 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/graphite/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: graphite 5 | spec: 6 | ports: 7 | - port: 80 8 | name: nginx 9 | - port: 8125 10 | name: statsd 11 | protocol: UDP 12 | - port: 8126 13 | name: statsd-admin 14 | selector: 15 | app: graphite 16 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/graphite/volume.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: graphite-storage 5 | labels: 6 | app: graphite 7 | spec: 8 | storageClassName: linode-block-storage-retain 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 30Gi 14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/redirects/README.md: -------------------------------------------------------------------------------- 1 | # Redirects 2 | Some of our point to an external service, for example https://git.pythondiscord.com/ 
points towards our GitHub organisation. 3 | 4 | This folder contains all the redirects for our subdomains. 5 | 6 | They consist of an Ingress to handle the redirection through rewrite annotations. 7 | 8 | To deploy these routes simply run `kubectl apply -f .` in this folder. 9 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/redirects/github.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/rewrite-target: "https://github.com/python-discord/$1" 7 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 8 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 9 | name: github-redirect 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: git.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: site 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/redirects/paypal.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/rewrite-target: "https://www.paypal.com/paypalme/pythondiscord" 7 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 8 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 9 | name: paypal-redirect 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: paypal.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: site 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/default/redirects/sentry.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/rewrite-target: "https://sentry.io/organizations/python-discord/issues/" 7 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 8 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 9 | name: sentry-redirect 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: sentry.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: site 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/forms/forms-backend/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | 
nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: forms-backend 9 | namespace: forms 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: forms-api.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: forms-backend 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/forms/forms-backend/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/forms/forms-backend/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/forms/forms-backend/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: forms-backend 5 | namespace: forms 6 | spec: 7 | selector: 8 | app: forms-backend 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/coredns-custom.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: coredns-custom 5 | namespace: kube-system 6 | data: 7 | log.include: | 8 | log postgres.default.svc.cluster.local 9 | log postgres.default 10 | log postgres 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/metrics-server/README.md: -------------------------------------------------------------------------------- 1 | # `metrics-server` 2 | 3 | We deploy the Kubernetes Metrics Server from https://github.com/kubernetes-sigs/metrics-server 4 | 5 | This service allows commands like `kubectl top` to return resource usage values 6 | for nodes and pods in the cluster. 7 | 8 | ## Deployment 9 | 10 | We use Helm for this deployment, the deployment steps are as follows: 11 | 12 | ``` sh 13 | $ helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/ 14 | $ helm upgrade -n kube-system --install metrics-server -f values.yaml metrics-server/metrics-server 15 | ``` 16 | 17 | You can validate a successful deployment by confirming that the following gives 18 | a valid output: 19 | 20 | ``` sh 21 | $ kubectl top nodes 22 | ``` 23 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/metrics-server/values.yaml: -------------------------------------------------------------------------------- 1 | args: 2 | - "--kubelet-insecure-tls" 3 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/nginx/README.md: -------------------------------------------------------------------------------- 1 | # NGINX 2 | 3 | NGINX ingress is our ingress controller for all PyDis web properties. 4 | 5 | This directory contains resources for the Helm chart we use to deploy. 
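A minimal install of the upstream ingress-nginx chart looks roughly like the following; the release name and `kube-system` namespace are inferred from the `ingress-nginx-internal` Service in this directory, and any custom values we pass are not reproduced here:

```bash
$ helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
$ helm repo update
$ helm upgrade --install ingress-nginx ingress-nginx/ingress-nginx \
    --namespace kube-system
```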
6 | 7 | Documentation for deploying nginx-ingress with Helm is located [here](https://kubernetes.github.io/ingress-nginx/deploy/#using-helm), the chart is located [here](https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx). 8 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/nginx/internal-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ingress-nginx-internal 5 | namespace: kube-system 6 | spec: 7 | selector: 8 | app.kubernetes.io/instance: ingress-nginx 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | name: http 13 | targetPort: 80 14 | - protocol: TCP 15 | port: 443 16 | name: https 17 | targetPort: 443 18 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/nginx/mtls/Makefile: -------------------------------------------------------------------------------- 1 | certs: 2 | cat *.pem > ca.crt 3 | 4 | secret: 5 | kubectl create secret -n kube-system generic mtls-client-crt-bundle --from-file=ca.crt=ca.crt 6 | 7 | all: certs secret 8 | 9 | delete: 10 | kubectl delete secret -n kube-system mtls-client-crt-bundle 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/kube-system/reflector/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes reflector 2 | 3 | We use [kubernetes-reflector](github.com/emberstack/kubernetes-reflector) to mirror certificate resources into all namespaces that need access to the wildcard certificates used for the cluster. 4 | 5 | It is deployed using Helm with no additional configuration using the following steps: 6 | 7 | ``` sh 8 | $ helm repo add emberstack https://emberstack.github.io/helm-charts 9 | $ helm repo update 10 | $ helm upgrade -n kube-system --install reflector emberstack/reflector 11 | ``` 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/loki/README.md: -------------------------------------------------------------------------------- 1 | # Logging 2 | 3 | We deploy multiple components through Helm to solve logging within our cluster. 4 | 5 | ## Loki 6 | 7 | We use `loki_values.yml` to deploy the `grafana/loki` Helm chart. 8 | 9 | Once this is deployed, the service `loki-gateway.loki.svc.cluster.local` will point to one of the Loki replicas. 10 | 11 | ``` 12 | $ helm repo add grafana https://grafana.github.io/helm-charts 13 | $ helm repo update 14 | $ helm upgrade --install -n loki --values loki_values.yml loki grafana/loki 15 | ``` 16 | 17 | ## Alloy 18 | 19 | [Alloy](https://grafana.com/oss/alloy-opentelemetry-collector/) ships logs from all pods through to Loki. 20 | 21 | This requires no additional configuration, just make sure Loki is up and Alloy will start shipping logs. 
22 | 23 | ``` 24 | $ helm upgrade --install -n loki --values alloy_values.yml alloy grafana/alloy 25 | ``` 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/loki/ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: loki-gateway 5 | namespace: loki 6 | annotations: 7 | nginx.ingress.kubernetes.io/auth-type: basic 8 | nginx.ingress.kubernetes.io/auth-secret: loki-basic-auth 9 | nginx.ingress.kubernetes.io/auth-realm: "Loki" 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: loki-gateway.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: loki-gateway 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/loki/secret.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/loki/secret.yml -------------------------------------------------------------------------------- /kubernetes/namespaces/merch/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: thallium-config-env 5 | namespace: merch 6 | data: 7 | BACKEND_CORS_ORIGINS: '["https://merch.pydis.wtf","https://merch.owlcorp.uk"]' 8 | BACKEND_PRINTFUL_STORE_ID: '6179998' 9 | -------------------------------------------------------------------------------- /kubernetes/namespaces/merch/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: thallium-backend 5 | namespace: merch 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: thallium-backend 11 | template: 12 | metadata: 13 | labels: 14 | app: thallium-backend 15 | spec: 16 | containers: 17 | - name: thallium-backend 18 | image: ghcr.io/owl-corp/thallium-backend:latest 19 | imagePullPolicy: "Always" 20 | envFrom: 21 | - secretRef: 22 | name: thallium-backend-env 23 | - configMapRef: 24 | name: thallium-config-env 25 | securityContext: 26 | readOnlyRootFilesystem: true 27 | imagePullSecrets: 28 | - name: ghcr-pull-secret 29 | securityContext: 30 | fsGroup: 2000 31 | runAsUser: 1000 32 | runAsNonRoot: true 33 | -------------------------------------------------------------------------------- /kubernetes/namespaces/merch/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/merch/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/merch/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: thallium-backend-svc 5 | namespace: merch 6 | spec: 7 | selector: 8 | app: thallium-backend 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/README.md: 
-------------------------------------------------------------------------------- 1 | # Modmail 2 | 3 | This folder contains the manifests for our Modmail service. 4 | 5 | ## Secrets 6 | 7 | The services require one shared secret called `modmail` containing the following: 8 | 9 | | Key | Value | Description | 10 | | ------------------------| ---------------------------------|--------------------------------------------------------------| 11 | | `CONNECTION_URI` | MongoDB connection URI | Used for storing data | 12 | | `TOKEN` | Discord Token | Used to connect to Discord | 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/bot/README.md: -------------------------------------------------------------------------------- 1 | # Modmail bot 2 | These manifests will provision the resources for an instance of our Modmail bot. 3 | 4 | To deploy this bot simply run: 5 | ``` 6 | kubectl apply -f deployment.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: modmail-config-env 5 | namespace: modmail 6 | data: 7 | DATABASE_TYPE: 'mongodb' # The type of database to use, only supports mongodb right now 8 | DATA_COLLECTION: 'false' # Disable bot metadata collection by modmail devs 9 | DISABLE_AUTOUPDATES: 'yes' 10 | GUILD_ID: '267624335836053506' 11 | LOG_URL: https://modmail.pydis.wtf/ 12 | OWNERS: 165023948638126080,95872159741644800,336843820513755157 13 | REGISTRY_PLUGINS_ONLY: 'false' # Allow the usage of plugins outside of the official registry 14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/modmail/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/web/README.md: -------------------------------------------------------------------------------- 1 | # Modmail web 2 | These manifests provision an instance of the web logviewer for our Modmail system. 
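In the same vein as the bot above, a minimal sketch for applying the logviewer (assuming `kubectl` is pointed at the cluster and you are in this `web/` directory) would be:

``` sh
# Apply the logviewer deployment, ingress, and service defined in this folder
$ kubectl apply -f deployment.yaml -f ingress.yaml -f service.yaml
```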
3 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/web/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: modmail-web 5 | namespace: modmail 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: modmail-web 11 | template: 12 | metadata: 13 | labels: 14 | app: modmail-web 15 | spec: 16 | containers: 17 | - name: modmail-web 18 | image: ghcr.io/python-discord/logviewer:latest 19 | imagePullPolicy: Always 20 | resources: 21 | requests: 22 | cpu: 50m 23 | memory: 100Mi 24 | limits: 25 | cpu: 100m 26 | memory: 150Mi 27 | ports: 28 | - containerPort: 8000 29 | envFrom: 30 | - secretRef: 31 | name: modmail 32 | - configMapRef: 33 | name: modmail-config-env 34 | securityContext: 35 | readOnlyRootFilesystem: true 36 | securityContext: 37 | fsGroup: 2000 38 | runAsUser: 1000 39 | runAsNonRoot: true 40 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/web/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: modmail-web 9 | namespace: modmail 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: modmail.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: modmail-web 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/modmail/web/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: modmail-web 5 | namespace: modmail 6 | spec: 7 | selector: 8 | app: modmail-web 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: alerts alertmanager 2 | 3 | all: alerts alertmanager 4 | 5 | # Upload the alerting rules to the Kubernetes cluster 6 | alerts: 7 | kubectl create configmap -n monitoring prometheus-alert-rules --from-file=alerts.d/ -o yaml --dry-run=client | kubectl apply -f - 8 | 9 | # Upload the alertmanager configuration to the Kubernetes cluster 10 | alertmanager: 11 | kubectl create configmap -n monitoring alertmanager-config --from-file=alertmanager.yaml=alertmanager.yaml -o yaml --dry-run=client | kubectl apply -f - 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/README.md: -------------------------------------------------------------------------------- 1 | # Alerts 2 | 3 | This directory contains alerting rules and routing configuration for production. 4 | 5 | To build and upload this configuration, see the annotated `Makefile` in this directory. 
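For example (a minimal sketch, assuming `kubectl` is already configured against the cluster), the targets in the `Makefile` can be invoked as follows:

``` sh
# Upload both the alerting rules and the Alertmanager configuration
$ make all

# Or upload them individually
$ make alerts
$ make alertmanager
```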
6 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alertmanager/sd-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: alertmanager-sd 5 | namespace: monitoring 6 | spec: 7 | selector: 8 | app: alertmanager 9 | clusterIP: None 10 | ports: 11 | - port: 9093 12 | targetPort: 9093 13 | name: am 14 | - port: 9094 15 | targetPort: 9094 16 | name: am-peering 17 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alertmanager/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/monitoring/alerts/alertmanager/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alertmanager/service-account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: alertmanager 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "endpoints"] 9 | verbs: ["get", "list"] 10 | --- 11 | apiVersion: v1 12 | kind: ServiceAccount 13 | metadata: 14 | name: alertmanager 15 | namespace: monitoring 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: alertmanager 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: alertmanager 25 | subjects: 26 | - kind: ServiceAccount 27 | name: alertmanager 28 | namespace: monitoring 29 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alertmanager/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: alertmanager 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: "true" 8 | prometheus.io/port: "9093" 9 | spec: 10 | selector: 11 | app: alertmanager 12 | ports: 13 | - port: 9093 14 | targetPort: 9093 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: alertmanager 3 | rules: 4 | 5 | - alert: alert-manager/cluster-failed-peers 6 | expr: alertmanager_cluster_failed_peers > 0 7 | for: 1m 8 | labels: 9 | severity: page 10 | annotations: 11 | summary: "An Alertmanager node is reporting failed peers" 12 | description: "AM {{ $labels.instance }} is reporting that {{ $value }} of its peers are invalid."
13 | 14 | - alert: alert-manager/health-score 15 | expr: alertmanager_cluster_health_score > 0 16 | for: 1m 17 | labels: 18 | severity: page 19 | annotations: 20 | summary: "An Alertmanager node is reporting an unhealthy cluster" 21 | description: "AM {{ $labels.instance }} is reporting that the cluster has a health score of {{ $value }} (where 0 is healthy)." 22 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/certificates.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: certificates 3 | interval: 1d 4 | rules: 5 | 6 | - alert: cert-manager/certificate-expiring-soon 7 | expr: (certmanager_certificate_expiration_timestamp_seconds - time()) / 60 / 60 / 24 < 7 8 | for: 0m 9 | labels: 10 | # This isn't critical, but if we have reached this point, renewal probably needs manual intervention. 11 | severity: page 12 | annotations: 13 | summary: "Certificate is expiring in < 7 days" 14 | description: "The certificate named {{ $labels.name }} is due for expiry in {{ $value | humanize }} days." 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/coredns.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: coredns 3 | rules: 4 | 5 | - alert: core-dns/panics 6 | expr: increase(coredns_panics_total[1m]) > 0 7 | for: 0m 8 | labels: 9 | severity: page 10 | annotations: 11 | summary: "CoreDNS is experiencing panics" 12 | description: "Number of CoreDNS panics encountered: {{ $value }}" 13 | 14 | - alert: core-dns/cache-misses 15 | expr: rate(coredns_cache_misses_total{}[10m]) / rate(coredns_cache_misses_total{}[10m] offset 10m) > 5.00 16 | for: 2h 17 | labels: 18 | severity: page 19 | annotations: 20 | summary: "High CoreDNS cache misses in the last 2 hours" 21 | description: "This can sometimes be an indication of networking troubles; currently {{ $value | humanizePercentage }} over the last 2 hours."
22 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/cpu.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: cpu 3 | rules: 4 | 5 | - alert: containers/high-cpu-throttling 6 | expr: rate(container_cpu_cfs_throttled_seconds_total{pod=~".+", container_name!="POD", image!=""}[5m]) > 1 7 | for: 5m 8 | labels: 9 | severity: page 10 | annotations: 11 | summary: "Container {{ $labels.container_name }} in {{ $labels.pod }} is experiencing high CPU throttling" 12 | description: "{{ $labels.container_name }} inside {{ $labels.pod }} is being throttled at a rate of {{ $value }}" 13 | 14 | - alert: kubernetes/high-node-cpu 15 | expr: 100 - (avg by (kubernetes_node) (irate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[5m])) * 100) > 80 16 | for: 5m 17 | labels: 18 | severity: page 19 | annotations: 20 | summary: "Node {{ $labels.kubernetes_node }} has had CPU usage over 80% for the last 5 minutes" 21 | description: "CPU on {{ $labels.kubernetes_node }} is averaging {{ $value }}%" 22 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/etcd.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: etcd 3 | rules: 4 | - alert: etcd/error-spike 5 | expr: rate(etcd_request_error_total[5m]) > 0 6 | for: 5m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: "Etcd request errors are spiking" 11 | description: "Etcd request errors are spiking on {{ $labels.instance }} for {{ $labels.operation }}s to {{ $labels.type }}" 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/jobs.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: jobs 3 | rules: 4 | - alert: kubernetes/cronjob-suspended 5 | expr: kube_cronjob_spec_suspend != 0 6 | for: 0m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: "Kubernetes CronJob suspended: {{ $labels.cronjob }}" 11 | description: "CronJob {{ $labels.kubernetes_namespace }}/{{ $labels.cronjob }} is suspended" 12 | 13 | - alert: kubernetes/jobs-failed 14 | expr: kube_job_status_failed > 0 15 | for: 0m 16 | labels: 17 | severity: page 18 | annotations: 19 | summary: "Kubernetes Job failed: {{ $labels.job_name }}" 20 | description: "Job {{$labels.kubernetes_namespace}}/{{$labels.job_name}} failed to complete" 21 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/mail.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: postfix 3 | rules: 4 | - alert: postfix/down 5 | expr: postfix_up != 1 6 | for: 5m 7 | labels: 8 | severity: warning 9 | annotations: 10 | summary: Postfix is down (instance {{ $labels.instance }}) 11 | - alert: postfix/smtp-temporary-errors 12 | expr: rate(postfix_smtpd_messages_rejected_total{code=~"^4.*"}[15m]) > 0 13 | for: 0m 14 | labels: 15 | severity: warning 16 | annotations: 17 | summary: Postfix is rejecting messages due to errors (instance {{ $labels.instance }}) 18 | description: Postfix has seen code {{ $labels.code }} errors recently 19 | and temporarily rejected emails.
20 | https://en.wikipedia.org/wiki/List_of_SMTP_server_return_codes and 21 | `sudo journalctl -xeu postfix@-` may provide more information on 22 | the current issue. 23 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/memory.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: memory 3 | rules: 4 | 5 | - alert: node/high-memory-usage 6 | expr: node_memory_Active_bytes / node_memory_MemTotal_bytes > 0.8 7 | for: 30s 8 | labels: 9 | severity: page 10 | annotations: 11 | summary: "Node {{ $labels.kubernetes_node }} has RAM usage >80% for 5 minutes" 12 | description: 'RAM usage is currently {{ $value | humanizePercentage }} on {{ $labels.kubernetes_node }}' 13 | 14 | - alert: container/oom 15 | expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1 16 | for: 0m 17 | labels: 18 | severity: page 19 | annotations: 20 | summary: Kubernetes Container oom killer (instance {{ $labels.instance }}) 21 | description: "Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" 22 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/pods.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: pods 3 | rules: 4 | - alert: kubernetes/pod-not-healthy 5 | expr: min_over_time(sum by (namespace, pod) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"})[3m:1m]) > 0 6 | for: 3m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: "Kubernetes Pod not healthy: {{ $labels.namespace }}/{{ $labels.pod }}" 11 | description: "Pod has been in a non-ready state for longer than 3 minutes." 12 | 13 | - alert: kubernetes/pod-crash-looping 14 | expr: increase(kube_pod_container_status_restarts_total[5m]) > 3 15 | for: 1m 16 | labels: 17 | severity: warning 18 | annotations: 19 | summary: "Kubernetes pod crash looping: {{ $labels.kubernetes_namespace }}/{{ $labels.pod }}" 20 | description: "Pod {{ $labels.pod }} is crash looping" 21 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/prometheus.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: prometheus 3 | rules: 4 | 5 | # Alert for any instance that is unreachable for >5 minutes. 6 | - alert: prometheus/instance-down 7 | expr: up == 0 8 | for: 5m 9 | labels: 10 | severity: page 11 | annotations: 12 | summary: "Instance {{ $labels.instance }} down" 13 | description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes." 14 | 15 | - alert: prometheus/config-failed 16 | expr: prometheus_config_last_reload_successful == 0 17 | for: 0m 18 | labels: 19 | severity: page 20 | annotations: 21 | summary: "Prometheus config reload in pod {{ $labels.kubernetes_pod_name }} has failed" 22 | description: "Prometheus instance {{ $labels.kubernetes_pod_name }} (`{{ $labels.instance }}`) has failed to reload its config." 
23 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/redis.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: redis 3 | rules: 4 | - alert: redis/down 5 | expr: redis_up == 0 6 | for: 1m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: "Redis is offline" 11 | description: "Redis Exporter cannot connect to Redis." 12 | 13 | - alert: redis/oom 14 | expr: redis_memory_used_bytes / redis_memory_max_bytes > 0.9 15 | for: 0m 16 | labels: 17 | severity: page 18 | annotations: 19 | summary: "Redis is approaching its memory limit" 20 | description: "Redis is currently using {{ $value | humanizePercentage }} of configured memory." 21 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/alerts/alerts.d/volumes.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: volumes 3 | rules: 4 | - alert: kubernetes/volume-out-of-space 5 | expr: kubelet_volume_stats_available_bytes{persistentvolumeclaim!="prometheus-storage"} / kubelet_volume_stats_capacity_bytes * 100 < 10 6 | for: 2m 7 | labels: 8 | severity: page 9 | annotations: 10 | summary: Kubernetes Volume {{ $labels.kubernetes_namespace }}/{{ $labels.persistentvolumeclaim }} is running low on disk space 11 | description: "Volume is almost full (< 10% left)\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/calico-metrics-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: felix-metrics-svc 5 | namespace: kube-system 6 | annotations: 7 | prometheus.io/scrape: "true" 8 | prometheus.io/port: "9091" 9 | spec: 10 | selector: 11 | k8s-app: calico-node 12 | ports: 13 | - port: 9091 14 | targetPort: 9091 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/exporters/README.md: -------------------------------------------------------------------------------- 1 | # Exporters 2 | This directory contains Prometheus exporters for various services running on our cluster. 3 | 4 | If an exporter requires any secrets, they will be in a `secrets.yaml` file next to its deployment. 5 | 6 | Below is a list of the exporters: 7 | - [redis_exporter](https://github.com/oliver006/redis_exporter) 8 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/exporters/redis/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/monitoring/exporters/redis/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/README.md: -------------------------------------------------------------------------------- 1 | # Grafana 2 | 3 | This folder contains the manifests for deploying our Grafana instance, the service we use to query our data. 4 | 5 | This deployment expects a number of secrets and environment variables to exist in a secret called `grafana-secret-env`.
6 | 7 | | Environment | Description | 8 | |------------------------------|-----------------------------------------------------| 9 | | GF_AUTH_GITHUB_CLIENT_ID | The client ID of the GitHub app to use for auth | 10 | | GF_AUTH_GITHUB_CLIENT_SECRET | The client secret of the GitHub app to use for auth | 11 | | GF_SECURITY_ADMIN_PASSWORD | The admin password for the Grafana admin console | 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/configmap-ldap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-ldap 5 | namespace: monitoring 6 | data: 7 | # Enable LDAP Auth 8 | GF_AUTH_LDAP_ENABLED: "true" 9 | 10 | # Set config file to the LDAP toml we mount in 11 | GF_AUTH_LDAP_CONFIG_FILE: "/opt/pydis/grafana-ldap.toml" 12 | 13 | # Allow new users to be created from LDAP data 14 | GF_AUTH_LDAP_ALLOW_SIGN_UP: "true" 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: grafana-default 5 | namespace: monitoring 6 | data: 7 | # Root settings 8 | GF_INSTANCE_NAME: "pythondiscord" 9 | GF_SERVER_DOMAIN: "grafana.pydis.wtf" 10 | GF_SERVER_ROOT_URL: "https://grafana.pydis.wtf" 11 | GF_SECURITY_COOKIE_SECURE: "true" 12 | 13 | # Image storage 14 | GF_EXTERNAL_IMAGE_STORAGE_PROVIDED: "local" 15 | 16 | # Metrics 17 | GF_METRICS_ENABLED: "false" 18 | 19 | # User sign up 20 | GF_USERS_AUTO_ASSIGN_ORG: "true" 21 | GF_USERS_AUTO_ASSIGN_ORG_ID: "2" 22 | 23 | # Feature toggles 24 | GF_FEATURE_TOGGLES_ENABLE: "autoMigrateOldPanels,nestedFolders,newVizTooltips,prometheusMetricEncyclopedia,datatrails" 25 | 26 | # Plugins 27 | GF_INSTALL_PLUGINS: "https://storage.googleapis.com/integration-artifacts/grafana-lokiexplore-app/grafana-lokiexplore-app-latest.zip;grafana-lokiexplore-app" 28 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: grafana 9 | namespace: monitoring 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: grafana.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: grafana 24 | port: 25 | number: 3000 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/monitoring/grafana/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/services.yaml:
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: grafana 5 | namespace: monitoring 6 | spec: 7 | ports: 8 | - port: 3000 9 | selector: 10 | app: grafana 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/grafana/volume.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: grafana-storage 5 | namespace: monitoring 6 | labels: 7 | app: grafana 8 | spec: 9 | storageClassName: linode-block-storage-retain 10 | accessModes: 11 | - ReadWriteOncePod 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kube-state-metrics/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: kube-state-metrics 10 | template: 11 | metadata: 12 | labels: 13 | app: kube-state-metrics 14 | spec: 15 | serviceAccountName: kube-state-metrics 16 | containers: 17 | - image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0 18 | imagePullPolicy: Always 19 | args: 20 | - --metric-labels-allowlist=pods=[*] 21 | name: kube-state-metrics 22 | securityContext: 23 | readOnlyRootFilesystem: true 24 | imagePullSecrets: 25 | - name: ghcr-pull-secret 26 | restartPolicy: Always 27 | securityContext: 28 | fsGroup: 2000 29 | runAsUser: 1000 30 | runAsNonRoot: true 31 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kube-state-metrics/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-state-metrics 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: "true" 8 | prometheus.io/port: "8080" 9 | spec: 10 | selector: 11 | app: kube-state-metrics 12 | ports: 13 | - protocol: TCP 14 | port: 8080 15 | targetPort: 8080 16 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kubewatch/README.md: -------------------------------------------------------------------------------- 1 | # Kubewatch 2 | 3 | > **kubewatch** is a Kubernetes watcher that currently publishes notification to available collaboration hubs/notification channels. Run it in your k8s cluster, and you will get event notifications through webhooks. 
4 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kubewatch/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kubewatch-config 5 | namespace: monitoring 6 | data: 7 | .kubewatch.yaml: | 8 | namespace: "" 9 | handler: 10 | discord: 11 | webhook: "" 12 | ignores: 13 | - pixels-discord-channel 14 | - cert-manager-cainjector-leader-election 15 | - cert-manager-controller 16 | - ingress-controller-leader-nginx 17 | - cluster-autoscaler-status 18 | - ingress-controller-leader 19 | resource: 20 | replicationcontroller: true 21 | clusterrole: true 22 | serviceaccount: true 23 | persistentvolume: true 24 | configmap: true 25 | namespace: true 26 | secret: true 27 | deployment: false 28 | replicaset: false 29 | daemonset: false 30 | services: false 31 | pod: false 32 | job: false 33 | node: false 34 | ingress: false 35 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kubewatch/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kubewatch 5 | namespace: monitoring 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: kubewatch 10 | template: 11 | metadata: 12 | labels: 13 | app: kubewatch 14 | spec: 15 | serviceAccountName: kubewatch 16 | containers: 17 | - image: ghcr.io/python-discord/kubewatch:latest 18 | imagePullPolicy: Always 19 | name: kubewatch 20 | volumeMounts: 21 | - name: config-volume 22 | mountPath: /root 23 | envFrom: 24 | - secretRef: 25 | name: kubewatch-secrets 26 | securityContext: 27 | readOnlyRootFilesystem: true 28 | restartPolicy: Always 29 | volumes: 30 | - name: config-volume 31 | configMap: 32 | name: kubewatch-config 33 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kubewatch/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/monitoring/kubewatch/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/kubewatch/service-account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: kubewatch 6 | namespace: monitoring 7 | rules: 8 | - apiGroups: ["", "extensions", "apps", "batch", "rbac.authorization.k8s.io", ] 9 | resources: ["*"] 10 | verbs: ["get", "watch", "list"] 11 | --- 12 | apiVersion: v1 13 | kind: ServiceAccount 14 | metadata: 15 | name: kubewatch 16 | namespace: monitoring 17 | --- 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: ClusterRoleBinding 20 | metadata: 21 | name: kubewatch 22 | namespace: monitoring 23 | roleRef: 24 | apiGroup: rbac.authorization.k8s.io 25 | kind: ClusterRole 26 | name: kubewatch 27 | subjects: 28 | - kind: ServiceAccount 29 | name: kubewatch 30 | namespace: monitoring 31 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/node_exporter/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | 
name: node-exporter 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | spec: 9 | type: ClusterIP 10 | clusterIP: None 11 | selector: 12 | name: node-exporter 13 | phase: prod 14 | ports: 15 | - name: metrics 16 | protocol: TCP 17 | port: 80 18 | targetPort: 9100 19 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/olli/README.md: -------------------------------------------------------------------------------- 1 | # Olli 2 | 3 | This folder contains the deployment information for [Olli](https://github.com/python-discord/olli), our Loki-Discord relay. 4 | 5 | The deployment manifest is located within the repository. 6 | 7 | The rest of the configuration can be applied through `kubectl apply -f .` in this directory. 8 | 9 | A secret called `olli-env` is required, containing a key `WEBHOOK_URL` with the configured Discord webhook. 10 | 11 | | Key | Description | 12 | | --------------| -------------------------- | 13 | | `WEBHOOK_URL` | The Discord webhook to use | 14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/olli/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: olli-config-env 5 | namespace: monitoring 6 | data: 7 | LOKI_API_URL: http://loki.loki.svc.cluster.local:3100 8 | LOKI_JOBS: bots/bot,web/site,bots/sir-lancebot,bots/sir-robin,bots/metricity,snekbox/snekbox,apis/quackstack,bots/black-knight 9 | SERVICE_INTERVAL_MINUTES: '30' 10 | SERVICE_TOKENS: '[{"token":"CRITICAL","color":"#ff5f5f"},{"token":"ERROR","color":"#ff5f5f"},{"token":"WARN","color":"#ffe24d","case_sensitive":true}]' 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/olli/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/monitoring/olli/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/prometheus/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: prometheus 9 | namespace: monitoring 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: prometheus.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: prometheus 24 | port: 25 | number: 9090 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/prometheus/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/monitoring/prometheus/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/prometheus/service-account.yaml:
-------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: prometheus 6 | rules: 7 | - apiGroups: ["*"] 8 | resources: ["*"] 9 | verbs: ["get", "list", "watch"] 10 | - nonResourceURLs: 11 | - "/metrics" 12 | verbs: 13 | - get 14 | --- 15 | apiVersion: v1 16 | kind: ServiceAccount 17 | metadata: 18 | name: prometheus 19 | namespace: monitoring 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: ClusterRoleBinding 23 | metadata: 24 | name: prometheus 25 | roleRef: 26 | apiGroup: rbac.authorization.k8s.io 27 | kind: ClusterRole 28 | name: prometheus 29 | subjects: 30 | - kind: ServiceAccount 31 | name: prometheus 32 | namespace: monitoring 33 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/prometheus/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | namespace: monitoring 6 | spec: 7 | selector: 8 | app: prometheus 9 | ports: 10 | - port: 9090 11 | targetPort: 9090 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/monitoring/prometheus/volume.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: prometheus-storage 5 | namespace: monitoring 6 | labels: 7 | app: prometheus 8 | spec: 9 | storageClassName: linode-block-storage-retain 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 30Gi 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels-modsite/README.md: -------------------------------------------------------------------------------- 1 | # Pixels 2 | 3 | The deployment for the [Pixels modsite project](https://git.pydis.com/pixels-modsite), hosted at https://pixels-mod.pydis.wtf. 4 | 5 | This mod site will give Discord mods easy access to moderation actions for the pixels event. 
6 | 7 | ## Secret 8 | 9 | It requires a `pixels-modsite-env` secret with the following entries: 10 | 11 | | Environment | Description | 12 | |-------------------|-----------------------------------------------------------------| 13 | | DISCORD_BOT_TOKEN | The Discord bot token to use to check roles of users logging in | 14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels-modsite/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: pixels-modsite 5 | namespace: pixels 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: pixels-modsite 11 | template: 12 | metadata: 13 | labels: 14 | app: pixels-modsite 15 | spec: 16 | containers: 17 | - name: pixels-modsite 18 | image: ghcr.io/python-discord/pixels-modsite:latest 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 3000 22 | envFrom: 23 | - secretRef: 24 | name: pixels-modsite-env 25 | securityContext: 26 | readOnlyRootFilesystem: true 27 | imagePullSecrets: 28 | - name: ghcr-pull-secret 29 | securityContext: 30 | fsGroup: 2000 31 | runAsUser: 1000 32 | runAsNonRoot: true 33 | -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels-modsite/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: pixels-modsite 9 | namespace: pixels 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: pixels-mod.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: pixels-modsite 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels-modsite/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/pixels/pixels-modsite/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels-modsite/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: pixels-modsite 5 | namespace: pixels 6 | spec: 7 | selector: 8 | app: pixels-modsite 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 3000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: pixels-config-env 5 | namespace: pixels 6 | data: 7 | BASE_URL: https://pixels.pythondiscord.com 8 | FORCE_LOGIN: 'True' 9 | GUILD_ID: '267624335836053506' 10 | HELPERS_ROLE: '267630620367257601' 11 | LOG_LEVEL: debug 12 | MOD_ROLE: '267630620367257601' 13 | PRODUCTION: 'true' 14 | 
-------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: pixels 9 | namespace: pixels 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: pixels.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: pixels 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/pixels/pixels/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/pixels/pixels/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: pixels 5 | namespace: pixels 6 | spec: 7 | selector: 8 | app: pixels 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/snekbox/snekbox-forms/README.md: -------------------------------------------------------------------------------- 1 | # Snekbox-forms 2 | 3 | This folder contains manifests for a Snekbox service specific to the forms project. This instance has no 3rd party libs installed, unlike regular snekbox, so submissions via forms can only use the stdlib. 4 | 5 | The deployment manifest for this service is based on the manifest found inside the snekbox repository at [python-discord/snekbox](https://github.com/python-discord/snekbox), modified only by removing the volume mount and the 3rd-party dependency installation script. 6 | -------------------------------------------------------------------------------- /kubernetes/namespaces/snekbox/snekbox-forms/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: snekbox-forms 5 | namespace: snekbox 6 | spec: 7 | selector: 8 | app: snekbox-forms 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8060 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/snekbox/snekbox/README.md: -------------------------------------------------------------------------------- 1 | # Snekbox 2 | 3 | This folder contains manifests for the Snekbox service. 4 | 5 | The actual snekbox deployment manifest can be found inside the snekbox repository at [python-discord/snekbox](https://github.com/python-discord/snekbox).
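As a minimal sketch (assuming the deployment from the snekbox repository has already been applied to the cluster), the service manifest in this folder can then be applied with:

``` sh
# Create the snekbox service in the snekbox namespace
$ kubectl apply -f service.yaml
```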
6 | -------------------------------------------------------------------------------- /kubernetes/namespaces/snekbox/snekbox/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: snekbox 5 | namespace: snekbox 6 | spec: 7 | selector: 8 | app: snekbox 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8060 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/bitwarden/README.md: -------------------------------------------------------------------------------- 1 | # BitWarden 2 | 3 | Our internal password manager, used by the admins to share passwords for our services. Hosted at https://bitwarden.pydis.wtf 4 | 5 | To deploy this, first set up the secrets (see below) and then run `kubectl apply -f .` in this folder. 6 | 7 | ## Secrets 8 | This deployment expects a few secrets to exist in a secret called `bitwarden-secret-env`. 9 | 10 | 11 | | Environment | Description | 12 | |-----------------------|-------------------------------------------| 13 | | ADMIN_TOKEN | 64-character token used for initial login | 14 | | DATABASE_URL | Database string: host://user:pass/db | 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/bitwarden/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: bitwarden-config-env 5 | namespace: tooling 6 | data: 7 | # Domain to access bitwarden by 8 | DOMAIN: "https://bitwarden.pydis.wtf" 9 | 10 | # Password hint must be sent to an email when this is false. 11 | # When it's true, it'll be shown right on the page. 12 | SHOW_PASSWORD_HINT: "false" 13 | 14 | # Admins only, please! 
15 | SIGNUPS_ALLOWED: "false" 16 | 17 | # Used for LiveSync 18 | WEBSOCKET_ENABLED: "true" 19 | 20 | # Max conns to the DB 21 | DATABASE_MAX_CONNS: "2" 22 | 23 | # Force bitwarden to use postgres, rather than its own volume 24 | I_REALLY_WANT_VOLATILE_STORAGE: "true" 25 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/bitwarden/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: bitwarden 5 | namespace: tooling 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: bitwarden 11 | template: 12 | metadata: 13 | labels: 14 | app: bitwarden 15 | spec: 16 | containers: 17 | - name: bitwarden 18 | image: vaultwarden/server:latest 19 | imagePullPolicy: Always 20 | resources: 21 | requests: 22 | cpu: 1m 23 | memory: 50Mi 24 | limits: 25 | cpu: 50m 26 | memory: 100Mi 27 | ports: 28 | - containerPort: 80 29 | envFrom: 30 | - secretRef: 31 | name: bitwarden-secret-env 32 | - configMapRef: 33 | name: bitwarden-config-env 34 | securityContext: 35 | readOnlyRootFilesystem: true 36 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/bitwarden/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: bitwarden 9 | namespace: tooling 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: bitwarden.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: bitwarden 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/bitwarden/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/tooling/bitwarden/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/bitwarden/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: bitwarden 5 | namespace: tooling 6 | spec: 7 | ports: 8 | - port: 80 9 | selector: 10 | app: bitwarden 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/ff-bot/README.md: -------------------------------------------------------------------------------- 1 | # Fast-Forward Bot 2 | 3 | We deploy an instance of [ff_bot](https://github.com/jb3/ff_bot) to aid in 4 | repository management. 5 | 6 | See the documentation of the `ff_bot` project for information on deployment and 7 | usage, but it boils down to authorised users (dictated by the repository policy 8 | file) running `/merge` and the bot running a fast-forward merge.
9 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/ff-bot/deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ff-bot 5 | namespace: tooling 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: ff-bot 11 | template: 12 | metadata: 13 | labels: 14 | app: ff-bot 15 | spec: 16 | containers: 17 | - name: ff-bot 18 | image: ghcr.io/jb3/ff_bot:latest 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 4000 22 | envFrom: 23 | - secretRef: 24 | name: ff-bot-env 25 | securityContext: 26 | readOnlyRootFilesystem: true 27 | volumeMounts: 28 | - name: tmpfs 29 | mountPath: /tmp 30 | securityContext: 31 | fsGroup: 2000 32 | runAsUser: 1000 33 | runAsNonRoot: true 34 | volumes: 35 | - name: tmpfs 36 | emptyDir: 37 | medium: Memory 38 | sizeLimit: 500Mi 39 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/ff-bot/ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: ff-bot 9 | namespace: tooling 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: ff-bot.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: ff-bot 24 | port: 25 | number: 4000 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/ff-bot/secrets.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/tooling/ff-bot/secrets.yml -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/ff-bot/service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ff-bot 5 | namespace: tooling 6 | spec: 7 | selector: 8 | app: ff-bot 9 | ports: 10 | - port: 4000 11 | targetPort: 4000 12 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/keycloak/README.md: -------------------------------------------------------------------------------- 1 | # Keycloak 2 | 3 | This folder contains manifests for the deployment of 4 | [Keycloak](https://keycloak.org). 5 | 6 | We use this as an OpenID and SAML provider to authenticate to other services 7 | with SSO. 
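As a rough sketch (assuming the git-crypted secrets have been unlocked and `kubectl` targets the cluster), the manifests in this folder can be applied with:

``` sh
# Apply the Keycloak config map, secrets, and service
$ kubectl apply -f configmap.yaml -f secrets.yaml -f service.yaml
```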
8 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/keycloak/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: keycloak-config-env 5 | namespace: tooling 6 | data: 7 | # Set the hostname for outbound traffic and enable the feature to read that 8 | # environment variable 9 | KC_HOSTNAME: "id.pydis.wtf" 10 | 11 | # Set the location of the TLS certificates generated by Vault 12 | # KC_HTTPS_CERTIFICATE_FILE: "/vault/secrets/server.crt" 13 | # KC_HTTPS_CERTIFICATE_KEY_FILE: "/vault/secrets/server.key" 14 | 15 | # Proxy settings 16 | KC_HTTP_ENABLED: "true" 17 | KC_PROXY_HEADERS: "xforwarded" 18 | 19 | # Database configuration 20 | KC_DB: "postgres" 21 | KC_DB_USERNAME: "keycloak" 22 | KC_DB_URL_DATABASE: "keycloak" 23 | KC_DB_URL_HOST: "lovelace.box.pydis.wtf" 24 | 25 | # Trusted cert for the connection to the LDAP server 26 | KC_TRUSTSTORE_PATHS: "/opt/pydis/ca-store/pydis-ipa-cert.pem" 27 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/keycloak/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/tooling/keycloak/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/keycloak/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: keycloak 5 | namespace: tooling 6 | spec: 7 | ports: 8 | - name: http 9 | port: 8080 10 | - name: https 11 | port: 8443 12 | selector: 13 | app: keycloak 14 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/metabase/README.md: -------------------------------------------------------------------------------- 1 | # Metabase 2 | These manifests provision an instance of Metabase, our database analysis tool. 
3 | 4 | ## Secrets 5 | A single secret of name `metabase-env` is used with the following values: 6 | 7 | | Environment | Description | 8 | |--------------|-------------------------------------------| 9 | | MB_DB_DBNAME | Database name for internal metabase usage | 10 | | MB_DB_HOST | Address of PostgreSQL database | 11 | | MB_DB_TYPE | Always postgres | 12 | | MB_DB_PASS | Database user password | 13 | | MB_DB_PORT | Always 5432 | 14 | | MB_DB_USER | User for metabase internal | 15 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/metabase/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: metabase 5 | namespace: tooling 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: metabase 11 | template: 12 | metadata: 13 | labels: 14 | app: metabase 15 | spec: 16 | securityContext: 17 | fsGroup: 2000 18 | runAsUser: 1000 19 | runAsNonRoot: true 20 | containers: 21 | - name: metabase 22 | image: metabase/metabase:latest 23 | imagePullPolicy: Always 24 | ports: 25 | - containerPort: 3000 26 | envFrom: 27 | - secretRef: 28 | name: metabase-env 29 | volumeMounts: 30 | - mountPath: /plugins 31 | name: mb-plugins-volume 32 | securityContext: 33 | readOnlyRootFilesystem: true 34 | volumes: 35 | - name: mb-plugins-volume 36 | emptyDir: {} 37 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/metabase/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: metabase 9 | namespace: tooling 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: metabase.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: metabase 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/metabase/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/tooling/metabase/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/metabase/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: metabase 5 | namespace: tooling 6 | spec: 7 | selector: 8 | app: metabase 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 3000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/policy-bot/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | 
nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: policy-bot 9 | namespace: tooling 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: policy-bot.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: policy-bot 24 | port: 25 | number: 80 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/policy-bot/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/tooling/policy-bot/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/tooling/policy-bot/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: policy-bot 5 | namespace: tooling 6 | spec: 7 | selector: 8 | app: policy-bot 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8080 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/vault/README.md: -------------------------------------------------------------------------------- 1 | # Vault 2 | 3 | We deploy an instance of [HashiCorp 4 | Vault](https://developer.hashicorp.com/vault) to store various types of secret 5 | data used on the cluster. 6 | 7 | We use it to issue X.509 certificates for mutual TLS setups. 8 | 9 | In addition, the Helm chart we use supports secret injection based on 10 | annotations present on pods deployed to the cluster; see the 11 | [injector documentation](https://developer.hashicorp.com/vault/docs/platform/k8s/injector) for details. 12 | 13 | ## Setup 14 | 15 | 1. Add the Helm repository for HashiCorp: 16 | 17 | ``` sh 18 | $ helm repo add hashicorp https://helm.releases.hashicorp.com 19 | "hashicorp" has been added to your repositories 20 | ``` 21 | 22 | 2. Install Vault to the `vault` namespace using the following command: 23 | 24 | ``` sh 25 | $ helm install --create-namespace vault hashicorp/vault --namespace vault 26 | ... 27 | ``` 28 | 29 | 3. Voila, Vault is deployed in the `vault` namespace. 30 | -------------------------------------------------------------------------------- /kubernetes/namespaces/vault/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: vault 9 | namespace: vault 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pydis.wtf" 14 | secretName: pydis.wtf-tls 15 | rules: 16 | - host: vault.pydis.wtf 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: vault 24 | port: 25 | number: 8200 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/pinnwand/README.md: -------------------------------------------------------------------------------- 1 | # pinnwand 2 | These manifests provision an instance of the pinnwand service used on https://paste.pythondiscord.com.
3 | 4 | An init container is used to download the Python Discord banner logo and save it to a volume, as pinnwand expects it to be present within the image. 5 | 6 | ## Secrets & config 7 | This deployment expects an environment variable named `PINNWAND_DATABASE_URI` to exist in a secret called `pinnwand-postgres-connection`. 8 | All other configuration can be found in `defaults-configmap.yaml`. 9 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/pinnwand/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | # block HEAD requests 9 | nginx.ingress.kubernetes.io/configuration-snippet: | 10 | if ($request_method = HEAD) { 11 | return 444; 12 | } 13 | name: pinnwand 14 | namespace: web 15 | spec: 16 | tls: 17 | - hosts: 18 | - "*.pythondiscord.com" 19 | secretName: pythondiscord.com-tls 20 | rules: 21 | - host: paste.pythondiscord.com 22 | http: 23 | paths: 24 | - path: / 25 | pathType: Prefix 26 | backend: 27 | service: 28 | name: pinnwand 29 | port: 30 | number: 80 31 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/pinnwand/secrets.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/web/pinnwand/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/web/pinnwand/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: pinnwand 5 | namespace: web 6 | spec: 7 | selector: 8 | app: pinnwand 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 8000 13 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/public-stats/README.md: -------------------------------------------------------------------------------- 1 | # Public Stats 2 | 3 | Python Discord Public Stats is the public portal for viewing server statistics, available at https://stats.pythondiscord.com/. 4 | 5 | The deployment manifest is located in the [python-discord/public-stats](https://github.com/python-discord/public-stats/blob/master/deployment.yaml) repo. 6 | 7 | To apply the service and ingress, run `kubectl apply -f .` in this folder.
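For example, a minimal usage sketch (assuming a local checkout of this repository and a `kubectl` context pointing at the cluster; the path simply mirrors this folder's location in the repo):

``` sh
$ cd kubernetes/namespaces/web/public-stats
$ kubectl apply -f .
```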
8 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/public-stats/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | name: public-stats 9 | namespace: web 10 | spec: 11 | tls: 12 | - hosts: 13 | - "*.pythondiscord.com" 14 | secretName: pythondiscord.com-tls 15 | rules: 16 | - host: stats.pythondiscord.com 17 | http: 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: public-stats 24 | port: 25 | number: 8000 26 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/public-stats/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: public-stats 5 | namespace: web 6 | spec: 7 | ports: 8 | - port: 8000 9 | selector: 10 | app: public-stats 11 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/site/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 7 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 8 | nginx.ingress.kubernetes.io/server-snippet: | 9 | location ~* /metrics { 10 | deny all; 11 | return 403; 12 | } 13 | name: site 14 | namespace: web 15 | spec: 16 | tls: 17 | - hosts: 18 | - "*.pythondiscord.com" 19 | secretName: pythondiscord.com-tls 20 | rules: 21 | - host: www.pythondiscord.com 22 | http: 23 | paths: 24 | - path: / 25 | pathType: Prefix 26 | backend: 27 | service: 28 | name: site 29 | port: 30 | number: 80 31 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/site/redirect.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | nginx.ingress.kubernetes.io/auth-tls-verify-client: "on" 6 | nginx.ingress.kubernetes.io/server-snippet: | 7 | location ~* / { 8 | return 308 https://www.pythondiscord.com$request_uri; 9 | } 10 | nginx.ingress.kubernetes.io/auth-tls-secret: "kube-system/mtls-client-crt-bundle" 11 | nginx.ingress.kubernetes.io/auth-tls-error-page: "https://maintenance.python-discord.workers.dev/" 12 | name: www-redirect 13 | namespace: web 14 | spec: 15 | tls: 16 | - hosts: 17 | - "pythondiscord.com" 18 | secretName: pythondiscord.com-tls 19 | rules: 20 | - host: pythondiscord.com 21 | http: 22 | paths: 23 | - path: / 24 | pathType: Prefix 25 | backend: 26 | service: 27 | name: site 28 | port: 29 | number: 80 30 | -------------------------------------------------------------------------------- /kubernetes/namespaces/web/site/secrets.yaml: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/namespaces/web/site/secrets.yaml -------------------------------------------------------------------------------- /kubernetes/namespaces/web/site/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: site 5 | namespace: web 6 | annotations: 7 | prometheus.io/scrape: "true" 8 | prometheus.io/port: "8000" 9 | spec: 10 | selector: 11 | app: site 12 | ports: 13 | - protocol: TCP 14 | port: 80 15 | targetPort: 8000 16 | -------------------------------------------------------------------------------- /kubernetes/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python-discord/infra/239e4c9071ff744f5aa4ce3ca8143107e5d4759a/kubernetes/scripts/__init__.py -------------------------------------------------------------------------------- /kubernetes/scripts/lint_manifests.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from pathlib import Path 4 | 5 | 6 | def get_all_manifests() -> list[str]: 7 | """Return a list of file paths that look like k8s manifests.""" 8 | likely_manifests = [] 9 | for file in Path.cwd().glob("**/*.yaml"): 10 | if file.name in {"secrets.yaml", "ghcr-pull-secrets.yaml", "ssh-secrets.yaml"}: 11 | # Don't lint secret files as they're git-crypted 12 | continue 13 | if file.stem.startswith("_"): 14 | # Ignore manifests that start with _ 15 | continue 16 | if "apiVersion:" not in file.read_text(): 17 | # Probably not a manifest 18 | continue 19 | likely_manifests.append(str(file)) 20 | return likely_manifests 21 | 22 | 23 | if __name__ == "__main__": 24 | if sys.argv[1] == "diff": 25 | arg = " -f ".join([""] + get_all_manifests()) 26 | os.system("kubectl diff" + arg) # noqa: S605 27 | elif sys.argv[1] == "find": 28 | print("\n".join(get_all_manifests())) # noqa: T201 29 | -------------------------------------------------------------------------------- /kubernetes/service-accounts/internal-tls-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: internal-tls-issuer 5 | namespace: tooling 6 | -------------------------------------------------------------------------------- /kubernetes/service-accounts/postgres-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: postgres-issuer 5 | namespace: apis 6 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ], 6 | "kubernetes": { 7 | "fileMatch": "kubernetes/.+\\.ya?ml" 8 | }, 9 | "git-submodules": { 10 | "enabled": true 11 | }, 12 | "packageRules": [ 13 | { 14 | "matchManagers": ["ansible", "ansible-galaxy"], 15 | "groupName": "ansible" 16 | }, 17 | { 18 | "matchManagers": ["poetry"], 19 | "groupName": "poetry" 20 | }, 21 | { 22 | "matchManagers": ["github-actions"], 23 | "groupName": "github-actions" 24 | } 25 | ], 26 | "prHourlyLimit": 0, 27 | "commitBodyTable": true, 28 | "rebaseWhen": "behind-base-branch" 29 | } 30 | 
--------------------------------------------------------------------------------